4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
27 #pragma ident "%Z%%M% %I% %E% SMI"
30 * This file contains all the routines used when modifying on-disk SPA state.
31 * This includes opening, importing, destroying, exporting a pool, and syncing a pool.
35 #include <sys/zfs_context.h>
36 #include <sys/fm/fs/zfs.h>
37 #include <sys/spa_impl.h>
39 #include <sys/zio_checksum.h>
40 #include <sys/zio_compress.h>
42 #include <sys/dmu_tx.h>
45 #include <sys/vdev_impl.h>
46 #include <sys/metaslab.h>
47 #include <sys/uberblock_impl.h>
50 #include <sys/dmu_traverse.h>
51 #include <sys/dmu_objset.h>
52 #include <sys/unique.h>
53 #include <sys/dsl_pool.h>
54 #include <sys/dsl_dataset.h>
55 #include <sys/dsl_dir.h>
56 #include <sys/dsl_prop.h>
57 #include <sys/dsl_synctask.h>
58 #include <sys/fs/zfs.h>
59 #include <sys/callb.h>
60 #include <sys/sunddi.h>
62 int zio_taskq_threads = 0;
63 SYSCTL_DECL(_vfs_zfs);
64 SYSCTL_NODE(_vfs_zfs, OID_AUTO, zio, CTLFLAG_RW, 0, "ZFS ZIO");
65 TUNABLE_INT("vfs.zfs.zio.taskq_threads", &zio_taskq_threads);
66 SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, taskq_threads, CTLFLAG_RW,
67 &zio_taskq_threads, 0, "Number of ZIO threads per ZIO type");
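/*
 * A zio_taskq_threads value of 0 (the default) means "auto": spa_activate()
 * below falls back to max_ncpus threads for each of the per-type issue and
 * intr taskqs it creates.
 */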
71 * ==========================================================================
72 * SPA state manipulation (open/create/destroy/import/export)
73 * ==========================================================================
77 spa_error_entry_compare(const void *a, const void *b)
79 spa_error_entry_t *sa = (spa_error_entry_t *)a;
80 spa_error_entry_t *sb = (spa_error_entry_t *)b;
83 ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
84 sizeof (zbookmark_t));
95 * Utility function which retrieves copies of the current logs and
96 * re-initializes them in the process.
99 spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
101 ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
103 bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
104 bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
106 avl_create(&spa->spa_errlist_scrub,
107 spa_error_entry_compare, sizeof (spa_error_entry_t),
108 offsetof(spa_error_entry_t, se_avl));
109 avl_create(&spa->spa_errlist_last,
110 spa_error_entry_compare, sizeof (spa_error_entry_t),
111 offsetof(spa_error_entry_t, se_avl));
115 * Activate an uninitialized pool.
118 spa_activate(spa_t *spa)
121 int nthreads = zio_taskq_threads;
124 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
126 spa->spa_state = POOL_STATE_ACTIVE;
128 spa->spa_normal_class = metaslab_class_create();
131 nthreads = max_ncpus;
132 for (t = 0; t < ZIO_TYPES; t++) {
133 snprintf(name, sizeof(name), "spa_zio_issue %d", t);
134 spa->spa_zio_issue_taskq[t] = taskq_create(name, nthreads,
135 maxclsyspri, 50, INT_MAX, TASKQ_PREPOPULATE);
136 snprintf(name, sizeof(name), "spa_zio_intr %d", t);
137 spa->spa_zio_intr_taskq[t] = taskq_create(name, nthreads,
138 maxclsyspri, 50, INT_MAX, TASKQ_PREPOPULATE);
141 rw_init(&spa->spa_traverse_lock, NULL, RW_DEFAULT, NULL);
143 mutex_init(&spa->spa_uberblock_lock, NULL, MUTEX_DEFAULT, NULL);
144 mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
145 mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
146 mutex_init(&spa->spa_config_lock.scl_lock, NULL, MUTEX_DEFAULT, NULL);
147 cv_init(&spa->spa_config_lock.scl_cv, NULL, CV_DEFAULT, NULL);
148 mutex_init(&spa->spa_sync_bplist.bpl_lock, NULL, MUTEX_DEFAULT, NULL);
149 mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
150 mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
152 list_create(&spa->spa_dirty_list, sizeof (vdev_t),
153 offsetof(vdev_t, vdev_dirty_node));
155 txg_list_create(&spa->spa_vdev_txg_list,
156 offsetof(struct vdev, vdev_txg_node));
158 avl_create(&spa->spa_errlist_scrub,
159 spa_error_entry_compare, sizeof (spa_error_entry_t),
160 offsetof(spa_error_entry_t, se_avl));
161 avl_create(&spa->spa_errlist_last,
162 spa_error_entry_compare, sizeof (spa_error_entry_t),
163 offsetof(spa_error_entry_t, se_avl));
167 * Opposite of spa_activate().
170 spa_deactivate(spa_t *spa)
174 ASSERT(spa->spa_sync_on == B_FALSE);
175 ASSERT(spa->spa_dsl_pool == NULL);
176 ASSERT(spa->spa_root_vdev == NULL);
178 ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
180 txg_list_destroy(&spa->spa_vdev_txg_list);
182 list_destroy(&spa->spa_dirty_list);
184 for (t = 0; t < ZIO_TYPES; t++) {
185 taskq_destroy(spa->spa_zio_issue_taskq[t]);
186 taskq_destroy(spa->spa_zio_intr_taskq[t]);
187 spa->spa_zio_issue_taskq[t] = NULL;
188 spa->spa_zio_intr_taskq[t] = NULL;
191 metaslab_class_destroy(spa->spa_normal_class);
192 spa->spa_normal_class = NULL;
195 * If this was part of an import or the open otherwise failed, we may
196 * still have errors left in the queues. Empty them just in case.
198 spa_errlog_drain(spa);
200 avl_destroy(&spa->spa_errlist_scrub);
201 avl_destroy(&spa->spa_errlist_last);
203 rw_destroy(&spa->spa_traverse_lock);
204 mutex_destroy(&spa->spa_uberblock_lock);
205 mutex_destroy(&spa->spa_errlog_lock);
206 mutex_destroy(&spa->spa_errlist_lock);
207 mutex_destroy(&spa->spa_config_lock.scl_lock);
208 cv_destroy(&spa->spa_config_lock.scl_cv);
209 mutex_destroy(&spa->spa_sync_bplist.bpl_lock);
210 mutex_destroy(&spa->spa_history_lock);
211 mutex_destroy(&spa->spa_props_lock);
213 spa->spa_state = POOL_STATE_UNINITIALIZED;
217 * Verify a pool configuration, and construct the vdev tree appropriately. This
218 * will create all the necessary vdevs in the appropriate layout, with each vdev
219 * in the CLOSED state. This will prep the pool before open/creation/import.
220 * All vdev validation is done by the vdev_alloc() routine.
223 spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
224 uint_t id, int atype)
230 if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
233 if ((*vdp)->vdev_ops->vdev_op_leaf)
236 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
237 &child, &children) != 0) {
243 for (c = 0; c < children; c++) {
245 if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
253 ASSERT(*vdp != NULL);
259 * Opposite of spa_load().
262 spa_unload(spa_t *spa)
269 spa_async_suspend(spa);
274 if (spa->spa_sync_on) {
275 txg_sync_stop(spa->spa_dsl_pool);
276 spa->spa_sync_on = B_FALSE;
280 * Wait for any outstanding prefetch I/O to complete.
282 spa_config_enter(spa, RW_WRITER, FTAG);
283 spa_config_exit(spa, FTAG);
286 * Close the dsl pool.
288 if (spa->spa_dsl_pool) {
289 dsl_pool_close(spa->spa_dsl_pool);
290 spa->spa_dsl_pool = NULL;
296 if (spa->spa_root_vdev)
297 vdev_free(spa->spa_root_vdev);
298 ASSERT(spa->spa_root_vdev == NULL);
300 for (i = 0; i < spa->spa_nspares; i++)
301 vdev_free(spa->spa_spares[i]);
302 if (spa->spa_spares) {
303 kmem_free(spa->spa_spares, spa->spa_nspares * sizeof (void *));
304 spa->spa_spares = NULL;
306 if (spa->spa_sparelist) {
307 nvlist_free(spa->spa_sparelist);
308 spa->spa_sparelist = NULL;
311 spa->spa_async_suspended = 0;
315 * Load (or re-load) the current list of vdevs describing the active spares for
316 * this pool. When this is called, we have some form of basic information in
317 * 'spa_sparelist'. We parse this into vdevs, try to open them, and then
318 * re-generate a more complete list including status information.
321 spa_load_spares(spa_t *spa)
329 * First, close and free any existing spare vdevs.
331 for (i = 0; i < spa->spa_nspares; i++) {
332 vd = spa->spa_spares[i];
334 /* Undo the call to spa_activate() below */
335 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid)) != NULL &&
337 spa_spare_remove(tvd);
343 kmem_free(spa->spa_spares, spa->spa_nspares * sizeof (void *));
345 if (spa->spa_sparelist == NULL)
348 VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
349 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
351 spa->spa_nspares = (int)nspares;
352 spa->spa_spares = NULL;
358 * Construct the array of vdevs, opening them to get status in the
359 * process. For each spare, there are potentially two different vdev_t
360 * structures associated with it: one in the list of spares (used only
361 * for basic validation purposes) and one in the active vdev
362 * configuration (if it's spared in). During this phase we open and
363 * validate each vdev on the spare list. If the vdev also exists in the
364 * active configuration, then we also mark this vdev as an active spare.
366 spa->spa_spares = kmem_alloc(nspares * sizeof (void *), KM_SLEEP);
367 for (i = 0; i < spa->spa_nspares; i++) {
368 VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
369 VDEV_ALLOC_SPARE) == 0);
372 spa->spa_spares[i] = vd;
374 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid)) != NULL) {
375 if (!tvd->vdev_isspare)
379 * We only mark the spare active if we were successfully
380 * able to load the vdev. Otherwise, importing a pool
381 * with a bad active spare would result in strange
382 * behavior, because multiple pools would think the spare
383 * is actively in use.
385 * There is a vulnerability here to an equally bizarre
386 * circumstance, where a dead active spare is later
387 * brought back to life (onlined or otherwise). Given
388 * the rarity of this scenario, and the extra complexity
389 * it adds, we ignore the possibility.
391 if (!vdev_is_dead(tvd))
392 spa_spare_activate(tvd);
395 if (vdev_open(vd) != 0)
399 (void) vdev_validate_spare(vd);
403 * Recompute the stashed list of spares, with status information this time.
406 VERIFY(nvlist_remove(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
407 DATA_TYPE_NVLIST_ARRAY) == 0);
409 spares = kmem_alloc(spa->spa_nspares * sizeof (void *), KM_SLEEP);
410 for (i = 0; i < spa->spa_nspares; i++)
411 spares[i] = vdev_config_generate(spa, spa->spa_spares[i],
413 VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
414 spares, spa->spa_nspares) == 0);
415 for (i = 0; i < spa->spa_nspares; i++)
416 nvlist_free(spares[i]);
417 kmem_free(spares, spa->spa_nspares * sizeof (void *));
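/*
 * Fetch a packed nvlist stored in the MOS: the packed size is kept in the
 * object's bonus buffer and the packed bytes in the object data itself.
 * The unpacked nvlist is returned in *value.
 */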
421 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
429 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
430 nvsize = *(uint64_t *)db->db_data;
431 dmu_buf_rele(db, FTAG);
433 packed = kmem_alloc(nvsize, KM_SLEEP);
434 error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
436 error = nvlist_unpack(packed, nvsize, value, 0);
437 kmem_free(packed, nvsize);
443 * Load an existing storage pool, using the pool's builtin spa_config as a
444 * source of configuration information.
447 spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
450 nvlist_t *nvroot = NULL;
452 uberblock_t *ub = &spa->spa_uberblock;
453 uint64_t config_cache_txg = spa->spa_config_txg;
458 spa->spa_load_state = state;
460 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
461 nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
467 * Versioning wasn't explicitly added to the label until later, so if
468 * it's not present treat it as the initial version.
470 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
471 version = ZFS_VERSION_INITIAL;
473 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
474 &spa->spa_config_txg);
476 if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
477 spa_guid_exists(pool_guid, 0)) {
482 spa->spa_load_guid = pool_guid;
485 * Parse the configuration into a vdev tree. We explicitly set the
486 * value that will be returned by spa_version() since parsing the
487 * configuration requires knowing the version number.
489 spa_config_enter(spa, RW_WRITER, FTAG);
490 spa->spa_ubsync.ub_version = version;
491 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
492 spa_config_exit(spa, FTAG);
497 ASSERT(spa->spa_root_vdev == rvd);
498 ASSERT(spa_guid(spa) == pool_guid);
501 * Try to open all vdevs, loading each label in the process.
503 error = vdev_open(rvd);
508 * Validate the labels for all leaf vdevs. We need to grab the config
509 * lock because all label I/O is done with the ZIO_FLAG_CONFIG_HELD flag.
512 spa_config_enter(spa, RW_READER, FTAG);
513 error = vdev_validate(rvd);
514 spa_config_exit(spa, FTAG);
519 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
525 * Find the best uberblock.
527 bzero(ub, sizeof (uberblock_t));
529 zio = zio_root(spa, NULL, NULL,
530 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
531 vdev_uberblock_load(zio, rvd, ub);
532 error = zio_wait(zio);
535 * If we weren't able to find a single valid uberblock, return failure.
537 if (ub->ub_txg == 0) {
538 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
539 VDEV_AUX_CORRUPT_DATA);
545 * If the pool is newer than the code, we can't open it.
547 if (ub->ub_version > ZFS_VERSION) {
548 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
549 VDEV_AUX_VERSION_NEWER);
555 * If the vdev guid sum doesn't match the uberblock, we have an
556 * incomplete configuration.
558 if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
559 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
560 VDEV_AUX_BAD_GUID_SUM);
566 * Initialize internal SPA structures.
568 spa->spa_state = POOL_STATE_ACTIVE;
569 spa->spa_ubsync = spa->spa_uberblock;
570 spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
571 error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
573 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
574 VDEV_AUX_CORRUPT_DATA);
577 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
579 if (zap_lookup(spa->spa_meta_objset,
580 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
581 sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
582 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
583 VDEV_AUX_CORRUPT_DATA);
592 if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
593 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
594 VDEV_AUX_CORRUPT_DATA);
600 * hostid is set after the root file system is mounted, so
601 * ignore the check until it's done.
603 if (nvlist_lookup_uint64(newconfig, ZPOOL_CONFIG_HOSTID,
604 &hostid) == 0 && root_mounted()) {
606 unsigned long myhostid = 0;
608 VERIFY(nvlist_lookup_string(newconfig,
609 ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
611 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
612 if ((unsigned long)hostid != myhostid) {
613 cmn_err(CE_WARN, "pool '%s' could not be "
614 "loaded as it was last accessed by "
615 "another system (host: %s hostid: 0x%lx). "
616 "See: http://www.sun.com/msg/ZFS-8000-EY",
617 spa->spa_name, hostname,
618 (unsigned long)hostid);
624 spa_config_set(spa, newconfig);
629 return (spa_load(spa, newconfig, state, B_TRUE));
632 if (zap_lookup(spa->spa_meta_objset,
633 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
634 sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
635 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
636 VDEV_AUX_CORRUPT_DATA);
642 * Load the bit that tells us to use the new accounting function
643 * (raid-z deflation). If we have an older pool, this will not be present.
646 error = zap_lookup(spa->spa_meta_objset,
647 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
648 sizeof (uint64_t), 1, &spa->spa_deflate);
649 if (error != 0 && error != ENOENT) {
650 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
651 VDEV_AUX_CORRUPT_DATA);
657 * Load the persistent error log. If we have an older pool, this will not be present.
660 error = zap_lookup(spa->spa_meta_objset,
661 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
662 sizeof (uint64_t), 1, &spa->spa_errlog_last);
663 if (error != 0 && error != ENOENT) {
664 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
665 VDEV_AUX_CORRUPT_DATA);
670 error = zap_lookup(spa->spa_meta_objset,
671 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
672 sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
673 if (error != 0 && error != ENOENT) {
674 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
675 VDEV_AUX_CORRUPT_DATA);
681 * Load the history object. If we have an older pool, this
682 * will not be present.
684 error = zap_lookup(spa->spa_meta_objset,
685 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
686 sizeof (uint64_t), 1, &spa->spa_history);
687 if (error != 0 && error != ENOENT) {
688 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
689 VDEV_AUX_CORRUPT_DATA);
695 * Load any hot spares for this pool.
697 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
698 DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares_object);
699 if (error != 0 && error != ENOENT) {
700 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
701 VDEV_AUX_CORRUPT_DATA);
706 ASSERT(spa_version(spa) >= ZFS_VERSION_SPARES);
707 if (load_nvlist(spa, spa->spa_spares_object,
708 &spa->spa_sparelist) != 0) {
709 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
710 VDEV_AUX_CORRUPT_DATA);
715 spa_config_enter(spa, RW_WRITER, FTAG);
716 spa_load_spares(spa);
717 spa_config_exit(spa, FTAG);
720 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
721 DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);
723 if (error && error != ENOENT) {
724 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
725 VDEV_AUX_CORRUPT_DATA);
731 (void) zap_lookup(spa->spa_meta_objset,
732 spa->spa_pool_props_object,
733 zpool_prop_to_name(ZFS_PROP_BOOTFS),
734 sizeof (uint64_t), 1, &spa->spa_bootfs);
738 * Load the vdev state for all toplevel vdevs.
743 * Propagate the leaf DTLs we just loaded all the way up the tree.
745 spa_config_enter(spa, RW_WRITER, FTAG);
746 vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
747 spa_config_exit(spa, FTAG);
750 * Check the state of the root vdev. If it can't be opened, it
751 * indicates one or more toplevel vdevs are faulted.
753 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
758 if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) {
760 int need_update = B_FALSE;
764 * Claim log blocks that haven't been committed yet.
765 * This must all happen in a single txg.
767 tx = dmu_tx_create_assigned(spa_get_dsl(spa),
769 (void) dmu_objset_find(spa->spa_name,
770 zil_claim, tx, DS_FIND_CHILDREN);
773 spa->spa_sync_on = B_TRUE;
774 txg_sync_start(spa->spa_dsl_pool);
777 * Wait for all claims to sync.
779 txg_wait_synced(spa->spa_dsl_pool, 0);
782 * If the config cache is stale, or we have uninitialized
783 * metaslabs (see spa_vdev_add()), then update the config.
785 if (config_cache_txg != spa->spa_config_txg ||
786 state == SPA_LOAD_IMPORT)
787 need_update = B_TRUE;
789 for (c = 0; c < rvd->vdev_children; c++)
790 if (rvd->vdev_child[c]->vdev_ms_array == 0)
791 need_update = B_TRUE;
794 * Update the config cache asynchronously in case we're the
795 * root pool, in which case the config cache isn't writable yet.
798 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
803 if (error && error != EBADF)
804 zfs_ereport_post(FM_EREPORT_ZFS_POOL, spa, NULL, NULL, 0, 0);
805 spa->spa_load_state = SPA_LOAD_NONE;
814 * The import case is identical to an open except that the configuration is sent
815 * down from userland, instead of grabbed from the configuration cache. For the
816 * case of an open, the pool configuration will exist in the
817 * POOL_STATE_UNINITIALIZED state.
819 * The stats information (gen/count/ustats) is used to gather vdev statistics at
820 * the same time we open the pool, without having to keep around the spa_t in some ambiguous state.
824 spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
828 int loaded = B_FALSE;
829 int locked = B_FALSE;
834 * As disgusting as this is, we need to support recursive calls to this
835 * function because dsl_dir_open() is called during spa_load(), and ends
836 * up calling spa_open() again. The real fix is to figure out how to
837 * avoid dsl_dir_open() calling this in the first place.
839 if (mutex_owner(&spa_namespace_lock) != curthread) {
840 mutex_enter(&spa_namespace_lock);
844 if ((spa = spa_lookup(pool)) == NULL) {
846 mutex_exit(&spa_namespace_lock);
849 if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
853 error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);
855 if (error == EBADF) {
857 * If vdev_validate() returns failure (indicated by
858 * EBADF), it means that one of the vdevs reports that
859 * the pool has been exported or destroyed. If
860 * this is the case, the config cache is out of sync and
861 * we should remove the pool from the namespace.
863 zfs_post_ok(spa, NULL);
869 mutex_exit(&spa_namespace_lock);
875 * We can't open the pool, but we still have useful
876 * information: the state of each vdev after the
877 * attempted vdev_open(). Return this to the user.
879 if (config != NULL && spa->spa_root_vdev != NULL) {
880 spa_config_enter(spa, RW_READER, FTAG);
881 *config = spa_config_generate(spa, NULL, -1ULL,
883 spa_config_exit(spa, FTAG);
887 spa->spa_last_open_failed = B_TRUE;
889 mutex_exit(&spa_namespace_lock);
893 zfs_post_ok(spa, NULL);
894 spa->spa_last_open_failed = B_FALSE;
900 spa_open_ref(spa, tag);
902 mutex_exit(&spa_namespace_lock);
906 if (config != NULL) {
907 spa_config_enter(spa, RW_READER, FTAG);
908 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
909 spa_config_exit(spa, FTAG);
913 * If we just loaded the pool, resilver anything that's out of date.
915 if (loaded && (spa_mode & FWRITE))
916 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);
922 spa_open(const char *name, spa_t **spapp, void *tag)
924 return (spa_open_common(name, spapp, tag, NULL));
928 * Lookup the given spa_t, incrementing the inject count in the process,
929 * preventing it from being exported or destroyed.
932 spa_inject_addref(char *name)
936 mutex_enter(&spa_namespace_lock);
937 if ((spa = spa_lookup(name)) == NULL) {
938 mutex_exit(&spa_namespace_lock);
941 spa->spa_inject_ref++;
942 mutex_exit(&spa_namespace_lock);
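/*
 * Release a reference taken by spa_inject_addref().
 */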
948 spa_inject_delref(spa_t *spa)
950 mutex_enter(&spa_namespace_lock);
951 spa->spa_inject_ref--;
952 mutex_exit(&spa_namespace_lock);
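/*
 * Add the current list of hot spares (spa_sparelist) to the given pool
 * config; any spare that is actively in use is reported as CANT_OPEN with
 * aux state SPARED.
 */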
956 spa_add_spares(spa_t *spa, nvlist_t *config)
966 if (spa->spa_nspares == 0)
969 VERIFY(nvlist_lookup_nvlist(config,
970 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
971 VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
972 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
974 VERIFY(nvlist_add_nvlist_array(nvroot,
975 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
976 VERIFY(nvlist_lookup_nvlist_array(nvroot,
977 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
980 * Go through and find any spares which have since been
981 * repurposed as active spares. If this is the case, update
982 * their status appropriately.
984 for (i = 0; i < nspares; i++) {
985 VERIFY(nvlist_lookup_uint64(spares[i],
986 ZPOOL_CONFIG_GUID, &guid) == 0);
987 if (spa_spare_exists(guid, &pool) && pool != 0ULL) {
988 VERIFY(nvlist_lookup_uint64_array(
989 spares[i], ZPOOL_CONFIG_STATS,
990 (uint64_t **)&vs, &vsc) == 0);
991 vs->vs_state = VDEV_STATE_CANT_OPEN;
992 vs->vs_aux = VDEV_AUX_SPARED;
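/*
 * Return the pool configuration (augmented with the error count and the
 * spare list) and the alternate root for the named pool. The alternate
 * root is reported even if the pool itself is faulted.
 */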
999 spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
1005 error = spa_open_common(name, &spa, FTAG, config);
1007 if (spa && *config != NULL) {
1008 VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
1009 spa_get_errlog_size(spa)) == 0);
1011 spa_add_spares(spa, *config);
1015 * We want to get the alternate root even for faulted pools, so we cheat
1016 * and call spa_lookup() directly.
1020 mutex_enter(&spa_namespace_lock);
1021 spa = spa_lookup(name);
1023 spa_altroot(spa, altroot, buflen);
1027 mutex_exit(&spa_namespace_lock);
1029 spa_altroot(spa, altroot, buflen);
1034 spa_close(spa, FTAG);
1040 * Validate that the 'spares' array is well formed. We must have an array of
1041 * nvlists, each of which describes a valid leaf vdev. If this is an import (mode
1042 * is VDEV_ALLOC_SPARE), then we allow corrupted spares to be specified, as long
1043 * as they are well-formed.
1046 spa_validate_spares(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
1054 * It's acceptable to have no spares specified.
1056 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1057 &spares, &nspares) != 0)
1064 * Make sure the pool is formatted with a version that supports hot spares.
1067 if (spa_version(spa) < ZFS_VERSION_SPARES)
1071 * Set the pending spare list so we correctly handle device in-use checking.
1074 spa->spa_pending_spares = spares;
1075 spa->spa_pending_nspares = nspares;
1077 for (i = 0; i < nspares; i++) {
1078 if ((error = spa_config_parse(spa, &vd, spares[i], NULL, 0,
1082 if (!vd->vdev_ops->vdev_op_leaf) {
1090 if ((error = vdev_open(vd)) == 0 &&
1091 (error = vdev_label_init(vd, crtxg,
1092 VDEV_LABEL_SPARE)) == 0) {
1093 VERIFY(nvlist_add_uint64(spares[i], ZPOOL_CONFIG_GUID,
1094 vd->vdev_guid) == 0);
1099 if (error && mode != VDEV_ALLOC_SPARE)
1106 spa->spa_pending_spares = NULL;
1107 spa->spa_pending_nspares = 0;
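/*
 * Pool creation: build the root vdev tree from 'nvroot', create the pool's
 * on-disk objects (config, deflate flag, sync bplist, history), and start
 * the sync thread.
 */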
1115 spa_create(const char *pool, nvlist_t *nvroot, const char *altroot)
1122 uint64_t txg = TXG_INITIAL;
1127 * If this pool already exists, return failure.
1129 mutex_enter(&spa_namespace_lock);
1130 if (spa_lookup(pool) != NULL) {
1131 mutex_exit(&spa_namespace_lock);
1136 * Allocate a new spa_t structure.
1138 spa = spa_add(pool, altroot);
1141 spa->spa_uberblock.ub_txg = txg - 1;
1142 spa->spa_uberblock.ub_version = ZFS_VERSION;
1143 spa->spa_ubsync = spa->spa_uberblock;
1146 * Create the root vdev.
1148 spa_config_enter(spa, RW_WRITER, FTAG);
1150 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
1152 ASSERT(error != 0 || rvd != NULL);
1153 ASSERT(error != 0 || spa->spa_root_vdev == rvd);
1155 if (error == 0 && rvd->vdev_children == 0)
1159 (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
1160 (error = spa_validate_spares(spa, nvroot, txg,
1161 VDEV_ALLOC_ADD)) == 0) {
1162 for (c = 0; c < rvd->vdev_children; c++)
1163 vdev_init(rvd->vdev_child[c], txg);
1164 vdev_config_dirty(rvd);
1167 spa_config_exit(spa, FTAG);
1171 spa_deactivate(spa);
1173 mutex_exit(&spa_namespace_lock);
1178 * Get the list of spares, if specified.
1180 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1181 &spares, &nspares) == 0) {
1182 VERIFY(nvlist_alloc(&spa->spa_sparelist, NV_UNIQUE_NAME,
1184 VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
1185 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
1186 spa_config_enter(spa, RW_WRITER, FTAG);
1187 spa_load_spares(spa);
1188 spa_config_exit(spa, FTAG);
1189 spa->spa_sync_spares = B_TRUE;
1192 spa->spa_dsl_pool = dp = dsl_pool_create(spa, txg);
1193 spa->spa_meta_objset = dp->dp_meta_objset;
1195 tx = dmu_tx_create_assigned(dp, txg);
1198 * Create the pool config object.
1200 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
1201 DMU_OT_PACKED_NVLIST, 1 << 14,
1202 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
1204 if (zap_add(spa->spa_meta_objset,
1205 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
1206 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
1207 cmn_err(CE_PANIC, "failed to add pool config");
1210 /* Newly created pools are always deflated. */
1211 spa->spa_deflate = TRUE;
1212 if (zap_add(spa->spa_meta_objset,
1213 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
1214 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
1215 cmn_err(CE_PANIC, "failed to add deflate");
1219 * Create the deferred-free bplist object. Turn off compression
1220 * because sync-to-convergence takes longer if the blocksize keeps changing.
1223 spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
1225 dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
1226 ZIO_COMPRESS_OFF, tx);
1228 if (zap_add(spa->spa_meta_objset,
1229 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
1230 sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
1231 cmn_err(CE_PANIC, "failed to add bplist");
1235 * Create the pool's history object.
1237 spa_history_create_obj(spa, tx);
1241 spa->spa_bootfs = zfs_prop_default_numeric(ZFS_PROP_BOOTFS);
1242 spa->spa_sync_on = B_TRUE;
1243 txg_sync_start(spa->spa_dsl_pool);
1246 * We explicitly wait for the first transaction to complete so that our
1247 * bean counters are appropriately updated.
1249 txg_wait_synced(spa->spa_dsl_pool, txg);
1253 mutex_exit(&spa_namespace_lock);
1259 * Import the given pool into the system. We set up the necessary spa_t and
1260 * then call spa_load() to do the dirty work.
1263 spa_import(const char *pool, nvlist_t *config, const char *altroot)
1271 if (!(spa_mode & FWRITE))
1275 * If a pool with this name exists, return failure.
1277 mutex_enter(&spa_namespace_lock);
1278 if (spa_lookup(pool) != NULL) {
1279 mutex_exit(&spa_namespace_lock);
1284 * Create and initialize the spa structure.
1286 spa = spa_add(pool, altroot);
1290 * Pass off the heavy lifting to spa_load().
1291 * Pass TRUE for mosconfig because the user-supplied config
1292 * is actually the one to trust when doing an import.
1294 error = spa_load(spa, config, SPA_LOAD_IMPORT, B_TRUE);
1296 spa_config_enter(spa, RW_WRITER, FTAG);
1298 * Toss any existing sparelist, as it doesn't have any validity anymore,
1299 * and conflicts with spa_has_spare().
1301 if (spa->spa_sparelist) {
1302 nvlist_free(spa->spa_sparelist);
1303 spa->spa_sparelist = NULL;
1304 spa_load_spares(spa);
1307 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
1310 error = spa_validate_spares(spa, nvroot, -1ULL,
1312 spa_config_exit(spa, FTAG);
1316 spa_deactivate(spa);
1318 mutex_exit(&spa_namespace_lock);
1323 * Override any spares as specified by the user, as these may have
1324 * correct device names/devids, etc.
1326 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1327 &spares, &nspares) == 0) {
1328 if (spa->spa_sparelist)
1329 VERIFY(nvlist_remove(spa->spa_sparelist,
1330 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
1332 VERIFY(nvlist_alloc(&spa->spa_sparelist,
1333 NV_UNIQUE_NAME, KM_SLEEP) == 0);
1334 VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
1335 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
1336 spa_config_enter(spa, RW_WRITER, FTAG);
1337 spa_load_spares(spa);
1338 spa_config_exit(spa, FTAG);
1339 spa->spa_sync_spares = B_TRUE;
1343 * Update the config cache to include the newly-imported pool.
1345 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
1347 mutex_exit(&spa_namespace_lock);
1350 * Resilver anything that's out of date.
1352 if (spa_mode & FWRITE)
1353 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);
1359 * This (illegal) pool name is used when temporarily importing a spa_t in order
1360 * to get the vdev stats associated with the imported devices.
1362 #define TRYIMPORT_NAME "$import"
1365 spa_tryimport(nvlist_t *tryconfig)
1367 nvlist_t *config = NULL;
1372 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
1375 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
1379 * Create and initialize the spa structure.
1381 mutex_enter(&spa_namespace_lock);
1382 spa = spa_add(TRYIMPORT_NAME, NULL);
1386 * Pass off the heavy lifting to spa_load().
1387 * Pass TRUE for mosconfig because the user-supplied config
1388 * is actually the one to trust when doing an import.
1390 (void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);
1393 * If 'tryconfig' was at least parsable, return the current config.
1395 if (spa->spa_root_vdev != NULL) {
1396 spa_config_enter(spa, RW_READER, FTAG);
1397 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
1398 spa_config_exit(spa, FTAG);
1399 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
1401 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
1403 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
1404 spa->spa_uberblock.ub_timestamp) == 0);
1407 * Add the list of hot spares.
1409 spa_add_spares(spa, config);
1413 spa_deactivate(spa);
1415 mutex_exit(&spa_namespace_lock);
1421 * Pool export/destroy
1423 * The act of destroying or exporting a pool is very simple. We make sure there
1424 * is no more pending I/O and that all references to the pool are gone. Then, we
1425 * update the pool state and sync all the labels to disk, removing the
1426 * configuration from the cache afterwards.
1429 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig)
1436 if (!(spa_mode & FWRITE))
1439 mutex_enter(&spa_namespace_lock);
1440 if ((spa = spa_lookup(pool)) == NULL) {
1441 mutex_exit(&spa_namespace_lock);
1446 * Put a hold on the pool, drop the namespace lock, stop async tasks,
1447 * reacquire the namespace lock, and see if we can export.
1449 spa_open_ref(spa, FTAG);
1450 mutex_exit(&spa_namespace_lock);
1451 spa_async_suspend(spa);
1452 mutex_enter(&spa_namespace_lock);
1453 spa_close(spa, FTAG);
1456 * The pool will be in core if it's openable,
1457 * in which case we can modify its state.
1459 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
1461 * Objsets may be open only because they're dirty, so we
1462 * have to force the pool to sync before checking spa_refcnt.
1464 spa_scrub_suspend(spa);
1465 txg_wait_synced(spa->spa_dsl_pool, 0);
1468 * A pool cannot be exported or destroyed if there are active
1469 * references. If we are resetting a pool, allow references by
1470 * fault injection handlers.
1472 if (!spa_refcount_zero(spa) ||
1473 (spa->spa_inject_ref != 0 &&
1474 new_state != POOL_STATE_UNINITIALIZED)) {
1475 spa_scrub_resume(spa);
1476 spa_async_resume(spa);
1477 mutex_exit(&spa_namespace_lock);
1481 spa_scrub_resume(spa);
1482 VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0);
1485 * We want this to be reflected on every label,
1486 * so mark them all dirty. spa_unload() will do the
1487 * final sync that pushes these changes out.
1489 if (new_state != POOL_STATE_UNINITIALIZED) {
1490 spa_config_enter(spa, RW_WRITER, FTAG);
1491 spa->spa_state = new_state;
1492 spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
1493 vdev_config_dirty(spa->spa_root_vdev);
1494 spa_config_exit(spa, FTAG);
1498 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
1500 spa_deactivate(spa);
1503 if (oldconfig && spa->spa_config)
1504 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
1506 if (new_state != POOL_STATE_UNINITIALIZED) {
1510 mutex_exit(&spa_namespace_lock);
1516 * Destroy a storage pool.
1519 spa_destroy(char *pool)
1521 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL));
1525 * Export a storage pool.
1528 spa_export(char *pool, nvlist_t **oldconfig)
1530 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig));
1534 * Similar to spa_export(), this unloads the spa_t without actually removing it
1535 * from the namespace in any way.
1538 spa_reset(char *pool)
1540 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL));
1545 * ==========================================================================
1546 * Device manipulation
1547 * ==========================================================================
1551 * Add capacity to a storage pool.
1554 spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
1558 vdev_t *rvd = spa->spa_root_vdev;
1563 txg = spa_vdev_enter(spa);
1565 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
1566 VDEV_ALLOC_ADD)) != 0)
1567 return (spa_vdev_exit(spa, NULL, txg, error));
1569 spa->spa_pending_vdev = vd;
1571 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1572 &spares, &nspares) != 0)
1575 if (vd->vdev_children == 0 && nspares == 0) {
1576 spa->spa_pending_vdev = NULL;
1577 return (spa_vdev_exit(spa, vd, txg, EINVAL));
1580 if (vd->vdev_children != 0) {
1581 if ((error = vdev_create(vd, txg, B_FALSE)) != 0) {
1582 spa->spa_pending_vdev = NULL;
1583 return (spa_vdev_exit(spa, vd, txg, error));
1588 * We must validate the spares after checking the children. Otherwise,
1589 * vdev_inuse() will blindly overwrite the spare.
1591 if ((error = spa_validate_spares(spa, nvroot, txg,
1592 VDEV_ALLOC_ADD)) != 0) {
1593 spa->spa_pending_vdev = NULL;
1594 return (spa_vdev_exit(spa, vd, txg, error));
1597 spa->spa_pending_vdev = NULL;
1600 * Transfer each new top-level vdev from vd to rvd.
1602 for (c = 0; c < vd->vdev_children; c++) {
1603 tvd = vd->vdev_child[c];
1604 vdev_remove_child(vd, tvd);
1605 tvd->vdev_id = rvd->vdev_children;
1606 vdev_add_child(rvd, tvd);
1607 vdev_config_dirty(tvd);
1611 if (spa->spa_sparelist != NULL) {
1612 nvlist_t **oldspares;
1614 nvlist_t **newspares;
1616 VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
1617 ZPOOL_CONFIG_SPARES, &oldspares, &oldnspares) == 0);
1619 newspares = kmem_alloc(sizeof (void *) *
1620 (nspares + oldnspares), KM_SLEEP);
1621 for (i = 0; i < oldnspares; i++)
1622 VERIFY(nvlist_dup(oldspares[i],
1623 &newspares[i], KM_SLEEP) == 0);
1624 for (i = 0; i < nspares; i++)
1625 VERIFY(nvlist_dup(spares[i],
1626 &newspares[i + oldnspares],
1629 VERIFY(nvlist_remove(spa->spa_sparelist,
1630 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
1632 VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
1633 ZPOOL_CONFIG_SPARES, newspares,
1634 nspares + oldnspares) == 0);
1635 for (i = 0; i < oldnspares + nspares; i++)
1636 nvlist_free(newspares[i]);
1637 kmem_free(newspares, (oldnspares + nspares) *
1640 VERIFY(nvlist_alloc(&spa->spa_sparelist,
1641 NV_UNIQUE_NAME, KM_SLEEP) == 0);
1642 VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
1643 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
1646 spa_load_spares(spa);
1647 spa->spa_sync_spares = B_TRUE;
1651 * We have to be careful when adding new vdevs to an existing pool.
1652 * If other threads start allocating from these vdevs before we
1653 * sync the config cache, and we lose power, then upon reboot we may
1654 * fail to open the pool because there are DVAs that the config cache
1655 * can't translate. Therefore, we first add the vdevs without
1656 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
1657 * and then let spa_config_update() initialize the new metaslabs.
1659 * spa_load() checks for added-but-not-initialized vdevs, so that
1660 * if we lose power at any point in this sequence, the remaining
1661 * steps will be completed the next time we load the pool.
1663 (void) spa_vdev_exit(spa, vd, txg, 0);
1665 mutex_enter(&spa_namespace_lock);
1666 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
1667 mutex_exit(&spa_namespace_lock);
1673 * Attach a device to a mirror. The arguments are the path to any device
1674 * in the mirror, and the nvroot for the new device. If the path specifies
1675 * a device that is not mirrored, we automatically insert the mirror vdev.
1677 * If 'replacing' is specified, the new device is intended to replace the
1678 * existing device; in this case the two devices are made into their own
1679 * mirror using the 'replacing' vdev, which is functionally identical to
1680 * the mirror vdev (it actually reuses all the same ops) but has a few
1681 * extra rules: you can't attach to it after it's been created, and upon
1682 * completion of resilvering, the first disk (the one being replaced)
1683 * is automatically detached.
1686 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
1688 uint64_t txg, open_txg;
1690 vdev_t *rvd = spa->spa_root_vdev;
1691 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
1694 txg = spa_vdev_enter(spa);
1696 oldvd = vdev_lookup_by_guid(rvd, guid);
1699 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
1701 if (!oldvd->vdev_ops->vdev_op_leaf)
1702 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
1704 pvd = oldvd->vdev_parent;
1706 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
1707 VDEV_ALLOC_ADD)) != 0 || newrootvd->vdev_children != 1)
1708 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
1710 newvd = newrootvd->vdev_child[0];
1712 if (!newvd->vdev_ops->vdev_op_leaf)
1713 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
1715 if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
1716 return (spa_vdev_exit(spa, newrootvd, txg, error));
1720 * For attach, the only allowable parent is a mirror or the root vdev.
1723 if (pvd->vdev_ops != &vdev_mirror_ops &&
1724 pvd->vdev_ops != &vdev_root_ops)
1725 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
1727 pvops = &vdev_mirror_ops;
1730 * Active hot spares can only be replaced by inactive hot spares.
1733 if (pvd->vdev_ops == &vdev_spare_ops &&
1734 pvd->vdev_child[1] == oldvd &&
1735 !spa_has_spare(spa, newvd->vdev_guid))
1736 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
1739 * If the source is a hot spare, and the parent isn't already a
1740 * spare, then we want to create a new hot spare. Otherwise, we
1741 * want to create a replacing vdev. The user is not allowed to
1742 * attach to a spared vdev child unless the 'isspare' state is
1743 * the same (spare replaces spare, non-spare replaces non-spare).
1746 if (pvd->vdev_ops == &vdev_replacing_ops)
1747 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
1748 else if (pvd->vdev_ops == &vdev_spare_ops &&
1749 newvd->vdev_isspare != oldvd->vdev_isspare)
1750 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
1751 else if (pvd->vdev_ops != &vdev_spare_ops &&
1752 newvd->vdev_isspare)
1753 pvops = &vdev_spare_ops;
1755 pvops = &vdev_replacing_ops;
1759 * Compare the new device size with the replaceable/attachable device size.
1762 if (newvd->vdev_psize < vdev_get_rsize(oldvd))
1763 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
1766 * The new device cannot have a higher alignment requirement
1767 * than the top-level vdev.
1769 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
1770 return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
1773 * If this is an in-place replacement, update oldvd's path and devid
1774 * to make it distinguishable from newvd, and unopenable from now on.
1776 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
1777 spa_strfree(oldvd->vdev_path);
1778 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
1780 (void) sprintf(oldvd->vdev_path, "%s/%s",
1781 newvd->vdev_path, "old");
1782 if (oldvd->vdev_devid != NULL) {
1783 spa_strfree(oldvd->vdev_devid);
1784 oldvd->vdev_devid = NULL;
1789 * If the parent is not a mirror, or if we're replacing, insert the new
1790 * mirror/replacing/spare vdev above oldvd.
1792 if (pvd->vdev_ops != pvops)
1793 pvd = vdev_add_parent(oldvd, pvops);
1795 ASSERT(pvd->vdev_top->vdev_parent == rvd);
1796 ASSERT(pvd->vdev_ops == pvops);
1797 ASSERT(oldvd->vdev_parent == pvd);
1800 * Extract the new device from its root and add it to pvd.
1802 vdev_remove_child(newrootvd, newvd);
1803 newvd->vdev_id = pvd->vdev_children;
1804 vdev_add_child(pvd, newvd);
1807 * If newvd is smaller than oldvd, but larger than its rsize,
1808 * the addition of newvd may have decreased our parent's asize.
1810 pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize);
1812 tvd = newvd->vdev_top;
1813 ASSERT(pvd->vdev_top == tvd);
1814 ASSERT(tvd->vdev_parent == rvd);
1816 vdev_config_dirty(tvd);
1819 * Set newvd's DTL to [TXG_INITIAL, open_txg]. It will propagate
1820 * upward when spa_vdev_exit() calls vdev_dtl_reassess().
1822 open_txg = txg + TXG_CONCURRENT_STATES - 1;
1824 mutex_enter(&newvd->vdev_dtl_lock);
1825 space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL,
1826 open_txg - TXG_INITIAL + 1);
1827 mutex_exit(&newvd->vdev_dtl_lock);
1829 if (newvd->vdev_isspare)
1830 spa_spare_activate(newvd);
1833 * Mark newvd's DTL dirty in this txg.
1835 vdev_dirty(tvd, VDD_DTL, newvd, txg);
1837 (void) spa_vdev_exit(spa, newrootvd, open_txg, 0);
1840 * Kick off a resilver to update newvd.
1842 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);
1848 * Detach a device from a mirror or replacing vdev.
1849 * If 'replace_done' is specified, only detach if the parent
1850 * is a replacing vdev.
1853 spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done)
1857 vdev_t *rvd = spa->spa_root_vdev;
1858 vdev_t *vd, *pvd, *cvd, *tvd;
1859 boolean_t unspare = B_FALSE;
1860 uint64_t unspare_guid;
1862 txg = spa_vdev_enter(spa);
1864 vd = vdev_lookup_by_guid(rvd, guid);
1867 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
1869 if (!vd->vdev_ops->vdev_op_leaf)
1870 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
1872 pvd = vd->vdev_parent;
1875 * If replace_done is specified, only remove this device if it's
1876 * the first child of a replacing vdev. For the 'spare' vdev, either
1877 * disk can be removed.
1880 if (pvd->vdev_ops == &vdev_replacing_ops) {
1881 if (vd->vdev_id != 0)
1882 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
1883 } else if (pvd->vdev_ops != &vdev_spare_ops) {
1884 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
1888 ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
1889 spa_version(spa) >= ZFS_VERSION_SPARES);
1892 * Only mirror, replacing, and spare vdevs support detach.
1894 if (pvd->vdev_ops != &vdev_replacing_ops &&
1895 pvd->vdev_ops != &vdev_mirror_ops &&
1896 pvd->vdev_ops != &vdev_spare_ops)
1897 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
1900 * If there's only one replica, you can't detach it.
1902 if (pvd->vdev_children <= 1)
1903 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
1906 * If all siblings have non-empty DTLs, this device may have the only
1907 * valid copy of the data, which means we cannot safely detach it.
1909 * XXX -- as in the vdev_offline() case, we really want a more
1910 * precise DTL check.
1912 for (c = 0; c < pvd->vdev_children; c++) {
1915 cvd = pvd->vdev_child[c];
1918 if (vdev_is_dead(cvd))
1920 mutex_enter(&cvd->vdev_dtl_lock);
1921 dirty = cvd->vdev_dtl_map.sm_space |
1922 cvd->vdev_dtl_scrub.sm_space;
1923 mutex_exit(&cvd->vdev_dtl_lock);
1929 * If we are a replacing or spare vdev, then we can always detach the
1930 * latter child, as that is how one cancels the operation.
1932 if ((pvd->vdev_ops == &vdev_mirror_ops || vd->vdev_id != 1) &&
1933 c == pvd->vdev_children)
1934 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
1937 * If we are detaching the original disk from a spare, then it implies
1938 * that the spare should become a real disk, and be removed from the
1939 * active spare list for the pool.
1941 if (pvd->vdev_ops == &vdev_spare_ops &&
1946 * Erase the disk labels so the disk can be used for other things.
1947 * This must be done after all other error cases are handled,
1948 * but before we disembowel vd (so we can still do I/O to it).
1949 * But if we can't do it, don't treat the error as fatal --
1950 * it may be that the unwritability of the disk is the reason
1951 * it's being detached!
1953 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
1956 * Remove vd from its parent and compact the parent's children.
1958 vdev_remove_child(pvd, vd);
1959 vdev_compact_children(pvd);
1962 * Remember one of the remaining children so we can get tvd below.
1964 cvd = pvd->vdev_child[0];
1967 * If we need to remove the remaining child from the list of hot spares,
1968 * do it now, marking the vdev as no longer a spare in the process. We
1969 * must do this before vdev_remove_parent(), because that can change the
1970 * GUID if it creates a new toplevel GUID.
1973 ASSERT(cvd->vdev_isspare);
1974 spa_spare_remove(cvd);
1975 unspare_guid = cvd->vdev_guid;
1979 * If the parent mirror/replacing vdev only has one child,
1980 * the parent is no longer needed. Remove it from the tree.
1982 if (pvd->vdev_children == 1)
1983 vdev_remove_parent(cvd);
1986 * We don't set tvd until now because the parent we just removed
1987 * may have been the previous top-level vdev.
1989 tvd = cvd->vdev_top;
1990 ASSERT(tvd->vdev_parent == rvd);
1993 * Reevaluate the parent vdev state.
1995 vdev_propagate_state(cvd->vdev_parent);
1998 * If the device we just detached was smaller than the others, it may be
1999 * possible to add metaslabs (i.e. grow the pool). vdev_metaslab_init()
2000 * can't fail because the existing metaslabs are already in core, so
2001 * there's nothing to read from disk.
2003 VERIFY(vdev_metaslab_init(tvd, txg) == 0);
2005 vdev_config_dirty(tvd);
2008 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
2009 * vd->vdev_detached is set and free vd's DTL object in syncing context.
2010 * But first make sure we're not on any *other* txg's DTL list, to
2011 * prevent vd from being accessed after it's freed.
2013 for (t = 0; t < TXG_SIZE; t++)
2014 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
2015 vd->vdev_detached = B_TRUE;
2016 vdev_dirty(tvd, VDD_DTL, vd, txg);
2018 error = spa_vdev_exit(spa, vd, txg, 0);
2021 * If this was the removal of the original device in a hot spare vdev,
2022 * then we want to go through and remove the device from the hot spare
2023 * list of every other pool.
2027 mutex_enter(&spa_namespace_lock);
2028 while ((spa = spa_next(spa)) != NULL) {
2029 if (spa->spa_state != POOL_STATE_ACTIVE)
2032 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
2034 mutex_exit(&spa_namespace_lock);
2041 * Remove a device from the pool. Currently, this supports removing only hot spares.
2045 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
2048 nvlist_t **spares, *nv, **newspares;
2049 uint_t i, j, nspares;
2052 spa_config_enter(spa, RW_WRITER, FTAG);
2054 vd = spa_lookup_by_guid(spa, guid);
2057 if (spa->spa_spares != NULL &&
2058 nvlist_lookup_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
2059 &spares, &nspares) == 0) {
2060 for (i = 0; i < nspares; i++) {
2063 VERIFY(nvlist_lookup_uint64(spares[i],
2064 ZPOOL_CONFIG_GUID, &theguid) == 0);
2065 if (theguid == guid) {
2073 * We only support removing a hot spare, and only if it's not currently
2074 * in use in this pool.
2076 if (nv == NULL && vd == NULL) {
2081 if (nv == NULL && vd != NULL) {
2086 if (!unspare && nv != NULL && vd != NULL) {
2094 newspares = kmem_alloc((nspares - 1) * sizeof (void *),
2096 for (i = 0, j = 0; i < nspares; i++) {
2097 if (spares[i] != nv)
2098 VERIFY(nvlist_dup(spares[i],
2099 &newspares[j++], KM_SLEEP) == 0);
2103 VERIFY(nvlist_remove(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
2104 DATA_TYPE_NVLIST_ARRAY) == 0);
2105 VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
2106 newspares, nspares - 1) == 0);
2107 for (i = 0; i < nspares - 1; i++)
2108 nvlist_free(newspares[i]);
2109 kmem_free(newspares, (nspares - 1) * sizeof (void *));
2110 spa_load_spares(spa);
2111 spa->spa_sync_spares = B_TRUE;
2114 spa_config_exit(spa, FTAG);
2120 * Find any device that's done replacing, so we can detach it.
2123 spa_vdev_replace_done_hunt(vdev_t *vd)
2125 vdev_t *newvd, *oldvd;
2128 for (c = 0; c < vd->vdev_children; c++) {
2129 oldvd = spa_vdev_replace_done_hunt(vd->vdev_child[c]);
2134 if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) {
2135 oldvd = vd->vdev_child[0];
2136 newvd = vd->vdev_child[1];
2138 mutex_enter(&newvd->vdev_dtl_lock);
2139 if (newvd->vdev_dtl_map.sm_space == 0 &&
2140 newvd->vdev_dtl_scrub.sm_space == 0) {
2141 mutex_exit(&newvd->vdev_dtl_lock);
2144 mutex_exit(&newvd->vdev_dtl_lock);
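/*
 * Detach any devices whose replacement has completed, as found by the hunt
 * routine above. Called from the async thread (SPA_ASYNC_REPLACE_DONE).
 */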
2151 spa_vdev_replace_done(spa_t *spa)
2158 spa_config_enter(spa, RW_READER, FTAG);
2160 while ((vd = spa_vdev_replace_done_hunt(spa->spa_root_vdev)) != NULL) {
2161 guid = vd->vdev_guid;
2163 * If we have just finished replacing a hot spared device, then
2164 * we need to detach the parent's first child (the original hot spare).
2167 pvd = vd->vdev_parent;
2168 if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
2169 pvd->vdev_id == 0) {
2170 ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
2171 ASSERT(pvd->vdev_parent->vdev_children == 2);
2172 pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid;
2174 spa_config_exit(spa, FTAG);
2175 if (spa_vdev_detach(spa, guid, B_TRUE) != 0)
2177 if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0)
2179 spa_config_enter(spa, RW_READER, FTAG);
2182 spa_config_exit(spa, FTAG);
2186 * Update the stored path for this vdev. Dirty the vdev configuration, relying
2187 * on spa_vdev_enter/exit() to synchronize the labels and cache.
2190 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
2195 rvd = spa->spa_root_vdev;
2197 txg = spa_vdev_enter(spa);
2199 if ((vd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
2201 * Determine if this is a reference to a hot spare. In that
2202 * case, update the path as stored in the spare list.
2206 if (spa->spa_sparelist != NULL) {
2207 VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
2208 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
2209 for (i = 0; i < nspares; i++) {
2211 VERIFY(nvlist_lookup_uint64(spares[i],
2212 ZPOOL_CONFIG_GUID, &theguid) == 0);
2213 if (theguid == guid)
2218 return (spa_vdev_exit(spa, NULL, txg, ENOENT));
2220 VERIFY(nvlist_add_string(spares[i],
2221 ZPOOL_CONFIG_PATH, newpath) == 0);
2222 spa_load_spares(spa);
2223 spa->spa_sync_spares = B_TRUE;
2224 return (spa_vdev_exit(spa, NULL, txg, 0));
2226 return (spa_vdev_exit(spa, NULL, txg, ENOENT));
2230 if (!vd->vdev_ops->vdev_op_leaf)
2231 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
2233 spa_strfree(vd->vdev_path);
2234 vd->vdev_path = spa_strdup(newpath);
2236 vdev_config_dirty(vd->vdev_top);
2238 return (spa_vdev_exit(spa, NULL, txg, 0));
2242 * ==========================================================================
* SPA Scrubbing
2244 * ==========================================================================
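/*
 * Completion callback for scrub/resilver reads: free the data buffer,
 * record any error against the vdev, and wake up throttled issuers once
 * the in-flight count drops below spa_scrub_maxinflight.
 */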
2248 spa_scrub_io_done(zio_t *zio)
2250 spa_t *spa = zio->io_spa;
2252 zio_data_buf_free(zio->io_data, zio->io_size);
2254 mutex_enter(&spa->spa_scrub_lock);
2255 if (zio->io_error && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
2256 vdev_t *vd = zio->io_vd ? zio->io_vd : spa->spa_root_vdev;
2257 spa->spa_scrub_errors++;
2258 mutex_enter(&vd->vdev_stat_lock);
2259 vd->vdev_stat.vs_scrub_errors++;
2260 mutex_exit(&vd->vdev_stat_lock);
2263 if (--spa->spa_scrub_inflight < spa->spa_scrub_maxinflight)
2264 cv_broadcast(&spa->spa_scrub_io_cv);
2266 ASSERT(spa->spa_scrub_inflight >= 0);
2268 mutex_exit(&spa->spa_scrub_lock);
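/*
 * Issue an asynchronous read of 'bp' on behalf of a scrub or resilver,
 * throttling so that no more than spa_scrub_maxinflight reads are
 * outstanding at once.
 */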
2272 spa_scrub_io_start(spa_t *spa, blkptr_t *bp, int priority, int flags,
2275 size_t size = BP_GET_LSIZE(bp);
2278 mutex_enter(&spa->spa_scrub_lock);
2280 * Do not give too much work to vdev(s).
2282 while (spa->spa_scrub_inflight >= spa->spa_scrub_maxinflight) {
2283 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
2285 spa->spa_scrub_inflight++;
2286 mutex_exit(&spa->spa_scrub_lock);
2288 data = zio_data_buf_alloc(size);
2290 if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)
2291 flags |= ZIO_FLAG_SPECULATIVE; /* intent log block */
2293 flags |= ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_CANFAIL;
2295 zio_nowait(zio_read(NULL, spa, bp, data, size,
2296 spa_scrub_io_done, NULL, priority, flags, zb));
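/*
 * Per-block callback invoked during pool traversal: record how much data
 * has been examined, and issue a scrub or resilver read for the block as
 * appropriate.
 */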
2301 spa_scrub_cb(traverse_blk_cache_t *bc, spa_t *spa, void *a)
2303 blkptr_t *bp = &bc->bc_blkptr;
2304 vdev_t *vd = spa->spa_root_vdev;
2305 dva_t *dva = bp->blk_dva;
2306 int needs_resilver = B_FALSE;
2311 * We can't scrub this block, but we can continue to scrub
2312 * the rest of the pool. Note the error and move along.
2314 mutex_enter(&spa->spa_scrub_lock);
2315 spa->spa_scrub_errors++;
2316 mutex_exit(&spa->spa_scrub_lock);
2318 mutex_enter(&vd->vdev_stat_lock);
2319 vd->vdev_stat.vs_scrub_errors++;
2320 mutex_exit(&vd->vdev_stat_lock);
2325 ASSERT(bp->blk_birth < spa->spa_scrub_maxtxg);
2327 for (d = 0; d < BP_GET_NDVAS(bp); d++) {
2328 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]));
2333 * Keep track of how much data we've examined so that
2334 * zpool(1M) status can make useful progress reports.
2336 mutex_enter(&vd->vdev_stat_lock);
2337 vd->vdev_stat.vs_scrub_examined += DVA_GET_ASIZE(&dva[d]);
2338 mutex_exit(&vd->vdev_stat_lock);
2340 if (spa->spa_scrub_type == POOL_SCRUB_RESILVER) {
2341 if (DVA_GET_GANG(&dva[d])) {
2343 * Gang members may be spread across multiple
2344 * vdevs, so the best we can do is look at the pool-wide DTL.
2346 * XXX -- it would be better to change our
2347 * allocation policy to ensure that this can't happen.
2350 vd = spa->spa_root_vdev;
2352 if (vdev_dtl_contains(&vd->vdev_dtl_map,
2354 needs_resilver = B_TRUE;
2358 if (spa->spa_scrub_type == POOL_SCRUB_EVERYTHING)
2359 spa_scrub_io_start(spa, bp, ZIO_PRIORITY_SCRUB,
2360 ZIO_FLAG_SCRUB, &bc->bc_bookmark);
2361 else if (needs_resilver)
2362 spa_scrub_io_start(spa, bp, ZIO_PRIORITY_RESILVER,
2363 ZIO_FLAG_RESILVER, &bc->bc_bookmark);
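/*
 * Body of the scrub/resilver thread: traverse the pool, wait for the
 * outstanding scrub I/O to drain, then update the DTLs and scrub stats
 * if the traversal completed.
 */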
2369 spa_scrub_thread(void *arg)
2372 callb_cpr_t cprinfo;
2373 traverse_handle_t *th = spa->spa_scrub_th;
2374 vdev_t *rvd = spa->spa_root_vdev;
2375 pool_scrub_type_t scrub_type = spa->spa_scrub_type;
2379 CALLB_CPR_INIT(&cprinfo, &spa->spa_scrub_lock, callb_generic_cpr, FTAG);
2382 * If we're restarting due to a snapshot create/delete,
2383 * wait for that to complete.
2385 txg_wait_synced(spa_get_dsl(spa), 0);
2387 dprintf("start %s mintxg=%llu maxtxg=%llu\n",
2388 scrub_type == POOL_SCRUB_RESILVER ? "resilver" : "scrub",
2389 spa->spa_scrub_mintxg, spa->spa_scrub_maxtxg);
2391 spa_config_enter(spa, RW_WRITER, FTAG);
2392 vdev_reopen(rvd); /* purge all vdev caches */
2393 vdev_config_dirty(rvd); /* rewrite all disk labels */
2394 vdev_scrub_stat_update(rvd, scrub_type, B_FALSE);
2395 spa_config_exit(spa, FTAG);
2397 mutex_enter(&spa->spa_scrub_lock);
2398 spa->spa_scrub_errors = 0;
2399 spa->spa_scrub_active = 1;
2400 ASSERT(spa->spa_scrub_inflight == 0);
2402 while (!spa->spa_scrub_stop) {
2403 CALLB_CPR_SAFE_BEGIN(&cprinfo);
2404 while (spa->spa_scrub_suspended) {
2405 spa->spa_scrub_active = 0;
2406 cv_broadcast(&spa->spa_scrub_cv);
2407 cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock);
2408 spa->spa_scrub_active = 1;
2410 CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_scrub_lock);
2412 if (spa->spa_scrub_restart_txg != 0)
2415 mutex_exit(&spa->spa_scrub_lock);
2416 error = traverse_more(th);
2417 mutex_enter(&spa->spa_scrub_lock);
2418 if (error != EAGAIN)
2422 while (spa->spa_scrub_inflight)
2423 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
2425 spa->spa_scrub_active = 0;
2426 cv_broadcast(&spa->spa_scrub_cv);
2428 mutex_exit(&spa->spa_scrub_lock);
2430 spa_config_enter(spa, RW_WRITER, FTAG);
2432 mutex_enter(&spa->spa_scrub_lock);
2435 * Note: we check spa_scrub_restart_txg under both spa_scrub_lock
2436 * AND the spa config lock to synchronize with any config changes
2437 * that revise the DTLs under spa_vdev_enter() / spa_vdev_exit().
2439 if (spa->spa_scrub_restart_txg != 0)
2442 if (spa->spa_scrub_stop)
2446 * Even if there were uncorrectable errors, we consider the scrub
2447 * completed. The downside is that if there is a transient error during
2448 * a resilver, we won't resilver the data properly to the target. But
2449 * if the damage is permanent (more likely) we will resilver forever,
2450 * which isn't really acceptable. Since there is enough information for
2451 * the user to know what has failed and why, this seems like a more
2452 * tractable approach.
2454 complete = (error == 0);
2456 dprintf("end %s to maxtxg=%llu %s, traverse=%d, %llu errors, stop=%u\n",
2457 scrub_type == POOL_SCRUB_RESILVER ? "resilver" : "scrub",
2458 spa->spa_scrub_maxtxg, complete ? "done" : "FAILED",
2459 error, spa->spa_scrub_errors, spa->spa_scrub_stop);
2461 mutex_exit(&spa->spa_scrub_lock);
2464 * If the scrub/resilver completed, update all DTLs to reflect this.
2465 * Whether it succeeded or not, vacate all temporary scrub DTLs.
2467 vdev_dtl_reassess(rvd, spa_last_synced_txg(spa) + 1,
2468 complete ? spa->spa_scrub_maxtxg : 0, B_TRUE);
2469 vdev_scrub_stat_update(rvd, POOL_SCRUB_NONE, complete);
2470 spa_errlog_rotate(spa);
2472 spa_config_exit(spa, FTAG);
2474 mutex_enter(&spa->spa_scrub_lock);
2477 * We may have finished replacing a device.
2478 * Let the async thread assess this and handle the detach.
2480 spa_async_request(spa, SPA_ASYNC_REPLACE_DONE);
2483 * If we were told to restart, our final act is to start a new scrub.
2485 if (error == ERESTART)
2486 spa_async_request(spa, scrub_type == POOL_SCRUB_RESILVER ?
2487 SPA_ASYNC_RESILVER : SPA_ASYNC_SCRUB);
2489 spa->spa_scrub_type = POOL_SCRUB_NONE;
2490 spa->spa_scrub_active = 0;
2491 spa->spa_scrub_thread = NULL;
2492 cv_broadcast(&spa->spa_scrub_cv);
2493 CALLB_CPR_EXIT(&cprinfo); /* drops &spa->spa_scrub_lock */
2498 spa_scrub_suspend(spa_t *spa)
2500 mutex_enter(&spa->spa_scrub_lock);
2501 spa->spa_scrub_suspended++;
2502 while (spa->spa_scrub_active) {
2503 cv_broadcast(&spa->spa_scrub_cv);
2504 cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock);
2506 while (spa->spa_scrub_inflight)
2507 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
2508 mutex_exit(&spa->spa_scrub_lock);
2512 spa_scrub_resume(spa_t *spa)
2514 mutex_enter(&spa->spa_scrub_lock);
2515 ASSERT(spa->spa_scrub_suspended != 0);
2516 if (--spa->spa_scrub_suspended == 0)
2517 cv_broadcast(&spa->spa_scrub_cv);
2518 mutex_exit(&spa->spa_scrub_lock);
2522 spa_scrub_restart(spa_t *spa, uint64_t txg)
2525 * Something happened (e.g. snapshot create/delete) that means
2526 * we must restart any in-progress scrubs. The itinerary will
2527 * fix this properly.
2529 mutex_enter(&spa->spa_scrub_lock);
2530 spa->spa_scrub_restart_txg = txg;
2531 mutex_exit(&spa->spa_scrub_lock);
2535 spa_scrub(spa_t *spa, pool_scrub_type_t type, boolean_t force)
2538 uint64_t mintxg, maxtxg;
2539 vdev_t *rvd = spa->spa_root_vdev;
2541 if ((uint_t)type >= POOL_SCRUB_TYPES)
2544 mutex_enter(&spa->spa_scrub_lock);
2547 * If there's a scrub or resilver already in progress, stop it.
2549 while (spa->spa_scrub_thread != NULL) {
2551 * Don't stop a resilver unless forced.
2553 if (spa->spa_scrub_type == POOL_SCRUB_RESILVER && !force) {
2554 mutex_exit(&spa->spa_scrub_lock);
2557 spa->spa_scrub_stop = 1;
2558 cv_broadcast(&spa->spa_scrub_cv);
2559 cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock);
2563 * Terminate the previous traverse.
2565 if (spa->spa_scrub_th != NULL) {
2566 traverse_fini(spa->spa_scrub_th);
2567 spa->spa_scrub_th = NULL;
2571 ASSERT(spa->spa_scrub_stop == 0);
2572 ASSERT(spa->spa_scrub_type == type);
2573 ASSERT(spa->spa_scrub_restart_txg == 0);
2574 mutex_exit(&spa->spa_scrub_lock);
2578 mintxg = TXG_INITIAL - 1;
2579 maxtxg = spa_last_synced_txg(spa) + 1;
2581 mutex_enter(&rvd->vdev_dtl_lock);
2583 if (rvd->vdev_dtl_map.sm_space == 0) {
2585 * The pool-wide DTL is empty.
2586 * If this is a resilver, there's nothing to do except
2587 * check whether any in-progress replacements have completed.
2589 if (type == POOL_SCRUB_RESILVER) {
2590 type = POOL_SCRUB_NONE;
2591 spa_async_request(spa, SPA_ASYNC_REPLACE_DONE);
2595 * The pool-wide DTL is non-empty.
2596 * If this is a normal scrub, upgrade to a resilver instead.
2598 if (type == POOL_SCRUB_EVERYTHING)
2599 type = POOL_SCRUB_RESILVER;
2602 if (type == POOL_SCRUB_RESILVER) {
2604 * Determine the resilvering boundaries.
2606 * Note: (mintxg, maxtxg) is an open interval,
2607 * i.e. mintxg and maxtxg themselves are not included.
2609 * Note: for maxtxg, we MIN with spa_last_synced_txg(spa) + 1
2610 * so we don't claim to resilver a txg that's still changing.
2612 ss = avl_first(&rvd->vdev_dtl_map.sm_root);
2613 mintxg = ss->ss_start - 1;
2614 ss = avl_last(&rvd->vdev_dtl_map.sm_root);
2615 maxtxg = MIN(ss->ss_end, maxtxg);
2618 mutex_exit(&rvd->vdev_dtl_lock);
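/*
 * Worked example (illustrative, not from the original file): assuming
 * space-map segments are half-open [ss_start, ss_end), a pool-wide DTL
 * recording missing txgs 100..200 has ss_start == 100 and ss_end == 201.
 * With spa_last_synced_txg() == 300 this gives mintxg = 99 and
 * maxtxg = MIN(201, 301) = 201, so the open interval (99, 201) covers
 * exactly txgs 100..200.  If only txg 150 had synced so far, maxtxg would
 * clamp to 151 and the later txgs would be picked up by a subsequent
 * resilver.
 */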
2620 spa->spa_scrub_stop = 0;
2621 spa->spa_scrub_type = type;
2622 spa->spa_scrub_restart_txg = 0;
2624 if (type != POOL_SCRUB_NONE) {
2625 spa->spa_scrub_mintxg = mintxg;
2626 spa->spa_scrub_maxtxg = maxtxg;
2627 spa->spa_scrub_th = traverse_init(spa, spa_scrub_cb, NULL,
2628 ADVANCE_PRE | ADVANCE_PRUNE | ADVANCE_ZIL,
2630 traverse_add_pool(spa->spa_scrub_th, mintxg, maxtxg);
2631 spa->spa_scrub_thread = thread_create(NULL, 0,
2632 spa_scrub_thread, spa, 0, &p0, TS_RUN, minclsyspri);
2635 mutex_exit(&spa->spa_scrub_lock);
2641 * ==========================================================================
2642 * SPA async task processing
2643 * ==========================================================================
2647 spa_async_reopen(spa_t *spa)
2649 vdev_t *rvd = spa->spa_root_vdev;
2653 spa_config_enter(spa, RW_WRITER, FTAG);
2655 for (c = 0; c < rvd->vdev_children; c++) {
2656 tvd = rvd->vdev_child[c];
2657 if (tvd->vdev_reopen_wanted) {
2658 tvd->vdev_reopen_wanted = 0;
2663 spa_config_exit(spa, FTAG);
2667 spa_async_thread(void *arg)
2672 ASSERT(spa->spa_sync_on);
2674 mutex_enter(&spa->spa_async_lock);
2675 tasks = spa->spa_async_tasks;
2676 spa->spa_async_tasks = 0;
2677 mutex_exit(&spa->spa_async_lock);
2680 * See if the config needs to be updated.
2682 if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
2683 mutex_enter(&spa_namespace_lock);
2684 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
2685 mutex_exit(&spa_namespace_lock);
2689 * See if any devices need to be reopened.
2691 if (tasks & SPA_ASYNC_REOPEN)
2692 spa_async_reopen(spa);
2695 * If any devices are done replacing, detach them.
2697 if (tasks & SPA_ASYNC_REPLACE_DONE)
2698 spa_vdev_replace_done(spa);
2701 * Kick off a scrub.
2703 if (tasks & SPA_ASYNC_SCRUB)
2704 VERIFY(spa_scrub(spa, POOL_SCRUB_EVERYTHING, B_TRUE) == 0);
2707 * Kick off a resilver.
2709 if (tasks & SPA_ASYNC_RESILVER)
2710 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);
2713 * Let the world know that we're done.
2715 mutex_enter(&spa->spa_async_lock);
2716 spa->spa_async_thread = NULL;
2717 cv_broadcast(&spa->spa_async_cv);
2718 mutex_exit(&spa->spa_async_lock);
2723 spa_async_suspend(spa_t *spa)
2725 mutex_enter(&spa->spa_async_lock);
2726 spa->spa_async_suspended++;
2727 while (spa->spa_async_thread != NULL)
2728 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
2729 mutex_exit(&spa->spa_async_lock);
2733 spa_async_resume(spa_t *spa)
2735 mutex_enter(&spa->spa_async_lock);
2736 ASSERT(spa->spa_async_suspended != 0);
2737 spa->spa_async_suspended--;
2738 mutex_exit(&spa->spa_async_lock);
2742 spa_async_dispatch(spa_t *spa)
2744 mutex_enter(&spa->spa_async_lock);
2745 if (spa->spa_async_tasks && !spa->spa_async_suspended &&
2746 spa->spa_async_thread == NULL &&
2747 rootdir != NULL && !vn_is_readonly(rootdir))
2748 spa->spa_async_thread = thread_create(NULL, 0,
2749 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
2750 mutex_exit(&spa->spa_async_lock);
2754 spa_async_request(spa_t *spa, int task)
2756 mutex_enter(&spa->spa_async_lock);
2757 spa->spa_async_tasks |= task;
2758 mutex_exit(&spa->spa_async_lock);
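/*
 * Illustrative userland sketch (not from the original file): async requests
 * are accumulated as bits in a task word under a lock, and a single worker is
 * spawned lazily to service them.  This model folds spa_async_request() and
 * spa_async_dispatch() together and lets the worker loop until the word is
 * empty (the real code services one snapshot per thread and relies on
 * spa_sync() calling spa_async_dispatch() each txg).  Hypothetical names,
 * POSIX threads instead of the kernel primitives, error handling omitted.
 */
#if 0	/* example only */
#include <pthread.h>

#define	ASYNC_TASK_A	0x1
#define	ASYNC_TASK_B	0x2

typedef struct async_state {
	pthread_mutex_t	as_lock;
	int		as_tasks;	/* pending task bits */
	int		as_running;	/* is a worker active? */
} async_state_t;

static void *
async_worker(void *arg)
{
	async_state_t *as = arg;

	pthread_mutex_lock(&as->as_lock);
	while (as->as_tasks != 0) {
		int tasks = as->as_tasks;	/* snapshot and clear */

		as->as_tasks = 0;
		pthread_mutex_unlock(&as->as_lock);

		if (tasks & ASYNC_TASK_A) {
			/* ... service task A ... */
		}
		if (tasks & ASYNC_TASK_B) {
			/* ... service task B ... */
		}

		pthread_mutex_lock(&as->as_lock);
	}
	as->as_running = 0;		/* let the world know we're done */
	pthread_mutex_unlock(&as->as_lock);
	return (NULL);
}

static void
async_request(async_state_t *as, int task)
{
	pthread_mutex_lock(&as->as_lock);
	as->as_tasks |= task;
	if (!as->as_running) {		/* spawn the worker lazily */
		pthread_t tid;

		as->as_running = 1;
		(void) pthread_create(&tid, NULL, async_worker, as);
		(void) pthread_detach(tid);
	}
	pthread_mutex_unlock(&as->as_lock);
}
#endif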
2762 * ==========================================================================
2763 * SPA syncing routines
2764 * ==========================================================================
2768 spa_sync_deferred_frees(spa_t *spa, uint64_t txg)
2770 bplist_t *bpl = &spa->spa_sync_bplist;
2778 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CONFIG_HELD);
2780 while (bplist_iterate(bpl, &itor, &blk) == 0)
2781 zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL));
2783 error = zio_wait(zio);
2784 ASSERT3U(error, ==, 0);
2786 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
2787 bplist_vacate(bpl, tx);
2790 * Pre-dirty the first block so we sync to convergence faster.
2791 * (Usually only the first block is needed.)
2793 dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx);
2798 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
2800 char *packed = NULL;
2804 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
2806 packed = kmem_alloc(nvsize, KM_SLEEP);
2808 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
2811 dmu_write(spa->spa_meta_objset, obj, 0, nvsize, packed, tx);
2813 kmem_free(packed, nvsize);
2815 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
2816 dmu_buf_will_dirty(db, tx);
2817 *(uint64_t *)db->db_data = nvsize;
2818 dmu_buf_rele(db, FTAG);
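/*
 * Illustrative userland sketch (not from the original file): packing an
 * nvlist into a contiguous XDR buffer follows the same two-step pattern as
 * above -- ask for the packed size, allocate, then pack into the supplied
 * buffer.  This uses the libnvpair interfaces (link with -lnvpair on
 * Solaris-derived systems); error handling is minimal and the names are
 * hypothetical.
 */
#if 0	/* example only */
#include <stdlib.h>
#include <libnvpair.h>

static char *
pack_nvlist_xdr(nvlist_t *nv, size_t *sizep)
{
	char *packed = NULL;
	size_t nvsize = 0;

	if (nvlist_size(nv, &nvsize, NV_ENCODE_XDR) != 0)
		return (NULL);
	packed = malloc(nvsize);
	if (packed == NULL)
		return (NULL);
	if (nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 0) != 0) {
		free(packed);
		return (NULL);
	}
	*sizep = nvsize;	/* caller persists both the buffer and its size */
	return (packed);
}
#endif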
2822 spa_sync_spares(spa_t *spa, dmu_tx_t *tx)
2828 if (!spa->spa_sync_spares)
2832 * Update the MOS nvlist describing the list of available spares.
2833 * spa_validate_spares() will have already made sure this nvlist is
2834 * valid and the vdevs are labelled appropriately.
2836 if (spa->spa_spares_object == 0) {
2837 spa->spa_spares_object = dmu_object_alloc(spa->spa_meta_objset,
2838 DMU_OT_PACKED_NVLIST, 1 << 14,
2839 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
2840 VERIFY(zap_update(spa->spa_meta_objset,
2841 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SPARES,
2842 sizeof (uint64_t), 1, &spa->spa_spares_object, tx) == 0);
2845 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2846 if (spa->spa_nspares == 0) {
2847 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
2850 spares = kmem_alloc(spa->spa_nspares * sizeof (void *),
2852 for (i = 0; i < spa->spa_nspares; i++)
2853 spares[i] = vdev_config_generate(spa,
2854 spa->spa_spares[i], B_FALSE, B_TRUE);
2855 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
2856 spares, spa->spa_nspares) == 0);
2857 for (i = 0; i < spa->spa_nspares; i++)
2858 nvlist_free(spares[i]);
2859 kmem_free(spares, spa->spa_nspares * sizeof (void *));
2862 spa_sync_nvlist(spa, spa->spa_spares_object, nvroot, tx);
2863 nvlist_free(nvroot);
2865 spa->spa_sync_spares = B_FALSE;
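/*
 * Illustrative userland sketch (not from the original file): the spares list
 * above is built by generating one nvlist per spare into a temporary array,
 * attaching the array to the parent nvlist, and then freeing the temporaries,
 * since nvlist_add_nvlist_array() copies its elements.  A minimal libnvpair
 * example with hypothetical names; error handling omitted.
 */
#if 0	/* example only */
#include <stdlib.h>
#include <libnvpair.h>

static nvlist_t *
build_spare_list(const char **paths, uint_t nspares)
{
	nvlist_t *root, **children;
	uint_t i;

	(void) nvlist_alloc(&root, NV_UNIQUE_NAME, 0);
	children = malloc(nspares * sizeof (nvlist_t *));
	for (i = 0; i < nspares; i++) {
		(void) nvlist_alloc(&children[i], NV_UNIQUE_NAME, 0);
		(void) nvlist_add_string(children[i], "path", paths[i]);
	}
	(void) nvlist_add_nvlist_array(root, "spares", children, nspares);
	for (i = 0; i < nspares; i++)
		nvlist_free(children[i]);	/* the array was copied into root */
	free(children);
	return (root);
}
#endif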
2869 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
2873 if (list_is_empty(&spa->spa_dirty_list))
2876 config = spa_config_generate(spa, NULL, dmu_tx_get_txg(tx), B_FALSE);
2878 if (spa->spa_config_syncing)
2879 nvlist_free(spa->spa_config_syncing);
2880 spa->spa_config_syncing = config;
2882 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
2886 spa_sync_props(void *arg1, void *arg2, dmu_tx_t *tx)
2889 nvlist_t *nvp = arg2;
2891 objset_t *mos = spa->spa_meta_objset;
2894 mutex_enter(&spa->spa_props_lock);
2895 if (spa->spa_pool_props_object == 0) {
2896 zapobj = zap_create(mos, DMU_OT_POOL_PROPS, DMU_OT_NONE, 0, tx);
2899 spa->spa_pool_props_object = zapobj;
2901 VERIFY(zap_update(mos, DMU_POOL_DIRECTORY_OBJECT,
2902 DMU_POOL_PROPS, 8, 1,
2903 &spa->spa_pool_props_object, tx) == 0);
2905 mutex_exit(&spa->spa_props_lock);
2908 while ((nvpair = nvlist_next_nvpair(nvp, nvpair))) {
2909 switch (zpool_name_to_prop(nvpair_name(nvpair))) {
2910 case ZFS_PROP_BOOTFS:
2911 VERIFY(nvlist_lookup_uint64(nvp,
2912 nvpair_name(nvpair), &spa->spa_bootfs) == 0);
2913 VERIFY(zap_update(mos,
2914 spa->spa_pool_props_object,
2915 zpool_prop_to_name(ZFS_PROP_BOOTFS), 8, 1,
2916 &spa->spa_bootfs, tx) == 0);
2923 * Sync the specified transaction group. New blocks may be dirtied as
2924 * part of the process, so we iterate until it converges.
2927 spa_sync(spa_t *spa, uint64_t txg)
2929 dsl_pool_t *dp = spa->spa_dsl_pool;
2930 objset_t *mos = spa->spa_meta_objset;
2931 bplist_t *bpl = &spa->spa_sync_bplist;
2932 vdev_t *rvd = spa->spa_root_vdev;
2938 * Lock out configuration changes.
2940 spa_config_enter(spa, RW_READER, FTAG);
2942 spa->spa_syncing_txg = txg;
2943 spa->spa_sync_pass = 0;
2945 VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj));
2947 tx = dmu_tx_create_assigned(dp, txg);
2950 * If we are upgrading to ZFS_VERSION_RAIDZ_DEFLATE this txg,
2951 * set spa_deflate if we have no raid-z vdevs.
2953 if (spa->spa_ubsync.ub_version < ZFS_VERSION_RAIDZ_DEFLATE &&
2954 spa->spa_uberblock.ub_version >= ZFS_VERSION_RAIDZ_DEFLATE) {
2957 for (i = 0; i < rvd->vdev_children; i++) {
2958 vd = rvd->vdev_child[i];
2959 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
2962 if (i == rvd->vdev_children) {
2963 spa->spa_deflate = TRUE;
2964 VERIFY(0 == zap_add(spa->spa_meta_objset,
2965 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
2966 sizeof (uint64_t), 1, &spa->spa_deflate, tx));
2971 * If anything has changed in this txg, push the deferred frees
2972 * from the previous txg. If not, leave them alone so that we
2973 * don't generate work on an otherwise idle system.
2975 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
2976 !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
2977 !txg_list_empty(&dp->dp_sync_tasks, txg))
2978 spa_sync_deferred_frees(spa, txg);
2981 * Iterate to convergence.
2984 spa->spa_sync_pass++;
2986 spa_sync_config_object(spa, tx);
2987 spa_sync_spares(spa, tx);
2988 spa_errlog_sync(spa, txg);
2989 dsl_pool_sync(dp, txg);
2992 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) {
2997 bplist_sync(bpl, tx);
2998 } while (dirty_vdevs);
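/*
 * Illustrative note (not from the original file): a pass can create more
 * work for the next one, which is why the loop keys off dirty_vdevs rather
 * than running a fixed number of times.  Roughly: pass 1 syncs the dirty
 * datasets, which dirties the space maps of (say) two top-level vdevs;
 * those vdevs are synced in the same pass, so dirty_vdevs is nonzero and
 * pass 2 runs to sync the metadata that vdev syncing itself dirtied.  When
 * a pass drains no vdevs, the state has converged and the loop exits.
 */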
3002 dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);
3005 * Rewrite the vdev configuration (which includes the uberblock)
3006 * to commit the transaction group.
3008 * If there are any dirty vdevs, sync the uberblock to all vdevs.
3009 * Otherwise, pick a random top-level vdev that's known to be
3010 * visible in the config cache (see spa_vdev_add() for details).
3011 * If the write fails, try the next vdev until we've tried them all.
3013 if (!list_is_empty(&spa->spa_dirty_list)) {
3014 VERIFY(vdev_config_sync(rvd, txg) == 0);
3016 int children = rvd->vdev_children;
3017 int c0 = spa_get_random(children);
3020 for (c = 0; c < children; c++) {
3021 vd = rvd->vdev_child[(c0 + c) % children];
3022 if (vd->vdev_ms_array == 0)
3024 if (vdev_config_sync(vd, txg) == 0)
3028 VERIFY(vdev_config_sync(rvd, txg) == 0);
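/*
 * Illustrative note (not from the original file): with four children and a
 * random starting index c0 == 2, the (c0 + c) % children walk tries the
 * children in the order 2, 3, 0, 1, so successive txgs spread the label
 * writes across top-level vdevs while every child is still attempted before
 * falling back to a full-tree config sync.
 */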
3034 * Clear the dirty config list.
3036 while ((vd = list_head(&spa->spa_dirty_list)) != NULL)
3037 vdev_config_clean(vd);
3040 * Now that the new config has synced transactionally,
3041 * let it become visible to the config cache.
3043 if (spa->spa_config_syncing != NULL) {
3044 spa_config_set(spa, spa->spa_config_syncing);
3045 spa->spa_config_txg = txg;
3046 spa->spa_config_syncing = NULL;
3050 * Make a stable copy of the fully synced uberblock.
3051 * We use this as the root for pool traversals.
3053 spa->spa_traverse_wanted = 1; /* tells traverse_more() to stop */
3055 spa_scrub_suspend(spa); /* stop scrubbing and finish I/Os */
3057 rw_enter(&spa->spa_traverse_lock, RW_WRITER);
3058 spa->spa_traverse_wanted = 0;
3059 spa->spa_ubsync = spa->spa_uberblock;
3060 rw_exit(&spa->spa_traverse_lock);
3062 spa_scrub_resume(spa); /* resume scrub with new ubsync */
3065 * Clean up the ZIL records for the synced txg.
3067 dsl_pool_zil_clean(dp);
3070 * Update usable space statistics.
3072 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
3073 vdev_sync_done(vd, txg);
3076 * It had better be the case that we didn't dirty anything
3077 * since vdev_config_sync().
3079 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
3080 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
3081 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
3082 ASSERT(bpl->bpl_queue == NULL);
3084 spa_config_exit(spa, FTAG);
3087 * If any async tasks have been requested, kick them off.
3089 spa_async_dispatch(spa);
3093 * Sync all pools. We don't want to hold the namespace lock across these
3094 * operations, so we take a reference on the spa_t and drop the lock during the
3095 * sync.
3098 spa_sync_allpools(void)
3101 mutex_enter(&spa_namespace_lock);
3102 while ((spa = spa_next(spa)) != NULL) {
3103 if (spa_state(spa) != POOL_STATE_ACTIVE)
3105 spa_open_ref(spa, FTAG);
3106 mutex_exit(&spa_namespace_lock);
3107 txg_wait_synced(spa_get_dsl(spa), 0);
3108 mutex_enter(&spa_namespace_lock);
3109 spa_close(spa, FTAG);
3111 mutex_exit(&spa_namespace_lock);
3115 * ==========================================================================
3116 * Miscellaneous routines
3117 * ==========================================================================
3121 * Remove all pools in the system.
3129 * Remove all cached state. All pools should be closed now,
3130 * so every spa in the AVL tree should be unreferenced.
3132 mutex_enter(&spa_namespace_lock);
3133 while ((spa = spa_next(NULL)) != NULL) {
3135 * Stop async tasks. The async thread may need to detach
3136 * a device that's been replaced, which requires grabbing
3137 * spa_namespace_lock, so we must drop it here.
3139 spa_open_ref(spa, FTAG);
3140 mutex_exit(&spa_namespace_lock);
3141 spa_async_suspend(spa);
3142 VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0);
3143 mutex_enter(&spa_namespace_lock);
3144 spa_close(spa, FTAG);
3146 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
3148 spa_deactivate(spa);
3152 mutex_exit(&spa_namespace_lock);
3156 spa_lookup_by_guid(spa_t *spa, uint64_t guid)
3158 return (vdev_lookup_by_guid(spa->spa_root_vdev, guid));
3162 spa_upgrade(spa_t *spa)
3164 spa_config_enter(spa, RW_WRITER, FTAG);
3167 * This should only be called for a non-faulted pool, and since a
3168 * future version would result in an unopenable pool, this shouldn't be
3169 * possible.
3171 ASSERT(spa->spa_uberblock.ub_version <= ZFS_VERSION);
3173 spa->spa_uberblock.ub_version = ZFS_VERSION;
3174 vdev_config_dirty(spa->spa_root_vdev);
3176 spa_config_exit(spa, FTAG);
3178 txg_wait_synced(spa_get_dsl(spa), 0);
3182 spa_has_spare(spa_t *spa, uint64_t guid)
3187 for (i = 0; i < spa->spa_nspares; i++)
3188 if (spa->spa_spares[i]->vdev_guid == guid)
3191 for (i = 0; i < spa->spa_pending_nspares; i++) {
3192 if (nvlist_lookup_uint64(spa->spa_pending_spares[i],
3193 ZPOOL_CONFIG_GUID, &spareguid) == 0 &&
3202 spa_set_props(spa_t *spa, nvlist_t *nvp)
3204 return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
3209 spa_get_props(spa_t *spa, nvlist_t **nvp)
3213 objset_t *mos = spa->spa_meta_objset;
3220 VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3222 mutex_enter(&spa->spa_props_lock);
3223 /* If no props object, then just return empty nvlist */
3224 if (spa->spa_pool_props_object == 0) {
3225 mutex_exit(&spa->spa_props_lock);
3229 for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
3230 (err = zap_cursor_retrieve(&zc, &za)) == 0;
3231 zap_cursor_advance(&zc)) {
3233 if ((prop = zpool_name_to_prop(za.za_name)) == ZFS_PROP_INVAL)
3236 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3237 switch (za.za_integer_length) {
3239 if (zfs_prop_default_numeric(prop) ==
3240 za.za_first_integer)
3241 src = ZFS_SRC_DEFAULT;
3243 src = ZFS_SRC_LOCAL;
3244 value = za.za_first_integer;
3246 if (prop == ZFS_PROP_BOOTFS) {
3248 dsl_dataset_t *ds = NULL;
3249 char strval[MAXPATHLEN];
3251 dp = spa_get_dsl(spa);
3252 rw_enter(&dp->dp_config_rwlock, RW_READER);
3253 if ((err = dsl_dataset_open_obj(dp,
3254 za.za_first_integer, NULL, DS_MODE_NONE,
3256 rw_exit(&dp->dp_config_rwlock);
3259 dsl_dataset_name(ds, strval);
3260 dsl_dataset_close(ds, DS_MODE_NONE, FTAG);
3261 rw_exit(&dp->dp_config_rwlock);
3263 VERIFY(nvlist_add_uint64(propval,
3264 ZFS_PROP_SOURCE, src) == 0);
3265 VERIFY(nvlist_add_string(propval,
3266 ZFS_PROP_VALUE, strval) == 0);
3268 VERIFY(nvlist_add_uint64(propval,
3269 ZFS_PROP_SOURCE, src) == 0);
3270 VERIFY(nvlist_add_uint64(propval,
3271 ZFS_PROP_VALUE, value) == 0);
3273 VERIFY(nvlist_add_nvlist(*nvp, za.za_name,
3277 nvlist_free(propval);
3279 zap_cursor_fini(&zc);
3280 mutex_exit(&spa->spa_props_lock);
3281 if (err && err != ENOENT) {
3290 * If the bootfs property value is dsobj, clear it.
3293 spa_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
3295 if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
3296 VERIFY(zap_remove(spa->spa_meta_objset,
3297 spa->spa_pool_props_object,
3298 zpool_prop_to_name(ZFS_PROP_BOOTFS), tx) == 0);
3299 spa->spa_bootfs = 0;