/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
/*
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/callb.h>
int zio_taskq_threads = 0;
SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zio, CTLFLAG_RW, 0, "ZFS ZIO");
TUNABLE_INT("vfs.zfs.zio.taskq_threads", &zio_taskq_threads);
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, taskq_threads, CTLFLAG_RW,
    &zio_taskq_threads, 0, "Number of ZIO threads per ZIO type");
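
/*
 * A zio_taskq_threads value of 0 (the default) means "size the taskqs
 * automatically"; spa_activate() below substitutes max_ncpus in that case.
 * On FreeBSD the value can also be preset from the loader, e.g. in
 * /boot/loader.conf:
 *
 *	vfs.zfs.zio.taskq_threads="16"
 */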
/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */
static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}
/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}
/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa)
{
	int t;
	int nthreads = zio_taskq_threads;
	char name[32];

	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;

	spa->spa_normal_class = metaslab_class_create();

	if (nthreads == 0)
		nthreads = max_ncpus;
	for (t = 0; t < ZIO_TYPES; t++) {
		snprintf(name, sizeof(name), "spa_zio_issue %d", t);
		spa->spa_zio_issue_taskq[t] = taskq_create(name, nthreads,
		    maxclsyspri, 50, INT_MAX, TASKQ_PREPOPULATE);
		snprintf(name, sizeof(name), "spa_zio_intr %d", t);
		spa->spa_zio_intr_taskq[t] = taskq_create(name, nthreads,
		    maxclsyspri, 50, INT_MAX, TASKQ_PREPOPULATE);
	}

	rw_init(&spa->spa_traverse_lock, NULL, RW_DEFAULT, NULL);

	mutex_init(&spa->spa_uberblock_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_config_lock.scl_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa->spa_config_lock.scl_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&spa->spa_sync_bplist.bpl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);

	list_create(&spa->spa_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}
/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	int t;

	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);

	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_dirty_list);

	for (t = 0; t < ZIO_TYPES; t++) {
		taskq_destroy(spa->spa_zio_issue_taskq[t]);
		taskq_destroy(spa->spa_zio_intr_taskq[t]);
		spa->spa_zio_issue_taskq[t] = NULL;
		spa->spa_zio_intr_taskq[t] = NULL;
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues.  Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	rw_destroy(&spa->spa_traverse_lock);
	mutex_destroy(&spa->spa_uberblock_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_config_lock.scl_lock);
	cv_destroy(&spa->spa_config_lock.scl_cv);
	mutex_destroy(&spa->spa_sync_bplist.bpl_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_props_lock);

	spa->spa_state = POOL_STATE_UNINITIALIZED;
}
/*
 * Verify a pool configuration, and construct the vdev tree appropriately.  This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state.  This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t c, children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (EINVAL);
	}

	for (c = 0; c < children; c++) {
		vdev_t *vd;

		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}
/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding prefetch I/O to complete.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	spa_config_exit(spa, FTAG);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
	}

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	for (i = 0; i < spa->spa_nspares; i++)
		vdev_free(spa->spa_spares[i]);
	if (spa->spa_spares) {
		kmem_free(spa->spa_spares, spa->spa_nspares * sizeof (void *));
		spa->spa_spares = NULL;
	}
	if (spa->spa_sparelist) {
		nvlist_free(spa->spa_sparelist);
		spa->spa_sparelist = NULL;
	}

	spa->spa_async_suspended = 0;
}
/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_sparelist'.  We parse this into vdevs, try to open them, and then
 * re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_nspares; i++) {
		vd = spa->spa_spares[i];

		/* Undo the call to spa_spare_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid)) != NULL &&
		    tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares)
		kmem_free(spa->spa_spares, spa->spa_nspares * sizeof (void *));

	if (spa->spa_sparelist == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_nspares = (int)nspares;
	spa->spa_spares = NULL;

	if (nspares == 0)
		return;
	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process.  For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in).  During this phase we open and
	 * validate each vdev on the spare list.  If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares = kmem_alloc(nspares * sizeof (void *), KM_SLEEP);
	for (i = 0; i < spa->spa_nspares; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev.  Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise).  Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		if (vdev_open(vd) != 0)
			continue;

		vd->vdev_top = vd;
		(void) vdev_validate_spare(vd);
	}
	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_nspares * sizeof (void *), KM_SLEEP);
	for (i = 0; i < spa->spa_nspares; i++)
		spares[i] = vdev_config_generate(spa, spa->spa_spares[i],
		    B_TRUE, B_TRUE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
	    spares, spa->spa_nspares) == 0);
	for (i = 0; i < spa->spa_nspares; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_nspares * sizeof (void *));
}
static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}
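
/*
 * load_nvlist() is the common reader for packed-nvlist objects in the MOS:
 * both the pool config (DMU_POOL_CONFIG) and the hot spare list
 * (DMU_POOL_SPARES) are loaded through it below.  The object's bonus buffer
 * holds the packed size; the object data holds the packed bytes.
 */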
/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 */
static int
spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
{
	int error = 0;
	nvlist_t *nvroot = NULL;
	vdev_t *rvd;
	uberblock_t *ub = &spa->spa_uberblock;
	uint64_t config_cache_txg = spa->spa_config_txg;
	uint64_t pool_guid;
	uint64_t version;
	zio_t *zio;

	spa->spa_load_state = state;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Versioning wasn't explicitly added to the label until later, so if
	 * it's not present treat it as the initial version.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
		version = ZFS_VERSION_INITIAL;

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &spa->spa_config_txg);

	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0)) {
		error = EEXIST;
		goto out;
	}

	spa->spa_load_guid = pool_guid;
	/*
	 * Parse the configuration into a vdev tree.  We explicitly set the
	 * value that will be returned by spa_version() since parsing the
	 * configuration requires knowing the version number.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	spa->spa_ubsync.ub_version = version;
	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
	spa_config_exit(spa, FTAG);

	if (error != 0)
		goto out;

	ASSERT(spa->spa_root_vdev == rvd);
	ASSERT(spa_guid(spa) == pool_guid);

	/*
	 * Try to open all vdevs, loading each label in the process.
	 */
	if (vdev_open(rvd) != 0) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Validate the labels for all leaf vdevs.  We need to grab the config
	 * lock because all label I/O is done with the ZIO_FLAG_CONFIG_HELD
	 * flag.
	 */
	spa_config_enter(spa, RW_READER, FTAG);
	error = vdev_validate(rvd);
	spa_config_exit(spa, FTAG);

	if (error != 0) {
		error = EBADF;
		goto out;
	}

	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}
	/*
	 * Find the best uberblock.
	 */
	bzero(ub, sizeof (uberblock_t));

	zio = zio_root(spa, NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
	vdev_uberblock_load(zio, rvd, ub);
	error = zio_wait(zio);

	/*
	 * If we weren't able to find a single valid uberblock, return failure.
	 */
	if (ub->ub_txg == 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = ENXIO;
		goto out;
	}

	/*
	 * If the pool is newer than the code, we can't open it.
	 */
	if (ub->ub_version > ZFS_VERSION) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_VERSION_NEWER);
		error = ENOTSUP;
		goto out;
	}

	/*
	 * If the vdev guid sum doesn't match the uberblock, we have an
	 * incomplete configuration.
	 */
	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_GUID_SUM);
		error = ENXIO;
		goto out;
	}
	/*
	 * Initialize internal SPA structures.
	 */
	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_ubsync = spa->spa_uberblock;
	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
	if (error) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		goto out;
	}
	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (!mosconfig) {
		nvlist_t *newconfig;
		uint64_t hostid;

		if (load_nvlist(spa, spa->spa_config_object,
		    &newconfig) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		if (nvlist_lookup_uint64(newconfig, ZPOOL_CONFIG_HOSTID,
		    &hostid) == 0) {
			char *hostname;
			unsigned long myhostid = 0;

			VERIFY(nvlist_lookup_string(newconfig,
			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);

			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
			if ((unsigned long)hostid != myhostid) {
				cmn_err(CE_WARN, "pool '%s' could not be "
				    "loaded as it was last accessed by "
				    "another system (host: %s hostid: 0x%lx). "
				    "See: http://www.sun.com/msg/ZFS-8000-EY",
				    spa->spa_name, hostname,
				    (unsigned long)hostid);
				error = EBADF;
				goto out;
			}
		}

		spa_config_set(spa, newconfig);

		spa_unload(spa);
		spa_deactivate(spa);
		spa_activate(spa);

		/*
		 * Re-load with mosconfig set to B_TRUE, so this recursion
		 * terminates after the second pass.
		 */
		return (spa_load(spa, newconfig, state, B_TRUE));
	}
	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the bit that tells us to use the new accounting function
	 * (raid-z deflation).  If we have an older pool, this will not
	 * be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	/*
	 * Load the persistent error log.  If we have an older pool, this will
	 * not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the history object.  If we have an older pool, this
	 * will not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
	    sizeof (uint64_t), 1, &spa->spa_history);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load any hot spares for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= ZFS_VERSION_SPARES);
		if (load_nvlist(spa, spa->spa_spares_object,
		    &spa->spa_sparelist) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_spares(spa);
		spa_config_exit(spa, FTAG);
	}

	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);

	if (error && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (error == 0) {
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZFS_PROP_BOOTFS),
		    sizeof (uint64_t), 1, &spa->spa_bootfs);
	}
	/*
	 * Load the vdev state for all toplevel vdevs.
	 */
	vdev_load(rvd);

	/*
	 * Propagate the leaf DTLs we just loaded all the way up the tree.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
	spa_config_exit(spa, FTAG);

	/*
	 * Check the state of the root vdev.  If it can't be opened, it
	 * indicates one or more toplevel vdevs are faulted.
	 */
	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) {
		dmu_tx_t *tx;
		int need_update = B_FALSE;
		int c;

		/*
		 * Claim log blocks that haven't been committed yet.
		 * This must all happen in a single txg.
		 */
		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		(void) dmu_objset_find(spa->spa_name,
		    zil_claim, tx, DS_FIND_CHILDREN);
		dmu_tx_commit(tx);

		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT)
			need_update = B_TRUE;

		for (c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asynchronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
	}

	error = 0;
out:
	if (error && error != EBADF)
		zfs_ereport_post(FM_EREPORT_ZFS_POOL, spa, NULL, NULL, 0, 0);
	spa->spa_load_state = SPA_LOAD_NONE;
	spa->spa_ena = 0;

	return (error);
}
/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is sent
 * down from userland, instead of grabbed from the configuration cache.  For the
 * case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics at
 * the same time we open the pool, without having to keep around the spa_t in
 * some ambiguous state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
	spa_t *spa;
	int error;
	int loaded = B_FALSE;
	int locked = B_FALSE;

	*spapp = NULL;

	/*
	 * As disgusting as this is, we need to support recursive calls to this
	 * function because dsl_dir_open() is called during spa_load(), and ends
	 * up calling spa_open() again.  The real fix is to figure out how to
	 * avoid dsl_dir_open() calling this in the first place.
	 */
	if (mutex_owner(&spa_namespace_lock) != curthread) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	if ((spa = spa_lookup(pool)) == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}
	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {

		spa_activate(spa);

		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);

		if (error == EBADF) {
			/*
			 * If vdev_validate() returns failure (indicated by
			 * EBADF), it means one of the vdevs indicates that
			 * the pool has been exported or destroyed.  If this
			 * is the case, the config cache is out of sync and
			 * we should remove the pool from the namespace.
			 */
			zfs_post_ok(spa, NULL);
			spa_unload(spa);
			spa_deactivate(spa);
			spa_remove(spa);
			spa_config_sync();
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (ENOENT);
		}

		if (error) {
			/*
			 * We can't open the pool, but we still have useful
			 * information: the state of each vdev after the
			 * attempted vdev_open().  Return this to the user.
			 */
			if (config != NULL && spa->spa_root_vdev != NULL) {
				spa_config_enter(spa, RW_READER, FTAG);
				*config = spa_config_generate(spa, NULL, -1ULL,
				    B_TRUE);
				spa_config_exit(spa, FTAG);
			}
			spa_unload(spa);
			spa_deactivate(spa);
			spa->spa_last_open_failed = B_TRUE;
			if (locked)
				mutex_exit(&spa_namespace_lock);
			*spapp = NULL;
			return (error);
		} else {
			zfs_post_ok(spa, NULL);
			spa->spa_last_open_failed = B_FALSE;
		}

		loaded = B_TRUE;
	}

	spa_open_ref(spa, tag);
	if (locked)
		mutex_exit(&spa_namespace_lock);

	*spapp = spa;

	if (config != NULL) {
		spa_config_enter(spa, RW_READER, FTAG);
		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
		spa_config_exit(spa, FTAG);
	}

	/*
	 * If we just loaded the pool, resilver anything that's out of date.
	 */
	if (loaded && (spa_mode & FWRITE))
		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

	return (0);
}
int
spa_open(const char *name, spa_t **spapp, void *tag)
{
	return (spa_open_common(name, spapp, tag, NULL));
}
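
/*
 * A sketch of typical spa_open() usage: a caller holds a reference for as
 * long as it uses the pool, then drops it with spa_close() using the same
 * tag ("tank" here is just an example pool name):
 *
 *	spa_t *spa;
 *	int error;
 *
 *	if ((error = spa_open("tank", &spa, FTAG)) != 0)
 *		return (error);
 *	... use spa ...
 *	spa_close(spa, FTAG);
 */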
/*
 * Lookup the given spa_t, incrementing the inject count in the process,
 * preventing it from being exported or destroyed.
 */
spa_t *
spa_inject_addref(char *name)
{
	spa_t *spa;

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (NULL);
	}
	spa->spa_inject_ref++;
	mutex_exit(&spa_namespace_lock);

	return (spa);
}

void
spa_inject_delref(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);
	spa->spa_inject_ref--;
	mutex_exit(&spa_namespace_lock);
}
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
	nvlist_t **spares;
	uint_t i, nspares;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_stat_t *vs;
	uint_t vsc;
	uint64_t pool;

	if (spa->spa_nspares == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
	if (nspares != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

		/*
		 * Go through and find any spares which have since been
		 * repurposed as an active spare.  If this is the case, update
		 * their status appropriately.
		 */
		for (i = 0; i < nspares; i++) {
			VERIFY(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (spa_spare_exists(guid, &pool) && pool != 0ULL) {
				VERIFY(nvlist_lookup_uint64_array(
				    spares[i], ZPOOL_CONFIG_STATS,
				    (uint64_t **)&vs, &vsc) == 0);
				vs->vs_state = VDEV_STATE_CANT_OPEN;
				vs->vs_aux = VDEV_AUX_SPARED;
			}
		}
	}
}
int
spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
{
	int error;
	spa_t *spa;

	*config = NULL;
	error = spa_open_common(name, &spa, FTAG, config);

	if (spa && *config != NULL) {
		VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
		    spa_get_errlog_size(spa)) == 0);

		spa_add_spares(spa, *config);
	}

	/*
	 * We want to get the alternate root even for faulted pools, so we cheat
	 * and call spa_lookup() directly.
	 */
	if (altroot) {
		if (spa == NULL) {
			mutex_enter(&spa_namespace_lock);
			spa = spa_lookup(name);
			if (spa)
				spa_altroot(spa, altroot, buflen);
			else
				altroot[0] = '\0';
			spa = NULL;
			mutex_exit(&spa_namespace_lock);
		} else {
			spa_altroot(spa, altroot, buflen);
		}
	}

	if (spa != NULL)
		spa_close(spa, FTAG);

	return (error);
}
/*
 * Validate that the 'spares' array is well formed.  We must have an array of
 * nvlists, each of which describes a valid leaf vdev.  If this is an import
 * (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be specified,
 * as long as they are well-formed.
 */
static int
spa_validate_spares(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
	nvlist_t **spares;
	uint_t i, nspares;
	vdev_t *vd;
	int error;

	/*
	 * It's acceptable to have no spares specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) != 0)
		return (0);

	if (nspares == 0)
		return (EINVAL);

	/*
	 * Make sure the pool is formatted with a version that supports hot
	 * spares.
	 */
	if (spa_version(spa) < ZFS_VERSION_SPARES)
		return (ENOTSUP);

	/*
	 * Set the pending spare list so we correctly handle device in-use
	 * checking.
	 */
	spa->spa_pending_spares = spares;
	spa->spa_pending_nspares = nspares;

	for (i = 0; i < nspares; i++) {
		if ((error = spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    mode)) != 0)
			goto out;

		if (!vd->vdev_ops->vdev_op_leaf) {
			vdev_free(vd);
			error = EINVAL;
			goto out;
		}

		vd->vdev_top = vd;

		if ((error = vdev_open(vd)) == 0 &&
		    (error = vdev_label_init(vd, crtxg,
		    VDEV_LABEL_SPARE)) == 0) {
			VERIFY(nvlist_add_uint64(spares[i], ZPOOL_CONFIG_GUID,
			    vd->vdev_guid) == 0);
		}

		vdev_free(vd);

		if (error && mode != VDEV_ALLOC_SPARE)
			goto out;
		else
			error = 0;
	}

out:
	spa->spa_pending_spares = NULL;
	spa->spa_pending_nspares = 0;
	return (error);
}
/*
 * Pool Creation
 */
int
spa_create(const char *pool, nvlist_t *nvroot, const char *altroot)
{
	spa_t *spa;
	vdev_t *rvd;
	dsl_pool_t *dp;
	dmu_tx_t *tx;
	int c, error = 0;
	uint64_t txg = TXG_INITIAL;
	nvlist_t **spares;
	uint_t nspares;

	/*
	 * If this pool already exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Allocate a new spa_t structure.
	 */
	spa = spa_add(pool, altroot);
	spa_activate(spa);

	spa->spa_uberblock.ub_txg = txg - 1;
	spa->spa_uberblock.ub_version = ZFS_VERSION;
	spa->spa_ubsync = spa->spa_uberblock;

	/*
	 * Create the root vdev.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);

	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);

	ASSERT(error != 0 || rvd != NULL);
	ASSERT(error != 0 || spa->spa_root_vdev == rvd);

	if (error == 0 && rvd->vdev_children == 0)
		error = EINVAL;

	if (error == 0 &&
	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
	    (error = spa_validate_spares(spa, nvroot, txg,
	    VDEV_ALLOC_ADD)) == 0) {
		for (c = 0; c < rvd->vdev_children; c++)
			vdev_init(rvd->vdev_child[c], txg);
		vdev_config_dirty(rvd);
	}

	spa_config_exit(spa, FTAG);

	if (error != 0) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Get the list of spares, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_sparelist, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_spares(spa);
		spa_config_exit(spa, FTAG);
		spa->spa_sync_spares = B_TRUE;
	}

	spa->spa_dsl_pool = dp = dsl_pool_create(spa, txg);
	spa->spa_meta_objset = dp->dp_meta_objset;

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Create the pool config object.
	 */
	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
	    DMU_OT_PACKED_NVLIST, 1 << 14,
	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add pool config");
	}

	/* Newly created pools are always deflated. */
	spa->spa_deflate = TRUE;
	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add deflate");
	}

	/*
	 * Create the deferred-free bplist object.  Turn off compression
	 * because sync-to-convergence takes longer if the blocksize
	 * keeps changing.
	 */
	spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
	    1 << 14, tx);
	dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
	    ZIO_COMPRESS_OFF, tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add bplist");
	}

	/*
	 * Create the pool's history object.
	 */
	spa_history_create_obj(spa, tx);

	dmu_tx_commit(tx);

	spa->spa_bootfs = zfs_prop_default_numeric(ZFS_PROP_BOOTFS);
	spa->spa_sync_on = B_TRUE;
	txg_sync_start(spa->spa_dsl_pool);

	/*
	 * We explicitly wait for the first transaction to complete so that our
	 * bean counters are appropriately updated.
	 */
	txg_wait_synced(spa->spa_dsl_pool, txg);

	spa_config_sync();

	mutex_exit(&spa_namespace_lock);

	return (0);
}
/*
 * Import the given pool into the system.  We set up the necessary spa_t and
 * then call spa_load() to do the dirty work.
 */
int
spa_import(const char *pool, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	int error;
	nvlist_t *nvroot;
	nvlist_t **spares;
	uint_t nspares;

	if (!(spa_mode & FWRITE))
		return (EROFS);

	/*
	 * If a pool with this name exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Create and initialize the spa structure.
	 */
	spa = spa_add(pool, altroot);
	spa_activate(spa);

	/*
	 * Pass off the heavy lifting to spa_load().
	 * Pass TRUE for mosconfig because the user-supplied config
	 * is actually the one to trust when doing an import.
	 */
	error = spa_load(spa, config, SPA_LOAD_IMPORT, B_TRUE);

	spa_config_enter(spa, RW_WRITER, FTAG);
	/*
	 * Toss any existing sparelist, as it doesn't have any validity anymore,
	 * and conflicts with spa_has_spare().
	 */
	if (spa->spa_sparelist) {
		nvlist_free(spa->spa_sparelist);
		spa->spa_sparelist = NULL;
		spa_load_spares(spa);
	}

	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (error == 0)
		error = spa_validate_spares(spa, nvroot, -1ULL,
		    VDEV_ALLOC_SPARE);
	spa_config_exit(spa, FTAG);

	if (error != 0) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Override any spares as specified by the user, as these may have
	 * correct device names/devids, etc.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		if (spa->spa_sparelist)
			VERIFY(nvlist_remove(spa->spa_sparelist,
			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
		else
			VERIFY(nvlist_alloc(&spa->spa_sparelist,
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_spares(spa);
		spa_config_exit(spa, FTAG);
		spa->spa_sync_spares = B_TRUE;
	}

	/*
	 * Update the config cache to include the newly-imported pool.
	 */
	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);

	mutex_exit(&spa_namespace_lock);

	/*
	 * Resilver anything that's out of date.
	 */
	if (spa_mode & FWRITE)
		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

	return (0);
}
/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"
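
/*
 * User pool names may not begin with '$', so TRYIMPORT_NAME can never
 * collide with a real pool in the namespace.
 */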
nvlist_t *
spa_tryimport(nvlist_t *tryconfig)
{
	nvlist_t *config = NULL;
	char *poolname;
	spa_t *spa;
	uint64_t state;

	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
		return (NULL);

	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
		return (NULL);

	/*
	 * Create and initialize the spa structure.
	 */
	mutex_enter(&spa_namespace_lock);
	spa = spa_add(TRYIMPORT_NAME, NULL);
	spa_activate(spa);

	/*
	 * Pass off the heavy lifting to spa_load().
	 * Pass TRUE for mosconfig because the user-supplied config
	 * is actually the one to trust when doing an import.
	 */
	(void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);

	/*
	 * If 'tryconfig' was at least parsable, return the current config.
	 */
	if (spa->spa_root_vdev != NULL) {
		spa_config_enter(spa, RW_READER, FTAG);
		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
		spa_config_exit(spa, FTAG);
		VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
		    poolname) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    state) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
		    spa->spa_uberblock.ub_timestamp) == 0);

		/*
		 * Add the list of hot spares.
		 */
		spa_add_spares(spa, config);
	}

	spa_unload(spa);
	spa_deactivate(spa);
	spa_remove(spa);
	mutex_exit(&spa_namespace_lock);

	return (config);
}
/*
 * Pool export/destroy
 *
 * The act of destroying or exporting a pool is very simple.  We make sure there
 * is no more pending I/O and any references to the pool are gone.  Then, we
 * update the pool state and sync all the labels to disk, removing the
 * configuration from the cache afterwards.
 */
static int
spa_export_common(char *pool, int new_state, nvlist_t **oldconfig)
{
	spa_t *spa;

	if (oldconfig)
		*oldconfig = NULL;

	if (!(spa_mode & FWRITE))
		return (EROFS);

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(pool)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}

	/*
	 * Put a hold on the pool, drop the namespace lock, stop async tasks,
	 * reacquire the namespace lock, and see if we can export.
	 */
	spa_open_ref(spa, FTAG);
	mutex_exit(&spa_namespace_lock);
	spa_async_suspend(spa);
	mutex_enter(&spa_namespace_lock);
	spa_close(spa, FTAG);

	/*
	 * The pool will be in core if it's openable,
	 * in which case we can modify its state.
	 */
	if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
		/*
		 * Objsets may be open only because they're dirty, so we
		 * have to force it to sync before checking spa_refcnt.
		 */
		spa_scrub_suspend(spa);
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * A pool cannot be exported or destroyed if there are active
		 * references.  If we are resetting a pool, allow references by
		 * fault injection handlers.
		 */
		if (!spa_refcount_zero(spa) ||
		    (spa->spa_inject_ref != 0 &&
		    new_state != POOL_STATE_UNINITIALIZED)) {
			spa_scrub_resume(spa);
			spa_async_resume(spa);
			mutex_exit(&spa_namespace_lock);
			return (EBUSY);
		}

		spa_scrub_resume(spa);
		VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0);

		/*
		 * We want this to be reflected on every label,
		 * so mark them all dirty.  spa_unload() will do the
		 * final sync that pushes these changes out.
		 */
		if (new_state != POOL_STATE_UNINITIALIZED) {
			spa_config_enter(spa, RW_WRITER, FTAG);
			spa->spa_state = new_state;
			spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
			vdev_config_dirty(spa->spa_root_vdev);
			spa_config_exit(spa, FTAG);
		}
	}

	if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
		spa_unload(spa);
		spa_deactivate(spa);
	}

	if (oldconfig && spa->spa_config)
		VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);

	if (new_state != POOL_STATE_UNINITIALIZED) {
		spa_remove(spa);
		spa_config_sync();
	}
	mutex_exit(&spa_namespace_lock);

	return (0);
}
/*
 * Destroy a storage pool.
 */
int
spa_destroy(char *pool)
{
	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL));
}

/*
 * Export a storage pool.
 */
int
spa_export(char *pool, nvlist_t **oldconfig)
{
	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig));
}

/*
 * Similar to spa_export(), this unloads the spa_t without actually removing it
 * from the namespace in any way.
 */
int
spa_reset(char *pool)
{
	return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL));
}
/*
 * ==========================================================================
 * Device manipulation
 * ==========================================================================
 */
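
/*
 * The entry points below -- spa_vdev_add(), spa_vdev_attach(),
 * spa_vdev_detach() and spa_vdev_setpath() -- all follow the same pattern:
 * grab the config lock via spa_vdev_enter(), mutate the vdev tree, and sync
 * the change out through spa_vdev_exit().  spa_vdev_remove() manipulates
 * only the spare list and takes the config lock directly.
 */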
/*
 * Add capacity to a storage pool.
 */
int
spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
{
	uint64_t txg;
	int c, i, error;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd, *tvd;
	nvlist_t **spares;
	uint_t nspares;

	txg = spa_vdev_enter(spa);

	if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
	    VDEV_ALLOC_ADD)) != 0)
		return (spa_vdev_exit(spa, NULL, txg, error));

	spa->spa_pending_vdev = vd;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) != 0)
		nspares = 0;

	if (vd->vdev_children == 0 && nspares == 0) {
		spa->spa_pending_vdev = NULL;
		return (spa_vdev_exit(spa, vd, txg, EINVAL));
	}

	if (vd->vdev_children != 0) {
		if ((error = vdev_create(vd, txg, B_FALSE)) != 0) {
			spa->spa_pending_vdev = NULL;
			return (spa_vdev_exit(spa, vd, txg, error));
		}
	}

	/*
	 * We must validate the spares after checking the children.  Otherwise,
	 * vdev_inuse() will blindly overwrite the spare.
	 */
	if ((error = spa_validate_spares(spa, nvroot, txg,
	    VDEV_ALLOC_ADD)) != 0) {
		spa->spa_pending_vdev = NULL;
		return (spa_vdev_exit(spa, vd, txg, error));
	}

	spa->spa_pending_vdev = NULL;

	/*
	 * Transfer each new top-level vdev from vd to rvd.
	 */
	for (c = 0; c < vd->vdev_children; c++) {
		tvd = vd->vdev_child[c];
		vdev_remove_child(vd, tvd);
		tvd->vdev_id = rvd->vdev_children;
		vdev_add_child(rvd, tvd);
		vdev_config_dirty(tvd);
	}
	if (nspares != 0) {
		if (spa->spa_sparelist != NULL) {
			nvlist_t **oldspares;
			uint_t oldnspares;
			nvlist_t **newspares;

			VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
			    ZPOOL_CONFIG_SPARES, &oldspares, &oldnspares) == 0);

			newspares = kmem_alloc(sizeof (void *) *
			    (nspares + oldnspares), KM_SLEEP);
			for (i = 0; i < oldnspares; i++)
				VERIFY(nvlist_dup(oldspares[i],
				    &newspares[i], KM_SLEEP) == 0);
			for (i = 0; i < nspares; i++)
				VERIFY(nvlist_dup(spares[i],
				    &newspares[i + oldnspares],
				    KM_SLEEP) == 0);

			VERIFY(nvlist_remove(spa->spa_sparelist,
			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);

			VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
			    ZPOOL_CONFIG_SPARES, newspares,
			    nspares + oldnspares) == 0);
			for (i = 0; i < oldnspares + nspares; i++)
				nvlist_free(newspares[i]);
			kmem_free(newspares, (oldnspares + nspares) *
			    sizeof (void *));
		} else {
			VERIFY(nvlist_alloc(&spa->spa_sparelist,
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
			VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
			    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		}

		spa_load_spares(spa);
		spa->spa_sync_spares = B_TRUE;
	}
	/*
	 * We have to be careful when adding new vdevs to an existing pool.
	 * If other threads start allocating from these vdevs before we
	 * sync the config cache, and we lose power, then upon reboot we may
	 * fail to open the pool because there are DVAs that the config cache
	 * can't translate.  Therefore, we first add the vdevs without
	 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
	 * and then let spa_config_update() initialize the new metaslabs.
	 *
	 * spa_load() checks for added-but-not-initialized vdevs, so that
	 * if we lose power at any point in this sequence, the remaining
	 * steps will be completed the next time we load the pool.
	 */
	(void) spa_vdev_exit(spa, vd, txg, 0);

	mutex_enter(&spa_namespace_lock);
	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
	mutex_exit(&spa_namespace_lock);

	return (0);
}
/*
 * Attach a device to a mirror.  The arguments are the path to any device
 * in the mirror, and the nvroot for the new device.  If the path specifies
 * a device that is not mirrored, we automatically insert the mirror vdev.
 *
 * If 'replacing' is specified, the new device is intended to replace the
 * existing device; in this case the two devices are made into their own
 * mirror using the 'replacing' vdev, which is functionally identical to
 * the mirror vdev (it actually reuses all the same ops) but has a few
 * extra rules: you can't attach to it after it's been created, and upon
 * completion of resilvering, the first disk (the one being replaced)
 * is automatically detached.
 */
int
spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
{
	uint64_t txg, open_txg;
	int error;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
	vdev_ops_t *pvops;

	txg = spa_vdev_enter(spa);

	oldvd = vdev_lookup_by_guid(rvd, guid);

	if (oldvd == NULL)
		return (spa_vdev_exit(spa, NULL, txg, ENODEV));

	if (!oldvd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	pvd = oldvd->vdev_parent;

	if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
	    VDEV_ALLOC_ADD)) != 0 || newrootvd->vdev_children != 1)
		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));

	newvd = newrootvd->vdev_child[0];

	if (!newvd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));

	if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
		return (spa_vdev_exit(spa, newrootvd, txg, error));

	if (!replacing) {
		/*
		 * For attach, the only allowable parent is a mirror or the root
		 * vdev.
		 */
		if (pvd->vdev_ops != &vdev_mirror_ops &&
		    pvd->vdev_ops != &vdev_root_ops)
			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));

		pvops = &vdev_mirror_ops;
	} else {
		/*
		 * Active hot spares can only be replaced by inactive hot
		 * spares.
		 */
		if (pvd->vdev_ops == &vdev_spare_ops &&
		    pvd->vdev_child[1] == oldvd &&
		    !spa_has_spare(spa, newvd->vdev_guid))
			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));

		/*
		 * If the source is a hot spare, and the parent isn't already a
		 * spare, then we want to create a new hot spare.  Otherwise, we
		 * want to create a replacing vdev.  The user is not allowed to
		 * attach to a spared vdev child unless the 'isspare' state is
		 * the same (spare replaces spare, non-spare replaces
		 * non-spare).
		 */
		if (pvd->vdev_ops == &vdev_replacing_ops)
			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
		else if (pvd->vdev_ops == &vdev_spare_ops &&
		    newvd->vdev_isspare != oldvd->vdev_isspare)
			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
		else if (pvd->vdev_ops != &vdev_spare_ops &&
		    newvd->vdev_isspare)
			pvops = &vdev_spare_ops;
		else
			pvops = &vdev_replacing_ops;
	}
	/*
	 * Compare the new device size with the replaceable/attachable
	 * device size.
	 */
	if (newvd->vdev_psize < vdev_get_rsize(oldvd))
		return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));

	/*
	 * The new device cannot have a higher alignment requirement
	 * than the top-level vdev.
	 */
	if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
		return (spa_vdev_exit(spa, newrootvd, txg, EDOM));

	/*
	 * If this is an in-place replacement, update oldvd's path and devid
	 * to make it distinguishable from newvd, and unopenable from now on.
	 */
	if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
		spa_strfree(oldvd->vdev_path);
		oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
		    KM_SLEEP);
		(void) sprintf(oldvd->vdev_path, "%s/%s",
		    newvd->vdev_path, "old");
		if (oldvd->vdev_devid != NULL) {
			spa_strfree(oldvd->vdev_devid);
			oldvd->vdev_devid = NULL;
		}
	}

	/*
	 * If the parent is not a mirror, or if we're replacing, insert the new
	 * mirror/replacing/spare vdev above oldvd.
	 */
	if (pvd->vdev_ops != pvops)
		pvd = vdev_add_parent(oldvd, pvops);

	ASSERT(pvd->vdev_top->vdev_parent == rvd);
	ASSERT(pvd->vdev_ops == pvops);
	ASSERT(oldvd->vdev_parent == pvd);

	/*
	 * Extract the new device from its root and add it to pvd.
	 */
	vdev_remove_child(newrootvd, newvd);
	newvd->vdev_id = pvd->vdev_children;
	vdev_add_child(pvd, newvd);

	/*
	 * If newvd is smaller than oldvd, but larger than its rsize,
	 * the addition of newvd may have decreased our parent's asize.
	 */
	pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize);

	tvd = newvd->vdev_top;
	ASSERT(pvd->vdev_top == tvd);
	ASSERT(tvd->vdev_parent == rvd);

	vdev_config_dirty(tvd);

	/*
	 * Set newvd's DTL to [TXG_INITIAL, open_txg].  It will propagate
	 * upward when spa_vdev_exit() calls vdev_dtl_reassess().
	 */
	open_txg = txg + TXG_CONCURRENT_STATES - 1;

	mutex_enter(&newvd->vdev_dtl_lock);
	space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL,
	    open_txg - TXG_INITIAL + 1);
	mutex_exit(&newvd->vdev_dtl_lock);

	if (newvd->vdev_isspare)
		spa_spare_activate(newvd);

	/*
	 * Mark newvd's DTL dirty in this txg.
	 */
	vdev_dirty(tvd, VDD_DTL, newvd, txg);

	(void) spa_vdev_exit(spa, newrootvd, open_txg, 0);

	/*
	 * Kick off a resilver to update newvd.
	 */
	VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

	return (0);
}
/*
 * Detach a device from a mirror or replacing vdev.
 * If 'replace_done' is specified, only detach if the parent
 * is a replacing vdev.
 */
int
spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done)
{
	uint64_t txg;
	int c, t, error;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd, *pvd, *cvd, *tvd;
	boolean_t unspare = B_FALSE;
	uint64_t unspare_guid;

	txg = spa_vdev_enter(spa);

	vd = vdev_lookup_by_guid(rvd, guid);

	if (vd == NULL)
		return (spa_vdev_exit(spa, NULL, txg, ENODEV));

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	pvd = vd->vdev_parent;

	/*
	 * If replace_done is specified, only remove this device if it's
	 * the first child of a replacing vdev.  For the 'spare' vdev, either
	 * disk can be removed.
	 */
	if (replace_done) {
		if (pvd->vdev_ops == &vdev_replacing_ops) {
			if (vd->vdev_id != 0)
				return (spa_vdev_exit(spa, NULL, txg,
				    ENOTSUP));
		} else if (pvd->vdev_ops != &vdev_spare_ops) {
			return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
		}
	}

	ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
	    spa_version(spa) >= ZFS_VERSION_SPARES);

	/*
	 * Only mirror, replacing, and spare vdevs support detach.
	 */
	if (pvd->vdev_ops != &vdev_replacing_ops &&
	    pvd->vdev_ops != &vdev_mirror_ops &&
	    pvd->vdev_ops != &vdev_spare_ops)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	/*
	 * If there's only one replica, you can't detach it.
	 */
	if (pvd->vdev_children <= 1)
		return (spa_vdev_exit(spa, NULL, txg, EBUSY));

	/*
	 * If all siblings have non-empty DTLs, this device may have the only
	 * valid copy of the data, which means we cannot safely detach it.
	 *
	 * XXX -- as in the vdev_offline() case, we really want a more
	 * precise DTL check.
	 */
	for (c = 0; c < pvd->vdev_children; c++) {
		uint64_t dirty;

		cvd = pvd->vdev_child[c];
		if (cvd == vd)
			continue;
		if (vdev_is_dead(cvd))
			continue;
		mutex_enter(&cvd->vdev_dtl_lock);
		dirty = cvd->vdev_dtl_map.sm_space |
		    cvd->vdev_dtl_scrub.sm_space;
		mutex_exit(&cvd->vdev_dtl_lock);
		if (!dirty)
			break;
	}

	/*
	 * If we are a replacing or spare vdev, then we can always detach the
	 * latter child, as that is how one cancels the operation.
	 */
	if ((pvd->vdev_ops == &vdev_mirror_ops || vd->vdev_id != 1) &&
	    c == pvd->vdev_children)
		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
	/*
	 * If we are detaching the original disk from a spare, then it implies
	 * that the spare should become a real disk, and be removed from the
	 * active spare list for the pool.
	 */
	if (pvd->vdev_ops == &vdev_spare_ops &&
	    vd->vdev_id == 0)
		unspare = B_TRUE;

	/*
	 * Erase the disk labels so the disk can be used for other things.
	 * This must be done after all other error cases are handled,
	 * but before we disembowel vd (so we can still do I/O to it).
	 * But if we can't do it, don't treat the error as fatal --
	 * it may be that the unwritability of the disk is the reason
	 * it's being detached!
	 */
	error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);

	/*
	 * Remove vd from its parent and compact the parent's children.
	 */
	vdev_remove_child(pvd, vd);
	vdev_compact_children(pvd);

	/*
	 * Remember one of the remaining children so we can get tvd below.
	 */
	cvd = pvd->vdev_child[0];

	/*
	 * If we need to remove the remaining child from the list of hot spares,
	 * do it now, marking the vdev as no longer a spare in the process.  We
	 * must do this before vdev_remove_parent(), because that can change the
	 * GUID if it creates a new toplevel GUID.
	 */
	if (unspare) {
		ASSERT(cvd->vdev_isspare);
		spa_spare_remove(cvd);
		unspare_guid = cvd->vdev_guid;
	}

	/*
	 * If the parent mirror/replacing vdev only has one child,
	 * the parent is no longer needed.  Remove it from the tree.
	 */
	if (pvd->vdev_children == 1)
		vdev_remove_parent(cvd);

	/*
	 * We don't set tvd until now because the parent we just removed
	 * may have been the previous top-level vdev.
	 */
	tvd = cvd->vdev_top;
	ASSERT(tvd->vdev_parent == rvd);

	/*
	 * Reevaluate the parent vdev state.
	 */
	vdev_propagate_state(cvd->vdev_parent);

	/*
	 * If the device we just detached was smaller than the others, it may be
	 * possible to add metaslabs (i.e. grow the pool).  vdev_metaslab_init()
	 * can't fail because the existing metaslabs are already in core, so
	 * there's nothing to read from disk.
	 */
	VERIFY(vdev_metaslab_init(tvd, txg) == 0);

	vdev_config_dirty(tvd);

	/*
	 * Mark vd's DTL as dirty in this txg.  vdev_dtl_sync() will see that
	 * vd->vdev_detached is set and free vd's DTL object in syncing context.
	 * But first make sure we're not on any *other* txg's DTL list, to
	 * prevent vd from being accessed after it's freed.
	 */
	for (t = 0; t < TXG_SIZE; t++)
		(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
	vd->vdev_detached = B_TRUE;
	vdev_dirty(tvd, VDD_DTL, vd, txg);
	error = spa_vdev_exit(spa, vd, txg, 0);

	/*
	 * If this was the removal of the original device in a hot spare vdev,
	 * then we want to go through and remove the device from the hot spare
	 * list of every other pool.
	 */
	if (unspare) {
		spa = NULL;
		mutex_enter(&spa_namespace_lock);
		while ((spa = spa_next(spa)) != NULL) {
			if (spa->spa_state != POOL_STATE_ACTIVE)
				continue;

			(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
		}
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}
/*
 * Remove a device from the pool.  Currently, this supports removing only hot
 * spares.
 */
static int
spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
{
	vdev_t *vd;
	nvlist_t **spares, *nv, **newspares;
	uint_t i, j, nspares;
	int ret = 0;

	spa_config_enter(spa, RW_WRITER, FTAG);

	vd = spa_lookup_by_guid(spa, guid);

	nv = NULL;
	if (spa->spa_spares != NULL &&
	    nvlist_lookup_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		for (i = 0; i < nspares; i++) {
			uint64_t theguid;

			VERIFY(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &theguid) == 0);
			if (theguid == guid) {
				nv = spares[i];
				break;
			}
		}
	}

	/*
	 * We only support removing a hot spare, and only if it's not currently
	 * in use in this pool.
	 */
	if (nv == NULL && vd == NULL) {
		ret = ENOENT;
		goto out;
	}

	if (nv == NULL && vd != NULL) {
		ret = ENOTSUP;
		goto out;
	}

	if (!unspare && nv != NULL && vd != NULL) {
		ret = EBUSY;
		goto out;
	}

	if (nspares == 1) {
		newspares = NULL;
	} else {
		newspares = kmem_alloc((nspares - 1) * sizeof (void *),
		    KM_SLEEP);
		for (i = 0, j = 0; i < nspares; i++) {
			if (spares[i] != nv)
				VERIFY(nvlist_dup(spares[i],
				    &newspares[j++], KM_SLEEP) == 0);
		}
	}

	VERIFY(nvlist_remove(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);
	VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
	    newspares, nspares - 1) == 0);
	for (i = 0; i < nspares - 1; i++)
		nvlist_free(newspares[i]);
	kmem_free(newspares, (nspares - 1) * sizeof (void *));
	spa_load_spares(spa);
	spa->spa_sync_spares = B_TRUE;

out:
	spa_config_exit(spa, FTAG);

	return (ret);
}
/*
 * Find any device that's done replacing, so we can detach it.
 */
static vdev_t *
spa_vdev_replace_done_hunt(vdev_t *vd)
{
	vdev_t *newvd, *oldvd;
	int c;

	for (c = 0; c < vd->vdev_children; c++) {
		oldvd = spa_vdev_replace_done_hunt(vd->vdev_child[c]);
		if (oldvd != NULL)
			return (oldvd);
	}

	if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) {
		oldvd = vd->vdev_child[0];
		newvd = vd->vdev_child[1];

		mutex_enter(&newvd->vdev_dtl_lock);
		if (newvd->vdev_dtl_map.sm_space == 0 &&
		    newvd->vdev_dtl_scrub.sm_space == 0) {
			mutex_exit(&newvd->vdev_dtl_lock);
			return (oldvd);
		}
		mutex_exit(&newvd->vdev_dtl_lock);
	}

	return (NULL);
}
static void
spa_vdev_replace_done(spa_t *spa)
{
	vdev_t *vd;
	vdev_t *pvd;
	uint64_t guid;
	uint64_t pguid = 0;

	spa_config_enter(spa, RW_READER, FTAG);

	while ((vd = spa_vdev_replace_done_hunt(spa->spa_root_vdev)) != NULL) {
		guid = vd->vdev_guid;
		/*
		 * If we have just finished replacing a hot spared device, then
		 * we need to detach the parent's first child (the original hot
		 * spare) as well.
		 */
		pvd = vd->vdev_parent;
		if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
		    pvd->vdev_id == 0) {
			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
			ASSERT(pvd->vdev_parent->vdev_children == 2);
			pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid;
		}
		spa_config_exit(spa, FTAG);
		if (spa_vdev_detach(spa, guid, B_TRUE) != 0)
			return;
		if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0)
			return;
		spa_config_enter(spa, RW_READER, FTAG);
	}

	spa_config_exit(spa, FTAG);
}
/*
 * Update the stored path for this vdev.  Dirty the vdev configuration, relying
 * on spa_vdev_enter/exit() to synchronize the labels and cache.
 */
int
spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
{
	vdev_t *rvd, *vd;
	uint64_t txg;

	rvd = spa->spa_root_vdev;

	txg = spa_vdev_enter(spa);

	if ((vd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
		/*
		 * Determine if this is a reference to a hot spare.  In that
		 * case, update the path as stored in the spare list.
		 */
		nvlist_t **spares;
		uint_t i, nspares;

		if (spa->spa_sparelist != NULL) {
			VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
			    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
			for (i = 0; i < nspares; i++) {
				uint64_t theguid;

				VERIFY(nvlist_lookup_uint64(spares[i],
				    ZPOOL_CONFIG_GUID, &theguid) == 0);
				if (theguid == guid)
					break;
			}

			if (i == nspares)
				return (spa_vdev_exit(spa, NULL, txg, ENOENT));

			VERIFY(nvlist_add_string(spares[i],
			    ZPOOL_CONFIG_PATH, newpath) == 0);
			spa_load_spares(spa);
			spa->spa_sync_spares = B_TRUE;
			return (spa_vdev_exit(spa, NULL, txg, 0));
		}

		return (spa_vdev_exit(spa, NULL, txg, ENOENT));
	}

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	spa_strfree(vd->vdev_path);
	vd->vdev_path = spa_strdup(newpath);

	vdev_config_dirty(vd->vdev_top);

	return (spa_vdev_exit(spa, NULL, txg, 0));
}
/*
 * ==========================================================================
 * SPA scrubbing
 * ==========================================================================
 */
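
/*
 * Scrubs and resilvers are driven by spa_scrub_thread(), which walks the
 * pool with traverse_more() and issues reads through spa_scrub_io_start().
 * The number of concurrent scrub I/Os is throttled by spa_scrub_maxinflight;
 * spa_scrub_io_done() wakes up waiters as I/Os complete.
 */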
static void
spa_scrub_io_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	zio_data_buf_free(zio->io_data, zio->io_size);

	mutex_enter(&spa->spa_scrub_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
		vdev_t *vd = zio->io_vd ? zio->io_vd : spa->spa_root_vdev;
		spa->spa_scrub_errors++;
		mutex_enter(&vd->vdev_stat_lock);
		vd->vdev_stat.vs_scrub_errors++;
		mutex_exit(&vd->vdev_stat_lock);
	}

	if (--spa->spa_scrub_inflight < spa->spa_scrub_maxinflight)
		cv_broadcast(&spa->spa_scrub_io_cv);

	ASSERT(spa->spa_scrub_inflight >= 0);

	mutex_exit(&spa->spa_scrub_lock);
}
static void
spa_scrub_io_start(spa_t *spa, blkptr_t *bp, int priority, int flags,
    zbookmark_t *zb)
{
	size_t size = BP_GET_LSIZE(bp);
	void *data;

	mutex_enter(&spa->spa_scrub_lock);
	/*
	 * Do not give too much work to vdev(s).
	 */
	while (spa->spa_scrub_inflight >= spa->spa_scrub_maxinflight) {
		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
	}
	spa->spa_scrub_inflight++;
	mutex_exit(&spa->spa_scrub_lock);

	data = zio_data_buf_alloc(size);

	if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)
		flags |= ZIO_FLAG_SPECULATIVE;	/* intent log block */

	flags |= ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_CANFAIL;

	zio_nowait(zio_read(NULL, spa, bp, data, size,
	    spa_scrub_io_done, NULL, priority, flags, zb));
}
/* ARGSUSED */
static int
spa_scrub_cb(traverse_blk_cache_t *bc, spa_t *spa, void *a)
{
	blkptr_t *bp = &bc->bc_blkptr;
	vdev_t *vd = spa->spa_root_vdev;
	dva_t *dva = bp->blk_dva;
	int needs_resilver = B_FALSE;
	int d;

	if (bc->bc_errno) {
		/*
		 * We can't scrub this block, but we can continue to scrub
		 * the rest of the pool.  Note the error and move along.
		 */
		mutex_enter(&spa->spa_scrub_lock);
		spa->spa_scrub_errors++;
		mutex_exit(&spa->spa_scrub_lock);

		mutex_enter(&vd->vdev_stat_lock);
		vd->vdev_stat.vs_scrub_errors++;
		mutex_exit(&vd->vdev_stat_lock);

		return (ERESTART);
	}

	ASSERT(bp->blk_birth < spa->spa_scrub_maxtxg);

	for (d = 0; d < BP_GET_NDVAS(bp); d++) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]));

		ASSERT(vd != NULL);

		/*
		 * Keep track of how much data we've examined so that
		 * zpool(1M) status can make useful progress reports.
		 */
		mutex_enter(&vd->vdev_stat_lock);
		vd->vdev_stat.vs_scrub_examined += DVA_GET_ASIZE(&dva[d]);
		mutex_exit(&vd->vdev_stat_lock);

		if (spa->spa_scrub_type == POOL_SCRUB_RESILVER) {
			if (DVA_GET_GANG(&dva[d])) {
				/*
				 * Gang members may be spread across multiple
				 * vdevs, so the best we can do is look at the
				 * pool-wide DTL.
				 * XXX -- it would be better to change our
				 * allocation policy to ensure that this can't
				 * happen.
				 */
				vd = spa->spa_root_vdev;
			}
			if (vdev_dtl_contains(&vd->vdev_dtl_map,
			    bp->blk_birth, 1))
				needs_resilver = B_TRUE;
		}
	}

	if (spa->spa_scrub_type == POOL_SCRUB_EVERYTHING)
		spa_scrub_io_start(spa, bp, ZIO_PRIORITY_SCRUB,
		    ZIO_FLAG_SCRUB, &bc->bc_bookmark);
	else if (needs_resilver)
		spa_scrub_io_start(spa, bp, ZIO_PRIORITY_RESILVER,
		    ZIO_FLAG_RESILVER, &bc->bc_bookmark);

	return (0);
}
static void
spa_scrub_thread(void *arg)
{
	spa_t *spa = arg;
	callb_cpr_t cprinfo;
	traverse_handle_t *th = spa->spa_scrub_th;
	vdev_t *rvd = spa->spa_root_vdev;
	pool_scrub_type_t scrub_type = spa->spa_scrub_type;
	int error = 0;
	boolean_t complete;

	CALLB_CPR_INIT(&cprinfo, &spa->spa_scrub_lock, callb_generic_cpr, FTAG);

	/*
	 * If we're restarting due to a snapshot create/delete,
	 * wait for that to complete.
	 */
	txg_wait_synced(spa_get_dsl(spa), 0);

	dprintf("start %s mintxg=%llu maxtxg=%llu\n",
	    scrub_type == POOL_SCRUB_RESILVER ? "resilver" : "scrub",
	    spa->spa_scrub_mintxg, spa->spa_scrub_maxtxg);

	spa_config_enter(spa, RW_WRITER, FTAG);
	vdev_reopen(rvd);		/* purge all vdev caches */
	vdev_config_dirty(rvd);		/* rewrite all disk labels */
	vdev_scrub_stat_update(rvd, scrub_type, B_FALSE);
	spa_config_exit(spa, FTAG);

	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_errors = 0;
	spa->spa_scrub_active = 1;
	ASSERT(spa->spa_scrub_inflight == 0);

	while (!spa->spa_scrub_stop) {
		CALLB_CPR_SAFE_BEGIN(&cprinfo);
		while (spa->spa_scrub_suspended) {
			spa->spa_scrub_active = 0;
			cv_broadcast(&spa->spa_scrub_cv);
			cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock);
			spa->spa_scrub_active = 1;
		}
		CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_scrub_lock);

		if (spa->spa_scrub_restart_txg != 0)
			break;

		mutex_exit(&spa->spa_scrub_lock);
		error = traverse_more(th);
		mutex_enter(&spa->spa_scrub_lock);
		if (error != EAGAIN)
			break;
	}

	while (spa->spa_scrub_inflight)
		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);

	spa->spa_scrub_active = 0;
	cv_broadcast(&spa->spa_scrub_cv);

	mutex_exit(&spa->spa_scrub_lock);

	spa_config_enter(spa, RW_WRITER, FTAG);

	mutex_enter(&spa->spa_scrub_lock);

	/*
	 * Note: we check spa_scrub_restart_txg under both spa_scrub_lock
	 * AND the spa config lock to synchronize with any config changes
	 * that revise the DTLs under spa_vdev_enter() / spa_vdev_exit().
	 */
	if (spa->spa_scrub_restart_txg != 0)
		error = ERESTART;

	if (spa->spa_scrub_stop)
		error = EINTR;

	/*
	 * Even if there were uncorrectable errors, we consider the scrub
	 * completed.  The downside is that if there is a transient error during
	 * a resilver, we won't resilver the data properly to the target.  But
	 * if the damage is permanent (more likely) we will resilver forever,
	 * which isn't really acceptable.  Since there is enough information for
	 * the user to know what has failed and why, this seems like a more
	 * tractable approach.
	 */
	complete = (error == 0);

	dprintf("end %s to maxtxg=%llu %s, traverse=%d, %llu errors, stop=%u\n",
	    scrub_type == POOL_SCRUB_RESILVER ? "resilver" : "scrub",
	    spa->spa_scrub_maxtxg, complete ? "done" : "FAILED",
	    error, spa->spa_scrub_errors, spa->spa_scrub_stop);

	mutex_exit(&spa->spa_scrub_lock);

	/*
	 * If the scrub/resilver completed, update all DTLs to reflect this.
	 * Whether it succeeded or not, vacate all temporary scrub DTLs.
	 */
	vdev_dtl_reassess(rvd, spa_last_synced_txg(spa) + 1,
	    complete ? spa->spa_scrub_maxtxg : 0, B_TRUE);
	vdev_scrub_stat_update(rvd, POOL_SCRUB_NONE, complete);
	spa_errlog_rotate(spa);

	spa_config_exit(spa, FTAG);

	mutex_enter(&spa->spa_scrub_lock);

	/*
	 * We may have finished replacing a device.
	 * Let the async thread assess this and handle the detach.
	 */
	spa_async_request(spa, SPA_ASYNC_REPLACE_DONE);

	/*
	 * If we were told to restart, our final act is to start a new scrub.
	 */
	if (error == ERESTART)
		spa_async_request(spa, scrub_type == POOL_SCRUB_RESILVER ?
		    SPA_ASYNC_RESILVER : SPA_ASYNC_SCRUB);

	spa->spa_scrub_type = POOL_SCRUB_NONE;
	spa->spa_scrub_active = 0;
	spa->spa_scrub_thread = NULL;
	cv_broadcast(&spa->spa_scrub_cv);
	CALLB_CPR_EXIT(&cprinfo);	/* drops &spa->spa_scrub_lock */
	thread_exit();
}
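
/*
 * Suspend an in-progress scrub and wait for its in-flight I/Os to drain.
 * Suspensions nest, so every spa_scrub_suspend() must be balanced by a
 * spa_scrub_resume().  spa_sync() uses this pair to quiesce the scrub
 * while it takes a stable copy of the uberblock (spa_ubsync).
 */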
void
spa_scrub_suspend(spa_t *spa)
{
	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_suspended++;
	while (spa->spa_scrub_active) {
		cv_broadcast(&spa->spa_scrub_cv);
		cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock);
	}
	while (spa->spa_scrub_inflight)
		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
	mutex_exit(&spa->spa_scrub_lock);
}

void
spa_scrub_resume(spa_t *spa)
{
	mutex_enter(&spa->spa_scrub_lock);
	ASSERT(spa->spa_scrub_suspended != 0);
	if (--spa->spa_scrub_suspended == 0)
		cv_broadcast(&spa->spa_scrub_cv);
	mutex_exit(&spa->spa_scrub_lock);
}

void
spa_scrub_restart(spa_t *spa, uint64_t txg)
{
	/*
	 * Something happened (e.g. snapshot create/delete) that means
	 * we must restart any in-progress scrubs.  The itinerary will
	 * fix this properly.
	 */
	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_restart_txg = txg;
	mutex_exit(&spa->spa_scrub_lock);
}
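
/*
 * Start, upgrade, or stop a scrub.  POOL_SCRUB_NONE stops any scrub in
 * progress; a resilver in progress is only stopped if 'force' is set.
 * As an illustrative (not authoritative) example, a caller holding a
 * valid spa_t could kick off a full scrub with:
 *
 *	error = spa_scrub(spa, POOL_SCRUB_EVERYTHING, B_FALSE);
 *
 * and would get EBUSY back if the unforced request would interrupt an
 * active resilver.
 */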
int
spa_scrub(spa_t *spa, pool_scrub_type_t type, boolean_t force)
{
	space_seg_t *ss;
	uint64_t mintxg, maxtxg;
	vdev_t *rvd = spa->spa_root_vdev;

	if ((uint_t)type >= POOL_SCRUB_TYPES)
		return (ENOTSUP);

	mutex_enter(&spa->spa_scrub_lock);

	/*
	 * If there's a scrub or resilver already in progress, stop it.
	 */
	while (spa->spa_scrub_thread != NULL) {
		/*
		 * Don't stop a resilver unless forced.
		 */
		if (spa->spa_scrub_type == POOL_SCRUB_RESILVER && !force) {
			mutex_exit(&spa->spa_scrub_lock);
			return (EBUSY);
		}
		spa->spa_scrub_stop = 1;
		cv_broadcast(&spa->spa_scrub_cv);
		cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock);
	}

	/*
	 * Terminate the previous traverse.
	 */
	if (spa->spa_scrub_th != NULL) {
		traverse_fini(spa->spa_scrub_th);
		spa->spa_scrub_th = NULL;
	}

	if (rvd == NULL) {
		ASSERT(spa->spa_scrub_stop == 0);
		ASSERT(spa->spa_scrub_type == type);
		ASSERT(spa->spa_scrub_restart_txg == 0);
		mutex_exit(&spa->spa_scrub_lock);
		return (0);
	}

	mintxg = TXG_INITIAL - 1;
	maxtxg = spa_last_synced_txg(spa) + 1;

	mutex_enter(&rvd->vdev_dtl_lock);

	if (rvd->vdev_dtl_map.sm_space == 0) {
		/*
		 * The pool-wide DTL is empty.
		 * If this is a resilver, there's nothing to do except
		 * check whether any in-progress replacements have completed.
		 */
		if (type == POOL_SCRUB_RESILVER) {
			type = POOL_SCRUB_NONE;
			spa_async_request(spa, SPA_ASYNC_REPLACE_DONE);
		}
	} else {
		/*
		 * The pool-wide DTL is non-empty.
		 * If this is a normal scrub, upgrade to a resilver instead.
		 */
		if (type == POOL_SCRUB_EVERYTHING)
			type = POOL_SCRUB_RESILVER;
	}

	if (type == POOL_SCRUB_RESILVER) {
		/*
		 * Determine the resilvering boundaries.
		 *
		 * Note: (mintxg, maxtxg) is an open interval,
		 * i.e. mintxg and maxtxg themselves are not included.
		 *
		 * Note: for maxtxg, we MIN with spa_last_synced_txg(spa) + 1
		 * so we don't claim to resilver a txg that's still changing.
		 */
		ss = avl_first(&rvd->vdev_dtl_map.sm_root);
		mintxg = ss->ss_start - 1;
		ss = avl_last(&rvd->vdev_dtl_map.sm_root);
		maxtxg = MIN(ss->ss_end, maxtxg);
	}

	mutex_exit(&rvd->vdev_dtl_lock);

	spa->spa_scrub_stop = 0;
	spa->spa_scrub_type = type;
	spa->spa_scrub_restart_txg = 0;

	if (type != POOL_SCRUB_NONE) {
		spa->spa_scrub_mintxg = mintxg;
		spa->spa_scrub_maxtxg = maxtxg;
		spa->spa_scrub_th = traverse_init(spa, spa_scrub_cb, NULL,
		    ADVANCE_PRE | ADVANCE_PRUNE | ADVANCE_ZIL,
		    ZIO_FLAG_CANFAIL);
		traverse_add_pool(spa->spa_scrub_th, mintxg, maxtxg);
		spa->spa_scrub_thread = thread_create(NULL, 0,
		    spa_scrub_thread, spa, 0, &p0, TS_RUN, minclsyspri);
	}

	mutex_exit(&spa->spa_scrub_lock);

	return (0);
}

/*
 * ==========================================================================
 * SPA async task processing
 * ==========================================================================
 */

static void
spa_async_reopen(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *tvd;
	int c;

	spa_config_enter(spa, RW_WRITER, FTAG);

	for (c = 0; c < rvd->vdev_children; c++) {
		tvd = rvd->vdev_child[c];
		if (tvd->vdev_reopen_wanted) {
			tvd->vdev_reopen_wanted = 0;
			vdev_reopen(tvd);
		}
	}

	spa_config_exit(spa, FTAG);
}
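
/*
 * Body of the async kernel thread: atomically claim the pending task
 * mask under spa_async_lock, then service each requested task in turn
 * without holding the lock.
 */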
static void
spa_async_thread(void *arg)
{
	spa_t *spa = arg;
	int tasks;

	ASSERT(spa->spa_sync_on);

	mutex_enter(&spa->spa_async_lock);
	tasks = spa->spa_async_tasks;
	spa->spa_async_tasks = 0;
	mutex_exit(&spa->spa_async_lock);

	/*
	 * See if the config needs to be updated.
	 */
	if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
		mutex_enter(&spa_namespace_lock);
		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
		mutex_exit(&spa_namespace_lock);
	}

	/*
	 * See if any devices need to be reopened.
	 */
	if (tasks & SPA_ASYNC_REOPEN)
		spa_async_reopen(spa);

	/*
	 * If any devices are done replacing, detach them.
	 */
	if (tasks & SPA_ASYNC_REPLACE_DONE)
		spa_vdev_replace_done(spa);

	/*
	 * Kick off a scrub.
	 */
	if (tasks & SPA_ASYNC_SCRUB)
		VERIFY(spa_scrub(spa, POOL_SCRUB_EVERYTHING, B_TRUE) == 0);

	/*
	 * Kick off a resilver.
	 */
	if (tasks & SPA_ASYNC_RESILVER)
		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

	/*
	 * Let the world know that we're done.
	 */
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_thread = NULL;
	cv_broadcast(&spa->spa_async_cv);
	mutex_exit(&spa->spa_async_lock);
	thread_exit();
}

void
spa_async_suspend(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_suspended++;
	while (spa->spa_async_thread != NULL)
		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
	mutex_exit(&spa->spa_async_lock);
}

void
spa_async_resume(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	ASSERT(spa->spa_async_suspended != 0);
	spa->spa_async_suspended--;
	mutex_exit(&spa->spa_async_lock);
}

static void
spa_async_dispatch(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	if (spa->spa_async_tasks && !spa->spa_async_suspended &&
	    spa->spa_async_thread == NULL &&
	    rootdir != NULL && !vn_is_readonly(rootdir))
		spa->spa_async_thread = thread_create(NULL, 0,
		    spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
	mutex_exit(&spa->spa_async_lock);
}
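
/*
 * Record a request for the async thread.  Task bits accumulate until
 * spa_async_dispatch(), called at the end of spa_sync(), spawns the
 * thread to service them; requests made while the async thread is
 * suspended are deferred, not lost.
 */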
void
spa_async_request(spa_t *spa, int task)
{
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_tasks |= task;
	mutex_exit(&spa->spa_async_lock);
}

/*
 * ==========================================================================
 * SPA syncing routines
 * ==========================================================================
 */
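
/*
 * Free every block on the deferred-free bplist (blocks freed in the
 * previous txg), then vacate the list so this txg starts empty.
 */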
static void
spa_sync_deferred_frees(spa_t *spa, uint64_t txg)
{
	bplist_t *bpl = &spa->spa_sync_bplist;
	dmu_tx_t *tx;
	blkptr_t blk;
	uint64_t itor = 0;
	zio_t *zio;
	int error;
	uint8_t c = 1;

	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CONFIG_HELD);

	while (bplist_iterate(bpl, &itor, &blk) == 0)
		zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL));

	error = zio_wait(zio);
	ASSERT3U(error, ==, 0);

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
	bplist_vacate(bpl, tx);

	/*
	 * Pre-dirty the first block so we sync to convergence faster.
	 * (Usually only the first block is needed.)
	 */
	dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx);
	dmu_tx_commit(tx);
}
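
/*
 * Write an nvlist to a DMU object as XDR-encoded bytes, recording the
 * packed size in the object's bonus buffer so readers know how much
 * data to unpack.
 */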
static void
spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
{
	char *packed = NULL;
	size_t nvsize = 0;
	dmu_buf_t *db;

	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);

	packed = kmem_alloc(nvsize, KM_SLEEP);

	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
	    KM_SLEEP) == 0);

	dmu_write(spa->spa_meta_objset, obj, 0, nvsize, packed, tx);

	kmem_free(packed, nvsize);

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	*(uint64_t *)db->db_data = nvsize;
	dmu_buf_rele(db, FTAG);
}

static void
spa_sync_spares(spa_t *spa, dmu_tx_t *tx)
{
	nvlist_t *nvroot;
	nvlist_t **spares;
	int i;

	if (!spa->spa_sync_spares)
		return;

	/*
	 * Update the MOS nvlist describing the list of available spares.
	 * spa_validate_spares() will have already made sure this nvlist is
	 * valid and the vdevs are labelled appropriately.
	 */
	if (spa->spa_spares_object == 0) {
		spa->spa_spares_object = dmu_object_alloc(spa->spa_meta_objset,
		    DMU_OT_PACKED_NVLIST, 1 << 14,
		    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
		VERIFY(zap_update(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SPARES,
		    sizeof (uint64_t), 1, &spa->spa_spares_object, tx) == 0);
	}

	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	if (spa->spa_nspares == 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    NULL, 0) == 0);
	} else {
		spares = kmem_alloc(spa->spa_nspares * sizeof (void *),
		    KM_SLEEP);
		for (i = 0; i < spa->spa_nspares; i++)
			spares[i] = vdev_config_generate(spa,
			    spa->spa_spares[i], B_FALSE, B_TRUE);
		VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    spares, spa->spa_nspares) == 0);
		for (i = 0; i < spa->spa_nspares; i++)
			nvlist_free(spares[i]);
		kmem_free(spares, spa->spa_nspares * sizeof (void *));
	}

	spa_sync_nvlist(spa, spa->spa_spares_object, nvroot, tx);
	nvlist_free(nvroot);

	spa->spa_sync_spares = B_FALSE;
}
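
/*
 * Generate the current pool config and stash it in spa_config_syncing;
 * spa_sync() makes it visible to the config cache only after the txg
 * that writes it has committed to disk.
 */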
static void
spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
{
	nvlist_t *config;

	if (list_is_empty(&spa->spa_dirty_list))
		return;

	config = spa_config_generate(spa, NULL, dmu_tx_get_txg(tx), B_FALSE);

	if (spa->spa_config_syncing)
		nvlist_free(spa->spa_config_syncing);
	spa->spa_config_syncing = config;

	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
}
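
/*
 * Sync-task callback for spa_set_props(): create the pool-props ZAP
 * object on first use, then persist each property in the nvlist.
 * Only ZFS_PROP_BOOTFS is handled here at present.
 */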
static void
spa_sync_props(void *arg1, void *arg2, dmu_tx_t *tx)
{
	spa_t *spa = arg1;
	nvlist_t *nvp = arg2;
	nvpair_t *nvpair;
	objset_t *mos = spa->spa_meta_objset;
	uint64_t zapobj;

	mutex_enter(&spa->spa_props_lock);
	if (spa->spa_pool_props_object == 0) {
		zapobj = zap_create(mos, DMU_OT_POOL_PROPS, DMU_OT_NONE, 0, tx);
		VERIFY(zapobj > 0);

		spa->spa_pool_props_object = zapobj;

		VERIFY(zap_update(mos, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_PROPS, 8, 1,
		    &spa->spa_pool_props_object, tx) == 0);
	}
	mutex_exit(&spa->spa_props_lock);

	nvpair = NULL;
	while ((nvpair = nvlist_next_nvpair(nvp, nvpair))) {
		switch (zpool_name_to_prop(nvpair_name(nvpair))) {
		case ZFS_PROP_BOOTFS:
			VERIFY(nvlist_lookup_uint64(nvp,
			    nvpair_name(nvpair), &spa->spa_bootfs) == 0);
			VERIFY(zap_update(mos,
			    spa->spa_pool_props_object,
			    zpool_prop_to_name(ZFS_PROP_BOOTFS), 8, 1,
			    &spa->spa_bootfs, tx) == 0);
			break;
		}
	}
}

/*
 * Sync the specified transaction group.  New blocks may be dirtied as
 * part of the process, so we iterate until it converges.
 */
void
spa_sync(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	objset_t *mos = spa->spa_meta_objset;
	bplist_t *bpl = &spa->spa_sync_bplist;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd;
	dmu_tx_t *tx;
	int dirty_vdevs;

	/*
	 * Lock out configuration changes.
	 */
	spa_config_enter(spa, RW_READER, FTAG);

	spa->spa_syncing_txg = txg;
	spa->spa_sync_pass = 0;

	VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj));

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * If we are upgrading to ZFS_VERSION_RAIDZ_DEFLATE this txg,
	 * set spa_deflate if we have no raid-z vdevs.
	 */
	if (spa->spa_ubsync.ub_version < ZFS_VERSION_RAIDZ_DEFLATE &&
	    spa->spa_uberblock.ub_version >= ZFS_VERSION_RAIDZ_DEFLATE) {
		int i;

		for (i = 0; i < rvd->vdev_children; i++) {
			vd = rvd->vdev_child[i];
			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
				break;
		}
		if (i == rvd->vdev_children) {
			spa->spa_deflate = TRUE;
			VERIFY(0 == zap_add(spa->spa_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
		}
	}

	/*
	 * If anything has changed in this txg, push the deferred frees
	 * from the previous txg.  If not, leave them alone so that we
	 * don't generate work on an otherwise idle system.
	 */
	if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
	    !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
	    !txg_list_empty(&dp->dp_sync_tasks, txg))
		spa_sync_deferred_frees(spa, txg);

	/*
	 * Iterate to convergence.
	 */
	do {
		spa->spa_sync_pass++;

		spa_sync_config_object(spa, tx);
		spa_sync_spares(spa, tx);
		spa_errlog_sync(spa, txg);
		dsl_pool_sync(dp, txg);

		dirty_vdevs = 0;
		while ((vd = txg_list_remove(&spa->spa_vdev_txg_list,
		    txg)) != NULL) {
			vdev_sync(vd, txg);
			dirty_vdevs++;
		}

		bplist_sync(bpl, tx);
	} while (dirty_vdevs);

	bplist_close(bpl);

	dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);

	/*
	 * Rewrite the vdev configuration (which includes the uberblock)
	 * to commit the transaction group.
	 *
	 * If there are any dirty vdevs, sync the uberblock to all vdevs.
	 * Otherwise, pick a random top-level vdev that's known to be
	 * visible in the config cache (see spa_vdev_add() for details).
	 * If the write fails, try the next vdev until we've tried them all.
	 */
	if (!list_is_empty(&spa->spa_dirty_list)) {
		VERIFY(vdev_config_sync(rvd, txg) == 0);
	} else {
		int children = rvd->vdev_children;
		int c0 = spa_get_random(children);
		int c;

		for (c = 0; c < children; c++) {
			vd = rvd->vdev_child[(c0 + c) % children];
			if (vd->vdev_ms_array == 0)
				continue;
			if (vdev_config_sync(vd, txg) == 0)
				break;
		}
		if (c == children)
			VERIFY(vdev_config_sync(rvd, txg) == 0);
	}

	dmu_tx_commit(tx);

	/*
	 * Clear the dirty config list.
	 */
	while ((vd = list_head(&spa->spa_dirty_list)) != NULL)
		vdev_config_clean(vd);

	/*
	 * Now that the new config has synced transactionally,
	 * let it become visible to the config cache.
	 */
	if (spa->spa_config_syncing != NULL) {
		spa_config_set(spa, spa->spa_config_syncing);
		spa->spa_config_txg = txg;
		spa->spa_config_syncing = NULL;
	}

	/*
	 * Make a stable copy of the fully synced uberblock.
	 * We use this as the root for pool traversals.
	 */
	spa->spa_traverse_wanted = 1;	/* tells traverse_more() to stop */

	spa_scrub_suspend(spa);		/* stop scrubbing and finish I/Os */

	rw_enter(&spa->spa_traverse_lock, RW_WRITER);
	spa->spa_traverse_wanted = 0;
	spa->spa_ubsync = spa->spa_uberblock;
	rw_exit(&spa->spa_traverse_lock);

	spa_scrub_resume(spa);		/* resume scrub with new ubsync */

	/*
	 * Clean up the ZIL records for the synced txg.
	 */
	dsl_pool_zil_clean(dp);

	/*
	 * Update usable space statistics.
	 */
	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list,
	    TXG_CLEAN(txg))) != NULL)
		vdev_sync_done(vd, txg);

	/*
	 * It had better be the case that we didn't dirty anything
	 * since vdev_config_sync().
	 */
	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
	ASSERT(bpl->bpl_queue == NULL);

	spa_config_exit(spa, FTAG);

	/*
	 * If any async tasks have been requested, kick them off.
	 */
	spa_async_dispatch(spa);
}

/*
 * Sync all pools.  We don't want to hold the namespace lock across these
 * operations, so we take a reference on the spa_t and drop the lock during
 * the sync.
 */
void
spa_sync_allpools(void)
{
	spa_t *spa = NULL;
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (spa_state(spa) != POOL_STATE_ACTIVE)
			continue;
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		txg_wait_synced(spa_get_dsl(spa), 0);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);
}

/*
 * ==========================================================================
 * Miscellaneous routines
 * ==========================================================================
 */

/*
 * Remove all pools in the system.
 */
void
spa_evict_all(void)
{
	spa_t *spa;

	/*
	 * Remove all cached state.  All pools should be closed now,
	 * so every spa in the AVL tree should be unreferenced.
	 */
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(NULL)) != NULL) {
		/*
		 * Stop async tasks.  The async thread may need to detach
		 * a device that's been replaced, which requires grabbing
		 * spa_namespace_lock, so we must drop it here.
		 */
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		spa_async_suspend(spa);
		VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);

		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
			spa_unload(spa);
			spa_deactivate(spa);
		}
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);
}

vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid)
{
	return (vdev_lookup_by_guid(spa->spa_root_vdev, guid));
}
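
/*
 * Upgrade the pool's on-disk version to ZFS_VERSION and dirty the root
 * vdev config so new labels are written, then wait for the change to
 * reach stable storage before returning.
 */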
void
spa_upgrade(spa_t *spa)
{
	spa_config_enter(spa, RW_WRITER, FTAG);

	/*
	 * This should only be called for a non-faulted pool, and since a
	 * future version would result in an unopenable pool, this shouldn't be
	 * possible.
	 */
	ASSERT(spa->spa_uberblock.ub_version <= ZFS_VERSION);

	spa->spa_uberblock.ub_version = ZFS_VERSION;
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, FTAG);

	txg_wait_synced(spa_get_dsl(spa), 0);
}
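
/*
 * Check whether the given guid names either an active spare or a spare
 * still pending addition to the pool.
 */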
boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
	int i;
	uint64_t spareguid;

	for (i = 0; i < spa->spa_nspares; i++)
		if (spa->spa_spares[i]->vdev_guid == guid)
			return (B_TRUE);

	for (i = 0; i < spa->spa_pending_nspares; i++) {
		if (nvlist_lookup_uint64(spa->spa_pending_spares[i],
		    ZPOOL_CONFIG_GUID, &spareguid) == 0 &&
		    spareguid == guid)
			return (B_TRUE);
	}

	return (B_FALSE);
}
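
/*
 * Set pool properties by dispatching spa_sync_props() as a DSL sync
 * task, so the update commits transactionally with the txg.
 */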
int
spa_set_props(spa_t *spa, nvlist_t *nvp)
{
	return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
	    spa, nvp, 3));
}

int
spa_get_props(spa_t *spa, nvlist_t **nvp)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	objset_t *mos = spa->spa_meta_objset;
	zfs_source_t src;
	zfs_prop_t prop;
	nvlist_t *propval;
	uint64_t value;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_props_lock);
	/* If no props object, then just return empty nvlist */
	if (spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {

		if ((prop = zpool_name_to_prop(za.za_name)) == ZFS_PROP_INVAL)
			continue;

		VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		switch (za.za_integer_length) {
		case 8:
			if (zfs_prop_default_numeric(prop) ==
			    za.za_first_integer)
				src = ZFS_SRC_DEFAULT;
			else
				src = ZFS_SRC_LOCAL;
			value = za.za_first_integer;

			if (prop == ZFS_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;
				char strval[MAXPATHLEN];

				dp = spa_get_dsl(spa);
				rw_enter(&dp->dp_config_rwlock, RW_READER);
				if ((err = dsl_dataset_open_obj(dp,
				    za.za_first_integer, NULL, DS_MODE_NONE,
				    FTAG, &ds)) != 0) {
					rw_exit(&dp->dp_config_rwlock);
					break;
				}
				dsl_dataset_name(ds, strval);
				dsl_dataset_close(ds, DS_MODE_NONE, FTAG);
				rw_exit(&dp->dp_config_rwlock);

				VERIFY(nvlist_add_uint64(propval,
				    ZFS_PROP_SOURCE, src) == 0);
				VERIFY(nvlist_add_string(propval,
				    ZFS_PROP_VALUE, strval) == 0);
			} else {
				VERIFY(nvlist_add_uint64(propval,
				    ZFS_PROP_SOURCE, src) == 0);
				VERIFY(nvlist_add_uint64(propval,
				    ZFS_PROP_VALUE, value) == 0);
			}
			VERIFY(nvlist_add_nvlist(*nvp, za.za_name,
			    propval) == 0);
			break;
		}
		nvlist_free(propval);
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		return (err);
	}

	return (0);
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZFS_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}