4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
28 * This file contains all the routines used when modifying on-disk SPA state.
29 * This includes opening, importing, destroying, exporting a pool, and syncing a pool.
33 #include <sys/zfs_context.h>
34 #include <sys/fm/fs/zfs.h>
35 #include <sys/spa_impl.h>
37 #include <sys/zio_checksum.h>
38 #include <sys/zio_compress.h>
40 #include <sys/dmu_tx.h>
43 #include <sys/vdev_impl.h>
44 #include <sys/metaslab.h>
45 #include <sys/uberblock_impl.h>
48 #include <sys/dmu_traverse.h>
49 #include <sys/dmu_objset.h>
50 #include <sys/unique.h>
51 #include <sys/dsl_pool.h>
52 #include <sys/dsl_dataset.h>
53 #include <sys/dsl_dir.h>
54 #include <sys/dsl_prop.h>
55 #include <sys/dsl_synctask.h>
56 #include <sys/fs/zfs.h>
58 #include <sys/callb.h>
59 #include <sys/sunddi.h>
60 #include <sys/spa_boot.h>
63 #include "zfs_comutil.h"
65 /* Check hostid on import? */
66 static int check_hostid = 1;
68 SYSCTL_DECL(_vfs_zfs);
69 TUNABLE_INT("vfs.zfs.check_hostid", &check_hostid);
70 SYSCTL_INT(_vfs_zfs, OID_AUTO, check_hostid, CTLFLAG_RW, &check_hostid, 0,
71 "Check hostid on import?");
74 zti_mode_fixed, /* value is # of threads (min 1) */
75 zti_mode_online_percent, /* value is % of online CPUs */
76 zti_mode_tune, /* fill from zio_taskq_tune_* */
80 #define ZTI_THREAD_FIX(n) { zti_mode_fixed, (n) }
81 #define ZTI_THREAD_PCT(n) { zti_mode_online_percent, (n) }
82 #define ZTI_THREAD_TUNE { zti_mode_tune, 0 }
84 #define ZTI_THREAD_ONE ZTI_THREAD_FIX(1)
86 typedef struct zio_taskq_info {
89 enum zti_modes zti_mode;
91 } zti_nthreads[ZIO_TASKQ_TYPES];
94 static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
98 const zio_taskq_info_t zio_taskqs[ZIO_TYPES] = {
100 { "spa_zio_null", { ZTI_THREAD_ONE, ZTI_THREAD_ONE } },
101 { "spa_zio_read", { ZTI_THREAD_FIX(8), ZTI_THREAD_TUNE } },
102 { "spa_zio_write", { ZTI_THREAD_TUNE, ZTI_THREAD_FIX(8) } },
103 { "spa_zio_free", { ZTI_THREAD_ONE, ZTI_THREAD_ONE } },
104 { "spa_zio_claim", { ZTI_THREAD_ONE, ZTI_THREAD_ONE } },
105 { "spa_zio_ioctl", { ZTI_THREAD_ONE, ZTI_THREAD_ONE } },
108 enum zti_modes zio_taskq_tune_mode = zti_mode_online_percent;
109 uint_t zio_taskq_tune_value = 80; /* #threads = 80% of # online CPUs */
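/*
 * Worked example, assuming 8 online CPUs: a taskq declared ZTI_THREAD_TUNE
 * picks up the defaults above, becoming an online-percent taskq sized at
 * roughly (80 * 8) / 100 = 6 threads.
 */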
111 static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx);
112 static boolean_t spa_has_active_shared_spare(spa_t *spa);
115 * ==========================================================================
116 * SPA properties routines
117 * ==========================================================================
121 * Add a (source=src, propname=propval) list to an nvlist.
124 spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
125 uint64_t intval, zprop_source_t src)
127 const char *propname = zpool_prop_to_name(prop);
130 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
131 VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
134 VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
136 VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
138 VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
139 nvlist_free(propval);
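/*
 * Sketch of the result: after
 *	spa_prop_add_list(nvl, ZPOOL_PROP_SIZE, NULL, size, ZPROP_SRC_NONE);
 * nvl maps the property name to a nested nvlist of the form
 *	"size" -> { ZPROP_SOURCE = ZPROP_SRC_NONE, ZPROP_VALUE = size }
 */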
143 * Get property values from the spa configuration.
146 spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
150 uint64_t cap, version;
151 zprop_source_t src = ZPROP_SRC_NONE;
152 spa_config_dirent_t *dp;
154 ASSERT(MUTEX_HELD(&spa->spa_props_lock));
156 if (spa->spa_root_vdev != NULL) {
157 size = spa_get_space(spa);
158 used = spa_get_alloc(spa);
159 spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
160 spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
161 spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src);
162 spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL,
165 cap = (size == 0) ? 0 : (used * 100 / size);
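/* e.g. used = 25G of size = 100G yields cap = 25 (percent). */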
166 spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
168 spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
169 spa->spa_root_vdev->vdev_state, src);
171 version = spa_version(spa);
172 if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
173 src = ZPROP_SRC_DEFAULT;
175 src = ZPROP_SRC_LOCAL;
176 spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
179 spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
181 if (spa->spa_root != NULL)
182 spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
185 if ((dp = list_head(&spa->spa_config_list)) != NULL) {
186 if (dp->scd_path == NULL) {
187 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
188 "none", 0, ZPROP_SRC_LOCAL);
189 } else if (strcmp(dp->scd_path, spa_config_path) != 0) {
190 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
191 dp->scd_path, 0, ZPROP_SRC_LOCAL);
197 * Get zpool property values.
200 spa_prop_get(spa_t *spa, nvlist_t **nvp)
204 objset_t *mos = spa->spa_meta_objset;
207 VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
209 mutex_enter(&spa->spa_props_lock);
212 * Get properties from the spa config.
214 spa_prop_get_config(spa, nvp);
216 /* If no pool property object, no more props to get. */
217 if (spa->spa_pool_props_object == 0) {
218 mutex_exit(&spa->spa_props_lock);
223 * Get properties from the MOS pool property object.
225 for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
226 (err = zap_cursor_retrieve(&zc, &za)) == 0;
227 zap_cursor_advance(&zc)) {
230 zprop_source_t src = ZPROP_SRC_DEFAULT;
233 if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
236 switch (za.za_integer_length) {
238 /* integer property */
239 if (za.za_first_integer !=
240 zpool_prop_default_numeric(prop))
241 src = ZPROP_SRC_LOCAL;
243 if (prop == ZPOOL_PROP_BOOTFS) {
245 dsl_dataset_t *ds = NULL;
247 dp = spa_get_dsl(spa);
248 rw_enter(&dp->dp_config_rwlock, RW_READER);
249 if (err = dsl_dataset_hold_obj(dp,
250 za.za_first_integer, FTAG, &ds)) {
251 rw_exit(&dp->dp_config_rwlock);
256 MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
258 dsl_dataset_name(ds, strval);
259 dsl_dataset_rele(ds, FTAG);
260 rw_exit(&dp->dp_config_rwlock);
263 intval = za.za_first_integer;
266 spa_prop_add_list(*nvp, prop, strval, intval, src);
270 MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);
275 /* string property */
276 strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
277 err = zap_lookup(mos, spa->spa_pool_props_object,
278 za.za_name, 1, za.za_num_integers, strval);
280 kmem_free(strval, za.za_num_integers);
283 spa_prop_add_list(*nvp, prop, strval, 0, src);
284 kmem_free(strval, za.za_num_integers);
291 zap_cursor_fini(&zc);
292 mutex_exit(&spa->spa_props_lock);
294 if (err && err != ENOENT) {
304 * Validate the given pool properties nvlist and modify the list
305 * for the property values to be set.
308 spa_prop_validate(spa_t *spa, nvlist_t *props)
311 int error = 0, reset_bootfs = 0;
315 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
317 char *propname, *strval;
322 propname = nvpair_name(elem);
324 if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL)
328 case ZPOOL_PROP_VERSION:
329 error = nvpair_value_uint64(elem, &intval);
331 (intval < spa_version(spa) || intval > SPA_VERSION))
335 case ZPOOL_PROP_DELEGATION:
336 case ZPOOL_PROP_AUTOREPLACE:
337 case ZPOOL_PROP_LISTSNAPS:
338 error = nvpair_value_uint64(elem, &intval);
339 if (!error && intval > 1)
343 case ZPOOL_PROP_BOOTFS:
345 * If the pool version is less than SPA_VERSION_BOOTFS,
346 * or the pool is still being created (version == 0),
347 * the bootfs property cannot be set.
349 if (spa_version(spa) < SPA_VERSION_BOOTFS) {
355 * Make sure the vdev config is bootable
357 if (!vdev_is_bootable(spa->spa_root_vdev)) {
364 error = nvpair_value_string(elem, &strval);
369 if (strval == NULL || strval[0] == '\0') {
370 objnum = zpool_prop_default_numeric(
375 if (error = dmu_objset_open(strval, DMU_OST_ZFS,
376 DS_MODE_USER | DS_MODE_READONLY, &os))
379 /* We don't support gzip bootable datasets */
380 if ((error = dsl_prop_get_integer(strval,
381 zfs_prop_to_name(ZFS_PROP_COMPRESSION),
382 &compress, NULL)) == 0 &&
383 !BOOTFS_COMPRESS_VALID(compress)) {
386 objnum = dmu_objset_id(os);
388 dmu_objset_close(os);
392 case ZPOOL_PROP_FAILUREMODE:
393 error = nvpair_value_uint64(elem, &intval);
394 if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
395 intval > ZIO_FAILURE_MODE_PANIC))
399 * This is a special case which only occurs when
400 * the pool has completely failed. This allows
401 * the user to change the in-core failmode property
402 * without syncing it out to disk (I/Os might
403 * currently be blocked). We do this by returning
404 * EIO to the caller (spa_prop_set) to trick it
405 * into thinking we encountered a property validation error.
408 if (!error && spa_suspended(spa)) {
409 spa->spa_failmode = intval;
414 case ZPOOL_PROP_CACHEFILE:
415 if ((error = nvpair_value_string(elem, &strval)) != 0)
418 if (strval[0] == '\0')
421 if (strcmp(strval, "none") == 0)
424 if (strval[0] != '/') {
429 slash = strrchr(strval, '/');
430 ASSERT(slash != NULL);
432 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
433 strcmp(slash, "/..") == 0)
442 if (!error && reset_bootfs) {
443 error = nvlist_remove(props,
444 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
447 error = nvlist_add_uint64(props,
448 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
456 spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
459 spa_config_dirent_t *dp;
461 if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
465 dp = kmem_alloc(sizeof (spa_config_dirent_t),
468 if (cachefile[0] == '\0')
469 dp->scd_path = spa_strdup(spa_config_path);
470 else if (strcmp(cachefile, "none") == 0)
473 dp->scd_path = spa_strdup(cachefile);
475 list_insert_head(&spa->spa_config_list, dp);
477 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
481 spa_prop_set(spa_t *spa, nvlist_t *nvp)
485 boolean_t need_sync = B_FALSE;
488 if ((error = spa_prop_validate(spa, nvp)) != 0)
492 while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
493 if ((prop = zpool_name_to_prop(
494 nvpair_name(elem))) == ZPROP_INVAL)
497 if (prop == ZPOOL_PROP_CACHEFILE || prop == ZPOOL_PROP_ALTROOT)
505 return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
512 * If the bootfs property value is dsobj, clear it.
515 spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
517 if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
518 VERIFY(zap_remove(spa->spa_meta_objset,
519 spa->spa_pool_props_object,
520 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
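/*
 * This keeps a stale object number from lingering in the pool props,
 * e.g. when the dataset that bootfs refers to is destroyed.
 */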
526 * ==========================================================================
527 * SPA state manipulation (open/create/destroy/import/export)
528 * ==========================================================================
532 spa_error_entry_compare(const void *a, const void *b)
534 spa_error_entry_t *sa = (spa_error_entry_t *)a;
535 spa_error_entry_t *sb = (spa_error_entry_t *)b;
538 ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
539 sizeof (zbookmark_t));
550 * Utility function which retrieves copies of the current logs and
551 * re-initializes them in the process.
554 spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
556 ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
558 bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
559 bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
561 avl_create(&spa->spa_errlist_scrub,
562 spa_error_entry_compare, sizeof (spa_error_entry_t),
563 offsetof(spa_error_entry_t, se_avl));
564 avl_create(&spa->spa_errlist_last,
565 spa_error_entry_compare, sizeof (spa_error_entry_t),
566 offsetof(spa_error_entry_t, se_avl));
570 * Activate an uninitialized pool.
573 spa_activate(spa_t *spa, int mode)
575 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
577 spa->spa_state = POOL_STATE_ACTIVE;
578 spa->spa_mode = mode;
580 spa->spa_normal_class = metaslab_class_create(zfs_metaslab_ops);
581 spa->spa_log_class = metaslab_class_create(zfs_metaslab_ops);
583 for (int t = 0; t < ZIO_TYPES; t++) {
584 const zio_taskq_info_t *ztip = &zio_taskqs[t];
585 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
586 enum zti_modes mode = ztip->zti_nthreads[q].zti_mode;
587 uint_t value = ztip->zti_nthreads[q].zti_value;
590 (void) snprintf(name, sizeof (name),
591 "%s_%s", ztip->zti_name, zio_taskq_types[q]);
593 if (mode == zti_mode_tune) {
594 mode = zio_taskq_tune_mode;
595 value = zio_taskq_tune_value;
596 if (mode == zti_mode_tune)
597 mode = zti_mode_online_percent;
602 ASSERT3U(value, >=, 1);
603 value = MAX(value, 1);
605 spa->spa_zio_taskq[t][q] = taskq_create(name,
606 value, maxclsyspri, 50, INT_MAX,
610 case zti_mode_online_percent:
611 spa->spa_zio_taskq[t][q] = taskq_create(name,
612 value, maxclsyspri, 50, INT_MAX,
613 TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
618 panic("unrecognized mode for "
619 "zio_taskqs[%u]->zti_nthreads[%u] (%u:%u) "
627 list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
628 offsetof(vdev_t, vdev_config_dirty_node));
629 list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
630 offsetof(vdev_t, vdev_state_dirty_node));
632 txg_list_create(&spa->spa_vdev_txg_list,
633 offsetof(struct vdev, vdev_txg_node));
635 avl_create(&spa->spa_errlist_scrub,
636 spa_error_entry_compare, sizeof (spa_error_entry_t),
637 offsetof(spa_error_entry_t, se_avl));
638 avl_create(&spa->spa_errlist_last,
639 spa_error_entry_compare, sizeof (spa_error_entry_t),
640 offsetof(spa_error_entry_t, se_avl));
644 * Opposite of spa_activate().
647 spa_deactivate(spa_t *spa)
649 ASSERT(spa->spa_sync_on == B_FALSE);
650 ASSERT(spa->spa_dsl_pool == NULL);
651 ASSERT(spa->spa_root_vdev == NULL);
652 ASSERT(spa->spa_async_zio_root == NULL);
653 ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
655 txg_list_destroy(&spa->spa_vdev_txg_list);
657 list_destroy(&spa->spa_config_dirty_list);
658 list_destroy(&spa->spa_state_dirty_list);
660 for (int t = 0; t < ZIO_TYPES; t++) {
661 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
662 taskq_destroy(spa->spa_zio_taskq[t][q]);
663 spa->spa_zio_taskq[t][q] = NULL;
667 metaslab_class_destroy(spa->spa_normal_class);
668 spa->spa_normal_class = NULL;
670 metaslab_class_destroy(spa->spa_log_class);
671 spa->spa_log_class = NULL;
674 * If this was part of an import or the open otherwise failed, we may
675 * still have errors left in the queues. Empty them just in case.
677 spa_errlog_drain(spa);
679 avl_destroy(&spa->spa_errlist_scrub);
680 avl_destroy(&spa->spa_errlist_last);
682 spa->spa_state = POOL_STATE_UNINITIALIZED;
686 * Verify a pool configuration, and construct the vdev tree appropriately. This
687 * will create all the necessary vdevs in the appropriate layout, with each vdev
688 * in the CLOSED state. This will prep the pool before open/creation/import.
689 * All vdev validation is done by the vdev_alloc() routine.
692 spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
693 uint_t id, int atype)
699 if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
702 if ((*vdp)->vdev_ops->vdev_op_leaf)
705 error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
717 for (c = 0; c < children; c++) {
719 if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
727 ASSERT(*vdp != NULL);
733 * Opposite of spa_load().
736 spa_unload(spa_t *spa)
740 ASSERT(MUTEX_HELD(&spa_namespace_lock));
745 spa_async_suspend(spa);
750 if (spa->spa_sync_on) {
751 txg_sync_stop(spa->spa_dsl_pool);
752 spa->spa_sync_on = B_FALSE;
756 * Wait for any outstanding async I/O to complete.
758 if (spa->spa_async_zio_root != NULL) {
759 (void) zio_wait(spa->spa_async_zio_root);
760 spa->spa_async_zio_root = NULL;
764 * Close the dsl pool.
766 if (spa->spa_dsl_pool) {
767 dsl_pool_close(spa->spa_dsl_pool);
768 spa->spa_dsl_pool = NULL;
771 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
774 * Drop and purge level 2 cache
776 spa_l2cache_drop(spa);
781 if (spa->spa_root_vdev)
782 vdev_free(spa->spa_root_vdev);
783 ASSERT(spa->spa_root_vdev == NULL);
785 for (i = 0; i < spa->spa_spares.sav_count; i++)
786 vdev_free(spa->spa_spares.sav_vdevs[i]);
787 if (spa->spa_spares.sav_vdevs) {
788 kmem_free(spa->spa_spares.sav_vdevs,
789 spa->spa_spares.sav_count * sizeof (void *));
790 spa->spa_spares.sav_vdevs = NULL;
792 if (spa->spa_spares.sav_config) {
793 nvlist_free(spa->spa_spares.sav_config);
794 spa->spa_spares.sav_config = NULL;
796 spa->spa_spares.sav_count = 0;
798 for (i = 0; i < spa->spa_l2cache.sav_count; i++)
799 vdev_free(spa->spa_l2cache.sav_vdevs[i]);
800 if (spa->spa_l2cache.sav_vdevs) {
801 kmem_free(spa->spa_l2cache.sav_vdevs,
802 spa->spa_l2cache.sav_count * sizeof (void *));
803 spa->spa_l2cache.sav_vdevs = NULL;
805 if (spa->spa_l2cache.sav_config) {
806 nvlist_free(spa->spa_l2cache.sav_config);
807 spa->spa_l2cache.sav_config = NULL;
809 spa->spa_l2cache.sav_count = 0;
811 spa->spa_async_suspended = 0;
813 spa_config_exit(spa, SCL_ALL, FTAG);
817 * Load (or re-load) the current list of vdevs describing the active spares for
818 * this pool. When this is called, we have some form of basic information in
819 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
820 * then re-generate a more complete list including status information.
823 spa_load_spares(spa_t *spa)
830 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
833 * First, close and free any existing spare vdevs.
835 for (i = 0; i < spa->spa_spares.sav_count; i++) {
836 vd = spa->spa_spares.sav_vdevs[i];
838 /* Undo the call to spa_activate() below */
839 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
840 B_FALSE)) != NULL && tvd->vdev_isspare)
841 spa_spare_remove(tvd);
846 if (spa->spa_spares.sav_vdevs)
847 kmem_free(spa->spa_spares.sav_vdevs,
848 spa->spa_spares.sav_count * sizeof (void *));
850 if (spa->spa_spares.sav_config == NULL)
853 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
854 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
856 spa->spa_spares.sav_count = (int)nspares;
857 spa->spa_spares.sav_vdevs = NULL;
863 * Construct the array of vdevs, opening them to get status in the
864 * process. For each spare, there are potentially two different vdev_t
865 * structures associated with it: one in the list of spares (used only
866 * for basic validation purposes) and one in the active vdev
867 * configuration (if it's spared in). During this phase we open and
868 * validate each vdev on the spare list. If the vdev also exists in the
869 * active configuration, then we also mark this vdev as an active spare.
871 spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
873 for (i = 0; i < spa->spa_spares.sav_count; i++) {
874 VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
875 VDEV_ALLOC_SPARE) == 0);
878 spa->spa_spares.sav_vdevs[i] = vd;
880 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
882 if (!tvd->vdev_isspare)
886 * We only mark the spare active if we were successfully
887 * able to load the vdev. Otherwise, importing a pool
888 * with a bad active spare would result in strange
889 * behavior, because multiple pools would think the spare
890 * is actively in use.
892 * There is a vulnerability here to an equally bizarre
893 * circumstance, where a dead active spare is later
894 * brought back to life (onlined or otherwise). Given
895 * the rarity of this scenario, and the extra complexity
896 * it adds, we ignore the possibility.
898 if (!vdev_is_dead(tvd))
899 spa_spare_activate(tvd);
903 vd->vdev_aux = &spa->spa_spares;
905 if (vdev_open(vd) != 0)
908 if (vdev_validate_aux(vd) == 0)
913 * Recompute the stashed list of spares, with status information this time.
916 VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
917 DATA_TYPE_NVLIST_ARRAY) == 0);
919 spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
921 for (i = 0; i < spa->spa_spares.sav_count; i++)
922 spares[i] = vdev_config_generate(spa,
923 spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE);
924 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
925 ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
926 for (i = 0; i < spa->spa_spares.sav_count; i++)
927 nvlist_free(spares[i]);
928 kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
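/*
 * The B_TRUE, B_TRUE, B_FALSE flags above appear to be (getstats,
 * isspare, isl2cache); compare the B_TRUE, B_FALSE, B_TRUE call for
 * cache devices in spa_load_l2cache() below.
 */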
932 * Load (or re-load) the current list of vdevs describing the active l2cache for
933 * this pool. When this is called, we have some form of basic information in
934 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
935 * then re-generate a more complete list including status information.
936 * Devices which are already active have their details maintained, and are not re-opened.
940 spa_load_l2cache(spa_t *spa)
946 vdev_t *vd, **oldvdevs, **newvdevs;
947 spa_aux_vdev_t *sav = &spa->spa_l2cache;
949 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
951 if (sav->sav_config != NULL) {
952 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
953 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
954 newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
959 oldvdevs = sav->sav_vdevs;
960 oldnvdevs = sav->sav_count;
961 sav->sav_vdevs = NULL;
965 * Process new nvlist of vdevs.
967 for (i = 0; i < nl2cache; i++) {
968 VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
972 for (j = 0; j < oldnvdevs; j++) {
974 if (vd != NULL && guid == vd->vdev_guid) {
976 * Retain previous vdev for add/remove ops.
984 if (newvdevs[i] == NULL) {
988 VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
989 VDEV_ALLOC_L2CACHE) == 0);
994 * Commit this vdev as an l2cache device,
995 * even if it fails to open.
1002 spa_l2cache_activate(vd);
1004 if (vdev_open(vd) != 0)
1007 (void) vdev_validate_aux(vd);
1009 if (!vdev_is_dead(vd)) {
1010 size = vdev_get_rsize(vd);
1011 l2arc_add_vdev(spa, vd,
1012 VDEV_LABEL_START_SIZE,
1013 size - VDEV_LABEL_START_SIZE);
1019 * Purge vdevs that were dropped
1021 for (i = 0; i < oldnvdevs; i++) {
1026 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
1027 pool != 0ULL && l2arc_vdev_present(vd))
1028 l2arc_remove_vdev(vd);
1029 (void) vdev_close(vd);
1030 spa_l2cache_remove(vd);
1035 kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
1037 if (sav->sav_config == NULL)
1040 sav->sav_vdevs = newvdevs;
1041 sav->sav_count = (int)nl2cache;
1044 * Recompute the stashed list of l2cache devices, with status
1045 * information this time.
1047 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
1048 DATA_TYPE_NVLIST_ARRAY) == 0);
1050 l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
1051 for (i = 0; i < sav->sav_count; i++)
1052 l2cache[i] = vdev_config_generate(spa,
1053 sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE);
1054 VERIFY(nvlist_add_nvlist_array(sav->sav_config,
1055 ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
1057 for (i = 0; i < sav->sav_count; i++)
1058 nvlist_free(l2cache[i]);
1060 kmem_free(l2cache, sav->sav_count * sizeof (void *));
1064 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
1067 char *packed = NULL;
1072 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
1073 nvsize = *(uint64_t *)db->db_data;
1074 dmu_buf_rele(db, FTAG);
1076 packed = kmem_alloc(nvsize, KM_SLEEP);
1077 error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
1080 error = nvlist_unpack(packed, nvsize, value, 0);
1081 kmem_free(packed, nvsize);
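/*
 * The object is expected to hold a packed nvlist: its size sits in the
 * bonus buffer and the packed bytes in the object itself. See the spares
 * and l2cache loads in spa_load() below for typical callers.
 */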
1087 * Checks to see if the given vdev could not be opened, in which case we post a
1088 * sysevent to notify the autoreplace code that the device has been removed.
1091 spa_check_removed(vdev_t *vd)
1095 for (c = 0; c < vd->vdev_children; c++)
1096 spa_check_removed(vd->vdev_child[c]);
1098 if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
1099 zfs_post_autoreplace(vd->vdev_spa, vd);
1100 spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
1105 * Check for missing log devices
1108 spa_check_logs(spa_t *spa)
1110 switch (spa->spa_log_state) {
1111 case SPA_LOG_MISSING:
1112 /* need to recheck in case slog has been restored */
1113 case SPA_LOG_UNKNOWN:
1114 if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL,
1115 DS_FIND_CHILDREN)) {
1116 spa->spa_log_state = SPA_LOG_MISSING;
1122 (void) dmu_objset_find(spa->spa_name, zil_clear_log_chain, NULL,
1126 spa->spa_log_state = SPA_LOG_GOOD;
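/*
 * A nonzero return means a log device's chain could not be verified;
 * spa_load() turns that into a failed open (the FM_EREPORT_ZFS_LOG_REPLAY
 * path below).
 */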
1131 * Load an existing storage pool, using the pool's builtin spa_config as a
1132 * source of configuration information.
1135 spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
1138 nvlist_t *nvroot = NULL;
1140 uberblock_t *ub = &spa->spa_uberblock;
1141 uint64_t config_cache_txg = spa->spa_config_txg;
1144 uint64_t autoreplace = 0;
1145 int orig_mode = spa->spa_mode;
1146 char *ereport = FM_EREPORT_ZFS_POOL;
1149 * If this is an untrusted config, access the pool in read-only mode.
1150 * This prevents things like resilvering recently removed devices.
1153 spa->spa_mode = FREAD;
1155 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1157 spa->spa_load_state = state;
1159 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
1160 nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
1166 * Versioning wasn't explicitly added to the label until later, so if
1167 * it's not present treat it as the initial version.
1169 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
1170 version = SPA_VERSION_INITIAL;
1172 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
1173 &spa->spa_config_txg);
1175 if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
1176 spa_guid_exists(pool_guid, 0)) {
1181 spa->spa_load_guid = pool_guid;
1184 * Create "The Godfather" zio to hold all async IOs
1186 spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
1187 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
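/*
 * Every async I/O the pool issues is parented to this zio, so
 * spa_unload() can quiesce them all with the single zio_wait() on
 * spa_async_zio_root above.
 */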
1190 * Parse the configuration into a vdev tree. We explicitly set the
1191 * value that will be returned by spa_version() since parsing the
1192 * configuration requires knowing the version number.
1194 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1195 spa->spa_ubsync.ub_version = version;
1196 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
1197 spa_config_exit(spa, SCL_ALL, FTAG);
1202 ASSERT(spa->spa_root_vdev == rvd);
1203 ASSERT(spa_guid(spa) == pool_guid);
1206 * Try to open all vdevs, loading each label in the process.
1208 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1209 error = vdev_open(rvd);
1210 spa_config_exit(spa, SCL_ALL, FTAG);
1215 * We need to validate the vdev labels against the configuration that
1216 * we have in hand, which is dependent on the setting of mosconfig. If
1217 * mosconfig is true then we're validating the vdev labels based on
1218 * that config. Otherwise, we're validating against the cached config
1219 * (zpool.cache) that was read when we loaded the zfs module, and then
1220 * later we will recursively call spa_load() and validate against the MOS config.
1223 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1224 error = vdev_validate(rvd);
1225 spa_config_exit(spa, SCL_ALL, FTAG);
1229 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
1235 * Find the best uberblock.
1237 vdev_uberblock_load(NULL, rvd, ub);
1240 * If we weren't able to find a single valid uberblock, return failure.
1242 if (ub->ub_txg == 0) {
1243 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1244 VDEV_AUX_CORRUPT_DATA);
1250 * If the pool is newer than the code, we can't open it.
1252 if (ub->ub_version > SPA_VERSION) {
1253 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1254 VDEV_AUX_VERSION_NEWER);
1260 * If the vdev guid sum doesn't match the uberblock, we have an
1261 * incomplete configuration.
1263 if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
1264 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1265 VDEV_AUX_BAD_GUID_SUM);
1271 * Initialize internal SPA structures.
1273 spa->spa_state = POOL_STATE_ACTIVE;
1274 spa->spa_ubsync = spa->spa_uberblock;
1275 spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
1276 error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
1278 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1279 VDEV_AUX_CORRUPT_DATA);
1282 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
1284 if (zap_lookup(spa->spa_meta_objset,
1285 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
1286 sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
1287 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1288 VDEV_AUX_CORRUPT_DATA);
1294 nvlist_t *newconfig;
1297 if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
1298 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1299 VDEV_AUX_CORRUPT_DATA);
1304 if (!spa_is_root(spa) && nvlist_lookup_uint64(newconfig,
1305 ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
1307 unsigned long myhostid = 0;
1309 VERIFY(nvlist_lookup_string(newconfig,
1310 ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
1312 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
1313 if (check_hostid && hostid != 0 && myhostid != 0 &&
1314 (unsigned long)hostid != myhostid) {
1315 cmn_err(CE_WARN, "pool '%s' could not be "
1316 "loaded as it was last accessed by "
1317 "another system (host: %s hostid: 0x%lx). "
1318 "See: http://www.sun.com/msg/ZFS-8000-EY",
1319 spa_name(spa), hostname,
1320 (unsigned long)hostid);
1326 spa_config_set(spa, newconfig);
1328 spa_deactivate(spa);
1329 spa_activate(spa, orig_mode);
1331 return (spa_load(spa, newconfig, state, B_TRUE));
1334 if (zap_lookup(spa->spa_meta_objset,
1335 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
1336 sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
1337 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1338 VDEV_AUX_CORRUPT_DATA);
1344 * Load the bit that tells us to use the new accounting function
1345 * (raid-z deflation). If we have an older pool, this will not be present.
1348 error = zap_lookup(spa->spa_meta_objset,
1349 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
1350 sizeof (uint64_t), 1, &spa->spa_deflate);
1351 if (error != 0 && error != ENOENT) {
1352 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1353 VDEV_AUX_CORRUPT_DATA);
1359 * Load the persistent error log. If we have an older pool, this will not be present.
1362 error = zap_lookup(spa->spa_meta_objset,
1363 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
1364 sizeof (uint64_t), 1, &spa->spa_errlog_last);
1365 if (error != 0 && error != ENOENT) {
1366 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1367 VDEV_AUX_CORRUPT_DATA);
1372 error = zap_lookup(spa->spa_meta_objset,
1373 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
1374 sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
1375 if (error != 0 && error != ENOENT) {
1376 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1377 VDEV_AUX_CORRUPT_DATA);
1383 * Load the history object. If we have an older pool, this
1384 * will not be present.
1386 error = zap_lookup(spa->spa_meta_objset,
1387 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
1388 sizeof (uint64_t), 1, &spa->spa_history);
1389 if (error != 0 && error != ENOENT) {
1390 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1391 VDEV_AUX_CORRUPT_DATA);
1397 * Load any hot spares for this pool.
1399 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1400 DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object);
1401 if (error != 0 && error != ENOENT) {
1402 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1403 VDEV_AUX_CORRUPT_DATA);
1408 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
1409 if (load_nvlist(spa, spa->spa_spares.sav_object,
1410 &spa->spa_spares.sav_config) != 0) {
1411 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1412 VDEV_AUX_CORRUPT_DATA);
1417 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1418 spa_load_spares(spa);
1419 spa_config_exit(spa, SCL_ALL, FTAG);
1423 * Load any level 2 ARC devices for this pool.
1425 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1426 DMU_POOL_L2CACHE, sizeof (uint64_t), 1,
1427 &spa->spa_l2cache.sav_object);
1428 if (error != 0 && error != ENOENT) {
1429 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1430 VDEV_AUX_CORRUPT_DATA);
1435 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
1436 if (load_nvlist(spa, spa->spa_l2cache.sav_object,
1437 &spa->spa_l2cache.sav_config) != 0) {
1438 vdev_set_state(rvd, B_TRUE,
1439 VDEV_STATE_CANT_OPEN,
1440 VDEV_AUX_CORRUPT_DATA);
1445 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1446 spa_load_l2cache(spa);
1447 spa_config_exit(spa, SCL_ALL, FTAG);
1450 if (spa_check_logs(spa)) {
1451 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1454 ereport = FM_EREPORT_ZFS_LOG_REPLAY;
1459 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
1461 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1462 DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);
1464 if (error && error != ENOENT) {
1465 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1466 VDEV_AUX_CORRUPT_DATA);
1472 (void) zap_lookup(spa->spa_meta_objset,
1473 spa->spa_pool_props_object,
1474 zpool_prop_to_name(ZPOOL_PROP_BOOTFS),
1475 sizeof (uint64_t), 1, &spa->spa_bootfs);
1476 (void) zap_lookup(spa->spa_meta_objset,
1477 spa->spa_pool_props_object,
1478 zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE),
1479 sizeof (uint64_t), 1, &autoreplace);
1480 (void) zap_lookup(spa->spa_meta_objset,
1481 spa->spa_pool_props_object,
1482 zpool_prop_to_name(ZPOOL_PROP_DELEGATION),
1483 sizeof (uint64_t), 1, &spa->spa_delegation);
1484 (void) zap_lookup(spa->spa_meta_objset,
1485 spa->spa_pool_props_object,
1486 zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
1487 sizeof (uint64_t), 1, &spa->spa_failmode);
1491 * If the 'autoreplace' property is set, then post a resource notifying
1492 * the ZFS DE that it should not issue any faults for unopenable
1493 * devices. We also iterate over the vdevs, and post a sysevent for any
1494 * unopenable vdevs so that the normal autoreplace handler can take over.
1497 if (autoreplace && state != SPA_LOAD_TRYIMPORT)
1498 spa_check_removed(spa->spa_root_vdev);
1501 * Load the vdev state for all toplevel vdevs.
1506 * Propagate the leaf DTLs we just loaded all the way up the tree.
1508 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1509 vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
1510 spa_config_exit(spa, SCL_ALL, FTAG);
1513 * Check the state of the root vdev. If it can't be opened, it
1514 * indicates one or more toplevel vdevs are faulted.
1516 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
1521 if (spa_writeable(spa)) {
1523 int need_update = B_FALSE;
1525 ASSERT(state != SPA_LOAD_TRYIMPORT);
1528 * Claim log blocks that haven't been committed yet.
1529 * This must all happen in a single txg.
1531 tx = dmu_tx_create_assigned(spa_get_dsl(spa),
1532 spa_first_txg(spa));
1533 (void) dmu_objset_find(spa_name(spa),
1534 zil_claim, tx, DS_FIND_CHILDREN);
1537 spa->spa_sync_on = B_TRUE;
1538 txg_sync_start(spa->spa_dsl_pool);
1541 * Wait for all claims to sync.
1543 txg_wait_synced(spa->spa_dsl_pool, 0);
1546 * If the config cache is stale, or we have uninitialized
1547 * metaslabs (see spa_vdev_add()), then update the config.
1549 * If spa_load_verbatim is true, trust the current
1550 * in-core spa_config and update the disk labels.
1552 if (config_cache_txg != spa->spa_config_txg ||
1553 state == SPA_LOAD_IMPORT || spa->spa_load_verbatim)
1554 need_update = B_TRUE;
1556 for (int c = 0; c < rvd->vdev_children; c++)
1557 if (rvd->vdev_child[c]->vdev_ms_array == 0)
1558 need_update = B_TRUE;
1561 * Update the config cache asynchronously in case we're the
1562 * root pool, in which case the config cache isn't writable yet.
1565 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
1570 spa->spa_minref = refcount_count(&spa->spa_refcount);
1571 if (error && error != EBADF)
1572 zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
1573 spa->spa_load_state = SPA_LOAD_NONE;
1582 * The import case is identical to an open except that the configuration is sent
1583 * down from userland, instead of grabbed from the configuration cache. For the
1584 * case of an open, the pool configuration will exist in the
1585 * POOL_STATE_UNINITIALIZED state.
1587 * The stats information (gen/count/ustats) is used to gather vdev statistics at
1588 * the same time we open the pool, without having to keep around the spa_t in some ambiguous state.
1592 spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
1596 int locked = B_FALSE;
1601 * As disgusting as this is, we need to support recursive calls to this
1602 * function because dsl_dir_open() is called during spa_load(), and ends
1603 * up calling spa_open() again. The real fix is to figure out how to
1604 * avoid dsl_dir_open() calling this in the first place.
1606 if (mutex_owner(&spa_namespace_lock) != curthread) {
1607 mutex_enter(&spa_namespace_lock);
1611 if ((spa = spa_lookup(pool)) == NULL) {
1613 mutex_exit(&spa_namespace_lock);
1616 if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
1618 spa_activate(spa, spa_mode_global);
1620 error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);
1622 if (error == EBADF) {
1624 * If vdev_validate() returns failure (indicated by
1625 * EBADF), it means that one of the vdevs indicates
1626 * that the pool has been exported or destroyed. If
1627 * this is the case, the config cache is out of sync and
1628 * we should remove the pool from the namespace.
1631 spa_deactivate(spa);
1632 spa_config_sync(spa, B_TRUE, B_TRUE);
1635 mutex_exit(&spa_namespace_lock);
1641 * We can't open the pool, but we still have useful
1642 * information: the state of each vdev after the
1643 * attempted vdev_open(). Return this to the user.
1645 if (config != NULL && spa->spa_root_vdev != NULL)
1646 *config = spa_config_generate(spa, NULL, -1ULL,
1649 spa_deactivate(spa);
1650 spa->spa_last_open_failed = B_TRUE;
1652 mutex_exit(&spa_namespace_lock);
1656 spa->spa_last_open_failed = B_FALSE;
1660 spa_open_ref(spa, tag);
1663 mutex_exit(&spa_namespace_lock);
1668 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
1674 spa_open(const char *name, spa_t **spapp, void *tag)
1676 return (spa_open_common(name, spapp, tag, NULL));
1680 * Look up the given spa_t, incrementing the inject count in the process,
1681 * preventing it from being exported or destroyed.
1684 spa_inject_addref(char *name)
1688 mutex_enter(&spa_namespace_lock);
1689 if ((spa = spa_lookup(name)) == NULL) {
1690 mutex_exit(&spa_namespace_lock);
1693 spa->spa_inject_ref++;
1694 mutex_exit(&spa_namespace_lock);
1700 spa_inject_delref(spa_t *spa)
1702 mutex_enter(&spa_namespace_lock);
1703 spa->spa_inject_ref--;
1704 mutex_exit(&spa_namespace_lock);
1708 * Add spares device information to the nvlist.
1711 spa_add_spares(spa_t *spa, nvlist_t *config)
1721 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
1723 if (spa->spa_spares.sav_count == 0)
1726 VERIFY(nvlist_lookup_nvlist(config,
1727 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1728 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
1729 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
1731 VERIFY(nvlist_add_nvlist_array(nvroot,
1732 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
1733 VERIFY(nvlist_lookup_nvlist_array(nvroot,
1734 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
1737 * Go through and find any spares which have since been
1738 * repurposed as an active spare. If this is the case, update
1739 * their status appropriately.
1741 for (i = 0; i < nspares; i++) {
1742 VERIFY(nvlist_lookup_uint64(spares[i],
1743 ZPOOL_CONFIG_GUID, &guid) == 0);
1744 if (spa_spare_exists(guid, &pool, NULL) &&
1746 VERIFY(nvlist_lookup_uint64_array(
1747 spares[i], ZPOOL_CONFIG_STATS,
1748 (uint64_t **)&vs, &vsc) == 0);
1749 vs->vs_state = VDEV_STATE_CANT_OPEN;
1750 vs->vs_aux = VDEV_AUX_SPARED;
1757 * Add l2cache device information to the nvlist, including vdev stats.
1760 spa_add_l2cache(spa_t *spa, nvlist_t *config)
1763 uint_t i, j, nl2cache;
1770 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
1772 if (spa->spa_l2cache.sav_count == 0)
1775 VERIFY(nvlist_lookup_nvlist(config,
1776 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1777 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
1778 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1779 if (nl2cache != 0) {
1780 VERIFY(nvlist_add_nvlist_array(nvroot,
1781 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
1782 VERIFY(nvlist_lookup_nvlist_array(nvroot,
1783 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1786 * Update level 2 cache device stats.
1789 for (i = 0; i < nl2cache; i++) {
1790 VERIFY(nvlist_lookup_uint64(l2cache[i],
1791 ZPOOL_CONFIG_GUID, &guid) == 0);
1794 for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
1796 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
1797 vd = spa->spa_l2cache.sav_vdevs[j];
1803 VERIFY(nvlist_lookup_uint64_array(l2cache[i],
1804 ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
1805 vdev_get_stats(vd, vs);
1811 spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
1817 error = spa_open_common(name, &spa, FTAG, config);
1821 * This still leaves a window of inconsistency where the spares
1822 * or l2cache devices could change and the config would be
1823 * self-inconsistent.
1825 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
1827 if (*config != NULL) {
1828 VERIFY(nvlist_add_uint64(*config,
1829 ZPOOL_CONFIG_ERRCOUNT,
1830 spa_get_errlog_size(spa)) == 0);
1832 if (spa_suspended(spa))
1833 VERIFY(nvlist_add_uint64(*config,
1834 ZPOOL_CONFIG_SUSPENDED,
1835 spa->spa_failmode) == 0);
1837 spa_add_spares(spa, *config);
1838 spa_add_l2cache(spa, *config);
1843 * We want to get the alternate root even for faulted pools, so we cheat
1844 * and call spa_lookup() directly.
1848 mutex_enter(&spa_namespace_lock);
1849 spa = spa_lookup(name);
1851 spa_altroot(spa, altroot, buflen);
1855 mutex_exit(&spa_namespace_lock);
1857 spa_altroot(spa, altroot, buflen);
1862 spa_config_exit(spa, SCL_CONFIG, FTAG);
1863 spa_close(spa, FTAG);
1870 * Validate that the auxiliary device array is well formed. We must have an
1871 * array of nvlists, each which describes a valid leaf vdev. If this is an
1872 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
1873 * specified, as long as they are well-formed.
1876 spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
1877 spa_aux_vdev_t *sav, const char *config, uint64_t version,
1878 vdev_labeltype_t label)
1885 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1888 * It's acceptable to have no devs specified.
1890 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
1900 * Make sure the pool is formatted with a version that supports this device type.
1900 if (spa_version(spa) < version)
1904 * Set the pending device list so we correctly handle device in-use checking.
1907 sav->sav_pending = dev;
1908 sav->sav_npending = ndev;
1910 for (i = 0; i < ndev; i++) {
1911 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
1915 if (!vd->vdev_ops->vdev_op_leaf) {
1922 * The L2ARC currently only supports disk devices in
1923 * kernel context. For user-level testing, we allow it.
1926 if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
1927 strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
1934 if ((error = vdev_open(vd)) == 0 &&
1935 (error = vdev_label_init(vd, crtxg, label)) == 0) {
1936 VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
1937 vd->vdev_guid) == 0);
1943 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
1950 sav->sav_pending = NULL;
1951 sav->sav_npending = 0;
1956 spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
1960 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1962 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
1963 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
1964 VDEV_LABEL_SPARE)) != 0) {
1968 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
1969 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
1970 VDEV_LABEL_L2CACHE));
1974 spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
1979 if (sav->sav_config != NULL) {
1985 * Generate new dev list by concatenating with the current dev list.
1988 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
1989 &olddevs, &oldndevs) == 0);
1991 newdevs = kmem_alloc(sizeof (void *) *
1992 (ndevs + oldndevs), KM_SLEEP);
1993 for (i = 0; i < oldndevs; i++)
1994 VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
1996 for (i = 0; i < ndevs; i++)
1997 VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
2000 VERIFY(nvlist_remove(sav->sav_config, config,
2001 DATA_TYPE_NVLIST_ARRAY) == 0);
2003 VERIFY(nvlist_add_nvlist_array(sav->sav_config,
2004 config, newdevs, ndevs + oldndevs) == 0);
2005 for (i = 0; i < oldndevs + ndevs; i++)
2006 nvlist_free(newdevs[i]);
2007 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
2010 * Generate a new dev list.
2012 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
2014 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
2020 * Stop and drop level 2 ARC devices
2023 spa_l2cache_drop(spa_t *spa)
2027 spa_aux_vdev_t *sav = &spa->spa_l2cache;
2029 for (i = 0; i < sav->sav_count; i++) {
2032 vd = sav->sav_vdevs[i];
2035 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
2036 pool != 0ULL && l2arc_vdev_present(vd))
2037 l2arc_remove_vdev(vd);
2038 if (vd->vdev_isl2cache)
2039 spa_l2cache_remove(vd);
2040 vdev_clear_stats(vd);
2041 (void) vdev_close(vd);
2049 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
2050 const char *history_str, nvlist_t *zplprops)
2053 char *altroot = NULL;
2058 uint64_t txg = TXG_INITIAL;
2059 nvlist_t **spares, **l2cache;
2060 uint_t nspares, nl2cache;
2064 * If this pool already exists, return failure.
2066 mutex_enter(&spa_namespace_lock);
2067 if (spa_lookup(pool) != NULL) {
2068 mutex_exit(&spa_namespace_lock);
2073 * Allocate a new spa_t structure.
2075 (void) nvlist_lookup_string(props,
2076 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
2077 spa = spa_add(pool, altroot);
2078 spa_activate(spa, spa_mode_global);
2080 spa->spa_uberblock.ub_txg = txg - 1;
2082 if (props && (error = spa_prop_validate(spa, props))) {
2083 spa_deactivate(spa);
2085 mutex_exit(&spa_namespace_lock);
2089 if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION),
2091 version = SPA_VERSION;
2092 ASSERT(version <= SPA_VERSION);
2093 spa->spa_uberblock.ub_version = version;
2094 spa->spa_ubsync = spa->spa_uberblock;
2097 * Create "The Godfather" zio to hold all async IOs
2099 spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
2100 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
2103 * Create the root vdev.
2105 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2107 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
2109 ASSERT(error != 0 || rvd != NULL);
2110 ASSERT(error != 0 || spa->spa_root_vdev == rvd);
2112 if (error == 0 && !zfs_allocatable_devs(nvroot))
2116 (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
2117 (error = spa_validate_aux(spa, nvroot, txg,
2118 VDEV_ALLOC_ADD)) == 0) {
2119 for (c = 0; c < rvd->vdev_children; c++)
2120 vdev_init(rvd->vdev_child[c], txg);
2121 vdev_config_dirty(rvd);
2124 spa_config_exit(spa, SCL_ALL, FTAG);
2128 spa_deactivate(spa);
2130 mutex_exit(&spa_namespace_lock);
2135 * Get the list of spares, if specified.
2137 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
2138 &spares, &nspares) == 0) {
2139 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
2141 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
2142 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
2143 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2144 spa_load_spares(spa);
2145 spa_config_exit(spa, SCL_ALL, FTAG);
2146 spa->spa_spares.sav_sync = B_TRUE;
2150 * Get the list of level 2 cache devices, if specified.
2152 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
2153 &l2cache, &nl2cache) == 0) {
2154 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
2155 NV_UNIQUE_NAME, KM_SLEEP) == 0);
2156 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
2157 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
2158 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2159 spa_load_l2cache(spa);
2160 spa_config_exit(spa, SCL_ALL, FTAG);
2161 spa->spa_l2cache.sav_sync = B_TRUE;
2164 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
2165 spa->spa_meta_objset = dp->dp_meta_objset;
2167 tx = dmu_tx_create_assigned(dp, txg);
2170 * Create the pool config object.
2172 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
2173 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
2174 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
2176 if (zap_add(spa->spa_meta_objset,
2177 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
2178 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
2179 cmn_err(CE_PANIC, "failed to add pool config");
2182 /* Newly created pools with the right version are always deflated. */
2183 if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
2184 spa->spa_deflate = TRUE;
2185 if (zap_add(spa->spa_meta_objset,
2186 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
2187 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
2188 cmn_err(CE_PANIC, "failed to add deflate");
2193 * Create the deferred-free bplist object. Turn off compression
2194 * because sync-to-convergence takes longer if the blocksize keeps changing.
2197 spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
2199 dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
2200 ZIO_COMPRESS_OFF, tx);
2202 if (zap_add(spa->spa_meta_objset,
2203 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
2204 sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
2205 cmn_err(CE_PANIC, "failed to add bplist");
2209 * Create the pool's history object.
2211 if (version >= SPA_VERSION_ZPOOL_HISTORY)
2212 spa_history_create_obj(spa, tx);
2215 * Set pool properties.
2217 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
2218 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
2219 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
2220 if (props != NULL) {
2221 spa_configfile_set(spa, props, B_FALSE);
2222 spa_sync_props(spa, props, CRED(), tx);
2227 spa->spa_sync_on = B_TRUE;
2228 txg_sync_start(spa->spa_dsl_pool);
2231 * We explicitly wait for the first transaction to complete so that our
2232 * bean counters are appropriately updated.
2234 txg_wait_synced(spa->spa_dsl_pool, txg);
2236 spa_config_sync(spa, B_FALSE, B_TRUE);
2238 if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL)
2239 (void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE);
2241 spa->spa_minref = refcount_count(&spa->spa_refcount);
2243 mutex_exit(&spa_namespace_lock);
2251 * Build a "root" vdev for a top level vdev read in from a rootpool device label.
2255 spa_build_rootpool_config(nvlist_t *config)
2257 nvlist_t *nvtop, *nvroot;
2261 * Add this top-level vdev to the child array.
2263 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtop)
2265 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pgid)
2269 * Put this pool's top-level vdevs into a root vdev.
2271 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2272 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT)
2274 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
2275 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
2276 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2280 * Replace the existing vdev_tree with the new root vdev in
2281 * this pool's configuration (remove the old, add the new).
2283 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
2284 nvlist_free(nvroot);
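/*
 * Illustrative shape of the rewritten config:
 *	vdev_tree: { type = "root", id = 0, guid = <pool guid>,
 *	    children = [ <the original top-level vdev> ] }
 */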
2288 * Get the root pool information from the root disk, then import the root pool
2289 * at system boot time.
2291 extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
2294 spa_check_rootconf(char *devpath, char *devid, nvlist_t **bestconf,
2301 if (error = vdev_disk_read_rootlabel(devpath, devid, &config))
2304 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
2306 if (bestconf != NULL)
2309 nvlist_free(config);
2315 spa_rootdev_validate(nvlist_t *nv)
2319 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2320 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2321 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2329 * Given the boot device's physical path or devid, check if the device
2330 * is in a valid state. If so, return the configuration from the vdev label.
2334 spa_get_rootconf(char *devpath, char *devid, nvlist_t **bestconf)
2336 nvlist_t *conf = NULL;
2338 nvlist_t *nvtop, **child;
2340 char *bootpath = NULL;
2345 if (devpath && ((tmp = strchr(devpath, ' ')) != NULL))
2347 if (error = spa_check_rootconf(devpath, devid, &conf, &txg)) {
2348 cmn_err(CE_NOTE, "error reading device label");
2352 cmn_err(CE_NOTE, "this device is detached");
2357 VERIFY(nvlist_lookup_nvlist(conf, ZPOOL_CONFIG_VDEV_TREE,
2359 VERIFY(nvlist_lookup_string(nvtop, ZPOOL_CONFIG_TYPE, &type) == 0);
2361 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2362 if (spa_rootdev_validate(nvtop)) {
2370 ASSERT(strcmp(type, VDEV_TYPE_MIRROR) == 0);
2372 VERIFY(nvlist_lookup_nvlist_array(nvtop, ZPOOL_CONFIG_CHILDREN,
2373 &child, &children) == 0);
2376 * Go through the vdevs in the mirror to see if the given device
2377 * has the most recent txg. Only the device with the most
2378 * recent txg has valid information and should be booted.
2380 for (c = 0; c < children; c++) {
2381 char *cdevid, *cpath;
2386 if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_PHYS_PATH,
2387 &cpath) != 0 && nvlist_lookup_string(child[c],
2388 ZPOOL_CONFIG_DEVID, &cdevid) != 0)
2390 if ((spa_check_rootconf(cpath, cdevid, NULL,
2391 &tmptxg) == 0) && (tmptxg > txg)) {
2393 VERIFY(nvlist_lookup_string(child[c],
2394 ZPOOL_CONFIG_PATH, &bootpath) == 0);
2398 /* Does the best device match the one we've booted from? */
2400 cmn_err(CE_NOTE, "try booting from '%s'", bootpath);
2409 * Import a root pool.
2411 * For x86, devpath_list will consist of the devid and/or physpath name of
2412 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
2413 * The GRUB "findroot" command will return the vdev we should boot.
2415 * For SPARC, devpath_list consists of the physpath name of the booting device,
2416 * whether the rootpool is a single-device pool or a mirrored pool, e.g.:
2418 * "/pci@1f,0/ide@d/disk@0,0:a"
2421 spa_import_rootpool(char *devpath, char *devid)
2423 nvlist_t *conf = NULL;
2429 * Get the vdev pathname and configuration from the most
2430 * recently updated vdev (highest txg).
2432 if (error = spa_get_rootconf(devpath, devid, &conf))
2436 * Add type "root" vdev to the config.
2438 spa_build_rootpool_config(conf);
2440 VERIFY(nvlist_lookup_string(conf, ZPOOL_CONFIG_POOL_NAME, &pname) == 0);
2442 mutex_enter(&spa_namespace_lock);
2443 if ((spa = spa_lookup(pname)) != NULL) {
2445 * Remove the existing root pool from the namespace so that we
2446 * can replace it with the correct config we just read in.
2451 spa = spa_add(pname, NULL);
2452 spa->spa_is_root = B_TRUE;
2453 spa->spa_load_verbatim = B_TRUE;
2455 VERIFY(nvlist_dup(conf, &spa->spa_config, 0) == 0);
2456 mutex_exit(&spa_namespace_lock);
2462 cmn_err(CE_NOTE, "\n"
2463 " *************************************************** \n"
2464 " * This device is not bootable! * \n"
2465 " * It is either offlined or detached or faulted. * \n"
2466 " * Please try to boot from a different device. * \n"
2467 " *************************************************** ");
2475 * Take a pool and insert it into the namespace as if it had been loaded at boot.
2479 spa_import_verbatim(const char *pool, nvlist_t *config, nvlist_t *props)
2482 char *altroot = NULL;
2484 mutex_enter(&spa_namespace_lock);
2485 if (spa_lookup(pool) != NULL) {
2486 mutex_exit(&spa_namespace_lock);
2490 (void) nvlist_lookup_string(props,
2491 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
2492 spa = spa_add(pool, altroot);
2494 spa->spa_load_verbatim = B_TRUE;
2496 VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
2499 spa_configfile_set(spa, props, B_FALSE);
2501 spa_config_sync(spa, B_FALSE, B_TRUE);
2503 mutex_exit(&spa_namespace_lock);
2509 * Import a non-root pool into the system.
2512 spa_import(const char *pool, nvlist_t *config, nvlist_t *props)
2515 char *altroot = NULL;
2518 nvlist_t **spares, **l2cache;
2519 uint_t nspares, nl2cache;
2522 * If a pool with this name exists, return failure.
2524 mutex_enter(&spa_namespace_lock);
2525 if ((spa = spa_lookup(pool)) != NULL) {
2526 mutex_exit(&spa_namespace_lock);
2531 * Create and initialize the spa structure.
2533 (void) nvlist_lookup_string(props,
2534 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
2535 spa = spa_add(pool, altroot);
2536 spa_activate(spa, spa_mode_global);
2539 * Don't start async tasks until we know everything is healthy.
2541 spa_async_suspend(spa);
2544	 * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig
2545	 * because the user-supplied config is actually the one to trust when
2546	 * doing an import.
2548 error = spa_load(spa, config, SPA_LOAD_IMPORT, B_TRUE);
2550 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2552 * Toss any existing sparelist, as it doesn't have any validity
2553 * anymore, and conflicts with spa_has_spare().
2555 if (spa->spa_spares.sav_config) {
2556 nvlist_free(spa->spa_spares.sav_config);
2557 spa->spa_spares.sav_config = NULL;
2558 spa_load_spares(spa);
2560 if (spa->spa_l2cache.sav_config) {
2561 nvlist_free(spa->spa_l2cache.sav_config);
2562 spa->spa_l2cache.sav_config = NULL;
2563 spa_load_l2cache(spa);
2566 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2569 error = spa_validate_aux(spa, nvroot, -1ULL,
2572 error = spa_validate_aux(spa, nvroot, -1ULL,
2573 VDEV_ALLOC_L2CACHE);
2574 spa_config_exit(spa, SCL_ALL, FTAG);
2577 spa_configfile_set(spa, props, B_FALSE);
2579 if (error != 0 || (props && spa_writeable(spa) &&
2580 (error = spa_prop_set(spa, props)))) {
2582 spa_deactivate(spa);
2584 mutex_exit(&spa_namespace_lock);
2588 spa_async_resume(spa);
2591 * Override any spares and level 2 cache devices as specified by
2592 * the user, as these may have correct device names/devids, etc.
2594 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
2595 &spares, &nspares) == 0) {
2596 if (spa->spa_spares.sav_config)
2597 VERIFY(nvlist_remove(spa->spa_spares.sav_config,
2598 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
2600 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
2601 NV_UNIQUE_NAME, KM_SLEEP) == 0);
2602 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
2603 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
2604 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2605 spa_load_spares(spa);
2606 spa_config_exit(spa, SCL_ALL, FTAG);
2607 spa->spa_spares.sav_sync = B_TRUE;
2609 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
2610 &l2cache, &nl2cache) == 0) {
2611 if (spa->spa_l2cache.sav_config)
2612 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
2613 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
2615 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
2616 NV_UNIQUE_NAME, KM_SLEEP) == 0);
2617 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
2618 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
2619 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2620 spa_load_l2cache(spa);
2621 spa_config_exit(spa, SCL_ALL, FTAG);
2622 spa->spa_l2cache.sav_sync = B_TRUE;
2625 if (spa_writeable(spa)) {
2627 * Update the config cache to include the newly-imported pool.
2629 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
2632 mutex_exit(&spa_namespace_lock);
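
/*
 * Illustrative sketch (not original code): the pool-import ioctl path
 * hands the user-supplied config and optional property list straight to
 * spa_import(). The variable names here are hypothetical.
 */
#if 0	/* example only */
	nvlist_t *config;	/* assume unpacked from the user request */
	nvlist_t *props;	/* optional property list, may be NULL */

	if (spa_import("tank", config, props) != 0)
		cmn_err(CE_NOTE, "import of pool 'tank' failed");
#endif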
2638 * This (illegal) pool name is used when temporarily importing a spa_t in order
2639 * to get the vdev stats associated with the imported devices.
2641 #define TRYIMPORT_NAME "$import"
2644 spa_tryimport(nvlist_t *tryconfig)
2646 nvlist_t *config = NULL;
2652 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
2655 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
2659 * Create and initialize the spa structure.
2661 mutex_enter(&spa_namespace_lock);
2662 spa = spa_add(TRYIMPORT_NAME, NULL);
2663 spa_activate(spa, FREAD);
2666 * Pass off the heavy lifting to spa_load().
2667 * Pass TRUE for mosconfig because the user-supplied config
2668 * is actually the one to trust when doing an import.
2670 error = spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);
2673 * If 'tryconfig' was at least parsable, return the current config.
2675 if (spa->spa_root_vdev != NULL) {
2676 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
2677 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
2679 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
2681 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
2682 spa->spa_uberblock.ub_timestamp) == 0);
2685 * If the bootfs property exists on this pool then we
2686 * copy it out so that external consumers can tell which
2687 * pools are bootable.
2689 if ((!error || error == EEXIST) && spa->spa_bootfs) {
2690 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2693 * We have to play games with the name since the
2694 * pool was opened as TRYIMPORT_NAME.
2696 if (dsl_dsobj_to_dsname(spa_name(spa),
2697 spa->spa_bootfs, tmpname) == 0) {
2699 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2701 cp = strchr(tmpname, '/');
2703 (void) strlcpy(dsname, tmpname,
2706 (void) snprintf(dsname, MAXPATHLEN,
2707 "%s/%s", poolname, ++cp);
2709 VERIFY(nvlist_add_string(config,
2710 ZPOOL_CONFIG_BOOTFS, dsname) == 0);
2711 kmem_free(dsname, MAXPATHLEN);
2713 kmem_free(tmpname, MAXPATHLEN);
2717 * Add the list of hot spares and level 2 cache devices.
2719 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
2720 spa_add_spares(spa, config);
2721 spa_add_l2cache(spa, config);
2722 spa_config_exit(spa, SCL_CONFIG, FTAG);
2726 spa_deactivate(spa);
2728 mutex_exit(&spa_namespace_lock);
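
/*
 * Illustrative sketch (not original code): a caller inspects the config
 * that spa_tryimport() hands back, assuming it returns NULL when the
 * supplied config was not even parsable.
 */
#if 0	/* example only */
	nvlist_t *config;
	char *name;

	if ((config = spa_tryimport(tryconfig)) != NULL) {
		VERIFY(nvlist_lookup_string(config,
		    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
		nvlist_free(config);
	}
#endif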
2734 * Pool export/destroy
2736 * The act of destroying or exporting a pool is very simple. We make sure there
2737 * is no more pending I/O and any references to the pool are gone. Then, we
2738 * update the pool state and sync all the labels to disk, removing the
2739 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
2740 * we don't sync the labels or remove the configuration cache.
2743 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
2744 boolean_t force, boolean_t hardforce)
2751 if (!(spa_mode_global & FWRITE))
2754 mutex_enter(&spa_namespace_lock);
2755 if ((spa = spa_lookup(pool)) == NULL) {
2756 mutex_exit(&spa_namespace_lock);
2761 * Put a hold on the pool, drop the namespace lock, stop async tasks,
2762 * reacquire the namespace lock, and see if we can export.
2764 spa_open_ref(spa, FTAG);
2765 mutex_exit(&spa_namespace_lock);
2766 spa_async_suspend(spa);
2767 mutex_enter(&spa_namespace_lock);
2768 spa_close(spa, FTAG);
2771 * The pool will be in core if it's openable,
2772 * in which case we can modify its state.
2774 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
2776		 * Objsets may be open only because they're dirty, so we
2777		 * have to force the pool to sync before checking spa_refcnt.
2779 txg_wait_synced(spa->spa_dsl_pool, 0);
2782 * A pool cannot be exported or destroyed if there are active
2783 * references. If we are resetting a pool, allow references by
2784 * fault injection handlers.
2786 if (!spa_refcount_zero(spa) ||
2787 (spa->spa_inject_ref != 0 &&
2788 new_state != POOL_STATE_UNINITIALIZED)) {
2789 spa_async_resume(spa);
2790 mutex_exit(&spa_namespace_lock);
2795 * A pool cannot be exported if it has an active shared spare.
2796 * This is to prevent other pools stealing the active spare
2797		 * from an exported pool. At the user's discretion, such a
2798		 * pool can still be forcibly exported.
2800 if (!force && new_state == POOL_STATE_EXPORTED &&
2801 spa_has_active_shared_spare(spa)) {
2802 spa_async_resume(spa);
2803 mutex_exit(&spa_namespace_lock);
2808 * We want this to be reflected on every label,
2809 * so mark them all dirty. spa_unload() will do the
2810 * final sync that pushes these changes out.
2812 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
2813 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2814 spa->spa_state = new_state;
2815 spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
2816 vdev_config_dirty(spa->spa_root_vdev);
2817 spa_config_exit(spa, SCL_ALL, FTAG);
2821 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);
2823 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
2825 spa_deactivate(spa);
2828 if (oldconfig && spa->spa_config)
2829 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
2831 if (new_state != POOL_STATE_UNINITIALIZED) {
2833 spa_config_sync(spa, B_TRUE, B_TRUE);
2836 mutex_exit(&spa_namespace_lock);
2842 * Destroy a storage pool.
2845 spa_destroy(char *pool)
2847 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
2852 * Export a storage pool.
2855 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
2856 boolean_t hardforce)
2858 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
2863 * Similar to spa_export(), this unloads the spa_t without actually removing it
2864 * from the namespace in any way.
2867 spa_reset(char *pool)
2869 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
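
/*
 * Illustrative sketch (not original code): the three wrappers above
 * differ mainly in the pool state they hand to spa_export_common() and
 * in whether the old config is returned to the caller.
 */
#if 0	/* example only */
	nvlist_t *oldconfig = NULL;

	(void) spa_export("tank", &oldconfig, B_FALSE, B_FALSE);
	(void) spa_destroy("dozer");		/* marks the pool destroyed */
	(void) spa_reset("crash_test");		/* fault-injection teardown */
	if (oldconfig != NULL)
		nvlist_free(oldconfig);
#endif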
2874 * ==========================================================================
2875 * Device manipulation
2876 * ==========================================================================
2880 * Add a device to a storage pool.
2883 spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
2887 vdev_t *rvd = spa->spa_root_vdev;
2889 nvlist_t **spares, **l2cache;
2890 uint_t nspares, nl2cache;
2892 txg = spa_vdev_enter(spa);
2894 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
2895 VDEV_ALLOC_ADD)) != 0)
2896 return (spa_vdev_exit(spa, NULL, txg, error));
2898 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
2900 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
2904 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
2908 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
2909 return (spa_vdev_exit(spa, vd, txg, EINVAL));
2911 if (vd->vdev_children != 0 &&
2912 (error = vdev_create(vd, txg, B_FALSE)) != 0)
2913 return (spa_vdev_exit(spa, vd, txg, error));
2916 * We must validate the spares and l2cache devices after checking the
2917 * children. Otherwise, vdev_inuse() will blindly overwrite the spare.
2919 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
2920 return (spa_vdev_exit(spa, vd, txg, error));
2923 * Transfer each new top-level vdev from vd to rvd.
2925 for (int c = 0; c < vd->vdev_children; c++) {
2926 tvd = vd->vdev_child[c];
2927 vdev_remove_child(vd, tvd);
2928 tvd->vdev_id = rvd->vdev_children;
2929 vdev_add_child(rvd, tvd);
2930 vdev_config_dirty(tvd);
2934 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
2935 ZPOOL_CONFIG_SPARES);
2936 spa_load_spares(spa);
2937 spa->spa_spares.sav_sync = B_TRUE;
2940 if (nl2cache != 0) {
2941 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
2942 ZPOOL_CONFIG_L2CACHE);
2943 spa_load_l2cache(spa);
2944 spa->spa_l2cache.sav_sync = B_TRUE;
2948 * We have to be careful when adding new vdevs to an existing pool.
2949 * If other threads start allocating from these vdevs before we
2950 * sync the config cache, and we lose power, then upon reboot we may
2951 * fail to open the pool because there are DVAs that the config cache
2952 * can't translate. Therefore, we first add the vdevs without
2953 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
2954 * and then let spa_config_update() initialize the new metaslabs.
2956 * spa_load() checks for added-but-not-initialized vdevs, so that
2957 * if we lose power at any point in this sequence, the remaining
2958 * steps will be completed the next time we load the pool.
2960 (void) spa_vdev_exit(spa, vd, txg, 0);
2962 mutex_enter(&spa_namespace_lock);
2963 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
2964 mutex_exit(&spa_namespace_lock);
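
/*
 * Illustrative sketch (not original code): a minimal nvroot for
 * spa_vdev_add(), adding one disk as a new top-level vdev. The exact
 * nvlist layout is an assumption here; in practice it is built by
 * userland and validated by spa_config_parse().
 */
#if 0	/* example only */
	nvlist_t *nvroot, *disk;

	VERIFY(nvlist_alloc(&disk, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	VERIFY(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
	    "/dev/dsk/c0t1d0s0") == 0);	/* hypothetical device */

	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &disk, 1) == 0);

	(void) spa_vdev_add(spa, nvroot);
	nvlist_free(disk);
	nvlist_free(nvroot);
#endif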
2970 * Attach a device to a mirror. The arguments are the path to any device
2971 * in the mirror, and the nvroot for the new device. If the path specifies
2972 * a device that is not mirrored, we automatically insert the mirror vdev.
2974 * If 'replacing' is specified, the new device is intended to replace the
2975 * existing device; in this case the two devices are made into their own
2976 * mirror using the 'replacing' vdev, which is functionally identical to
2977 * the mirror vdev (it actually reuses all the same ops) but has a few
2978 * extra rules: you can't attach to it after it's been created, and upon
2979 * completion of resilvering, the first disk (the one being replaced)
2980 * is automatically detached.
2983 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
2985 uint64_t txg, open_txg;
2986 vdev_t *rvd = spa->spa_root_vdev;
2987 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
2990 char *oldvdpath, *newvdpath;
2994 txg = spa_vdev_enter(spa);
2996 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
2999 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
3001 if (!oldvd->vdev_ops->vdev_op_leaf)
3002 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3004 pvd = oldvd->vdev_parent;
3006 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
3007 VDEV_ALLOC_ADD)) != 0)
3008 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
3010 if (newrootvd->vdev_children != 1)
3011 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
3013 newvd = newrootvd->vdev_child[0];
3015 if (!newvd->vdev_ops->vdev_op_leaf)
3016 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
3018 if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
3019 return (spa_vdev_exit(spa, newrootvd, txg, error));
3022 * Spares can't replace logs
3024 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
3025 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3029		 * For attach, the only allowable parent is a mirror or the root vdev.
3032 if (pvd->vdev_ops != &vdev_mirror_ops &&
3033 pvd->vdev_ops != &vdev_root_ops)
3034 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3036 pvops = &vdev_mirror_ops;
3039		 * Active hot spares can only be replaced by inactive hot spares.
3042 if (pvd->vdev_ops == &vdev_spare_ops &&
3043 pvd->vdev_child[1] == oldvd &&
3044 !spa_has_spare(spa, newvd->vdev_guid))
3045 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3048 * If the source is a hot spare, and the parent isn't already a
3049 * spare, then we want to create a new hot spare. Otherwise, we
3050 * want to create a replacing vdev. The user is not allowed to
3051 * attach to a spared vdev child unless the 'isspare' state is
3052		 * the same (spare replaces spare, non-spare replaces non-spare).
3055 if (pvd->vdev_ops == &vdev_replacing_ops)
3056 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3057 else if (pvd->vdev_ops == &vdev_spare_ops &&
3058 newvd->vdev_isspare != oldvd->vdev_isspare)
3059 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3060 else if (pvd->vdev_ops != &vdev_spare_ops &&
3061 newvd->vdev_isspare)
3062 pvops = &vdev_spare_ops;
3064 pvops = &vdev_replacing_ops;
3068	 * Compare the new device size with the replaceable/attachable device size.
3071 if (newvd->vdev_psize < vdev_get_rsize(oldvd))
3072 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
3075 * The new device cannot have a higher alignment requirement
3076 * than the top-level vdev.
3078 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
3079 return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
3082 * If this is an in-place replacement, update oldvd's path and devid
3083 * to make it distinguishable from newvd, and unopenable from now on.
3085 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
3086 spa_strfree(oldvd->vdev_path);
3087 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
3089 (void) sprintf(oldvd->vdev_path, "%s/%s",
3090 newvd->vdev_path, "old");
3091 if (oldvd->vdev_devid != NULL) {
3092 spa_strfree(oldvd->vdev_devid);
3093 oldvd->vdev_devid = NULL;
3098 * If the parent is not a mirror, or if we're replacing, insert the new
3099 * mirror/replacing/spare vdev above oldvd.
3101 if (pvd->vdev_ops != pvops)
3102 pvd = vdev_add_parent(oldvd, pvops);
3104 ASSERT(pvd->vdev_top->vdev_parent == rvd);
3105 ASSERT(pvd->vdev_ops == pvops);
3106 ASSERT(oldvd->vdev_parent == pvd);
3109 * Extract the new device from its root and add it to pvd.
3111 vdev_remove_child(newrootvd, newvd);
3112 newvd->vdev_id = pvd->vdev_children;
3113 vdev_add_child(pvd, newvd);
3116 * If newvd is smaller than oldvd, but larger than its rsize,
3117 * the addition of newvd may have decreased our parent's asize.
3119 pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize);
3121 tvd = newvd->vdev_top;
3122 ASSERT(pvd->vdev_top == tvd);
3123 ASSERT(tvd->vdev_parent == rvd);
3125 vdev_config_dirty(tvd);
3128 * Set newvd's DTL to [TXG_INITIAL, open_txg]. It will propagate
3129 * upward when spa_vdev_exit() calls vdev_dtl_reassess().
3131 open_txg = txg + TXG_CONCURRENT_STATES - 1;
3133 vdev_dtl_dirty(newvd, DTL_MISSING,
3134 TXG_INITIAL, open_txg - TXG_INITIAL + 1);
3136 if (newvd->vdev_isspare) {
3137 spa_spare_activate(newvd);
3138 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE);
3141 oldvdpath = spa_strdup(oldvd->vdev_path);
3142 newvdpath = spa_strdup(newvd->vdev_path);
3143 newvd_isspare = newvd->vdev_isspare;
3146 * Mark newvd's DTL dirty in this txg.
3148 vdev_dirty(tvd, VDD_DTL, newvd, txg);
3150 (void) spa_vdev_exit(spa, newrootvd, open_txg, 0);
3152 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
3153 if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
3154 spa_history_internal_log(LOG_POOL_VDEV_ATTACH, spa, tx,
3155 CRED(), "%s vdev=%s %s vdev=%s",
3156 replacing && newvd_isspare ? "spare in" :
3157 replacing ? "replace" : "attach", newvdpath,
3158 replacing ? "for" : "to", oldvdpath);
3164 spa_strfree(oldvdpath);
3165 spa_strfree(newvdpath);
3168 * Kick off a resilver to update newvd.
3170 VERIFY3U(spa_scrub(spa, POOL_SCRUB_RESILVER), ==, 0);
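
/*
 * Illustrative sketch (not original code): the 'replacing' flag is the
 * only difference between mirroring alongside an existing leaf and
 * replacing it; both go through spa_vdev_attach(). Here 'guid' names
 * the existing leaf and 'nvroot' describes the single new device, as
 * in the sketch after spa_vdev_add() above.
 */
#if 0	/* example only */
	(void) spa_vdev_attach(spa, guid, nvroot, B_FALSE);	/* mirror */
	(void) spa_vdev_attach(spa, guid, nvroot, B_TRUE);	/* replace */
#endif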
3176 * Detach a device from a mirror or replacing vdev.
3177 * If 'replace_done' is specified, only detach if the parent
3178 * is a replacing vdev.
3181 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
3185 vdev_t *rvd = spa->spa_root_vdev;
3186 vdev_t *vd, *pvd, *cvd, *tvd;
3187 boolean_t unspare = B_FALSE;
3188 uint64_t unspare_guid;
3191 txg = spa_vdev_enter(spa);
3193 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
3196 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
3198 if (!vd->vdev_ops->vdev_op_leaf)
3199 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3201 pvd = vd->vdev_parent;
3204 * If the parent/child relationship is not as expected, don't do it.
3205 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
3206 * vdev that's replacing B with C. The user's intent in replacing
3207 * is to go from M(A,B) to M(A,C). If the user decides to cancel
3208 * the replace by detaching C, the expected behavior is to end up
3209 * M(A,B). But suppose that right after deciding to detach C,
3210 * the replacement of B completes. We would have M(A,C), and then
3211 * ask to detach C, which would leave us with just A -- not what
3212 * the user wanted. To prevent this, we make sure that the
3213 * parent/child relationship hasn't changed -- in this example,
3214 * that C's parent is still the replacing vdev R.
3216 if (pvd->vdev_guid != pguid && pguid != 0)
3217 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
3220 * If replace_done is specified, only remove this device if it's
3221 * the first child of a replacing vdev. For the 'spare' vdev, either
3222 * disk can be removed.
3225 if (pvd->vdev_ops == &vdev_replacing_ops) {
3226 if (vd->vdev_id != 0)
3227 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3228 } else if (pvd->vdev_ops != &vdev_spare_ops) {
3229 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3233 ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
3234 spa_version(spa) >= SPA_VERSION_SPARES);
3237 * Only mirror, replacing, and spare vdevs support detach.
3239 if (pvd->vdev_ops != &vdev_replacing_ops &&
3240 pvd->vdev_ops != &vdev_mirror_ops &&
3241 pvd->vdev_ops != &vdev_spare_ops)
3242 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3245 * If this device has the only valid copy of some data,
3246 * we cannot safely detach it.
3248 if (vdev_dtl_required(vd))
3249 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
3251 ASSERT(pvd->vdev_children >= 2);
3254 * If we are detaching the second disk from a replacing vdev, then
3255 * check to see if we changed the original vdev's path to have "/old"
3256 * at the end in spa_vdev_attach(). If so, undo that change now.
3258 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 &&
3259 pvd->vdev_child[0]->vdev_path != NULL &&
3260 pvd->vdev_child[1]->vdev_path != NULL) {
3261 ASSERT(pvd->vdev_child[1] == vd);
3262 cvd = pvd->vdev_child[0];
3263 len = strlen(vd->vdev_path);
3264 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
3265 strcmp(cvd->vdev_path + len, "/old") == 0) {
3266 spa_strfree(cvd->vdev_path);
3267 cvd->vdev_path = spa_strdup(vd->vdev_path);
3272 * If we are detaching the original disk from a spare, then it implies
3273 * that the spare should become a real disk, and be removed from the
3274 * active spare list for the pool.
3276 if (pvd->vdev_ops == &vdev_spare_ops &&
3277 vd->vdev_id == 0 && pvd->vdev_child[1]->vdev_isspare)
3281 * Erase the disk labels so the disk can be used for other things.
3282 * This must be done after all other error cases are handled,
3283 * but before we disembowel vd (so we can still do I/O to it).
3284 * But if we can't do it, don't treat the error as fatal --
3285 * it may be that the unwritability of the disk is the reason
3286 * it's being detached!
3288 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
3291 * Remove vd from its parent and compact the parent's children.
3293 vdev_remove_child(pvd, vd);
3294 vdev_compact_children(pvd);
3297 * Remember one of the remaining children so we can get tvd below.
3299 cvd = pvd->vdev_child[0];
3302 * If we need to remove the remaining child from the list of hot spares,
3303 * do it now, marking the vdev as no longer a spare in the process.
3304 * We must do this before vdev_remove_parent(), because that can
3305 * change the GUID if it creates a new toplevel GUID. For a similar
3306 * reason, we must remove the spare now, in the same txg as the detach;
3307 * otherwise someone could attach a new sibling, change the GUID, and
3308 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
3311 ASSERT(cvd->vdev_isspare);
3312 spa_spare_remove(cvd);
3313 unspare_guid = cvd->vdev_guid;
3314 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
3318 * If the parent mirror/replacing vdev only has one child,
3319 * the parent is no longer needed. Remove it from the tree.
3321 if (pvd->vdev_children == 1)
3322 vdev_remove_parent(cvd);
3325 * We don't set tvd until now because the parent we just removed
3326 * may have been the previous top-level vdev.
3328 tvd = cvd->vdev_top;
3329 ASSERT(tvd->vdev_parent == rvd);
3332 * Reevaluate the parent vdev state.
3334 vdev_propagate_state(cvd);
3337 * If the device we just detached was smaller than the others, it may be
3338 * possible to add metaslabs (i.e. grow the pool). vdev_metaslab_init()
3339 * can't fail because the existing metaslabs are already in core, so
3340 * there's nothing to read from disk.
3342 VERIFY(vdev_metaslab_init(tvd, txg) == 0);
3344 vdev_config_dirty(tvd);
3347 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
3348 * vd->vdev_detached is set and free vd's DTL object in syncing context.
3349 * But first make sure we're not on any *other* txg's DTL list, to
3350 * prevent vd from being accessed after it's freed.
3352 for (int t = 0; t < TXG_SIZE; t++)
3353 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
3354 vd->vdev_detached = B_TRUE;
3355 vdev_dirty(tvd, VDD_DTL, vd, txg);
3357 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
3359 error = spa_vdev_exit(spa, vd, txg, 0);
3362 * If this was the removal of the original device in a hot spare vdev,
3363 * then we want to go through and remove the device from the hot spare
3364 * list of every other pool.
3369 mutex_enter(&spa_namespace_lock);
3370 while ((spa = spa_next(spa)) != NULL) {
3371 if (spa->spa_state != POOL_STATE_ACTIVE)
3375 spa_open_ref(spa, FTAG);
3376 mutex_exit(&spa_namespace_lock);
3377 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
3378 mutex_enter(&spa_namespace_lock);
3379 spa_close(spa, FTAG);
3381 mutex_exit(&spa_namespace_lock);
3388 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
3390 for (int i = 0; i < count; i++) {
3393 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
3396 if (guid == target_guid)
3404 spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
3405 nvlist_t *dev_to_remove)
3407 nvlist_t **newdev = NULL;
3410 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
3412 for (int i = 0, j = 0; i < count; i++) {
3413 if (dev[i] == dev_to_remove)
3415 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
3418 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
3419 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
3421 for (int i = 0; i < count - 1; i++)
3422 nvlist_free(newdev[i]);
3425 kmem_free(newdev, (count - 1) * sizeof (void *));
3429 * Remove a device from the pool. Currently, this supports removing only hot
3430 * spares and level 2 ARC devices.
3433 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
3436 nvlist_t **spares, **l2cache, *nv;
3437 uint_t nspares, nl2cache;
3440 boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
3443 txg = spa_vdev_enter(spa);
3445 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
3447 if (spa->spa_spares.sav_vdevs != NULL &&
3448 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
3449 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
3450 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
3452		 * Only remove the hot spare if it's not currently in use in this pool.
3455 if (vd == NULL || unspare) {
3456 spa_vdev_remove_aux(spa->spa_spares.sav_config,
3457 ZPOOL_CONFIG_SPARES, spares, nspares, nv);
3458 spa_load_spares(spa);
3459 spa->spa_spares.sav_sync = B_TRUE;
3463 } else if (spa->spa_l2cache.sav_vdevs != NULL &&
3464 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
3465 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
3466 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
3468 * Cache devices can always be removed.
3470 spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
3471 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
3472 spa_load_l2cache(spa);
3473 spa->spa_l2cache.sav_sync = B_TRUE;
3474 } else if (vd != NULL) {
3476 * Normal vdevs cannot be removed (yet).
3481 * There is no vdev of any kind with the specified guid.
3487 return (spa_vdev_exit(spa, NULL, txg, error));
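
/*
 * Illustrative sketch (not original code): removing an aux device by
 * guid. The 'unspare' flag matters only for hot spares shared with
 * other pools.
 */
#if 0	/* example only */
	uint64_t cache_guid;	/* hypothetical l2cache device guid */

	(void) spa_vdev_remove(spa, cache_guid, B_FALSE);
#endif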
3493 * Find any device that's done replacing, or a vdev marked 'unspare' that's
3494 * currently spared, so we can detach it.
3497 spa_vdev_resilver_done_hunt(vdev_t *vd)
3499 vdev_t *newvd, *oldvd;
3502 for (c = 0; c < vd->vdev_children; c++) {
3503 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
3509 * Check for a completed replacement.
3511 if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) {
3512 oldvd = vd->vdev_child[0];
3513 newvd = vd->vdev_child[1];
3515 if (vdev_dtl_empty(newvd, DTL_MISSING) &&
3516 !vdev_dtl_required(oldvd))
3521 * Check for a completed resilver with the 'unspare' flag set.
3523 if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) {
3524 newvd = vd->vdev_child[0];
3525 oldvd = vd->vdev_child[1];
3527 if (newvd->vdev_unspare &&
3528 vdev_dtl_empty(newvd, DTL_MISSING) &&
3529 !vdev_dtl_required(oldvd)) {
3530 newvd->vdev_unspare = 0;
3539 spa_vdev_resilver_done(spa_t *spa)
3541 vdev_t *vd, *pvd, *ppvd;
3542 uint64_t guid, sguid, pguid, ppguid;
3544 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3546 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
3547 pvd = vd->vdev_parent;
3548 ppvd = pvd->vdev_parent;
3549 guid = vd->vdev_guid;
3550 pguid = pvd->vdev_guid;
3551 ppguid = ppvd->vdev_guid;
3554 * If we have just finished replacing a hot spared device, then
3555		 * we need to detach the parent's first child (the original hot spare).
3558 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0) {
3559 ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
3560 ASSERT(ppvd->vdev_children == 2);
3561 sguid = ppvd->vdev_child[1]->vdev_guid;
3563 spa_config_exit(spa, SCL_ALL, FTAG);
3564 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
3566 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
3568 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3571 spa_config_exit(spa, SCL_ALL, FTAG);
3575 * Update the stored path or FRU for this vdev. Dirty the vdev configuration,
3576 * relying on spa_vdev_enter/exit() to synchronize the labels and cache.
3579 spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
3585 txg = spa_vdev_enter(spa);
3587 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
3588 return (spa_vdev_exit(spa, NULL, txg, ENOENT));
3590 if (!vd->vdev_ops->vdev_op_leaf)
3591 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3594 spa_strfree(vd->vdev_path);
3595 vd->vdev_path = spa_strdup(value);
3597 if (vd->vdev_fru != NULL)
3598 spa_strfree(vd->vdev_fru);
3599 vd->vdev_fru = spa_strdup(value);
3602 vdev_config_dirty(vd->vdev_top);
3604 return (spa_vdev_exit(spa, NULL, txg, 0));
3608 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
3610 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
3614 spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
3616 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
3620 * ==========================================================================
3621 * SPA Scrubbing
3622 * ==========================================================================
3626 spa_scrub(spa_t *spa, pool_scrub_type_t type)
3628 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
3630 if ((uint_t)type >= POOL_SCRUB_TYPES)
3634 * If a resilver was requested, but there is no DTL on a
3635 * writeable leaf device, we have nothing to do.
3637 if (type == POOL_SCRUB_RESILVER &&
3638 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
3639 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
3643 if (type == POOL_SCRUB_EVERYTHING &&
3644 spa->spa_dsl_pool->dp_scrub_func != SCRUB_FUNC_NONE &&
3645 spa->spa_dsl_pool->dp_scrub_isresilver)
3648 if (type == POOL_SCRUB_EVERYTHING || type == POOL_SCRUB_RESILVER) {
3649 return (dsl_pool_scrub_clean(spa->spa_dsl_pool));
3650 } else if (type == POOL_SCRUB_NONE) {
3651 return (dsl_pool_scrub_cancel(spa->spa_dsl_pool));
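
/*
 * Illustrative sketch (not original code): starting and cancelling a
 * scrub from a context that holds no SCL locks, per the ASSERT above.
 */
#if 0	/* example only */
	VERIFY(spa_scrub(spa, POOL_SCRUB_EVERYTHING) == 0);	/* start */
	VERIFY(spa_scrub(spa, POOL_SCRUB_NONE) == 0);		/* cancel */
#endif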
3658 * ==========================================================================
3659 * SPA async task processing
3660 * ==========================================================================
3664 spa_async_remove(spa_t *spa, vdev_t *vd)
3666 if (vd->vdev_remove_wanted) {
3667 vd->vdev_remove_wanted = 0;
3668 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
3671 * We want to clear the stats, but we don't want to do a full
3672 * vdev_clear() as that will cause us to throw away
3673 * degraded/faulted state as well as attempt to reopen the
3674 * device, all of which is a waste.
3676 vd->vdev_stat.vs_read_errors = 0;
3677 vd->vdev_stat.vs_write_errors = 0;
3678 vd->vdev_stat.vs_checksum_errors = 0;
3680 vdev_state_dirty(vd->vdev_top);
3683 for (int c = 0; c < vd->vdev_children; c++)
3684 spa_async_remove(spa, vd->vdev_child[c]);
3688 spa_async_probe(spa_t *spa, vdev_t *vd)
3690 if (vd->vdev_probe_wanted) {
3691 vd->vdev_probe_wanted = 0;
3692 vdev_reopen(vd); /* vdev_open() does the actual probe */
3695 for (int c = 0; c < vd->vdev_children; c++)
3696 spa_async_probe(spa, vd->vdev_child[c]);
3700 spa_async_thread(void *arg)
3705 ASSERT(spa->spa_sync_on);
3707 mutex_enter(&spa->spa_async_lock);
3708 tasks = spa->spa_async_tasks;
3709 spa->spa_async_tasks = 0;
3710 mutex_exit(&spa->spa_async_lock);
3713 * See if the config needs to be updated.
3715 if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
3716 mutex_enter(&spa_namespace_lock);
3717 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
3718 mutex_exit(&spa_namespace_lock);
3722 * See if any devices need to be marked REMOVED.
3724 if (tasks & SPA_ASYNC_REMOVE) {
3725 spa_vdev_state_enter(spa);
3726 spa_async_remove(spa, spa->spa_root_vdev);
3727 for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
3728 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
3729 for (int i = 0; i < spa->spa_spares.sav_count; i++)
3730 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
3731 (void) spa_vdev_state_exit(spa, NULL, 0);
3735 * See if any devices need to be probed.
3737 if (tasks & SPA_ASYNC_PROBE) {
3738 spa_vdev_state_enter(spa);
3739 spa_async_probe(spa, spa->spa_root_vdev);
3740 (void) spa_vdev_state_exit(spa, NULL, 0);
3744 * If any devices are done replacing, detach them.
3746 if (tasks & SPA_ASYNC_RESILVER_DONE)
3747 spa_vdev_resilver_done(spa);
3750 * Kick off a resilver.
3752 if (tasks & SPA_ASYNC_RESILVER)
3753 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER) == 0);
3756 * Let the world know that we're done.
3758 mutex_enter(&spa->spa_async_lock);
3759 spa->spa_async_thread = NULL;
3760 cv_broadcast(&spa->spa_async_cv);
3761 mutex_exit(&spa->spa_async_lock);
3766 spa_async_suspend(spa_t *spa)
3768 mutex_enter(&spa->spa_async_lock);
3769 spa->spa_async_suspended++;
3770 while (spa->spa_async_thread != NULL)
3771 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
3772 mutex_exit(&spa->spa_async_lock);
3776 spa_async_resume(spa_t *spa)
3778 mutex_enter(&spa->spa_async_lock);
3779 ASSERT(spa->spa_async_suspended != 0);
3780 spa->spa_async_suspended--;
3781 mutex_exit(&spa->spa_async_lock);
3785 spa_async_dispatch(spa_t *spa)
3787 mutex_enter(&spa->spa_async_lock);
3788 if (spa->spa_async_tasks && !spa->spa_async_suspended &&
3789 spa->spa_async_thread == NULL &&
3790 rootdir != NULL && !vn_is_readonly(rootdir))
3791 spa->spa_async_thread = thread_create(NULL, 0,
3792 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
3793 mutex_exit(&spa->spa_async_lock);
3797 spa_async_request(spa_t *spa, int task)
3799 mutex_enter(&spa->spa_async_lock);
3800 spa->spa_async_tasks |= task;
3801 mutex_exit(&spa->spa_async_lock);
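
/*
 * Illustrative sketch (not original code): producers just OR in a task
 * bit; the work happens later, when spa_sync() calls
 * spa_async_dispatch() and spa_async_thread() runs.
 */
#if 0	/* example only */
	spa_async_request(spa, SPA_ASYNC_RESILVER);
	/* ... the next spa_sync() dispatches spa_async_thread() ... */
#endif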
3805 * ==========================================================================
3806 * SPA syncing routines
3807 * ==========================================================================
3811 spa_sync_deferred_frees(spa_t *spa, uint64_t txg)
3813 bplist_t *bpl = &spa->spa_sync_bplist;
3821 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
3823 while (bplist_iterate(bpl, &itor, &blk) == 0) {
3824 ASSERT(blk.blk_birth < txg);
3825 zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL,
3826 ZIO_FLAG_MUSTSUCCEED));
3829 error = zio_wait(zio);
3830 ASSERT3U(error, ==, 0);
3832 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
3833 bplist_vacate(bpl, tx);
3836 * Pre-dirty the first block so we sync to convergence faster.
3837 * (Usually only the first block is needed.)
3839 dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx);
3844 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
3846 char *packed = NULL;
3851 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
3854 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
3855 * information. This avoids the dbuf_will_dirty() path and
3856 * saves us a pre-read to get data we don't actually care about.
3858 bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE);
3859 packed = kmem_alloc(bufsize, KM_SLEEP);
3861 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
3863 bzero(packed + nvsize, bufsize - nvsize);
3865 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
3867 kmem_free(packed, bufsize);
3869 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
3870 dmu_buf_will_dirty(db, tx);
3871 *(uint64_t *)db->db_data = nvsize;
3872 dmu_buf_rele(db, FTAG);
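
/*
 * Illustrative sketch (not original code) of the rounding above: if
 * SPA_CONFIG_BLOCKSIZE were 16K (an assumption, not a statement of its
 * actual value), a 5000-byte packed nvlist would be written as one
 * full 16384-byte block, with the tail zeroed by the bzero() above.
 */
#if 0	/* example only */
	size_t nvsize = 5000;
	size_t bufsize = P2ROUNDUP(nvsize, (size_t)16384);	/* 16384 */
#endif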
3876 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
3877 const char *config, const char *entry)
3887 * Update the MOS nvlist describing the list of available devices.
3888 * spa_validate_aux() will have already made sure this nvlist is
3889 * valid and the vdevs are labeled appropriately.
3891 if (sav->sav_object == 0) {
3892 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
3893 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
3894 sizeof (uint64_t), tx);
3895 VERIFY(zap_update(spa->spa_meta_objset,
3896 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
3897 &sav->sav_object, tx) == 0);
3900 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3901 if (sav->sav_count == 0) {
3902 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
3904 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
3905 for (i = 0; i < sav->sav_count; i++)
3906 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
3907 B_FALSE, B_FALSE, B_TRUE);
3908 VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
3909 sav->sav_count) == 0);
3910 for (i = 0; i < sav->sav_count; i++)
3911 nvlist_free(list[i]);
3912 kmem_free(list, sav->sav_count * sizeof (void *));
3915 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
3916 nvlist_free(nvroot);
3918 sav->sav_sync = B_FALSE;
3922 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
3926 if (list_is_empty(&spa->spa_config_dirty_list))
3929 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
3931 config = spa_config_generate(spa, spa->spa_root_vdev,
3932 dmu_tx_get_txg(tx), B_FALSE);
3934 spa_config_exit(spa, SCL_STATE, FTAG);
3936 if (spa->spa_config_syncing)
3937 nvlist_free(spa->spa_config_syncing);
3938 spa->spa_config_syncing = config;
3940 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
3944 * Set zpool properties.
3947 spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
3950 objset_t *mos = spa->spa_meta_objset;
3951 nvlist_t *nvp = arg2;
3956 const char *propname;
3957 zprop_type_t proptype;
3959 mutex_enter(&spa->spa_props_lock);
3962 while ((elem = nvlist_next_nvpair(nvp, elem))) {
3963 switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
3964 case ZPOOL_PROP_VERSION:
3966 * Only set version for non-zpool-creation cases
3967 * (set/import). spa_create() needs special care
3968 * for version setting.
3970 if (tx->tx_txg != TXG_INITIAL) {
3971 VERIFY(nvpair_value_uint64(elem,
3973 ASSERT(intval <= SPA_VERSION);
3974 ASSERT(intval >= spa_version(spa));
3975 spa->spa_uberblock.ub_version = intval;
3976 vdev_config_dirty(spa->spa_root_vdev);
3980 case ZPOOL_PROP_ALTROOT:
3982 * 'altroot' is a non-persistent property. It should
3983 * have been set temporarily at creation or import time.
3985 ASSERT(spa->spa_root != NULL);
3988 case ZPOOL_PROP_CACHEFILE:
3990			 * 'cachefile' is also a non-persistent property.
3995 * Set pool property values in the poolprops mos object.
3997 if (spa->spa_pool_props_object == 0) {
3998 objset_t *mos = spa->spa_meta_objset;
4000 VERIFY((spa->spa_pool_props_object =
4001 zap_create(mos, DMU_OT_POOL_PROPS,
4002 DMU_OT_NONE, 0, tx)) > 0);
4004 VERIFY(zap_update(mos,
4005 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
4006 8, 1, &spa->spa_pool_props_object, tx)
4010 /* normalize the property name */
4011 propname = zpool_prop_to_name(prop);
4012 proptype = zpool_prop_get_type(prop);
4014 if (nvpair_type(elem) == DATA_TYPE_STRING) {
4015 ASSERT(proptype == PROP_TYPE_STRING);
4016 VERIFY(nvpair_value_string(elem, &strval) == 0);
4017 VERIFY(zap_update(mos,
4018 spa->spa_pool_props_object, propname,
4019 1, strlen(strval) + 1, strval, tx) == 0);
4021 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
4022 VERIFY(nvpair_value_uint64(elem, &intval) == 0);
4024 if (proptype == PROP_TYPE_INDEX) {
4026 VERIFY(zpool_prop_index_to_string(
4027 prop, intval, &unused) == 0);
4029 VERIFY(zap_update(mos,
4030 spa->spa_pool_props_object, propname,
4031 8, 1, &intval, tx) == 0);
4033 ASSERT(0); /* not allowed */
4037 case ZPOOL_PROP_DELEGATION:
4038 spa->spa_delegation = intval;
4040 case ZPOOL_PROP_BOOTFS:
4041 spa->spa_bootfs = intval;
4043 case ZPOOL_PROP_FAILUREMODE:
4044 spa->spa_failmode = intval;
4051 /* log internal history if this is not a zpool create */
4052 if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY &&
4053 tx->tx_txg != TXG_INITIAL) {
4054 spa_history_internal_log(LOG_POOL_PROPSET,
4055 spa, tx, cr, "%s %lld %s",
4056 nvpair_name(elem), intval, spa_name(spa));
4060 mutex_exit(&spa->spa_props_lock);
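
/*
 * Illustrative sketch (not original code): the nvlist handed to this
 * sync task pairs property names with values, e.g. as assembled by the
 * caller of spa_prop_set().
 */
#if 0	/* example only */
	nvlist_t *nvp;

	VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(nvp,
	    zpool_prop_to_name(ZPOOL_PROP_DELEGATION), 1) == 0);
	/* ... dispatched so that spa_sync_props() runs in syncing context ... */
	nvlist_free(nvp);
#endif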
4064 * Sync the specified transaction group. New blocks may be dirtied as
4065 * part of the process, so we iterate until it converges.
4068 spa_sync(spa_t *spa, uint64_t txg)
4070 dsl_pool_t *dp = spa->spa_dsl_pool;
4071 objset_t *mos = spa->spa_meta_objset;
4072 bplist_t *bpl = &spa->spa_sync_bplist;
4073 vdev_t *rvd = spa->spa_root_vdev;
4080 * Lock out configuration changes.
4082 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4084 spa->spa_syncing_txg = txg;
4085 spa->spa_sync_pass = 0;
4088 * If there are any pending vdev state changes, convert them
4089 * into config changes that go out with this transaction group.
4091 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4092 while (list_head(&spa->spa_state_dirty_list) != NULL) {
4094 * We need the write lock here because, for aux vdevs,
4095 * calling vdev_config_dirty() modifies sav_config.
4096 * This is ugly and will become unnecessary when we
4097 * eliminate the aux vdev wart by integrating all vdevs
4098 * into the root vdev tree.
4100 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
4101 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
4102 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
4103 vdev_state_clean(vd);
4104 vdev_config_dirty(vd);
4106 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
4107 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
4109 spa_config_exit(spa, SCL_STATE, FTAG);
4111 VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj));
4113 tx = dmu_tx_create_assigned(dp, txg);
4116 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
4117 * set spa_deflate if we have no raid-z vdevs.
4119 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
4120 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
4123 for (i = 0; i < rvd->vdev_children; i++) {
4124 vd = rvd->vdev_child[i];
4125 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
4128 if (i == rvd->vdev_children) {
4129 spa->spa_deflate = TRUE;
4130 VERIFY(0 == zap_add(spa->spa_meta_objset,
4131 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
4132 sizeof (uint64_t), 1, &spa->spa_deflate, tx));
4136 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
4137 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
4138 dsl_pool_create_origin(dp, tx);
4140 /* Keeping the origin open increases spa_minref */
4141 spa->spa_minref += 3;
4144 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
4145 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
4146 dsl_pool_upgrade_clones(dp, tx);
4150 * If anything has changed in this txg, push the deferred frees
4151 * from the previous txg. If not, leave them alone so that we
4152 * don't generate work on an otherwise idle system.
4154 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
4155 !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
4156 !txg_list_empty(&dp->dp_sync_tasks, txg))
4157 spa_sync_deferred_frees(spa, txg);
4160 * Iterate to convergence.
4163 spa->spa_sync_pass++;
4165 spa_sync_config_object(spa, tx);
4166 spa_sync_aux_dev(spa, &spa->spa_spares, tx,
4167 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
4168 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
4169 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
4170 spa_errlog_sync(spa, txg);
4171 dsl_pool_sync(dp, txg);
4174 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) {
4179 bplist_sync(bpl, tx);
4180 } while (dirty_vdevs);
4184 dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);
4187 * Rewrite the vdev configuration (which includes the uberblock)
4188 * to commit the transaction group.
4190 * If there are no dirty vdevs, we sync the uberblock to a few
4191 * random top-level vdevs that are known to be visible in the
4192 * config cache (see spa_vdev_add() for a complete description).
4193 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
4197 * We hold SCL_STATE to prevent vdev open/close/etc.
4198 * while we're attempting to write the vdev labels.
4200 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4202 if (list_is_empty(&spa->spa_config_dirty_list)) {
4203 vdev_t *svd[SPA_DVAS_PER_BP];
4205 int children = rvd->vdev_children;
4206 int c0 = spa_get_random(children);
4209 for (c = 0; c < children; c++) {
4210 vd = rvd->vdev_child[(c0 + c) % children];
4211 if (vd->vdev_ms_array == 0 || vd->vdev_islog)
4213 svd[svdcount++] = vd;
4214 if (svdcount == SPA_DVAS_PER_BP)
4217 error = vdev_config_sync(svd, svdcount, txg);
4219 error = vdev_config_sync(rvd->vdev_child,
4220 rvd->vdev_children, txg);
4223 spa_config_exit(spa, SCL_STATE, FTAG);
4227 zio_suspend(spa, NULL);
4228 zio_resume_wait(spa);
4233 * Clear the dirty config list.
4235 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
4236 vdev_config_clean(vd);
4239 * Now that the new config has synced transactionally,
4240 * let it become visible to the config cache.
4242 if (spa->spa_config_syncing != NULL) {
4243 spa_config_set(spa, spa->spa_config_syncing);
4244 spa->spa_config_txg = txg;
4245 spa->spa_config_syncing = NULL;
4248 spa->spa_ubsync = spa->spa_uberblock;
4251 * Clean up the ZIL records for the synced txg.
4253 dsl_pool_zil_clean(dp);
4256 * Update usable space statistics.
4258 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
4259 vdev_sync_done(vd, txg);
4262 * It had better be the case that we didn't dirty anything
4263 * since vdev_config_sync().
4265 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
4266 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
4267 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
4268 ASSERT(bpl->bpl_queue == NULL);
4270 spa_config_exit(spa, SCL_CONFIG, FTAG);
4273 * If any async tasks have been requested, kick them off.
4275 spa_async_dispatch(spa);
4279 * Sync all pools. We don't want to hold the namespace lock across these
4280 * operations, so we take a reference on the spa_t and drop the lock while syncing.
4284 spa_sync_allpools(void)
4287 mutex_enter(&spa_namespace_lock);
4288 while ((spa = spa_next(spa)) != NULL) {
4289 if (spa_state(spa) != POOL_STATE_ACTIVE || spa_suspended(spa))
4291 spa_open_ref(spa, FTAG);
4292 mutex_exit(&spa_namespace_lock);
4293 txg_wait_synced(spa_get_dsl(spa), 0);
4294 mutex_enter(&spa_namespace_lock);
4295 spa_close(spa, FTAG);
4297 mutex_exit(&spa_namespace_lock);
4301 * ==========================================================================
4302 * Miscellaneous routines
4303 * ==========================================================================
4307 * Remove all pools in the system.
4315 * Remove all cached state. All pools should be closed now,
4316 * so every spa in the AVL tree should be unreferenced.
4318 mutex_enter(&spa_namespace_lock);
4319 while ((spa = spa_next(NULL)) != NULL) {
4321 * Stop async tasks. The async thread may need to detach
4322 * a device that's been replaced, which requires grabbing
4323 * spa_namespace_lock, so we must drop it here.
4325 spa_open_ref(spa, FTAG);
4326 mutex_exit(&spa_namespace_lock);
4327 spa_async_suspend(spa);
4328 mutex_enter(&spa_namespace_lock);
4329 spa_close(spa, FTAG);
4331 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4333 spa_deactivate(spa);
4337 mutex_exit(&spa_namespace_lock);
4341 spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
4346 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
4350 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
4351 vd = spa->spa_l2cache.sav_vdevs[i];
4352 if (vd->vdev_guid == guid)
4356 for (i = 0; i < spa->spa_spares.sav_count; i++) {
4357 vd = spa->spa_spares.sav_vdevs[i];
4358 if (vd->vdev_guid == guid)
4367 spa_upgrade(spa_t *spa, uint64_t version)
4369 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4372 * This should only be called for a non-faulted pool, and since a
4373	 * future version would result in an unopenable pool, this shouldn't be possible.
4376 ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
4377 ASSERT(version >= spa->spa_uberblock.ub_version);
4379 spa->spa_uberblock.ub_version = version;
4380 vdev_config_dirty(spa->spa_root_vdev);
4382 spa_config_exit(spa, SCL_ALL, FTAG);
4384 txg_wait_synced(spa_get_dsl(spa), 0);
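
/*
 * Illustrative sketch (not original code): upgrading a pool to the
 * newest on-disk version this software supports.
 */
#if 0	/* example only */
	spa_upgrade(spa, SPA_VERSION);
#endif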
4388 spa_has_spare(spa_t *spa, uint64_t guid)
4392 spa_aux_vdev_t *sav = &spa->spa_spares;
4394 for (i = 0; i < sav->sav_count; i++)
4395 if (sav->sav_vdevs[i]->vdev_guid == guid)
4398 for (i = 0; i < sav->sav_npending; i++) {
4399 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
4400 &spareguid) == 0 && spareguid == guid)
4408 * Check if a pool has an active shared spare device.
4409 * Note: reference count of an active spare is 2, as a spare and as a replace
4412 spa_has_active_shared_spare(spa_t *spa)
4416 spa_aux_vdev_t *sav = &spa->spa_spares;
4418 for (i = 0; i < sav->sav_count; i++) {
4419 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
4420 &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
4429 * Post a sysevent corresponding to the given event. The 'name' must be one of
4430 * the event definitions in sys/sysevent/eventdefs.h. The payload will be
4431 * filled in from the spa and (optionally) the vdev. This doesn't do anything
4432 * in the userland libzpool, as we don't want consumers to misinterpret ztest
4433 * or zdb as real changes.
4436 spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
4441 sysevent_attr_list_t *attr = NULL;
4442 sysevent_value_t value;
4445 ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
4448 value.value_type = SE_DATA_TYPE_STRING;
4449 value.value.sv_string = spa_name(spa);
4450 if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
4453 value.value_type = SE_DATA_TYPE_UINT64;
4454 value.value.sv_uint64 = spa_guid(spa);
4455 if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
4459 value.value_type = SE_DATA_TYPE_UINT64;
4460 value.value.sv_uint64 = vd->vdev_guid;
4461 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
4465 if (vd->vdev_path) {
4466 value.value_type = SE_DATA_TYPE_STRING;
4467 value.value.sv_string = vd->vdev_path;
4468 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
4469 &value, SE_SLEEP) != 0)
4474 if (sysevent_attach_attributes(ev, attr) != 0)
4478 (void) log_sysevent(ev, SE_SLEEP, &eid);
4482 sysevent_free_attr(attr);