/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/callb.h>
#include <sys/sunddi.h>
#include <sys/spa_boot.h>

#include "zfs_comutil.h"

/* Check hostid on import? */
static int check_hostid = 1;

SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.check_hostid", &check_hostid);
SYSCTL_INT(_vfs_zfs, OID_AUTO, check_hostid, CTLFLAG_RW, &check_hostid, 0,
    "Check hostid on import?");

enum zti_modes {
	zti_mode_fixed,			/* value is # of threads (min 1) */
	zti_mode_online_percent,	/* value is % of online CPUs */
	zti_mode_tune,			/* fill from zio_taskq_tune_* */
	zti_mode_null,			/* don't create a taskq */
	zti_nmodes
};

#define	ZTI_FIX(n)	{ zti_mode_fixed, (n) }
#define	ZTI_PCT(n)	{ zti_mode_online_percent, (n) }
#define	ZTI_TUNE	{ zti_mode_tune, 0 }
#define	ZTI_NULL	{ zti_mode_null, 0 }

#define	ZTI_ONE		ZTI_FIX(1)

typedef struct zio_taskq_info {
	enum zti_modes zti_mode;
	uint_t zti_value;
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
	"issue", "issue_high", "intr", "intr_high"
};

/*
 * Define the taskq threads for the following I/O types:
 *	NULL, READ, WRITE, FREE, CLAIM, and IOCTL
 */
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL },
	{ ZTI_FIX(8),	ZTI_NULL,	ZTI_TUNE,	ZTI_NULL },
	{ ZTI_TUNE,	ZTI_FIX(5),	ZTI_FIX(8),	ZTI_FIX(5) },
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL },
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL },
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL },
};

enum zti_modes zio_taskq_tune_mode = zti_mode_online_percent;
uint_t zio_taskq_tune_value = 80;	/* #threads = 80% of # online CPUs */

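/*
 * Reading the table above (illustrative, no new mechanism): the WRITE row
 * { ZTI_TUNE, ZTI_FIX(5), ZTI_FIX(8), ZTI_FIX(5) } asks for a tunable-sized
 * "issue" taskq (by default 80% of online CPUs, per the zio_taskq_tune_*
 * defaults above), 5 "issue_high" threads, 8 "intr" threads and
 * 5 "intr_high" threads.  ZTI_NULL entries create no taskq at all for that
 * type/queue combination.
 */
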
static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}

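/*
 * Resulting shape (illustrative): for ZPOOL_PROP_SIZE with intval 100, the
 * outer nvlist gains an entry "size" -> { ZPROP_SOURCE = <src>,
 * ZPROP_VALUE = 100 }.  nvlist_add_nvlist() stores a copy of propval,
 * which is why it is safe to free propval immediately afterwards.
 */
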
/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	uint64_t size;
	uint64_t used;
	uint64_t cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	if (spa->spa_root_vdev != NULL) {
		size = spa_get_space(spa);
		used = spa_get_alloc(spa);
		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL,
		    size - used, src);

		cap = (size == 0) ? 0 : (used * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
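
		/*
		 * Worked example (illustrative): used = 3 units of
		 * size = 4 units gives cap = 3 * 100 / 4 = 75 percent;
		 * the integer division truncates, it does not round.
		 */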

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    spa->spa_root_vdev->vdev_state, src);

		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
			src = ZPROP_SRC_DEFAULT;
		else
			src = ZPROP_SRC_LOCAL;
		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	objset_t *mos = spa->spa_meta_objset;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more prop to get. */
	if (spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				rw_enter(&dp->dp_config_rwlock, RW_READER);
				if (err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds)) {
					rw_exit(&dp->dp_config_rwlock);
					break;
				}

				strval = kmem_alloc(
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				rw_exit(&dp->dp_config_rwlock);
			} else {
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval,
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);

	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		zpool_prop_t prop;
		char *propname, *strval;
		uint64_t intval;
		objset_t *os;
		char *slash;

		propname = nvpair_name(elem);

		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL)
			return (EINVAL);

		switch (prop) {
		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) || intval > SPA_VERSION))
				error = EINVAL;
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = EINVAL;
			break;

		case ZPOOL_PROP_BOOTFS:
			/*
			 * If the pool version is less than SPA_VERSION_BOOTFS,
			 * or the pool is still being created (version == 0),
			 * the bootfs property cannot be set.
			 */
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = ENOTSUP;
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = ENOTSUP;
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				uint64_t compress;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				if (error = dmu_objset_open(strval, DMU_OST_ZFS,
				    DS_MODE_USER | DS_MODE_READONLY, &os))
					break;

				/* We don't support gzip bootable datasets */
				if ((error = dsl_prop_get_integer(strval,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &compress, NULL)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(compress)) {
					error = ENOTSUP;
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_close(os);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = EINVAL;

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed.  This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked).  We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = EIO;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = EINVAL;
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = EINVAL;
			break;
		}

		if (error)
			break;
	}
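
	/*
	 * Informal recap of the cachefile rules enforced above: the empty
	 * string (use the default cache file), "none" (no cache file), or
	 * an absolute path whose final component is not empty, "." or "..".
	 */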

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}

void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
	char *cachefile;
	spa_config_dirent_t *dp;

	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
	    &cachefile) != 0)
		return;

	dp = kmem_alloc(sizeof (spa_config_dirent_t),
	    KM_SLEEP);

	if (cachefile[0] == '\0')
		dp->scd_path = spa_strdup(spa_config_path);
	else if (strcmp(cachefile, "none") == 0)
		dp->scd_path = NULL;
	else
		dp->scd_path = spa_strdup(cachefile);

	list_insert_head(&spa->spa_config_list, dp);
	if (need_sync)
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;
	nvpair_t *elem;
	boolean_t need_sync = B_FALSE;
	zpool_prop_t prop;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	elem = NULL;
	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
		if ((prop = zpool_name_to_prop(
		    nvpair_name(elem))) == ZPROP_INVAL)
			return (EINVAL);

		if (prop == ZPOOL_PROP_CACHEFILE || prop == ZPOOL_PROP_ALTROOT)
			continue;

		need_sync = B_TRUE;
		break;
	}

	if (need_sync)
		return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
		    spa, nvp, 3));
	else
		return (0);
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa, int mode)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_mode = mode;

	spa->spa_normal_class = metaslab_class_create(zfs_metaslab_ops);
	spa->spa_log_class = metaslab_class_create(zfs_metaslab_ops);

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
			enum zti_modes mode = ztip->zti_mode;
			uint_t value = ztip->zti_value;
			char name[32];

			(void) snprintf(name, sizeof (name),
			    "%s_%s", zio_type_name[t], zio_taskq_types[q]);

			if (mode == zti_mode_tune) {
				mode = zio_taskq_tune_mode;
				value = zio_taskq_tune_value;
				if (mode == zti_mode_tune)
					mode = zti_mode_online_percent;
			}

			switch (mode) {
			case zti_mode_fixed:
				ASSERT3U(value, >=, 1);
				value = MAX(value, 1);

				spa->spa_zio_taskq[t][q] = taskq_create(name,
				    value, maxclsyspri, 50, INT_MAX,
				    TASKQ_PREPOPULATE);
				break;

			case zti_mode_online_percent:
				spa->spa_zio_taskq[t][q] = taskq_create(name,
				    value, maxclsyspri, 50, INT_MAX,
				    TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
				break;

			case zti_mode_null:
				spa->spa_zio_taskq[t][q] = NULL;
				break;

			default:
				panic("unrecognized mode for "
				    "zio_taskqs[%u]->zti_nthreads[%u] (%u:%u) "
				    "in spa_activate()",
				    t, q, mode, value);
				break;
			}
		}
	}

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);
	ASSERT(spa->spa_async_zio_root == NULL);
	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_state_dirty_list);

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			if (spa->spa_zio_taskq[t][q] != NULL)
				taskq_destroy(spa->spa_zio_taskq[t][q]);
			spa->spa_zio_taskq[t][q] = NULL;
		}
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues.  Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately.  This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state.  This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t c, children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	for (c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	if (spa->spa_async_zio_root != NULL) {
		(void) zio_wait(spa->spa_async_zio_root);
		spa->spa_async_zio_root = NULL;
	}

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	for (i = 0; i < spa->spa_l2cache.sav_count; i++)
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;

	spa_config_exit(spa, SCL_ALL, FTAG);
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process.  For each spare, there is potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in).  During this phase we open and
	 * validate each vdev on the spare list.  If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev.  Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise).  Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;
		vd->vdev_aux = &spa->spa_spares;

		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid, size;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd)) {
				size = vdev_get_rsize(vd);
				l2arc_add_vdev(spa, vd,
				    VDEV_LABEL_START_SIZE,
				    size - VDEV_LABEL_START_SIZE);
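
				/*
				 * Note: VDEV_LABEL_START_SIZE skips the two
				 * front vdev labels and the boot block, so
				 * only the remainder of the device is handed
				 * to the L2ARC.
				 */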
			}
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL && l2arc_vdev_present(vd))
				l2arc_remove_vdev(vd);
			(void) vdev_close(vd);
			spa_l2cache_remove(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	kmem_free(l2cache, sav->sav_count * sizeof (void *));
}

static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
	    DMU_READ_PREFETCH);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}

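/*
 * Layout note (inferred from the code above, not new mechanism): a MOS
 * "packed nvlist" object stores its packed size as a uint64_t in the
 * dnode's bonus buffer, while the object body holds that many bytes of
 * packed (typically XDR-encoded) nvlist data, which nvlist_unpack()
 * turns back into an in-core nvlist_t.
 */
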
/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
 */
static void
spa_check_removed(vdev_t *vd)
{
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		spa_check_removed(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
		zfs_post_autoreplace(vd->vdev_spa, vd);
		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
	}
}

/*
 * Load the slog device state from the config object since it's possible
 * that the label does not contain the most up-to-date information.
 */
static void
spa_load_log_state(spa_t *spa)
{
	nvlist_t *nv, *nvroot, **child;
	uint64_t is_log;
	uint_t children, c;
	vdev_t *rvd = spa->spa_root_vdev;

	VERIFY(load_nvlist(spa, spa->spa_config_object, &nv) == 0);
	VERIFY(nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0);

	for (c = 0; c < children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];

		if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log) == 0 && is_log)
			vdev_load_log_state(tvd, child[c]);
	}
	nvlist_free(nv);
}

/*
 * Check for missing log devices
 */
static int
spa_check_logs(spa_t *spa)
{
	switch (spa->spa_log_state) {
	case SPA_LOG_MISSING:
		/* need to recheck in case slog has been restored */
	case SPA_LOG_UNKNOWN:
		if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL,
		    DS_FIND_CHILDREN)) {
			spa->spa_log_state = SPA_LOG_MISSING;
			return (1);
		}
		break;
	}
	return (0);
}

/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 */
static int
spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
{
	int error = 0;
	nvlist_t *nvroot = NULL;
	vdev_t *rvd;
	uberblock_t *ub = &spa->spa_uberblock;
	uint64_t config_cache_txg = spa->spa_config_txg;
	uint64_t pool_guid;
	uint64_t version;
	uint64_t autoreplace = 0;
	int orig_mode = spa->spa_mode;
	char *ereport = FM_EREPORT_ZFS_POOL;

	/*
	 * If this is an untrusted config, access the pool in read-only mode.
	 * This prevents things like resilvering recently removed devices.
	 */
	if (!mosconfig)
		spa->spa_mode = FREAD;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa->spa_load_state = state;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Versioning wasn't explicitly added to the label until later, so if
	 * it's not present treat it as the initial version.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
		version = SPA_VERSION_INITIAL;

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &spa->spa_config_txg);

	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0)) {
		error = EEXIST;
		goto out;
	}

	spa->spa_load_guid = pool_guid;

	/*
	 * Create "The Godfather" zio to hold all async IOs
	 */
	spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
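
	/*
	 * Cross-reference: spa_unload() does a zio_wait() on this root zio,
	 * so parenting async I/Os here guarantees they have all completed
	 * before the pool is torn down.
	 */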

	/*
	 * Parse the configuration into a vdev tree.  We explicitly set the
	 * value that will be returned by spa_version() since parsing the
	 * configuration requires knowing the version number.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	spa->spa_ubsync.ub_version = version;
	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0)
		goto out;

	ASSERT(spa->spa_root_vdev == rvd);
	ASSERT(spa_guid(spa) == pool_guid);

	/*
	 * Try to open all vdevs, loading each label in the process.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = vdev_open(rvd);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0)
		goto out;

	/*
	 * We need to validate the vdev labels against the configuration that
	 * we have in hand, which is dependent on the setting of mosconfig.  If
	 * mosconfig is true then we're validating the vdev labels based on
	 * that config.  Otherwise, we're validating against the cached config
	 * (zpool.cache) that was read when we loaded the zfs module, and then
	 * later we will recursively call spa_load() and validate against
	 * the vdev config.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = vdev_validate(rvd);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0)
		goto out;

	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Find the best uberblock.
	 */
	vdev_uberblock_load(NULL, rvd, ub);
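
	/*
	 * "Best" here means the valid uberblock with the highest txg; see
	 * vdev_uberblock_compare(), which breaks txg ties by timestamp.
	 */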

	/*
	 * If we weren't able to find a single valid uberblock, return failure.
	 */
	if (ub->ub_txg == 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = ENXIO;
		goto out;
	}

	/*
	 * If the pool is newer than the code, we can't open it.
	 */
	if (ub->ub_version > SPA_VERSION) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_VERSION_NEWER);
		error = ENOTSUP;
		goto out;
	}

	/*
	 * If the vdev guid sum doesn't match the uberblock, we have an
	 * incomplete configuration.
	 */
	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_GUID_SUM);
		error = ENXIO;
		goto out;
	}

	/*
	 * Initialize internal SPA structures.
	 */
	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_ubsync = spa->spa_uberblock;
	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
	if (error) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		goto out;
	}
	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (!mosconfig) {
		nvlist_t *newconfig;
		uint64_t hostid;

		if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		if (!spa_is_root(spa) && nvlist_lookup_uint64(newconfig,
		    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
			char *hostname;
			unsigned long myhostid = 0;

			VERIFY(nvlist_lookup_string(newconfig,
			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);

			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
			if (check_hostid && hostid != 0 && myhostid != 0 &&
			    (unsigned long)hostid != myhostid) {
				cmn_err(CE_WARN, "pool '%s' could not be "
				    "loaded as it was last accessed by "
				    "another system (host: %s hostid: 0x%lx). "
				    "See: http://www.sun.com/msg/ZFS-8000-EY",
				    spa_name(spa), hostname,
				    (unsigned long)hostid);
				error = EBADF;
				goto out;
			}
		}

		spa_config_set(spa, newconfig);
		spa_unload(spa);
		spa_deactivate(spa);
		spa_activate(spa, orig_mode);

		return (spa_load(spa, newconfig, state, B_TRUE));
	}

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the bit that tells us to use the new accounting function
	 * (raid-z deflation).  If we have an older pool, this will not
	 * be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the persistent error log.  If we have an older pool, this will
	 * not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the history object.  If we have an older pool, this
	 * will not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
	    sizeof (uint64_t), 1, &spa->spa_history);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load any hot spares for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
		if (load_nvlist(spa, spa->spa_spares.sav_object,
		    &spa->spa_spares.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
	}

	/*
	 * Load any level 2 ARC devices for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_L2CACHE, sizeof (uint64_t), 1,
	    &spa->spa_l2cache.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
		    &spa->spa_l2cache.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE,
			    VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
	}

	spa_load_log_state(spa);

	if (spa_check_logs(spa)) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LOG);
		error = ENXIO;
		ereport = FM_EREPORT_ZFS_LOG_REPLAY;
		goto out;
	}

	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);

	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);

	if (error && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (error == 0) {
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS),
		    sizeof (uint64_t), 1, &spa->spa_bootfs);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE),
		    sizeof (uint64_t), 1, &autoreplace);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_DELEGATION),
		    sizeof (uint64_t), 1, &spa->spa_delegation);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
		    sizeof (uint64_t), 1, &spa->spa_failmode);
	}

	/*
	 * If the 'autoreplace' property is set, then post a resource notifying
	 * the ZFS DE that it should not issue any faults for unopenable
	 * devices.  We also iterate over the vdevs, and post a sysevent for any
	 * unopenable vdevs so that the normal autoreplace handler can take
	 * over.
	 */
	if (autoreplace && state != SPA_LOAD_TRYIMPORT)
		spa_check_removed(spa->spa_root_vdev);

	/*
	 * Load the vdev state for all toplevel vdevs.
	 */
	vdev_load(rvd);

	/*
	 * Propagate the leaf DTLs we just loaded all the way up the tree.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * Check the state of the root vdev.  If it can't be opened, it
	 * indicates one or more toplevel vdevs are faulted.
	 */
	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	if (spa_writeable(spa)) {
		dmu_tx_t *tx;
		int need_update = B_FALSE;

		ASSERT(state != SPA_LOAD_TRYIMPORT);

		/*
		 * Claim log blocks that haven't been committed yet.
		 * This must all happen in a single txg.
		 */
		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		(void) dmu_objset_find(spa_name(spa),
		    zil_claim, tx, DS_FIND_CHILDREN);
		dmu_tx_commit(tx);

		spa->spa_log_state = SPA_LOG_GOOD;
		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 *
		 * If spa_load_verbatim is true, trust the current
		 * in-core spa_config and update the disk labels.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT || spa->spa_load_verbatim)
			need_update = B_TRUE;

		for (int c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asynchronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);

		/*
		 * Check all DTLs to see if anything needs resilvering.
		 */
		if (vdev_resilver_needed(rvd, NULL, NULL))
			spa_async_request(spa, SPA_ASYNC_RESILVER);
	}

	error = 0;
out:
	spa->spa_minref = refcount_count(&spa->spa_refcount);
	if (error && error != EBADF)
		zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
	spa->spa_load_state = SPA_LOAD_NONE;
	spa->spa_ena = 0;

	return (error);
}

/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is
 * sent down from userland, instead of grabbed from the configuration cache.
 * For the case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics
 * at the same time we open the pool, without having to keep around the spa_t
 * in some ambiguous state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
	spa_t *spa;
	int error;
	int locked = B_FALSE;

	*spapp = NULL;

	/*
	 * As disgusting as this is, we need to support recursive calls to this
	 * function because dsl_dir_open() is called during spa_load(), and ends
	 * up calling spa_open() again.  The real fix is to figure out how to
	 * avoid dsl_dir_open() calling this in the first place.
	 */
	if (mutex_owner(&spa_namespace_lock) != curthread) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	if ((spa = spa_lookup(pool)) == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}
	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {

		spa_activate(spa, spa_mode_global);

		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);

		if (error == EBADF) {
			/*
			 * If vdev_validate() returns failure (indicated by
			 * EBADF), it indicates that one of the vdevs indicates
			 * that the pool has been exported or destroyed.  If
			 * this is the case, the config cache is out of sync and
			 * we should remove the pool from the namespace.
			 */
			spa_unload(spa);
			spa_deactivate(spa);
			spa_config_sync(spa, B_TRUE, B_TRUE);
			spa_remove(spa);
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (ENOENT);
		}

		if (error) {
			/*
			 * We can't open the pool, but we still have useful
			 * information: the state of each vdev after the
			 * attempted vdev_open().  Return this to the user.
			 */
			if (config != NULL && spa->spa_root_vdev != NULL)
				*config = spa_config_generate(spa, NULL, -1ULL,
				    B_TRUE);
			spa_unload(spa);
			spa_deactivate(spa);
			spa->spa_last_open_failed = B_TRUE;
			if (locked)
				mutex_exit(&spa_namespace_lock);
			*spapp = NULL;
			return (error);
		}

		spa->spa_last_open_failed = B_FALSE;
	}

	spa_open_ref(spa, tag);

	if (locked)
		mutex_exit(&spa_namespace_lock);

	*spapp = spa;

	if (config != NULL)
		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);

	return (0);
}

int
spa_open(const char *name, spa_t **spapp, void *tag)
{
	return (spa_open_common(name, spapp, tag, NULL));
}

/*
 * Lookup the given spa_t, incrementing the inject count in the process,
 * preventing it from being exported or destroyed.
 */
spa_t *
spa_inject_addref(char *name)
{
	spa_t *spa;

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (NULL);
	}
	spa->spa_inject_ref++;
	mutex_exit(&spa_namespace_lock);

	return (spa);
}

void
spa_inject_delref(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);
	spa->spa_inject_ref--;
	mutex_exit(&spa_namespace_lock);
}

/*
 * Add spares device information to the nvlist.
 */
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
	nvlist_t **spares;
	uint_t i, nspares;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_stat_t *vs;
	uint_t vsc;
	uint64_t pool;

	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));

	if (spa->spa_spares.sav_count == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
	if (nspares != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

		/*
		 * Go through and find any spares which have since been
		 * repurposed as an active spare.  If this is the case, update
		 * their status appropriately.
		 */
		for (i = 0; i < nspares; i++) {
			VERIFY(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (spa_spare_exists(guid, &pool, NULL) &&
			    pool != 0ULL) {
				VERIFY(nvlist_lookup_uint64_array(
				    spares[i], ZPOOL_CONFIG_STATS,
				    (uint64_t **)&vs, &vsc) == 0);
				vs->vs_state = VDEV_STATE_CANT_OPEN;
				vs->vs_aux = VDEV_AUX_SPARED;
			}
		}
	}
}

/*
 * Add l2cache device information to the nvlist, including vdev stats.
 */
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
	nvlist_t **l2cache;
	uint_t i, j, nl2cache;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_t *vd;
	vdev_stat_t *vs;
	uint_t vsc;

	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));

	if (spa->spa_l2cache.sav_count == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
	if (nl2cache != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);

		/*
		 * Update level 2 cache device stats.
		 */
		for (i = 0; i < nl2cache; i++) {
			VERIFY(nvlist_lookup_uint64(l2cache[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);

			vd = NULL;
			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
				if (guid ==
				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
					vd = spa->spa_l2cache.sav_vdevs[j];
					break;
				}
			}
			ASSERT(vd != NULL);

			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
			vdev_get_stats(vd, vs);
		}
	}
}

int
spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
{
	int error;
	spa_t *spa;

	*config = NULL;
	error = spa_open_common(name, &spa, FTAG, config);

	if (spa != NULL) {
		/*
		 * This still leaves a window of inconsistency where the spares
		 * or l2cache devices could change and the config would be
		 * self-inconsistent.
		 */
		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

		if (*config != NULL) {
			VERIFY(nvlist_add_uint64(*config,
			    ZPOOL_CONFIG_ERRCOUNT,
			    spa_get_errlog_size(spa)) == 0);

			if (spa_suspended(spa))
				VERIFY(nvlist_add_uint64(*config,
				    ZPOOL_CONFIG_SUSPENDED,
				    spa->spa_failmode) == 0);

			spa_add_spares(spa, *config);
			spa_add_l2cache(spa, *config);
		}
	}

	/*
	 * We want to get the alternate root even for faulted pools, so we cheat
	 * and call spa_lookup() directly.
	 */
	if (altroot) {
		if (spa == NULL) {
			mutex_enter(&spa_namespace_lock);
			spa = spa_lookup(name);
			if (spa)
				spa_altroot(spa, altroot, buflen);
			else
				altroot[0] = '\0';
			spa = NULL;
			mutex_exit(&spa_namespace_lock);
		} else {
			spa_altroot(spa, altroot, buflen);
		}
	}

	if (spa != NULL) {
		spa_config_exit(spa, SCL_CONFIG, FTAG);
		spa_close(spa, FTAG);
	}

	return (error);
}

/*
 * Validate that the auxiliary device array is well formed.  We must have an
 * array of nvlists, each which describes a valid leaf vdev.  If this is an
 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
 * specified, as long as they are well-formed.
 */
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
    spa_aux_vdev_t *sav, const char *config, uint64_t version,
    vdev_labeltype_t label)
{
	nvlist_t **dev;
	uint_t i, ndev;
	vdev_t *vd;
	int error = 0;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * It's acceptable to have no devs specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
		return (0);

	if (ndev == 0)
		return (EINVAL);

	/*
	 * Make sure the pool is formatted with a version that supports this
	 * device type.
	 */
	if (spa_version(spa) < version)
		return (ENOTSUP);

	/*
	 * Set the pending device list so we correctly handle device in-use
	 * checking.
	 */
	sav->sav_pending = dev;
	sav->sav_npending = ndev;

	for (i = 0; i < ndev; i++) {
		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
		    mode)) != 0)
			goto out;

		if (!vd->vdev_ops->vdev_op_leaf) {
			vdev_free(vd);
			error = EINVAL;
			goto out;
		}

		/*
		 * The L2ARC currently only supports disk devices in
		 * kernel context.  For user-level testing, we allow it.
		 */
#ifdef _KERNEL
		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
			error = ENOTBLK;
			goto out;
		}
#endif
		vd->vdev_top = vd;

		if ((error = vdev_open(vd)) == 0 &&
		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
			    vd->vdev_guid) == 0);
		}

		vdev_free(vd);

		if (error &&
		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
			goto out;
		else
			error = 0;
	}

out:
	sav->sav_pending = NULL;
	sav->sav_npending = 0;
	return (error);
}

int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
	int error;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
	    VDEV_LABEL_SPARE)) != 0) {
		return (error);
	}

	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
	    VDEV_LABEL_L2CACHE));
}

static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
    const char *config)
{
	int i;

	if (sav->sav_config != NULL) {
		nvlist_t **olddevs;
		uint_t oldndevs;
		nvlist_t **newdevs;

		/*
		 * Generate new dev list by concatenating with the
		 * current dev list.
		 */
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
		    &olddevs, &oldndevs) == 0);

		newdevs = kmem_alloc(sizeof (void *) *
		    (ndevs + oldndevs), KM_SLEEP);
		for (i = 0; i < oldndevs; i++)
			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
			    KM_SLEEP) == 0);
		for (i = 0; i < ndevs; i++)
			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
			    KM_SLEEP) == 0);

		VERIFY(nvlist_remove(sav->sav_config, config,
		    DATA_TYPE_NVLIST_ARRAY) == 0);

		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
		    config, newdevs, ndevs + oldndevs) == 0);
		for (i = 0; i < oldndevs + ndevs; i++)
			nvlist_free(newdevs[i]);
		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
	} else {
		/*
		 * Generate a new dev list.
		 */
		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
		    devs, ndevs) == 0);
	}
}

/*
 * Stop and drop level 2 ARC devices
 */
void
spa_l2cache_drop(spa_t *spa)
{
	vdev_t *vd;
	int i;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	for (i = 0; i < sav->sav_count; i++) {
		uint64_t pool;

		vd = sav->sav_vdevs[i];
		ASSERT(vd != NULL);

		if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
		    pool != 0ULL && l2arc_vdev_present(vd))
			l2arc_remove_vdev(vd);
		if (vd->vdev_isl2cache)
			spa_l2cache_remove(vd);
		vdev_clear_stats(vd);
		(void) vdev_close(vd);
	}
}

/*
 * Pool Creation
 */
int
spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
    const char *history_str, nvlist_t *zplprops)
{
	spa_t *spa;
	char *altroot = NULL;
	vdev_t *rvd;
	dsl_pool_t *dp;
	dmu_tx_t *tx;
	int c, error = 0;
	uint64_t txg = TXG_INITIAL;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;
	uint64_t version;

	/*
	 * If this pool already exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Allocate a new spa_t structure.
	 */
	(void) nvlist_lookup_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
	spa = spa_add(pool, altroot);
	spa_activate(spa, spa_mode_global);

	spa->spa_uberblock.ub_txg = txg - 1;

	if (props && (error = spa_prop_validate(spa, props))) {
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION),
	    &version) != 0)
		version = SPA_VERSION;
	ASSERT(version <= SPA_VERSION);
	spa->spa_uberblock.ub_version = version;
	spa->spa_ubsync = spa->spa_uberblock;

2135 * Create "The Godfather" zio to hold all async IOs
2137 spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
2138 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
	/*
	 * Create the root vdev.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);

	ASSERT(error != 0 || rvd != NULL);
	ASSERT(error != 0 || spa->spa_root_vdev == rvd);

	if (error == 0 && !zfs_allocatable_devs(nvroot))
		error = EINVAL;

	if (error == 0 &&
	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
	    (error = spa_validate_aux(spa, nvroot, txg,
	    VDEV_ALLOC_ADD)) == 0) {
		for (c = 0; c < rvd->vdev_children; c++)
			vdev_init(rvd->vdev_child[c], txg);
		vdev_config_dirty(rvd);
	}

	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Get the list of spares, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_spares.sav_sync = B_TRUE;
	}

	/*
	 * Get the list of level 2 cache devices, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
	spa->spa_meta_objset = dp->dp_meta_objset;

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Create the pool config object.
	 */
	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
	    DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add pool config");
	}

	/* Newly created pools with the right version are always deflated. */
	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
		spa->spa_deflate = TRUE;
		if (zap_add(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
			cmn_err(CE_PANIC, "failed to add deflate");
		}
	}

	/*
	 * Create the deferred-free bplist object.  Turn off compression
	 * because sync-to-convergence takes longer if the blocksize
	 * keeps changing.
	 */
	spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
	    1 << 14, tx);
	dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
	    ZIO_COMPRESS_OFF, tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add bplist");
	}

	/*
	 * Create the pool's history object.
	 */
	if (version >= SPA_VERSION_ZPOOL_HISTORY)
		spa_history_create_obj(spa, tx);

	/*
	 * Set pool properties.
	 */
	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
	if (props != NULL) {
		spa_configfile_set(spa, props, B_FALSE);
		spa_sync_props(spa, props, CRED(), tx);
	}

	dmu_tx_commit(tx);

	spa->spa_sync_on = B_TRUE;
	txg_sync_start(spa->spa_dsl_pool);

	/*
	 * We explicitly wait for the first transaction to complete so that our
	 * bean counters are appropriately updated.
	 */
	txg_wait_synced(spa->spa_dsl_pool, txg);

	spa_config_sync(spa, B_FALSE, B_TRUE);

	if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL)
		(void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE);

	spa->spa_minref = refcount_count(&spa->spa_refcount);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

2289 * Build a "root" vdev for a top level vdev read in from a rootpool
2293 spa_build_rootpool_config(nvlist_t *config)
2295 nvlist_t *nvtop, *nvroot;
2299 * Add this top-level vdev to the child array.
2301 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtop)
2303 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pgid)
2307 * Put this pool's top-level vdevs into a root vdev.
2309 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2310 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT)
2312 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
2313 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
2314 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2318 * Replace the existing vdev_tree with the new root vdev in
2319 * this pool's configuration (remove the old, add the new).
2321 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
2322 nvlist_free(nvroot);
/*
 * Get the root pool information from the root disk, then import the root pool
 * during the system boot up time.
 */
extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);

static int
spa_check_rootconf(char *devpath, char *devid, nvlist_t **bestconf,
    uint64_t *besttxg)
{
	nvlist_t *config;
	uint64_t txg;
	int error;

	if (error = vdev_disk_read_rootlabel(devpath, devid, &config))
		return (error);

	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);

	if (bestconf != NULL)
		*bestconf = config;
	else
		nvlist_free(config);
	*besttxg = txg;
	return (0);
}

static boolean_t
spa_rootdev_validate(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (B_FALSE);

	return (B_TRUE);
}

/*
 * Given the boot device's physical path or devid, check if the device
 * is in a valid state.  If so, return the configuration from the vdev
 * label.
 */
int
spa_get_rootconf(char *devpath, char *devid, nvlist_t **bestconf)
{
	nvlist_t *conf = NULL;
	uint64_t txg = 0;
	nvlist_t *nvtop, **child;
	char *type;
	char *bootpath = NULL;
	uint_t children, c;
	char *tmp;
	int error;

	if (devpath && ((tmp = strchr(devpath, ' ')) != NULL))
		*tmp = '\0';
	if (error = spa_check_rootconf(devpath, devid, &conf, &txg)) {
		cmn_err(CE_NOTE, "error reading device label");
		return (error);
	}
	if (txg == 0) {
		cmn_err(CE_NOTE, "this device is detached");
		nvlist_free(conf);
		return (EINVAL);
	}

	VERIFY(nvlist_lookup_nvlist(conf, ZPOOL_CONFIG_VDEV_TREE,
	    &nvtop) == 0);
	VERIFY(nvlist_lookup_string(nvtop, ZPOOL_CONFIG_TYPE, &type) == 0);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		if (spa_rootdev_validate(nvtop)) {
			goto out;
		} else {
			nvlist_free(conf);
			return (EINVAL);
		}
	}

	ASSERT(strcmp(type, VDEV_TYPE_MIRROR) == 0);

	VERIFY(nvlist_lookup_nvlist_array(nvtop, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0);

	/*
	 * Go thru vdevs in the mirror to see if the given device
	 * has the most recent txg.  Only the device with the most
	 * recent txg has valid information and should be booted.
	 */
	for (c = 0; c < children; c++) {
		char *cdevid, *cpath;
		uint64_t tmptxg;

		cpath = NULL;
		cdevid = NULL;
		if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_PHYS_PATH,
		    &cpath) != 0 && nvlist_lookup_string(child[c],
		    ZPOOL_CONFIG_DEVID, &cdevid) != 0)
			return (EINVAL);
		if ((spa_check_rootconf(cpath, cdevid, NULL,
		    &tmptxg) == 0) && (tmptxg > txg)) {
			txg = tmptxg;
			VERIFY(nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_PATH, &bootpath) == 0);
		}
	}

	/* Does the best device match the one we've booted from? */
	if (bootpath) {
		cmn_err(CE_NOTE, "try booting from '%s'", bootpath);
		return (EINVAL);
	}
out:
	*bestconf = conf;
	return (0);
}

/*
 * Import a root pool.
 *
 * For x86, devpath_list will consist of devid and/or physpath name of
 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
 * The GRUB "findroot" command will return the vdev we should boot.
 *
 * For Sparc, devpath_list consists of the physpath name of the booting device
 * no matter whether the rootpool is a single device pool or a mirrored pool,
 * e.g.
 *	"/pci@1f,0/ide@d/disk@0,0:a"
 */
2459 spa_import_rootpool(char *devpath, char *devid)
2461 nvlist_t *conf = NULL;
2467 * Get the vdev pathname and configuation from the most
2468 * recently updated vdev (highest txg).
2470 if (error = spa_get_rootconf(devpath, devid, &conf))
2474 * Add type "root" vdev to the config.
2476 spa_build_rootpool_config(conf);
2478 VERIFY(nvlist_lookup_string(conf, ZPOOL_CONFIG_POOL_NAME, &pname) == 0);
2480 mutex_enter(&spa_namespace_lock);
2481 if ((spa = spa_lookup(pname)) != NULL) {
2483 * Remove the existing root pool from the namespace so that we
2484 * can replace it with the correct config we just read in.
2489 spa = spa_add(pname, NULL);
2490 spa->spa_is_root = B_TRUE;
2491 spa->spa_load_verbatim = B_TRUE;
2493 VERIFY(nvlist_dup(conf, &spa->spa_config, 0) == 0);
2494 mutex_exit(&spa_namespace_lock);
2500 cmn_err(CE_NOTE, "\n"
2501 " *************************************************** \n"
2502 " * This device is not bootable! * \n"
2503 " * It is either offlined or detached or faulted. * \n"
2504 " * Please try to boot from a different device. * \n"
2505 " *************************************************** ");
2513 * Take a pool and insert it into the namespace as if it had been loaded at boot.
2517 spa_import_verbatim(const char *pool, nvlist_t *config, nvlist_t *props)
2520 char *altroot = NULL;
2522 mutex_enter(&spa_namespace_lock);
2523 if (spa_lookup(pool) != NULL) {
2524 mutex_exit(&spa_namespace_lock);
2528 (void) nvlist_lookup_string(props,
2529 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
2530 spa = spa_add(pool, altroot);
2532 spa->spa_load_verbatim = B_TRUE;
2534 VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
2537 spa_configfile_set(spa, props, B_FALSE);
2539 spa_config_sync(spa, B_FALSE, B_TRUE);
2541 mutex_exit(&spa_namespace_lock);
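/*
 * Minimal caller sketch (saved_config is a hypothetical, previously
 * saved pool config): unlike spa_import() below, no spa_load() happens
 * here; the supplied config is trusted verbatim and simply recorded
 * and synced to the config cache.
 *
 *	error = spa_import_verbatim("tank", saved_config, NULL);
 */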
2547 * Import a non-root pool into the system.
2550 spa_import(const char *pool, nvlist_t *config, nvlist_t *props)
2553 char *altroot = NULL;
2556 nvlist_t **spares, **l2cache;
2557 uint_t nspares, nl2cache;
2560 * If a pool with this name exists, return failure.
2562 mutex_enter(&spa_namespace_lock);
2563 if ((spa = spa_lookup(pool)) != NULL) {
2564 mutex_exit(&spa_namespace_lock);
2569 * Create and initialize the spa structure.
2571 (void) nvlist_lookup_string(props,
2572 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
2573 spa = spa_add(pool, altroot);
2574 spa_activate(spa, spa_mode_global);
2577 * Don't start async tasks until we know everything is healthy.
2579 spa_async_suspend(spa);
2582 * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig
2583 * because the user-supplied config is actually the one to trust when
2586 error = spa_load(spa, config, SPA_LOAD_IMPORT, B_TRUE);
2588 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2590 * Toss any existing sparelist, as it doesn't have any validity
2591 * anymore, and conflicts with spa_has_spare().
2593 if (spa->spa_spares.sav_config) {
2594 nvlist_free(spa->spa_spares.sav_config);
2595 spa->spa_spares.sav_config = NULL;
2596 spa_load_spares(spa);
2598 if (spa->spa_l2cache.sav_config) {
2599 nvlist_free(spa->spa_l2cache.sav_config);
2600 spa->spa_l2cache.sav_config = NULL;
2601 spa_load_l2cache(spa);
2604 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2607 error = spa_validate_aux(spa, nvroot, -1ULL,
2610 error = spa_validate_aux(spa, nvroot, -1ULL,
2611 VDEV_ALLOC_L2CACHE);
2612 spa_config_exit(spa, SCL_ALL, FTAG);
2615 spa_configfile_set(spa, props, B_FALSE);
2617 if (error != 0 || (props && spa_writeable(spa) &&
2618 (error = spa_prop_set(spa, props)))) {
2620 spa_deactivate(spa);
2622 mutex_exit(&spa_namespace_lock);
2626 spa_async_resume(spa);
2629 * Override any spares and level 2 cache devices as specified by
2630 * the user, as these may have correct device names/devids, etc.
2632 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
2633 &spares, &nspares) == 0) {
2634 if (spa->spa_spares.sav_config)
2635 VERIFY(nvlist_remove(spa->spa_spares.sav_config,
2636 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
2638 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
2639 NV_UNIQUE_NAME, KM_SLEEP) == 0);
2640 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
2641 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
2642 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2643 spa_load_spares(spa);
2644 spa_config_exit(spa, SCL_ALL, FTAG);
2645 spa->spa_spares.sav_sync = B_TRUE;
2647 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
2648 &l2cache, &nl2cache) == 0) {
2649 if (spa->spa_l2cache.sav_config)
2650 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
2651 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
2653 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
2654 NV_UNIQUE_NAME, KM_SLEEP) == 0);
2655 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
2656 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
2657 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2658 spa_load_l2cache(spa);
2659 spa_config_exit(spa, SCL_ALL, FTAG);
2660 spa->spa_l2cache.sav_sync = B_TRUE;
2663 if (spa_writeable(spa)) {
2665 * Update the config cache to include the newly-imported pool.
2667 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
2670 mutex_exit(&spa_namespace_lock);
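/*
 * Typical userland path (sketch; the exact plumbing lives in the zfs
 * ioctl layer): "zpool import tank" assembles a config nvlist from the
 * device labels and arrives here roughly as
 *
 *	error = spa_import("tank", config, props);
 *
 * with props optionally carrying e.g. the altroot consumed above.
 */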
2676 * This (illegal) pool name is used when temporarily importing a spa_t in order
2677 * to get the vdev stats associated with the imported devices.
2679 #define TRYIMPORT_NAME "$import"
2682 spa_tryimport(nvlist_t *tryconfig)
2684 nvlist_t *config = NULL;
2690 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
2693 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
2697 * Create and initialize the spa structure.
2699 mutex_enter(&spa_namespace_lock);
2700 spa = spa_add(TRYIMPORT_NAME, NULL);
2701 spa_activate(spa, FREAD);
2704 * Pass off the heavy lifting to spa_load().
2705 * Pass TRUE for mosconfig because the user-supplied config
2706 * is actually the one to trust when doing an import.
2708 error = spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);
2711 * If 'tryconfig' was at least parsable, return the current config.
2713 if (spa->spa_root_vdev != NULL) {
2714 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
2715 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
2717 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
2719 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
2720 spa->spa_uberblock.ub_timestamp) == 0);
2723 * If the bootfs property exists on this pool then we
2724 * copy it out so that external consumers can tell which
2725 * pools are bootable.
2727 if ((!error || error == EEXIST) && spa->spa_bootfs) {
2728 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2731 * We have to play games with the name since the
2732 * pool was opened as TRYIMPORT_NAME.
2734 if (dsl_dsobj_to_dsname(spa_name(spa),
2735 spa->spa_bootfs, tmpname) == 0) {
2737 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2739 cp = strchr(tmpname, '/');
2741 (void) strlcpy(dsname, tmpname,
2744 (void) snprintf(dsname, MAXPATHLEN,
2745 "%s/%s", poolname, ++cp);
2747 VERIFY(nvlist_add_string(config,
2748 ZPOOL_CONFIG_BOOTFS, dsname) == 0);
2749 kmem_free(dsname, MAXPATHLEN);
2751 kmem_free(tmpname, MAXPATHLEN);
2755 * Add the list of hot spares and level 2 cache devices.
2757 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
2758 spa_add_spares(spa, config);
2759 spa_add_l2cache(spa, config);
2760 spa_config_exit(spa, SCL_CONFIG, FTAG);
2764 spa_deactivate(spa);
2766 mutex_exit(&spa_namespace_lock);
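/*
 * Intended use (sketch): spa_tryimport() never leaves a pool open; it
 * loads under the reserved name "$import", generates a config, and
 * tears the spa_t down again. A caller does roughly:
 *
 *	nvlist_t *config = spa_tryimport(tryconfig);
 *	if (config != NULL) {
 *		(inspect pool name/state/timestamp, bootfs, spares)
 *		nvlist_free(config);
 *	}
 */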
2772 * Pool export/destroy
2774 * The act of destroying or exporting a pool is very simple. We make sure there
2775 * is no more pending I/O and any references to the pool are gone. Then, we
2776 * update the pool state and sync all the labels to disk, removing the
2777 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
2778 * we don't sync the labels or remove the configuration cache.
2781 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
2782 boolean_t force, boolean_t hardforce)
2789 if (!(spa_mode_global & FWRITE))
2792 mutex_enter(&spa_namespace_lock);
2793 if ((spa = spa_lookup(pool)) == NULL) {
2794 mutex_exit(&spa_namespace_lock);
2799 * Put a hold on the pool, drop the namespace lock, stop async tasks,
2800 * reacquire the namespace lock, and see if we can export.
2802 spa_open_ref(spa, FTAG);
2803 mutex_exit(&spa_namespace_lock);
2804 spa_async_suspend(spa);
2805 mutex_enter(&spa_namespace_lock);
2806 spa_close(spa, FTAG);
2809 * The pool will be in core if it's openable,
2810 * in which case we can modify its state.
2812 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
2814 * Objsets may be open only because they're dirty, so we
2815 * have to force it to sync before checking spa_refcnt.
2817 txg_wait_synced(spa->spa_dsl_pool, 0);
2820 * A pool cannot be exported or destroyed if there are active
2821 * references. If we are resetting a pool, allow references by
2822 * fault injection handlers.
2824 if (!spa_refcount_zero(spa) ||
2825 (spa->spa_inject_ref != 0 &&
2826 new_state != POOL_STATE_UNINITIALIZED)) {
2827 spa_async_resume(spa);
2828 mutex_exit(&spa_namespace_lock);
2833 * A pool cannot be exported if it has an active shared spare.
2834 * This is to prevent other pools stealing the active spare
2835 * from an exported pool. The user can still export such a pool,
2836 * but only by explicitly forcing it.
2838 if (!force && new_state == POOL_STATE_EXPORTED &&
2839 spa_has_active_shared_spare(spa)) {
2840 spa_async_resume(spa);
2841 mutex_exit(&spa_namespace_lock);
2846 * We want this to be reflected on every label,
2847 * so mark them all dirty. spa_unload() will do the
2848 * final sync that pushes these changes out.
2850 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
2851 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2852 spa->spa_state = new_state;
2853 spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
2854 vdev_config_dirty(spa->spa_root_vdev);
2855 spa_config_exit(spa, SCL_ALL, FTAG);
2859 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);
2861 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
2863 spa_deactivate(spa);
2866 if (oldconfig && spa->spa_config)
2867 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
2869 if (new_state != POOL_STATE_UNINITIALIZED) {
2871 spa_config_sync(spa, B_TRUE, B_TRUE);
2874 mutex_exit(&spa_namespace_lock);
2880 * Destroy a storage pool.
2883 spa_destroy(char *pool)
2885 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
2890 * Export a storage pool.
2893 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
2894 boolean_t hardforce)
2896 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
2901 * Similar to spa_export(), this unloads the spa_t without actually removing it
2902 * from the namespace in any way.
2905 spa_reset(char *pool)
2907 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
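/*
 * The three wrappers above differ only in the new_state handed to
 * spa_export_common(): POOL_STATE_DESTROYED for destroy,
 * POOL_STATE_EXPORTED for export, and POOL_STATE_UNINITIALIZED for
 * reset (which unloads the pool but leaves it in the namespace). For
 * example, "zpool export -f tank" corresponds to
 *
 *	spa_export("tank", NULL, B_TRUE, B_FALSE);
 */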
2912 * ==========================================================================
2913 * Device manipulation
2914 * ==========================================================================
2918 * Add a device to a storage pool.
2921 spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
2925 vdev_t *rvd = spa->spa_root_vdev;
2927 nvlist_t **spares, **l2cache;
2928 uint_t nspares, nl2cache;
2930 txg = spa_vdev_enter(spa);
2932 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
2933 VDEV_ALLOC_ADD)) != 0)
2934 return (spa_vdev_exit(spa, NULL, txg, error));
2936 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
2938 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
2942 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
2946 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
2947 return (spa_vdev_exit(spa, vd, txg, EINVAL));
2949 if (vd->vdev_children != 0 &&
2950 (error = vdev_create(vd, txg, B_FALSE)) != 0)
2951 return (spa_vdev_exit(spa, vd, txg, error));
2954 * We must validate the spares and l2cache devices after checking the
2955 * children. Otherwise, vdev_inuse() will blindly overwrite the spare.
2957 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
2958 return (spa_vdev_exit(spa, vd, txg, error));
2961 * Transfer each new top-level vdev from vd to rvd.
2963 for (int c = 0; c < vd->vdev_children; c++) {
2964 tvd = vd->vdev_child[c];
2965 vdev_remove_child(vd, tvd);
2966 tvd->vdev_id = rvd->vdev_children;
2967 vdev_add_child(rvd, tvd);
2968 vdev_config_dirty(tvd);
2972 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
2973 ZPOOL_CONFIG_SPARES);
2974 spa_load_spares(spa);
2975 spa->spa_spares.sav_sync = B_TRUE;
2978 if (nl2cache != 0) {
2979 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
2980 ZPOOL_CONFIG_L2CACHE);
2981 spa_load_l2cache(spa);
2982 spa->spa_l2cache.sav_sync = B_TRUE;
2986 * We have to be careful when adding new vdevs to an existing pool.
2987 * If other threads start allocating from these vdevs before we
2988 * sync the config cache, and we lose power, then upon reboot we may
2989 * fail to open the pool because there are DVAs that the config cache
2990 * can't translate. Therefore, we first add the vdevs without
2991 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
2992 * and then let spa_config_update() initialize the new metaslabs.
2994 * spa_load() checks for added-but-not-initialized vdevs, so that
2995 * if we lose power at any point in this sequence, the remaining
2996 * steps will be completed the next time we load the pool.
2998 (void) spa_vdev_exit(spa, vd, txg, 0);
3000 mutex_enter(&spa_namespace_lock);
3001 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
3002 mutex_exit(&spa_namespace_lock);
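/*
 * Example (hypothetical device names): "zpool add tank mirror c2t0d0
 * c2t1d0" reaches this function with nvroot describing one new mirror
 * top-level vdev; the loop above grafts it onto rvd, and the two-step
 * config-cache update described above keeps the addition safe across
 * power loss.
 */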
3008 * Attach a device to a mirror. The arguments are the path to any device
3009 * in the mirror, and the nvroot for the new device. If the path specifies
3010 * a device that is not mirrored, we automatically insert the mirror vdev.
3012 * If 'replacing' is specified, the new device is intended to replace the
3013 * existing device; in this case the two devices are made into their own
3014 * mirror using the 'replacing' vdev, which is functionally identical to
3015 * the mirror vdev (it actually reuses all the same ops) but has a few
3016 * extra rules: you can't attach to it after it's been created, and upon
3017 * completion of resilvering, the first disk (the one being replaced)
3018 * is automatically detached.
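 *
 * For example (hypothetical device names), "zpool attach tank c1t0d0
 * c1t1d0" calls this with replacing = B_FALSE, inserting a mirror vdev
 * above c1t0d0 if one isn't there already, while "zpool replace tank
 * c1t0d0 c1t1d0" calls it with replacing = B_TRUE and inserts a
 * 'replacing' vdev instead.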
3021 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
3023 uint64_t txg, open_txg;
3024 vdev_t *rvd = spa->spa_root_vdev;
3025 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
3028 char *oldvdpath, *newvdpath;
3032 txg = spa_vdev_enter(spa);
3034 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
3037 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
3039 if (!oldvd->vdev_ops->vdev_op_leaf)
3040 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3042 pvd = oldvd->vdev_parent;
3044 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
3045 VDEV_ALLOC_ADD)) != 0)
3046 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
3048 if (newrootvd->vdev_children != 1)
3049 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
3051 newvd = newrootvd->vdev_child[0];
3053 if (!newvd->vdev_ops->vdev_op_leaf)
3054 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
3056 if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
3057 return (spa_vdev_exit(spa, newrootvd, txg, error));
3060 * Spares can't replace logs
3062 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
3063 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3067 * For attach, the only allowable parent is a mirror or the root vdev.
3070 if (pvd->vdev_ops != &vdev_mirror_ops &&
3071 pvd->vdev_ops != &vdev_root_ops)
3072 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3074 pvops = &vdev_mirror_ops;
3077 * Active hot spares can only be replaced by inactive hot spares.
3080 if (pvd->vdev_ops == &vdev_spare_ops &&
3081 pvd->vdev_child[1] == oldvd &&
3082 !spa_has_spare(spa, newvd->vdev_guid))
3083 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3086 * If the source is a hot spare, and the parent isn't already a
3087 * spare, then we want to create a new hot spare. Otherwise, we
3088 * want to create a replacing vdev. The user is not allowed to
3089 * attach to a spared vdev child unless the 'isspare' state is
3090 * the same (spare replaces spare, non-spare replaces non-spare).
3093 if (pvd->vdev_ops == &vdev_replacing_ops)
3094 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3095 else if (pvd->vdev_ops == &vdev_spare_ops &&
3096 newvd->vdev_isspare != oldvd->vdev_isspare)
3097 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3098 else if (pvd->vdev_ops != &vdev_spare_ops &&
3099 newvd->vdev_isspare)
3100 pvops = &vdev_spare_ops;
3102 pvops = &vdev_replacing_ops;
3106 * Compare the new device size with the replaceable/attachable device size.
3109 if (newvd->vdev_psize < vdev_get_rsize(oldvd))
3110 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
3113 * The new device cannot have a higher alignment requirement
3114 * than the top-level vdev.
3116 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
3117 return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
3120 * If this is an in-place replacement, update oldvd's path and devid
3121 * to make it distinguishable from newvd, and unopenable from now on.
3123 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
3124 spa_strfree(oldvd->vdev_path);
3125 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
3127 (void) sprintf(oldvd->vdev_path, "%s/%s",
3128 newvd->vdev_path, "old");
3129 if (oldvd->vdev_devid != NULL) {
3130 spa_strfree(oldvd->vdev_devid);
3131 oldvd->vdev_devid = NULL;
3136 * If the parent is not a mirror, or if we're replacing, insert the new
3137 * mirror/replacing/spare vdev above oldvd.
3139 if (pvd->vdev_ops != pvops)
3140 pvd = vdev_add_parent(oldvd, pvops);
3142 ASSERT(pvd->vdev_top->vdev_parent == rvd);
3143 ASSERT(pvd->vdev_ops == pvops);
3144 ASSERT(oldvd->vdev_parent == pvd);
3147 * Extract the new device from its root and add it to pvd.
3149 vdev_remove_child(newrootvd, newvd);
3150 newvd->vdev_id = pvd->vdev_children;
3151 vdev_add_child(pvd, newvd);
3154 * If newvd is smaller than oldvd, but larger than its rsize,
3155 * the addition of newvd may have decreased our parent's asize.
3157 pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize);
3159 tvd = newvd->vdev_top;
3160 ASSERT(pvd->vdev_top == tvd);
3161 ASSERT(tvd->vdev_parent == rvd);
3163 vdev_config_dirty(tvd);
3166 * Set newvd's DTL to [TXG_INITIAL, open_txg]. It will propagate
3167 * upward when spa_vdev_exit() calls vdev_dtl_reassess().
3169 open_txg = txg + TXG_CONCURRENT_STATES - 1;
3171 vdev_dtl_dirty(newvd, DTL_MISSING,
3172 TXG_INITIAL, open_txg - TXG_INITIAL + 1);
3174 if (newvd->vdev_isspare) {
3175 spa_spare_activate(newvd);
3176 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE);
3179 oldvdpath = spa_strdup(oldvd->vdev_path);
3180 newvdpath = spa_strdup(newvd->vdev_path);
3181 newvd_isspare = newvd->vdev_isspare;
3184 * Mark newvd's DTL dirty in this txg.
3186 vdev_dirty(tvd, VDD_DTL, newvd, txg);
3188 (void) spa_vdev_exit(spa, newrootvd, open_txg, 0);
3190 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
3191 if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
3192 spa_history_internal_log(LOG_POOL_VDEV_ATTACH, spa, tx,
3193 CRED(), "%s vdev=%s %s vdev=%s",
3194 replacing && newvd_isspare ? "spare in" :
3195 replacing ? "replace" : "attach", newvdpath,
3196 replacing ? "for" : "to", oldvdpath);
3202 spa_strfree(oldvdpath);
3203 spa_strfree(newvdpath);
3206 * Kick off a resilver to update newvd.
3208 VERIFY3U(spa_scrub(spa, POOL_SCRUB_RESILVER), ==, 0);
3214 * Detach a device from a mirror or replacing vdev.
3215 * If 'replace_done' is specified, only detach if the parent
3216 * is a replacing vdev.
3219 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
3223 vdev_t *rvd = spa->spa_root_vdev;
3224 vdev_t *vd, *pvd, *cvd, *tvd;
3225 boolean_t unspare = B_FALSE;
3226 uint64_t unspare_guid;
3229 txg = spa_vdev_enter(spa);
3231 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
3234 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
3236 if (!vd->vdev_ops->vdev_op_leaf)
3237 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3239 pvd = vd->vdev_parent;
3242 * If the parent/child relationship is not as expected, don't do it.
3243 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
3244 * vdev that's replacing B with C. The user's intent in replacing
3245 * is to go from M(A,B) to M(A,C). If the user decides to cancel
3246 * the replace by detaching C, the expected behavior is to end up
3247 * M(A,B). But suppose that right after deciding to detach C,
3248 * the replacement of B completes. We would have M(A,C), and then
3249 * ask to detach C, which would leave us with just A -- not what
3250 * the user wanted. To prevent this, we make sure that the
3251 * parent/child relationship hasn't changed -- in this example,
3252 * that C's parent is still the replacing vdev R.
3254 if (pvd->vdev_guid != pguid && pguid != 0)
3255 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
3258 * If replace_done is specified, only remove this device if it's
3259 * the first child of a replacing vdev. For the 'spare' vdev, either
3260 * disk can be removed.
3263 if (pvd->vdev_ops == &vdev_replacing_ops) {
3264 if (vd->vdev_id != 0)
3265 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3266 } else if (pvd->vdev_ops != &vdev_spare_ops) {
3267 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3271 ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
3272 spa_version(spa) >= SPA_VERSION_SPARES);
3275 * Only mirror, replacing, and spare vdevs support detach.
3277 if (pvd->vdev_ops != &vdev_replacing_ops &&
3278 pvd->vdev_ops != &vdev_mirror_ops &&
3279 pvd->vdev_ops != &vdev_spare_ops)
3280 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3283 * If this device has the only valid copy of some data,
3284 * we cannot safely detach it.
3286 if (vdev_dtl_required(vd))
3287 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
3289 ASSERT(pvd->vdev_children >= 2);
3292 * If we are detaching the second disk from a replacing vdev, then
3293 * check to see if we changed the original vdev's path to have "/old"
3294 * at the end in spa_vdev_attach(). If so, undo that change now.
3296 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 &&
3297 pvd->vdev_child[0]->vdev_path != NULL &&
3298 pvd->vdev_child[1]->vdev_path != NULL) {
3299 ASSERT(pvd->vdev_child[1] == vd);
3300 cvd = pvd->vdev_child[0];
3301 len = strlen(vd->vdev_path);
3302 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
3303 strcmp(cvd->vdev_path + len, "/old") == 0) {
3304 spa_strfree(cvd->vdev_path);
3305 cvd->vdev_path = spa_strdup(vd->vdev_path);
3310 * If we are detaching the original disk from a spare, then it implies
3311 * that the spare should become a real disk, and be removed from the
3312 * active spare list for the pool.
3314 if (pvd->vdev_ops == &vdev_spare_ops &&
3315 vd->vdev_id == 0 && pvd->vdev_child[1]->vdev_isspare)
3319 * Erase the disk labels so the disk can be used for other things.
3320 * This must be done after all other error cases are handled,
3321 * but before we disembowel vd (so we can still do I/O to it).
3322 * But if we can't do it, don't treat the error as fatal --
3323 * it may be that the unwritability of the disk is the reason
3324 * it's being detached!
3326 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
3329 * Remove vd from its parent and compact the parent's children.
3331 vdev_remove_child(pvd, vd);
3332 vdev_compact_children(pvd);
3335 * Remember one of the remaining children so we can get tvd below.
3337 cvd = pvd->vdev_child[0];
3340 * If we need to remove the remaining child from the list of hot spares,
3341 * do it now, marking the vdev as no longer a spare in the process.
3342 * We must do this before vdev_remove_parent(), because that can
3343 * change the GUID if it creates a new toplevel GUID. For a similar
3344 * reason, we must remove the spare now, in the same txg as the detach;
3345 * otherwise someone could attach a new sibling, change the GUID, and
3346 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
3349 ASSERT(cvd->vdev_isspare);
3350 spa_spare_remove(cvd);
3351 unspare_guid = cvd->vdev_guid;
3352 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
3356 * If the parent mirror/replacing vdev only has one child,
3357 * the parent is no longer needed. Remove it from the tree.
3359 if (pvd->vdev_children == 1)
3360 vdev_remove_parent(cvd);
3363 * We don't set tvd until now because the parent we just removed
3364 * may have been the previous top-level vdev.
3366 tvd = cvd->vdev_top;
3367 ASSERT(tvd->vdev_parent == rvd);
3370 * Reevaluate the parent vdev state.
3372 vdev_propagate_state(cvd);
3375 * If the device we just detached was smaller than the others, it may be
3376 * possible to add metaslabs (i.e. grow the pool). vdev_metaslab_init()
3377 * can't fail because the existing metaslabs are already in core, so
3378 * there's nothing to read from disk.
3380 VERIFY(vdev_metaslab_init(tvd, txg) == 0);
3382 vdev_config_dirty(tvd);
3385 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
3386 * vd->vdev_detached is set and free vd's DTL object in syncing context.
3387 * But first make sure we're not on any *other* txg's DTL list, to
3388 * prevent vd from being accessed after it's freed.
3390 for (int t = 0; t < TXG_SIZE; t++)
3391 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
3392 vd->vdev_detached = B_TRUE;
3393 vdev_dirty(tvd, VDD_DTL, vd, txg);
3395 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
3397 error = spa_vdev_exit(spa, vd, txg, 0);
3400 * If this was the removal of the original device in a hot spare vdev,
3401 * then we want to go through and remove the device from the hot spare
3402 * list of every other pool.
3407 mutex_enter(&spa_namespace_lock);
3408 while ((spa = spa_next(spa)) != NULL) {
3409 if (spa->spa_state != POOL_STATE_ACTIVE)
3413 spa_open_ref(spa, FTAG);
3414 mutex_exit(&spa_namespace_lock);
3415 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
3416 mutex_enter(&spa_namespace_lock);
3417 spa_close(spa, FTAG);
3419 mutex_exit(&spa_namespace_lock);
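/*
 * Example of the unspare sweep above (hypothetical names): if spare
 * c3t0d0 is configured in both "tank" and "backup" and the original
 * disk in tank's spare vdev is detached, the loop visits every active
 * pool and removes c3t0d0 from backup's spare list too, since the
 * now-promoted disk must stop being offered as a shared spare.
 */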
3426 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
3428 for (int i = 0; i < count; i++) {
3431 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
3434 if (guid == target_guid)
3442 spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
3443 nvlist_t *dev_to_remove)
3445 nvlist_t **newdev = NULL;
3448 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
3450 for (int i = 0, j = 0; i < count; i++) {
3451 if (dev[i] == dev_to_remove)
3453 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
3456 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
3457 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
3459 for (int i = 0; i < count - 1; i++)
3460 nvlist_free(newdev[i]);
3463 kmem_free(newdev, (count - 1) * sizeof (void *));
3467 * Remove a device from the pool. Currently, this supports removing only hot
3468 * spares and level 2 ARC devices.
3471 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
3474 nvlist_t **spares, **l2cache, *nv;
3475 uint_t nspares, nl2cache;
3478 boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
3481 txg = spa_vdev_enter(spa);
3483 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
3485 if (spa->spa_spares.sav_vdevs != NULL &&
3486 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
3487 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
3488 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
3490 * Only remove the hot spare if it's not currently in use in this pool.
3493 if (vd == NULL || unspare) {
3494 spa_vdev_remove_aux(spa->spa_spares.sav_config,
3495 ZPOOL_CONFIG_SPARES, spares, nspares, nv);
3496 spa_load_spares(spa);
3497 spa->spa_spares.sav_sync = B_TRUE;
3501 } else if (spa->spa_l2cache.sav_vdevs != NULL &&
3502 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
3503 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
3504 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
3506 * Cache devices can always be removed.
3508 spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
3509 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
3510 spa_load_l2cache(spa);
3511 spa->spa_l2cache.sav_sync = B_TRUE;
3512 } else if (vd != NULL) {
3514 * Normal vdevs cannot be removed (yet).
3519 * There is no vdev of any kind with the specified guid.
3525 return (spa_vdev_exit(spa, NULL, txg, error));
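/*
 * Example (hypothetical device name): "zpool remove tank c4t0d0"
 * arrives here with the device's guid. The call succeeds only when the
 * guid names an inactive hot spare (or unspare is set) or a level 2
 * cache device; the elided branches above otherwise set a nonzero
 * error, which spa_vdev_exit() returns.
 */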
3531 * Find any device that's done replacing, or a vdev marked 'unspare' that's
3532 * currently spared, so we can detach it.
3535 spa_vdev_resilver_done_hunt(vdev_t *vd)
3537 vdev_t *newvd, *oldvd;
3540 for (c = 0; c < vd->vdev_children; c++) {
3541 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
3547 * Check for a completed replacement.
3549 if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) {
3550 oldvd = vd->vdev_child[0];
3551 newvd = vd->vdev_child[1];
3553 if (vdev_dtl_empty(newvd, DTL_MISSING) &&
3554 !vdev_dtl_required(oldvd))
3559 * Check for a completed resilver with the 'unspare' flag set.
3561 if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) {
3562 newvd = vd->vdev_child[0];
3563 oldvd = vd->vdev_child[1];
3565 if (newvd->vdev_unspare &&
3566 vdev_dtl_empty(newvd, DTL_MISSING) &&
3567 !vdev_dtl_required(oldvd)) {
3568 newvd->vdev_unspare = 0;
3577 spa_vdev_resilver_done(spa_t *spa)
3579 vdev_t *vd, *pvd, *ppvd;
3580 uint64_t guid, sguid, pguid, ppguid;
3582 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3584 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
3585 pvd = vd->vdev_parent;
3586 ppvd = pvd->vdev_parent;
3587 guid = vd->vdev_guid;
3588 pguid = pvd->vdev_guid;
3589 ppguid = ppvd->vdev_guid;
3592 * If we have just finished replacing a hot spared device, then
3593 * we need to detach the parent's first child (the original hot spare).
3596 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0) {
3597 ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
3598 ASSERT(ppvd->vdev_children == 2);
3599 sguid = ppvd->vdev_child[1]->vdev_guid;
3601 spa_config_exit(spa, SCL_ALL, FTAG);
3602 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
3604 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
3606 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3609 spa_config_exit(spa, SCL_ALL, FTAG);
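/*
 * Worked example (hypothetical layout): replacing a disk that is
 * already spared yields the tree S(R(old, new), spare). Once the hunt
 * finds the completed 'replacing' vdev, the two detaches above
 * collapse it: first 'old' leaves R, then the spare leaves S, leaving
 * only 'new' in place.
 */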
3613 * Update the stored path or FRU for this vdev. Dirty the vdev configuration,
3614 * relying on spa_vdev_enter/exit() to synchronize the labels and cache.
3617 spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
3623 txg = spa_vdev_enter(spa);
3625 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
3626 return (spa_vdev_exit(spa, NULL, txg, ENOENT));
3628 if (!vd->vdev_ops->vdev_op_leaf)
3629 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3632 spa_strfree(vd->vdev_path);
3633 vd->vdev_path = spa_strdup(value);
3635 if (vd->vdev_fru != NULL)
3636 spa_strfree(vd->vdev_fru);
3637 vd->vdev_fru = spa_strdup(value);
3640 vdev_config_dirty(vd->vdev_top);
3642 return (spa_vdev_exit(spa, NULL, txg, 0));
3646 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
3648 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
3652 spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
3654 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
3658 * ==========================================================================
3659 * SPA scrubbing
3660 * ==========================================================================
3664 spa_scrub(spa_t *spa, pool_scrub_type_t type)
3666 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
3668 if ((uint_t)type >= POOL_SCRUB_TYPES)
3672 * If a resilver was requested, but there is no DTL on a
3673 * writeable leaf device, we have nothing to do.
3675 if (type == POOL_SCRUB_RESILVER &&
3676 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
3677 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
3681 if (type == POOL_SCRUB_EVERYTHING &&
3682 spa->spa_dsl_pool->dp_scrub_func != SCRUB_FUNC_NONE &&
3683 spa->spa_dsl_pool->dp_scrub_isresilver)
3686 if (type == POOL_SCRUB_EVERYTHING || type == POOL_SCRUB_RESILVER) {
3687 return (dsl_pool_scrub_clean(spa->spa_dsl_pool));
3688 } else if (type == POOL_SCRUB_NONE) {
3689 return (dsl_pool_scrub_cancel(spa->spa_dsl_pool));
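/*
 * Mapping from userland (sketch): "zpool scrub tank" requests
 * POOL_SCRUB_EVERYTHING and "zpool scrub -s tank" requests
 * POOL_SCRUB_NONE, landing in dsl_pool_scrub_clean() and
 * dsl_pool_scrub_cancel() respectively; POOL_SCRUB_RESILVER is
 * requested internally, e.g. by spa_vdev_attach() above.
 */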
3696 * ==========================================================================
3697 * SPA async task processing
3698 * ==========================================================================
3702 spa_async_remove(spa_t *spa, vdev_t *vd)
3704 if (vd->vdev_remove_wanted) {
3705 vd->vdev_remove_wanted = 0;
3706 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
3709 * We want to clear the stats, but we don't want to do a full
3710 * vdev_clear() as that will cause us to throw away
3711 * degraded/faulted state as well as attempt to reopen the
3712 * device, all of which is a waste.
3714 vd->vdev_stat.vs_read_errors = 0;
3715 vd->vdev_stat.vs_write_errors = 0;
3716 vd->vdev_stat.vs_checksum_errors = 0;
3718 vdev_state_dirty(vd->vdev_top);
3721 for (int c = 0; c < vd->vdev_children; c++)
3722 spa_async_remove(spa, vd->vdev_child[c]);
3726 spa_async_probe(spa_t *spa, vdev_t *vd)
3728 if (vd->vdev_probe_wanted) {
3729 vd->vdev_probe_wanted = 0;
3730 vdev_reopen(vd); /* vdev_open() does the actual probe */
3733 for (int c = 0; c < vd->vdev_children; c++)
3734 spa_async_probe(spa, vd->vdev_child[c]);
3738 spa_async_thread(void *arg)
3743 ASSERT(spa->spa_sync_on);
3745 mutex_enter(&spa->spa_async_lock);
3746 tasks = spa->spa_async_tasks;
3747 spa->spa_async_tasks = 0;
3748 mutex_exit(&spa->spa_async_lock);
3751 * See if the config needs to be updated.
3753 if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
3754 mutex_enter(&spa_namespace_lock);
3755 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
3756 mutex_exit(&spa_namespace_lock);
3760 * See if any devices need to be marked REMOVED.
3762 if (tasks & SPA_ASYNC_REMOVE) {
3763 spa_vdev_state_enter(spa);
3764 spa_async_remove(spa, spa->spa_root_vdev);
3765 for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
3766 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
3767 for (int i = 0; i < spa->spa_spares.sav_count; i++)
3768 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
3769 (void) spa_vdev_state_exit(spa, NULL, 0);
3773 * See if any devices need to be probed.
3775 if (tasks & SPA_ASYNC_PROBE) {
3776 spa_vdev_state_enter(spa);
3777 spa_async_probe(spa, spa->spa_root_vdev);
3778 (void) spa_vdev_state_exit(spa, NULL, 0);
3782 * If any devices are done replacing, detach them.
3784 if (tasks & SPA_ASYNC_RESILVER_DONE)
3785 spa_vdev_resilver_done(spa);
3788 * Kick off a resilver.
3790 if (tasks & SPA_ASYNC_RESILVER)
3791 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER) == 0);
3794 * Let the world know that we're done.
3796 mutex_enter(&spa->spa_async_lock);
3797 spa->spa_async_thread = NULL;
3798 cv_broadcast(&spa->spa_async_cv);
3799 mutex_exit(&spa->spa_async_lock);
3804 spa_async_suspend(spa_t *spa)
3806 mutex_enter(&spa->spa_async_lock);
3807 spa->spa_async_suspended++;
3808 while (spa->spa_async_thread != NULL)
3809 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
3810 mutex_exit(&spa->spa_async_lock);
3814 spa_async_resume(spa_t *spa)
3816 mutex_enter(&spa->spa_async_lock);
3817 ASSERT(spa->spa_async_suspended != 0);
3818 spa->spa_async_suspended--;
3819 mutex_exit(&spa->spa_async_lock);
3823 spa_async_dispatch(spa_t *spa)
3825 mutex_enter(&spa->spa_async_lock);
3826 if (spa->spa_async_tasks && !spa->spa_async_suspended &&
3827 spa->spa_async_thread == NULL &&
3828 rootdir != NULL && !vn_is_readonly(rootdir))
3829 spa->spa_async_thread = thread_create(NULL, 0,
3830 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
3831 mutex_exit(&spa->spa_async_lock);
3835 spa_async_request(spa_t *spa, int task)
3837 mutex_enter(&spa->spa_async_lock);
3838 spa->spa_async_tasks |= task;
3839 mutex_exit(&spa->spa_async_lock);
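/*
 * Typical flow (sketch): a context that must not block records work
 * cheaply, e.g.
 *
 *	spa_async_request(spa, SPA_ASYNC_RESILVER);
 *
 * and the next spa_sync() calls spa_async_dispatch(), which spawns
 * spa_async_thread() to carry the request out away from the sync path.
 */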
3843 * ==========================================================================
3844 * SPA syncing routines
3845 * ==========================================================================
3849 spa_sync_deferred_frees(spa_t *spa, uint64_t txg)
3851 bplist_t *bpl = &spa->spa_sync_bplist;
3859 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
3861 while (bplist_iterate(bpl, &itor, &blk) == 0) {
3862 ASSERT(blk.blk_birth < txg);
3863 zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL,
3864 ZIO_FLAG_MUSTSUCCEED));
3867 error = zio_wait(zio);
3868 ASSERT3U(error, ==, 0);
3870 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
3871 bplist_vacate(bpl, tx);
3874 * Pre-dirty the first block so we sync to convergence faster.
3875 * (Usually only the first block is needed.)
3877 dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx);
3882 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
3884 char *packed = NULL;
3889 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
3892 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
3893 * information. This avoids the dbuf_will_dirty() path and
3894 * saves us a pre-read to get data we don't actually care about.
3896 bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE);
3897 packed = kmem_alloc(bufsize, KM_SLEEP);
3899 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
3901 bzero(packed + nvsize, bufsize - nvsize);
3903 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
3905 kmem_free(packed, bufsize);
3907 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
3908 dmu_buf_will_dirty(db, tx);
3909 *(uint64_t *)db->db_data = nvsize;
3910 dmu_buf_rele(db, FTAG);
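/*
 * On-disk layout produced above (sketch): the object's data holds the
 * XDR-packed nvlist, zero-padded to a multiple of SPA_CONFIG_BLOCKSIZE,
 * while the bonus buffer holds the true packed size as one uint64_t.
 * A reader therefore takes the bonus buffer first to learn how many
 * bytes of the object to unpack.
 */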
3914 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
3915 const char *config, const char *entry)
3925 * Update the MOS nvlist describing the list of available devices.
3926 * spa_validate_aux() will have already made sure this nvlist is
3927 * valid and the vdevs are labeled appropriately.
3929 if (sav->sav_object == 0) {
3930 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
3931 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
3932 sizeof (uint64_t), tx);
3933 VERIFY(zap_update(spa->spa_meta_objset,
3934 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
3935 &sav->sav_object, tx) == 0);
3938 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3939 if (sav->sav_count == 0) {
3940 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
3942 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
3943 for (i = 0; i < sav->sav_count; i++)
3944 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
3945 B_FALSE, B_FALSE, B_TRUE);
3946 VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
3947 sav->sav_count) == 0);
3948 for (i = 0; i < sav->sav_count; i++)
3949 nvlist_free(list[i]);
3950 kmem_free(list, sav->sav_count * sizeof (void *));
3953 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
3954 nvlist_free(nvroot);
3956 sav->sav_sync = B_FALSE;
3960 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
3964 if (list_is_empty(&spa->spa_config_dirty_list))
3967 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
3969 config = spa_config_generate(spa, spa->spa_root_vdev,
3970 dmu_tx_get_txg(tx), B_FALSE);
3972 spa_config_exit(spa, SCL_STATE, FTAG);
3974 if (spa->spa_config_syncing)
3975 nvlist_free(spa->spa_config_syncing);
3976 spa->spa_config_syncing = config;
3978 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
3982 * Set zpool properties.
3985 spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
3988 objset_t *mos = spa->spa_meta_objset;
3989 nvlist_t *nvp = arg2;
3994 const char *propname;
3995 zprop_type_t proptype;
3997 mutex_enter(&spa->spa_props_lock);
4000 while ((elem = nvlist_next_nvpair(nvp, elem))) {
4001 switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
4002 case ZPOOL_PROP_VERSION:
4004 * Only set version for non-zpool-creation cases
4005 * (set/import). spa_create() needs special care
4006 * for version setting.
4008 if (tx->tx_txg != TXG_INITIAL) {
4009 VERIFY(nvpair_value_uint64(elem,
4011 ASSERT(intval <= SPA_VERSION);
4012 ASSERT(intval >= spa_version(spa));
4013 spa->spa_uberblock.ub_version = intval;
4014 vdev_config_dirty(spa->spa_root_vdev);
4018 case ZPOOL_PROP_ALTROOT:
4020 * 'altroot' is a non-persistent property. It should
4021 * have been set temporarily at creation or import time.
4023 ASSERT(spa->spa_root != NULL);
4026 case ZPOOL_PROP_CACHEFILE:
4028 * 'cachefile' is also a non-persistent property.
4033 * Set pool property values in the poolprops mos object.
4035 if (spa->spa_pool_props_object == 0) {
4036 objset_t *mos = spa->spa_meta_objset;
4038 VERIFY((spa->spa_pool_props_object =
4039 zap_create(mos, DMU_OT_POOL_PROPS,
4040 DMU_OT_NONE, 0, tx)) > 0);
4042 VERIFY(zap_update(mos,
4043 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
4044 8, 1, &spa->spa_pool_props_object, tx)
4048 /* normalize the property name */
4049 propname = zpool_prop_to_name(prop);
4050 proptype = zpool_prop_get_type(prop);
4052 if (nvpair_type(elem) == DATA_TYPE_STRING) {
4053 ASSERT(proptype == PROP_TYPE_STRING);
4054 VERIFY(nvpair_value_string(elem, &strval) == 0);
4055 VERIFY(zap_update(mos,
4056 spa->spa_pool_props_object, propname,
4057 1, strlen(strval) + 1, strval, tx) == 0);
4059 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
4060 VERIFY(nvpair_value_uint64(elem, &intval) == 0);
4062 if (proptype == PROP_TYPE_INDEX) {
4064 VERIFY(zpool_prop_index_to_string(
4065 prop, intval, &unused) == 0);
4067 VERIFY(zap_update(mos,
4068 spa->spa_pool_props_object, propname,
4069 8, 1, &intval, tx) == 0);
4071 ASSERT(0); /* not allowed */
4075 case ZPOOL_PROP_DELEGATION:
4076 spa->spa_delegation = intval;
4078 case ZPOOL_PROP_BOOTFS:
4079 spa->spa_bootfs = intval;
4081 case ZPOOL_PROP_FAILUREMODE:
4082 spa->spa_failmode = intval;
4089 /* log internal history if this is not a zpool create */
4090 if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY &&
4091 tx->tx_txg != TXG_INITIAL) {
4092 spa_history_internal_log(LOG_POOL_PROPSET,
4093 spa, tx, cr, "%s %lld %s",
4094 nvpair_name(elem), intval, spa_name(spa));
4098 mutex_exit(&spa->spa_props_lock);
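/*
 * Example (sketch): "zpool set failmode=continue tank" builds an nvlist
 * mapping "failmode" to its index value; spa_prop_set() pushes that
 * nvlist through a sync task so this function runs in syncing context,
 * writing the value to the poolprops ZAP and caching it in
 * spa->spa_failmode above.
 */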
4102 * Sync the specified transaction group. New blocks may be dirtied as
4103 * part of the process, so we iterate until it converges.
4106 spa_sync(spa_t *spa, uint64_t txg)
4108 dsl_pool_t *dp = spa->spa_dsl_pool;
4109 objset_t *mos = spa->spa_meta_objset;
4110 bplist_t *bpl = &spa->spa_sync_bplist;
4111 vdev_t *rvd = spa->spa_root_vdev;
4118 * Lock out configuration changes.
4120 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4122 spa->spa_syncing_txg = txg;
4123 spa->spa_sync_pass = 0;
4126 * If there are any pending vdev state changes, convert them
4127 * into config changes that go out with this transaction group.
4129 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4130 while (list_head(&spa->spa_state_dirty_list) != NULL) {
4132 * We need the write lock here because, for aux vdevs,
4133 * calling vdev_config_dirty() modifies sav_config.
4134 * This is ugly and will become unnecessary when we
4135 * eliminate the aux vdev wart by integrating all vdevs
4136 * into the root vdev tree.
4138 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
4139 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
4140 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
4141 vdev_state_clean(vd);
4142 vdev_config_dirty(vd);
4144 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
4145 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
4147 spa_config_exit(spa, SCL_STATE, FTAG);
4149 VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj));
4151 tx = dmu_tx_create_assigned(dp, txg);
4154 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
4155 * set spa_deflate if we have no raid-z vdevs.
4157 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
4158 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
4161 for (i = 0; i < rvd->vdev_children; i++) {
4162 vd = rvd->vdev_child[i];
4163 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
4166 if (i == rvd->vdev_children) {
4167 spa->spa_deflate = TRUE;
4168 VERIFY(0 == zap_add(spa->spa_meta_objset,
4169 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
4170 sizeof (uint64_t), 1, &spa->spa_deflate, tx));
4174 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
4175 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
4176 dsl_pool_create_origin(dp, tx);
4178 /* Keeping the origin open increases spa_minref */
4179 spa->spa_minref += 3;
4182 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
4183 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
4184 dsl_pool_upgrade_clones(dp, tx);
4188 * If anything has changed in this txg, push the deferred frees
4189 * from the previous txg. If not, leave them alone so that we
4190 * don't generate work on an otherwise idle system.
4192 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
4193 !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
4194 !txg_list_empty(&dp->dp_sync_tasks, txg))
4195 spa_sync_deferred_frees(spa, txg);
4198 * Iterate to convergence.
4201 spa->spa_sync_pass++;
4203 spa_sync_config_object(spa, tx);
4204 spa_sync_aux_dev(spa, &spa->spa_spares, tx,
4205 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
4206 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
4207 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
4208 spa_errlog_sync(spa, txg);
4209 dsl_pool_sync(dp, txg);
4212 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) {
4217 bplist_sync(bpl, tx);
4218 } while (dirty_vdevs);
4222 dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);
4225 * Rewrite the vdev configuration (which includes the uberblock)
4226 * to commit the transaction group.
4228 * If there are no dirty vdevs, we sync the uberblock to a few
4229 * random top-level vdevs that are known to be visible in the
4230 * config cache (see spa_vdev_add() for a complete description).
4231 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
4235 * We hold SCL_STATE to prevent vdev open/close/etc.
4236 * while we're attempting to write the vdev labels.
4238 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4240 if (list_is_empty(&spa->spa_config_dirty_list)) {
4241 vdev_t *svd[SPA_DVAS_PER_BP];
4243 int children = rvd->vdev_children;
4244 int c0 = spa_get_random(children);
4247 for (c = 0; c < children; c++) {
4248 vd = rvd->vdev_child[(c0 + c) % children];
4249 if (vd->vdev_ms_array == 0 || vd->vdev_islog)
4251 svd[svdcount++] = vd;
4252 if (svdcount == SPA_DVAS_PER_BP)
4255 error = vdev_config_sync(svd, svdcount, txg, B_FALSE);
4257 error = vdev_config_sync(svd, svdcount, txg,
4260 error = vdev_config_sync(rvd->vdev_child,
4261 rvd->vdev_children, txg, B_FALSE);
4263 error = vdev_config_sync(rvd->vdev_child,
4264 rvd->vdev_children, txg, B_TRUE);
4267 spa_config_exit(spa, SCL_STATE, FTAG);
4271 zio_suspend(spa, NULL);
4272 zio_resume_wait(spa);
4277 * Clear the dirty config list.
4279 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
4280 vdev_config_clean(vd);
4283 * Now that the new config has synced transactionally,
4284 * let it become visible to the config cache.
4286 if (spa->spa_config_syncing != NULL) {
4287 spa_config_set(spa, spa->spa_config_syncing);
4288 spa->spa_config_txg = txg;
4289 spa->spa_config_syncing = NULL;
4292 spa->spa_ubsync = spa->spa_uberblock;
4295 * Clean up the ZIL records for the synced txg.
4297 dsl_pool_zil_clean(dp);
4300 * Update usable space statistics.
4302 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
4303 vdev_sync_done(vd, txg);
4306 * It had better be the case that we didn't dirty anything
4307 * since vdev_config_sync().
4309 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
4310 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
4311 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
4312 ASSERT(bpl->bpl_queue == NULL);
4314 spa_config_exit(spa, SCL_CONFIG, FTAG);
4317 * If any async tasks have been requested, kick them off.
4319 spa_async_dispatch(spa);
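/*
 * Caller context (sketch): spa_sync() is invoked once per transaction
 * group by the pool's txg sync thread, so everything above runs in
 * syncing context; txg_wait_synced() callers are released only after
 * this function returns for their txg.
 */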
4323 * Sync all pools. We don't want to hold the namespace lock across these
4324 * operations, so we take a reference on the spa_t and drop the lock during the sync.
4328 spa_sync_allpools(void)
4331 mutex_enter(&spa_namespace_lock);
4332 while ((spa = spa_next(spa)) != NULL) {
4333 if (spa_state(spa) != POOL_STATE_ACTIVE || spa_suspended(spa))
4335 spa_open_ref(spa, FTAG);
4336 mutex_exit(&spa_namespace_lock);
4337 txg_wait_synced(spa_get_dsl(spa), 0);
4338 mutex_enter(&spa_namespace_lock);
4339 spa_close(spa, FTAG);
4341 mutex_exit(&spa_namespace_lock);
4345 * ==========================================================================
4346 * Miscellaneous routines
4347 * ==========================================================================
4351 * Remove all pools in the system.
4359 * Remove all cached state. All pools should be closed now,
4360 * so every spa in the AVL tree should be unreferenced.
4362 mutex_enter(&spa_namespace_lock);
4363 while ((spa = spa_next(NULL)) != NULL) {
4365 * Stop async tasks. The async thread may need to detach
4366 * a device that's been replaced, which requires grabbing
4367 * spa_namespace_lock, so we must drop it here.
4369 spa_open_ref(spa, FTAG);
4370 mutex_exit(&spa_namespace_lock);
4371 spa_async_suspend(spa);
4372 mutex_enter(&spa_namespace_lock);
4373 spa_close(spa, FTAG);
4375 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4377 spa_deactivate(spa);
4381 mutex_exit(&spa_namespace_lock);
4385 spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
4390 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
4394 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
4395 vd = spa->spa_l2cache.sav_vdevs[i];
4396 if (vd->vdev_guid == guid)
4400 for (i = 0; i < spa->spa_spares.sav_count; i++) {
4401 vd = spa->spa_spares.sav_vdevs[i];
4402 if (vd->vdev_guid == guid)
4411 spa_upgrade(spa_t *spa, uint64_t version)
4413 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4416 * This should only be called for a non-faulted pool, and since a
4417 * future version would result in an unopenable pool, this shouldn't be possible.
4420 ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
4421 ASSERT(version >= spa->spa_uberblock.ub_version);
4423 spa->spa_uberblock.ub_version = version;
4424 vdev_config_dirty(spa->spa_root_vdev);
4426 spa_config_exit(spa, SCL_ALL, FTAG);
4428 txg_wait_synced(spa_get_dsl(spa), 0);
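/*
 * Example (sketch): "zpool upgrade tank" boils down to roughly
 *
 *	spa_upgrade(spa, SPA_VERSION);
 *
 * bumping ub_version, dirtying the root vdev config, and waiting for
 * the txg to sync so every label carries the new version before the
 * command returns.
 */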
4432 spa_has_spare(spa_t *spa, uint64_t guid)
4436 spa_aux_vdev_t *sav = &spa->spa_spares;
4438 for (i = 0; i < sav->sav_count; i++)
4439 if (sav->sav_vdevs[i]->vdev_guid == guid)
4442 for (i = 0; i < sav->sav_npending; i++) {
4443 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
4444 &spareguid) == 0 && spareguid == guid)
4452 * Check if a pool has an active shared spare device.
4453 * Note: the reference count of an active spare is 2: once as a spare and once as a replacing device.
4456 spa_has_active_shared_spare(spa_t *spa)
4460 spa_aux_vdev_t *sav = &spa->spa_spares;
4462 for (i = 0; i < sav->sav_count; i++) {
4463 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
4464 &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
4473 * Post a sysevent corresponding to the given event. The 'name' must be one of
4474 * the event definitions in sys/sysevent/eventdefs.h. The payload will be
4475 * filled in from the spa and (optionally) the vdev. This doesn't do anything
4476 * in the userland libzpool, as we don't want consumers to misinterpret ztest
4477 * or zdb as real changes.
4480 spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
4485 sysevent_attr_list_t *attr = NULL;
4486 sysevent_value_t value;
4489 ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
4492 value.value_type = SE_DATA_TYPE_STRING;
4493 value.value.sv_string = spa_name(spa);
4494 if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
4497 value.value_type = SE_DATA_TYPE_UINT64;
4498 value.value.sv_uint64 = spa_guid(spa);
4499 if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
4503 value.value_type = SE_DATA_TYPE_UINT64;
4504 value.value.sv_uint64 = vd->vdev_guid;
4505 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
4509 if (vd->vdev_path) {
4510 value.value_type = SE_DATA_TYPE_STRING;
4511 value.value.sv_string = vd->vdev_path;
4512 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
4513 &value, SE_SLEEP) != 0)
4518 if (sysevent_attach_attributes(ev, attr) != 0)
4522 (void) log_sysevent(ev, SE_SLEEP, &eid);
4526 sysevent_free_attr(attr);