/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/sunddi.h>
#include <sys/spa_boot.h>

#include "zfs_prop.h"
#include "zfs_comutil.h"
/* Check hostid on import? */
static int check_hostid = 1;

SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.check_hostid", &check_hostid);
SYSCTL_INT(_vfs_zfs, OID_AUTO, check_hostid, CTLFLAG_RW, &check_hostid, 0,
    "Check hostid on import?");
int zio_taskq_threads[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/*	ISSUE	INTR			*/
	{ 1,	1 },	/* ZIO_TYPE_NULL	*/
	{ 1,	8 },	/* ZIO_TYPE_READ	*/
	{ 8,	1 },	/* ZIO_TYPE_WRITE	*/
	{ 1,	1 },	/* ZIO_TYPE_FREE	*/
	{ 1,	1 },	/* ZIO_TYPE_CLAIM	*/
	{ 1,	1 },	/* ZIO_TYPE_IOCTL	*/
};
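/*
 * Each row above sizes the two taskqs (issue and interrupt) that
 * spa_activate() below creates per I/O type; e.g. reads are issued from a
 * single thread but completed by eight interrupt threads.
 */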
static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}
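/*
 * The resulting nvlist carries one nested nvlist per property; a sketch of
 * the shape (not literal output):
 *
 *	"size"    -> { ZPROP_SOURCE = ZPROP_SRC_NONE,  ZPROP_VALUE = 1234 }
 *	"altroot" -> { ZPROP_SOURCE = ZPROP_SRC_LOCAL, ZPROP_VALUE = "/mnt" }
 */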
/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	uint64_t size = spa_get_space(spa);
	uint64_t used = spa_get_alloc(spa);
	uint64_t cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	/*
	 * readonly properties
	 */
	spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL, size - used, src);

	cap = (size == 0) ? 0 : (used * 100 / size);
	spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
	    spa->spa_root_vdev->vdev_state, src);

	/*
	 * settable properties that are not stored in the pool property object.
	 */
	version = spa_version(spa);
	if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
		src = ZPROP_SRC_DEFAULT;
	else
		src = ZPROP_SRC_LOCAL;
	spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}
/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	objset_t *mos = spa->spa_meta_objset;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more prop to get. */
	if (spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				rw_enter(&dp->dp_config_rwlock, RW_READER);
				if (err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds)) {
					rw_exit(&dp->dp_config_rwlock);
					break;
				}

				strval = kmem_alloc(
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				rw_exit(&dp->dp_config_rwlock);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval,
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);

	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}
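/*
 * A caller might walk the returned list like this (illustrative sketch):
 *
 *	nvlist_t *nvp;
 *	nvpair_t *elem = NULL;
 *
 *	if (spa_prop_get(spa, &nvp) == 0) {
 *		while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL)
 *			... inspect nvpair_name(elem) ...
 *		nvlist_free(nvp);
 *	}
 */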
/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		zpool_prop_t prop;
		char *propname, *strval;
		uint64_t intval;
		objset_t *os;
		char *slash;

		propname = nvpair_name(elem);

		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL)
			return (EINVAL);

		switch (prop) {
		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) || intval > SPA_VERSION))
				error = EINVAL;
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = EINVAL;
			break;

		case ZPOOL_PROP_BOOTFS:
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = ENOTSUP;
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = ENOTSUP;
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				uint64_t compress;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				if (error = dmu_objset_open(strval, DMU_OST_ZFS,
				    DS_MODE_USER | DS_MODE_READONLY, &os))
					break;

				/* We don't support gzip bootable datasets */
				if ((error = dsl_prop_get_integer(strval,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &compress, NULL)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(compress)) {
					error = ENOTSUP;
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_close(os);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = EINVAL;

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = EIO;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = EINVAL;
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = EINVAL;
			break;
		}

		if (error)
			break;
	}

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}
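/*
 * Illustrative sketch of a props nvlist a caller might build, validate,
 * and set through spa_prop_set() below:
 *
 *	nvlist_t *props;
 *
 *	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_uint64(props,
 *	    zpool_prop_to_name(ZPOOL_PROP_DELEGATION), 1) == 0);
 *	VERIFY(nvlist_add_string(props,
 *	    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") == 0);
 *	error = spa_prop_set(spa, props);
 *	nvlist_free(props);
 */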
int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
	    spa, nvp, 3));
}
/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}
/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}
/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}
/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;

	spa->spa_normal_class = metaslab_class_create();
	spa->spa_log_class = metaslab_class_create();

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa->spa_zio_taskq[t][q] = taskq_create("spa_zio",
			    zio_taskq_threads[t][q], maxclsyspri, 50,
			    INT_MAX, TASKQ_PREPOPULATE);
		}
	}

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}
/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);

	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_state_dirty_list);

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			taskq_destroy(spa->spa_zio_taskq[t][q]);
			spa->spa_zio_taskq[t][q] = NULL;
		}
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues. Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;
}
/*
 * Verify a pool configuration, and construct the vdev tree appropriately. This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state. This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t c, children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (error == ENOENT)
		return (0);

	if (error) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (EINVAL);
	}

	for (c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}
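/*
 * The nvlist parsed here mirrors the vdev tree; e.g. for a two-way mirror
 * (schematic sketch only -- a real config carries GUIDs and much more):
 *
 *	type="root"
 *	    children[0]: type="mirror"
 *		children[0]: type="disk", path="/dev/..."
 *		children[1]: type="disk", path="/dev/..."
 */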
/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	mutex_enter(&spa->spa_async_root_lock);
	while (spa->spa_async_root_count != 0)
		cv_wait(&spa->spa_async_root_cv, &spa->spa_async_root_lock);
	mutex_exit(&spa->spa_async_root_lock);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
	}

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	for (i = 0; i < spa->spa_l2cache.sav_count; i++)
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;
}
/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);

		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process. For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in). During this phase we open and
	 * validate each vdev on the spare list. If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev. Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise). Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;

		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}
/*
 * Load (or re-load) the current list of vdevs describing the active l2cache for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid, size;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd)) {
				size = vdev_get_rsize(vd);
				l2arc_add_vdev(spa, vd,
				    VDEV_LABEL_START_SIZE,
				    size - VDEV_LABEL_START_SIZE);
			}
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			if ((spa_mode & FWRITE) &&
			    spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL &&
			    l2arc_vdev_present(vd)) {
				l2arc_remove_vdev(vd);
			}
			(void) vdev_close(vd);
			spa_l2cache_remove(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);

	kmem_free(l2cache, sav->sav_count * sizeof (void *));
}
static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}
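/*
 * On disk, such objects (e.g. the pool config) are stored as a packed
 * nvlist in the object's data, with the packed size kept as a uint64_t in
 * the bonus buffer -- which is why the size is read via dmu_bonus_hold()
 * above before issuing the dmu_read() of the packed stream.
 */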
/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
 */
static void
spa_check_removed(vdev_t *vd)
{
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		spa_check_removed(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
		zfs_post_autoreplace(vd->vdev_spa, vd);
		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
	}
}
/*
 * Check for missing log devices
 */
static int
spa_check_logs(spa_t *spa)
{
	switch (spa->spa_log_state) {
	case SPA_LOG_MISSING:
		/* need to recheck in case slog has been restored */
	case SPA_LOG_UNKNOWN:
		if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL,
		    DS_FIND_CHILDREN)) {
			spa->spa_log_state = SPA_LOG_MISSING;
			return (1);
		}
		break;

	case SPA_LOG_CLEAR:
		(void) dmu_objset_find(spa->spa_name, zil_clear_log_chain, NULL,
		    DS_FIND_CHILDREN);
		break;
	}
	spa->spa_log_state = SPA_LOG_GOOD;
	return (0);
}
/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 */
static int
spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
{
	int error = 0;
	nvlist_t *nvroot = NULL;
	vdev_t *rvd;
	uberblock_t *ub = &spa->spa_uberblock;
	uint64_t config_cache_txg = spa->spa_config_txg;
	uint64_t pool_guid;
	uint64_t version;
	uint64_t autoreplace = 0;
	char *ereport = FM_EREPORT_ZFS_POOL;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa->spa_load_state = state;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Versioning wasn't explicitly added to the label until later, so if
	 * it's not present treat it as the initial version.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
		version = SPA_VERSION_INITIAL;

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &spa->spa_config_txg);

	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0)) {
		error = EEXIST;
		goto out;
	}

	spa->spa_load_guid = pool_guid;

	/*
	 * Parse the configuration into a vdev tree. We explicitly set the
	 * value that will be returned by spa_version() since parsing the
	 * configuration requires knowing the version number.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	spa->spa_ubsync.ub_version = version;
	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0)
		goto out;

	ASSERT(spa->spa_root_vdev == rvd);
	ASSERT(spa_guid(spa) == pool_guid);

	/*
	 * Try to open all vdevs, loading each label in the process.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = vdev_open(rvd);
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (error != 0)
		goto out;

	/*
	 * Validate the labels for all leaf vdevs. We need to grab the config
	 * lock because all label I/O is done with ZIO_FLAG_CONFIG_WRITER.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = vdev_validate(rvd);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0)
		goto out;

	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Find the best uberblock.
	 */
	vdev_uberblock_load(NULL, rvd, ub);

	/*
	 * If we weren't able to find a single valid uberblock, return failure.
	 */
	if (ub->ub_txg == 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = ENXIO;
		goto out;
	}

	/*
	 * If the pool is newer than the code, we can't open it.
	 */
	if (ub->ub_version > SPA_VERSION) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_VERSION_NEWER);
		error = ENOTSUP;
		goto out;
	}

	/*
	 * If the vdev guid sum doesn't match the uberblock, we have an
	 * incomplete configuration.
	 */
	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_GUID_SUM);
		error = ENXIO;
		goto out;
	}

	/*
	 * Initialize internal SPA structures.
	 */
	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_ubsync = spa->spa_uberblock;
	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
	if (error) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		goto out;
	}
	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (!mosconfig) {
		nvlist_t *newconfig;
		uint64_t hostid;

		if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		if (!spa_is_root(spa) && nvlist_lookup_uint64(newconfig,
		    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
			char *hostname;
			unsigned long myhostid = 0;

			VERIFY(nvlist_lookup_string(newconfig,
			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);

			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
			if (check_hostid && hostid != 0 && myhostid != 0 &&
			    (unsigned long)hostid != myhostid) {
				cmn_err(CE_WARN, "pool '%s' could not be "
				    "loaded as it was last accessed by "
				    "another system (host: %s hostid: 0x%lx). "
				    "See: http://www.sun.com/msg/ZFS-8000-EY",
				    spa_name(spa), hostname,
				    (unsigned long)hostid);
				error = EBADF;
				goto out;
			}
		}

		spa_config_set(spa, newconfig);
		spa_unload(spa);
		spa_deactivate(spa);
		spa_activate(spa);

		return (spa_load(spa, newconfig, state, B_TRUE));
	}

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the bit that tells us to use the new accounting function
	 * (raid-z deflation). If we have an older pool, this will not
	 * be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the persistent error log. If we have an older pool, this will
	 * not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the history object. If we have an older pool, this
	 * will not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
	    sizeof (uint64_t), 1, &spa->spa_history);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load any hot spares for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
		if (load_nvlist(spa, spa->spa_spares.sav_object,
		    &spa->spa_spares.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
	}

	/*
	 * Load any level 2 ARC devices for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_L2CACHE, sizeof (uint64_t), 1,
	    &spa->spa_l2cache.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
		    &spa->spa_l2cache.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE,
			    VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
	}

	if (spa_check_logs(spa)) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LOG);
		error = ENXIO;
		ereport = FM_EREPORT_ZFS_LOG_REPLAY;
		goto out;
	}

	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);

	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);

	if (error && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (error == 0) {
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS),
		    sizeof (uint64_t), 1, &spa->spa_bootfs);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE),
		    sizeof (uint64_t), 1, &autoreplace);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_DELEGATION),
		    sizeof (uint64_t), 1, &spa->spa_delegation);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
		    sizeof (uint64_t), 1, &spa->spa_failmode);
	}

	/*
	 * If the 'autoreplace' property is set, then post a resource notifying
	 * the ZFS DE that it should not issue any faults for unopenable
	 * devices. We also iterate over the vdevs, and post a sysevent for any
	 * unopenable vdevs so that the normal autoreplace handler can take
	 * over.
	 */
	if (autoreplace && state != SPA_LOAD_TRYIMPORT)
		spa_check_removed(spa->spa_root_vdev);

	/*
	 * Load the vdev state for all toplevel vdevs.
	 */
	vdev_load(rvd);

	/*
	 * Propagate the leaf DTLs we just loaded all the way up the tree.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * Check the state of the root vdev. If it can't be opened, it
	 * indicates one or more toplevel vdevs are faulted.
	 */
	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) {
		dmu_tx_t *tx;
		int need_update = B_FALSE;
		int c;

		/*
		 * Claim log blocks that haven't been committed yet.
		 * This must all happen in a single txg.
		 */
		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		(void) dmu_objset_find(spa_name(spa),
		    zil_claim, tx, DS_FIND_CHILDREN);
		dmu_tx_commit(tx);

		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT)
			need_update = B_TRUE;

		for (c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asynchronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
	}

	error = 0;
out:
	spa->spa_minref = refcount_count(&spa->spa_refcount);
	if (error && error != EBADF)
		zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
	spa->spa_load_state = SPA_LOAD_NONE;
	spa->spa_ena = 0;

	return (error);
}
/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is sent
 * down from userland, instead of grabbed from the configuration cache. For the
 * case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics at
 * the same time we open the pool, without having to keep around the spa_t in
 * some ambiguous state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
	spa_t *spa;
	int error;
	int locked = B_FALSE;

	*spapp = NULL;

	/*
	 * As disgusting as this is, we need to support recursive calls to this
	 * function because dsl_dir_open() is called during spa_load(), and ends
	 * up calling spa_open() again. The real fix is to figure out how to
	 * avoid dsl_dir_open() calling this in the first place.
	 */
	if (mutex_owner(&spa_namespace_lock) != curthread) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	if ((spa = spa_lookup(pool)) == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}
	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {

		spa_activate(spa);

		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);

		if (error == EBADF) {
			/*
			 * If vdev_validate() returns failure (indicated by
			 * EBADF), it indicates that one of the vdevs indicates
			 * that the pool has been exported or destroyed. If
			 * this is the case, the config cache is out of sync and
			 * we should remove the pool from the namespace.
			 */
			spa_unload(spa);
			spa_deactivate(spa);
			spa_config_sync(spa, B_TRUE, B_TRUE);
			spa_remove(spa);
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (ENOENT);
		}

		if (error) {
			/*
			 * We can't open the pool, but we still have useful
			 * information: the state of each vdev after the
			 * attempted vdev_open(). Return this to the user.
			 */
			if (config != NULL && spa->spa_root_vdev != NULL)
				*config = spa_config_generate(spa, NULL, -1ULL,
				    B_TRUE);
			spa_unload(spa);
			spa_deactivate(spa);
			spa->spa_last_open_failed = B_TRUE;
			if (locked)
				mutex_exit(&spa_namespace_lock);
			*spapp = NULL;
			return (error);
		} else {
			spa->spa_last_open_failed = B_FALSE;
		}
	}

	spa_open_ref(spa, tag);

	if (locked)
		mutex_exit(&spa_namespace_lock);

	*spapp = spa;

	if (config != NULL)
		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);

	return (0);
}

int
spa_open(const char *name, spa_t **spapp, void *tag)
{
	return (spa_open_common(name, spapp, tag, NULL));
}
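/*
 * Typical usage (illustrative sketch; "tank" is a hypothetical pool name):
 * every successful spa_open() must be balanced by a spa_close() with the
 * same tag.
 *
 *	spa_t *spa;
 *	int error;
 *
 *	if ((error = spa_open("tank", &spa, FTAG)) != 0)
 *		return (error);
 *	... operate on the pool ...
 *	spa_close(spa, FTAG);
 */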
/*
 * Lookup the given spa_t, incrementing the inject count in the process,
 * preventing it from being exported or destroyed.
 */
spa_t *
spa_inject_addref(char *name)
{
	spa_t *spa;

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (NULL);
	}
	spa->spa_inject_ref++;
	mutex_exit(&spa_namespace_lock);

	return (spa);
}

void
spa_inject_delref(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);
	spa->spa_inject_ref--;
	mutex_exit(&spa_namespace_lock);
}
/*
 * Add spares device information to the nvlist.
 */
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
	nvlist_t **spares;
	uint_t i, nspares;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_stat_t *vs;
	uint_t vsc;
	uint64_t pool;

	if (spa->spa_spares.sav_count == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
	if (nspares != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

		/*
		 * Go through and find any spares which have since been
		 * repurposed as an active spare. If this is the case, update
		 * their status appropriately.
		 */
		for (i = 0; i < nspares; i++) {
			VERIFY(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (spa_spare_exists(guid, &pool, NULL) &&
			    pool != 0ULL) {
				VERIFY(nvlist_lookup_uint64_array(
				    spares[i], ZPOOL_CONFIG_STATS,
				    (uint64_t **)&vs, &vsc) == 0);
				vs->vs_state = VDEV_STATE_CANT_OPEN;
				vs->vs_aux = VDEV_AUX_SPARED;
			}
		}
	}
}
1620 spa_add_l2cache(spa_t *spa, nvlist_t *config)
1623 uint_t i, j, nl2cache;
1630 if (spa->spa_l2cache.sav_count == 0)
1633 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
1635 VERIFY(nvlist_lookup_nvlist(config,
1636 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1637 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
1638 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1639 if (nl2cache != 0) {
1640 VERIFY(nvlist_add_nvlist_array(nvroot,
1641 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
1642 VERIFY(nvlist_lookup_nvlist_array(nvroot,
1643 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1646 * Update level 2 cache device stats.
1649 for (i = 0; i < nl2cache; i++) {
1650 VERIFY(nvlist_lookup_uint64(l2cache[i],
1651 ZPOOL_CONFIG_GUID, &guid) == 0);
1654 for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
1656 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
1657 vd = spa->spa_l2cache.sav_vdevs[j];
1663 VERIFY(nvlist_lookup_uint64_array(l2cache[i],
1664 ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
1665 vdev_get_stats(vd, vs);
1669 spa_config_exit(spa, SCL_CONFIG, FTAG);
int
spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
{
	int error;
	spa_t *spa;

	*config = NULL;
	error = spa_open_common(name, &spa, FTAG, config);

	if (spa && *config != NULL) {
		VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
		    spa_get_errlog_size(spa)) == 0);

		if (spa_suspended(spa))
			VERIFY(nvlist_add_uint64(*config,
			    ZPOOL_CONFIG_SUSPENDED, spa->spa_failmode) == 0);

		spa_add_spares(spa, *config);
		spa_add_l2cache(spa, *config);
	}

	/*
	 * We want to get the alternate root even for faulted pools, so we cheat
	 * and call spa_lookup() directly.
	 */
	if (altroot) {
		if (spa == NULL) {
			mutex_enter(&spa_namespace_lock);
			spa = spa_lookup(name);
			if (spa)
				spa_altroot(spa, altroot, buflen);
			else
				altroot[0] = '\0';
			spa = NULL;
			mutex_exit(&spa_namespace_lock);
		} else {
			spa_altroot(spa, altroot, buflen);
		}
	}

	if (spa != NULL)
		spa_close(spa, FTAG);

	return (error);
}
/*
 * Validate that the auxiliary device array is well formed. We must have an
 * array of nvlists, each which describes a valid leaf vdev. If this is an
 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
 * specified, as long as they are well-formed.
 */
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
    spa_aux_vdev_t *sav, const char *config, uint64_t version,
    vdev_labeltype_t label)
{
	nvlist_t **dev;
	uint_t i, ndev;
	vdev_t *vd;
	int error;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * It's acceptable to have no devs specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
		return (0);

	if (ndev == 0)
		return (EINVAL);

	/*
	 * Make sure the pool is formatted with a version that supports this
	 * device type.
	 */
	if (spa_version(spa) < version)
		return (ENOTSUP);

	/*
	 * Set the pending device list so we correctly handle device in-use
	 * checking.
	 */
	sav->sav_pending = dev;
	sav->sav_npending = ndev;

	for (i = 0; i < ndev; i++) {
		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
		    mode)) != 0)
			goto out;

		if (!vd->vdev_ops->vdev_op_leaf) {
			vdev_free(vd);
			error = EINVAL;
			goto out;
		}

		/*
		 * The L2ARC currently only supports disk devices in
		 * kernel context.  For user-level testing, we allow it.
		 */
#ifdef _KERNEL
		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
			error = ENOTBLK;
			goto out;
		}
#endif
		vd->vdev_top = vd;

		if ((error = vdev_open(vd)) == 0 &&
		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
			    vd->vdev_guid) == 0);
		}

		vdev_free(vd);

		if (error &&
		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
			goto out;
		else
			error = 0;
	}

out:
	sav->sav_pending = NULL;
	sav->sav_npending = 0;
	return (error);
}
static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
	int error;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
	    VDEV_LABEL_SPARE)) != 0) {
		return (error);
	}

	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
	    VDEV_LABEL_L2CACHE));
}
static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
    const char *config)
{
	int i;

	if (sav->sav_config != NULL) {
		nvlist_t **olddevs;
		uint_t oldndevs;
		nvlist_t **newdevs;

		/*
		 * Generate new dev list by concatenating with the
		 * current dev list.
		 */
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
		    &olddevs, &oldndevs) == 0);

		newdevs = kmem_alloc(sizeof (void *) *
		    (ndevs + oldndevs), KM_SLEEP);
		for (i = 0; i < oldndevs; i++)
			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
			    KM_SLEEP) == 0);
		for (i = 0; i < ndevs; i++)
			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
			    KM_SLEEP) == 0);

		VERIFY(nvlist_remove(sav->sav_config, config,
		    DATA_TYPE_NVLIST_ARRAY) == 0);

		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
		    config, newdevs, ndevs + oldndevs) == 0);
		for (i = 0; i < oldndevs + ndevs; i++)
			nvlist_free(newdevs[i]);
		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
	} else {
		/*
		 * Generate a new dev list.
		 */
		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
		    devs, ndevs) == 0);
	}
}
/*
 * Stop and drop level 2 ARC devices
 */
void
spa_l2cache_drop(spa_t *spa)
{
	vdev_t *vd;
	int i;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	for (i = 0; i < sav->sav_count; i++) {
		uint64_t pool;

		vd = sav->sav_vdevs[i];
		ASSERT(vd != NULL);

		if ((spa_mode & FWRITE) &&
		    spa_l2cache_exists(vd->vdev_guid, &pool) && pool != 0ULL &&
		    l2arc_vdev_present(vd)) {
			l2arc_remove_vdev(vd);
		}
		if (vd->vdev_isl2cache)
			spa_l2cache_remove(vd);
		vdev_clear_stats(vd);
		(void) vdev_close(vd);
	}
}
/*
 * Pool Creation
 */
int
spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
    const char *history_str, nvlist_t *zplprops)
{
	spa_t *spa;
	char *altroot = NULL;
	vdev_t *rvd;
	dsl_pool_t *dp;
	dmu_tx_t *tx;
	int c, error = 0;
	uint64_t txg = TXG_INITIAL;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;
	uint64_t version;

	/*
	 * If this pool already exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Allocate a new spa_t structure.
	 */
	(void) nvlist_lookup_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
	spa = spa_add(pool, altroot);
	spa_activate(spa);

	spa->spa_uberblock.ub_txg = txg - 1;

	if (props && (error = spa_prop_validate(spa, props))) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION),
	    &version) != 0)
		version = SPA_VERSION;
	ASSERT(version <= SPA_VERSION);
	spa->spa_uberblock.ub_version = version;
	spa->spa_ubsync = spa->spa_uberblock;

	/*
	 * Create the root vdev.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);

	ASSERT(error != 0 || rvd != NULL);
	ASSERT(error != 0 || spa->spa_root_vdev == rvd);

	if (error == 0 && !zfs_allocatable_devs(nvroot))
		error = EINVAL;

	if (error == 0 &&
	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
	    (error = spa_validate_aux(spa, nvroot, txg,
	    VDEV_ALLOC_ADD)) == 0) {
		for (c = 0; c < rvd->vdev_children; c++)
			vdev_init(rvd->vdev_child[c], txg);
		vdev_config_dirty(rvd);
	}

	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Get the list of spares, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_spares.sav_sync = B_TRUE;
	}

	/*
	 * Get the list of level 2 cache devices, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
	spa->spa_meta_objset = dp->dp_meta_objset;

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Create the pool config object.
	 */
	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
	    DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add pool config");
	}

	/* Newly created pools with the right version are always deflated. */
	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
		spa->spa_deflate = TRUE;
		if (zap_add(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
			cmn_err(CE_PANIC, "failed to add deflate");
		}
	}

	/*
	 * Create the deferred-free bplist object. Turn off compression
	 * because sync-to-convergence takes longer if the blocksize
	 * keeps changing.
	 */
	spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
	    1 << 14, tx);
	dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
	    ZIO_COMPRESS_OFF, tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add bplist");
	}

	/*
	 * Create the pool's history object.
	 */
	if (version >= SPA_VERSION_ZPOOL_HISTORY)
		spa_history_create_obj(spa, tx);

	/*
	 * Set pool properties.
	 */
	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
	if (props)
		spa_sync_props(spa, props, CRED(), tx);

	dmu_tx_commit(tx);

	spa->spa_sync_on = B_TRUE;
	txg_sync_start(spa->spa_dsl_pool);

	/*
	 * We explicitly wait for the first transaction to complete so that our
	 * bean counters are appropriately updated.
	 */
	txg_wait_synced(spa->spa_dsl_pool, txg);

	spa_config_sync(spa, B_FALSE, B_TRUE);

	if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL)
		(void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE);

	mutex_exit(&spa_namespace_lock);
	spa->spa_minref = refcount_count(&spa->spa_refcount);

	return (0);
}
/*
 * Import the given pool into the system. We set up the necessary spa_t and
 * then call spa_load() to do the dirty work.
 */
static int
spa_import_common(const char *pool, nvlist_t *config, nvlist_t *props,
    boolean_t isroot, boolean_t allowfaulted)
{
	spa_t *spa;
	char *altroot = NULL;
	int error, loaderr;
	nvlist_t *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	/*
	 * If a pool with this name exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Create and initialize the spa structure.
	 */
	(void) nvlist_lookup_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
	spa = spa_add(pool, altroot);
	spa_activate(spa);

	if (allowfaulted)
		spa->spa_import_faulted = B_TRUE;
	spa->spa_is_root = isroot;

	/*
	 * Pass off the heavy lifting to spa_load().
	 * Pass TRUE for mosconfig (unless this is a root pool) because
	 * the user-supplied config is actually the one to trust when
	 * doing an import.
	 */
	loaderr = error = spa_load(spa, config, SPA_LOAD_IMPORT, !isroot);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	/*
	 * Toss any existing sparelist, as it doesn't have any validity anymore,
	 * and conflicts with spa_has_spare().
	 */
	if (!isroot && spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
		spa_load_spares(spa);
	}
	if (!isroot && spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
		spa_load_l2cache(spa);
	}

	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (error == 0)
		error = spa_validate_aux(spa, nvroot, -1ULL, VDEV_ALLOC_SPARE);
	if (error == 0)
		error = spa_validate_aux(spa, nvroot, -1ULL,
		    VDEV_ALLOC_L2CACHE);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0 || (props && (error = spa_prop_set(spa, props)))) {
		if (loaderr != 0 && loaderr != EINVAL && allowfaulted) {
			/*
			 * If we failed to load the pool, but 'allowfaulted' is
			 * set, then manually set the config as if the config
			 * passed in was specified in the cache file.
			 */
			error = 0;
			spa->spa_import_faulted = B_FALSE;
			if (spa->spa_config == NULL)
				spa->spa_config = spa_config_generate(spa,
				    NULL, -1ULL, B_TRUE);
			spa_unload(spa);
			spa_deactivate(spa);
			spa_config_sync(spa, B_FALSE, B_TRUE);
		} else {
			spa_unload(spa);
			spa_deactivate(spa);
			spa_remove(spa);
		}
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Override any spares and level 2 cache devices as specified by
	 * the user, as these may have correct device names/devids, etc.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		if (spa->spa_spares.sav_config)
			VERIFY(nvlist_remove(spa->spa_spares.sav_config,
			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
		else
			VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_spares.sav_sync = B_TRUE;
	}
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		if (spa->spa_l2cache.sav_config)
			VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
			    ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
		else
			VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	if (spa_mode & FWRITE) {
		/*
		 * Update the config cache to include the newly-imported pool.
		 */
		spa_config_update_common(spa, SPA_CONFIG_UPDATE_POOL, isroot);
	}

	spa->spa_import_faulted = B_FALSE;
	mutex_exit(&spa_namespace_lock);

	return (0);
}
/*
 * Build a "root" vdev for a top level vdev read in from a rootpool
 * device label.
 */
static void
spa_build_rootpool_config(nvlist_t *config)
{
	nvlist_t *nvtop, *nvroot;
	uint64_t pgid;

	/*
	 * Add this top-level vdev to the child array.
	 */
	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtop)
	    == 0);
	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pgid)
	    == 0);

	/*
	 * Put this pool's top-level vdevs into a root vdev.
	 */
	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT)
	    == 0);
	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &nvtop, 1) == 0);

	/*
	 * Replace the existing vdev_tree with the new root vdev in
	 * this pool's configuration (remove the old, add the new).
	 */
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
	nvlist_free(nvroot);
}
/*
 * Get the root pool information from the root disk, then import the root pool
 * during the system boot up time.
 */
extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);

int
spa_check_rootconf(char *devpath, char *devid, nvlist_t **bestconf,
    uint64_t *besttxg)
{
	nvlist_t *config;
	uint64_t txg;
	int error;

	if (error = vdev_disk_read_rootlabel(devpath, devid, &config))
		return (error);

	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);

	if (bestconf != NULL)
		*bestconf = config;
	else
		nvlist_free(config);
	*besttxg = txg;
	return (0);
}
boolean_t
spa_rootdev_validate(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (B_FALSE);

	return (B_TRUE);
}
/*
 * Given the boot device's physical path or devid, check if the device
 * is in a valid state.  If so, return the configuration from the vdev
 * label.
 */
int
spa_get_rootconf(char *devpath, char *devid, nvlist_t **bestconf)
{
	nvlist_t *conf = NULL;
	uint64_t txg = 0;
	nvlist_t *nvtop, **child;
	char *type;
	char *bootpath = NULL;
	uint_t children, c;
	char *tmp;
	int error;

	if (devpath && ((tmp = strchr(devpath, ' ')) != NULL))
		*tmp = '\0';
	if (error = spa_check_rootconf(devpath, devid, &conf, &txg)) {
		cmn_err(CE_NOTE, "error reading device label");
		return (error);
	}
	if (txg == 0) {
		cmn_err(CE_NOTE, "this device is detached");
		nvlist_free(conf);
		return (EINVAL);
	}

	VERIFY(nvlist_lookup_nvlist(conf, ZPOOL_CONFIG_VDEV_TREE,
	    &nvtop) == 0);
	VERIFY(nvlist_lookup_string(nvtop, ZPOOL_CONFIG_TYPE, &type) == 0);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		if (spa_rootdev_validate(nvtop)) {
			goto out;
		} else {
			nvlist_free(conf);
			return (EINVAL);
		}
	}

	ASSERT(strcmp(type, VDEV_TYPE_MIRROR) == 0);

	VERIFY(nvlist_lookup_nvlist_array(nvtop, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0);

	/*
	 * Go through vdevs in the mirror to see if the given device
	 * has the most recent txg. Only the device with the most
	 * recent txg has valid information and should be booted.
	 */
	for (c = 0; c < children; c++) {
		char *cdevid, *cpath;
		uint64_t tmptxg;

		if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_PHYS_PATH,
		    &cpath) != 0)
			return (EINVAL);
		if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_DEVID,
		    &cdevid) != 0)
			return (EINVAL);
		if ((spa_check_rootconf(cpath, cdevid, NULL,
		    &tmptxg) == 0) && (tmptxg > txg)) {
			txg = tmptxg;
			VERIFY(nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_PATH, &bootpath) == 0);
		}
	}

	/* Does the best device match the one we've booted from? */
	if (bootpath) {
		cmn_err(CE_NOTE, "try booting from '%s'", bootpath);
		return (EINVAL);
	}
out:
	*bestconf = conf;
	return (0);
}
/*
 * Import a root pool.
 *
 * For x86, devpath_list will consist of devid and/or physpath name of
 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
 * The GRUB "findroot" command will return the vdev we should boot.
 *
 * For Sparc, devpath_list consists of the physpath name of the booting device
 * no matter whether the rootpool is a single-device pool or a mirrored pool,
 * e.g.
 *	"/pci@1f,0/ide@d/disk@0,0:a"
 */
int
spa_import_rootpool(char *devpath, char *devid)
{
	nvlist_t *conf = NULL;
	char *pname;
	int error;

	/*
	 * Get the vdev pathname and configuration from the most
	 * recently updated vdev (highest txg).
	 */
	if (error = spa_get_rootconf(devpath, devid, &conf))
		goto msg_out;

	/*
	 * Add type "root" vdev to the config.
	 */
	spa_build_rootpool_config(conf);

	VERIFY(nvlist_lookup_string(conf, ZPOOL_CONFIG_POOL_NAME, &pname) == 0);

	/*
	 * We specify 'allowfaulted' for this to be treated like spa_open()
	 * instead of spa_import().  This prevents us from marking vdevs as
	 * persistently unavailable, and generates FMA ereports as if it were a
	 * pool open, not import.
	 */
	error = spa_import_common(pname, conf, NULL, B_TRUE, B_TRUE);
	if (error == EEXIST)
		error = 0;

	nvlist_free(conf);
	return (error);

msg_out:
	cmn_err(CE_NOTE, "\n"
	    "  ***************************************************  \n"
	    "  *  This device is not bootable!                   *  \n"
	    "  *  It is either offlined or detached or faulted.  *  \n"
	    "  *  Please try to boot from a different device.    *  \n"
	    "  ***************************************************  ");

	return (error);
}
/*
 * Import a non-root pool into the system.
 */
int
spa_import(const char *pool, nvlist_t *config, nvlist_t *props)
{
	return (spa_import_common(pool, config, props, B_FALSE, B_FALSE));
}

int
spa_import_faulted(const char *pool, nvlist_t *config, nvlist_t *props)
{
	return (spa_import_common(pool, config, props, B_FALSE, B_TRUE));
}
2469 * This (illegal) pool name is used when temporarily importing a spa_t in order
2470 * to get the vdev stats associated with the imported devices.
2472 #define TRYIMPORT_NAME "$import"
2475 spa_tryimport(nvlist_t *tryconfig)
2477 nvlist_t *config = NULL;
2482 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
2485 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
2489 * Create and initialize the spa structure.
2491 mutex_enter(&spa_namespace_lock);
2492 spa = spa_add(TRYIMPORT_NAME, NULL);
2496 * Pass off the heavy lifting to spa_load().
2497 * Pass TRUE for mosconfig because the user-supplied config
2498 * is actually the one to trust when doing an import.
2500 (void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);
2503 * If 'tryconfig' was at least parsable, return the current config.
2505 if (spa->spa_root_vdev != NULL) {
2506 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
2507 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
2508 poolname) == 0);
2509 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
2510 state) == 0);
2511 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
2512 spa->spa_uberblock.ub_timestamp) == 0);
2515 * If the bootfs property exists on this pool then we
2516 * copy it out so that external consumers can tell which
2517 * pools are bootable.
2519 if (spa->spa_bootfs) {
2520 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2523 * We have to play games with the name since the
2524 * pool was opened as TRYIMPORT_NAME.
2526 if (dsl_dsobj_to_dsname(spa_name(spa),
2527 spa->spa_bootfs, tmpname) == 0) {
2529 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2531 cp = strchr(tmpname, '/');
2532 if (cp == NULL) {
2533 (void) strlcpy(dsname, tmpname,
2534 MAXPATHLEN);
2535 } else {
2536 (void) snprintf(dsname, MAXPATHLEN,
2537 "%s/%s", poolname, ++cp);
2538 }
2539 VERIFY(nvlist_add_string(config,
2540 ZPOOL_CONFIG_BOOTFS, dsname) == 0);
2541 kmem_free(dsname, MAXPATHLEN);
2543 kmem_free(tmpname, MAXPATHLEN);
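/*
 * Worked example (illustrative): if the pool's real name is "tank" and
 * dsl_dsobj_to_dsname() returns "$import/ROOT/zfs" (because the pool was
 * opened as TRYIMPORT_NAME), the snprintf() above rewrites the prefix so
 * the reported bootfs becomes "tank/ROOT/zfs".  A name with no '/' is
 * copied through unchanged.
 */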
2547 * Add the list of hot spares and level 2 cache devices.
2549 spa_add_spares(spa, config);
2550 spa_add_l2cache(spa, config);
2553 spa_unload(spa);
2554 spa_deactivate(spa);
2555 spa_remove(spa);
2556 mutex_exit(&spa_namespace_lock);
2562 * Pool export/destroy
2564 * The act of destroying or exporting a pool is very simple. We make sure there
2565 * is no more pending I/O and any references to the pool are gone. Then, we
2566 * update the pool state and sync all the labels to disk, removing the
2567 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
2568 * we don't sync the labels or remove the configuration cache.
2571 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
2572 boolean_t force, boolean_t hardforce)
2579 if (!(spa_mode & FWRITE))
2582 mutex_enter(&spa_namespace_lock);
2583 if ((spa = spa_lookup(pool)) == NULL) {
2584 mutex_exit(&spa_namespace_lock);
2589 * Put a hold on the pool, drop the namespace lock, stop async tasks,
2590 * reacquire the namespace lock, and see if we can export.
2592 spa_open_ref(spa, FTAG);
2593 mutex_exit(&spa_namespace_lock);
2594 spa_async_suspend(spa);
2595 mutex_enter(&spa_namespace_lock);
2596 spa_close(spa, FTAG);
2599 * The pool will be in core if it's openable,
2600 * in which case we can modify its state.
2602 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
2604 * Objsets may be open only because they're dirty, so we
2605 * have to force the pool to sync before checking spa_refcnt.
2607 txg_wait_synced(spa->spa_dsl_pool, 0);
2610 * A pool cannot be exported or destroyed if there are active
2611 * references. If we are resetting a pool, allow references by
2612 * fault injection handlers.
2614 if (!spa_refcount_zero(spa) ||
2615 (spa->spa_inject_ref != 0 &&
2616 new_state != POOL_STATE_UNINITIALIZED)) {
2617 spa_async_resume(spa);
2618 mutex_exit(&spa_namespace_lock);
2623 * A pool cannot be exported if it has an active shared spare.
2624 * This is to prevent other pools from stealing the active spare
2625 * from an exported pool. At the user's request, such a pool can
2626 * still be forcibly exported.
2628 if (!force && new_state == POOL_STATE_EXPORTED &&
2629 spa_has_active_shared_spare(spa)) {
2630 spa_async_resume(spa);
2631 mutex_exit(&spa_namespace_lock);
2636 * We want this to be reflected on every label,
2637 * so mark them all dirty. spa_unload() will do the
2638 * final sync that pushes these changes out.
2640 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
2641 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2642 spa->spa_state = new_state;
2643 spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
2644 vdev_config_dirty(spa->spa_root_vdev);
2645 spa_config_exit(spa, SCL_ALL, FTAG);
2649 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);
2651 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
2652 spa_unload(spa);
2653 spa_deactivate(spa);
2656 if (oldconfig && spa->spa_config)
2657 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
2659 if (new_state != POOL_STATE_UNINITIALIZED) {
2660 if (!hardforce)
2661 spa_config_sync(spa, B_TRUE, B_TRUE);
2662 spa_remove(spa);
2664 mutex_exit(&spa_namespace_lock);
2670 * Destroy a storage pool.
2673 spa_destroy(char *pool)
2675 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
2676 B_FALSE, B_FALSE));
2680 * Export a storage pool.
2683 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
2684 boolean_t hardforce)
2686 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
2687 force, hardforce));
2691 * Similar to spa_export(), this unloads the spa_t without actually removing it
2692 * from the namespace in any way.
2695 spa_reset(char *pool)
2697 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
2698 B_FALSE, B_FALSE));
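/*
 * Usage sketch (illustrative, not from the original source): the three
 * wrappers above differ only in the target state and forcing flags;
 * 'oldcfg' below is a hypothetical caller-owned nvlist pointer.
 *
 *	error = spa_destroy("tank");
 *	error = spa_export("tank", &oldcfg, B_FALSE, B_FALSE);
 *	error = spa_reset("tank");	(fault-injection testing only)
 */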
2702 * ==========================================================================
2703 * Device manipulation
2704 * ==========================================================================
2708 * Add a device to a storage pool.
2711 spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
2715 vdev_t *rvd = spa->spa_root_vdev;
2717 nvlist_t **spares, **l2cache;
2718 uint_t nspares, nl2cache;
2720 txg = spa_vdev_enter(spa);
2722 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
2723 VDEV_ALLOC_ADD)) != 0)
2724 return (spa_vdev_exit(spa, NULL, txg, error));
2726 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
2728 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
2729 &nspares) != 0)
2730 nspares = 0;
2732 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
2733 &nl2cache) != 0)
2734 nl2cache = 0;
2736 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
2737 return (spa_vdev_exit(spa, vd, txg, EINVAL));
2739 if (vd->vdev_children != 0 &&
2740 (error = vdev_create(vd, txg, B_FALSE)) != 0)
2741 return (spa_vdev_exit(spa, vd, txg, error));
2744 * We must validate the spares and l2cache devices after checking the
2745 * children. Otherwise, vdev_inuse() will blindly overwrite the spare.
2747 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
2748 return (spa_vdev_exit(spa, vd, txg, error));
2751 * Transfer each new top-level vdev from vd to rvd.
2753 for (c = 0; c < vd->vdev_children; c++) {
2754 tvd = vd->vdev_child[c];
2755 vdev_remove_child(vd, tvd);
2756 tvd->vdev_id = rvd->vdev_children;
2757 vdev_add_child(rvd, tvd);
2758 vdev_config_dirty(tvd);
2759 }
2761 if (nspares != 0) {
2762 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
2763 ZPOOL_CONFIG_SPARES);
2764 spa_load_spares(spa);
2765 spa->spa_spares.sav_sync = B_TRUE;
2766 }
2768 if (nl2cache != 0) {
2769 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
2770 ZPOOL_CONFIG_L2CACHE);
2771 spa_load_l2cache(spa);
2772 spa->spa_l2cache.sav_sync = B_TRUE;
2773 }
2776 * We have to be careful when adding new vdevs to an existing pool.
2777 * If other threads start allocating from these vdevs before we
2778 * sync the config cache, and we lose power, then upon reboot we may
2779 * fail to open the pool because there are DVAs that the config cache
2780 * can't translate. Therefore, we first add the vdevs without
2781 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
2782 * and then let spa_config_update() initialize the new metaslabs.
2784 * spa_load() checks for added-but-not-initialized vdevs, so that
2785 * if we lose power at any point in this sequence, the remaining
2786 * steps will be completed the next time we load the pool.
2788 (void) spa_vdev_exit(spa, vd, txg, 0);
2790 mutex_enter(&spa_namespace_lock);
2791 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
2792 mutex_exit(&spa_namespace_lock);
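/*
 * Caller sketch (assumed, for illustration): the nvroot passed to
 * spa_vdev_add() mirrors what "zpool add" builds in userland -- a root
 * vdev nvlist whose ZPOOL_CONFIG_CHILDREN array holds the new top-level
 * vdevs, plus optional ZPOOL_CONFIG_SPARES and ZPOOL_CONFIG_L2CACHE
 * arrays:
 *
 *	error = spa_vdev_add(spa, nvroot);
 */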
2798 * Attach a device to a mirror. The arguments are the path to any device
2799 * in the mirror, and the nvroot for the new device. If the path specifies
2800 * a device that is not mirrored, we automatically insert the mirror vdev.
2802 * If 'replacing' is specified, the new device is intended to replace the
2803 * existing device; in this case the two devices are made into their own
2804 * mirror using the 'replacing' vdev, which is functionally identical to
2805 * the mirror vdev (it actually reuses all the same ops) but has a few
2806 * extra rules: you can't attach to it after it's been created, and upon
2807 * completion of resilvering, the first disk (the one being replaced)
2808 * is automatically detached.
2811 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
2813 uint64_t txg, open_txg;
2814 vdev_t *rvd = spa->spa_root_vdev;
2815 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
2818 char *oldvdpath, *newvdpath;
2822 txg = spa_vdev_enter(spa);
2824 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
2827 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
2829 if (!oldvd->vdev_ops->vdev_op_leaf)
2830 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
2832 pvd = oldvd->vdev_parent;
2834 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
2835 VDEV_ALLOC_ADD)) != 0)
2836 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
2838 if (newrootvd->vdev_children != 1)
2839 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
2841 newvd = newrootvd->vdev_child[0];
2843 if (!newvd->vdev_ops->vdev_op_leaf)
2844 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
2846 if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
2847 return (spa_vdev_exit(spa, newrootvd, txg, error));
2850 * Spares can't replace logs
2852 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
2853 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
2857 * For attach, the only allowable parent is a mirror or the root
2858 * vdev.
2860 if (pvd->vdev_ops != &vdev_mirror_ops &&
2861 pvd->vdev_ops != &vdev_root_ops)
2862 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
2864 pvops = &vdev_mirror_ops;
2867 * Active hot spares can only be replaced by inactive hot
2868 * spares.
2870 if (pvd->vdev_ops == &vdev_spare_ops &&
2871 pvd->vdev_child[1] == oldvd &&
2872 !spa_has_spare(spa, newvd->vdev_guid))
2873 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
2876 * If the source is a hot spare, and the parent isn't already a
2877 * spare, then we want to create a new hot spare. Otherwise, we
2878 * want to create a replacing vdev. The user is not allowed to
2879 * attach to a spared vdev child unless the 'isspare' state is
2880 * the same (spare replaces spare, non-spare replaces
2881 * non-spare).
2883 if (pvd->vdev_ops == &vdev_replacing_ops)
2884 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
2885 else if (pvd->vdev_ops == &vdev_spare_ops &&
2886 newvd->vdev_isspare != oldvd->vdev_isspare)
2887 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
2888 else if (pvd->vdev_ops != &vdev_spare_ops &&
2889 newvd->vdev_isspare)
2890 pvops = &vdev_spare_ops;
2892 pvops = &vdev_replacing_ops;
2896 * Compare the new device size with the replaceable/attachable
2897 * device size.
2899 if (newvd->vdev_psize < vdev_get_rsize(oldvd))
2900 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
2903 * The new device cannot have a higher alignment requirement
2904 * than the top-level vdev.
2906 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
2907 return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
2910 * If this is an in-place replacement, update oldvd's path and devid
2911 * to make it distinguishable from newvd, and unopenable from now on.
2913 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
2914 spa_strfree(oldvd->vdev_path);
2915 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
2917 (void) sprintf(oldvd->vdev_path, "%s/%s",
2918 newvd->vdev_path, "old");
2919 if (oldvd->vdev_devid != NULL) {
2920 spa_strfree(oldvd->vdev_devid);
2921 oldvd->vdev_devid = NULL;
2926 * If the parent is not a mirror, or if we're replacing, insert the new
2927 * mirror/replacing/spare vdev above oldvd.
2929 if (pvd->vdev_ops != pvops)
2930 pvd = vdev_add_parent(oldvd, pvops);
2932 ASSERT(pvd->vdev_top->vdev_parent == rvd);
2933 ASSERT(pvd->vdev_ops == pvops);
2934 ASSERT(oldvd->vdev_parent == pvd);
2937 * Extract the new device from its root and add it to pvd.
2939 vdev_remove_child(newrootvd, newvd);
2940 newvd->vdev_id = pvd->vdev_children;
2941 vdev_add_child(pvd, newvd);
2944 * If newvd is smaller than oldvd, but larger than its rsize,
2945 * the addition of newvd may have decreased our parent's asize.
2947 pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize);
2949 tvd = newvd->vdev_top;
2950 ASSERT(pvd->vdev_top == tvd);
2951 ASSERT(tvd->vdev_parent == rvd);
2953 vdev_config_dirty(tvd);
2956 * Set newvd's DTL to [TXG_INITIAL, open_txg]. It will propagate
2957 * upward when spa_vdev_exit() calls vdev_dtl_reassess().
2959 open_txg = txg + TXG_CONCURRENT_STATES - 1;
2961 mutex_enter(&newvd->vdev_dtl_lock);
2962 space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL,
2963 open_txg - TXG_INITIAL + 1);
2964 mutex_exit(&newvd->vdev_dtl_lock);
2966 if (newvd->vdev_isspare)
2967 spa_spare_activate(newvd);
2968 oldvdpath = spa_strdup(oldvd->vdev_path);
2969 newvdpath = spa_strdup(newvd->vdev_path);
2970 newvd_isspare = newvd->vdev_isspare;
2973 * Mark newvd's DTL dirty in this txg.
2975 vdev_dirty(tvd, VDD_DTL, newvd, txg);
2977 (void) spa_vdev_exit(spa, newrootvd, open_txg, 0);
2979 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
2980 if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
2981 spa_history_internal_log(LOG_POOL_VDEV_ATTACH, spa, tx,
2982 CRED(), "%s vdev=%s %s vdev=%s",
2983 replacing && newvd_isspare ? "spare in" :
2984 replacing ? "replace" : "attach", newvdpath,
2985 replacing ? "for" : "to", oldvdpath);
2991 spa_strfree(oldvdpath);
2992 spa_strfree(newvdpath);
2995 * Kick off a resilver to update newvd.
2997 VERIFY3U(spa_scrub(spa, POOL_SCRUB_RESILVER), ==, 0);
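/*
 * Usage sketch (illustrative): "zpool attach" resolves the existing
 * device to its guid and passes a one-child nvroot for the new device
 * with replacing = B_FALSE; "zpool replace" passes replacing = B_TRUE:
 *
 *	error = spa_vdev_attach(spa, oldvd_guid, nvroot, B_FALSE);
 */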
3003 * Detach a device from a mirror or replacing vdev.
3004 * If 'replace_done' is specified, only detach if the parent
3005 * is a replacing vdev.
3008 spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done)
3012 vdev_t *rvd = spa->spa_root_vdev;
3013 vdev_t *vd, *pvd, *cvd, *tvd;
3014 boolean_t unspare = B_FALSE;
3015 uint64_t unspare_guid;
3018 txg = spa_vdev_enter(spa);
3020 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
3023 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
3025 if (!vd->vdev_ops->vdev_op_leaf)
3026 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3028 pvd = vd->vdev_parent;
3031 * If replace_done is specified, only remove this device if it's
3032 * the first child of a replacing vdev. For the 'spare' vdev, either
3033 * disk can be removed.
3036 if (pvd->vdev_ops == &vdev_replacing_ops) {
3037 if (vd->vdev_id != 0)
3038 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3039 } else if (pvd->vdev_ops != &vdev_spare_ops) {
3040 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3044 ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
3045 spa_version(spa) >= SPA_VERSION_SPARES);
3048 * Only mirror, replacing, and spare vdevs support detach.
3050 if (pvd->vdev_ops != &vdev_replacing_ops &&
3051 pvd->vdev_ops != &vdev_mirror_ops &&
3052 pvd->vdev_ops != &vdev_spare_ops)
3053 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3056 * If there's only one replica, you can't detach it.
3058 if (pvd->vdev_children <= 1)
3059 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
3062 * If all siblings have non-empty DTLs, this device may have the only
3063 * valid copy of the data, which means we cannot safely detach it.
3065 * XXX -- as in the vdev_offline() case, we really want a more
3066 * precise DTL check.
3068 for (c = 0; c < pvd->vdev_children; c++) {
3071 cvd = pvd->vdev_child[c];
3074 if (vdev_is_dead(cvd))
3075 continue;
3076 mutex_enter(&cvd->vdev_dtl_lock);
3077 dirty = cvd->vdev_dtl_map.sm_space |
3078 cvd->vdev_dtl_scrub.sm_space;
3079 mutex_exit(&cvd->vdev_dtl_lock);
3080 if (!dirty)
3081 break;
3082 }
3084 if (c == pvd->vdev_children)
3085 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
3088 * If we are detaching the second disk from a replacing vdev, then
3089 * check to see if we changed the original vdev's path to have "/old"
3090 * at the end in spa_vdev_attach(). If so, undo that change now.
3092 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 &&
3093 pvd->vdev_child[0]->vdev_path != NULL &&
3094 pvd->vdev_child[1]->vdev_path != NULL) {
3095 ASSERT(pvd->vdev_child[1] == vd);
3096 cvd = pvd->vdev_child[0];
3097 len = strlen(vd->vdev_path);
3098 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
3099 strcmp(cvd->vdev_path + len, "/old") == 0) {
3100 spa_strfree(cvd->vdev_path);
3101 cvd->vdev_path = spa_strdup(vd->vdev_path);
3106 * If we are detaching the original disk from a spare, then it implies
3107 * that the spare should become a real disk, and be removed from the
3108 * active spare list for the pool.
3110 if (pvd->vdev_ops == &vdev_spare_ops &&
3111 vd->vdev_id == 0)
3112 unspare = B_TRUE;
3115 * Erase the disk labels so the disk can be used for other things.
3116 * This must be done after all other error cases are handled,
3117 * but before we disembowel vd (so we can still do I/O to it).
3118 * But if we can't do it, don't treat the error as fatal --
3119 * it may be that the unwritability of the disk is the reason
3120 * it's being detached!
3122 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
3125 * Remove vd from its parent and compact the parent's children.
3127 vdev_remove_child(pvd, vd);
3128 vdev_compact_children(pvd);
3131 * Remember one of the remaining children so we can get tvd below.
3133 cvd = pvd->vdev_child[0];
3136 * If we need to remove the remaining child from the list of hot spares,
3137 * do it now, marking the vdev as no longer a spare in the process. We
3138 * must do this before vdev_remove_parent(), because that can change the
3139 * GUID if it creates a new toplevel GUID.
3141 if (unspare) {
3142 ASSERT(cvd->vdev_isspare);
3143 spa_spare_remove(cvd);
3144 unspare_guid = cvd->vdev_guid;
3145 }
3148 * If the parent mirror/replacing vdev only has one child,
3149 * the parent is no longer needed. Remove it from the tree.
3151 if (pvd->vdev_children == 1)
3152 vdev_remove_parent(cvd);
3155 * We don't set tvd until now because the parent we just removed
3156 * may have been the previous top-level vdev.
3158 tvd = cvd->vdev_top;
3159 ASSERT(tvd->vdev_parent == rvd);
3162 * Reevaluate the parent vdev state.
3164 vdev_propagate_state(cvd);
3167 * If the device we just detached was smaller than the others, it may be
3168 * possible to add metaslabs (i.e. grow the pool). vdev_metaslab_init()
3169 * can't fail because the existing metaslabs are already in core, so
3170 * there's nothing to read from disk.
3172 VERIFY(vdev_metaslab_init(tvd, txg) == 0);
3174 vdev_config_dirty(tvd);
3177 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
3178 * vd->vdev_detached is set and free vd's DTL object in syncing context.
3179 * But first make sure we're not on any *other* txg's DTL list, to
3180 * prevent vd from being accessed after it's freed.
3182 for (t = 0; t < TXG_SIZE; t++)
3183 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
3184 vd->vdev_detached = B_TRUE;
3185 vdev_dirty(tvd, VDD_DTL, vd, txg);
3187 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
3189 error = spa_vdev_exit(spa, vd, txg, 0);
3192 * If this was the removal of the original device in a hot spare vdev,
3193 * then we want to go through and remove the device from the hot spare
3194 * list of every other pool.
3198 mutex_enter(&spa_namespace_lock);
3199 while ((spa = spa_next(spa)) != NULL) {
3200 if (spa->spa_state != POOL_STATE_ACTIVE)
3201 continue;
3202 spa_open_ref(spa, FTAG);
3203 mutex_exit(&spa_namespace_lock);
3204 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
3205 mutex_enter(&spa_namespace_lock);
3206 spa_close(spa, FTAG);
3208 mutex_exit(&spa_namespace_lock);
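/*
 * Usage sketch (illustrative): "zpool detach" reaches this function with
 * replace_done = B_FALSE, while the resilver-completion path above passes
 * B_TRUE so that only finished replacing/spare vdevs are collapsed:
 *
 *	error = spa_vdev_detach(spa, vd_guid, B_FALSE);
 */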
3215 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
3217 for (int i = 0; i < count; i++) {
3220 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
3221 &guid) == 0);
3223 if (guid == target_guid)
3224 return (nvpp[i]);
3231 spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
3232 nvlist_t *dev_to_remove)
3234 nvlist_t **newdev = NULL;
3236 if (count > 1)
3237 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
3239 for (int i = 0, j = 0; i < count; i++) {
3240 if (dev[i] == dev_to_remove)
3241 continue;
3242 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
3245 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
3246 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
3248 for (int i = 0; i < count - 1; i++)
3249 nvlist_free(newdev[i]);
3251 if (count > 1)
3252 kmem_free(newdev, (count - 1) * sizeof (void *));
3256 * Remove a device from the pool. Currently, this supports removing only hot
3257 * spares and level 2 ARC devices.
3260 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
3263 nvlist_t **spares, **l2cache, *nv;
3264 uint_t nspares, nl2cache;
3268 txg = spa_vdev_enter(spa);
3270 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
3272 if (spa->spa_spares.sav_vdevs != NULL &&
3273 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
3274 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
3275 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
3277 * Only remove the hot spare if it's not currently in use
3278 * in this pool.
3280 if (vd == NULL || unspare) {
3281 spa_vdev_remove_aux(spa->spa_spares.sav_config,
3282 ZPOOL_CONFIG_SPARES, spares, nspares, nv);
3283 spa_load_spares(spa);
3284 spa->spa_spares.sav_sync = B_TRUE;
3285 } else {
3286 error = EBUSY;
3287 }
3288 } else if (spa->spa_l2cache.sav_vdevs != NULL &&
3289 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
3290 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
3291 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
3293 * Cache devices can always be removed.
3295 spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
3296 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
3297 spa_load_l2cache(spa);
3298 spa->spa_l2cache.sav_sync = B_TRUE;
3299 } else if (vd != NULL) {
3301 * Normal vdevs cannot be removed (yet).
3302 */
3303 error = ENOTSUP;
3304 } else {
3305 /*
3306 * There is no vdev of any kind with the specified guid.
3307 */
3308 error = ENOENT;
3309 }
3311 return (spa_vdev_exit(spa, NULL, txg, error));
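/*
 * Usage sketch (illustrative): auxiliary devices are removed by guid;
 * passing unspare = B_TRUE forces removal even if the spare is
 * currently active:
 *
 *	error = spa_vdev_remove(spa, spare_guid, B_FALSE);
 */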
3315 * Find any device that's done replacing, or a vdev marked 'unspare' that's
3316 * currently spared, so we can detach it.
3319 spa_vdev_resilver_done_hunt(vdev_t *vd)
3321 vdev_t *newvd, *oldvd;
3324 for (c = 0; c < vd->vdev_children; c++) {
3325 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
3326 if (oldvd != NULL)
3327 return (oldvd);
3328 }
3331 * Check for a completed replacement.
3333 if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) {
3334 oldvd = vd->vdev_child[0];
3335 newvd = vd->vdev_child[1];
3337 mutex_enter(&newvd->vdev_dtl_lock);
3338 if (newvd->vdev_dtl_map.sm_space == 0 &&
3339 newvd->vdev_dtl_scrub.sm_space == 0) {
3340 mutex_exit(&newvd->vdev_dtl_lock);
3341 return (oldvd);
3342 }
3343 mutex_exit(&newvd->vdev_dtl_lock);
3347 * Check for a completed resilver with the 'unspare' flag set.
3349 if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) {
3350 newvd = vd->vdev_child[0];
3351 oldvd = vd->vdev_child[1];
3353 mutex_enter(&newvd->vdev_dtl_lock);
3354 if (newvd->vdev_unspare &&
3355 newvd->vdev_dtl_map.sm_space == 0 &&
3356 newvd->vdev_dtl_scrub.sm_space == 0) {
3357 newvd->vdev_unspare = 0;
3358 mutex_exit(&newvd->vdev_dtl_lock);
3359 return (oldvd);
3360 }
3361 mutex_exit(&newvd->vdev_dtl_lock);
3362 }
3364 return (NULL);
3365 }
3368 spa_vdev_resilver_done(spa_t *spa)
3375 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3377 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
3378 guid = vd->vdev_guid;
3380 * If we have just finished replacing a hot spared device, then
3381 * we need to detach the parent's first child (the original hot
3382 * spare) as well.
3384 pvd = vd->vdev_parent;
3385 if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
3386 pvd->vdev_id == 0) {
3387 ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
3388 ASSERT(pvd->vdev_parent->vdev_children == 2);
3389 pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid;
3391 spa_config_exit(spa, SCL_CONFIG, FTAG);
3392 if (spa_vdev_detach(spa, guid, B_TRUE) != 0)
3393 return;
3394 if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0)
3395 return;
3396 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3399 spa_config_exit(spa, SCL_CONFIG, FTAG);
3403 * Update the stored path for this vdev. Dirty the vdev configuration, relying
3404 * on spa_vdev_enter/exit() to synchronize the labels and cache.
3407 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
3412 txg = spa_vdev_enter(spa);
3414 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) {
3416 * Determine if this is a reference to a hot spare device. If
3417 * it is, update the path manually as there is no associated
3418 * vdev_t that can be synced to disk.
3423 if (spa->spa_spares.sav_config != NULL) {
3424 VERIFY(nvlist_lookup_nvlist_array(
3425 spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
3426 &spares, &nspares) == 0);
3427 for (i = 0; i < nspares; i++) {
3429 VERIFY(nvlist_lookup_uint64(spares[i],
3430 ZPOOL_CONFIG_GUID, &theguid) == 0);
3431 if (theguid == guid) {
3432 VERIFY(nvlist_add_string(spares[i],
3433 ZPOOL_CONFIG_PATH, newpath) == 0);
3434 spa_load_spares(spa);
3435 spa->spa_spares.sav_sync = B_TRUE;
3436 return (spa_vdev_exit(spa, NULL, txg,
3437 0));
3442 return (spa_vdev_exit(spa, NULL, txg, ENOENT));
3445 if (!vd->vdev_ops->vdev_op_leaf)
3446 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3448 spa_strfree(vd->vdev_path);
3449 vd->vdev_path = spa_strdup(newpath);
3451 vdev_config_dirty(vd->vdev_top);
3453 return (spa_vdev_exit(spa, NULL, txg, 0));
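/*
 * Usage sketch (illustrative): invoked when a device's path changes
 * (e.g. after recabling) so the stored label catches up; the path
 * below is hypothetical:
 *
 *	error = spa_vdev_setpath(spa, guid, "/dev/dsk/c2t0d0s0");
 */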
3457 * ==========================================================================
3458 * SPA Scrubbing
3459 * ==========================================================================
3463 spa_scrub(spa_t *spa, pool_scrub_type_t type)
3465 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
3467 if ((uint_t)type >= POOL_SCRUB_TYPES)
3468 return (ENOTSUP);
3471 * If a resilver was requested, but there is no DTL on a
3472 * writeable leaf device, we have nothing to do.
3474 if (type == POOL_SCRUB_RESILVER &&
3475 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
3476 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
3477 return (0);
3478 }
3480 if (type == POOL_SCRUB_EVERYTHING &&
3481 spa->spa_dsl_pool->dp_scrub_func != SCRUB_FUNC_NONE &&
3482 spa->spa_dsl_pool->dp_scrub_isresilver)
3483 return (EBUSY);
3485 if (type == POOL_SCRUB_EVERYTHING || type == POOL_SCRUB_RESILVER) {
3486 return (dsl_pool_scrub_clean(spa->spa_dsl_pool));
3487 } else if (type == POOL_SCRUB_NONE) {
3488 return (dsl_pool_scrub_cancel(spa->spa_dsl_pool));
3489 } else {
3490 return (EINVAL);
3491 }
3492 }
3495 * ==========================================================================
3496 * SPA async task processing
3497 * ==========================================================================
3501 spa_async_remove(spa_t *spa, vdev_t *vd)
3503 if (vd->vdev_remove_wanted) {
3504 vd->vdev_remove_wanted = 0;
3505 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
3506 vdev_clear(spa, vd);
3507 vdev_state_dirty(vd->vdev_top);
3510 for (int c = 0; c < vd->vdev_children; c++)
3511 spa_async_remove(spa, vd->vdev_child[c]);
3515 spa_async_probe(spa_t *spa, vdev_t *vd)
3517 if (vd->vdev_probe_wanted) {
3518 vd->vdev_probe_wanted = 0;
3519 vdev_reopen(vd); /* vdev_open() does the actual probe */
3522 for (int c = 0; c < vd->vdev_children; c++)
3523 spa_async_probe(spa, vd->vdev_child[c]);
3527 spa_async_thread(void *arg)
3532 ASSERT(spa->spa_sync_on);
3534 mutex_enter(&spa->spa_async_lock);
3535 tasks = spa->spa_async_tasks;
3536 spa->spa_async_tasks = 0;
3537 mutex_exit(&spa->spa_async_lock);
3540 * See if the config needs to be updated.
3542 if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
3543 mutex_enter(&spa_namespace_lock);
3544 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
3545 mutex_exit(&spa_namespace_lock);
3549 * See if any devices need to be marked REMOVED.
3551 if (tasks & SPA_ASYNC_REMOVE) {
3552 spa_vdev_state_enter(spa);
3553 spa_async_remove(spa, spa->spa_root_vdev);
3554 for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
3555 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
3556 for (int i = 0; i < spa->spa_spares.sav_count; i++)
3557 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
3558 (void) spa_vdev_state_exit(spa, NULL, 0);
3562 * See if any devices need to be probed.
3564 if (tasks & SPA_ASYNC_PROBE) {
3565 spa_vdev_state_enter(spa);
3566 spa_async_probe(spa, spa->spa_root_vdev);
3567 (void) spa_vdev_state_exit(spa, NULL, 0);
3571 * If any devices are done replacing, detach them.
3573 if (tasks & SPA_ASYNC_RESILVER_DONE)
3574 spa_vdev_resilver_done(spa);
3577 * Kick off a resilver.
3579 if (tasks & SPA_ASYNC_RESILVER)
3580 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER) == 0);
3583 * Let the world know that we're done.
3585 mutex_enter(&spa->spa_async_lock);
3586 spa->spa_async_thread = NULL;
3587 cv_broadcast(&spa->spa_async_cv);
3588 mutex_exit(&spa->spa_async_lock);
3593 spa_async_suspend(spa_t *spa)
3595 mutex_enter(&spa->spa_async_lock);
3596 spa->spa_async_suspended++;
3597 while (spa->spa_async_thread != NULL)
3598 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
3599 mutex_exit(&spa->spa_async_lock);
3603 spa_async_resume(spa_t *spa)
3605 mutex_enter(&spa->spa_async_lock);
3606 ASSERT(spa->spa_async_suspended != 0);
3607 spa->spa_async_suspended--;
3608 mutex_exit(&spa->spa_async_lock);
3612 spa_async_dispatch(spa_t *spa)
3614 mutex_enter(&spa->spa_async_lock);
3615 if (spa->spa_async_tasks && !spa->spa_async_suspended &&
3616 spa->spa_async_thread == NULL &&
3617 rootdir != NULL && !vn_is_readonly(rootdir))
3618 spa->spa_async_thread = thread_create(NULL, 0,
3619 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
3620 mutex_exit(&spa->spa_async_lock);
3624 spa_async_request(spa_t *spa, int task)
3626 mutex_enter(&spa->spa_async_lock);
3627 spa->spa_async_tasks |= task;
3628 mutex_exit(&spa->spa_async_lock);
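/*
 * Illustrative flow (assumed, not in the original file): producers set
 * task bits and a later dispatch starts one worker thread to drain them:
 *
 *	spa_async_request(spa, SPA_ASYNC_RESILVER);
 *	...
 *	spa_async_dispatch(spa);	(called at the end of spa_sync())
 */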
3632 * ==========================================================================
3633 * SPA syncing routines
3634 * ==========================================================================
3638 spa_sync_deferred_frees(spa_t *spa, uint64_t txg)
3640 bplist_t *bpl = &spa->spa_sync_bplist;
3648 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
3650 while (bplist_iterate(bpl, &itor, &blk) == 0) {
3651 ASSERT(blk.blk_birth < txg);
3652 zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL,
3653 ZIO_FLAG_MUSTSUCCEED));
3656 error = zio_wait(zio);
3657 ASSERT3U(error, ==, 0);
3659 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
3660 bplist_vacate(bpl, tx);
3663 * Pre-dirty the first block so we sync to convergence faster.
3664 * (Usually only the first block is needed.)
3666 dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx);
3671 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
3673 char *packed = NULL;
3678 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
3681 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
3682 * information. This avoids the dbuf_will_dirty() path and
3683 * saves us a pre-read to get data we don't actually care about.
3685 bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE);
3686 packed = kmem_alloc(bufsize, KM_SLEEP);
3688 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
3689 KM_SLEEP) == 0);
3690 bzero(packed + nvsize, bufsize - nvsize);
3692 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
3694 kmem_free(packed, bufsize);
3696 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
3697 dmu_buf_will_dirty(db, tx);
3698 *(uint64_t *)db->db_data = nvsize;
3699 dmu_buf_rele(db, FTAG);
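/*
 * Worked example (illustrative, assuming SPA_CONFIG_BLOCKSIZE is 16K):
 * a packed nvlist of 20000 bytes is rounded up by P2ROUNDUP to a 32768
 * byte buffer; the tail is zeroed and whole blocks are written, so the
 * dbuf layer never has to read a partial block first.
 */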
3703 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
3704 const char *config, const char *entry)
3714 * Update the MOS nvlist describing the list of available devices.
3715 * spa_validate_aux() will have already made sure this nvlist is
3716 * valid and the vdevs are labeled appropriately.
3718 if (sav->sav_object == 0) {
3719 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
3720 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
3721 sizeof (uint64_t), tx);
3722 VERIFY(zap_update(spa->spa_meta_objset,
3723 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
3724 &sav->sav_object, tx) == 0);
3727 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3728 if (sav->sav_count == 0) {
3729 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
3730 } else {
3731 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
3732 for (i = 0; i < sav->sav_count; i++)
3733 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
3734 B_FALSE, B_FALSE, B_TRUE);
3735 VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
3736 sav->sav_count) == 0);
3737 for (i = 0; i < sav->sav_count; i++)
3738 nvlist_free(list[i]);
3739 kmem_free(list, sav->sav_count * sizeof (void *));
3740 }
3742 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
3743 nvlist_free(nvroot);
3745 sav->sav_sync = B_FALSE;
3749 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
3753 if (list_is_empty(&spa->spa_config_dirty_list))
3754 return;
3756 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
3758 config = spa_config_generate(spa, spa->spa_root_vdev,
3759 dmu_tx_get_txg(tx), B_FALSE);
3761 spa_config_exit(spa, SCL_STATE, FTAG);
3763 if (spa->spa_config_syncing)
3764 nvlist_free(spa->spa_config_syncing);
3765 spa->spa_config_syncing = config;
3767 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
3771 * Set zpool properties.
3774 spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
3777 objset_t *mos = spa->spa_meta_objset;
3778 nvlist_t *nvp = arg2;
3783 const char *propname;
3784 zprop_type_t proptype;
3785 spa_config_dirent_t *dp;
3787 mutex_enter(&spa->spa_props_lock);
3790 while ((elem = nvlist_next_nvpair(nvp, elem))) {
3791 switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
3792 case ZPOOL_PROP_VERSION:
3794 * Only set version for non-zpool-creation cases
3795 * (set/import). spa_create() needs special care
3796 * for version setting.
3798 if (tx->tx_txg != TXG_INITIAL) {
3799 VERIFY(nvpair_value_uint64(elem,
3800 &intval) == 0);
3801 ASSERT(intval <= SPA_VERSION);
3802 ASSERT(intval >= spa_version(spa));
3803 spa->spa_uberblock.ub_version = intval;
3804 vdev_config_dirty(spa->spa_root_vdev);
3808 case ZPOOL_PROP_ALTROOT:
3810 * 'altroot' is a non-persistent property. It should
3811 * have been set temporarily at creation or import time.
3813 ASSERT(spa->spa_root != NULL);
3816 case ZPOOL_PROP_CACHEFILE:
3818 * 'cachefile' is a non-persistent property, but note
3819 * an async request that the config cache needs to be
3820 * updated.
3822 VERIFY(nvpair_value_string(elem, &strval) == 0);
3824 dp = kmem_alloc(sizeof (spa_config_dirent_t), KM_SLEEP);
3826 if (strval[0] == '\0')
3827 dp->scd_path = spa_strdup(spa_config_path);
3828 else if (strcmp(strval, "none") == 0)
3829 dp->scd_path = NULL;
3831 dp->scd_path = spa_strdup(strval);
3833 list_insert_head(&spa->spa_config_list, dp);
3834 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
3838 * Set pool property values in the poolprops mos object.
3840 if (spa->spa_pool_props_object == 0) {
3841 objset_t *mos = spa->spa_meta_objset;
3843 VERIFY((spa->spa_pool_props_object =
3844 zap_create(mos, DMU_OT_POOL_PROPS,
3845 DMU_OT_NONE, 0, tx)) > 0);
3847 VERIFY(zap_update(mos,
3848 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
3849 8, 1, &spa->spa_pool_props_object, tx)
3850 == 0);
3853 /* normalize the property name */
3854 propname = zpool_prop_to_name(prop);
3855 proptype = zpool_prop_get_type(prop);
3857 if (nvpair_type(elem) == DATA_TYPE_STRING) {
3858 ASSERT(proptype == PROP_TYPE_STRING);
3859 VERIFY(nvpair_value_string(elem, &strval) == 0);
3860 VERIFY(zap_update(mos,
3861 spa->spa_pool_props_object, propname,
3862 1, strlen(strval) + 1, strval, tx) == 0);
3864 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
3865 VERIFY(nvpair_value_uint64(elem, &intval) == 0);
3867 if (proptype == PROP_TYPE_INDEX) {
3869 VERIFY(zpool_prop_index_to_string(
3870 prop, intval, &unused) == 0);
3872 VERIFY(zap_update(mos,
3873 spa->spa_pool_props_object, propname,
3874 8, 1, &intval, tx) == 0);
3876 ASSERT(0); /* not allowed */
3880 case ZPOOL_PROP_DELEGATION:
3881 spa->spa_delegation = intval;
3883 case ZPOOL_PROP_BOOTFS:
3884 spa->spa_bootfs = intval;
3886 case ZPOOL_PROP_FAILUREMODE:
3887 spa->spa_failmode = intval;
3894 /* log internal history if this is not a zpool create */
3895 if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY &&
3896 tx->tx_txg != TXG_INITIAL) {
3897 spa_history_internal_log(LOG_POOL_PROPSET,
3898 spa, tx, cr, "%s %lld %s",
3899 nvpair_name(elem), intval, spa_name(spa));
3903 mutex_exit(&spa->spa_props_lock);
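/*
 * Illustrative note (assumed): spa_sync_props() runs in syncing context
 * as a dsl_sync_task queued by the property-set ioctl path, roughly:
 *
 *	dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
 *	    spa, nvp, 3);
 */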
3907 * Sync the specified transaction group. New blocks may be dirtied as
3908 * part of the process, so we iterate until it converges.
3911 spa_sync(spa_t *spa, uint64_t txg)
3913 dsl_pool_t *dp = spa->spa_dsl_pool;
3914 objset_t *mos = spa->spa_meta_objset;
3915 bplist_t *bpl = &spa->spa_sync_bplist;
3916 vdev_t *rvd = spa->spa_root_vdev;
3923 * Lock out configuration changes.
3925 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3927 spa->spa_syncing_txg = txg;
3928 spa->spa_sync_pass = 0;
3931 * If there are any pending vdev state changes, convert them
3932 * into config changes that go out with this transaction group.
3934 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
3935 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
3936 vdev_state_clean(vd);
3937 vdev_config_dirty(vd);
3939 spa_config_exit(spa, SCL_STATE, FTAG);
3941 VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj));
3943 tx = dmu_tx_create_assigned(dp, txg);
3946 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
3947 * set spa_deflate if we have no raid-z vdevs.
3949 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
3950 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
3953 for (i = 0; i < rvd->vdev_children; i++) {
3954 vd = rvd->vdev_child[i];
3955 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
3958 if (i == rvd->vdev_children) {
3959 spa->spa_deflate = TRUE;
3960 VERIFY(0 == zap_add(spa->spa_meta_objset,
3961 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
3962 sizeof (uint64_t), 1, &spa->spa_deflate, tx));
3966 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
3967 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
3968 dsl_pool_create_origin(dp, tx);
3970 /* Keeping the origin open increases spa_minref */
3971 spa->spa_minref += 3;
3974 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
3975 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
3976 dsl_pool_upgrade_clones(dp, tx);
3980 * If anything has changed in this txg, push the deferred frees
3981 * from the previous txg. If not, leave them alone so that we
3982 * don't generate work on an otherwise idle system.
3984 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
3985 !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
3986 !txg_list_empty(&dp->dp_sync_tasks, txg))
3987 spa_sync_deferred_frees(spa, txg);
3990 * Iterate to convergence.
3993 spa->spa_sync_pass++;
3995 spa_sync_config_object(spa, tx);
3996 spa_sync_aux_dev(spa, &spa->spa_spares, tx,
3997 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
3998 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
3999 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
4000 spa_errlog_sync(spa, txg);
4001 dsl_pool_sync(dp, txg);
4003 dirty_vdevs = 0;
4004 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) {
4005 vdev_sync(vd, txg);
4006 dirty_vdevs++;
4007 }
4009 bplist_sync(bpl, tx);
4010 } while (dirty_vdevs);
4014 dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);
4017 * Rewrite the vdev configuration (which includes the uberblock)
4018 * to commit the transaction group.
4020 * If there are no dirty vdevs, we sync the uberblock to a few
4021 * random top-level vdevs that are known to be visible in the
4022 * config cache (see spa_vdev_add() for a complete description).
4023 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
4027 * We hold SCL_STATE to prevent vdev open/close/etc.
4028 * while we're attempting to write the vdev labels.
4030 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4032 if (list_is_empty(&spa->spa_config_dirty_list)) {
4033 vdev_t *svd[SPA_DVAS_PER_BP];
4034 int svdcount = 0;
4035 int children = rvd->vdev_children;
4036 int c0 = spa_get_random(children);
4039 for (c = 0; c < children; c++) {
4040 vd = rvd->vdev_child[(c0 + c) % children];
4041 if (vd->vdev_ms_array == 0 || vd->vdev_islog)
4042 continue;
4043 svd[svdcount++] = vd;
4044 if (svdcount == SPA_DVAS_PER_BP)
4045 break;
4046 }
4047 error = vdev_config_sync(svd, svdcount, txg);
4048 } else {
4049 error = vdev_config_sync(rvd->vdev_child,
4050 rvd->vdev_children, txg);
4051 }
4053 spa_config_exit(spa, SCL_STATE, FTAG);
4057 zio_suspend(spa, NULL);
4058 zio_resume_wait(spa);
4063 * Clear the dirty config list.
4065 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
4066 vdev_config_clean(vd);
4069 * Now that the new config has synced transactionally,
4070 * let it become visible to the config cache.
4072 if (spa->spa_config_syncing != NULL) {
4073 spa_config_set(spa, spa->spa_config_syncing);
4074 spa->spa_config_txg = txg;
4075 spa->spa_config_syncing = NULL;
4078 spa->spa_traverse_wanted = B_TRUE;
4079 rw_enter(&spa->spa_traverse_lock, RW_WRITER);
4080 spa->spa_traverse_wanted = B_FALSE;
4081 spa->spa_ubsync = spa->spa_uberblock;
4082 rw_exit(&spa->spa_traverse_lock);
4085 * Clean up the ZIL records for the synced txg.
4087 dsl_pool_zil_clean(dp);
4090 * Update usable space statistics.
4092 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
4093 vdev_sync_done(vd, txg);
4096 * It had better be the case that we didn't dirty anything
4097 * since vdev_config_sync().
4099 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
4100 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
4101 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
4102 ASSERT(bpl->bpl_queue == NULL);
4104 spa_config_exit(spa, SCL_CONFIG, FTAG);
4107 * If any async tasks have been requested, kick them off.
4109 spa_async_dispatch(spa);
4113 * Sync all pools. We don't want to hold the namespace lock across these
4114 * operations, so we take a reference on the spa_t and drop the lock during the
4115 * sync.
4118 spa_sync_allpools(void)
4121 mutex_enter(&spa_namespace_lock);
4122 while ((spa = spa_next(spa)) != NULL) {
4123 if (spa_state(spa) != POOL_STATE_ACTIVE || spa_suspended(spa))
4124 continue;
4125 spa_open_ref(spa, FTAG);
4126 mutex_exit(&spa_namespace_lock);
4127 txg_wait_synced(spa_get_dsl(spa), 0);
4128 mutex_enter(&spa_namespace_lock);
4129 spa_close(spa, FTAG);
4131 mutex_exit(&spa_namespace_lock);
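/*
 * Illustrative note (assumed): this is the entry point used by the
 * filesystem sync path, e.g.:
 *
 *	spa_sync_allpools();	(from zfs_sync() on behalf of sync(2))
 */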
4135 * ==========================================================================
4136 * Miscellaneous routines
4137 * ==========================================================================
4141 * Remove all pools in the system.
4142 */
4143 void
4144 spa_evict_all(void)
4145 {
4149 * Remove all cached state. All pools should be closed now,
4150 * so every spa in the AVL tree should be unreferenced.
4152 mutex_enter(&spa_namespace_lock);
4153 while ((spa = spa_next(NULL)) != NULL) {
4155 * Stop async tasks. The async thread may need to detach
4156 * a device that's been replaced, which requires grabbing
4157 * spa_namespace_lock, so we must drop it here.
4159 spa_open_ref(spa, FTAG);
4160 mutex_exit(&spa_namespace_lock);
4161 spa_async_suspend(spa);
4162 mutex_enter(&spa_namespace_lock);
4163 spa_close(spa, FTAG);
4165 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4166 spa_unload(spa);
4167 spa_deactivate(spa);
4168 }
4169 spa_remove(spa);
4170 }
4171 mutex_exit(&spa_namespace_lock);
4175 spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t l2cache)
4180 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
4181 return (vd);
4184 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
4185 vd = spa->spa_l2cache.sav_vdevs[i];
4186 if (vd->vdev_guid == guid)
4187 return (vd);
4195 spa_upgrade(spa_t *spa, uint64_t version)
4197 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4200 * This should only be called for a non-faulted pool, and since a
4201 * future version would result in an unopenable pool, this shouldn't be
4202 * possible.
4204 ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
4205 ASSERT(version >= spa->spa_uberblock.ub_version);
4207 spa->spa_uberblock.ub_version = version;
4208 vdev_config_dirty(spa->spa_root_vdev);
4210 spa_config_exit(spa, SCL_ALL, FTAG);
4212 txg_wait_synced(spa_get_dsl(spa), 0);
4216 spa_has_spare(spa_t *spa, uint64_t guid)
4220 spa_aux_vdev_t *sav = &spa->spa_spares;
4222 for (i = 0; i < sav->sav_count; i++)
4223 if (sav->sav_vdevs[i]->vdev_guid == guid)
4224 return (B_TRUE);
4226 for (i = 0; i < sav->sav_npending; i++) {
4227 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
4228 &spareguid) == 0 && spareguid == guid)
4229 return (B_TRUE);
4230 }
4232 return (B_FALSE);
4233 }
4236 * Check if a pool has an active shared spare device.
4237 * Note: the reference count of an active spare is 2, as a spare and
4238 * as a replacement.
4240 spa_has_active_shared_spare(spa_t *spa)
4244 spa_aux_vdev_t *sav = &spa->spa_spares;
4246 for (i = 0; i < sav->sav_count; i++) {
4247 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
4248 &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
4249 refcnt > 2)
4250 return (B_TRUE);
4251 }
4253 return (B_FALSE);
4254 }
4257 * Post a sysevent corresponding to the given event. The 'name' must be one of
4258 * the event definitions in sys/sysevent/eventdefs.h. The payload will be
4259 * filled in from the spa and (optionally) the vdev. This doesn't do anything
4260 * in the userland libzpool, as we don't want consumers to misinterpret ztest
4261 * or zdb as real changes.
4264 spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
4269 sysevent_attr_list_t *attr = NULL;
4270 sysevent_value_t value;
4273 ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
4274 SE_SLEEP);
4276 value.value_type = SE_DATA_TYPE_STRING;
4277 value.value.sv_string = spa_name(spa);
4278 if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
4279 goto done;
4281 value.value_type = SE_DATA_TYPE_UINT64;
4282 value.value.sv_uint64 = spa_guid(spa);
4283 if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
4284 goto done;
4287 value.value_type = SE_DATA_TYPE_UINT64;
4288 value.value.sv_uint64 = vd->vdev_guid;
4289 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
4290 SE_SLEEP) != 0)
4291 goto done;
4293 if (vd->vdev_path) {
4294 value.value_type = SE_DATA_TYPE_STRING;
4295 value.value.sv_string = vd->vdev_path;
4296 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
4297 &value, SE_SLEEP) != 0)
4298 goto done;
4302 if (sysevent_attach_attributes(ev, attr) != 0)
4303 goto done;
4304 attr = NULL;
4306 (void) log_sysevent(ev, SE_SLEEP, &eid);
4308 done:
4309 if (attr != NULL)
4310 sysevent_free_attr(attr);
4311 sysevent_free(ev);
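/*
 * Usage sketch (illustrative): callers post events at state changes,
 * as the detach path above does:
 *
 *	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
 */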