 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2018, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Toomas Soome <tsoome@me.com>
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright 2018 Joyent, Inc.
 * Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
 * Copyright 2017 Joyent, Inc.
 * Copyright (c) 2017, Intel Corporation.
 */
/*
 * SPA: Storage Pool Allocator
 *
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing
 * a pool.
 */
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_removal.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_disk.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/bpobj.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/dsl_destroy.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/vmsystm.h>

#include "zfs_comutil.h"
/*
 * The interval, in seconds, at which failed configuration cache file writes
 * are retried.
 */
int zfs_ccw_retry_interval = 300;
typedef enum zti_modes {
	ZTI_MODE_FIXED,			/* value is # of threads (min 1) */
	ZTI_MODE_BATCH,			/* cpu-intensive; value is ignored */
	ZTI_MODE_NULL,			/* don't create a taskq */

#define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
#define	ZTI_PCT(n)	{ ZTI_MODE_ONLINE_PERCENT, (n), 1 }
#define	ZTI_BATCH	{ ZTI_MODE_BATCH, 0, 1 }
#define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }

#define	ZTI_N(n)	ZTI_P(n, 1)
#define	ZTI_ONE		ZTI_N(1)
typedef struct zio_taskq_info {
	zti_modes_t zti_mode;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
	"iss", "iss_h", "int", "int_h"
/*
 * This table defines the taskq settings for each ZFS I/O type. When
 * initializing a pool, we use this table to create an appropriately sized
 * taskq. Some operations are low volume and therefore have a small, static
 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 * macros. Other operations process a large amount of data; the ZTI_BATCH
 * macro causes us to create a taskq oriented for throughput. Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
 * additional degree of parallelism specified by the number of threads per
 * taskq and the number of taskqs; when dispatching an event in this case, the
 * particular taskq is chosen at random.
 *
 * The different taskq priorities are to handle the different contexts (issue
 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 * need to be handled with minimum delay.
 */
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* NULL */
	{ ZTI_N(8),	ZTI_NULL,	ZTI_P(12, 8),	ZTI_NULL }, /* READ */
	{ ZTI_BATCH,	ZTI_N(5),	ZTI_P(12, 8),	ZTI_N(5) }, /* WRITE */
	{ ZTI_P(12, 8),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FREE */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* CLAIM */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* IOCTL */
	{ ZTI_N(4),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* TRIM */
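/*
 * Illustrative only (not part of the table): with the macro definitions
 * above, the READ row expands to
 *
 *	{ { ZTI_MODE_FIXED, 8, 1 },  { ZTI_MODE_NULL, 0, 0 },
 *	  { ZTI_MODE_FIXED, 12, 8 }, { ZTI_MODE_NULL, 0, 0 } }
 *
 * i.e. a single issue taskq with 8 threads and eight interrupt taskqs
 * with 12 threads each; spa_taskq_dispatch_ent() below picks one of the
 * eight interrupt taskqs pseudo-randomly for each I/O.
 */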
static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);
uint_t		zio_taskq_batch_pct = 75;	/* 1 thread per cpu in pset */
boolean_t	zio_taskq_sysdc = B_TRUE;	/* use SDC scheduling class */
uint_t		zio_taskq_basedc = 80;		/* base duty cycle */

boolean_t	spa_create_process = B_TRUE;	/* no process ==> no sysdc */
/*
 * Report any spa_load_verify errors found, but do not fail spa_load.
 * This is used by zdb to analyze non-idle pools.
 */
boolean_t	spa_load_verify_dryrun = B_FALSE;
/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"
/*
 * For debugging purposes: print out vdev tree during pool import.
 */
int		spa_load_print_vdev_tree = B_FALSE;
/*
 * A non-zero value for zfs_max_missing_tvds means that we allow importing
 * pools with missing top-level vdevs. This is strictly intended for advanced
 * pool recovery cases since missing data is almost inevitable. Pools with
 * missing devices can only be imported read-only for safety reasons, and their
 * fail-mode will be automatically set to "continue".
 *
 * With 1 missing vdev we should be able to import the pool and mount all
 * datasets. User data that was not modified after the missing device has been
 * added should be recoverable. This means that snapshots created prior to the
 * addition of that device should be completely intact.
 *
 * With 2 missing vdevs, some datasets may fail to mount since there are
 * dataset statistics that are stored as regular metadata. Some data might be
 * recoverable if those vdevs were added recently.
 *
 * With 3 or more missing vdevs, the pool is severely damaged and MOS entries
 * may be missing entirely. Chances of data recovery are very low. Note that
 * there are also risks of performing an inadvertent rewind as we might be
 * missing all the vdevs with the latest uberblocks.
 */
unsigned long	zfs_max_missing_tvds = 0;
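/*
 * For example (a sketch of the recovery flow described above; the sysfs
 * path assumes the Linux module-parameter interface):
 *
 *	echo 1 > /sys/module/zfs/parameters/zfs_max_missing_tvds
 *	zpool import -o readonly=on <pool>
 *
 * would allow a pool with one missing top-level vdev to be imported
 * read-only for data recovery.
 */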
/*
 * The parameters below are similar to zfs_max_missing_tvds but are only
 * intended for a preliminary open of the pool with an untrusted config which
 * might be incomplete or out-dated.
 *
 * We are more tolerant for pools opened from a cachefile since we could have
 * an out-dated cachefile where a device removal was not registered.
 * We could have set the limit arbitrarily high but in the case where devices
 * are really missing we would want to return the proper error codes; we chose
 * SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available
 * and we get a chance to retrieve the trusted config.
 */
uint64_t	zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1;
/*
 * In the case where config was assembled by scanning device paths (/dev/dsk
 * by default) we are less tolerant since all the existing devices should have
 * been detected and we want spa_load to return the right error codes.
 */
uint64_t	zfs_max_missing_tvds_scan = 0;
/*
 * Debugging aid that pauses spa_sync() towards the end.
 */
boolean_t	zfs_pause_spa_sync = B_FALSE;
/*
 * Variables to indicate the livelist condense zthr func should wait at certain
 * points for the livelist to be removed - used to test condense/destroy races
 */
int zfs_livelist_condense_zthr_pause = 0;
int zfs_livelist_condense_sync_pause = 0;
/*
 * Variables to track whether or not condense cancellation has been
 * triggered in testing.
 */
int zfs_livelist_condense_sync_cancel = 0;
int zfs_livelist_condense_zthr_cancel = 0;
/*
 * Variable to track whether or not extra ALLOC blkptrs were added to a
 * livelist entry while it was being condensed (caused by the way we track
 * remapped blkptrs in dbuf_remap_impl)
 */
int zfs_livelist_condense_new_alloc = 0;
/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
	const char *propname = zpool_prop_to_name(prop);

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
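/*
 * A minimal usage sketch (hypothetical, wrapped in #if 0 so it is not
 * compiled): callers allocate an nvlist and then append one entry per
 * property, exactly as spa_prop_get_config() does below.
 */
#if 0
static void
example_prop_list(spa_t *spa, nvlist_t *nvl)
{
	/* String-valued property: intval is ignored. */
	spa_prop_add_list(nvl, ZPOOL_PROP_NAME, spa_name(spa), 0,
	    ZPROP_SRC_NONE);
	/* Numeric property: strval is NULL. */
	spa_prop_add_list(nvl, ZPOOL_PROP_SIZE, NULL, 1ULL << 30,
	    ZPROP_SRC_NONE);
}
#endif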
/*
 * Get property values from the spa configuration.
 */
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
	vdev_t *rvd = spa->spa_root_vdev;
	dsl_pool_t *pool = spa->spa_dsl_pool;
	uint64_t size, alloc, cap, version;
	const zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;
	metaslab_class_t *mc = spa_normal_class(spa);
	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

		alloc = metaslab_class_get_alloc(mc);
		alloc += metaslab_class_get_alloc(spa_special_class(spa));
		alloc += metaslab_class_get_alloc(spa_dedup_class(spa));

		size = metaslab_class_get_space(mc);
		size += metaslab_class_get_space(spa_special_class(spa));
		size += metaslab_class_get_space(spa_dedup_class(spa));

		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
		spa_prop_add_list(*nvp, ZPOOL_PROP_CHECKPOINT, NULL,
		    spa->spa_checkpoint_info.sci_dspace, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
		    metaslab_class_fragmentation(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
		    metaslab_class_expandable_space(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
		    (spa_mode(spa) == SPA_MODE_READ), src);

		cap = (size == 0) ? 0 : (alloc * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
		    ddt_get_pool_dedup_ratio(spa), src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    rvd->vdev_state, src);
	version = spa_version(spa);
	if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
		    version, ZPROP_SRC_DEFAULT);
		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
		    version, ZPROP_SRC_LOCAL);
	spa_prop_add_list(*nvp, ZPOOL_PROP_LOAD_GUID,
	    NULL, spa_load_guid(spa), src);
		/*
		 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
		 * when opening pools created before this version, freedir
		 * will be NULL.
		 */
		if (pool->dp_free_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
			    dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,

		if (pool->dp_leak_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
			    dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
372 if (spa->spa_comment != NULL) {
373 spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
377 if (spa->spa_root != NULL)
378 spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
381 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
382 spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
383 MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
385 spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
386 SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
389 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
390 spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
391 DNODE_MAX_SIZE, ZPROP_SRC_NONE);
393 spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
394 DNODE_MIN_SIZE, ZPROP_SRC_NONE);
397 if ((dp = list_head(&spa->spa_config_list)) != NULL) {
398 if (dp->scd_path == NULL) {
399 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
400 "none", 0, ZPROP_SRC_LOCAL);
401 } else if (strcmp(dp->scd_path, spa_config_path) != 0) {
402 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
403 dp->scd_path, 0, ZPROP_SRC_LOCAL);
/*
 * Get zpool property values.
 */
spa_prop_get(spa_t *spa, nvlist_t **nvp)
	objset_t *mos = spa->spa_meta_objset;

	err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more props to get. */
	if (mos == NULL || spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		zprop_source_t src = ZPROP_SRC_DEFAULT;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPOOL_PROP_INVAL)

		switch (za.za_integer_length) {
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				dsl_pool_config_enter(dp, FTAG);
				err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds);
					dsl_pool_config_exit(dp, FTAG);

				strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				dsl_pool_config_exit(dp, FTAG);
				intval = za.za_first_integer;

			spa_prop_add_list(*nvp, prop, strval, intval, src);

				kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);

			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
				kmem_free(strval, za.za_num_integers);
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);

	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
	if (err && err != ENOENT) {
/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
spa_prop_validate(spa_t *spa, nvlist_t *props)
	int error = 0, reset_bootfs = 0;
	boolean_t has_feature = B_FALSE;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		char *strval, *slash, *check, *fname;
		const char *propname = nvpair_name(elem);
		zpool_prop_t prop = zpool_name_to_prop(propname);

		case ZPOOL_PROP_INVAL:
			if (!zpool_prop_feature(propname)) {
				error = SET_ERROR(EINVAL);

			/*
			 * Sanitize the input.
			 */
			if (nvpair_type(elem) != DATA_TYPE_UINT64) {
				error = SET_ERROR(EINVAL);

			if (nvpair_value_uint64(elem, &intval) != 0) {
				error = SET_ERROR(EINVAL);

				error = SET_ERROR(EINVAL);

			fname = strchr(propname, '@') + 1;
			if (zfeature_lookup_name(fname, NULL) != 0) {
				error = SET_ERROR(EINVAL);

			has_feature = B_TRUE;
		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			    (intval < spa_version(spa) ||
			    intval > SPA_VERSION_BEFORE_FEATURES ||
				error = SET_ERROR(EINVAL);

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
		case ZPOOL_PROP_AUTOEXPAND:
		case ZPOOL_PROP_AUTOTRIM:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);

		case ZPOOL_PROP_MULTIHOST:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);

			uint32_t hostid = zone_get_hostid(NULL);
				spa->spa_hostid = hostid;
				error = SET_ERROR(ENOTSUP);
		case ZPOOL_PROP_BOOTFS:
			/*
			 * If the pool version is less than SPA_VERSION_BOOTFS,
			 * or the pool is still being created (version == 0),
			 * the bootfs property cannot be set.
			 */
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = SET_ERROR(ENOTSUP);

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = SET_ERROR(ENOTSUP);

			error = nvpair_value_string(elem, &strval);

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(

				error = dmu_objset_hold(strval, FTAG, &os);

				/*
				 * Must be ZPL, and its property settings
				 * must be supported by GRUB (compression
				 * is not gzip, and large dnodes are not
				 * used).
				 */
				if (dmu_objset_type(os) != DMU_OST_ZFS) {
					error = SET_ERROR(ENOTSUP);
					dsl_prop_get_int_ds(dmu_objset_ds(os),
					    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
					    !BOOTFS_COMPRESS_VALID(propval)) {
						error = SET_ERROR(ENOTSUP);
					dsl_prop_get_int_ds(dmu_objset_ds(os),
					    zfs_prop_to_name(ZFS_PROP_DNODESIZE),
					    propval != ZFS_DNSIZE_LEGACY) {
						error = SET_ERROR(ENOTSUP);
					objnum = dmu_objset_id(os);

				dmu_objset_rele(os, FTAG);
		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > ZIO_FAILURE_MODE_PANIC)
				error = SET_ERROR(EINVAL);

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = SET_ERROR(EIO);
		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)

			if (strval[0] == '\0')

			if (strcmp(strval, "none") == 0)

			if (strval[0] != '/') {
				error = SET_ERROR(EINVAL);

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = SET_ERROR(EINVAL);
		case ZPOOL_PROP_COMMENT:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					error = SET_ERROR(EINVAL);

			if (strlen(strval) > ZPROP_MAX_COMMENT)
				error = SET_ERROR(E2BIG);
	(void) nvlist_remove_all(props,
	    zpool_prop_to_name(ZPOOL_PROP_DEDUPDITTO));

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
	spa_config_dirent_t *dp;

	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),

	dp = kmem_alloc(sizeof (spa_config_dirent_t),

	if (cachefile[0] == '\0')
		dp->scd_path = spa_strdup(spa_config_path);
	else if (strcmp(cachefile, "none") == 0)
		dp->scd_path = spa_strdup(cachefile);

	list_insert_head(&spa->spa_config_list, dp);

	spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
spa_prop_set(spa_t *spa, nvlist_t *nvp)
	nvpair_t *elem = NULL;
	boolean_t need_sync = B_FALSE;

	if ((error = spa_prop_validate(spa, nvp)) != 0)

	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
		zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));

		if (prop == ZPOOL_PROP_CACHEFILE ||
		    prop == ZPOOL_PROP_ALTROOT ||
		    prop == ZPOOL_PROP_READONLY)

		if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) {

			if (prop == ZPOOL_PROP_VERSION) {
				VERIFY(nvpair_value_uint64(elem, &ver) == 0);
				ASSERT(zpool_prop_feature(nvpair_name(elem)));
				ver = SPA_VERSION_FEATURES;

			/* Save time if the version is already set. */
			if (ver == spa_version(spa))

			/*
			 * In addition to the pool directory object, we might
			 * create the pool properties object, the features for
			 * read object, the features for write object, or the
			 * feature descriptions object.
			 */
			error = dsl_sync_task(spa->spa_name, NULL,
			    spa_sync_version, &ver,
			    6, ZFS_SPACE_CHECK_RESERVED);

	return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
	    nvp, 6, ZFS_SPACE_CHECK_RESERVED));
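/*
 * A hedged usage sketch (not compiled): this is roughly what the
 * ZFS_IOC_POOL_SET_PROPS ioctl path does with a caller-supplied nvlist.
 */
#if 0
static int
example_set_comment(spa_t *spa)
{
	nvlist_t *props = fnvlist_alloc();
	int err;

	fnvlist_add_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_COMMENT), "scratch pool");
	err = spa_prop_set(spa, props);
	fnvlist_free(props);
	return (err);
}
#endif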
/*
 * If the bootfs property value is dsobj, clear it.
 */
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
spa_change_guid_check(void *arg, dmu_tx_t *tx)
	uint64_t *newguid __maybe_unused = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;

	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
		int error = (spa_has_checkpoint(spa)) ?
		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
		return (SET_ERROR(error));

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	vdev_state = rvd->vdev_state;
	spa_config_exit(spa, SCL_STATE, FTAG);

	if (vdev_state != VDEV_STATE_HEALTHY)
		return (SET_ERROR(ENXIO));

	ASSERT3U(spa_guid(spa), !=, *newguid);
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;

	oldguid = spa_guid(spa);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	rvd->vdev_guid = *newguid;
	rvd->vdev_guid_sum += (*newguid - oldguid);
	vdev_config_dirty(rvd);
	spa_config_exit(spa, SCL_STATE, FTAG);

	spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
	    (u_longlong_t)oldguid, (u_longlong_t)*newguid);
/*
 * Change the GUID for the pool. This is done so that we can later
 * re-import a pool built from a clone of our own vdevs. We will modify
 * the root vdev's guid, our own pool guid, and then mark all of our
 * vdevs dirty. Note that we must make sure that all our vdevs are
 * online when we do this, or else any vdevs that weren't present
 * would be orphaned from our pool. We are also going to issue a
 * sysevent to update any watchers.
 */
spa_change_guid(spa_t *spa)
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	guid = spa_generate_guid(NULL);

	error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
	    spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);

		spa_write_cachefile(spa, B_FALSE, B_TRUE);
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID);

	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);
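/*
 * Note: this is the function behind "zpool reguid"; the command reaches it
 * through the ZFS_IOC_POOL_REGUID ioctl.
 */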
/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

spa_error_entry_compare(const void *a, const void *b)
	const spa_error_entry_t *sa = (const spa_error_entry_t *)a;
	const spa_error_entry_t *sb = (const spa_error_entry_t *)b;

	ret = memcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_phys_t));
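	/* TREE_ISIGN() normalizes the raw memcmp() result to -1, 0, or 1. */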
	return (TREE_ISIGN(ret));
/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
	const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
	enum zti_modes mode = ztip->zti_mode;
	uint_t value = ztip->zti_value;
	uint_t count = ztip->zti_count;
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	boolean_t batch = B_FALSE;

	if (mode == ZTI_MODE_NULL) {
		tqs->stqs_taskq = NULL;

	ASSERT3U(count, >, 0);

	tqs->stqs_count = count;
	tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);

		ASSERT3U(value, >=, 1);
		value = MAX(value, 1);
		flags |= TASKQ_DYNAMIC;

		flags |= TASKQ_THREADS_CPU_PCT;
		value = MIN(zio_taskq_batch_pct, 100);

		panic("unrecognized mode for %s_%s taskq (%u:%u) in "
		    zio_type_name[t], zio_taskq_types[q], mode, value);
	for (uint_t i = 0; i < count; i++) {
		(void) snprintf(name, sizeof (name), "%s_%s",
		    zio_type_name[t], zio_taskq_types[q]);

		if (zio_taskq_sysdc && spa->spa_proc != &p0) {
				flags |= TASKQ_DC_BATCH;

			tq = taskq_create_sysdc(name, value, 50, INT_MAX,
			    spa->spa_proc, zio_taskq_basedc, flags);
			pri_t pri = maxclsyspri;
			/*
			 * The write issue taskq can be extremely CPU
			 * intensive. Run it at slightly less important
			 * priority than the other taskqs. Under Linux this
			 * means incrementing the priority value; on platforms
			 * like illumos it should be decremented.
			 */
			if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)

			tq = taskq_create_proc(name, value, pri, 50,
			    INT_MAX, spa->spa_proc, flags);

		tqs->stqs_taskq[i] = tq;
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];

	if (tqs->stqs_taskq == NULL) {
		ASSERT3U(tqs->stqs_count, ==, 0);

	for (uint_t i = 0; i < tqs->stqs_count; i++) {
		ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
		taskq_destroy(tqs->stqs_taskq[i]);

	kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
	tqs->stqs_taskq = NULL;
/*
 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
 * Note that a type may have multiple discrete taskqs to avoid lock contention
 * on the taskq itself. In that case we choose which taskq at random by using
 * the low bits of gethrtime().
 */
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	if (tqs->stqs_count == 1) {
		tq = tqs->stqs_taskq[0];
		tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];

	taskq_dispatch_ent(tq, func, arg, flags, ent);
/*
 * Same as spa_taskq_dispatch_ent() but block on the task until completion.
 */
spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags)
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	if (tqs->stqs_count == 1) {
		tq = tqs->stqs_taskq[0];
		tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];

	id = taskq_dispatch(tq, func, arg, flags);
		taskq_wait_id(tq, id);
spa_create_zio_taskqs(spa_t *spa)
	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_init(spa, t, q);
/*
 * Disabled until spa_thread() can be adapted for Linux.
 */
#undef HAVE_SPA_THREAD

#if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
spa_thread(void *arg)
	psetid_t zio_taskq_psrset_bind = PS_NONE;
	callb_cpr_t cprinfo;

	user_t *pu = PTOU(curproc);

	CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,

	ASSERT(curproc != &p0);
	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
	    "zpool-%s", spa->spa_name);
	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
	/* bind this thread to the requested psrset */
	if (zio_taskq_psrset_bind != PS_NONE) {
		mutex_enter(&cpu_lock);
		mutex_enter(&pidlock);
		mutex_enter(&curproc->p_lock);

		if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
		    0, NULL, NULL) == 0) {
			curthread->t_bind_pset = zio_taskq_psrset_bind;
			    "Couldn't bind process for zfs pool \"%s\" to "
			    "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);

		mutex_exit(&curproc->p_lock);
		mutex_exit(&pidlock);
		mutex_exit(&cpu_lock);

	if (zio_taskq_sysdc) {
		sysdc_thread_enter(curthread, 100, 0);

	spa->spa_proc = curproc;
	spa->spa_did = curthread->t_did;
	spa_create_zio_taskqs(spa);

	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);

	spa->spa_proc_state = SPA_PROC_ACTIVE;
	cv_broadcast(&spa->spa_proc_cv);

	CALLB_CPR_SAFE_BEGIN(&cprinfo);
	while (spa->spa_proc_state == SPA_PROC_ACTIVE)
		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
	CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);

	ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
	spa->spa_proc_state = SPA_PROC_GONE;
	spa->spa_proc = &p0;
	cv_broadcast(&spa->spa_proc_cv);
	CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */

	mutex_enter(&curproc->p_lock);
/*
 * Activate an uninitialized pool.
 */
spa_activate(spa_t *spa, spa_mode_t mode)
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_mode = mode;

	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
	spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);
	spa->spa_special_class = metaslab_class_create(spa, zfs_metaslab_ops);
	spa->spa_dedup_class = metaslab_class_create(spa, zfs_metaslab_ops);

	/* Try to create a covering process */
	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
	ASSERT(spa->spa_proc == &p0);

#ifdef HAVE_SPA_THREAD
	/* Only create a process if we're going to be around a while. */
	if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
		if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
			spa->spa_proc_state = SPA_PROC_CREATED;
			while (spa->spa_proc_state == SPA_PROC_CREATED) {
				cv_wait(&spa->spa_proc_cv,
				    &spa->spa_proc_lock);
			ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
			ASSERT(spa->spa_proc != &p0);
			ASSERT(spa->spa_did != 0);
			    "Couldn't create process for zfs pool \"%s\"\n",
#endif /* HAVE_SPA_THREAD */
	mutex_exit(&spa->spa_proc_lock);

	/* If we didn't create a process, we need to create our taskqs. */
	if (spa->spa_proc == &p0) {
		spa_create_zio_taskqs(spa);

	for (size_t i = 0; i < TXG_SIZE; i++) {
		spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL,

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
	    offsetof(objset_t, os_evicting_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list, spa,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));

	spa_keystore_init(&spa->spa_keystore);
	/*
	 * This taskq is used to perform zvol-minor-related tasks
	 * asynchronously. This has several advantages, including easy
	 * resolution of various deadlocks (zfsonlinux bug #3681).
	 *
	 * The taskq must be single threaded to ensure tasks are always
	 * processed in the order in which they were dispatched.
	 *
	 * A taskq per pool allows one to keep the pools independent.
	 * This way if one pool is suspended, it will not impact another.
	 *
	 * The preferred location to dispatch a zvol minor task is a sync
	 * task. In this context, there is easy access to the spa_t and minimal
	 * error handling is required because the sync task must succeed.
	 */
	spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri,

	/*
	 * Taskq dedicated to prefetcher threads: this is used to prevent the
	 * pool traverse code from monopolizing the global (and limited)
	 * system_taskq by inappropriately scheduling long running tasks on it.
	 */
	spa->spa_prefetch_taskq = taskq_create("z_prefetch", boot_ncpus,
	    defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC);

	/*
	 * The taskq to upgrade datasets in this pool. Currently used by
	 * feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA.
	 */
	spa->spa_upgrade_taskq = taskq_create("z_upgrade", boot_ncpus,
	    defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC);
/*
 * Opposite of spa_activate().
 */
spa_deactivate(spa_t *spa)
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);
	ASSERT(spa->spa_async_zio_root == NULL);
	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	spa_evicting_os_wait(spa);

	if (spa->spa_zvol_taskq) {
		taskq_destroy(spa->spa_zvol_taskq);
		spa->spa_zvol_taskq = NULL;

	if (spa->spa_prefetch_taskq) {
		taskq_destroy(spa->spa_prefetch_taskq);
		spa->spa_prefetch_taskq = NULL;

	if (spa->spa_upgrade_taskq) {
		taskq_destroy(spa->spa_upgrade_taskq);
		spa->spa_upgrade_taskq = NULL;
	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_evicting_os_list);
	list_destroy(&spa->spa_state_dirty_list);

	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_fini(spa, t, q);

	for (size_t i = 0; i < TXG_SIZE; i++) {
		ASSERT3P(spa->spa_txg_zio[i], !=, NULL);
		VERIFY0(zio_wait(spa->spa_txg_zio[i]));
		spa->spa_txg_zio[i] = NULL;

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	metaslab_class_destroy(spa->spa_special_class);
	spa->spa_special_class = NULL;

	metaslab_class_destroy(spa->spa_dedup_class);
	spa->spa_dedup_class = NULL;
	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues. Empty them just in case.
	 */
	spa_errlog_drain(spa);
	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa_keystore_fini(&spa->spa_keystore);

	spa->spa_state = POOL_STATE_UNINITIALIZED;

	mutex_enter(&spa->spa_proc_lock);
	if (spa->spa_proc_state != SPA_PROC_NONE) {
		ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
		spa->spa_proc_state = SPA_PROC_DEACTIVATE;
		cv_broadcast(&spa->spa_proc_cv);
		while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
			ASSERT(spa->spa_proc != &p0);
			cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
		ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
		spa->spa_proc_state = SPA_PROC_NONE;
	ASSERT(spa->spa_proc == &p0);
	mutex_exit(&spa->spa_proc_lock);
	/*
	 * We want to make sure spa_thread() has actually exited the ZFS
	 * module, so that the module can't be unloaded out from underneath
	 * it.
	 */
	if (spa->spa_did != 0) {
		thread_join(spa->spa_did);
/*
 * Verify a pool configuration, and construct the vdev tree appropriately. This
 * will create all the necessary vdevs in the appropriate layout, with each
 * vdev in the CLOSED state. This will prep the pool before open/creation/
 * import. All vdev validation is done by the vdev_alloc() routine.
 */
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)

	if ((*vdp)->vdev_ops->vdev_op_leaf)

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,

	if (error == ENOENT)

		return (SET_ERROR(EINVAL));

	for (int c = 0; c < children; c++) {
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,

	ASSERT(*vdp != NULL);
spa_should_flush_logs_on_unload(spa_t *spa)
	if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))

	if (!spa_writeable(spa))

	if (!spa->spa_sync_on)

	if (spa_state(spa) != POOL_STATE_EXPORTED)

	if (zfs_keep_log_spacemaps_at_export)

/*
 * Opens a transaction that will set the flag that will instruct
 * spa_sync to attempt to flush all the metaslabs for that txg.
 */
spa_unload_log_sm_flush_all(spa_t *spa)
	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));

	ASSERT3U(spa->spa_log_flushall_txg, ==, 0);
	spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);

	txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg);
spa_unload_log_sm_metadata(spa_t *spa)
	void *cookie = NULL;
	while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg,
	    &cookie)) != NULL) {
		VERIFY0(sls->sls_mscount);
		kmem_free(sls, sizeof (spa_log_sm_t));

	for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
	    e != NULL; e = list_head(&spa->spa_log_summary)) {
		VERIFY0(e->lse_mscount);
		list_remove(&spa->spa_log_summary, e);
		kmem_free(e, sizeof (log_summary_entry_t));

	spa->spa_unflushed_stats.sus_nblocks = 0;
	spa->spa_unflushed_stats.sus_memused = 0;
	spa->spa_unflushed_stats.sus_blocklimit = 0;
spa_destroy_aux_threads(spa_t *spa)
	if (spa->spa_condense_zthr != NULL) {
		zthr_destroy(spa->spa_condense_zthr);
		spa->spa_condense_zthr = NULL;
	if (spa->spa_checkpoint_discard_zthr != NULL) {
		zthr_destroy(spa->spa_checkpoint_discard_zthr);
		spa->spa_checkpoint_discard_zthr = NULL;
	if (spa->spa_livelist_delete_zthr != NULL) {
		zthr_destroy(spa->spa_livelist_delete_zthr);
		spa->spa_livelist_delete_zthr = NULL;
	if (spa->spa_livelist_condense_zthr != NULL) {
		zthr_destroy(spa->spa_livelist_condense_zthr);
		spa->spa_livelist_condense_zthr = NULL;
/*
 * Opposite of spa_load().
 */
spa_unload(spa_t *spa)
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED);

	spa_import_progress_remove(spa_guid(spa));
	spa_load_note(spa, "UNLOADING");

	spa_wake_waiters(spa);

	/*
	 * If the log space map feature is enabled and the pool is getting
	 * exported (but not destroyed), we want to spend some time flushing
	 * as many metaslabs as we can in an attempt to destroy log space
	 * maps and save import time.
	 */
	if (spa_should_flush_logs_on_unload(spa))
		spa_unload_log_sm_flush_all(spa);

	spa_async_suspend(spa);
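	/*
	 * With async tasks suspended, stop the per-vdev maintenance threads
	 * (initialize, TRIM, autotrim, rebuild) before tearing down the
	 * vdev tree.
	 */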
	if (spa->spa_root_vdev) {
		vdev_t *root_vdev = spa->spa_root_vdev;
		vdev_initialize_stop_all(root_vdev, VDEV_INITIALIZE_ACTIVE);
		vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE);
		vdev_autotrim_stop_all(spa);
		vdev_rebuild_stop_all(spa);

	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;

	/*
	 * This ensures that there is no async metaslab prefetching
	 * while we attempt to unload the spa.
	 */
	if (spa->spa_root_vdev != NULL) {
		for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++) {
			vdev_t *vc = spa->spa_root_vdev->vdev_child[c];
			if (vc->vdev_mg != NULL)
				taskq_wait(vc->vdev_mg->mg_taskq);

	if (spa->spa_mmp.mmp_thread)
		mmp_thread_stop(spa);

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	if (spa->spa_async_zio_root != NULL) {
		for (int i = 0; i < max_ncpus; i++)
			(void) zio_wait(spa->spa_async_zio_root[i]);
		kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
		spa->spa_async_zio_root = NULL;
	if (spa->spa_vdev_removal != NULL) {
		spa_vdev_removal_destroy(spa->spa_vdev_removal);
		spa->spa_vdev_removal = NULL;

	spa_destroy_aux_threads(spa);

	spa_condense_fini(spa);

	bpobj_close(&spa->spa_deferred_bpobj);

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
		spa->spa_meta_objset = NULL;

	spa_unload_log_sm_metadata(spa);
	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	for (int i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	spa->spa_spares.sav_count = 0;

	for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
		vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;

	spa->spa_indirect_vdevs_loaded = B_FALSE;

	if (spa->spa_comment != NULL) {
		spa_strfree(spa->spa_comment);
		spa->spa_comment = NULL;

	spa_config_exit(spa, SCL_ALL, spa);
/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
spa_load_spares(spa_t *spa)
	/*
	 * zdb opens both the current state of the pool and the
	 * checkpointed state (if present), with a different spa_t.
	 *
	 * As spare vdevs are shared among open pools, we skip loading
	 * them when we load the checkpointed state of the pool.
	 */
	if (!spa_writeable(spa))

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)

	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process. For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in). During this phase we open and
	 * validate each vdev on the spare list. If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);

		spa->spa_spares.sav_vdevs[i] = vd;
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev. Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise). Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);

		vd->vdev_aux = &spa->spa_spares;

		if (vdev_open(vd) != 0)

		if (vdev_validate_aux(vd) == 0)
	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
/*
 * Load (or re-load) the current list of vdevs describing the active l2cache
 * for this pool. When this is called, we have some form of basic information
 * in 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them,
 * and then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
spa_load_l2cache(spa_t *spa)
	nvlist_t **l2cache = NULL;
	int i, j, oldnvdevs;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	/*
	 * zdb opens both the current state of the pool and the
	 * checkpointed state (if present), with a different spa_t.
	 *
	 * As L2 caches are part of the ARC which is shared among open
	 * pools, we skip loading them when we load the checkpointed
	 * state of the pool.
	 */
	if (!spa_writeable(spa))

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;

	if (sav->sav_config == NULL) {

	VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
	newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,

		for (j = 0; j < oldnvdevs; j++) {
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */

		if (newvdevs[i] == NULL) {
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)

			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd))
				l2arc_add_vdev(spa, vd);

			/*
			 * Upon cache device addition to a pool or pool
			 * creation with a cache device or if the header
			 * of the device is invalid we issue an async
			 * TRIM command for the whole device which will
			 * execute if l2arc_trim_ahead > 0.
			 */
			spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	if (sav->sav_count > 0)
		l2cache = kmem_alloc(sav->sav_count * sizeof (void *),
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		ASSERT(vd->vdev_isl2cache);

		if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
		    pool != 0ULL && l2arc_vdev_present(vd))
			l2arc_remove_vdev(vd);
		vdev_clear_stats(vd);

	kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);

	kmem_free(l2cache, sav->sav_count * sizeof (void *));
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
	char *packed = NULL;

	error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);

	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = vmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
	error = nvlist_unpack(packed, nvsize, value, 0);
	vmem_free(packed, nvsize);
/*
 * Concrete top-level vdevs that are not missing and are not logs. At every
 * spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds.
 */
spa_healthy_core_tvds(spa_t *spa)
	vdev_t *rvd = spa->spa_root_vdev;

	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
		vdev_t *vd = rvd->vdev_child[i];

		if (vdev_is_concrete(vd) && !vdev_is_dead(vd))
/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
 */
spa_check_removed(vdev_t *vd)
	for (uint64_t c = 0; c < vd->vdev_children; c++)
		spa_check_removed(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
	    vdev_is_concrete(vd)) {
		zfs_post_autoreplace(vd->vdev_spa, vd);
		spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK);
spa_check_for_missing_logs(spa_t *spa)
	vdev_t *rvd = spa->spa_root_vdev;

	/*
	 * If we're doing a normal import, then build up any additional
	 * diagnostic information about missing log devices.
	 * We'll pass this up to the user for further processing.
	 */
	if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
		nvlist_t **child, *nv;

		child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t *),
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);

		for (uint64_t c = 0; c < rvd->vdev_children; c++) {
			vdev_t *tvd = rvd->vdev_child[c];

			/*
			 * We consider a device as missing only if it failed
			 * to open (i.e. offline or faulted is not considered
			 * missing).
			 */
			if (tvd->vdev_islog &&
			    tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
				child[idx++] = vdev_config_generate(spa, tvd,
				    B_FALSE, VDEV_CONFIG_MISSING);
			fnvlist_add_nvlist_array(nv,
			    ZPOOL_CONFIG_CHILDREN, child, idx);
			fnvlist_add_nvlist(spa->spa_load_info,
			    ZPOOL_CONFIG_MISSING_DEVICES, nv);

			for (uint64_t i = 0; i < idx; i++)
				nvlist_free(child[i]);
		kmem_free(child, rvd->vdev_children * sizeof (char **));

			spa_load_failed(spa, "some log devices are missing");
			vdev_dbgmsg_print_tree(rvd, 2);
			return (SET_ERROR(ENXIO));

		for (uint64_t c = 0; c < rvd->vdev_children; c++) {
			vdev_t *tvd = rvd->vdev_child[c];

			if (tvd->vdev_islog &&
			    tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
				spa_set_log_state(spa, SPA_LOG_CLEAR);
				spa_load_note(spa, "some log devices are "
				    "missing, ZIL is dropped.");
				vdev_dbgmsg_print_tree(rvd, 2);
/*
 * Check for missing log devices
 */
spa_check_logs(spa_t *spa)
	boolean_t rv = B_FALSE;
	dsl_pool_t *dp = spa_get_dsl(spa);

	switch (spa->spa_log_state) {
	case SPA_LOG_MISSING:
		/* need to recheck in case slog has been restored */
	case SPA_LOG_UNKNOWN:
		rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
		    zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
			spa_set_log_state(spa, SPA_LOG_MISSING);
spa_passivate_log(spa_t *spa)
	vdev_t *rvd = spa->spa_root_vdev;
	boolean_t slog_found = B_FALSE;

	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	if (!spa_has_slogs(spa))

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_islog) {
			metaslab_group_passivate(mg);
			slog_found = B_TRUE;

	return (slog_found);
spa_activate_log(spa_t *spa)
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_islog)
			metaslab_group_activate(mg);
spa_reset_logs(spa_t *spa)
	error = dmu_objset_find(spa_name(spa), zil_reset,
	    NULL, DS_FIND_CHILDREN);
		/*
		 * We successfully offlined the log device, sync out the
		 * current txg so that the "stubby" block can be removed
		 * by zil_sync().
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);
spa_aux_check_removed(spa_aux_vdev_t *sav)
	for (int i = 0; i < sav->sav_count; i++)
		spa_check_removed(sav->sav_vdevs[i]);

spa_claim_notify(zio_t *zio)
	spa_t *spa = zio->io_spa;

	mutex_enter(&spa->spa_props_lock);	/* any mutex will do */
	if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
		spa->spa_claim_max_txg = zio->io_bp->blk_birth;
	mutex_exit(&spa->spa_props_lock);
typedef struct spa_load_error {
	uint64_t	sle_meta_count;
	uint64_t	sle_data_count;

spa_load_verify_done(zio_t *zio)
	blkptr_t *bp = zio->io_bp;
	spa_load_error_t *sle = zio->io_private;
	dmu_object_type_t type = BP_GET_TYPE(bp);
	int error = zio->io_error;
	spa_t *spa = zio->io_spa;

	abd_free(zio->io_abd);
		if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
		    type != DMU_OT_INTENT_LOG)
			atomic_inc_64(&sle->sle_meta_count);
			atomic_inc_64(&sle->sle_data_count);

	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
	cv_broadcast(&spa->spa_scrub_io_cv);
	mutex_exit(&spa->spa_scrub_lock);
/*
 * Maximum number of inflight bytes is the log2 fraction of the arc size.
 * By default, we set it to 1/16th of the arc.
 */
int spa_load_verify_shift = 4;
int spa_load_verify_metadata = B_TRUE;
int spa_load_verify_data = B_TRUE;
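/*
 * Worked example (illustrative): with spa_load_verify_shift = 4 and an ARC
 * target of 8 GiB, spa_load_verify_cb() below caps in-flight verify I/O at
 * 8 GiB >> 4 = 512 MiB.
 */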
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
	if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
	    BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))

	/*
	 * Note: normally this routine will not be called if
	 * spa_load_verify_metadata is not set. However, it may be useful
	 * to manually set the flag after the traversal has begun.
	 */
	if (!spa_load_verify_metadata)
	if (!BP_IS_METADATA(bp) && !spa_load_verify_data)

	uint64_t maxinflight_bytes =
	    arc_target_bytes() >> spa_load_verify_shift;
	size_t size = BP_GET_PSIZE(bp);

	mutex_enter(&spa->spa_scrub_lock);
	while (spa->spa_load_verify_bytes >= maxinflight_bytes)
		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
	spa->spa_load_verify_bytes += size;
	mutex_exit(&spa->spa_scrub_lock);

	zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size,
	    spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
	if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));
2279 spa_load_verify(spa_t *spa)
2282 spa_load_error_t sle = { 0 };
2283 zpool_load_policy_t policy;
2284 boolean_t verify_ok = B_FALSE;
2287 zpool_get_load_policy(spa->spa_config, &policy);
2289 if (policy.zlp_rewind & ZPOOL_NEVER_REWIND)
2292 dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
2293 error = dmu_objset_find_dp(spa->spa_dsl_pool,
2294 spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
2296 dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
2300 rio = zio_root(spa, NULL, &sle,
2301 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
2303 if (spa_load_verify_metadata) {
2304 if (spa->spa_extreme_rewind) {
2305 spa_load_note(spa, "performing a complete scan of the "
2306 "pool since extreme rewind is on. This may take "
2307 "a very long time.\n (spa_load_verify_data=%u, "
2308 "spa_load_verify_metadata=%u)",
2309 spa_load_verify_data, spa_load_verify_metadata);
2312 error = traverse_pool(spa, spa->spa_verify_min_txg,
2313 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
2314 TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio);
2317 (void) zio_wait(rio);
2318 ASSERT0(spa->spa_load_verify_bytes);
2320 spa->spa_load_meta_errors = sle.sle_meta_count;
2321 spa->spa_load_data_errors = sle.sle_data_count;
2323 if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) {
2324 spa_load_note(spa, "spa_load_verify found %llu metadata errors "
2325 "and %llu data errors", (u_longlong_t)sle.sle_meta_count,
2326 (u_longlong_t)sle.sle_data_count);
2329 if (spa_load_verify_dryrun ||
2330 (!error && sle.sle_meta_count <= policy.zlp_maxmeta &&
2331 sle.sle_data_count <= policy.zlp_maxdata)) {
2335 spa->spa_load_txg = spa->spa_uberblock.ub_txg;
2336 spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
2338 loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
2339 VERIFY(nvlist_add_uint64(spa->spa_load_info,
2340 ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
2341 VERIFY(nvlist_add_int64(spa->spa_load_info,
2342 ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
2343 VERIFY(nvlist_add_uint64(spa->spa_load_info,
2344 ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
2346 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
2349 if (spa_load_verify_dryrun)
2353 if (error != ENXIO && error != EIO)
2354 error = SET_ERROR(EIO);
2358 return (verify_ok ? 0 : EIO);
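/*
 * Hypothetical helper mirroring the acceptance test above (a sketch for
 * clarity, not used by the actual load path): verification passes when
 * the traversal succeeded and both error counts fall within the rewind
 * policy's limits.
 */
static boolean_t
spa_load_verify_ok_sketch(int error, const spa_load_error_t *sle,
    const zpool_load_policy_t *policy)
{
	return (error == 0 &&
	    sle->sle_meta_count <= policy->zlp_maxmeta &&
	    sle->sle_data_count <= policy->zlp_maxdata);
}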
2362 * Find a value in the pool props object.
2365 spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
2367 (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
2368 zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
2372 * Find a value in the pool directory object.
2375 spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent)
2377 int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
2378 name, sizeof (uint64_t), 1, val);
2380 if (error != 0 && (error != ENOENT || log_enoent)) {
2381 spa_load_failed(spa, "couldn't get '%s' value in MOS directory "
2382 "[error=%d]", name, error);
2389 spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
2391 vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
2392 return (SET_ERROR(err));
2396 spa_livelist_delete_check(spa_t *spa)
2398 return (spa->spa_livelists_to_delete != 0);
2403 spa_livelist_delete_cb_check(void *arg, zthr_t *z)
2406 return (spa_livelist_delete_check(spa));
2410 delete_blkptr_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
2413 zio_free(spa, tx->tx_txg, bp);
2414 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
2415 -bp_get_dsize_sync(spa, bp),
2416 -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
2421 dsl_get_next_livelist_obj(objset_t *os, uint64_t zap_obj, uint64_t *llp)
2426 zap_cursor_init(&zc, os, zap_obj);
2427 err = zap_cursor_retrieve(&zc, &za);
2428 zap_cursor_fini(&zc);
2430 *llp = za.za_first_integer;
2435 * Components of livelist deletion that must be performed in syncing
2436 * context: freeing block pointers and updating the pool-wide data
2437 * structures to indicate how much work is left to do
2439 typedef struct sublist_delete_arg {
2444 } sublist_delete_arg_t;
2447 sublist_delete_sync(void *arg, dmu_tx_t *tx)
2449 sublist_delete_arg_t *sda = arg;
2450 spa_t *spa = sda->spa;
2451 dsl_deadlist_t *ll = sda->ll;
2452 uint64_t key = sda->key;
2453 bplist_t *to_free = sda->to_free;
2455 bplist_iterate(to_free, delete_blkptr_cb, spa, tx);
2456 dsl_deadlist_remove_entry(ll, key, tx);
2459 typedef struct livelist_delete_arg {
2463 } livelist_delete_arg_t;
2466 livelist_delete_sync(void *arg, dmu_tx_t *tx)
2468 livelist_delete_arg_t *lda = arg;
2469 spa_t *spa = lda->spa;
2470 uint64_t ll_obj = lda->ll_obj;
2471 uint64_t zap_obj = lda->zap_obj;
2472 objset_t *mos = spa->spa_meta_objset;
2475 /* free the livelist and decrement the feature count */
2476 VERIFY0(zap_remove_int(mos, zap_obj, ll_obj, tx));
2477 dsl_deadlist_free(mos, ll_obj, tx);
2478 spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx);
2479 VERIFY0(zap_count(mos, zap_obj, &count));
2481 /* no more livelists to delete */
2482 VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
2483 DMU_POOL_DELETED_CLONES, tx));
2484 VERIFY0(zap_destroy(mos, zap_obj, tx));
2485 spa->spa_livelists_to_delete = 0;
2486 spa_notify_waiters(spa);
2491 * Load in the value for the livelist to be removed and open it. Then,
2492 * load its first sublist and determine which block pointers should actually
2493 * be freed. Then, call a synctask which performs the actual frees and updates
2494 * the pool-wide livelist data.
2498 spa_livelist_delete_cb(void *arg, zthr_t *z)
2501 uint64_t ll_obj = 0, count;
2502 objset_t *mos = spa->spa_meta_objset;
2503 uint64_t zap_obj = spa->spa_livelists_to_delete;
2505 * Determine the next livelist to delete. This function should only
2506 * be called if there is at least one deleted clone.
2508 VERIFY0(dsl_get_next_livelist_obj(mos, zap_obj, &ll_obj));
2509 VERIFY0(zap_count(mos, ll_obj, &count));
2511 dsl_deadlist_t ll = { 0 };
2512 dsl_deadlist_entry_t *dle;
2514 dsl_deadlist_open(&ll, mos, ll_obj);
2515 dle = dsl_deadlist_first(&ll);
2516 ASSERT3P(dle, !=, NULL);
2517 bplist_create(&to_free);
2518 int err = dsl_process_sub_livelist(&dle->dle_bpobj, &to_free,
2521 sublist_delete_arg_t sync_arg = {
2524 .key = dle->dle_mintxg,
2527 zfs_dbgmsg("deleting sublist (id %llu) from"
2528 " livelist %llu, %d remaining",
2529 dle->dle_bpobj.bpo_object, ll_obj, count - 1);
2530 VERIFY0(dsl_sync_task(spa_name(spa), NULL,
2531 sublist_delete_sync, &sync_arg, 0,
2532 ZFS_SPACE_CHECK_DESTROY));
2534 ASSERT(err == EINTR);
2536 bplist_clear(&to_free);
2537 bplist_destroy(&to_free);
2538 dsl_deadlist_close(&ll);
2540 livelist_delete_arg_t sync_arg = {
2545 zfs_dbgmsg("deletion of livelist %llu completed", ll_obj);
2546 VERIFY0(dsl_sync_task(spa_name(spa), NULL, livelist_delete_sync,
2547 &sync_arg, 0, ZFS_SPACE_CHECK_DESTROY));
2552 spa_start_livelist_destroy_thread(spa_t *spa)
2554 ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL);
2555 spa->spa_livelist_delete_zthr = zthr_create(
2556 spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa);
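/*
 * zthr usage sketch (assumed semantics, for orientation): the check
 * callback gates the body callback, and zthr re-invokes the pair until
 * the thread is cancelled:
 *
 *	zthr_t *t = zthr_create(check_cb, body_cb, arg);
 *	...
 *	zthr_cancel(t);
 *	zthr_destroy(t);
 */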
2559 typedef struct livelist_new_arg {
2562 } livelist_new_arg_t;
2565 livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
2569 livelist_new_arg_t *lna = arg;
2571 bplist_append(lna->frees, bp);
2573 bplist_append(lna->allocs, bp);
2574 zfs_livelist_condense_new_alloc++;
2579 typedef struct livelist_condense_arg {
2582 uint64_t first_size;
2584 } livelist_condense_arg_t;
2587 spa_livelist_condense_sync(void *arg, dmu_tx_t *tx)
2589 livelist_condense_arg_t *lca = arg;
2590 spa_t *spa = lca->spa;
2592 dsl_dataset_t *ds = spa->spa_to_condense.ds;
2594 /* Have we been cancelled? */
2595 if (spa->spa_to_condense.cancelled) {
2596 zfs_livelist_condense_sync_cancel++;
2600 dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
2601 dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
2602 dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist;
2605 * It's possible that the livelist was changed while the zthr was
2606 * running. Therefore, we need to check for new blkptrs in the two
2607 * entries being condensed and continue to track them in the livelist.
2608 * Because of the way we handle remapped blkptrs (see dbuf_remap_impl),
2609 * it's possible that the newly added blkptrs are FREEs or ALLOCs so
2610 * we need to sort them into two different bplists.
2612 uint64_t first_obj = first->dle_bpobj.bpo_object;
2613 uint64_t next_obj = next->dle_bpobj.bpo_object;
2614 uint64_t cur_first_size = first->dle_bpobj.bpo_phys->bpo_num_blkptrs;
2615 uint64_t cur_next_size = next->dle_bpobj.bpo_phys->bpo_num_blkptrs;
2617 bplist_create(&new_frees);
2618 livelist_new_arg_t new_bps = {
2619 .allocs = &lca->to_keep,
2620 .frees = &new_frees,
2623 if (cur_first_size > lca->first_size) {
2624 VERIFY0(livelist_bpobj_iterate_from_nofree(&first->dle_bpobj,
2625 livelist_track_new_cb, &new_bps, lca->first_size));
2627 if (cur_next_size > lca->next_size) {
2628 VERIFY0(livelist_bpobj_iterate_from_nofree(&next->dle_bpobj,
2629 livelist_track_new_cb, &new_bps, lca->next_size));
2632 dsl_deadlist_clear_entry(first, ll, tx);
2633 ASSERT(bpobj_is_empty(&first->dle_bpobj));
2634 dsl_deadlist_remove_entry(ll, next->dle_mintxg, tx);
2636 bplist_iterate(&lca->to_keep, dsl_deadlist_insert_alloc_cb, ll, tx);
2637 bplist_iterate(&new_frees, dsl_deadlist_insert_free_cb, ll, tx);
2638 bplist_destroy(&new_frees);
2640 char dsname[ZFS_MAX_DATASET_NAME_LEN];
2641 dsl_dataset_name(ds, dsname);
2642 zfs_dbgmsg("txg %llu condensing livelist of %s (id %llu), bpobj %llu "
2643 "(%llu blkptrs) and bpobj %llu (%llu blkptrs) -> bpobj %llu "
2644 "(%llu blkptrs)", tx->tx_txg, dsname, ds->ds_object, first_obj,
2645 cur_first_size, next_obj, cur_next_size,
2646 first->dle_bpobj.bpo_object,
2647 first->dle_bpobj.bpo_phys->bpo_num_blkptrs);
2649 dmu_buf_rele(ds->ds_dbuf, spa);
2650 spa->spa_to_condense.ds = NULL;
2651 bplist_clear(&lca->to_keep);
2652 bplist_destroy(&lca->to_keep);
2653 kmem_free(lca, sizeof (livelist_condense_arg_t));
2654 spa->spa_to_condense.syncing = B_FALSE;
2658 spa_livelist_condense_cb(void *arg, zthr_t *t)
2660 while (zfs_livelist_condense_zthr_pause &&
2661 !(zthr_has_waiters(t) || zthr_iscancelled(t)))
2665 dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
2666 dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
2667 uint64_t first_size, next_size;
2669 livelist_condense_arg_t *lca =
2670 kmem_alloc(sizeof (livelist_condense_arg_t), KM_SLEEP);
2671 bplist_create(&lca->to_keep);
2674 * Process the livelists (matching FREEs and ALLOCs) in open context
2675 * so we have minimal work in syncing context to condense.
2677 * We save bpobj sizes (first_size and next_size) to use later in
2678 * syncing context to determine if entries were added to these sublists
2679 * while in open context. This is possible because the clone is still
2680 * active and open for normal writes and we want to make sure the new,
2681 * unprocessed blockpointers are inserted into the livelist normally.
2683 	 * Note that dsl_process_sub_livelist() both stores the number of
2684 	 * blockpointers and iterates over them while the bpobj's lock is held,
2685 	 * so the sizes returned to us are consistent with what was actually
2686 	 * written in open context.
2688 int err = dsl_process_sub_livelist(&first->dle_bpobj, &lca->to_keep, t,
2691 err = dsl_process_sub_livelist(&next->dle_bpobj, &lca->to_keep,
2695 while (zfs_livelist_condense_sync_pause &&
2696 !(zthr_has_waiters(t) || zthr_iscancelled(t)))
2699 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
2700 dmu_tx_mark_netfree(tx);
2701 dmu_tx_hold_space(tx, 1);
2702 err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE);
2705 * Prevent the condense zthr restarting before
2706 * the synctask completes.
2708 spa->spa_to_condense.syncing = B_TRUE;
2710 lca->first_size = first_size;
2711 lca->next_size = next_size;
2712 dsl_sync_task_nowait(spa_get_dsl(spa),
2713 spa_livelist_condense_sync, lca, 0,
2714 ZFS_SPACE_CHECK_NONE, tx);
2720 	 * Condensing cannot continue: either it was externally stopped or
2721 * we were unable to assign to a tx because the pool has run out of
2722 * space. In the second case, we'll just end up trying to condense
2723 * again in a later txg.
2726 bplist_clear(&lca->to_keep);
2727 bplist_destroy(&lca->to_keep);
2728 kmem_free(lca, sizeof (livelist_condense_arg_t));
2729 dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf, spa);
2730 spa->spa_to_condense.ds = NULL;
2732 zfs_livelist_condense_zthr_cancel++;
2737 * Check that there is something to condense but that a condense is not
2738 * already in progress and that condensing has not been cancelled.
2741 spa_livelist_condense_cb_check(void *arg, zthr_t *z)
2744 if ((spa->spa_to_condense.ds != NULL) &&
2745 (spa->spa_to_condense.syncing == B_FALSE) &&
2746 (spa->spa_to_condense.cancelled == B_FALSE)) {
2753 spa_start_livelist_condensing_thread(spa_t *spa)
2755 spa->spa_to_condense.ds = NULL;
2756 spa->spa_to_condense.first = NULL;
2757 spa->spa_to_condense.next = NULL;
2758 spa->spa_to_condense.syncing = B_FALSE;
2759 spa->spa_to_condense.cancelled = B_FALSE;
2761 ASSERT3P(spa->spa_livelist_condense_zthr, ==, NULL);
2762 spa->spa_livelist_condense_zthr = zthr_create(
2763 spa_livelist_condense_cb_check, spa_livelist_condense_cb, spa);
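/*
 * Lifecycle sketch (informational): spa_to_condense is armed elsewhere
 * by setting ds/first/next; the check callback above then admits the
 * zthr, the body matches FREEs against ALLOCs in open context, and
 * spa_livelist_condense_sync() clears the state in syncing context.
 * Setting "cancelled" aborts either stage.
 */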
2767 spa_spawn_aux_threads(spa_t *spa)
2769 ASSERT(spa_writeable(spa));
2771 ASSERT(MUTEX_HELD(&spa_namespace_lock));
2773 spa_start_indirect_condensing_thread(spa);
2774 spa_start_livelist_destroy_thread(spa);
2775 spa_start_livelist_condensing_thread(spa);
2777 ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL);
2778 spa->spa_checkpoint_discard_zthr =
2779 zthr_create(spa_checkpoint_discard_thread_check,
2780 spa_checkpoint_discard_thread, spa);
2784 * Fix up config after a partly-completed split. This is done with the
2785 * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off
2786 * pool have that entry in their config, but only the splitting one contains
2787 * a list of all the guids of the vdevs that are being split off.
2789 * This function determines what to do with that list: either rejoin
2790 * all the disks to the pool, or complete the splitting process. To attempt
2791 * the rejoin, each disk that is offlined is marked online again, and
2792 * we do a reopen() call. If the vdev label for every disk that was
2793 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
2794 * then we call vdev_split() on each disk, and complete the split.
2796 * Otherwise we leave the config alone, with all the vdevs in place in
2797 * the original pool.
2800 spa_try_repair(spa_t *spa, nvlist_t *config)
2807 boolean_t attempt_reopen;
2809 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
2812 /* check that the config is complete */
2813 if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
2814 &glist, &gcount) != 0)
2817 vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
2819 /* attempt to online all the vdevs & validate */
2820 attempt_reopen = B_TRUE;
2821 for (i = 0; i < gcount; i++) {
2822 if (glist[i] == 0) /* vdev is hole */
2825 vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
2826 if (vd[i] == NULL) {
2828 * Don't bother attempting to reopen the disks;
2829 * just do the split.
2831 attempt_reopen = B_FALSE;
2833 /* attempt to re-online it */
2834 vd[i]->vdev_offline = B_FALSE;
2838 if (attempt_reopen) {
2839 vdev_reopen(spa->spa_root_vdev);
2841 /* check each device to see what state it's in */
2842 for (extracted = 0, i = 0; i < gcount; i++) {
2843 if (vd[i] != NULL &&
2844 vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
2851 * If every disk has been moved to the new pool, or if we never
2852 	 * even attempted to look at them, then we split them off for
2853 	 * good.
2855 if (!attempt_reopen || gcount == extracted) {
2856 for (i = 0; i < gcount; i++)
2859 vdev_reopen(spa->spa_root_vdev);
2862 kmem_free(vd, gcount * sizeof (vdev_t *));
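/*
 * Hypothetical builder for the ZPOOL_CONFIG_SPLIT sub-nvlist consumed
 * above (the key name is the real ZPOOL_CONFIG_* constant; the helper
 * itself is only an illustration). A guid of 0 marks a hole vdev.
 */
static nvlist_t *
spa_split_config_sketch(const uint64_t *guids, uint_t gcount)
{
	nvlist_t *nvl = fnvlist_alloc();

	fnvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
	    (uint64_t *)guids, gcount);
	return (nvl);
}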
2866 spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type)
2868 char *ereport = FM_EREPORT_ZFS_POOL;
2871 spa->spa_load_state = state;
2872 (void) spa_import_progress_set_state(spa_guid(spa),
2873 spa_load_state(spa));
2875 gethrestime(&spa->spa_loaded_ts);
2876 error = spa_load_impl(spa, type, &ereport);
2879 * Don't count references from objsets that are already closed
2880 * and are making their way through the eviction process.
2882 spa_evicting_os_wait(spa);
2883 spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
2885 if (error != EEXIST) {
2886 spa->spa_loaded_ts.tv_sec = 0;
2887 spa->spa_loaded_ts.tv_nsec = 0;
2889 if (error != EBADF) {
2890 zfs_ereport_post(ereport, spa, NULL, NULL, NULL, 0, 0);
2893 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
2896 (void) spa_import_progress_set_state(spa_guid(spa),
2897 spa_load_state(spa));
2904 * Count the number of per-vdev ZAPs associated with all of the vdevs in the
2905 * vdev tree rooted in the given vd, and ensure that each ZAP is present in the
2906 * spa's per-vdev ZAP list.
2909 vdev_count_verify_zaps(vdev_t *vd)
2911 spa_t *spa = vd->vdev_spa;
2914 if (vd->vdev_top_zap != 0) {
2916 ASSERT0(zap_lookup_int(spa->spa_meta_objset,
2917 spa->spa_all_vdev_zaps, vd->vdev_top_zap));
2919 if (vd->vdev_leaf_zap != 0) {
2921 ASSERT0(zap_lookup_int(spa->spa_meta_objset,
2922 spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
2925 for (uint64_t i = 0; i < vd->vdev_children; i++) {
2926 total += vdev_count_verify_zaps(vd->vdev_child[i]);
2934 * Determine whether the activity check is required.
2937 spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label,
2941 uint64_t hostid = 0;
2942 uint64_t tryconfig_txg = 0;
2943 uint64_t tryconfig_timestamp = 0;
2944 uint16_t tryconfig_mmp_seq = 0;
2947 if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
2948 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
2949 (void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG,
2951 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
2952 &tryconfig_timestamp);
2953 (void) nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ,
2954 &tryconfig_mmp_seq);
2957 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state);
2960 * Disable the MMP activity check - This is used by zdb which
2961 * is intended to be used on potentially active pools.
2963 if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP)
2967 * Skip the activity check when the MMP feature is disabled.
2969 if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0)
2973 * If the tryconfig_ values are nonzero, they are the results of an
2974 * earlier tryimport. If they all match the uberblock we just found,
2975 	 * then the pool has not changed and we return false so we do not test
2976 	 * a second time.
2978 if (tryconfig_txg && tryconfig_txg == ub->ub_txg &&
2979 tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp &&
2980 tryconfig_mmp_seq && tryconfig_mmp_seq ==
2981 (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0))
2985 * Allow the activity check to be skipped when importing the pool
2986 * on the same host which last imported it. Since the hostid from
2987 * configuration may be stale use the one read from the label.
2989 if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID))
2990 hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID);
2992 if (hostid == spa_get_hostid(spa))
2996 * Skip the activity test when the pool was cleanly exported.
2998 if (state != POOL_STATE_ACTIVE)
3005 * Nanoseconds the activity check must watch for changes on-disk.
3008 spa_activity_check_duration(spa_t *spa, uberblock_t *ub)
3010 uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1);
3011 uint64_t multihost_interval = MSEC2NSEC(
3012 MMP_INTERVAL_OK(zfs_multihost_interval));
3013 uint64_t import_delay = MAX(NANOSEC, import_intervals *
3014 multihost_interval);
3017 * Local tunables determine a minimum duration except for the case
3018 * where we know when the remote host will suspend the pool if MMP
3019 * writes do not land.
3021 * See Big Theory comment at the top of mmp.c for the reasoning behind
3022 * these cases and times.
3025 ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100);
3027 if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
3028 MMP_FAIL_INT(ub) > 0) {
3030 /* MMP on remote host will suspend pool after failed writes */
3031 import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) *
3032 MMP_IMPORT_SAFETY_FACTOR / 100;
3034 zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp "
3035 "mmp_fails=%llu ub_mmp mmp_interval=%llu "
3036 "import_intervals=%u", import_delay, MMP_FAIL_INT(ub),
3037 MMP_INTERVAL(ub), import_intervals);
3039 } else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
3040 MMP_FAIL_INT(ub) == 0) {
3042 /* MMP on remote host will never suspend pool */
3043 import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) +
3044 ub->ub_mmp_delay) * import_intervals);
3046 zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp "
3047 "mmp_interval=%llu ub_mmp_delay=%llu "
3048 "import_intervals=%u", import_delay, MMP_INTERVAL(ub),
3049 ub->ub_mmp_delay, import_intervals);
3051 } else if (MMP_VALID(ub)) {
3053 * zfs-0.7 compatibility case
3056 import_delay = MAX(import_delay, (multihost_interval +
3057 ub->ub_mmp_delay) * import_intervals);
3059 zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu "
3060 "import_intervals=%u leaves=%u", import_delay,
3061 ub->ub_mmp_delay, import_intervals,
3062 vdev_count_leaves(spa));
3064 /* Using local tunings is the only reasonable option */
3065 zfs_dbgmsg("pool last imported on non-MMP aware "
3066 "host using import_delay=%llu multihost_interval=%llu "
3067 "import_intervals=%u", import_delay, multihost_interval,
3071 return (import_delay);
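/*
 * Worked example of the local-tunables path above, under assumed values
 * (zfs_multihost_interval = 1000 ms, zfs_multihost_import_intervals = 20,
 * no MMP fields in the uberblock): import_delay = MAX(NANOSEC, 20 * 1 s)
 * = 20 seconds of watching for on-disk changes before import may proceed.
 */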
3075 * Perform the import activity check. If the user canceled the import or
3076 * we detected activity then fail.
3079 spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config)
3081 uint64_t txg = ub->ub_txg;
3082 uint64_t timestamp = ub->ub_timestamp;
3083 uint64_t mmp_config = ub->ub_mmp_config;
3084 uint16_t mmp_seq = MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0;
3085 uint64_t import_delay;
3086 hrtime_t import_expire;
3087 nvlist_t *mmp_label = NULL;
3088 vdev_t *rvd = spa->spa_root_vdev;
3093 cv_init(&cv, NULL, CV_DEFAULT, NULL);
3094 mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL);
3098 * If ZPOOL_CONFIG_MMP_TXG is present an activity check was performed
3099 * during the earlier tryimport. If the txg recorded there is 0 then
3100 * the pool is known to be active on another host.
3102 * Otherwise, the pool might be in use on another host. Check for
3103 * changes in the uberblocks on disk if necessary.
3105 if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
3106 nvlist_t *nvinfo = fnvlist_lookup_nvlist(config,
3107 ZPOOL_CONFIG_LOAD_INFO);
3109 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) &&
3110 fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) {
3111 vdev_uberblock_load(rvd, ub, &mmp_label);
3112 error = SET_ERROR(EREMOTEIO);
3117 import_delay = spa_activity_check_duration(spa, ub);
3119 /* Add a small random factor in case of simultaneous imports (0-25%) */
3120 import_delay += import_delay * spa_get_random(250) / 1000;
3122 import_expire = gethrtime() + import_delay;
3124 while (gethrtime() < import_expire) {
3125 (void) spa_import_progress_set_mmp_check(spa_guid(spa),
3126 NSEC2SEC(import_expire - gethrtime()));
3128 vdev_uberblock_load(rvd, ub, &mmp_label);
3130 if (txg != ub->ub_txg || timestamp != ub->ub_timestamp ||
3131 mmp_seq != (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) {
3132 zfs_dbgmsg("multihost activity detected "
3133 "txg %llu ub_txg %llu "
3134 "timestamp %llu ub_timestamp %llu "
3135 "mmp_config %#llx ub_mmp_config %#llx",
3136 txg, ub->ub_txg, timestamp, ub->ub_timestamp,
3137 mmp_config, ub->ub_mmp_config);
3139 error = SET_ERROR(EREMOTEIO);
3144 nvlist_free(mmp_label);
3148 error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz);
3150 error = SET_ERROR(EINTR);
3158 mutex_destroy(&mtx);
3162 	 * If the pool is determined to be active, store the status in the
3163 	 * spa->spa_load_info nvlist. If the remote hostname or hostid are
3164 	 * available from the configuration read from disk, store them as well.
3165 * This allows 'zpool import' to generate a more useful message.
3167 * ZPOOL_CONFIG_MMP_STATE - observed pool status (mandatory)
3168 * ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool
3169 * ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool
3171 if (error == EREMOTEIO) {
3172 char *hostname = "<unknown>";
3173 uint64_t hostid = 0;
3176 if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) {
3177 hostname = fnvlist_lookup_string(mmp_label,
3178 ZPOOL_CONFIG_HOSTNAME);
3179 fnvlist_add_string(spa->spa_load_info,
3180 ZPOOL_CONFIG_MMP_HOSTNAME, hostname);
3183 if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) {
3184 hostid = fnvlist_lookup_uint64(mmp_label,
3185 ZPOOL_CONFIG_HOSTID);
3186 fnvlist_add_uint64(spa->spa_load_info,
3187 ZPOOL_CONFIG_MMP_HOSTID, hostid);
3191 fnvlist_add_uint64(spa->spa_load_info,
3192 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE);
3193 fnvlist_add_uint64(spa->spa_load_info,
3194 ZPOOL_CONFIG_MMP_TXG, 0);
3196 error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO);
3200 nvlist_free(mmp_label);
3206 spa_verify_host(spa_t *spa, nvlist_t *mos_config)
3210 uint64_t myhostid = 0;
3212 if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config,
3213 ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
3214 hostname = fnvlist_lookup_string(mos_config,
3215 ZPOOL_CONFIG_HOSTNAME);
3217 myhostid = zone_get_hostid(NULL);
3219 if (hostid != 0 && myhostid != 0 && hostid != myhostid) {
3220 cmn_err(CE_WARN, "pool '%s' could not be "
3221 "loaded as it was last accessed by "
3222 "another system (host: %s hostid: 0x%llx). "
3223 "See: http://illumos.org/msg/ZFS-8000-EY",
3224 spa_name(spa), hostname, (u_longlong_t)hostid);
3225 spa_load_failed(spa, "hostid verification failed: pool "
3226 "last accessed by host: %s (hostid: 0x%llx)",
3227 hostname, (u_longlong_t)hostid);
3228 return (SET_ERROR(EBADF));
3236 spa_ld_parse_config(spa_t *spa, spa_import_type_t type)
3239 nvlist_t *nvtree, *nvl, *config = spa->spa_config;
3246 * Versioning wasn't explicitly added to the label until later, so if
3247 * it's not present treat it as the initial version.
3249 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
3250 &spa->spa_ubsync.ub_version) != 0)
3251 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
3253 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
3254 spa_load_failed(spa, "invalid config provided: '%s' missing",
3255 ZPOOL_CONFIG_POOL_GUID);
3256 return (SET_ERROR(EINVAL));
3260 * If we are doing an import, ensure that the pool is not already
3261 * imported by checking if its pool guid already exists in the
3264 * The only case that we allow an already imported pool to be
3265 * imported again, is when the pool is checkpointed and we want to
3266 * look at its checkpointed state from userland tools like zdb.
3268 #ifdef _KERNEL
3269 	if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
3270 	    spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
3271 	    spa_guid_exists(pool_guid, 0)) {
3272 #else
3273 	if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
3274 	    spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
3275 	    spa_guid_exists(pool_guid, 0) &&
3276 	    !spa_importing_readonly_checkpoint(spa)) {
3277 #endif
3278 spa_load_failed(spa, "a pool with guid %llu is already open",
3279 (u_longlong_t)pool_guid);
3280 return (SET_ERROR(EEXIST));
3283 spa->spa_config_guid = pool_guid;
3285 nvlist_free(spa->spa_load_info);
3286 spa->spa_load_info = fnvlist_alloc();
3288 ASSERT(spa->spa_comment == NULL);
3289 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
3290 spa->spa_comment = spa_strdup(comment);
3292 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
3293 &spa->spa_config_txg);
3295 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0)
3296 spa->spa_config_splitting = fnvlist_dup(nvl);
3298 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) {
3299 spa_load_failed(spa, "invalid config provided: '%s' missing",
3300 ZPOOL_CONFIG_VDEV_TREE);
3301 return (SET_ERROR(EINVAL));
3305 * Create "The Godfather" zio to hold all async IOs
3307 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
3309 for (int i = 0; i < max_ncpus; i++) {
3310 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
3311 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
3312 ZIO_FLAG_GODFATHER);
3316 * Parse the configuration into a vdev tree. We explicitly set the
3317 * value that will be returned by spa_version() since parsing the
3318 * configuration requires knowing the version number.
3320 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3321 parse = (type == SPA_IMPORT_EXISTING ?
3322 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
3323 error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse);
3324 spa_config_exit(spa, SCL_ALL, FTAG);
3327 spa_load_failed(spa, "unable to parse config [error=%d]",
3332 ASSERT(spa->spa_root_vdev == rvd);
3333 ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
3334 ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
3336 if (type != SPA_IMPORT_ASSEMBLE) {
3337 ASSERT(spa_guid(spa) == pool_guid);
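/*
 * A minimal sketch of the config keys this function insists on (the
 * helper is hypothetical; the ZPOOL_CONFIG_* keys are real).
 * ZPOOL_CONFIG_VERSION may be omitted, in which case it defaults to
 * SPA_VERSION_INITIAL.
 */
static nvlist_t *
spa_minimal_load_config_sketch(uint64_t pool_guid, nvlist_t *vdev_tree)
{
	nvlist_t *config = fnvlist_alloc();

	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, pool_guid);
	fnvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, vdev_tree);
	return (config);
}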
3344 * Recursively open all vdevs in the vdev tree. This function is called twice:
3345 * first with the untrusted config, then with the trusted config.
3348 spa_ld_open_vdevs(spa_t *spa)
3353 * spa_missing_tvds_allowed defines how many top-level vdevs can be
3354 	 * missing/unopenable for the root vdev to still be considered openable.
3356 if (spa->spa_trust_config) {
3357 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds;
3358 } else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) {
3359 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile;
3360 } else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) {
3361 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan;
3363 spa->spa_missing_tvds_allowed = 0;
3366 spa->spa_missing_tvds_allowed =
3367 MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed);
3369 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3370 error = vdev_open(spa->spa_root_vdev);
3371 spa_config_exit(spa, SCL_ALL, FTAG);
3373 if (spa->spa_missing_tvds != 0) {
3374 spa_load_note(spa, "vdev tree has %lld missing top-level "
3375 "vdevs.", (u_longlong_t)spa->spa_missing_tvds);
3376 if (spa->spa_trust_config && (spa->spa_mode & SPA_MODE_WRITE)) {
3378 * Although theoretically we could allow users to open
3379 * incomplete pools in RW mode, we'd need to add a lot
3380 * of extra logic (e.g. adjust pool space to account
3381 * for missing vdevs).
3382 * This limitation also prevents users from accidentally
3383 * opening the pool in RW mode during data recovery and
3384 * damaging it further.
3386 spa_load_note(spa, "pools with missing top-level "
3387 "vdevs can only be opened in read-only mode.");
3388 error = SET_ERROR(ENXIO);
3390 spa_load_note(spa, "current settings allow for maximum "
3391 "%lld missing top-level vdevs at this stage.",
3392 (u_longlong_t)spa->spa_missing_tvds_allowed);
3396 spa_load_failed(spa, "unable to open vdev tree [error=%d]",
3399 if (spa->spa_missing_tvds != 0 || error != 0)
3400 vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2);
3406 * We need to validate the vdev labels against the configuration that
3407 * we have in hand. This function is called twice: first with an untrusted
3408 * config, then with a trusted config. The validation is more strict when the
3409 * config is trusted.
3412 spa_ld_validate_vdevs(spa_t *spa)
3415 vdev_t *rvd = spa->spa_root_vdev;
3417 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3418 error = vdev_validate(rvd);
3419 spa_config_exit(spa, SCL_ALL, FTAG);
3422 spa_load_failed(spa, "vdev_validate failed [error=%d]", error);
3426 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
3427 		spa_load_failed(spa, "cannot open vdev tree after invalidating "
3428 		    "some vdevs");
3429 vdev_dbgmsg_print_tree(rvd, 2);
3430 return (SET_ERROR(ENXIO));
3437 spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub)
3439 spa->spa_state = POOL_STATE_ACTIVE;
3440 spa->spa_ubsync = spa->spa_uberblock;
3441 spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
3442 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
3443 spa->spa_first_txg = spa->spa_last_ubsync_txg ?
3444 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
3445 spa->spa_claim_max_txg = spa->spa_first_txg;
3446 spa->spa_prev_software_version = ub->ub_software_version;
3450 spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type)
3452 vdev_t *rvd = spa->spa_root_vdev;
3454 uberblock_t *ub = &spa->spa_uberblock;
3455 boolean_t activity_check = B_FALSE;
3458 * If we are opening the checkpointed state of the pool by
3459 * rewinding to it, at this point we will have written the
3460 * checkpointed uberblock to the vdev labels, so searching
3461 * the labels will find the right uberblock. However, if
3462 * we are opening the checkpointed state read-only, we have
3463 * not modified the labels. Therefore, we must ignore the
3464 * labels and continue using the spa_uberblock that was set
3465 * by spa_ld_checkpoint_rewind.
3467 * Note that it would be fine to ignore the labels when
3468 * rewinding (opening writeable) as well. However, if we
3469 * crash just after writing the labels, we will end up
3470 * searching the labels. Doing so in the common case means
3471 * that this code path gets exercised normally, rather than
3472 * just in the edge case.
3474 if (ub->ub_checkpoint_txg != 0 &&
3475 spa_importing_readonly_checkpoint(spa)) {
3476 spa_ld_select_uberblock_done(spa, ub);
3481 * Find the best uberblock.
3483 vdev_uberblock_load(rvd, ub, &label);
3486 * If we weren't able to find a single valid uberblock, return failure.
3488 if (ub->ub_txg == 0) {
3490 spa_load_failed(spa, "no valid uberblock found");
3491 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
3494 if (spa->spa_load_max_txg != UINT64_MAX) {
3495 (void) spa_import_progress_set_max_txg(spa_guid(spa),
3496 (u_longlong_t)spa->spa_load_max_txg);
3498 spa_load_note(spa, "using uberblock with txg=%llu",
3499 (u_longlong_t)ub->ub_txg);
3503 * For pools which have the multihost property on determine if the
3504 * pool is truly inactive and can be safely imported. Prevent
3505 * hosts which don't have a hostid set from importing the pool.
3507 activity_check = spa_activity_check_required(spa, ub, label,
3509 if (activity_check) {
3510 if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay &&
3511 spa_get_hostid(spa) == 0) {
3513 fnvlist_add_uint64(spa->spa_load_info,
3514 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
3515 return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
3518 int error = spa_activity_check(spa, ub, spa->spa_config);
3524 fnvlist_add_uint64(spa->spa_load_info,
3525 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE);
3526 fnvlist_add_uint64(spa->spa_load_info,
3527 ZPOOL_CONFIG_MMP_TXG, ub->ub_txg);
3528 fnvlist_add_uint16(spa->spa_load_info,
3529 ZPOOL_CONFIG_MMP_SEQ,
3530 (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0));
3534 * If the pool has an unsupported version we can't open it.
3536 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
3538 spa_load_failed(spa, "version %llu is not supported",
3539 (u_longlong_t)ub->ub_version);
3540 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
3543 if (ub->ub_version >= SPA_VERSION_FEATURES) {
3547 * If we weren't able to find what's necessary for reading the
3548 * MOS in the label, return failure.
3550 if (label == NULL) {
3551 spa_load_failed(spa, "label config unavailable");
3552 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
3556 if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ,
3559 spa_load_failed(spa, "invalid label: '%s' missing",
3560 ZPOOL_CONFIG_FEATURES_FOR_READ);
3561 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
3566 	 * Update our in-core representation with the definitive values
3567 	 * from the label.
3569 nvlist_free(spa->spa_label_features);
3570 VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
3576 * Look through entries in the label nvlist's features_for_read. If
3577 * there is a feature listed there which we don't understand then we
3578 * cannot open a pool.
3580 if (ub->ub_version >= SPA_VERSION_FEATURES) {
3581 nvlist_t *unsup_feat;
3583 VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
3586 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
3588 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
3589 if (!zfeature_is_supported(nvpair_name(nvp))) {
3590 VERIFY(nvlist_add_string(unsup_feat,
3591 nvpair_name(nvp), "") == 0);
3595 if (!nvlist_empty(unsup_feat)) {
3596 VERIFY(nvlist_add_nvlist(spa->spa_load_info,
3597 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
3598 nvlist_free(unsup_feat);
3599 spa_load_failed(spa, "some features are unsupported");
3600 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
3604 nvlist_free(unsup_feat);
3607 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
3608 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3609 spa_try_repair(spa, spa->spa_config);
3610 spa_config_exit(spa, SCL_ALL, FTAG);
3611 nvlist_free(spa->spa_config_splitting);
3612 spa->spa_config_splitting = NULL;
3616 * Initialize internal SPA structures.
3618 spa_ld_select_uberblock_done(spa, ub);
3624 spa_ld_open_rootbp(spa_t *spa)
3627 vdev_t *rvd = spa->spa_root_vdev;
3629 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
3631 spa_load_failed(spa, "unable to open rootbp in dsl_pool_init "
3632 "[error=%d]", error);
3633 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3635 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
3641 spa_ld_trusted_config(spa_t *spa, spa_import_type_t type,
3642 boolean_t reloading)
3644 vdev_t *mrvd, *rvd = spa->spa_root_vdev;
3645 nvlist_t *nv, *mos_config, *policy;
3646 int error = 0, copy_error;
3647 uint64_t healthy_tvds, healthy_tvds_mos;
3648 uint64_t mos_config_txg;
3650 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE)
3652 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3655 * If we're assembling a pool from a split, the config provided is
3656 * already trusted so there is nothing to do.
3658 if (type == SPA_IMPORT_ASSEMBLE)
3661 healthy_tvds = spa_healthy_core_tvds(spa);
3663 if (load_nvlist(spa, spa->spa_config_object, &mos_config)
3665 spa_load_failed(spa, "unable to retrieve MOS config");
3666 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3670 	 * If we are doing an open, the pool owner wasn't verified yet, so
3671 	 * do the verification here.
3673 if (spa->spa_load_state == SPA_LOAD_OPEN) {
3674 error = spa_verify_host(spa, mos_config);
3676 nvlist_free(mos_config);
3681 nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE);
3683 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3686 * Build a new vdev tree from the trusted config
3688 VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);
3691 * Vdev paths in the MOS may be obsolete. If the untrusted config was
3692 * obtained by scanning /dev/dsk, then it will have the right vdev
3693 * paths. We update the trusted MOS config with this information.
3694 * We first try to copy the paths with vdev_copy_path_strict, which
3695 * succeeds only when both configs have exactly the same vdev tree.
3696 * If that fails, we fall back to a more flexible method that has a
3697 * best effort policy.
3699 copy_error = vdev_copy_path_strict(rvd, mrvd);
3700 if (copy_error != 0 || spa_load_print_vdev_tree) {
3701 spa_load_note(spa, "provided vdev tree:");
3702 vdev_dbgmsg_print_tree(rvd, 2);
3703 spa_load_note(spa, "MOS vdev tree:");
3704 vdev_dbgmsg_print_tree(mrvd, 2);
3706 if (copy_error != 0) {
3707 spa_load_note(spa, "vdev_copy_path_strict failed, falling "
3708 "back to vdev_copy_path_relaxed");
3709 vdev_copy_path_relaxed(rvd, mrvd);
3714 spa->spa_root_vdev = mrvd;
3716 spa_config_exit(spa, SCL_ALL, FTAG);
3719 * We will use spa_config if we decide to reload the spa or if spa_load
3720 * fails and we rewind. We must thus regenerate the config using the
3721 * MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to
3722 * pass settings on how to load the pool and is not stored in the MOS.
3723 * We copy it over to our new, trusted config.
3725 mos_config_txg = fnvlist_lookup_uint64(mos_config,
3726 ZPOOL_CONFIG_POOL_TXG);
3727 nvlist_free(mos_config);
3728 mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE);
3729 if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY,
3731 fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy);
3732 spa_config_set(spa, mos_config);
3733 spa->spa_config_source = SPA_CONFIG_SRC_MOS;
3736 * Now that we got the config from the MOS, we should be more strict
3737 * in checking blkptrs and can make assumptions about the consistency
3738 * of the vdev tree. spa_trust_config must be set to true before opening
3739 * vdevs in order for them to be writeable.
3741 spa->spa_trust_config = B_TRUE;
3744 * Open and validate the new vdev tree
3746 error = spa_ld_open_vdevs(spa);
3750 error = spa_ld_validate_vdevs(spa);
3754 if (copy_error != 0 || spa_load_print_vdev_tree) {
3755 spa_load_note(spa, "final vdev tree:");
3756 vdev_dbgmsg_print_tree(rvd, 2);
3759 if (spa->spa_load_state != SPA_LOAD_TRYIMPORT &&
3760 !spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) {
3762 * Sanity check to make sure that we are indeed loading the
3763 * latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds
3764 * in the config provided and they happened to be the only ones
3765 * to have the latest uberblock, we could involuntarily perform
3766 * an extreme rewind.
3768 healthy_tvds_mos = spa_healthy_core_tvds(spa);
3769 if (healthy_tvds_mos - healthy_tvds >=
3770 SPA_SYNC_MIN_VDEVS) {
3771 spa_load_note(spa, "config provided misses too many "
3772 "top-level vdevs compared to MOS (%lld vs %lld). ",
3773 (u_longlong_t)healthy_tvds,
3774 (u_longlong_t)healthy_tvds_mos);
3775 spa_load_note(spa, "vdev tree:");
3776 vdev_dbgmsg_print_tree(rvd, 2);
3778 spa_load_failed(spa, "config was already "
3779 "provided from MOS. Aborting.");
3780 return (spa_vdev_err(rvd,
3781 VDEV_AUX_CORRUPT_DATA, EIO));
3783 		spa_load_note(spa, "spa must be reloaded using MOS "
3784 		    "config");
3785 return (SET_ERROR(EAGAIN));
3789 error = spa_check_for_missing_logs(spa);
3791 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
3793 if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) {
3794 spa_load_failed(spa, "uberblock guid sum doesn't match MOS "
3795 "guid sum (%llu != %llu)",
3796 (u_longlong_t)spa->spa_uberblock.ub_guid_sum,
3797 (u_longlong_t)rvd->vdev_guid_sum);
3798 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
3806 spa_ld_open_indirect_vdev_metadata(spa_t *spa)
3809 vdev_t *rvd = spa->spa_root_vdev;
3812 * Everything that we read before spa_remove_init() must be stored
3813 	 * on concrete vdevs. Therefore we do this as early as possible.
3815 error = spa_remove_init(spa);
3817 spa_load_failed(spa, "spa_remove_init failed [error=%d]",
3819 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3823 * Retrieve information needed to condense indirect vdev mappings.
3825 error = spa_condense_init(spa);
3827 spa_load_failed(spa, "spa_condense_init failed [error=%d]",
3829 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
3836 spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep)
3839 vdev_t *rvd = spa->spa_root_vdev;
3841 if (spa_version(spa) >= SPA_VERSION_FEATURES) {
3842 boolean_t missing_feat_read = B_FALSE;
3843 nvlist_t *unsup_feat, *enabled_feat;
3845 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
3846 &spa->spa_feat_for_read_obj, B_TRUE) != 0) {
3847 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3850 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
3851 &spa->spa_feat_for_write_obj, B_TRUE) != 0) {
3852 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3855 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
3856 &spa->spa_feat_desc_obj, B_TRUE) != 0) {
3857 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3860 enabled_feat = fnvlist_alloc();
3861 unsup_feat = fnvlist_alloc();
3863 if (!spa_features_check(spa, B_FALSE,
3864 unsup_feat, enabled_feat))
3865 missing_feat_read = B_TRUE;
3867 if (spa_writeable(spa) ||
3868 spa->spa_load_state == SPA_LOAD_TRYIMPORT) {
3869 if (!spa_features_check(spa, B_TRUE,
3870 unsup_feat, enabled_feat)) {
3871 *missing_feat_writep = B_TRUE;
3875 fnvlist_add_nvlist(spa->spa_load_info,
3876 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
3878 if (!nvlist_empty(unsup_feat)) {
3879 fnvlist_add_nvlist(spa->spa_load_info,
3880 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
3883 fnvlist_free(enabled_feat);
3884 fnvlist_free(unsup_feat);
3886 if (!missing_feat_read) {
3887 fnvlist_add_boolean(spa->spa_load_info,
3888 ZPOOL_CONFIG_CAN_RDONLY);
3892 * If the state is SPA_LOAD_TRYIMPORT, our objective is
3893 * twofold: to determine whether the pool is available for
3894 * import in read-write mode and (if it is not) whether the
3895 * pool is available for import in read-only mode. If the pool
3896 * is available for import in read-write mode, it is displayed
3897 * as available in userland; if it is not available for import
3898 * in read-only mode, it is displayed as unavailable in
3899 * userland. If the pool is available for import in read-only
3900 * mode but not read-write mode, it is displayed as unavailable
3901 * in userland with a special note that the pool is actually
3902 * available for open in read-only mode.
3904 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
3905 * missing a feature for write, we must first determine whether
3906 * the pool can be opened read-only before returning to
3907 * userland in order to know whether to display the
3908 * abovementioned note.
3910 if (missing_feat_read || (*missing_feat_writep &&
3911 spa_writeable(spa))) {
3912 spa_load_failed(spa, "pool uses unsupported features");
3913 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
3918 * Load refcounts for ZFS features from disk into an in-memory
3919 * cache during SPA initialization.
3921 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
3924 error = feature_get_refcount_from_disk(spa,
3925 &spa_feature_table[i], &refcount);
3927 spa->spa_feat_refcount_cache[i] = refcount;
3928 } else if (error == ENOTSUP) {
3929 spa->spa_feat_refcount_cache[i] =
3930 SPA_FEATURE_DISABLED;
3932 spa_load_failed(spa, "error getting refcount "
3933 "for feature %s [error=%d]",
3934 spa_feature_table[i].fi_guid, error);
3935 return (spa_vdev_err(rvd,
3936 VDEV_AUX_CORRUPT_DATA, EIO));
3941 if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
3942 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
3943 &spa->spa_feat_enabled_txg_obj, B_TRUE) != 0)
3944 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3948 * Encryption was added before bookmark_v2, even though bookmark_v2
3949 * is now a dependency. If this pool has encryption enabled without
3950 * bookmark_v2, trigger an errata message.
3952 if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) &&
3953 !spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
3954 spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION;
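/*
 * Illustrative consumer of the refcount cache populated above (a
 * sketch, not used elsewhere): a feature is considered active when its
 * cached refcount is valid and nonzero.
 */
static boolean_t
spa_feature_cached_active_sketch(spa_t *spa, spa_feature_t fid)
{
	uint64_t rc = spa->spa_feat_refcount_cache[fid];

	return (rc != SPA_FEATURE_DISABLED && rc > 0);
}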
3961 spa_ld_load_special_directories(spa_t *spa)
3964 vdev_t *rvd = spa->spa_root_vdev;
3966 spa->spa_is_initializing = B_TRUE;
3967 error = dsl_pool_open(spa->spa_dsl_pool);
3968 spa->spa_is_initializing = B_FALSE;
3970 spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error);
3971 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3978 spa_ld_get_props(spa_t *spa)
3982 vdev_t *rvd = spa->spa_root_vdev;
3984 /* Grab the checksum salt from the MOS. */
3985 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
3986 DMU_POOL_CHECKSUM_SALT, 1,
3987 sizeof (spa->spa_cksum_salt.zcs_bytes),
3988 spa->spa_cksum_salt.zcs_bytes);
3989 if (error == ENOENT) {
3990 /* Generate a new salt for subsequent use */
3991 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
3992 sizeof (spa->spa_cksum_salt.zcs_bytes));
3993 } else if (error != 0) {
3994 spa_load_failed(spa, "unable to retrieve checksum salt from "
3995 "MOS [error=%d]", error);
3996 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3999 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0)
4000 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4001 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
4003 spa_load_failed(spa, "error opening deferred-frees bpobj "
4004 "[error=%d]", error);
4005 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4009 * Load the bit that tells us to use the new accounting function
4010 	 * (raid-z deflation). If we have an older pool, this will not
4011 	 * be present.
4013 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE);
4014 if (error != 0 && error != ENOENT)
4015 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4017 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
4018 &spa->spa_creation_version, B_FALSE);
4019 if (error != 0 && error != ENOENT)
4020 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4023 	 * Load the persistent error log. If we have an older pool, this will
4024 	 * not be present.
4026 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last,
4028 if (error != 0 && error != ENOENT)
4029 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4031 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
4032 &spa->spa_errlog_scrub, B_FALSE);
4033 if (error != 0 && error != ENOENT)
4034 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4037 * Load the livelist deletion field. If a livelist is queued for
4038 * deletion, indicate that in the spa
4040 error = spa_dir_prop(spa, DMU_POOL_DELETED_CLONES,
4041 &spa->spa_livelists_to_delete, B_FALSE);
4042 if (error != 0 && error != ENOENT)
4043 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4046 * Load the history object. If we have an older pool, this
4047 * will not be present.
4049 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE);
4050 if (error != 0 && error != ENOENT)
4051 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4054 * Load the per-vdev ZAP map. If we have an older pool, this will not
4055 * be present; in this case, defer its creation to a later time to
4056 * avoid dirtying the MOS this early / out of sync context. See
4057 * spa_sync_config_object.
4060 /* The sentinel is only available in the MOS config. */
4061 nvlist_t *mos_config;
4062 if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) {
4063 spa_load_failed(spa, "unable to retrieve MOS config");
4064 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4067 error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
4068 &spa->spa_all_vdev_zaps, B_FALSE);
4070 if (error == ENOENT) {
4071 VERIFY(!nvlist_exists(mos_config,
4072 ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
4073 spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
4074 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
4075 } else if (error != 0) {
4076 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4077 } else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
4079 * An older version of ZFS overwrote the sentinel value, so
4080 * we have orphaned per-vdev ZAPs in the MOS. Defer their
4081 * destruction to later; see spa_sync_config_object.
4083 spa->spa_avz_action = AVZ_ACTION_DESTROY;
4085 * We're assuming that no vdevs have had their ZAPs created
4086 * before this. Better be sure of it.
4088 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
4090 nvlist_free(mos_config);
4092 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
4094 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object,
4096 if (error && error != ENOENT)
4097 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4100 uint64_t autoreplace;
4102 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
4103 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
4104 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
4105 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
4106 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
4107 spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
4108 spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim);
4109 spa->spa_autoreplace = (autoreplace != 0);
4113 * If we are importing a pool with missing top-level vdevs,
4114 * we enforce that the pool doesn't panic or get suspended on
4115 * error since the likelihood of missing data is extremely high.
4117 if (spa->spa_missing_tvds > 0 &&
4118 spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE &&
4119 spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
4120 spa_load_note(spa, "forcing failmode to 'continue' "
4121 "as some top level vdevs are missing");
4122 spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE;
4129 spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type)
4132 vdev_t *rvd = spa->spa_root_vdev;
4135 * If we're assembling the pool from the split-off vdevs of
4136 	 * an existing pool, we don't want to attach the spares & cache
4137 	 * devices.
4141 * Load any hot spares for this pool.
4143 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object,
4145 if (error != 0 && error != ENOENT)
4146 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4147 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
4148 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
4149 if (load_nvlist(spa, spa->spa_spares.sav_object,
4150 &spa->spa_spares.sav_config) != 0) {
4151 spa_load_failed(spa, "error loading spares nvlist");
4152 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4155 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4156 spa_load_spares(spa);
4157 spa_config_exit(spa, SCL_ALL, FTAG);
4158 } else if (error == 0) {
4159 spa->spa_spares.sav_sync = B_TRUE;
4163 * Load any level 2 ARC devices for this pool.
4165 error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
4166 &spa->spa_l2cache.sav_object, B_FALSE);
4167 if (error != 0 && error != ENOENT)
4168 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4169 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
4170 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
4171 if (load_nvlist(spa, spa->spa_l2cache.sav_object,
4172 &spa->spa_l2cache.sav_config) != 0) {
4173 spa_load_failed(spa, "error loading l2cache nvlist");
4174 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4177 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4178 spa_load_l2cache(spa);
4179 spa_config_exit(spa, SCL_ALL, FTAG);
4180 } else if (error == 0) {
4181 spa->spa_l2cache.sav_sync = B_TRUE;
4188 spa_ld_load_vdev_metadata(spa_t *spa)
4191 vdev_t *rvd = spa->spa_root_vdev;
4194 * If the 'multihost' property is set, then never allow a pool to
4195 * be imported when the system hostid is zero. The exception to
4196 * this rule is zdb which is always allowed to access pools.
4198 if (spa_multihost(spa) && spa_get_hostid(spa) == 0 &&
4199 (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) {
4200 fnvlist_add_uint64(spa->spa_load_info,
4201 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
4202 return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
4206 * If the 'autoreplace' property is set, then post a resource notifying
4207 * the ZFS DE that it should not issue any faults for unopenable
4208 * devices. We also iterate over the vdevs, and post a sysevent for any
4209 	 * unopenable vdevs so that the normal autoreplace handler can take
4210 	 * over.
4212 if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
4213 spa_check_removed(spa->spa_root_vdev);
4215 * For the import case, this is done in spa_import(), because
4216 * at this point we're using the spare definitions from
4217 * the MOS config, not necessarily from the userland config.
4219 if (spa->spa_load_state != SPA_LOAD_IMPORT) {
4220 spa_aux_check_removed(&spa->spa_spares);
4221 spa_aux_check_removed(&spa->spa_l2cache);
4226 * Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc.
4228 error = vdev_load(rvd);
4230 spa_load_failed(spa, "vdev_load failed [error=%d]", error);
4231 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
4234 error = spa_ld_log_spacemaps(spa);
4236 spa_load_failed(spa, "spa_ld_log_sm_data failed [error=%d]",
4238 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
4242 * Propagate the leaf DTLs we just loaded all the way up the vdev tree.
4244 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4245 vdev_dtl_reassess(rvd, 0, 0, B_FALSE, B_FALSE);
4246 spa_config_exit(spa, SCL_ALL, FTAG);
4252 spa_ld_load_dedup_tables(spa_t *spa)
4255 vdev_t *rvd = spa->spa_root_vdev;
4257 error = ddt_load(spa);
4259 spa_load_failed(spa, "ddt_load failed [error=%d]", error);
4260 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4267 spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, char **ereport)
4269 vdev_t *rvd = spa->spa_root_vdev;
4271 if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) {
		boolean_t missing = spa_check_logs(spa);

		if (missing) {
			if (spa->spa_missing_tvds != 0) {
				spa_load_note(spa, "spa_check_logs failed "
				    "so dropping the logs");
			} else {
				*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
				spa_load_failed(spa, "spa_check_logs failed");
				return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG,
				    ENXIO));
			}
		}
	}
4290 spa_ld_verify_pool_data(spa_t *spa)
4293 vdev_t *rvd = spa->spa_root_vdev;
	/*
	 * We've successfully opened the pool, verify that we're ready
	 * to start pushing transactions.
	 */
4299 if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
4300 error = spa_load_verify(spa);
4302 spa_load_failed(spa, "spa_load_verify failed "
4303 "[error=%d]", error);
4304 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
4313 spa_ld_claim_log_blocks(spa_t *spa)
	dsl_pool_t *dp = spa_get_dsl(spa);
	dmu_tx_t *tx;
	/*
	 * Claim log blocks that haven't been committed yet.
	 * This must all happen in a single txg.
	 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
	 * invoked from zil_claim_log_block()'s i/o done callback.
	 * Price of rollback is that we abandon the log.
	 */
4325 spa->spa_claiming = B_TRUE;
4327 tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
4328 (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
	    zil_claim, tx, DS_FIND_CHILDREN);
	dmu_tx_commit(tx);
4332 spa->spa_claiming = B_FALSE;
4334 spa_set_log_state(spa, SPA_LOG_GOOD);
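	/*
	 * Hedged sketch of how the claims above are consumed later in
	 * spa_load_impl(): claims are issued here in spa_first_txg and the
	 * pool then waits for them to sync,
	 *
	 *	spa_ld_claim_log_blocks(spa);
	 *	...
	 *	txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
	 *
	 * so no claimed log block can appear to have been born in the future.
	 */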
4338 spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg,
4339 boolean_t update_config_cache)
4341 vdev_t *rvd = spa->spa_root_vdev;
4342 int need_update = B_FALSE;
	/*
	 * If the config cache is stale, or we have uninitialized
	 * metaslabs (see spa_vdev_add()), then update the config.
	 *
	 * If this is a verbatim import, trust the current
	 * in-core spa_config and update the disk labels.
	 */
4351 if (update_config_cache || config_cache_txg != spa->spa_config_txg ||
4352 spa->spa_load_state == SPA_LOAD_IMPORT ||
4353 spa->spa_load_state == SPA_LOAD_RECOVER ||
4354 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
4355 need_update = B_TRUE;
4357 for (int c = 0; c < rvd->vdev_children; c++)
4358 if (rvd->vdev_child[c]->vdev_ms_array == 0)
4359 need_update = B_TRUE;
	/*
	 * Update the config cache asynchronously in case we're the
	 * root pool, in which case the config cache isn't writable yet.
	 */
	if (need_update)
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
4370 spa_ld_prepare_for_reload(spa_t *spa)
4372 spa_mode_t mode = spa->spa_mode;
4373 int async_suspended = spa->spa_async_suspended;
4376 spa_deactivate(spa);
4377 spa_activate(spa, mode);
	/*
	 * We save the value of spa_async_suspended as it gets reset to 0 by
	 * spa_unload().  We want to restore it back to the original value
	 * before returning as we might be calling spa_async_resume() later.
	 */
4384 spa->spa_async_suspended = async_suspended;
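	/*
	 * Hedged sketch of the reload cycle this helper supports (see its
	 * callers below): the spa is torn down and reactivated in the same
	 * mode before a config is parsed again, e.g.
	 *
	 *	spa_ld_prepare_for_reload(spa);
	 *	spa_load_note(spa, "RELOADING");
	 *	error = spa_ld_mos_init(spa, type);
	 */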
4388 spa_ld_read_checkpoint_txg(spa_t *spa)
4390 uberblock_t checkpoint;
4393 ASSERT0(spa->spa_checkpoint_txg);
4394 ASSERT(MUTEX_HELD(&spa_namespace_lock));
4396 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
4397 DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
4398 sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
	if (error == ENOENT)
		return (0);
	if (error != 0)
		return (error);
4406 ASSERT3U(checkpoint.ub_txg, !=, 0);
4407 ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0);
4408 ASSERT3U(checkpoint.ub_timestamp, !=, 0);
4409 spa->spa_checkpoint_txg = checkpoint.ub_txg;
4410 spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp;
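	/*
	 * Illustration (hedged): a pool whose checkpoint txg was read above
	 * can later be rewound from userland with
	 *
	 *	zpool import --rewind-to-checkpoint <pool>
	 *
	 * which sets ZFS_IMPORT_CHECKPOINT and drives
	 * spa_ld_checkpoint_rewind() below.
	 */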
4416 spa_ld_mos_init(spa_t *spa, spa_import_type_t type)
4420 ASSERT(MUTEX_HELD(&spa_namespace_lock));
4421 ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
4424 * Never trust the config that is provided unless we are assembling
4425 * a pool following a split.
4426 * This means don't trust blkptrs and the vdev tree in general. This
4427 * also effectively puts the spa in read-only mode since
4428 * spa_writeable() checks for spa_trust_config to be true.
4429 * We will later load a trusted config from the MOS.
4431 if (type != SPA_IMPORT_ASSEMBLE)
4432 spa->spa_trust_config = B_FALSE;
4435 * Parse the config provided to create a vdev tree.
4437 error = spa_ld_parse_config(spa, type);
4441 spa_import_progress_add(spa);
4444 * Now that we have the vdev tree, try to open each vdev. This involves
4445 * opening the underlying physical device, retrieving its geometry and
4446 * probing the vdev with a dummy I/O. The state of each vdev will be set
4447 * based on the success of those operations. After this we'll be ready
4448 * to read from the vdevs.
4450 error = spa_ld_open_vdevs(spa);
4455 * Read the label of each vdev and make sure that the GUIDs stored
4456 * there match the GUIDs in the config provided.
4457 * If we're assembling a new pool that's been split off from an
4458 * existing pool, the labels haven't yet been updated so we skip
4459 * validation for now.
4461 if (type != SPA_IMPORT_ASSEMBLE) {
4462 error = spa_ld_validate_vdevs(spa);
4468 * Read all vdev labels to find the best uberblock (i.e. latest,
4469 * unless spa_load_max_txg is set) and store it in spa_uberblock. We
4470 * get the list of features required to read blkptrs in the MOS from
4471 * the vdev label with the best uberblock and verify that our version
4472 * of zfs supports them all.
4474 error = spa_ld_select_uberblock(spa, type);
4479 * Pass that uberblock to the dsl_pool layer which will open the root
4480 * blkptr. This blkptr points to the latest version of the MOS and will
4481 * allow us to read its contents.
4483 error = spa_ld_open_rootbp(spa);
4491 spa_ld_checkpoint_rewind(spa_t *spa)
4493 uberblock_t checkpoint;
4496 ASSERT(MUTEX_HELD(&spa_namespace_lock));
4497 ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
4499 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
4500 DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
4501 sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
4504 spa_load_failed(spa, "unable to retrieve checkpointed "
4505 "uberblock from the MOS config [error=%d]", error);
4507 if (error == ENOENT)
4508 error = ZFS_ERR_NO_CHECKPOINT;
4513 ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg);
4514 ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg);
4517 * We need to update the txg and timestamp of the checkpointed
4518 * uberblock to be higher than the latest one. This ensures that
4519 * the checkpointed uberblock is selected if we were to close and
4520 * reopen the pool right after we've written it in the vdev labels.
4521 * (also see block comment in vdev_uberblock_compare)
4523 checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1;
4524 checkpoint.ub_timestamp = gethrestime_sec();
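	/*
	 * Worked example (hedged): if the latest uberblock is at txg 1000
	 * and the checkpointed one at txg 900, the lines above rewrite the
	 * checkpoint with ub_txg = 1001 and a fresh timestamp, so
	 * vdev_uberblock_compare() will prefer it on the next open.
	 */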
4527 * Set current uberblock to be the checkpointed uberblock.
4529 spa->spa_uberblock = checkpoint;
4532 * If we are doing a normal rewind, then the pool is open for
4533 * writing and we sync the "updated" checkpointed uberblock to
4534 * disk. Once this is done, we've basically rewound the whole
4535 * pool and there is no way back.
4537 * There are cases when we don't want to attempt and sync the
4538 * checkpointed uberblock to disk because we are opening a
4539 * pool as read-only. Specifically, verifying the checkpointed
4540 * state with zdb, and importing the checkpointed state to get
4541 * a "preview" of its content.
4543 if (spa_writeable(spa)) {
4544 vdev_t *rvd = spa->spa_root_vdev;
4546 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
		int svdcount = 0;
		int children = rvd->vdev_children;
		int c0 = spa_get_random(children);

		for (int c = 0; c < children; c++) {
			vdev_t *vd = rvd->vdev_child[(c0 + c) % children];

			/* Stop when revisiting the first vdev */
			if (c > 0 && svd[0] == vd)
				break;

			if (vd->vdev_ms_array == 0 || vd->vdev_islog ||
			    !vdev_is_concrete(vd))
				continue;

			svd[svdcount++] = vd;
			if (svdcount == SPA_SYNC_MIN_VDEVS)
				break;
		}
		error = vdev_config_sync(svd, svdcount, spa->spa_first_txg);
		if (error == 0)
			spa->spa_last_synced_guid = rvd->vdev_guid;
		spa_config_exit(spa, SCL_ALL, FTAG);

		if (error != 0) {
			spa_load_failed(spa, "failed to write checkpointed "
			    "uberblock to the vdev labels [error=%d]", error);
			return (error);
		}
	}
4583 spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type,
4584 boolean_t *update_config_cache)
	/*
	 * Parse the config for pool, open and validate vdevs,
	 * select an uberblock, and use that uberblock to open
	 * the MOS.
	 */
	error = spa_ld_mos_init(spa, type);
4598 * Retrieve the trusted config stored in the MOS and use it to create
4599 * a new, exact version of the vdev tree, then reopen all vdevs.
4601 error = spa_ld_trusted_config(spa, type, B_FALSE);
4602 if (error == EAGAIN) {
4603 if (update_config_cache != NULL)
4604 *update_config_cache = B_TRUE;
4607 * Redo the loading process with the trusted config if it is
4608 * too different from the untrusted config.
4610 spa_ld_prepare_for_reload(spa);
4611 spa_load_note(spa, "RELOADING");
4612 error = spa_ld_mos_init(spa, type);
4616 error = spa_ld_trusted_config(spa, type, B_TRUE);
4620 } else if (error != 0) {
4628 * Load an existing storage pool, using the config provided. This config
4629 * describes which vdevs are part of the pool and is later validated against
4630 * partial configs present in each vdev's label and an entire copy of the
4631 * config stored in the MOS.
4634 spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport)
4637 boolean_t missing_feat_write = B_FALSE;
4638 boolean_t checkpoint_rewind =
4639 (spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
4640 boolean_t update_config_cache = B_FALSE;
4642 ASSERT(MUTEX_HELD(&spa_namespace_lock));
4643 ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
4645 spa_load_note(spa, "LOADING");
4647 error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache);
	/*
	 * If we are rewinding to the checkpoint then we need to repeat
	 * everything we've done so far in this function but this time
	 * selecting the checkpointed uberblock and using that to open
	 * the MOS.
	 */
	if (checkpoint_rewind) {
		/*
		 * If we are rewinding to the checkpoint, the config cache
		 * needs to be updated unconditionally.
		 */
		update_config_cache = B_TRUE;
4665 * Extract the checkpointed uberblock from the current MOS
4666 * and use this as the pool's uberblock from now on. If the
4667 * pool is imported as writeable we also write the checkpoint
4668 * uberblock to the labels, making the rewind permanent.
4670 error = spa_ld_checkpoint_rewind(spa);
4675 * Redo the loading process again with the
4676 * checkpointed uberblock.
4678 spa_ld_prepare_for_reload(spa);
4679 spa_load_note(spa, "LOADING checkpointed uberblock");
4680 error = spa_ld_mos_with_trusted_config(spa, type, NULL);
4686 * Retrieve the checkpoint txg if the pool has a checkpoint.
4688 error = spa_ld_read_checkpoint_txg(spa);
4693 * Retrieve the mapping of indirect vdevs. Those vdevs were removed
4694 * from the pool and their contents were re-mapped to other vdevs. Note
4695 * that everything that we read before this step must have been
4696 * rewritten on concrete vdevs after the last device removal was
4697 * initiated. Otherwise we could be reading from indirect vdevs before
4698 * we have loaded their mappings.
4700 error = spa_ld_open_indirect_vdev_metadata(spa);
4705 * Retrieve the full list of active features from the MOS and check if
4706 * they are all supported.
4708 error = spa_ld_check_features(spa, &missing_feat_write);
	/*
	 * Load several special directories from the MOS needed by the dsl_pool
	 * layer.
	 */
	error = spa_ld_load_special_directories(spa);
4721 * Retrieve pool properties from the MOS.
4723 error = spa_ld_get_props(spa);
	/*
	 * Retrieve the list of auxiliary devices - cache devices and spares -
	 * and open them.
	 */
	error = spa_ld_open_aux_vdevs(spa, type);
4736 * Load the metadata for all vdevs. Also check if unopenable devices
4737 * should be autoreplaced.
4739 error = spa_ld_load_vdev_metadata(spa);
4743 error = spa_ld_load_dedup_tables(spa);
4748 * Verify the logs now to make sure we don't have any unexpected errors
4749 * when we claim log blocks later.
4751 error = spa_ld_verify_logs(spa, type, ereport);
4755 if (missing_feat_write) {
4756 ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT);
		/*
		 * At this point, we know that we can open the pool in
		 * read-only mode but not read-write mode.  We now have enough
		 * information and can return to userland.
		 */
		return (spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT,
		    ENOTSUP));
	}
4768 * Traverse the last txgs to make sure the pool was left off in a safe
4769 * state. When performing an extreme rewind, we verify the whole pool,
4770 * which can take a very long time.
4772 error = spa_ld_verify_pool_data(spa);
4777 * Calculate the deflated space for the pool. This must be done before
4778 * we write anything to the pool because we'd need to update the space
4779 * accounting using the deflated sizes.
4781 spa_update_dspace(spa);
4784 * We have now retrieved all the information we needed to open the
4785 * pool. If we are importing the pool in read-write mode, a few
4786 * additional steps must be performed to finish the import.
4788 if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER ||
4789 spa->spa_load_max_txg == UINT64_MAX)) {
4790 uint64_t config_cache_txg = spa->spa_config_txg;
4792 ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT);
4795 * In case of a checkpoint rewind, log the original txg
4796 * of the checkpointed uberblock.
4798 if (checkpoint_rewind) {
4799 spa_history_log_internal(spa, "checkpoint rewind",
4800 NULL, "rewound state to txg=%llu",
4801 (u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg);
4805 * Traverse the ZIL and claim all blocks.
4807 spa_ld_claim_log_blocks(spa);
4810 * Kick-off the syncing thread.
4812 spa->spa_sync_on = B_TRUE;
4813 txg_sync_start(spa->spa_dsl_pool);
4814 mmp_thread_start(spa);
		/*
		 * Wait for all claims to sync.  We sync up to the highest
		 * claimed log block birth time so that claimed log blocks
		 * don't appear to be from the future.  spa_claim_max_txg
		 * will have been set for us by ZIL traversal operations
		 * performed above.
		 */
4823 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
4826 * Check if we need to request an update of the config. On the
4827 * next sync, we would update the config stored in vdev labels
4828 * and the cachefile (by default /etc/zfs/zpool.cache).
4830 spa_ld_check_for_config_update(spa, config_cache_txg,
4831 update_config_cache);
4834 * Check if a rebuild was in progress and if so resume it.
4835 * Then check all DTLs to see if anything needs resilvering.
4836 * The resilver will be deferred if a rebuild was started.
4838 if (vdev_rebuild_active(spa->spa_root_vdev)) {
4839 vdev_rebuild_restart(spa);
4840 } else if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
4841 vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
4842 spa_async_request(spa, SPA_ASYNC_RESILVER);
4846 * Log the fact that we booted up (so that we can detect if
4847 * we rebooted in the middle of an operation).
4849 spa_history_log_version(spa, "open", NULL);
4851 spa_restart_removal(spa);
4852 spa_spawn_aux_threads(spa);
		/*
		 * Delete any inconsistent datasets.
		 *
		 * Note:
		 * Since we may be issuing deletes for clones here,
		 * we make sure to do so after we've spawned all the
		 * auxiliary threads above (of which the livelist
		 * deletion zthr is a part).
		 */
4863 (void) dmu_objset_find(spa_name(spa),
4864 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
4867 * Clean up any stale temporary dataset userrefs.
4869 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
4871 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4872 vdev_initialize_restart(spa->spa_root_vdev);
4873 vdev_trim_restart(spa->spa_root_vdev);
4874 vdev_autotrim_restart(spa);
4875 spa_config_exit(spa, SCL_CONFIG, FTAG);
4878 spa_import_progress_remove(spa_guid(spa));
4879 spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
4881 spa_load_note(spa, "LOADED");
4887 spa_load_retry(spa_t *spa, spa_load_state_t state)
4889 spa_mode_t mode = spa->spa_mode;
4892 spa_deactivate(spa);
4894 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
4896 spa_activate(spa, mode);
4897 spa_async_suspend(spa);
4899 spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu",
4900 (u_longlong_t)spa->spa_load_max_txg);
4902 return (spa_load(spa, state, SPA_IMPORT_EXISTING));
/*
 * If spa_load() fails this function will try loading prior txgs. If
 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
 * function will not rewind the pool and will return the same error as
 * spa_load().
 */
static int
4913 spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request,
4916 nvlist_t *loadinfo = NULL;
4917 nvlist_t *config = NULL;
4918 int load_error, rewind_error;
4919 uint64_t safe_rewind_txg;
4922 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
4923 spa->spa_load_max_txg = spa->spa_load_txg;
4924 spa_set_log_state(spa, SPA_LOG_CLEAR);
4926 spa->spa_load_max_txg = max_request;
4927 if (max_request != UINT64_MAX)
4928 spa->spa_extreme_rewind = B_TRUE;
4931 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING);
4932 if (load_error == 0)
4934 if (load_error == ZFS_ERR_NO_CHECKPOINT) {
4936 * When attempting checkpoint-rewind on a pool with no
4937 * checkpoint, we should not attempt to load uberblocks
4938 * from previous txgs when spa_load fails.
4940 ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
4941 spa_import_progress_remove(spa_guid(spa));
4942 return (load_error);
4945 if (spa->spa_root_vdev != NULL)
4946 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
4948 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
4949 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
4951 if (rewind_flags & ZPOOL_NEVER_REWIND) {
4952 nvlist_free(config);
4953 spa_import_progress_remove(spa_guid(spa));
4954 return (load_error);
4957 if (state == SPA_LOAD_RECOVER) {
4958 /* Price of rolling back is discarding txgs, including log */
4959 spa_set_log_state(spa, SPA_LOG_CLEAR);
	} else {
		/*
		 * If we aren't rolling back save the load info from our first
		 * import attempt so that we can restore it after attempting
		 * the rewind.
		 */
4966 loadinfo = spa->spa_load_info;
4967 spa->spa_load_info = fnvlist_alloc();
4970 spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
4971 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
4972 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
4973 TXG_INITIAL : safe_rewind_txg;
	/*
	 * Continue as long as we're finding errors, we're still within
	 * the acceptable rewind range, and we're still finding uberblocks.
	 */
4979 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
4980 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
4981 if (spa->spa_load_max_txg < safe_rewind_txg)
4982 spa->spa_extreme_rewind = B_TRUE;
4983 rewind_error = spa_load_retry(spa, state);
4986 spa->spa_extreme_rewind = B_FALSE;
4987 spa->spa_load_max_txg = UINT64_MAX;
4989 if (config && (rewind_error || state != SPA_LOAD_RECOVER))
4990 spa_config_set(spa, config);
4992 nvlist_free(config);
4994 if (state == SPA_LOAD_RECOVER) {
4995 ASSERT3P(loadinfo, ==, NULL);
4996 spa_import_progress_remove(spa_guid(spa));
4997 return (rewind_error);
4999 /* Store the rewind info as part of the initial load info */
5000 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
5001 spa->spa_load_info);
5003 /* Restore the initial load info */
5004 fnvlist_free(spa->spa_load_info);
5005 spa->spa_load_info = loadinfo;
5007 spa_import_progress_remove(spa_guid(spa));
5008 return (load_error);
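	/*
	 * Userland view (hedged illustration): the retry loop above is what
	 * backs rewind imports, e.g.
	 *
	 *	zpool import -F <pool>	(SPA_LOAD_RECOVER, safe rewind range)
	 *	zpool import -FX <pool>	(adds ZPOOL_EXTREME_REWIND)
	 */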
/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is
 * sent down from userland, instead of grabbed from the configuration cache.
 * For the case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics
 * while opening the pool, without having to keep around the spa_t in some
 * ambiguous state.
 */
5025 spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
5029 spa_load_state_t state = SPA_LOAD_OPEN;
5031 int locked = B_FALSE;
5032 int firstopen = B_FALSE;
5037 * As disgusting as this is, we need to support recursive calls to this
5038 * function because dsl_dir_open() is called during spa_load(), and ends
5039 * up calling spa_open() again. The real fix is to figure out how to
5040 * avoid dsl_dir_open() calling this in the first place.
	if (MUTEX_NOT_HELD(&spa_namespace_lock)) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	if ((spa = spa_lookup(pool)) == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENOENT));
	}
5053 if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
5054 zpool_load_policy_t policy;
5058 zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config,
5060 if (policy.zlp_rewind & ZPOOL_DO_REWIND)
5061 state = SPA_LOAD_RECOVER;
5063 spa_activate(spa, spa_mode_global);
5065 if (state != SPA_LOAD_RECOVER)
5066 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
5067 spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
5069 zfs_dbgmsg("spa_open_common: opening %s", pool);
5070 error = spa_load_best(spa, state, policy.zlp_txg,
5073 if (error == EBADF) {
5075 * If vdev_validate() returns failure (indicated by
5076 * EBADF), it indicates that one of the vdevs indicates
5077 * that the pool has been exported or destroyed. If
5078 * this is the case, the config cache is out of sync and
5079 * we should remove the pool from the namespace.
5082 spa_deactivate(spa);
5083 spa_write_cachefile(spa, B_TRUE, B_TRUE);
5086 mutex_exit(&spa_namespace_lock);
5087 return (SET_ERROR(ENOENT));
5092 * We can't open the pool, but we still have useful
5093 * information: the state of each vdev after the
5094 * attempted vdev_open(). Return this to the user.
5096 if (config != NULL && spa->spa_config) {
5097 VERIFY(nvlist_dup(spa->spa_config, config,
5099 VERIFY(nvlist_add_nvlist(*config,
5100 ZPOOL_CONFIG_LOAD_INFO,
5101 spa->spa_load_info) == 0);
5104 spa_deactivate(spa);
5105 spa->spa_last_open_failed = error;
5107 mutex_exit(&spa_namespace_lock);
5113 spa_open_ref(spa, tag);
5116 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
5119 * If we've recovered the pool, pass back any information we
5120 * gathered while doing the load.
5122 if (state == SPA_LOAD_RECOVER) {
5123 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
5124 spa->spa_load_info) == 0);
5128 spa->spa_last_open_failed = 0;
5129 spa->spa_last_ubsync_txg = 0;
5130 spa->spa_load_txg = 0;
5131 mutex_exit(&spa_namespace_lock);
5135 zvol_create_minors_recursive(spa_name(spa));
5143 spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
5146 return (spa_open_common(name, spapp, tag, policy, config));
5150 spa_open(const char *name, spa_t **spapp, void *tag)
5152 return (spa_open_common(name, spapp, tag, NULL, NULL));
5156 * Lookup the given spa_t, incrementing the inject count in the process,
5157 * preventing it from being exported or destroyed.
5160 spa_inject_addref(char *name)
5164 mutex_enter(&spa_namespace_lock);
5165 if ((spa = spa_lookup(name)) == NULL) {
5166 mutex_exit(&spa_namespace_lock);
5169 spa->spa_inject_ref++;
5170 mutex_exit(&spa_namespace_lock);
5176 spa_inject_delref(spa_t *spa)
5178 mutex_enter(&spa_namespace_lock);
5179 spa->spa_inject_ref--;
5180 mutex_exit(&spa_namespace_lock);
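	/*
	 * Hedged usage sketch for the pair above (used by fault injection,
	 * e.g. zinject):
	 *
	 *	spa_t *spa = spa_inject_addref(name);
	 *	...inject and trigger faults...
	 *	spa_inject_delref(spa);
	 *
	 * The nonzero spa_inject_ref prevents export/destroy in the
	 * meantime (see spa_export_common()).
	 */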
5184 * Add spares device information to the nvlist.
5187 spa_add_spares(spa_t *spa, nvlist_t *config)
5197 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
5199 if (spa->spa_spares.sav_count == 0)
5202 VERIFY(nvlist_lookup_nvlist(config,
5203 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
5204 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
5205 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
5207 VERIFY(nvlist_add_nvlist_array(nvroot,
5208 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
5209 VERIFY(nvlist_lookup_nvlist_array(nvroot,
5210 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
5213 * Go through and find any spares which have since been
5214 * repurposed as an active spare. If this is the case, update
5215 * their status appropriately.
5217 for (i = 0; i < nspares; i++) {
5218 VERIFY(nvlist_lookup_uint64(spares[i],
5219 ZPOOL_CONFIG_GUID, &guid) == 0);
5220 if (spa_spare_exists(guid, &pool, NULL) &&
5222 VERIFY(nvlist_lookup_uint64_array(
5223 spares[i], ZPOOL_CONFIG_VDEV_STATS,
5224 (uint64_t **)&vs, &vsc) == 0);
5225 vs->vs_state = VDEV_STATE_CANT_OPEN;
5226 vs->vs_aux = VDEV_AUX_SPARED;
5233 * Add l2cache device information to the nvlist, including vdev stats.
5236 spa_add_l2cache(spa_t *spa, nvlist_t *config)
5239 uint_t i, j, nl2cache;
5246 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
5248 if (spa->spa_l2cache.sav_count == 0)
5251 VERIFY(nvlist_lookup_nvlist(config,
5252 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
5253 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
5254 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
5255 if (nl2cache != 0) {
5256 VERIFY(nvlist_add_nvlist_array(nvroot,
5257 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
5258 VERIFY(nvlist_lookup_nvlist_array(nvroot,
5259 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
5262 * Update level 2 cache device stats.
5265 for (i = 0; i < nl2cache; i++) {
5266 VERIFY(nvlist_lookup_uint64(l2cache[i],
5267 ZPOOL_CONFIG_GUID, &guid) == 0);
5270 for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
5272 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
5273 vd = spa->spa_l2cache.sav_vdevs[j];
5279 VERIFY(nvlist_lookup_uint64_array(l2cache[i],
5280 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
5282 vdev_get_stats(vd, vs);
5283 vdev_config_generate_stats(vd, l2cache[i]);
5290 spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features)
5295 if (spa->spa_feat_for_read_obj != 0) {
5296 for (zap_cursor_init(&zc, spa->spa_meta_objset,
5297 spa->spa_feat_for_read_obj);
5298 zap_cursor_retrieve(&zc, &za) == 0;
5299 zap_cursor_advance(&zc)) {
5300 ASSERT(za.za_integer_length == sizeof (uint64_t) &&
5301 za.za_num_integers == 1);
5302 VERIFY0(nvlist_add_uint64(features, za.za_name,
5303 za.za_first_integer));
5305 zap_cursor_fini(&zc);
5308 if (spa->spa_feat_for_write_obj != 0) {
5309 for (zap_cursor_init(&zc, spa->spa_meta_objset,
5310 spa->spa_feat_for_write_obj);
5311 zap_cursor_retrieve(&zc, &za) == 0;
5312 zap_cursor_advance(&zc)) {
5313 ASSERT(za.za_integer_length == sizeof (uint64_t) &&
5314 za.za_num_integers == 1);
5315 VERIFY0(nvlist_add_uint64(features, za.za_name,
5316 za.za_first_integer));
5318 zap_cursor_fini(&zc);
5323 spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features)
5327 for (i = 0; i < SPA_FEATURES; i++) {
5328 zfeature_info_t feature = spa_feature_table[i];
5331 if (feature_get_refcount(spa, &feature, &refcount) != 0)
5334 VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount));
/*
 * Store a list of pool features and their reference counts in the
 * config.
 *
 * The first time this is called on a spa, allocate a new nvlist, fetch
 * the pool features and reference counts from disk, then save the list
 * in the spa. In subsequent calls on the same spa use the saved nvlist
 * and refresh its values from the cached reference counts.  This
 * ensures we don't block here on I/O on a suspended pool so 'zpool
 * clear' can resume the pool.
 */
5350 spa_add_feature_stats(spa_t *spa, nvlist_t *config)
5354 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
5356 mutex_enter(&spa->spa_feat_stats_lock);
5357 features = spa->spa_feat_stats;
5359 if (features != NULL) {
5360 spa_feature_stats_from_cache(spa, features);
5362 VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP));
5363 spa->spa_feat_stats = features;
5364 spa_feature_stats_from_disk(spa, features);
5367 VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
5370 mutex_exit(&spa->spa_feat_stats_lock);
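	/*
	 * Hedged illustration of consuming the stats added above; the
	 * feature GUID shown is only an example:
	 *
	 *	nvlist_t *feat;
	 *	uint64_t refcount;
	 *	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
	 *	    &feat) == 0)
	 *		(void) nvlist_lookup_uint64(feat,
	 *		    "com.delphix:hole_birth", &refcount);
	 */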
5374 spa_get_stats(const char *name, nvlist_t **config,
5375 char *altroot, size_t buflen)
5381 error = spa_open_common(name, &spa, FTAG, NULL, config);
5385 * This still leaves a window of inconsistency where the spares
5386 * or l2cache devices could change and the config would be
5387 * self-inconsistent.
5389 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
5391 if (*config != NULL) {
5392 uint64_t loadtimes[2];
5394 loadtimes[0] = spa->spa_loaded_ts.tv_sec;
5395 loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
5396 VERIFY(nvlist_add_uint64_array(*config,
5397 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
5399 VERIFY(nvlist_add_uint64(*config,
5400 ZPOOL_CONFIG_ERRCOUNT,
5401 spa_get_errlog_size(spa)) == 0);
5403 if (spa_suspended(spa)) {
5404 VERIFY(nvlist_add_uint64(*config,
5405 ZPOOL_CONFIG_SUSPENDED,
5406 spa->spa_failmode) == 0);
5407 VERIFY(nvlist_add_uint64(*config,
5408 ZPOOL_CONFIG_SUSPENDED_REASON,
5409 spa->spa_suspended) == 0);
5412 spa_add_spares(spa, *config);
5413 spa_add_l2cache(spa, *config);
5414 spa_add_feature_stats(spa, *config);
5419 * We want to get the alternate root even for faulted pools, so we cheat
5420 * and call spa_lookup() directly.
5424 mutex_enter(&spa_namespace_lock);
5425 spa = spa_lookup(name);
5427 spa_altroot(spa, altroot, buflen);
5431 mutex_exit(&spa_namespace_lock);
5433 spa_altroot(spa, altroot, buflen);
5438 spa_config_exit(spa, SCL_CONFIG, FTAG);
5439 spa_close(spa, FTAG);
/*
 * Validate that the auxiliary device array is well formed.  We must have an
 * array of nvlists, each of which describes a valid leaf vdev.  If this is an
 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
 * specified, as long as they are well-formed.
 */
5452 spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
5453 spa_aux_vdev_t *sav, const char *config, uint64_t version,
5454 vdev_labeltype_t label)
5461 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5464 * It's acceptable to have no devs specified.
5466 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
5470 return (SET_ERROR(EINVAL));
	/*
	 * Make sure the pool is formatted with a version that supports this
	 * device type.
	 */
5476 if (spa_version(spa) < version)
5477 return (SET_ERROR(ENOTSUP));
	/*
	 * Set the pending device list so we correctly handle device in-use
	 * checking.
	 */
5483 sav->sav_pending = dev;
5484 sav->sav_npending = ndev;
5486 for (i = 0; i < ndev; i++) {
5487 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
5491 if (!vd->vdev_ops->vdev_op_leaf) {
5493 error = SET_ERROR(EINVAL);
5499 if ((error = vdev_open(vd)) == 0 &&
5500 (error = vdev_label_init(vd, crtxg, label)) == 0) {
5501 VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
5502 vd->vdev_guid) == 0);
5508 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
5515 sav->sav_pending = NULL;
5516 sav->sav_npending = 0;
5521 spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
5525 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5527 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
5528 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
5529 VDEV_LABEL_SPARE)) != 0) {
5533 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
5534 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
5535 VDEV_LABEL_L2CACHE));
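	/*
	 * Hedged sketch of the nvroot shape validated above: each auxiliary
	 * class is an nvlist array of leaf vdev descriptions, e.g.
	 *
	 *	nvlist_t *aux[1] = { leaf_nvl };
	 *	fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, aux, 1);
	 *
	 * (leaf_nvl here is hypothetical.)
	 */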
5539 spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
5544 if (sav->sav_config != NULL) {
		/*
		 * Generate new dev list by concatenating with the
		 * current dev list.
		 */
5553 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
5554 &olddevs, &oldndevs) == 0);
5556 newdevs = kmem_alloc(sizeof (void *) *
5557 (ndevs + oldndevs), KM_SLEEP);
5558 for (i = 0; i < oldndevs; i++)
5559 VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
5561 for (i = 0; i < ndevs; i++)
5562 VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
5565 VERIFY(nvlist_remove(sav->sav_config, config,
5566 DATA_TYPE_NVLIST_ARRAY) == 0);
5568 VERIFY(nvlist_add_nvlist_array(sav->sav_config,
5569 config, newdevs, ndevs + oldndevs) == 0);
5570 for (i = 0; i < oldndevs + ndevs; i++)
5571 nvlist_free(newdevs[i]);
5572 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
5575 * Generate a new dev list.
5577 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
5579 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
/*
 * Stop and drop level 2 ARC devices
 */
static void
5588 spa_l2cache_drop(spa_t *spa)
5592 spa_aux_vdev_t *sav = &spa->spa_l2cache;
5594 for (i = 0; i < sav->sav_count; i++) {
5597 vd = sav->sav_vdevs[i];
5600 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
5601 pool != 0ULL && l2arc_vdev_present(vd))
5602 l2arc_remove_vdev(vd);
5607 * Verify encryption parameters for spa creation. If we are encrypting, we must
5608 * have the encryption feature flag enabled.
5611 spa_create_check_encryption_params(dsl_crypto_params_t *dcp,
5612 boolean_t has_encryption)
5614 if (dcp->cp_crypt != ZIO_CRYPT_OFF &&
5615 dcp->cp_crypt != ZIO_CRYPT_INHERIT &&
5617 return (SET_ERROR(ENOTSUP));
5619 return (dmu_objset_create_crypt_check(NULL, dcp, NULL));
5626 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
5627 nvlist_t *zplprops, dsl_crypto_params_t *dcp)
5630 char *altroot = NULL;
5635 uint64_t txg = TXG_INITIAL;
5636 nvlist_t **spares, **l2cache;
5637 uint_t nspares, nl2cache;
5638 uint64_t version, obj;
5639 boolean_t has_features;
5640 boolean_t has_encryption;
5641 boolean_t has_allocclass;
5647 if (props == NULL ||
5648 nvlist_lookup_string(props, "tname", &poolname) != 0)
5649 poolname = (char *)pool;
5652 * If this pool already exists, return failure.
5654 mutex_enter(&spa_namespace_lock);
5655 if (spa_lookup(poolname) != NULL) {
5656 mutex_exit(&spa_namespace_lock);
5657 return (SET_ERROR(EEXIST));
5661 * Allocate a new spa_t structure.
5663 nvl = fnvlist_alloc();
5664 fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool);
5665 (void) nvlist_lookup_string(props,
5666 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
5667 spa = spa_add(poolname, nvl, altroot);
5669 spa_activate(spa, spa_mode_global);
5671 if (props && (error = spa_prop_validate(spa, props))) {
5672 spa_deactivate(spa);
5674 mutex_exit(&spa_namespace_lock);
5679 * Temporary pool names should never be written to disk.
5681 if (poolname != pool)
5682 spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME;
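	/*
	 * Hedged illustration: the temporary name arrives via a "tname"
	 * props entry (zpool create -t in userland), conceptually
	 *
	 *	fnvlist_add_string(props, "tname", tmpname);
	 *
	 * so the pool is created as 'tmpname' in core while 'pool' is the
	 * name written to disk.
	 */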
5684 has_features = B_FALSE;
5685 has_encryption = B_FALSE;
5686 has_allocclass = B_FALSE;
5687 for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
5688 elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
5689 if (zpool_prop_feature(nvpair_name(elem))) {
5690 has_features = B_TRUE;
5692 feat_name = strchr(nvpair_name(elem), '@') + 1;
5693 VERIFY0(zfeature_lookup_name(feat_name, &feat));
5694 if (feat == SPA_FEATURE_ENCRYPTION)
5695 has_encryption = B_TRUE;
5696 if (feat == SPA_FEATURE_ALLOCATION_CLASSES)
5697 has_allocclass = B_TRUE;
5701 /* verify encryption params, if they were provided */
5703 error = spa_create_check_encryption_params(dcp, has_encryption);
5705 spa_deactivate(spa);
5707 mutex_exit(&spa_namespace_lock);
5711 if (!has_allocclass && zfs_special_devs(nvroot, NULL)) {
5712 spa_deactivate(spa);
5714 mutex_exit(&spa_namespace_lock);
5718 if (has_features || nvlist_lookup_uint64(props,
5719 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
5720 version = SPA_VERSION;
5722 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
5724 spa->spa_first_txg = txg;
5725 spa->spa_uberblock.ub_txg = txg - 1;
5726 spa->spa_uberblock.ub_version = version;
5727 spa->spa_ubsync = spa->spa_uberblock;
5728 spa->spa_load_state = SPA_LOAD_CREATE;
5729 spa->spa_removing_phys.sr_state = DSS_NONE;
5730 spa->spa_removing_phys.sr_removing_vdev = -1;
5731 spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
5732 spa->spa_indirect_vdevs_loaded = B_TRUE;
5735 * Create "The Godfather" zio to hold all async IOs
5737 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
5739 for (int i = 0; i < max_ncpus; i++) {
5740 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
5741 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
5742 ZIO_FLAG_GODFATHER);
5746 * Create the root vdev.
5748 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5750 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
5752 ASSERT(error != 0 || rvd != NULL);
5753 ASSERT(error != 0 || spa->spa_root_vdev == rvd);
5755 if (error == 0 && !zfs_allocatable_devs(nvroot))
5756 error = SET_ERROR(EINVAL);
5759 (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
5760 (error = spa_validate_aux(spa, nvroot, txg,
5761 VDEV_ALLOC_ADD)) == 0) {
5763 * instantiate the metaslab groups (this will dirty the vdevs)
5764 * we can no longer error exit past this point
5766 for (int c = 0; error == 0 && c < rvd->vdev_children; c++) {
5767 vdev_t *vd = rvd->vdev_child[c];
5769 vdev_metaslab_set_size(vd);
5770 vdev_expand(vd, txg);
5774 spa_config_exit(spa, SCL_ALL, FTAG);
5778 spa_deactivate(spa);
5780 mutex_exit(&spa_namespace_lock);
5785 * Get the list of spares, if specified.
5787 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
5788 &spares, &nspares) == 0) {
5789 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
5791 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
5792 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
5793 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5794 spa_load_spares(spa);
5795 spa_config_exit(spa, SCL_ALL, FTAG);
5796 spa->spa_spares.sav_sync = B_TRUE;
5800 * Get the list of level 2 cache devices, if specified.
5802 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
5803 &l2cache, &nl2cache) == 0) {
5804 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
5805 NV_UNIQUE_NAME, KM_SLEEP) == 0);
5806 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
5807 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
5808 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5809 spa_load_l2cache(spa);
5810 spa_config_exit(spa, SCL_ALL, FTAG);
5811 spa->spa_l2cache.sav_sync = B_TRUE;
5814 spa->spa_is_initializing = B_TRUE;
5815 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg);
5816 spa->spa_is_initializing = B_FALSE;
	/*
	 * Create DDTs (dedup tables).
	 */
	ddt_create(spa);
5823 spa_update_dspace(spa);
5825 tx = dmu_tx_create_assigned(dp, txg);
5828 * Create the pool's history object.
5830 if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history)
5831 spa_history_create_obj(spa, tx);
5833 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
5834 spa_history_log_version(spa, "create", tx);
5837 * Create the pool config object.
5839 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
5840 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
5841 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
5843 if (zap_add(spa->spa_meta_objset,
5844 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
5845 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
5846 cmn_err(CE_PANIC, "failed to add pool config");
5849 if (zap_add(spa->spa_meta_objset,
5850 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
5851 sizeof (uint64_t), 1, &version, tx) != 0) {
5852 cmn_err(CE_PANIC, "failed to add pool version");
5855 /* Newly created pools with the right version are always deflated. */
5856 if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
5857 spa->spa_deflate = TRUE;
5858 if (zap_add(spa->spa_meta_objset,
5859 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
5860 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
5861 cmn_err(CE_PANIC, "failed to add deflate");
	/*
	 * Create the deferred-free bpobj.  Turn off compression
	 * because sync-to-convergence takes longer if the blocksize
	 * keeps changing.
	 */
5871 dmu_object_set_compress(spa->spa_meta_objset, obj,
5872 ZIO_COMPRESS_OFF, tx);
5873 if (zap_add(spa->spa_meta_objset,
5874 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
5875 sizeof (uint64_t), 1, &obj, tx) != 0) {
5876 cmn_err(CE_PANIC, "failed to add bpobj");
5878 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
5879 spa->spa_meta_objset, obj));
5882 * Generate some random noise for salted checksums to operate on.
5884 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
5885 sizeof (spa->spa_cksum_salt.zcs_bytes));
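	/*
	 * Hedged note: this salt is what the salted checksum
	 * implementations (e.g. the SKEIN and EDONR zio checksums) mix
	 * into every checksum they compute for this pool.
	 */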
5888 * Set pool properties.
5890 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
5891 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
5892 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
5893 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
5894 spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST);
5895 spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM);
5897 if (props != NULL) {
5898 spa_configfile_set(spa, props, B_FALSE);
5899 spa_sync_props(props, tx);
5904 spa->spa_sync_on = B_TRUE;
5906 mmp_thread_start(spa);
5907 txg_wait_synced(dp, txg);
5909 spa_spawn_aux_threads(spa);
5911 spa_write_cachefile(spa, B_FALSE, B_TRUE);
5914 * Don't count references from objsets that are already closed
5915 * and are making their way through the eviction process.
5917 spa_evicting_os_wait(spa);
5918 spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
5919 spa->spa_load_state = SPA_LOAD_NONE;
5921 mutex_exit(&spa_namespace_lock);
5927 * Import a non-root pool into the system.
5930 spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
5933 char *altroot = NULL;
5934 spa_load_state_t state = SPA_LOAD_IMPORT;
5935 zpool_load_policy_t policy;
5936 spa_mode_t mode = spa_mode_global;
5937 uint64_t readonly = B_FALSE;
5940 nvlist_t **spares, **l2cache;
5941 uint_t nspares, nl2cache;
5944 * If a pool with this name exists, return failure.
5946 mutex_enter(&spa_namespace_lock);
5947 if (spa_lookup(pool) != NULL) {
5948 mutex_exit(&spa_namespace_lock);
5949 return (SET_ERROR(EEXIST));
5953 * Create and initialize the spa structure.
5955 (void) nvlist_lookup_string(props,
5956 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
5957 (void) nvlist_lookup_uint64(props,
5958 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
	if (readonly)
		mode = SPA_MODE_READ;
5961 spa = spa_add(pool, config, altroot);
5962 spa->spa_import_flags = flags;
5965 * Verbatim import - Take a pool and insert it into the namespace
5966 * as if it had been loaded at boot.
5968 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
5970 spa_configfile_set(spa, props, B_FALSE);
5972 spa_write_cachefile(spa, B_FALSE, B_TRUE);
5973 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
5974 zfs_dbgmsg("spa_import: verbatim import of %s", pool);
5975 mutex_exit(&spa_namespace_lock);
5979 spa_activate(spa, mode);
5982 * Don't start async tasks until we know everything is healthy.
5984 spa_async_suspend(spa);
5986 zpool_get_load_policy(config, &policy);
5987 if (policy.zlp_rewind & ZPOOL_DO_REWIND)
5988 state = SPA_LOAD_RECOVER;
5990 spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT;
5992 if (state != SPA_LOAD_RECOVER) {
5993 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
5994 zfs_dbgmsg("spa_import: importing %s", pool);
5996 zfs_dbgmsg("spa_import: importing %s, max_txg=%lld "
5997 "(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg);
5999 error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind);
6002 * Propagate anything learned while loading the pool and pass it
6003 * back to caller (i.e. rewind info, missing devices, etc).
6005 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
6006 spa->spa_load_info) == 0);
6008 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6010 * Toss any existing sparelist, as it doesn't have any validity
6011 * anymore, and conflicts with spa_has_spare().
6013 if (spa->spa_spares.sav_config) {
6014 nvlist_free(spa->spa_spares.sav_config);
6015 spa->spa_spares.sav_config = NULL;
6016 spa_load_spares(spa);
6018 if (spa->spa_l2cache.sav_config) {
6019 nvlist_free(spa->spa_l2cache.sav_config);
6020 spa->spa_l2cache.sav_config = NULL;
6021 spa_load_l2cache(spa);
6024 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
6026 spa_config_exit(spa, SCL_ALL, FTAG);
6029 spa_configfile_set(spa, props, B_FALSE);
6031 if (error != 0 || (props && spa_writeable(spa) &&
6032 (error = spa_prop_set(spa, props)))) {
6034 spa_deactivate(spa);
6036 mutex_exit(&spa_namespace_lock);
6040 spa_async_resume(spa);
6043 * Override any spares and level 2 cache devices as specified by
6044 * the user, as these may have correct device names/devids, etc.
6046 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
6047 &spares, &nspares) == 0) {
6048 if (spa->spa_spares.sav_config)
6049 VERIFY(nvlist_remove(spa->spa_spares.sav_config,
6050 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
6052 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
6053 NV_UNIQUE_NAME, KM_SLEEP) == 0);
6054 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
6055 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
6056 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6057 spa_load_spares(spa);
6058 spa_config_exit(spa, SCL_ALL, FTAG);
6059 spa->spa_spares.sav_sync = B_TRUE;
6061 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
6062 &l2cache, &nl2cache) == 0) {
6063 if (spa->spa_l2cache.sav_config)
6064 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
6065 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
6067 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
6068 NV_UNIQUE_NAME, KM_SLEEP) == 0);
6069 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
6070 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
6071 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6072 spa_load_l2cache(spa);
6073 spa_config_exit(spa, SCL_ALL, FTAG);
6074 spa->spa_l2cache.sav_sync = B_TRUE;
6078 * Check for any removed devices.
6080 if (spa->spa_autoreplace) {
6081 spa_aux_check_removed(&spa->spa_spares);
6082 spa_aux_check_removed(&spa->spa_l2cache);
6085 if (spa_writeable(spa)) {
6087 * Update the config cache to include the newly-imported pool.
6089 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
6093 * It's possible that the pool was expanded while it was exported.
6094 * We kick off an async task to handle this for us.
6096 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
6098 spa_history_log_version(spa, "import", NULL);
6100 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
6102 mutex_exit(&spa_namespace_lock);
6104 zvol_create_minors_recursive(pool);
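	/*
	 * Userland view (hedged): the paths above back, for example,
	 *
	 *	zpool import <pool>	(normal import)
	 *	zpool import -F <pool>	(ZPOOL_DO_REWIND via load policy)
	 *
	 * while verbatim imports (ZFS_IMPORT_VERBATIM) skip spa_load()
	 * entirely, as shown near the top of this function.
	 */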
6110 spa_tryimport(nvlist_t *tryconfig)
6112 nvlist_t *config = NULL;
6113 char *poolname, *cachefile;
6117 zpool_load_policy_t policy;
	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
		return (SET_ERROR(EINVAL));
	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
		return (SET_ERROR(EINVAL));
6126 * Create and initialize the spa structure.
6128 mutex_enter(&spa_namespace_lock);
6129 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
6130 spa_activate(spa, SPA_MODE_READ);
6133 * Rewind pool if a max txg was provided.
6135 zpool_get_load_policy(spa->spa_config, &policy);
6136 if (policy.zlp_txg != UINT64_MAX) {
6137 spa->spa_load_max_txg = policy.zlp_txg;
6138 spa->spa_extreme_rewind = B_TRUE;
6139 zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld",
6140 poolname, (longlong_t)policy.zlp_txg);
6142 zfs_dbgmsg("spa_tryimport: importing %s", poolname);
6145 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile)
6147 zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile);
6148 spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
6150 spa->spa_config_source = SPA_CONFIG_SRC_SCAN;
6153 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING);
6156 * If 'tryconfig' was at least parsable, return the current config.
6158 if (spa->spa_root_vdev != NULL) {
6159 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
		VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
		    poolname) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    state) == 0);
6164 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
6165 spa->spa_uberblock.ub_timestamp) == 0);
6166 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
6167 spa->spa_load_info) == 0);
6168 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
6169 spa->spa_errata) == 0);
6172 * If the bootfs property exists on this pool then we
6173 * copy it out so that external consumers can tell which
6174 * pools are bootable.
6176 if ((!error || error == EEXIST) && spa->spa_bootfs) {
6177 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
6180 * We have to play games with the name since the
6181 * pool was opened as TRYIMPORT_NAME.
6183 if (dsl_dsobj_to_dsname(spa_name(spa),
6184 spa->spa_bootfs, tmpname) == 0) {
6188 dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
6190 cp = strchr(tmpname, '/');
6192 (void) strlcpy(dsname, tmpname,
6195 (void) snprintf(dsname, MAXPATHLEN,
6196 "%s/%s", poolname, ++cp);
6198 VERIFY(nvlist_add_string(config,
6199 ZPOOL_CONFIG_BOOTFS, dsname) == 0);
6200 kmem_free(dsname, MAXPATHLEN);
6202 kmem_free(tmpname, MAXPATHLEN);
6206 * Add the list of hot spares and level 2 cache devices.
6208 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6209 spa_add_spares(spa, config);
6210 spa_add_l2cache(spa, config);
6211 spa_config_exit(spa, SCL_CONFIG, FTAG);
6215 spa_deactivate(spa);
6217 mutex_exit(&spa_namespace_lock);
6223 * Pool export/destroy
6225 * The act of destroying or exporting a pool is very simple. We make sure there
6226 * is no more pending I/O and any references to the pool are gone. Then, we
6227 * update the pool state and sync all the labels to disk, removing the
6228 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
6229 * we don't sync the labels or remove the configuration cache.
6232 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
6233 boolean_t force, boolean_t hardforce)
6240 if (!(spa_mode_global & SPA_MODE_WRITE))
6241 return (SET_ERROR(EROFS));
6243 mutex_enter(&spa_namespace_lock);
6244 if ((spa = spa_lookup(pool)) == NULL) {
6245 mutex_exit(&spa_namespace_lock);
6246 return (SET_ERROR(ENOENT));
6249 if (spa->spa_is_exporting) {
6250 /* the pool is being exported by another thread */
6251 mutex_exit(&spa_namespace_lock);
6252 return (SET_ERROR(ZFS_ERR_EXPORT_IN_PROGRESS));
6254 spa->spa_is_exporting = B_TRUE;
6257 * Put a hold on the pool, drop the namespace lock, stop async tasks,
6258 * reacquire the namespace lock, and see if we can export.
6260 spa_open_ref(spa, FTAG);
6261 mutex_exit(&spa_namespace_lock);
6262 spa_async_suspend(spa);
6263 if (spa->spa_zvol_taskq) {
6264 zvol_remove_minors(spa, spa_name(spa), B_TRUE);
6265 taskq_wait(spa->spa_zvol_taskq);
6267 mutex_enter(&spa_namespace_lock);
6268 spa_close(spa, FTAG);
6270 if (spa->spa_state == POOL_STATE_UNINITIALIZED)
6273 * The pool will be in core if it's openable, in which case we can
6274 * modify its state. Objsets may be open only because they're dirty,
6275 * so we have to force it to sync before checking spa_refcnt.
6277 if (spa->spa_sync_on) {
6278 txg_wait_synced(spa->spa_dsl_pool, 0);
6279 spa_evicting_os_wait(spa);
6283 * A pool cannot be exported or destroyed if there are active
6284 * references. If we are resetting a pool, allow references by
6285 * fault injection handlers.
6287 if (!spa_refcount_zero(spa) ||
6288 (spa->spa_inject_ref != 0 &&
6289 new_state != POOL_STATE_UNINITIALIZED)) {
6290 spa_async_resume(spa);
6291 spa->spa_is_exporting = B_FALSE;
6292 mutex_exit(&spa_namespace_lock);
6293 return (SET_ERROR(EBUSY));
6296 if (spa->spa_sync_on) {
		/*
		 * A pool cannot be exported if it has an active shared spare.
		 * This is to prevent other pools stealing the active spare
		 * from an exported pool.  At the user's own discretion, such
		 * a pool can be forcibly exported.
		 */
6303 if (!force && new_state == POOL_STATE_EXPORTED &&
6304 spa_has_active_shared_spare(spa)) {
6305 spa_async_resume(spa);
6306 spa->spa_is_exporting = B_FALSE;
6307 mutex_exit(&spa_namespace_lock);
6308 return (SET_ERROR(EXDEV));
6312 * We're about to export or destroy this pool. Make sure
6313 * we stop all initialization and trim activity here before
6314 * we set the spa_final_txg. This will ensure that all
6315 * dirty data resulting from the initialization is
6316 * committed to disk before we unload the pool.
6318 if (spa->spa_root_vdev != NULL) {
6319 vdev_t *rvd = spa->spa_root_vdev;
6320 vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE);
6321 vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE);
6322 vdev_autotrim_stop_all(spa);
6323 vdev_rebuild_stop_all(spa);
6327 * We want this to be reflected on every label,
6328 * so mark them all dirty. spa_unload() will do the
6329 * final sync that pushes these changes out.
6331 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
6332 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6333 spa->spa_state = new_state;
		spa->spa_final_txg = spa_last_synced_txg(spa) +
		    TXG_DEFER_SIZE + 1;
6336 vdev_config_dirty(spa->spa_root_vdev);
6337 spa_config_exit(spa, SCL_ALL, FTAG);
6342 if (new_state == POOL_STATE_DESTROYED)
6343 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
6344 else if (new_state == POOL_STATE_EXPORTED)
6345 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT);
6347 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
6349 spa_deactivate(spa);
6352 if (oldconfig && spa->spa_config)
6353 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
6355 if (new_state != POOL_STATE_UNINITIALIZED) {
6357 spa_write_cachefile(spa, B_TRUE, B_TRUE);
6361 * If spa_remove() is not called for this spa_t and
6362 * there is any possibility that it can be reused,
6363 * we make sure to reset the exporting flag.
6365 spa->spa_is_exporting = B_FALSE;
6368 mutex_exit(&spa_namespace_lock);
6373 * Destroy a storage pool.
6376 spa_destroy(char *pool)
6378 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
6383 * Export a storage pool.
6386 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
6387 boolean_t hardforce)
6389 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
6394 * Similar to spa_export(), this unloads the spa_t without actually removing it
6395 * from the namespace in any way.
6398 spa_reset(char *pool)
6400 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
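/*
 * Hedged summary of the three wrappers above: they differ only in the
 * new_state passed to spa_export_common(),
 *
 *	spa_destroy()	POOL_STATE_DESTROYED
 *	spa_export()	POOL_STATE_EXPORTED
 *	spa_reset()	POOL_STATE_UNINITIALIZED
 *
 * plus their force/hardforce arguments (elided above).
 */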
6405 * ==========================================================================
6406 * Device manipulation
6407 * ==========================================================================
6411 * Add a device to a storage pool.
6414 spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
6418 vdev_t *rvd = spa->spa_root_vdev;
6420 nvlist_t **spares, **l2cache;
6421 uint_t nspares, nl2cache;
6423 ASSERT(spa_writeable(spa));
6425 txg = spa_vdev_enter(spa);
6427 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
6428 VDEV_ALLOC_ADD)) != 0)
6429 return (spa_vdev_exit(spa, NULL, txg, error));
6431 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
6433 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
6437 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
6441 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
6442 return (spa_vdev_exit(spa, vd, txg, EINVAL));
6444 if (vd->vdev_children != 0 &&
6445 (error = vdev_create(vd, txg, B_FALSE)) != 0)
6446 return (spa_vdev_exit(spa, vd, txg, error));
6449 * We must validate the spares and l2cache devices after checking the
6450 * children. Otherwise, vdev_inuse() will blindly overwrite the spare.
6452 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
6453 return (spa_vdev_exit(spa, vd, txg, error));
6456 * If we are in the middle of a device removal, we can only add
6457 * devices which match the existing devices in the pool.
	 * If we are in the middle of a removal, or have some indirect
	 * vdevs, we cannot add raidz toplevels.
	 */
6461 if (spa->spa_vdev_removal != NULL ||
6462 spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
6463 for (int c = 0; c < vd->vdev_children; c++) {
6464 tvd = vd->vdev_child[c];
6465 if (spa->spa_vdev_removal != NULL &&
6466 tvd->vdev_ashift != spa->spa_max_ashift) {
6467 return (spa_vdev_exit(spa, vd, txg, EINVAL));
6469 /* Fail if top level vdev is raidz */
6470 if (tvd->vdev_ops == &vdev_raidz_ops) {
6471 return (spa_vdev_exit(spa, vd, txg, EINVAL));
6474 * Need the top level mirror to be
6475 * a mirror of leaf vdevs only
6477 if (tvd->vdev_ops == &vdev_mirror_ops) {
6478 for (uint64_t cid = 0;
6479 cid < tvd->vdev_children; cid++) {
6480 vdev_t *cvd = tvd->vdev_child[cid];
6481 if (!cvd->vdev_ops->vdev_op_leaf) {
6482 return (spa_vdev_exit(spa, vd,
6490 for (int c = 0; c < vd->vdev_children; c++) {
6491 tvd = vd->vdev_child[c];
6492 vdev_remove_child(vd, tvd);
6493 tvd->vdev_id = rvd->vdev_children;
6494 vdev_add_child(rvd, tvd);
6495 vdev_config_dirty(tvd);
6499 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
6500 ZPOOL_CONFIG_SPARES);
6501 spa_load_spares(spa);
6502 spa->spa_spares.sav_sync = B_TRUE;
6505 if (nl2cache != 0) {
6506 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
6507 ZPOOL_CONFIG_L2CACHE);
6508 spa_load_l2cache(spa);
6509 spa->spa_l2cache.sav_sync = B_TRUE;
6513 * We have to be careful when adding new vdevs to an existing pool.
6514 * If other threads start allocating from these vdevs before we
6515 * sync the config cache, and we lose power, then upon reboot we may
6516 * fail to open the pool because there are DVAs that the config cache
6517 * can't translate. Therefore, we first add the vdevs without
6518 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
6519 * and then let spa_config_update() initialize the new metaslabs.
6521 * spa_load() checks for added-but-not-initialized vdevs, so that
6522 * if we lose power at any point in this sequence, the remaining
6523 * steps will be completed the next time we load the pool.
6525 (void) spa_vdev_exit(spa, vd, txg, 0);
6527 mutex_enter(&spa_namespace_lock);
6528 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
6529 spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD);
6530 mutex_exit(&spa_namespace_lock);
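/*
 * Illustrative sketch, not part of the original source: how a caller
 * might build the nvroot consumed by spa_vdev_add() above. The helper
 * name make_disk_nvroot() and its path argument are hypothetical; the
 * nvlist keys and fnvlist_*() routines are the interfaces used
 * throughout this file.
 */
static nvlist_t *
make_disk_nvroot(const char *path)
{
	nvlist_t *disk = fnvlist_alloc();
	nvlist_t *root = fnvlist_alloc();
	fnvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
	fnvlist_add_string(disk, ZPOOL_CONFIG_PATH, path);
	fnvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
	fnvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN, &disk, 1);
	fnvlist_free(disk);
	return (root);
}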
6536 * Attach a device to a mirror. The arguments are the path to any device
6537 * in the mirror, and the nvroot for the new device. If the path specifies
6538 * a device that is not mirrored, we automatically insert the mirror vdev.
6540 * If 'replacing' is specified, the new device is intended to replace the
6541 * existing device; in this case the two devices are made into their own
6542 * mirror using the 'replacing' vdev, which is functionally identical to
6543 * the mirror vdev (it actually reuses all the same ops) but has a few
6544 * extra rules: you can't attach to it after it's been created, and upon
6545 * completion of resilvering, the first disk (the one being replaced)
6546 * is automatically detached.
6548 * If 'rebuild' is specified, then sequential reconstruction (a.k.a. rebuild)
6549 * should be performed instead of traditional healing reconstruction. From
6550 * an administrator's perspective these are both resilver operations.
6553 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
6556 uint64_t txg, dtl_max_txg;
6557 vdev_t *rvd = spa->spa_root_vdev;
6558 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
6560 char *oldvdpath, *newvdpath;
6564 ASSERT(spa_writeable(spa));
6566 txg = spa_vdev_enter(spa);
6568 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
6570 ASSERT(MUTEX_HELD(&spa_namespace_lock));
6571 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
6572 error = (spa_has_checkpoint(spa)) ?
6573 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
6574 return (spa_vdev_exit(spa, NULL, txg, error));
6578 if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
6579 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6581 if (dsl_scan_resilvering(spa_get_dsl(spa)))
6582 return (spa_vdev_exit(spa, NULL, txg,
6583 ZFS_ERR_RESILVER_IN_PROGRESS));
6585 if (vdev_rebuild_active(rvd))
6586 return (spa_vdev_exit(spa, NULL, txg,
6587 ZFS_ERR_REBUILD_IN_PROGRESS));
6590 if (spa->spa_vdev_removal != NULL)
6591 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
6594 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
6596 if (!oldvd->vdev_ops->vdev_op_leaf)
6597 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6599 pvd = oldvd->vdev_parent;
6601 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
6602 VDEV_ALLOC_ATTACH)) != 0)
6603 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
6605 if (newrootvd->vdev_children != 1)
6606 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
6608 newvd = newrootvd->vdev_child[0];
6610 if (!newvd->vdev_ops->vdev_op_leaf)
6611 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
6613 if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
6614 return (spa_vdev_exit(spa, newrootvd, txg, error));
6617 * Spares can't replace logs
6619 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
6620 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6624 * For rebuilds, the parent vdev must support reconstruction
6625 * using only space maps. This means the only allowable
6626 * parents are the root vdev or a mirror vdev.
6628 if (pvd->vdev_ops != &vdev_mirror_ops &&
6629 pvd->vdev_ops != &vdev_root_ops) {
6630 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6636 * For attach, the only allowable parent is a mirror or the root vdev.
6639 if (pvd->vdev_ops != &vdev_mirror_ops &&
6640 pvd->vdev_ops != &vdev_root_ops)
6641 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6643 pvops = &vdev_mirror_ops;
6646 * Active hot spares can only be replaced by inactive hot spares.
6649 if (pvd->vdev_ops == &vdev_spare_ops &&
6650 oldvd->vdev_isspare &&
6651 !spa_has_spare(spa, newvd->vdev_guid))
6652 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6655 * If the source is a hot spare, and the parent isn't already a
6656 * spare, then we want to create a new hot spare. Otherwise, we
6657 * want to create a replacing vdev. The user is not allowed to
6658 * attach to a spared vdev child unless the 'isspare' state is
6659 * the same (spare replaces spare, non-spare replaces non-spare).
6662 if (pvd->vdev_ops == &vdev_replacing_ops &&
6663 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
6664 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6665 } else if (pvd->vdev_ops == &vdev_spare_ops &&
6666 newvd->vdev_isspare != oldvd->vdev_isspare) {
6667 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6670 if (newvd->vdev_isspare)
6671 pvops = &vdev_spare_ops;
6673 pvops = &vdev_replacing_ops;
6677 * Make sure the new device is big enough.
6679 if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
6680 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
6683 * The new device cannot have a higher alignment requirement
6684 * than the top-level vdev.
6686 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
6687 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6690 * If this is an in-place replacement, update oldvd's path and devid
6691 * to make it distinguishable from newvd, and unopenable from now on.
6693 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
6694 spa_strfree(oldvd->vdev_path);
6695 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
6697 (void) snprintf(oldvd->vdev_path, strlen(newvd->vdev_path) + 5,
6698 "%s/%s", newvd->vdev_path, "old");
6699 if (oldvd->vdev_devid != NULL) {
6700 spa_strfree(oldvd->vdev_devid);
6701 oldvd->vdev_devid = NULL;
6706 * If the parent is not a mirror, or if we're replacing, insert the new
6707 * mirror/replacing/spare vdev above oldvd.
6709 if (pvd->vdev_ops != pvops)
6710 pvd = vdev_add_parent(oldvd, pvops);
6712 ASSERT(pvd->vdev_top->vdev_parent == rvd);
6713 ASSERT(pvd->vdev_ops == pvops);
6714 ASSERT(oldvd->vdev_parent == pvd);
6717 * Extract the new device from its root and add it to pvd.
6719 vdev_remove_child(newrootvd, newvd);
6720 newvd->vdev_id = pvd->vdev_children;
6721 newvd->vdev_crtxg = oldvd->vdev_crtxg;
6722 vdev_add_child(pvd, newvd);
6725 * Reevaluate the parent vdev state.
6727 vdev_propagate_state(pvd);
6729 tvd = newvd->vdev_top;
6730 ASSERT(pvd->vdev_top == tvd);
6731 ASSERT(tvd->vdev_parent == rvd);
6733 vdev_config_dirty(tvd);
6736 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
6737 * for any dmu_sync-ed blocks. It will propagate upward when
6738 * spa_vdev_exit() calls vdev_dtl_reassess().
6740 dtl_max_txg = txg + TXG_CONCURRENT_STATES;
6742 vdev_dtl_dirty(newvd, DTL_MISSING,
6743 TXG_INITIAL, dtl_max_txg - TXG_INITIAL);
6745 if (newvd->vdev_isspare) {
6746 spa_spare_activate(newvd);
6747 spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
6750 oldvdpath = spa_strdup(oldvd->vdev_path);
6751 newvdpath = spa_strdup(newvd->vdev_path);
6752 newvd_isspare = newvd->vdev_isspare;
6755 * Mark newvd's DTL dirty in this txg.
6757 vdev_dirty(tvd, VDD_DTL, newvd, txg);
6760 * Schedule the resilver or rebuild to restart in the future. We do
6761 * this to ensure that dmu_sync-ed blocks have been stitched into the
6762 * respective datasets.
6765 newvd->vdev_rebuild_txg = txg;
6769 newvd->vdev_resilver_txg = txg;
6771 if (dsl_scan_resilvering(spa_get_dsl(spa)) &&
6772 spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) {
6773 vdev_defer_resilver(newvd);
6775 dsl_scan_restart_resilver(spa->spa_dsl_pool,
6780 if (spa->spa_bootfs)
6781 spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH);
6783 spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH);
6788 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
6790 spa_history_log_internal(spa, "vdev attach", NULL,
6791 "%s vdev=%s %s vdev=%s",
6792 replacing && newvd_isspare ? "spare in" :
6793 replacing ? "replace" : "attach", newvdpath,
6794 replacing ? "for" : "to", oldvdpath);
6796 spa_strfree(oldvdpath);
6797 spa_strfree(newvdpath);
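/*
 * Distilled sketch, not part of the original source: the parent-vdev
 * choice made by spa_vdev_attach() above, assuming the spare and
 * multi-replace checks have already passed. A plain attach always grows
 * a mirror; a replace uses the spare vdev when the incoming child is a
 * hot spare, and the replacing vdev otherwise. attach_parent_ops() is a
 * hypothetical helper.
 */
static vdev_ops_t *
attach_parent_ops(boolean_t replacing, boolean_t newvd_isspare)
{
	if (!replacing)
		return (&vdev_mirror_ops);
	if (newvd_isspare)
		return (&vdev_spare_ops);
	return (&vdev_replacing_ops);
}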
6803 * Detach a device from a mirror or replacing vdev.
6805 * If 'replace_done' is specified, only detach if the parent
6806 * is a replacing vdev.
6809 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
6813 vdev_t *rvd __maybe_unused = spa->spa_root_vdev;
6814 vdev_t *vd, *pvd, *cvd, *tvd;
6815 boolean_t unspare = B_FALSE;
6816 uint64_t unspare_guid = 0;
6819 ASSERT(spa_writeable(spa));
6821 txg = spa_vdev_detach_enter(spa, guid);
6823 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
6826 * Besides being called directly from userland through the
6827 * ioctl interface, spa_vdev_detach() can be potentially called
6828 * at the end of spa_vdev_resilver_done().
6830 * In the regular case, when we have a checkpoint this shouldn't
6831 * happen as we never empty the DTLs of a vdev during the scrub
6832 * [see comment in dsl_scan_done()]. Thus spa_vdev_resilver_done()
6833 * should never get here when we have a checkpoint.
6835 * That said, even in the case where we checkpoint the pool exactly
6836 * as spa_vdev_resilver_done() calls this function, everything
6837 * should be fine since the resilver will return right away.
6839 ASSERT(MUTEX_HELD(&spa_namespace_lock));
6840 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
6841 error = (spa_has_checkpoint(spa)) ?
6842 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
6843 return (spa_vdev_exit(spa, NULL, txg, error));
6847 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
6849 if (!vd->vdev_ops->vdev_op_leaf)
6850 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6852 pvd = vd->vdev_parent;
6855 * If the parent/child relationship is not as expected, don't do it.
6856 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
6857 * vdev that's replacing B with C. The user's intent in replacing
6858 * is to go from M(A,B) to M(A,C). If the user decides to cancel
6859 * the replace by detaching C, the expected behavior is to end up
6860 * M(A,B). But suppose that right after deciding to detach C,
6861 * the replacement of B completes. We would have M(A,C), and then
6862 * ask to detach C, which would leave us with just A -- not what
6863 * the user wanted. To prevent this, we make sure that the
6864 * parent/child relationship hasn't changed -- in this example,
6865 * that C's parent is still the replacing vdev R.
6867 if (pvd->vdev_guid != pguid && pguid != 0)
6868 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
6871 * Only 'replacing' or 'spare' vdevs can be replaced.
6873 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
6874 pvd->vdev_ops != &vdev_spare_ops)
6875 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6877 ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
6878 spa_version(spa) >= SPA_VERSION_SPARES);
6881 * Only mirror, replacing, and spare vdevs support detach.
6883 if (pvd->vdev_ops != &vdev_replacing_ops &&
6884 pvd->vdev_ops != &vdev_mirror_ops &&
6885 pvd->vdev_ops != &vdev_spare_ops)
6886 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6889 * If this device has the only valid copy of some data,
6890 * we cannot safely detach it.
6892 if (vdev_dtl_required(vd))
6893 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
6895 ASSERT(pvd->vdev_children >= 2);
6898 * If we are detaching the second disk from a replacing vdev, then
6899 * check to see if we changed the original vdev's path to have "/old"
6900 * at the end in spa_vdev_attach(). If so, undo that change now.
6902 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
6903 vd->vdev_path != NULL) {
6904 size_t len = strlen(vd->vdev_path);
6906 for (int c = 0; c < pvd->vdev_children; c++) {
6907 cvd = pvd->vdev_child[c];
6909 if (cvd == vd || cvd->vdev_path == NULL)
6912 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
6913 strcmp(cvd->vdev_path + len, "/old") == 0) {
6914 spa_strfree(cvd->vdev_path);
6915 cvd->vdev_path = spa_strdup(vd->vdev_path);
6922 * If we are detaching the original disk from a spare, then it implies
6923 * that the spare should become a real disk, and be removed from the
6924 * active spare list for the pool.
6926 if (pvd->vdev_ops == &vdev_spare_ops &&
6928 pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare)
6932 * Erase the disk labels so the disk can be used for other things.
6933 * This must be done after all other error cases are handled,
6934 * but before we disembowel vd (so we can still do I/O to it).
6935 * But if we can't do it, don't treat the error as fatal --
6936 * it may be that the unwritability of the disk is the reason
6937 * it's being detached!
6939 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
6942 * Remove vd from its parent and compact the parent's children.
6944 vdev_remove_child(pvd, vd);
6945 vdev_compact_children(pvd);
6948 * Remember one of the remaining children so we can get tvd below.
6950 cvd = pvd->vdev_child[pvd->vdev_children - 1];
6953 * If we need to remove the remaining child from the list of hot spares,
6954 * do it now, marking the vdev as no longer a spare in the process.
6955 * We must do this before vdev_remove_parent(), because that can
6956 * change the GUID if it creates a new toplevel GUID. For a similar
6957 * reason, we must remove the spare now, in the same txg as the detach;
6958 * otherwise someone could attach a new sibling, change the GUID, and
6959 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
6962 ASSERT(cvd->vdev_isspare);
6963 spa_spare_remove(cvd);
6964 unspare_guid = cvd->vdev_guid;
6965 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
6966 cvd->vdev_unspare = B_TRUE;
6970 * If the parent mirror/replacing vdev only has one child,
6971 * the parent is no longer needed. Remove it from the tree.
6973 if (pvd->vdev_children == 1) {
6974 if (pvd->vdev_ops == &vdev_spare_ops)
6975 cvd->vdev_unspare = B_FALSE;
6976 vdev_remove_parent(cvd);
6980 * We don't set tvd until now because the parent we just removed
6981 * may have been the previous top-level vdev.
6983 tvd = cvd->vdev_top;
6984 ASSERT(tvd->vdev_parent == rvd);
6987 * Reevaluate the parent vdev state.
6989 vdev_propagate_state(cvd);
6992 * If the 'autoexpand' property is set on the pool then automatically
6993 * try to expand the size of the pool. For example if the device we
6994 * just detached was smaller than the others, it may be possible to
6995 * add metaslabs (i.e. grow the pool). We need to reopen the vdev
6996 * first so that we can obtain the updated sizes of the leaf vdevs.
6998 if (spa->spa_autoexpand) {
7000 vdev_expand(tvd, txg);
7003 vdev_config_dirty(tvd);
7006 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
7007 * vd->vdev_detached is set and free vd's DTL object in syncing context.
7008 * But first make sure we're not on any *other* txg's DTL list, to
7009 * prevent vd from being accessed after it's freed.
7011 vdpath = spa_strdup(vd->vdev_path ? vd->vdev_path : "none");
7012 for (int t = 0; t < TXG_SIZE; t++)
7013 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
7014 vd->vdev_detached = B_TRUE;
7015 vdev_dirty(tvd, VDD_DTL, vd, txg);
7017 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
7018 spa_notify_waiters(spa);
7020 /* hang on to the spa before we release the lock */
7021 spa_open_ref(spa, FTAG);
7023 error = spa_vdev_exit(spa, vd, txg, 0);
7025 spa_history_log_internal(spa, "detach", NULL,
7027 spa_strfree(vdpath);
7030 * If this was the removal of the original device in a hot spare vdev,
7031 * then we want to go through and remove the device from the hot spare
7032 * list of every other pool.
7035 spa_t *altspa = NULL;
7037 mutex_enter(&spa_namespace_lock);
7038 while ((altspa = spa_next(altspa)) != NULL) {
7039 if (altspa->spa_state != POOL_STATE_ACTIVE ||
7043 spa_open_ref(altspa, FTAG);
7044 mutex_exit(&spa_namespace_lock);
7045 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
7046 mutex_enter(&spa_namespace_lock);
7047 spa_close(altspa, FTAG);
7049 mutex_exit(&spa_namespace_lock);
7051 /* search the rest of the vdevs for spares to remove */
7052 spa_vdev_resilver_done(spa);
7055 /* all done with the spa; OK to release */
7056 mutex_enter(&spa_namespace_lock);
7057 spa_close(spa, FTAG);
7058 mutex_exit(&spa_namespace_lock);
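/*
 * Sketch, not part of the original source: the lock-juggling idiom used
 * in the unspare loop above. While walking the namespace we take a
 * reference on each pool, drop spa_namespace_lock around the slow call,
 * then retake it before advancing. The do_work() callback is a
 * hypothetical stand-in for spa_vdev_remove().
 */
static void
for_each_active_spa(void (*do_work)(spa_t *))
{
	spa_t *s = NULL;
	mutex_enter(&spa_namespace_lock);
	while ((s = spa_next(s)) != NULL) {
		if (s->spa_state != POOL_STATE_ACTIVE)
			continue;
		spa_open_ref(s, FTAG);
		mutex_exit(&spa_namespace_lock);
		do_work(s);
		mutex_enter(&spa_namespace_lock);
		spa_close(s, FTAG);
	}
	mutex_exit(&spa_namespace_lock);
}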
7064 spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
7067 ASSERT(MUTEX_HELD(&spa_namespace_lock));
7069 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
7071 /* Look up vdev and ensure it's a leaf. */
7072 vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
7073 if (vd == NULL || vd->vdev_detached) {
7074 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7075 return (SET_ERROR(ENODEV));
7076 } else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
7077 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7078 return (SET_ERROR(EINVAL));
7079 } else if (!vdev_writeable(vd)) {
7080 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7081 return (SET_ERROR(EROFS));
7083 mutex_enter(&vd->vdev_initialize_lock);
7084 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7087 * When we activate an initialize action we check to see
7088 * if the vdev_initialize_thread is NULL. We do this instead
7089 * of using the vdev_initialize_state since there might be
7090 * a previous initialization process which has completed but
7091 * whose thread has not yet exited.
7093 if (cmd_type == POOL_INITIALIZE_START &&
7094 (vd->vdev_initialize_thread != NULL ||
7095 vd->vdev_top->vdev_removing)) {
7096 mutex_exit(&vd->vdev_initialize_lock);
7097 return (SET_ERROR(EBUSY));
7098 } else if (cmd_type == POOL_INITIALIZE_CANCEL &&
7099 (vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE &&
7100 vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED)) {
7101 mutex_exit(&vd->vdev_initialize_lock);
7102 return (SET_ERROR(ESRCH));
7103 } else if (cmd_type == POOL_INITIALIZE_SUSPEND &&
7104 vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE) {
7105 mutex_exit(&vd->vdev_initialize_lock);
7106 return (SET_ERROR(ESRCH));
7110 case POOL_INITIALIZE_START:
7111 vdev_initialize(vd);
7113 case POOL_INITIALIZE_CANCEL:
7114 vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, vd_list);
7116 case POOL_INITIALIZE_SUSPEND:
7117 vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED, vd_list);
7120 panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
7122 mutex_exit(&vd->vdev_initialize_lock);
7128 spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
7129 nvlist_t *vdev_errlist)
7131 int total_errors = 0;
7134 list_create(&vd_list, sizeof (vdev_t),
7135 offsetof(vdev_t, vdev_initialize_node));
7138 * We hold the namespace lock through the whole function
7139 * to prevent any changes to the pool while we're starting or
7140 * stopping initialization. The config and state locks are held so that
7141 * we can properly assess the vdev state before we commit to
7142 * the initializing operation.
7144 mutex_enter(&spa_namespace_lock);
7146 for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
7147 pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
7148 uint64_t vdev_guid = fnvpair_value_uint64(pair);
7150 int error = spa_vdev_initialize_impl(spa, vdev_guid, cmd_type,
7153 char guid_as_str[MAXNAMELEN];
7155 (void) snprintf(guid_as_str, sizeof (guid_as_str),
7156 "%llu", (unsigned long long)vdev_guid);
7157 fnvlist_add_int64(vdev_errlist, guid_as_str, error);
7162 /* Wait for all initialize threads to stop. */
7163 vdev_initialize_stop_wait(spa, &vd_list);
7165 /* Sync out the initializing state */
7166 txg_wait_synced(spa->spa_dsl_pool, 0);
7167 mutex_exit(&spa_namespace_lock);
7169 list_destroy(&vd_list);
7171 return (total_errors);
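/*
 * Illustrative sketch, not part of the original source: the nv argument
 * to spa_vdev_initialize() above is simply a set of uint64 vdev GUID
 * values; only the pair values are interpreted by the loop above. The
 * make_guid_list() helper and its "vdevN" keys are hypothetical.
 */
static nvlist_t *
make_guid_list(const uint64_t *guids, uint_t count)
{
	nvlist_t *nv = fnvlist_alloc();
	for (uint_t i = 0; i < count; i++) {
		char name[32];
		(void) snprintf(name, sizeof (name), "vdev%u", i);
		fnvlist_add_uint64(nv, name, guids[i]);
	}
	return (nv);
}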
7175 spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
7176 uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list)
7178 ASSERT(MUTEX_HELD(&spa_namespace_lock));
7180 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
7182 /* Look up vdev and ensure it's a leaf. */
7183 vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
7184 if (vd == NULL || vd->vdev_detached) {
7185 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7186 return (SET_ERROR(ENODEV));
7187 } else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
7188 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7189 return (SET_ERROR(EINVAL));
7190 } else if (!vdev_writeable(vd)) {
7191 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7192 return (SET_ERROR(EROFS));
7193 } else if (!vd->vdev_has_trim) {
7194 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7195 return (SET_ERROR(EOPNOTSUPP));
7196 } else if (secure && !vd->vdev_has_securetrim) {
7197 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7198 return (SET_ERROR(EOPNOTSUPP));
7200 mutex_enter(&vd->vdev_trim_lock);
7201 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7204 * When we activate a TRIM action we check to see if the
7205 * vdev_trim_thread is NULL. We do this instead of using the
7206 * vdev_trim_state since there might be a previous TRIM process
7207 * which has completed but whose thread has not yet exited.
7209 if (cmd_type == POOL_TRIM_START &&
7210 (vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing)) {
7211 mutex_exit(&vd->vdev_trim_lock);
7212 return (SET_ERROR(EBUSY));
7213 } else if (cmd_type == POOL_TRIM_CANCEL &&
7214 (vd->vdev_trim_state != VDEV_TRIM_ACTIVE &&
7215 vd->vdev_trim_state != VDEV_TRIM_SUSPENDED)) {
7216 mutex_exit(&vd->vdev_trim_lock);
7217 return (SET_ERROR(ESRCH));
7218 } else if (cmd_type == POOL_TRIM_SUSPEND &&
7219 vd->vdev_trim_state != VDEV_TRIM_ACTIVE) {
7220 mutex_exit(&vd->vdev_trim_lock);
7221 return (SET_ERROR(ESRCH));
7225 case POOL_TRIM_START:
7226 vdev_trim(vd, rate, partial, secure);
7228 case POOL_TRIM_CANCEL:
7229 vdev_trim_stop(vd, VDEV_TRIM_CANCELED, vd_list);
7231 case POOL_TRIM_SUSPEND:
7232 vdev_trim_stop(vd, VDEV_TRIM_SUSPENDED, vd_list);
7235 panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
7237 mutex_exit(&vd->vdev_trim_lock);
7243 * Initiates a manual TRIM for the requested vdevs. This kicks off individual
7244 * TRIM threads for each child vdev. These threads pass over all of the free
7245 * space in the vdev's metaslabs and issue TRIM commands for that space.
7248 spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate,
7249 boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist)
7251 int total_errors = 0;
7254 list_create(&vd_list, sizeof (vdev_t),
7255 offsetof(vdev_t, vdev_trim_node));
7258 * We hold the namespace lock through the whole function
7259 * to prevent any changes to the pool while we're starting or
7260 * stopping TRIM. The config and state locks are held so that
7261 * we can properly assess the vdev state before we commit to
7262 * the TRIM operation.
7264 mutex_enter(&spa_namespace_lock);
7266 for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
7267 pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
7268 uint64_t vdev_guid = fnvpair_value_uint64(pair);
7270 int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type,
7271 rate, partial, secure, &vd_list);
7273 char guid_as_str[MAXNAMELEN];
7275 (void) snprintf(guid_as_str, sizeof (guid_as_str),
7276 "%llu", (unsigned long long)vdev_guid);
7277 fnvlist_add_int64(vdev_errlist, guid_as_str, error);
7282 /* Wait for all TRIM threads to stop. */
7283 vdev_trim_stop_wait(spa, &vd_list);
7285 /* Sync out the TRIM state */
7286 txg_wait_synced(spa->spa_dsl_pool, 0);
7287 mutex_exit(&spa_namespace_lock);
7289 list_destroy(&vd_list);
7291 return (total_errors);
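/*
 * Sketch, not part of the original source: consuming the vdev_errlist
 * populated by spa_vdev_trim() and spa_vdev_initialize() above. Each
 * entry maps a vdev GUID, formatted in decimal, to the errno recorded
 * for that vdev. dump_vdev_errlist() is a hypothetical helper.
 */
static void
dump_vdev_errlist(nvlist_t *vdev_errlist)
{
	for (nvpair_t *p = nvlist_next_nvpair(vdev_errlist, NULL);
	    p != NULL; p = nvlist_next_nvpair(vdev_errlist, p)) {
		zfs_dbgmsg("vdev %s: error %lld", nvpair_name(p),
		    (longlong_t)fnvpair_value_int64(p));
	}
}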
7295 * Split a set of devices from their mirrors, and create a new pool from them.
7298 spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
7299 nvlist_t *props, boolean_t exp)
7302 uint64_t txg, *glist;
7304 uint_t c, children, lastlog;
7305 nvlist_t **child, *nvl, *tmp;
7307 char *altroot = NULL;
7308 vdev_t *rvd, **vml = NULL; /* vdev modify list */
7309 boolean_t activate_slog;
7311 ASSERT(spa_writeable(spa));
7313 txg = spa_vdev_enter(spa);
7315 ASSERT(MUTEX_HELD(&spa_namespace_lock));
7316 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
7317 error = (spa_has_checkpoint(spa)) ?
7318 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
7319 return (spa_vdev_exit(spa, NULL, txg, error));
7322 /* clear the log and flush everything up to now */
7323 activate_slog = spa_passivate_log(spa);
7324 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
7325 error = spa_reset_logs(spa);
7326 txg = spa_vdev_config_enter(spa);
7329 spa_activate_log(spa);
7332 return (spa_vdev_exit(spa, NULL, txg, error));
7334 /* check new spa name before going any further */
7335 if (spa_lookup(newname) != NULL)
7336 return (spa_vdev_exit(spa, NULL, txg, EEXIST));
7339 * scan through all the children to ensure they're all mirrors
7341 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
7342 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
7344 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
7346 /* first, check to ensure we've got the right child count */
7347 rvd = spa->spa_root_vdev;
7349 for (c = 0; c < rvd->vdev_children; c++) {
7350 vdev_t *vd = rvd->vdev_child[c];
7352 /* don't count the holes & logs as children */
7353 if (vd->vdev_islog || (vd->vdev_ops != &vdev_indirect_ops &&
7354 !vdev_is_concrete(vd))) {
7362 if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
7363 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
7365 /* next, ensure no spare or cache devices are part of the split */
7366 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
7367 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
7368 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
7370 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
7371 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
7373 /* then, loop over each vdev and validate it */
7374 for (c = 0; c < children; c++) {
7375 uint64_t is_hole = 0;
7377 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
7381 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
7382 spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
7385 error = SET_ERROR(EINVAL);
7390 /* deal with indirect vdevs */
7391 if (spa->spa_root_vdev->vdev_child[c]->vdev_ops ==
7395 /* which disk is going to be split? */
7396 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
7398 error = SET_ERROR(EINVAL);
7402 /* look it up in the spa */
7403 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
7404 if (vml[c] == NULL) {
7405 error = SET_ERROR(ENODEV);
7409 /* make sure there's nothing stopping the split */
7410 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
7411 vml[c]->vdev_islog ||
7412 !vdev_is_concrete(vml[c]) ||
7413 vml[c]->vdev_isspare ||
7414 vml[c]->vdev_isl2cache ||
7415 !vdev_writeable(vml[c]) ||
7416 vml[c]->vdev_children != 0 ||
7417 vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
7418 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
7419 error = SET_ERROR(EINVAL);
7423 if (vdev_dtl_required(vml[c]) ||
7424 vdev_resilver_needed(vml[c], NULL, NULL)) {
7425 error = SET_ERROR(EBUSY);
7429 /* we need certain info from the top level */
7430 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
7431 vml[c]->vdev_top->vdev_ms_array) == 0);
7432 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
7433 vml[c]->vdev_top->vdev_ms_shift) == 0);
7434 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
7435 vml[c]->vdev_top->vdev_asize) == 0);
7436 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
7437 vml[c]->vdev_top->vdev_ashift) == 0);
7439 /* transfer per-vdev ZAPs */
7440 ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
7441 VERIFY0(nvlist_add_uint64(child[c],
7442 ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
7444 ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
7445 VERIFY0(nvlist_add_uint64(child[c],
7446 ZPOOL_CONFIG_VDEV_TOP_ZAP,
7447 vml[c]->vdev_parent->vdev_top_zap));
7451 kmem_free(vml, children * sizeof (vdev_t *));
7452 kmem_free(glist, children * sizeof (uint64_t));
7453 return (spa_vdev_exit(spa, NULL, txg, error));
7456 /* stop writers from using the disks */
7457 for (c = 0; c < children; c++) {
7459 vml[c]->vdev_offline = B_TRUE;
7461 vdev_reopen(spa->spa_root_vdev);
7464 * Temporarily record the splitting vdevs in the spa config. This
7465 * will disappear once the config is regenerated.
7467 VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
7468 VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
7469 glist, children) == 0);
7470 kmem_free(glist, children * sizeof (uint64_t));
7472 mutex_enter(&spa->spa_props_lock);
7473 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
7475 mutex_exit(&spa->spa_props_lock);
7476 spa->spa_config_splitting = nvl;
7477 vdev_config_dirty(spa->spa_root_vdev);
7479 /* configure and create the new pool */
7480 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
7481 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
7482 exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
7483 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
7484 spa_version(spa)) == 0);
7485 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
7486 spa->spa_config_txg) == 0);
7487 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
7488 spa_generate_guid(NULL)) == 0);
7489 VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
7490 (void) nvlist_lookup_string(props,
7491 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
7493 /* add the new pool to the namespace */
7494 newspa = spa_add(newname, config, altroot);
7495 newspa->spa_avz_action = AVZ_ACTION_REBUILD;
7496 newspa->spa_config_txg = spa->spa_config_txg;
7497 spa_set_log_state(newspa, SPA_LOG_CLEAR);
7499 /* release the spa config lock, retaining the namespace lock */
7500 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
7502 if (zio_injection_enabled)
7503 zio_handle_panic_injection(spa, FTAG, 1);
7505 spa_activate(newspa, spa_mode_global);
7506 spa_async_suspend(newspa);
7509 * Temporarily stop the initializing and TRIM activity. We set the
7510 * state to ACTIVE so that we know to resume initializing or TRIM
7511 * once the split has completed.
7513 list_t vd_initialize_list;
7514 list_create(&vd_initialize_list, sizeof (vdev_t),
7515 offsetof(vdev_t, vdev_initialize_node));
7517 list_t vd_trim_list;
7518 list_create(&vd_trim_list, sizeof (vdev_t),
7519 offsetof(vdev_t, vdev_trim_node));
7521 for (c = 0; c < children; c++) {
7522 if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
7523 mutex_enter(&vml[c]->vdev_initialize_lock);
7524 vdev_initialize_stop(vml[c],
7525 VDEV_INITIALIZE_ACTIVE, &vd_initialize_list);
7526 mutex_exit(&vml[c]->vdev_initialize_lock);
7528 mutex_enter(&vml[c]->vdev_trim_lock);
7529 vdev_trim_stop(vml[c], VDEV_TRIM_ACTIVE, &vd_trim_list);
7530 mutex_exit(&vml[c]->vdev_trim_lock);
7534 vdev_initialize_stop_wait(spa, &vd_initialize_list);
7535 vdev_trim_stop_wait(spa, &vd_trim_list);
7537 list_destroy(&vd_initialize_list);
7538 list_destroy(&vd_trim_list);
7540 newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT;
7541 newspa->spa_is_splitting = B_TRUE;
7543 /* create the new pool from the disks of the original pool */
7544 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE);
7548 /* if that worked, generate a real config for the new pool */
7549 if (newspa->spa_root_vdev != NULL) {
7550 VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
7551 NV_UNIQUE_NAME, KM_SLEEP) == 0);
7552 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
7553 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
7554 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
7559 if (props != NULL) {
7560 spa_configfile_set(newspa, props, B_FALSE);
7561 error = spa_prop_set(newspa, props);
7566 /* flush everything */
7567 txg = spa_vdev_config_enter(newspa);
7568 vdev_config_dirty(newspa->spa_root_vdev);
7569 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
7571 if (zio_injection_enabled)
7572 zio_handle_panic_injection(spa, FTAG, 2);
7574 spa_async_resume(newspa);
7576 /* finally, update the original pool's config */
7577 txg = spa_vdev_config_enter(spa);
7578 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
7579 error = dmu_tx_assign(tx, TXG_WAIT);
7582 for (c = 0; c < children; c++) {
7583 if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
7584 vdev_t *tvd = vml[c]->vdev_top;
7587 * Need to be sure the detachable VDEV is not
7588 * on any *other* txg's DTL list to prevent it
7589 * from being accessed after it's freed.
7591 for (int t = 0; t < TXG_SIZE; t++) {
7592 (void) txg_list_remove_this(
7593 &tvd->vdev_dtl_list, vml[c], t);
7598 spa_history_log_internal(spa, "detach", tx,
7599 "vdev=%s", vml[c]->vdev_path);
7604 spa->spa_avz_action = AVZ_ACTION_REBUILD;
7605 vdev_config_dirty(spa->spa_root_vdev);
7606 spa->spa_config_splitting = NULL;
7610 (void) spa_vdev_exit(spa, NULL, txg, 0);
7612 if (zio_injection_enabled)
7613 zio_handle_panic_injection(spa, FTAG, 3);
7615 /* split is complete; log a history record */
7616 spa_history_log_internal(newspa, "split", NULL,
7617 "from pool %s", spa_name(spa));
7619 newspa->spa_is_splitting = B_FALSE;
7620 kmem_free(vml, children * sizeof (vdev_t *));
7622 /* if we're not going to mount the filesystems in userland, export */
7624 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
7631 spa_deactivate(newspa);
7634 txg = spa_vdev_config_enter(spa);
7636 /* re-online all offlined disks */
7637 for (c = 0; c < children; c++) {
7639 vml[c]->vdev_offline = B_FALSE;
7642 /* restart initializing or trimming disks as necessary */
7643 spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
7644 spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
7645 spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
7647 vdev_reopen(spa->spa_root_vdev);
7649 nvlist_free(spa->spa_config_splitting);
7650 spa->spa_config_splitting = NULL;
7651 (void) spa_vdev_exit(spa, NULL, txg, error);
7653 kmem_free(vml, children * sizeof (vdev_t *));
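/*
 * Sketch, not part of the original source: the shape of the transient
 * "splitting" annotation recorded in the parent pool's config by
 * spa_vdev_split_mirror() above -- a uint64 array of the GUIDs being
 * split off, which disappears when the config is next regenerated.
 * make_split_annotation() is a hypothetical helper.
 */
static nvlist_t *
make_split_annotation(uint64_t *glist, uint_t children)
{
	nvlist_t *nvl = fnvlist_alloc();
	fnvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, glist,
	    children);
	return (nvl);
}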
7658 * Find any device that's done replacing, or a vdev marked 'unspare' that's
7659 * currently spared, so we can detach it.
7662 spa_vdev_resilver_done_hunt(vdev_t *vd)
7664 vdev_t *newvd, *oldvd;
7666 for (int c = 0; c < vd->vdev_children; c++) {
7667 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
7673 * Check for a completed replacement. We always consider the first
7674 * vdev in the list to be the oldest vdev, and the last one to be
7675 * the newest (see spa_vdev_attach() for how that works). In
7676 * the case where the newest vdev is faulted, we will not automatically
7677 * remove it after a resilver completes. This is OK as it will require
7678 * user intervention to determine which disk the admin wishes to keep.
7680 if (vd->vdev_ops == &vdev_replacing_ops) {
7681 ASSERT(vd->vdev_children > 1);
7683 newvd = vd->vdev_child[vd->vdev_children - 1];
7684 oldvd = vd->vdev_child[0];
7686 if (vdev_dtl_empty(newvd, DTL_MISSING) &&
7687 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
7688 !vdev_dtl_required(oldvd))
7693 * Check for a completed resilver with the 'unspare' flag set.
7694 * Also potentially update faulted state.
7696 if (vd->vdev_ops == &vdev_spare_ops) {
7697 vdev_t *first = vd->vdev_child[0];
7698 vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
7700 if (last->vdev_unspare) {
7703 } else if (first->vdev_unspare) {
7710 if (oldvd != NULL &&
7711 vdev_dtl_empty(newvd, DTL_MISSING) &&
7712 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
7713 !vdev_dtl_required(oldvd))
7716 vdev_propagate_state(vd);
7719 * If there are more than two spares attached to a disk,
7720 * and those spares are not required, then we want to
7721 * attempt to free them up now so that they can be used
7722 * by other pools. Once we're back down to a single
7723 * disk+spare, we stop removing them.
7725 if (vd->vdev_children > 2) {
7726 newvd = vd->vdev_child[1];
7728 if (newvd->vdev_isspare && last->vdev_isspare &&
7729 vdev_dtl_empty(last, DTL_MISSING) &&
7730 vdev_dtl_empty(last, DTL_OUTAGE) &&
7731 !vdev_dtl_required(newvd))
7740 spa_vdev_resilver_done(spa_t *spa)
7742 vdev_t *vd, *pvd, *ppvd;
7743 uint64_t guid, sguid, pguid, ppguid;
7745 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7747 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
7748 pvd = vd->vdev_parent;
7749 ppvd = pvd->vdev_parent;
7750 guid = vd->vdev_guid;
7751 pguid = pvd->vdev_guid;
7752 ppguid = ppvd->vdev_guid;
7755 * If we have just finished replacing a hot spared device, then
7756 * we need to detach the parent's first child (the original hot spare).
7759 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
7760 ppvd->vdev_children == 2) {
7761 ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
7762 sguid = ppvd->vdev_child[1]->vdev_guid;
7764 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
7766 spa_config_exit(spa, SCL_ALL, FTAG);
7767 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
7769 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
7771 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7774 spa_config_exit(spa, SCL_ALL, FTAG);
7777 * If a detach was not performed above, replace waiters will not have
7778 * been notified, in which case we must do so now.
7780 spa_notify_waiters(spa);
7784 * Update the stored path or FRU for this vdev.
7787 spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
7791 boolean_t sync = B_FALSE;
7793 ASSERT(spa_writeable(spa));
7795 spa_vdev_state_enter(spa, SCL_ALL);
7797 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
7798 return (spa_vdev_state_exit(spa, NULL, ENOENT));
7800 if (!vd->vdev_ops->vdev_op_leaf)
7801 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
7804 if (strcmp(value, vd->vdev_path) != 0) {
7805 spa_strfree(vd->vdev_path);
7806 vd->vdev_path = spa_strdup(value);
7810 if (vd->vdev_fru == NULL) {
7811 vd->vdev_fru = spa_strdup(value);
7813 } else if (strcmp(value, vd->vdev_fru) != 0) {
7814 spa_strfree(vd->vdev_fru);
7815 vd->vdev_fru = spa_strdup(value);
7820 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
7824 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
7826 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
7830 spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
7832 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
7836 * ==========================================================================
7838 * ==========================================================================
7841 spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
7843 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
7845 if (dsl_scan_resilvering(spa->spa_dsl_pool))
7846 return (SET_ERROR(EBUSY));
7848 return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd));
7852 spa_scan_stop(spa_t *spa)
7854 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
7855 if (dsl_scan_resilvering(spa->spa_dsl_pool))
7856 return (SET_ERROR(EBUSY));
7857 return (dsl_scan_cancel(spa->spa_dsl_pool));
7861 spa_scan(spa_t *spa, pool_scan_func_t func)
7863 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
7865 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
7866 return (SET_ERROR(ENOTSUP));
7868 if (func == POOL_SCAN_RESILVER &&
7869 !spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
7870 return (SET_ERROR(ENOTSUP));
7873 * If a resilver was requested, but there is no DTL on a
7874 * writeable leaf device, we have nothing to do.
7876 if (func == POOL_SCAN_RESILVER &&
7877 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
7878 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
7882 return (dsl_scan(spa->spa_dsl_pool, func));
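/*
 * Sketch, not part of the original source: minimal callers of the scan
 * entry point above. kick_scrub() starts a scrub; kick_resilver()
 * requires the resilver_defer feature and, when no writeable leaf has a
 * DTL, is short-circuited by spa_scan() into an async RESILVER_DONE
 * instead of a scan. Both helper names are hypothetical.
 */
static int
kick_scrub(spa_t *spa)
{
	return (spa_scan(spa, POOL_SCAN_SCRUB));
}
static int
kick_resilver(spa_t *spa)
{
	return (spa_scan(spa, POOL_SCAN_RESILVER));
}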
7886 * ==========================================================================
7887 * SPA async task processing
7888 * ==========================================================================
7892 spa_async_remove(spa_t *spa, vdev_t *vd)
7894 if (vd->vdev_remove_wanted) {
7895 vd->vdev_remove_wanted = B_FALSE;
7896 vd->vdev_delayed_close = B_FALSE;
7897 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
7900 * We want to clear the stats, but we don't want to do a full
7901 * vdev_clear() as that will cause us to throw away
7902 * degraded/faulted state as well as attempt to reopen the
7903 * device, all of which is a waste.
7905 vd->vdev_stat.vs_read_errors = 0;
7906 vd->vdev_stat.vs_write_errors = 0;
7907 vd->vdev_stat.vs_checksum_errors = 0;
7909 vdev_state_dirty(vd->vdev_top);
7912 for (int c = 0; c < vd->vdev_children; c++)
7913 spa_async_remove(spa, vd->vdev_child[c]);
7917 spa_async_probe(spa_t *spa, vdev_t *vd)
7919 if (vd->vdev_probe_wanted) {
7920 vd->vdev_probe_wanted = B_FALSE;
7921 vdev_reopen(vd); /* vdev_open() does the actual probe */
7924 for (int c = 0; c < vd->vdev_children; c++)
7925 spa_async_probe(spa, vd->vdev_child[c]);
7929 spa_async_autoexpand(spa_t *spa, vdev_t *vd)
7931 if (!spa->spa_autoexpand)
7934 for (int c = 0; c < vd->vdev_children; c++) {
7935 vdev_t *cvd = vd->vdev_child[c];
7936 spa_async_autoexpand(spa, cvd);
7939 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
7942 spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_AUTOEXPAND);
7946 spa_async_thread(void *arg)
7948 spa_t *spa = (spa_t *)arg;
7949 dsl_pool_t *dp = spa->spa_dsl_pool;
7952 ASSERT(spa->spa_sync_on);
7954 mutex_enter(&spa->spa_async_lock);
7955 tasks = spa->spa_async_tasks;
7956 spa->spa_async_tasks = 0;
7957 mutex_exit(&spa->spa_async_lock);
7960 * See if the config needs to be updated.
7962 if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
7963 uint64_t old_space, new_space;
7965 mutex_enter(&spa_namespace_lock);
7966 old_space = metaslab_class_get_space(spa_normal_class(spa));
7967 old_space += metaslab_class_get_space(spa_special_class(spa));
7968 old_space += metaslab_class_get_space(spa_dedup_class(spa));
7970 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
7972 new_space = metaslab_class_get_space(spa_normal_class(spa));
7973 new_space += metaslab_class_get_space(spa_special_class(spa));
7974 new_space += metaslab_class_get_space(spa_dedup_class(spa));
7975 mutex_exit(&spa_namespace_lock);
7978 * If the pool grew as a result of the config update,
7979 * then log an internal history event.
7981 if (new_space != old_space) {
7982 spa_history_log_internal(spa, "vdev online", NULL,
7983 "pool '%s' size: %llu(+%llu)",
7984 spa_name(spa), (u_longlong_t)new_space,
7985 (u_longlong_t)(new_space - old_space));
7990 * See if any devices need to be marked REMOVED.
7992 if (tasks & SPA_ASYNC_REMOVE) {
7993 spa_vdev_state_enter(spa, SCL_NONE);
7994 spa_async_remove(spa, spa->spa_root_vdev);
7995 for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
7996 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
7997 for (int i = 0; i < spa->spa_spares.sav_count; i++)
7998 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
7999 (void) spa_vdev_state_exit(spa, NULL, 0);
8002 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
8003 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
8004 spa_async_autoexpand(spa, spa->spa_root_vdev);
8005 spa_config_exit(spa, SCL_CONFIG, FTAG);
8009 * See if any devices need to be probed.
8011 if (tasks & SPA_ASYNC_PROBE) {
8012 spa_vdev_state_enter(spa, SCL_NONE);
8013 spa_async_probe(spa, spa->spa_root_vdev);
8014 (void) spa_vdev_state_exit(spa, NULL, 0);
8018 * If any devices are done replacing, detach them.
8020 if (tasks & SPA_ASYNC_RESILVER_DONE)
8021 spa_vdev_resilver_done(spa);
8024 * If any devices are done replacing, detach them. Then if no
8025 * top-level vdevs are rebuilding, attempt to kick off a scrub.
8027 if (tasks & SPA_ASYNC_REBUILD_DONE) {
8028 spa_vdev_resilver_done(spa);
8030 if (!vdev_rebuild_active(spa->spa_root_vdev))
8031 (void) dsl_scan(spa->spa_dsl_pool, POOL_SCAN_SCRUB);
8035 * Kick off a resilver.
8037 if (tasks & SPA_ASYNC_RESILVER &&
8038 !vdev_rebuild_active(spa->spa_root_vdev) &&
8039 (!dsl_scan_resilvering(dp) ||
8040 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)))
8041 dsl_scan_restart_resilver(dp, 0);
8043 if (tasks & SPA_ASYNC_INITIALIZE_RESTART) {
8044 mutex_enter(&spa_namespace_lock);
8045 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
8046 vdev_initialize_restart(spa->spa_root_vdev);
8047 spa_config_exit(spa, SCL_CONFIG, FTAG);
8048 mutex_exit(&spa_namespace_lock);
8051 if (tasks & SPA_ASYNC_TRIM_RESTART) {
8052 mutex_enter(&spa_namespace_lock);
8053 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
8054 vdev_trim_restart(spa->spa_root_vdev);
8055 spa_config_exit(spa, SCL_CONFIG, FTAG);
8056 mutex_exit(&spa_namespace_lock);
8059 if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) {
8060 mutex_enter(&spa_namespace_lock);
8061 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
8062 vdev_autotrim_restart(spa);
8063 spa_config_exit(spa, SCL_CONFIG, FTAG);
8064 mutex_exit(&spa_namespace_lock);
8068 * Kick off L2 cache whole device TRIM.
8070 if (tasks & SPA_ASYNC_L2CACHE_TRIM) {
8071 mutex_enter(&spa_namespace_lock);
8072 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
8073 vdev_trim_l2arc(spa);
8074 spa_config_exit(spa, SCL_CONFIG, FTAG);
8075 mutex_exit(&spa_namespace_lock);
8079 * Kick off L2 cache rebuilding.
8081 if (tasks & SPA_ASYNC_L2CACHE_REBUILD) {
8082 mutex_enter(&spa_namespace_lock);
8083 spa_config_enter(spa, SCL_L2ARC, FTAG, RW_READER);
8084 l2arc_spa_rebuild_start(spa);
8085 spa_config_exit(spa, SCL_L2ARC, FTAG);
8086 mutex_exit(&spa_namespace_lock);
8090 * Let the world know that we're done.
8092 mutex_enter(&spa->spa_async_lock);
8093 spa->spa_async_thread = NULL;
8094 cv_broadcast(&spa->spa_async_cv);
8095 mutex_exit(&spa->spa_async_lock);
8100 spa_async_suspend(spa_t *spa)
8102 mutex_enter(&spa->spa_async_lock);
8103 spa->spa_async_suspended++;
8104 while (spa->spa_async_thread != NULL)
8105 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
8106 mutex_exit(&spa->spa_async_lock);
8108 spa_vdev_remove_suspend(spa);
8110 zthr_t *condense_thread = spa->spa_condense_zthr;
8111 if (condense_thread != NULL)
8112 zthr_cancel(condense_thread);
8114 zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
8115 if (discard_thread != NULL)
8116 zthr_cancel(discard_thread);
8118 zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
8119 if (ll_delete_thread != NULL)
8120 zthr_cancel(ll_delete_thread);
8122 zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
8123 if (ll_condense_thread != NULL)
8124 zthr_cancel(ll_condense_thread);
8128 spa_async_resume(spa_t *spa)
8130 mutex_enter(&spa->spa_async_lock);
8131 ASSERT(spa->spa_async_suspended != 0);
8132 spa->spa_async_suspended--;
8133 mutex_exit(&spa->spa_async_lock);
8134 spa_restart_removal(spa);
8136 zthr_t *condense_thread = spa->spa_condense_zthr;
8137 if (condense_thread != NULL)
8138 zthr_resume(condense_thread);
8140 zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
8141 if (discard_thread != NULL)
8142 zthr_resume(discard_thread);
8144 zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
8145 if (ll_delete_thread != NULL)
8146 zthr_resume(ll_delete_thread);
8148 zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
8149 if (ll_condense_thread != NULL)
8150 zthr_resume(ll_condense_thread);
8154 spa_async_tasks_pending(spa_t *spa)
8156 uint_t non_config_tasks;
8158 boolean_t config_task_suspended;
8160 non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
8161 config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
8162 if (spa->spa_ccw_fail_time == 0) {
8163 config_task_suspended = B_FALSE;
8165 config_task_suspended =
8166 (gethrtime() - spa->spa_ccw_fail_time) <
8167 ((hrtime_t)zfs_ccw_retry_interval * NANOSEC);
8170 return (non_config_tasks || (config_task && !config_task_suspended));
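/*
 * Sketch, not part of the original source: the suspension arithmetic
 * above, isolated. With the default zfs_ccw_retry_interval of 300
 * seconds, a config-cache write that failed at hrtime T keeps
 * SPA_ASYNC_CONFIG_UPDATE suspended until gethrtime() reaches
 * T + 300 * NANOSEC. ccw_retry_due() is a hypothetical helper.
 */
static boolean_t
ccw_retry_due(hrtime_t ccw_fail_time)
{
	if (ccw_fail_time == 0)
		return (B_TRUE);
	return (gethrtime() - ccw_fail_time >=
	    (hrtime_t)zfs_ccw_retry_interval * NANOSEC);
}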
8174 spa_async_dispatch(spa_t *spa)
8176 mutex_enter(&spa->spa_async_lock);
8177 if (spa_async_tasks_pending(spa) &&
8178 !spa->spa_async_suspended &&
8179 spa->spa_async_thread == NULL)
8180 spa->spa_async_thread = thread_create(NULL, 0,
8181 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
8182 mutex_exit(&spa->spa_async_lock);
8186 spa_async_request(spa_t *spa, int task)
8188 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
8189 mutex_enter(&spa->spa_async_lock);
8190 spa->spa_async_tasks |= task;
8191 mutex_exit(&spa->spa_async_lock);
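/*
 * Sketch, not part of the original source: the async-task pattern used
 * throughout this file. A producer sets a task bit with
 * spa_async_request(); spa_async_dispatch() later spawns the worker
 * only if tasks are pending, the spa is not suspended, and no worker is
 * already running. request_and_kick() is a hypothetical helper.
 */
static void
request_and_kick(spa_t *spa, int task)
{
	spa_async_request(spa, task);
	spa_async_dispatch(spa);
}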
8195 spa_async_tasks(spa_t *spa)
8197 return (spa->spa_async_tasks);
8201 * ==========================================================================
8202 * SPA syncing routines
8203 * ==========================================================================
8208 bpobj_enqueue_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
8212 bpobj_enqueue(bpo, bp, bp_freed, tx);
8217 bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
8219 return (bpobj_enqueue_cb(arg, bp, B_FALSE, tx));
8223 bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
8225 return (bpobj_enqueue_cb(arg, bp, B_TRUE, tx));
8229 spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
8233 zio_nowait(zio_free_sync(pio, pio->io_spa, dmu_tx_get_txg(tx), bp,
8239 bpobj_spa_free_sync_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
8243 return (spa_free_sync_cb(arg, bp, tx));
8247 * Note: this simple function is not inlined to make it easier to dtrace the
8248 * amount of time spent syncing frees.
8251 spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
8253 zio_t *zio = zio_root(spa, NULL, NULL, 0);
8254 bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
8255 VERIFY(zio_wait(zio) == 0);
8259 * Note: this simple function is not inlined to make it easier to dtrace the
8260 * amount of time spent syncing deferred frees.
8263 spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
8265 if (spa_sync_pass(spa) != 1)
8270 * If the log space map feature is active, we stop deferring
8271 * frees to the next TXG and therefore running this function
8272 * would be considered a no-op as spa_deferred_bpobj should
8273 * not have any entries.
8275 * That said, we run this function anyway (instead of returning
8276 * immediately) for the edge-case scenario where we just
8277 * activated the log space map feature in this TXG but we have
8278 * deferred frees from the previous TXG.
8280 zio_t *zio = zio_root(spa, NULL, NULL, 0);
8281 VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
8282 bpobj_spa_free_sync_cb, zio, tx), ==, 0);
8283 VERIFY0(zio_wait(zio));
8287 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
8289 char *packed = NULL;
8294 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
8297 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
8298 * information. This avoids the dmu_buf_will_dirty() path and
8299 * saves us a pre-read to get data we don't actually care about.
8301 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
8302 packed = vmem_alloc(bufsize, KM_SLEEP);
8304 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
8306 bzero(packed + nvsize, bufsize - nvsize);
8308 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
8310 vmem_free(packed, bufsize);
8312 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
8313 dmu_buf_will_dirty(db, tx);
8314 *(uint64_t *)db->db_data = nvsize;
8315 dmu_buf_rele(db, FTAG);
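/*
 * Sketch, not part of the original source: the buffer sizing used by
 * spa_sync_nvlist() above. Writing whole SPA_CONFIG_BLOCKSIZE blocks
 * avoids dirtying (and therefore pre-reading) a partial tail block;
 * e.g. an nvsize of 5000 with a 16K block size rounds up to 16384.
 * config_buf_size() is a hypothetical helper.
 */
static uint64_t
config_buf_size(size_t nvsize)
{
	return (P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE));
}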
8319 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
8320 const char *config, const char *entry)
8330 * Update the MOS nvlist describing the list of available devices.
8331 * spa_validate_aux() will have already made sure this nvlist is
8332 * valid and the vdevs are labeled appropriately.
8334 if (sav->sav_object == 0) {
8335 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
8336 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
8337 sizeof (uint64_t), tx);
8338 VERIFY(zap_update(spa->spa_meta_objset,
8339 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
8340 &sav->sav_object, tx) == 0);
8343 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
8344 if (sav->sav_count == 0) {
8345 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
8347 list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP);
8348 for (i = 0; i < sav->sav_count; i++)
8349 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
8350 B_FALSE, VDEV_CONFIG_L2CACHE);
8351 VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
8352 sav->sav_count) == 0);
8353 for (i = 0; i < sav->sav_count; i++)
8354 nvlist_free(list[i]);
8355 kmem_free(list, sav->sav_count * sizeof (void *));
8358 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
8359 nvlist_free(nvroot);
8361 sav->sav_sync = B_FALSE;
8365 * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
8366 * The all-vdev ZAP must be empty.
8369 spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
8371 spa_t *spa = vd->vdev_spa;
8373 if (vd->vdev_top_zap != 0) {
8374 VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
8375 vd->vdev_top_zap, tx));
8377 if (vd->vdev_leaf_zap != 0) {
8378 VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
8379 vd->vdev_leaf_zap, tx));
8381 for (uint64_t i = 0; i < vd->vdev_children; i++) {
8382 spa_avz_build(vd->vdev_child[i], avz, tx);
8387 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
8392 * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
8393 * its config may not be dirty but we still need to build per-vdev ZAPs.
8394 * Similarly, if the pool is being assembled (e.g. after a split), we
8395 * need to rebuild the AVZ although the config may not be dirty.
8397 if (list_is_empty(&spa->spa_config_dirty_list) &&
8398 spa->spa_avz_action == AVZ_ACTION_NONE)
8401 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
8403 ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
8404 spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
8405 spa->spa_all_vdev_zaps != 0);
8407 if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
8408 /* Make and build the new AVZ */
8409 uint64_t new_avz = zap_create(spa->spa_meta_objset,
8410 DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
8411 spa_avz_build(spa->spa_root_vdev, new_avz, tx);
8413 /* Diff old AVZ with new one */
8417 for (zap_cursor_init(&zc, spa->spa_meta_objset,
8418 spa->spa_all_vdev_zaps);
8419 zap_cursor_retrieve(&zc, &za) == 0;
8420 zap_cursor_advance(&zc)) {
8421 uint64_t vdzap = za.za_first_integer;
8422 if (zap_lookup_int(spa->spa_meta_objset, new_avz,
8425 * ZAP is listed in old AVZ but not in new one; destroy it.
8428 VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
8433 zap_cursor_fini(&zc);
8435 /* Destroy the old AVZ */
8436 VERIFY0(zap_destroy(spa->spa_meta_objset,
8437 spa->spa_all_vdev_zaps, tx));
8439 /* Replace the old AVZ in the dir obj with the new one */
8440 VERIFY0(zap_update(spa->spa_meta_objset,
8441 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
8442 sizeof (new_avz), 1, &new_avz, tx));
8444 spa->spa_all_vdev_zaps = new_avz;
8445 } else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
8449 /* Walk through the AVZ and destroy all listed ZAPs */
8450 for (zap_cursor_init(&zc, spa->spa_meta_objset,
8451 spa->spa_all_vdev_zaps);
8452 zap_cursor_retrieve(&zc, &za) == 0;
8453 zap_cursor_advance(&zc)) {
8454 uint64_t zap = za.za_first_integer;
8455 VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
8458 zap_cursor_fini(&zc);
8460 /* Destroy and unlink the AVZ itself */
8461 VERIFY0(zap_destroy(spa->spa_meta_objset,
8462 spa->spa_all_vdev_zaps, tx));
8463 VERIFY0(zap_remove(spa->spa_meta_objset,
8464 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
8465 spa->spa_all_vdev_zaps = 0;
8468 if (spa->spa_all_vdev_zaps == 0) {
8469 spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
8470 DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
8471 DMU_POOL_VDEV_ZAP_MAP, tx);
8473 spa->spa_avz_action = AVZ_ACTION_NONE;
8475 /* Create ZAPs for vdevs that don't have them. */
8476 vdev_construct_zaps(spa->spa_root_vdev, tx);
8478 config = spa_config_generate(spa, spa->spa_root_vdev,
8479 dmu_tx_get_txg(tx), B_FALSE);
8482 * If we're upgrading the spa version then make sure that
8483 * the config object gets updated with the correct version.
8485 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
8486 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
8487 spa->spa_uberblock.ub_version);
8489 spa_config_exit(spa, SCL_STATE, FTAG);
8491 nvlist_free(spa->spa_config_syncing);
8492 spa->spa_config_syncing = config;
8494 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);

static void
spa_sync_version(void *arg, dmu_tx_t *tx)
{
	uint64_t *versionp = arg;
	uint64_t version = *versionp;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	/*
	 * Setting the version is special cased when first creating the pool.
	 */
	ASSERT(tx->tx_txg != TXG_INITIAL);

	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
	ASSERT(version >= spa_version(spa));

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);
	spa_history_log_internal(spa, "set", tx, "version=%lld",
	    (longlong_t)version);
}

/*
 * Set zpool properties.
 */
static void
spa_sync_props(void *arg, dmu_tx_t *tx)
{
	nvlist_t *nvp = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	objset_t *mos = spa->spa_meta_objset;
	nvpair_t *elem = NULL;

	mutex_enter(&spa->spa_props_lock);

	while ((elem = nvlist_next_nvpair(nvp, elem))) {
		uint64_t intval;
		char *strval, *fname;
		zpool_prop_t prop;
		const char *propname;
		zprop_type_t proptype;
		spa_feature_t fid;

		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
		case ZPOOL_PROP_INVAL:
			/*
			 * We checked this earlier in spa_prop_validate().
			 */
			ASSERT(zpool_prop_feature(nvpair_name(elem)));

			fname = strchr(nvpair_name(elem), '@') + 1;
			VERIFY0(zfeature_lookup_name(fname, &fid));

			spa_feature_enable(spa, fid, tx);
			spa_history_log_internal(spa, "set", tx,
			    "%s=enabled", nvpair_name(elem));
			break;

		case ZPOOL_PROP_VERSION:
			intval = fnvpair_value_uint64(elem);
			/*
			 * The version is synced separately before other
			 * properties and should be correct by now.
			 */
			ASSERT3U(spa_version(spa), >=, intval);
			break;

		case ZPOOL_PROP_ALTROOT:
			/*
			 * 'altroot' is a non-persistent property. It should
			 * have been set temporarily at creation or import time.
			 */
			ASSERT(spa->spa_root != NULL);
			break;

		case ZPOOL_PROP_READONLY:
		case ZPOOL_PROP_CACHEFILE:
			/*
			 * 'readonly' and 'cachefile' are also non-persistent
			 * properties.
			 */
			break;
		case ZPOOL_PROP_COMMENT:
			strval = fnvpair_value_string(elem);
			if (spa->spa_comment != NULL)
				spa_strfree(spa->spa_comment);
			spa->spa_comment = spa_strdup(strval);
			/*
			 * We need to dirty the configuration on all the vdevs
			 * so that their labels get updated. It's unnecessary
			 * to do this for pool creation since the vdev's
			 * configuration has already been dirtied.
			 */
			if (tx->tx_txg != TXG_INITIAL)
				vdev_config_dirty(spa->spa_root_vdev);
			spa_history_log_internal(spa, "set", tx,
			    "%s=%s", nvpair_name(elem), strval);
			break;
		default:
			/*
			 * Set pool property values in the poolprops mos object.
			 */
			if (spa->spa_pool_props_object == 0) {
				spa->spa_pool_props_object =
				    zap_create_link(mos, DMU_OT_POOL_PROPS,
				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
				    tx);
			}

			/* normalize the property name */
			propname = zpool_prop_to_name(prop);
			proptype = zpool_prop_get_type(prop);

			if (nvpair_type(elem) == DATA_TYPE_STRING) {
				ASSERT(proptype == PROP_TYPE_STRING);
				strval = fnvpair_value_string(elem);
				VERIFY0(zap_update(mos,
				    spa->spa_pool_props_object, propname,
				    1, strlen(strval) + 1, strval, tx));
				spa_history_log_internal(spa, "set", tx,
				    "%s=%s", nvpair_name(elem), strval);
			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
				intval = fnvpair_value_uint64(elem);

				if (proptype == PROP_TYPE_INDEX) {
					const char *unused;
					VERIFY0(zpool_prop_index_to_string(
					    prop, intval, &unused));
				}
				VERIFY0(zap_update(mos,
				    spa->spa_pool_props_object, propname,
				    8, 1, &intval, tx));
				spa_history_log_internal(spa, "set", tx,
				    "%s=%lld", nvpair_name(elem),
				    (longlong_t)intval);
			} else {
				ASSERT(0); /* not allowed */
			}

			switch (prop) {
			case ZPOOL_PROP_DELEGATION:
				spa->spa_delegation = intval;
				break;
			case ZPOOL_PROP_BOOTFS:
				spa->spa_bootfs = intval;
				break;
			case ZPOOL_PROP_FAILUREMODE:
				spa->spa_failmode = intval;
				break;
			case ZPOOL_PROP_AUTOTRIM:
				spa->spa_autotrim = intval;
				spa_async_request(spa,
				    SPA_ASYNC_AUTOTRIM_RESTART);
				break;
			case ZPOOL_PROP_AUTOEXPAND:
				spa->spa_autoexpand = intval;
				if (tx->tx_txg != TXG_INITIAL)
					spa_async_request(spa,
					    SPA_ASYNC_AUTOEXPAND);
				break;
			case ZPOOL_PROP_MULTIHOST:
				spa->spa_multihost = intval;
				break;
			default:
				break;
			}
		}
	}

	mutex_exit(&spa->spa_props_lock);
}

/*
 * Perform one-time upgrade on-disk changes. spa_version() does not
 * reflect the new version this txg, so there must be no changes this
 * txg to anything that the upgrade code depends on after it executes.
 * Therefore this must be called after dsl_pool_sync() does the sync
 * tasks.
 */
static void
spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
{
	if (spa_sync_pass(spa) != 1)
		return;

	dsl_pool_t *dp = spa->spa_dsl_pool;
	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);

	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
		dsl_pool_create_origin(dp, tx);

		/* Keeping the origin open increases spa_minref */
		spa->spa_minref += 3;
	}

	if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
		dsl_pool_upgrade_clones(dp, tx);
	}

	if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
		dsl_pool_upgrade_dir_clones(dp, tx);

		/* Keeping the freedir open increases spa_minref */
		spa->spa_minref += 3;
	}

	if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
		spa_feature_create_zap_objects(spa, tx);
	}

	/*
	 * The LZ4_COMPRESS feature's behavior was changed to
	 * activate_on_enable when the ability to use lz4 compression for
	 * metadata was added. Old pools that have this feature enabled must
	 * be upgraded to have it active as well.
	 */
	if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
		boolean_t lz4_en = spa_feature_is_enabled(spa,
		    SPA_FEATURE_LZ4_COMPRESS);
		boolean_t lz4_ac = spa_feature_is_active(spa,
		    SPA_FEATURE_LZ4_COMPRESS);

		if (lz4_en && !lz4_ac)
			spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
	}

	/*
	 * If we haven't written the salt, do so now. Note that the
	 * feature may not be activated yet, but that's fine since
	 * the presence of this ZAP entry is backwards compatible.
	 */
	if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_CHECKSUM_SALT) == ENOENT) {
		VERIFY0(zap_add(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
		    sizeof (spa->spa_cksum_salt.zcs_bytes),
		    spa->spa_cksum_salt.zcs_bytes, tx));
	}

	rrw_exit(&dp->dp_config_rwlock, FTAG);
}

static void
vdev_indirect_state_sync_verify(vdev_t *vd)
{
	vdev_indirect_mapping_t *vim __maybe_unused = vd->vdev_indirect_mapping;
	vdev_indirect_births_t *vib __maybe_unused = vd->vdev_indirect_births;

	if (vd->vdev_ops == &vdev_indirect_ops) {
		ASSERT(vim != NULL);
		ASSERT(vib != NULL);
	}

	uint64_t obsolete_sm_object = 0;
	ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
	if (obsolete_sm_object != 0) {
		ASSERT(vd->vdev_obsolete_sm != NULL);
		ASSERT(vd->vdev_removing ||
		    vd->vdev_ops == &vdev_indirect_ops);
		ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
		ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
		ASSERT3U(obsolete_sm_object, ==,
		    space_map_object(vd->vdev_obsolete_sm));
		ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
		    space_map_allocated(vd->vdev_obsolete_sm));
	}
	ASSERT(vd->vdev_obsolete_segments != NULL);

	/*
	 * Since frees / remaps to an indirect vdev can only
	 * happen in syncing context, the obsolete segments
	 * tree must be empty when we start syncing.
	 */
	ASSERT0(range_tree_space(vd->vdev_obsolete_segments));
}

/*
 * Set the top-level vdev's max queue depth. Evaluate each top-level's
 * async write queue depth in case it changed. The max queue depth will
 * not change in the middle of syncing out this txg.
 */
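/*
 * As a worked example (assuming the historical defaults of
 * zfs_vdev_async_write_max_active = 10 and zfs_vdev_queue_depth_pct = 1000,
 * which may differ on a tuned system), each eligible metaslab group below
 * is given a max queue depth of 10 * 1000 / 100 = 100 queued async write
 * allocations.
 */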
static void
spa_sync_adjust_vdev_max_queue_depth(spa_t *spa)
{
	ASSERT(spa_writeable(spa));

	vdev_t *rvd = spa->spa_root_vdev;
	uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
	    zfs_vdev_queue_depth_pct / 100;
	metaslab_class_t *normal = spa_normal_class(spa);
	metaslab_class_t *special = spa_special_class(spa);
	metaslab_class_t *dedup = spa_dedup_class(spa);

	uint64_t slots_per_allocator = 0;
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];

		metaslab_group_t *mg = tvd->vdev_mg;
		if (mg == NULL || !metaslab_group_initialized(mg))
			continue;

		metaslab_class_t *mc = mg->mg_class;
		if (mc != normal && mc != special && mc != dedup)
			continue;

		/*
		 * It is safe to do a lock-free check here because only async
		 * allocations look at mg_max_alloc_queue_depth, and async
		 * allocations all happen from spa_sync().
		 */
		for (int i = 0; i < mg->mg_allocators; i++) {
			ASSERT0(zfs_refcount_count(
			    &(mg->mg_allocator[i].mga_alloc_queue_depth)));
		}
		mg->mg_max_alloc_queue_depth = max_queue_depth;

		for (int i = 0; i < mg->mg_allocators; i++) {
			mg->mg_allocator[i].mga_cur_max_alloc_queue_depth =
			    zfs_vdev_def_queue_depth;
		}
		slots_per_allocator += zfs_vdev_def_queue_depth;
	}

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		ASSERT0(zfs_refcount_count(&normal->mc_alloc_slots[i]));
		ASSERT0(zfs_refcount_count(&special->mc_alloc_slots[i]));
		ASSERT0(zfs_refcount_count(&dedup->mc_alloc_slots[i]));
		normal->mc_alloc_max_slots[i] = slots_per_allocator;
		special->mc_alloc_max_slots[i] = slots_per_allocator;
		dedup->mc_alloc_max_slots[i] = slots_per_allocator;
	}
	normal->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
	special->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
	dedup->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
}

static void
spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx)
{
	ASSERT(spa_writeable(spa));

	vdev_t *rvd = spa->spa_root_vdev;
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *vd = rvd->vdev_child[c];
		vdev_indirect_state_sync_verify(vd);

		if (vdev_indirect_should_condense(vd)) {
			spa_condense_indirect_start_sync(vd, tx);
			break;
		}
	}
}

static void
spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx)
{
	objset_t *mos = spa->spa_meta_objset;
	dsl_pool_t *dp = spa->spa_dsl_pool;
	uint64_t txg = tx->tx_txg;
	bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];

	do {
		int pass = ++spa->spa_sync_pass;

		spa_sync_config_object(spa, tx);
		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
		spa_errlog_sync(spa, txg);
		dsl_pool_sync(dp, txg);

		if (pass < zfs_sync_pass_deferred_free ||
		    spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
			/*
			 * If the log space map feature is active we don't
			 * care about deferred frees and the deferred bpobj
			 * as the log space map should effectively have the
			 * same results (i.e. appending only to one object).
			 */
			spa_sync_frees(spa, free_bpl, tx);
		} else {
			/*
			 * We can not defer frees in pass 1, because
			 * we sync the deferred frees later in pass 1.
			 */
			ASSERT3U(pass, >, 1);
			bplist_iterate(free_bpl, bpobj_enqueue_alloc_cb,
			    &spa->spa_deferred_bpobj, tx);
		}

		ddt_sync(spa, txg);
		dsl_scan_sync(dp, tx);
		svr_sync(spa, tx);
		spa_sync_upgrades(spa, tx);

		spa_flush_metaslabs(spa, tx);

		vdev_t *vd = NULL;
		while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
		    != NULL)
			vdev_sync(vd, txg);

		/*
		 * Note: We need to check if the MOS is dirty because we could
		 * have marked the MOS dirty without updating the uberblock
		 * (e.g. if we have sync tasks but no dirty user data). We need
		 * to check the uberblock's rootbp because it is updated if we
		 * have synced out dirty data (though in this case the MOS will
		 * most likely also be dirty due to second order effects, we
		 * don't want to rely on that here).
		 */
		if (pass == 1 &&
		    spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
		    !dmu_objset_is_dirty(mos, txg)) {
			/*
			 * Nothing changed on the first pass, therefore this
			 * TXG is a no-op. Avoid syncing deferred frees, so
			 * that we can keep this TXG as a no-op.
			 */
			ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
			ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
			ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
			ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, txg));
			break;
		}

		spa_sync_deferred_frees(spa, tx);
	} while (dmu_objset_is_dirty(mos, txg));
}

/*
 * Rewrite the vdev configuration (which includes the uberblock) to
 * commit the transaction group.
 *
 * If there are no dirty vdevs, we sync the uberblock to a few random
 * top-level vdevs that are known to be visible in the config cache
 * (see spa_vdev_add() for a complete description). If there *are* dirty
 * vdevs, sync the uberblock to all vdevs.
 */
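/*
 * (SPA_SYNC_MIN_VDEVS bounds how many labels are rewritten in the
 * no-dirty-vdevs case; at the time of writing it is 3.)
 */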
static void
spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx)
{
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t txg = tx->tx_txg;

	for (;;) {
		int error = 0;

		/*
		 * We hold SCL_STATE to prevent vdev open/close/etc.
		 * while we're attempting to write the vdev labels.
		 */
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

		if (list_is_empty(&spa->spa_config_dirty_list)) {
			vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
			int svdcount = 0;
			int children = rvd->vdev_children;
			int c0 = spa_get_random(children);

			for (int c = 0; c < children; c++) {
				vdev_t *vd =
				    rvd->vdev_child[(c0 + c) % children];

				/* Stop when revisiting the first vdev */
				if (c > 0 && svd[0] == vd)
					break;

				if (vd->vdev_ms_array == 0 ||
				    vd->vdev_islog ||
				    !vdev_is_concrete(vd))
					continue;

				svd[svdcount++] = vd;
				if (svdcount == SPA_SYNC_MIN_VDEVS)
					break;
			}
			error = vdev_config_sync(svd, svdcount, txg);
		} else {
			error = vdev_config_sync(rvd->vdev_child,
			    rvd->vdev_children, txg);
		}

		if (error == 0)
			spa->spa_last_synced_guid = rvd->vdev_guid;

		spa_config_exit(spa, SCL_STATE, FTAG);

		if (error == 0)
			break;
		zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
		zio_resume_wait(spa);
	}
}

/*
 * Sync the specified transaction group. New blocks may be dirtied as
 * part of the process, so we iterate until it converges.
 */
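/*
 * In outline (a summary of the code below, not a contract):
 *
 *   1. Wait for open-context zios issued against this txg to complete.
 *   2. Take SCL_CONFIG and convert pending vdev state changes into
 *      config changes.
 *   3. Adjust allocation queue depths and start indirect condensing.
 *   4. Run spa_sync_iterate_to_convergence() until nothing is dirty.
 *   5. Rewrite the vdev labels and uberblock, suspending and retrying
 *      on error.
 *   6. Publish the new config, finish the pool sync, and kick off any
 *      requested async tasks.
 */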
void
spa_sync(spa_t *spa, uint64_t txg)
{
	vdev_t *vd = NULL;

	VERIFY(spa_writeable(spa));

	/*
	 * Wait for i/os issued in open context that need to complete
	 * before this txg syncs.
	 */
	(void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]);
	spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL,
	    ZIO_FLAG_CANFAIL);

	/*
	 * Lock out configuration changes.
	 */
	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	spa->spa_syncing_txg = txg;
	spa->spa_sync_pass = 0;

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mutex_enter(&spa->spa_alloc_locks[i]);
		VERIFY0(avl_numnodes(&spa->spa_alloc_trees[i]));
		mutex_exit(&spa->spa_alloc_locks[i]);
	}

	/*
	 * If there are any pending vdev state changes, convert them
	 * into config changes that go out with this transaction group.
	 */
	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	while (list_head(&spa->spa_state_dirty_list) != NULL) {
		/*
		 * We need the write lock here because, for aux vdevs,
		 * calling vdev_config_dirty() modifies sav_config.
		 * This is ugly and will become unnecessary when we
		 * eliminate the aux vdev wart by integrating all vdevs
		 * into the root vdev tree.
		 */
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
		while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
			vdev_state_clean(vd);
			vdev_config_dirty(vd);
		}
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
	}
	spa_config_exit(spa, SCL_STATE, FTAG);

	dsl_pool_t *dp = spa->spa_dsl_pool;
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);

	spa->spa_sync_starttime = gethrtime();
	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
	spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
	    spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
	    NSEC_TO_TICK(spa->spa_deadman_synctime));

	/*
	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
	 * set spa_deflate if we have no raid-z vdevs.
	 */
	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
		vdev_t *rvd = spa->spa_root_vdev;

		int i;
		for (i = 0; i < rvd->vdev_children; i++) {
			vd = rvd->vdev_child[i];
			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
				break;
		}
		if (i == rvd->vdev_children) {
			spa->spa_deflate = TRUE;
			VERIFY0(zap_add(spa->spa_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
		}
	}

	spa_sync_adjust_vdev_max_queue_depth(spa);

	spa_sync_condense_indirect(spa, tx);

	spa_sync_iterate_to_convergence(spa, tx);

#ifdef ZFS_DEBUG
	if (!list_is_empty(&spa->spa_config_dirty_list)) {
		/*
		 * Make sure that the number of ZAPs for all the vdevs matches
		 * the number of ZAPs in the per-vdev ZAP list. This only gets
		 * called if the config is dirty; otherwise there may be
		 * outstanding AVZ operations that weren't completed in
		 * spa_sync_config_object.
		 */
		uint64_t all_vdev_zap_entry_count;
		ASSERT0(zap_count(spa->spa_meta_objset,
		    spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
		ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
		    all_vdev_zap_entry_count);
	}
#endif

	if (spa->spa_vdev_removal != NULL) {
		ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]);
	}

	spa_sync_rewrite_vdev_config(spa, tx);
	dmu_tx_commit(tx);

	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
	spa->spa_deadman_tqid = 0;

	/*
	 * Clear the dirty config list.
	 */
	while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
		vdev_config_clean(vd);

	/*
	 * Now that the new config has synced transactionally,
	 * let it become visible to the config cache.
	 */
	if (spa->spa_config_syncing != NULL) {
		spa_config_set(spa, spa->spa_config_syncing);
		spa->spa_config_txg = txg;
		spa->spa_config_syncing = NULL;
	}

	dsl_pool_sync_done(dp, txg);

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mutex_enter(&spa->spa_alloc_locks[i]);
		VERIFY0(avl_numnodes(&spa->spa_alloc_trees[i]));
		mutex_exit(&spa->spa_alloc_locks[i]);
	}

	/*
	 * Update usable space statistics.
	 */
	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
	    != NULL)
		vdev_sync_done(vd, txg);

	metaslab_class_evict_old(spa->spa_normal_class, txg);
	metaslab_class_evict_old(spa->spa_log_class, txg);

	spa_sync_close_syncing_log_sm(spa);

	spa_update_dspace(spa);

	/*
	 * It had better be the case that we didn't dirty anything
	 * since vdev_config_sync().
	 */
	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));

	while (zfs_pause_spa_sync)
		delay(1);

	spa->spa_sync_pass = 0;

	/*
	 * Update the last synced uberblock here. We want to do this at
	 * the end of spa_sync() so that consumers of spa_last_synced_txg()
	 * will be guaranteed that all the processing associated with
	 * that txg has been completed.
	 */
	spa->spa_ubsync = spa->spa_uberblock;
	spa_config_exit(spa, SCL_CONFIG, FTAG);

	spa_handle_ignored_writes(spa);

	/*
	 * If any async tasks have been requested, kick them off.
	 */
	spa_async_dispatch(spa);
}

/*
 * Sync all pools. We don't want to hold the namespace lock across these
 * operations, so we take a reference on the spa_t and drop the lock during the
 * sync.
 */
void
spa_sync_allpools(void)
{
	spa_t *spa = NULL;
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (spa_state(spa) != POOL_STATE_ACTIVE ||
		    !spa_writeable(spa) || spa_suspended(spa))
			continue;
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		txg_wait_synced(spa_get_dsl(spa), 0);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);
}

/*
 * ==========================================================================
 * Miscellaneous routines
 * ==========================================================================
 */

/*
 * Remove all pools in the system.
 */
void
spa_evict_all(void)
{
	spa_t *spa;

	/*
	 * Remove all cached state. All pools should be closed now,
	 * so every spa in the AVL tree should be unreferenced.
	 */
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(NULL)) != NULL) {
		/*
		 * Stop async tasks. The async thread may need to detach
		 * a device that's been replaced, which requires grabbing
		 * spa_namespace_lock, so we must drop it here.
		 */
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		spa_async_suspend(spa);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);

		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
			spa_unload(spa);
			spa_deactivate(spa);
		}
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);
}

vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
{
	vdev_t *vd;
	int i;

	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
		return (vd);

	if (aux) {
		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
			vd = spa->spa_l2cache.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}

		for (i = 0; i < spa->spa_spares.sav_count; i++) {
			vd = spa->spa_spares.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}
	}

	return (NULL);
}

void
spa_upgrade(spa_t *spa, uint64_t version)
{
	ASSERT(spa_writeable(spa));

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * This should only be called for a non-faulted pool, and since a
	 * future version would result in an unopenable pool, this shouldn't be
	 * possible.
	 */
	ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
	ASSERT3U(version, >=, spa->spa_uberblock.ub_version);

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa_get_dsl(spa), 0);
}

boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
	int i;
	uint64_t spareguid;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++)
		if (sav->sav_vdevs[i]->vdev_guid == guid)
			return (B_TRUE);

	for (i = 0; i < sav->sav_npending; i++) {
		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
		    &spareguid) == 0 && spareguid == guid)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Check if a pool has an active shared spare device.
 * Note: the reference count of an active spare is 2, since it is counted
 * once as a spare and once as a replacing vdev.
 */
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
	int i, refcnt;
	uint64_t pool;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++) {
		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
		    refcnt > 2)
			return (B_TRUE);
	}

	return (B_FALSE);
}

uint64_t
spa_total_metaslabs(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;

	uint64_t m = 0;
	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
		vdev_t *vd = rvd->vdev_child[c];
		if (!vdev_is_concrete(vd))
			continue;
		m += vd->vdev_ms_count;
	}
	return (m);
}

/*
 * Notify any waiting threads that some activity has switched from being in-
 * progress to not-in-progress so that the thread can wake up and determine
 * whether it is finished waiting.
 */
void
spa_notify_waiters(spa_t *spa)
{
	/*
	 * Acquiring spa_activities_lock here prevents the cv_broadcast from
	 * happening between the waiting thread's check and cv_wait.
	 */
	mutex_enter(&spa->spa_activities_lock);
	cv_broadcast(&spa->spa_activities_cv);
	mutex_exit(&spa->spa_activities_lock);
}

/*
 * Notify any waiting threads that the pool is exporting, and then block until
 * they are finished using the spa_t.
 */
void
spa_wake_waiters(spa_t *spa)
{
	mutex_enter(&spa->spa_activities_lock);
	spa->spa_waiters_cancel = B_TRUE;
	cv_broadcast(&spa->spa_activities_cv);
	while (spa->spa_waiters != 0)
		cv_wait(&spa->spa_waiters_cv, &spa->spa_activities_lock);
	spa->spa_waiters_cancel = B_FALSE;
	mutex_exit(&spa->spa_activities_lock);
}

/* Whether the vdev or any of its descendants are being initialized/trimmed. */
static boolean_t
spa_vdev_activity_in_progress_impl(vdev_t *vd, zpool_wait_activity_t activity)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER));
	ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
	ASSERT(activity == ZPOOL_WAIT_INITIALIZE ||
	    activity == ZPOOL_WAIT_TRIM);

	kmutex_t *lock = activity == ZPOOL_WAIT_INITIALIZE ?
	    &vd->vdev_initialize_lock : &vd->vdev_trim_lock;

	mutex_exit(&spa->spa_activities_lock);
	mutex_enter(lock);
	mutex_enter(&spa->spa_activities_lock);

	boolean_t in_progress = (activity == ZPOOL_WAIT_INITIALIZE) ?
	    (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) :
	    (vd->vdev_trim_state == VDEV_TRIM_ACTIVE);
	mutex_exit(lock);

	if (in_progress)
		return (B_TRUE);

	for (int i = 0; i < vd->vdev_children; i++) {
		if (spa_vdev_activity_in_progress_impl(vd->vdev_child[i],
		    activity))
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * If use_guid is true, this checks whether the vdev specified by guid is
 * being initialized/trimmed. Otherwise, it checks whether any vdev in the pool
 * is being initialized/trimmed. The caller must hold the config lock and
 * spa_activities_lock.
 */
static int
spa_vdev_activity_in_progress(spa_t *spa, boolean_t use_guid, uint64_t guid,
    zpool_wait_activity_t activity, boolean_t *in_progress)
{
	mutex_exit(&spa->spa_activities_lock);
	spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
	mutex_enter(&spa->spa_activities_lock);

	vdev_t *vd;
	if (use_guid) {
		vd = spa_lookup_by_guid(spa, guid, B_FALSE);
		if (vd == NULL || !vd->vdev_ops->vdev_op_leaf) {
			spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
			return (EINVAL);
		}
	} else {
		vd = spa->spa_root_vdev;
	}

	*in_progress = spa_vdev_activity_in_progress_impl(vd, activity);

	spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
	return (0);
}

/*
 * Locking for waiting threads
 * ---------------------------
 *
 * Waiting threads need a way to check whether a given activity is in progress,
 * and then, if it is, wait for it to complete. Each activity will have some
 * in-memory representation of the relevant on-disk state which can be used to
 * determine whether or not the activity is in progress. The in-memory state and
 * the locking used to protect it will be different for each activity, and may
 * not be suitable for use with a cvar (e.g., some state is protected by the
 * config lock). To allow waiting threads to wait without any races, another
 * lock, spa_activities_lock, is used.
 *
 * When the state is checked, both the activity-specific lock (if there is one)
 * and spa_activities_lock are held. In some cases, the activity-specific lock
 * is acquired explicitly (e.g. the config lock). In others, the locking is
 * internal to some check (e.g. bpobj_is_empty). After checking, the waiting
 * thread releases the activity-specific lock and, if the activity is in
 * progress, then cv_waits using spa_activities_lock.
 *
 * The waiting thread is woken when another thread, one completing some
 * activity, updates the state of the activity and then calls
 * spa_notify_waiters, which will cv_broadcast. This 'completing' thread only
 * needs to hold its activity-specific lock when updating the state, and this
 * lock can (but doesn't have to) be dropped before calling spa_notify_waiters.
 *
 * Because spa_notify_waiters acquires spa_activities_lock before broadcasting,
 * and because it is held when the waiting thread checks the state of the
 * activity, it can never be the case that the completing thread both updates
 * the activity state and cv_broadcasts in between the waiting thread's check
 * and cv_wait. Thus, a waiting thread can never miss a wakeup.
 *
 * In order to prevent deadlock, when the waiting thread does its check, in some
 * cases it will temporarily drop spa_activities_lock in order to acquire the
 * activity-specific lock. The order in which spa_activities_lock and the
 * activity specific lock are acquired in the waiting thread is determined by
 * the order in which they are acquired in the completing thread; if the
 * completing thread calls spa_notify_waiters with the activity-specific lock
 * held, then the waiting thread must also acquire the activity-specific lock
 * before spa_activities_lock.
 */
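/*
 * A minimal sketch of the waiter's side of this handshake (illustrative only;
 * check_activity_state() is a hypothetical stand-in for the checks done in
 * spa_activity_in_progress() below):
 *
 *	mutex_enter(&spa->spa_activities_lock);
 *	while (check_activity_state(spa))
 *		cv_wait(&spa->spa_activities_cv, &spa->spa_activities_lock);
 *	mutex_exit(&spa->spa_activities_lock);
 *
 * The completing thread simply updates its activity-specific state and then
 * calls spa_notify_waiters(), which takes spa_activities_lock around its
 * cv_broadcast(), closing the race described above.
 */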

static int
spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity,
    boolean_t use_tag, uint64_t tag, boolean_t *in_progress)
{
	int error = 0;

	ASSERT(MUTEX_HELD(&spa->spa_activities_lock));

	switch (activity) {
	case ZPOOL_WAIT_CKPT_DISCARD:
		*in_progress =
		    (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT) &&
		    zap_contains(spa_meta_objset(spa),
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT) ==
		    ENOENT);
		break;
	case ZPOOL_WAIT_FREE:
		*in_progress = ((spa_version(spa) >= SPA_VERSION_DEADLISTS &&
		    !bpobj_is_empty(&spa->spa_dsl_pool->dp_free_bpobj)) ||
		    spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY) ||
		    spa_livelist_delete_check(spa));
		break;
	case ZPOOL_WAIT_INITIALIZE:
	case ZPOOL_WAIT_TRIM:
		error = spa_vdev_activity_in_progress(spa, use_tag, tag,
		    activity, in_progress);
		break;
	case ZPOOL_WAIT_REPLACE:
		mutex_exit(&spa->spa_activities_lock);
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
		mutex_enter(&spa->spa_activities_lock);

		*in_progress = vdev_replace_in_progress(spa->spa_root_vdev);
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
		break;
	case ZPOOL_WAIT_REMOVE:
		*in_progress = (spa->spa_removing_phys.sr_state ==
		    DSS_SCANNING);
		break;
	case ZPOOL_WAIT_RESILVER:
		if ((*in_progress = vdev_rebuild_active(spa->spa_root_vdev)))
			break;
		/* fall through */
	case ZPOOL_WAIT_SCRUB:
	{
		boolean_t scanning, paused, is_scrub;
		dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;

		is_scrub = (scn->scn_phys.scn_func == POOL_SCAN_SCRUB);
		scanning = (scn->scn_phys.scn_state == DSS_SCANNING);
		paused = dsl_scan_is_paused_scrub(scn);
		*in_progress = (scanning && !paused &&
		    is_scrub == (activity == ZPOOL_WAIT_SCRUB));
		break;
	}
	default:
		panic("unrecognized value for activity %d", activity);
	}

	return (error);
}

static int
spa_wait_common(const char *pool, zpool_wait_activity_t activity,
    boolean_t use_tag, uint64_t tag, boolean_t *waited)
{
	/*
	 * The tag is used to distinguish between instances of an activity.
	 * 'initialize' and 'trim' are the only activities that we use this for.
	 * The other activities can only have a single instance in progress in a
	 * pool at one time, making the tag unnecessary.
	 *
	 * There can be multiple devices being replaced at once, but since they
	 * all finish once resilvering finishes, we don't bother keeping track
	 * of them individually, we just wait for them all to finish.
	 */
	if (use_tag && activity != ZPOOL_WAIT_INITIALIZE &&
	    activity != ZPOOL_WAIT_TRIM)
		return (EINVAL);

	if (activity < 0 || activity >= ZPOOL_WAIT_NUM_ACTIVITIES)
		return (EINVAL);

	spa_t *spa;
	int error = spa_open(pool, &spa, FTAG);
	if (error != 0)
		return (error);

	/*
	 * Increment the spa's waiter count so that we can call spa_close and
	 * still ensure that the spa_t doesn't get freed before this thread is
	 * finished with it when the pool is exported. We want to call spa_close
	 * before we start waiting because otherwise the additional ref would
	 * prevent the pool from being exported or destroyed throughout the
	 * potentially long wait.
	 */
	mutex_enter(&spa->spa_activities_lock);
	spa->spa_waiters++;
	spa_close(spa, FTAG);

	*waited = B_FALSE;
	for (;;) {
		boolean_t in_progress;
		error = spa_activity_in_progress(spa, activity, use_tag, tag,
		    &in_progress);

		if (error || !in_progress || spa->spa_waiters_cancel)
			break;

		*waited = B_TRUE;

		if (cv_wait_sig(&spa->spa_activities_cv,
		    &spa->spa_activities_lock) == 0) {
			error = EINTR;
			break;
		}
	}

	spa->spa_waiters--;
	cv_signal(&spa->spa_waiters_cv);
	mutex_exit(&spa->spa_activities_lock);

	return (error);
}

/*
 * Wait for a particular instance of the specified activity to complete, where
 * the instance is identified by 'tag'.
 */
int
spa_wait_tag(const char *pool, zpool_wait_activity_t activity, uint64_t tag,
    boolean_t *waited)
{
	return (spa_wait_common(pool, activity, B_TRUE, tag, waited));
}

/*
 * Wait for all instances of the specified activity to complete.
 */
int
spa_wait(const char *pool, zpool_wait_activity_t activity, boolean_t *waited)
{
	return (spa_wait_common(pool, activity, B_FALSE, 0, waited));
}

sysevent_t *
spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
	sysevent_t *ev = NULL;
#ifdef _KERNEL
	nvlist_t *resource;

	resource = zfs_event_create(spa, vd, FM_SYSEVENT_CLASS, name, hist_nvl);
	if (resource) {
		ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP);
		ev->resource = resource;
	}
#endif
	return (ev);
}

void
spa_event_post(sysevent_t *ev)
{
#ifdef _KERNEL
	if (ev) {
		zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb);
		kmem_free(ev, sizeof (*ev));
	}
#endif
}

/*
 * Post a zevent corresponding to the given sysevent. The 'name' must be one
 * of the event definitions in sys/sysevent/eventdefs.h. The payload will be
 * filled in from the spa and (optionally) the vdev. This doesn't do anything
 * in the userland libzpool, as we don't want consumers to misinterpret ztest
 * or zdb as real changes.
 */
void
spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
	spa_event_post(spa_event_create(spa, vd, hist_nvl, name));
}
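
/*
 * Example (hypothetical call site): post a config-sync event for the whole
 * pool, with no associated vdev and no extra history payload:
 *
 *	spa_event_notify(spa, NULL, NULL, ESC_ZFS_CONFIG_SYNC);
 */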

/* state manipulation functions */
EXPORT_SYMBOL(spa_open);
EXPORT_SYMBOL(spa_open_rewind);
EXPORT_SYMBOL(spa_get_stats);
EXPORT_SYMBOL(spa_create);
EXPORT_SYMBOL(spa_import);
EXPORT_SYMBOL(spa_tryimport);
EXPORT_SYMBOL(spa_destroy);
EXPORT_SYMBOL(spa_export);
EXPORT_SYMBOL(spa_reset);
EXPORT_SYMBOL(spa_async_request);
EXPORT_SYMBOL(spa_async_suspend);
EXPORT_SYMBOL(spa_async_resume);
EXPORT_SYMBOL(spa_inject_addref);
EXPORT_SYMBOL(spa_inject_delref);
EXPORT_SYMBOL(spa_scan_stat_init);
EXPORT_SYMBOL(spa_scan_get_stats);

/* device manipulation */
EXPORT_SYMBOL(spa_vdev_add);
EXPORT_SYMBOL(spa_vdev_attach);
EXPORT_SYMBOL(spa_vdev_detach);
EXPORT_SYMBOL(spa_vdev_setpath);
EXPORT_SYMBOL(spa_vdev_setfru);
EXPORT_SYMBOL(spa_vdev_split_mirror);

/* spare state (which is global across all pools) */
EXPORT_SYMBOL(spa_spare_add);
EXPORT_SYMBOL(spa_spare_remove);
EXPORT_SYMBOL(spa_spare_exists);
EXPORT_SYMBOL(spa_spare_activate);

/* L2ARC state (which is global across all pools) */
EXPORT_SYMBOL(spa_l2cache_add);
EXPORT_SYMBOL(spa_l2cache_remove);
EXPORT_SYMBOL(spa_l2cache_exists);
EXPORT_SYMBOL(spa_l2cache_activate);
EXPORT_SYMBOL(spa_l2cache_drop);

/* scanning */
EXPORT_SYMBOL(spa_scan);
EXPORT_SYMBOL(spa_scan_stop);

/* spa syncing */
EXPORT_SYMBOL(spa_sync); /* only for DMU use */
EXPORT_SYMBOL(spa_sync_allpools);

/* properties */
EXPORT_SYMBOL(spa_prop_set);
EXPORT_SYMBOL(spa_prop_get);
EXPORT_SYMBOL(spa_prop_clear_bootfs);

/* asynchronous event notification */
EXPORT_SYMBOL(spa_event_notify);

ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, INT, ZMOD_RW,
	"log2(fraction of arc that can be used by inflight I/Os when "
	"verifying pool during import)");

ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_metadata, INT, ZMOD_RW,
	"Set to traverse metadata on pool import");

ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_data, INT, ZMOD_RW,
	"Set to traverse data on pool import");

ZFS_MODULE_PARAM(zfs_spa, spa_, load_print_vdev_tree, INT, ZMOD_RW,
	"Print vdev tree to zfs_dbgmsg during pool import");

ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_pct, UINT, ZMOD_RD,
	"Percentage of CPUs to run an IO worker thread");

ZFS_MODULE_PARAM(zfs, zfs_, max_missing_tvds, ULONG, ZMOD_RW,
	"Allow importing pool with up to this number of missing top-level "
	"vdevs (in read-only mode)");

ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_pause, INT, ZMOD_RW,
	"Set the livelist condense zthr to pause");

ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_pause, INT, ZMOD_RW,
	"Set the livelist condense synctask to pause");

ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_cancel, INT, ZMOD_RW,
	"Whether livelist condensing was canceled in the synctask");

ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_cancel, INT, ZMOD_RW,
	"Whether livelist condensing was canceled in the zthr function");

ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, new_alloc, INT, ZMOD_RW,
	"Whether extra ALLOC blkptrs were added to a livelist entry while it "
	"was being condensed");
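
/*
 * On Linux builds these parameters surface under /sys/module/zfs/parameters/;
 * e.g. (illustrative only, paths and availability vary by platform and
 * version):
 *
 *	echo 0 > /sys/module/zfs/parameters/spa_load_verify_metadata
 */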