/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 */
/*
 * Virtual Device Labels
 * ---------------------
 *
 * The vdev label serves several distinct purposes:
 *
 *    1. Uniquely identify this device as part of a ZFS pool and confirm its
 *       identity within the pool.
 *
 *    2. Verify that all the devices given in a configuration are present
 *       within the pool.
 *
 *    3. Determine the uberblock for the pool.
 *
 *    4. In case of an import operation, determine the configuration of the
 *       toplevel vdev of which it is a part.
 *
 *    5. If an import operation cannot find all the devices in the pool,
 *       provide enough information to the administrator to determine which
 *       devices are missing.
 *
 * It is important to note that while the kernel is responsible for writing the
 * label, it only consumes the information in the first three cases.  The
 * latter information is only consumed in userland when determining the
 * configuration to import a pool.
 *
 * Before describing the contents of the label, it's important to understand how
 * the labels are written and updated with respect to the uberblock.
 *
 * When the pool configuration is altered, either because it was newly created
 * or a device was added, we want to update all the labels such that we can deal
 * with fatal failure at any point.  To this end, each disk has two labels which
 * are updated before and after the uberblock is synced.  Assuming we have
 * labels and an uberblock with the following transaction groups:
 *
 *              L1          UB          L2
 *           +------+    +------+    +------+
 *           |      |    |      |    |      |
 *           | t10  |    | t10  |    | t10  |
 *           |      |    |      |    |      |
 *           +------+    +------+    +------+
 *
 * In this stable state, the labels and the uberblock were all updated within
 * the same transaction group (10).  Each label is mirrored and checksummed, so
 * that we can detect when we fail partway through writing the label.
 *
 * In order to identify which labels are valid, the labels are written in the
 * following manner:
 *
 *    1. For each vdev, update 'L1' to the new label
 *    2. Update the uberblock
 *    3. For each vdev, update 'L2' to the new label
 *
 * Given arbitrary failure, we can determine the correct label to use based on
 * the transaction group.  If we fail after updating L1 but before updating the
 * UB, we will notice that L1's transaction group is greater than the uberblock,
 * so L2 must be valid.  If we fail after writing the uberblock but before
 * writing L2, we will notice that L2's transaction group is less than L1, and
 * therefore L1 is valid.
 *
 * A further complexity is that not every label is updated when the config
 * is synced.  If we add a single device, we do not want to have to re-write
 * every label for every device in the pool.  This means that both L1 and L2 may
 * be older than the pool uberblock, because the necessary information is stored
 * on another vdev.
 *
 *
 * On-disk Format
 * --------------
 *
 * The vdev label consists of two distinct parts, and is wrapped within the
 * vdev_label_t structure.  The label includes 8k of padding to permit legacy
 * VTOC disk labels; this padding is otherwise ignored.
 *
 * The first half of the label is a packed nvlist which contains pool-wide
 * properties, per-vdev properties, and configuration information.  It is
 * described in more detail below.
 *
 * The latter half of the label consists of a redundant array of uberblocks.
 * These uberblocks are updated whenever a transaction group is committed,
 * or when the configuration is updated.  When a pool is loaded, we scan each
 * vdev for the 'best' uberblock.
 *
 *
 * Configuration Information
 * -------------------------
 *
 * The nvlist describing the pool and vdev contains the following elements:
 *
 *    version        ZFS on-disk version
 *    txg            Transaction group in which this label was written
 *    pool_guid      Unique identifier for this pool
 *    vdev_tree      An nvlist describing vdev tree.
 *    features_for_read
 *                   An nvlist of the features necessary for reading the MOS.
 *
 * Each leaf device label also contains the following:
 *
 *    top_guid       Unique ID for top-level vdev in which this is contained
 *    guid           Unique ID for the leaf vdev
 *
 * The 'vs' configuration follows the format described in 'spa_config.c'.
 */
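/*
 * For illustration only (all values below are hypothetical, not taken from
 * any real pool), the unpacked nvlist of a leaf label in a two-way mirror
 * might contain:
 *
 *    version: 5000
 *    txg: 4
 *    pool_guid: 1165955789908457746
 *    top_guid: 7448663124731136093
 *    guid: 3265449724737534713
 *    vdev_tree: an embedded nvlist describing the mirror and its children
 */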
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/trim_map.h>
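/*
 * Illustrative sketch, not part of the original file: given the transaction
 * groups found in L1, L2 and the uberblock after a crash, the two-phase
 * update described in the block comment above determines which label is
 * valid.  The function name and return convention here are hypothetical.
 */
static inline int
vdev_label_pick_valid_sketch(uint64_t l1_txg, uint64_t l2_txg, uint64_t ub_txg)
{
    /* Failed after step 1: L1 is newer than the uberblock, so use L2. */
    if (l1_txg > ub_txg)
        return (2);
    /* Failed after step 2: L2 is older than L1, so use L1. */
    if (l2_txg < l1_txg)
        return (1);
    /* Stable state: both labels were written in the uberblock's txg. */
    return (1);
}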
static boolean_t vdev_trim_on_init = B_TRUE;
SYSCTL_DECL(_vfs_zfs_vdev);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, trim_on_init, CTLFLAG_RWTUN,
    &vdev_trim_on_init, 0, "Enable/disable full vdev trim on initialisation");
/*
 * Basic routines to read and write from a vdev label.
 * Used throughout the rest of this file.
 */
uint64_t
vdev_label_offset(uint64_t psize, int l, uint64_t offset)
{
    ASSERT(offset < sizeof (vdev_label_t));
    ASSERT(P2PHASE_TYPED(psize, sizeof (vdev_label_t), uint64_t) == 0);

    return (offset + l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
        0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
}
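/*
 * Worked example (device size assumed): with sizeof (vdev_label_t) == 256K
 * and VDEV_LABELS == 4, a device with psize == 1G keeps labels 0 and 1 at
 * offsets 0 and 256K from the front, and labels 2 and 3 at 1G - 512K and
 * 1G - 256K from the back.
 */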
/*
 * Returns the vdev label number associated with the given byte offset.
 */
int
vdev_label_number(uint64_t psize, uint64_t offset)
{
    int l;

    if (offset >= psize - VDEV_LABEL_END_SIZE) {
        offset -= psize - VDEV_LABEL_END_SIZE;
        offset += (VDEV_LABELS / 2) * sizeof (vdev_label_t);
    }
    l = offset / sizeof (vdev_label_t);
    return (l < VDEV_LABELS ? l : -1);
}
static void
vdev_label_read(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
    uint64_t size, zio_done_func_t *done, void *private, int flags)
{
    ASSERT(
        spa_config_held(zio->io_spa, SCL_STATE, RW_READER) == SCL_STATE ||
        spa_config_held(zio->io_spa, SCL_STATE, RW_WRITER) == SCL_STATE);
    ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);

    zio_nowait(zio_read_phys(zio, vd,
        vdev_label_offset(vd->vdev_psize, l, offset),
        size, buf, ZIO_CHECKSUM_LABEL, done, private,
        ZIO_PRIORITY_SYNC_READ, flags, B_TRUE));
}

static void
vdev_label_write(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
    uint64_t size, zio_done_func_t *done, void *private, int flags)
{
    ASSERT(
        spa_config_held(zio->io_spa, SCL_STATE, RW_READER) == SCL_STATE ||
        spa_config_held(zio->io_spa, SCL_STATE, RW_WRITER) == SCL_STATE);
    ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);

    zio_nowait(zio_write_phys(zio, vd,
        vdev_label_offset(vd->vdev_psize, l, offset),
        size, buf, ZIO_CHECKSUM_LABEL, done, private,
        ZIO_PRIORITY_SYNC_WRITE, flags, B_TRUE));
}
static void
root_vdev_actions_getprogress(vdev_t *vd, nvlist_t *nvl)
{
    spa_t *spa = vd->vdev_spa;

    if (vd != spa->spa_root_vdev)
        return;

    /* provide either current or previous scan information */
    pool_scan_stat_t ps;
    if (spa_scan_get_stats(spa, &ps) == 0) {
        fnvlist_add_uint64_array(nvl,
            ZPOOL_CONFIG_SCAN_STATS, (uint64_t *)&ps,
            sizeof (pool_scan_stat_t) / sizeof (uint64_t));
    }

    pool_removal_stat_t prs;
    if (spa_removal_get_stats(spa, &prs) == 0) {
        fnvlist_add_uint64_array(nvl,
            ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t *)&prs,
            sizeof (prs) / sizeof (uint64_t));
    }

    pool_checkpoint_stat_t pcs;
    if (spa_checkpoint_get_stats(spa, &pcs) == 0) {
        fnvlist_add_uint64_array(nvl,
            ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t *)&pcs,
            sizeof (pcs) / sizeof (uint64_t));
    }
}
/*
 * Generate the nvlist representing this vdev's config.
 */
nvlist_t *
vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
    vdev_config_flag_t flags)
{
    nvlist_t *nv = NULL;
    vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

    nv = fnvlist_alloc();

    fnvlist_add_string(nv, ZPOOL_CONFIG_TYPE, vd->vdev_ops->vdev_op_type);
    if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)))
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_ID, vd->vdev_id);
    fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, vd->vdev_guid);

    if (vd->vdev_path != NULL)
        fnvlist_add_string(nv, ZPOOL_CONFIG_PATH, vd->vdev_path);

    if (vd->vdev_devid != NULL)
        fnvlist_add_string(nv, ZPOOL_CONFIG_DEVID, vd->vdev_devid);

    if (vd->vdev_physpath != NULL)
        fnvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH,
            vd->vdev_physpath);

    if (vd->vdev_fru != NULL)
        fnvlist_add_string(nv, ZPOOL_CONFIG_FRU, vd->vdev_fru);

    if (vd->vdev_nparity != 0) {
        ASSERT(strcmp(vd->vdev_ops->vdev_op_type,
            VDEV_TYPE_RAIDZ) == 0);

        /*
         * Make sure someone hasn't managed to sneak a fancy new vdev
         * into a crufty old storage pool.
         */
        ASSERT(vd->vdev_nparity == 1 ||
            (vd->vdev_nparity <= 2 &&
            spa_version(spa) >= SPA_VERSION_RAIDZ2) ||
            (vd->vdev_nparity <= 3 &&
            spa_version(spa) >= SPA_VERSION_RAIDZ3));

        /*
         * Note that we'll add the nparity tag even on storage pools
         * that only support a single parity device -- older software
         * will just ignore it.
         */
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vd->vdev_nparity);
    }
    if (vd->vdev_wholedisk != -1ULL)
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
            vd->vdev_wholedisk);

    if (vd->vdev_not_present && !(flags & VDEV_CONFIG_MISSING))
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 1);

    if (vd->vdev_isspare)
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);

    if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)) &&
        vd == vd->vdev_top) {
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
            vd->vdev_ms_array);
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
            vd->vdev_ms_shift);
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift);
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASIZE,
            vd->vdev_asize);
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG, vd->vdev_islog);
        if (vd->vdev_removing) {
            fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVING,
                vd->vdev_removing);
        }
    }

    if (vd->vdev_dtl_sm != NULL) {
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_DTL,
            space_map_object(vd->vdev_dtl_sm));
    }

    if (vic->vic_mapping_object != 0) {
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
            vic->vic_mapping_object);
    }

    if (vic->vic_births_object != 0) {
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
            vic->vic_births_object);
    }

    if (vic->vic_prev_indirect_vdev != UINT64_MAX) {
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
            vic->vic_prev_indirect_vdev);
    }

    if (vd->vdev_crtxg)
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);
    if (flags & VDEV_CONFIG_MOS) {
        if (vd->vdev_leaf_zap != 0) {
            ASSERT(vd->vdev_ops->vdev_op_leaf);
            fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_LEAF_ZAP,
                vd->vdev_leaf_zap);
        }

        if (vd->vdev_top_zap != 0) {
            ASSERT(vd == vd->vdev_top);
            fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
                vd->vdev_top_zap);
        }
    }

    if (getstats) {
        vdev_stat_t vs;

        vdev_get_stats(vd, &vs);
        fnvlist_add_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
            (uint64_t *)&vs, sizeof (vs) / sizeof (uint64_t));

        root_vdev_actions_getprogress(vd, nv);

        /*
         * Note: this can be called from open context
         * (spa_get_stats()), so we need the rwlock to prevent
         * the mapping from being changed by condensing.
         */
        rw_enter(&vd->vdev_indirect_rwlock, RW_READER);
        if (vd->vdev_indirect_mapping != NULL) {
            ASSERT(vd->vdev_indirect_births != NULL);
            vdev_indirect_mapping_t *vim =
                vd->vdev_indirect_mapping;
            fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
                vdev_indirect_mapping_size(vim));
        }
        rw_exit(&vd->vdev_indirect_rwlock);
        if (vd->vdev_mg != NULL &&
            vd->vdev_mg->mg_fragmentation != ZFS_FRAG_INVALID) {
            /*
             * Compute approximately how much memory would be used
             * for the indirect mapping if this device were to
             * be removed.
             *
             * Note: If the frag metric is invalid, then not
             * enough metaslabs have been converted to have
             * histograms.
             */
            uint64_t seg_count = 0;
            uint64_t to_alloc = vd->vdev_stat.vs_alloc;

            /*
             * There are the same number of allocated segments
             * as free segments, so we will have at least one
             * entry per free segment.  However, small free
             * segments (smaller than vdev_removal_max_span)
             * will be combined with adjacent allocated segments
             * as a single mapping.
             */
            for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
                if (1ULL << (i + 1) < vdev_removal_max_span) {
                    to_alloc +=
                        vd->vdev_mg->mg_histogram[i] <<
                        (i + 1);
                } else {
                    seg_count +=
                        vd->vdev_mg->mg_histogram[i];
                }
            }

            /*
             * The maximum length of a mapping is
             * zfs_remove_max_segment, so we need at least one entry
             * per zfs_remove_max_segment of allocated data.
             */
            seg_count += to_alloc / zfs_remove_max_segment;

            fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
                seg_count *
                sizeof (vdev_indirect_mapping_entry_phys_t));
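            /*
             * Worked example (values assumed, not from the source):
             * with to_alloc of 1T, zfs_remove_max_segment of 16M and
             * no small-segment contribution from the histogram above,
             * seg_count comes to 65536, i.e. roughly 64K mapping
             * entries charged to this device's removal estimate.
             */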
        }
    }

    if (!vd->vdev_ops->vdev_op_leaf) {
        nvlist_t **child;
        int c, idx;

        ASSERT(!vd->vdev_ishole);

        child = kmem_alloc(vd->vdev_children * sizeof (nvlist_t *),
            KM_SLEEP);

        for (c = 0, idx = 0; c < vd->vdev_children; c++) {
            vdev_t *cvd = vd->vdev_child[c];

            /*
             * If we're generating an nvlist of removing
             * vdevs then skip over any device which is
             * not being removed.
             */
            if ((flags & VDEV_CONFIG_REMOVING) &&
                !cvd->vdev_removing)
                continue;

            child[idx++] = vdev_config_generate(spa, cvd,
                getstats, flags);
        }

        if (idx) {
            fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
                child, idx);
        }

        for (c = 0; c < idx; c++)
            nvlist_free(child[c]);

        kmem_free(child, vd->vdev_children * sizeof (nvlist_t *));
    } else {
        const char *aux = NULL;

        if (vd->vdev_offline && !vd->vdev_tmpoffline)
            fnvlist_add_uint64(nv, ZPOOL_CONFIG_OFFLINE, B_TRUE);
        if (vd->vdev_resilver_txg != 0)
            fnvlist_add_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
                vd->vdev_resilver_txg);
        if (vd->vdev_faulted)
            fnvlist_add_uint64(nv, ZPOOL_CONFIG_FAULTED, B_TRUE);
        if (vd->vdev_degraded)
            fnvlist_add_uint64(nv, ZPOOL_CONFIG_DEGRADED, B_TRUE);
        if (vd->vdev_removed)
            fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVED, B_TRUE);
        if (vd->vdev_unspare)
            fnvlist_add_uint64(nv, ZPOOL_CONFIG_UNSPARE, B_TRUE);
        if (vd->vdev_ishole)
            fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_HOLE, B_TRUE);

        switch (vd->vdev_stat.vs_aux) {
        case VDEV_AUX_ERR_EXCEEDED:
            aux = "err_exceeded";
            break;

        case VDEV_AUX_EXTERNAL:
            aux = "external";
            break;
        }

        if (aux != NULL)
            fnvlist_add_string(nv, ZPOOL_CONFIG_AUX_STATE, aux);

        if (vd->vdev_splitting && vd->vdev_orig_guid != 0LL) {
            fnvlist_add_uint64(nv, ZPOOL_CONFIG_ORIG_GUID,
                vd->vdev_orig_guid);
        }
    }

    return (nv);
}
/*
 * Generate a view of the top-level vdevs.  If we currently have holes
 * in the namespace, then generate an array which contains a list of holey
 * vdevs.  Additionally, add the number of top-level children that currently
 * exist.
 */
void
vdev_top_config_generate(spa_t *spa, nvlist_t *config)
{
    vdev_t *rvd = spa->spa_root_vdev;
    uint64_t *array;
    uint_t c, idx;

    array = kmem_alloc(rvd->vdev_children * sizeof (uint64_t), KM_SLEEP);

    for (c = 0, idx = 0; c < rvd->vdev_children; c++) {
        vdev_t *tvd = rvd->vdev_child[c];

        if (tvd->vdev_ishole) {
            array[idx++] = c;
        }
    }

    if (idx) {
        VERIFY(nvlist_add_uint64_array(config, ZPOOL_CONFIG_HOLE_ARRAY,
            array, idx) == 0);
    }

    VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
        rvd->vdev_children) == 0);

    kmem_free(array, rvd->vdev_children * sizeof (uint64_t));
}
/*
 * Returns the configuration from the label of the given vdev.  For vdevs
 * which don't have a txg value stored on their label (i.e. spares/cache)
 * or have not been completely initialized (txg = 0) just return
 * the configuration from the first valid label we find.  Otherwise,
 * find the most up-to-date label that does not exceed the specified
 * 'txg' value.
 */
nvlist_t *
vdev_label_read_config(vdev_t *vd, uint64_t txg)
{
    spa_t *spa = vd->vdev_spa;
    nvlist_t *config = NULL;
    vdev_phys_t *vp;
    abd_t *vp_abd;
    zio_t *zio;
    uint64_t best_txg = 0;
    uint64_t label_txg = 0;
    int error = 0;
    int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
        ZIO_FLAG_SPECULATIVE;

    ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

    if (!vdev_readable(vd))
        return (NULL);

    vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
    vp = abd_to_buf(vp_abd);
retry:
    for (int l = 0; l < VDEV_LABELS; l++) {
        nvlist_t *label = NULL;

        zio = zio_root(spa, NULL, NULL, flags);

        vdev_label_read(zio, vd, l, vp_abd,
            offsetof(vdev_label_t, vl_vdev_phys),
            sizeof (vdev_phys_t), NULL, NULL, flags);

        if (zio_wait(zio) == 0 &&
            nvlist_unpack(vp->vp_nvlist, sizeof (vp->vp_nvlist),
            &label, 0) == 0) {
            /*
             * Auxiliary vdevs won't have txg values in their
             * labels and newly added vdevs may not have been
             * completely initialized so just return the
             * configuration from the first valid label we
             * encounter.
             */
            error = nvlist_lookup_uint64(label,
                ZPOOL_CONFIG_POOL_TXG, &label_txg);
            if ((error || label_txg == 0) && !config) {
                config = label;
                break;
            } else if (label_txg <= txg && label_txg > best_txg) {
                best_txg = label_txg;
                nvlist_free(config);
                config = fnvlist_dup(label);
            }
        }

        if (label != NULL) {
            nvlist_free(label);
            label = NULL;
        }
    }

    if (config == NULL && !(flags & ZIO_FLAG_TRYHARD)) {
        flags |= ZIO_FLAG_TRYHARD;
        goto retry;
    }

    /*
     * We found a valid label but it didn't pass txg restrictions.
     */
    if (config == NULL && label_txg != 0) {
        vdev_dbgmsg(vd, "label discarded as txg is too large "
            "(%llu > %llu)", (u_longlong_t)label_txg,
            (u_longlong_t)txg);
    }

    abd_free(vp_abd);

    return (config);
}
/*
 * Determine if a device is in use.  The 'spare_guid' parameter will be filled
 * in with the device guid if this spare is active elsewhere on the system.
 */
static boolean_t
vdev_inuse(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason,
    uint64_t *spare_guid, uint64_t *l2cache_guid)
{
    spa_t *spa = vd->vdev_spa;
    uint64_t state, pool_guid, device_guid, txg, spare_pool;
    uint64_t vdtxg = 0;
    nvlist_t *label;

    if (spare_guid)
        *spare_guid = 0ULL;
    if (l2cache_guid)
        *l2cache_guid = 0ULL;

    /*
     * Read the label, if any, and perform some basic sanity checks.
     */
    if ((label = vdev_label_read_config(vd, -1ULL)) == NULL)
        return (B_FALSE);

    (void) nvlist_lookup_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
        &vdtxg);

    if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
        &state) != 0 ||
        nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
        &device_guid) != 0) {
        nvlist_free(label);
        return (B_FALSE);
    }

    if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
        (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
        &pool_guid) != 0 ||
        nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
        &txg) != 0)) {
        nvlist_free(label);
        return (B_FALSE);
    }

    nvlist_free(label);
    /*
     * Check to see if this device indeed belongs to the pool it claims to
     * be a part of.  The only way this is allowed is if the device is a hot
     * spare (which we check for later on).
     */
    if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
        !spa_guid_exists(pool_guid, device_guid) &&
        !spa_spare_exists(device_guid, NULL, NULL) &&
        !spa_l2cache_exists(device_guid, NULL))
        return (B_FALSE);

    /*
     * If the transaction group is zero, then this is an initialized (but
     * unused) label.  This is only an error if the create transaction
     * on-disk is the same as the one we're using now, in which case the
     * user has attempted to add the same vdev multiple times in the same
     * transaction.
     */
    if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
        txg == 0 && vdtxg == crtxg)
        return (B_TRUE);
    /*
     * Check to see if this is a spare device.  We do an explicit check for
     * spa_has_spare() here because it may be on our pending list of spares
     * to add.  We also check if it is an l2cache device.
     */
    if (spa_spare_exists(device_guid, &spare_pool, NULL) ||
        spa_has_spare(spa, device_guid)) {
        if (spare_guid)
            *spare_guid = device_guid;

        switch (reason) {
        case VDEV_LABEL_CREATE:
        case VDEV_LABEL_L2CACHE:
            return (B_TRUE);

        case VDEV_LABEL_REPLACE:
            return (!spa_has_spare(spa, device_guid) ||
                spare_pool != 0ULL);

        case VDEV_LABEL_SPARE:
            return (spa_has_spare(spa, device_guid));
        }
    }

    /*
     * Check to see if this is an l2cache device.
     */
    if (spa_l2cache_exists(device_guid, NULL))
        return (B_TRUE);

    /*
     * We can't rely on a pool's state if it's been imported
     * read-only.  Instead we look to see if the pool is marked
     * read-only in the namespace and set the state to active.
     */
    if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
        (spa = spa_by_guid(pool_guid, device_guid)) != NULL &&
        spa_mode(spa) == FREAD)
        state = POOL_STATE_ACTIVE;

    /*
     * If the device is marked ACTIVE, then this device is in use by another
     * pool on the system.
     */
    return (state == POOL_STATE_ACTIVE);
}
/*
 * Initialize a vdev label.  We check to make sure each leaf device is not in
 * use, and writable.  We put down an initial label which we will later
 * overwrite with a complete label.  Note that it's important to do this
 * sequentially, not in parallel, so that we catch cases of multiple use of the
 * same leaf vdev in the vdev we're creating -- e.g. mirroring a disk with
 * itself.
 */
int
vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
{
    spa_t *spa = vd->vdev_spa;
    nvlist_t *label;
    vdev_phys_t *vp;
    abd_t *vp_abd;
    abd_t *pad2;
    uberblock_t *ub;
    abd_t *ub_abd;
    zio_t *zio;
    char *buf;
    size_t buflen;
    int error;
    uint64_t spare_guid, l2cache_guid;
    int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;

    ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

    for (int c = 0; c < vd->vdev_children; c++)
        if ((error = vdev_label_init(vd->vdev_child[c],
            crtxg, reason)) != 0)
            return (error);

    /* Track the creation time for this vdev */
    vd->vdev_crtxg = crtxg;

    if (!vd->vdev_ops->vdev_op_leaf || !spa_writeable(spa))
        return (0);

    /*
     * Dead vdevs cannot be initialized.
     */
    if (vdev_is_dead(vd))
        return (SET_ERROR(EIO));

    /*
     * Determine if the vdev is in use.
     */
    if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPLIT &&
        vdev_inuse(vd, crtxg, reason, &spare_guid, &l2cache_guid))
        return (SET_ERROR(EBUSY));
    /*
     * If this is a request to add or replace a spare or l2cache device
     * that is in use elsewhere on the system, then we must update the
     * guid (which was initialized to a random value) to reflect the
     * actual GUID (which is shared between multiple pools).
     */
    if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_L2CACHE &&
        spare_guid != 0ULL) {
        uint64_t guid_delta = spare_guid - vd->vdev_guid;

        vd->vdev_guid += guid_delta;

        for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
            pvd->vdev_guid_sum += guid_delta;

        /*
         * If this is a replacement, then we want to fall through to the
         * rest of the code.  If we're adding a spare, then it's already
         * labeled appropriately and we can just return.
         */
        if (reason == VDEV_LABEL_SPARE)
            return (0);
        ASSERT(reason == VDEV_LABEL_REPLACE ||
            reason == VDEV_LABEL_SPLIT);
    }

    if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPARE &&
        l2cache_guid != 0ULL) {
        uint64_t guid_delta = l2cache_guid - vd->vdev_guid;

        vd->vdev_guid += guid_delta;

        for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
            pvd->vdev_guid_sum += guid_delta;

        /*
         * If this is a replacement, then we want to fall through to the
         * rest of the code.  If we're adding an l2cache, then it's
         * already labeled appropriately and we can just return.
         */
        if (reason == VDEV_LABEL_L2CACHE)
            return (0);
        ASSERT(reason == VDEV_LABEL_REPLACE);
    }
    /*
     * TRIM the whole thing, excluding the blank space and boot header
     * as specified by ZFS On-Disk Specification (section 1.3), so that
     * we start with a clean slate.
     * It's just an optimization, so we don't care if it fails.
     * Don't TRIM if removing so that we don't interfere with zpool
     * disaster recovery.
     */
    if (zfs_trim_enabled && vdev_trim_on_init && !vd->vdev_notrim &&
        (reason == VDEV_LABEL_CREATE || reason == VDEV_LABEL_SPARE ||
        reason == VDEV_LABEL_L2CACHE))
        zio_wait(zio_trim(NULL, spa, vd, VDEV_SKIP_SIZE,
            vd->vdev_psize - VDEV_SKIP_SIZE));
    /*
     * Initialize its label.
     */
    vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
    abd_zero(vp_abd, sizeof (vdev_phys_t));
    vp = abd_to_buf(vp_abd);

    /*
     * Generate a label describing the pool and our top-level vdev.
     * We mark it as being from txg 0 to indicate that it's not
     * really part of an active pool just yet.  The labels will
     * be written again with a meaningful txg by spa_sync().
     */
    if (reason == VDEV_LABEL_SPARE ||
        (reason == VDEV_LABEL_REMOVE && vd->vdev_isspare)) {
        /*
         * For inactive hot spares, we generate a special label that
         * identifies as a mutually shared hot spare.  We write the
         * label if we are adding a hot spare, or if we are removing an
         * active hot spare (in which case we want to revert the
         * labels).
         */
        VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);

        VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
            spa_version(spa)) == 0);
        VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE,
            POOL_STATE_SPARE) == 0);
        VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
            vd->vdev_guid) == 0);
    } else if (reason == VDEV_LABEL_L2CACHE ||
        (reason == VDEV_LABEL_REMOVE && vd->vdev_isl2cache)) {
        /*
         * For level 2 ARC devices, add a special label.
         */
        VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);

        VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
            spa_version(spa)) == 0);
        VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE,
            POOL_STATE_L2CACHE) == 0);
        VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
            vd->vdev_guid) == 0);
    } else {
        uint64_t txg = 0ULL;

        if (reason == VDEV_LABEL_SPLIT)
            txg = spa->spa_uberblock.ub_txg;
        label = spa_config_generate(spa, vd, txg, B_FALSE);

        /*
         * Add our creation time.  This allows us to detect multiple
         * vdev uses as described above, and automatically expires if we
         * fail.
         */
        VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
            crtxg) == 0);
    }
    buf = vp->vp_nvlist;
    buflen = sizeof (vp->vp_nvlist);

    error = nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP);
    if (error != 0) {
        nvlist_free(label);
        abd_free(vp_abd);
        /* EFAULT means nvlist_pack ran out of room */
        return (error == EFAULT ? ENAMETOOLONG : EINVAL);
    }

    /*
     * Initialize uberblock template.
     */
    ub_abd = abd_alloc_linear(VDEV_UBERBLOCK_RING, B_TRUE);
    abd_zero(ub_abd, VDEV_UBERBLOCK_RING);
    abd_copy_from_buf(ub_abd, &spa->spa_uberblock, sizeof (uberblock_t));
    ub = abd_to_buf(ub_abd);
    ub->ub_txg = 0;

    /* Initialize the 2nd padding area. */
    pad2 = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
    abd_zero(pad2, VDEV_PAD_SIZE);
    /*
     * Write everything in parallel.
     */
retry:
    zio = zio_root(spa, NULL, NULL, flags);

    for (int l = 0; l < VDEV_LABELS; l++) {

        vdev_label_write(zio, vd, l, vp_abd,
            offsetof(vdev_label_t, vl_vdev_phys),
            sizeof (vdev_phys_t), NULL, NULL, flags);

        /*
         * Skip the 1st padding area.
         * Zero out the 2nd padding area where it might have
         * leftover data from a previous filesystem format.
         */
        vdev_label_write(zio, vd, l, pad2,
            offsetof(vdev_label_t, vl_pad2),
            VDEV_PAD_SIZE, NULL, NULL, flags);

        vdev_label_write(zio, vd, l, ub_abd,
            offsetof(vdev_label_t, vl_uberblock),
            VDEV_UBERBLOCK_RING, NULL, NULL, flags);
    }

    error = zio_wait(zio);

    if (error != 0 && !(flags & ZIO_FLAG_TRYHARD)) {
        flags |= ZIO_FLAG_TRYHARD;
        goto retry;
    }

    nvlist_free(label);
    abd_free(pad2);
    abd_free(ub_abd);
    abd_free(vp_abd);

    /*
     * If this vdev hasn't been previously identified as a spare, then we
     * mark it as such only if a) we are labeling it as a spare, or b) it
     * exists as a spare elsewhere in the system.  Do the same for
     * level 2 ARC devices.
     */
    if (error == 0 && !vd->vdev_isspare &&
        (reason == VDEV_LABEL_SPARE ||
        spa_spare_exists(vd->vdev_guid, NULL, NULL)))
        spa_spare_add(vd);

    if (error == 0 && !vd->vdev_isl2cache &&
        (reason == VDEV_LABEL_L2CACHE ||
        spa_l2cache_exists(vd->vdev_guid, NULL)))
        spa_l2cache_add(vd);

    return (error);
}
int
vdev_label_write_pad2(vdev_t *vd, const char *buf, size_t size)
{
    spa_t *spa = vd->vdev_spa;
    zio_t *zio;
    abd_t *pad2;
    int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
    int error;

    if (size > VDEV_PAD_SIZE)
        return (EINVAL);

    if (!vd->vdev_ops->vdev_op_leaf)
        return (ENODEV);
    if (vdev_is_dead(vd))
        return (ENXIO);

    ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

    pad2 = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
    abd_zero(pad2, VDEV_PAD_SIZE);
    abd_copy_from_buf(pad2, buf, size);

retry:
    zio = zio_root(spa, NULL, NULL, flags);
    vdev_label_write(zio, vd, 0, pad2,
        offsetof(vdev_label_t, vl_pad2),
        VDEV_PAD_SIZE, NULL, NULL, flags);
    error = zio_wait(zio);
    if (error != 0 && !(flags & ZIO_FLAG_TRYHARD)) {
        flags |= ZIO_FLAG_TRYHARD;
        goto retry;
    }

    abd_free(pad2);
    return (error);
}
/*
 * ==========================================================================
 * uberblock load/sync
 * ==========================================================================
 */

/*
 * Consider the following situation: txg is safely synced to disk.  We've
 * written the first uberblock for txg + 1, and then we lose power.  When we
 * come back up, we fail to see the uberblock for txg + 1 because, say,
 * it was on a mirrored device and the replica to which we wrote txg + 1
 * is now offline.  If we then make some changes and sync txg + 1, and then
 * the missing replica comes back, then for a few seconds we'll have two
 * conflicting uberblocks on disk with the same txg.  The solution is simple:
 * among uberblocks with equal txg, choose the one with the latest timestamp.
 */
static int
vdev_uberblock_compare(const uberblock_t *ub1, const uberblock_t *ub2)
{
    int cmp = AVL_CMP(ub1->ub_txg, ub2->ub_txg);
    if (likely(cmp))
        return (cmp);

    cmp = AVL_CMP(ub1->ub_timestamp, ub2->ub_timestamp);
    if (likely(cmp))
        return (cmp);

    /*
     * If MMP_VALID(ub) && MMP_SEQ_VALID(ub) then the host has an MMP-aware
     * ZFS, e.g. zfsonlinux >= 0.7.
     *
     * If one ub has MMP and the other does not, they were written by
     * different hosts, which matters for MMP.  So we treat no MMP/no SEQ as
     * a sequence number of zero.
     *
     * Since timestamp and txg are the same if we get this far, either is
     * acceptable for importing the pool.
     */
    unsigned int seq1 = 0;
    unsigned int seq2 = 0;

    if (MMP_VALID(ub1) && MMP_SEQ_VALID(ub1))
        seq1 = MMP_SEQ(ub1);

    if (MMP_VALID(ub2) && MMP_SEQ_VALID(ub2))
        seq2 = MMP_SEQ(ub2);

    return (AVL_CMP(seq1, seq2));
}
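/*
 * For example (illustrative values): uberblocks with txgs 100 and 101 are
 * ordered by txg alone; two copies of txg 101 fall back to ub_timestamp;
 * only when txg and timestamp both match does the MMP sequence number above
 * break the tie.
 */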
struct ubl_cbdata {
    uberblock_t *ubl_ubbest;    /* Best uberblock */
    vdev_t      *ubl_vd;        /* vdev associated with the above */
};
static void
vdev_uberblock_load_done(zio_t *zio)
{
    vdev_t *vd = zio->io_vd;
    spa_t *spa = zio->io_spa;
    zio_t *rio = zio->io_private;
    uberblock_t *ub = abd_to_buf(zio->io_abd);
    struct ubl_cbdata *cbp = rio->io_private;

    ASSERT3U(zio->io_size, ==, VDEV_UBERBLOCK_SIZE(vd));

    if (zio->io_error == 0 && uberblock_verify(ub) == 0) {
        mutex_enter(&rio->io_lock);
        if (ub->ub_txg <= spa->spa_load_max_txg &&
            vdev_uberblock_compare(ub, cbp->ubl_ubbest) > 0) {
            /*
             * Keep track of the vdev in which this uberblock
             * was found.  We will use this information later
             * to obtain the config nvlist associated with
             * this uberblock.
             */
            *cbp->ubl_ubbest = *ub;
            cbp->ubl_vd = vd;
        }
        mutex_exit(&rio->io_lock);
    }

    abd_free(zio->io_abd);
}
static void
vdev_uberblock_load_impl(zio_t *zio, vdev_t *vd, int flags,
    struct ubl_cbdata *cbp)
{
    for (int c = 0; c < vd->vdev_children; c++)
        vdev_uberblock_load_impl(zio, vd->vdev_child[c], flags, cbp);

    if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
        for (int l = 0; l < VDEV_LABELS; l++) {
            for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
                vdev_label_read(zio, vd, l,
                    abd_alloc_linear(VDEV_UBERBLOCK_SIZE(vd),
                    B_TRUE), VDEV_UBERBLOCK_OFFSET(vd, n),
                    VDEV_UBERBLOCK_SIZE(vd),
                    vdev_uberblock_load_done, zio, flags);
            }
        }
    }
}
/*
 * Reads the 'best' uberblock from disk along with its associated
 * configuration.  First, we read the uberblock array of each label of each
 * vdev, keeping track of the uberblock with the highest txg in each array.
 * Then, we read the configuration from the same vdev as the best uberblock.
 */
void
vdev_uberblock_load(vdev_t *rvd, uberblock_t *ub, nvlist_t **config)
{
    zio_t *zio;
    spa_t *spa = rvd->vdev_spa;
    struct ubl_cbdata cb;
    int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
        ZIO_FLAG_SPECULATIVE | ZIO_FLAG_TRYHARD;

    ASSERT(ub);
    ASSERT(config);

    bzero(ub, sizeof (uberblock_t));
    *config = NULL;

    cb.ubl_ubbest = ub;
    cb.ubl_vd = NULL;

    spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
    zio = zio_root(spa, NULL, &cb, flags);
    vdev_uberblock_load_impl(zio, rvd, flags, &cb);
    (void) zio_wait(zio);

    /*
     * It's possible that the best uberblock was discovered on a label
     * that has a configuration which was written in a future txg.
     * Search all labels on this vdev to find the configuration that
     * matches the txg for our uberblock.
     */
    if (cb.ubl_vd != NULL) {
        vdev_dbgmsg(cb.ubl_vd, "best uberblock found for spa %s. "
            "txg %llu", spa->spa_name, (u_longlong_t)ub->ub_txg);

        *config = vdev_label_read_config(cb.ubl_vd, ub->ub_txg);
        if (*config == NULL && spa->spa_extreme_rewind) {
            vdev_dbgmsg(cb.ubl_vd, "failed to read label config. "
                "Trying again without txg restrictions.");
            *config = vdev_label_read_config(cb.ubl_vd, UINT64_MAX);
        }
        if (*config == NULL) {
            vdev_dbgmsg(cb.ubl_vd, "failed to read label config");
        }
    }
    spa_config_exit(spa, SCL_ALL, FTAG);
}
/*
 * On success, increment root zio's count of good writes.
 * We only get credit for writes to known-visible vdevs; see spa_vdev_add().
 */
static void
vdev_uberblock_sync_done(zio_t *zio)
{
    uint64_t *good_writes = zio->io_private;

    if (zio->io_error == 0 && zio->io_vd->vdev_top->vdev_ms_array != 0)
        atomic_inc_64(good_writes);
}
/*
 * Write the uberblock to all labels of all leaves of the specified vdev.
 */
static void
vdev_uberblock_sync(zio_t *zio, uint64_t *good_writes,
    uberblock_t *ub, vdev_t *vd, int flags)
{
    for (uint64_t c = 0; c < vd->vdev_children; c++) {
        vdev_uberblock_sync(zio, good_writes,
            ub, vd->vdev_child[c], flags);
    }

    if (!vd->vdev_ops->vdev_op_leaf)
        return;

    if (!vdev_writeable(vd))
        return;

    int m = spa_multihost(vd->vdev_spa) ? MMP_BLOCKS_PER_LABEL : 0;
    int n = ub->ub_txg % (VDEV_UBERBLOCK_COUNT(vd) - m);
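    /*
     * Example (sizes assumed): a 128K uberblock ring holding 1K uberblocks
     * gives VDEV_UBERBLOCK_COUNT(vd) == 128, so with multihost off (m == 0)
     * txg 1000 overwrites ring slot 1000 % 128 == 104.
     */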
    /* Copy the uberblock_t into the ABD */
    abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
    abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
    abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));

    for (int l = 0; l < VDEV_LABELS; l++)
        vdev_label_write(zio, vd, l, ub_abd,
            VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd),
            vdev_uberblock_sync_done, good_writes,
            flags | ZIO_FLAG_DONT_PROPAGATE);

    abd_free(ub_abd);
}
/* Sync the uberblocks to all vdevs in svd[] */
static int
vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags)
{
    spa_t *spa = svd[0]->vdev_spa;
    zio_t *zio;
    uint64_t good_writes = 0;

    zio = zio_root(spa, NULL, NULL, flags);

    for (int v = 0; v < svdcount; v++)
        vdev_uberblock_sync(zio, &good_writes, ub, svd[v], flags);

    (void) zio_wait(zio);

    /*
     * Flush the uberblocks to disk.  This ensures that the odd labels
     * are no longer needed (because the new uberblocks and the even
     * labels are safely on disk), so it is safe to overwrite them.
     */
    zio = zio_root(spa, NULL, NULL, flags);

    for (int v = 0; v < svdcount; v++) {
        if (vdev_writeable(svd[v])) {
            zio_flush(zio, svd[v]);
        }
    }

    (void) zio_wait(zio);

    return (good_writes >= 1 ? 0 : EIO);
}
/*
 * On success, increment the count of good writes for our top-level vdev.
 */
static void
vdev_label_sync_done(zio_t *zio)
{
    uint64_t *good_writes = zio->io_private;

    if (zio->io_error == 0)
        atomic_inc_64(good_writes);
}

/*
 * If there weren't enough good writes, indicate failure to the parent.
 */
static void
vdev_label_sync_top_done(zio_t *zio)
{
    uint64_t *good_writes = zio->io_private;

    if (*good_writes == 0)
        zio->io_error = SET_ERROR(EIO);

    kmem_free(good_writes, sizeof (uint64_t));
}

/*
 * We ignore errors for log and cache devices, simply free the private data.
 */
static void
vdev_label_sync_ignore_done(zio_t *zio)
{
    kmem_free(zio->io_private, sizeof (uint64_t));
}
/*
 * Write all even or odd labels to all leaves of the specified vdev.
 */
static void
vdev_label_sync(zio_t *zio, uint64_t *good_writes,
    vdev_t *vd, int l, uint64_t txg, int flags)
{
    nvlist_t *label;
    vdev_phys_t *vp;
    abd_t *vp_abd;
    char *buf;
    size_t buflen;

    for (int c = 0; c < vd->vdev_children; c++) {
        vdev_label_sync(zio, good_writes,
            vd->vdev_child[c], l, txg, flags);
    }

    if (!vd->vdev_ops->vdev_op_leaf)
        return;

    if (!vdev_writeable(vd))
        return;

    /*
     * Generate a label describing the top-level config to which we belong.
     */
    label = spa_config_generate(vd->vdev_spa, vd, txg, B_FALSE);

    vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
    abd_zero(vp_abd, sizeof (vdev_phys_t));
    vp = abd_to_buf(vp_abd);

    buf = vp->vp_nvlist;
    buflen = sizeof (vp->vp_nvlist);

    if (nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP) == 0) {
        for (; l < VDEV_LABELS; l += 2) {
            vdev_label_write(zio, vd, l, vp_abd,
                offsetof(vdev_label_t, vl_vdev_phys),
                sizeof (vdev_phys_t),
                vdev_label_sync_done, good_writes,
                flags | ZIO_FLAG_DONT_PROPAGATE);
        }
    }

    abd_free(vp_abd);
    nvlist_free(label);
}
static int
vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags)
{
    list_t *dl = &spa->spa_config_dirty_list;
    vdev_t *vd;
    zio_t *zio;
    int error;

    /*
     * Write the new labels to disk.
     */
    zio = zio_root(spa, NULL, NULL, flags);

    for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd)) {
        uint64_t *good_writes = kmem_zalloc(sizeof (uint64_t),
            KM_SLEEP);

        ASSERT(!vd->vdev_ishole);

        zio_t *vio = zio_null(zio, spa, NULL,
            (vd->vdev_islog || vd->vdev_aux != NULL) ?
            vdev_label_sync_ignore_done : vdev_label_sync_top_done,
            good_writes, flags);
        vdev_label_sync(vio, good_writes, vd, l, txg, flags);
        zio_nowait(vio);
    }

    error = zio_wait(zio);

    /*
     * Flush the new labels to disk.
     */
    zio = zio_root(spa, NULL, NULL, flags);

    for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd))
        zio_flush(zio, vd);

    (void) zio_wait(zio);

    return (error);
}
/*
 * Sync the uberblock and any changes to the vdev configuration.
 *
 * The order of operations is carefully crafted to ensure that
 * if the system panics or loses power at any time, the state on disk
 * is still transactionally consistent.  The in-line comments below
 * describe the failure semantics at each stage.
 *
 * Moreover, vdev_config_sync() is designed to be idempotent: if it fails
 * at any time, you can just call it again, and it will resume its work.
 */
int
vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg)
{
    spa_t *spa = svd[0]->vdev_spa;
    uberblock_t *ub = &spa->spa_uberblock;
    int error = 0;
    int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;

    ASSERT(svdcount != 0);
retry:
    /*
     * Normally, we don't want to try too hard to write every label and
     * uberblock.  If there is a flaky disk, we don't want the rest of the
     * sync process to block while we retry.  But if we can't write a
     * single label out, we should retry with ZIO_FLAG_TRYHARD before
     * bailing out and declaring the pool faulted.
     */
    if (error != 0) {
        if ((flags & ZIO_FLAG_TRYHARD) != 0)
            return (error);
        flags |= ZIO_FLAG_TRYHARD;
    }
    ASSERT(ub->ub_txg <= txg);

    /*
     * If this isn't a resync due to I/O errors,
     * and nothing changed in this transaction group,
     * and the vdev configuration hasn't changed,
     * then there's nothing to do.
     */
    if (ub->ub_txg < txg) {
        boolean_t changed = uberblock_update(ub, spa->spa_root_vdev,
            txg, spa->spa_mmp.mmp_delay);

        if (!changed && list_is_empty(&spa->spa_config_dirty_list))
            return (0);
    }

    if (txg > spa_freeze_txg(spa))
        return (0);

    ASSERT(txg <= spa->spa_final_txg);

    /*
     * Flush the write cache of every disk that's been written to
     * in this transaction group.  This ensures that all blocks
     * written in this txg will be committed to stable storage
     * before any uberblock that references them.
     */
    zio_t *zio = zio_root(spa, NULL, NULL, flags);

    for (vdev_t *vd =
        txg_list_head(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)); vd != NULL;
        vd = txg_list_next(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)))
        zio_flush(zio, vd);

    (void) zio_wait(zio);
    /*
     * Sync out the even labels (L0, L2) for every dirty vdev.  If the
     * system dies in the middle of this process, that's OK: all of the
     * even labels that made it to disk will be newer than any uberblock,
     * and will therefore be considered invalid.  The odd labels (L1, L3),
     * which have not yet been touched, will still be valid.  We flush
     * the new labels to disk to ensure that all even-label updates
     * are committed to stable storage before the uberblock update.
     */
    if ((error = vdev_label_sync_list(spa, 0, txg, flags)) != 0) {
        if ((flags & ZIO_FLAG_TRYHARD) != 0) {
            zfs_dbgmsg("vdev_label_sync_list() returned error %d "
                "for pool '%s' when syncing out the even labels "
                "of dirty vdevs", error, spa_name(spa));
        }
        goto retry;
    }
    /*
     * Sync the uberblocks to all vdevs in svd[].
     * If the system dies in the middle of this step, there are two cases
     * to consider, and the on-disk state is consistent either way:
     *
     * (1)  If none of the new uberblocks made it to disk, then the
     *      previous uberblock will be the newest, and the odd labels
     *      (which had not yet been touched) will be valid with respect
     *      to that uberblock.
     *
     * (2)  If one or more new uberblocks made it to disk, then they
     *      will be the newest, and the even labels (which had all
     *      been successfully committed) will be valid with respect
     *      to the new uberblocks.
     */
    if ((error = vdev_uberblock_sync_list(svd, svdcount, ub, flags)) != 0) {
        if ((flags & ZIO_FLAG_TRYHARD) != 0) {
            zfs_dbgmsg("vdev_uberblock_sync_list() returned error "
                "%d for pool '%s'", error, spa_name(spa));
        }
        goto retry;
    }

    if (spa_multihost(spa))
        mmp_update_uberblock(spa, ub);
    /*
     * Sync out odd labels for every dirty vdev.  If the system dies
     * in the middle of this process, the even labels and the new
     * uberblocks will suffice to open the pool.  The next time
     * the pool is opened, the first thing we'll do -- before any
     * user data is modified -- is mark every vdev dirty so that
     * all labels will be brought up to date.  We flush the new labels
     * to disk to ensure that all odd-label updates are committed to
     * stable storage before the next transaction group begins.
     */
    if ((error = vdev_label_sync_list(spa, 1, txg, flags)) != 0) {
        if ((flags & ZIO_FLAG_TRYHARD) != 0) {
            zfs_dbgmsg("vdev_label_sync_list() returned error %d "
                "for pool '%s' when syncing out the odd labels of "
                "dirty vdevs", error, spa_name(spa));
        }
        goto retry;
    }

    trim_thread_wakeup(spa);

    return (0);
}