4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
25 * Copyright 2017 Nexenta Systems, Inc.
26 * Copyright 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
27 * Copyright (c) 2014 Integros [integros.com]
28 * Copyright 2016 Toomas Soome <tsoome@me.com>
29 * Copyright 2017 Joyent, Inc.
32 #include <sys/zfs_context.h>
33 #include <sys/fm/fs/zfs.h>
35 #include <sys/spa_impl.h>
36 #include <sys/bpobj.h>
38 #include <sys/dmu_tx.h>
39 #include <sys/dsl_dir.h>
40 #include <sys/vdev_impl.h>
41 #include <sys/uberblock_impl.h>
42 #include <sys/metaslab.h>
43 #include <sys/metaslab_impl.h>
44 #include <sys/space_map.h>
45 #include <sys/space_reftree.h>
48 #include <sys/fs/zfs.h>
51 #include <sys/dsl_scan.h>
53 #include <sys/trim_map.h>
54 #include <sys/vdev_initialize.h>
56 SYSCTL_DECL(_vfs_zfs);
57 SYSCTL_NODE(_vfs_zfs, OID_AUTO, vdev, CTLFLAG_RW, 0, "ZFS VDEV");
60 * Virtual device management.
64 * The limit for ZFS to automatically increase a top-level vdev's ashift
65 * from logical ashift to physical ashift.
67 * Example: one or more 512B emulation child vdevs
68 * child->vdev_ashift = 9 (512 bytes)
69 * child->vdev_physical_ashift = 12 (4096 bytes)
70 * zfs_max_auto_ashift = 11 (2048 bytes)
71 * zfs_min_auto_ashift = 9 (512 bytes)
73 * On pool creation or the addition of a new top-level vdev, ZFS will
74 * increase the ashift of the top-level vdev to 11 (2048 bytes), as limited by
75 * zfs_max_auto_ashift.
77 * Example: one or more 512B emulation child vdevs
78 * child->vdev_ashift = 9 (512 bytes)
79 * child->vdev_physical_ashift = 12 (4096 bytes)
80 * zfs_max_auto_ashift = 13 (8192 bytes)
81 * zfs_min_auto_ashift = 9 (512 bytes)
83 * On pool creation or the addition of a new top-level vdev, ZFS will
84 * increase the ashift of the top-level vdev to 12 (4096 bytes) to match the
85 * max vdev_physical_ashift.
87 * Example: one or more 512B emulation child vdevs
88 * child->vdev_ashift = 9 (512 bytes)
89 * child->vdev_physical_ashift = 9 (512 bytes)
90 * zfs_max_auto_ashift = 13 (8192 bytes)
91 * zfs_min_auto_ashift = 12 (4096 bytes)
93 * On pool creation or the addition of a new top-level vdev, ZFS will
94 * increase the ashift of the top-level vdev to 12 (4096 bytes) to match the
95 * zfs_min_auto_ashift.
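 *
 * In other words, when a child reports a physical ashift above its logical
 * ashift, vdev_ashift_optimize() (further down in this file) picks roughly
 *
 *   ashift = MIN(MAX(zfs_max_auto_ashift, logical ashift),
 *                MAX(zfs_min_auto_ashift, physical ashift))
 *
 * which yields 11 and 12 for the first two examples above; in the third
 * example the ashift is simply raised to zfs_min_auto_ashift (12).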
97 static uint64_t zfs_max_auto_ashift = SPA_MAXASHIFT;
98 static uint64_t zfs_min_auto_ashift = SPA_MINASHIFT;
101 sysctl_vfs_zfs_max_auto_ashift(SYSCTL_HANDLER_ARGS)
106 val = zfs_max_auto_ashift;
107 err = sysctl_handle_64(oidp, &val, 0, req);
108 if (err != 0 || req->newptr == NULL)
111 if (val > SPA_MAXASHIFT || val < zfs_min_auto_ashift)
114 zfs_max_auto_ashift = val;
118 SYSCTL_PROC(_vfs_zfs, OID_AUTO, max_auto_ashift,
119 CTLTYPE_U64 | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(uint64_t),
120 sysctl_vfs_zfs_max_auto_ashift, "QU",
121 "Max ashift used when optimising for logical -> physical sectors size on "
122 "new top-level vdevs.");
125 sysctl_vfs_zfs_min_auto_ashift(SYSCTL_HANDLER_ARGS)
130 val = zfs_min_auto_ashift;
131 err = sysctl_handle_64(oidp, &val, 0, req);
132 if (err != 0 || req->newptr == NULL)
135 if (val < SPA_MINASHIFT || val > zfs_max_auto_ashift)
138 zfs_min_auto_ashift = val;
142 SYSCTL_PROC(_vfs_zfs, OID_AUTO, min_auto_ashift,
143 CTLTYPE_U64 | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(uint64_t),
144 sysctl_vfs_zfs_min_auto_ashift, "QU",
145 "Min ashift used when creating new top-level vdevs.");
147 static vdev_ops_t *vdev_ops_table[] = {
166 /* maximum number of metaslabs per top-level vdev */
167 int vdev_max_ms_count = 200;
168 SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, max_ms_count, CTLFLAG_RDTUN,
169 &vdev_max_ms_count, 0,
170 "Maximum number of metaslabs per top-level vdev");
172 /* minimum number of metaslabs per top-level vdev */
173 int vdev_min_ms_count = 16;
174 SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, min_ms_count, CTLFLAG_RDTUN,
175 &vdev_min_ms_count, 0,
176 "Minimum number of metaslabs per top-level vdev");
178 /* see comment in vdev_metaslab_set_size() */
179 int vdev_default_ms_shift = 29;
180 SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, default_ms_shift, CTLFLAG_RDTUN,
181 &vdev_default_ms_shift, 0,
182 "Shift between vdev size and number of metaslabs");
184 boolean_t vdev_validate_skip = B_FALSE;
187 * Since the DTL space map of a vdev is not expected to have a lot of
188 * entries, we default its block size to 4K.
190 int vdev_dtl_sm_blksz = (1 << 12);
191 SYSCTL_INT(_vfs_zfs, OID_AUTO, dtl_sm_blksz, CTLFLAG_RDTUN,
192 &vdev_dtl_sm_blksz, 0,
193 "Block size for DTL space map. Power of 2 and greater than 4096.");
196 * vdev-wide space maps that have lots of entries written to them at
197 * the end of each transaction can benefit from a higher I/O bandwidth
198 * (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
200 int vdev_standard_sm_blksz = (1 << 17);
201 SYSCTL_INT(_vfs_zfs, OID_AUTO, standard_sm_blksz, CTLFLAG_RDTUN,
202 &vdev_standard_sm_blksz, 0,
203 "Block size for standard space map. Power of 2 and greater than 4096.");
207 vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
213 (void) vsnprintf(buf, sizeof (buf), fmt, adx);
216 if (vd->vdev_path != NULL) {
217 zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
220 zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
221 vd->vdev_ops->vdev_op_type,
222 (u_longlong_t)vd->vdev_id,
223 (u_longlong_t)vd->vdev_guid, buf);
228 vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
232 if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
233 zfs_dbgmsg("%*svdev %u: %s", indent, "", vd->vdev_id,
234 vd->vdev_ops->vdev_op_type);
238 switch (vd->vdev_state) {
239 case VDEV_STATE_UNKNOWN:
240 (void) snprintf(state, sizeof (state), "unknown");
242 case VDEV_STATE_CLOSED:
243 (void) snprintf(state, sizeof (state), "closed");
245 case VDEV_STATE_OFFLINE:
246 (void) snprintf(state, sizeof (state), "offline");
248 case VDEV_STATE_REMOVED:
249 (void) snprintf(state, sizeof (state), "removed");
251 case VDEV_STATE_CANT_OPEN:
252 (void) snprintf(state, sizeof (state), "can't open");
254 case VDEV_STATE_FAULTED:
255 (void) snprintf(state, sizeof (state), "faulted");
257 case VDEV_STATE_DEGRADED:
258 (void) snprintf(state, sizeof (state), "degraded");
260 case VDEV_STATE_HEALTHY:
261 (void) snprintf(state, sizeof (state), "healthy");
264 (void) snprintf(state, sizeof (state), "<state %u>",
265 (uint_t)vd->vdev_state);
268 zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
269 "", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
270 vd->vdev_islog ? " (log)" : "",
271 (u_longlong_t)vd->vdev_guid,
272 vd->vdev_path ? vd->vdev_path : "N/A", state);
274 for (uint64_t i = 0; i < vd->vdev_children; i++)
275 vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
279 * Given a vdev type, return the appropriate ops vector.
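 * For example, vdev_getops("mirror") is expected to return
 * &vdev_mirror_ops, while an unknown type string yields NULL.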
282 vdev_getops(const char *type)
284 vdev_ops_t *ops, **opspp;
286 for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
287 if (strcmp(ops->vdev_op_type, type) == 0)
295 vdev_default_xlate(vdev_t *vd, const range_seg_t *in, range_seg_t *res)
297 res->rs_start = in->rs_start;
298 res->rs_end = in->rs_end;
302 * Default asize function: return the MAX of psize with the asize of
303 * all children. This is what's used by anything other than RAID-Z.
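 *
 * For example, with a top-level ashift of 12 (4K sectors), a psize of 5000
 * bytes is rounded up to 8192 by P2ROUNDUP(), and a mirror returns the
 * largest asize reported by any of its children.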
306 vdev_default_asize(vdev_t *vd, uint64_t psize)
308 uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
311 for (int c = 0; c < vd->vdev_children; c++) {
312 csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
313 asize = MAX(asize, csize);
320 * Get the minimum allocatable size. We define the allocatable size as
321 * the vdev's asize rounded down to a metaslab boundary. This allows us to
322 * replace or attach devices which don't have the same physical size but
323 * can still satisfy the same number of allocations.
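 *
 * For example (a rough sketch of the raidz case handled below), a 3-child
 * raidz1 top-level vdev with a min_asize of 30G asks each child for at
 * least 30G / 3 = 10G (the division is rounded up).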
326 vdev_get_min_asize(vdev_t *vd)
328 vdev_t *pvd = vd->vdev_parent;
331 * If our parent is NULL (inactive spare or cache) or is the root,
332 * just return our own asize.
335 return (vd->vdev_asize);
338 * The top-level vdev just returns the allocatable size rounded
339 * to the nearest metaslab.
341 if (vd == vd->vdev_top)
342 return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));
345 * The allocatable space for a raidz vdev is N * sizeof(smallest child),
346 * so each child must provide at least 1/Nth of its asize.
348 if (pvd->vdev_ops == &vdev_raidz_ops)
349 return ((pvd->vdev_min_asize + pvd->vdev_children - 1) /
352 return (pvd->vdev_min_asize);
356 vdev_set_min_asize(vdev_t *vd)
358 vd->vdev_min_asize = vdev_get_min_asize(vd);
360 for (int c = 0; c < vd->vdev_children; c++)
361 vdev_set_min_asize(vd->vdev_child[c]);
365 vdev_lookup_top(spa_t *spa, uint64_t vdev)
367 vdev_t *rvd = spa->spa_root_vdev;
369 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
371 if (vdev < rvd->vdev_children) {
372 ASSERT(rvd->vdev_child[vdev] != NULL);
373 return (rvd->vdev_child[vdev]);
380 vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
384 if (vd->vdev_guid == guid)
387 for (int c = 0; c < vd->vdev_children; c++)
388 if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
396 vdev_count_leaves_impl(vdev_t *vd)
400 if (vd->vdev_ops->vdev_op_leaf)
403 for (int c = 0; c < vd->vdev_children; c++)
404 n += vdev_count_leaves_impl(vd->vdev_child[c]);
410 vdev_count_leaves(spa_t *spa)
412 return (vdev_count_leaves_impl(spa->spa_root_vdev));
416 vdev_add_child(vdev_t *pvd, vdev_t *cvd)
418 size_t oldsize, newsize;
419 uint64_t id = cvd->vdev_id;
421 spa_t *spa = cvd->vdev_spa;
423 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
424 ASSERT(cvd->vdev_parent == NULL);
426 cvd->vdev_parent = pvd;
431 ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);
433 oldsize = pvd->vdev_children * sizeof (vdev_t *);
434 pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
435 newsize = pvd->vdev_children * sizeof (vdev_t *);
437 newchild = kmem_zalloc(newsize, KM_SLEEP);
438 if (pvd->vdev_child != NULL) {
439 bcopy(pvd->vdev_child, newchild, oldsize);
440 kmem_free(pvd->vdev_child, oldsize);
443 pvd->vdev_child = newchild;
444 pvd->vdev_child[id] = cvd;
446 cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd);
447 ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
450 * Walk up all ancestors to update guid sum.
452 for (; pvd != NULL; pvd = pvd->vdev_parent)
453 pvd->vdev_guid_sum += cvd->vdev_guid_sum;
457 vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
460 uint_t id = cvd->vdev_id;
462 ASSERT(cvd->vdev_parent == pvd);
467 ASSERT(id < pvd->vdev_children);
468 ASSERT(pvd->vdev_child[id] == cvd);
470 pvd->vdev_child[id] = NULL;
471 cvd->vdev_parent = NULL;
473 for (c = 0; c < pvd->vdev_children; c++)
474 if (pvd->vdev_child[c])
477 if (c == pvd->vdev_children) {
478 kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
479 pvd->vdev_child = NULL;
480 pvd->vdev_children = 0;
484 * Walk up all ancestors to update guid sum.
486 for (; pvd != NULL; pvd = pvd->vdev_parent)
487 pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
491 * Remove any holes in the child array.
494 vdev_compact_children(vdev_t *pvd)
496 vdev_t **newchild, *cvd;
497 int oldc = pvd->vdev_children;
500 ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
502 for (int c = newc = 0; c < oldc; c++)
503 if (pvd->vdev_child[c])
506 newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);
508 for (int c = newc = 0; c < oldc; c++) {
509 if ((cvd = pvd->vdev_child[c]) != NULL) {
510 newchild[newc] = cvd;
511 cvd->vdev_id = newc++;
515 kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
516 pvd->vdev_child = newchild;
517 pvd->vdev_children = newc;
521 * Allocate and minimally initialize a vdev_t.
524 vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
527 vdev_indirect_config_t *vic;
529 vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
530 vic = &vd->vdev_indirect_config;
532 if (spa->spa_root_vdev == NULL) {
533 ASSERT(ops == &vdev_root_ops);
534 spa->spa_root_vdev = vd;
535 spa->spa_load_guid = spa_generate_guid(NULL);
538 if (guid == 0 && ops != &vdev_hole_ops) {
539 if (spa->spa_root_vdev == vd) {
541 * The root vdev's guid will also be the pool guid,
542 * which must be unique among all pools.
544 guid = spa_generate_guid(NULL);
547 * Any other vdev's guid must be unique within the pool.
549 guid = spa_generate_guid(spa);
551 ASSERT(!spa_guid_exists(spa_guid(spa), guid));
556 vd->vdev_guid = guid;
557 vd->vdev_guid_sum = guid;
559 vd->vdev_state = VDEV_STATE_CLOSED;
560 vd->vdev_ishole = (ops == &vdev_hole_ops);
561 vic->vic_prev_indirect_vdev = UINT64_MAX;
563 rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
564 mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
565 vd->vdev_obsolete_segments = range_tree_create(NULL, NULL);
567 mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
568 mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
569 mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
570 mutex_init(&vd->vdev_queue_lock, NULL, MUTEX_DEFAULT, NULL);
571 mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL);
572 mutex_init(&vd->vdev_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
573 mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL);
574 cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL);
575 cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL);
577 for (int t = 0; t < DTL_TYPES; t++) {
578 vd->vdev_dtl[t] = range_tree_create(NULL, NULL);
580 txg_list_create(&vd->vdev_ms_list, spa,
581 offsetof(struct metaslab, ms_txg_node));
582 txg_list_create(&vd->vdev_dtl_list, spa,
583 offsetof(struct vdev, vdev_dtl_node));
584 vd->vdev_stat.vs_timestamp = gethrtime();
592 * Allocate a new vdev. The 'alloctype' is used to control whether we are
593 * creating a new vdev or loading an existing one - the behavior is slightly
594 * different for each case.
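 * The alloctype values handled below are VDEV_ALLOC_LOAD, VDEV_ALLOC_ADD,
 * VDEV_ALLOC_SPARE, VDEV_ALLOC_L2CACHE, VDEV_ALLOC_ROOTPOOL,
 * VDEV_ALLOC_SPLIT and VDEV_ALLOC_ATTACH.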
597 vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
602 uint64_t guid = 0, islog, nparity;
604 vdev_indirect_config_t *vic;
606 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
608 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
609 return (SET_ERROR(EINVAL));
611 if ((ops = vdev_getops(type)) == NULL)
612 return (SET_ERROR(EINVAL));
615 * If this is a load, get the vdev guid from the nvlist.
616 * Otherwise, vdev_alloc_common() will generate one for us.
618 if (alloctype == VDEV_ALLOC_LOAD) {
621 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
623 return (SET_ERROR(EINVAL));
625 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
626 return (SET_ERROR(EINVAL));
627 } else if (alloctype == VDEV_ALLOC_SPARE) {
628 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
629 return (SET_ERROR(EINVAL));
630 } else if (alloctype == VDEV_ALLOC_L2CACHE) {
631 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
632 return (SET_ERROR(EINVAL));
633 } else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
634 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
635 return (SET_ERROR(EINVAL));
639 * The first allocated vdev must be of type 'root'.
641 if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
642 return (SET_ERROR(EINVAL));
645 * Determine whether we're a log vdev.
648 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
649 if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
650 return (SET_ERROR(ENOTSUP));
652 if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
653 return (SET_ERROR(ENOTSUP));
656 * Set the nparity property for RAID-Z vdevs.
659 if (ops == &vdev_raidz_ops) {
660 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
662 if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
663 return (SET_ERROR(EINVAL));
665 * Previous versions could only support 1 or 2 parity
669 spa_version(spa) < SPA_VERSION_RAIDZ2)
670 return (SET_ERROR(ENOTSUP));
672 spa_version(spa) < SPA_VERSION_RAIDZ3)
673 return (SET_ERROR(ENOTSUP));
676 * We require the parity to be specified for SPAs that
677 * support multiple parity levels.
679 if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
680 return (SET_ERROR(EINVAL));
682 * Otherwise, we default to 1 parity device for RAID-Z.
689 ASSERT(nparity != -1ULL);
691 vd = vdev_alloc_common(spa, id, guid, ops);
692 vic = &vd->vdev_indirect_config;
694 vd->vdev_islog = islog;
695 vd->vdev_nparity = nparity;
697 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
698 vd->vdev_path = spa_strdup(vd->vdev_path);
699 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
700 vd->vdev_devid = spa_strdup(vd->vdev_devid);
701 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
702 &vd->vdev_physpath) == 0)
703 vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
704 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
705 vd->vdev_fru = spa_strdup(vd->vdev_fru);
708 * Set the whole_disk property. If it's not specified, leave the value
711 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
712 &vd->vdev_wholedisk) != 0)
713 vd->vdev_wholedisk = -1ULL;
715 ASSERT0(vic->vic_mapping_object);
716 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
717 &vic->vic_mapping_object);
718 ASSERT0(vic->vic_births_object);
719 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
720 &vic->vic_births_object);
721 ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
722 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
723 &vic->vic_prev_indirect_vdev);
726 * Look for the 'not present' flag. This will only be set if the device
727 * was not present at the time of import.
729 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
730 &vd->vdev_not_present);
733 * Get the alignment requirement.
735 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);
738 * Retrieve the vdev creation time.
740 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
744 * If we're a top-level vdev, try to load the allocation parameters.
746 if (parent && !parent->vdev_parent &&
747 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
748 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
750 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
752 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
754 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
756 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
759 ASSERT0(vd->vdev_top_zap);
762 if (parent && !parent->vdev_parent && alloctype != VDEV_ALLOC_ATTACH) {
763 ASSERT(alloctype == VDEV_ALLOC_LOAD ||
764 alloctype == VDEV_ALLOC_ADD ||
765 alloctype == VDEV_ALLOC_SPLIT ||
766 alloctype == VDEV_ALLOC_ROOTPOOL);
767 vd->vdev_mg = metaslab_group_create(islog ?
768 spa_log_class(spa) : spa_normal_class(spa), vd,
769 spa->spa_alloc_count);
772 if (vd->vdev_ops->vdev_op_leaf &&
773 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
774 (void) nvlist_lookup_uint64(nv,
775 ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
777 ASSERT0(vd->vdev_leaf_zap);
781 * If we're a leaf vdev, try to load the DTL object and other state.
784 if (vd->vdev_ops->vdev_op_leaf &&
785 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
786 alloctype == VDEV_ALLOC_ROOTPOOL)) {
787 if (alloctype == VDEV_ALLOC_LOAD) {
788 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
789 &vd->vdev_dtl_object);
790 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
794 if (alloctype == VDEV_ALLOC_ROOTPOOL) {
797 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
798 &spare) == 0 && spare)
802 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
805 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
806 &vd->vdev_resilver_txg);
809 * When importing a pool, we want to ignore the persistent fault
810 * state, as the diagnosis made on another system may not be
811 * valid in the current context. Local vdevs will
812 * remain in the faulted state.
814 if (spa_load_state(spa) == SPA_LOAD_OPEN) {
815 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
817 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
819 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
822 if (vd->vdev_faulted || vd->vdev_degraded) {
826 VDEV_AUX_ERR_EXCEEDED;
827 if (nvlist_lookup_string(nv,
828 ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
829 strcmp(aux, "external") == 0)
830 vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
836 * Add ourselves to the parent's list of children.
838 vdev_add_child(parent, vd);
846 vdev_free(vdev_t *vd)
848 spa_t *spa = vd->vdev_spa;
849 ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
852 * Scan queues are normally destroyed at the end of a scan. If the
853 * queue exists here, that implies the vdev is being removed while
854 * the scan is still running.
856 if (vd->vdev_scan_io_queue != NULL) {
857 mutex_enter(&vd->vdev_scan_io_queue_lock);
858 dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue);
859 vd->vdev_scan_io_queue = NULL;
860 mutex_exit(&vd->vdev_scan_io_queue_lock);
864 * vdev_free() implies closing the vdev first. This is simpler than
865 * trying to ensure complicated semantics for all callers.
869 ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
870 ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
875 for (int c = 0; c < vd->vdev_children; c++)
876 vdev_free(vd->vdev_child[c]);
878 ASSERT(vd->vdev_child == NULL);
879 ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
880 ASSERT(vd->vdev_initialize_thread == NULL);
883 * Discard allocation state.
885 if (vd->vdev_mg != NULL) {
886 vdev_metaslab_fini(vd);
887 metaslab_group_destroy(vd->vdev_mg);
890 ASSERT0(vd->vdev_stat.vs_space);
891 ASSERT0(vd->vdev_stat.vs_dspace);
892 ASSERT0(vd->vdev_stat.vs_alloc);
895 * Remove this vdev from its parent's child list.
897 vdev_remove_child(vd->vdev_parent, vd);
899 ASSERT(vd->vdev_parent == NULL);
902 * Clean up vdev structure.
908 spa_strfree(vd->vdev_path);
910 spa_strfree(vd->vdev_devid);
911 if (vd->vdev_physpath)
912 spa_strfree(vd->vdev_physpath);
914 spa_strfree(vd->vdev_fru);
916 if (vd->vdev_isspare)
917 spa_spare_remove(vd);
918 if (vd->vdev_isl2cache)
919 spa_l2cache_remove(vd);
921 txg_list_destroy(&vd->vdev_ms_list);
922 txg_list_destroy(&vd->vdev_dtl_list);
924 mutex_enter(&vd->vdev_dtl_lock);
925 space_map_close(vd->vdev_dtl_sm);
926 for (int t = 0; t < DTL_TYPES; t++) {
927 range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
928 range_tree_destroy(vd->vdev_dtl[t]);
930 mutex_exit(&vd->vdev_dtl_lock);
932 EQUIV(vd->vdev_indirect_births != NULL,
933 vd->vdev_indirect_mapping != NULL);
934 if (vd->vdev_indirect_births != NULL) {
935 vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
936 vdev_indirect_births_close(vd->vdev_indirect_births);
939 if (vd->vdev_obsolete_sm != NULL) {
940 ASSERT(vd->vdev_removing ||
941 vd->vdev_ops == &vdev_indirect_ops);
942 space_map_close(vd->vdev_obsolete_sm);
943 vd->vdev_obsolete_sm = NULL;
945 range_tree_destroy(vd->vdev_obsolete_segments);
946 rw_destroy(&vd->vdev_indirect_rwlock);
947 mutex_destroy(&vd->vdev_obsolete_lock);
949 mutex_destroy(&vd->vdev_queue_lock);
950 mutex_destroy(&vd->vdev_dtl_lock);
951 mutex_destroy(&vd->vdev_stat_lock);
952 mutex_destroy(&vd->vdev_probe_lock);
953 mutex_destroy(&vd->vdev_scan_io_queue_lock);
954 mutex_destroy(&vd->vdev_initialize_lock);
955 mutex_destroy(&vd->vdev_initialize_io_lock);
956 cv_destroy(&vd->vdev_initialize_io_cv);
957 cv_destroy(&vd->vdev_initialize_cv);
959 if (vd == spa->spa_root_vdev)
960 spa->spa_root_vdev = NULL;
962 kmem_free(vd, sizeof (vdev_t));
966 * Transfer top-level vdev state from svd to tvd.
969 vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
971 spa_t *spa = svd->vdev_spa;
976 ASSERT(tvd == tvd->vdev_top);
978 tvd->vdev_ms_array = svd->vdev_ms_array;
979 tvd->vdev_ms_shift = svd->vdev_ms_shift;
980 tvd->vdev_ms_count = svd->vdev_ms_count;
981 tvd->vdev_top_zap = svd->vdev_top_zap;
983 svd->vdev_ms_array = 0;
984 svd->vdev_ms_shift = 0;
985 svd->vdev_ms_count = 0;
986 svd->vdev_top_zap = 0;
989 ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
990 tvd->vdev_mg = svd->vdev_mg;
991 tvd->vdev_ms = svd->vdev_ms;
996 if (tvd->vdev_mg != NULL)
997 tvd->vdev_mg->mg_vd = tvd;
999 tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm;
1000 svd->vdev_checkpoint_sm = NULL;
1002 tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
1003 tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
1004 tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;
1006 svd->vdev_stat.vs_alloc = 0;
1007 svd->vdev_stat.vs_space = 0;
1008 svd->vdev_stat.vs_dspace = 0;
1011 * State which may be set on a top-level vdev that's in the
1012 * process of being removed.
1014 ASSERT0(tvd->vdev_indirect_config.vic_births_object);
1015 ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
1016 ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
1017 ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
1018 ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
1019 ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
1020 ASSERT0(tvd->vdev_removing);
1021 tvd->vdev_removing = svd->vdev_removing;
1022 tvd->vdev_indirect_config = svd->vdev_indirect_config;
1023 tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping;
1024 tvd->vdev_indirect_births = svd->vdev_indirect_births;
1025 range_tree_swap(&svd->vdev_obsolete_segments,
1026 &tvd->vdev_obsolete_segments);
1027 tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm;
1028 svd->vdev_indirect_config.vic_mapping_object = 0;
1029 svd->vdev_indirect_config.vic_births_object = 0;
1030 svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL;
1031 svd->vdev_indirect_mapping = NULL;
1032 svd->vdev_indirect_births = NULL;
1033 svd->vdev_obsolete_sm = NULL;
1034 svd->vdev_removing = 0;
1036 for (t = 0; t < TXG_SIZE; t++) {
1037 while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
1038 (void) txg_list_add(&tvd->vdev_ms_list, msp, t);
1039 while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
1040 (void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
1041 if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
1042 (void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
1045 if (list_link_active(&svd->vdev_config_dirty_node)) {
1046 vdev_config_clean(svd);
1047 vdev_config_dirty(tvd);
1050 if (list_link_active(&svd->vdev_state_dirty_node)) {
1051 vdev_state_clean(svd);
1052 vdev_state_dirty(tvd);
1055 tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
1056 svd->vdev_deflate_ratio = 0;
1058 tvd->vdev_islog = svd->vdev_islog;
1059 svd->vdev_islog = 0;
1061 dsl_scan_io_queue_vdev_xfer(svd, tvd);
1065 vdev_top_update(vdev_t *tvd, vdev_t *vd)
1072 for (int c = 0; c < vd->vdev_children; c++)
1073 vdev_top_update(tvd, vd->vdev_child[c]);
1077 * Add a mirror/replacing vdev above an existing vdev.
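 * In tree form, vdev_add_parent() turns
 *
 *   pvd -> cvd    into    pvd -> mvd -> cvd
 *
 * where the new vdev mvd inherits cvd's sizes, ashift values and state.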
1080 vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
1082 spa_t *spa = cvd->vdev_spa;
1083 vdev_t *pvd = cvd->vdev_parent;
1086 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1088 mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);
1090 mvd->vdev_asize = cvd->vdev_asize;
1091 mvd->vdev_min_asize = cvd->vdev_min_asize;
1092 mvd->vdev_max_asize = cvd->vdev_max_asize;
1093 mvd->vdev_psize = cvd->vdev_psize;
1094 mvd->vdev_ashift = cvd->vdev_ashift;
1095 mvd->vdev_logical_ashift = cvd->vdev_logical_ashift;
1096 mvd->vdev_physical_ashift = cvd->vdev_physical_ashift;
1097 mvd->vdev_state = cvd->vdev_state;
1098 mvd->vdev_crtxg = cvd->vdev_crtxg;
1100 vdev_remove_child(pvd, cvd);
1101 vdev_add_child(pvd, mvd);
1102 cvd->vdev_id = mvd->vdev_children;
1103 vdev_add_child(mvd, cvd);
1104 vdev_top_update(cvd->vdev_top, cvd->vdev_top);
1106 if (mvd == mvd->vdev_top)
1107 vdev_top_transfer(cvd, mvd);
1113 * Remove a 1-way mirror/replacing vdev from the tree.
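 * This is the inverse of vdev_add_parent(): pvd -> mvd -> cvd collapses
 * back to pvd -> cvd, with cvd taking over mvd's id (and, for a top-level
 * vdev, its guid; see below).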
1116 vdev_remove_parent(vdev_t *cvd)
1118 vdev_t *mvd = cvd->vdev_parent;
1119 vdev_t *pvd = mvd->vdev_parent;
1121 ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1123 ASSERT(mvd->vdev_children == 1);
1124 ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
1125 mvd->vdev_ops == &vdev_replacing_ops ||
1126 mvd->vdev_ops == &vdev_spare_ops);
1127 cvd->vdev_ashift = mvd->vdev_ashift;
1128 cvd->vdev_logical_ashift = mvd->vdev_logical_ashift;
1129 cvd->vdev_physical_ashift = mvd->vdev_physical_ashift;
1131 vdev_remove_child(mvd, cvd);
1132 vdev_remove_child(pvd, mvd);
1135 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
1136 * Otherwise, we could have detached an offline device, and when we
1137 * go to import the pool we'll think we have two top-level vdevs,
1138 * instead of a different version of the same top-level vdev.
1140 if (mvd->vdev_top == mvd) {
1141 uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
1142 cvd->vdev_orig_guid = cvd->vdev_guid;
1143 cvd->vdev_guid += guid_delta;
1144 cvd->vdev_guid_sum += guid_delta;
1146 cvd->vdev_id = mvd->vdev_id;
1147 vdev_add_child(pvd, cvd);
1148 vdev_top_update(cvd->vdev_top, cvd->vdev_top);
1150 if (cvd == cvd->vdev_top)
1151 vdev_top_transfer(mvd, cvd);
1153 ASSERT(mvd->vdev_children == 0);
1158 vdev_metaslab_init(vdev_t *vd, uint64_t txg)
1160 spa_t *spa = vd->vdev_spa;
1161 objset_t *mos = spa->spa_meta_objset;
1163 uint64_t oldc = vd->vdev_ms_count;
1164 uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
1168 ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1171 * This vdev is not being allocated from yet or is a hole.
1173 if (vd->vdev_ms_shift == 0)
1176 ASSERT(!vd->vdev_ishole);
1178 ASSERT(oldc <= newc);
1180 mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
1183 bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
1184 kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
1188 vd->vdev_ms_count = newc;
1189 for (m = oldc; m < newc; m++) {
1190 uint64_t object = 0;
1193 * vdev_ms_array may be 0 if we are creating the "fake"
1194 * metaslabs for an indirect vdev for zdb's leak detection.
1195 * See zdb_leak_init().
1197 if (txg == 0 && vd->vdev_ms_array != 0) {
1198 error = dmu_read(mos, vd->vdev_ms_array,
1199 m * sizeof (uint64_t), sizeof (uint64_t), &object,
1202 vdev_dbgmsg(vd, "unable to read the metaslab "
1203 "array [error=%d]", error);
1208 error = metaslab_init(vd->vdev_mg, m, object, txg,
1211 vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
1218 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);
1221 * If the vdev is being removed we don't activate
1222 * the metaslabs since we want to ensure that no new
1223 * allocations are performed on this device.
1225 if (oldc == 0 && !vd->vdev_removing)
1226 metaslab_group_activate(vd->vdev_mg);
1229 spa_config_exit(spa, SCL_ALLOC, FTAG);
1235 vdev_metaslab_fini(vdev_t *vd)
1237 if (vd->vdev_checkpoint_sm != NULL) {
1238 ASSERT(spa_feature_is_active(vd->vdev_spa,
1239 SPA_FEATURE_POOL_CHECKPOINT));
1240 space_map_close(vd->vdev_checkpoint_sm);
1242 * Even though we close the space map, we need to set its
1243 * pointer to NULL. The reason is that vdev_metaslab_fini()
1244 * may be called multiple times for certain operations
1245 * (i.e. when destroying a pool) so we need to ensure that
1246 * this clause never executes twice. This logic is similar
1247 * to the one used for the vdev_ms clause below.
1249 vd->vdev_checkpoint_sm = NULL;
1252 if (vd->vdev_ms != NULL) {
1253 uint64_t count = vd->vdev_ms_count;
1255 metaslab_group_passivate(vd->vdev_mg);
1256 for (uint64_t m = 0; m < count; m++) {
1257 metaslab_t *msp = vd->vdev_ms[m];
1262 kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
1265 vd->vdev_ms_count = 0;
1267 ASSERT0(vd->vdev_ms_count);
1270 typedef struct vdev_probe_stats {
1271 boolean_t vps_readable;
1272 boolean_t vps_writeable;
1274 } vdev_probe_stats_t;
1277 vdev_probe_done(zio_t *zio)
1279 spa_t *spa = zio->io_spa;
1280 vdev_t *vd = zio->io_vd;
1281 vdev_probe_stats_t *vps = zio->io_private;
1283 ASSERT(vd->vdev_probe_zio != NULL);
1285 if (zio->io_type == ZIO_TYPE_READ) {
1286 if (zio->io_error == 0)
1287 vps->vps_readable = 1;
1288 if (zio->io_error == 0 && spa_writeable(spa)) {
1289 zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
1290 zio->io_offset, zio->io_size, zio->io_abd,
1291 ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
1292 ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
1294 abd_free(zio->io_abd);
1296 } else if (zio->io_type == ZIO_TYPE_WRITE) {
1297 if (zio->io_error == 0)
1298 vps->vps_writeable = 1;
1299 abd_free(zio->io_abd);
1300 } else if (zio->io_type == ZIO_TYPE_NULL) {
1303 vd->vdev_cant_read |= !vps->vps_readable;
1304 vd->vdev_cant_write |= !vps->vps_writeable;
1306 if (vdev_readable(vd) &&
1307 (vdev_writeable(vd) || !spa_writeable(spa))) {
1310 ASSERT(zio->io_error != 0);
1311 vdev_dbgmsg(vd, "failed probe");
1312 zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
1313 spa, vd, NULL, 0, 0);
1314 zio->io_error = SET_ERROR(ENXIO);
1317 mutex_enter(&vd->vdev_probe_lock);
1318 ASSERT(vd->vdev_probe_zio == zio);
1319 vd->vdev_probe_zio = NULL;
1320 mutex_exit(&vd->vdev_probe_lock);
1322 zio_link_t *zl = NULL;
1323 while ((pio = zio_walk_parents(zio, &zl)) != NULL)
1324 if (!vdev_accessible(vd, pio))
1325 pio->io_error = SET_ERROR(ENXIO);
1327 kmem_free(vps, sizeof (*vps));
1332 * Determine whether this device is accessible.
1334 * Read and write to several known locations: the pad regions of each
1335 * vdev label but the first, which we leave alone in case it contains
1339 vdev_probe(vdev_t *vd, zio_t *zio)
1341 spa_t *spa = vd->vdev_spa;
1342 vdev_probe_stats_t *vps = NULL;
1345 ASSERT(vd->vdev_ops->vdev_op_leaf);
1348 * Don't probe the probe.
1350 if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
1354 * To prevent 'probe storms' when a device fails, we create
1355 * just one probe i/o at a time. All zios that want to probe
1356 * this vdev will become parents of the probe io.
1358 mutex_enter(&vd->vdev_probe_lock);
1360 if ((pio = vd->vdev_probe_zio) == NULL) {
1361 vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
1363 vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
1364 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
1367 if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
1369 * vdev_cant_read and vdev_cant_write can only
1370 * transition from TRUE to FALSE when we have the
1371 * SCL_ZIO lock as writer; otherwise they can only
1372 * transition from FALSE to TRUE. This ensures that
1373 * any zio looking at these values can assume that
1374 * failures persist for the life of the I/O. That's
1375 * important because when a device has intermittent
1376 * connectivity problems, we want to ensure that
1377 * they're ascribed to the device (ENXIO) and not
1380 * Since we hold SCL_ZIO as writer here, clear both
1381 * values so the probe can reevaluate from first
1384 vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
1385 vd->vdev_cant_read = B_FALSE;
1386 vd->vdev_cant_write = B_FALSE;
1389 vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
1390 vdev_probe_done, vps,
1391 vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);
1394 * We can't change the vdev state in this context, so we
1395 * kick off an async task to do it on our behalf.
1398 vd->vdev_probe_wanted = B_TRUE;
1399 spa_async_request(spa, SPA_ASYNC_PROBE);
1404 zio_add_child(zio, pio);
1406 mutex_exit(&vd->vdev_probe_lock);
1409 ASSERT(zio != NULL);
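/*
 * With VDEV_LABELS == 4, the loop below probes the vl_pad2 region of
 * labels 1, 2 and 3; label 0 is left untouched, as noted in the comment
 * above this function.
 */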
1413 for (int l = 1; l < VDEV_LABELS; l++) {
1414 zio_nowait(zio_read_phys(pio, vd,
1415 vdev_label_offset(vd->vdev_psize, l,
1416 offsetof(vdev_label_t, vl_pad2)), VDEV_PAD_SIZE,
1417 abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
1418 ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
1419 ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
1430 vdev_open_child(void *arg)
1434 vd->vdev_open_thread = curthread;
1435 vd->vdev_open_error = vdev_open(vd);
1436 vd->vdev_open_thread = NULL;
1440 vdev_uses_zvols(vdev_t *vd)
1442 if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
1443 strlen(ZVOL_DIR)) == 0)
1445 for (int c = 0; c < vd->vdev_children; c++)
1446 if (vdev_uses_zvols(vd->vdev_child[c]))
1452 vdev_open_children(vdev_t *vd)
1455 int children = vd->vdev_children;
1458 * In order to handle pools on top of zvols, do the opens
1459 * in a single thread so that the same thread holds the
1460 * spa_namespace_lock.
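 * (The unconditional B_TRUE below keeps this single-threaded path in use,
 * so the taskq-based parallel open further down is effectively not taken.)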
1462 if (B_TRUE || vdev_uses_zvols(vd)) {
1463 for (int c = 0; c < children; c++)
1464 vd->vdev_child[c]->vdev_open_error =
1465 vdev_open(vd->vdev_child[c]);
1468 tq = taskq_create("vdev_open", children, minclsyspri,
1469 children, children, TASKQ_PREPOPULATE);
1471 for (int c = 0; c < children; c++)
1472 VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
1479 * Compute the raidz-deflation ratio. Note, we hard-code
1480 * in 128k (1 << 17) because it is the "typical" blocksize.
1481 * Even though SPA_MAXBLOCKSIZE changed, this algorithm cannot change,
1482 * otherwise it would inconsistently account for existing bp's.
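 *
 * As a rough example, for a plain disk or mirror top-level vdev the ratio
 * works out to (1 << 17) / ((1 << 17) >> SPA_MINBLOCKSHIFT) = 512, since a
 * 128k block allocates exactly 128k there; raidz vdevs allocate more per
 * block and therefore end up with a smaller deflate ratio.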
1485 vdev_set_deflate_ratio(vdev_t *vd)
1487 if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
1488 vd->vdev_deflate_ratio = (1 << 17) /
1489 (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
1494 * Prepare a virtual device for access.
1497 vdev_open(vdev_t *vd)
1499 spa_t *spa = vd->vdev_spa;
1502 uint64_t max_osize = 0;
1503 uint64_t asize, max_asize, psize;
1504 uint64_t logical_ashift = 0;
1505 uint64_t physical_ashift = 0;
1507 ASSERT(vd->vdev_open_thread == curthread ||
1508 spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1509 ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
1510 vd->vdev_state == VDEV_STATE_CANT_OPEN ||
1511 vd->vdev_state == VDEV_STATE_OFFLINE);
1513 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
1514 vd->vdev_cant_read = B_FALSE;
1515 vd->vdev_cant_write = B_FALSE;
1516 vd->vdev_notrim = B_FALSE;
1517 vd->vdev_min_asize = vdev_get_min_asize(vd);
1520 * If this vdev is not removed, check its fault status. If it's
1521 * faulted, bail out of the open.
1523 if (!vd->vdev_removed && vd->vdev_faulted) {
1524 ASSERT(vd->vdev_children == 0);
1525 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
1526 vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
1527 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1528 vd->vdev_label_aux);
1529 return (SET_ERROR(ENXIO));
1530 } else if (vd->vdev_offline) {
1531 ASSERT(vd->vdev_children == 0);
1532 vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
1533 return (SET_ERROR(ENXIO));
1536 error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize,
1537 &logical_ashift, &physical_ashift);
1540 * Reset the vdev_reopening flag so that we actually close
1541 * the vdev on error.
1543 vd->vdev_reopening = B_FALSE;
1544 if (zio_injection_enabled && error == 0)
1545 error = zio_handle_device_injection(vd, NULL, ENXIO);
1548 if (vd->vdev_removed &&
1549 vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
1550 vd->vdev_removed = B_FALSE;
1552 if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) {
1553 vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE,
1554 vd->vdev_stat.vs_aux);
1556 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1557 vd->vdev_stat.vs_aux);
1562 vd->vdev_removed = B_FALSE;
1565 * Recheck the faulted flag now that we have confirmed that
1566 * the vdev is accessible. If we're faulted, bail.
1568 if (vd->vdev_faulted) {
1569 ASSERT(vd->vdev_children == 0);
1570 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
1571 vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
1572 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1573 vd->vdev_label_aux);
1574 return (SET_ERROR(ENXIO));
1577 if (vd->vdev_degraded) {
1578 ASSERT(vd->vdev_children == 0);
1579 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
1580 VDEV_AUX_ERR_EXCEEDED);
1582 vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
1586 * For hole or missing vdevs we just return success.
1588 if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
1591 if (zfs_trim_enabled && !vd->vdev_notrim && vd->vdev_ops->vdev_op_leaf)
1592 trim_map_create(vd);
1594 for (int c = 0; c < vd->vdev_children; c++) {
1595 if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
1596 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
1602 osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
1603 max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));
1605 if (vd->vdev_children == 0) {
1606 if (osize < SPA_MINDEVSIZE) {
1607 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1608 VDEV_AUX_TOO_SMALL);
1609 return (SET_ERROR(EOVERFLOW));
1612 asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
1613 max_asize = max_osize - (VDEV_LABEL_START_SIZE +
1614 VDEV_LABEL_END_SIZE);
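/*
 * VDEV_LABEL_START_SIZE covers the two leading labels plus the boot block
 * region (roughly 4M) and VDEV_LABEL_END_SIZE the two trailing labels
 * (512K), so a leaf vdev loses about 4.5M of raw size to labels.
 */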
1616 if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
1617 (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
1618 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1619 VDEV_AUX_TOO_SMALL);
1620 return (SET_ERROR(EOVERFLOW));
1624 max_asize = max_osize;
1627 vd->vdev_psize = psize;
1630 * Make sure the allocatable size hasn't shrunk too much.
1632 if (asize < vd->vdev_min_asize) {
1633 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1634 VDEV_AUX_BAD_LABEL);
1635 return (SET_ERROR(EINVAL));
1638 vd->vdev_physical_ashift =
1639 MAX(physical_ashift, vd->vdev_physical_ashift);
1640 vd->vdev_logical_ashift = MAX(logical_ashift, vd->vdev_logical_ashift);
1641 vd->vdev_ashift = MAX(vd->vdev_logical_ashift, vd->vdev_ashift);
1643 if (vd->vdev_logical_ashift > SPA_MAXASHIFT) {
1644 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1645 VDEV_AUX_ASHIFT_TOO_BIG);
1649 if (vd->vdev_asize == 0) {
1651 * This is the first-ever open, so use the computed values.
1652 * For testing purposes, a higher ashift can be requested.
1654 vd->vdev_asize = asize;
1655 vd->vdev_max_asize = max_asize;
1658 * Make sure the alignment requirement hasn't increased.
1660 if (vd->vdev_ashift > vd->vdev_top->vdev_ashift &&
1661 vd->vdev_ops->vdev_op_leaf) {
1662 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1663 VDEV_AUX_BAD_LABEL);
1666 vd->vdev_max_asize = max_asize;
1670 * If all children are healthy we update asize if either:
1671 * The asize has increased, due to a device expansion caused by dynamic
1672 * LUN growth or vdev replacement, and automatic expansion is enabled;
1673 * making the additional space available.
1675 * The asize has decreased, due to a device shrink usually caused by a
1676 * vdev replace with a smaller device. This ensures that calculations
1677 * based on max_asize and asize, e.g. esize, are always valid. It's safe
1678 * to do this as we've already validated that asize is greater than
1681 if (vd->vdev_state == VDEV_STATE_HEALTHY &&
1682 ((asize > vd->vdev_asize &&
1683 (vd->vdev_expanding || spa->spa_autoexpand)) ||
1684 (asize < vd->vdev_asize)))
1685 vd->vdev_asize = asize;
1687 vdev_set_min_asize(vd);
1690 * Ensure we can issue some IO before declaring the
1691 * vdev open for business.
1693 if (vd->vdev_ops->vdev_op_leaf &&
1694 (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
1695 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1696 VDEV_AUX_ERR_EXCEEDED);
1701 * Track the min and max ashift values for normal data devices.
1703 if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
1704 !vd->vdev_islog && vd->vdev_aux == NULL) {
1705 if (vd->vdev_ashift > spa->spa_max_ashift)
1706 spa->spa_max_ashift = vd->vdev_ashift;
1707 if (vd->vdev_ashift < spa->spa_min_ashift)
1708 spa->spa_min_ashift = vd->vdev_ashift;
1712 * If a leaf vdev has a DTL, and seems healthy, then kick off a
1713 * resilver. But don't do this if we are doing a reopen for a scrub,
1714 * since this would just restart the scrub we are already doing.
1716 if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
1717 vdev_resilver_needed(vd, NULL, NULL))
1718 spa_async_request(spa, SPA_ASYNC_RESILVER);
1724 * Called once the vdevs are all opened, this routine validates the label
1725 * contents. This needs to be done before vdev_load() so that we don't
1726 * inadvertently do repair I/Os to the wrong device.
1728 * This function will only return failure if one of the vdevs indicates that it
1729 * has since been destroyed or exported. This is only possible if
1730 * /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
1731 * will be updated but the function will return 0.
1734 vdev_validate(vdev_t *vd)
1736 spa_t *spa = vd->vdev_spa;
1738 uint64_t guid = 0, aux_guid = 0, top_guid;
1743 if (vdev_validate_skip)
1746 for (uint64_t c = 0; c < vd->vdev_children; c++)
1747 if (vdev_validate(vd->vdev_child[c]) != 0)
1748 return (SET_ERROR(EBADF));
1751 * If the device has already failed, or was marked offline, don't do
1752 * any further validation. Otherwise, label I/O will fail and we will
1753 * overwrite the previous state.
1755 if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd))
1759 * If we are performing an extreme rewind, we allow for a label that
1760 * was modified at a point after the current txg.
1761 * If config lock is not held do not check for the txg. spa_sync could
1762 * be updating the vdev's label before updating spa_last_synced_txg.
1764 if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 ||
1765 spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG)
1768 txg = spa_last_synced_txg(spa);
1770 if ((label = vdev_label_read_config(vd, txg)) == NULL) {
1771 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1772 VDEV_AUX_BAD_LABEL);
1773 vdev_dbgmsg(vd, "vdev_validate: failed reading config for "
1774 "txg %llu", (u_longlong_t)txg);
1779 * Determine if this vdev has been split off into another
1780 * pool. If so, then refuse to open it.
1782 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
1783 &aux_guid) == 0 && aux_guid == spa_guid(spa)) {
1784 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1785 VDEV_AUX_SPLIT_POOL);
1787 vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool");
1791 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) {
1792 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1793 VDEV_AUX_CORRUPT_DATA);
1795 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
1796 ZPOOL_CONFIG_POOL_GUID);
1801 * If config is not trusted then ignore the spa guid check. This is
1802 * necessary because if the machine crashed during a re-guid the new
1803 * guid might have been written to all of the vdev labels, but not the
1804 * cached config. The check will be performed again once we have the
1805 * trusted config from the MOS.
1807 if (spa->spa_trust_config && guid != spa_guid(spa)) {
1808 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1809 VDEV_AUX_CORRUPT_DATA);
1811 vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't "
1812 "match config (%llu != %llu)", (u_longlong_t)guid,
1813 (u_longlong_t)spa_guid(spa));
1817 if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
1818 != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
1822 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) {
1823 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1824 VDEV_AUX_CORRUPT_DATA);
1826 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
1831 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid)
1833 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1834 VDEV_AUX_CORRUPT_DATA);
1836 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
1837 ZPOOL_CONFIG_TOP_GUID);
1842 * If this vdev just became a top-level vdev because its sibling was
1843 * detached, it will have adopted the parent's vdev guid -- but the
1844 * label may or may not be on disk yet. Fortunately, either version
1845 * of the label will have the same top guid, so if we're a top-level
1846 * vdev, we can safely compare to that instead.
1847 * However, if the config comes from a cachefile that failed to update
1848 * after the detach, a top-level vdev will appear as a non top-level
1849 * vdev in the config. Also relax the constraints if we perform an
1852 * If we split this vdev off instead, then we also check the
1853 * original pool's guid. We don't want to consider the vdev
1854 * corrupt if it is partway through a split operation.
1856 if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) {
1857 boolean_t mismatch = B_FALSE;
1858 if (spa->spa_trust_config && !spa->spa_extreme_rewind) {
1859 if (vd != vd->vdev_top || vd->vdev_guid != top_guid)
1862 if (vd->vdev_guid != top_guid &&
1863 vd->vdev_top->vdev_guid != guid)
1868 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1869 VDEV_AUX_CORRUPT_DATA);
1871 vdev_dbgmsg(vd, "vdev_validate: config guid "
1872 "doesn't match label guid");
1873 vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu",
1874 (u_longlong_t)vd->vdev_guid,
1875 (u_longlong_t)vd->vdev_top->vdev_guid);
1876 vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, "
1877 "aux_guid %llu", (u_longlong_t)guid,
1878 (u_longlong_t)top_guid, (u_longlong_t)aux_guid);
1883 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
1885 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1886 VDEV_AUX_CORRUPT_DATA);
1888 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
1889 ZPOOL_CONFIG_POOL_STATE);
1896 * If this is a verbatim import, no need to check the
1897 * state of the pool.
1899 if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
1900 spa_load_state(spa) == SPA_LOAD_OPEN &&
1901 state != POOL_STATE_ACTIVE) {
1902 vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) "
1903 "for spa %s", (u_longlong_t)state, spa->spa_name);
1904 return (SET_ERROR(EBADF));
1908 * If we were able to open and validate a vdev that was
1909 * previously marked permanently unavailable, clear that state
1912 if (vd->vdev_not_present)
1913 vd->vdev_not_present = 0;
1919 vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd)
1921 if (svd->vdev_path != NULL && dvd->vdev_path != NULL) {
1922 if (strcmp(svd->vdev_path, dvd->vdev_path) != 0) {
1923 zfs_dbgmsg("vdev_copy_path: vdev %llu: path changed "
1924 "from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
1925 dvd->vdev_path, svd->vdev_path);
1926 spa_strfree(dvd->vdev_path);
1927 dvd->vdev_path = spa_strdup(svd->vdev_path);
1929 } else if (svd->vdev_path != NULL) {
1930 dvd->vdev_path = spa_strdup(svd->vdev_path);
1931 zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'",
1932 (u_longlong_t)dvd->vdev_guid, dvd->vdev_path);
1937 * Recursively copy vdev paths from one vdev to another. Source and destination
1938 * vdev trees must have same geometry otherwise return error. Intended to copy
1939 * paths from userland config into MOS config.
1942 vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd)
1944 if ((svd->vdev_ops == &vdev_missing_ops) ||
1945 (svd->vdev_ishole && dvd->vdev_ishole) ||
1946 (dvd->vdev_ops == &vdev_indirect_ops))
1949 if (svd->vdev_ops != dvd->vdev_ops) {
1950 vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s",
1951 svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type);
1952 return (SET_ERROR(EINVAL));
1955 if (svd->vdev_guid != dvd->vdev_guid) {
1956 vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != "
1957 "%llu)", (u_longlong_t)svd->vdev_guid,
1958 (u_longlong_t)dvd->vdev_guid);
1959 return (SET_ERROR(EINVAL));
1962 if (svd->vdev_children != dvd->vdev_children) {
1963 vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: "
1964 "%llu != %llu", (u_longlong_t)svd->vdev_children,
1965 (u_longlong_t)dvd->vdev_children);
1966 return (SET_ERROR(EINVAL));
1969 for (uint64_t i = 0; i < svd->vdev_children; i++) {
1970 int error = vdev_copy_path_strict(svd->vdev_child[i],
1971 dvd->vdev_child[i]);
1976 if (svd->vdev_ops->vdev_op_leaf)
1977 vdev_copy_path_impl(svd, dvd);
1983 vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd)
1985 ASSERT(stvd->vdev_top == stvd);
1986 ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id);
1988 for (uint64_t i = 0; i < dvd->vdev_children; i++) {
1989 vdev_copy_path_search(stvd, dvd->vdev_child[i]);
1992 if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd))
1996 * The idea here is that while a vdev can shift positions within
1997 * a top vdev (when replacing, attaching mirror, etc.) it cannot
1998 * step outside of it.
2000 vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid);
2002 if (vd == NULL || vd->vdev_ops != dvd->vdev_ops)
2005 ASSERT(vd->vdev_ops->vdev_op_leaf);
2007 vdev_copy_path_impl(vd, dvd);
2011 * Recursively copy vdev paths from one root vdev to another. Source and
2012 * destination vdev trees may differ in geometry. For each destination leaf
2013 * vdev, search a vdev with the same guid and top vdev id in the source.
2014 * Intended to copy paths from userland config into MOS config.
2017 vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd)
2019 uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children);
2020 ASSERT(srvd->vdev_ops == &vdev_root_ops);
2021 ASSERT(drvd->vdev_ops == &vdev_root_ops);
2023 for (uint64_t i = 0; i < children; i++) {
2024 vdev_copy_path_search(srvd->vdev_child[i],
2025 drvd->vdev_child[i]);
2030 * Close a virtual device.
2033 vdev_close(vdev_t *vd)
2035 spa_t *spa = vd->vdev_spa;
2036 vdev_t *pvd = vd->vdev_parent;
2038 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2041 * If our parent is reopening, then we are as well, unless we are
2044 if (pvd != NULL && pvd->vdev_reopening)
2045 vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);
2047 vd->vdev_ops->vdev_op_close(vd);
2049 vdev_cache_purge(vd);
2051 if (vd->vdev_ops->vdev_op_leaf)
2052 trim_map_destroy(vd);
2055 * We record the previous state before we close it, so that if we are
2056 * doing a reopen(), we don't generate FMA ereports if we notice that
2057 * it's still faulted.
2059 vd->vdev_prevstate = vd->vdev_state;
2061 if (vd->vdev_offline)
2062 vd->vdev_state = VDEV_STATE_OFFLINE;
2064 vd->vdev_state = VDEV_STATE_CLOSED;
2065 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
2069 vdev_hold(vdev_t *vd)
2071 spa_t *spa = vd->vdev_spa;
2073 ASSERT(spa_is_root(spa));
2074 if (spa->spa_state == POOL_STATE_UNINITIALIZED)
2077 for (int c = 0; c < vd->vdev_children; c++)
2078 vdev_hold(vd->vdev_child[c]);
2080 if (vd->vdev_ops->vdev_op_leaf)
2081 vd->vdev_ops->vdev_op_hold(vd);
2085 vdev_rele(vdev_t *vd)
2087 spa_t *spa = vd->vdev_spa;
2089 ASSERT(spa_is_root(spa));
2090 for (int c = 0; c < vd->vdev_children; c++)
2091 vdev_rele(vd->vdev_child[c]);
2093 if (vd->vdev_ops->vdev_op_leaf)
2094 vd->vdev_ops->vdev_op_rele(vd);
2098 * Reopen all interior vdevs and any unopened leaves. We don't actually
2099 * reopen leaf vdevs which had previously been opened as they might deadlock
2100 * on the spa_config_lock. Instead we only obtain the leaf's physical size.
2101 * If the leaf has never been opened then open it, as usual.
2104 vdev_reopen(vdev_t *vd)
2106 spa_t *spa = vd->vdev_spa;
2108 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2110 /* set the reopening flag unless we're taking the vdev offline */
2111 vd->vdev_reopening = !vd->vdev_offline;
2113 (void) vdev_open(vd);
2116 * Call vdev_validate() here to make sure we have the same device.
2117 * Otherwise, a device with an invalid label could be successfully
2118 * opened in response to vdev_reopen().
2121 (void) vdev_validate_aux(vd);
2122 if (vdev_readable(vd) && vdev_writeable(vd) &&
2123 vd->vdev_aux == &spa->spa_l2cache &&
2124 !l2arc_vdev_present(vd))
2125 l2arc_add_vdev(spa, vd);
2127 (void) vdev_validate(vd);
2131 * Reassess parent vdev's health.
2133 vdev_propagate_state(vd);
2137 vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
2142 * Normally, partial opens (e.g. of a mirror) are allowed.
2143 * For a create, however, we want to fail the request if
2144 * there are any components we can't open.
2146 error = vdev_open(vd);
2148 if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
2150 return (error ? error : ENXIO);
2154 * Recursively load DTLs and initialize all labels.
2156 if ((error = vdev_dtl_load(vd)) != 0 ||
2157 (error = vdev_label_init(vd, txg, isreplacing ?
2158 VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
2167 vdev_metaslab_set_size(vdev_t *vd)
2169 uint64_t asize = vd->vdev_asize;
2170 uint64_t ms_shift = 0;
2173 * For vdevs that are bigger than 8G the metaslab size is kept at
2174 * 1 << vdev_default_ms_shift, so the number of metaslabs grows linearly
2175 * with vdev_asize, starting from 16 metaslabs.
2176 * So for vdev_asize of 8G we get 16 metaslabs, for 16G, we get 32,
2177 * and so on, until we hit the maximum metaslab count limit
2178 * [vdev_max_ms_count] from which point the metaslab count stays
2181 ms_shift = vdev_default_ms_shift;
2183 if ((asize >> ms_shift) < vdev_min_ms_count) {
2185 * For devices that are less than 8G we want to have
2186 * exactly 16 metaslabs. We don't want fewer, as integer
2187 * division rounds down and fewer metaslabs mean more
2188 * wasted space. We don't want more, as these vdevs are
2189 * small and in the likely event that we are running
2190 * out of space, the SPA will have a hard time finding
2191 * space due to fragmentation.
2193 ms_shift = highbit64(asize / vdev_min_ms_count);
2194 ms_shift = MAX(ms_shift, SPA_MAXBLOCKSHIFT);
2196 } else if ((asize >> ms_shift) > vdev_max_ms_count) {
2197 ms_shift = highbit64(asize / vdev_max_ms_count);
2200 vd->vdev_ms_shift = ms_shift;
2201 ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT);
2205 * Maximize performance by inflating the configured ashift for top level
2206 * vdevs to be as close to the physical ashift as possible while maintaining
2207 * administrator defined limits and ensuring it doesn't go below the
2211 vdev_ashift_optimize(vdev_t *vd)
2213 if (vd == vd->vdev_top) {
2214 if (vd->vdev_ashift < vd->vdev_physical_ashift) {
2215 vd->vdev_ashift = MIN(
2216 MAX(zfs_max_auto_ashift, vd->vdev_ashift),
2217 MAX(zfs_min_auto_ashift, vd->vdev_physical_ashift));
2220 * Unusual case where logical ashift > physical ashift
2221 * so we can't cap the calculated ashift based on max
2222 * ashift as that would cause failures.
2223 * We still check if we need to increase it to match
2226 vd->vdev_ashift = MAX(zfs_min_auto_ashift,
2233 vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
2235 ASSERT(vd == vd->vdev_top);
2236 /* indirect vdevs don't have metaslabs or dtls */
2237 ASSERT(vdev_is_concrete(vd) || flags == 0);
2238 ASSERT(ISP2(flags));
2239 ASSERT(spa_writeable(vd->vdev_spa));
2241 if (flags & VDD_METASLAB)
2242 (void) txg_list_add(&vd->vdev_ms_list, arg, txg);
2244 if (flags & VDD_DTL)
2245 (void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
2247 (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
2251 vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
2253 for (int c = 0; c < vd->vdev_children; c++)
2254 vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
2256 if (vd->vdev_ops->vdev_op_leaf)
2257 vdev_dirty(vd->vdev_top, flags, vd, txg);
2263 * A vdev's DTL (dirty time log) is the set of transaction groups for which
2264 * the vdev has less than perfect replication. There are four kinds of DTL:
2266 * DTL_MISSING: txgs for which the vdev has no valid copies of the data
2268 * DTL_PARTIAL: txgs for which data is available, but not fully replicated
2270 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
2271 * scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
2272 * txgs that was scrubbed.
2274 * DTL_OUTAGE: txgs which cannot currently be read, whether due to
2275 * persistent errors or just some device being offline.
2276 * Unlike the other three, the DTL_OUTAGE map is not generally
2277 * maintained; it's only computed when needed, typically to
2278 * determine whether a device can be detached.
2280 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
2281 * either has the data or it doesn't.
2283 * For interior vdevs such as mirror and RAID-Z the picture is more complex.
2284 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
2285 * if any child is less than fully replicated, then so is its parent.
2286 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
2287 * comprising only those txgs which appear in 'maxfaults + 1' or more children;
2288 * those are the txgs we don't have enough replication to read. For example,
2289 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
2290 * thus, its DTL_MISSING consists of the set of txgs that appear in more than
2291 * two child DTL_MISSING maps.
2293 * It should be clear from the above that to compute the DTLs and outage maps
2294 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
2295 * Therefore, that is all we keep on disk. When loading the pool, or after
2296 * a configuration change, we generate all other DTLs from first principles.
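 *
 * As a concrete (hypothetical) illustration, consider a three-way mirror
 * (maxfaults == 2) whose children have DTL_MISSING maps covering txgs
 * [10, 20), [15, 30) and [25, 40). The mirror's DTL_PARTIAL is the union
 * of the three, [10, 40), while its DTL_MISSING is empty because no single
 * txg is missing from all three children at once.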
2299 vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
2301 range_tree_t *rt = vd->vdev_dtl[t];
2303 ASSERT(t < DTL_TYPES);
2304 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2305 ASSERT(spa_writeable(vd->vdev_spa));
2307 mutex_enter(&vd->vdev_dtl_lock);
2308 if (!range_tree_contains(rt, txg, size))
2309 range_tree_add(rt, txg, size);
2310 mutex_exit(&vd->vdev_dtl_lock);
2314 vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
2316 range_tree_t *rt = vd->vdev_dtl[t];
2317 boolean_t dirty = B_FALSE;
2319 ASSERT(t < DTL_TYPES);
2320 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2323 * While we are loading the pool, the DTLs have not been loaded yet.
2324 * Ignore the DTLs and try all devices. This avoids a recursive
2325 * mutex enter on the vdev_dtl_lock, and also makes us try hard
2326 * when loading the pool (relying on the checksum to ensure that
2327 * we get the right data -- note that while loading, we are
2328 * only reading the MOS, which is always checksummed).
2330 if (vd->vdev_spa->spa_load_state != SPA_LOAD_NONE)
2333 mutex_enter(&vd->vdev_dtl_lock);
2334 if (!range_tree_is_empty(rt))
2335 dirty = range_tree_contains(rt, txg, size);
2336 mutex_exit(&vd->vdev_dtl_lock);
2342 vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
2344 range_tree_t *rt = vd->vdev_dtl[t];
2347 mutex_enter(&vd->vdev_dtl_lock);
2348 empty = range_tree_is_empty(rt);
2349 mutex_exit(&vd->vdev_dtl_lock);
2355 * Returns B_TRUE if vdev determines offset needs to be resilvered.
2358 vdev_dtl_need_resilver(vdev_t *vd, uint64_t offset, size_t psize)
2360 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2362 if (vd->vdev_ops->vdev_op_need_resilver == NULL ||
2363 vd->vdev_ops->vdev_op_leaf)
2366 return (vd->vdev_ops->vdev_op_need_resilver(vd, offset, psize));
2370 * Returns the lowest txg in the DTL range.
2373 vdev_dtl_min(vdev_t *vd)
2377 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
2378 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
2379 ASSERT0(vd->vdev_children);
2381 rs = avl_first(&vd->vdev_dtl[DTL_MISSING]->rt_root);
2382 return (rs->rs_start - 1);
2386 * Returns the highest txg in the DTL.
2389 vdev_dtl_max(vdev_t *vd)
2393 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
2394 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
2395 ASSERT0(vd->vdev_children);
2397 rs = avl_last(&vd->vdev_dtl[DTL_MISSING]->rt_root);
2398 return (rs->rs_end);
2402 * Determine if a resilvering vdev should remove any DTL entries from
2403 * its range. If the vdev was resilvering for the entire duration of the
2404 * scan then it should excise that range from its DTLs. Otherwise, this
2405 * vdev is considered partially resilvered and should leave its DTL
2406 * entries intact. The comment in vdev_dtl_reassess() describes how we
2410 vdev_dtl_should_excise(vdev_t *vd)
2412 spa_t *spa = vd->vdev_spa;
2413 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
2415 ASSERT0(scn->scn_phys.scn_errors);
2416 ASSERT0(vd->vdev_children);
2418 if (vd->vdev_state < VDEV_STATE_DEGRADED)
2421 if (vd->vdev_resilver_txg == 0 ||
2422 range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
2426 * When a resilver is initiated the scan will assign the scn_max_txg
2427 * value to the highest txg value that exists in all DTLs. If this
2428 * device's max DTL is not part of this scan (i.e. it is not in
2429 * the range (scn_min_txg, scn_max_txg]) then it is not eligible
2432 if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
2433 ASSERT3U(scn->scn_phys.scn_min_txg, <=, vdev_dtl_min(vd));
2434 ASSERT3U(scn->scn_phys.scn_min_txg, <, vd->vdev_resilver_txg);
2435 ASSERT3U(vd->vdev_resilver_txg, <=, scn->scn_phys.scn_max_txg);
2442 * Reassess DTLs after a config change or scrub completion.
2445 vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
2447 spa_t *spa = vd->vdev_spa;
2451 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
2453 for (int c = 0; c < vd->vdev_children; c++)
2454 vdev_dtl_reassess(vd->vdev_child[c], txg,
2455 scrub_txg, scrub_done);
2457 if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
2460 if (vd->vdev_ops->vdev_op_leaf) {
2461 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
2463 mutex_enter(&vd->vdev_dtl_lock);
2466 * If we've completed a scan cleanly then determine
2467 * if this vdev should remove any DTLs. We only want to
2468 * excise regions on vdevs that were available during
2469 * the entire duration of this scan.
2471 if (scrub_txg != 0 &&
2472 (spa->spa_scrub_started ||
2473 (scn != NULL && scn->scn_phys.scn_errors == 0)) &&
2474 vdev_dtl_should_excise(vd)) {
2476 * We completed a scrub up to scrub_txg. If we
2477 * did it without rebooting, then the scrub dtl
2478 * will be valid, so excise the old region and
2479 * fold in the scrub dtl. Otherwise, leave the
2480 * dtl as-is if there was an error.
2482 * There's a little trick here: to excise the beginning
2483 * of the DTL_MISSING map, we put it into a reference
2484 * tree and then add a segment with refcnt -1 that
2485 * covers the range [0, scrub_txg). This means
2486 * that each txg in that range has refcnt -1 or 0.
2487 * We then add DTL_SCRUB with a refcnt of 2, so that
2488 * entries in the range [0, scrub_txg) will have a
2489 * positive refcnt -- either 1 or 2. We then convert
2490 * the reference tree into the new DTL_MISSING map.
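 *
 * As a hypothetical illustration: suppose scrub_txg == 100,
 * DTL_MISSING covers txgs [50, 150) and DTL_SCRUB covers
 * [60, 70) (txgs the scrub could not repair). The reference
 * counts then come out as:
 *   [0, 50):    -1  (the -1 segment only)
 *   [50, 60):    0  (MISSING +1, segment -1)
 *   [60, 70):    2  (MISSING +1, segment -1, SCRUB +2)
 *   [70, 100):   0  (MISSING +1, segment -1)
 *   [100, 150):  1  (MISSING +1)
 * Generating the map with minref 1 keeps [60, 70) and
 * [100, 150): exactly the txgs that are still missing.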
2492 space_reftree_create(&reftree);
2493 space_reftree_add_map(&reftree,
2494 vd->vdev_dtl[DTL_MISSING], 1);
2495 space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
2496 space_reftree_add_map(&reftree,
2497 vd->vdev_dtl[DTL_SCRUB], 2);
2498 space_reftree_generate_map(&reftree,
2499 vd->vdev_dtl[DTL_MISSING], 1);
2500 space_reftree_destroy(&reftree);
2502 range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
2503 range_tree_walk(vd->vdev_dtl[DTL_MISSING],
2504 range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
2506 range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
2507 range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
2508 if (!vdev_readable(vd))
2509 range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
2511 range_tree_walk(vd->vdev_dtl[DTL_MISSING],
2512 range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
2515 * If the vdev was resilvering and no longer has any
2516 * DTLs then reset its resilvering flag and dirty
2517 * the top level so that we persist the change.
2519 if (vd->vdev_resilver_txg != 0 &&
2520 range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
2521 range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) {
2522 vd->vdev_resilver_txg = 0;
2523 vdev_config_dirty(vd->vdev_top);
2526 mutex_exit(&vd->vdev_dtl_lock);
2529 vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
2533 mutex_enter(&vd->vdev_dtl_lock);
2534 for (int t = 0; t < DTL_TYPES; t++) {
2535 /* account for child's outage in parent's missing map */
2536 int s = (t == DTL_MISSING) ? DTL_OUTAGE: t;
2538 continue; /* leaf vdevs only */
2539 if (t == DTL_PARTIAL)
2540 minref = 1; /* i.e. non-zero */
2541 else if (vd->vdev_nparity != 0)
2542 minref = vd->vdev_nparity + 1; /* RAID-Z */
2544 minref = vd->vdev_children; /* any kind of mirror */
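/*
 * For example, a three-way mirror uses minref == 3, and double-parity
 * RAID-Z uses minref == 3 (nparity + 1): a txg lands in the parent's
 * map only when at least minref children have it in theirs.
 */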
2545 space_reftree_create(&reftree);
2546 for (int c = 0; c < vd->vdev_children; c++) {
2547 vdev_t *cvd = vd->vdev_child[c];
2548 mutex_enter(&cvd->vdev_dtl_lock);
2549 space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1);
2550 mutex_exit(&cvd->vdev_dtl_lock);
2552 space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref);
2553 space_reftree_destroy(&reftree);
2555 mutex_exit(&vd->vdev_dtl_lock);
2559 vdev_dtl_load(vdev_t *vd)
2561 spa_t *spa = vd->vdev_spa;
2562 objset_t *mos = spa->spa_meta_objset;
2565 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
2566 ASSERT(vdev_is_concrete(vd));
2568 error = space_map_open(&vd->vdev_dtl_sm, mos,
2569 vd->vdev_dtl_object, 0, -1ULL, 0);
2572 ASSERT(vd->vdev_dtl_sm != NULL);
2574 mutex_enter(&vd->vdev_dtl_lock);
2577 * Now that we've opened the space_map we need to update
2580 space_map_update(vd->vdev_dtl_sm);
2582 error = space_map_load(vd->vdev_dtl_sm,
2583 vd->vdev_dtl[DTL_MISSING], SM_ALLOC);
2584 mutex_exit(&vd->vdev_dtl_lock);
2589 for (int c = 0; c < vd->vdev_children; c++) {
2590 error = vdev_dtl_load(vd->vdev_child[c]);
2599 vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx)
2601 spa_t *spa = vd->vdev_spa;
2603 VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx));
2604 VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
2609 vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx)
2611 spa_t *spa = vd->vdev_spa;
2612 uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA,
2613 DMU_OT_NONE, 0, tx);
2616 VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
2623 vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
2625 if (vd->vdev_ops != &vdev_hole_ops &&
2626 vd->vdev_ops != &vdev_missing_ops &&
2627 vd->vdev_ops != &vdev_root_ops &&
2628 !vd->vdev_top->vdev_removing) {
2629 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) {
2630 vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx);
2632 if (vd == vd->vdev_top && vd->vdev_top_zap == 0) {
2633 vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
2636 for (uint64_t i = 0; i < vd->vdev_children; i++) {
2637 vdev_construct_zaps(vd->vdev_child[i], tx);
2642 vdev_dtl_sync(vdev_t *vd, uint64_t txg)
2644 spa_t *spa = vd->vdev_spa;
2645 range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
2646 objset_t *mos = spa->spa_meta_objset;
2647 range_tree_t *rtsync;
2649 uint64_t object = space_map_object(vd->vdev_dtl_sm);
2651 ASSERT(vdev_is_concrete(vd));
2652 ASSERT(vd->vdev_ops->vdev_op_leaf);
2654 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
2656 if (vd->vdev_detached || vd->vdev_top->vdev_removing) {
2657 mutex_enter(&vd->vdev_dtl_lock);
2658 space_map_free(vd->vdev_dtl_sm, tx);
2659 space_map_close(vd->vdev_dtl_sm);
2660 vd->vdev_dtl_sm = NULL;
2661 mutex_exit(&vd->vdev_dtl_lock);
2664 * We only destroy the leaf ZAP for detached leaves or for
2665 * removed log devices. Removed data devices handle leaf ZAP
2666 * cleanup later, once cancellation is no longer possible.
2668 if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached ||
2669 vd->vdev_top->vdev_islog)) {
2670 vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx);
2671 vd->vdev_leaf_zap = 0;
2678 if (vd->vdev_dtl_sm == NULL) {
2679 uint64_t new_object;
2681 new_object = space_map_alloc(mos, vdev_dtl_sm_blksz, tx);
2682 VERIFY3U(new_object, !=, 0);
2684 VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
2686 ASSERT(vd->vdev_dtl_sm != NULL);
2689 rtsync = range_tree_create(NULL, NULL);
2691 mutex_enter(&vd->vdev_dtl_lock);
2692 range_tree_walk(rt, range_tree_add, rtsync);
2693 mutex_exit(&vd->vdev_dtl_lock);
2695 space_map_truncate(vd->vdev_dtl_sm, vdev_dtl_sm_blksz, tx);
2696 space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx);
2697 range_tree_vacate(rtsync, NULL, NULL);
2699 range_tree_destroy(rtsync);
2702 * If the object for the space map has changed then dirty
2703 * the top level so that we update the config.
2705 if (object != space_map_object(vd->vdev_dtl_sm)) {
2706 vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, "
2707 "new object %llu", (u_longlong_t)txg, spa_name(spa),
2708 (u_longlong_t)object,
2709 (u_longlong_t)space_map_object(vd->vdev_dtl_sm));
2710 vdev_config_dirty(vd->vdev_top);
2715 mutex_enter(&vd->vdev_dtl_lock);
2716 space_map_update(vd->vdev_dtl_sm);
2717 mutex_exit(&vd->vdev_dtl_lock);
2721 * Determine whether the specified vdev can be offlined/detached/removed
2722 * without losing data.
2725 vdev_dtl_required(vdev_t *vd)
2727 spa_t *spa = vd->vdev_spa;
2728 vdev_t *tvd = vd->vdev_top;
2729 uint8_t cant_read = vd->vdev_cant_read;
2732 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2734 if (vd == spa->spa_root_vdev || vd == tvd)
2738 * Temporarily mark the device as unreadable, and then determine
2739 * whether this results in any DTL outages in the top-level vdev.
2740 * If not, we can safely offline/detach/remove the device.
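 * For example, when detaching one half of a two-way mirror, the detach
 * is allowed only if the surviving child is readable and has no missing
 * txgs; if pretending this child is unreadable leaves any DTL_OUTAGE on
 * the top-level vdev, the device is still required.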
2742 vd->vdev_cant_read = B_TRUE;
2743 vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
2744 required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
2745 vd->vdev_cant_read = cant_read;
2746 vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
2748 if (!required && zio_injection_enabled)
2749 required = !!zio_handle_device_injection(vd, NULL, ECHILD);
2755 * Determine if resilver is needed, and if so the txg range.
2758 vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
2760 boolean_t needed = B_FALSE;
2761 uint64_t thismin = UINT64_MAX;
2762 uint64_t thismax = 0;
2764 if (vd->vdev_children == 0) {
2765 mutex_enter(&vd->vdev_dtl_lock);
2766 if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
2767 vdev_writeable(vd)) {
2769 thismin = vdev_dtl_min(vd);
2770 thismax = vdev_dtl_max(vd);
2773 mutex_exit(&vd->vdev_dtl_lock);
2775 for (int c = 0; c < vd->vdev_children; c++) {
2776 vdev_t *cvd = vd->vdev_child[c];
2777 uint64_t cmin, cmax;
2779 if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
2780 thismin = MIN(thismin, cmin);
2781 thismax = MAX(thismax, cmax);
2787 if (needed && minp) {
2795 * Gets the checkpoint space map object from the vdev's ZAP.
2796 * Returns the spacemap object, or 0 if it wasn't in the ZAP
2797 * or the ZAP doesn't exist yet.
2800 vdev_checkpoint_sm_object(vdev_t *vd)
2802 ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
2803 if (vd->vdev_top_zap == 0) {
2807 uint64_t sm_obj = 0;
2808 int err = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap,
2809 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, &sm_obj);
2811 ASSERT(err == 0 || err == ENOENT);
2817 vdev_load(vdev_t *vd)
2821 * Recursively load all children.
2823 for (int c = 0; c < vd->vdev_children; c++) {
2824 error = vdev_load(vd->vdev_child[c]);
2830 vdev_set_deflate_ratio(vd);
2833 * If this is a top-level vdev, initialize its metaslabs.
2835 if (vd == vd->vdev_top && vdev_is_concrete(vd)) {
2836 if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
2837 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2838 VDEV_AUX_CORRUPT_DATA);
2839 vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, "
2840 "asize=%llu", (u_longlong_t)vd->vdev_ashift,
2841 (u_longlong_t)vd->vdev_asize);
2842 return (SET_ERROR(ENXIO));
2843 } else if ((error = vdev_metaslab_init(vd, 0)) != 0) {
2844 vdev_dbgmsg(vd, "vdev_load: metaslab_init failed "
2845 "[error=%d]", error);
2846 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2847 VDEV_AUX_CORRUPT_DATA);
2851 uint64_t checkpoint_sm_obj = vdev_checkpoint_sm_object(vd);
2852 if (checkpoint_sm_obj != 0) {
2853 objset_t *mos = spa_meta_objset(vd->vdev_spa);
2854 ASSERT(vd->vdev_asize != 0);
2855 ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
2857 if ((error = space_map_open(&vd->vdev_checkpoint_sm,
2858 mos, checkpoint_sm_obj, 0, vd->vdev_asize,
2859 vd->vdev_ashift))) {
2860 vdev_dbgmsg(vd, "vdev_load: space_map_open "
2861 "failed for checkpoint spacemap (obj %llu) "
2863 (u_longlong_t)checkpoint_sm_obj, error);
2866 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
2867 space_map_update(vd->vdev_checkpoint_sm);
2870 * Since the checkpoint_sm contains free entries
2871 * exclusively we can use sm_alloc to indicate the
2872 * cumulative checkpointed space that has been freed.
2874 vd->vdev_stat.vs_checkpoint_space =
2875 -vd->vdev_checkpoint_sm->sm_alloc;
2876 vd->vdev_spa->spa_checkpoint_info.sci_dspace +=
2877 vd->vdev_stat.vs_checkpoint_space;
2882 * If this is a leaf vdev, load its DTL.
2884 if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
2885 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2886 VDEV_AUX_CORRUPT_DATA);
2887 vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
2888 "[error=%d]", error);
2892 uint64_t obsolete_sm_object = vdev_obsolete_sm_object(vd);
2893 if (obsolete_sm_object != 0) {
2894 objset_t *mos = vd->vdev_spa->spa_meta_objset;
2895 ASSERT(vd->vdev_asize != 0);
2896 ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
2898 if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
2899 obsolete_sm_object, 0, vd->vdev_asize, 0))) {
2900 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2901 VDEV_AUX_CORRUPT_DATA);
2902 vdev_dbgmsg(vd, "vdev_load: space_map_open failed for "
2903 "obsolete spacemap (obj %llu) [error=%d]",
2904 (u_longlong_t)obsolete_sm_object, error);
2907 space_map_update(vd->vdev_obsolete_sm);
2914 * The special vdev case is used for hot spares and l2cache devices. Its
2915 * sole purpose is to set the vdev state for the associated vdev. To do this,
2916 * we make sure that we can open the underlying device, then try to read the
2917 * label, and make sure that the label is sane and that it hasn't been
2918 * repurposed to another pool.
2921 vdev_validate_aux(vdev_t *vd)
2924 uint64_t guid, version;
2927 if (!vdev_readable(vd))
2930 if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
2931 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2932 VDEV_AUX_CORRUPT_DATA);
2936 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
2937 !SPA_VERSION_IS_SUPPORTED(version) ||
2938 nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
2939 guid != vd->vdev_guid ||
2940 nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
2941 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2942 VDEV_AUX_CORRUPT_DATA);
2948 * We don't actually check the pool state here. If it's in fact in
2949 * use by another pool, we update this fact on the fly when requested.
2956 * Free the objects used to store this vdev's spacemaps, and the array
2957 * that points to them.
2960 vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx)
2962 if (vd->vdev_ms_array == 0)
2965 objset_t *mos = vd->vdev_spa->spa_meta_objset;
2966 uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift;
2967 size_t array_bytes = array_count * sizeof (uint64_t);
2968 uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP);
2969 VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0,
2970 array_bytes, smobj_array, 0));
2972 for (uint64_t i = 0; i < array_count; i++) {
2973 uint64_t smobj = smobj_array[i];
2977 space_map_free_obj(mos, smobj, tx);
2980 kmem_free(smobj_array, array_bytes);
2981 VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx));
2982 vd->vdev_ms_array = 0;
2986 vdev_remove_empty(vdev_t *vd, uint64_t txg)
2988 spa_t *spa = vd->vdev_spa;
2991 ASSERT(vd == vd->vdev_top);
2992 ASSERT3U(txg, ==, spa_syncing_txg(spa));
2994 if (vd->vdev_ms != NULL) {
2995 metaslab_group_t *mg = vd->vdev_mg;
2997 metaslab_group_histogram_verify(mg);
2998 metaslab_class_histogram_verify(mg->mg_class);
3000 for (int m = 0; m < vd->vdev_ms_count; m++) {
3001 metaslab_t *msp = vd->vdev_ms[m];
3003 if (msp == NULL || msp->ms_sm == NULL)
3006 mutex_enter(&msp->ms_lock);
3008 * If the metaslab was not loaded when the vdev
3009 * was removed then the histogram accounting may
3010 * not be accurate. Update the histogram information
3011 * here so that we ensure that the metaslab group
3012 * and metaslab class are up-to-date.
3014 metaslab_group_histogram_remove(mg, msp);
3016 VERIFY0(space_map_allocated(msp->ms_sm));
3017 space_map_close(msp->ms_sm);
3019 mutex_exit(&msp->ms_lock);
3022 if (vd->vdev_checkpoint_sm != NULL) {
3023 ASSERT(spa_has_checkpoint(spa));
3024 space_map_close(vd->vdev_checkpoint_sm);
3025 vd->vdev_checkpoint_sm = NULL;
3028 metaslab_group_histogram_verify(mg);
3029 metaslab_class_histogram_verify(mg->mg_class);
3030 for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
3031 ASSERT0(mg->mg_histogram[i]);
3034 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
3035 vdev_destroy_spacemaps(vd, tx);
3037 if (vd->vdev_islog && vd->vdev_top_zap != 0) {
3038 vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx);
3039 vd->vdev_top_zap = 0;
3045 vdev_sync_done(vdev_t *vd, uint64_t txg)
3048 boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));
3050 ASSERT(vdev_is_concrete(vd));
3052 while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
3054 metaslab_sync_done(msp, txg);
3057 metaslab_sync_reassess(vd->vdev_mg);
3061 vdev_sync(vdev_t *vd, uint64_t txg)
3063 spa_t *spa = vd->vdev_spa;
3068 if (range_tree_space(vd->vdev_obsolete_segments) > 0) {
3071 ASSERT(vd->vdev_removing ||
3072 vd->vdev_ops == &vdev_indirect_ops);
3074 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
3075 vdev_indirect_sync_obsolete(vd, tx);
3079 * If the vdev is indirect, it can't have dirty
3080 * metaslabs or DTLs.
3082 if (vd->vdev_ops == &vdev_indirect_ops) {
3083 ASSERT(txg_list_empty(&vd->vdev_ms_list, txg));
3084 ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg));
3089 ASSERT(vdev_is_concrete(vd));
3091 if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 &&
3092 !vd->vdev_removing) {
3093 ASSERT(vd == vd->vdev_top);
3094 ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
3095 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
3096 vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
3097 DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
3098 ASSERT(vd->vdev_ms_array != 0);
3099 vdev_config_dirty(vd);
3103 while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
3104 metaslab_sync(msp, txg);
3105 (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
3108 while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
3109 vdev_dtl_sync(lvd, txg);
3112 * Remove the metadata associated with this vdev once it's empty.
3113 * Note that this is typically used for log/cache device removal;
3114 * we don't empty toplevel vdevs when removing them. But if
3115 * a toplevel happens to be emptied, this is not harmful.
3117 if (vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing) {
3118 vdev_remove_empty(vd, txg);
3121 (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
3125 vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
3127 return (vd->vdev_ops->vdev_op_asize(vd, psize));
3131 * Mark the given vdev faulted. A faulted vdev behaves as if the device could
3132 * not be opened, and no I/O is attempted.
3135 vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
3139 spa_vdev_state_enter(spa, SCL_NONE);
3141 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
3142 return (spa_vdev_state_exit(spa, NULL, ENODEV));
3144 if (!vd->vdev_ops->vdev_op_leaf)
3145 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
3150 * We don't directly use the aux state here, but if we do a
3151 * vdev_reopen(), we need this value to be present to remember why we
3154 vd->vdev_label_aux = aux;
3157 * Faulted state takes precedence over degraded.
3159 vd->vdev_delayed_close = B_FALSE;
3160 vd->vdev_faulted = 1ULL;
3161 vd->vdev_degraded = 0ULL;
3162 vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);
3165 * If this device has the only valid copy of the data, then
3166 * back off and simply mark the vdev as degraded instead.
3168 if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
3169 vd->vdev_degraded = 1ULL;
3170 vd->vdev_faulted = 0ULL;
3173 * If we reopen the device and it's not dead, only then do we
3178 if (vdev_readable(vd))
3179 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
3182 return (spa_vdev_state_exit(spa, vd, 0));
3186 * Mark the given vdev degraded. A degraded vdev is purely an indication to the
3187 * user that something is wrong. The vdev continues to operate as normal as far
3188 * as I/O is concerned.
3191 vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
3195 spa_vdev_state_enter(spa, SCL_NONE);
3197 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
3198 return (spa_vdev_state_exit(spa, NULL, ENODEV));
3200 if (!vd->vdev_ops->vdev_op_leaf)
3201 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
3204 * If the vdev is already faulted, then don't do anything.
3206 if (vd->vdev_faulted || vd->vdev_degraded)
3207 return (spa_vdev_state_exit(spa, NULL, 0));
3209 vd->vdev_degraded = 1ULL;
3210 if (!vdev_is_dead(vd))
3211 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
3214 return (spa_vdev_state_exit(spa, vd, 0));
3218 * Online the given vdev.
3220 * If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached
3221 * spare device should be detached when the device finishes resilvering.
3222 * Second, the online should be treated like a 'test' online case, so no FMA
3223 * events are generated if the device fails to open.
3226 vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
3228 vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
3229 boolean_t wasoffline;
3230 vdev_state_t oldstate;
3232 spa_vdev_state_enter(spa, SCL_NONE);
3234 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
3235 return (spa_vdev_state_exit(spa, NULL, ENODEV));
3237 if (!vd->vdev_ops->vdev_op_leaf)
3238 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
3240 wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
3241 oldstate = vd->vdev_state;
3244 vd->vdev_offline = B_FALSE;
3245 vd->vdev_tmpoffline = B_FALSE;
3246 vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
3247 vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
3249 /* XXX - L2ARC 1.0 does not support expansion */
3250 if (!vd->vdev_aux) {
3251 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
3252 pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND);
3256 vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
3258 if (!vd->vdev_aux) {
3259 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
3260 pvd->vdev_expanding = B_FALSE;
3264 *newstate = vd->vdev_state;
3265 if ((flags & ZFS_ONLINE_UNSPARE) &&
3266 !vdev_is_dead(vd) && vd->vdev_parent &&
3267 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
3268 vd->vdev_parent->vdev_child[0] == vd)
3269 vd->vdev_unspare = B_TRUE;
3271 if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
3273 /* XXX - L2ARC 1.0 does not support expansion */
3275 return (spa_vdev_state_exit(spa, vd, ENOTSUP));
3276 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
3279 /* Restart initializing if necessary */
3280 mutex_enter(&vd->vdev_initialize_lock);
3281 if (vdev_writeable(vd) &&
3282 vd->vdev_initialize_thread == NULL &&
3283 vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) {
3284 (void) vdev_initialize(vd);
3286 mutex_exit(&vd->vdev_initialize_lock);
3289 (oldstate < VDEV_STATE_DEGRADED &&
3290 vd->vdev_state >= VDEV_STATE_DEGRADED))
3291 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE);
3293 return (spa_vdev_state_exit(spa, vd, 0));
3297 vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
3301 uint64_t generation;
3302 metaslab_group_t *mg;
3305 spa_vdev_state_enter(spa, SCL_ALLOC);
3307 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
3308 return (spa_vdev_state_exit(spa, NULL, ENODEV));
3310 if (!vd->vdev_ops->vdev_op_leaf)
3311 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
3315 generation = spa->spa_config_generation + 1;
3318 * If the device isn't already offline, try to offline it.
3320 if (!vd->vdev_offline) {
3322 * If this device has the only valid copy of some data,
3323 * don't allow it to be offlined. Log devices are always
3326 if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
3327 vdev_dtl_required(vd))
3328 return (spa_vdev_state_exit(spa, NULL, EBUSY));
3331 * If the top-level is a slog and it has had allocations
3332 * then proceed. We check that the vdev's metaslab group
3333 * is not NULL since it's possible that we may have just
3334 * added this vdev but not yet initialized its metaslabs.
3336 if (tvd->vdev_islog && mg != NULL) {
3338 * Prevent any future allocations.
3340 metaslab_group_passivate(mg);
3341 (void) spa_vdev_state_exit(spa, vd, 0);
3343 error = spa_reset_logs(spa);
3346 * If the log device was successfully reset but has
3347 * checkpointed data, do not offline it.
3350 tvd->vdev_checkpoint_sm != NULL) {
3351 ASSERT3U(tvd->vdev_checkpoint_sm->sm_alloc,
3353 error = ZFS_ERR_CHECKPOINT_EXISTS;
3356 spa_vdev_state_enter(spa, SCL_ALLOC);
3359 * Check to see if the config has changed.
3361 if (error || generation != spa->spa_config_generation) {
3362 metaslab_group_activate(mg);
3364 return (spa_vdev_state_exit(spa,
3366 (void) spa_vdev_state_exit(spa, vd, 0);
3369 ASSERT0(tvd->vdev_stat.vs_alloc);
3373 * Offline this device and reopen its top-level vdev.
3374 * If the top-level vdev is a log device then just offline
3375 * it. Otherwise, if this action results in the top-level
3376 * vdev becoming unusable, undo it and fail the request.
3378 vd->vdev_offline = B_TRUE;
3381 if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
3382 vdev_is_dead(tvd)) {
3383 vd->vdev_offline = B_FALSE;
3385 return (spa_vdev_state_exit(spa, NULL, EBUSY));
3389 * Add the device back into the metaslab rotor so that
3390 * once we online the device it's open for business.
3392 if (tvd->vdev_islog && mg != NULL)
3393 metaslab_group_activate(mg);
3396 vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
3398 return (spa_vdev_state_exit(spa, vd, 0));
3402 vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
3406 mutex_enter(&spa->spa_vdev_top_lock);
3407 error = vdev_offline_locked(spa, guid, flags);
3408 mutex_exit(&spa->spa_vdev_top_lock);
3414 * Clear the error counts associated with this vdev. Unlike vdev_online() and
3415 * vdev_offline(), we assume the spa config is locked. We also clear all
3416 * children. If 'vd' is NULL, then the user wants to clear all vdevs.
3419 vdev_clear(spa_t *spa, vdev_t *vd)
3421 vdev_t *rvd = spa->spa_root_vdev;
3423 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
3428 vd->vdev_stat.vs_read_errors = 0;
3429 vd->vdev_stat.vs_write_errors = 0;
3430 vd->vdev_stat.vs_checksum_errors = 0;
3432 for (int c = 0; c < vd->vdev_children; c++)
3433 vdev_clear(spa, vd->vdev_child[c]);
3436 for (int c = 0; c < spa->spa_l2cache.sav_count; c++)
3437 vdev_clear(spa, spa->spa_l2cache.sav_vdevs[c]);
3439 for (int c = 0; c < spa->spa_spares.sav_count; c++)
3440 vdev_clear(spa, spa->spa_spares.sav_vdevs[c]);
3444 * It makes no sense to "clear" an indirect vdev.
3446 if (!vdev_is_concrete(vd))
3450 * If we're in the FAULTED state or have experienced failed I/O, then
3451 * clear the persistent state and attempt to reopen the device. We
3452 * also mark the vdev config dirty, so that the new faulted state is
3453 * written out to disk.
3455 if (vd->vdev_faulted || vd->vdev_degraded ||
3456 !vdev_readable(vd) || !vdev_writeable(vd)) {
3459 * When reopening in response to a clear event, it may be due to
3460 * a fmadm repair request. In this case, if the device is
3461 * still broken, we want to still post the ereport again.
3463 vd->vdev_forcefault = B_TRUE;
3465 vd->vdev_faulted = vd->vdev_degraded = 0ULL;
3466 vd->vdev_cant_read = B_FALSE;
3467 vd->vdev_cant_write = B_FALSE;
3469 vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
3471 vd->vdev_forcefault = B_FALSE;
3473 if (vd != rvd && vdev_writeable(vd->vdev_top))
3474 vdev_state_dirty(vd->vdev_top);
3476 if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
3477 spa_async_request(spa, SPA_ASYNC_RESILVER);
3479 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
3483 * When clearing a FMA-diagnosed fault, we always want to
3484 * unspare the device, as we assume that the original spare was
3485 * done in response to the FMA fault.
3487 if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
3488 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
3489 vd->vdev_parent->vdev_child[0] == vd)
3490 vd->vdev_unspare = B_TRUE;
3494 vdev_is_dead(vdev_t *vd)
3497 * Holes and missing devices are always considered "dead".
3498 * This simplifies the code since we don't have to check for
3499 * these types of devices in the various code paths.
3500 * Instead we rely on the fact that we skip over dead devices
3501 * before issuing I/O to them.
3503 return (vd->vdev_state < VDEV_STATE_DEGRADED ||
3504 vd->vdev_ops == &vdev_hole_ops ||
3505 vd->vdev_ops == &vdev_missing_ops);
3509 vdev_readable(vdev_t *vd)
3511 return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
3515 vdev_writeable(vdev_t *vd)
3517 return (!vdev_is_dead(vd) && !vd->vdev_cant_write &&
3518 vdev_is_concrete(vd));
3522 vdev_allocatable(vdev_t *vd)
3524 uint64_t state = vd->vdev_state;
3527 * We currently allow allocations from vdevs which may be in the
3528 * process of reopening (i.e. VDEV_STATE_CLOSED). If the device
3529 * fails to reopen then we'll catch it later when we're holding
3530 * the proper locks. Note that we have to get the vdev state
3531 * in a local variable because although it changes atomically,
3532 * we're asking two separate questions about it.
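 * (The two questions being: is the state below DEGRADED, and is it
 * exactly CLOSED.)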
3534 return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
3535 !vd->vdev_cant_write && vdev_is_concrete(vd) &&
3536 vd->vdev_mg->mg_initialized);
3540 vdev_accessible(vdev_t *vd, zio_t *zio)
3542 ASSERT(zio->io_vd == vd);
3544 if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
3547 if (zio->io_type == ZIO_TYPE_READ)
3548 return (!vd->vdev_cant_read);
3550 if (zio->io_type == ZIO_TYPE_WRITE)
3551 return (!vd->vdev_cant_write);
3557 vdev_is_spacemap_addressable(vdev_t *vd)
3560 * Assuming 47 bits of the space map entry are dedicated to the entry's
3561 * offset (see description in space_map.h), we calculate the maximum
3562 * address that can be described by a space map entry for the given device.
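 * For example, with ashift == 9 the limit is 1ULL << (9 + 47) bytes,
 * i.e. 64 PiB of addressable vdev space; with ashift == 12 it is
 * 512 PiB. At ashift == 16 the shift reaches 63, and we conservatively
 * report the vdev as not spacemap-addressable.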
3565 uint64_t shift = vd->vdev_ashift + 47;
3567 if (shift >= 63) /* detect potential overflow */
3570 return (vd->vdev_asize < (1ULL << shift));
3574 * Get statistics for the given vdev.
3577 vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
3579 spa_t *spa = vd->vdev_spa;
3580 vdev_t *rvd = spa->spa_root_vdev;
3581 vdev_t *tvd = vd->vdev_top;
3583 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
3585 mutex_enter(&vd->vdev_stat_lock);
3586 bcopy(&vd->vdev_stat, vs, sizeof (*vs));
3587 vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
3588 vs->vs_state = vd->vdev_state;
3589 vs->vs_rsize = vdev_get_min_asize(vd);
3590 if (vd->vdev_ops->vdev_op_leaf) {
3591 vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
3593 * Report initializing progress. Since we don't have the
3594 * initializing locks held, this is only an estimate (although a
3595 * fairly accurate one).
3597 vs->vs_initialize_bytes_done = vd->vdev_initialize_bytes_done;
3598 vs->vs_initialize_bytes_est = vd->vdev_initialize_bytes_est;
3599 vs->vs_initialize_state = vd->vdev_initialize_state;
3600 vs->vs_initialize_action_time = vd->vdev_initialize_action_time;
3603 * Report expandable space on top-level, non-auxiliary devices only.
3604 * The expandable space is reported in terms of metaslab sized units
3605 * since that determines how much space the pool can expand.
3607 if (vd->vdev_aux == NULL && tvd != NULL && vd->vdev_max_asize != 0) {
3608 vs->vs_esize = P2ALIGN(vd->vdev_max_asize - vd->vdev_asize -
3609 spa->spa_bootsize, 1ULL << tvd->vdev_ms_shift);
3611 vs->vs_configured_ashift = vd->vdev_top != NULL
3612 ? vd->vdev_top->vdev_ashift : vd->vdev_ashift;
3613 vs->vs_logical_ashift = vd->vdev_logical_ashift;
3614 vs->vs_physical_ashift = vd->vdev_physical_ashift;
3615 if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
3616 vdev_is_concrete(vd)) {
3617 vs->vs_fragmentation = vd->vdev_mg->mg_fragmentation;
3621 * If we're getting stats on the root vdev, aggregate the I/O counts
3622 * over all top-level vdevs (i.e. the direct children of the root).
3625 for (int c = 0; c < rvd->vdev_children; c++) {
3626 vdev_t *cvd = rvd->vdev_child[c];
3627 vdev_stat_t *cvs = &cvd->vdev_stat;
3629 for (int t = 0; t < ZIO_TYPES; t++) {
3630 vs->vs_ops[t] += cvs->vs_ops[t];
3631 vs->vs_bytes[t] += cvs->vs_bytes[t];
3633 cvs->vs_scan_removing = cvd->vdev_removing;
3636 mutex_exit(&vd->vdev_stat_lock);
3640 vdev_clear_stats(vdev_t *vd)
3642 mutex_enter(&vd->vdev_stat_lock);
3643 vd->vdev_stat.vs_space = 0;
3644 vd->vdev_stat.vs_dspace = 0;
3645 vd->vdev_stat.vs_alloc = 0;
3646 mutex_exit(&vd->vdev_stat_lock);
3650 vdev_scan_stat_init(vdev_t *vd)
3652 vdev_stat_t *vs = &vd->vdev_stat;
3654 for (int c = 0; c < vd->vdev_children; c++)
3655 vdev_scan_stat_init(vd->vdev_child[c]);
3657 mutex_enter(&vd->vdev_stat_lock);
3658 vs->vs_scan_processed = 0;
3659 mutex_exit(&vd->vdev_stat_lock);
3663 vdev_stat_update(zio_t *zio, uint64_t psize)
3665 spa_t *spa = zio->io_spa;
3666 vdev_t *rvd = spa->spa_root_vdev;
3667 vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
3669 uint64_t txg = zio->io_txg;
3670 vdev_stat_t *vs = &vd->vdev_stat;
3671 zio_type_t type = zio->io_type;
3672 int flags = zio->io_flags;
3675 * If this i/o is a gang leader, it didn't do any actual work.
3677 if (zio->io_gang_tree)
3680 if (zio->io_error == 0) {
3682 * If this is a root i/o, don't count it -- we've already
3683 * counted the top-level vdevs, and vdev_get_stats() will
3684 * aggregate them when asked. This reduces contention on
3685 * the root vdev_stat_lock and implicitly handles blocks
3686 * that compress away to holes, for which there is no i/o.
3687 * (Holes never create vdev children, so all the counters
3688 * remain zero, which is what we want.)
3690 * Note: this only applies to successful i/o (io_error == 0)
3691 * because unlike i/o counts, errors are not additive.
3692 * When reading a ditto block, for example, failure of
3693 * one top-level vdev does not imply a root-level error.
3698 ASSERT(vd == zio->io_vd);
3700 if (flags & ZIO_FLAG_IO_BYPASS)
3703 mutex_enter(&vd->vdev_stat_lock);
3705 if (flags & ZIO_FLAG_IO_REPAIR) {
3706 if (flags & ZIO_FLAG_SCAN_THREAD) {
3707 dsl_scan_phys_t *scn_phys =
3708 &spa->spa_dsl_pool->dp_scan->scn_phys;
3709 uint64_t *processed = &scn_phys->scn_processed;
3712 if (vd->vdev_ops->vdev_op_leaf)
3713 atomic_add_64(processed, psize);
3714 vs->vs_scan_processed += psize;
3717 if (flags & ZIO_FLAG_SELF_HEAL)
3718 vs->vs_self_healed += psize;
3722 vs->vs_bytes[type] += psize;
3724 mutex_exit(&vd->vdev_stat_lock);
3728 if (flags & ZIO_FLAG_SPECULATIVE)
3732 * If this is an I/O error that is going to be retried, then ignore the
3733 * error. Otherwise, the user may interpret B_FAILFAST I/O errors as
3734 * hard errors, when in reality they can happen for any number of
3735 * innocuous reasons (bus resets, MPxIO link failure, etc).
3737 if (zio->io_error == EIO &&
3738 !(zio->io_flags & ZIO_FLAG_IO_RETRY))
3742 * Intent log writes won't propagate their error to the root
3743 * I/O so don't mark these types of failures as pool-level
3746 if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
3749 mutex_enter(&vd->vdev_stat_lock);
3750 if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) {
3751 if (zio->io_error == ECKSUM)
3752 vs->vs_checksum_errors++;
3754 vs->vs_read_errors++;
3756 if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd))
3757 vs->vs_write_errors++;
3758 mutex_exit(&vd->vdev_stat_lock);
3760 if (spa->spa_load_state == SPA_LOAD_NONE &&
3761 type == ZIO_TYPE_WRITE && txg != 0 &&
3762 (!(flags & ZIO_FLAG_IO_REPAIR) ||
3763 (flags & ZIO_FLAG_SCAN_THREAD) ||
3764 spa->spa_claiming)) {
3766 * This is either a normal write (not a repair), or it's
3767 * a repair induced by the scrub thread, or it's a repair
3768 * made by zil_claim() during spa_load() in the first txg.
3769 * In the normal case, we commit the DTL change in the same
3770 * txg as the block was born. In the scrub-induced repair
3771 * case, we know that scrubs run in first-pass syncing context,
3772 * so we commit the DTL change in spa_syncing_txg(spa).
3773 * In the zil_claim() case, we commit in spa_first_txg(spa).
3775 * We currently do not make DTL entries for failed spontaneous
3776 * self-healing writes triggered by normal (non-scrubbing)
3777 * reads, because we have no transactional context in which to
3778 * do so -- and it's not clear that it'd be desirable anyway.
3780 if (vd->vdev_ops->vdev_op_leaf) {
3781 uint64_t commit_txg = txg;
3782 if (flags & ZIO_FLAG_SCAN_THREAD) {
3783 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
3784 ASSERT(spa_sync_pass(spa) == 1);
3785 vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
3786 commit_txg = spa_syncing_txg(spa);
3787 } else if (spa->spa_claiming) {
3788 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
3789 commit_txg = spa_first_txg(spa);
3791 ASSERT(commit_txg >= spa_syncing_txg(spa));
3792 if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
3794 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
3795 vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
3796 vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
3799 vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
3804 * Update the in-core space usage stats for this vdev, its metaslab class,
3805 * and the root vdev.
3808 vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
3809 int64_t space_delta)
3811 int64_t dspace_delta = space_delta;
3812 spa_t *spa = vd->vdev_spa;
3813 vdev_t *rvd = spa->spa_root_vdev;
3814 metaslab_group_t *mg = vd->vdev_mg;
3815 metaslab_class_t *mc = mg ? mg->mg_class : NULL;
3817 ASSERT(vd == vd->vdev_top);
3820 * Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion
3821 * factor. We must calculate this here and not at the root vdev
3822 * because the root vdev's psize-to-asize is simply the max of its
3823 * children's, thus not accurate enough for us.
3825 ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0);
3826 ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
3827 dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
3828 vd->vdev_deflate_ratio;
3830 mutex_enter(&vd->vdev_stat_lock);
3831 vd->vdev_stat.vs_alloc += alloc_delta;
3832 vd->vdev_stat.vs_space += space_delta;
3833 vd->vdev_stat.vs_dspace += dspace_delta;
3834 mutex_exit(&vd->vdev_stat_lock);
3836 if (mc == spa_normal_class(spa)) {
3837 mutex_enter(&rvd->vdev_stat_lock);
3838 rvd->vdev_stat.vs_alloc += alloc_delta;
3839 rvd->vdev_stat.vs_space += space_delta;
3840 rvd->vdev_stat.vs_dspace += dspace_delta;
3841 mutex_exit(&rvd->vdev_stat_lock);
3845 ASSERT(rvd == vd->vdev_parent);
3846 ASSERT(vd->vdev_ms_count != 0);
3848 metaslab_class_space_update(mc,
3849 alloc_delta, defer_delta, space_delta, dspace_delta);
3854 * Mark a top-level vdev's config as dirty, placing it on the dirty list
3855 * so that it will be written out next time the vdev configuration is synced.
3856 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
3859 vdev_config_dirty(vdev_t *vd)
3861 spa_t *spa = vd->vdev_spa;
3862 vdev_t *rvd = spa->spa_root_vdev;
3865 ASSERT(spa_writeable(spa));
3868 * If this is an aux vdev (as with l2cache and spare devices), then we
3869 * update the vdev config manually and set the sync flag.
3871 if (vd->vdev_aux != NULL) {
3872 spa_aux_vdev_t *sav = vd->vdev_aux;
3876 for (c = 0; c < sav->sav_count; c++) {
3877 if (sav->sav_vdevs[c] == vd)
3881 if (c == sav->sav_count) {
3883 * We're being removed. There's nothing more to do.
3885 ASSERT(sav->sav_sync == B_TRUE);
3889 sav->sav_sync = B_TRUE;
3891 if (nvlist_lookup_nvlist_array(sav->sav_config,
3892 ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
3893 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
3894 ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
3900 * Setting the nvlist in the middle of the array is a little
3901 * sketchy, but it will work.
3903 nvlist_free(aux[c]);
3904 aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
3910 * The dirty list is protected by the SCL_CONFIG lock. The caller
3911 * must either hold SCL_CONFIG as writer, or must be the sync thread
3912 * (which holds SCL_CONFIG as reader). There's only one sync thread,
3913 * so this is sufficient to ensure mutual exclusion.
3915 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
3916 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
3917 spa_config_held(spa, SCL_CONFIG, RW_READER)));
3920 for (c = 0; c < rvd->vdev_children; c++)
3921 vdev_config_dirty(rvd->vdev_child[c]);
3923 ASSERT(vd == vd->vdev_top);
3925 if (!list_link_active(&vd->vdev_config_dirty_node) &&
3926 vdev_is_concrete(vd)) {
3927 list_insert_head(&spa->spa_config_dirty_list, vd);
3933 vdev_config_clean(vdev_t *vd)
3935 spa_t *spa = vd->vdev_spa;
3937 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
3938 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
3939 spa_config_held(spa, SCL_CONFIG, RW_READER)));
3941 ASSERT(list_link_active(&vd->vdev_config_dirty_node));
3942 list_remove(&spa->spa_config_dirty_list, vd);
3946 * Mark a top-level vdev's state as dirty, so that the next pass of
3947 * spa_sync() can convert this into vdev_config_dirty(). We distinguish
3948 * the state changes from larger config changes because they require
3949 * much less locking, and are often needed for administrative actions.
3952 vdev_state_dirty(vdev_t *vd)
3954 spa_t *spa = vd->vdev_spa;
3956 ASSERT(spa_writeable(spa));
3957 ASSERT(vd == vd->vdev_top);
3960 * The state list is protected by the SCL_STATE lock. The caller
3961 * must either hold SCL_STATE as writer, or must be the sync thread
3962 * (which holds SCL_STATE as reader). There's only one sync thread,
3963 * so this is sufficient to ensure mutual exclusion.
3965 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
3966 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
3967 spa_config_held(spa, SCL_STATE, RW_READER)));
3969 if (!list_link_active(&vd->vdev_state_dirty_node) &&
3970 vdev_is_concrete(vd))
3971 list_insert_head(&spa->spa_state_dirty_list, vd);
3975 vdev_state_clean(vdev_t *vd)
3977 spa_t *spa = vd->vdev_spa;
3979 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
3980 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
3981 spa_config_held(spa, SCL_STATE, RW_READER)));
3983 ASSERT(list_link_active(&vd->vdev_state_dirty_node));
3984 list_remove(&spa->spa_state_dirty_list, vd);
3988 * Propagate vdev state up from children to parent.
3991 vdev_propagate_state(vdev_t *vd)
3993 spa_t *spa = vd->vdev_spa;
3994 vdev_t *rvd = spa->spa_root_vdev;
3995 int degraded = 0, faulted = 0;
3999 if (vd->vdev_children > 0) {
4000 for (int c = 0; c < vd->vdev_children; c++) {
4001 child = vd->vdev_child[c];
4004 * Don't factor holes or indirect vdevs into the
4007 if (!vdev_is_concrete(child))
4010 if (!vdev_readable(child) ||
4011 (!vdev_writeable(child) && spa_writeable(spa))) {
4013 * Root special: if there is a top-level log
4014 * device, treat the root vdev as if it were
4017 if (child->vdev_islog && vd == rvd)
4021 } else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
4025 if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
4029 vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
4032 * Root special: if there is a top-level vdev that cannot be
4033 * opened due to corrupted metadata, then propagate the root
4034 * vdev's aux state as 'corrupt' rather than 'insufficient
4037 if (corrupted && vd == rvd &&
4038 rvd->vdev_state == VDEV_STATE_CANT_OPEN)
4039 vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
4040 VDEV_AUX_CORRUPT_DATA);
4043 if (vd->vdev_parent)
4044 vdev_propagate_state(vd->vdev_parent);
4048 * Set a vdev's state. If this is during an open, we don't update the parent
4049 * state, because we're in the process of opening children depth-first.
4050 * Otherwise, we propagate the change to the parent.
4052 * If this routine places a device in a faulted state, an appropriate ereport is
4056 vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
4058 uint64_t save_state;
4059 spa_t *spa = vd->vdev_spa;
4061 if (state == vd->vdev_state) {
4062 vd->vdev_stat.vs_aux = aux;
4066 save_state = vd->vdev_state;
4068 vd->vdev_state = state;
4069 vd->vdev_stat.vs_aux = aux;
4072 * If we are setting the vdev state to anything but an open state, then
4073 * always close the underlying device unless the device has requested
4074 * a delayed close (i.e. we're about to remove or fault the device).
4075 * Otherwise, we keep accessible but invalid devices open forever.
4076 * We don't call vdev_close() itself, because that implies some extra
4077 * checks (offline, etc) that we don't want here. This is limited to
4078 * leaf devices, because otherwise closing the device will affect other
4081 if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
4082 vd->vdev_ops->vdev_op_leaf)
4083 vd->vdev_ops->vdev_op_close(vd);
4085 if (vd->vdev_removed &&
4086 state == VDEV_STATE_CANT_OPEN &&
4087 (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
4089 * If the previous state is set to VDEV_STATE_REMOVED, then this
4090 * device was previously marked removed and someone attempted to
4091 * reopen it. If this failed due to a nonexistent device, then
4092 * keep the device in the REMOVED state. We also leave it in that state if
4093 * this is one of our special test online cases, which is only
4094 * attempting to online the device and shouldn't generate an FMA
4097 vd->vdev_state = VDEV_STATE_REMOVED;
4098 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
4099 } else if (state == VDEV_STATE_REMOVED) {
4100 vd->vdev_removed = B_TRUE;
4101 } else if (state == VDEV_STATE_CANT_OPEN) {
4103 * If we fail to open a vdev during an import or recovery, we
4104 * mark it as "not available", which signifies that it was
4105 * never there to begin with. Failure to open such a device
4106 * is not considered an error.
4108 if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
4109 spa_load_state(spa) == SPA_LOAD_RECOVER) &&
4110 vd->vdev_ops->vdev_op_leaf)
4111 vd->vdev_not_present = 1;
4114 * Post the appropriate ereport. If the 'prevstate' field is
4115 * set to something other than VDEV_STATE_UNKNOWN, it indicates
4116 * that this is part of a vdev_reopen(). In this case, we don't
4117 * want to post the ereport if the device was already in the
4118 * CANT_OPEN state beforehand.
4120 * If the 'checkremove' flag is set, then this is an attempt to
4121 * online the device in response to an insertion event. If we
4122 * hit this case, then we have detected an insertion event for a
4123 * faulted or offline device that wasn't in the removed state.
4124 * In this scenario, we don't post an ereport because we are
4125 * about to replace the device, or attempt an online with
4126 * vdev_forcefault, which will generate the fault for us.
4128 if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
4129 !vd->vdev_not_present && !vd->vdev_checkremove &&
4130 vd != spa->spa_root_vdev) {
4134 case VDEV_AUX_OPEN_FAILED:
4135 class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
4137 case VDEV_AUX_CORRUPT_DATA:
4138 class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
4140 case VDEV_AUX_NO_REPLICAS:
4141 class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
4143 case VDEV_AUX_BAD_GUID_SUM:
4144 class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
4146 case VDEV_AUX_TOO_SMALL:
4147 class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
4149 case VDEV_AUX_BAD_LABEL:
4150 class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
4153 class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
4156 zfs_ereport_post(class, spa, vd, NULL, save_state, 0);
4159 /* Erase any notion of persistent removed state */
4160 vd->vdev_removed = B_FALSE;
4162 vd->vdev_removed = B_FALSE;
4166 * Notify the fmd of the state change. Be verbose and post
4167 * notifications even for stuff that's not important; the fmd agent can
4168 * sort it out. Don't emit state change events for non-leaf vdevs since
4169 * they can't change state on their own. The FMD can check their state
4170 * if it wants to when it sees that a leaf vdev had a state change.
4172 if (vd->vdev_ops->vdev_op_leaf)
4173 zfs_post_state_change(spa, vd);
4175 if (!isopen && vd->vdev_parent)
4176 vdev_propagate_state(vd->vdev_parent);
4180 vdev_children_are_offline(vdev_t *vd)
4182 ASSERT(!vd->vdev_ops->vdev_op_leaf);
4184 for (uint64_t i = 0; i < vd->vdev_children; i++) {
4185 if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE)
4193 * Check the vdev configuration to ensure that it's capable of supporting
4194 * a root pool. We do not support partial configuration.
4195 * In addition, only a single top-level vdev is allowed.
4197 * FreeBSD does not have the above limitations.
4200 vdev_is_bootable(vdev_t *vd)
4203 if (!vd->vdev_ops->vdev_op_leaf) {
4204 char *vdev_type = vd->vdev_ops->vdev_op_type;
4206 if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 &&
4207 vd->vdev_children > 1) {
4209 } else if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0 ||
4210 strcmp(vdev_type, VDEV_TYPE_INDIRECT) == 0) {
4215 for (int c = 0; c < vd->vdev_children; c++) {
4216 if (!vdev_is_bootable(vd->vdev_child[c]))
4219 #endif /* illumos */
4224 vdev_is_concrete(vdev_t *vd)
4226 vdev_ops_t *ops = vd->vdev_ops;
4227 if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops ||
4228 ops == &vdev_missing_ops || ops == &vdev_root_ops) {
4236 * Determine if a log device has valid content. If the vdev was
4237 * removed or faulted in the MOS config then we know that
4238 * the content on the log device has already been written to the pool.
4241 vdev_log_state_valid(vdev_t *vd)
4243 if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
4247 for (int c = 0; c < vd->vdev_children; c++)
4248 if (vdev_log_state_valid(vd->vdev_child[c]))
4255 * Expand a vdev if possible.
4258 vdev_expand(vdev_t *vd, uint64_t txg)
4260 ASSERT(vd->vdev_top == vd);
4261 ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
4263 vdev_set_deflate_ratio(vd);
4265 if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
4266 vdev_is_concrete(vd)) {
4267 VERIFY(vdev_metaslab_init(vd, txg) == 0);
4268 vdev_config_dirty(vd);
4276 vdev_split(vdev_t *vd)
4278 vdev_t *cvd, *pvd = vd->vdev_parent;
4280 vdev_remove_child(pvd, vd);
4281 vdev_compact_children(pvd);
4283 cvd = pvd->vdev_child[0];
4284 if (pvd->vdev_children == 1) {
4285 vdev_remove_parent(cvd);
4286 cvd->vdev_splitting = B_TRUE;
4288 vdev_propagate_state(cvd);
4292 vdev_deadman(vdev_t *vd)
4294 for (int c = 0; c < vd->vdev_children; c++) {
4295 vdev_t *cvd = vd->vdev_child[c];
4300 if (vd->vdev_ops->vdev_op_leaf) {
4301 vdev_queue_t *vq = &vd->vdev_queue;
4303 mutex_enter(&vq->vq_lock);
4304 if (avl_numnodes(&vq->vq_active_tree) > 0) {
4305 spa_t *spa = vd->vdev_spa;
4310 * Look at the head of all the pending queues;
4311 * if any I/O has been outstanding for longer than
4312 * the spa_deadman_synctime, we panic the system.
4314 fio = avl_first(&vq->vq_active_tree);
4315 delta = gethrtime() - fio->io_timestamp;
4316 if (delta > spa_deadman_synctime(spa)) {
4317 vdev_dbgmsg(vd, "SLOW IO: zio timestamp "
4318 "%lluns, delta %lluns, last io %lluns",
4319 fio->io_timestamp, (u_longlong_t)delta,
4320 vq->vq_io_complete_ts);
4321 fm_panic("I/O to pool '%s' appears to be "
4322 "hung on vdev guid %llu at '%s'.",
4324 (long long unsigned int) vd->vdev_guid,
4328 mutex_exit(&vq->vq_lock);