/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/fs/zfs.h>

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vdev, CTLFLAG_RW, 0, "ZFS VDEV");

/*
 * Virtual device management.
 */

static vdev_ops_t *vdev_ops_table[] = {

/* maximum scrub/resilver I/O queue */
int zfs_scrub_limit = 70;

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
        vdev_ops_t *ops, **opspp;

        for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
                if (strcmp(ops->vdev_op_type, type) == 0)
                        break;

        return (ops);
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children.  This is what's used by anything other than RAID-Z.
 */
static uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
        uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
        uint64_t csize;
        uint64_t c;

        for (c = 0; c < vd->vdev_children; c++) {
                csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
                asize = MAX(asize, csize);
        }

        return (asize);
}

/*
 * Get the replaceable or attachable device size.
 * If the parent is a mirror or raidz, the replaceable size is the minimum
 * psize of all its children.  For the rest, just return our own psize.
 */
uint64_t
vdev_get_rsize(vdev_t *vd)
{
        vdev_t *pvd, *cvd;
        uint64_t c, rsize;

        pvd = vd->vdev_parent;

        /*
         * If our parent is NULL or the root, just return our own psize.
         */
        if (pvd == NULL || pvd->vdev_parent == NULL)
                return (vd->vdev_psize);

        rsize = 0;

        for (c = 0; c < pvd->vdev_children; c++) {
                cvd = pvd->vdev_child[c];
                rsize = MIN(rsize - 1, cvd->vdev_psize - 1) + 1;
        }

        return (rsize);
}
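
/*
 * Translate a top-level vdev number into its vdev_t (a direct child of
 * the root vdev).
 */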
vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
        vdev_t *rvd = spa->spa_root_vdev;

        if (vdev < rvd->vdev_children)
                return (rvd->vdev_child[vdev]);

        return (NULL);
}
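
/*
 * Recursively search the vdev tree rooted at 'vd' for the vdev with the
 * given guid; returns NULL if no such vdev exists.
 */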
vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
        int c;
        vdev_t *mvd;

        if (vd->vdev_guid == guid)
                return (vd);

        for (c = 0; c < vd->vdev_children; c++)
                if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
                    NULL)
                        return (mvd);

        return (NULL);
}
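
/*
 * Link 'cvd' into 'pvd's child array at slot cvd->vdev_id, growing the
 * array as needed, and fold the child's guid sum into all ancestors.
 */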
void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
        size_t oldsize, newsize;
        uint64_t id = cvd->vdev_id;
        vdev_t **newchild;

        ASSERT(spa_config_held(cvd->vdev_spa, RW_WRITER));
        ASSERT(cvd->vdev_parent == NULL);

        cvd->vdev_parent = pvd;

        if (pvd == NULL)
                return;

        ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

        oldsize = pvd->vdev_children * sizeof (vdev_t *);
        pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
        newsize = pvd->vdev_children * sizeof (vdev_t *);

        newchild = kmem_zalloc(newsize, KM_SLEEP);
        if (pvd->vdev_child != NULL) {
                bcopy(pvd->vdev_child, newchild, oldsize);
                kmem_free(pvd->vdev_child, oldsize);
        }

        pvd->vdev_child = newchild;
        pvd->vdev_child[id] = cvd;

        cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
        ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

        /*
         * Walk up all ancestors to update guid sum.
         */
        for (; pvd != NULL; pvd = pvd->vdev_parent)
                pvd->vdev_guid_sum += cvd->vdev_guid_sum;

        if (cvd->vdev_ops->vdev_op_leaf)
                cvd->vdev_spa->spa_scrub_maxinflight += zfs_scrub_limit;
}
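
/*
 * Unlink 'cvd' from its parent, freeing the child array once it holds
 * no other children.
 */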
void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
        int c;
        uint_t id = cvd->vdev_id;

        ASSERT(cvd->vdev_parent == pvd);

        if (pvd == NULL)
                return;

        ASSERT(id < pvd->vdev_children);
        ASSERT(pvd->vdev_child[id] == cvd);

        pvd->vdev_child[id] = NULL;
        cvd->vdev_parent = NULL;

        for (c = 0; c < pvd->vdev_children; c++)
                if (pvd->vdev_child[c])
                        break;

        if (c == pvd->vdev_children) {
                kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
                pvd->vdev_child = NULL;
                pvd->vdev_children = 0;
        }

        /*
         * Walk up all ancestors to update guid sum.
         */
        for (; pvd != NULL; pvd = pvd->vdev_parent)
                pvd->vdev_guid_sum -= cvd->vdev_guid_sum;

        if (cvd->vdev_ops->vdev_op_leaf)
                cvd->vdev_spa->spa_scrub_maxinflight -= zfs_scrub_limit;
}

/*
 * Remove any holes in the child array.
 */
void
vdev_compact_children(vdev_t *pvd)
{
        vdev_t **newchild, *cvd;
        int oldc = pvd->vdev_children;
        int newc, c;

        ASSERT(spa_config_held(pvd->vdev_spa, RW_WRITER));

        for (c = newc = 0; c < oldc; c++)
                if (pvd->vdev_child[c])
                        newc++;

        newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);

        for (c = newc = 0; c < oldc; c++) {
                if ((cvd = pvd->vdev_child[c]) != NULL) {
                        newchild[newc] = cvd;
                        cvd->vdev_id = newc++;
                }
        }

        kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
        pvd->vdev_child = newchild;
        pvd->vdev_children = newc;
}

/*
 * Allocate and minimally initialize a vdev_t.
 */
static vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
        vdev_t *vd;

        vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);

        if (spa->spa_root_vdev == NULL) {
                ASSERT(ops == &vdev_root_ops);
                spa->spa_root_vdev = vd;
        }

        if (spa->spa_root_vdev == vd) {
                /*
                 * The root vdev's guid will also be the pool guid,
                 * which must be unique among all pools.
                 */
                while (guid == 0 || spa_guid_exists(guid, 0))
                        guid = spa_get_random(-1ULL);
        } else {
                /*
                 * Any other vdev's guid must be unique within the pool.
                 */
                while (guid == 0 ||
                    spa_guid_exists(spa_guid(spa), guid))
                        guid = spa_get_random(-1ULL);
        }
        ASSERT(!spa_guid_exists(spa_guid(spa), guid));

        vd->vdev_spa = spa;
        vd->vdev_id = id;
        vd->vdev_guid = guid;
        vd->vdev_guid_sum = guid;
        vd->vdev_ops = ops;
        vd->vdev_state = VDEV_STATE_CLOSED;

        mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
        space_map_create(&vd->vdev_dtl_map, 0, -1ULL, 0, &vd->vdev_dtl_lock);
        space_map_create(&vd->vdev_dtl_scrub, 0, -1ULL, 0, &vd->vdev_dtl_lock);
        txg_list_create(&vd->vdev_ms_list,
            offsetof(struct metaslab, ms_txg_node));
        txg_list_create(&vd->vdev_dtl_list,
            offsetof(struct vdev, vdev_dtl_node));
        vd->vdev_stat.vs_timestamp = gethrtime();

        return (vd);
}

/*
 * Free a vdev_t that has been removed from service.
 */
static void
vdev_free_common(vdev_t *vd)
{
        spa_t *spa = vd->vdev_spa;

        if (vd->vdev_path)
                spa_strfree(vd->vdev_path);
        if (vd->vdev_devid)
                spa_strfree(vd->vdev_devid);

        if (vd->vdev_isspare)
                spa_spare_remove(vd);

        txg_list_destroy(&vd->vdev_ms_list);
        txg_list_destroy(&vd->vdev_dtl_list);
        mutex_enter(&vd->vdev_dtl_lock);
        space_map_unload(&vd->vdev_dtl_map);
        space_map_destroy(&vd->vdev_dtl_map);
        space_map_vacate(&vd->vdev_dtl_scrub, NULL, NULL);
        space_map_destroy(&vd->vdev_dtl_scrub);
        mutex_exit(&vd->vdev_dtl_lock);
        mutex_destroy(&vd->vdev_dtl_lock);
        mutex_destroy(&vd->vdev_stat_lock);

        if (vd == spa->spa_root_vdev)
                spa->spa_root_vdev = NULL;

        kmem_free(vd, sizeof (vdev_t));
}

/*
 * Allocate a new vdev.  The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
 */
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
    int alloctype)
{
        vdev_ops_t *ops;
        char *type;
        uint64_t guid = 0;
        vdev_t *vd;

        ASSERT(spa_config_held(spa, RW_WRITER));

        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
                return (EINVAL);

        if ((ops = vdev_getops(type)) == NULL)
                return (EINVAL);

        /*
         * If this is a load, get the vdev guid from the nvlist.
         * Otherwise, vdev_alloc_common() will generate one for us.
         */
        if (alloctype == VDEV_ALLOC_LOAD) {
                uint64_t label_id;

                if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
                    label_id != id)
                        return (EINVAL);

                if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
                        return (EINVAL);
        } else if (alloctype == VDEV_ALLOC_SPARE) {
                if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
                        return (EINVAL);
        }

        /*
         * The first allocated vdev must be of type 'root'.
         */
        if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
                return (EINVAL);

        vd = vdev_alloc_common(spa, id, guid, ops);

        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
                vd->vdev_path = spa_strdup(vd->vdev_path);
        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
                vd->vdev_devid = spa_strdup(vd->vdev_devid);

        /*
         * Set the nparity property for RAID-Z vdevs.
         */
        if (ops == &vdev_raidz_ops) {
                if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
                    &vd->vdev_nparity) == 0) {
                        /*
                         * Currently, we can only support 2 parity devices.
                         */
                        if (vd->vdev_nparity > 2)
                                return (EINVAL);
                        /*
                         * Older versions can only support 1 parity device.
                         */
                        if (vd->vdev_nparity == 2 &&
                            spa_version(spa) < ZFS_VERSION_RAID6)
                                return (EINVAL);
                } else {
                        /*
                         * We require the parity to be specified for SPAs that
                         * support multiple parity levels.
                         */
                        if (spa_version(spa) >= ZFS_VERSION_RAID6)
                                return (EINVAL);
                        /*
                         * Otherwise, we default to 1 parity device for RAID-Z.
                         */
                        vd->vdev_nparity = 1;
                }
        } else {
                vd->vdev_nparity = 0;
        }

        /*
         * Set the whole_disk property.  If it's not specified, leave the value
         * as -1.
         */
        if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
            &vd->vdev_wholedisk) != 0)
                vd->vdev_wholedisk = -1ULL;

        /*
         * Look for the 'not present' flag.  This will only be set if the device
         * was not present at the time of import.
         */
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
            &vd->vdev_not_present);

        /*
         * Get the alignment requirement.
         */
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

        /*
         * If we're a top-level vdev, try to load the allocation parameters.
         */
        if (parent && !parent->vdev_parent && alloctype == VDEV_ALLOC_LOAD) {
                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
                    &vd->vdev_ms_array);
                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
                    &vd->vdev_ms_shift);
                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
                    &vd->vdev_asize);
        }

        /*
         * If we're a leaf vdev, try to load the DTL object and offline state.
         */
        if (vd->vdev_ops->vdev_op_leaf && alloctype == VDEV_ALLOC_LOAD) {
                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
                    &vd->vdev_dtl.smo_object);
                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
                    &vd->vdev_offline);
        }

        /*
         * Add ourselves to the parent's list of children.
         */
        vdev_add_child(parent, vd);

        *vdp = vd;

        return (0);
}

void
vdev_free(vdev_t *vd)
{
        int c;

        /*
         * vdev_free() implies closing the vdev first.  This is simpler than
         * trying to ensure complicated semantics for all callers.
         */
        vdev_close(vd);

        ASSERT(!list_link_active(&vd->vdev_dirty_node));

        /*
         * Free all children.
         */
        for (c = 0; c < vd->vdev_children; c++)
                vdev_free(vd->vdev_child[c]);

        ASSERT(vd->vdev_child == NULL);
        ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

        /*
         * Discard allocation state.
         */
        if (vd == vd->vdev_top)
                vdev_metaslab_fini(vd);

        ASSERT3U(vd->vdev_stat.vs_space, ==, 0);
        ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0);
        ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);

        /*
         * Remove this vdev from its parent's child list.
         */
        vdev_remove_child(vd->vdev_parent, vd);

        ASSERT(vd->vdev_parent == NULL);

        vdev_free_common(vd);
}

/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
        spa_t *spa = svd->vdev_spa;
        metaslab_t *msp;
        vdev_t *vd;
        int t;

        ASSERT(tvd == tvd->vdev_top);

        tvd->vdev_ms_array = svd->vdev_ms_array;
        tvd->vdev_ms_shift = svd->vdev_ms_shift;
        tvd->vdev_ms_count = svd->vdev_ms_count;

        svd->vdev_ms_array = 0;
        svd->vdev_ms_shift = 0;
        svd->vdev_ms_count = 0;

        tvd->vdev_mg = svd->vdev_mg;
        tvd->vdev_ms = svd->vdev_ms;

        svd->vdev_mg = NULL;
        svd->vdev_ms = NULL;

        if (tvd->vdev_mg != NULL)
                tvd->vdev_mg->mg_vd = tvd;

        tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
        tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
        tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

        svd->vdev_stat.vs_alloc = 0;
        svd->vdev_stat.vs_space = 0;
        svd->vdev_stat.vs_dspace = 0;

        for (t = 0; t < TXG_SIZE; t++) {
                while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
                        (void) txg_list_add(&tvd->vdev_ms_list, msp, t);
                while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
                        (void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
                if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
                        (void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
        }

        if (list_link_active(&svd->vdev_dirty_node)) {
                vdev_config_clean(svd);
                vdev_config_dirty(tvd);
        }

        tvd->vdev_reopen_wanted = svd->vdev_reopen_wanted;
        svd->vdev_reopen_wanted = 0;

        tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
        svd->vdev_deflate_ratio = 0;
}

static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
        int c;

        vd->vdev_top = tvd;

        for (c = 0; c < vd->vdev_children; c++)
                vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev.
 */
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
        spa_t *spa = cvd->vdev_spa;
        vdev_t *pvd = cvd->vdev_parent;
        vdev_t *mvd;

        ASSERT(spa_config_held(spa, RW_WRITER));

        mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);

        mvd->vdev_asize = cvd->vdev_asize;
        mvd->vdev_ashift = cvd->vdev_ashift;
        mvd->vdev_state = cvd->vdev_state;

        vdev_remove_child(pvd, cvd);
        vdev_add_child(pvd, mvd);
        cvd->vdev_id = mvd->vdev_children;
        vdev_add_child(mvd, cvd);
        vdev_top_update(cvd->vdev_top, cvd->vdev_top);

        if (mvd == mvd->vdev_top)
                vdev_top_transfer(cvd, mvd);

        return (mvd);
}

/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
        vdev_t *mvd = cvd->vdev_parent;
        vdev_t *pvd = mvd->vdev_parent;

        ASSERT(spa_config_held(cvd->vdev_spa, RW_WRITER));

        ASSERT(mvd->vdev_children == 1);
        ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
            mvd->vdev_ops == &vdev_replacing_ops ||
            mvd->vdev_ops == &vdev_spare_ops);
        cvd->vdev_ashift = mvd->vdev_ashift;

        vdev_remove_child(mvd, cvd);
        vdev_remove_child(pvd, mvd);
        cvd->vdev_id = mvd->vdev_id;
        vdev_add_child(pvd, cvd);
        /*
         * If we created a new toplevel vdev, then we need to change the child's
         * vdev GUID to match the old toplevel vdev.  Otherwise, we could have
         * detached an offline device, and when we go to import the pool we'll
         * think we have two toplevel vdevs, instead of a different version of
         * the same toplevel vdev.
         */
        if (cvd->vdev_top == cvd) {
                pvd->vdev_guid_sum -= cvd->vdev_guid;
                cvd->vdev_guid_sum -= cvd->vdev_guid;
                cvd->vdev_guid = mvd->vdev_guid;
                cvd->vdev_guid_sum += mvd->vdev_guid;
                pvd->vdev_guid_sum += cvd->vdev_guid;
        }
        vdev_top_update(cvd->vdev_top, cvd->vdev_top);

        if (cvd == cvd->vdev_top)
                vdev_top_transfer(mvd, cvd);

        ASSERT(mvd->vdev_children == 0);
        vdev_free(mvd);
}
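
/*
 * Initialize (or grow) the metaslab array for a top-level vdev.  When
 * loading an existing pool (txg == 0), each new metaslab's space map
 * object is read from the MOS; otherwise the metaslabs start empty.
 */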
int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
        spa_t *spa = vd->vdev_spa;
        objset_t *mos = spa->spa_meta_objset;
        metaslab_class_t *mc = spa_metaslab_class_select(spa);
        uint64_t m;
        uint64_t oldc = vd->vdev_ms_count;
        uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
        metaslab_t **mspp;
        int error;

        if (vd->vdev_ms_shift == 0)     /* not being allocated from yet */
                return (0);

        dprintf("%s oldc %llu newc %llu\n", vdev_description(vd), oldc, newc);

        ASSERT(oldc <= newc);

        if (vd->vdev_mg == NULL)
                vd->vdev_mg = metaslab_group_create(mc, vd);

        mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);

        if (oldc != 0) {
                bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
                kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
        }

        vd->vdev_ms = mspp;
        vd->vdev_ms_count = newc;

        for (m = oldc; m < newc; m++) {
                space_map_obj_t smo = { 0, 0, 0 };
                if (txg == 0) {
                        uint64_t object = 0;
                        error = dmu_read(mos, vd->vdev_ms_array,
                            m * sizeof (uint64_t), sizeof (uint64_t), &object);
                        if (error)
                                return (error);
                        if (object != 0) {
                                dmu_buf_t *db;
                                error = dmu_bonus_hold(mos, object, FTAG, &db);
                                if (error)
                                        return (error);
                                ASSERT3U(db->db_size, ==, sizeof (smo));
                                bcopy(db->db_data, &smo, db->db_size);
                                ASSERT3U(smo.smo_object, ==, object);
                                dmu_buf_rele(db, FTAG);
                        }
                }
                vd->vdev_ms[m] = metaslab_init(vd->vdev_mg, &smo,
                    m << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg);
        }

        return (0);
}
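
/*
 * Tear down a top-level vdev's metaslab array.
 */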
void
vdev_metaslab_fini(vdev_t *vd)
{
        uint64_t m;
        uint64_t count = vd->vdev_ms_count;

        if (vd->vdev_ms != NULL) {
                for (m = 0; m < count; m++)
                        if (vd->vdev_ms[m] != NULL)
                                metaslab_fini(vd->vdev_ms[m]);
                kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
                vd->vdev_ms = NULL;
        }
}

/*
 * Prepare a virtual device for access.
 */
int
vdev_open(vdev_t *vd)
{
        int error;
        int c;
        uint64_t osize = 0;
        uint64_t asize, psize;
        uint64_t ashift = 0;

        ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
            vd->vdev_state == VDEV_STATE_CANT_OPEN ||
            vd->vdev_state == VDEV_STATE_OFFLINE);

        if (vd->vdev_fault_mode == VDEV_FAULT_COUNT)
                vd->vdev_fault_arg >>= 1;
        else
                vd->vdev_fault_mode = VDEV_FAULT_NONE;

        vd->vdev_stat.vs_aux = VDEV_AUX_NONE;

        if (vd->vdev_ops->vdev_op_leaf) {
                vdev_cache_init(vd);
                vdev_queue_init(vd);
                vd->vdev_cache_active = B_TRUE;
        }

        if (vd->vdev_offline) {
                ASSERT(vd->vdev_children == 0);
                vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
                return (ENXIO);
        }

        error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);

        if (zio_injection_enabled && error == 0)
                error = zio_handle_device_injection(vd, ENXIO);

        dprintf("%s = %d, osize %llu, state = %d\n",
            vdev_description(vd), error, osize, vd->vdev_state);

        if (error) {
                vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    vd->vdev_stat.vs_aux);
                return (error);
        }

        vd->vdev_state = VDEV_STATE_HEALTHY;

        for (c = 0; c < vd->vdev_children; c++)
                if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
                        vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
                            VDEV_AUX_NONE);
                        break;
                }

        osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));

        if (vd->vdev_children == 0) {
                if (osize < SPA_MINDEVSIZE) {
                        vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_TOO_SMALL);
                        return (EOVERFLOW);
                }
                psize = osize;
                asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
        } else {
                if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
                    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
                        vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_TOO_SMALL);
                        return (EOVERFLOW);
                }
                psize = 0;
                asize = osize;
        }

        vd->vdev_psize = psize;

        if (vd->vdev_asize == 0) {
                /*
                 * This is the first-ever open, so use the computed values.
                 * For testing purposes, a higher ashift can be requested.
                 */
                vd->vdev_asize = asize;
                vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
        } else {
                /*
                 * Make sure the alignment requirement hasn't increased.
                 */
                if (ashift > vd->vdev_top->vdev_ashift) {
                        vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_BAD_LABEL);
                        return (EINVAL);
                }

                /*
                 * Make sure the device hasn't shrunk.
                 */
                if (asize < vd->vdev_asize) {
                        vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_BAD_LABEL);
                        return (EINVAL);
                }

                /*
                 * If all children are healthy and the asize has increased,
                 * then we've experienced dynamic LUN growth.
                 */
                if (vd->vdev_state == VDEV_STATE_HEALTHY &&
                    asize > vd->vdev_asize) {
                        vd->vdev_asize = asize;
                }
        }

        /*
         * If this is a top-level vdev, compute the raidz-deflation
         * ratio.  Note, we hard-code in 128k (1<<17) because it is the
         * current "typical" blocksize.  Even if SPA_MAXBLOCKSIZE
         * changes, this algorithm must never change, or we will
         * inconsistently account for existing bp's.
         */
        if (vd->vdev_top == vd) {
                vd->vdev_deflate_ratio = (1<<17) /
                    (vdev_psize_to_asize(vd, 1<<17) >> SPA_MINBLOCKSHIFT);
        }

        /*
         * This allows the ZFS DE to close cases appropriately.  If a device
         * goes away and later returns, we want to close the associated case.
         * But it's not enough to simply post this only when a device goes from
         * CANT_OPEN -> HEALTHY.  If we reboot the system and the device is
         * back, we also need to close the case (otherwise we will try to replay
         * it).  So we have to post this notifier every time.  Since this only
         * occurs during pool open or error recovery, this should not be an
         * issue.
         */
        zfs_post_ok(vd->vdev_spa, vd);

        return (0);
}

/*
 * Called once the vdevs are all opened, this routine validates the label
 * contents.  This needs to be done before vdev_load() so that we don't
 * inadvertently do repair I/Os to the wrong device, and so that vdev_reopen()
 * won't succeed if the device has been changed underneath.
 *
 * This function will only return failure if one of the vdevs indicates that it
 * has since been destroyed or exported.  This is only possible if
 * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
 * will be updated but the function will return 0.
 */
int
vdev_validate(vdev_t *vd)
{
        spa_t *spa = vd->vdev_spa;
        int c;
        nvlist_t *label;
        uint64_t guid;
        uint64_t state;

        for (c = 0; c < vd->vdev_children; c++)
                if (vdev_validate(vd->vdev_child[c]) != 0)
                        return (EBADF);

        /*
         * If the device has already failed, or was marked offline, don't do
         * any further validation.  Otherwise, label I/O will fail and we will
         * overwrite the previous state.
         */
        if (vd->vdev_ops->vdev_op_leaf && !vdev_is_dead(vd)) {

                if ((label = vdev_label_read_config(vd)) == NULL) {
                        vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_BAD_LABEL);
                        return (0);
                }

                if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
                    &guid) != 0 || guid != spa_guid(spa)) {
                        vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_CORRUPT_DATA);
                        nvlist_free(label);
                        return (0);
                }

                if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
                    &guid) != 0 || guid != vd->vdev_guid) {
                        vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_CORRUPT_DATA);
                        nvlist_free(label);
                        return (0);
                }

                if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0) {
                        vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_CORRUPT_DATA);
                        nvlist_free(label);
                        return (0);
                }

                nvlist_free(label);

                if (spa->spa_load_state == SPA_LOAD_OPEN &&
                    state != POOL_STATE_ACTIVE)
                        return (EBADF);
        }

        /*
         * If we were able to open and validate a vdev that was previously
         * marked permanently unavailable, clear that state now.
         */
        if (vd->vdev_not_present)
                vd->vdev_not_present = 0;

        return (0);
}

/*
 * Close a virtual device.
 */
void
vdev_close(vdev_t *vd)
{
        vd->vdev_ops->vdev_op_close(vd);

        if (vd->vdev_cache_active) {
                vdev_cache_fini(vd);
                vdev_queue_fini(vd);
                vd->vdev_cache_active = B_FALSE;
        }

        /*
         * We record the previous state before we close it, so that if we are
         * doing a reopen(), we don't generate FMA ereports if we notice that
         * it's still faulted.
         */
        vd->vdev_prevstate = vd->vdev_state;

        if (vd->vdev_offline)
                vd->vdev_state = VDEV_STATE_OFFLINE;
        else
                vd->vdev_state = VDEV_STATE_CLOSED;

        vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}
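
/*
 * Close and reopen a vdev in place, revalidating its label afterwards.
 */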
void
vdev_reopen(vdev_t *vd)
{
        spa_t *spa = vd->vdev_spa;

        ASSERT(spa_config_held(spa, RW_WRITER));

        vdev_close(vd);
        (void) vdev_open(vd);

        /*
         * Call vdev_validate() here to make sure we have the same device.
         * Otherwise, a device with an invalid label could be successfully
         * opened in response to vdev_reopen().
         *
         * The downside to this is that if the user is simply experimenting by
         * overwriting an entire disk, we'll fault the device rather than
         * demonstrate self-healing capabilities.  On the other hand, with
         * proper FMA integration, the series of errors we'd see from the device
         * would result in a faulted device anyway.  Given that this doesn't
         * model any real-world corruption, it's better to catch this here and
         * correctly identify that the device has either changed beneath us, or
         * is corrupted beyond recognition.
         */
        (void) vdev_validate(vd);

        /*
         * Reassess root vdev's health.
         */
        vdev_propagate_state(spa->spa_root_vdev);
}
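
/*
 * Open a newly created vdev tree and write out its initial labels.
 */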
int
vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
{
        int error;

        /*
         * Normally, partial opens (e.g. of a mirror) are allowed.
         * For a create, however, we want to fail the request if
         * there are any components we can't open.
         */
        error = vdev_open(vd);

        if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
                vdev_close(vd);
                return (error ? error : ENXIO);
        }

        /*
         * Recursively initialize all labels.
         */
        if ((error = vdev_label_init(vd, txg, isreplacing ?
            VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
                vdev_close(vd);
                return (error);
        }

        return (0);
}

/*
 * This is the latter half of vdev_create().  It is distinct because it
 * involves initiating transactions in order to do metaslab creation.
 * For creation, we want to try to create all vdevs at once and then undo it
 * if anything fails; this is much harder if we have pending transactions.
 */
void
vdev_init(vdev_t *vd, uint64_t txg)
{
        /*
         * Aim for roughly 200 metaslabs per vdev.
         */
        vd->vdev_ms_shift = highbit(vd->vdev_asize / 200);
        vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);

        /*
         * Initialize the vdev's metaslabs.  This can't fail because
         * there's nothing to read when creating all new metaslabs.
         */
        VERIFY(vdev_metaslab_init(vd, txg) == 0);
}
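
/*
 * Mark a top-level vdev as dirty in the given txg, queueing the
 * supplied metaslab or DTL for sync.
 */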
void
vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
{
        ASSERT(vd == vd->vdev_top);
        ASSERT(ISP2(flags));

        if (flags & VDD_METASLAB)
                (void) txg_list_add(&vd->vdev_ms_list, arg, txg);

        if (flags & VDD_DTL)
                (void) txg_list_add(&vd->vdev_dtl_list, arg, txg);

        (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
}
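
/*
 * DTL (dirty time log) helpers.  Each leaf vdev's DTL is a space map
 * keyed by txg, recording the transaction groups for which the device
 * may be missing writes.
 */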
void
vdev_dtl_dirty(space_map_t *sm, uint64_t txg, uint64_t size)
{
        mutex_enter(sm->sm_lock);
        if (!space_map_contains(sm, txg, size))
                space_map_add(sm, txg, size);
        mutex_exit(sm->sm_lock);
}

int
vdev_dtl_contains(space_map_t *sm, uint64_t txg, uint64_t size)
{
        int dirty;

        /*
         * Quick test without the lock -- covers the common case that
         * there are no dirty time segments.
         */
        if (sm->sm_space == 0)
                return (0);

        mutex_enter(sm->sm_lock);
        dirty = space_map_contains(sm, txg, size);
        mutex_exit(sm->sm_lock);

        return (dirty);
}

/*
 * Reassess DTLs after a config change or scrub completion.
 */
void
vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
{
        spa_t *spa = vd->vdev_spa;
        int c;

        ASSERT(spa_config_held(spa, RW_WRITER));

        if (vd->vdev_children == 0) {
                mutex_enter(&vd->vdev_dtl_lock);
                /*
                 * We've successfully scrubbed everything up to scrub_txg.
                 * Therefore, excise all old DTLs up to that point, then
                 * fold in the DTLs for everything we couldn't scrub.
                 */
                if (scrub_txg != 0) {
                        space_map_excise(&vd->vdev_dtl_map, 0, scrub_txg);
                        space_map_union(&vd->vdev_dtl_map, &vd->vdev_dtl_scrub);
                }
                if (scrub_done)
                        space_map_vacate(&vd->vdev_dtl_scrub, NULL, NULL);
                mutex_exit(&vd->vdev_dtl_lock);

                vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
                return;
        }

        /*
         * Make sure the DTLs are always correct under the scrub lock.
         */
        if (vd == spa->spa_root_vdev)
                mutex_enter(&spa->spa_scrub_lock);

        mutex_enter(&vd->vdev_dtl_lock);
        space_map_vacate(&vd->vdev_dtl_map, NULL, NULL);
        space_map_vacate(&vd->vdev_dtl_scrub, NULL, NULL);
        mutex_exit(&vd->vdev_dtl_lock);

        for (c = 0; c < vd->vdev_children; c++) {
                vdev_t *cvd = vd->vdev_child[c];
                vdev_dtl_reassess(cvd, txg, scrub_txg, scrub_done);
                mutex_enter(&vd->vdev_dtl_lock);
                space_map_union(&vd->vdev_dtl_map, &cvd->vdev_dtl_map);
                space_map_union(&vd->vdev_dtl_scrub, &cvd->vdev_dtl_scrub);
                mutex_exit(&vd->vdev_dtl_lock);
        }

        if (vd == spa->spa_root_vdev)
                mutex_exit(&spa->spa_scrub_lock);
}
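
/*
 * Load a leaf vdev's DTL space map from the MOS.
 */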
static int
vdev_dtl_load(vdev_t *vd)
{
        spa_t *spa = vd->vdev_spa;
        space_map_obj_t *smo = &vd->vdev_dtl;
        objset_t *mos = spa->spa_meta_objset;
        dmu_buf_t *db;
        int error;

        ASSERT(vd->vdev_children == 0);

        if (smo->smo_object == 0)
                return (0);

        if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0)
                return (error);

        ASSERT3U(db->db_size, ==, sizeof (*smo));
        bcopy(db->db_data, smo, db->db_size);
        dmu_buf_rele(db, FTAG);

        mutex_enter(&vd->vdev_dtl_lock);
        error = space_map_load(&vd->vdev_dtl_map, NULL, SM_ALLOC, smo, mos);
        mutex_exit(&vd->vdev_dtl_lock);

        return (error);
}
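
/*
 * Write a leaf vdev's DTL space map out to the MOS in the given txg,
 * allocating or freeing the backing object as needed.
 */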
void
vdev_dtl_sync(vdev_t *vd, uint64_t txg)
{
        spa_t *spa = vd->vdev_spa;
        space_map_obj_t *smo = &vd->vdev_dtl;
        space_map_t *sm = &vd->vdev_dtl_map;
        objset_t *mos = spa->spa_meta_objset;
        space_map_t smsync;
        kmutex_t smlock;
        dmu_buf_t *db;
        dmu_tx_t *tx;

        dprintf("%s in txg %llu pass %d\n",
            vdev_description(vd), (u_longlong_t)txg, spa_sync_pass(spa));

        tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

        if (vd->vdev_detached) {
                if (smo->smo_object != 0) {
                        int err = dmu_object_free(mos, smo->smo_object, tx);
                        ASSERT3U(err, ==, 0);
                        smo->smo_object = 0;
                }
                dmu_tx_commit(tx);
                dprintf("detach %s committed in txg %llu\n",
                    vdev_description(vd), txg);
                return;
        }

        if (smo->smo_object == 0) {
                ASSERT(smo->smo_objsize == 0);
                ASSERT(smo->smo_alloc == 0);
                smo->smo_object = dmu_object_alloc(mos,
                    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
                    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
                ASSERT(smo->smo_object != 0);
                vdev_config_dirty(vd->vdev_top);
        }

        mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL);

        space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift,
            &smlock);

        mutex_enter(&smlock);

        mutex_enter(&vd->vdev_dtl_lock);
        space_map_walk(sm, space_map_add, &smsync);
        mutex_exit(&vd->vdev_dtl_lock);

        space_map_truncate(smo, mos, tx);
        space_map_sync(&smsync, SM_ALLOC, smo, mos, tx);

        space_map_destroy(&smsync);

        mutex_exit(&smlock);
        mutex_destroy(&smlock);

        VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
        dmu_buf_will_dirty(db, tx);
        ASSERT3U(db->db_size, ==, sizeof (*smo));
        bcopy(smo, db->db_data, db->db_size);
        dmu_buf_rele(db, FTAG);

        dmu_tx_commit(tx);
}

void
vdev_load(vdev_t *vd)
{
        int c;

        /*
         * Recursively load all children.
         */
        for (c = 0; c < vd->vdev_children; c++)
                vdev_load(vd->vdev_child[c]);

        /*
         * If this is a top-level vdev, initialize its metaslabs.
         */
        if (vd == vd->vdev_top &&
            (vd->vdev_ashift == 0 || vd->vdev_asize == 0 ||
            vdev_metaslab_init(vd, 0) != 0))
                vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);

        /*
         * If this is a leaf vdev, load its DTL.
         */
        if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0)
                vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
}

/*
 * This special case of vdev_spare() is used for hot spares.  Its sole
 * purpose is to set the vdev state for the associated vdev.  To do this,
 * we make sure that we can open the underlying device, then try to read
 * the label, and make sure that the label is sane and that it hasn't been
 * repurposed to another pool.
 */
int
vdev_validate_spare(vdev_t *vd)
{
        nvlist_t *label;
        uint64_t guid, version;
        uint64_t state;

        if ((label = vdev_label_read_config(vd)) == NULL) {
                vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                return (-1);
        }

        if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
            version > ZFS_VERSION ||
            nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
            guid != vd->vdev_guid ||
            nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
                vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                nvlist_free(label);
                return (-1);
        }

        /*
         * We don't actually check the pool state here.  If it's in fact in
         * use by another pool, we update this fact on the fly when requested.
         */
        nvlist_free(label);
        return (0);
}
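
/*
 * Called at the end of spa_sync(): clean up this vdev's metaslabs for
 * the just-synced txg.
 */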
void
vdev_sync_done(vdev_t *vd, uint64_t txg)
{
        metaslab_t *msp;

        dprintf("%s txg %llu\n", vdev_description(vd), txg);

        while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
                metaslab_sync_done(msp, txg);
}
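
/*
 * Sync this vdev's dirty metaslabs and DTLs for the given txg.
 */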
void
vdev_sync(vdev_t *vd, uint64_t txg)
{
        spa_t *spa = vd->vdev_spa;
        vdev_t *lvd;
        metaslab_t *msp;
        dmu_tx_t *tx;

        dprintf("%s txg %llu pass %d\n",
            vdev_description(vd), (u_longlong_t)txg, spa_sync_pass(spa));

        if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) {
                ASSERT(vd == vd->vdev_top);
                tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
                vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
                    DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
                ASSERT(vd->vdev_ms_array != 0);
                vdev_config_dirty(vd);
                dmu_tx_commit(tx);
        }

        while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
                metaslab_sync(msp, txg);
                (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
        }

        while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
                vdev_dtl_sync(lvd, txg);

        (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
}
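
/*
 * Convert a physical size to an allocatable size using this vdev's
 * asize function.
 */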
uint64_t
vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
{
        return (vd->vdev_ops->vdev_op_asize(vd, psize));
}

void
vdev_io_start(zio_t *zio)
{
        zio->io_vd->vdev_ops->vdev_op_io_start(zio);
}

void
vdev_io_done(zio_t *zio)
{
        zio->io_vd->vdev_ops->vdev_op_io_done(zio);
}

const char *
vdev_description(vdev_t *vd)
{
        if (vd == NULL || vd->vdev_ops == NULL)
                return ("<unknown>");

        if (vd->vdev_path != NULL)
                return (vd->vdev_path);

        if (vd->vdev_parent == NULL)
                return (spa_name(vd->vdev_spa));

        return (vd->vdev_ops->vdev_op_type);
}
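
/*
 * Bring a previously offlined leaf vdev back online, then kick off a
 * resilver.
 */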
int
vdev_online(spa_t *spa, uint64_t guid)
{
        vdev_t *rvd, *vd;
        uint64_t txg;

        txg = spa_vdev_enter(spa);

        rvd = spa->spa_root_vdev;

        if ((vd = vdev_lookup_by_guid(rvd, guid)) == NULL)
                return (spa_vdev_exit(spa, NULL, txg, ENODEV));

        if (!vd->vdev_ops->vdev_op_leaf)
                return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

        dprintf("ONLINE: %s\n", vdev_description(vd));

        vd->vdev_offline = B_FALSE;
        vd->vdev_tmpoffline = B_FALSE;
        vdev_reopen(vd->vdev_top);

        vdev_config_dirty(vd->vdev_top);

        (void) spa_vdev_exit(spa, NULL, txg, 0);

        VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

        return (0);
}
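
/*
 * Take a leaf vdev offline, refusing if doing so would leave its
 * top-level vdev unusable.
 */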
int
vdev_offline(spa_t *spa, uint64_t guid, int istmp)
{
        vdev_t *rvd, *vd;
        uint64_t txg;

        txg = spa_vdev_enter(spa);

        rvd = spa->spa_root_vdev;

        if ((vd = vdev_lookup_by_guid(rvd, guid)) == NULL)
                return (spa_vdev_exit(spa, NULL, txg, ENODEV));

        if (!vd->vdev_ops->vdev_op_leaf)
                return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

        dprintf("OFFLINE: %s\n", vdev_description(vd));

        /*
         * If the device isn't already offline, try to offline it.
         */
        if (!vd->vdev_offline) {
                /*
                 * If this device's top-level vdev has a non-empty DTL,
                 * don't allow the device to be offlined.
                 *
                 * XXX -- make this more precise by allowing the offline
                 * as long as the remaining devices don't have any DTL holes.
                 */
                if (vd->vdev_top->vdev_dtl_map.sm_space != 0)
                        return (spa_vdev_exit(spa, NULL, txg, EBUSY));

                /*
                 * Offline this device and reopen its top-level vdev.
                 * If this action results in the top-level vdev becoming
                 * unusable, undo it and fail the request.
                 */
                vd->vdev_offline = B_TRUE;
                vdev_reopen(vd->vdev_top);
                if (vdev_is_dead(vd->vdev_top)) {
                        vd->vdev_offline = B_FALSE;
                        vdev_reopen(vd->vdev_top);
                        return (spa_vdev_exit(spa, NULL, txg, EBUSY));
                }
        }

        vd->vdev_tmpoffline = istmp;

        vdev_config_dirty(vd->vdev_top);

        return (spa_vdev_exit(spa, NULL, txg, 0));
}

/*
 * Clear the error counts associated with this vdev.  Unlike vdev_online() and
 * vdev_offline(), we assume the spa config is locked.  We also clear all
 * children.  If 'vd' is NULL, then the user wants to clear all vdevs.
 */
void
vdev_clear(spa_t *spa, vdev_t *vd)
{
        int c;

        if (vd == NULL)
                vd = spa->spa_root_vdev;

        vd->vdev_stat.vs_read_errors = 0;
        vd->vdev_stat.vs_write_errors = 0;
        vd->vdev_stat.vs_checksum_errors = 0;

        for (c = 0; c < vd->vdev_children; c++)
                vdev_clear(spa, vd->vdev_child[c]);
}

int
vdev_is_dead(vdev_t *vd)
{
        return (vd->vdev_state <= VDEV_STATE_CANT_OPEN);
}

int
vdev_error_inject(vdev_t *vd, zio_t *zio)
{
        int error = 0;

        if (vd->vdev_fault_mode == VDEV_FAULT_NONE)
                return (0);

        if (((1ULL << zio->io_type) & vd->vdev_fault_mask) == 0)
                return (0);

        switch (vd->vdev_fault_mode) {
        case VDEV_FAULT_RANDOM:
                if (spa_get_random(vd->vdev_fault_arg) == 0)
                        error = EIO;
                break;

        case VDEV_FAULT_COUNT:
                if ((int64_t)--vd->vdev_fault_arg <= 0)
                        vd->vdev_fault_mode = VDEV_FAULT_NONE;
                error = EIO;
                break;
        }

        if (error != 0) {
                dprintf("returning %d for type %d on %s state %d offset %llx\n",
                    error, zio->io_type, vdev_description(vd),
                    vd->vdev_state, zio->io_offset);
        }

        return (error);
}

/*
 * Get statistics for the given vdev.
 */
void
vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
{
        vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
        int c, t;

        mutex_enter(&vd->vdev_stat_lock);
        bcopy(&vd->vdev_stat, vs, sizeof (*vs));
        vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
        vs->vs_state = vd->vdev_state;
        vs->vs_rsize = vdev_get_rsize(vd);
        mutex_exit(&vd->vdev_stat_lock);

        /*
         * If we're getting stats on the root vdev, aggregate the I/O counts
         * over all top-level vdevs (i.e. the direct children of the root).
         */
        if (vd == rvd) {
                for (c = 0; c < rvd->vdev_children; c++) {
                        vdev_t *cvd = rvd->vdev_child[c];
                        vdev_stat_t *cvs = &cvd->vdev_stat;

                        mutex_enter(&vd->vdev_stat_lock);
                        for (t = 0; t < ZIO_TYPES; t++) {
                                vs->vs_ops[t] += cvs->vs_ops[t];
                                vs->vs_bytes[t] += cvs->vs_bytes[t];
                        }
                        vs->vs_read_errors += cvs->vs_read_errors;
                        vs->vs_write_errors += cvs->vs_write_errors;
                        vs->vs_checksum_errors += cvs->vs_checksum_errors;
                        vs->vs_scrub_examined += cvs->vs_scrub_examined;
                        vs->vs_scrub_errors += cvs->vs_scrub_errors;
                        mutex_exit(&vd->vdev_stat_lock);
                }
        }
}
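
/*
 * Update I/O and error statistics for a completed zio; failed writes
 * are also recorded in the DTLs of the affected vdevs.
 */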
void
vdev_stat_update(zio_t *zio)
{
        vdev_t *vd = zio->io_vd;
        vdev_t *pvd;
        uint64_t txg = zio->io_txg;
        vdev_stat_t *vs = &vd->vdev_stat;
        zio_type_t type = zio->io_type;
        int flags = zio->io_flags;

        if (zio->io_error == 0) {
                if (!(flags & ZIO_FLAG_IO_BYPASS)) {
                        mutex_enter(&vd->vdev_stat_lock);
                        vs->vs_ops[type]++;
                        vs->vs_bytes[type] += zio->io_size;
                        mutex_exit(&vd->vdev_stat_lock);
                }
                if ((flags & ZIO_FLAG_IO_REPAIR) &&
                    zio->io_delegate_list == NULL) {
                        mutex_enter(&vd->vdev_stat_lock);
                        if (flags & ZIO_FLAG_SCRUB_THREAD)
                                vs->vs_scrub_repaired += zio->io_size;
                        else
                                vs->vs_self_healed += zio->io_size;
                        mutex_exit(&vd->vdev_stat_lock);
                }
                return;
        }

        if (flags & ZIO_FLAG_SPECULATIVE)
                return;

        if (!vdev_is_dead(vd)) {
                mutex_enter(&vd->vdev_stat_lock);
                if (type == ZIO_TYPE_READ) {
                        if (zio->io_error == ECKSUM)
                                vs->vs_checksum_errors++;
                        else
                                vs->vs_read_errors++;
                }
                if (type == ZIO_TYPE_WRITE)
                        vs->vs_write_errors++;
                mutex_exit(&vd->vdev_stat_lock);
        }

        if (type == ZIO_TYPE_WRITE) {
                if (txg == 0 || vd->vdev_children != 0)
                        return;
                if (flags & ZIO_FLAG_SCRUB_THREAD) {
                        ASSERT(flags & ZIO_FLAG_IO_REPAIR);
                        for (pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
                                vdev_dtl_dirty(&pvd->vdev_dtl_scrub, txg, 1);
                }
                if (!(flags & ZIO_FLAG_IO_REPAIR)) {
                        if (vdev_dtl_contains(&vd->vdev_dtl_map, txg, 1))
                                return;
                        vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
                        for (pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
                                vdev_dtl_dirty(&pvd->vdev_dtl_map, txg, 1);
                }
        }
}

void
vdev_scrub_stat_update(vdev_t *vd, pool_scrub_type_t type, boolean_t complete)
{
        int c;
        vdev_stat_t *vs = &vd->vdev_stat;

        for (c = 0; c < vd->vdev_children; c++)
                vdev_scrub_stat_update(vd->vdev_child[c], type, complete);

        mutex_enter(&vd->vdev_stat_lock);

        if (type == POOL_SCRUB_NONE) {
                /*
                 * Update completion and end time.  Leave everything else alone
                 * so we can report what happened during the previous scrub.
                 */
                vs->vs_scrub_complete = complete;
                vs->vs_scrub_end = gethrestime_sec();
        } else {
                vs->vs_scrub_type = type;
                vs->vs_scrub_complete = 0;
                vs->vs_scrub_examined = 0;
                vs->vs_scrub_repaired = 0;
                vs->vs_scrub_errors = 0;
                vs->vs_scrub_start = gethrestime_sec();
                vs->vs_scrub_end = 0;
        }

        mutex_exit(&vd->vdev_stat_lock);
}

/*
 * Update the in-core space usage stats for this vdev and the root vdev.
 */
void
vdev_space_update(vdev_t *vd, int64_t space_delta, int64_t alloc_delta)
{
        ASSERT(vd == vd->vdev_top);
        int64_t dspace_delta = space_delta;

        do {
                if (vd->vdev_ms_count) {
                        /*
                         * If this is a top-level vdev, apply the
                         * inverse of its psize-to-asize (ie. RAID-Z)
                         * space-expansion factor.  We must calculate
                         * this here and not at the root vdev because
                         * the root vdev's psize-to-asize is simply the
                         * max of its children's, thus not accurate
                         * enough for us.
                         */
                        ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0);
                        dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
                            vd->vdev_deflate_ratio;
                }

                mutex_enter(&vd->vdev_stat_lock);
                vd->vdev_stat.vs_space += space_delta;
                vd->vdev_stat.vs_alloc += alloc_delta;
                vd->vdev_stat.vs_dspace += dspace_delta;
                mutex_exit(&vd->vdev_stat_lock);
        } while ((vd = vd->vdev_parent) != NULL);
}

/*
 * Mark a top-level vdev's config as dirty, placing it on the dirty list
 * so that it will be written out next time the vdev configuration is synced.
 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
 */
void
vdev_config_dirty(vdev_t *vd)
{
        spa_t *spa = vd->vdev_spa;
        vdev_t *rvd = spa->spa_root_vdev;
        int c;

        /*
         * The dirty list is protected by the config lock.  The caller must
         * either hold the config lock as writer, or must be the sync thread
         * (which holds the lock as reader).  There's only one sync thread,
         * so this is sufficient to ensure mutual exclusion.
         */
        ASSERT(spa_config_held(spa, RW_WRITER) ||
            dsl_pool_sync_context(spa_get_dsl(spa)));

        if (vd == rvd) {
                for (c = 0; c < rvd->vdev_children; c++)
                        vdev_config_dirty(rvd->vdev_child[c]);
        } else {
                ASSERT(vd == vd->vdev_top);

                if (!list_link_active(&vd->vdev_dirty_node))
                        list_insert_head(&spa->spa_dirty_list, vd);
        }
}

void
vdev_config_clean(vdev_t *vd)
{
        spa_t *spa = vd->vdev_spa;

        ASSERT(spa_config_held(spa, RW_WRITER) ||
            dsl_pool_sync_context(spa_get_dsl(spa)));

        ASSERT(list_link_active(&vd->vdev_dirty_node));
        list_remove(&spa->spa_dirty_list, vd);
}
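
/*
 * Derive a vdev's state from that of its children, and propagate
 * 'corrupt data' from a toplevel vdev up to the root.
 */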
void
vdev_propagate_state(vdev_t *vd)
{
        vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
        int degraded = 0, faulted = 0;
        int corrupted = 0;
        int c;
        vdev_t *child;

        for (c = 0; c < vd->vdev_children; c++) {
                child = vd->vdev_child[c];
                if (child->vdev_state <= VDEV_STATE_CANT_OPEN)
                        faulted++;
                else if (child->vdev_state == VDEV_STATE_DEGRADED)
                        degraded++;

                if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
                        corrupted++;
        }

        vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);

        /*
         * Root special: if there is a toplevel vdev that cannot be
         * opened due to corrupted metadata, then propagate the root
         * vdev's aux state as 'corrupt' rather than 'insufficient
         * replicas'.
         */
        if (corrupted && vd == rvd && rvd->vdev_state == VDEV_STATE_CANT_OPEN)
                vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
}

/*
 * Set a vdev's state.  If this is during an open, we don't update the parent
 * state, because we're in the process of opening children depth-first.
 * Otherwise, we propagate the change to the parent.
 *
 * If this routine places a device in a faulted state, an appropriate ereport is
 * generated.
 */
void
vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
{
        uint64_t save_state;

        if (state == vd->vdev_state) {
                vd->vdev_stat.vs_aux = aux;
                return;
        }

        save_state = vd->vdev_state;

        vd->vdev_state = state;
        vd->vdev_stat.vs_aux = aux;

        /*
         * If we are setting the vdev state to anything but an open state, then
         * always close the underlying device.  Otherwise, we keep accessible
         * but invalid devices open forever.  We don't call vdev_close() itself,
         * because that implies some extra checks (offline, etc) that we don't
         * want here.  This is limited to leaf devices, because otherwise
         * closing the device will affect other children.
         */
        if (vdev_is_dead(vd) && vd->vdev_ops->vdev_op_leaf)
                vd->vdev_ops->vdev_op_close(vd);

        if (state == VDEV_STATE_CANT_OPEN) {
                /*
                 * If we fail to open a vdev during an import, we mark it as
                 * "not available", which signifies that it was never there to
                 * begin with.  Failure to open such a device is not considered
                 * an error.
                 */
                if (vd->vdev_spa->spa_load_state == SPA_LOAD_IMPORT &&
                    vd->vdev_ops->vdev_op_leaf)
                        vd->vdev_not_present = 1;

                /*
                 * Post the appropriate ereport.  If the 'prevstate' field is
                 * set to something other than VDEV_STATE_UNKNOWN, it indicates
                 * that this is part of a vdev_reopen().  In this case, we don't
                 * want to post the ereport if the device was already in the
                 * CANT_OPEN state beforehand.
                 */
                if (vd->vdev_prevstate != state && !vd->vdev_not_present &&
                    vd != vd->vdev_spa->spa_root_vdev) {
                        const char *class;

                        switch (aux) {
                        case VDEV_AUX_OPEN_FAILED:
                                class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
                                break;
                        case VDEV_AUX_CORRUPT_DATA:
                                class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
                                break;
                        case VDEV_AUX_NO_REPLICAS:
                                class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
                                break;
                        case VDEV_AUX_BAD_GUID_SUM:
                                class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
                                break;
                        case VDEV_AUX_TOO_SMALL:
                                class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
                                break;
                        case VDEV_AUX_BAD_LABEL:
                                class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
                                break;
                        default:
                                class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
                        }

                        zfs_ereport_post(class, vd->vdev_spa,
                            vd, NULL, save_state, 0);
                }
        }

        if (isopen)
                return;

        if (vd->vdev_parent != NULL)
                vdev_propagate_state(vd->vdev_parent);
}