/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
/*
 * Virtual device vector for mirroring.
 */
typedef struct mirror_child {
	vdev_t		*mc_vd;
	uint64_t	mc_offset;
	int		mc_error;
	int		mc_load;	/* weighted load for child selection */
	uint8_t		mc_tried;
	uint8_t		mc_skipped;
	uint8_t		mc_speculative;
} mirror_child_t;

typedef struct mirror_map {
	int		*mm_preferred;	/* lowest-load children */
	int		mm_preferred_cnt;
	int		mm_children;
	boolean_t	mm_resilvering;
	boolean_t	mm_root;
	mirror_child_t	mm_child[];
} mirror_map_t;
static int vdev_mirror_shift = 21;

SYSCTL_DECL(_vfs_zfs_vdev);
static SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, mirror, CTLFLAG_RD, 0,
    "ZFS VDEV Mirror");
/*
 * The load configuration settings below are tuned by default for
 * the case where all devices are of the same rotational type.
 *
 * If there is a mixture of rotating and non-rotating media, setting
 * non_rotating_seek_inc to 0 may well provide better results as it
 * will direct more reads to the non-rotating vdevs which are more
 * likely to have higher performance.
 */
/* Rotating media load calculation configuration. */
static int rotating_inc = 0;
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_inc, CTLFLAG_RWTUN,
    &rotating_inc, 0, "Rotating media load increment for non-seeking I/Os");

static int rotating_seek_inc = 5;
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_seek_inc, CTLFLAG_RWTUN,
    &rotating_seek_inc, 0, "Rotating media load increment for seeking I/Os");

static int rotating_seek_offset = 1 * 1024 * 1024;
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_seek_offset, CTLFLAG_RWTUN,
    &rotating_seek_offset, 0, "Offset in bytes from the last I/O which "
    "triggers a reduced rotating media seek increment");

/* Non-rotating media load calculation configuration. */
static int non_rotating_inc = 0;
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, non_rotating_inc, CTLFLAG_RWTUN,
    &non_rotating_inc, 0,
    "Non-rotating media load increment for non-seeking I/Os");

static int non_rotating_seek_inc = 1;
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, non_rotating_seek_inc, CTLFLAG_RWTUN,
    &non_rotating_seek_inc, 0,
    "Non-rotating media load increment for seeking I/Os");
static inline size_t
vdev_mirror_map_size(int children)
{
	return (offsetof(mirror_map_t, mm_child[children]) +
	    sizeof(int) * children);
}
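/*
 * The map is a single allocation: the mirror_map_t header, then
 * mm_children mirror_child_t slots (the flexible array member), then
 * the mm_children ints that back mm_preferred, which is pointed at
 * the tail of the allocation below.
 */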
static inline mirror_map_t *
vdev_mirror_map_alloc(int children, boolean_t resilvering, boolean_t root)
{
	mirror_map_t *mm;

	mm = kmem_zalloc(vdev_mirror_map_size(children), KM_SLEEP);
	mm->mm_children = children;
	mm->mm_resilvering = resilvering;
	mm->mm_root = root;
	mm->mm_preferred = (int *)((uintptr_t)mm +
	    offsetof(mirror_map_t, mm_child[children]));

	return (mm);
}
static void
vdev_mirror_map_free(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;

	kmem_free(mm, vdev_mirror_map_size(mm->mm_children));
}
static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
	vdev_mirror_map_free,
	zio_vsd_default_cksum_report
};
static int
vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
{
	uint64_t lastoffset;
	int load;

	/* All DVAs have equal weight at the root. */
	if (mm->mm_root)
		return (INT_MAX);

	/*
	 * We don't return INT_MAX if the device is resilvering, i.e.
	 * vdev_resilver_txg != 0, because in testing overall performance
	 * was slightly worse when resilvering devices were penalized
	 * than when they were not.
	 */

	/* Standard load based on pending queue length. */
	load = vdev_queue_length(vd);
	lastoffset = vdev_queue_lastoffset(vd);

	if (vd->vdev_rotation_rate == VDEV_RATE_NON_ROTATING) {
		/* Non-rotating media. */
		if (lastoffset == zio_offset)
			return (load + non_rotating_inc);

		/*
		 * Apply a seek penalty even for non-rotating devices as
		 * sequential I/Os can be aggregated into fewer operations
		 * on the device, thus avoiding unnecessary per-command
		 * overhead and boosting performance.
		 */
		return (load + non_rotating_seek_inc);
	}

	/* Rotating media I/Os which directly follow the last I/O. */
	if (lastoffset == zio_offset)
		return (load + rotating_inc);

	/*
	 * Apply half the seek increment to I/Os within seek offset
	 * of the last I/O queued to this vdev as they should incur less
	 * of a seek increment.
	 */
	if (ABS(lastoffset - zio_offset) < rotating_seek_offset)
		return (load + (rotating_seek_inc / 2));

	/* Apply the full seek increment to all other I/Os. */
	return (load + rotating_seek_inc);
}
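/*
 * Worked example with the default tunables: an HDD child with three
 * queued I/Os that would have to seek 2MB (more than
 * rotating_seek_offset) scores 3 + rotating_seek_inc = 8, while an
 * idle SSD child scores 0 + non_rotating_seek_inc = 1, so the child
 * selection logic below prefers the SSD.
 */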
static mirror_map_t *
vdev_mirror_map_init(zio_t *zio)
{
	mirror_map_t *mm = NULL;
	mirror_child_t *mc;
	vdev_t *vd = zio->io_vd;
	int c;

	if (vd == NULL) {
		dva_t *dva = zio->io_bp->blk_dva;
		spa_t *spa = zio->io_spa;
		dva_t dva_copy[SPA_DVAS_PER_BP];

		c = BP_GET_NDVAS(zio->io_bp);
		/*
		 * If we do not trust the pool config, some DVAs might be
		 * invalid or point to vdevs that do not exist. We skip them.
		 */
		if (!spa_trust_config(spa)) {
			ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
			int j = 0;
			for (int i = 0; i < c; i++) {
				if (zfs_dva_valid(spa, &dva[i], zio->io_bp))
					dva_copy[j++] = dva[i];
			}
			if (j == 0) {
				zio->io_vsd = NULL;
				zio->io_error = ENXIO;
				return (NULL);
			}
			if (j < c) {
				dva = dva_copy;
				c = j;
			}
		}
		mm = vdev_mirror_map_alloc(c, B_FALSE, B_TRUE);

		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
			mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
		}
	} else {
		/*
		 * If we are resilvering, then we should handle scrub reads
		 * differently; we shouldn't issue them to the resilvering
		 * device because it might not have those blocks.
		 *
		 * We are resilvering iff:
		 * 1) We are a replacing vdev (i.e. our name is "replacing-1"
		 *    or "spare-1" or something like that), and
		 * 2) The pool is currently being resilvered.
		 *
		 * We cannot simply check vd->vdev_resilver_txg, because it's
		 * not set in this path.
		 *
		 * Nor can we just check our vdev_ops; there are cases (such as
		 * when a user types "zpool replace pool odev spare_dev" and
		 * spare_dev is in the spare list, or when a spare device is
		 * automatically used to replace a DEGRADED device) when
		 * resilvering is complete but both the original vdev and the
		 * spare vdev remain in the pool.  That behavior is intentional.
		 * It helps implement the policy that a spare should be
		 * automatically removed from the pool after the user replaces
		 * the device that originally failed.
		 *
		 * If a spa load is in progress, then spa_dsl_pool may be
		 * uninitialized.  But we shouldn't be resilvering during a spa
		 * load anyway.
		 */
		boolean_t replacing = (vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops) &&
		    spa_load_state(vd->vdev_spa) == SPA_LOAD_NONE &&
		    dsl_scan_resilvering(vd->vdev_spa->spa_dsl_pool);
		mm = vdev_mirror_map_alloc(vd->vdev_children, replacing,
		    B_FALSE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vd->vdev_child[c];
			mc->mc_offset = zio->io_offset;
		}
	}
	zio->io_vsd = mm;
	zio->io_vsd_ops = &vdev_mirror_vsd_ops;
	return (mm);
}
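/*
 * Note that the map is hung off zio->io_vsd above; once the zio
 * completes, vdev_mirror_map_free() is invoked through
 * vdev_mirror_vsd_ops to release it.
 */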
static int
vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	int numerrors = 0;
	int lasterror = 0;

	if (vd->vdev_children == 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	vdev_open_children(vd);

	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		if (cvd->vdev_open_error) {
			lasterror = cvd->vdev_open_error;
			numerrors++;
			continue;
		}

		/*
		 * The -1/+1 dance keeps MIN() well-defined on the first
		 * pass: vdev_open() starts *asize at 0, which wraps to
		 * UINT64_MAX here, so the first child's size always wins.
		 */
		*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
		*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
		*logical_ashift = MAX(*logical_ashift, cvd->vdev_ashift);
		*physical_ashift = MAX(*physical_ashift,
		    cvd->vdev_physical_ashift);
	}

	if (numerrors == vd->vdev_children) {
		if (vdev_children_are_offline(vd))
			vd->vdev_stat.vs_aux = VDEV_AUX_CHILDREN_OFFLINE;
		else
			vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
		return (lasterror);
	}

	return (0);
}
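/*
 * For example, mirroring a 1TB child with a 2TB child yields an asize
 * of roughly 1TB (the MIN() of the children), while a 4Kn child raises
 * the logical ashift of the whole mirror (the MAX() of the children)
 * even if its sibling is 512n.
 */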
static void
vdev_mirror_close(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_close(vd->vdev_child[c]);
}
static void
vdev_mirror_child_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}
static void
vdev_mirror_scrub_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	if (zio->io_error == 0) {
		zio_t *pio;
		zio_link_t *zl = NULL;

		/*
		 * The read succeeded, so copy the data into the buffer
		 * of every waiting parent.
		 */
		mutex_enter(&zio->io_lock);
		while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
			mutex_enter(&pio->io_lock);
			ASSERT3U(zio->io_size, >=, pio->io_size);
			abd_copy(pio->io_abd, zio->io_abd, pio->io_size);
			mutex_exit(&pio->io_lock);
		}
		mutex_exit(&zio->io_lock);
	}

	abd_free(zio->io_abd);

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}
/*
 * Check the other, lower-index DVAs to see if they're on the same
 * vdev as the child we picked.  If they are, use them since they
 * are likely to have been allocated from the primary metaslab in
 * use at the time, and hence are more likely to have locality with
 * single-copy data.
 */
static int
vdev_mirror_dva_select(zio_t *zio, int p)
{
	dva_t *dva = zio->io_bp->blk_dva;
	mirror_map_t *mm = zio->io_vsd;
	int preferred;
	int c;

	preferred = mm->mm_preferred[p];
	for (p--; p >= 0; p--) {
		c = mm->mm_preferred[p];
		if (DVA_GET_VDEV(&dva[c]) == DVA_GET_VDEV(&dva[preferred]))
			preferred = c;
	}

	return (preferred);
}
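/*
 * For example, if the lowest-load set holds children 2 and 0 and both
 * DVAs live on the same top-level vdev, the loop above walks down to
 * child 0 and returns it in preference to child 2.
 */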
static int
vdev_mirror_preferred_child_randomize(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	int p;

	if (mm->mm_root) {
		p = spa_get_random(mm->mm_preferred_cnt);
		return (vdev_mirror_dva_select(zio, p));
	}

	/*
	 * To ensure we don't always favour the first matching vdev,
	 * which could lead to wear leveling issues on SSDs, we
	 * use the I/O offset as a pseudo random seed into the vdevs
	 * which have the lowest load.
	 */
	p = (zio->io_offset >> vdev_mirror_shift) % mm->mm_preferred_cnt;
	return (mm->mm_preferred[p]);
}
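/*
 * With vdev_mirror_shift = 21 the offset space is carved into 2MB
 * buckets: an I/O at 50MB falls into bucket 25, and with two equally
 * loaded children 25 % 2 selects child 1.  Neighbouring buckets
 * alternate children, spreading read wear across the mirror.
 */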
/*
 * Try to find a vdev whose DTL doesn't contain the block we want to read,
 * preferring vdevs based on determined load.
 *
 * If we can't, try the read on any vdev we haven't already tried.
 */
static int
vdev_mirror_child_select(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	uint64_t txg = zio->io_txg;
	int c, lowest_load;

	ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);

	lowest_load = INT_MAX;
	mm->mm_preferred_cnt = 0;
	for (c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc;

		mc = &mm->mm_child[c];
		if (mc->mc_tried || mc->mc_skipped)
			continue;

		if (!vdev_readable(mc->mc_vd)) {
			mc->mc_error = SET_ERROR(ENXIO);
			mc->mc_tried = 1;	/* don't even try */
			mc->mc_skipped = 1;
			continue;
		}

		if (vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1)) {
			mc->mc_error = SET_ERROR(ESTALE);
			mc->mc_skipped = 1;
			mc->mc_speculative = 1;
			continue;
		}

		mc->mc_load = vdev_mirror_load(mm, mc->mc_vd, mc->mc_offset);
		if (mc->mc_load > lowest_load)
			continue;

		if (mc->mc_load < lowest_load) {
			lowest_load = mc->mc_load;
			mm->mm_preferred_cnt = 0;
		}
		mm->mm_preferred[mm->mm_preferred_cnt] = c;
		mm->mm_preferred_cnt++;
	}

	if (mm->mm_preferred_cnt == 1) {
		vdev_queue_register_lastoffset(
		    mm->mm_child[mm->mm_preferred[0]].mc_vd, zio);
		return (mm->mm_preferred[0]);
	}

	if (mm->mm_preferred_cnt > 1) {
		int c = vdev_mirror_preferred_child_randomize(zio);

		vdev_queue_register_lastoffset(mm->mm_child[c].mc_vd, zio);
		return (c);
	}

	/*
	 * Every device is either missing or has this txg in its DTL.
	 * Look for any child we haven't already tried before giving up.
	 */
	for (c = 0; c < mm->mm_children; c++) {
		if (!mm->mm_child[c].mc_tried) {
			vdev_queue_register_lastoffset(mm->mm_child[c].mc_vd,
			    zio);
			return (c);
		}
	}

	/*
	 * Every child failed.  There's no place left to look.
	 */
	return (-1);
}
static void
vdev_mirror_io_start(zio_t *zio)
{
	mirror_map_t *mm;
	mirror_child_t *mc;
	int c, children;

	mm = vdev_mirror_map_init(zio);

	if (mm == NULL) {
		ASSERT(!spa_trust_config(zio->io_spa));
		ASSERT(zio->io_type == ZIO_TYPE_READ);
		zio_execute(zio);
		return;
	}

	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_bp != NULL &&
		    (zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_resilvering &&
		    mm->mm_children > 1) {
			/*
			 * For scrubbing reads (if we can verify the
			 * checksum here, as indicated by io_bp being
			 * non-NULL) we need to allocate a read buffer for
			 * each child and issue reads to all children.  If
			 * any child succeeds, it will copy its data into
			 * zio->io_abd in vdev_mirror_scrub_done().
			 */
			for (c = 0; c < mm->mm_children; c++) {
				mc = &mm->mm_child[c];
				zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
				    mc->mc_vd, mc->mc_offset,
				    abd_alloc_sametype(zio->io_abd,
				    zio->io_size), zio->io_size,
				    zio->io_type, zio->io_priority, 0,
				    vdev_mirror_scrub_done, mc));
			}
			zio_execute(zio);
			return;
		}
		/*
		 * For normal reads just pick one child.
		 */
		c = vdev_mirror_child_select(zio);
		children = (c >= 0);
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE ||
		    zio->io_type == ZIO_TYPE_FREE);

		/*
		 * Writes and frees go to all children.
		 */
		c = 0;
		children = mm->mm_children;
	}

	while (children--) {
		mc = &mm->mm_child[c];
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
		    zio->io_type, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		c++;
	}

	zio_execute(zio);
}
static int
vdev_mirror_worst_error(mirror_map_t *mm)
{
	int error[2] = { 0, 0 };

	for (int c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc = &mm->mm_child[c];
		int s = mc->mc_speculative;
		error[s] = zio_worst_error(error[s], mc->mc_error);
	}

	return (error[0] ? error[0] : error[1]);
}
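/*
 * For example, a child that actually failed with EIO accumulates into
 * error[0], while one skipped speculatively (ESTALE from a DTL miss in
 * vdev_mirror_child_select()) accumulates into error[1]; the EIO is
 * what gets reported.
 */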
static void
vdev_mirror_io_done(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	mirror_child_t *mc;
	int c;
	int good_copies = 0;
	int unexpected_errors = 0;

	if (mm == NULL)
		return;

	for (c = 0; c < mm->mm_children; c++) {
		mc = &mm->mm_child[c];

		if (mc->mc_error) {
			if (!mc->mc_skipped)
				unexpected_errors++;
		} else if (mc->mc_tried) {
			good_copies++;
		}
	}
	if (zio->io_type == ZIO_TYPE_WRITE) {
		/*
		 * XXX -- for now, treat partial writes as success.
		 *
		 * Now that we support write reallocation, it would be better
		 * to treat partial failure as real failure unless there are
		 * no non-degraded top-level vdevs left, and not update DTLs
		 * if we intend to reallocate.
		 */
		/* XXPOLICY */
		if (good_copies != mm->mm_children) {
			/*
			 * Always require at least one good copy.
			 *
			 * For ditto blocks (io_vd == NULL), require
			 * all copies to be good.
			 *
			 * XXX -- for replacing vdevs, there's no great answer.
			 * If the old device is really dead, we may not even
			 * be able to access it -- so we only want to
			 * require good writes to the new device.  But if
			 * the new device turns out to be flaky, we want
			 * to be able to detach it -- which requires all
			 * writes to the old device to have succeeded.
			 */
			if (good_copies == 0 || zio->io_vd == NULL)
				zio->io_error = vdev_mirror_worst_error(mm);
		}
		return;
	} else if (zio->io_type == ZIO_TYPE_FREE) {
		return;
	}
	ASSERT(zio->io_type == ZIO_TYPE_READ);

	/*
	 * If we don't have a good copy yet, keep trying other children.
	 */
	/* XXPOLICY */
	if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
		ASSERT(c >= 0 && c < mm->mm_children);
		mc = &mm->mm_child[c];
		zio_vdev_io_redone(zio);
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
		    ZIO_TYPE_READ, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		return;
	}
	/* XXPOLICY */
	if (good_copies == 0) {
		zio->io_error = vdev_mirror_worst_error(mm);
		ASSERT(zio->io_error != 0);
	}

	if (good_copies && spa_writeable(zio->io_spa) &&
	    (unexpected_errors ||
	    (zio->io_flags & ZIO_FLAG_RESILVER) ||
	    ((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_resilvering))) {
		/*
		 * Use the good data we have in hand to repair damaged children.
		 */
		for (c = 0; c < mm->mm_children; c++) {
			/*
			 * Don't rewrite known good children.
			 * Not only is it unnecessary, it could
			 * actually be harmful: if the system lost
			 * power while rewriting the only good copy,
			 * there would be no good copies left!
			 */
			mc = &mm->mm_child[c];

			if (mc->mc_error == 0) {
				if (mc->mc_tried)
					continue;
				/*
				 * We didn't try this child.  We need to
				 * repair it if:
				 * 1. it's a scrub (in which case we have
				 *    tried everything that was healthy)
				 *  - or -
				 * 2. it's an indirect vdev (in which case
				 *    it could point to any other vdev, which
				 *    might have a bad DTL)
				 *  - or -
				 * 3. the DTL indicates that this data is
				 *    missing from this vdev
				 */
				if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
				    mc->mc_vd->vdev_ops != &vdev_indirect_ops &&
				    !vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
				    zio->io_txg, 1))
					continue;
				mc->mc_error = SET_ERROR(ESTALE);
			}

			zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
			    mc->mc_vd, mc->mc_offset,
			    zio->io_abd, zio->io_size,
			    ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
			    ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
		}
	}
}
static void
vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
{
	if (faulted == vd->vdev_children) {
		if (vdev_children_are_offline(vd)) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_OFFLINE,
			    VDEV_AUX_CHILDREN_OFFLINE);
		} else {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_NO_REPLICAS);
		}
	} else if (degraded + faulted != 0) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
	} else {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
	}
}
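/*
 * For example, a two-way mirror with one faulted child lands in the
 * degraded + faulted != 0 case and reports DEGRADED; if all children
 * are offline it reports OFFLINE with VDEV_AUX_CHILDREN_OFFLINE, and
 * if all are faulted for any other reason it reports CANT_OPEN with
 * VDEV_AUX_NO_REPLICAS.
 */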
vdev_ops_t vdev_mirror_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,			/* vdev_op_need_resilver */
	NULL,			/* vdev_op_hold */
	NULL,			/* vdev_op_rele */
	NULL,			/* vdev_op_remap */
	vdev_default_xlate,
	VDEV_TYPE_MIRROR,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_replacing_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,			/* vdev_op_need_resilver */
	NULL,			/* vdev_op_hold */
	NULL,			/* vdev_op_rele */
	NULL,			/* vdev_op_remap */
	vdev_default_xlate,
	VDEV_TYPE_REPLACING,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_spare_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,			/* vdev_op_need_resilver */
	NULL,			/* vdev_op_hold */
	NULL,			/* vdev_op_rele */
	NULL,			/* vdev_op_remap */
	vdev_default_xlate,
	VDEV_TYPE_SPARE,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};
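/*
 * Note that vdev_replacing_ops and vdev_spare_ops reuse the mirror
 * vector wholesale: a replacing or hot-spare pair is effectively a
 * two-way mirror, and vdev_mirror_map_init() detects the resilvering
 * case by inspecting vdev_ops above.
 */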