/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/fs/zfs.h>
/*
 * Virtual device vector for mirroring.
 */
typedef struct mirror_child {
	vdev_t		*mc_vd;
	uint64_t	mc_offset;
	int		mc_error;
	int		mc_load;
	uint8_t		mc_tried;
	uint8_t		mc_skipped;
	uint8_t		mc_speculative;
} mirror_child_t;

typedef struct mirror_map {
	int		*mm_preferred;
	int		mm_preferred_cnt;
	int		mm_children;
	boolean_t	mm_replacing;
	boolean_t	mm_root;
	mirror_child_t	mm_child[];
} mirror_map_t;
static int vdev_mirror_shift = 21;
SYSCTL_DECL(_vfs_zfs_vdev);
static SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, mirror, CTLFLAG_RD, 0,
    "ZFS VDEV Mirror");
/*
 * The load configuration settings below are tuned by default for
 * the case where all devices are of the same rotational type.
 *
 * If there is a mixture of rotating and non-rotating media, setting
 * non_rotating_seek_inc to 0 may well provide better results as it
 * will direct more reads to the non-rotating vdevs which are more
 * likely to offer higher performance.
 */
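
/*
 * Illustrative note (not from the original source): on FreeBSD these
 * settings are exposed via the read-write sysctls declared below, so a
 * pool mixing SSDs and HDDs could bias reads towards the SSDs at runtime
 * with, e.g.:
 *
 *	sysctl vfs.zfs.vdev.mirror.non_rotating_seek_inc=0
 */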
/* Rotating media load calculation configuration. */
static int rotating_inc = 0;
TUNABLE_INT("vfs.zfs.vdev.mirror.rotating_inc", &rotating_inc);
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_inc, CTLFLAG_RW,
    &rotating_inc, 0, "Rotating media load increment for non-seeking I/O's");

static int rotating_seek_inc = 5;
TUNABLE_INT("vfs.zfs.vdev.mirror.rotating_seek_inc", &rotating_seek_inc);
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_seek_inc, CTLFLAG_RW,
    &rotating_seek_inc, 0, "Rotating media load increment for seeking I/O's");

static int rotating_seek_offset = 1 * 1024 * 1024;
TUNABLE_INT("vfs.zfs.vdev.mirror.rotating_seek_offset", &rotating_seek_offset);
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_seek_offset, CTLFLAG_RW,
    &rotating_seek_offset, 0, "Offset in bytes from the last I/O which "
    "triggers a reduced rotating media seek increment");

/* Non-rotating media load calculation configuration. */
static int non_rotating_inc = 0;
TUNABLE_INT("vfs.zfs.vdev.mirror.non_rotating_inc", &non_rotating_inc);
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, non_rotating_inc, CTLFLAG_RW,
    &non_rotating_inc, 0,
    "Non-rotating media load increment for non-seeking I/O's");

static int non_rotating_seek_inc = 1;
TUNABLE_INT("vfs.zfs.vdev.mirror.non_rotating_seek_inc",
    &non_rotating_seek_inc);
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, non_rotating_seek_inc, CTLFLAG_RW,
    &non_rotating_seek_inc, 0,
    "Non-rotating media load increment for seeking I/O's");
static inline size_t
vdev_mirror_map_size(int children)
{
	return (offsetof(mirror_map_t, mm_child[children]) +
	    sizeof(int) * children);
}
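
/*
 * Layout sketch (added for clarity): vdev_mirror_map_size() sizes a single
 * allocation that packs the map header, the flexible mm_child[] array and
 * the mm_preferred index array after it:
 *
 *	[ mirror_map_t | mm_child[0 .. children-1] | mm_preferred[0 .. children-1] ]
 *
 * vdev_mirror_map_alloc() below points mm_preferred at that tail.
 */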
static inline mirror_map_t *
vdev_mirror_map_alloc(int children, boolean_t replacing, boolean_t root)
{
	mirror_map_t *mm;

	mm = kmem_zalloc(vdev_mirror_map_size(children), KM_SLEEP);
	mm->mm_children = children;
	mm->mm_replacing = replacing;
	mm->mm_root = root;
	mm->mm_preferred = (int *)((uintptr_t)mm +
	    offsetof(mirror_map_t, mm_child[children]));

	return (mm);
}
static void
vdev_mirror_map_free(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;

	kmem_free(mm, vdev_mirror_map_size(mm->mm_children));
}
static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
	vdev_mirror_map_free,
	zio_vsd_default_cksum_report
};
static int
vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
{
	uint64_t lastoffset;
	int load;

	/* All DVAs have equal weight at the root. */
	if (mm->mm_root)
		return (INT_MAX);

	/*
	 * We don't return INT_MAX if the device is resilvering, i.e.
	 * vdev_resilver_txg != 0, as testing showed slightly worse overall
	 * performance when resilvering with that special case than without.
	 */

	/* Standard load based on pending queue length. */
	load = vdev_queue_length(vd);
	lastoffset = vdev_queue_lastoffset(vd);

	if (vd->vdev_rotation_rate == VDEV_RATE_NON_ROTATING) {
		/* Non-rotating media. */
		if (lastoffset == zio_offset)
			return (load + non_rotating_inc);

		/*
		 * Apply a seek penalty even for non-rotating devices as
		 * sequential I/O's can be aggregated into fewer operations
		 * on the device, thus avoiding unnecessary per-command
		 * overhead and boosting performance.
		 */
		return (load + non_rotating_seek_inc);
	}

	/* Rotating media I/O's which directly follow the last I/O. */
	if (lastoffset == zio_offset)
		return (load + rotating_inc);

	/*
	 * Apply half the seek increment to I/O's within seek offset
	 * of the last I/O queued to this vdev as they should incur less
	 * of a seek penalty.
	 */
	if (ABS(lastoffset - zio_offset) < rotating_seek_offset)
		return (load + (rotating_seek_inc / 2));

	/* Apply the full seek increment to all other I/O's. */
	return (load + rotating_seek_inc);
}
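
/*
 * Worked example (illustrative, with the default tunables above): a
 * rotating child with 4 queued I/O's scores 4 + 0 for a strictly
 * sequential read, 4 + (5 / 2) = 6 (integer division) for a read within
 * rotating_seek_offset (1 MB) of the last offset, and 4 + 5 = 9
 * otherwise.  The lowest score wins in vdev_mirror_child_select() below.
 */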
static mirror_map_t *
vdev_mirror_map_init(zio_t *zio)
{
	mirror_map_t *mm = NULL;
	mirror_child_t *mc;
	vdev_t *vd = zio->io_vd;
	int c;

	if (vd == NULL) {
		dva_t *dva = zio->io_bp->blk_dva;
		spa_t *spa = zio->io_spa;

		mm = vdev_mirror_map_alloc(BP_GET_NDVAS(zio->io_bp), B_FALSE,
		    B_TRUE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
			mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
		}
	} else {
		mm = vdev_mirror_map_alloc(vd->vdev_children,
		    (vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops), B_FALSE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vd->vdev_child[c];
			mc->mc_offset = zio->io_offset;
		}
	}

	zio->io_vsd = mm;
	zio->io_vsd_ops = &vdev_mirror_vsd_ops;
	return (mm);
}
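
/*
 * Clarifying note (added): a NULL io_vd means the zio targets the "root
 * mirror" formed by a block's multiple DVAs (ditto copies) rather than a
 * real mirror vdev, so each top-level vdev holding a copy is treated as
 * a mirror child above.
 */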
static int
vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	int numerrors = 0;
	int lasterror = 0;

	if (vd->vdev_children == 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	vdev_open_children(vd);

	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		if (cvd->vdev_open_error) {
			lasterror = cvd->vdev_open_error;
			numerrors++;
			continue;
		}

		*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
		*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
		*logical_ashift = MAX(*logical_ashift, cvd->vdev_ashift);
		*physical_ashift = MAX(*physical_ashift,
		    cvd->vdev_physical_ashift);
	}

	if (numerrors == vd->vdev_children) {
		vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
		return (lasterror);
	}

	return (0);
}
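
/*
 * Note on the MIN(x - 1, y - 1) + 1 idiom above (added): assuming the
 * caller initializes *asize and *max_asize to 0, as vdev_open() does,
 * 0 - 1 wraps to UINT64_MAX, so an as-yet-unset value never wins the
 * comparison against a real child size.
 */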
static void
vdev_mirror_close(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_close(vd->vdev_child[c]);
}
static void
vdev_mirror_child_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}
static void
vdev_mirror_scrub_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	if (zio->io_error == 0) {
		zio_t *pio;
		zio_link_t *zl = NULL;

		mutex_enter(&zio->io_lock);
		while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
			mutex_enter(&pio->io_lock);
			ASSERT3U(zio->io_size, >=, pio->io_size);
			bcopy(zio->io_data, pio->io_data, pio->io_size);
			mutex_exit(&pio->io_lock);
		}
		mutex_exit(&zio->io_lock);
	}

	zio_buf_free(zio->io_data, zio->io_size);

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}
/*
 * Check the other, lower-indexed DVAs to see if they're on the same
 * vdev as the child we picked.  If they are, use them since they
 * are likely to have been allocated from the primary metaslab in
 * use at the time, and hence are more likely to have locality with
 * single-copy data.
 */
static int
vdev_mirror_dva_select(zio_t *zio, int p)
{
	dva_t *dva = zio->io_bp->blk_dva;
	mirror_map_t *mm = zio->io_vsd;
	int preferred;
	int c;

	preferred = mm->mm_preferred[p];
	for (p--; p >= 0; p--) {
		c = mm->mm_preferred[p];
		if (DVA_GET_VDEV(&dva[c]) == DVA_GET_VDEV(&dva[preferred]))
			preferred = c;
	}
	return (preferred);
}
static int
vdev_mirror_preferred_child_randomize(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	int p;

	if (mm->mm_root) {
		p = spa_get_random(mm->mm_preferred_cnt);
		return (vdev_mirror_dva_select(zio, p));
	}

	/*
	 * To ensure we don't always favour the first matching vdev,
	 * which could lead to wear leveling issues on SSD's, we
	 * use the I/O offset as a pseudo random seed into the vdevs
	 * which have the lowest load.
	 */
	p = (zio->io_offset >> vdev_mirror_shift) % mm->mm_preferred_cnt;
	return (mm->mm_preferred[p]);
}
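
/*
 * Illustrative note (added): with vdev_mirror_shift = 21 the offset
 * selects a 2 MB (1 << 21) region, so reads from different regions
 * rotate across the equally loaded children deterministically but well
 * spread out.
 */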
/*
 * Try to find a vdev whose DTL doesn't contain the block we want to read,
 * preferring vdevs based on determined load.
 *
 * If we can't, try the read on any vdev we haven't already tried.
 */
static int
vdev_mirror_child_select(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	mirror_child_t *mc;
	uint64_t txg = zio->io_txg;
	int c, lowest_load;

	ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);

	lowest_load = INT_MAX;
	mm->mm_preferred_cnt = 0;
	for (c = 0; c < mm->mm_children; c++) {
		mc = &mm->mm_child[c];
		if (mc->mc_tried || mc->mc_skipped)
			continue;

		if (!vdev_readable(mc->mc_vd)) {
			mc->mc_error = SET_ERROR(ENXIO);
			mc->mc_tried = 1;	/* don't even try */
			mc->mc_skipped = 1;
			continue;
		}

		if (vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1)) {
			mc->mc_error = SET_ERROR(ESTALE);
			mc->mc_skipped = 1;
			mc->mc_speculative = 1;
			continue;
		}

		mc->mc_load = vdev_mirror_load(mm, mc->mc_vd, mc->mc_offset);
		if (mc->mc_load > lowest_load)
			continue;

		if (mc->mc_load < lowest_load) {
			lowest_load = mc->mc_load;
			mm->mm_preferred_cnt = 0;
		}
		mm->mm_preferred[mm->mm_preferred_cnt] = c;
		mm->mm_preferred_cnt++;
	}

	if (mm->mm_preferred_cnt == 1) {
		vdev_queue_register_lastoffset(
		    mm->mm_child[mm->mm_preferred[0]].mc_vd, zio);
		return (mm->mm_preferred[0]);
	}

	if (mm->mm_preferred_cnt > 1) {
		int c = vdev_mirror_preferred_child_randomize(zio);

		vdev_queue_register_lastoffset(mm->mm_child[c].mc_vd, zio);
		return (c);
	}

	/*
	 * Every device is either missing or has this txg in its DTL.
	 * Look for any child we haven't already tried before giving up.
	 */
	for (c = 0; c < mm->mm_children; c++) {
		if (!mm->mm_child[c].mc_tried) {
			vdev_queue_register_lastoffset(mm->mm_child[c].mc_vd,
			    zio);
			return (c);
		}
	}

	/*
	 * Every child failed.  There's no place left to look.
	 */
	return (-1);
}
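
/*
 * Selection summary (added for clarity): the first pass picks the
 * lowest-load child whose DTL does not suggest the data is missing; if
 * none qualify, any untried child is returned; -1 means every child has
 * been tried.
 */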
static void
vdev_mirror_io_start(zio_t *zio)
{
	mirror_map_t *mm;
	mirror_child_t *mc;
	int c, children;

	mm = vdev_mirror_map_init(zio);

	if (zio->io_type == ZIO_TYPE_READ) {
		if ((zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_replacing &&
		    mm->mm_children > 1) {
			/*
			 * For scrubbing reads we need to allocate a read
			 * buffer for each child and issue reads to all
			 * children.  If any child succeeds, it will copy its
			 * data into zio->io_data in vdev_mirror_scrub_done.
			 */
			for (c = 0; c < mm->mm_children; c++) {
				mc = &mm->mm_child[c];
				zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
				    mc->mc_vd, mc->mc_offset,
				    zio_buf_alloc(zio->io_size), zio->io_size,
				    zio->io_type, zio->io_priority, 0,
				    vdev_mirror_scrub_done, mc));
			}
			zio_execute(zio);
			return;
		}
		/* For normal reads just pick one child. */
		c = vdev_mirror_child_select(zio);
		children = (c >= 0);
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE ||
		    zio->io_type == ZIO_TYPE_FREE);

		/* Writes and frees go to all children. */
		c = 0;
		children = mm->mm_children;
	}

	while (children--) {
		mc = &mm->mm_child[c];
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_data, zio->io_size,
		    zio->io_type, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		c++;
	}

	zio_execute(zio);
}
static int
vdev_mirror_worst_error(mirror_map_t *mm)
{
	int error[2] = { 0, 0 };

	for (int c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc = &mm->mm_child[c];
		int s = mc->mc_speculative;
		error[s] = zio_worst_error(error[s], mc->mc_error);
	}

	return (error[0] ? error[0] : error[1]);
}
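
/*
 * Note (added): errors from children that were merely skipped on DTL
 * grounds (mc_speculative) are kept in a separate bucket above, so a
 * definite error from a child expected to hold good data is reported in
 * preference to a speculative one.
 */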
static void
vdev_mirror_io_done(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	mirror_child_t *mc;
	int c;
	int good_copies = 0;
	int unexpected_errors = 0;

	for (c = 0; c < mm->mm_children; c++) {
		mc = &mm->mm_child[c];

		if (mc->mc_error) {
			if (!mc->mc_skipped)
				unexpected_errors++;
		} else if (mc->mc_tried) {
			good_copies++;
		}
	}

	if (zio->io_type == ZIO_TYPE_WRITE) {
		/*
		 * XXX -- for now, treat partial writes as success.
		 *
		 * Now that we support write reallocation, it would be better
		 * to treat partial failure as real failure unless there are
		 * no non-degraded top-level vdevs left, and not update DTLs
		 * if we intend to reallocate.
		 */
		if (good_copies != mm->mm_children) {
			/*
			 * Always require at least one good copy.
			 *
			 * For ditto blocks (io_vd == NULL), require
			 * all copies to be good.
			 *
			 * XXX -- for replacing vdevs, there's no great answer.
			 * If the old device is really dead, we may not even
			 * be able to access it -- so we only want to
			 * require good writes to the new device.  But if
			 * the new device turns out to be flaky, we want
			 * to be able to detach it -- which requires all
			 * writes to the old device to have succeeded.
			 */
			if (good_copies == 0 || zio->io_vd == NULL)
				zio->io_error = vdev_mirror_worst_error(mm);
		}
		return;
	} else if (zio->io_type == ZIO_TYPE_FREE) {
		return;
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	/*
	 * If we don't have a good copy yet, keep trying other children.
	 */
	if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
		ASSERT(c >= 0 && c < mm->mm_children);
		mc = &mm->mm_child[c];
		zio_vdev_io_redone(zio);
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_data, zio->io_size,
		    ZIO_TYPE_READ, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		return;
	}

	if (good_copies == 0) {
		zio->io_error = vdev_mirror_worst_error(mm);
		ASSERT(zio->io_error != 0);
	}

	if (good_copies && spa_writeable(zio->io_spa) &&
	    (unexpected_errors ||
	    (zio->io_flags & ZIO_FLAG_RESILVER) ||
	    ((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_replacing))) {
		/*
		 * Use the good data we have in hand to repair damaged children.
		 */
		for (c = 0; c < mm->mm_children; c++) {
			/*
			 * Don't rewrite known good children.
			 * Not only is it unnecessary, it could
			 * actually be harmful: if the system lost
			 * power while rewriting the only good copy,
			 * there would be no good copies left!
			 */
			mc = &mm->mm_child[c];

			if (mc->mc_error == 0) {
				if (mc->mc_tried)
					continue;
				if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
				    !vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
				    zio->io_txg, 1))
					continue;
				mc->mc_error = SET_ERROR(ESTALE);
			}

			zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
			    mc->mc_vd, mc->mc_offset,
			    zio->io_data, zio->io_size,
			    ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
			    ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
		}
	}
}
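
/*
 * Design note (added sketch, not from the original comments): repair
 * writes are issued with ZIO_FLAG_IO_REPAIR, and ZIO_FLAG_SELF_HEAL is
 * added only when the read observed an unexpected error, distinguishing
 * self-healing rewrites from routine resilver/scrub repair traffic.
 */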
static void
vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
{
	if (faulted == vd->vdev_children)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_NO_REPLICAS);
	else if (degraded + faulted != 0)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
	else
		vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}
vdev_ops_t vdev_mirror_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	VDEV_TYPE_MIRROR,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_replacing_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	VDEV_TYPE_REPLACING,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_spare_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	VDEV_TYPE_SPARE,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};