 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>

static kstat_t *mirror_ksp = NULL;
typedef struct mirror_stats {
	kstat_named_t vdev_mirror_stat_rotating_linear;
	kstat_named_t vdev_mirror_stat_rotating_offset;
	kstat_named_t vdev_mirror_stat_rotating_seek;
	kstat_named_t vdev_mirror_stat_non_rotating_linear;
	kstat_named_t vdev_mirror_stat_non_rotating_seek;
	kstat_named_t vdev_mirror_stat_preferred_found;
	kstat_named_t vdev_mirror_stat_preferred_not_found;
static mirror_stats_t mirror_stats = {
	/* New I/O directly follows the last I/O */
	{ "rotating_linear", KSTAT_DATA_UINT64 },
	/* New I/O is within zfs_vdev_mirror_rotating_seek_offset of the last */
	{ "rotating_offset", KSTAT_DATA_UINT64 },
	/* New I/O requires random seek */
	{ "rotating_seek", KSTAT_DATA_UINT64 },
	/* New I/O directly follows the last I/O (nonrot) */
	{ "non_rotating_linear", KSTAT_DATA_UINT64 },
	/* New I/O requires random seek (nonrot) */
	{ "non_rotating_seek", KSTAT_DATA_UINT64 },
	/* Preferred child vdev found */
	{ "preferred_found", KSTAT_DATA_UINT64 },
	/* Preferred child vdev not found or equal load */
	{ "preferred_not_found", KSTAT_DATA_UINT64 },
#define	MIRROR_STAT(stat)	(mirror_stats.stat.value.ui64)
#define	MIRROR_INCR(stat, val)	atomic_add_64(&MIRROR_STAT(stat), val)
#define	MIRROR_BUMP(stat)	MIRROR_INCR(stat, 1)
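
/*
 * MIRROR_BUMP() and MIRROR_INCR() update the named kstat counters above
 * with atomic adds, so the I/O paths can bump them concurrently without
 * any additional locking.
 */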
vdev_mirror_stat_init(void)
	mirror_ksp = kstat_create("zfs", 0, "vdev_mirror_stats",
	    "misc", KSTAT_TYPE_NAMED,
	    sizeof (mirror_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (mirror_ksp != NULL) {
		mirror_ksp->ks_data = &mirror_stats;
		kstat_install(mirror_ksp);
vdev_mirror_stat_fini(void)
	if (mirror_ksp != NULL) {
		kstat_delete(mirror_ksp);
 * Virtual device vector for mirroring.

typedef struct mirror_child {
	uint8_t		mc_speculative;

typedef struct mirror_map {
	int		mm_preferred_cnt;
	boolean_t	mm_resilvering;
	mirror_child_t	mm_child[];

static int vdev_mirror_shift = 21;
 * The load configuration settings below are tuned by default for
 * the case where all devices are of the same rotational type.
 *
 * If there is a mixture of rotating and non-rotating media, setting
 * zfs_vdev_mirror_non_rotating_seek_inc to 0 may well provide better results
 * as it will direct more reads to the non-rotating vdevs, which are likely
 * to offer higher performance.
/* Rotating media load calculation configuration. */
static int zfs_vdev_mirror_rotating_inc = 0;
static int zfs_vdev_mirror_rotating_seek_inc = 5;
static int zfs_vdev_mirror_rotating_seek_offset = 1 * 1024 * 1024;

/* Non-rotating media load calculation configuration. */
static int zfs_vdev_mirror_non_rotating_inc = 0;
static int zfs_vdev_mirror_non_rotating_seek_inc = 1;
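
/*
 * For illustration, with the defaults above: a rotating child with four
 * pending I/Os in its queue scores 4 for a read that directly follows the
 * last I/O, 4 + 5 / 2 = 6 for a read within 1MB of the last offset, and
 * 4 + 5 = 9 for a random seek; vdev_mirror_child_select() then prefers
 * whichever children have the lowest score.
 */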
vdev_mirror_map_size(int children)
	return (offsetof(mirror_map_t, mm_child[children]) +
	    sizeof (int) * children);

static inline mirror_map_t *
vdev_mirror_map_alloc(int children, boolean_t resilvering, boolean_t root)
	mm = kmem_zalloc(vdev_mirror_map_size(children), KM_SLEEP);
	mm->mm_children = children;
	mm->mm_resilvering = resilvering;
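
	/*
	 * The single allocation made above holds the map header, the
	 * flexible mm_child[] array, and a trailing array of ints (sized
	 * by vdev_mirror_map_size()) that mm_preferred points into below.
	 */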
	mm->mm_preferred = (int *)((uintptr_t)mm +
	    offsetof(mirror_map_t, mm_child[children]));

vdev_mirror_map_free(zio_t *zio)
	mirror_map_t *mm = zio->io_vsd;

	kmem_free(mm, vdev_mirror_map_size(mm->mm_children));

static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
	.vsd_free = vdev_mirror_map_free,
	.vsd_cksum_report = zio_vsd_default_cksum_report

vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
	uint64_t last_offset;

	/* All DVAs have equal weight at the root. */
	 * We don't return INT_MAX if the device is resilvering, i.e. when
	 * vdev_resilver_txg != 0: in testing, overall performance was
	 * slightly worse when resilvering children were excluded this way
	 * than when they were not.
	/* Fix zio_offset for leaf vdevs */
	if (vd->vdev_ops->vdev_op_leaf)
		zio_offset += VDEV_LABEL_START_SIZE;

	/* Standard load based on pending queue length. */
	load = vdev_queue_length(vd);
	last_offset = vdev_queue_last_offset(vd);

	if (vd->vdev_nonrot) {
		/* Non-rotating media. */
		if (last_offset == zio_offset) {
			MIRROR_BUMP(vdev_mirror_stat_non_rotating_linear);
			return (load + zfs_vdev_mirror_non_rotating_inc);
		 * Apply a seek penalty even for non-rotating devices:
		 * sequential I/O's can be aggregated into fewer operations
		 * on the device, avoiding unnecessary per-command overhead,
		 * so it still pays to keep the stream sequential.
		MIRROR_BUMP(vdev_mirror_stat_non_rotating_seek);
		return (load + zfs_vdev_mirror_non_rotating_seek_inc);

	/* Rotating media I/O's which directly follow the last I/O. */
	if (last_offset == zio_offset) {
		MIRROR_BUMP(vdev_mirror_stat_rotating_linear);
		return (load + zfs_vdev_mirror_rotating_inc);
	 * Apply half the seek increment to I/O's within seek offset
	 * of the last I/O issued to this vdev, as they should incur a
	 * smaller seek penalty.
	offset_diff = (int64_t)(last_offset - zio_offset);
	if (ABS(offset_diff) < zfs_vdev_mirror_rotating_seek_offset) {
		MIRROR_BUMP(vdev_mirror_stat_rotating_offset);
		return (load + (zfs_vdev_mirror_rotating_seek_inc / 2));

	/* Apply the full seek increment to all other I/O's. */
	MIRROR_BUMP(vdev_mirror_stat_rotating_seek);
	return (load + zfs_vdev_mirror_rotating_seek_inc);
 * Avoid inlining the function to keep vdev_mirror_io_start(), which is
 * this function's only caller, as small as possible on the stack.
noinline static mirror_map_t *
vdev_mirror_map_init(zio_t *zio)
	mirror_map_t *mm = NULL;
	vdev_t *vd = zio->io_vd;

		dva_t *dva = zio->io_bp->blk_dva;
		spa_t *spa = zio->io_spa;
		dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
		dva_t dva_copy[SPA_DVAS_PER_BP];
		 * The sequential scrub code sorts and issues all DVAs
		 * of a bp separately. Each of these IOs includes all
		 * original DVA copies so that repairs can be performed
		 * in the event of an error, but we only actually want
		 * to check the first DVA since the others will be
		 * checked by their respective sorted IOs. Only if we
		 * hit an error will we try all DVAs upon retrying.
		 *
		 * Note: This check is safe even if the user switches
		 * from a legacy scrub to a sequential one in the middle
		 * of processing, since scn_is_sorted isn't updated until
		 * all outstanding IOs from the previous scrub pass
		 * complete.
		if ((zio->io_flags & ZIO_FLAG_SCRUB) &&
		    !(zio->io_flags & ZIO_FLAG_IO_RETRY) &&
		    dsl_scan_scrubbing(spa->spa_dsl_pool) &&
		    scn->scn_is_sorted) {
			c = BP_GET_NDVAS(zio->io_bp);
		 * If the pool cannot be written to, then infer that some
		 * DVAs might be invalid or point to vdevs that do not exist.
		if (!spa_writeable(spa)) {
			ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
			for (int i = 0; i < c; i++) {
				if (zfs_dva_valid(spa, &dva[i], zio->io_bp))
					dva_copy[j++] = dva[i];
				zio->io_error = ENXIO;
		mm = vdev_mirror_map_alloc(c, B_FALSE, B_TRUE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];

			mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
			mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
			if (mc->mc_vd == NULL) {
				kmem_free(mm, vdev_mirror_map_size(
				zio->io_error = ENXIO;
		 * If we are resilvering, then we should handle scrub reads
		 * differently; we shouldn't issue them to the resilvering
		 * device because it might not have those blocks.
		 *
		 * We are resilvering iff:
		 * 1) We are a replacing vdev (i.e. our name is "replacing-1"
		 *    or "spare-1" or something like that), and
		 * 2) The pool is currently being resilvered.
		 *
		 * We cannot simply check vd->vdev_resilver_txg, because it's
		 * not set in this path.
		 *
		 * Nor can we just check our vdev_ops; there are cases (such as
		 * when a user types "zpool replace pool odev spare_dev" and
		 * spare_dev is in the spare list, or when a spare device is
		 * automatically used to replace a DEGRADED device) when
		 * resilvering is complete but both the original vdev and the
		 * spare vdev remain in the pool. That behavior is intentional.
		 * It helps implement the policy that a spare should be
		 * automatically removed from the pool after the user replaces
		 * the device that originally failed.
		 *
		 * If a spa load is in progress, then spa_dsl_pool may be
		 * uninitialized. But we shouldn't be resilvering during a spa
		 * load anyway.
		boolean_t replacing = (vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops) &&
		    spa_load_state(vd->vdev_spa) == SPA_LOAD_NONE &&
		    dsl_scan_resilvering(vd->vdev_spa->spa_dsl_pool);
		mm = vdev_mirror_map_alloc(vd->vdev_children, replacing,
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vd->vdev_child[c];
			mc->mc_offset = zio->io_offset;

	zio->io_vsd_ops = &vdev_mirror_vsd_ops;
vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
	if (vd->vdev_children == 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));

	vdev_open_children(vd);

	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		if (cvd->vdev_open_error) {
			lasterror = cvd->vdev_open_error;

		*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
		*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
		*logical_ashift = MAX(*logical_ashift, cvd->vdev_ashift);
		*physical_ashift = MAX(*physical_ashift,
		    cvd->vdev_physical_ashift);
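
		/*
		 * The mirror is only as large as its smallest child and must
		 * advertise the largest ashift any child requires, hence MIN
		 * for the sizes and MAX for the ashifts above.
		 */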
	if (numerrors == vd->vdev_children) {
		if (vdev_children_are_offline(vd))
			vd->vdev_stat.vs_aux = VDEV_AUX_CHILDREN_OFFLINE;
			vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;

vdev_mirror_close(vdev_t *vd)
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_close(vd->vdev_child[c]);

vdev_mirror_child_done(zio_t *zio)
	mirror_child_t *mc = zio->io_private;

	mc->mc_error = zio->io_error;

vdev_mirror_scrub_done(zio_t *zio)
	mirror_child_t *mc = zio->io_private;

	if (zio->io_error == 0) {
		zio_link_t *zl = NULL;

		mutex_enter(&zio->io_lock);
		while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
			mutex_enter(&pio->io_lock);
			ASSERT3U(zio->io_size, >=, pio->io_size);
			abd_copy(pio->io_abd, zio->io_abd, pio->io_size);
			mutex_exit(&pio->io_lock);
		mutex_exit(&zio->io_lock);

	abd_free(zio->io_abd);
	mc->mc_error = zio->io_error;
 * Check the other, lower-index DVAs to see if they're on the same
 * vdev as the child we picked. If they are, use them since they
 * are likely to have been allocated from the primary metaslab in
 * use at the time, and hence are more likely to have locality with
 * single-copy data.
vdev_mirror_dva_select(zio_t *zio, int p)
	dva_t *dva = zio->io_bp->blk_dva;
	mirror_map_t *mm = zio->io_vsd;

	preferred = mm->mm_preferred[p];
	for (p--; p >= 0; p--) {
		c = mm->mm_preferred[p];
		if (DVA_GET_VDEV(&dva[c]) == DVA_GET_VDEV(&dva[preferred]))

vdev_mirror_preferred_child_randomize(zio_t *zio)
	mirror_map_t *mm = zio->io_vsd;

		p = spa_get_random(mm->mm_preferred_cnt);
		return (vdev_mirror_dva_select(zio, p));
	 * To ensure we don't always favour the first matching vdev,
	 * which could lead to wear leveling issues on SSDs, we use
	 * the I/O offset as a pseudo-random seed into the vdevs which
	 * have the lowest load.
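	 * With the default vdev_mirror_shift of 21, offsets within the same
	 * 2MB-aligned region map to the same preferred child, so nearby
	 * reads retain locality while different regions still spread across
	 * the mirror.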
	p = (zio->io_offset >> vdev_mirror_shift) % mm->mm_preferred_cnt;
	return (mm->mm_preferred[p]);
 * Try to find a child whose DTL doesn't contain the block we want to read,
 * preferring the children with the lowest load as computed by
 * vdev_mirror_load(). If we can't, try the read on any vdev we haven't
 * already tried.
vdev_mirror_child_select(zio_t *zio)
	mirror_map_t *mm = zio->io_vsd;
	uint64_t txg = zio->io_txg;

	ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);

	lowest_load = INT_MAX;
	mm->mm_preferred_cnt = 0;
	for (c = 0; c < mm->mm_children; c++) {
		mc = &mm->mm_child[c];
		if (mc->mc_tried || mc->mc_skipped)

		if (mc->mc_vd == NULL || !vdev_readable(mc->mc_vd)) {
			mc->mc_error = SET_ERROR(ENXIO);
			mc->mc_tried = 1;	/* don't even try */

		if (vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1)) {
			mc->mc_error = SET_ERROR(ESTALE);
			mc->mc_speculative = 1;

		mc->mc_load = vdev_mirror_load(mm, mc->mc_vd, mc->mc_offset);
		if (mc->mc_load > lowest_load)

		if (mc->mc_load < lowest_load) {
			lowest_load = mc->mc_load;
			mm->mm_preferred_cnt = 0;
		mm->mm_preferred[mm->mm_preferred_cnt] = c;
		mm->mm_preferred_cnt++;

	if (mm->mm_preferred_cnt == 1) {
		MIRROR_BUMP(vdev_mirror_stat_preferred_found);
		return (mm->mm_preferred[0]);

	if (mm->mm_preferred_cnt > 1) {
		MIRROR_BUMP(vdev_mirror_stat_preferred_not_found);
		return (vdev_mirror_preferred_child_randomize(zio));

	 * Every device is either missing or has this txg in its DTL.
	 * Look for any child we haven't already tried before giving up.
	for (c = 0; c < mm->mm_children; c++) {
		if (!mm->mm_child[c].mc_tried)

	 * Every child failed. There's no place left to look.
vdev_mirror_io_start(zio_t *zio)
	mm = vdev_mirror_map_init(zio);

		ASSERT(!spa_trust_config(zio->io_spa));
		ASSERT(zio->io_type == ZIO_TYPE_READ);

	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_bp != NULL &&
		    (zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_resilvering) {
			 * For scrubbing reads (if we can verify the
			 * checksum here, as indicated by io_bp being
			 * non-NULL) we need to allocate a read buffer for
			 * each child and issue reads to all children. If
			 * any child succeeds, it will copy its data into
			 * zio->io_abd in vdev_mirror_scrub_done().
			for (c = 0; c < mm->mm_children; c++) {
				mc = &mm->mm_child[c];
				zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
				    mc->mc_vd, mc->mc_offset,
				    abd_alloc_sametype(zio->io_abd,
				    zio->io_size), zio->io_size,
				    zio->io_type, zio->io_priority, 0,
				    vdev_mirror_scrub_done, mc));

		 * For normal reads just pick one child.
		c = vdev_mirror_child_select(zio);

		ASSERT(zio->io_type == ZIO_TYPE_WRITE);

		 * Writes go to all children.
		children = mm->mm_children;

	mc = &mm->mm_child[c];
	zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
	    mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
	    zio->io_type, zio->io_priority, 0,
	    vdev_mirror_child_done, mc));
vdev_mirror_worst_error(mirror_map_t *mm)
	int error[2] = { 0, 0 };
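
	/*
	 * error[0] accumulates errors from children we expected to hold the
	 * data; error[1] accumulates errors from speculative children whose
	 * DTL already suggested the block might be missing. Non-speculative
	 * errors take precedence in the return below.
	 */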
	for (int c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc = &mm->mm_child[c];
		int s = mc->mc_speculative;
		error[s] = zio_worst_error(error[s], mc->mc_error);

	return (error[0] ? error[0] : error[1]);

vdev_mirror_io_done(zio_t *zio)
	mirror_map_t *mm = zio->io_vsd;
	int unexpected_errors = 0;

	for (c = 0; c < mm->mm_children; c++) {
		mc = &mm->mm_child[c];
		} else if (mc->mc_tried) {
	if (zio->io_type == ZIO_TYPE_WRITE) {
		 * XXX -- for now, treat partial writes as success.
		 *
		 * Now that we support write reallocation, it would be better
		 * to treat partial failure as real failure unless there are
		 * no non-degraded top-level vdevs left, and not update DTLs
		 * if we intend to reallocate.
		if (good_copies != mm->mm_children) {
			 * Always require at least one good copy.
			 *
			 * For ditto blocks (io_vd == NULL), require
			 * all copies to be good.
			 *
			 * XXX -- for replacing vdevs, there's no great answer.
			 * If the old device is really dead, we may not even
			 * be able to access it -- so we only want to
			 * require good writes to the new device. But if
			 * the new device turns out to be flaky, we want
			 * to be able to detach it -- which requires all
			 * writes to the old device to have succeeded.
			if (good_copies == 0 || zio->io_vd == NULL)
				zio->io_error = vdev_mirror_worst_error(mm);

	ASSERT(zio->io_type == ZIO_TYPE_READ);
	 * If we don't have a good copy yet, keep trying other children.
	if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
		ASSERT(c >= 0 && c < mm->mm_children);
		mc = &mm->mm_child[c];
		zio_vdev_io_redone(zio);
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
		    ZIO_TYPE_READ, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));

	if (good_copies == 0) {
		zio->io_error = vdev_mirror_worst_error(mm);
		ASSERT(zio->io_error != 0);

	if (good_copies && spa_writeable(zio->io_spa) &&
	    (unexpected_errors ||
	    (zio->io_flags & ZIO_FLAG_RESILVER) ||
	    ((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_resilvering))) {
		 * Use the good data we have in hand to repair damaged children.
		for (c = 0; c < mm->mm_children; c++) {
			 * Don't rewrite known good children.
			 * Not only is it unnecessary, it could
			 * actually be harmful: if the system lost
			 * power while rewriting the only good copy,
			 * there would be no good copies left!
			mc = &mm->mm_child[c];

			if (mc->mc_error == 0) {
				 * We didn't try this child. We need to
				 * repair it if:
				 * 1. it's a scrub (in which case we have
				 *    tried everything that was healthy)
				 * 2. it's an indirect vdev (in which case
				 *    it could point to any other vdev, which
				 *    might have a bad DTL)
				 * 3. the DTL indicates that this data is
				 *    missing from this vdev
				if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
				    mc->mc_vd->vdev_ops != &vdev_indirect_ops &&
				    !vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,

				mc->mc_error = SET_ERROR(ESTALE);

			zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
			    mc->mc_vd, mc->mc_offset,
			    zio->io_abd, zio->io_size, ZIO_TYPE_WRITE,
			    zio->io_priority == ZIO_PRIORITY_REBUILD ?
			    ZIO_PRIORITY_REBUILD : ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
			    ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
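
/*
 * State propagation: the mirror can't open only when every child has
 * faulted (reported as "children offline" when that is the cause); it is
 * degraded whenever some, but not all, children are faulted or any child
 * is degraded; otherwise it is healthy.
 */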
vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
	if (faulted == vd->vdev_children) {
		if (vdev_children_are_offline(vd)) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_OFFLINE,
			    VDEV_AUX_CHILDREN_OFFLINE);
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_NO_REPLICAS);
	} else if (degraded + faulted != 0) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
		vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
vdev_ops_t vdev_mirror_ops = {
	.vdev_op_open = vdev_mirror_open,
	.vdev_op_close = vdev_mirror_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_io_start = vdev_mirror_io_start,
	.vdev_op_io_done = vdev_mirror_io_done,
	.vdev_op_state_change = vdev_mirror_state_change,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_type = VDEV_TYPE_MIRROR,	/* name of this vdev type */
	.vdev_op_leaf = B_FALSE			/* not a leaf vdev */
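
/*
 * The replacing and spare vdevs below reuse the mirror implementation
 * wholesale; only the vdev type name differs. vdev_mirror_map_init()
 * recognizes these ops vectors to decide whether scrub reads should avoid
 * a device that may still be resilvering.
 */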
vdev_ops_t vdev_replacing_ops = {
	.vdev_op_open = vdev_mirror_open,
	.vdev_op_close = vdev_mirror_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_io_start = vdev_mirror_io_start,
	.vdev_op_io_done = vdev_mirror_io_done,
	.vdev_op_state_change = vdev_mirror_state_change,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_type = VDEV_TYPE_REPLACING,	/* name of this vdev type */
	.vdev_op_leaf = B_FALSE			/* not a leaf vdev */
vdev_ops_t vdev_spare_ops = {
	.vdev_op_open = vdev_mirror_open,
	.vdev_op_close = vdev_mirror_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_io_start = vdev_mirror_io_start,
	.vdev_op_io_done = vdev_mirror_io_done,
	.vdev_op_state_change = vdev_mirror_state_change,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_type = VDEV_TYPE_SPARE,	/* name of this vdev type */
	.vdev_op_leaf = B_FALSE			/* not a leaf vdev */
ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, rotating_inc, INT, ZMOD_RW,
	"Rotating media load increment for non-seeking I/O's");

ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, rotating_seek_inc, INT, ZMOD_RW,
	"Rotating media load increment for seeking I/O's");

ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, rotating_seek_offset, INT, ZMOD_RW,
	"Offset in bytes from the last I/O which triggers "
	"a reduced rotating media seek increment");

ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, non_rotating_inc, INT, ZMOD_RW,
	"Non-rotating media load increment for non-seeking I/O's");

ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, non_rotating_seek_inc, INT, ZMOD_RW,
	"Non-rotating media load increment for seeking I/O's");