/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <geom/geom.h>
#include "geom/raid/g_raid.h"
#include "g_raid_tr_if.h"

SYSCTL_DECL(_kern_geom_raid_raid1);

#define RAID1_REBUILD_SLAB	(1 << 20) /* One transaction in a rebuild */
static int g_raid1_rebuild_slab = RAID1_REBUILD_SLAB;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_slab_size, CTLFLAG_RWTUN,
    &g_raid1_rebuild_slab, 0,
    "Amount of the disk to rebuild each read/write cycle of the rebuild.");

#define RAID1_REBUILD_FAIR_IO	20 /* use 1/x of the available I/O */
static int g_raid1_rebuild_fair_io = RAID1_REBUILD_FAIR_IO;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_fair_io, CTLFLAG_RWTUN,
    &g_raid1_rebuild_fair_io, 0,
    "Fraction of the I/O bandwidth to use when the disk is busy for rebuild.");

#define RAID1_REBUILD_CLUSTER_IDLE	100
static int g_raid1_rebuild_cluster_idle = RAID1_REBUILD_CLUSTER_IDLE;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_cluster_idle, CTLFLAG_RWTUN,
    &g_raid1_rebuild_cluster_idle, 0,
    "Number of slabs to do each time we trigger a rebuild cycle");

#define RAID1_REBUILD_META_UPDATE	1024 /* update metadata every 1GB or so */
static int g_raid1_rebuild_meta_update = RAID1_REBUILD_META_UPDATE;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_meta_update, CTLFLAG_RWTUN,
    &g_raid1_rebuild_meta_update, 0,
    "How often (in slabs) to update the metadata during a rebuild.");

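/*
 * With the defaults above, a rebuild copies one 1MB slab per cycle, an
 * idle volume runs up to 100 slabs per idle burst, a busy volume slips
 * in one slab per 20 regular I/Os (roughly 1/20 of the bandwidth), and
 * the rebuild position is flushed to metadata every 1024 slabs, i.e.
 * about once per 1GB of progress.  All four knobs are loader tunables
 * and runtime sysctls, e.g. (illustrative value only):
 *
 *	sysctl kern.geom.raid.raid1.rebuild_slab_size=2097152
 */
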
static MALLOC_DEFINE(M_TR_RAID1, "tr_raid1_data", "GEOM_RAID RAID1 data");

#define TR_RAID1_NONE		0
#define TR_RAID1_REBUILD	1
#define TR_RAID1_RESYNC		2

#define TR_RAID1_F_DOING_SOME	0x1
#define TR_RAID1_F_LOCKED	0x2
#define TR_RAID1_F_ABORT	0x4

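/*
 * Rebuild flag semantics, as used below: DOING_SOME means a rebuild I/O
 * is currently in flight, LOCKED means the rebuild holds a range lock
 * on the slab being copied, and ABORT asks the in-flight cycle to stop
 * at its next completion point.
 */
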
struct g_raid_tr_raid1_object {
	struct g_raid_tr_object  trso_base;
	int                      trso_starting;
	int                      trso_stopping;
	int                      trso_type;
	int                      trso_recover_slabs; /* slabs before rest */
	int                      trso_fair_io;
	int                      trso_meta_update;
	int                      trso_flags;
	struct g_raid_subdisk   *trso_failed_sd; /* like per volume */
	void                    *trso_buffer;    /* Buffer space */
	struct bio               trso_bio;
};

static g_raid_tr_taste_t g_raid_tr_taste_raid1;
static g_raid_tr_event_t g_raid_tr_event_raid1;
static g_raid_tr_start_t g_raid_tr_start_raid1;
static g_raid_tr_stop_t g_raid_tr_stop_raid1;
static g_raid_tr_iostart_t g_raid_tr_iostart_raid1;
static g_raid_tr_iodone_t g_raid_tr_iodone_raid1;
static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_raid1;
static g_raid_tr_locked_t g_raid_tr_locked_raid1;
static g_raid_tr_idle_t g_raid_tr_idle_raid1;
static g_raid_tr_free_t g_raid_tr_free_raid1;

static kobj_method_t g_raid_tr_raid1_methods[] = {
	KOBJMETHOD(g_raid_tr_taste,      g_raid_tr_taste_raid1),
	KOBJMETHOD(g_raid_tr_event,      g_raid_tr_event_raid1),
	KOBJMETHOD(g_raid_tr_start,      g_raid_tr_start_raid1),
	KOBJMETHOD(g_raid_tr_stop,       g_raid_tr_stop_raid1),
	KOBJMETHOD(g_raid_tr_iostart,    g_raid_tr_iostart_raid1),
	KOBJMETHOD(g_raid_tr_iodone,     g_raid_tr_iodone_raid1),
	KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid1),
	KOBJMETHOD(g_raid_tr_locked,     g_raid_tr_locked_raid1),
	KOBJMETHOD(g_raid_tr_idle,       g_raid_tr_idle_raid1),
	KOBJMETHOD(g_raid_tr_free,       g_raid_tr_free_raid1),
	KOBJMETHOD_END
};

static struct g_raid_tr_class g_raid_tr_raid1_class = {
	"RAID1",
	g_raid_tr_raid1_methods,
	sizeof(struct g_raid_tr_raid1_object),
	.trc_enable = 1,
	.trc_priority = 100,
	.trc_accept_unmapped = 1
};

static void g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr);
static void g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd);

static int
g_raid_tr_taste_raid1(struct g_raid_tr_object *tr, struct g_raid_volume *vol)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;
	if (tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_RAID1 ||
	    (tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_R1SM &&
	    tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_R1MM))
		return (G_RAID_TR_TASTE_FAIL);
	trs->trso_starting = 1;
	return (G_RAID_TR_TASTE_SUCCEED);
}

static int
g_raid_tr_update_state_raid1(struct g_raid_volume *vol,
    struct g_raid_subdisk *sd)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_softc *sc;
	struct g_raid_subdisk *tsd, *bestsd;
	u_int s;
	int i, na, ns;

	sc = vol->v_softc;
	trs = (struct g_raid_tr_raid1_object *)vol->v_tr;
	if (trs->trso_stopping &&
	    (trs->trso_flags & TR_RAID1_F_DOING_SOME) == 0)
		s = G_RAID_VOLUME_S_STOPPED;
	else if (trs->trso_starting)
		s = G_RAID_VOLUME_S_STARTING;
	else {
		/* Make sure we have at least one ACTIVE disk. */
		na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
		if (na == 0) {
			/*
			 * Critical situation!  We have no active disks!
			 * Choose the best disk we have to make it active.
			 */
			bestsd = &vol->v_subdisks[0];
			for (i = 1; i < vol->v_disks_count; i++) {
				tsd = &vol->v_subdisks[i];
				if (tsd->sd_state > bestsd->sd_state)
					bestsd = tsd;
				else if (tsd->sd_state == bestsd->sd_state &&
				    (tsd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
				     tsd->sd_state == G_RAID_SUBDISK_S_RESYNC) &&
				    tsd->sd_rebuild_pos > bestsd->sd_rebuild_pos)
					bestsd = tsd;
			}
			if (bestsd->sd_state >= G_RAID_SUBDISK_S_UNINITIALIZED) {
				/* We found a reasonable candidate. */
				G_RAID_DEBUG1(1, sc,
				    "Promote subdisk %s:%d from %s to ACTIVE.",
				    vol->v_name, bestsd->sd_pos,
				    g_raid_subdisk_state2str(bestsd->sd_state));
				g_raid_change_subdisk_state(bestsd,
				    G_RAID_SUBDISK_S_ACTIVE);
				g_raid_write_metadata(sc,
				    vol, bestsd, bestsd->sd_disk);
			}
		}
		na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
		ns = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
		    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
		if (na == vol->v_disks_count)
			s = G_RAID_VOLUME_S_OPTIMAL;
		else if (na + ns == vol->v_disks_count)
			s = G_RAID_VOLUME_S_SUBOPTIMAL;
		else if (na > 0)
			s = G_RAID_VOLUME_S_DEGRADED;
		else
			s = G_RAID_VOLUME_S_BROKEN;
		g_raid_tr_raid1_maybe_rebuild(vol->v_tr, sd);
	}
	if (s != vol->v_state) {
		g_raid_event_send(vol, G_RAID_VOLUME_S_ALIVE(s) ?
		    G_RAID_VOLUME_E_UP : G_RAID_VOLUME_E_DOWN,
		    G_RAID_EVENT_VOLUME);
		g_raid_change_volume_state(vol, s);
		if (!trs->trso_starting && !trs->trso_stopping)
			g_raid_write_metadata(sc, vol, NULL, NULL);
	}
	return (0);
}

static void
g_raid_tr_raid1_fail_disk(struct g_raid_softc *sc, struct g_raid_subdisk *sd,
    struct g_raid_disk *disk)
{
	/*
	 * We don't fail the last disk in the pack, since it still has decent
	 * data on it and that's better than failing the disk if it is the
	 * root file system.
	 *
	 * XXX should this be controlled via a tunable?  It makes sense for
	 * the volume that has / on it.  I can't think of a case where we'd
	 * want the volume to go away on this kind of event.
	 */
	if (g_raid_nsubdisks(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == 1 &&
	    g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == sd)
		return;
	g_raid_fail_disk(sc, sd, disk);
}

static void
g_raid_tr_raid1_rebuild_some(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd, *good_sd;
	struct bio *bp;

	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_flags & TR_RAID1_F_DOING_SOME)
		return;
	sd = trs->trso_failed_sd;
	good_sd = g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE);
	if (good_sd == NULL) {
		g_raid_tr_raid1_rebuild_abort(tr);
		return;
	}
	bp = &trs->trso_bio;
	memset(bp, 0, sizeof(*bp));
	bp->bio_offset = sd->sd_rebuild_pos;
	bp->bio_length = MIN(g_raid1_rebuild_slab,
	    sd->sd_size - sd->sd_rebuild_pos);
	bp->bio_data = trs->trso_buffer;
	bp->bio_cmd = BIO_READ;
	bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
	bp->bio_caller1 = good_sd;
	trs->trso_flags |= TR_RAID1_F_DOING_SOME;
	trs->trso_flags |= TR_RAID1_F_LOCKED;
	g_raid_lock_range(sd->sd_volume,	/* Lock callback starts I/O */
	    bp->bio_offset, bp->bio_length, NULL, bp);
}

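/*
 * For orientation: g_raid_tr_raid1_rebuild_some() above locks the
 * slab-sized range being copied and issues a BIO_READ from a good
 * subdisk into trso_buffer.  The iodone path later turns the completed
 * read into a BIO_WRITE to the failed subdisk and, once that write
 * finishes, unlocks the range, advances sd_rebuild_pos, and schedules,
 * finishes, or aborts the next cycle.
 */
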
static void
g_raid_tr_raid1_rebuild_done(struct g_raid_tr_raid1_object *trs)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;

	vol = trs->trso_base.tro_volume;
	sd = trs->trso_failed_sd;
	g_raid_write_metadata(vol->v_softc, vol, sd, sd->sd_disk);
	free(trs->trso_buffer, M_TR_RAID1);
	trs->trso_buffer = NULL;
	trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
	trs->trso_type = TR_RAID1_NONE;
	trs->trso_recover_slabs = 0;
	trs->trso_failed_sd = NULL;
	g_raid_tr_update_state_raid1(vol, NULL);
}

static void
g_raid_tr_raid1_rebuild_finish(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd;

	trs = (struct g_raid_tr_raid1_object *)tr;
	sd = trs->trso_failed_sd;
	G_RAID_DEBUG1(0, tr->tro_volume->v_softc,
	    "Subdisk %s:%d-%s rebuild completed.",
	    sd->sd_volume->v_name, sd->sd_pos,
	    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
	g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
	sd->sd_rebuild_pos = 0;
	g_raid_tr_raid1_rebuild_done(trs);
}

static void
g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd;
	struct g_raid_volume *vol;
	off_t len;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	sd = trs->trso_failed_sd;
	if (trs->trso_flags & TR_RAID1_F_DOING_SOME) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "Subdisk %s:%d-%s rebuild is aborting.",
		    sd->sd_volume->v_name, sd->sd_pos,
		    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
		trs->trso_flags |= TR_RAID1_F_ABORT;
	} else {
		G_RAID_DEBUG1(0, vol->v_softc,
		    "Subdisk %s:%d-%s rebuild aborted.",
		    sd->sd_volume->v_name, sd->sd_pos,
		    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
		trs->trso_flags &= ~TR_RAID1_F_ABORT;
		if (trs->trso_flags & TR_RAID1_F_LOCKED) {
			trs->trso_flags &= ~TR_RAID1_F_LOCKED;
			len = MIN(g_raid1_rebuild_slab,
			    sd->sd_size - sd->sd_rebuild_pos);
			g_raid_unlock_range(tr->tro_volume,
			    sd->sd_rebuild_pos, len);
		}
		g_raid_tr_raid1_rebuild_done(trs);
	}
}

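/*
 * Note the two-phase abort above: while a rebuild I/O is in flight
 * (TR_RAID1_F_DOING_SOME set) we only set TR_RAID1_F_ABORT and let the
 * iodone handler complete the abort; otherwise we drop the range lock,
 * if held, and tear the rebuild down immediately.
 */
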
static void
g_raid_tr_raid1_rebuild_start(struct g_raid_tr_object *tr)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd, *fsd;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_failed_sd) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "Already rebuild in start rebuild. pos %jd\n",
		    (intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
		return;
	}
	sd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_ACTIVE);
	if (sd == NULL) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "No active disk to rebuild.  night night.");
		return;
	}
	fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_RESYNC);
	if (fsd == NULL)
		fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_REBUILD);
	if (fsd == NULL) {
		fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_STALE);
		if (fsd != NULL) {
			fsd->sd_rebuild_pos = 0;
			g_raid_change_subdisk_state(fsd,
			    G_RAID_SUBDISK_S_RESYNC);
			g_raid_write_metadata(vol->v_softc, vol, fsd, NULL);
		} else {
			fsd = g_raid_get_subdisk(vol,
			    G_RAID_SUBDISK_S_UNINITIALIZED);
			if (fsd == NULL)
				fsd = g_raid_get_subdisk(vol,
				    G_RAID_SUBDISK_S_NEW);
			if (fsd != NULL) {
				fsd->sd_rebuild_pos = 0;
				g_raid_change_subdisk_state(fsd,
				    G_RAID_SUBDISK_S_REBUILD);
				g_raid_write_metadata(vol->v_softc,
				    vol, fsd, NULL);
			}
		}
	}
	if (fsd == NULL) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "No failed disk to rebuild.  night night.");
		return;
	}
	trs->trso_failed_sd = fsd;
	G_RAID_DEBUG1(0, vol->v_softc,
	    "Subdisk %s:%d-%s rebuild start at %jd.",
	    fsd->sd_volume->v_name, fsd->sd_pos,
	    fsd->sd_disk ? g_raid_get_diskname(fsd->sd_disk) : "[none]",
	    trs->trso_failed_sd->sd_rebuild_pos);
	trs->trso_type = TR_RAID1_REBUILD;
	trs->trso_buffer = malloc(g_raid1_rebuild_slab, M_TR_RAID1, M_WAITOK);
	trs->trso_meta_update = g_raid1_rebuild_meta_update;
	g_raid_tr_raid1_rebuild_some(tr);
}

static void
g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;
	int na, nr;

	/*
	 * If we're stopping, don't do anything.  If we don't have at least one
	 * good disk and one bad disk, we don't do anything.  And if there's a
	 * 'good disk' stored in the trs, then we're in progress and we punt.
	 * If we make it past all these checks, we need to rebuild.
	 */
	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_stopping)
		return;
	na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
	nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_REBUILD) +
	    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
	switch(trs->trso_type) {
	case TR_RAID1_NONE:
		if (na == 0)
			return;
		if (nr == 0) {
			nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_NEW) +
			    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
			    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_UNINITIALIZED);
			if (nr == 0)
				return;
		}
		g_raid_tr_raid1_rebuild_start(tr);
		break;
	case TR_RAID1_REBUILD:
		if (na == 0 || nr == 0 || trs->trso_failed_sd == sd)
			g_raid_tr_raid1_rebuild_abort(tr);
		break;
	case TR_RAID1_RESYNC:
		break;
	}
}

static int
g_raid_tr_event_raid1(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, u_int event)
{

	g_raid_tr_update_state_raid1(tr->tro_volume, sd);
	return (0);
}

static int
g_raid_tr_start_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_volume *vol;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	trs->trso_starting = 0;
	g_raid_tr_update_state_raid1(vol, NULL);
	return (0);
}

static int
g_raid_tr_stop_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_volume *vol;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	trs->trso_starting = 0;
	trs->trso_stopping = 1;
	g_raid_tr_update_state_raid1(vol, NULL);
	return (0);
}

/*
 * Select the disk to read from.  Take into account: subdisk state, running
 * error recovery, average disk load, head position and possible cache hits.
 */
#define ABS(x)		(((x) >= 0) ? (x) : (-(x)))
static struct g_raid_subdisk *
g_raid_tr_raid1_select_read_disk(struct g_raid_volume *vol, struct bio *bp,
    u_int mask)
{
	struct g_raid_subdisk *sd, *best;
	int i, prio, bestprio;

	best = NULL;
	bestprio = INT_MAX;
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		if (sd->sd_state != G_RAID_SUBDISK_S_ACTIVE &&
		    ((sd->sd_state != G_RAID_SUBDISK_S_REBUILD &&
		      sd->sd_state != G_RAID_SUBDISK_S_RESYNC) ||
		     bp->bio_offset + bp->bio_length > sd->sd_rebuild_pos))
			continue;
		if ((mask & (1 << i)) != 0)
			continue;
		prio = G_RAID_SUBDISK_LOAD(sd);
		prio += min(sd->sd_recovery, 255) << 22;
		prio += (G_RAID_SUBDISK_S_ACTIVE - sd->sd_state) << 16;
		/* If disk head is precisely in position - highly prefer it. */
		if (G_RAID_SUBDISK_POS(sd) == bp->bio_offset)
			prio -= 2 * G_RAID_SUBDISK_LOAD_SCALE;
		else
		/* If disk head is close to position - prefer it. */
		if (ABS(G_RAID_SUBDISK_POS(sd) - bp->bio_offset) <
		    G_RAID_SUBDISK_TRACK_SIZE)
			prio -= 1 * G_RAID_SUBDISK_LOAD_SCALE;
		if (prio < bestprio) {
			best = sd;
			bestprio = prio;
		}
	}
	return (best);
}

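/*
 * The priority above is a weighted sum: subdisks busy with error
 * recovery are penalized most (sd_recovery, capped at 255, shifted by
 * 22), non-ACTIVE but usable states next (shifted by 16), and current
 * load breaks ties.  A head sitting exactly at bio_offset earns a bonus
 * of 2 * G_RAID_SUBDISK_LOAD_SCALE, a head within one track earns half
 * that, so an idle, nearby disk wins the read.
 */
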
static void
g_raid_tr_iostart_raid1_read(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_subdisk *sd;
	struct bio *cbp;

	sd = g_raid_tr_raid1_select_read_disk(tr->tro_volume, bp, 0);
	KASSERT(sd != NULL, ("No active disks in volume %s.",
	    tr->tro_volume->v_name));

	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_raid_iodone(bp, ENOMEM);
		return;
	}

	g_raid_subdisk_iostart(sd, cbp);
}

static void
g_raid_tr_iostart_raid1_write(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	struct bio_queue_head queue;
	struct bio *cbp;
	int i;

	vol = tr->tro_volume;

	/*
	 * Allocate all bios before sending any request, so we can return
	 * ENOMEM in a nice and clean way.
	 */
	bioq_init(&queue);
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		switch (sd->sd_state) {
		case G_RAID_SUBDISK_S_ACTIVE:
			break;
		case G_RAID_SUBDISK_S_REBUILD:
			/*
			 * When rebuilding, only part of this subdisk is
			 * writable, the rest will be written as part of the
			 * rebuild process.
			 */
			if (bp->bio_offset >= sd->sd_rebuild_pos)
				continue;
			break;
		case G_RAID_SUBDISK_S_STALE:
		case G_RAID_SUBDISK_S_RESYNC:
			/*
			 * Resyncing still writes on the theory that the
			 * resync'd disk is very close and writing it will
			 * keep it that way better if we keep up while
			 * resyncing.
			 */
			break;
		default:
			continue;
		}
		cbp = g_clone_bio(bp);
		if (cbp == NULL)
			goto failure;
		cbp->bio_caller1 = sd;
		bioq_insert_tail(&queue, cbp);
	}
	while ((cbp = bioq_takefirst(&queue)) != NULL) {
		sd = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		g_raid_subdisk_iostart(sd, cbp);
	}
	return;
failure:
	while ((cbp = bioq_takefirst(&queue)) != NULL)
		g_destroy_bio(cbp);
	if (bp->bio_error == 0)
		bp->bio_error = ENOMEM;
	g_raid_iodone(bp, bp->bio_error);
}

static void
g_raid_tr_iostart_raid1(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL &&
	    vol->v_state != G_RAID_VOLUME_S_SUBOPTIMAL &&
	    vol->v_state != G_RAID_VOLUME_S_DEGRADED) {
		g_raid_iodone(bp, EIO);
		return;
	}
	/*
	 * If we're rebuilding, squeeze in rebuild activity every so often,
	 * even when the disk is busy.  Be sure to only count real I/O
	 * to the disk.  All 'SPECIAL' I/O is traffic generated to the disk
	 * as a result of a rebuild.
	 */
	if (trs->trso_failed_sd != NULL &&
	    !(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL)) {
		/* Cut any new or currently-running rebuild round short. */
		trs->trso_recover_slabs = 0;
		if (--trs->trso_fair_io <= 0) {
			trs->trso_fair_io = g_raid1_rebuild_fair_io;
			g_raid_tr_raid1_rebuild_some(tr);
		}
	}
	switch (bp->bio_cmd) {
	case BIO_READ:
		g_raid_tr_iostart_raid1_read(tr, bp);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		g_raid_tr_iostart_raid1_write(tr, bp);
		break;
	case BIO_FLUSH:
		g_raid_tr_flush_common(tr, bp);
		break;
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (volume=%s)",
		    bp->bio_cmd, vol->v_name));
		break;
	}
}

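/*
 * Fair-I/O arithmetic: with the default g_raid1_rebuild_fair_io of 20,
 * the countdown above lets one rebuild slab through per 20 regular
 * I/Os, so a busy volume still donates roughly 1/20 of its bandwidth to
 * the rebuild; an idle volume instead proceeds in bursts of
 * g_raid1_rebuild_cluster_idle slabs from the idle callback below.
 */
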
static void
g_raid_tr_iodone_raid1(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, struct bio *bp)
{
	struct bio *cbp;
	struct g_raid_subdisk *nsd;
	struct g_raid_volume *vol;
	struct bio *pbp;
	struct g_raid_tr_raid1_object *trs;
	uintptr_t *mask;
	int error, do_write;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	if (bp->bio_cflags & G_RAID_BIO_FLAG_SYNC) {
		/*
		 * This operation is part of a rebuild or resync operation.
		 * See what work just got done, then schedule the next bit of
		 * work, if any.  Rebuild/resync is done a little bit at a
		 * time.  Either when a timeout happens, or after we get a
		 * bunch of I/Os to the disk (to make sure an active system
		 * will complete in a sane amount of time).
		 *
		 * We are set up to do differing amounts of work for each of
		 * these cases.  So long as the slab count is smallish (less
		 * than 50 or so, I'd guess, but that's just a WAG), we
		 * shouldn't have any bio starvation issues.  For active
		 * disks, we do 5MB of data, for inactive ones, we do 50MB.
		 */
		if (trs->trso_type == TR_RAID1_REBUILD) {
			if (bp->bio_cmd == BIO_READ) {

				/* Immediately abort rebuild, if requested. */
				if (trs->trso_flags & TR_RAID1_F_ABORT) {
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}

				/* On read error, skip and cross fingers. */
				if (bp->bio_error != 0) {
					G_RAID_LOGREQ(0, bp,
					    "Read error during rebuild (%d), "
					    "possible data loss!",
					    bp->bio_error);
					goto rebuild_round_done;
				}

				/*
				 * The read operation finished, queue the
				 * write and get out.
				 */
				G_RAID_LOGREQ(4, bp, "rebuild read done. %d",
				    bp->bio_error);
				bp->bio_cmd = BIO_WRITE;
				bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
				G_RAID_LOGREQ(4, bp, "Queueing rebuild write.");
				g_raid_subdisk_iostart(trs->trso_failed_sd, bp);
			} else {
				/*
				 * The write operation just finished.  Do
				 * another.  We keep cloning the master bio
				 * since it has the right buffers allocated to
				 * it.
				 */
				G_RAID_LOGREQ(4, bp,
				    "rebuild write done. Error %d",
				    bp->bio_error);
				nsd = trs->trso_failed_sd;
				if (bp->bio_error != 0 ||
				    trs->trso_flags & TR_RAID1_F_ABORT) {
					if ((trs->trso_flags &
					    TR_RAID1_F_ABORT) == 0) {
						g_raid_tr_raid1_fail_disk(sd->sd_softc,
						    nsd, nsd->sd_disk);
					}
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}
rebuild_round_done:
				nsd = trs->trso_failed_sd;
				trs->trso_flags &= ~TR_RAID1_F_LOCKED;
				g_raid_unlock_range(sd->sd_volume,
				    bp->bio_offset, bp->bio_length);
				nsd->sd_rebuild_pos += bp->bio_length;
				if (nsd->sd_rebuild_pos >= nsd->sd_size) {
					g_raid_tr_raid1_rebuild_finish(tr);
					return;
				}

				/* Abort rebuild if we are stopping */
				if (trs->trso_stopping) {
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}

				if (--trs->trso_meta_update <= 0) {
					g_raid_write_metadata(vol->v_softc,
					    vol, nsd, nsd->sd_disk);
					trs->trso_meta_update =
					    g_raid1_rebuild_meta_update;
				}
				trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
				if (--trs->trso_recover_slabs <= 0)
					return;
				g_raid_tr_raid1_rebuild_some(tr);
			}
		} else if (trs->trso_type == TR_RAID1_RESYNC) {
			/*
			 * Read the good sd and the bad sd in parallel.
			 * When both are done, compare the buffers and write
			 * the good data to the bad disk where they differ.
			 * Then do the next bit of work.
			 */
			panic("Somehow, we think we're doing a resync");
		}
		return;
	}
	pbp = bp->bio_parent;
	pbp->bio_inbed++;
	if (bp->bio_cmd == BIO_READ && bp->bio_error != 0) {
		/*
		 * Read failed on first drive.  Retry the read error on
		 * another disk drive, if available, before erroring out the
		 * read.
		 */
		sd->sd_disk->d_read_errs++;
		G_RAID_LOGREQ(0, bp,
		    "Read error (%d), %d read errors total",
		    bp->bio_error, sd->sd_disk->d_read_errs);

		/*
		 * If there are too many read errors, we move to degraded.
		 * XXX Do we want to FAIL the drive (eg, make the user redo
		 * everything to get it back in sync), or just degrade the
		 * drive, which kicks off a resync?
		 */
		do_write = 1;
		if (sd->sd_disk->d_read_errs > g_raid_read_err_thresh) {
			g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
			if (pbp->bio_children == 1)
				do_write = 0;
		}

		/*
		 * Find the other disk, and try to do the I/O to it.
		 */
		mask = (uintptr_t *)(&pbp->bio_driver2);
		if (pbp->bio_children == 1) {
			/* Save original subdisk. */
			pbp->bio_driver1 = do_write ? sd : NULL;
			*mask = 0;
		}
		*mask |= 1 << sd->sd_pos;
		nsd = g_raid_tr_raid1_select_read_disk(vol, pbp, *mask);
		if (nsd != NULL && (cbp = g_clone_bio(pbp)) != NULL) {
			g_destroy_bio(bp);
			G_RAID_LOGREQ(2, cbp, "Retrying read from %d",
			    nsd->sd_pos);
			if (pbp->bio_children == 2 && do_write) {
				sd->sd_recovery++;
				cbp->bio_caller1 = nsd;
				pbp->bio_pflags = G_RAID_BIO_FLAG_LOCKED;
				/* Lock callback starts I/O */
				g_raid_lock_range(sd->sd_volume,
				    cbp->bio_offset, cbp->bio_length, pbp, cbp);
			} else
				g_raid_subdisk_iostart(nsd, cbp);
			return;
		}

		/*
		 * We can't retry.  Return the original error by falling
		 * through.  This will happen when there's only one good disk.
		 * We don't need to fail the raid, since its actual state is
		 * based on the state of the subdisks.
		 */
		G_RAID_LOGREQ(2, bp, "Couldn't retry read, failing it");
	}
	if (bp->bio_cmd == BIO_READ &&
	    bp->bio_error == 0 &&
	    pbp->bio_children > 1 &&
	    pbp->bio_driver1 != NULL) {
		/*
		 * If it was a read, and bio_children is >1, then we just
		 * recovered the data from the second drive.  We should try to
		 * write that data to the first drive if sector remapping is
		 * enabled.  A write should put the data in a new place on the
		 * disk, remapping the bad sector.  Do we need to do that by
		 * queueing a request to the main worker thread?  It doesn't
		 * affect the return code of this current read, and can be
		 * done at our leisure.  However, to make the code simpler, it
		 * is done synchronously.
		 */
		G_RAID_LOGREQ(3, bp, "Recovered data from other drive");
		cbp = g_clone_bio(pbp);
		if (cbp != NULL) {
			g_destroy_bio(bp);
			cbp->bio_cmd = BIO_WRITE;
			cbp->bio_cflags = G_RAID_BIO_FLAG_REMAP;
			G_RAID_LOGREQ(2, cbp,
			    "Attempting bad sector remap on failing drive.");
			g_raid_subdisk_iostart(pbp->bio_driver1, cbp);
			return;
		}
	}
	if (pbp->bio_pflags & G_RAID_BIO_FLAG_LOCKED) {
		/*
		 * We're done with a recovery, mark the range as unlocked.
		 * For any write errors, we aggressively fail the disk since
		 * there was both a READ and a WRITE error at this location.
		 * Both types of errors generally indicate the drive is on
		 * the verge of total failure anyway.  Better to stop trusting
		 * it now.  However, we need to reset error to 0 in that case
		 * because we're not failing the original I/O which succeeded.
		 */
		if (bp->bio_cmd == BIO_WRITE && bp->bio_error) {
			G_RAID_LOGREQ(0, bp, "Remap write failed: "
			    "failing subdisk.");
			g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
			bp->bio_error = 0;
		}
		if (pbp->bio_driver1 != NULL) {
			((struct g_raid_subdisk *)pbp->bio_driver1)
			    ->sd_recovery--;
		}
		G_RAID_LOGREQ(2, bp, "REMAP done %d.", bp->bio_error);
		g_raid_unlock_range(sd->sd_volume, bp->bio_offset,
		    bp->bio_length);
	}
	if (pbp->bio_cmd != BIO_READ) {
		if (pbp->bio_inbed == 1 || pbp->bio_error != 0)
			pbp->bio_error = bp->bio_error;
		if (pbp->bio_cmd == BIO_WRITE && bp->bio_error != 0) {
			G_RAID_LOGREQ(0, bp, "Write failed: failing subdisk.");
			g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
		}
		error = pbp->bio_error;
	} else
		error = bp->bio_error;
	g_destroy_bio(bp);
	if (pbp->bio_children == pbp->bio_inbed) {
		pbp->bio_completed = pbp->bio_length;
		g_raid_iodone(pbp, error);
	}
}

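/*
 * A note on the bio field reuse in the error paths above: the parent
 * bio's bio_driver2 doubles as a bitmask of subdisk positions already
 * tried for a failing read, while bio_driver1 remembers the original
 * failing subdisk so that recovered data can be written back to it (the
 * "remap" write) after a retry from another mirror succeeds.
 */
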
static int
g_raid_tr_kerneldump_raid1(struct g_raid_tr_object *tr,
    void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	int error, i, ok;

	vol = tr->tro_volume;
	error = 0;
	ok = 0;
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		switch (sd->sd_state) {
		case G_RAID_SUBDISK_S_ACTIVE:
			break;
		case G_RAID_SUBDISK_S_REBUILD:
			/*
			 * When rebuilding, only part of this subdisk is
			 * writable, the rest will be written as part of the
			 * rebuild process.
			 */
			if (offset >= sd->sd_rebuild_pos)
				continue;
			break;
		case G_RAID_SUBDISK_S_STALE:
		case G_RAID_SUBDISK_S_RESYNC:
			/*
			 * Resyncing still writes on the theory that the
			 * resync'd disk is very close and writing it will
			 * keep it that way better if we keep up while
			 * resyncing.
			 */
			break;
		default:
			continue;
		}
		error = g_raid_subdisk_kerneldump(sd,
		    virtual, physical, offset, length);
		if (error == 0)
			ok++;
	}
	return (ok > 0 ? 0 : error);
}

static int
g_raid_tr_locked_raid1(struct g_raid_tr_object *tr, void *argp)
{
	struct bio *bp;
	struct g_raid_subdisk *sd;

	bp = (struct bio *)argp;
	sd = (struct g_raid_subdisk *)bp->bio_caller1;
	g_raid_subdisk_iostart(sd, bp);

	return (0);
}

static int
g_raid_tr_idle_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;
	trs->trso_fair_io = g_raid1_rebuild_fair_io;
	trs->trso_recover_slabs = g_raid1_rebuild_cluster_idle;
	if (trs->trso_type == TR_RAID1_REBUILD)
		g_raid_tr_raid1_rebuild_some(tr);
	return (0);
}

static int
g_raid_tr_free_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;

	if (trs->trso_buffer != NULL) {
		free(trs->trso_buffer, M_TR_RAID1);
		trs->trso_buffer = NULL;
	}
	return (0);
}

G_RAID_TR_DECLARE(raid1, "RAID1");