/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include "geom/raid/g_raid.h"
#include "g_raid_tr_if.h"

SYSCTL_DECL(_kern_geom_raid_raid1);

#define RAID1_REBUILD_SLAB	(1 << 20) /* One transaction in a rebuild */
static int g_raid1_rebuild_slab = RAID1_REBUILD_SLAB;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_slab_size, CTLFLAG_RWTUN,
    &g_raid1_rebuild_slab, 0,
    "Amount of the disk to rebuild each read/write cycle of the rebuild.");

#define RAID1_REBUILD_FAIR_IO	20 /* use 1/x of the available I/O */
static int g_raid1_rebuild_fair_io = RAID1_REBUILD_FAIR_IO;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_fair_io, CTLFLAG_RWTUN,
    &g_raid1_rebuild_fair_io, 0,
    "Fraction of the I/O bandwidth to use when the disk is busy for rebuild.");

#define RAID1_REBUILD_CLUSTER_IDLE	100
static int g_raid1_rebuild_cluster_idle = RAID1_REBUILD_CLUSTER_IDLE;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_cluster_idle, CTLFLAG_RWTUN,
    &g_raid1_rebuild_cluster_idle, 0,
    "Number of slabs to do each time we trigger a rebuild cycle.");

#define RAID1_REBUILD_META_UPDATE	1024 /* update metadata every 1GB or so */
static int g_raid1_rebuild_meta_update = RAID1_REBUILD_META_UPDATE;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_meta_update, CTLFLAG_RWTUN,
    &g_raid1_rebuild_meta_update, 0,
    "Number of rebuild slabs between metadata updates.");

static MALLOC_DEFINE(M_TR_RAID1, "tr_raid1_data", "GEOM_RAID RAID1 data");
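
/*
 * Rebuild state kept in trso_type: NONE when no recovery is running,
 * REBUILD while a failed subdisk is being copied.  RESYNC is declared
 * but not implemented by this module (see the panic in
 * g_raid_tr_iodone_raid1()).
 */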
#define TR_RAID1_NONE		0
#define TR_RAID1_REBUILD	1
#define TR_RAID1_RESYNC		2
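
/*
 * Bits for trso_flags: DOING_SOME marks a rebuild I/O round in flight,
 * LOCKED marks the current rebuild range as range-locked, and ABORT
 * asks the rebuild to stop at the next I/O completion.
 */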
#define TR_RAID1_F_DOING_SOME	0x1
#define TR_RAID1_F_LOCKED	0x2
#define TR_RAID1_F_ABORT	0x4
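
/*
 * Per-volume RAID1 transformation state.  The uncommented fields are
 * reconstructed from their use below: trso_fair_io counts regular I/Os
 * until the next in-line rebuild round, trso_meta_update counts slabs
 * until the next metadata write, and trso_bio/trso_buffer carry the
 * slab currently being copied.
 */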
struct g_raid_tr_raid1_object {
	struct g_raid_tr_object	 trso_base;
	int			 trso_starting;
	int			 trso_stopping;
	int			 trso_type;
	int			 trso_recover_slabs;	/* slabs before rest */
	int			 trso_fair_io;
	int			 trso_meta_update;
	int			 trso_flags;
	struct g_raid_subdisk	*trso_failed_sd;	/* like per volume */
	void			*trso_buffer;		/* Buffer space */
	struct bio		 trso_bio;
};

static g_raid_tr_taste_t g_raid_tr_taste_raid1;
static g_raid_tr_event_t g_raid_tr_event_raid1;
static g_raid_tr_start_t g_raid_tr_start_raid1;
static g_raid_tr_stop_t g_raid_tr_stop_raid1;
static g_raid_tr_iostart_t g_raid_tr_iostart_raid1;
static g_raid_tr_iodone_t g_raid_tr_iodone_raid1;
static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_raid1;
static g_raid_tr_locked_t g_raid_tr_locked_raid1;
static g_raid_tr_idle_t g_raid_tr_idle_raid1;
static g_raid_tr_free_t g_raid_tr_free_raid1;

static kobj_method_t g_raid_tr_raid1_methods[] = {
	KOBJMETHOD(g_raid_tr_taste,	g_raid_tr_taste_raid1),
	KOBJMETHOD(g_raid_tr_event,	g_raid_tr_event_raid1),
	KOBJMETHOD(g_raid_tr_start,	g_raid_tr_start_raid1),
	KOBJMETHOD(g_raid_tr_stop,	g_raid_tr_stop_raid1),
	KOBJMETHOD(g_raid_tr_iostart,	g_raid_tr_iostart_raid1),
	KOBJMETHOD(g_raid_tr_iodone,	g_raid_tr_iodone_raid1),
	KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid1),
	KOBJMETHOD(g_raid_tr_locked,	g_raid_tr_locked_raid1),
	KOBJMETHOD(g_raid_tr_idle,	g_raid_tr_idle_raid1),
	KOBJMETHOD(g_raid_tr_free,	g_raid_tr_free_raid1),
	KOBJMETHOD_END
};

static struct g_raid_tr_class g_raid_tr_raid1_class = {
	"RAID1",
	g_raid_tr_raid1_methods,
	sizeof(struct g_raid_tr_raid1_object),
	.trc_enable = 1,
	.trc_priority = 100,
	.trc_accept_unmapped = 1
};

static void g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr);
static void g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd);
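
/*
 * Taste: accept only plain RAID1 volumes, with either the single- or
 * the multi-disk mirror level qualifier.
 */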
static int
g_raid_tr_taste_raid1(struct g_raid_tr_object *tr, struct g_raid_volume *vol)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;
	if (tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_RAID1 ||
	    (tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_R1SM &&
	     tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_R1MM))
		return (G_RAID_TR_TASTE_FAIL);
	trs->trso_starting = 1;
	return (G_RAID_TR_TASTE_SUCCEED);
}
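
/*
 * Recompute the volume state from the subdisk states.  If no subdisk is
 * ACTIVE, the best remaining candidate (preferring the one whose rebuild
 * got the furthest) is promoted to ACTIVE first, since slightly stale
 * data is better than no data at all.
 */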
static int
g_raid_tr_update_state_raid1(struct g_raid_volume *vol,
    struct g_raid_subdisk *sd)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_softc *sc;
	struct g_raid_subdisk *tsd, *bestsd;
	u_int s;
	int i, na, ns;

	sc = vol->v_softc;
	trs = (struct g_raid_tr_raid1_object *)vol->v_tr;
	if (trs->trso_stopping &&
	    (trs->trso_flags & TR_RAID1_F_DOING_SOME) == 0)
		s = G_RAID_VOLUME_S_STOPPED;
	else if (trs->trso_starting)
		s = G_RAID_VOLUME_S_STARTING;
	else {
		/* Make sure we have at least one ACTIVE disk. */
		na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
		if (na == 0) {
			/*
			 * Critical situation!  We have no active disks!
			 * Choose the best disk we have to make it active.
			 */
			bestsd = &vol->v_subdisks[0];
			for (i = 1; i < vol->v_disks_count; i++) {
				tsd = &vol->v_subdisks[i];
				if (tsd->sd_state > bestsd->sd_state)
					bestsd = tsd;
				else if (tsd->sd_state == bestsd->sd_state &&
				    (tsd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
				     tsd->sd_state == G_RAID_SUBDISK_S_RESYNC) &&
				    tsd->sd_rebuild_pos > bestsd->sd_rebuild_pos)
					bestsd = tsd;
			}
			if (bestsd->sd_state >= G_RAID_SUBDISK_S_UNINITIALIZED) {
				/* We found a reasonable candidate. */
				G_RAID_DEBUG1(1, sc,
				    "Promote subdisk %s:%d from %s to ACTIVE.",
				    vol->v_name, bestsd->sd_pos,
				    g_raid_subdisk_state2str(bestsd->sd_state));
				g_raid_change_subdisk_state(bestsd,
				    G_RAID_SUBDISK_S_ACTIVE);
				g_raid_write_metadata(sc,
				    vol, bestsd, bestsd->sd_disk);
			}
		}
		na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
		ns = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
		    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
		if (na == vol->v_disks_count)
			s = G_RAID_VOLUME_S_OPTIMAL;
		else if (na + ns == vol->v_disks_count)
			s = G_RAID_VOLUME_S_SUBOPTIMAL;
		else if (na > 0)
			s = G_RAID_VOLUME_S_DEGRADED;
		else
			s = G_RAID_VOLUME_S_BROKEN;
		g_raid_tr_raid1_maybe_rebuild(vol->v_tr, sd);
	}
	if (s != vol->v_state) {
		g_raid_event_send(vol, G_RAID_VOLUME_S_ALIVE(s) ?
		    G_RAID_VOLUME_E_UP : G_RAID_VOLUME_E_DOWN,
		    G_RAID_EVENT_VOLUME);
		g_raid_change_volume_state(vol, s);
		if (!trs->trso_starting && !trs->trso_stopping)
			g_raid_write_metadata(sc, vol, NULL, NULL);
	}
	return (0);
}

static void
g_raid_tr_raid1_fail_disk(struct g_raid_softc *sc, struct g_raid_subdisk *sd,
    struct g_raid_disk *disk)
{
	/*
	 * We don't fail the last disk in the pack, since it still has decent
	 * data on it and that's better than failing the disk if it is the root
	 * file system.
	 *
	 * XXX should this be controlled via a tunable?  It makes sense for
	 * the volume that has / on it.  I can't think of a case where we'd
	 * want the volume to go away on this kind of event.
	 */
	if (g_raid_nsubdisks(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == 1 &&
	    g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == sd)
		return;
	g_raid_fail_disk(sc, sd, disk);
}
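
/*
 * Copy one slab of the failed subdisk: pick an ACTIVE mirror, range-lock
 * the slab and queue a read from the good disk; the lock callback starts
 * the I/O and g_raid_tr_iodone_raid1() turns the completed read into a
 * write to the failed subdisk.
 */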
static void
g_raid_tr_raid1_rebuild_some(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd, *good_sd;
	struct bio *bp;

	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_flags & TR_RAID1_F_DOING_SOME)
		return;
	sd = trs->trso_failed_sd;
	good_sd = g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE);
	if (good_sd == NULL) {
		g_raid_tr_raid1_rebuild_abort(tr);
		return;
	}
	bp = &trs->trso_bio;
	memset(bp, 0, sizeof(*bp));
	bp->bio_offset = sd->sd_rebuild_pos;
	bp->bio_length = MIN(g_raid1_rebuild_slab,
	    sd->sd_size - sd->sd_rebuild_pos);
	bp->bio_data = trs->trso_buffer;
	bp->bio_cmd = BIO_READ;
	bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
	bp->bio_caller1 = good_sd;
	trs->trso_flags |= TR_RAID1_F_DOING_SOME;
	trs->trso_flags |= TR_RAID1_F_LOCKED;
	g_raid_lock_range(sd->sd_volume,	/* Lock callback starts I/O */
	    bp->bio_offset, bp->bio_length, NULL, bp);
}

static void
g_raid_tr_raid1_rebuild_done(struct g_raid_tr_raid1_object *trs)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;

	vol = trs->trso_base.tro_volume;
	sd = trs->trso_failed_sd;
	g_raid_write_metadata(vol->v_softc, vol, sd, sd->sd_disk);
	free(trs->trso_buffer, M_TR_RAID1);
	trs->trso_buffer = NULL;
	trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
	trs->trso_type = TR_RAID1_NONE;
	trs->trso_recover_slabs = 0;
	trs->trso_failed_sd = NULL;
	g_raid_tr_update_state_raid1(vol, NULL);
}

static void
g_raid_tr_raid1_rebuild_finish(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd;

	trs = (struct g_raid_tr_raid1_object *)tr;
	sd = trs->trso_failed_sd;
	G_RAID_DEBUG1(0, tr->tro_volume->v_softc,
	    "Subdisk %s:%d-%s rebuild completed.",
	    sd->sd_volume->v_name, sd->sd_pos,
	    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
	g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
	sd->sd_rebuild_pos = 0;
	g_raid_tr_raid1_rebuild_done(trs);
}

static void
g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd;
	struct g_raid_volume *vol;
	off_t len;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	sd = trs->trso_failed_sd;
	if (trs->trso_flags & TR_RAID1_F_DOING_SOME) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "Subdisk %s:%d-%s rebuild is aborting.",
		    sd->sd_volume->v_name, sd->sd_pos,
		    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
		trs->trso_flags |= TR_RAID1_F_ABORT;
	} else {
		G_RAID_DEBUG1(0, vol->v_softc,
		    "Subdisk %s:%d-%s rebuild aborted.",
		    sd->sd_volume->v_name, sd->sd_pos,
		    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
		trs->trso_flags &= ~TR_RAID1_F_ABORT;
		if (trs->trso_flags & TR_RAID1_F_LOCKED) {
			trs->trso_flags &= ~TR_RAID1_F_LOCKED;
			len = MIN(g_raid1_rebuild_slab,
			    sd->sd_size - sd->sd_rebuild_pos);
			g_raid_unlock_range(tr->tro_volume,
			    sd->sd_rebuild_pos, len);
		}
		g_raid_tr_raid1_rebuild_done(trs);
	}
}
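
/*
 * Pick the subdisk to recover, in order of preference: an interrupted
 * RESYNC or REBUILD first, then a STALE disk (restarted as a RESYNC),
 * then an UNINITIALIZED or NEW disk (started as a REBUILD).
 */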
static void
g_raid_tr_raid1_rebuild_start(struct g_raid_tr_object *tr)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd, *fsd;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_failed_sd) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "Rebuild already in progress at pos %jd.",
		    (intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
		return;
	}
	sd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_ACTIVE);
	if (sd == NULL) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "No active disk to rebuild.  night night.");
		return;
	}
	fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_RESYNC);
	if (fsd == NULL)
		fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_REBUILD);
	if (fsd == NULL) {
		fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_STALE);
		if (fsd != NULL) {
			fsd->sd_rebuild_pos = 0;
			g_raid_change_subdisk_state(fsd,
			    G_RAID_SUBDISK_S_RESYNC);
			g_raid_write_metadata(vol->v_softc, vol, fsd, NULL);
		} else {
			fsd = g_raid_get_subdisk(vol,
			    G_RAID_SUBDISK_S_UNINITIALIZED);
			if (fsd == NULL)
				fsd = g_raid_get_subdisk(vol,
				    G_RAID_SUBDISK_S_NEW);
			if (fsd != NULL) {
				fsd->sd_rebuild_pos = 0;
				g_raid_change_subdisk_state(fsd,
				    G_RAID_SUBDISK_S_REBUILD);
				g_raid_write_metadata(vol->v_softc,
				    vol, fsd, NULL);
			}
		}
	}
	if (fsd == NULL) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "No failed disk to rebuild.  night night.");
		return;
	}
	trs->trso_failed_sd = fsd;
	G_RAID_DEBUG1(0, vol->v_softc,
	    "Subdisk %s:%d-%s rebuild start at %jd.",
	    fsd->sd_volume->v_name, fsd->sd_pos,
	    fsd->sd_disk ? g_raid_get_diskname(fsd->sd_disk) : "[none]",
	    (intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
	trs->trso_type = TR_RAID1_REBUILD;
	trs->trso_buffer = malloc(g_raid1_rebuild_slab, M_TR_RAID1, M_WAITOK);
	trs->trso_meta_update = g_raid1_rebuild_meta_update;
	g_raid_tr_raid1_rebuild_some(tr);
}

static void
g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;
	int na, nr;

	/*
	 * If we're stopping, don't do anything.  If we don't have at least one
	 * good disk and one bad disk, we don't do anything.  And if there's a
	 * 'good disk' stored in the trs, then we're in progress and we punt.
	 * If we make it past all these checks, we need to rebuild.
	 */
	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_stopping)
		return;
	na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
	nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_REBUILD) +
	    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
	switch(trs->trso_type) {
	case TR_RAID1_NONE:
		if (na == 0)
			return;
		if (nr == 0) {
			nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_NEW) +
			    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
			    g_raid_nsubdisks(vol,
			    G_RAID_SUBDISK_S_UNINITIALIZED);
			if (nr == 0)
				return;
		}
		g_raid_tr_raid1_rebuild_start(tr);
		break;
	case TR_RAID1_REBUILD:
		if (na == 0 || nr == 0 || trs->trso_failed_sd == sd)
			g_raid_tr_raid1_rebuild_abort(tr);
		break;
	case TR_RAID1_RESYNC:
		break;
	}
}

static int
g_raid_tr_event_raid1(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, u_int event)
{

	g_raid_tr_update_state_raid1(tr->tro_volume, sd);
	return (0);
}

static int
g_raid_tr_start_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_volume *vol;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	trs->trso_starting = 0;
	g_raid_tr_update_state_raid1(vol, NULL);
	return (0);
}

static int
g_raid_tr_stop_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_volume *vol;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	trs->trso_starting = 0;
	trs->trso_stopping = 1;
	g_raid_tr_update_state_raid1(vol, NULL);
	return (0);
}

/*
 * Select the disk to read from.  Take into account: subdisk state, running
 * error recovery, average disk load, head position and possible cache hits.
 */
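/*
 * Priority is additive and lower is better: base disk load, plus heavy
 * penalties for subdisks already serving recovery I/O or not fully
 * ACTIVE, minus bonuses when the disk head is at or near the requested
 * offset.
 */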
#define ABS(x)		(((x) >= 0) ? (x) : (-(x)))
static struct g_raid_subdisk *
g_raid_tr_raid1_select_read_disk(struct g_raid_volume *vol, struct bio *bp,
    u_int mask)
{
	struct g_raid_subdisk *sd, *best;
	int i, prio, bestprio;

	best = NULL;
	bestprio = INT_MAX;
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		if (sd->sd_state != G_RAID_SUBDISK_S_ACTIVE &&
		    ((sd->sd_state != G_RAID_SUBDISK_S_REBUILD &&
		      sd->sd_state != G_RAID_SUBDISK_S_RESYNC) ||
		     bp->bio_offset + bp->bio_length > sd->sd_rebuild_pos))
			continue;
		if ((mask & (1 << i)) != 0)
			continue;
		prio = G_RAID_SUBDISK_LOAD(sd);
		prio += min(sd->sd_recovery, 255) << 22;
		prio += (G_RAID_SUBDISK_S_ACTIVE - sd->sd_state) << 16;
		/* If disk head is precisely in position - highly prefer it. */
		if (G_RAID_SUBDISK_POS(sd) == bp->bio_offset)
			prio -= 2 * G_RAID_SUBDISK_LOAD_SCALE;
		else
		/* If disk head is close to position - prefer it. */
		if (ABS(G_RAID_SUBDISK_POS(sd) - bp->bio_offset) <
		    G_RAID_SUBDISK_TRACK_SIZE)
			prio -= 1 * G_RAID_SUBDISK_LOAD_SCALE;
		if (prio < bestprio) {
			best = sd;
			bestprio = prio;
		}
	}
	return (best);
}
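
/*
 * Dispatch a read to the best available mirror; a zero mask excludes no
 * subdisks (the iodone error path passes a mask of already-tried ones).
 */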
static void
g_raid_tr_iostart_raid1_read(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_subdisk *sd;
	struct bio *cbp;

	sd = g_raid_tr_raid1_select_read_disk(tr->tro_volume, bp, 0);
	KASSERT(sd != NULL, ("No active disks in volume %s.",
	    tr->tro_volume->v_name));

	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_raid_iodone(bp, ENOMEM);
		return;
	}

	g_raid_subdisk_iostart(sd, cbp);
}

static void
g_raid_tr_iostart_raid1_write(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	struct bio_queue_head queue;
	struct bio *cbp;
	int i;

	vol = tr->tro_volume;

	/*
	 * Allocate all bios before sending any request, so we can return
	 * ENOMEM in nice and clean way.
	 */
	bioq_init(&queue);
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		switch (sd->sd_state) {
		case G_RAID_SUBDISK_S_ACTIVE:
			break;
		case G_RAID_SUBDISK_S_REBUILD:
			/*
			 * When rebuilding, only part of this subdisk is
			 * writable, the rest will be written as part of the
			 * rebuild.
			 */
			if (bp->bio_offset >= sd->sd_rebuild_pos)
				continue;
			break;
		case G_RAID_SUBDISK_S_STALE:
		case G_RAID_SUBDISK_S_RESYNC:
			/*
			 * Resyncing still writes on the theory that the
			 * resync'd disk is very close and writing it will
			 * keep it that way better if we keep up while
			 * resyncing.
			 */
			break;
		default:
			continue;
		}
		cbp = g_clone_bio(bp);
		if (cbp == NULL)
			goto failure;
		cbp->bio_caller1 = sd;
		bioq_insert_tail(&queue, cbp);
	}
	while ((cbp = bioq_takefirst(&queue)) != NULL) {
		sd = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		g_raid_subdisk_iostart(sd, cbp);
	}
	return;
failure:
	while ((cbp = bioq_takefirst(&queue)) != NULL)
		g_destroy_bio(cbp);
	if (bp->bio_error == 0)
		bp->bio_error = ENOMEM;
	g_raid_iodone(bp, bp->bio_error);
}
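
/*
 * Entry point for regular volume I/O.  Requests are refused unless the
 * volume is at least DEGRADED; while a rebuild is pending, every
 * g_raid1_rebuild_fair_io'th regular request sneaks in one rebuild round.
 */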
static void
g_raid_tr_iostart_raid1(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL &&
	    vol->v_state != G_RAID_VOLUME_S_SUBOPTIMAL &&
	    vol->v_state != G_RAID_VOLUME_S_DEGRADED) {
		g_raid_iodone(bp, EIO);
		return;
	}
	/*
	 * If we're rebuilding, squeeze in rebuild activity every so often,
	 * even when the disk is busy.  Be sure to only count real I/O
	 * to the disk.  All 'SPECIAL' I/O is traffic generated to the disk
	 * by this module.
	 */
	if (trs->trso_failed_sd != NULL &&
	    !(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL)) {
		/* Cut any new or in-progress idle round short. */
		trs->trso_recover_slabs = 0;
		if (--trs->trso_fair_io <= 0) {
			trs->trso_fair_io = g_raid1_rebuild_fair_io;
			g_raid_tr_raid1_rebuild_some(tr);
		}
	}
	switch (bp->bio_cmd) {
	case BIO_READ:
		g_raid_tr_iostart_raid1_read(tr, bp);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		g_raid_tr_iostart_raid1_write(tr, bp);
		break;
	case BIO_FLUSH:
		g_raid_tr_flush_common(tr, bp);
		break;
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (volume=%s)",
		    bp->bio_cmd, vol->v_name));
		break;
	}
}
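
/*
 * I/O completion.  Three concerns meet here: advancing the rebuild state
 * machine for SYNC bios, retrying failed reads on another mirror (a
 * bitmask of already-tried subdisks lives in pbp->bio_driver2 and the
 * original failing subdisk in pbp->bio_driver1), and writing recovered
 * data back over the bad sector so the drive can remap it.
 */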
static void
g_raid_tr_iodone_raid1(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, struct bio *bp)
{
	struct bio *cbp;
	struct g_raid_subdisk *nsd;
	struct g_raid_volume *vol;
	struct bio *pbp;
	struct g_raid_tr_raid1_object *trs;
	uintptr_t *mask;
	int error, do_write;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	if (bp->bio_cflags & G_RAID_BIO_FLAG_SYNC) {
		/*
		 * This operation is part of a rebuild or resync operation.
		 * See what work just got done, then schedule the next bit of
		 * work, if any.  Rebuild/resync is done a little bit at a
		 * time.  Either when a timeout happens, or after we get a
		 * bunch of I/Os to the disk (to make sure an active system
		 * will complete in a sane amount of time).
		 *
		 * We are set up to do differing amounts of work for each of
		 * these cases.  So long as the slab count stays smallish
		 * (less than 50 or so, I'd guess, but that's just a WAG),
		 * we shouldn't have any bio starvation issues.  For active
		 * disks, we do 5MB of data, for inactive ones, we do 50MB.
		 */
		if (trs->trso_type == TR_RAID1_REBUILD) {
			if (bp->bio_cmd == BIO_READ) {

				/* Immediately abort rebuild, if requested. */
				if (trs->trso_flags & TR_RAID1_F_ABORT) {
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}

				/* On read error, skip and cross fingers. */
				if (bp->bio_error != 0) {
					G_RAID_LOGREQ(0, bp,
					    "Read error during rebuild (%d), "
					    "possible data loss!",
					    bp->bio_error);
					goto rebuild_round_done;
				}

				/*
				 * The read operation finished, queue the
				 * write and get out.
				 */
				G_RAID_LOGREQ(4, bp, "rebuild read done. %d",
				    bp->bio_error);
				bp->bio_cmd = BIO_WRITE;
				bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
				G_RAID_LOGREQ(4, bp, "Queueing rebuild write.");
				g_raid_subdisk_iostart(trs->trso_failed_sd, bp);
			} else {
				/*
				 * The write operation just finished.  Do
				 * another.  We keep cloning the master bio
				 * since it has the right buffers allocated to
				 * it.
				 */
				G_RAID_LOGREQ(4, bp,
				    "rebuild write done. Error %d",
				    bp->bio_error);
				nsd = trs->trso_failed_sd;
				if (bp->bio_error != 0 ||
				    trs->trso_flags & TR_RAID1_F_ABORT) {
					if ((trs->trso_flags &
					    TR_RAID1_F_ABORT) == 0) {
						g_raid_tr_raid1_fail_disk(sd->sd_softc,
						    nsd, nsd->sd_disk);
					}
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}
rebuild_round_done:
				nsd = trs->trso_failed_sd;
				trs->trso_flags &= ~TR_RAID1_F_LOCKED;
				g_raid_unlock_range(sd->sd_volume,
				    bp->bio_offset, bp->bio_length);
				nsd->sd_rebuild_pos += bp->bio_length;
				if (nsd->sd_rebuild_pos >= nsd->sd_size) {
					g_raid_tr_raid1_rebuild_finish(tr);
					return;
				}

				/* Abort rebuild if we are stopping */
				if (trs->trso_stopping) {
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}

				if (--trs->trso_meta_update <= 0) {
					g_raid_write_metadata(vol->v_softc,
					    vol, nsd, nsd->sd_disk);
					trs->trso_meta_update =
					    g_raid1_rebuild_meta_update;
				}
				trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
				if (--trs->trso_recover_slabs <= 0)
					return;
				g_raid_tr_raid1_rebuild_some(tr);
			}
		} else if (trs->trso_type == TR_RAID1_RESYNC) {
			/*
			 * Read good sd, read bad sd in parallel.  When both
			 * done, compare the buffers.  Write good to the bad
			 * if different.  Do the next bit of work.
			 */
			panic("Somehow, we think we're doing a resync");
		}
		return;
	}
	pbp = bp->bio_parent;
	pbp->bio_inbed++;
	if (bp->bio_cmd == BIO_READ && bp->bio_error != 0) {
		/*
		 * Read failed on first drive.  Retry the read error on
		 * another disk drive, if available, before erroring out the
		 * read.
		 */
		sd->sd_disk->d_read_errs++;
		G_RAID_LOGREQ(0, bp,
		    "Read error (%d), %d read errors total",
		    bp->bio_error, sd->sd_disk->d_read_errs);

		/*
		 * If there are too many read errors, we move to degraded.
		 * XXX Do we want to FAIL the drive (eg, make the user redo
		 * everything to get it back in sync), or just degrade the
		 * drive, which kicks off a resync?
		 */
		do_write = 1;
		if (sd->sd_disk->d_read_errs > g_raid_read_err_thresh) {
			g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
			if (pbp->bio_children == 1)
				do_write = 0;
		}

		/*
		 * Find the other disk, and try to do the I/O to it.
		 */
		mask = (uintptr_t *)(&pbp->bio_driver2);
		if (pbp->bio_children == 1) {
			/* Save original subdisk. */
			pbp->bio_driver1 = do_write ? sd : NULL;
			*mask = 0;
		}
		*mask |= 1 << sd->sd_pos;
		nsd = g_raid_tr_raid1_select_read_disk(vol, pbp, *mask);
		if (nsd != NULL && (cbp = g_clone_bio(pbp)) != NULL) {
			g_destroy_bio(bp);
			G_RAID_LOGREQ(2, cbp, "Retrying read from %d",
			    nsd->sd_pos);
			if (pbp->bio_children == 2 && do_write) {
				sd->sd_recovery++;
				cbp->bio_caller1 = nsd;
				pbp->bio_pflags = G_RAID_BIO_FLAG_LOCKED;
				/* Lock callback starts I/O */
				g_raid_lock_range(sd->sd_volume,
				    cbp->bio_offset, cbp->bio_length, pbp, cbp);
			} else {
				g_raid_subdisk_iostart(nsd, cbp);
			}
			return;
		}
		/*
		 * We can't retry.  Return the original error by falling
		 * through.  This will happen when there's only one good disk.
		 * We don't need to fail the raid, since its actual state is
		 * based on the state of the subdisks.
		 */
		G_RAID_LOGREQ(2, bp, "Couldn't retry read, failing it");
	}
	if (bp->bio_cmd == BIO_READ &&
	    bp->bio_error == 0 &&
	    pbp->bio_children > 1 &&
	    pbp->bio_driver1 != NULL) {
		/*
		 * If it was a read, and bio_children is >1, then we just
		 * recovered the data from the second drive.  We should try to
		 * write that data to the first drive if sector remapping is
		 * enabled.  A write should put the data in a new place on the
		 * disk, remapping the bad sector.  Do we need to do that by
		 * queueing a request to the main worker thread?  It doesn't
		 * affect the return code of this current read, and can be
		 * done at our leisure.  However, to make the code simpler, it
		 * is done synchronously.
		 */
		G_RAID_LOGREQ(3, bp, "Recovered data from other drive");
		cbp = g_clone_bio(pbp);
		if (cbp != NULL) {
			g_destroy_bio(bp);
			cbp->bio_cmd = BIO_WRITE;
			cbp->bio_cflags = G_RAID_BIO_FLAG_REMAP;
			G_RAID_LOGREQ(2, cbp,
			    "Attempting bad sector remap on failing drive.");
			g_raid_subdisk_iostart(pbp->bio_driver1, cbp);
			return;
		}
	}
	if (pbp->bio_pflags & G_RAID_BIO_FLAG_LOCKED) {
		/*
		 * We're done with a recovery, mark the range as unlocked.
		 * For any write errors, we aggressively fail the disk since
		 * there was both a READ and a WRITE error at this location.
		 * Both types of errors generally indicate the drive is on
		 * the verge of total failure anyway.  Better to stop trusting
		 * it now.  However, we need to reset error to 0 in that case
		 * because we're not failing the original I/O which succeeded.
		 */
		if (bp->bio_cmd == BIO_WRITE && bp->bio_error) {
			G_RAID_LOGREQ(0, bp, "Remap write failed: "
			    "failing subdisk.");
			g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
			bp->bio_error = 0;
		}
		if (pbp->bio_driver1 != NULL) {
			((struct g_raid_subdisk *)pbp->bio_driver1)
			    ->sd_recovery--;
		}
		G_RAID_LOGREQ(2, bp, "REMAP done %d.", bp->bio_error);
		g_raid_unlock_range(sd->sd_volume, bp->bio_offset,
		    bp->bio_length);
	}
	if (pbp->bio_cmd != BIO_READ) {
		if (pbp->bio_inbed == 1 || pbp->bio_error != 0)
			pbp->bio_error = bp->bio_error;
		if (pbp->bio_cmd == BIO_WRITE && bp->bio_error != 0) {
			G_RAID_LOGREQ(0, bp, "Write failed: failing subdisk.");
			g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
		}
		error = pbp->bio_error;
	} else
		error = bp->bio_error;
	g_destroy_bio(bp);
	if (pbp->bio_children == pbp->bio_inbed) {
		pbp->bio_completed = pbp->bio_length;
		g_raid_iodone(pbp, error);
	}
}
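
/*
 * Kernel dump mirrors the write path: dump to every writable subdisk and
 * report success if at least one copy made it to disk.
 */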
static int
g_raid_tr_kerneldump_raid1(struct g_raid_tr_object *tr,
    void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	int error, i, ok;

	vol = tr->tro_volume;
	error = 0;
	ok = 0;
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		switch (sd->sd_state) {
		case G_RAID_SUBDISK_S_ACTIVE:
			break;
		case G_RAID_SUBDISK_S_REBUILD:
			/*
			 * When rebuilding, only part of this subdisk is
			 * writable, the rest will be written as part of the
			 * rebuild.
			 */
			if (offset >= sd->sd_rebuild_pos)
				continue;
			break;
		case G_RAID_SUBDISK_S_STALE:
		case G_RAID_SUBDISK_S_RESYNC:
			/*
			 * Resyncing still writes on the theory that the
			 * resync'd disk is very close and writing it will
			 * keep it that way better if we keep up while
			 * resyncing.
			 */
			break;
		default:
			continue;
		}
		error = g_raid_subdisk_kerneldump(sd,
		    virtual, physical, offset, length);
		if (error == 0)
			ok++;
	}
	return (ok > 0 ? 0 : error);
}
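
/*
 * Called once g_raid_lock_range() grants a range lock: start the I/O
 * that was deferred until the lock was held (stashed in bio_caller1).
 */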
static int
g_raid_tr_locked_raid1(struct g_raid_tr_object *tr, void *argp)
{
	struct bio *bp;
	struct g_raid_subdisk *sd;

	bp = (struct bio *)argp;
	sd = (struct g_raid_subdisk *)bp->bio_caller1;
	g_raid_subdisk_iostart(sd, bp);

	return (0);
}
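
/*
 * Idle callback: refill the fairness budget and allow a burst of
 * g_raid1_rebuild_cluster_idle slabs while the volume is quiet.
 */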
static int
g_raid_tr_idle_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;
	trs->trso_fair_io = g_raid1_rebuild_fair_io;
	trs->trso_recover_slabs = g_raid1_rebuild_cluster_idle;
	if (trs->trso_type == TR_RAID1_REBUILD)
		g_raid_tr_raid1_rebuild_some(tr);
	return (0);
}

static int
g_raid_tr_free_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;

	if (trs->trso_buffer != NULL) {
		free(trs->trso_buffer, M_TR_RAID1);
		trs->trso_buffer = NULL;
	}
	return (0);
}

G_RAID_TR_DECLARE(raid1, "RAID1");