/*-
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <geom/geom.h>
#include "g_raid.h"
#include "g_raid_tr_if.h"

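/*
 * The tunables below control how aggressively a rebuild competes with
 * regular I/O: the slab size copied per transaction, the fraction of
 * bandwidth taken while the volume is busy, the number of slabs copied
 * per idle burst, and how often rebuild progress is checkpointed.
 */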
SYSCTL_DECL(_kern_geom_raid);
SYSCTL_NODE(_kern_geom_raid, OID_AUTO, raid1, CTLFLAG_RW, 0,
    "RAID1 parameters");

#define RAID1_REBUILD_SLAB	(1 << 20) /* One transaction in a rebuild */
static int g_raid1_rebuild_slab = RAID1_REBUILD_SLAB;
TUNABLE_INT("kern.geom.raid.raid1.rebuild_slab_size",
    &g_raid1_rebuild_slab);
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_slab_size, CTLFLAG_RW,
    &g_raid1_rebuild_slab, 0,
    "Amount of the disk to rebuild each read/write cycle of the rebuild.");

#define RAID1_REBUILD_FAIR_IO	20 /* use 1/x of the available I/O */
static int g_raid1_rebuild_fair_io = RAID1_REBUILD_FAIR_IO;
TUNABLE_INT("kern.geom.raid.raid1.rebuild_fair_io",
    &g_raid1_rebuild_fair_io);
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_fair_io, CTLFLAG_RW,
    &g_raid1_rebuild_fair_io, 0,
    "Fraction of the I/O bandwidth to use when the disk is busy for rebuild.");

#define RAID1_REBUILD_CLUSTER_IDLE	100
static int g_raid1_rebuild_cluster_idle = RAID1_REBUILD_CLUSTER_IDLE;
TUNABLE_INT("kern.geom.raid.raid1.rebuild_cluster_idle",
    &g_raid1_rebuild_cluster_idle);
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_cluster_idle, CTLFLAG_RW,
    &g_raid1_rebuild_cluster_idle, 0,
    "Number of slabs to do each time we trigger a rebuild cycle");

#define RAID1_REBUILD_META_UPDATE	1024 /* update metadata every 1GB or so */
static int g_raid1_rebuild_meta_update = RAID1_REBUILD_META_UPDATE;
TUNABLE_INT("kern.geom.raid.raid1.rebuild_meta_update",
    &g_raid1_rebuild_meta_update);
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_meta_update, CTLFLAG_RW,
    &g_raid1_rebuild_meta_update, 0,
    "Number of rebuild slabs between metadata updates.");

static MALLOC_DEFINE(M_TR_RAID1, "tr_raid1_data", "GEOM_RAID RAID1 data");

#define TR_RAID1_NONE		0
#define TR_RAID1_REBUILD	1
#define TR_RAID1_RESYNC		2

#define TR_RAID1_F_DOING_SOME	0x1
#define TR_RAID1_F_LOCKED	0x2
#define TR_RAID1_F_ABORT	0x4

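/*
 * trso_flags bits: DOING_SOME marks a rebuild transaction in flight,
 * LOCKED that the rebuild currently holds a range lock, and ABORT that
 * the rebuild should stop at the next safe point.
 */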
struct g_raid_tr_raid1_object {
	struct g_raid_tr_object	 trso_base;
	int			 trso_starting;
	int			 trso_stopping;
	int			 trso_type;
	int			 trso_recover_slabs;	/* slabs before rest */
	int			 trso_fair_io;
	int			 trso_meta_update;
	int			 trso_flags;
	struct g_raid_subdisk	*trso_failed_sd;	/* like per volume */
	void			*trso_buffer;		/* Buffer space */
	struct bio		 trso_bio;
};

static g_raid_tr_taste_t g_raid_tr_taste_raid1;
static g_raid_tr_event_t g_raid_tr_event_raid1;
static g_raid_tr_start_t g_raid_tr_start_raid1;
static g_raid_tr_stop_t g_raid_tr_stop_raid1;
static g_raid_tr_iostart_t g_raid_tr_iostart_raid1;
static g_raid_tr_iodone_t g_raid_tr_iodone_raid1;
static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_raid1;
static g_raid_tr_locked_t g_raid_tr_locked_raid1;
static g_raid_tr_idle_t g_raid_tr_idle_raid1;
static g_raid_tr_free_t g_raid_tr_free_raid1;

static kobj_method_t g_raid_tr_raid1_methods[] = {
	KOBJMETHOD(g_raid_tr_taste,	g_raid_tr_taste_raid1),
	KOBJMETHOD(g_raid_tr_event,	g_raid_tr_event_raid1),
	KOBJMETHOD(g_raid_tr_start,	g_raid_tr_start_raid1),
	KOBJMETHOD(g_raid_tr_stop,	g_raid_tr_stop_raid1),
	KOBJMETHOD(g_raid_tr_iostart,	g_raid_tr_iostart_raid1),
	KOBJMETHOD(g_raid_tr_iodone,	g_raid_tr_iodone_raid1),
	KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid1),
	KOBJMETHOD(g_raid_tr_locked,	g_raid_tr_locked_raid1),
	KOBJMETHOD(g_raid_tr_idle,	g_raid_tr_idle_raid1),
	KOBJMETHOD(g_raid_tr_free,	g_raid_tr_free_raid1),
	KOBJMETHOD_END
};

static struct g_raid_tr_class g_raid_tr_raid1_class = {
	"RAID1",
	g_raid_tr_raid1_methods,
	sizeof(struct g_raid_tr_raid1_object),
};

static void g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr);
static void g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd);

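/*
 * Taste only plain RAID1 volumes (no level qualifier).  Mark the module
 * as starting so state updates report STARTING until the start method runs.
 */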
static int
g_raid_tr_taste_raid1(struct g_raid_tr_object *tr, struct g_raid_volume *vol)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;
	if (tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_RAID1 ||
	    tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_NONE)
		return (G_RAID_TR_TASTE_FAIL);
	trs->trso_starting = 1;
	return (G_RAID_TR_TASTE_SUCCEED);
}

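/*
 * Recompute the volume state from the subdisk states.  If no subdisk is
 * ACTIVE, promote the most complete candidate, start or abort a rebuild
 * as needed, and send UP/DOWN events on state transitions.
 */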
static int
g_raid_tr_update_state_raid1(struct g_raid_volume *vol,
    struct g_raid_subdisk *sd)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_softc *sc;
	struct g_raid_subdisk *tsd, *bestsd;
	u_int s;
	int i, na, ns;

	sc = vol->v_softc;
	trs = (struct g_raid_tr_raid1_object *)vol->v_tr;
	if (trs->trso_stopping &&
	    (trs->trso_flags & TR_RAID1_F_DOING_SOME) == 0)
		s = G_RAID_VOLUME_S_STOPPED;
	else if (trs->trso_starting)
		s = G_RAID_VOLUME_S_STARTING;
	else {
		/* Make sure we have at least one ACTIVE disk. */
		na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
		if (na == 0) {
			/*
			 * Critical situation!  We have no active disks!
			 * Choose the best disk we have to make it active.
			 */
			bestsd = &vol->v_subdisks[0];
			for (i = 1; i < vol->v_disks_count; i++) {
				tsd = &vol->v_subdisks[i];
				if (tsd->sd_state > bestsd->sd_state)
					bestsd = tsd;
				else if (tsd->sd_state == bestsd->sd_state &&
				    (tsd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
				     tsd->sd_state == G_RAID_SUBDISK_S_RESYNC) &&
				    tsd->sd_rebuild_pos > bestsd->sd_rebuild_pos)
					bestsd = tsd;
			}
			if (bestsd->sd_state >= G_RAID_SUBDISK_S_UNINITIALIZED) {
				/* We found a reasonable candidate. */
				G_RAID_DEBUG1(1, sc,
				    "Promote subdisk %s:%d from %s to ACTIVE.",
				    vol->v_name, bestsd->sd_pos,
				    g_raid_subdisk_state2str(bestsd->sd_state));
				g_raid_change_subdisk_state(bestsd,
				    G_RAID_SUBDISK_S_ACTIVE);
				g_raid_write_metadata(sc,
				    vol, bestsd, bestsd->sd_disk);
			}
		}
		na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
		ns = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
		    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
		if (na == vol->v_disks_count)
			s = G_RAID_VOLUME_S_OPTIMAL;
		else if (na + ns == vol->v_disks_count)
			s = G_RAID_VOLUME_S_SUBOPTIMAL;
		else if (na > 0)
			s = G_RAID_VOLUME_S_DEGRADED;
		else
			s = G_RAID_VOLUME_S_BROKEN;
		g_raid_tr_raid1_maybe_rebuild(vol->v_tr, sd);
	}
	if (s != vol->v_state) {
		g_raid_event_send(vol, G_RAID_VOLUME_S_ALIVE(s) ?
		    G_RAID_VOLUME_E_UP : G_RAID_VOLUME_E_DOWN,
		    G_RAID_EVENT_VOLUME);
		g_raid_change_volume_state(vol, s);
		if (!trs->trso_starting && !trs->trso_stopping)
			g_raid_write_metadata(sc, vol, NULL, NULL);
	}
	return (0);
}

static void
g_raid_tr_raid1_fail_disk(struct g_raid_softc *sc, struct g_raid_subdisk *sd,
    struct g_raid_disk *disk)
{
	/*
	 * We don't fail the last disk in the pack, since it still has decent
	 * data on it and that's better than failing the disk if it is the root
	 * file system.
	 *
	 * XXX should this be controlled via a tunable?  It makes sense for
	 * the volume that has / on it.  I can't think of a case where we'd
	 * want the volume to go away on this kind of event.
	 */
	if (g_raid_nsubdisks(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == 1 &&
	    g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == sd)
		return;
	g_raid_fail_disk(sc, sd, disk);
}

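/*
 * Issue the next rebuild transaction: read one slab from a good subdisk
 * into the preallocated buffer, with the range locked so regular writes
 * cannot race the copy.  The I/O completion path turns this read into a
 * write to the failed subdisk.
 */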
static void
g_raid_tr_raid1_rebuild_some(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd, *good_sd;
	struct bio *bp;

	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_flags & TR_RAID1_F_DOING_SOME)
		return;
	sd = trs->trso_failed_sd;
	good_sd = g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE);
	if (good_sd == NULL) {
		g_raid_tr_raid1_rebuild_abort(tr);
		return;
	}
	bp = &trs->trso_bio;
	memset(bp, 0, sizeof(*bp));
	bp->bio_offset = sd->sd_rebuild_pos;
	bp->bio_length = MIN(g_raid1_rebuild_slab,
	    sd->sd_size - sd->sd_rebuild_pos);
	bp->bio_data = trs->trso_buffer;
	bp->bio_cmd = BIO_READ;
	bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
	bp->bio_caller1 = good_sd;
	trs->trso_flags |= TR_RAID1_F_DOING_SOME;
	trs->trso_flags |= TR_RAID1_F_LOCKED;
	g_raid_lock_range(sd->sd_volume,	/* Lock callback starts I/O */
	    bp->bio_offset, bp->bio_length, NULL, bp);
}

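/*
 * Common cleanup after a rebuild ends for any reason: checkpoint
 * metadata, release the copy buffer and recompute the volume state.
 */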
static void
g_raid_tr_raid1_rebuild_done(struct g_raid_tr_raid1_object *trs)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;

	vol = trs->trso_base.tro_volume;
	sd = trs->trso_failed_sd;
	g_raid_write_metadata(vol->v_softc, vol, sd, sd->sd_disk);
	free(trs->trso_buffer, M_TR_RAID1);
	trs->trso_buffer = NULL;
	trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
	trs->trso_type = TR_RAID1_NONE;
	trs->trso_recover_slabs = 0;
	trs->trso_failed_sd = NULL;
	g_raid_tr_update_state_raid1(vol, NULL);
}

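/* The failed subdisk has been fully copied; promote it to ACTIVE. */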
static void
g_raid_tr_raid1_rebuild_finish(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd;

	trs = (struct g_raid_tr_raid1_object *)tr;
	sd = trs->trso_failed_sd;
	G_RAID_DEBUG1(0, tr->tro_volume->v_softc,
	    "Subdisk %s:%d-%s rebuild completed.",
	    sd->sd_volume->v_name, sd->sd_pos,
	    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
	g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
	sd->sd_rebuild_pos = 0;
	g_raid_tr_raid1_rebuild_done(trs);
}

static void
g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd;
	struct g_raid_volume *vol;
	off_t len;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	sd = trs->trso_failed_sd;
	if (trs->trso_flags & TR_RAID1_F_DOING_SOME) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "Subdisk %s:%d-%s rebuild is aborting.",
		    sd->sd_volume->v_name, sd->sd_pos,
		    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
		trs->trso_flags |= TR_RAID1_F_ABORT;
	} else {
		G_RAID_DEBUG1(0, vol->v_softc,
		    "Subdisk %s:%d-%s rebuild aborted.",
		    sd->sd_volume->v_name, sd->sd_pos,
		    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
		trs->trso_flags &= ~TR_RAID1_F_ABORT;
		if (trs->trso_flags & TR_RAID1_F_LOCKED) {
			trs->trso_flags &= ~TR_RAID1_F_LOCKED;
			len = MIN(g_raid1_rebuild_slab,
			    sd->sd_size - sd->sd_rebuild_pos);
			g_raid_unlock_range(tr->tro_volume,
			    sd->sd_rebuild_pos, len);
		}
		g_raid_tr_raid1_rebuild_done(trs);
	}
}

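/*
 * Pick a rebuild target in order of preference: RESYNC, REBUILD, STALE
 * (restarted as a resync) and finally UNINITIALIZED/NEW (started as a
 * full rebuild), then allocate the slab buffer and issue the first copy.
 */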
static void
g_raid_tr_raid1_rebuild_start(struct g_raid_tr_object *tr)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd, *fsd;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_failed_sd) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "Rebuild already in progress, pos %jd\n",
		    (intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
		return;
	}
	sd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_ACTIVE);
	if (sd == NULL) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "No active disk to rebuild.  night night.");
		return;
	}
	fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_RESYNC);
	if (fsd == NULL)
		fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_REBUILD);
	if (fsd == NULL) {
		fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_STALE);
		if (fsd != NULL) {
			fsd->sd_rebuild_pos = 0;
			g_raid_change_subdisk_state(fsd,
			    G_RAID_SUBDISK_S_RESYNC);
			g_raid_write_metadata(vol->v_softc, vol, fsd, NULL);
		} else {
			fsd = g_raid_get_subdisk(vol,
			    G_RAID_SUBDISK_S_UNINITIALIZED);
			if (fsd == NULL)
				fsd = g_raid_get_subdisk(vol,
				    G_RAID_SUBDISK_S_NEW);
			if (fsd != NULL) {
				fsd->sd_rebuild_pos = 0;
				g_raid_change_subdisk_state(fsd,
				    G_RAID_SUBDISK_S_REBUILD);
				g_raid_write_metadata(vol->v_softc,
				    vol, fsd, NULL);
			}
		}
	}
	if (fsd == NULL) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "No failed disk to rebuild.  night night.");
		return;
	}
	trs->trso_failed_sd = fsd;
	G_RAID_DEBUG1(0, vol->v_softc,
	    "Subdisk %s:%d-%s rebuild start at %jd.",
	    fsd->sd_volume->v_name, fsd->sd_pos,
	    fsd->sd_disk ? g_raid_get_diskname(fsd->sd_disk) : "[none]",
	    (intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
	trs->trso_type = TR_RAID1_REBUILD;
	trs->trso_buffer = malloc(g_raid1_rebuild_slab, M_TR_RAID1, M_WAITOK);
	trs->trso_meta_update = g_raid1_rebuild_meta_update;
	g_raid_tr_raid1_rebuild_some(tr);
}

static void
g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;
	int na, nr;

	/*
	 * If we're stopping, don't do anything.  If we don't have at least one
	 * good disk and one bad disk, we don't do anything.  And if there's a
	 * 'good disk' stored in the trs, then we're in progress and we punt.
	 * If we make it past all these checks, we need to rebuild.
	 */
	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_stopping)
		return;
	na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
	nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_REBUILD) +
	    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
	switch (trs->trso_type) {
	case TR_RAID1_NONE:
		if (na == 0)
			return;
		if (nr == 0) {
			nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_NEW) +
			    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
			    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_UNINITIALIZED);
			if (nr == 0)
				return;
		}
		g_raid_tr_raid1_rebuild_start(tr);
		break;
	case TR_RAID1_REBUILD:
		if (na == 0 || nr == 0 || trs->trso_failed_sd == sd)
			g_raid_tr_raid1_rebuild_abort(tr);
		break;
	case TR_RAID1_RESYNC:
		break;
	}
}

static int
g_raid_tr_event_raid1(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, u_int event)
{

	g_raid_tr_update_state_raid1(tr->tro_volume, sd);
	return (0);
}

static int
g_raid_tr_start_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_volume *vol;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	trs->trso_starting = 0;
	g_raid_tr_update_state_raid1(vol, NULL);
	return (0);
}

static int
g_raid_tr_stop_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_volume *vol;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	trs->trso_starting = 0;
	trs->trso_stopping = 1;
	g_raid_tr_update_state_raid1(vol, NULL);
	return (0);
}

/*
 * Select the disk to read from.  Take into account: subdisk state, running
 * error recovery, average disk load, head position and possible cache hits.
 */
#define ABS(x)		(((x) >= 0) ? (x) : (-(x)))
static struct g_raid_subdisk *
g_raid_tr_raid1_select_read_disk(struct g_raid_volume *vol, struct bio *bp,
    u_int mask)
{
	struct g_raid_subdisk *sd, *best;
	int i, prio, bestprio;

	best = NULL;
	bestprio = INT_MAX;
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		if (sd->sd_state != G_RAID_SUBDISK_S_ACTIVE &&
		    ((sd->sd_state != G_RAID_SUBDISK_S_REBUILD &&
		      sd->sd_state != G_RAID_SUBDISK_S_RESYNC) ||
		     bp->bio_offset + bp->bio_length > sd->sd_rebuild_pos))
			continue;
		if ((mask & (1 << i)) != 0)
			continue;
		prio = G_RAID_SUBDISK_LOAD(sd);
		prio += min(sd->sd_recovery, 255) << 22;
		prio += (G_RAID_SUBDISK_S_ACTIVE - sd->sd_state) << 16;
		/* If disk head is precisely in position - highly prefer it. */
		if (G_RAID_SUBDISK_POS(sd) == bp->bio_offset)
			prio -= 2 * G_RAID_SUBDISK_LOAD_SCALE;
		else
		/* If disk head is close to position - prefer it. */
		if (ABS(G_RAID_SUBDISK_POS(sd) - bp->bio_offset) <
		    G_RAID_SUBDISK_TRACK_SIZE)
			prio -= 1 * G_RAID_SUBDISK_LOAD_SCALE;
		if (prio < bestprio) {
			best = sd;
			bestprio = prio;
		}
	}
	return (best);
}

static void
g_raid_tr_iostart_raid1_read(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_subdisk *sd;
	struct bio *cbp;

	sd = g_raid_tr_raid1_select_read_disk(tr->tro_volume, bp, 0);
	KASSERT(sd != NULL, ("No active disks in volume %s.",
	    tr->tro_volume->v_name));

	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_raid_iodone(bp, ENOMEM);
		return;
	}

	g_raid_subdisk_iostart(sd, cbp);
}

static void
g_raid_tr_iostart_raid1_write(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	struct bio_queue_head queue;
	struct bio *cbp;
	int i;

	vol = tr->tro_volume;

	/*
	 * Allocate all bios before sending any request, so we can return
	 * ENOMEM in a nice and clean way.
	 */
	bioq_init(&queue);
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		switch (sd->sd_state) {
		case G_RAID_SUBDISK_S_ACTIVE:
			break;
		case G_RAID_SUBDISK_S_REBUILD:
			/*
			 * When rebuilding, only part of this subdisk is
			 * writable, the rest will be written as part of the
			 * rebuild.
			 */
			if (bp->bio_offset >= sd->sd_rebuild_pos)
				continue;
			break;
		case G_RAID_SUBDISK_S_STALE:
		case G_RAID_SUBDISK_S_RESYNC:
			/*
			 * Resyncing still writes on the theory that the
			 * resync'd disk is very close and writing it will
			 * keep it that way better if we keep up while
			 * resyncing.
			 */
			break;
		default:
			continue;
		}
		cbp = g_clone_bio(bp);
		if (cbp == NULL)
			goto failure;
		cbp->bio_caller1 = sd;
		bioq_insert_tail(&queue, cbp);
	}
	for (cbp = bioq_first(&queue); cbp != NULL;
	    cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		sd = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		g_raid_subdisk_iostart(sd, cbp);
	}
	return;
failure:
	for (cbp = bioq_first(&queue); cbp != NULL;
	    cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		g_destroy_bio(cbp);
	}
	if (bp->bio_error == 0)
		bp->bio_error = ENOMEM;
	g_raid_iodone(bp, bp->bio_error);
}

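/*
 * Top-level I/O dispatch.  Requests are failed while the volume is not
 * usable; a pending rebuild steals every Nth regular request before the
 * request is routed by command type.
 */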
static void
g_raid_tr_iostart_raid1(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL &&
	    vol->v_state != G_RAID_VOLUME_S_SUBOPTIMAL &&
	    vol->v_state != G_RAID_VOLUME_S_DEGRADED) {
		g_raid_iodone(bp, EIO);
		return;
	}
	/*
	 * If we're rebuilding, squeeze in rebuild activity every so often,
	 * even when the disk is busy.  Be sure to only count real I/O
	 * to the disk.  All 'SPECIAL' I/O is traffic generated to the disk
	 * as part of a rebuild.
	 */
	if (trs->trso_failed_sd != NULL &&
	    !(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL)) {
		/* Make this new or running now round short. */
		trs->trso_recover_slabs = 0;
		if (--trs->trso_fair_io <= 0) {
			trs->trso_fair_io = g_raid1_rebuild_fair_io;
			g_raid_tr_raid1_rebuild_some(tr);
		}
	}
	switch (bp->bio_cmd) {
	case BIO_READ:
		g_raid_tr_iostart_raid1_read(tr, bp);
		break;
	case BIO_WRITE:
		g_raid_tr_iostart_raid1_write(tr, bp);
		break;
	case BIO_DELETE:
		g_raid_iodone(bp, EIO);
		break;
	case BIO_FLUSH:
		g_raid_tr_flush_common(tr, bp);
		break;
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (volume=%s)",
		    bp->bio_cmd, vol->v_name));
		break;
	}
}

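/*
 * I/O completion.  The G_RAID_BIO_FLAG_SYNC branch advances the rebuild
 * state machine (read slab, write slab, unlock, repeat); the rest handles
 * read-error retries, bad sector remapping and parent bio completion.
 */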
static void
g_raid_tr_iodone_raid1(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, struct bio *bp)
{
	struct bio *cbp;
	struct g_raid_subdisk *nsd;
	struct g_raid_volume *vol;
	struct bio *pbp;
	struct g_raid_tr_raid1_object *trs;
	uintptr_t *mask;
	int error, do_write;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	if (bp->bio_cflags & G_RAID_BIO_FLAG_SYNC) {
		/*
		 * This operation is part of a rebuild or resync operation.
		 * See what work just got done, then schedule the next bit of
		 * work, if any.  Rebuild/resync is done a little bit at a
		 * time.  Either when a timeout happens, or after we get a
		 * bunch of I/Os to the disk (to make sure an active system
		 * will complete in a sane amount of time).
		 *
		 * We are set up to do differing amounts of work for each of
		 * these cases.  So long as the slab count is smallish (less
		 * than 50 or so, I'd guess, but that's just a WAG), we
		 * shouldn't have any bio starvation issues.  For active
		 * disks, we do 5MB of data, for inactive ones, we do 50MB.
		 */
		if (trs->trso_type == TR_RAID1_REBUILD) {
			if (bp->bio_cmd == BIO_READ) {

				/* Immediately abort rebuild, if requested. */
				if (trs->trso_flags & TR_RAID1_F_ABORT) {
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}

				/* On read error, skip and cross fingers. */
				if (bp->bio_error != 0) {
					G_RAID_LOGREQ(0, bp,
					    "Read error during rebuild (%d), "
					    "possible data loss!",
					    bp->bio_error);
					goto rebuild_round_done;
				}

				/*
				 * The read operation finished, queue the
				 * write and get out.
				 */
				G_RAID_LOGREQ(4, bp, "rebuild read done.  %d",
				    bp->bio_error);
				bp->bio_cmd = BIO_WRITE;
				bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
				G_RAID_LOGREQ(4, bp, "Queueing rebuild write.");
				g_raid_subdisk_iostart(trs->trso_failed_sd, bp);
			} else {
				/*
				 * The write operation just finished.  Do
				 * another.  We keep cloning the master bio
				 * since it has the right buffers allocated to
				 * it.
				 */
				G_RAID_LOGREQ(4, bp,
				    "rebuild write done.  Error %d",
				    bp->bio_error);
				nsd = trs->trso_failed_sd;
				if (bp->bio_error != 0 ||
				    trs->trso_flags & TR_RAID1_F_ABORT) {
					if ((trs->trso_flags &
					    TR_RAID1_F_ABORT) == 0) {
						g_raid_tr_raid1_fail_disk(sd->sd_softc,
						    nsd, nsd->sd_disk);
					}
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}
rebuild_round_done:
				nsd = trs->trso_failed_sd;
				trs->trso_flags &= ~TR_RAID1_F_LOCKED;
				g_raid_unlock_range(sd->sd_volume,
				    bp->bio_offset, bp->bio_length);
				nsd->sd_rebuild_pos += bp->bio_length;
				if (nsd->sd_rebuild_pos >= nsd->sd_size) {
					g_raid_tr_raid1_rebuild_finish(tr);
					return;
				}

				/* Abort rebuild if we are stopping */
				if (trs->trso_stopping) {
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}

				if (--trs->trso_meta_update <= 0) {
					g_raid_write_metadata(vol->v_softc,
					    vol, nsd, nsd->sd_disk);
					trs->trso_meta_update =
					    g_raid1_rebuild_meta_update;
				}
				trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
				if (--trs->trso_recover_slabs <= 0)
					return;
				g_raid_tr_raid1_rebuild_some(tr);
			}
		} else if (trs->trso_type == TR_RAID1_RESYNC) {
			/*
			 * Read good sd, read bad sd in parallel.  When both
			 * are done, compare the buffers.  Write good to the
			 * bad if different.  Do the next bit of work.
			 */
			panic("Somehow, we think we're doing a resync");
		}
		return;
	}
	pbp = bp->bio_parent;
	pbp->bio_inbed++;
	if (bp->bio_cmd == BIO_READ && bp->bio_error != 0) {
		/*
		 * Read failed on first drive.  Retry the read error on
		 * another disk drive, if available, before erroring out the
		 * read.
		 */
		sd->sd_disk->d_read_errs++;
		G_RAID_LOGREQ(0, bp,
		    "Read error (%d), %d read errors total",
		    bp->bio_error, sd->sd_disk->d_read_errs);

		/*
		 * If there are too many read errors, we move to degraded.
		 * XXX Do we want to FAIL the drive (e.g., make the user redo
		 * everything to get it back in sync), or just degrade the
		 * drive, which kicks off a resync?
		 */
		do_write = 1;
		if (sd->sd_disk->d_read_errs > g_raid_read_err_thresh) {
			g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
			if (pbp->bio_children == 1)
				do_write = 0;
		}

		/*
		 * Find the other disk, and try to do the I/O to it.
		 */
		mask = (uintptr_t *)(&pbp->bio_driver2);
		if (pbp->bio_children == 1) {
			/* Save original subdisk. */
			pbp->bio_driver1 = do_write ? sd : NULL;
			*mask = 0;
		}
		*mask |= 1 << sd->sd_pos;
		nsd = g_raid_tr_raid1_select_read_disk(vol, pbp, *mask);
		if (nsd != NULL && (cbp = g_clone_bio(pbp)) != NULL) {
			g_destroy_bio(bp);
			G_RAID_LOGREQ(2, cbp, "Retrying read from %d",
			    nsd->sd_pos);
			if (pbp->bio_children == 2 && do_write) {
				sd->sd_recovery++;
				cbp->bio_caller1 = nsd;
				pbp->bio_pflags = G_RAID_BIO_FLAG_LOCKED;
				/* Lock callback starts I/O */
				g_raid_lock_range(sd->sd_volume,
				    cbp->bio_offset, cbp->bio_length, pbp, cbp);
			} else {
				g_raid_subdisk_iostart(nsd, cbp);
			}
			return;
		}
		/*
		 * We can't retry.  Return the original error by falling
		 * through.  This will happen when there's only one good disk.
		 * We don't need to fail the raid, since its actual state is
		 * based on the state of the subdisks.
		 */
		G_RAID_LOGREQ(2, bp, "Couldn't retry read, failing it");
	}
	if (bp->bio_cmd == BIO_READ &&
	    bp->bio_error == 0 &&
	    pbp->bio_children > 1 &&
	    pbp->bio_driver1 != NULL) {
		/*
		 * If it was a read, and bio_children is >1, then we just
		 * recovered the data from the second drive.  We should try to
		 * write that data to the first drive if sector remapping is
		 * enabled.  A write should put the data in a new place on the
		 * disk, remapping the bad sector.  Do we need to do that by
		 * queueing a request to the main worker thread?  It doesn't
		 * affect the return code of this current read, and can be
		 * done at our leisure.  However, to make the code simpler, it
		 * is done synchronously.
		 */
		G_RAID_LOGREQ(3, bp, "Recovered data from other drive");
		cbp = g_clone_bio(pbp);
		if (cbp != NULL) {
			g_destroy_bio(bp);
			cbp->bio_cmd = BIO_WRITE;
			cbp->bio_cflags = G_RAID_BIO_FLAG_REMAP;
			G_RAID_LOGREQ(2, cbp,
			    "Attempting bad sector remap on failing drive.");
			g_raid_subdisk_iostart(pbp->bio_driver1, cbp);
			return;
		}
	}
	if (pbp->bio_pflags & G_RAID_BIO_FLAG_LOCKED) {
		/*
		 * We're done with a recovery, mark the range as unlocked.
		 * For any write errors, we aggressively fail the disk since
		 * there was both a READ and a WRITE error at this location.
		 * Both types of errors generally indicate the drive is on
		 * the verge of total failure anyway.  Better to stop trusting
		 * it now.  However, we need to reset error to 0 in that case
		 * because we're not failing the original I/O which succeeded.
		 */
		if (bp->bio_cmd == BIO_WRITE && bp->bio_error) {
			G_RAID_LOGREQ(0, bp, "Remap write failed: "
			    "failing subdisk.");
			g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
			bp->bio_error = 0;
		}
		if (pbp->bio_driver1 != NULL) {
			((struct g_raid_subdisk *)pbp->bio_driver1)
			    ->sd_recovery--;
		}
		G_RAID_LOGREQ(2, bp, "REMAP done %d.", bp->bio_error);
		g_raid_unlock_range(sd->sd_volume, bp->bio_offset,
		    bp->bio_length);
	}
	error = bp->bio_error;
	g_destroy_bio(bp);
	if (pbp->bio_children == pbp->bio_inbed) {
		pbp->bio_completed = pbp->bio_length;
		g_raid_iodone(pbp, error);
	}
}

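/*
 * Mirror a kernel dump write to every subdisk that would accept a
 * regular write; report success if at least one copy succeeds.
 */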
static int
g_raid_tr_kerneldump_raid1(struct g_raid_tr_object *tr,
    void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	int error, i, ok;

	vol = tr->tro_volume;
	error = 0;
	ok = 0;
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		switch (sd->sd_state) {
		case G_RAID_SUBDISK_S_ACTIVE:
			break;
		case G_RAID_SUBDISK_S_REBUILD:
			/*
			 * When rebuilding, only part of this subdisk is
			 * writable, the rest will be written as part of the
			 * rebuild.
			 */
			if (offset >= sd->sd_rebuild_pos)
				continue;
			break;
		case G_RAID_SUBDISK_S_STALE:
		case G_RAID_SUBDISK_S_RESYNC:
			/*
			 * Resyncing still writes on the theory that the
			 * resync'd disk is very close and writing it will
			 * keep it that way better if we keep up while
			 * resyncing.
			 */
			break;
		default:
			continue;
		}
		error = g_raid_subdisk_kerneldump(sd,
		    virtual, physical, offset, length);
		if (error == 0)
			ok++;
	}
	return (ok > 0 ? 0 : error);
}

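/* Range-lock callback: start the bio that was waiting for the lock. */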
static int
g_raid_tr_locked_raid1(struct g_raid_tr_object *tr, void *argp)
{
	struct bio *bp;
	struct g_raid_subdisk *sd;

	bp = (struct bio *)argp;
	sd = (struct g_raid_subdisk *)bp->bio_caller1;
	g_raid_subdisk_iostart(sd, bp);

	return (0);
}

static int
g_raid_tr_idle_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;
	trs->trso_fair_io = g_raid1_rebuild_fair_io;
	trs->trso_recover_slabs = g_raid1_rebuild_cluster_idle;
	if (trs->trso_type == TR_RAID1_REBUILD)
		g_raid_tr_raid1_rebuild_some(tr);
	return (0);
}

static int
g_raid_tr_free_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;

	if (trs->trso_buffer != NULL) {
		free(trs->trso_buffer, M_TR_RAID1);
		trs->trso_buffer = NULL;
	}
	return (0);
}

G_RAID_TR_DECLARE(g_raid_tr_raid1);