/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004, 2007 Lukas Ertl
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include <geom/vinum/geom_vinum_var.h>
#include <geom/vinum/geom_vinum_raid5.h>
#include <geom/vinum/geom_vinum.h>
static int	gv_raid5_offset(struct gv_plex *, off_t, off_t,
		    off_t *, off_t *, int *, int *, int);
static struct bio *	gv_raid5_clone_bio(struct bio *, struct gv_sd *,
			    struct gv_raid5_packet *, caddr_t, int);
static int	gv_raid5_request(struct gv_plex *, struct gv_raid5_packet *,
		    struct bio *, caddr_t, off_t, off_t, int *);
static int	gv_raid5_check(struct gv_plex *, struct gv_raid5_packet *,
		    struct bio *, caddr_t, off_t, off_t);
static int	gv_raid5_rebuild(struct gv_plex *, struct gv_raid5_packet *,
		    struct bio *, caddr_t, off_t, off_t);
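/*
 * Build a work packet (gv_raid5_packet) for a request on a RAID5 plex and
 * queue its component BIOs on the plex's bqueue.  Returns the packet, or
 * NULL if the request was delayed or failed; in the failure case the
 * original BIO has already been delivered with an error.
 */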
struct gv_raid5_packet *
gv_raid5_start(struct gv_plex *p, struct bio *bp, caddr_t addr, off_t boff,
    off_t bcount)
{
	struct bio *cbp;
	struct gv_raid5_packet *wp, *wp2;
	struct gv_bioq *bq, *bq2;
	int err, delay;

	delay = 0;
	wp = g_malloc(sizeof(*wp), M_WAITOK | M_ZERO);
	wp->bio = bp;
	wp->waiting = NULL;
	wp->parity = NULL;
	TAILQ_INIT(&wp->bits);
	if (bp->bio_pflags & GV_BIO_REBUILD)
		err = gv_raid5_rebuild(p, wp, bp, addr, boff, bcount);
	else if (bp->bio_pflags & GV_BIO_CHECK)
		err = gv_raid5_check(p, wp, bp, addr, boff, bcount);
	else
		err = gv_raid5_request(p, wp, bp, addr, boff, bcount, &delay);
	/* Means we have a delayed request; the BIO was requeued already. */
	if (delay) {
		g_free(wp);
		return (NULL);
	}
	/*
	 * Building the sub-request failed; we probably need to clean up a
	 * lot.
	 */
	if (err) {
		G_VINUM_LOGREQ(0, bp, "raid5 plex request failed.");
		TAILQ_FOREACH_SAFE(bq, &wp->bits, queue, bq2) {
			TAILQ_REMOVE(&wp->bits, bq, queue);
			g_free(bq);
		}
		if (wp->waiting != NULL) {
			if (wp->waiting->bio_cflags & GV_BIO_MALLOC)
				g_free(wp->waiting->bio_data);
			gv_drive_done(wp->waiting->bio_caller1);
			g_destroy_bio(wp->waiting);
		}
		if (wp->parity != NULL) {
			if (wp->parity->bio_cflags & GV_BIO_MALLOC)
				g_free(wp->parity->bio_data);
			gv_drive_done(wp->parity->bio_caller1);
			g_destroy_bio(wp->parity);
		}
		g_free(wp);
		/* Drop all other work packets that belong to this BIO. */
		TAILQ_FOREACH_SAFE(wp, &p->packets, list, wp2) {
			if (wp->bio != bp)
				continue;

			TAILQ_REMOVE(&p->packets, wp, list);
			TAILQ_FOREACH_SAFE(bq, &wp->bits, queue, bq2) {
				TAILQ_REMOVE(&wp->bits, bq, queue);
				g_free(bq);
			}
			g_free(wp);
		}
		/* Drain and destroy all sub-requests still queued. */
		cbp = bioq_takefirst(p->bqueue);
		while (cbp != NULL) {
			if (cbp->bio_cflags & GV_BIO_MALLOC)
				g_free(cbp->bio_data);
			gv_drive_done(cbp->bio_caller1);
			g_destroy_bio(cbp);
			cbp = bioq_takefirst(p->bqueue);
		}
		/* If internal, stop and reset state. */
		if (bp->bio_pflags & GV_BIO_INTERNAL) {
			if (bp->bio_pflags & GV_BIO_MALLOC)
				g_free(bp->bio_data);

			/* Reset flags. */
			p->flags &= ~(GV_PLEX_SYNCING | GV_PLEX_REBUILDING |
			    GV_PLEX_GROWING);
		}
		g_io_deliver(bp, err);
		return (NULL);
	}

	return (wp);
}
/*
 * Check if the stripe that the work packet wants is already being used by
 * some other work packet.
 */
int
gv_stripe_active(struct gv_plex *p, struct bio *bp)
{
	struct gv_raid5_packet *wp, *owp;
	int overlap;

	wp = bp->bio_caller2;
	if (wp->lockbase == -1)
		return (0);

	overlap = 0;
	TAILQ_FOREACH(owp, &p->packets, list) {
		if (owp == wp)
			break;

		/* Our request starts inside an active packet's range. */
		if ((wp->lockbase >= owp->lockbase) &&
		    (wp->lockbase <= owp->lockbase + owp->length)) {
			overlap++;
			break;
		}

		/* Our request covers an active packet's start. */
		if ((wp->lockbase <= owp->lockbase) &&
		    (wp->lockbase + wp->length >= owp->lockbase)) {
			overlap++;
			break;
		}
	}

	return (overlap);
}
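/*
 * Check one stripe's worth of parity: read all data subdisks plus the
 * parity subdisk, and prepare a spare BIO that buffers the XOR of the data
 * stripes and, if a rebuild of the parity is requested, writes it back out.
 */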
static int
gv_raid5_check(struct gv_plex *p, struct gv_raid5_packet *wp, struct bio *bp,
    caddr_t addr, off_t boff, off_t bcount)
{
	struct gv_sd *parity, *s;
	struct gv_bioq *bq;
	struct bio *cbp;
	int i, psdno;
	off_t real_len, real_off;

	if (p == NULL || LIST_EMPTY(&p->subdisks))
		return (ENXIO);

	gv_raid5_offset(p, boff, bcount, &real_off, &real_len, NULL, &psdno, 1);
	/* Find the right subdisk. */
	parity = NULL;
	i = 0;
	LIST_FOREACH(s, &p->subdisks, in_plex) {
		if (i == psdno) {
			parity = s;
			break;
		}
		i++;
	}

	/* Parity stripe not found. */
	if (parity == NULL)
		return (ENXIO);

	/* The parity subdisk isn't up, so we cannot check. */
	if (parity->state != GV_SD_UP)
		return (ENXIO);

	wp->length = real_len;
	wp->data = addr;
	wp->lockbase = real_off;
	/* Read all subdisks. */
	LIST_FOREACH(s, &p->subdisks, in_plex) {
		/* Skip the parity subdisk. */
		if (s == parity)
			continue;
		/* Skip growing subdisks. */
		if (s->flags & GV_SD_GROW)
			continue;

		cbp = gv_raid5_clone_bio(bp, s, wp, NULL, 1);
		if (cbp == NULL)
			return (ENOMEM);
		cbp->bio_cmd = BIO_READ;

		bioq_insert_tail(p->bqueue, cbp);

		bq = g_malloc(sizeof(*bq), M_WAITOK | M_ZERO);
		bq->bp = cbp;
		TAILQ_INSERT_TAIL(&wp->bits, bq, queue);
	}
	/* Read the parity data. */
	cbp = gv_raid5_clone_bio(bp, parity, wp, NULL, 1);
	if (cbp == NULL)
		return (ENOMEM);
	cbp->bio_cmd = BIO_READ;
	wp->waiting = cbp;

	/*
	 * In case we want to rebuild the parity, create an extra BIO to write
	 * it out.  It also acts as a buffer for the XOR operations.
	 */
	cbp = gv_raid5_clone_bio(bp, parity, wp, addr, 1);
	if (cbp == NULL)
		return (ENOMEM);
	wp->parity = cbp;

	return (0);
}
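/*
 * Note that rebuilding leans on the usual RAID5 XOR identity: the parity
 * stripe is the XOR of all data stripes, so the content of any single dead
 * subdisk equals the XOR of all the surviving ones.
 */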
/* Rebuild a degraded RAID5 plex. */
static int
gv_raid5_rebuild(struct gv_plex *p, struct gv_raid5_packet *wp, struct bio *bp,
    caddr_t addr, off_t boff, off_t bcount)
{
	struct gv_sd *broken, *s;
	struct gv_bioq *bq;
	struct bio *cbp;
	off_t real_len, real_off;

	if (p == NULL || LIST_EMPTY(&p->subdisks))
		return (ENXIO);

	gv_raid5_offset(p, boff, bcount, &real_off, &real_len, NULL, NULL, 1);
	/* Find the broken subdisk. */
	broken = NULL;
	LIST_FOREACH(s, &p->subdisks, in_plex) {
		if (s->state != GV_SD_UP)
			broken = s;
	}

	/* Broken stripe not found. */
	if (broken == NULL)
		return (ENXIO);
	switch (broken->state) {
	case GV_SD_UP:
		return (EINVAL);

	case GV_SD_STALE:
		if (!(bp->bio_pflags & GV_BIO_REBUILD))
			return (ENXIO);

		G_VINUM_DEBUG(1, "sd %s is reviving", broken->name);
		gv_set_sd_state(broken, GV_SD_REVIVING, GV_SETSTATE_FORCE);
		/* Set this bit now, although it should be set at the end. */
		broken->flags |= GV_SD_CANGOUP;
		break;

	case GV_SD_REVIVING:
		break;

	default:
		/* All other subdisk states mean it's not accessible. */
		return (ENXIO);
	}
	wp->length = real_len;
	wp->data = addr;
	wp->lockbase = real_off;

	KASSERT(wp->length >= 0, ("gv_raid5_rebuild: wp->length < 0"));
	/* Read all subdisks. */
	LIST_FOREACH(s, &p->subdisks, in_plex) {
		/* Skip the broken subdisk. */
		if (s == broken)
			continue;
		/* Skip growing subdisks. */
		if (s->flags & GV_SD_GROW)
			continue;

		cbp = gv_raid5_clone_bio(bp, s, wp, NULL, 1);
		if (cbp == NULL)
			return (ENOMEM);
		cbp->bio_cmd = BIO_READ;

		bioq_insert_tail(p->bqueue, cbp);

		bq = g_malloc(sizeof(*bq), M_WAITOK | M_ZERO);
		bq->bp = cbp;
		TAILQ_INSERT_TAIL(&wp->bits, bq, queue);
	}
	/*
	 * Write the rebuilt stripe (the XOR of the reads above) out to the
	 * broken subdisk.
	 */
	cbp = gv_raid5_clone_bio(bp, broken, wp, NULL, 1);
	if (cbp == NULL)
		return (ENOMEM);
	wp->parity = cbp;

	p->synced = boff;

	/* Post notification that we're finished. */
	return (0);
}
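/*
 * Note on the normal write path below: RAID5 updates parity by
 * read-modify-write,
 *
 *	parity' = parity ^ old_data ^ new_data
 *
 * which is why a plain write turns into two reads (old parity, old data)
 * followed by two writes (new data, new parity).
 */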
/* Build a request group to perform (part of) a RAID5 request. */
static int
gv_raid5_request(struct gv_plex *p, struct gv_raid5_packet *wp,
    struct bio *bp, caddr_t addr, off_t boff, off_t bcount, int *delay)
{
	struct g_geom *gp;
	struct gv_sd *broken, *original, *parity, *s;
	struct gv_bioq *bq;
	struct bio *cbp;
	int i, psdno, sdno, type, grow;
	off_t real_len, real_off;

	gp = bp->bio_to->geom;

	if (p == NULL || LIST_EMPTY(&p->subdisks))
		return (ENXIO);
	/* We are optimistic and assume that this request will be OK. */
#define	REQ_TYPE_NORMAL		0
#define	REQ_TYPE_DEGRADED	1
#define	REQ_TYPE_NOPARITY	2

	type = REQ_TYPE_NORMAL;
	original = parity = broken = NULL;
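	/*
	 * REQ_TYPE_NORMAL: every subdisk this request touches is up.
	 * REQ_TYPE_DEGRADED: the data subdisk is down; data must be
	 * reconstructed from (or encoded into) the parity.
	 * REQ_TYPE_NOPARITY: the parity subdisk is down; data is accessed
	 * directly without maintaining parity.
	 */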
	/*
	 * XXX: The resize won't crash with rebuild or sync, but we should
	 * still be aware of it.  Also, this should perhaps be done on
	 * rebuild/check as well.
	 */
	/* If we're beyond the synced offset, we must use the old geometry. */
	if (boff >= p->synced) {
		grow = 1;
	/* If entirely below the synced offset, we can use all drives. */
	} else if (boff + bcount <= p->synced) {
		grow = 0;
	/* Else we're in the middle of the grow and must wait a bit. */
	} else {
		bioq_disksort(p->rqueue, bp);
		*delay = 1;
		return (0);
	}

	gv_raid5_offset(p, boff, bcount, &real_off, &real_len,
	    &sdno, &psdno, grow);
	/* Find the right subdisks. */
	i = 0;
	LIST_FOREACH(s, &p->subdisks, in_plex) {
		if (i == sdno)
			original = s;
		if (i == psdno)
			parity = s;
		if (s->state != GV_SD_UP)
			broken = s;
		i++;
	}

	if ((original == NULL) || (parity == NULL))
		return (ENXIO);
	/* Our data stripe is missing. */
	if (original->state != GV_SD_UP)
		type = REQ_TYPE_DEGRADED;

	/* If it's a synchronizing request, just write it when disks are stale. */
	if (original->state == GV_SD_STALE && parity->state == GV_SD_STALE &&
	    (bp->bio_pflags & GV_BIO_SYNCREQ) && bp->bio_cmd == BIO_WRITE) {
		type = REQ_TYPE_NORMAL;
	/* Our parity stripe is missing. */
	} else if (parity->state != GV_SD_UP) {
		/* We cannot take another failure if we're already degraded. */
		if (type != REQ_TYPE_NORMAL)
			return (ENXIO);

		type = REQ_TYPE_NOPARITY;
	}
	wp->length = real_len;
	wp->data = addr;
	wp->lockbase = real_off;

	KASSERT(wp->length >= 0, ("gv_raid5_request: wp->length < 0"));
	if ((p->flags & GV_PLEX_REBUILDING) && (boff + real_len < p->synced))
		type = REQ_TYPE_NORMAL;

	if ((p->flags & GV_PLEX_REBUILDING) && (boff + real_len >= p->synced)) {
		bioq_disksort(p->rqueue, bp);
		*delay = 1;
		return (0);
	}
	switch (bp->bio_cmd) {
	case BIO_READ:
		/*
		 * For a degraded read we need to read in all stripes except
		 * the broken one plus the parity stripe and then recalculate
		 * the desired data.
		 */
		if (type == REQ_TYPE_DEGRADED) {
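			/*
			 * The buffer is zeroed below so that the XOR of the
			 * sub-reads can be accumulated straight into it as
			 * they complete; what remains is the missing stripe.
			 */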
			bzero(wp->data, wp->length);
			LIST_FOREACH(s, &p->subdisks, in_plex) {
				/* Skip the broken subdisk. */
				if (s == broken)
					continue;
				/* Skip growing subdisks in old geometry. */
				if (grow && s->flags & GV_SD_GROW)
					continue;
				cbp = gv_raid5_clone_bio(bp, s, wp, NULL, 1);
				if (cbp == NULL)
					return (ENOMEM);

				bioq_insert_tail(p->bqueue, cbp);

				bq = g_malloc(sizeof(*bq), M_WAITOK | M_ZERO);
				bq->bp = cbp;
				TAILQ_INSERT_TAIL(&wp->bits, bq, queue);
			}
		/* A normal read can be fulfilled with the original subdisk. */
		} else {
			cbp = gv_raid5_clone_bio(bp, original, wp, addr, 0);
			if (cbp == NULL)
				return (ENOMEM);

			bioq_insert_tail(p->bqueue, cbp);
		}
		wp->lockbase = -1;

		break;

	case BIO_WRITE:
		/*
		 * A degraded write means we cannot write to the original data
		 * subdisk.  Thus we need to read in all valid stripes,
		 * recalculate the parity from the original data, and then
		 * write the parity stripe back out.
		 */
		if (type == REQ_TYPE_DEGRADED) {
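			/*
			 * The new data is never written out itself; it
			 * survives only encoded into the new parity, from
			 * which later degraded reads reconstruct it.
			 */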
			/* Read all subdisks. */
			LIST_FOREACH(s, &p->subdisks, in_plex) {
				/* Skip the broken and the parity subdisk. */
				if ((s == broken) || (s == parity))
					continue;
				/* Skip growing subdisks in old geometry. */
				if (grow && s->flags & GV_SD_GROW)
					continue;

				cbp = gv_raid5_clone_bio(bp, s, wp, NULL, 1);
				if (cbp == NULL)
					return (ENOMEM);
				cbp->bio_cmd = BIO_READ;

				bioq_insert_tail(p->bqueue, cbp);

				bq = g_malloc(sizeof(*bq), M_WAITOK | M_ZERO);
				bq->bp = cbp;
				TAILQ_INSERT_TAIL(&wp->bits, bq, queue);
			}
			/* Write the parity data. */
			cbp = gv_raid5_clone_bio(bp, parity, wp, NULL, 1);
			if (cbp == NULL)
				return (ENOMEM);
			bcopy(addr, cbp->bio_data, wp->length);
			wp->parity = cbp;
		/*
		 * When the parity stripe is missing we just write out the
		 * data.
		 */
		} else if (type == REQ_TYPE_NOPARITY) {
			cbp = gv_raid5_clone_bio(bp, original, wp, addr, 1);
			if (cbp == NULL)
				return (ENOMEM);

			bioq_insert_tail(p->bqueue, cbp);

			bq = g_malloc(sizeof(*bq), M_WAITOK | M_ZERO);
			bq->bp = cbp;
			TAILQ_INSERT_TAIL(&wp->bits, bq, queue);
		/*
		 * A normal write request goes to the original subdisk, then we
		 * read in all other stripes, recalculate the parity and write
		 * out the parity again.
		 */
		} else {
			/* Read old parity. */
			cbp = gv_raid5_clone_bio(bp, parity, wp, NULL, 1);
			if (cbp == NULL)
				return (ENOMEM);
			cbp->bio_cmd = BIO_READ;

			bioq_insert_tail(p->bqueue, cbp);

			bq = g_malloc(sizeof(*bq), M_WAITOK | M_ZERO);
			bq->bp = cbp;
			TAILQ_INSERT_TAIL(&wp->bits, bq, queue);
			/* Read old data. */
			cbp = gv_raid5_clone_bio(bp, original, wp, NULL, 1);
			if (cbp == NULL)
				return (ENOMEM);
			cbp->bio_cmd = BIO_READ;

			bioq_insert_tail(p->bqueue, cbp);

			bq = g_malloc(sizeof(*bq), M_WAITOK | M_ZERO);
			bq->bp = cbp;
			TAILQ_INSERT_TAIL(&wp->bits, bq, queue);
			/* Write new data. */
			cbp = gv_raid5_clone_bio(bp, original, wp, addr, 1);
			if (cbp == NULL)
				return (ENOMEM);

			/*
			 * We must not write the new data until the old data
			 * has been read, so hold this BIO back until we're
			 * ready for it.
			 */
			wp->waiting = cbp;

			/* The final BIO for the parity. */
			cbp = gv_raid5_clone_bio(bp, parity, wp, NULL, 1);
			if (cbp == NULL)
				return (ENOMEM);

			/* Remember that this is the BIO for the parity data. */
			wp->parity = cbp;
		}
		break;

	default:
		return (EINVAL);
	}

	return (0);
}
/*
 * Calculate the offsets in the various subdisks for a RAID5 request.  Also
 * take care of new subdisks in an expanded RAID5 array.
 * XXX: This assumes that the new subdisks are inserted after the others
 * (which is okay as long as plex_offset is larger).  If subdisks are
 * inserted into the plexlist before, we get problems.
 */
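/*
 * For example, with sdcount = 4 and a 64 kB stripe size, one stripe spans
 * 3 * 64 kB = 192 kB of plex address space, and the parity rotates
 * backwards through the subdisks from stripe to stripe.  A request at
 * boff = 200 kB lands in the second stripe: psd = 3 - (200k / 192k) % 4 = 2,
 * stripeoff = 8 kB, sd = 0 (below psd, so no skip), stripestart =
 * 192 kB / 3 = 64 kB, real_off = 72 kB, and at most 56 kB remain in the
 * stripe.
 */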
static int
gv_raid5_offset(struct gv_plex *p, off_t boff, off_t bcount, off_t *real_off,
    off_t *real_len, int *sdno, int *psdno, int growing)
{
	struct gv_sd *s;
	int sd, psd, sdcount;
	off_t len_left, stripeend, stripeoff, stripestart;

	sdcount = p->sdcount;
	if (growing) {
		/* Don't count subdisks that are still being grown into. */
		LIST_FOREACH(s, &p->subdisks, in_plex) {
			if (s->flags & GV_SD_GROW)
				sdcount--;
		}
	}
	/* The number of the subdisk containing the parity stripe. */
	psd = sdcount - 1 - (boff / (p->stripesize * (sdcount - 1))) %
	    sdcount;
	KASSERT(psd >= 0, ("gv_raid5_offset: psd < 0"));
	/* Offset of the start address from the start of the stripe. */
	stripeoff = boff % (p->stripesize * (sdcount - 1));
	KASSERT(stripeoff >= 0, ("gv_raid5_offset: stripeoff < 0"));

	/* The number of the subdisk where the stripe resides. */
	sd = stripeoff / p->stripesize;
	KASSERT(sd >= 0, ("gv_raid5_offset: sd < 0"));
	/* At or past the parity subdisk, the data lives one subdisk further. */
	if (sd >= psd)
		sd++;

	/* The offset of the stripe on this subdisk. */
	stripestart = (boff - stripeoff) / (sdcount - 1);
	KASSERT(stripestart >= 0, ("gv_raid5_offset: stripestart < 0"));
	stripeoff %= p->stripesize;

	/* The offset of the request on this subdisk. */
	*real_off = stripestart + stripeoff;

	stripeend = stripestart + p->stripesize;
	len_left = stripeend - *real_off;
	KASSERT(len_left >= 0, ("gv_raid5_offset: len_left < 0"));

	/* Clip the request to the end of this stripe. */
	*real_len = (bcount <= len_left) ? bcount : len_left;

	if (sdno != NULL)
		*sdno = sd;
	if (psdno != NULL)
		*psdno = psd;

	return (0);
}
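/*
 * Clone a BIO for one subdisk of a work packet.  With addr == NULL a
 * zeroed scratch buffer is allocated and flagged GV_BIO_MALLOC so that it
 * is freed again on completion.
 */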
static struct bio *
gv_raid5_clone_bio(struct bio *bp, struct gv_sd *s, struct gv_raid5_packet *wp,
    caddr_t addr, int use_wp)
{
	struct bio *cbp;

	cbp = g_clone_bio(bp);
	if (cbp == NULL)
		return (NULL);
	if (addr == NULL) {
		cbp->bio_data = g_malloc(wp->length, M_WAITOK | M_ZERO);
		cbp->bio_cflags |= GV_BIO_MALLOC;
	} else
		cbp->bio_data = addr;
	cbp->bio_offset = wp->lockbase + s->drive_offset;
	cbp->bio_length = wp->length;
	cbp->bio_done = gv_done;
	cbp->bio_caller1 = s;
	s->drive_sc->active++;
	if (use_wp)
		cbp->bio_caller2 = wp;

	return (cbp);
}