 * Copyright (c) 1997, 1998, 1999
 * Nan Yang Computer Services Limited. All rights reserved.
 * Parts copyright (c) 1997, 1998 Cybernet Corporation, NetMAX project.
 * Written by Greg Lehey
 * This software is distributed under the so-called ``Berkeley
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 * This product includes software developed by Nan Yang Computer
 * 4. Neither the name of the Company nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 * This software is provided ``as is'', and any express or implied
 * warranties, including, but not limited to, the implied warranties of
 * merchantability and fitness for a particular purpose are disclaimed.
 * In no event shall the company or contributors be liable for any
 * direct, indirect, incidental, special, exemplary, or consequential
 * damages (including, but not limited to, procurement of substitute
 * goods or services; loss of use, data, or profits; or business
 * interruption) however caused and on any theory of liability, whether
 * in contract, strict liability, or tort (including negligence or
 * otherwise) arising in any way out of the use of this software, even if
 * advised of the possibility of such damage.
 * $Id: vinumrequest.c,v 1.26 1999/12/30 07:38:33 grog Exp grog $
#include <dev/vinum/vinumhdr.h>
#include <dev/vinum/request.h>
#include <sys/resourcevar.h>
enum requeststatus bre(struct request *rq,
enum requeststatus bre5(struct request *rq,
enum requeststatus build_read_request(struct request *rq, int volplexno);
enum requeststatus build_write_request(struct request *rq);
enum requeststatus build_rq_buffer(struct rqelement *rqe, struct plex *plex);
int find_alternate_sd(struct request *rq);
int check_range_covered(struct request *);
void complete_rqe(struct buf *bp);
void complete_raid5_write(struct rqelement *);
int abortrequest(struct request *rq, int error);
void sdio_done(struct buf *bp);
int vinum_bounds_check(struct buf *bp, struct volume *vol);
caddr_t allocdatabuf(struct rqelement *rqe);
void freedatabuf(struct rqelement *rqe);
struct rqinfo rqinfo[RQINFO_SIZE];
struct rqinfo *rqip = rqinfo;
logrq(enum rqinfo_type type, union rqinfou info, struct buf *ubp)
microtime(&rqip->timestamp); /* when did this happen? */
rqip->bp = ubp; /* user buffer */
case loginfo_user_bpl:
case loginfo_sdio: /* subdisk I/O */
case loginfo_sdiol: /* subdisk I/O launch */
case loginfo_sdiodone: /* subdisk I/O complete */
bcopy(info.bp, &rqip->info.b, sizeof(struct buf));
rqip->devmajor = major(info.bp->b_dev);
rqip->devminor = minor(info.bp->b_dev);
case loginfo_raid5_data:
case loginfo_raid5_parity:
bcopy(info.rqe, &rqip->info.rqe, sizeof(struct rqelement));
rqip->devmajor = major(info.rqe->b.b_dev);
rqip->devminor = minor(info.rqe->b.b_dev);
case loginfo_lockwait:
bcopy(info.lockinfo, &rqip->info.lockinfo, sizeof(struct rangelock));
if (rqip >= &rqinfo[RQINFO_SIZE]) /* wrap around */
vinumstrategy(struct buf *bp)
struct volume *vol = NULL;
switch (DEVTYPE(bp->b_dev)) {
case VINUM_RAWSD_TYPE:
 * In fact, vinum doesn't handle drives: they're
 * handled directly by the disk drivers
case VINUM_DRIVE_TYPE:
bp->b_error = EIO; /* I/O error */
bp->b_flags |= B_ERROR;
case VINUM_VOLUME_TYPE: /* volume I/O */
volno = Volno(bp->b_dev);
if (vol->state != volume_up) { /* can't access this volume */
bp->b_error = EIO; /* I/O error */
bp->b_flags |= B_ERROR;
if (vinum_bounds_check(bp, vol) <= 0) { /* don't like them bounds */
biodone(bp); /* have nothing to do with this */
 * Plex I/O is pretty much the same as volume I/O
 * for a single plex. Indicate this by passing a NULL
 * pointer (set above) for the volume
case VINUM_PLEX_TYPE:
case VINUM_RAWPLEX_TYPE:
bp->b_resid = bp->b_bcount; /* transfer everything */
 * Start a transfer. Return -1 on error,
 * 0 if OK, 1 if we need to retry.
 * Parameter reviveok is set when doing
 * transfers for revives: it allows transfers to
 * be started immediately when a revive is in
 * progress. During revive, normal transfers
 * are queued if they share address space with
 * a currently active revive operation.
vinumstart(struct buf *bp, int reviveok)
int maxplex; /* maximum number of plexes to handle */
struct request *rq; /* build up our request here */
enum requeststatus status;
if (debug & DEBUG_LASTREQS)
logrq(loginfo_user_bp, (union rqinfou) bp, bp);
if ((bp->b_bcount % DEV_BSIZE) != 0) { /* bad length */
bp->b_error = EINVAL; /* invalid size */
bp->b_flags |= B_ERROR;
rq = (struct request *) Malloc(sizeof(struct request)); /* allocate a request struct */
if (rq == NULL) { /* can't do it */
bp->b_error = ENOMEM; /* can't get memory */
bp->b_flags |= B_ERROR;
bzero(rq, sizeof(struct request));
 * Note the volume ID. This can be NULL, which
 * the request building functions use as an
 * indication for single plex I/O
rq->bp = bp; /* and the user buffer struct */
if (DEVTYPE(bp->b_dev) == VINUM_VOLUME_TYPE) { /* it's a volume, */
rq->volplex.volno = Volno(bp->b_dev); /* get the volume number */
vol = &VOL[rq->volplex.volno]; /* and point to it */
vol->active++; /* one more active request */
maxplex = vol->plexes; /* consider all its plexes */
vol = NULL; /* no volume */
rq->volplex.plexno = Plexno(bp->b_dev); /* point to the plex */
rq->isplex = 1; /* note that it's a plex */
maxplex = 1; /* just the one plex */
if (bp->b_iocmd == BIO_READ) {
 * This is a read request. Decide
 * which plex to read from.
 * There's a potential race condition here,
 * since we're not locked, and we could end
 * up multiply incrementing the round-robin
 * counter. This doesn't have any serious
plexno = vol->preferred_plex; /* get the plex to use */
if (plexno < 0) { /* round robin */
plexno = vol->last_plex_read;
vol->last_plex_read++;
if (vol->last_plex_read >= vol->plexes) /* got to the end? */
vol->last_plex_read = 0; /* wrap around */
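/*
 * Illustration (hypothetical figures): with preferred_plex < 0 and a
 * three-plex volume, successive volume reads use last_plex_read
 * 0, 1, 2, 0, ...  The unlocked increment above may occasionally skip
 * a plex, which, as noted above, is not serious.
 */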
status = build_read_request(rq, plexno); /* build a request */
daddr_t diskaddr = bp->b_blkno; /* start offset of transfer */
status = bre(rq, /* build a request list */
diskaddr + (bp->b_bcount / DEV_BSIZE));
if ((status > REQUEST_RECOVERED) /* can't satisfy it */
||(bp->b_flags & B_DONE)) { /* XXX shouldn't get this without bad status */
if (status == REQUEST_DOWN) { /* not enough subdisks */
bp->b_error = EIO; /* I/O error */
bp->b_flags |= B_ERROR;
return launch_requests(rq, reviveok); /* now start the requests if we can */
 * This is a write operation. We write to all plexes. If this is
 * a RAID-4 or RAID-5 plex, we must also update the parity stripe.
status = build_write_request(rq); /* Not all the subdisks are up */
} else { /* plex I/O */
diskstart = bp->b_blkno; /* start offset of transfer */
bp->b_blkno + (bp->b_bcount / DEV_BSIZE)); /* build requests for the plex */
if ((status > REQUEST_RECOVERED) /* can't satisfy it */
||(bp->b_flags & B_DONE)) { /* XXX shouldn't get this without bad status */
if (status == REQUEST_DOWN) { /* not enough subdisks */
bp->b_error = EIO; /* I/O error */
bp->b_flags |= B_ERROR;
if ((bp->b_flags & B_DONE) == 0)
return launch_requests(rq, reviveok); /* now start the requests if we can */
 * Call the low-level strategy routines to
 * perform the requests in a struct request
launch_requests(struct request *rq, int reviveok)
int rqno; /* loop index */
struct rqelement *rqe; /* current element */
int rcount; /* request count */
 * First find out whether we're reviving, and whether the
 * request contains a conflict. If so, we hang
 * the request off the waitlist (sd->waitlist) of the
 * subdisk which is being revived
if ((rq->flags & XFR_REVIVECONFLICT) /* possible revive conflict */
&&(!reviveok)) { /* and we don't want to do it now, */
struct request *waitlist; /* point to the waitlist */
if (sd->waitlist != NULL) { /* something there already, */
waitlist = sd->waitlist;
while (waitlist->next != NULL) /* find the end */
waitlist = waitlist->next;
waitlist->next = rq; /* hook our request there */
sd->waitlist = rq; /* hook our request at the front */
if (debug & DEBUG_REVIVECONFLICT)
"Revive conflict sd %d: %p\n%s dev %d.%d, offset 0x%x, length %ld\n",
rq->bp->b_iocmd == BIO_READ ? "Read" : "Write",
major(rq->bp->b_dev),
minor(rq->bp->b_dev),
return 0; /* and get out of here */
rq->active = 0; /* nothing yet */
if (debug & DEBUG_ADDRESSES)
"Request: %p\n%s dev %d.%d, offset 0x%x, length %ld\n",
rq->bp->b_iocmd == BIO_READ ? "Read" : "Write",
major(rq->bp->b_dev),
minor(rq->bp->b_dev),
vinum_conf.lastrq = rq;
vinum_conf.lastbuf = rq->bp;
if (debug & DEBUG_LASTREQS)
logrq(loginfo_user_bpl, (union rqinfou) rq->bp, rq->bp);
 * With the division of labour below (first count the requests, then
 * issue them), it's possible that we don't need this splbio()
 * protection. But I'll try that some other time.
for (rqg = rq->rqg; rqg != NULL; rqg = rqg->next) { /* through the whole request chain */
rqg->active = rqg->count; /* they're all active */
for (rqno = 0; rqno < rqg->count; rqno++) {
rqe = &rqg->rqe[rqno];
if (rqe->flags & XFR_BAD_SUBDISK) /* this subdisk is bad, */
rqg->active--; /* one less active request */
if (rqg->active) /* we have at least one active request, */
rq->active++; /* one more active request group */
/* Now fire off the requests */
for (rqg = rq->rqg; rqg != NULL;) { /* through the whole request chain */
if (rqg->lockbase >= 0) /* this rqg needs a lock first */
rqg->lock = lockrange(rqg->lockbase, rqg->rq->bp, &PLEX[rqg->plexno]);
for (rqno = 0; rqno < rcount;) {
rqe = &rqg->rqe[rqno];
 * Point to next rqg before the bottom end
 * changes the structures.
if (++rqno >= rcount)
if ((rqe->flags & XFR_BAD_SUBDISK) == 0) { /* this subdisk is good, */
drive = &DRIVE[rqe->driveno]; /* look at drive */
if (drive->active >= drive->maxactive)
drive->maxactive = drive->active;
if (vinum_conf.active >= vinum_conf.maxactive)
vinum_conf.maxactive = vinum_conf.active;
if (debug & DEBUG_ADDRESSES)
" %s dev %d.%d, sd %d, offset 0x%x, devoffset 0x%x, length %ld\n",
rqe->b.b_iocmd == BIO_READ ? "Read" : "Write",
(u_int) (rqe->b.b_blkno - SD[rqe->sdno].driveoffset),
if (debug & DEBUG_LASTREQS)
logrq(loginfo_rqe, (union rqinfou) rqe, rq->bp);
/* fire off the request */
DEV_STRATEGY(&rqe->b, 0);
 * define the low-level requests needed to perform a
 * high-level I/O operation for a specific plex 'plexno'.
 * Return REQUEST_OK if all subdisks involved in the request are up,
 * REQUEST_DOWN if some subdisks are not up, and REQUEST_EOF if the
 * request is at least partially outside the bounds of the subdisks.
 * Modify the pointer *diskaddr to point to the end address. On
 * read, return on the first bad subdisk, so that the caller
 * (build_read_request) can try alternatives.
 * On entry to this routine, the rqg structures are not assigned. The
 * assignment is performed by expandrq(). Strictly speaking, the
 * elements rqe->sdno of all entries should be set to -1, since 0
 * (from bzero) is a valid subdisk number. We avoid this problem by
 * initializing the ones we use, and not looking at the others (index
bre(struct request *rq,
struct buf *bp; /* user's bp */
enum requeststatus status; /* return value */
daddr_t plexoffset; /* offset of transfer in plex */
daddr_t stripebase; /* base address of stripe (1st subdisk) */
daddr_t stripeoffset; /* offset in stripe */
daddr_t blockoffset; /* offset in stripe on subdisk */
struct rqelement *rqe; /* point to this request information */
daddr_t diskstart = *diskaddr; /* remember where this transfer starts */
enum requeststatus s; /* temp return value */
bp = rq->bp; /* buffer pointer */
status = REQUEST_OK; /* return value: OK until proven otherwise */
plex = &PLEX[plexno]; /* point to the plex */
switch (plex->organization) {
sd = NULL; /* (keep compiler quiet) */
for (sdno = 0; sdno < plex->subdisks; sdno++) {
sd = &SD[plex->sdnos[sdno]];
if (*diskaddr < sd->plexoffset) /* we must have a hole, */
status = REQUEST_DEGRADED; /* note the fact */
if (*diskaddr < (sd->plexoffset + sd->sectors)) { /* the request starts in this subdisk */
rqg = allocrqg(rq, 1); /* space for the request */
if (rqg == NULL) { /* malloc failed */
bp->b_flags |= B_ERROR;
bp->b_error = ENOMEM;
return REQUEST_ENOMEM;
rqg->plexno = plexno;
rqe = &rqg->rqe[0]; /* point to the element */
rqe->rqg = rqg; /* group */
rqe->sdno = sd->sdno; /* put in the subdisk number */
plexoffset = *diskaddr; /* start offset in plex */
rqe->sdoffset = plexoffset - sd->plexoffset; /* start offset in subdisk */
rqe->useroffset = plexoffset - diskstart; /* start offset in user buffer */
rqe->datalen = min(diskend - *diskaddr, /* number of sectors to transfer in this sd */
sd->sectors - rqe->sdoffset);
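/*
 * Illustration (hypothetical layout): a concatenated plex with sd0 at
 * plexoffset 0 for 1000 sectors and sd1 at plexoffset 1000 for 2000
 * sectors.  A transfer that began at plex address 1400 and has been
 * built up as far as address 1500 falls in sd1: sdoffset = 1500 - 1000
 * = 500, useroffset = 1500 - 1400 = 100, and datalen is limited to the
 * 2000 - 500 = 1500 sectors remaining in sd1.
 */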
rqe->groupoffset = 0; /* no groups for concatenated plexes */
rqe->buflen = rqe->datalen; /* buffer length is data buffer length */
rqe->driveno = sd->driveno;
if (sd->state != sd_up) { /* *now* we find the sd is down */
s = checksdstate(sd, rq, *diskaddr, diskend); /* do we need to change state? */
if (s == REQUEST_DOWN) { /* down? */
rqe->flags = XFR_BAD_SUBDISK; /* yup */
if (rq->bp->b_iocmd == BIO_READ) /* read request, */
return REQUEST_DEGRADED; /* give up here */
 * If we're writing, don't give up
 * because of a bad subdisk. Go
 * through to the bitter end, but note
 * which ones we can't access.
status = REQUEST_DEGRADED; /* can't do it all */
*diskaddr += rqe->datalen; /* bump the address */
if (build_rq_buffer(rqe, plex)) { /* build the buffer */
bp->b_flags |= B_ERROR;
bp->b_error = ENOMEM;
return REQUEST_ENOMEM; /* can't do it */
if (*diskaddr == diskend) /* we're finished, */
break; /* get out of here */
 * We've got to the end of the plex. Have we got to the end of
 * the transfer? It would seem that having an offset beyond the
 * end of the subdisk is an error, but in fact it can happen if
 * the volume has another plex of different size. There's a valid
 * question as to why you would want to do this, but currently
 * In a previous version, I returned REQUEST_DOWN here. I think
 * REQUEST_EOF is more appropriate now.
if (diskend > sd->sectors + sd->plexoffset) /* pointing beyond EOF? */
status = REQUEST_EOF;
while (*diskaddr < diskend) { /* until we get it all sorted out */
if (*diskaddr >= plex->length) /* beyond the end of the plex */
return REQUEST_EOF; /* can't continue */
/* The offset of the start address from the start of the stripe. */
stripeoffset = *diskaddr % (plex->stripesize * plex->subdisks);
/* The plex-relative address of the start of the stripe. */
stripebase = *diskaddr - stripeoffset;
/* The number of the subdisk in which the start is located. */
sdno = stripeoffset / plex->stripesize;
/* The offset from the beginning of the stripe on this subdisk. */
blockoffset = stripeoffset % plex->stripesize;
sd = &SD[plex->sdnos[sdno]]; /* the subdisk in question */
rqg = allocrqg(rq, 1); /* space for the request */
if (rqg == NULL) { /* malloc failed */
bp->b_flags |= B_ERROR;
bp->b_error = ENOMEM;
return REQUEST_ENOMEM;
rqg->plexno = plexno;
rqe = &rqg->rqe[0]; /* point to the element */
rqe->sdoffset = stripebase / plex->subdisks + blockoffset; /* start offset in this subdisk */
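/*
 * Illustration (hypothetical figures): with stripesize 128 sectors and
 * 4 subdisks, a request at plex address 1000 gives stripeoffset
 * 1000 % 512 = 488, stripebase 512, sdno 488 / 128 = 3 and blockoffset
 * 488 % 128 = 104, so the transfer starts at offset 512 / 4 + 104 = 232
 * on subdisk 3 and can cover at most 128 - 104 = 24 sectors before the
 * next stripe.
 */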
rqe->useroffset = *diskaddr - diskstart; /* The offset of the start in the user buffer */
rqe->datalen = min(diskend - *diskaddr, /* the amount remaining to transfer */
plex->stripesize - blockoffset); /* and the amount left in this stripe */
rqe->groupoffset = 0; /* no groups for striped plexes */
rqe->buflen = rqe->datalen; /* buffer length is data buffer length */
rqe->sdno = sd->sdno; /* put in the subdisk number */
rqe->driveno = sd->driveno;
if (sd->state != sd_up) { /* *now* we find the sd is down */
s = checksdstate(sd, rq, *diskaddr, diskend); /* do we need to change state? */
if (s == REQUEST_DOWN) { /* down? */
rqe->flags = XFR_BAD_SUBDISK; /* yup */
if (rq->bp->b_iocmd == BIO_READ) /* read request, */
return REQUEST_DEGRADED; /* give up here */
 * If we're writing, don't give up
 * because of a bad subdisk. Go through
 * to the bitter end, but note which
 * ones we can't access.
status = REQUEST_DEGRADED; /* can't do it all */
 * It would seem that having an offset
 * beyond the end of the subdisk is an
 * error, but in fact it can happen if the
 * volume has another plex of different
 * size. There's a valid question as to why
 * you would want to do this, but currently
if (rqe->sdoffset + rqe->datalen > sd->sectors) { /* ends beyond the end of the subdisk? */
rqe->datalen = sd->sectors - rqe->sdoffset; /* truncate */
if (debug & DEBUG_EOFINFO) { /* tell on the request */
"vinum: EOF on plex %s, sd %s offset %x (user offset %x)\n",
"vinum: stripebase %x, stripeoffset %x, blockoffset %x\n",
if (build_rq_buffer(rqe, plex)) { /* build the buffer */
bp->b_flags |= B_ERROR;
bp->b_error = ENOMEM;
return REQUEST_ENOMEM; /* can't do it */
*diskaddr += rqe->datalen; /* look at the remainder */
if ((*diskaddr < diskend) /* didn't finish the request on this stripe */
&&(*diskaddr < plex->length)) { /* and there's more to come */
plex->multiblock++; /* count another one */
if (sdno == plex->subdisks - 1) /* last subdisk, */
plex->multistripe++; /* another stripe as well */
 * RAID-4 and RAID-5 are complicated enough to have their own
status = bre5(rq, plexno, diskaddr, diskend);
log(LOG_ERR, "vinum: invalid plex type %d in bre\n", plex->organization);
status = REQUEST_DOWN; /* can't access it */
 * Build up a request structure for reading volumes.
 * This function is not needed for plex reads, since there's
 * no recovery if a plex read can't be satisfied.
build_read_request(struct request *rq, /* request */
{ /* index in the volume's plex table */
daddr_t startaddr; /* offset of previous part of transfer */
daddr_t diskaddr; /* offset of current part of transfer */
daddr_t diskend; /* and end offset of transfer */
int plexno; /* plex index in vinum_conf */
struct rqgroup *rqg; /* point to the request we're working on */
struct volume *vol; /* volume in question */
int recovered = 0; /* set if we recover a read */
enum requeststatus status = REQUEST_OK;
int plexmask; /* bit mask of plexes, for recovery */
bp = rq->bp; /* buffer pointer */
diskaddr = bp->b_blkno; /* start offset of transfer */
diskend = diskaddr + (bp->b_bcount / DEV_BSIZE); /* and end offset of transfer */
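/*
 * Illustration (hypothetical figures): with b_blkno = 1000 and
 * b_bcount = 8192 bytes, DEV_BSIZE 512 gives a 16-sector transfer,
 * so diskend = 1016.
 */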
rqg = &rq->rqg[plexindex]; /* plex request */
vol = &VOL[rq->volplex.volno]; /* point to volume */
while (diskaddr < diskend) { /* build up request components */
startaddr = diskaddr;
status = bre(rq, vol->plex[plexindex], &diskaddr, diskend); /* build up a request */
case REQUEST_RECOVERED:
 * XXX FIXME if we have more than one plex, and we can
 * satisfy the request from another, don't use the
 * recovered request, since it's more expensive.
 * If we get here, our request is not complete. Try
 * to fill in the missing parts from another plex.
 * This can happen multiple times in this function,
 * and we reinitialize the plex mask each time, since
 * we could have a hole in our plexes.
case REQUEST_DOWN: /* can't access the plex */
case REQUEST_DEGRADED: /* can't access the plex */
plexmask = ((1 << vol->plexes) - 1) /* all plexes in the volume */
&~(1 << plexindex); /* except for the one we were looking at */
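/*
 * Illustration (hypothetical figures): for a volume with 3 plexes and
 * plexindex 1, plexmask = 0b111 & ~0b010 = 0b101, i.e. plexes 0 and 2
 * remain as candidates for recovering the read.
 */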
for (plexno = 0; plexno < vol->plexes; plexno++) {
if (plexmask == 0) /* no plexes left to try */
return REQUEST_DOWN; /* failed */
diskaddr = startaddr; /* start at the beginning again */
if (plexmask & (1 << plexno)) { /* we haven't tried this plex yet */
bre(rq, vol->plex[plexno], &diskaddr, diskend); /* try a request */
if (diskaddr > startaddr) { /* we satisfied another part */
recovered = 1; /* we recovered from the problem */
status = REQUEST_OK; /* don't complain about it */
if (diskaddr == startaddr) /* didn't get any further, */
vol->recovered_reads += recovered; /* adjust our recovery count */
 * Build up a request structure for writes.
 * Return REQUEST_OK if all subdisks involved in the request are up,
 * a degraded or down status if some subdisks are not up, and
 * REQUEST_EOF if the request is at least partially outside the
 * bounds of the subdisks.
build_write_request(struct request *rq)
daddr_t diskstart; /* offset of current part of transfer */
daddr_t diskend; /* and end offset of transfer */
int plexno; /* plex index in vinum_conf */
struct volume *vol; /* volume in question */
enum requeststatus status;
bp = rq->bp; /* buffer pointer */
vol = &VOL[rq->volplex.volno]; /* point to volume */
diskend = bp->b_blkno + (bp->b_bcount / DEV_BSIZE); /* end offset of transfer */
status = REQUEST_DOWN; /* assume the worst */
for (plexno = 0; plexno < vol->plexes; plexno++) {
diskstart = bp->b_blkno; /* start offset of transfer */
 * Build requests for the plex.
 * We take the best possible result here (min,
 * not max): we're happy if we can write at all
status = min(status, bre(rq,
/* Fill in the struct buf part of a request element. */
build_rq_buffer(struct rqelement *rqe, struct plex *plex)
struct sd *sd; /* point to subdisk */
struct buf *ubp; /* user (high level) buffer header */
vol = &VOL[rqe->rqg->rq->volplex.volno];
sd = &SD[rqe->sdno]; /* point to subdisk */
ubp = rqe->rqg->rq->bp; /* pointer to user buffer header */
/* Initialize the buf struct */
/* copy these flags from user bp */
bp->b_flags = ubp->b_flags & (B_ORDERED | B_NOCACHE | B_ASYNC);
bp->b_iocmd = ubp->b_iocmd; /* same operation as the user request */
BUF_LOCKINIT(bp); /* get a lock for the buffer */
BUF_LOCK(bp, LK_EXCLUSIVE); /* and lock it */
bp->b_iodone = complete_rqe; /* inform us when it's done by calling us here */
 * You'd think that we wouldn't need to even
 * build the request buffer for a dead subdisk,
 * but in some cases we need information like
 * the user buffer address. Err on the side of
 * generosity and supply what we can. That
 * obviously doesn't include drive information
 * when the drive is dead.
if ((rqe->flags & XFR_BAD_SUBDISK) == 0) { /* subdisk is accessible, */
bp->b_dev = DRIVE[rqe->driveno].dev; /* drive device */
bp->b_blkno = rqe->sdoffset + sd->driveoffset; /* start address */
bp->b_bcount = rqe->buflen << DEV_BSHIFT; /* number of bytes to transfer */
bp->b_resid = bp->b_bcount; /* and it's still all waiting */
bp->b_bufsize = bp->b_bcount; /* and buffer size */
bp->b_rcred = FSCRED; /* we have the file system credentials */
bp->b_wcred = FSCRED; /* we have the file system credentials */
if (rqe->flags & XFR_MALLOCED) { /* this operation requires a malloced buffer */
bp->b_data = Malloc(bp->b_bcount); /* get a buffer to put it in */
if (bp->b_data == NULL) { /* failed */
abortrequest(rqe->rqg->rq, ENOMEM);
return REQUEST_ENOMEM; /* no memory */
 * Point directly to user buffer data. This means
 * that we don't need to do anything when we have
 * finished the transfer
bp->b_data = ubp->b_data + rqe->useroffset * DEV_BSIZE;
 * On a recovery read, we perform an XOR of
 * all blocks to the user buffer. To make
 * this work, we first clean out the buffer
if ((rqe->flags & (XFR_RECOVERY_READ | XFR_BAD_SUBDISK))
== (XFR_RECOVERY_READ | XFR_BAD_SUBDISK)) { /* bad subdisk of a recovery read */
int length = rqe->grouplen << DEV_BSHIFT; /* and count involved */
char *data = (char *) &rqe->b.b_data[rqe->groupoffset << DEV_BSHIFT]; /* destination */
bzero(data, length); /* clean it out */
 * Abort a request: free resources and complete the
 * user request with the specified error
abortrequest(struct request *rq, int error)
struct buf *bp = rq->bp; /* user buffer */
bp->b_flags |= B_ERROR;
freerq(rq); /* free everything we're doing */
return error; /* and give up */
 * Check that our transfer will cover the
 * complete address space of the user request.
 * Return 1 if it can, otherwise 0
check_range_covered(struct request *rq)
/* Perform I/O on a subdisk */
if (debug & DEBUG_LASTREQS)
logrq(loginfo_sdio, (union rqinfou) bp, bp);
sd = &SD[Sdno(bp->b_dev)]; /* point to the subdisk */
drive = &DRIVE[sd->driveno];
if (drive->state != drive_up) {
if (sd->state >= sd_crashed) {
if (bp->b_iocmd == BIO_READ) /* reading, */
set_sd_state(sd->sdno, sd_crashed, setstate_force);
else if (bp->b_iocmd == BIO_WRITE) /* writing, */
set_sd_state(sd->sdno, sd_stale, setstate_force);
bp->b_flags |= B_ERROR;
 * We allow access to any kind of subdisk as long as we can expect
 * to get the I/O performed.
if (sd->state < sd_empty) { /* nothing to talk to, */
bp->b_flags |= B_ERROR;
sbp = (struct sdbuf *) Malloc(sizeof(struct sdbuf));
bp->b_flags |= B_ERROR;
bp->b_error = ENOMEM;
bzero(sbp, sizeof(struct sdbuf)); /* start with nothing */
sbp->b.b_flags = bp->b_flags;
sbp->b.b_bufsize = bp->b_bufsize; /* buffer size */
sbp->b.b_bcount = bp->b_bcount; /* number of bytes to transfer */
sbp->b.b_resid = bp->b_resid; /* and amount waiting */
sbp->b.b_dev = DRIVE[sd->driveno].dev; /* device */
sbp->b.b_data = bp->b_data; /* data buffer */
sbp->b.b_blkno = bp->b_blkno + sd->driveoffset;
sbp->b.b_iodone = sdio_done; /* come here on completion */
BUF_LOCKINIT(&sbp->b); /* get a lock for the buffer */
BUF_LOCK(&sbp->b, LK_EXCLUSIVE); /* and lock it */
sbp->bp = bp; /* note the address of the original header */
sbp->sdno = sd->sdno; /* note for statistics */
sbp->driveno = sd->driveno;
endoffset = bp->b_blkno + sbp->b.b_bcount / DEV_BSIZE; /* final sector offset */
if (endoffset > sd->sectors) { /* beyond the end */
sbp->b.b_bcount -= (endoffset - sd->sectors) * DEV_BSIZE; /* trim */
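/*
 * Illustration (hypothetical figures): for a 1000-sector subdisk, a
 * request at b_blkno 990 for 20 sectors gives endoffset 1010, so
 * b_bcount is trimmed by 10 sectors to keep the transfer inside the
 * subdisk.
 */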
if (sbp->b.b_bcount <= 0) { /* nothing to transfer */
bp->b_resid = bp->b_bcount; /* nothing transferred */
if (debug & DEBUG_ADDRESSES)
" %s dev %d.%d, sd %d, offset 0x%x, devoffset 0x%x, length %ld\n",
sbp->b.b_iocmd == BIO_READ ? "Read" : "Write",
(u_int) (sbp->b.b_blkno - SD[sbp->sdno].driveoffset),
(int) sbp->b.b_blkno,
if (debug & DEBUG_LASTREQS)
logrq(loginfo_sdiol, (union rqinfou) &sbp->b, &sbp->b);
DEV_STRATEGY(&sbp->b, 0);
 * Simplified version of bounds_check_with_label
 * Determine the size of the transfer, and make sure it is
 * within the boundaries of the partition. Adjust transfer
 * if needed, and signal errors or early completion.
 * Volumes are simpler than disk slices: they only contain
 * one component (though we call them a, b and c to make
 * system utilities happy), and they always take up the
 * complete space of the "partition".
 * I'm still not happy with this: why should the label be
 * protected? If it weren't so damned difficult to write
 * one in the first place (because it's protected), it wouldn't
vinum_bounds_check(struct buf *bp, struct volume *vol)
int maxsize = vol->size; /* size of the partition (sectors) */
int size = (bp->b_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT; /* size of this request (sectors) */
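/*
 * Illustration (hypothetical figures): a 1200-byte request with
 * DEV_BSIZE 512 rounds up to size = 3 sectors; a partial final sector
 * still counts as a whole sector for the bounds checks below.
 */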
/* Would this transfer overwrite the disk label? */
if (bp->b_blkno <= LABELSECTOR /* starts before or at the label */
&& bp->b_blkno + size > LABELSECTOR /* and finishes after */
&& (!(vol->flags & VF_RAW)) /* and it's not raw */
&& (bp->b_iocmd == BIO_WRITE) /* and it's a write */
&& ((vol->flags & (VF_WLABEL | VF_LABELLING)) == 0)) { /* and we're not allowed to write the label */
bp->b_error = EROFS; /* read-only */
bp->b_flags |= B_ERROR;
if (size == 0) /* no transfer specified, */
return 0; /* treat as EOF */
/* beyond partition? */
if (bp->b_blkno < 0 /* negative start */
|| bp->b_blkno + size > maxsize) { /* or goes beyond the end of the partition */
/* if exactly at end of disk, return an EOF */
if (bp->b_blkno == maxsize) {
bp->b_resid = bp->b_bcount;
/* or truncate if part of it fits */
size = maxsize - bp->b_blkno;
if (size <= 0) { /* nothing to transfer */
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_bcount = size << DEV_BSHIFT;
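/*
 * Illustration (hypothetical figures): for a 1000-sector volume, a
 * 20-sector request starting at block 990 is truncated to size = 10
 * sectors (5120 bytes) so that it ends exactly at the end of the
 * volume.
 */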
bp->b_pblkno = bp->b_blkno;
 * Allocate a request group and hook
 * it into the list for rq
allocrqg(struct request *rq, int elements)
struct rqgroup *rqg; /* the one we're going to allocate */
int size = sizeof(struct rqgroup) + elements * sizeof(struct rqelement);
rqg = (struct rqgroup *) Malloc(size);
if (rqg != NULL) { /* malloc OK, */
if (rq->rqg) /* we already have requests */
rq->lrqg->next = rqg; /* hang it off the end */
else /* first request */
rq->rqg = rqg; /* at the start */
rq->lrqg = rqg; /* this one is the last in the list */
bzero(rqg, size); /* no old junk */
rqg->rq = rq; /* point back to the parent request */
rqg->count = elements; /* number of requests in the group */
rqg->lockbase = -1; /* no lock required yet */
 * Deallocate a request group out of a chain. We do
 * this by linear search: the chain is short, this
 * almost never happens, and currently it can only
 * happen to the first member of the chain.
deallocrqg(struct rqgroup *rqg)
struct rqgroup *rqgc = rqg->rq->rqg; /* point to the request chain */
if (rqg->lock) /* got a lock? */
unlockrange(rqg->plexno, rqg->lock); /* yes, free it */
if (rqgc == rqg) /* we're first in line */
rqg->rq->rqg = rqg->next; /* unhook ourselves */
while ((rqgc->next != NULL) /* find the group */
&&(rqgc->next != rqg))
if (rqgc->next == NULL)
"vinum deallocrqg: rqg %p not found in request %p\n",
rqgc->next = rqg->next; /* make the chain jump over us */