/*-
 * Copyright (c) 2013 Peter Grehan <grehan@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/queue.h>
#include <sys/errno.h>
#include <sys/ioctl.h>

#include <pthread_np.h>

#include <machine/atomic.h>

#define BLOCKIF_SIG	0xb109b109

#define BLOCKIF_NUMTHR	8
#define BLOCKIF_MAXREQ	(64 + BLOCKIF_NUMTHR)
	TAILQ_ENTRY(blockif_elem) be_link;
	struct blockif_req *be_req;
	enum blockstat be_status;

	pthread_t bc_btid[BLOCKIF_NUMTHR];
	pthread_mutex_t bc_mtx;
	pthread_cond_t bc_cond;

	/* Request elements and free/pending/busy queues */
	TAILQ_HEAD(, blockif_elem) bc_freeq;
	TAILQ_HEAD(, blockif_elem) bc_pendq;
	TAILQ_HEAD(, blockif_elem) bc_busyq;
	struct blockif_elem bc_reqs[BLOCKIF_MAXREQ];

static pthread_once_t blockif_once = PTHREAD_ONCE_INIT;

struct blockif_sig_elem {
	pthread_mutex_t bse_mtx;
	pthread_cond_t bse_cond;
	int bse_pending;
	struct blockif_sig_elem *bse_next;
};

static struct blockif_sig_elem *blockif_bse_head;
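
/*
 * Take an element off the free queue, bind it to the request, and append it
 * to the pending queue.  If another pending or in-flight request already
 * targets the same offset, the new element is marked BST_BLOCK so it is held
 * back; otherwise it is marked BST_PEND.  The return value is non-zero when
 * a worker thread should be woken to service the request.
 */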
blockif_enqueue(struct blockif_ctxt *bc, struct blockif_req *breq,
    enum blockop op)
{
	struct blockif_elem *be, *tbe;
	off_t off;
	int i;

	be = TAILQ_FIRST(&bc->bc_freeq);
	assert(be->be_status == BST_FREE);
	TAILQ_REMOVE(&bc->bc_freeq, be, be_link);

	off = breq->br_offset;
	for (i = 0; i < breq->br_iovcnt; i++)
		off += breq->br_iov[i].iov_len;

	TAILQ_FOREACH(tbe, &bc->bc_pendq, be_link) {
		if (tbe->be_block == breq->br_offset)
			break;
	}
	if (tbe == NULL) {
		TAILQ_FOREACH(tbe, &bc->bc_busyq, be_link) {
			if (tbe->be_block == breq->br_offset)
				break;
		}
	}
	if (tbe == NULL)
		be->be_status = BST_PEND;
	else
		be->be_status = BST_BLOCK;
	TAILQ_INSERT_TAIL(&bc->bc_pendq, be, be_link);
	return (be->be_status == BST_PEND);
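
/*
 * Find the first runnable (BST_PEND) element on the pending queue, mark it
 * BST_BUSY, record the worker thread that will service it, and move it to
 * the busy queue.  Elements in BST_BLOCK state are skipped over.
 */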
blockif_dequeue(struct blockif_ctxt *bc, pthread_t t, struct blockif_elem **bep)
{
	struct blockif_elem *be;

	TAILQ_FOREACH(be, &bc->bc_pendq, be_link) {
		if (be->be_status == BST_PEND)
			break;
		assert(be->be_status == BST_BLOCK);
	}
	TAILQ_REMOVE(&bc->bc_pendq, be, be_link);
	be->be_status = BST_BUSY;
	be->be_tid = t;
	*bep = be;
	TAILQ_INSERT_TAIL(&bc->bc_busyq, be, be_link);
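
/*
 * Retire a finished or cancelled element: drop it from the busy or pending
 * queue, let any request that was blocked on the same offset become pending
 * again, and return the element to the free queue.
 */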
blockif_complete(struct blockif_ctxt *bc, struct blockif_elem *be)
{
	struct blockif_elem *tbe;

	if (be->be_status == BST_DONE || be->be_status == BST_BUSY)
		TAILQ_REMOVE(&bc->bc_busyq, be, be_link);
	else
		TAILQ_REMOVE(&bc->bc_pendq, be, be_link);
	TAILQ_FOREACH(tbe, &bc->bc_pendq, be_link) {
		if (tbe->be_req->br_offset == be->be_block)
			tbe->be_status = BST_PEND;
	}
	be->be_status = BST_FREE;
	TAILQ_INSERT_TAIL(&bc->bc_freeq, be, be_link);
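
/*
 * Carry out the actual I/O for one element.  Single-segment reads and writes
 * go straight through preadv()/pwritev(); multi-segment requests are staged
 * through the caller-supplied bounce buffer in MAXPHYS-sized chunks.  Flushes
 * use the DIOCGFLUSH ioctl on character devices and fsync() otherwise, and
 * deletes use DIOCGDELETE when the device supports it.  The request callback
 * is invoked with the resulting error code.
 */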
blockif_proc(struct blockif_ctxt *bc, struct blockif_elem *be, uint8_t *buf)
{
	struct blockif_req *br;
	off_t arg[2];
	ssize_t clen, len, off, boff, voff;
	int i, err;

	br = be->be_req;
	if (br->br_iovcnt <= 1)
		buf = NULL;

	if ((len = preadv(bc->bc_fd, br->br_iov, br->br_iovcnt,
	while (br->br_resid > 0) {
		len = MIN(br->br_resid, MAXPHYS);
		if (pread(bc->bc_fd, buf, len, br->br_offset +
			clen = MIN(len - boff, br->br_iov[i].iov_len -
			memcpy(br->br_iov[i].iov_base + voff,
			if (clen < br->br_iov[i].iov_len - voff)
		} while (boff < len);

	if ((len = pwritev(bc->bc_fd, br->br_iov, br->br_iovcnt,
	while (br->br_resid > 0) {
		len = MIN(br->br_resid, MAXPHYS);
			clen = MIN(len - boff, br->br_iov[i].iov_len -
			    br->br_iov[i].iov_base + voff, clen);
			if (clen < br->br_iov[i].iov_len - voff)
		} while (boff < len);
		if (pwrite(bc->bc_fd, buf, len, br->br_offset +

	if (ioctl(bc->bc_fd, DIOCGFLUSH))
	} else if (fsync(bc->bc_fd))

	if (!bc->bc_candelete)
	else if (bc->bc_rdonly)
	else if (bc->bc_ischr) {
		arg[0] = br->br_offset;
		arg[1] = br->br_resid;
		if (ioctl(bc->bc_fd, DIOCGDELETE, arg))

	be->be_status = BST_DONE;

	(*br->br_callback)(br, err);
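
/*
 * Worker thread body.  Each thread owns a MAXPHYS-sized bounce buffer and
 * loops pulling elements off the pending queue, dropping the context lock
 * around the actual I/O and retiring the element afterwards.  When no work
 * is pending the thread sleeps on bc_cond until a new request (or shutdown)
 * wakes it.
 */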
blockif_thr(void *arg)
{
	struct blockif_ctxt *bc;
	struct blockif_elem *be;

	buf = malloc(MAXPHYS);

	pthread_mutex_lock(&bc->bc_mtx);
	while (blockif_dequeue(bc, t, &be)) {
		pthread_mutex_unlock(&bc->bc_mtx);
		blockif_proc(bc, be, buf);
		pthread_mutex_lock(&bc->bc_mtx);
		blockif_complete(bc, be);
	}
	/* Check ctxt status here to see if exit requested */
	pthread_cond_wait(&bc->bc_cond, &bc->bc_mtx);
	pthread_mutex_unlock(&bc->bc_mtx);
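
/*
 * SIGCONT handler used by the cancellation path: atomically pop waiting
 * blockif_sig_elem entries off blockif_bse_head and signal each waiter so a
 * blocked blockif_cancel() caller can make progress.
 */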
blockif_sigcont_handler(int signal, enum ev_type type, void *arg)
{
	struct blockif_sig_elem *bse;

	/* Process the entire list even if not intended for this thread. */
	do {
		bse = blockif_bse_head;
	} while (!atomic_cmpset_ptr((uintptr_t *)&blockif_bse_head,
	    (uintptr_t)bse, (uintptr_t)bse->bse_next));

	pthread_mutex_lock(&bse->bse_mtx);
	bse->bse_pending = 0;
	pthread_cond_signal(&bse->bse_cond);
	pthread_mutex_unlock(&bse->bse_mtx);
}

static void
blockif_init(void)
{
	mevent_add(SIGCONT, EVF_SIGNAL, blockif_sigcont_handler, NULL);
	(void) signal(SIGCONT, SIG_IGN);
}
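
/*
 * Open a backing store.  The option string is a pathname optionally followed
 * by nocache, sync/direct, ro and sectorsize=logical[/physical] settings.
 * The backing file or device is opened and probed for size, sector size and
 * delete support, Capsicum rights are applied when enabled, and the worker
 * threads are created.
 */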
struct blockif_ctxt *
blockif_open(const char *optstr, const char *ident)
{
	char tname[MAXCOMLEN + 1];
	char name[MAXPATHLEN];
	char *nopt, *xopts, *cp;
	struct blockif_ctxt *bc;
	struct stat sbuf;
	struct diocgattr_arg arg;
	off_t size, psectsz, psectoff;
	int extra, fd, i, sectsz;
	int nocache, sync, ro, candelete, geom, ssopt, pssopt;
#ifndef WITHOUT_CAPSICUM
	cap_rights_t rights;
	cap_ioctl_t cmds[] = { DIOCGFLUSH, DIOCGDELETE };
#endif

	pthread_once(&blockif_once, blockif_init);

	/*
	 * The first element in the optstring is always a pathname.
	 * Optional elements follow.
	 */
	nopt = xopts = strdup(optstr);
	while (xopts != NULL) {
		cp = strsep(&xopts, ",");
		if (cp == nopt)		/* file or device pathname */
		else if (!strcmp(cp, "nocache"))
		else if (!strcmp(cp, "sync") || !strcmp(cp, "direct"))
		else if (!strcmp(cp, "ro"))
		else if (sscanf(cp, "sectorsize=%d/%d", &ssopt, &pssopt) == 2)
		else if (sscanf(cp, "sectorsize=%d", &ssopt) == 1)
			fprintf(stderr, "Invalid device option \"%s\"\n", cp);

	fd = open(nopt, (ro ? O_RDONLY : O_RDWR) | extra);
		/* The r/w open failed; retry as read-only */
		fd = open(nopt, O_RDONLY | extra);

		warn("Could not open backing file: %s", nopt);

	if (fstat(fd, &sbuf) < 0) {
		warn("Could not stat backing file %s", nopt);

#ifndef WITHOUT_CAPSICUM
	cap_rights_init(&rights, CAP_FSYNC, CAP_IOCTL, CAP_READ, CAP_SEEK,
	    CAP_WRITE);
	if (ro)
		cap_rights_clear(&rights, CAP_FSYNC, CAP_WRITE);
	if (cap_rights_limit(fd, &rights) == -1 && errno != ENOSYS)
		errx(EX_OSERR, "Unable to apply rights for sandbox");

	/*
	 * Deal with raw devices
	 */
	psectsz = psectoff = 0;
	candelete = geom = 0;
	if (S_ISCHR(sbuf.st_mode)) {
		if (ioctl(fd, DIOCGMEDIASIZE, &size) < 0 ||
		    ioctl(fd, DIOCGSECTORSIZE, &sectsz)) {
			perror("Could not fetch dev blk/sector size");
		}
		if (ioctl(fd, DIOCGSTRIPESIZE, &psectsz) == 0 && psectsz > 0)
			ioctl(fd, DIOCGSTRIPEOFFSET, &psectoff);
		strlcpy(arg.name, "GEOM::candelete", sizeof(arg.name));
		arg.len = sizeof(arg.value.i);
		if (ioctl(fd, DIOCGATTR, &arg) == 0)
			candelete = arg.value.i;
		if (ioctl(fd, DIOCGPROVIDERNAME, name) == 0)
			geom = 1;
	} else
		psectsz = sbuf.st_blksize;

#ifndef WITHOUT_CAPSICUM
	if (cap_ioctls_limit(fd, cmds, nitems(cmds)) == -1 && errno != ENOSYS)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
#endif

	if (!powerof2(ssopt) || !powerof2(pssopt) || ssopt < 512 ||
	    ssopt > pssopt) {
		fprintf(stderr, "Invalid sector size %d/%d\n",
		    ssopt, pssopt);

	/*
	 * Some backend drivers (e.g. cd0, ada0) require that the I/O
	 * size be a multiple of the device's sector size.
	 *
	 * Validate that the emulated sector size complies with this
	 * requirement.
	 */
	if (S_ISCHR(sbuf.st_mode)) {
		if (ssopt < sectsz || (ssopt % sectsz) != 0) {
			fprintf(stderr, "Sector size %d incompatible "
			    "with underlying device sector size %d\n",
			    ssopt, sectsz);

	bc = calloc(1, sizeof(struct blockif_ctxt));

	bc->bc_magic = BLOCKIF_SIG;
	bc->bc_ischr = S_ISCHR(sbuf.st_mode);
	bc->bc_isgeom = geom;
	bc->bc_candelete = candelete;
	bc->bc_sectsz = sectsz;
	bc->bc_psectsz = psectsz;
	bc->bc_psectoff = psectoff;
	pthread_mutex_init(&bc->bc_mtx, NULL);
	pthread_cond_init(&bc->bc_cond, NULL);
	TAILQ_INIT(&bc->bc_freeq);
	TAILQ_INIT(&bc->bc_pendq);
	TAILQ_INIT(&bc->bc_busyq);
	for (i = 0; i < BLOCKIF_MAXREQ; i++) {
		bc->bc_reqs[i].be_status = BST_FREE;
		TAILQ_INSERT_HEAD(&bc->bc_freeq, &bc->bc_reqs[i], be_link);
	}

	for (i = 0; i < BLOCKIF_NUMTHR; i++) {
		pthread_create(&bc->bc_btid[i], NULL, blockif_thr, bc);
		snprintf(tname, sizeof(tname), "blk-%s-%d", ident, i);
		pthread_set_name_np(bc->bc_btid[i], tname);
	}
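
/*
 * Common entry point for read/write/flush/delete requests: with the context
 * lock held, enqueue the request if a free element is available and wake a
 * worker thread, otherwise fail because the advertised queue depth has been
 * exceeded.
 */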
blockif_request(struct blockif_ctxt *bc, struct blockif_req *breq,
    enum blockop op)
{
	pthread_mutex_lock(&bc->bc_mtx);
	if (!TAILQ_EMPTY(&bc->bc_freeq)) {
		/*
		 * Enqueue and inform the block i/o thread
		 * that there is work available
		 */
		if (blockif_enqueue(bc, breq, op))
			pthread_cond_signal(&bc->bc_cond);
	} else {
		/*
		 * Callers are not allowed to enqueue more than
		 * the specified blockif queue limit. Return an
		 * error to indicate that the queue length has been
		 * exceeded.
		 */
	}
	pthread_mutex_unlock(&bc->bc_mtx);

blockif_read(struct blockif_ctxt *bc, struct blockif_req *breq)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (blockif_request(bc, breq, BOP_READ));
}

blockif_write(struct blockif_ctxt *bc, struct blockif_req *breq)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (blockif_request(bc, breq, BOP_WRITE));
}

blockif_flush(struct blockif_ctxt *bc, struct blockif_req *breq)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (blockif_request(bc, breq, BOP_FLUSH));
}

blockif_delete(struct blockif_ctxt *bc, struct blockif_req *breq)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (blockif_request(bc, breq, BOP_DELETE));
}
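
/*
 * Cancel a previously submitted request.  A request still sitting on the
 * pending queue is retired directly; one that is already being processed is
 * interrupted by sending SIGCONT to its worker thread, and the caller waits
 * on a blockif_sig_elem handshake until the signal has been handled.
 */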
blockif_cancel(struct blockif_ctxt *bc, struct blockif_req *breq)
{
	struct blockif_elem *be;

	assert(bc->bc_magic == BLOCKIF_SIG);

	pthread_mutex_lock(&bc->bc_mtx);
	/*
	 * Check pending requests.
	 */
	TAILQ_FOREACH(be, &bc->bc_pendq, be_link) {
		if (be->be_req == breq)
	blockif_complete(bc, be);
	pthread_mutex_unlock(&bc->bc_mtx);

	/*
	 * Check in-flight requests.
	 */
	TAILQ_FOREACH(be, &bc->bc_busyq, be_link) {
		if (be->be_req == breq)
	pthread_mutex_unlock(&bc->bc_mtx);

	/*
	 * Interrupt the processing thread to force it to return
	 * prematurely via its normal callback path.
	 */
	while (be->be_status == BST_BUSY) {
		struct blockif_sig_elem bse, *old_head;

		pthread_mutex_init(&bse.bse_mtx, NULL);
		pthread_cond_init(&bse.bse_cond, NULL);
		bse.bse_pending = 1;

		do {
			old_head = blockif_bse_head;
			bse.bse_next = old_head;
		} while (!atomic_cmpset_ptr((uintptr_t *)&blockif_bse_head,
		    (uintptr_t)old_head, (uintptr_t)&bse));

		pthread_kill(be->be_tid, SIGCONT);

		pthread_mutex_lock(&bse.bse_mtx);
		while (bse.bse_pending)
			pthread_cond_wait(&bse.bse_cond, &bse.bse_mtx);
		pthread_mutex_unlock(&bse.bse_mtx);
	}

	pthread_mutex_unlock(&bc->bc_mtx);

	/*
	 * The processing thread has been interrupted.  Since it's not
	 * clear if the callback has been invoked yet, return EBUSY.
	 */
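
/*
 * Shut down a blockif context: wake all worker threads so they can observe
 * the exit request, then join them before the context is released.
 */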
blockif_close(struct blockif_ctxt *bc)
{
	assert(bc->bc_magic == BLOCKIF_SIG);

	/*
	 * Stop the block i/o thread
	 */
	pthread_mutex_lock(&bc->bc_mtx);
	pthread_mutex_unlock(&bc->bc_mtx);
	pthread_cond_broadcast(&bc->bc_cond);
	for (i = 0; i < BLOCKIF_NUMTHR; i++)
		pthread_join(bc->bc_btid[i], &jval);

	/* XXX Cancel queued i/o's ??? */

/*
 * Return virtual C/H/S values for a given block.  Use the algorithm
 * outlined in the VHD specification to calculate values.
 */
blockif_chs(struct blockif_ctxt *bc, uint16_t *c, uint8_t *h, uint8_t *s)
{
	off_t sectors;		/* total sectors of the block dev */
	off_t hcyl;		/* cylinders times heads */
	uint16_t secpt;		/* sectors per track */
	uint8_t heads;

	assert(bc->bc_magic == BLOCKIF_SIG);

	sectors = bc->bc_size / bc->bc_sectsz;

	/* Clamp the size to the largest possible with CHS */
	if (sectors > 65535UL*16*255)
		sectors = 65535UL*16*255;

	if (sectors >= 65536UL*16*63) {
		hcyl = sectors / secpt;
		hcyl = sectors / secpt;
		heads = (hcyl + 1023) / 1024;

		if (hcyl >= (heads * 1024) || heads > 16) {
			hcyl = sectors / secpt;
		if (hcyl >= (heads * 1024)) {
			hcyl = sectors / secpt;

blockif_size(struct blockif_ctxt *bc)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (bc->bc_size);
}

blockif_sectsz(struct blockif_ctxt *bc)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (bc->bc_sectsz);
}

blockif_psectsz(struct blockif_ctxt *bc, int *size, int *off)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	*size = bc->bc_psectsz;
	*off = bc->bc_psectoff;
}

blockif_queuesz(struct blockif_ctxt *bc)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (BLOCKIF_MAXREQ - 1);
}

blockif_is_ro(struct blockif_ctxt *bc)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (bc->bc_rdonly);
}

blockif_candelete(struct blockif_ctxt *bc)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (bc->bc_candelete);
}