/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 Peter Grehan <grehan@freebsd.org>
 * All rights reserved.
 * Copyright 2020 Joyent, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/queue.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <sys/disk.h>

#ifndef WITHOUT_CAPSICUM
#include <capsicum_helpers.h>
#endif
#include <assert.h>
#include <err.h>
#include <fcntl.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <pthread_np.h>
#include <sysexits.h>
#include <unistd.h>

#include <machine/atomic.h>
#include <machine/vmm_snapshot.h>

#include "bhyverun.h"
#include "config.h"
#include "debug.h"
#include "mevent.h"
#include "pci_emul.h"
#include "block_if.h"
#define BLOCKIF_SIG	0xb109b109

#define BLOCKIF_NUMTHR	8
#define BLOCKIF_MAXREQ	(BLOCKIF_RING_MAX + BLOCKIF_NUMTHR)
enum blockop {
	BOP_READ,
	BOP_WRITE,
	BOP_FLUSH,
	BOP_DELETE
};

enum blockstat {
	BST_FREE,
	BST_BLOCK,
	BST_PEND,
	BST_BUSY,
	BST_DONE
};

struct blockif_elem {
	TAILQ_ENTRY(blockif_elem) be_link;
	struct blockif_req	*be_req;
	enum blockop		be_op;
	enum blockstat		be_status;
	pthread_t		be_tid;
	off_t			be_block;
};
struct blockif_ctxt {
	unsigned int		bc_magic;
	int			bc_fd;
	int			bc_ischr;
	int			bc_isgeom;
	int			bc_candelete;
	int			bc_rdonly;
	off_t			bc_size;
	int			bc_sectsz;
	int			bc_psectsz;
	int			bc_psectoff;
	int			bc_closing;
	int			bc_paused;
	pthread_t		bc_btid[BLOCKIF_NUMTHR];
	pthread_mutex_t		bc_mtx;
	pthread_cond_t		bc_cond;
	pthread_cond_t		bc_work_done_cond;
	blockif_resize_cb	*bc_resize_cb;
	void			*bc_resize_cb_arg;
	struct mevent		*bc_resize_event;
	int			bc_bootindex;

	/* Request elements and free/pending/busy queues */
	TAILQ_HEAD(, blockif_elem) bc_freeq;
	TAILQ_HEAD(, blockif_elem) bc_pendq;
	TAILQ_HEAD(, blockif_elem) bc_busyq;
	struct blockif_elem	bc_reqs[BLOCKIF_MAXREQ];
};
static pthread_once_t blockif_once = PTHREAD_ONCE_INIT;
struct blockif_sig_elem {
	pthread_mutex_t		bse_mtx;
	pthread_cond_t		bse_cond;
	int			bse_pending;
	struct blockif_sig_elem	*bse_next;
};

static struct blockif_sig_elem *blockif_bse_head;
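/*
 * Grab an element off the free list and append it to the pending
 * queue.  A request whose starting offset matches the end block of a
 * request already pending or in flight is marked BST_BLOCK so that
 * back-to-back I/O completes in submission order; otherwise it is
 * immediately runnable (BST_PEND).  Returns non-zero for the latter
 * case.  Called with bc_mtx held.
 */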
static int
blockif_enqueue(struct blockif_ctxt *bc, struct blockif_req *breq,
    enum blockop op)
{
	struct blockif_elem *be, *tbe;
	off_t off;
	int i;

	be = TAILQ_FIRST(&bc->bc_freeq);
	assert(be != NULL);
	assert(be->be_status == BST_FREE);
	TAILQ_REMOVE(&bc->bc_freeq, be, be_link);
	be->be_req = breq;
	be->be_op = op;

	off = breq->br_offset;
	for (i = 0; i < breq->br_iovcnt; i++)
		off += breq->br_iov[i].iov_len;
	be->be_block = off;

	TAILQ_FOREACH(tbe, &bc->bc_pendq, be_link) {
		if (tbe->be_block == breq->br_offset)
			break;
	}
	if (tbe == NULL) {
		TAILQ_FOREACH(tbe, &bc->bc_busyq, be_link) {
			if (tbe->be_block == breq->br_offset)
				break;
		}
	}
	if (tbe == NULL)
		be->be_status = BST_PEND;
	else
		be->be_status = BST_BLOCK;
	TAILQ_INSERT_TAIL(&bc->bc_pendq, be, be_link);
	return (be->be_status == BST_PEND);
}
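/*
 * Find the first runnable (BST_PEND) element on the pending queue,
 * mark it busy, record the worker thread that will service it, and
 * move it to the busy queue.  Returns non-zero if an element was
 * dequeued.  Called with bc_mtx held.
 */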
static int
blockif_dequeue(struct blockif_ctxt *bc, pthread_t t, struct blockif_elem **bep)
{
	struct blockif_elem *be;

	TAILQ_FOREACH(be, &bc->bc_pendq, be_link) {
		if (be->be_status == BST_PEND)
			break;
		assert(be->be_status == BST_BLOCK);
	}
	if (be == NULL)
		return (0);
	TAILQ_REMOVE(&bc->bc_pendq, be, be_link);
	be->be_status = BST_BUSY;
	be->be_tid = t;
	TAILQ_INSERT_TAIL(&bc->bc_busyq, be, be_link);
	*bep = be;
	return (1);
}
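/*
 * Return a finished element to the free list and promote any request
 * that was blocked behind it (i.e. whose offset matches this
 * element's end block) to BST_PEND.  Called with bc_mtx held.
 */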
static void
blockif_complete(struct blockif_ctxt *bc, struct blockif_elem *be)
{
	struct blockif_elem *tbe;

	if (be->be_status == BST_DONE || be->be_status == BST_BUSY)
		TAILQ_REMOVE(&bc->bc_busyq, be, be_link);
	else
		TAILQ_REMOVE(&bc->bc_pendq, be, be_link);
	TAILQ_FOREACH(tbe, &bc->bc_pendq, be_link) {
		if (tbe->be_req->br_offset == be->be_block)
			tbe->be_status = BST_PEND;
	}
	be->be_tid = 0;
	be->be_status = BST_FREE;
	be->be_req = NULL;
	TAILQ_INSERT_TAIL(&bc->bc_freeq, be, be_link);
}
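/*
 * Flush the backing store: character devices are flushed with the
 * DIOCGFLUSH ioctl, regular files with fsync().  Returns 0 or an
 * errno value.
 */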
static int
blockif_flush_bc(struct blockif_ctxt *bc)
{
	if (bc->bc_ischr) {
		if (ioctl(bc->bc_fd, DIOCGFLUSH))
			return (errno);
	} else if (fsync(bc->bc_fd))
		return (errno);

	return (0);
}
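/*
 * Execute a single request on the calling worker thread.  For
 * GEOM-backed devices, multi-segment transfers are staged through the
 * per-thread MAXPHYS bounce buffer ("buf") so the device sees one
 * contiguous transfer per chunk; otherwise preadv()/pwritev() operate
 * on the scatter list directly.  The submitter's completion callback
 * is invoked once the operation finishes.
 */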
static void
blockif_proc(struct blockif_ctxt *bc, struct blockif_elem *be, uint8_t *buf)
{
	struct spacectl_range range;
	struct blockif_req *br;
	off_t arg[2];
	ssize_t n;
	size_t clen, len, off, boff, voff;
	int i, err;

	br = be->be_req;
	assert(br->br_resid >= 0);
	if (br->br_iovcnt <= 1)
		buf = NULL;
	err = 0;
	switch (be->be_op) {
	case BOP_READ:
		if (buf == NULL) {
			if ((n = preadv(bc->bc_fd, br->br_iov, br->br_iovcnt,
			    br->br_offset)) < 0)
				err = errno;
			else
				br->br_resid -= n;
			break;
		}
		i = 0;
		off = voff = 0;
		while (br->br_resid > 0) {
			len = MIN(br->br_resid, MAXPHYS);
			n = pread(bc->bc_fd, buf, len, br->br_offset + off);
			if (n < 0) {
				err = errno;
				break;
			}
			len = (size_t)n;
			boff = 0;
			do {
				clen = MIN(len - boff, br->br_iov[i].iov_len -
				    voff);
				memcpy((uint8_t *)br->br_iov[i].iov_base + voff,
				    buf + boff, clen);
				if (clen < br->br_iov[i].iov_len - voff)
					voff += clen;
				else {
					i++;
					voff = 0;
				}
				boff += clen;
			} while (boff < len);
			off += len;
			br->br_resid -= len;
		}
		break;
	case BOP_WRITE:
		if (bc->bc_rdonly) {
			err = EROFS;
			break;
		}
		if (buf == NULL) {
			if ((n = pwritev(bc->bc_fd, br->br_iov, br->br_iovcnt,
			    br->br_offset)) < 0)
				err = errno;
			else
				br->br_resid -= n;
			break;
		}
		i = 0;
		off = voff = 0;
		while (br->br_resid > 0) {
			len = MIN(br->br_resid, MAXPHYS);
			boff = 0;
			do {
				clen = MIN(len - boff, br->br_iov[i].iov_len -
				    voff);
				memcpy(buf + boff,
				    (uint8_t *)br->br_iov[i].iov_base + voff,
				    clen);
				if (clen < br->br_iov[i].iov_len - voff)
					voff += clen;
				else {
					i++;
					voff = 0;
				}
				boff += clen;
			} while (boff < len);

			n = pwrite(bc->bc_fd, buf, len, br->br_offset + off);
			if (n < 0) {
				err = errno;
				break;
			}
			off += n;
			br->br_resid -= n;
		}
		break;
	case BOP_FLUSH:
		err = blockif_flush_bc(bc);
		break;
	case BOP_DELETE:
		if (!bc->bc_candelete)
			err = EOPNOTSUPP;
		else if (bc->bc_rdonly)
			err = EROFS;
		else if (bc->bc_ischr) {
			arg[0] = br->br_offset;
			arg[1] = br->br_resid;
			if (ioctl(bc->bc_fd, DIOCGDELETE, arg))
				err = errno;
			else
				br->br_resid = 0;
		} else {
			range.r_offset = br->br_offset;
			range.r_len = br->br_resid;

			while (range.r_len > 0) {
				if (fspacectl(bc->bc_fd, SPACECTL_DEALLOC,
				    &range, 0, &range) != 0) {
					err = errno;
					break;
				}
			}
			if (err == 0)
				br->br_resid = 0;
		}
		break;
	default:
		err = EINVAL;
		break;
	}

	be->be_status = BST_DONE;

	(*br->br_callback)(br, err);
}
static bool
blockif_empty(const struct blockif_ctxt *bc)
{
	return (TAILQ_EMPTY(&bc->bc_pendq) && TAILQ_EMPTY(&bc->bc_busyq));
}
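/*
 * Worker thread body.  Each of the BLOCKIF_NUMTHR threads sleeps on
 * bc_cond, drains whatever requests are runnable (dropping bc_mtx
 * while the actual I/O is in progress), and exits once bc_closing is
 * set by blockif_close().
 */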
static void *
blockif_thr(void *arg)
{
	struct blockif_ctxt *bc;
	struct blockif_elem *be;
	pthread_t t;
	uint8_t *buf;

	bc = arg;
	if (bc->bc_isgeom)
		buf = malloc(MAXPHYS);
	else
		buf = NULL;
	t = pthread_self();

	pthread_mutex_lock(&bc->bc_mtx);
	for (;;) {
		while (blockif_dequeue(bc, t, &be)) {
			pthread_mutex_unlock(&bc->bc_mtx);
			blockif_proc(bc, be, buf);
			pthread_mutex_lock(&bc->bc_mtx);
			blockif_complete(bc, be);
		}

		/* If no work remains, notify the main thread */
		if (blockif_empty(bc))
			pthread_cond_broadcast(&bc->bc_work_done_cond);

		/* Check ctxt status here to see if exit was requested */
		if (bc->bc_closing)
			break;

		pthread_cond_wait(&bc->bc_cond, &bc->bc_mtx);
	}
	pthread_mutex_unlock(&bc->bc_mtx);

	free(buf);
	pthread_exit(NULL);
	return (NULL);
}
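/*
 * SIGCONT handler, used by blockif_cancel() to interrupt a worker
 * stuck in a slow system call.  It atomically unlinks each waiter
 * from the global list and signals it; the list is processed in full
 * because the signal may be delivered to any worker thread.
 */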
static void
blockif_sigcont_handler(int signal __unused, enum ev_type type __unused,
    void *arg __unused)
{
	struct blockif_sig_elem *bse;

	for (;;) {
		/*
		 * Process the entire list even if not intended for
		 * this thread.
		 */
		do {
			bse = blockif_bse_head;
			if (bse == NULL)
				return;
		} while (!atomic_cmpset_ptr((uintptr_t *)&blockif_bse_head,
		    (uintptr_t)bse,
		    (uintptr_t)bse->bse_next));

		pthread_mutex_lock(&bse->bse_mtx);
		bse->bse_pending = 0;
		pthread_cond_signal(&bse->bse_cond);
		pthread_mutex_unlock(&bse->bse_mtx);
	}
}
static void
blockif_init(void)
{
	mevent_add(SIGCONT, EVF_SIGNAL, blockif_sigcont_handler, NULL);
	(void) signal(SIGCONT, SIG_IGN);
}
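/*
 * Translate the legacy "path[,option=value...]" string into nvlist
 * configuration: the text before the first comma becomes "path" and
 * the remainder is parsed by pci_parse_legacy_config().
 */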
int
blockif_legacy_config(nvlist_t *nvl, const char *opts)
{
	char *cp, *path;

	if (opts == NULL)
		return (0);

	cp = strchr(opts, ',');
	if (cp == NULL) {
		set_config_value_node(nvl, "path", opts);
		return (0);
	}
	path = strndup(opts, cp - opts);
	set_config_value_node(nvl, "path", path);
	free(path);
	return (pci_parse_legacy_config(nvl, cp + 1));
}
int
blockif_add_boot_device(struct pci_devinst *const pi,
    struct blockif_ctxt *const bc)
{
	if (bc->bc_bootindex < 0)
		return (0);

	return (pci_emul_add_boot_device(pi, bc->bc_bootindex));
}
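/*
 * Open the backing store named by the "path" config value, honoring
 * the "ro", "nocache", "nodelete", "sync"/"direct" and "sectorsize"
 * options, then initialize the request queues and start the worker
 * threads.  Returns NULL on failure.
 */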
struct blockif_ctxt *
blockif_open(nvlist_t *nvl, const char *ident)
{
	char tname[MAXCOMLEN + 1];
	char name[MAXPATHLEN];
	const char *path, *pssval, *ssval, *bootindex_val;
	char *cp;
	struct blockif_ctxt *bc;
	struct stat sbuf;
	struct diocgattr_arg arg;
	off_t size, psectsz, psectoff;
	int extra, fd, i, sectsz;
	int ro, candelete, geom, ssopt, pssopt;
	int nodelete;
	int bootindex;

#ifndef WITHOUT_CAPSICUM
	cap_rights_t rights;
	cap_ioctl_t cmds[] = { DIOCGFLUSH, DIOCGDELETE, DIOCGMEDIASIZE };
#endif

	pthread_once(&blockif_once, blockif_init);

	fd = -1;
	extra = 0;
	ssopt = 0;
	ro = 0;
	nodelete = 0;
	bootindex = -1;

	if (get_config_bool_node_default(nvl, "nocache", false))
		extra |= O_DIRECT;
	if (get_config_bool_node_default(nvl, "nodelete", false))
		nodelete = 1;
	if (get_config_bool_node_default(nvl, "sync", false) ||
	    get_config_bool_node_default(nvl, "direct", false))
		extra |= O_SYNC;
	if (get_config_bool_node_default(nvl, "ro", false))
		ro = 1;
	ssval = get_config_value_node(nvl, "sectorsize");
	if (ssval != NULL) {
		ssopt = strtol(ssval, &cp, 10);
		if (cp == ssval) {
			EPRINTLN("Invalid sector size \"%s\"", ssval);
			goto err;
		}
		if (*cp == '\0') {
			pssopt = ssopt;
		} else if (*cp == '/') {
			pssval = cp + 1;
			pssopt = strtol(pssval, &cp, 10);
			if (cp == pssval || *cp != '\0') {
				EPRINTLN("Invalid sector size \"%s\"", ssval);
				goto err;
			}
		} else {
			EPRINTLN("Invalid sector size \"%s\"", ssval);
			goto err;
		}
	}

	bootindex_val = get_config_value_node(nvl, "bootindex");
	if (bootindex_val != NULL) {
		bootindex = atoi(bootindex_val);
	}

	path = get_config_value_node(nvl, "path");
	if (path == NULL) {
		EPRINTLN("Missing \"path\" for block device.");
		goto err;
	}

	fd = open(path, (ro ? O_RDONLY : O_RDWR) | extra);
	if (fd < 0 && !ro) {
		/* Fall back to a read-only open if the r/w open failed */
		fd = open(path, O_RDONLY | extra);
		ro = 1;
	}

	if (fd < 0) {
		warn("Could not open backing file: %s", path);
		goto err;
	}

	if (fstat(fd, &sbuf) < 0) {
		warn("Could not stat backing file %s", path);
		goto err;
	}

#ifndef WITHOUT_CAPSICUM
	cap_rights_init(&rights, CAP_FSYNC, CAP_IOCTL, CAP_READ, CAP_SEEK,
	    CAP_WRITE, CAP_FSTAT, CAP_EVENT, CAP_FPATHCONF);
	if (ro)
		cap_rights_clear(&rights, CAP_FSYNC, CAP_WRITE);

	if (caph_rights_limit(fd, &rights) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
#endif

	/*
	 * Deal with raw devices
	 */
	size = sbuf.st_size;
	sectsz = DEV_BSIZE;
	psectsz = psectoff = 0;
	candelete = geom = 0;
	if (S_ISCHR(sbuf.st_mode)) {
		if (ioctl(fd, DIOCGMEDIASIZE, &size) < 0 ||
		    ioctl(fd, DIOCGSECTORSIZE, &sectsz)) {
			perror("Could not fetch dev blk/sector size");
			goto err;
		}
		assert(size != 0);
		assert(sectsz != 0);
		if (ioctl(fd, DIOCGSTRIPESIZE, &psectsz) == 0 && psectsz > 0)
			ioctl(fd, DIOCGSTRIPEOFFSET, &psectoff);
		strlcpy(arg.name, "GEOM::candelete", sizeof(arg.name));
		arg.len = sizeof(arg.value.i);
		if (nodelete == 0 && ioctl(fd, DIOCGATTR, &arg) == 0)
			candelete = arg.value.i;
		if (ioctl(fd, DIOCGPROVIDERNAME, name) == 0)
			geom = 1;
	} else {
		psectsz = sbuf.st_blksize;
		/* Avoid fallback implementation */
		candelete = fpathconf(fd, _PC_DEALLOC_PRESENT) == 1;
	}

#ifndef WITHOUT_CAPSICUM
	if (caph_ioctls_limit(fd, cmds, nitems(cmds)) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
#endif

	if (ssopt != 0) {
		if (!powerof2(ssopt) || !powerof2(pssopt) || ssopt < 512 ||
		    ssopt > pssopt) {
			EPRINTLN("Invalid sector size %d/%d",
			    ssopt, pssopt);
			goto err;
		}

		/*
		 * Some backend drivers (e.g. cd0, ada0) require that the I/O
		 * size be a multiple of the device's sector size.
		 *
		 * Validate that the emulated sector size complies with this
		 * requirement.
		 */
		if (S_ISCHR(sbuf.st_mode)) {
			if (ssopt < sectsz || (ssopt % sectsz) != 0) {
				EPRINTLN("Sector size %d incompatible "
				    "with underlying device sector size %d",
				    ssopt, sectsz);
				goto err;
			}
		}

		sectsz = ssopt;
		psectsz = pssopt;
		psectoff = 0;
	}

	bc = calloc(1, sizeof(struct blockif_ctxt));
	if (bc == NULL) {
		perror("calloc");
		goto err;
	}

	bc->bc_magic = BLOCKIF_SIG;
	bc->bc_fd = fd;
	bc->bc_ischr = S_ISCHR(sbuf.st_mode);
	bc->bc_isgeom = geom;
	bc->bc_candelete = candelete;
	bc->bc_rdonly = ro;
	bc->bc_size = size;
	bc->bc_sectsz = sectsz;
	bc->bc_psectsz = psectsz;
	bc->bc_psectoff = psectoff;
	pthread_mutex_init(&bc->bc_mtx, NULL);
	pthread_cond_init(&bc->bc_cond, NULL);
	bc->bc_paused = 0;
	pthread_cond_init(&bc->bc_work_done_cond, NULL);
	TAILQ_INIT(&bc->bc_freeq);
	TAILQ_INIT(&bc->bc_pendq);
	TAILQ_INIT(&bc->bc_busyq);
	bc->bc_bootindex = bootindex;
	for (i = 0; i < BLOCKIF_MAXREQ; i++) {
		bc->bc_reqs[i].be_status = BST_FREE;
		TAILQ_INSERT_HEAD(&bc->bc_freeq, &bc->bc_reqs[i], be_link);
	}

	for (i = 0; i < BLOCKIF_NUMTHR; i++) {
		pthread_create(&bc->bc_btid[i], NULL, blockif_thr, bc);
		snprintf(tname, sizeof(tname), "blk-%s-%d", ident, i);
		pthread_set_name_np(bc->bc_btid[i], tname);
	}

	return (bc);
err:
	if (fd >= 0)
		close(fd);
	return (NULL);
}
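/*
 * mevent callback invoked when the backing store's attributes change.
 * Re-reads the media size and, if it changed, updates bc_size and
 * fires the registered resize callback.
 */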
static void
blockif_resized(int fd, enum ev_type type __unused, void *arg)
{
	struct blockif_ctxt *bc;
	struct stat sb;
	off_t mediasize;

	if (fstat(fd, &sb) != 0)
		return;

	if (S_ISCHR(sb.st_mode)) {
		if (ioctl(fd, DIOCGMEDIASIZE, &mediasize) < 0) {
			EPRINTLN("blockif_resized: get mediasize failed: %s",
			    strerror(errno));
			return;
		}
	} else
		mediasize = sb.st_size;

	bc = arg;
	pthread_mutex_lock(&bc->bc_mtx);
	if (mediasize != bc->bc_size) {
		bc->bc_size = mediasize;
		bc->bc_resize_cb(bc, bc->bc_resize_cb_arg, bc->bc_size);
	}
	pthread_mutex_unlock(&bc->bc_mtx);
}
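/*
 * Register a callback to be fired whenever the backing store is
 * resized.  Only one callback may be registered per context; a second
 * registration fails with EBUSY.
 */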
int
blockif_register_resize_callback(struct blockif_ctxt *bc, blockif_resize_cb *cb,
    void *cb_arg)
{
	struct stat sb;
	int err;

	if (cb == NULL)
		return (EINVAL);

	err = 0;

	pthread_mutex_lock(&bc->bc_mtx);
	if (bc->bc_resize_cb != NULL) {
		err = EBUSY;
		goto out;
	}

	assert(bc->bc_closing == 0);

	if (fstat(bc->bc_fd, &sb) != 0) {
		err = errno;
		goto out;
	}

	bc->bc_resize_event = mevent_add_flags(bc->bc_fd, EVF_VNODE,
	    EVFF_ATTRIB, blockif_resized, bc);
	if (bc->bc_resize_event == NULL) {
		err = ENXIO;
		goto out;
	}

	bc->bc_resize_cb = cb;
	bc->bc_resize_cb_arg = cb_arg;
out:
	pthread_mutex_unlock(&bc->bc_mtx);

	return (err);
}
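/*
 * Common submission path for reads, writes, flushes and deletes.
 * Returns E2BIG when all request elements are in use, i.e. the caller
 * has exceeded the queue depth advertised by blockif_queuesz().
 */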
static int
blockif_request(struct blockif_ctxt *bc, struct blockif_req *breq,
    enum blockop op)
{
	int err;

	err = 0;

	pthread_mutex_lock(&bc->bc_mtx);
	assert(!bc->bc_paused);
	if (!TAILQ_EMPTY(&bc->bc_freeq)) {
		/*
		 * Enqueue and inform the block i/o thread
		 * that there is work available
		 */
		if (blockif_enqueue(bc, breq, op))
			pthread_cond_signal(&bc->bc_cond);
	} else {
		/*
		 * Callers are not allowed to enqueue more than
		 * the specified blockif queue limit.  Return an
		 * error to indicate that the queue length has been
		 * exceeded.
		 */
		err = E2BIG;
	}
	pthread_mutex_unlock(&bc->bc_mtx);

	return (err);
}
int
blockif_read(struct blockif_ctxt *bc, struct blockif_req *breq)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (blockif_request(bc, breq, BOP_READ));
}

int
blockif_write(struct blockif_ctxt *bc, struct blockif_req *breq)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (blockif_request(bc, breq, BOP_WRITE));
}

int
blockif_flush(struct blockif_ctxt *bc, struct blockif_req *breq)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (blockif_request(bc, breq, BOP_FLUSH));
}

int
blockif_delete(struct blockif_ctxt *bc, struct blockif_req *breq)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (blockif_request(bc, breq, BOP_DELETE));
}
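#if 0
/*
 * Usage sketch (illustrative only, not part of this file): how a
 * device model might submit an asynchronous one-sector read through
 * the API above.  The names "example_done" and "example_read_sector"
 * are hypothetical; the real callers are the virtio-blk, AHCI and
 * NVMe emulations.
 */
static void
example_done(struct blockif_req *br, int err)
{
	/* Invoked on a blockif worker thread when the I/O completes. */
	if (err != 0)
		EPRINTLN("example read failed: %s", strerror(err));
}

static int
example_read_sector(struct blockif_ctxt *bc, void *dst, off_t lba)
{
	/*
	 * Static so the request outlives this call; real device models
	 * embed the request in a per-command structure instead.
	 */
	static struct blockif_req br;

	br.br_iov[0].iov_base = dst;
	br.br_iov[0].iov_len = blockif_sectsz(bc);
	br.br_iovcnt = 1;
	br.br_offset = lba * blockif_sectsz(bc);
	br.br_resid = br.br_iov[0].iov_len;
	br.br_callback = example_done;
	br.br_param = NULL;

	/* Fails with E2BIG once blockif_queuesz() requests are queued. */
	return (blockif_read(bc, &br));
}
#endif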
int
blockif_cancel(struct blockif_ctxt *bc, struct blockif_req *breq)
{
	struct blockif_elem *be;

	assert(bc->bc_magic == BLOCKIF_SIG);

	pthread_mutex_lock(&bc->bc_mtx);
	/* XXX: not waiting while paused */

	/*
	 * Check pending requests.
	 */
	TAILQ_FOREACH(be, &bc->bc_pendq, be_link) {
		if (be->be_req == breq)
			break;
	}
	if (be != NULL) {
		/*
		 * Found it.
		 */
		blockif_complete(bc, be);
		pthread_mutex_unlock(&bc->bc_mtx);

		return (0);
	}

	/*
	 * Check in-flight requests.
	 */
	TAILQ_FOREACH(be, &bc->bc_busyq, be_link) {
		if (be->be_req == breq)
			break;
	}
	if (be == NULL) {
		/*
		 * Didn't find it.
		 */
		pthread_mutex_unlock(&bc->bc_mtx);
		return (EINVAL);
	}

	/*
	 * Interrupt the processing thread to force it to return
	 * prematurely via its normal callback path.
	 */
	while (be->be_status == BST_BUSY) {
		struct blockif_sig_elem bse, *old_head;

		pthread_mutex_init(&bse.bse_mtx, NULL);
		pthread_cond_init(&bse.bse_cond, NULL);

		bse.bse_pending = 1;

		do {
			old_head = blockif_bse_head;
			bse.bse_next = old_head;
		} while (!atomic_cmpset_ptr((uintptr_t *)&blockif_bse_head,
		    (uintptr_t)old_head,
		    (uintptr_t)&bse));

		pthread_kill(be->be_tid, SIGCONT);

		pthread_mutex_lock(&bse.bse_mtx);
		while (bse.bse_pending)
			pthread_cond_wait(&bse.bse_cond, &bse.bse_mtx);
		pthread_mutex_unlock(&bse.bse_mtx);
	}

	pthread_mutex_unlock(&bc->bc_mtx);

	/*
	 * The processing thread has been interrupted.  Since it's not
	 * clear if the callback has been invoked yet, return EBUSY.
	 */
	return (EBUSY);
}
int
blockif_close(struct blockif_ctxt *bc)
{
	void *jval;
	int i;

	assert(bc->bc_magic == BLOCKIF_SIG);

	/*
	 * Stop the block i/o threads
	 */
	pthread_mutex_lock(&bc->bc_mtx);
	bc->bc_closing = 1;
	if (bc->bc_resize_event != NULL)
		mevent_disable(bc->bc_resize_event);
	pthread_mutex_unlock(&bc->bc_mtx);
	pthread_cond_broadcast(&bc->bc_cond);
	for (i = 0; i < BLOCKIF_NUMTHR; i++)
		pthread_join(bc->bc_btid[i], &jval);

	/* XXX Cancel queued i/o's ??? */

	/*
	 * Release resources
	 */
	bc->bc_magic = 0;
	close(bc->bc_fd);
	free(bc);

	return (0);
}
/*
 * Return virtual C/H/S values for a given block.  Use the algorithm
 * outlined in the VHD specification to calculate the values.
 */
void
blockif_chs(struct blockif_ctxt *bc, uint16_t *c, uint8_t *h, uint8_t *s)
{
	off_t sectors;		/* total sectors of the block dev */
	off_t hcyl;		/* cylinders times heads */
	uint16_t secpt;		/* sectors per track */
	uint8_t heads;

	assert(bc->bc_magic == BLOCKIF_SIG);

	sectors = bc->bc_size / bc->bc_sectsz;

	/* Clamp the size to the largest possible with CHS */
	if (sectors > 65535L * 16 * 255)
		sectors = 65535L * 16 * 255;

	if (sectors >= 65536L * 16 * 63) {
		secpt = 255;
		heads = 16;
		hcyl = sectors / secpt;
	} else {
		secpt = 17;
		hcyl = sectors / secpt;
		heads = (hcyl + 1023) / 1024;

		if (heads < 4)
			heads = 4;

		if (hcyl >= (heads * 1024) || heads > 16) {
			secpt = 31;
			heads = 16;
			hcyl = sectors / secpt;
		}
		if (hcyl >= (heads * 1024)) {
			secpt = 63;
			heads = 16;
			hcyl = sectors / secpt;
		}
	}

	*c = hcyl / heads;
	*h = heads;
	*s = secpt;
}
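/*
 * Worked example (hypothetical 1 GiB disk with 512-byte sectors):
 * sectors = 2097152, below the 65536*16*63 threshold, so the
 * algorithm starts with secpt = 17, giving hcyl = 123361 and
 * heads > 16.  The first fallback (secpt = 31, heads = 16) still
 * leaves hcyl = 67650 >= 16 * 1024, so the final step yields
 * secpt = 63, heads = 16, hcyl = 33288, i.e. C/H/S = 2080/16/63.
 */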
off_t
blockif_size(struct blockif_ctxt *bc)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (bc->bc_size);
}

int
blockif_sectsz(struct blockif_ctxt *bc)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (bc->bc_sectsz);
}

void
blockif_psectsz(struct blockif_ctxt *bc, int *size, int *off)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	*size = bc->bc_psectsz;
	*off = bc->bc_psectoff;
}

int
blockif_queuesz(struct blockif_ctxt *bc)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (BLOCKIF_MAXREQ - 1);
}

int
blockif_is_ro(struct blockif_ctxt *bc)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (bc->bc_rdonly);
}

int
blockif_candelete(struct blockif_ctxt *bc)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (bc->bc_candelete);
}
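/*
 * Snapshot support: blockif_pause() quiesces the request queues and
 * flushes the backing store so a consistent image can be captured;
 * blockif_resume() lifts the pause.  blockif_request() asserts that
 * no new I/O is submitted while paused.
 */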
#ifdef BHYVE_SNAPSHOT
void
blockif_pause(struct blockif_ctxt *bc)
{
	assert(bc != NULL);
	assert(bc->bc_magic == BLOCKIF_SIG);

	pthread_mutex_lock(&bc->bc_mtx);
	bc->bc_paused = 1;

	/* The interface is paused.  Wait for the workers to finish. */
	while (!blockif_empty(bc))
		pthread_cond_wait(&bc->bc_work_done_cond, &bc->bc_mtx);
	pthread_mutex_unlock(&bc->bc_mtx);

	if (!bc->bc_rdonly && blockif_flush_bc(bc))
		fprintf(stderr, "%s: [WARN] failed to flush backing file.\r\n",
		    __func__);
}

void
blockif_resume(struct blockif_ctxt *bc)
{
	assert(bc != NULL);
	assert(bc->bc_magic == BLOCKIF_SIG);

	pthread_mutex_lock(&bc->bc_mtx);
	bc->bc_paused = 0;
	pthread_mutex_unlock(&bc->bc_mtx);
}
#endif	/* BHYVE_SNAPSHOT */