/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* Driver for VirtIO block devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <geom/geom.h>
#include <geom/geom_disk.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/block/virtio_blk.h>

#include "virtio_if.h"
struct vtblk_request {
	struct virtio_blk_outhdr	 vbr_hdr;
	struct bio			*vbr_bp;
	uint8_t				 vbr_ack;
	TAILQ_ENTRY(vtblk_request)	 vbr_link;
};
enum vtblk_cache_mode {
	VTBLK_CACHE_WRITETHROUGH,
	VTBLK_CACHE_WRITEBACK,
	VTBLK_CACHE_MAX
};
struct vtblk_softc {
	device_t		 vtblk_dev;
	struct mtx		 vtblk_mtx;
	uint64_t		 vtblk_features;
	uint32_t		 vtblk_flags;
#define VTBLK_FLAG_INDIRECT	0x0001
#define VTBLK_FLAG_READONLY	0x0002
#define VTBLK_FLAG_DETACH	0x0004
#define VTBLK_FLAG_SUSPEND	0x0008
#define VTBLK_FLAG_BARRIER	0x0010
#define VTBLK_FLAG_WC_CONFIG	0x0020
#define VTBLK_FLAG_DISCARD	0x0040

	struct virtqueue	*vtblk_vq;
	struct sglist		*vtblk_sglist;
	struct disk		*vtblk_disk;

	struct bio_queue_head	 vtblk_bioq;
	TAILQ_HEAD(, vtblk_request)
				 vtblk_req_free;
	TAILQ_HEAD(, vtblk_request)
				 vtblk_req_ready;
	struct vtblk_request	*vtblk_req_ordered;

	int			 vtblk_max_nsegs;
	int			 vtblk_request_count;
	enum vtblk_cache_mode	 vtblk_write_cache;

	struct bio_queue	 vtblk_dump_queue;
	struct vtblk_request	 vtblk_dump_request;
};
static struct virtio_feature_desc vtblk_feature_desc[] = {
	{ VIRTIO_BLK_F_BARRIER,		"HostBarrier"	},
	{ VIRTIO_BLK_F_SIZE_MAX,	"MaxSegSize"	},
	{ VIRTIO_BLK_F_SEG_MAX,		"MaxNumSegs"	},
	{ VIRTIO_BLK_F_GEOMETRY,	"DiskGeometry"	},
	{ VIRTIO_BLK_F_RO,		"ReadOnly"	},
	{ VIRTIO_BLK_F_BLK_SIZE,	"BlockSize"	},
	{ VIRTIO_BLK_F_SCSI,		"SCSICmds"	},
	{ VIRTIO_BLK_F_WCE,		"WriteCache"	},
	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology"	},
	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE"	},
	{ VIRTIO_BLK_F_DISCARD,		"Discard"	},

	{ 0, NULL }
};
static int	vtblk_modevent(module_t, int, void *);

static int	vtblk_probe(device_t);
static int	vtblk_attach(device_t);
static int	vtblk_detach(device_t);
static int	vtblk_suspend(device_t);
static int	vtblk_resume(device_t);
static int	vtblk_shutdown(device_t);
static int	vtblk_config_change(device_t);

static int	vtblk_open(struct disk *);
static int	vtblk_close(struct disk *);
static int	vtblk_ioctl(struct disk *, u_long, void *, int,
		    struct thread *);
static int	vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
static void	vtblk_strategy(struct bio *);

static void	vtblk_negotiate_features(struct vtblk_softc *);
static void	vtblk_setup_features(struct vtblk_softc *);
static int	vtblk_maximum_segments(struct vtblk_softc *,
		    struct virtio_blk_config *);
static int	vtblk_alloc_virtqueue(struct vtblk_softc *);
static void	vtblk_resize_disk(struct vtblk_softc *, uint64_t);
static void	vtblk_alloc_disk(struct vtblk_softc *,
		    struct virtio_blk_config *);
static void	vtblk_create_disk(struct vtblk_softc *);

static int	vtblk_request_prealloc(struct vtblk_softc *);
static void	vtblk_request_free(struct vtblk_softc *);
static struct vtblk_request *
		vtblk_request_dequeue(struct vtblk_softc *);
static void	vtblk_request_enqueue(struct vtblk_softc *,
		    struct vtblk_request *);
static struct vtblk_request *
		vtblk_request_next_ready(struct vtblk_softc *);
static void	vtblk_request_requeue_ready(struct vtblk_softc *,
		    struct vtblk_request *);
static struct vtblk_request *
		vtblk_request_next(struct vtblk_softc *);
static struct vtblk_request *
		vtblk_request_bio(struct vtblk_softc *);
static int	vtblk_request_execute(struct vtblk_softc *,
		    struct vtblk_request *);
static int	vtblk_request_error(struct vtblk_request *);

static void	vtblk_queue_completed(struct vtblk_softc *,
		    struct bio_queue *);
static void	vtblk_done_completed(struct vtblk_softc *,
		    struct bio_queue *);
static void	vtblk_drain_vq(struct vtblk_softc *);
static void	vtblk_drain(struct vtblk_softc *);

static void	vtblk_startio(struct vtblk_softc *);
static void	vtblk_bio_done(struct vtblk_softc *, struct bio *, int);

static void	vtblk_read_config(struct vtblk_softc *,
		    struct virtio_blk_config *);
static void	vtblk_ident(struct vtblk_softc *);
static int	vtblk_poll_request(struct vtblk_softc *,
		    struct vtblk_request *);
static int	vtblk_quiesce(struct vtblk_softc *);
static void	vtblk_vq_intr(void *);
static void	vtblk_stop(struct vtblk_softc *);

static void	vtblk_dump_quiesce(struct vtblk_softc *);
static int	vtblk_dump_write(struct vtblk_softc *, void *, off_t, size_t);
static int	vtblk_dump_flush(struct vtblk_softc *);
static void	vtblk_dump_complete(struct vtblk_softc *);

static void	vtblk_set_write_cache(struct vtblk_softc *, int);
static int	vtblk_write_cache_enabled(struct vtblk_softc *sc,
		    struct virtio_blk_config *);
static int	vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS);

static void	vtblk_setup_sysctl(struct vtblk_softc *);
static int	vtblk_tunable_int(struct vtblk_softc *, const char *, int);
/* Tunables. */
static int vtblk_no_ident = 0;
TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
static int vtblk_writecache_mode = -1;
TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);
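/*
 * Note: either global may also be overridden per device through a
 * "hw.vtblk.<unit>.<knob>" tunable (for example hw.vtblk.0.no_ident="1"
 * in loader.conf); see vtblk_tunable_int() below.
 */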
/* Features desired/implemented by this driver. */
#define VTBLK_FEATURES \
    (VIRTIO_BLK_F_BARRIER		| \
     VIRTIO_BLK_F_SIZE_MAX		| \
     VIRTIO_BLK_F_SEG_MAX		| \
     VIRTIO_BLK_F_GEOMETRY		| \
     VIRTIO_BLK_F_RO			| \
     VIRTIO_BLK_F_BLK_SIZE		| \
     VIRTIO_BLK_F_WCE			| \
     VIRTIO_BLK_F_TOPOLOGY		| \
     VIRTIO_BLK_F_CONFIG_WCE		| \
     VIRTIO_BLK_F_DISCARD		| \
     VIRTIO_RING_F_INDIRECT_DESC)
#define VTBLK_MTX(_sc)		&(_sc)->vtblk_mtx
#define VTBLK_LOCK_INIT(_sc, _name) \
				mtx_init(VTBLK_MTX((_sc)), (_name), \
				    "VirtIO Block Lock", MTX_DEF)
#define VTBLK_LOCK(_sc)		mtx_lock(VTBLK_MTX((_sc)))
#define VTBLK_UNLOCK(_sc)	mtx_unlock(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_DESTROY(_sc)	mtx_destroy(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_ASSERT(_sc)	mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
#define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
				mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)
#define VTBLK_DISK_NAME		"vtbd"
#define VTBLK_QUIESCE_TIMEOUT	(30 * hz)

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VTBLK_MIN_SEGMENTS	2
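/*
 * Data segments for a transfer are counted on top of these two; see
 * vtblk_maximum_segments() for how the per-request limit is derived.
 */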
static device_method_t vtblk_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtblk_probe),
	DEVMETHOD(device_attach,	vtblk_attach),
	DEVMETHOD(device_detach,	vtblk_detach),
	DEVMETHOD(device_suspend,	vtblk_suspend),
	DEVMETHOD(device_resume,	vtblk_resume),
	DEVMETHOD(device_shutdown,	vtblk_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_config_change,	vtblk_config_change),

	DEVMETHOD_END
};

static driver_t vtblk_driver = {
	"vtblk",
	vtblk_methods,
	sizeof(struct vtblk_softc)
};
static devclass_t vtblk_devclass;

DRIVER_MODULE(virtio_blk, virtio_mmio, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
MODULE_VERSION(virtio_blk, 1);
MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);

VIRTIO_SIMPLE_PNPTABLE(virtio_blk, VIRTIO_ID_BLOCK, "VirtIO Block Adapter");
VIRTIO_SIMPLE_PNPINFO(virtio_mmio, virtio_blk);
VIRTIO_SIMPLE_PNPINFO(virtio_pci, virtio_blk);
static int
vtblk_modevent(module_t mod, int type, void *unused)
{
	int error;

	error = 0;

	switch (type) {
	case MOD_LOAD:
	case MOD_QUIESCE:
	case MOD_UNLOAD:
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static int
vtblk_probe(device_t dev)
{

	return (VIRTIO_SIMPLE_PROBE(dev, virtio_blk));
}
static int
vtblk_attach(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	int error;

	virtio_set_feature_desc(dev, vtblk_feature_desc);

	sc = device_get_softc(dev);
	sc->vtblk_dev = dev;

	VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
	bioq_init(&sc->vtblk_bioq);
	TAILQ_INIT(&sc->vtblk_dump_queue);
	TAILQ_INIT(&sc->vtblk_req_free);
	TAILQ_INIT(&sc->vtblk_req_ready);

	vtblk_setup_sysctl(sc);
	vtblk_setup_features(sc);

	vtblk_read_config(sc, &blkcfg);

	/*
	 * With the current sglist(9) implementation, it is not easy
	 * for us to support a maximum segment size as adjacent
	 * segments are coalesced. For now, just make sure it's larger
	 * than the maximum supported transfer size.
	 */
	if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
		if (blkcfg.size_max < MAXPHYS) {
			error = ENOTSUP;
			device_printf(dev, "host requires unsupported "
			    "maximum segment size feature\n");
			goto fail;
		}
	}

	sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
	if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
		error = EINVAL;
		device_printf(dev, "fewer than minimum number of segments "
		    "allowed: %d\n", sc->vtblk_max_nsegs);
		goto fail;
	}

	sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
	if (sc->vtblk_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtblk_alloc_virtqueue(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueue\n");
		goto fail;
	}

	error = vtblk_request_prealloc(sc);
	if (error) {
		device_printf(dev, "cannot preallocate requests\n");
		goto fail;
	}

	vtblk_alloc_disk(sc, &blkcfg);

	error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupt\n");
		goto fail;
	}

	vtblk_create_disk(sc);

	virtqueue_enable_intr(sc->vtblk_vq);

fail:
	if (error)
		vtblk_detach(dev);

	return (error);
}
static int
vtblk_detach(device_t dev)
{
	struct vtblk_softc *sc;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	sc->vtblk_flags |= VTBLK_FLAG_DETACH;
	if (device_is_attached(dev))
		vtblk_stop(sc);
	VTBLK_UNLOCK(sc);

	vtblk_drain(sc);

	if (sc->vtblk_disk != NULL) {
		disk_destroy(sc->vtblk_disk);
		sc->vtblk_disk = NULL;
	}

	if (sc->vtblk_sglist != NULL) {
		sglist_free(sc->vtblk_sglist);
		sc->vtblk_sglist = NULL;
	}

	VTBLK_LOCK_DESTROY(sc);

	return (0);
}
static int
vtblk_suspend(device_t dev)
{
	struct vtblk_softc *sc;
	int error;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
	/* XXX BMV: virtio_stop(), etc needed here? */
	error = vtblk_quiesce(sc);
	if (error)
		sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
	VTBLK_UNLOCK(sc);

	return (error);
}
static int
vtblk_resume(device_t dev)
{
	struct vtblk_softc *sc;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	/* XXX BMV: virtio_reinit(), etc needed here? */
	sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
	vtblk_startio(sc); /* XXX? */
	VTBLK_UNLOCK(sc);

	return (0);
}
static int
vtblk_shutdown(device_t dev)
{

	return (0);
}
static int
vtblk_config_change(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	uint64_t capacity;

	sc = device_get_softc(dev);

	vtblk_read_config(sc, &blkcfg);

	/* Capacity is always in 512-byte units. */
	capacity = blkcfg.capacity * VTBLK_BSIZE;

	if (sc->vtblk_disk->d_mediasize != capacity)
		vtblk_resize_disk(sc, capacity);

	return (0);
}
static int
vtblk_open(struct disk *dp)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? ENXIO : 0);
}
static int
vtblk_close(struct disk *dp)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (0);
}
static int
vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
    struct thread *td)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (ENOTTY);
}
static int
vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)
{
	struct disk *dp;
	struct vtblk_softc *sc;
	int error;

	dp = arg;
	error = 0;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	VTBLK_LOCK(sc);

	vtblk_dump_quiesce(sc);

	if (length > 0)
		error = vtblk_dump_write(sc, virtual, offset, length);
	if (error || (virtual == NULL && offset == 0))
		vtblk_dump_complete(sc);

	VTBLK_UNLOCK(sc);

	return (error);
}
static void
vtblk_strategy(struct bio *bp)
{
	struct vtblk_softc *sc;

	if ((sc = bp->bio_disk->d_drv1) == NULL) {
		vtblk_bio_done(NULL, bp, EINVAL);
		return;
	}

	/*
	 * Fail any write if RO. Unfortunately, there does not seem to
	 * be a better way to report our readonly'ness to GEOM above.
	 */
	if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
	    (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH ||
	    bp->bio_cmd == BIO_DELETE)) {
		vtblk_bio_done(sc, bp, EROFS);
		return;
	}

	if ((bp->bio_cmd != BIO_READ) && (bp->bio_cmd != BIO_WRITE) &&
	    (bp->bio_cmd != BIO_FLUSH) && (bp->bio_cmd != BIO_DELETE)) {
		vtblk_bio_done(sc, bp, EOPNOTSUPP);
		return;
	}

	VTBLK_LOCK(sc);

	if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
		VTBLK_UNLOCK(sc);
		vtblk_bio_done(sc, bp, ENXIO);
		return;
	}

	if ((bp->bio_cmd == BIO_DELETE) &&
	    !(sc->vtblk_flags & VTBLK_FLAG_DISCARD)) {
		VTBLK_UNLOCK(sc);
		vtblk_bio_done(sc, bp, EOPNOTSUPP);
		return;
	}

	bioq_insert_tail(&sc->vtblk_bioq, bp);
	vtblk_startio(sc);

	VTBLK_UNLOCK(sc);
}
static void
vtblk_negotiate_features(struct vtblk_softc *sc)
{
	device_t dev;
	uint64_t features;

	dev = sc->vtblk_dev;
	features = VTBLK_FEATURES;

	sc->vtblk_features = virtio_negotiate_features(dev, features);
}
static void
vtblk_setup_features(struct vtblk_softc *sc)
{
	device_t dev;

	dev = sc->vtblk_dev;

	vtblk_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
		sc->vtblk_flags |= VTBLK_FLAG_READONLY;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
		sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_DISCARD))
		sc->vtblk_flags |= VTBLK_FLAG_DISCARD;
}
static int
vtblk_maximum_segments(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
	device_t dev;
	int nsegs;

	dev = sc->vtblk_dev;
	nsegs = VTBLK_MIN_SEGMENTS;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
		nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
		if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
	} else
		nsegs += 1;

	return (nsegs);
}
static int
vtblk_alloc_virtqueue(struct vtblk_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info;

	dev = sc->vtblk_dev;

	VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
	    vtblk_vq_intr, sc, &sc->vtblk_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
}
static void
vtblk_resize_disk(struct vtblk_softc *sc, uint64_t new_capacity)
{
	device_t dev;
	struct disk *dp;
	int error;

	dev = sc->vtblk_dev;
	dp = sc->vtblk_disk;

	dp->d_mediasize = new_capacity;
	if (bootverbose) {
		device_printf(dev, "resized to %juMB (%ju %u byte sectors)\n",
		    (uintmax_t) dp->d_mediasize >> 20,
		    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
		    dp->d_sectorsize);
	}

	error = disk_resize(dp, M_NOWAIT);
	if (error) {
		device_printf(dev,
		    "disk_resize(9) failed, error: %d\n", error);
	}
}
static void
vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
	device_t dev;
	struct disk *dp;

	dev = sc->vtblk_dev;

	sc->vtblk_disk = dp = disk_alloc();
	dp->d_open = vtblk_open;
	dp->d_close = vtblk_close;
	dp->d_ioctl = vtblk_ioctl;
	dp->d_strategy = vtblk_strategy;
	dp->d_name = VTBLK_DISK_NAME;
	dp->d_unit = device_get_unit(dev);
	dp->d_drv1 = sc;
	dp->d_flags = DISKFLAG_CANFLUSHCACHE | DISKFLAG_UNMAPPED_BIO |
	    DISKFLAG_DIRECT_COMPLETION;
	dp->d_hba_vendor = virtio_get_vendor(dev);
	dp->d_hba_device = virtio_get_device(dev);
	dp->d_hba_subvendor = virtio_get_subvendor(dev);
	dp->d_hba_subdevice = virtio_get_subdevice(dev);

	if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
		dp->d_dump = vtblk_dump;

	/* Capacity is always in 512-byte units. */
	dp->d_mediasize = blkcfg->capacity * VTBLK_BSIZE;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
		dp->d_sectorsize = blkcfg->blk_size;
	else
		dp->d_sectorsize = VTBLK_BSIZE;

	/*
	 * The VirtIO maximum I/O size is given in terms of segments.
	 * However, FreeBSD limits I/O size by logical buffer size, not
	 * by physically contiguous pages. Therefore, we have to assume
	 * no pages are contiguous. This may impose an artificially low
	 * maximum I/O size. But in practice, since QEMU advertises 128
	 * segments, this gives us a maximum IO size of 125 * PAGE_SIZE,
	 * which is typically greater than MAXPHYS. Eventually we should
	 * just advertise MAXPHYS and split buffers that are too big.
	 *
	 * Note we must subtract one additional segment in case of non
	 * page aligned buffers.
	 */
	dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
	    PAGE_SIZE;
	if (dp->d_maxsize < PAGE_SIZE)
		dp->d_maxsize = PAGE_SIZE; /* XXX */

	if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
		dp->d_fwsectors = blkcfg->geometry.sectors;
		dp->d_fwheads = blkcfg->geometry.heads;
	}

	if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY) &&
	    blkcfg->topology.physical_block_exp > 0) {
		dp->d_stripesize = dp->d_sectorsize *
		    (1 << blkcfg->topology.physical_block_exp);
		dp->d_stripeoffset = (dp->d_stripesize -
		    blkcfg->topology.alignment_offset * dp->d_sectorsize) %
		    dp->d_stripesize;
	}

	if (virtio_with_feature(dev, VIRTIO_BLK_F_DISCARD)) {
		dp->d_flags |= DISKFLAG_CANDELETE;
		dp->d_delmaxsize = blkcfg->max_discard_sectors * VTBLK_BSIZE;
	}

	if (vtblk_write_cache_enabled(sc, blkcfg) != 0)
		sc->vtblk_write_cache = VTBLK_CACHE_WRITEBACK;
	else
		sc->vtblk_write_cache = VTBLK_CACHE_WRITETHROUGH;
}
static void
vtblk_create_disk(struct vtblk_softc *sc)
{
	struct disk *dp;

	dp = sc->vtblk_disk;

	vtblk_ident(sc);

	device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
	    (uintmax_t) dp->d_mediasize >> 20,
	    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
	    dp->d_sectorsize);

	disk_create(dp, DISK_VERSION);
}
static int
vtblk_request_prealloc(struct vtblk_softc *sc)
{
	struct vtblk_request *req;
	int i, nreqs;

	nreqs = virtqueue_size(sc->vtblk_vq);

	/*
	 * Preallocate sufficient requests to keep the virtqueue full. Each
	 * request consumes VTBLK_MIN_SEGMENTS or more descriptors so reduce
	 * the number allocated when indirect descriptors are not available.
	 */
	if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
		nreqs /= VTBLK_MIN_SEGMENTS;

	for (i = 0; i < nreqs; i++) {
		req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT);
		if (req == NULL)
			return (ENOMEM);

		MPASS(sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr)) == 1);
		MPASS(sglist_count(&req->vbr_ack, sizeof(req->vbr_ack)) == 1);

		sc->vtblk_request_count++;
		vtblk_request_enqueue(sc, req);
	}

	return (0);
}
static void
vtblk_request_free(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	MPASS(TAILQ_EMPTY(&sc->vtblk_req_ready));

	while ((req = vtblk_request_dequeue(sc)) != NULL) {
		sc->vtblk_request_count--;
		free(req, M_DEVBUF);
	}

	KASSERT(sc->vtblk_request_count == 0,
	    ("%s: leaked %d requests", __func__, sc->vtblk_request_count));
}
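/*
 * The helpers below manage the per-softc free list of preallocated
 * requests: dequeue returns a zeroed request (or NULL when the list is
 * exhausted) and enqueue places a request back at the head of the list.
 */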
static struct vtblk_request *
vtblk_request_dequeue(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = TAILQ_FIRST(&sc->vtblk_req_free);
	if (req != NULL) {
		TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);
		bzero(req, sizeof(struct vtblk_request));
	}

	return (req);
}
static void
vtblk_request_enqueue(struct vtblk_softc *sc, struct vtblk_request *req)
{

	TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
}
static struct vtblk_request *
vtblk_request_next_ready(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = TAILQ_FIRST(&sc->vtblk_req_ready);
	if (req != NULL)
		TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);

	return (req);
}
static void
vtblk_request_requeue_ready(struct vtblk_softc *sc, struct vtblk_request *req)
{

	/* NOTE: Currently, there will be at most one request in the queue. */
	TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);
}
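/*
 * Pick the next request to submit: a previously deferred "ready" request
 * takes priority over starting a new one from the bio queue.
 */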
static struct vtblk_request *
vtblk_request_next(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = vtblk_request_next_ready(sc);
	if (req != NULL)
		return (req);

	return (vtblk_request_bio(sc));
}
static struct vtblk_request *
vtblk_request_bio(struct vtblk_softc *sc)
{
	struct bio_queue_head *bioq;
	struct vtblk_request *req;
	struct bio *bp;

	bioq = &sc->vtblk_bioq;

	if (bioq_first(bioq) == NULL)
		return (NULL);

	req = vtblk_request_dequeue(sc);
	if (req == NULL)
		return (NULL);

	bp = bioq_takefirst(bioq);
	req->vbr_bp = bp;
	req->vbr_ack = -1;
	req->vbr_hdr.ioprio = 1;

	switch (bp->bio_cmd) {
	case BIO_FLUSH:
		req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
		break;
	case BIO_READ:
		req->vbr_hdr.type = VIRTIO_BLK_T_IN;
		req->vbr_hdr.sector = bp->bio_offset / VTBLK_BSIZE;
		break;
	case BIO_WRITE:
		req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
		req->vbr_hdr.sector = bp->bio_offset / VTBLK_BSIZE;
		break;
	case BIO_DELETE:
		req->vbr_hdr.type = VIRTIO_BLK_T_DISCARD;
		req->vbr_hdr.sector = bp->bio_offset / VTBLK_BSIZE;
		break;
	default:
		panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
	}

	if (bp->bio_flags & BIO_ORDERED)
		req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;

	return (req);
}
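/*
 * Build the descriptor chain for a request: the header is device-readable,
 * any data or discard payload follows, and the one-byte ack/status is the
 * final device-writable segment.
 */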
static int
vtblk_request_execute(struct vtblk_softc *sc, struct vtblk_request *req)
{
	struct virtqueue *vq;
	struct sglist *sg;
	struct bio *bp;
	int ordered, readable, writable, error;

	vq = sc->vtblk_vq;
	sg = sc->vtblk_sglist;
	bp = req->vbr_bp;
	ordered = 0;
	writable = 0;

	/*
	 * Some hosts (such as bhyve) do not implement the barrier feature,
	 * so we emulate it in the driver by allowing the barrier request
	 * to be the only one in flight.
	 */
	if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
		if (sc->vtblk_req_ordered != NULL)
			return (EBUSY);
		if (bp->bio_flags & BIO_ORDERED) {
			if (!virtqueue_empty(vq))
				return (EBUSY);
			ordered = 1;
			req->vbr_hdr.type &= ~VIRTIO_BLK_T_BARRIER;
		}
	}

	sglist_reset(sg);
	sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));

	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
		error = sglist_append_bio(sg, bp);
		if (error || sg->sg_nseg == sg->sg_maxseg) {
			panic("%s: bio %p data buffer too big %d",
			    __func__, bp, error);
		}

		/* BIO_READ means the host writes into our buffer. */
		if (bp->bio_cmd == BIO_READ)
			writable = sg->sg_nseg - 1;
	} else if (bp->bio_cmd == BIO_DELETE) {
		struct virtio_blk_discard_write_zeroes *discard;

		discard = malloc(sizeof(*discard), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (discard == NULL)
			return (ENOMEM);
		discard->sector = bp->bio_offset / VTBLK_BSIZE;
		discard->num_sectors = bp->bio_bcount / VTBLK_BSIZE;
		bp->bio_driver1 = discard;
		error = sglist_append(sg, discard, sizeof(*discard));
		if (error || sg->sg_nseg == sg->sg_maxseg) {
			panic("%s: bio %p data buffer too big %d",
			    __func__, bp, error);
		}
	}

	writable++;
	sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
	readable = sg->sg_nseg - writable;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error == 0 && ordered)
		sc->vtblk_req_ordered = req;

	return (error);
}
static int
vtblk_request_error(struct vtblk_request *req)
{
	int error;

	switch (req->vbr_ack) {
	case VIRTIO_BLK_S_OK:
		error = 0;
		break;
	case VIRTIO_BLK_S_UNSUPP:
		error = ENOTSUP;
		break;
	default:
		error = EIO;
		break;
	}

	return (error);
}
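/*
 * Collect finished requests from the virtqueue: record each bio's error
 * status, gather the bios on the caller-supplied queue so they can be
 * completed outside the lock, and recycle the request structures.
 */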
static void
vtblk_queue_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
	struct vtblk_request *req;
	struct bio *bp;

	while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
		if (sc->vtblk_req_ordered != NULL) {
			MPASS(sc->vtblk_req_ordered == req);
			sc->vtblk_req_ordered = NULL;
		}

		bp = req->vbr_bp;
		bp->bio_error = vtblk_request_error(req);
		TAILQ_INSERT_TAIL(queue, bp, bio_queue);

		vtblk_request_enqueue(sc, req);
	}
}
static void
vtblk_done_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
	struct bio *bp, *tmp;

	TAILQ_FOREACH_SAFE(bp, queue, bio_queue, tmp) {
		if (bp->bio_error != 0)
			disk_err(bp, "hard error", -1, 1);
		vtblk_bio_done(sc, bp, bp->bio_error);
	}
}
static void
vtblk_drain_vq(struct vtblk_softc *sc)
{
	struct virtqueue *vq;
	struct vtblk_request *req;
	int last;

	vq = sc->vtblk_vq;
	last = 0;

	while ((req = virtqueue_drain(vq, &last)) != NULL) {
		vtblk_bio_done(sc, req->vbr_bp, ENXIO);
		vtblk_request_enqueue(sc, req);
	}

	sc->vtblk_req_ordered = NULL;
	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}
static void
vtblk_drain(struct vtblk_softc *sc)
{
	struct bio_queue queue;
	struct bio_queue_head *bioq;
	struct vtblk_request *req;
	struct bio *bp;

	bioq = &sc->vtblk_bioq;
	TAILQ_INIT(&queue);

	if (sc->vtblk_vq != NULL) {
		vtblk_queue_completed(sc, &queue);
		vtblk_done_completed(sc, &queue);

		vtblk_drain_vq(sc);
	}

	while ((req = vtblk_request_next_ready(sc)) != NULL) {
		vtblk_bio_done(sc, req->vbr_bp, ENXIO);
		vtblk_request_enqueue(sc, req);
	}

	while (bioq_first(bioq) != NULL) {
		bp = bioq_takefirst(bioq);
		vtblk_bio_done(sc, bp, ENXIO);
	}

	vtblk_request_free(sc);
}
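/*
 * Submit as many queued bios as the virtqueue can hold, then notify the
 * host once if anything was enqueued.
 */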
static void
vtblk_startio(struct vtblk_softc *sc)
{
	struct virtqueue *vq;
	struct vtblk_request *req;
	int enq;

	VTBLK_LOCK_ASSERT(sc);
	vq = sc->vtblk_vq;
	enq = 0;

	if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
		return;

	while (!virtqueue_full(vq)) {
		req = vtblk_request_next(sc);
		if (req == NULL)
			break;

		if (vtblk_request_execute(sc, req) != 0) {
			vtblk_request_requeue_ready(sc, req);
			break;
		}

		enq++;
	}

	if (enq > 0)
		virtqueue_notify(vq);
}
static void
vtblk_bio_done(struct vtblk_softc *sc, struct bio *bp, int error)
{

	/* Because of GEOM direct dispatch, we cannot hold any locks. */
	if (sc != NULL)
		VTBLK_LOCK_ASSERT_NOTOWNED(sc);

	if (error) {
		bp->bio_resid = bp->bio_bcount;
		bp->bio_error = error;
		bp->bio_flags |= BIO_ERROR;
	}

	if (bp->bio_driver1 != NULL) {
		free(bp->bio_driver1, M_DEVBUF);
		bp->bio_driver1 = NULL;
	}

	biodone(bp);
}
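/* Read a config field only when its feature bit was negotiated. */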
#define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg)			\
	if (virtio_with_feature(_dev, _feature)) {			\
		virtio_read_device_config(_dev,				\
		    offsetof(struct virtio_blk_config, _field),		\
		    &(_cfg)->_field, sizeof((_cfg)->_field));		\
	}
static void
vtblk_read_config(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
	device_t dev;

	dev = sc->vtblk_dev;

	bzero(blkcfg, sizeof(struct virtio_blk_config));

	/* The capacity is always available. */
	virtio_read_device_config(dev, offsetof(struct virtio_blk_config,
	    capacity), &blkcfg->capacity, sizeof(blkcfg->capacity));

	/* Read the configuration if the feature was negotiated. */
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, wce, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_DISCARD, max_discard_sectors,
	    blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_DISCARD, max_discard_seg, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_DISCARD, discard_sector_alignment,
	    blkcfg);
}

#undef VTBLK_GET_CONFIG
static void
vtblk_ident(struct vtblk_softc *sc)
{
	struct bio buf;
	struct disk *dp;
	struct vtblk_request *req;
	int len, error;

	dp = sc->vtblk_disk;
	len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);

	if (vtblk_tunable_int(sc, "no_ident", vtblk_no_ident) != 0)
		return;

	req = vtblk_request_dequeue(sc);
	if (req == NULL)
		return;

	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = 0;

	req->vbr_bp = &buf;
	g_reset_bio(&buf);
	buf.bio_cmd = BIO_READ;
	buf.bio_data = dp->d_ident;
	buf.bio_bcount = len;

	VTBLK_LOCK(sc);
	error = vtblk_poll_request(sc, req);
	VTBLK_UNLOCK(sc);

	vtblk_request_enqueue(sc, req);

	if (error) {
		device_printf(sc->vtblk_dev,
		    "error getting device identifier: %d\n", error);
	}
}
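/*
 * Execute a single request synchronously by spinning on the virtqueue;
 * used where interrupts cannot be relied on, such as fetching the device
 * identifier during attach and writing kernel dumps.
 */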
static int
vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
	struct virtqueue *vq;
	int error;

	vq = sc->vtblk_vq;

	if (!virtqueue_empty(vq))
		return (EBUSY);

	error = vtblk_request_execute(sc, req);
	if (error)
		return (error);

	virtqueue_notify(vq);
	virtqueue_poll(vq, NULL);

	error = vtblk_request_error(req);
	if (error && bootverbose) {
		device_printf(sc->vtblk_dev,
		    "%s: IO error: %d\n", __func__, error);
	}

	return (error);
}
static int
vtblk_quiesce(struct vtblk_softc *sc)
{
	int error;

	VTBLK_LOCK_ASSERT(sc);
	error = 0;

	while (!virtqueue_empty(sc->vtblk_vq)) {
		if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq",
		    VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) {
			error = EBUSY;
			break;
		}
	}

	return (error);
}
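/*
 * Virtqueue interrupt handler: drain completed requests, re-arm the
 * interrupt (retrying if more completions raced in), wake up any quiesce
 * waiter, and finish the collected bios once the lock is dropped.
 */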
static void
vtblk_vq_intr(void *xsc)
{
	struct vtblk_softc *sc;
	struct virtqueue *vq;
	struct bio_queue queue;

	sc = xsc;
	vq = sc->vtblk_vq;
	TAILQ_INIT(&queue);

	VTBLK_LOCK(sc);

again:
	if (sc->vtblk_flags & VTBLK_FLAG_DETACH)
		goto out;

	vtblk_queue_completed(sc, &queue);
	vtblk_startio(sc);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		goto again;
	}

	if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
		wakeup(&sc->vtblk_vq);

out:
	VTBLK_UNLOCK(sc);
	vtblk_done_completed(sc, &queue);
}
static void
vtblk_stop(struct vtblk_softc *sc)
{

	virtqueue_disable_intr(sc->vtblk_vq);
	virtio_stop(sc->vtblk_dev);
}
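/*
 * Kernel dump support: the dump path runs polled via vtblk_poll_request(),
 * using the statically allocated vtblk_dump_request, while normal requests
 * completed in the meantime are parked on vtblk_dump_queue.
 */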
static void
vtblk_dump_quiesce(struct vtblk_softc *sc)
{

	/*
	 * Spin here until all the requests in-flight at the time of the
	 * dump are completed and queued. The queued requests will be
	 * biodone'd once the dump is finished.
	 */
	while (!virtqueue_empty(sc->vtblk_vq))
		vtblk_queue_completed(sc, &sc->vtblk_dump_queue);
}
static int
vtblk_dump_write(struct vtblk_softc *sc, void *virtual, off_t offset,
    size_t length)
{
	struct bio buf;
	struct vtblk_request *req;

	req = &sc->vtblk_dump_request;
	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = offset / VTBLK_BSIZE;

	req->vbr_bp = &buf;
	g_reset_bio(&buf);
	buf.bio_cmd = BIO_WRITE;
	buf.bio_data = virtual;
	buf.bio_bcount = length;

	return (vtblk_poll_request(sc, req));
}
static int
vtblk_dump_flush(struct vtblk_softc *sc)
{
	struct bio buf;
	struct vtblk_request *req;

	req = &sc->vtblk_dump_request;
	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = 0;

	req->vbr_bp = &buf;
	g_reset_bio(&buf);
	buf.bio_cmd = BIO_FLUSH;

	return (vtblk_poll_request(sc, req));
}
static void
vtblk_dump_complete(struct vtblk_softc *sc)
{

	vtblk_dump_flush(sc);

	VTBLK_UNLOCK(sc);
	vtblk_done_completed(sc, &sc->vtblk_dump_queue);
	VTBLK_LOCK(sc);
}
static void
vtblk_set_write_cache(struct vtblk_softc *sc, int wc)
{

	/* Set either writeback (1) or writethrough (0) mode. */
	virtio_write_dev_config_1(sc->vtblk_dev,
	    offsetof(struct virtio_blk_config, wce), wc);
}
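/*
 * Decide the initial cache mode: when ConfigWCE was negotiated the
 * hw.vtblk.writecache_mode tunable (0 = writethrough, 1 = writeback,
 * -1 = leave the device default) is honored, otherwise the mode simply
 * reflects whether the WriteCache feature was negotiated.
 */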
static int
vtblk_write_cache_enabled(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
	int wc;

	if (sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) {
		wc = vtblk_tunable_int(sc, "writecache_mode",
		    vtblk_writecache_mode);
		if (wc >= 0 && wc < VTBLK_CACHE_MAX)
			vtblk_set_write_cache(sc, wc);
		else
			wc = blkcfg->wce;
	} else
		wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE);

	return (wc);
}
static int
vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct vtblk_softc *sc;
	int wc, error;

	sc = oidp->oid_arg1;
	wc = sc->vtblk_write_cache;

	error = sysctl_handle_int(oidp, &wc, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
		return (EPERM);
	if (wc < 0 || wc >= VTBLK_CACHE_MAX)
		return (EINVAL);

	VTBLK_LOCK(sc);
	sc->vtblk_write_cache = wc;
	vtblk_set_write_cache(sc, sc->vtblk_write_cache);
	VTBLK_UNLOCK(sc);

	return (0);
}
static void
vtblk_setup_sysctl(struct vtblk_softc *sc)
{
	device_t dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtblk_dev;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "writecache_mode",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    vtblk_write_cache_sysctl, "I",
	    "Write cache mode (writethrough (0) or writeback (1))");
}
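/*
 * Example (assuming unit 0): the mode can be changed at runtime with
 * "sysctl dev.vtblk.0.writecache_mode=0" when the host supports ConfigWCE.
 */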
static int
vtblk_tunable_int(struct vtblk_softc *sc, const char *knob, int def)
{
	char path[64];

	snprintf(path, sizeof(path),
	    "hw.vtblk.%d.%s", device_get_unit(sc->vtblk_dev), knob);
	TUNABLE_INT_FETCH(path, &def);

	return (def);
}