/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO block devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <geom/geom_disk.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/block/virtio_blk.h>

#include "virtio_if.h"

struct vtblk_request {
        struct virtio_blk_outhdr         vbr_hdr;
        struct bio                      *vbr_bp;
        uint8_t                          vbr_ack;
        TAILQ_ENTRY(vtblk_request)       vbr_link;
};

enum vtblk_cache_mode {
        VTBLK_CACHE_WRITETHROUGH,
        VTBLK_CACHE_WRITEBACK,
        VTBLK_CACHE_MAX
};

struct vtblk_softc {
        device_t                 vtblk_dev;
        struct mtx               vtblk_mtx;
        uint64_t                 vtblk_features;
        uint32_t                 vtblk_flags;
#define VTBLK_FLAG_INDIRECT     0x0001
#define VTBLK_FLAG_READONLY     0x0002
#define VTBLK_FLAG_DETACH       0x0004
#define VTBLK_FLAG_SUSPEND      0x0008
#define VTBLK_FLAG_BARRIER      0x0010
#define VTBLK_FLAG_WC_CONFIG    0x0020

        struct virtqueue        *vtblk_vq;
        struct sglist           *vtblk_sglist;
        struct disk             *vtblk_disk;

        struct bio_queue_head    vtblk_bioq;
        TAILQ_HEAD(, vtblk_request)
                                 vtblk_req_free;
        TAILQ_HEAD(, vtblk_request)
                                 vtblk_req_ready;
        struct vtblk_request    *vtblk_req_ordered;

        int                      vtblk_max_nsegs;
        int                      vtblk_request_count;
        enum vtblk_cache_mode    vtblk_write_cache;

        struct bio_queue         vtblk_dump_queue;
        struct vtblk_request     vtblk_dump_request;
};

static struct virtio_feature_desc vtblk_feature_desc[] = {
        { VIRTIO_BLK_F_BARRIER,         "HostBarrier"   },
        { VIRTIO_BLK_F_SIZE_MAX,        "MaxSegSize"    },
        { VIRTIO_BLK_F_SEG_MAX,         "MaxNumSegs"    },
        { VIRTIO_BLK_F_GEOMETRY,        "DiskGeometry"  },
        { VIRTIO_BLK_F_RO,              "ReadOnly"      },
        { VIRTIO_BLK_F_BLK_SIZE,        "BlockSize"     },
        { VIRTIO_BLK_F_SCSI,            "SCSICmds"      },
        { VIRTIO_BLK_F_WCE,             "WriteCache"    },
        { VIRTIO_BLK_F_TOPOLOGY,        "Topology"      },
        { VIRTIO_BLK_F_CONFIG_WCE,      "ConfigWCE"     },

        { 0, NULL }
};

static int      vtblk_modevent(module_t, int, void *);

static int      vtblk_probe(device_t);
static int      vtblk_attach(device_t);
static int      vtblk_detach(device_t);
static int      vtblk_suspend(device_t);
static int      vtblk_resume(device_t);
static int      vtblk_shutdown(device_t);
static int      vtblk_config_change(device_t);

static int      vtblk_open(struct disk *);
static int      vtblk_close(struct disk *);
static int      vtblk_ioctl(struct disk *, u_long, void *, int,
                    struct thread *);
static int      vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
static void     vtblk_strategy(struct bio *);

static void     vtblk_negotiate_features(struct vtblk_softc *);
static void     vtblk_setup_features(struct vtblk_softc *);
static int      vtblk_maximum_segments(struct vtblk_softc *,
                    struct virtio_blk_config *);
static int      vtblk_alloc_virtqueue(struct vtblk_softc *);
static void     vtblk_resize_disk(struct vtblk_softc *, uint64_t);
static void     vtblk_alloc_disk(struct vtblk_softc *,
                    struct virtio_blk_config *);
static void     vtblk_create_disk(struct vtblk_softc *);

static int      vtblk_request_prealloc(struct vtblk_softc *);
static void     vtblk_request_free(struct vtblk_softc *);
static struct vtblk_request *
                vtblk_request_dequeue(struct vtblk_softc *);
static void     vtblk_request_enqueue(struct vtblk_softc *,
                    struct vtblk_request *);
static struct vtblk_request *
                vtblk_request_next_ready(struct vtblk_softc *);
static void     vtblk_request_requeue_ready(struct vtblk_softc *,
                    struct vtblk_request *);
static struct vtblk_request *
                vtblk_request_next(struct vtblk_softc *);
static struct vtblk_request *
                vtblk_request_bio(struct vtblk_softc *);
static int      vtblk_request_execute(struct vtblk_softc *,
                    struct vtblk_request *);
static int      vtblk_request_error(struct vtblk_request *);

static void     vtblk_queue_completed(struct vtblk_softc *,
                    struct bio_queue *);
static void     vtblk_done_completed(struct vtblk_softc *,
                    struct bio_queue *);
static void     vtblk_drain_vq(struct vtblk_softc *);
static void     vtblk_drain(struct vtblk_softc *);

static void     vtblk_startio(struct vtblk_softc *);
static void     vtblk_bio_done(struct vtblk_softc *, struct bio *, int);

static void     vtblk_read_config(struct vtblk_softc *,
                    struct virtio_blk_config *);
static void     vtblk_ident(struct vtblk_softc *);
static int      vtblk_poll_request(struct vtblk_softc *,
                    struct vtblk_request *);
static int      vtblk_quiesce(struct vtblk_softc *);
static void     vtblk_vq_intr(void *);
static void     vtblk_stop(struct vtblk_softc *);

static void     vtblk_dump_quiesce(struct vtblk_softc *);
static int      vtblk_dump_write(struct vtblk_softc *, void *, off_t, size_t);
static int      vtblk_dump_flush(struct vtblk_softc *);
static void     vtblk_dump_complete(struct vtblk_softc *);

static void     vtblk_set_write_cache(struct vtblk_softc *, int);
static int      vtblk_write_cache_enabled(struct vtblk_softc *sc,
                    struct virtio_blk_config *);
static int      vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS);

static void     vtblk_setup_sysctl(struct vtblk_softc *);
static int      vtblk_tunable_int(struct vtblk_softc *, const char *, int);

static int vtblk_no_ident = 0;
TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
static int vtblk_writecache_mode = -1;
TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);
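
/*
 * Example (a sketch, not part of the original driver): both knobs can also
 * be set from loader.conf(5).  The per-unit form shown for writecache_mode
 * is the one later read by vtblk_tunable_int():
 *
 *   hw.vtblk.no_ident=1             # skip the GET_ID request on all units
 *   hw.vtblk.0.writecache_mode=0    # force writethrough on unit 0 only
 */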

/* Features desired/implemented by this driver. */
#define VTBLK_FEATURES \
    (VIRTIO_BLK_F_BARRIER               | \
     VIRTIO_BLK_F_SIZE_MAX              | \
     VIRTIO_BLK_F_SEG_MAX               | \
     VIRTIO_BLK_F_GEOMETRY              | \
     VIRTIO_BLK_F_RO                    | \
     VIRTIO_BLK_F_BLK_SIZE              | \
     VIRTIO_BLK_F_WCE                   | \
     VIRTIO_BLK_F_TOPOLOGY              | \
     VIRTIO_BLK_F_CONFIG_WCE            | \
     VIRTIO_RING_F_INDIRECT_DESC)

#define VTBLK_MTX(_sc)          &(_sc)->vtblk_mtx
#define VTBLK_LOCK_INIT(_sc, _name) \
                                mtx_init(VTBLK_MTX((_sc)), (_name), \
                                    "VirtIO Block Lock", MTX_DEF)
#define VTBLK_LOCK(_sc)         mtx_lock(VTBLK_MTX((_sc)))
#define VTBLK_UNLOCK(_sc)       mtx_unlock(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_DESTROY(_sc) mtx_destroy(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_ASSERT(_sc)  mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
#define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
                                mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)

#define VTBLK_DISK_NAME         "vtbd"
#define VTBLK_QUIESCE_TIMEOUT   (30 * hz)

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VTBLK_MIN_SEGMENTS      2
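
/*
 * Illustration (not additional driver code): vtblk_request_execute() lays a
 * request out on the virtqueue as
 *
 *   descriptor 0:      vbr_hdr  - request header, read by the host
 *   descriptors 1..N:  bio data - host-readable for writes, host-writable
 *                                 for reads
 *   descriptor N+1:    vbr_ack  - status byte, written by the host
 *
 * hence the two-segment minimum above, plus however many data segments the
 * bio needs.
 */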

static device_method_t vtblk_methods[] = {
        /* Device methods. */
        DEVMETHOD(device_probe,         vtblk_probe),
        DEVMETHOD(device_attach,        vtblk_attach),
        DEVMETHOD(device_detach,        vtblk_detach),
        DEVMETHOD(device_suspend,       vtblk_suspend),
        DEVMETHOD(device_resume,        vtblk_resume),
        DEVMETHOD(device_shutdown,      vtblk_shutdown),

        /* VirtIO methods. */
        DEVMETHOD(virtio_config_change, vtblk_config_change),

        DEVMETHOD_END
};

static driver_t vtblk_driver = {
        "vtblk",
        vtblk_methods,
        sizeof(struct vtblk_softc)
};
static devclass_t vtblk_devclass;

DRIVER_MODULE(virtio_blk, virtio_mmio, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
MODULE_VERSION(virtio_blk, 1);
MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);

static int
vtblk_modevent(module_t mod, int type, void *unused)
{
        int error;

        error = 0;

        switch (type) {
        case MOD_LOAD:
        case MOD_QUIESCE:
        case MOD_UNLOAD:
        case MOD_SHUTDOWN:
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }

        return (error);
}

static int
vtblk_probe(device_t dev)
{

        if (virtio_get_device_type(dev) != VIRTIO_ID_BLOCK)
                return (ENXIO);

        device_set_desc(dev, "VirtIO Block Adapter");

        return (BUS_PROBE_DEFAULT);
}

static int
vtblk_attach(device_t dev)
{
        struct vtblk_softc *sc;
        struct virtio_blk_config blkcfg;
        int error;

        virtio_set_feature_desc(dev, vtblk_feature_desc);

        sc = device_get_softc(dev);
        sc->vtblk_dev = dev;
        VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
        bioq_init(&sc->vtblk_bioq);
        TAILQ_INIT(&sc->vtblk_dump_queue);
        TAILQ_INIT(&sc->vtblk_req_free);
        TAILQ_INIT(&sc->vtblk_req_ready);

        vtblk_setup_sysctl(sc);
        vtblk_setup_features(sc);

        vtblk_read_config(sc, &blkcfg);

        /*
         * With the current sglist(9) implementation, it is not easy
         * for us to support a maximum segment size as adjacent
         * segments are coalesced. For now, just make sure it's larger
         * than the maximum supported transfer size.
         */
        if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
                if (blkcfg.size_max < MAXPHYS) {
                        error = ENOTSUP;
                        device_printf(dev, "host requires unsupported "
                            "maximum segment size feature\n");
                        goto fail;
                }
        }

        sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
        if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
                error = EINVAL;
                device_printf(dev, "fewer than minimum number of segments "
                    "allowed: %d\n", sc->vtblk_max_nsegs);
                goto fail;
        }

        sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
        if (sc->vtblk_sglist == NULL) {
                error = ENOMEM;
                device_printf(dev, "cannot allocate sglist\n");
                goto fail;
        }

        error = vtblk_alloc_virtqueue(sc);
        if (error) {
                device_printf(dev, "cannot allocate virtqueue\n");
                goto fail;
        }

        error = vtblk_request_prealloc(sc);
        if (error) {
                device_printf(dev, "cannot preallocate requests\n");
                goto fail;
        }

        vtblk_alloc_disk(sc, &blkcfg);

        error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
        if (error) {
                device_printf(dev, "cannot setup virtqueue interrupt\n");
                goto fail;
        }

        vtblk_create_disk(sc);

        virtqueue_enable_intr(sc->vtblk_vq);

fail:
        if (error)
                vtblk_detach(dev);

        return (error);
}

static int
vtblk_detach(device_t dev)
{
        struct vtblk_softc *sc;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        sc->vtblk_flags |= VTBLK_FLAG_DETACH;
        if (device_is_attached(dev))
                vtblk_stop(sc);
        VTBLK_UNLOCK(sc);

        vtblk_drain(sc);

        if (sc->vtblk_disk != NULL) {
                disk_destroy(sc->vtblk_disk);
                sc->vtblk_disk = NULL;
        }

        if (sc->vtblk_sglist != NULL) {
                sglist_free(sc->vtblk_sglist);
                sc->vtblk_sglist = NULL;
        }

        VTBLK_LOCK_DESTROY(sc);

        return (0);
}

static int
vtblk_suspend(device_t dev)
{
        struct vtblk_softc *sc;
        int error;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
        /* XXX BMV: virtio_stop(), etc needed here? */
        error = vtblk_quiesce(sc);
        if (error)
                sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
        VTBLK_UNLOCK(sc);

        return (error);
}

static int
vtblk_resume(device_t dev)
{
        struct vtblk_softc *sc;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        /* XXX BMV: virtio_reinit(), etc needed here? */
        sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
        vtblk_startio(sc);
        VTBLK_UNLOCK(sc);

        return (0);
}

static int
vtblk_shutdown(device_t dev)
{

        return (0);
}

static int
vtblk_config_change(device_t dev)
{
        struct vtblk_softc *sc;
        struct virtio_blk_config blkcfg;
        uint64_t capacity;

        sc = device_get_softc(dev);

        vtblk_read_config(sc, &blkcfg);

        /* Capacity is always in 512-byte units. */
        capacity = blkcfg.capacity * 512;

        if (sc->vtblk_disk->d_mediasize != capacity)
                vtblk_resize_disk(sc, capacity);

        return (0);
}

static int
vtblk_open(struct disk *dp)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? ENXIO : 0);
}

static int
vtblk_close(struct disk *dp)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (0);
}

static int
vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
    struct thread *td)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (ENOTTY);
}

static int
vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)
{
        struct disk *dp;
        struct vtblk_softc *sc;
        int error;

        dp = arg;
        error = 0;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        VTBLK_LOCK(sc);

        vtblk_dump_quiesce(sc);

        if (length > 0)
                error = vtblk_dump_write(sc, virtual, offset, length);
        if (error || (virtual == NULL && offset == 0))
                vtblk_dump_complete(sc);

        VTBLK_UNLOCK(sc);

        return (error);
}

static void
vtblk_strategy(struct bio *bp)
{
        struct vtblk_softc *sc;

        if ((sc = bp->bio_disk->d_drv1) == NULL) {
                vtblk_bio_done(NULL, bp, EINVAL);
                return;
        }

        /*
         * Fail any write if RO. Unfortunately, there does not seem to
         * be a better way to report our readonly'ness to GEOM above.
         */
        if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
            (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH)) {
                vtblk_bio_done(sc, bp, EROFS);
                return;
        }

        VTBLK_LOCK(sc);

        if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
                VTBLK_UNLOCK(sc);
                vtblk_bio_done(sc, bp, ENXIO);
                return;
        }

        bioq_insert_tail(&sc->vtblk_bioq, bp);
        vtblk_startio(sc);

        VTBLK_UNLOCK(sc);
}

static void
vtblk_negotiate_features(struct vtblk_softc *sc)
{
        device_t dev;
        uint64_t features;

        dev = sc->vtblk_dev;
        features = VTBLK_FEATURES;

        sc->vtblk_features = virtio_negotiate_features(dev, features);
}

static void
vtblk_setup_features(struct vtblk_softc *sc)
{
        device_t dev;

        dev = sc->vtblk_dev;

        vtblk_negotiate_features(sc);

        if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
                sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
        if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
                sc->vtblk_flags |= VTBLK_FLAG_READONLY;
        if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
                sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
        if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
                sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;
}

static int
vtblk_maximum_segments(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
        device_t dev;
        int nsegs;

        dev = sc->vtblk_dev;
        nsegs = VTBLK_MIN_SEGMENTS;

        if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
                nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
                if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
                        nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
        } else
                nsegs += 1;

        return (nsegs);
}

static int
vtblk_alloc_virtqueue(struct vtblk_softc *sc)
{
        device_t dev;
        struct vq_alloc_info vq_info;

        dev = sc->vtblk_dev;

        VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
            vtblk_vq_intr, sc, &sc->vtblk_vq,
            "%s request", device_get_nameunit(dev));

        return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
}

static void
vtblk_resize_disk(struct vtblk_softc *sc, uint64_t new_capacity)
{
        device_t dev;
        struct disk *dp;
        int error;

        dev = sc->vtblk_dev;
        dp = sc->vtblk_disk;

        dp->d_mediasize = new_capacity;
        if (bootverbose) {
                device_printf(dev, "resized to %juMB (%ju %u byte sectors)\n",
                    (uintmax_t) dp->d_mediasize >> 20,
                    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
                    dp->d_sectorsize);
        }

        error = disk_resize(dp, M_NOWAIT);
        if (error) {
                device_printf(dev,
                    "disk_resize(9) failed, error: %d\n", error);
        }
}

static void
vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
        device_t dev;
        struct disk *dp;

        dev = sc->vtblk_dev;

        sc->vtblk_disk = dp = disk_alloc();
        dp->d_open = vtblk_open;
        dp->d_close = vtblk_close;
        dp->d_ioctl = vtblk_ioctl;
        dp->d_strategy = vtblk_strategy;
        dp->d_name = VTBLK_DISK_NAME;
        dp->d_unit = device_get_unit(dev);
        dp->d_drv1 = sc;
        dp->d_flags = DISKFLAG_CANFLUSHCACHE | DISKFLAG_UNMAPPED_BIO |
            DISKFLAG_DIRECT_COMPLETION;
        dp->d_hba_vendor = virtio_get_vendor(dev);
        dp->d_hba_device = virtio_get_device(dev);
        dp->d_hba_subvendor = virtio_get_subvendor(dev);
        dp->d_hba_subdevice = virtio_get_subdevice(dev);

        if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
                dp->d_dump = vtblk_dump;

        /* Capacity is always in 512-byte units. */
        dp->d_mediasize = blkcfg->capacity * 512;

        if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
                dp->d_sectorsize = blkcfg->blk_size;
        else
                dp->d_sectorsize = 512;

        /*
         * The VirtIO maximum I/O size is given in terms of segments.
         * However, FreeBSD limits I/O size by logical buffer size, not
         * by physically contiguous pages. Therefore, we have to assume
         * no pages are contiguous. This may impose an artificially low
         * maximum I/O size. But in practice, since QEMU advertises 128
         * segments, this gives us a maximum IO size of 125 * PAGE_SIZE,
         * which is typically greater than MAXPHYS. Eventually we should
         * just advertise MAXPHYS and split buffers that are too big.
         *
         * Note we must subtract one additional segment in case of non
         * page aligned buffers.
         */
        dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
            PAGE_SIZE;
        if (dp->d_maxsize < PAGE_SIZE)
                dp->d_maxsize = PAGE_SIZE;      /* XXX */
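
        /*
         * Worked example (a sketch, assuming 4 KB pages and the historical
         * 128 KB MAXPHYS): a host advertising 128 segments yields a
         * d_maxsize of (128 - 2 - 1) * 4096 = 500 KB, well above MAXPHYS,
         * so the clamp above only matters for hosts advertising very few
         * segments.
         */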

        if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
                dp->d_fwsectors = blkcfg->geometry.sectors;
                dp->d_fwheads = blkcfg->geometry.heads;
        }

        if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY) &&
            blkcfg->topology.physical_block_exp > 0) {
                dp->d_stripesize = dp->d_sectorsize *
                    (1 << blkcfg->topology.physical_block_exp);
                dp->d_stripeoffset = (dp->d_stripesize -
                    blkcfg->topology.alignment_offset * dp->d_sectorsize) %
                    dp->d_stripesize;
        }

        if (vtblk_write_cache_enabled(sc, blkcfg) != 0)
                sc->vtblk_write_cache = VTBLK_CACHE_WRITEBACK;
        else
                sc->vtblk_write_cache = VTBLK_CACHE_WRITETHROUGH;
}

static void
vtblk_create_disk(struct vtblk_softc *sc)
{
        struct disk *dp;

        dp = sc->vtblk_disk;

        vtblk_ident(sc);

        device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
            (uintmax_t) dp->d_mediasize >> 20,
            (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
            dp->d_sectorsize);

        disk_create(dp, DISK_VERSION);
}

static int
vtblk_request_prealloc(struct vtblk_softc *sc)
{
        struct vtblk_request *req;
        int i, nreqs;

        nreqs = virtqueue_size(sc->vtblk_vq);

        /*
         * Preallocate sufficient requests to keep the virtqueue full. Each
         * request consumes VTBLK_MIN_SEGMENTS or more descriptors so reduce
         * the number allocated when indirect descriptors are not available.
         */
        if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
                nreqs /= VTBLK_MIN_SEGMENTS;

        for (i = 0; i < nreqs; i++) {
                req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT);
                if (req == NULL)
                        return (ENOMEM);

                MPASS(sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr)) == 1);
                MPASS(sglist_count(&req->vbr_ack, sizeof(req->vbr_ack)) == 1);

                sc->vtblk_request_count++;
                vtblk_request_enqueue(sc, req);
        }

        return (0);
}

static void
vtblk_request_free(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        MPASS(TAILQ_EMPTY(&sc->vtblk_req_ready));

        while ((req = vtblk_request_dequeue(sc)) != NULL) {
                sc->vtblk_request_count--;
                free(req, M_DEVBUF);
        }

        KASSERT(sc->vtblk_request_count == 0,
            ("%s: leaked %d requests", __func__, sc->vtblk_request_count));
}

static struct vtblk_request *
vtblk_request_dequeue(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        req = TAILQ_FIRST(&sc->vtblk_req_free);
        if (req != NULL) {
                TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);
                bzero(req, sizeof(struct vtblk_request));
        }

        return (req);
}

static void
vtblk_request_enqueue(struct vtblk_softc *sc, struct vtblk_request *req)
{

        TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
}

static struct vtblk_request *
vtblk_request_next_ready(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        req = TAILQ_FIRST(&sc->vtblk_req_ready);
        if (req != NULL)
                TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);

        return (req);
}

static void
vtblk_request_requeue_ready(struct vtblk_softc *sc, struct vtblk_request *req)
{

        /* NOTE: Currently, there will be at most one request in the queue. */
        TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);
}

static struct vtblk_request *
vtblk_request_next(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        req = vtblk_request_next_ready(sc);
        if (req != NULL)
                return (req);

        return (vtblk_request_bio(sc));
}

static struct vtblk_request *
vtblk_request_bio(struct vtblk_softc *sc)
{
        struct bio_queue_head *bioq;
        struct vtblk_request *req;
        struct bio *bp;

        bioq = &sc->vtblk_bioq;

        if (bioq_first(bioq) == NULL)
                return (NULL);

        req = vtblk_request_dequeue(sc);
        if (req == NULL)
                return (NULL);

        bp = bioq_takefirst(bioq);
        req->vbr_bp = bp;
        req->vbr_ack = -1;
        req->vbr_hdr.ioprio = 1;

        switch (bp->bio_cmd) {
        case BIO_FLUSH:
                req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
                break;
        case BIO_READ:
                req->vbr_hdr.type = VIRTIO_BLK_T_IN;
                req->vbr_hdr.sector = bp->bio_offset / 512;
                break;
        case BIO_WRITE:
                req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
                req->vbr_hdr.sector = bp->bio_offset / 512;
                break;
        default:
                panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
        }

        if (bp->bio_flags & BIO_ORDERED)
                req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;

        return (req);
}

static int
vtblk_request_execute(struct vtblk_softc *sc, struct vtblk_request *req)
{
        struct virtqueue *vq;
        struct sglist *sg;
        struct bio *bp;
        int ordered, readable, writable, error;

        vq = sc->vtblk_vq;
        sg = sc->vtblk_sglist;
        bp = req->vbr_bp;
        ordered = 0;
        writable = 0;

        /*
         * Some hosts (such as bhyve) do not implement the barrier feature,
         * so we emulate it in the driver by allowing the barrier request
         * to be the only one in flight.
         */
        if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
                if (sc->vtblk_req_ordered != NULL)
                        return (EBUSY);
                if (bp->bio_flags & BIO_ORDERED) {
                        if (!virtqueue_empty(vq))
                                return (EBUSY);
                        ordered = 1;
                        req->vbr_hdr.type &= ~VIRTIO_BLK_T_BARRIER;
                }
        }

        sglist_reset(sg);
        sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));

        if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
                error = sglist_append_bio(sg, bp);
                if (error || sg->sg_nseg == sg->sg_maxseg) {
                        panic("%s: bio %p data buffer too big %d",
                            __func__, bp, error);
                }

                /* BIO_READ means the host writes into our buffer. */
                if (bp->bio_cmd == BIO_READ)
                        writable = sg->sg_nseg - 1;
        }

        /* The status byte is always written by the host. */
        writable++;
        sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
        readable = sg->sg_nseg - writable;

        error = virtqueue_enqueue(vq, req, sg, readable, writable);
        if (error == 0 && ordered)
                sc->vtblk_req_ordered = req;

        return (error);
}

static int
vtblk_request_error(struct vtblk_request *req)
{
        int error;

        switch (req->vbr_ack) {
        case VIRTIO_BLK_S_OK:
                error = 0;
                break;
        case VIRTIO_BLK_S_UNSUPP:
                error = ENOTSUP;
                break;
        default:
                error = EIO;
                break;
        }

        return (error);
}

static void
vtblk_queue_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
        struct vtblk_request *req;
        struct bio *bp;

        while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
                if (sc->vtblk_req_ordered != NULL) {
                        MPASS(sc->vtblk_req_ordered == req);
                        sc->vtblk_req_ordered = NULL;
                }

                bp = req->vbr_bp;
                bp->bio_error = vtblk_request_error(req);
                TAILQ_INSERT_TAIL(queue, bp, bio_queue);

                vtblk_request_enqueue(sc, req);
        }
}

static void
vtblk_done_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
        struct bio *bp, *tmp;

        TAILQ_FOREACH_SAFE(bp, queue, bio_queue, tmp) {
                if (bp->bio_error != 0)
                        disk_err(bp, "hard error", -1, 1);
                vtblk_bio_done(sc, bp, bp->bio_error);
        }
}

static void
vtblk_drain_vq(struct vtblk_softc *sc)
{
        struct virtqueue *vq;
        struct vtblk_request *req;
        int last;

        vq = sc->vtblk_vq;
        last = 0;

        while ((req = virtqueue_drain(vq, &last)) != NULL) {
                vtblk_bio_done(sc, req->vbr_bp, ENXIO);
                vtblk_request_enqueue(sc, req);
        }

        sc->vtblk_req_ordered = NULL;
        KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}

static void
vtblk_drain(struct vtblk_softc *sc)
{
        struct bio_queue queue;
        struct bio_queue_head *bioq;
        struct vtblk_request *req;
        struct bio *bp;

        bioq = &sc->vtblk_bioq;
        TAILQ_INIT(&queue);

        if (sc->vtblk_vq != NULL) {
                vtblk_queue_completed(sc, &queue);
                vtblk_done_completed(sc, &queue);

                vtblk_drain_vq(sc);
        }

        while ((req = vtblk_request_next_ready(sc)) != NULL) {
                vtblk_bio_done(sc, req->vbr_bp, ENXIO);
                vtblk_request_enqueue(sc, req);
        }

        while (bioq_first(bioq) != NULL) {
                bp = bioq_takefirst(bioq);
                vtblk_bio_done(sc, bp, ENXIO);
        }

        vtblk_request_free(sc);
}

static void
vtblk_startio(struct vtblk_softc *sc)
{
        struct virtqueue *vq;
        struct vtblk_request *req;
        int enq;

        VTBLK_LOCK_ASSERT(sc);
        vq = sc->vtblk_vq;
        enq = 0;

        if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
                return;

        while (!virtqueue_full(vq)) {
                req = vtblk_request_next(sc);
                if (req == NULL)
                        break;

                if (vtblk_request_execute(sc, req) != 0) {
                        vtblk_request_requeue_ready(sc, req);
                        break;
                }

                enq++;
        }

        if (enq > 0)
                virtqueue_notify(vq);
}

static void
vtblk_bio_done(struct vtblk_softc *sc, struct bio *bp, int error)
{

        /* Because of GEOM direct dispatch, we cannot hold any locks. */
        if (sc != NULL)
                VTBLK_LOCK_ASSERT_NOTOWNED(sc);

        if (error) {
                bp->bio_resid = bp->bio_bcount;
                bp->bio_error = error;
                bp->bio_flags |= BIO_ERROR;
        }

        biodone(bp);
}

#define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg)          \
        if (virtio_with_feature(_dev, _feature)) {              \
                virtio_read_device_config(_dev,                 \
                    offsetof(struct virtio_blk_config, _field), \
                    &(_cfg)->_field, sizeof((_cfg)->_field));   \
        }

static void
vtblk_read_config(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
        device_t dev;

        dev = sc->vtblk_dev;

        bzero(blkcfg, sizeof(struct virtio_blk_config));

        /* The capacity is always available. */
        virtio_read_device_config(dev, offsetof(struct virtio_blk_config,
            capacity), &blkcfg->capacity, sizeof(blkcfg->capacity));

        /* Read the configuration if the feature was negotiated. */
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, writeback, blkcfg);
}

#undef VTBLK_GET_CONFIG

static void
vtblk_ident(struct vtblk_softc *sc)
{
        struct bio buf;
        struct disk *dp;
        struct vtblk_request *req;
        int len, error;

        dp = sc->vtblk_disk;
        len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);

        if (vtblk_tunable_int(sc, "no_ident", vtblk_no_ident) != 0)
                return;

        req = vtblk_request_dequeue(sc);
        if (req == NULL)
                return;

        req->vbr_ack = -1;
        req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
        req->vbr_hdr.ioprio = 1;
        req->vbr_hdr.sector = 0;

        req->vbr_bp = &buf;
        bzero(&buf, sizeof(struct bio));

        buf.bio_cmd = BIO_READ;
        buf.bio_data = dp->d_ident;
        buf.bio_bcount = len;

        VTBLK_LOCK(sc);
        error = vtblk_poll_request(sc, req);
        VTBLK_UNLOCK(sc);

        vtblk_request_enqueue(sc, req);

        if (error) {
                device_printf(sc->vtblk_dev,
                    "error getting device identifier: %d\n", error);
        }
}

static int
vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
        struct virtqueue *vq;
        int error;

        vq = sc->vtblk_vq;

        if (!virtqueue_empty(vq))
                return (EBUSY);

        error = vtblk_request_execute(sc, req);
        if (error)
                return (error);

        virtqueue_notify(vq);
        virtqueue_poll(vq, NULL);

        error = vtblk_request_error(req);
        if (error && bootverbose) {
                device_printf(sc->vtblk_dev,
                    "%s: IO error: %d\n", __func__, error);
        }

        return (error);
}

static int
vtblk_quiesce(struct vtblk_softc *sc)
{
        int error;

        VTBLK_LOCK_ASSERT(sc);
        error = 0;

        while (!virtqueue_empty(sc->vtblk_vq)) {
                if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq",
                    VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) {
                        error = EBUSY;
                        break;
                }
        }

        return (error);
}

static void
vtblk_vq_intr(void *xsc)
{
        struct vtblk_softc *sc;
        struct virtqueue *vq;
        struct bio_queue queue;

        sc = xsc;
        vq = sc->vtblk_vq;
        TAILQ_INIT(&queue);

        VTBLK_LOCK(sc);

again:
        if (sc->vtblk_flags & VTBLK_FLAG_DETACH)
                goto out;

        vtblk_queue_completed(sc, &queue);
        vtblk_startio(sc);

        if (virtqueue_enable_intr(vq) != 0) {
                virtqueue_disable_intr(vq);
                goto again;
        }

        if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
                wakeup(&sc->vtblk_vq);

out:
        VTBLK_UNLOCK(sc);
        vtblk_done_completed(sc, &queue);
}

static void
vtblk_stop(struct vtblk_softc *sc)
{

        virtqueue_disable_intr(sc->vtblk_vq);
        virtio_stop(sc->vtblk_dev);
}

static void
vtblk_dump_quiesce(struct vtblk_softc *sc)
{

        /*
         * Spin here until all the requests in-flight at the time of the
         * dump are completed and queued. The queued requests will be
         * biodone'd once the dump is finished.
         */
        while (!virtqueue_empty(sc->vtblk_vq))
                vtblk_queue_completed(sc, &sc->vtblk_dump_queue);
}

static int
vtblk_dump_write(struct vtblk_softc *sc, void *virtual, off_t offset,
    size_t length)
{
        struct bio buf;
        struct vtblk_request *req;

        req = &sc->vtblk_dump_request;
        req->vbr_ack = -1;
        req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
        req->vbr_hdr.ioprio = 1;
        req->vbr_hdr.sector = offset / 512;

        req->vbr_bp = &buf;
        bzero(&buf, sizeof(struct bio));

        buf.bio_cmd = BIO_WRITE;
        buf.bio_data = virtual;
        buf.bio_bcount = length;

        return (vtblk_poll_request(sc, req));
}

static int
vtblk_dump_flush(struct vtblk_softc *sc)
{
        struct bio buf;
        struct vtblk_request *req;

        req = &sc->vtblk_dump_request;
        req->vbr_ack = -1;
        req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
        req->vbr_hdr.ioprio = 1;
        req->vbr_hdr.sector = 0;

        req->vbr_bp = &buf;
        bzero(&buf, sizeof(struct bio));

        buf.bio_cmd = BIO_FLUSH;

        return (vtblk_poll_request(sc, req));
}

static void
vtblk_dump_complete(struct vtblk_softc *sc)
{

        vtblk_dump_flush(sc);

        VTBLK_UNLOCK(sc);
        vtblk_done_completed(sc, &sc->vtblk_dump_queue);
        VTBLK_LOCK(sc);
}

static void
vtblk_set_write_cache(struct vtblk_softc *sc, int wc)
{

        /* Set either writeback (1) or writethrough (0) mode. */
        virtio_write_dev_config_1(sc->vtblk_dev,
            offsetof(struct virtio_blk_config, writeback), wc);
}

static int
vtblk_write_cache_enabled(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
        int wc;

        if (sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) {
                wc = vtblk_tunable_int(sc, "writecache_mode",
                    vtblk_writecache_mode);
                if (wc >= 0 && wc < VTBLK_CACHE_MAX)
                        vtblk_set_write_cache(sc, wc);
                else
                        wc = blkcfg->writeback;
        } else
                wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE);

        return (wc);
}

static int
vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS)
{
        struct vtblk_softc *sc;
        int wc, error;

        sc = oidp->oid_arg1;
        wc = sc->vtblk_write_cache;

        error = sysctl_handle_int(oidp, &wc, 0, req);
        if (error || req->newptr == NULL)
                return (error);
        if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
                return (EPERM);
        if (wc < 0 || wc >= VTBLK_CACHE_MAX)
                return (EINVAL);

        VTBLK_LOCK(sc);
        sc->vtblk_write_cache = wc;
        vtblk_set_write_cache(sc, sc->vtblk_write_cache);
        VTBLK_UNLOCK(sc);

        return (0);
}

static void
vtblk_setup_sysctl(struct vtblk_softc *sc)
{
        device_t dev;
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *tree;
        struct sysctl_oid_list *child;

        dev = sc->vtblk_dev;
        ctx = device_get_sysctl_ctx(dev);
        tree = device_get_sysctl_tree(dev);
        child = SYSCTL_CHILDREN(tree);

        SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "writecache_mode",
            CTLTYPE_INT | CTLFLAG_RW, sc, 0, vtblk_write_cache_sysctl,
            "I", "Write cache mode (writethrough (0) or writeback (1))");
}

static int
vtblk_tunable_int(struct vtblk_softc *sc, const char *knob, int def)
{
        char path[64];

        snprintf(path, sizeof(path),
            "hw.vtblk.%d.%s", device_get_unit(sc->vtblk_dev), knob);
        TUNABLE_INT_FETCH(path, &def);

        return (def);
}