/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* Driver for VirtIO block devices. */
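
/*
 * Note (added for context, not in the original source): each virtio block
 * device is exposed as a GEOM disk provider named "vtbd<unit>"
 * (VTBLK_DISK_NAME below); I/O arrives through the disk(9) strategy
 * routine and is translated into virtio block requests.
 */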
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <geom/geom.h>
#include <geom/geom_disk.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/block/virtio_blk.h>

#include "virtio_if.h"
struct vtblk_request {
	struct virtio_blk_outhdr	 vbr_hdr;
	struct bio			*vbr_bp;
	uint8_t				 vbr_ack;
	TAILQ_ENTRY(vtblk_request)	 vbr_link;
};
enum vtblk_cache_mode {
	VTBLK_CACHE_WRITETHROUGH,
	VTBLK_CACHE_WRITEBACK,
	VTBLK_CACHE_MAX
};
struct vtblk_softc {
	device_t		 vtblk_dev;
	struct mtx		 vtblk_mtx;
	uint64_t		 vtblk_features;
	uint32_t		 vtblk_flags;
#define VTBLK_FLAG_INDIRECT	0x0001
#define VTBLK_FLAG_READONLY	0x0002
#define VTBLK_FLAG_DETACH	0x0004
#define VTBLK_FLAG_SUSPEND	0x0008
#define VTBLK_FLAG_BARRIER	0x0010
#define VTBLK_FLAG_WC_CONFIG	0x0020

	struct virtqueue	*vtblk_vq;
	struct sglist		*vtblk_sglist;
	struct disk		*vtblk_disk;

	struct bio_queue_head	 vtblk_bioq;
	TAILQ_HEAD(, vtblk_request)
				 vtblk_req_free;
	TAILQ_HEAD(, vtblk_request)
				 vtblk_req_ready;
	struct vtblk_request	*vtblk_req_ordered;

	int			 vtblk_max_nsegs;
	int			 vtblk_request_count;
	enum vtblk_cache_mode	 vtblk_write_cache;

	struct bio_queue	 vtblk_dump_queue;
	struct vtblk_request	 vtblk_dump_request;
};
static struct virtio_feature_desc vtblk_feature_desc[] = {
	{ VIRTIO_BLK_F_BARRIER,		"HostBarrier"	},
	{ VIRTIO_BLK_F_SIZE_MAX,	"MaxSegSize"	},
	{ VIRTIO_BLK_F_SEG_MAX,		"MaxNumSegs"	},
	{ VIRTIO_BLK_F_GEOMETRY,	"DiskGeometry"	},
	{ VIRTIO_BLK_F_RO,		"ReadOnly"	},
	{ VIRTIO_BLK_F_BLK_SIZE,	"BlockSize"	},
	{ VIRTIO_BLK_F_SCSI,		"SCSICmds"	},
	{ VIRTIO_BLK_F_WCE,		"WriteCache"	},
	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology"	},
	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE"	},

	{ 0, NULL }
};
static int	vtblk_modevent(module_t, int, void *);

static int	vtblk_probe(device_t);
static int	vtblk_attach(device_t);
static int	vtblk_detach(device_t);
static int	vtblk_suspend(device_t);
static int	vtblk_resume(device_t);
static int	vtblk_shutdown(device_t);
static int	vtblk_config_change(device_t);

static int	vtblk_open(struct disk *);
static int	vtblk_close(struct disk *);
static int	vtblk_ioctl(struct disk *, u_long, void *, int,
		    struct thread *);
static int	vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
static void	vtblk_strategy(struct bio *);

static void	vtblk_negotiate_features(struct vtblk_softc *);
static void	vtblk_setup_features(struct vtblk_softc *);
static int	vtblk_maximum_segments(struct vtblk_softc *,
		    struct virtio_blk_config *);
static int	vtblk_alloc_virtqueue(struct vtblk_softc *);
static void	vtblk_resize_disk(struct vtblk_softc *, uint64_t);
static void	vtblk_alloc_disk(struct vtblk_softc *,
		    struct virtio_blk_config *);
static void	vtblk_create_disk(struct vtblk_softc *);

static int	vtblk_request_prealloc(struct vtblk_softc *);
static void	vtblk_request_free(struct vtblk_softc *);
static struct vtblk_request *
		vtblk_request_dequeue(struct vtblk_softc *);
static void	vtblk_request_enqueue(struct vtblk_softc *,
		    struct vtblk_request *);
static struct vtblk_request *
		vtblk_request_next_ready(struct vtblk_softc *);
static void	vtblk_request_requeue_ready(struct vtblk_softc *,
		    struct vtblk_request *);
static struct vtblk_request *
		vtblk_request_next(struct vtblk_softc *);
static struct vtblk_request *
		vtblk_request_bio(struct vtblk_softc *);
static int	vtblk_request_execute(struct vtblk_softc *,
		    struct vtblk_request *);
static int	vtblk_request_error(struct vtblk_request *);
static void	vtblk_queue_completed(struct vtblk_softc *,
		    struct bio_queue *);
static void	vtblk_done_completed(struct vtblk_softc *,
		    struct bio_queue *);
static void	vtblk_drain_vq(struct vtblk_softc *);
static void	vtblk_drain(struct vtblk_softc *);

static void	vtblk_startio(struct vtblk_softc *);
static void	vtblk_bio_done(struct vtblk_softc *, struct bio *, int);

static void	vtblk_read_config(struct vtblk_softc *,
		    struct virtio_blk_config *);
static void	vtblk_ident(struct vtblk_softc *);
static int	vtblk_poll_request(struct vtblk_softc *,
		    struct vtblk_request *);
static int	vtblk_quiesce(struct vtblk_softc *);
static void	vtblk_vq_intr(void *);
static void	vtblk_stop(struct vtblk_softc *);

static void	vtblk_dump_quiesce(struct vtblk_softc *);
static int	vtblk_dump_write(struct vtblk_softc *, void *, off_t, size_t);
static int	vtblk_dump_flush(struct vtblk_softc *);
static void	vtblk_dump_complete(struct vtblk_softc *);

static void	vtblk_set_write_cache(struct vtblk_softc *, int);
static int	vtblk_write_cache_enabled(struct vtblk_softc *sc,
		    struct virtio_blk_config *);
static int	vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS);

static void	vtblk_setup_sysctl(struct vtblk_softc *);
static int	vtblk_tunable_int(struct vtblk_softc *, const char *, int);
static int vtblk_no_ident = 0;
TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
static int vtblk_writecache_mode = -1;
TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);
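
/*
 * Example (illustrative, not from the original source): the tunables above
 * can be set from loader.conf(5), and vtblk_tunable_int() below also checks
 * a per-device override under hw.vtblk.<unit>.<knob>, e.g.:
 *
 *	hw.vtblk.no_ident="1"		# skip the GET_ID identify request
 *	hw.vtblk.writecache_mode="0"	# force writethrough on all devices
 *	hw.vtblk.0.writecache_mode="1"	# writeback for unit 0 only
 */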
/* Features desired/implemented by this driver. */
#define VTBLK_FEATURES \
    (VIRTIO_BLK_F_BARRIER		| \
     VIRTIO_BLK_F_SIZE_MAX		| \
     VIRTIO_BLK_F_SEG_MAX		| \
     VIRTIO_BLK_F_GEOMETRY		| \
     VIRTIO_BLK_F_RO			| \
     VIRTIO_BLK_F_BLK_SIZE		| \
     VIRTIO_BLK_F_WCE			| \
     VIRTIO_BLK_F_TOPOLOGY		| \
     VIRTIO_BLK_F_CONFIG_WCE		| \
     VIRTIO_RING_F_INDIRECT_DESC)
#define VTBLK_MTX(_sc)		&(_sc)->vtblk_mtx
#define VTBLK_LOCK_INIT(_sc, _name) \
				mtx_init(VTBLK_MTX((_sc)), (_name), \
				    "VirtIO Block Lock", MTX_DEF)
#define VTBLK_LOCK(_sc)		mtx_lock(VTBLK_MTX((_sc)))
#define VTBLK_UNLOCK(_sc)	mtx_unlock(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_DESTROY(_sc)	mtx_destroy(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_ASSERT(_sc)	mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
#define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
				mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)

#define VTBLK_DISK_NAME		"vtbd"
#define VTBLK_QUIESCE_TIMEOUT	(30 * hz)
/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VTBLK_MIN_SEGMENTS	2
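
/*
 * Sketch of the scatter/gather layout built by vtblk_request_execute()
 * (inferred from the code below, not part of the original comments):
 *
 *	sg[0]		vbr_hdr		request header, read by the host
 *	sg[1..n]	bio data	readable for writes, writable for reads
 *	sg[n+1]		vbr_ack		status byte, written by the host
 */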
static device_method_t vtblk_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtblk_probe),
	DEVMETHOD(device_attach,	vtblk_attach),
	DEVMETHOD(device_detach,	vtblk_detach),
	DEVMETHOD(device_suspend,	vtblk_suspend),
	DEVMETHOD(device_resume,	vtblk_resume),
	DEVMETHOD(device_shutdown,	vtblk_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_config_change,	vtblk_config_change),

	DEVMETHOD_END
};
static driver_t vtblk_driver = {
	"vtblk",
	vtblk_methods,
	sizeof(struct vtblk_softc)
};
static devclass_t vtblk_devclass;

DRIVER_MODULE(virtio_blk, virtio_mmio, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
MODULE_VERSION(virtio_blk, 1);
MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);

VIRTIO_SIMPLE_PNPTABLE(virtio_blk, VIRTIO_ID_BLOCK, "VirtIO Block Adapter");
VIRTIO_SIMPLE_PNPINFO(virtio_mmio, virtio_blk);
VIRTIO_SIMPLE_PNPINFO(virtio_pci, virtio_blk);
vtblk_modevent(module_t mod, int type, void *unused)

vtblk_probe(device_t dev)

	return (VIRTIO_SIMPLE_PROBE(dev, virtio_blk));
vtblk_attach(device_t dev)

	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;

	virtio_set_feature_desc(dev, vtblk_feature_desc);

	sc = device_get_softc(dev);
	sc->vtblk_dev = dev;
	VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
	bioq_init(&sc->vtblk_bioq);
	TAILQ_INIT(&sc->vtblk_dump_queue);
	TAILQ_INIT(&sc->vtblk_req_free);
	TAILQ_INIT(&sc->vtblk_req_ready);

	vtblk_setup_sysctl(sc);
	vtblk_setup_features(sc);

	vtblk_read_config(sc, &blkcfg);

	/*
	 * With the current sglist(9) implementation, it is not easy
	 * for us to support a maximum segment size as adjacent
	 * segments are coalesced. For now, just make sure it's larger
	 * than the maximum supported transfer size.
	 */
	if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
		if (blkcfg.size_max < MAXPHYS) {
			device_printf(dev, "host requires unsupported "
			    "maximum segment size feature\n");

	sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
	if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
		device_printf(dev, "fewer than minimum number of segments "
		    "allowed: %d\n", sc->vtblk_max_nsegs);

	sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
	if (sc->vtblk_sglist == NULL) {
		device_printf(dev, "cannot allocate sglist\n");

	error = vtblk_alloc_virtqueue(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueue\n");

	error = vtblk_request_prealloc(sc);
	if (error) {
		device_printf(dev, "cannot preallocate requests\n");

	vtblk_alloc_disk(sc, &blkcfg);

	error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupt\n");

	vtblk_create_disk(sc);

	virtqueue_enable_intr(sc->vtblk_vq);
vtblk_detach(device_t dev)

	struct vtblk_softc *sc;

	sc = device_get_softc(dev);

	sc->vtblk_flags |= VTBLK_FLAG_DETACH;
	if (device_is_attached(dev))
		vtblk_stop(sc);

	if (sc->vtblk_disk != NULL) {
		disk_destroy(sc->vtblk_disk);
		sc->vtblk_disk = NULL;
	}

	if (sc->vtblk_sglist != NULL) {
		sglist_free(sc->vtblk_sglist);
		sc->vtblk_sglist = NULL;
	}

	VTBLK_LOCK_DESTROY(sc);
vtblk_suspend(device_t dev)

	struct vtblk_softc *sc;

	sc = device_get_softc(dev);

	sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
	/* XXX BMV: virtio_stop(), etc needed here? */
	error = vtblk_quiesce(sc);
	if (error)
		sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;

vtblk_resume(device_t dev)

	struct vtblk_softc *sc;

	sc = device_get_softc(dev);

	/* XXX BMV: virtio_reinit(), etc needed here? */
	sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
vtblk_shutdown(device_t dev)

vtblk_config_change(device_t dev)

	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;

	sc = device_get_softc(dev);

	vtblk_read_config(sc, &blkcfg);

	/* Capacity is always in 512-byte units. */
	capacity = blkcfg.capacity * 512;

	if (sc->vtblk_disk->d_mediasize != capacity)
		vtblk_resize_disk(sc, capacity);
vtblk_open(struct disk *dp)

	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? ENXIO : 0);

vtblk_close(struct disk *dp)

	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
    struct thread *td)

	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);
vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)

	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	vtblk_dump_quiesce(sc);

	error = vtblk_dump_write(sc, virtual, offset, length);
	if (error || (virtual == NULL && offset == 0))
		vtblk_dump_complete(sc);
vtblk_strategy(struct bio *bp)

	struct vtblk_softc *sc;

	if ((sc = bp->bio_disk->d_drv1) == NULL) {
		vtblk_bio_done(NULL, bp, EINVAL);

	/*
	 * Fail any write if RO. Unfortunately, there does not seem to
	 * be a better way to report our readonly'ness to GEOM above.
	 */
	if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
	    (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH)) {
		vtblk_bio_done(sc, bp, EROFS);

	if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
		vtblk_bio_done(sc, bp, ENXIO);

	bioq_insert_tail(&sc->vtblk_bioq, bp);
vtblk_negotiate_features(struct vtblk_softc *sc)

	features = VTBLK_FEATURES;

	sc->vtblk_features = virtio_negotiate_features(dev, features);

vtblk_setup_features(struct vtblk_softc *sc)

	vtblk_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
		sc->vtblk_flags |= VTBLK_FLAG_READONLY;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
		sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;
vtblk_maximum_segments(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)

	nsegs = VTBLK_MIN_SEGMENTS;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
		nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
		if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);

vtblk_alloc_virtqueue(struct vtblk_softc *sc)

	struct vq_alloc_info vq_info;

	VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
	    vtblk_vq_intr, sc, &sc->vtblk_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
vtblk_resize_disk(struct vtblk_softc *sc, uint64_t new_capacity)

	dp->d_mediasize = new_capacity;
	device_printf(dev, "resized to %juMB (%ju %u byte sectors)\n",
	    (uintmax_t) dp->d_mediasize >> 20,
	    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
	    dp->d_sectorsize);

	error = disk_resize(dp, M_NOWAIT);
	if (error)
		device_printf(dev,
		    "disk_resize(9) failed, error: %d\n", error);
vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)

	sc->vtblk_disk = dp = disk_alloc();
	dp->d_open = vtblk_open;
	dp->d_close = vtblk_close;
	dp->d_ioctl = vtblk_ioctl;
	dp->d_strategy = vtblk_strategy;
	dp->d_name = VTBLK_DISK_NAME;
	dp->d_unit = device_get_unit(dev);
	dp->d_drv1 = sc;
	dp->d_flags = DISKFLAG_CANFLUSHCACHE | DISKFLAG_UNMAPPED_BIO |
	    DISKFLAG_DIRECT_COMPLETION;
	dp->d_hba_vendor = virtio_get_vendor(dev);
	dp->d_hba_device = virtio_get_device(dev);
	dp->d_hba_subvendor = virtio_get_subvendor(dev);
	dp->d_hba_subdevice = virtio_get_subdevice(dev);

	if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
		dp->d_dump = vtblk_dump;
	/* Capacity is always in 512-byte units. */
	dp->d_mediasize = blkcfg->capacity * 512;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
		dp->d_sectorsize = blkcfg->blk_size;
	else
		dp->d_sectorsize = 512;
	/*
	 * The VirtIO maximum I/O size is given in terms of segments.
	 * However, FreeBSD limits I/O size by logical buffer size, not
	 * by physically contiguous pages. Therefore, we have to assume
	 * no pages are contiguous. This may impose an artificially low
	 * maximum I/O size. But in practice, since QEMU advertises 128
	 * segments, this gives us a maximum IO size of 125 * PAGE_SIZE,
	 * which is typically greater than MAXPHYS. Eventually we should
	 * just advertise MAXPHYS and split buffers that are too big.
	 *
	 * Note we must subtract one additional segment in case of non
	 * page aligned buffers.
	 */
	dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
	    PAGE_SIZE;
	if (dp->d_maxsize < PAGE_SIZE)
		dp->d_maxsize = PAGE_SIZE; /* XXX */
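
	/*
	 * Worked example of the calculation above (illustrative numbers,
	 * assuming 4 KiB pages and the stock 128 KiB MAXPHYS): with a
	 * vtblk_max_nsegs of 35 (2 + MAXPHYS / PAGE_SIZE + 1), d_maxsize
	 * becomes (35 - 2 - 1) * PAGE_SIZE = 32 pages = 128 KiB.
	 */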
	if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
		dp->d_fwsectors = blkcfg->geometry.sectors;
		dp->d_fwheads = blkcfg->geometry.heads;
	}

	if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY) &&
	    blkcfg->topology.physical_block_exp > 0) {
		dp->d_stripesize = dp->d_sectorsize *
		    (1 << blkcfg->topology.physical_block_exp);
		dp->d_stripeoffset = (dp->d_stripesize -
		    blkcfg->topology.alignment_offset * dp->d_sectorsize) %
		    dp->d_stripesize;
	}

	if (vtblk_write_cache_enabled(sc, blkcfg) != 0)
		sc->vtblk_write_cache = VTBLK_CACHE_WRITEBACK;
	else
		sc->vtblk_write_cache = VTBLK_CACHE_WRITETHROUGH;
vtblk_create_disk(struct vtblk_softc *sc)

	device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
	    (uintmax_t) dp->d_mediasize >> 20,
	    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
	    dp->d_sectorsize);

	disk_create(dp, DISK_VERSION);
vtblk_request_prealloc(struct vtblk_softc *sc)

	struct vtblk_request *req;

	nreqs = virtqueue_size(sc->vtblk_vq);

	/*
	 * Preallocate sufficient requests to keep the virtqueue full. Each
	 * request consumes VTBLK_MIN_SEGMENTS or more descriptors so reduce
	 * the number allocated when indirect descriptors are not available.
	 */
	if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
		nreqs /= VTBLK_MIN_SEGMENTS;
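
	/*
	 * For example (not from the original comments): with a 128-entry
	 * virtqueue this preallocates 128 requests when indirect descriptors
	 * were negotiated, and 128 / VTBLK_MIN_SEGMENTS = 64 requests
	 * otherwise, since each request then occupies at least two
	 * descriptors in the ring.
	 */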
	for (i = 0; i < nreqs; i++) {
		req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT);

		MPASS(sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr)) == 1);
		MPASS(sglist_count(&req->vbr_ack, sizeof(req->vbr_ack)) == 1);

		sc->vtblk_request_count++;
		vtblk_request_enqueue(sc, req);
vtblk_request_free(struct vtblk_softc *sc)

	struct vtblk_request *req;

	MPASS(TAILQ_EMPTY(&sc->vtblk_req_ready));

	while ((req = vtblk_request_dequeue(sc)) != NULL) {
		sc->vtblk_request_count--;
		free(req, M_DEVBUF);
	}

	KASSERT(sc->vtblk_request_count == 0,
	    ("%s: leaked %d requests", __func__, sc->vtblk_request_count));

static struct vtblk_request *
vtblk_request_dequeue(struct vtblk_softc *sc)

	struct vtblk_request *req;

	req = TAILQ_FIRST(&sc->vtblk_req_free);
	if (req != NULL) {
		TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);
		bzero(req, sizeof(struct vtblk_request));
	}

vtblk_request_enqueue(struct vtblk_softc *sc, struct vtblk_request *req)

	TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
static struct vtblk_request *
vtblk_request_next_ready(struct vtblk_softc *sc)

	struct vtblk_request *req;

	req = TAILQ_FIRST(&sc->vtblk_req_ready);
	if (req != NULL)
		TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);

vtblk_request_requeue_ready(struct vtblk_softc *sc, struct vtblk_request *req)

	/* NOTE: Currently, there will be at most one request in the queue. */
	TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);

static struct vtblk_request *
vtblk_request_next(struct vtblk_softc *sc)

	struct vtblk_request *req;

	req = vtblk_request_next_ready(sc);
	if (req != NULL)
		return (req);

	return (vtblk_request_bio(sc));
static struct vtblk_request *
vtblk_request_bio(struct vtblk_softc *sc)

	struct bio_queue_head *bioq;
	struct vtblk_request *req;
	struct bio *bp;

	bioq = &sc->vtblk_bioq;

	if (bioq_first(bioq) == NULL)

	req = vtblk_request_dequeue(sc);

	bp = bioq_takefirst(bioq);
	req->vbr_hdr.ioprio = 1;

	switch (bp->bio_cmd) {
	case BIO_FLUSH:
		req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
		break;
	case BIO_READ:
		req->vbr_hdr.type = VIRTIO_BLK_T_IN;
		req->vbr_hdr.sector = bp->bio_offset / 512;
		break;
	case BIO_WRITE:
		req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
		req->vbr_hdr.sector = bp->bio_offset / 512;
		break;
	default:
		panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
	}

	if (bp->bio_flags & BIO_ORDERED)
		req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;
vtblk_request_execute(struct vtblk_softc *sc, struct vtblk_request *req)

	struct virtqueue *vq;
	struct sglist *sg;
	struct bio *bp;
	int ordered, readable, writable, error;

	sg = sc->vtblk_sglist;

	/*
	 * Some hosts (such as bhyve) do not implement the barrier feature,
	 * so we emulate it in the driver by allowing the barrier request
	 * to be the only one in flight.
	 */
	if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
		if (sc->vtblk_req_ordered != NULL)
			return (EBUSY);
		if (bp->bio_flags & BIO_ORDERED) {
			if (!virtqueue_empty(vq))
				return (EBUSY);
			ordered = 1;
			req->vbr_hdr.type &= ~VIRTIO_BLK_T_BARRIER;
		}
	}

	sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));

	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
		error = sglist_append_bio(sg, bp);
		if (error || sg->sg_nseg == sg->sg_maxseg) {
			panic("%s: bio %p data buffer too big %d",
			    __func__, bp, error);
		}

		/* BIO_READ means the host writes into our buffer. */
		if (bp->bio_cmd == BIO_READ)
			writable = sg->sg_nseg - 1;
	}

	sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
	readable = sg->sg_nseg - writable;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error == 0 && ordered)
		sc->vtblk_req_ordered = req;
vtblk_request_error(struct vtblk_request *req)

	switch (req->vbr_ack) {
	case VIRTIO_BLK_S_OK:

	case VIRTIO_BLK_S_UNSUPP:

vtblk_queue_completed(struct vtblk_softc *sc, struct bio_queue *queue)

	struct vtblk_request *req;

	while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
		if (sc->vtblk_req_ordered != NULL) {
			MPASS(sc->vtblk_req_ordered == req);
			sc->vtblk_req_ordered = NULL;
		}

		bp->bio_error = vtblk_request_error(req);
		TAILQ_INSERT_TAIL(queue, bp, bio_queue);

		vtblk_request_enqueue(sc, req);
	}
vtblk_done_completed(struct vtblk_softc *sc, struct bio_queue *queue)

	struct bio *bp, *tmp;

	TAILQ_FOREACH_SAFE(bp, queue, bio_queue, tmp) {
		if (bp->bio_error != 0)
			disk_err(bp, "hard error", -1, 1);
		vtblk_bio_done(sc, bp, bp->bio_error);
	}

vtblk_drain_vq(struct vtblk_softc *sc)

	struct virtqueue *vq;
	struct vtblk_request *req;

	while ((req = virtqueue_drain(vq, &last)) != NULL) {
		vtblk_bio_done(sc, req->vbr_bp, ENXIO);
		vtblk_request_enqueue(sc, req);
	}

	sc->vtblk_req_ordered = NULL;
	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
vtblk_drain(struct vtblk_softc *sc)

	struct bio_queue queue;
	struct bio_queue_head *bioq;
	struct vtblk_request *req;

	bioq = &sc->vtblk_bioq;

	if (sc->vtblk_vq != NULL) {
		vtblk_queue_completed(sc, &queue);
		vtblk_done_completed(sc, &queue);

	while ((req = vtblk_request_next_ready(sc)) != NULL) {
		vtblk_bio_done(sc, req->vbr_bp, ENXIO);
		vtblk_request_enqueue(sc, req);
	}

	while (bioq_first(bioq) != NULL) {
		bp = bioq_takefirst(bioq);
		vtblk_bio_done(sc, bp, ENXIO);
	}

	vtblk_request_free(sc);
vtblk_startio(struct vtblk_softc *sc)

	struct virtqueue *vq;
	struct vtblk_request *req;

	VTBLK_LOCK_ASSERT(sc);

	if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
		return;

	while (!virtqueue_full(vq)) {
		req = vtblk_request_next(sc);

		if (vtblk_request_execute(sc, req) != 0) {
			vtblk_request_requeue_ready(sc, req);

	virtqueue_notify(vq);

vtblk_bio_done(struct vtblk_softc *sc, struct bio *bp, int error)

	/* Because of GEOM direct dispatch, we cannot hold any locks. */
	if (sc != NULL)
		VTBLK_LOCK_ASSERT_NOTOWNED(sc);

	if (error) {
		bp->bio_resid = bp->bio_bcount;
		bp->bio_error = error;
		bp->bio_flags |= BIO_ERROR;
	}

	biodone(bp);
#define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg)			\
	if (virtio_with_feature(_dev, _feature)) {			\
		virtio_read_device_config(_dev,				\
		    offsetof(struct virtio_blk_config, _field),		\
		    &(_cfg)->_field, sizeof((_cfg)->_field));		\
	}

vtblk_read_config(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)

	dev = sc->vtblk_dev;

	bzero(blkcfg, sizeof(struct virtio_blk_config));

	/* The capacity is always available. */
	virtio_read_device_config(dev, offsetof(struct virtio_blk_config,
	    capacity), &blkcfg->capacity, sizeof(blkcfg->capacity));

	/* Read the configuration if the feature was negotiated. */
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, writeback, blkcfg);

#undef VTBLK_GET_CONFIG
vtblk_ident(struct vtblk_softc *sc)

	struct vtblk_request *req;

	dp = sc->vtblk_disk;
	len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);

	if (vtblk_tunable_int(sc, "no_ident", vtblk_no_ident) != 0)
		return;

	req = vtblk_request_dequeue(sc);

	req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = 0;

	buf.bio_cmd = BIO_READ;
	buf.bio_data = dp->d_ident;
	buf.bio_bcount = len;

	error = vtblk_poll_request(sc, req);
	vtblk_request_enqueue(sc, req);

	if (error) {
		device_printf(sc->vtblk_dev,
		    "error getting device identifier: %d\n", error);
	}
vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)

	struct virtqueue *vq;

	if (!virtqueue_empty(vq))
		return (EBUSY);

	error = vtblk_request_execute(sc, req);

	virtqueue_notify(vq);
	virtqueue_poll(vq, NULL);

	error = vtblk_request_error(req);
	if (error && bootverbose) {
		device_printf(sc->vtblk_dev,
		    "%s: IO error: %d\n", __func__, error);
	}
vtblk_quiesce(struct vtblk_softc *sc)

	VTBLK_LOCK_ASSERT(sc);

	while (!virtqueue_empty(sc->vtblk_vq)) {
		if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq",
		    VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) {
vtblk_vq_intr(void *xsc)

	struct vtblk_softc *sc;
	struct virtqueue *vq;
	struct bio_queue queue;

	if (sc->vtblk_flags & VTBLK_FLAG_DETACH)

	vtblk_queue_completed(sc, &queue);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);

	if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
		wakeup(&sc->vtblk_vq);

	vtblk_done_completed(sc, &queue);
vtblk_stop(struct vtblk_softc *sc)

	virtqueue_disable_intr(sc->vtblk_vq);
	virtio_stop(sc->vtblk_dev);

vtblk_dump_quiesce(struct vtblk_softc *sc)

	/*
	 * Spin here until all the requests in-flight at the time of the
	 * dump are completed and queued. The queued requests will be
	 * biodone'd once the dump is finished.
	 */
	while (!virtqueue_empty(sc->vtblk_vq))
		vtblk_queue_completed(sc, &sc->vtblk_dump_queue);
vtblk_dump_write(struct vtblk_softc *sc, void *virtual, off_t offset,
    size_t length)

	struct vtblk_request *req;

	req = &sc->vtblk_dump_request;
	req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = offset / 512;

	buf.bio_cmd = BIO_WRITE;
	buf.bio_data = virtual;
	buf.bio_bcount = length;

	return (vtblk_poll_request(sc, req));
vtblk_dump_flush(struct vtblk_softc *sc)

	struct vtblk_request *req;

	req = &sc->vtblk_dump_request;
	req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = 0;

	buf.bio_cmd = BIO_FLUSH;

	return (vtblk_poll_request(sc, req));

vtblk_dump_complete(struct vtblk_softc *sc)

	vtblk_dump_flush(sc);

	vtblk_done_completed(sc, &sc->vtblk_dump_queue);
vtblk_set_write_cache(struct vtblk_softc *sc, int wc)

	/* Set either writeback (1) or writethrough (0) mode. */
	virtio_write_dev_config_1(sc->vtblk_dev,
	    offsetof(struct virtio_blk_config, writeback), wc);

vtblk_write_cache_enabled(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)

	if (sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) {
		wc = vtblk_tunable_int(sc, "writecache_mode",
		    vtblk_writecache_mode);
		if (wc >= 0 && wc < VTBLK_CACHE_MAX)
			vtblk_set_write_cache(sc, wc);
		else
			wc = blkcfg->writeback;
	} else
		wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE);

	return (wc);
vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS)

	struct vtblk_softc *sc;

	sc = oidp->oid_arg1;
	wc = sc->vtblk_write_cache;

	error = sysctl_handle_int(oidp, &wc, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
		return (EPERM);
	if (wc < 0 || wc >= VTBLK_CACHE_MAX)
		return (EINVAL);

	sc->vtblk_write_cache = wc;
	vtblk_set_write_cache(sc, sc->vtblk_write_cache);
vtblk_setup_sysctl(struct vtblk_softc *sc)

	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtblk_dev;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "writecache_mode",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, vtblk_write_cache_sysctl,
	    "I", "Write cache mode (writethrough (0) or writeback (1))");
vtblk_tunable_int(struct vtblk_softc *sc, const char *knob, int def)

	char path[64];

	snprintf(path, sizeof(path),
	    "hw.vtblk.%d.%s", device_get_unit(sc->vtblk_dev), knob);
	TUNABLE_INT_FETCH(path, &def);

	return (def);