/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO block devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <geom/geom_disk.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/block/virtio_blk.h>

#include "virtio_if.h"

struct vtblk_request {
        struct virtio_blk_outhdr         vbr_hdr;
        struct bio                      *vbr_bp;
        uint8_t                          vbr_ack;

        TAILQ_ENTRY(vtblk_request)       vbr_link;
};

enum vtblk_cache_mode {
        VTBLK_CACHE_WRITETHROUGH,
        VTBLK_CACHE_WRITEBACK,
        VTBLK_CACHE_MAX
};

struct vtblk_softc {
        device_t                 vtblk_dev;
        struct mtx               vtblk_mtx;
        uint64_t                 vtblk_features;
        uint32_t                 vtblk_flags;
#define VTBLK_FLAG_INDIRECT     0x0001
#define VTBLK_FLAG_READONLY     0x0002
#define VTBLK_FLAG_DETACH       0x0004
#define VTBLK_FLAG_SUSPEND      0x0008
#define VTBLK_FLAG_DUMPING      0x0010
#define VTBLK_FLAG_BARRIER      0x0020
#define VTBLK_FLAG_WC_CONFIG    0x0040

        struct virtqueue        *vtblk_vq;
        struct sglist           *vtblk_sglist;
        struct disk             *vtblk_disk;

        struct bio_queue_head    vtblk_bioq;
        TAILQ_HEAD(, vtblk_request)
                                 vtblk_req_free;
        TAILQ_HEAD(, vtblk_request)
                                 vtblk_req_ready;
        struct vtblk_request    *vtblk_req_ordered;

        int                      vtblk_max_nsegs;
        int                      vtblk_request_count;
        enum vtblk_cache_mode    vtblk_write_cache;

        struct vtblk_request     vtblk_dump_request;
};

static struct virtio_feature_desc vtblk_feature_desc[] = {
        { VIRTIO_BLK_F_BARRIER,         "HostBarrier"   },
        { VIRTIO_BLK_F_SIZE_MAX,        "MaxSegSize"    },
        { VIRTIO_BLK_F_SEG_MAX,         "MaxNumSegs"    },
        { VIRTIO_BLK_F_GEOMETRY,        "DiskGeometry"  },
        { VIRTIO_BLK_F_RO,              "ReadOnly"      },
        { VIRTIO_BLK_F_BLK_SIZE,        "BlockSize"     },
        { VIRTIO_BLK_F_SCSI,            "SCSICmds"      },
        { VIRTIO_BLK_F_WCE,             "WriteCache"    },
        { VIRTIO_BLK_F_TOPOLOGY,        "Topology"      },
        { VIRTIO_BLK_F_CONFIG_WCE,      "ConfigWCE"     },

        { 0, NULL }
};

static int      vtblk_modevent(module_t, int, void *);

static int      vtblk_probe(device_t);
static int      vtblk_attach(device_t);
static int      vtblk_detach(device_t);
static int      vtblk_suspend(device_t);
static int      vtblk_resume(device_t);
static int      vtblk_shutdown(device_t);
static int      vtblk_config_change(device_t);

static int      vtblk_open(struct disk *);
static int      vtblk_close(struct disk *);
static int      vtblk_ioctl(struct disk *, u_long, void *, int,
                    struct thread *);
static int      vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
static void     vtblk_strategy(struct bio *);

static void     vtblk_negotiate_features(struct vtblk_softc *);
static int      vtblk_maximum_segments(struct vtblk_softc *,
                    struct virtio_blk_config *);
static int      vtblk_alloc_virtqueue(struct vtblk_softc *);
static void     vtblk_resize_disk(struct vtblk_softc *, uint64_t);
static void     vtblk_set_write_cache(struct vtblk_softc *, int);
static int      vtblk_write_cache_enabled(struct vtblk_softc *sc,
                    struct virtio_blk_config *);
static int      vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS);
static void     vtblk_alloc_disk(struct vtblk_softc *,
                    struct virtio_blk_config *);
static void     vtblk_create_disk(struct vtblk_softc *);

static int      vtblk_quiesce(struct vtblk_softc *);
static void     vtblk_startio(struct vtblk_softc *);
static struct vtblk_request * vtblk_bio_request(struct vtblk_softc *);
static int      vtblk_execute_request(struct vtblk_softc *,
                    struct vtblk_request *);

static void     vtblk_vq_intr(void *);

static void     vtblk_stop(struct vtblk_softc *);

static void     vtblk_read_config(struct vtblk_softc *,
                    struct virtio_blk_config *);
static void     vtblk_get_ident(struct vtblk_softc *);
static void     vtblk_prepare_dump(struct vtblk_softc *);
static int      vtblk_write_dump(struct vtblk_softc *, void *, off_t, size_t);
static int      vtblk_flush_dump(struct vtblk_softc *);
static int      vtblk_poll_request(struct vtblk_softc *,
                    struct vtblk_request *);

static void     vtblk_finish_completed(struct vtblk_softc *);
static void     vtblk_drain_vq(struct vtblk_softc *, int);
static void     vtblk_drain(struct vtblk_softc *);

static int      vtblk_alloc_requests(struct vtblk_softc *);
static void     vtblk_free_requests(struct vtblk_softc *);
static struct vtblk_request * vtblk_dequeue_request(struct vtblk_softc *);
static void     vtblk_enqueue_request(struct vtblk_softc *,
                    struct vtblk_request *);

static struct vtblk_request * vtblk_dequeue_ready(struct vtblk_softc *);
static void     vtblk_enqueue_ready(struct vtblk_softc *,
                    struct vtblk_request *);

static int      vtblk_request_error(struct vtblk_request *);
static void     vtblk_finish_bio(struct bio *, int);

static void     vtblk_setup_sysctl(struct vtblk_softc *);
static int      vtblk_tunable_int(struct vtblk_softc *, const char *, int);

/* Tunables. */
static int vtblk_no_ident = 0;
TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
static int vtblk_writecache_mode = -1;
TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);

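/*
 * For illustration (not part of the original source): these global knobs
 * can be set from loader.conf(5), e.g.:
 *
 *      hw.vtblk.no_ident="1"
 *      hw.vtblk.writecache_mode="0"
 *
 * vtblk_tunable_int() below also consults a per-device variant such as
 * hw.vtblk.0.writecache_mode, which overrides the global value.
 */
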
/* Features desired/implemented by this driver. */
#define VTBLK_FEATURES \
    (VIRTIO_BLK_F_BARRIER               | \
     VIRTIO_BLK_F_SIZE_MAX              | \
     VIRTIO_BLK_F_SEG_MAX               | \
     VIRTIO_BLK_F_GEOMETRY              | \
     VIRTIO_BLK_F_RO                    | \
     VIRTIO_BLK_F_BLK_SIZE              | \
     VIRTIO_BLK_F_WCE                   | \
     VIRTIO_BLK_F_TOPOLOGY              | \
     VIRTIO_BLK_F_CONFIG_WCE            | \
     VIRTIO_RING_F_INDIRECT_DESC)

#define VTBLK_MTX(_sc)          &(_sc)->vtblk_mtx
#define VTBLK_LOCK_INIT(_sc, _name) \
                                mtx_init(VTBLK_MTX((_sc)), (_name), \
                                    "VirtIO Block Lock", MTX_DEF)
#define VTBLK_LOCK(_sc)         mtx_lock(VTBLK_MTX((_sc)))
#define VTBLK_UNLOCK(_sc)       mtx_unlock(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_DESTROY(_sc) mtx_destroy(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_ASSERT(_sc)  mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
#define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
                                mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)

#define VTBLK_DISK_NAME         "vtbd"
#define VTBLK_QUIESCE_TIMEOUT   (30 * hz)

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VTBLK_MIN_SEGMENTS      2

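/*
 * Illustrative request layout (added for clarity, not from the original
 * source): a request occupies one descriptor for the header
 * (driver-readable), zero or more for the data buffer, and one for the
 * ack/status byte (driver-writable). E.g., a page-aligned 16KB read with
 * 4KB pages uses 1 + 4 + 1 = 6 descriptors.
 */
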
static device_method_t vtblk_methods[] = {
        /* Device methods. */
        DEVMETHOD(device_probe,         vtblk_probe),
        DEVMETHOD(device_attach,        vtblk_attach),
        DEVMETHOD(device_detach,        vtblk_detach),
        DEVMETHOD(device_suspend,       vtblk_suspend),
        DEVMETHOD(device_resume,        vtblk_resume),
        DEVMETHOD(device_shutdown,      vtblk_shutdown),

        /* VirtIO methods. */
        DEVMETHOD(virtio_config_change, vtblk_config_change),

        DEVMETHOD_END
};

static driver_t vtblk_driver = {
        "vtblk",
        vtblk_methods,
        sizeof(struct vtblk_softc)
};
static devclass_t vtblk_devclass;

DRIVER_MODULE(virtio_blk, virtio_mmio, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
MODULE_VERSION(virtio_blk, 1);
MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);

static int
vtblk_modevent(module_t mod, int type, void *unused)
{
        int error;

        error = 0;

        switch (type) {
        case MOD_LOAD:
        case MOD_QUIESCE:
        case MOD_UNLOAD:
        case MOD_SHUTDOWN:
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }

        return (error);
}

static int
vtblk_probe(device_t dev)
{

        if (virtio_get_device_type(dev) != VIRTIO_ID_BLOCK)
                return (ENXIO);

        device_set_desc(dev, "VirtIO Block Adapter");

        return (BUS_PROBE_DEFAULT);
}

static int
vtblk_attach(device_t dev)
{
        struct vtblk_softc *sc;
        struct virtio_blk_config blkcfg;
        int error;

        sc = device_get_softc(dev);
        sc->vtblk_dev = dev;

        VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));

        bioq_init(&sc->vtblk_bioq);
        TAILQ_INIT(&sc->vtblk_req_free);
        TAILQ_INIT(&sc->vtblk_req_ready);

        virtio_set_feature_desc(dev, vtblk_feature_desc);
        vtblk_negotiate_features(sc);

        if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
                sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
        if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
                sc->vtblk_flags |= VTBLK_FLAG_READONLY;
        if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
                sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
        if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
                sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;

        vtblk_setup_sysctl(sc);

        /* Get local copy of config. */
        vtblk_read_config(sc, &blkcfg);

        /*
         * With the current sglist(9) implementation, it is not easy
         * for us to support a maximum segment size as adjacent
         * segments are coalesced. For now, just make sure it's larger
         * than the maximum supported transfer size.
         */
        if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
                if (blkcfg.size_max < MAXPHYS) {
                        error = ENOTSUP;
                        device_printf(dev, "host requires unsupported "
                            "maximum segment size feature\n");
                        goto fail;
                }
        }

        sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
        if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
                error = EINVAL;
                device_printf(dev, "fewer than minimum number of segments "
                    "allowed: %d\n", sc->vtblk_max_nsegs);
                goto fail;
        }

        sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
        if (sc->vtblk_sglist == NULL) {
                error = ENOMEM;
                device_printf(dev, "cannot allocate sglist\n");
                goto fail;
        }

        error = vtblk_alloc_virtqueue(sc);
        if (error) {
                device_printf(dev, "cannot allocate virtqueue\n");
                goto fail;
        }

        error = vtblk_alloc_requests(sc);
        if (error) {
                device_printf(dev, "cannot preallocate requests\n");
                goto fail;
        }

        vtblk_alloc_disk(sc, &blkcfg);

        error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
        if (error) {
                device_printf(dev, "cannot setup virtqueue interrupt\n");
                goto fail;
        }

        vtblk_create_disk(sc);

        virtqueue_enable_intr(sc->vtblk_vq);

fail:
        if (error)
                vtblk_detach(dev);

        return (error);
}

static int
vtblk_detach(device_t dev)
{
        struct vtblk_softc *sc;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        sc->vtblk_flags |= VTBLK_FLAG_DETACH;
        if (device_is_attached(dev))
                vtblk_stop(sc);
        VTBLK_UNLOCK(sc);

        vtblk_drain(sc);

        if (sc->vtblk_disk != NULL) {
                disk_destroy(sc->vtblk_disk);
                sc->vtblk_disk = NULL;
        }

        if (sc->vtblk_sglist != NULL) {
                sglist_free(sc->vtblk_sglist);
                sc->vtblk_sglist = NULL;
        }

        VTBLK_LOCK_DESTROY(sc);

        return (0);
}

static int
vtblk_suspend(device_t dev)
{
        struct vtblk_softc *sc;
        int error;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
        /* XXX BMV: virtio_stop(), etc needed here? */
        error = vtblk_quiesce(sc);
        if (error)
                sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
        VTBLK_UNLOCK(sc);

        return (error);
}

static int
vtblk_resume(device_t dev)
{
        struct vtblk_softc *sc;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        /* XXX BMV: virtio_reinit(), etc needed here? */
        sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
        if (sc->vtblk_disk != NULL)
                vtblk_startio(sc);
        VTBLK_UNLOCK(sc);

        return (0);
}

static int
vtblk_shutdown(device_t dev)
{

        return (0);
}

static int
vtblk_config_change(device_t dev)
{
        struct vtblk_softc *sc;
        struct virtio_blk_config blkcfg;
        uint64_t capacity;

        sc = device_get_softc(dev);

        vtblk_read_config(sc, &blkcfg);

        /* Capacity is always in 512-byte units. */
        capacity = blkcfg.capacity * 512;

        if (sc->vtblk_disk->d_mediasize != capacity)
                vtblk_resize_disk(sc, capacity);

        return (0);
}

static int
vtblk_open(struct disk *dp)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? ENXIO : 0);
}

static int
vtblk_close(struct disk *dp)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (0);
}

static int
vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
    struct thread *td)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (ENOTTY);
}

static int
vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)
{
        struct disk *dp;
        struct vtblk_softc *sc;
        int error;

        dp = arg;
        error = 0;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        VTBLK_LOCK(sc);

        if ((sc->vtblk_flags & VTBLK_FLAG_DUMPING) == 0) {
                vtblk_prepare_dump(sc);
                sc->vtblk_flags |= VTBLK_FLAG_DUMPING;
        }

        if (length > 0)
                error = vtblk_write_dump(sc, virtual, offset, length);
        else if (virtual == NULL && offset == 0)
                error = vtblk_flush_dump(sc);
        else {
                error = EINVAL;
                sc->vtblk_flags &= ~VTBLK_FLAG_DUMPING;
        }

        VTBLK_UNLOCK(sc);

        return (error);
}

static void
vtblk_strategy(struct bio *bp)
{
        struct vtblk_softc *sc;

        if ((sc = bp->bio_disk->d_drv1) == NULL) {
                vtblk_finish_bio(bp, EINVAL);
                return;
        }

        /*
         * Fail any write if RO. Unfortunately, there does not seem to
         * be a better way to report our readonly'ness to GEOM above.
         */
        if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
            (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH)) {
                vtblk_finish_bio(bp, EROFS);
                return;
        }

#ifdef INVARIANTS
        /*
         * Prevent read/write buffers spanning too many segments from
         * getting into the queue. This should only trip if d_maxsize
         * was incorrectly set.
         */
        if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
                int nsegs, max_nsegs;

                nsegs = sglist_count(bp->bio_data, bp->bio_bcount);
                max_nsegs = sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS;

                KASSERT(nsegs <= max_nsegs,
                    ("%s: bio %p spanned too many segments: %d, max: %d",
                    __func__, bp, nsegs, max_nsegs));
        }
#endif

        VTBLK_LOCK(sc);
        if (sc->vtblk_flags & VTBLK_FLAG_DETACH)
                vtblk_finish_bio(bp, ENXIO);
        else {
                bioq_insert_tail(&sc->vtblk_bioq, bp);

                if ((sc->vtblk_flags & VTBLK_FLAG_SUSPEND) == 0)
                        vtblk_startio(sc);
        }
        VTBLK_UNLOCK(sc);
}

static void
vtblk_negotiate_features(struct vtblk_softc *sc)
{
        device_t dev;
        uint64_t features;

        dev = sc->vtblk_dev;
        features = VTBLK_FEATURES;

        sc->vtblk_features = virtio_negotiate_features(dev, features);
}

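/*
 * Note (added for clarity): virtio_negotiate_features(9) returns the
 * subset of VTBLK_FEATURES that the host also offers; the
 * virtio_with_feature() checks in vtblk_attach() key off this
 * negotiated set, not the raw host feature bits.
 */
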
static int
vtblk_maximum_segments(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
        device_t dev;
        int nsegs;

        dev = sc->vtblk_dev;
        nsegs = VTBLK_MIN_SEGMENTS;

        if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
                nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
                if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
                        nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
        } else
                nsegs += 1;

        return (nsegs);
}

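/*
 * Worked example (illustrative; values are configuration-dependent):
 * with 4KB pages and a MAXPHYS of 128KB, the data segment count is
 * capped at 128KB / 4KB + 1 = 33, so a request uses at most
 * 2 + 33 = 35 descriptors unless the host's seg_max is smaller.
 */
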
static int
vtblk_alloc_virtqueue(struct vtblk_softc *sc)
{
        device_t dev;
        struct vq_alloc_info vq_info;

        dev = sc->vtblk_dev;

        VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
            vtblk_vq_intr, sc, &sc->vtblk_vq,
            "%s request", device_get_nameunit(dev));

        return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
}

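/*
 * Note (added for clarity, an assumption about the virtqueue API): the
 * second VQ_ALLOC_INFO_INIT() argument is the maximum number of segments
 * a single request may chain, which is used to size the per-request
 * indirect descriptor lists when VIRTIO_RING_F_INDIRECT_DESC was
 * negotiated.
 */
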
static void
vtblk_resize_disk(struct vtblk_softc *sc, uint64_t new_capacity)
{
        device_t dev;
        struct disk *dp;
        int error;

        dev = sc->vtblk_dev;
        dp = sc->vtblk_disk;

        dp->d_mediasize = new_capacity;
        if (bootverbose) {
                device_printf(dev, "resized to %juMB (%ju %u byte sectors)\n",
                    (uintmax_t) dp->d_mediasize >> 20,
                    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
                    dp->d_sectorsize);
        }

        error = disk_resize(dp, M_NOWAIT);
        if (error)
                device_printf(dev,
                    "disk_resize(9) failed, error: %d\n", error);
}

static void
vtblk_set_write_cache(struct vtblk_softc *sc, int wc)
{

        /* Set either writeback (1) or writethrough (0) mode. */
        virtio_write_dev_config_1(sc->vtblk_dev,
            offsetof(struct virtio_blk_config, writeback), wc);
}

static int
vtblk_write_cache_enabled(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
        int wc;

        if (sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) {
                wc = vtblk_tunable_int(sc, "writecache_mode",
                    vtblk_writecache_mode);
                if (wc >= 0 && wc < VTBLK_CACHE_MAX)
                        vtblk_set_write_cache(sc, wc);
                else
                        wc = blkcfg->writeback;
        } else
                wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE);

        return (wc);
}

static int
vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS)
{
        struct vtblk_softc *sc;
        int wc, error;

        sc = oidp->oid_arg1;
        wc = sc->vtblk_write_cache;

        error = sysctl_handle_int(oidp, &wc, 0, req);
        if (error || req->newptr == NULL)
                return (error);
        if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
                return (EPERM);
        if (wc < 0 || wc >= VTBLK_CACHE_MAX)
                return (EINVAL);

        VTBLK_LOCK(sc);
        sc->vtblk_write_cache = wc;
        vtblk_set_write_cache(sc, sc->vtblk_write_cache);
        VTBLK_UNLOCK(sc);

        return (0);
}

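/*
 * Usage note (added for clarity): this handler backs the per-device
 * sysctl created in vtblk_setup_sysctl(), so the cache mode can be
 * flipped at runtime with e.g. `sysctl dev.vtblk.0.writecache_mode=1`
 * (the exact node name follows the device sysctl tree).
 */
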
static void
vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
        device_t dev;
        struct disk *dp;

        dev = sc->vtblk_dev;

        sc->vtblk_disk = dp = disk_alloc();
        dp->d_open = vtblk_open;
        dp->d_close = vtblk_close;
        dp->d_ioctl = vtblk_ioctl;
        dp->d_strategy = vtblk_strategy;
        dp->d_name = VTBLK_DISK_NAME;
        dp->d_unit = device_get_unit(dev);
        dp->d_drv1 = sc;
        dp->d_flags = DISKFLAG_CANFLUSHCACHE | DISKFLAG_UNMAPPED_BIO;
        dp->d_hba_vendor = virtio_get_vendor(dev);
        dp->d_hba_device = virtio_get_device(dev);
        dp->d_hba_subvendor = virtio_get_subvendor(dev);
        dp->d_hba_subdevice = virtio_get_subdevice(dev);

        if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
                dp->d_dump = vtblk_dump;

        /* Capacity is always in 512-byte units. */
        dp->d_mediasize = blkcfg->capacity * 512;

        if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
                dp->d_sectorsize = blkcfg->blk_size;
        else
                dp->d_sectorsize = 512;

        /*
         * The VirtIO maximum I/O size is given in terms of segments.
         * However, FreeBSD limits I/O size by logical buffer size, not
         * by physically contiguous pages. Therefore, we have to assume
         * no pages are contiguous. This may impose an artificially low
         * maximum I/O size. But in practice, since QEMU advertises 128
         * segments, this gives us a maximum IO size of 125 * PAGE_SIZE,
         * which is typically greater than MAXPHYS. Eventually we should
         * just advertise MAXPHYS and split buffers that are too big.
         *
         * Note we must subtract one additional segment in case of non
         * page aligned buffers.
         */
        dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
            PAGE_SIZE;
        if (dp->d_maxsize < PAGE_SIZE)
                dp->d_maxsize = PAGE_SIZE; /* XXX */

        if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
                dp->d_fwsectors = blkcfg->geometry.sectors;
                dp->d_fwheads = blkcfg->geometry.heads;
        }

        if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY)) {
                dp->d_stripesize = dp->d_sectorsize *
                    (1 << blkcfg->topology.physical_block_exp);
                dp->d_stripeoffset = (dp->d_stripesize -
                    blkcfg->topology.alignment_offset * dp->d_sectorsize) %
                    dp->d_stripesize;
        }

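        /*
         * Worked example (illustrative): with 512-byte logical sectors,
         * physical_block_exp = 3 gives d_stripesize = 512 * 8 = 4096;
         * an alignment_offset of 1 sector then yields
         * d_stripeoffset = (4096 - 512) % 4096 = 3584.
         */
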
        if (vtblk_write_cache_enabled(sc, blkcfg) != 0)
                sc->vtblk_write_cache = VTBLK_CACHE_WRITEBACK;
        else
                sc->vtblk_write_cache = VTBLK_CACHE_WRITETHROUGH;
}

static void
vtblk_create_disk(struct vtblk_softc *sc)
{
        struct disk *dp;

        dp = sc->vtblk_disk;

        /*
         * Retrieving the identification string must be done after
         * the virtqueue interrupt is setup otherwise it will hang.
         */
        vtblk_get_ident(sc);

        device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
            (uintmax_t) dp->d_mediasize >> 20,
            (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
            dp->d_sectorsize);

        disk_create(dp, DISK_VERSION);
}

static int
vtblk_quiesce(struct vtblk_softc *sc)
{
        int error;

        error = 0;

        VTBLK_LOCK_ASSERT(sc);

        while (!virtqueue_empty(sc->vtblk_vq)) {
                if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq",
                    VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) {
                        error = EBUSY;
                        break;
                }
        }

        return (error);
}

static void
vtblk_startio(struct vtblk_softc *sc)
{
        struct virtqueue *vq;
        struct vtblk_request *req;
        int enq;

        vq = sc->vtblk_vq;
        enq = 0;

        VTBLK_LOCK_ASSERT(sc);

        while (!virtqueue_full(vq)) {
                if ((req = vtblk_dequeue_ready(sc)) == NULL)
                        req = vtblk_bio_request(sc);
                if (req == NULL)
                        break;

                if (vtblk_execute_request(sc, req) != 0) {
                        vtblk_enqueue_ready(sc, req);
                        break;
                }

                enq++;
        }

        if (enq > 0)
                virtqueue_notify(vq);
}

static struct vtblk_request *
vtblk_bio_request(struct vtblk_softc *sc)
{
        struct bio_queue_head *bioq;
        struct vtblk_request *req;
        struct bio *bp;

        bioq = &sc->vtblk_bioq;

        if (bioq_first(bioq) == NULL)
                return (NULL);

        req = vtblk_dequeue_request(sc);
        if (req == NULL)
                return (NULL);

        bp = bioq_takefirst(bioq);
        req->vbr_bp = bp;
        req->vbr_ack = -1;
        req->vbr_hdr.ioprio = 1;

        switch (bp->bio_cmd) {
        case BIO_FLUSH:
                req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
                break;
        case BIO_READ:
                req->vbr_hdr.type = VIRTIO_BLK_T_IN;
                req->vbr_hdr.sector = bp->bio_offset / 512;
                break;
        case BIO_WRITE:
                req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
                req->vbr_hdr.sector = bp->bio_offset / 512;
                break;
        default:
                panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
        }

        return (req);
}

static int
vtblk_execute_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
        struct virtqueue *vq;
        struct sglist *sg;
        struct bio *bp;
        int ordered, readable, writable, error;

        vq = sc->vtblk_vq;
        sg = sc->vtblk_sglist;
        bp = req->vbr_bp;
        ordered = 0;
        writable = 0;

        VTBLK_LOCK_ASSERT(sc);

        /*
         * Wait until the ordered request completes before
         * executing subsequent requests.
         */
        if (sc->vtblk_req_ordered != NULL)
                return (EBUSY);

        if (bp->bio_flags & BIO_ORDERED) {
                if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
                        /*
                         * This request will be executed once all
                         * the in-flight requests are completed.
                         */
                        if (!virtqueue_empty(vq))
                                return (EBUSY);
                        ordered = 1;
                } else
                        req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;
        }

        sglist_reset(sg);
        sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));

        if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
                error = sglist_append_bio(sg, bp);
                if (error || sg->sg_nseg == sg->sg_maxseg) {
                        panic("%s: data buffer too big bio:%p error:%d",
                            __func__, bp, error);
                }

                /* BIO_READ means the host writes into our buffer. */
                if (bp->bio_cmd == BIO_READ)
                        writable = sg->sg_nseg - 1;
        }

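        /*
         * Descriptor accounting (added for clarity): `readable` counts
         * the driver-written segments (header, plus data for a write)
         * and `writable` the host-written ones (data for a read, plus
         * the ack byte appended below). E.g., a write covering 3 data
         * segments enqueues with readable = 4, writable = 1; the same
         * read would be readable = 1, writable = 4.
         */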
        writable++;
        sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));

        readable = sg->sg_nseg - writable;

        error = virtqueue_enqueue(vq, req, sg, readable, writable);
        if (error == 0 && ordered)
                sc->vtblk_req_ordered = req;

        return (error);
}

static void
vtblk_vq_intr(void *xsc)
{
        struct vtblk_softc *sc;
        struct virtqueue *vq;

        sc = xsc;
        vq = sc->vtblk_vq;

again:
        VTBLK_LOCK(sc);
        if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
                VTBLK_UNLOCK(sc);
                return;
        }

        vtblk_finish_completed(sc);

        if ((sc->vtblk_flags & VTBLK_FLAG_SUSPEND) == 0)
                vtblk_startio(sc);
        else
                wakeup(&sc->vtblk_vq);

        if (virtqueue_enable_intr(vq) != 0) {
                virtqueue_disable_intr(vq);
                VTBLK_UNLOCK(sc);
                goto again;
        }

        VTBLK_UNLOCK(sc);
}

static void
vtblk_stop(struct vtblk_softc *sc)
{

        virtqueue_disable_intr(sc->vtblk_vq);
        virtio_stop(sc->vtblk_dev);
}

#define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg)                  \
        if (virtio_with_feature(_dev, _feature)) {                      \
                virtio_read_device_config(_dev,                         \
                    offsetof(struct virtio_blk_config, _field),         \
                    &(_cfg)->_field, sizeof((_cfg)->_field));           \
        }

static void
vtblk_read_config(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
        device_t dev;

        dev = sc->vtblk_dev;

        bzero(blkcfg, sizeof(struct virtio_blk_config));

        /* The capacity is always available. */
        virtio_read_device_config(dev, offsetof(struct virtio_blk_config,
            capacity), &blkcfg->capacity, sizeof(blkcfg->capacity));

        /* Read the configuration if the feature was negotiated. */
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, writeback, blkcfg);
}

#undef VTBLK_GET_CONFIG

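/*
 * For illustration, VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max,
 * blkcfg) above expands to roughly:
 *
 *      if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
 *              virtio_read_device_config(dev,
 *                  offsetof(struct virtio_blk_config, seg_max),
 *                  &blkcfg->seg_max, sizeof(blkcfg->seg_max));
 *      }
 */
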
static void
vtblk_get_ident(struct vtblk_softc *sc)
{
        struct bio buf;
        struct disk *dp;
        struct vtblk_request *req;
        int len, error;

        dp = sc->vtblk_disk;
        len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);

        if (vtblk_tunable_int(sc, "no_ident", vtblk_no_ident) != 0)
                return;

        req = vtblk_dequeue_request(sc);
        if (req == NULL)
                return;

        req->vbr_ack = -1;
        req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
        req->vbr_hdr.ioprio = 1;
        req->vbr_hdr.sector = 0;

        req->vbr_bp = &buf;
        bzero(&buf, sizeof(struct bio));

        buf.bio_cmd = BIO_READ;
        buf.bio_data = dp->d_ident;
        buf.bio_bcount = len;

        VTBLK_LOCK(sc);
        error = vtblk_poll_request(sc, req);
        VTBLK_UNLOCK(sc);

        vtblk_enqueue_request(sc, req);

        if (error) {
                device_printf(sc->vtblk_dev,
                    "error getting device identifier: %d\n", error);
        }
}

static void
vtblk_prepare_dump(struct vtblk_softc *sc)
{
        device_t dev;
        struct virtqueue *vq;

        dev = sc->vtblk_dev;
        vq = sc->vtblk_vq;

        vtblk_stop(sc);

        /*
         * Drain all requests caught in-flight in the virtqueue,
         * skipping biodone(). When dumping, only one request is
         * outstanding at a time, and we just poll the virtqueue
         * for new completions.
         */
        vtblk_drain_vq(sc, 1);

        if (virtio_reinit(dev, sc->vtblk_features) != 0) {
                panic("%s: cannot reinit VirtIO block device during dump",
                    device_get_nameunit(dev));
        }

        virtqueue_disable_intr(vq);
        virtio_reinit_complete(dev);
}

static int
vtblk_write_dump(struct vtblk_softc *sc, void *virtual, off_t offset,
    size_t length)
{
        struct bio buf;
        struct vtblk_request *req;

        req = &sc->vtblk_dump_request;
        req->vbr_ack = -1;
        req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
        req->vbr_hdr.ioprio = 1;
        req->vbr_hdr.sector = offset / 512;

        req->vbr_bp = &buf;
        bzero(&buf, sizeof(struct bio));

        buf.bio_cmd = BIO_WRITE;
        buf.bio_data = virtual;
        buf.bio_bcount = length;

        return (vtblk_poll_request(sc, req));
}

static int
vtblk_flush_dump(struct vtblk_softc *sc)
{
        struct bio buf;
        struct vtblk_request *req;

        req = &sc->vtblk_dump_request;
        req->vbr_ack = -1;
        req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
        req->vbr_hdr.ioprio = 1;
        req->vbr_hdr.sector = 0;

        req->vbr_bp = &buf;
        bzero(&buf, sizeof(struct bio));

        buf.bio_cmd = BIO_FLUSH;

        return (vtblk_poll_request(sc, req));
}

static int
vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
        struct virtqueue *vq;
        int error;

        vq = sc->vtblk_vq;

        if (!virtqueue_empty(vq))
                return (EBUSY);

        error = vtblk_execute_request(sc, req);
        if (error)
                return (error);

        virtqueue_notify(vq);
        virtqueue_poll(vq, NULL);

        error = vtblk_request_error(req);
        if (error && bootverbose) {
                device_printf(sc->vtblk_dev,
                    "%s: IO error: %d\n", __func__, error);
        }

        return (error);
}

static void
vtblk_finish_completed(struct vtblk_softc *sc)
{
        struct vtblk_request *req;
        struct bio *bp;
        int error;

        while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
                bp = req->vbr_bp;

                if (sc->vtblk_req_ordered != NULL) {
                        /* This should be the only outstanding request. */
                        MPASS(sc->vtblk_req_ordered == req);
                        sc->vtblk_req_ordered = NULL;
                }

                error = vtblk_request_error(req);
                if (error)
                        disk_err(bp, "hard error", -1, 1);

                vtblk_finish_bio(bp, error);
                vtblk_enqueue_request(sc, req);
        }
}

static void
vtblk_drain_vq(struct vtblk_softc *sc, int skip_done)
{
        struct virtqueue *vq;
        struct vtblk_request *req;
        int last;

        vq = sc->vtblk_vq;
        last = 0;

        while ((req = virtqueue_drain(vq, &last)) != NULL) {
                if (!skip_done)
                        vtblk_finish_bio(req->vbr_bp, ENXIO);

                vtblk_enqueue_request(sc, req);
        }

        sc->vtblk_req_ordered = NULL;
        KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}

static void
vtblk_drain(struct vtblk_softc *sc)
{
        struct bio_queue_head *bioq;
        struct vtblk_request *req;
        struct bio *bp;

        bioq = &sc->vtblk_bioq;

        if (sc->vtblk_vq != NULL) {
                vtblk_finish_completed(sc);
                vtblk_drain_vq(sc, 0);
        }

        while ((req = vtblk_dequeue_ready(sc)) != NULL) {
                vtblk_finish_bio(req->vbr_bp, ENXIO);
                vtblk_enqueue_request(sc, req);
        }

        while (bioq_first(bioq) != NULL) {
                bp = bioq_takefirst(bioq);
                vtblk_finish_bio(bp, ENXIO);
        }

        vtblk_free_requests(sc);
}

#ifdef INVARIANTS
static void
vtblk_request_invariants(struct vtblk_request *req)
{
        int hdr_nsegs, ack_nsegs;

        hdr_nsegs = sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr));
        ack_nsegs = sglist_count(&req->vbr_ack, sizeof(req->vbr_ack));

        KASSERT(hdr_nsegs == 1, ("request header crossed page boundary"));
        KASSERT(ack_nsegs == 1, ("request ack crossed page boundary"));
}
#endif

static int
vtblk_alloc_requests(struct vtblk_softc *sc)
{
        struct vtblk_request *req;
        int i, nreqs;

        nreqs = virtqueue_size(sc->vtblk_vq);

        /*
         * Preallocate sufficient requests to keep the virtqueue full. Each
         * request consumes VTBLK_MIN_SEGMENTS or more descriptors so reduce
         * the number allocated when indirect descriptors are not available.
         */
        if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
                nreqs /= VTBLK_MIN_SEGMENTS;

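        /*
         * E.g. (illustrative): a 128-entry virtqueue yields 128
         * preallocated requests with indirect descriptors, but only
         * 128 / 2 = 64 without them, since each request then occupies
         * at least VTBLK_MIN_SEGMENTS descriptors directly in the ring.
         */
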
        for (i = 0; i < nreqs; i++) {
                req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT);
                if (req == NULL)
                        return (ENOMEM);

#ifdef INVARIANTS
                vtblk_request_invariants(req);
#endif

                sc->vtblk_request_count++;
                vtblk_enqueue_request(sc, req);
        }

        return (0);
}

static void
vtblk_free_requests(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        KASSERT(TAILQ_EMPTY(&sc->vtblk_req_ready),
            ("%s: ready requests left on queue", __func__));

        while ((req = vtblk_dequeue_request(sc)) != NULL) {
                sc->vtblk_request_count--;
                free(req, M_DEVBUF);
        }

        KASSERT(sc->vtblk_request_count == 0,
            ("%s: leaked %d requests", __func__, sc->vtblk_request_count));
}

static struct vtblk_request *
vtblk_dequeue_request(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        req = TAILQ_FIRST(&sc->vtblk_req_free);
        if (req != NULL)
                TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);

        return (req);
}

static void
vtblk_enqueue_request(struct vtblk_softc *sc, struct vtblk_request *req)
{

        bzero(req, sizeof(struct vtblk_request));
        TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
}

static struct vtblk_request *
vtblk_dequeue_ready(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        req = TAILQ_FIRST(&sc->vtblk_req_ready);
        if (req != NULL)
                TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);

        return (req);
}

static void
vtblk_enqueue_ready(struct vtblk_softc *sc, struct vtblk_request *req)
{

        TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);
}

static int
vtblk_request_error(struct vtblk_request *req)
{
        int error;

        switch (req->vbr_ack) {
        case VIRTIO_BLK_S_OK:
                error = 0;
                break;
        case VIRTIO_BLK_S_UNSUPP:
                error = ENOTSUP;
                break;
        default:
                error = EIO;
                break;
        }

        return (error);
}

static void
vtblk_finish_bio(struct bio *bp, int error)
{

        if (error) {
                bp->bio_resid = bp->bio_bcount;
                bp->bio_error = error;
                bp->bio_flags |= BIO_ERROR;
        }

        biodone(bp);
}

static void
vtblk_setup_sysctl(struct vtblk_softc *sc)
{
        device_t dev;
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *tree;
        struct sysctl_oid_list *child;

        dev = sc->vtblk_dev;
        ctx = device_get_sysctl_ctx(dev);
        tree = device_get_sysctl_tree(dev);
        child = SYSCTL_CHILDREN(tree);

        SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "writecache_mode",
            CTLTYPE_INT | CTLFLAG_RW, sc, 0, vtblk_write_cache_sysctl,
            "I", "Write cache mode (writethrough (0) or writeback (1))");
}

static int
vtblk_tunable_int(struct vtblk_softc *sc, const char *knob, int def)
{
        char path[64];

        snprintf(path, sizeof(path),
            "hw.vtblk.%d.%s", device_get_unit(sc->vtblk_dev), knob);
        TUNABLE_INT_FETCH(path, &def);

        return (def);
}

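/*
 * Usage note (added for clarity): callers pass the global tunable value
 * as `def`, so e.g. vtblk_tunable_int(sc, "writecache_mode",
 * vtblk_writecache_mode) returns hw.vtblk.0.writecache_mode for unit 0
 * when that knob is set, and the global hw.vtblk.writecache_mode value
 * otherwise.
 */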