/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO block devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <geom/geom_disk.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/block/virtio_blk.h>

#include "virtio_if.h"

struct vtblk_request {
	struct virtio_blk_outhdr	 vbr_hdr;
	struct bio			*vbr_bp;
	uint8_t				 vbr_ack;

	TAILQ_ENTRY(vtblk_request)	 vbr_link;
};

enum vtblk_cache_mode {
	VTBLK_CACHE_WRITETHROUGH,
	VTBLK_CACHE_WRITEBACK,
	VTBLK_CACHE_MAX
};
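
/*
 * Note the cache mode values intentionally match the writeback byte of
 * the virtio config space: 0 selects writethrough and 1 selects
 * writeback (see vtblk_set_write_cache() below).
 */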

struct vtblk_softc {
	device_t		 vtblk_dev;
	struct mtx		 vtblk_mtx;
	uint64_t		 vtblk_features;
	uint32_t		 vtblk_flags;
#define VTBLK_FLAG_INDIRECT	0x0001
#define VTBLK_FLAG_READONLY	0x0002
#define VTBLK_FLAG_DETACH	0x0004
#define VTBLK_FLAG_SUSPEND	0x0008
#define VTBLK_FLAG_DUMPING	0x0010
#define VTBLK_FLAG_BARRIER	0x0020
#define VTBLK_FLAG_WC_CONFIG	0x0040

	struct virtqueue	*vtblk_vq;
	struct sglist		*vtblk_sglist;
	struct disk		*vtblk_disk;

	struct bio_queue_head	 vtblk_bioq;
	TAILQ_HEAD(, vtblk_request)
				 vtblk_req_free;
	TAILQ_HEAD(, vtblk_request)
				 vtblk_req_ready;
	struct vtblk_request	*vtblk_req_ordered;

	int			 vtblk_max_nsegs;
	int			 vtblk_request_count;
	enum vtblk_cache_mode	 vtblk_write_cache;

	struct vtblk_request	 vtblk_dump_request;
};

static struct virtio_feature_desc vtblk_feature_desc[] = {
	{ VIRTIO_BLK_F_BARRIER,		"HostBarrier"	},
	{ VIRTIO_BLK_F_SIZE_MAX,	"MaxSegSize"	},
	{ VIRTIO_BLK_F_SEG_MAX,		"MaxNumSegs"	},
	{ VIRTIO_BLK_F_GEOMETRY,	"DiskGeometry"	},
	{ VIRTIO_BLK_F_RO,		"ReadOnly"	},
	{ VIRTIO_BLK_F_BLK_SIZE,	"BlockSize"	},
	{ VIRTIO_BLK_F_SCSI,		"SCSICmds"	},
	{ VIRTIO_BLK_F_WCE,		"WriteCache"	},
	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology"	},
	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE"	},

	{ 0, NULL }
};

static int	vtblk_modevent(module_t, int, void *);

static int	vtblk_probe(device_t);
static int	vtblk_attach(device_t);
static int	vtblk_detach(device_t);
static int	vtblk_suspend(device_t);
static int	vtblk_resume(device_t);
static int	vtblk_shutdown(device_t);
static int	vtblk_config_change(device_t);

static int	vtblk_open(struct disk *);
static int	vtblk_close(struct disk *);
static int	vtblk_ioctl(struct disk *, u_long, void *, int,
		    struct thread *);
static int	vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
static void	vtblk_strategy(struct bio *);

static void	vtblk_negotiate_features(struct vtblk_softc *);
static int	vtblk_maximum_segments(struct vtblk_softc *,
		    struct virtio_blk_config *);
static int	vtblk_alloc_virtqueue(struct vtblk_softc *);
static void	vtblk_resize_disk(struct vtblk_softc *, uint64_t);
static void	vtblk_set_write_cache(struct vtblk_softc *, int);
static int	vtblk_write_cache_enabled(struct vtblk_softc *sc,
		    struct virtio_blk_config *);
static int	vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS);
static void	vtblk_alloc_disk(struct vtblk_softc *,
		    struct virtio_blk_config *);
static void	vtblk_create_disk(struct vtblk_softc *);

static int	vtblk_quiesce(struct vtblk_softc *);
static void	vtblk_startio(struct vtblk_softc *);
static struct vtblk_request * vtblk_bio_request(struct vtblk_softc *);
static int	vtblk_execute_request(struct vtblk_softc *,
		    struct vtblk_request *);

static void	vtblk_vq_intr(void *);

static void	vtblk_stop(struct vtblk_softc *);

static void	vtblk_read_config(struct vtblk_softc *,
		    struct virtio_blk_config *);
static void	vtblk_get_ident(struct vtblk_softc *);
static void	vtblk_prepare_dump(struct vtblk_softc *);
static int	vtblk_write_dump(struct vtblk_softc *, void *, off_t, size_t);
static int	vtblk_flush_dump(struct vtblk_softc *);
static int	vtblk_poll_request(struct vtblk_softc *,
		    struct vtblk_request *);

static void	vtblk_finish_completed(struct vtblk_softc *);
static void	vtblk_drain_vq(struct vtblk_softc *, int);
static void	vtblk_drain(struct vtblk_softc *);

static int	vtblk_alloc_requests(struct vtblk_softc *);
static void	vtblk_free_requests(struct vtblk_softc *);
static struct vtblk_request * vtblk_dequeue_request(struct vtblk_softc *);
static void	vtblk_enqueue_request(struct vtblk_softc *,
		    struct vtblk_request *);

static struct vtblk_request * vtblk_dequeue_ready(struct vtblk_softc *);
static void	vtblk_enqueue_ready(struct vtblk_softc *,
		    struct vtblk_request *);

static int	vtblk_request_error(struct vtblk_request *);
static void	vtblk_finish_bio(struct bio *, int);

static void	vtblk_setup_sysctl(struct vtblk_softc *);
static int	vtblk_tunable_int(struct vtblk_softc *, const char *, int);

/* Tunables. */
static int vtblk_no_ident = 0;
TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
static int vtblk_writecache_mode = -1;
TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);
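
/*
 * Illustrative usage (not part of the original source): these knobs can
 * be set globally in loader.conf(5), e.g.
 *
 *	hw.vtblk.no_ident="1"
 *	hw.vtblk.writecache_mode="0"
 *
 * and vtblk_tunable_int() below additionally honors a per-device
 * override of the form hw.vtblk.<unit>.<knob>.
 */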

/* Features desired/implemented by this driver. */
#define VTBLK_FEATURES \
    (VIRTIO_BLK_F_BARRIER		| \
     VIRTIO_BLK_F_SIZE_MAX		| \
     VIRTIO_BLK_F_SEG_MAX		| \
     VIRTIO_BLK_F_GEOMETRY		| \
     VIRTIO_BLK_F_RO			| \
     VIRTIO_BLK_F_BLK_SIZE		| \
     VIRTIO_BLK_F_WCE			| \
     VIRTIO_BLK_F_TOPOLOGY		| \
     VIRTIO_BLK_F_CONFIG_WCE		| \
     VIRTIO_RING_F_INDIRECT_DESC)

#define VTBLK_MTX(_sc)		&(_sc)->vtblk_mtx
#define VTBLK_LOCK_INIT(_sc, _name) \
				mtx_init(VTBLK_MTX((_sc)), (_name), \
				    "VirtIO Block Lock", MTX_DEF)
#define VTBLK_LOCK(_sc)		mtx_lock(VTBLK_MTX((_sc)))
#define VTBLK_UNLOCK(_sc)	mtx_unlock(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_DESTROY(_sc)	mtx_destroy(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_ASSERT(_sc)	mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
#define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
				mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)

#define VTBLK_DISK_NAME		"vtbd"
#define VTBLK_QUIESCE_TIMEOUT	(30 * hz)

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VTBLK_MIN_SEGMENTS	2
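
/*
 * For reference, the resulting descriptor layout of one request on the
 * virtqueue (the data segments are absent for a flush; see
 * vtblk_execute_request()):
 *
 *	[ vbr_hdr ]	driver->device, sizeof(struct virtio_blk_outhdr)
 *	[ data ... ]	driver->device for writes, device->driver for reads
 *	[ vbr_ack ]	device->driver, one byte of request status
 */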

static device_method_t vtblk_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtblk_probe),
	DEVMETHOD(device_attach,	vtblk_attach),
	DEVMETHOD(device_detach,	vtblk_detach),
	DEVMETHOD(device_suspend,	vtblk_suspend),
	DEVMETHOD(device_resume,	vtblk_resume),
	DEVMETHOD(device_shutdown,	vtblk_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_config_change, vtblk_config_change),

	DEVMETHOD_END
};

static driver_t vtblk_driver = {
	"vtblk",
	vtblk_methods,
	sizeof(struct vtblk_softc)
};
static devclass_t vtblk_devclass;

DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
MODULE_VERSION(virtio_blk, 1);
MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);

static int
vtblk_modevent(module_t mod, int type, void *unused)
{
	int error;

	error = 0;

	switch (type) {
	case MOD_LOAD:
	case MOD_QUIESCE:
	case MOD_UNLOAD:
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static int
vtblk_probe(device_t dev)
{

	if (virtio_get_device_type(dev) != VIRTIO_ID_BLOCK)
		return (ENXIO);

	device_set_desc(dev, "VirtIO Block Adapter");

	return (BUS_PROBE_DEFAULT);
}

static int
vtblk_attach(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	int error;

	sc = device_get_softc(dev);
	sc->vtblk_dev = dev;

	VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));

	bioq_init(&sc->vtblk_bioq);
	TAILQ_INIT(&sc->vtblk_req_free);
	TAILQ_INIT(&sc->vtblk_req_ready);

	virtio_set_feature_desc(dev, vtblk_feature_desc);
	vtblk_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
		sc->vtblk_flags |= VTBLK_FLAG_READONLY;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
		sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;

	vtblk_setup_sysctl(sc);

	/* Get local copy of config. */
	vtblk_read_config(sc, &blkcfg);

	/*
	 * With the current sglist(9) implementation, it is not easy
	 * for us to support a maximum segment size as adjacent
	 * segments are coalesced. For now, just make sure it's larger
	 * than the maximum supported transfer size.
	 */
	if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
		if (blkcfg.size_max < MAXPHYS) {
			error = ENOTSUP;
			device_printf(dev, "host requires unsupported "
			    "maximum segment size feature\n");
			goto fail;
		}
	}

	sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
	if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
		error = EINVAL;
		device_printf(dev, "fewer than minimum number of segments "
		    "allowed: %d\n", sc->vtblk_max_nsegs);
		goto fail;
	}

	sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
	if (sc->vtblk_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtblk_alloc_virtqueue(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueue\n");
		goto fail;
	}

	error = vtblk_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot preallocate requests\n");
		goto fail;
	}

	vtblk_alloc_disk(sc, &blkcfg);

	error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupt\n");
		goto fail;
	}

	vtblk_create_disk(sc);

	virtqueue_enable_intr(sc->vtblk_vq);

fail:
	if (error)
		vtblk_detach(dev);

	return (error);
}

static int
vtblk_detach(device_t dev)
{
	struct vtblk_softc *sc;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	sc->vtblk_flags |= VTBLK_FLAG_DETACH;
	if (device_is_attached(dev))
		vtblk_stop(sc);
	VTBLK_UNLOCK(sc);

	vtblk_drain(sc);

	if (sc->vtblk_disk != NULL) {
		disk_destroy(sc->vtblk_disk);
		sc->vtblk_disk = NULL;
	}

	if (sc->vtblk_sglist != NULL) {
		sglist_free(sc->vtblk_sglist);
		sc->vtblk_sglist = NULL;
	}

	VTBLK_LOCK_DESTROY(sc);

	return (0);
}

static int
vtblk_suspend(device_t dev)
{
	struct vtblk_softc *sc;
	int error;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
	/* XXX BMV: virtio_stop(), etc needed here? */
	error = vtblk_quiesce(sc);
	if (error)
		sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
	VTBLK_UNLOCK(sc);

	return (error);
}

static int
vtblk_resume(device_t dev)
{
	struct vtblk_softc *sc;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	/* XXX BMV: virtio_reinit(), etc needed here? */
	sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
	if (sc->vtblk_disk != NULL)
		vtblk_startio(sc);
	VTBLK_UNLOCK(sc);

	return (0);
}

static int
vtblk_shutdown(device_t dev)
{

	return (0);
}

static int
vtblk_config_change(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	uint64_t capacity;

	sc = device_get_softc(dev);

	vtblk_read_config(sc, &blkcfg);

	/* Capacity is always in 512-byte units. */
	capacity = blkcfg.capacity * 512;

	if (sc->vtblk_disk->d_mediasize != capacity)
		vtblk_resize_disk(sc, capacity);

	return (0);
}

static int
vtblk_open(struct disk *dp)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? ENXIO : 0);
}

static int
vtblk_close(struct disk *dp)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (0);
}

static int
vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
    struct thread *td)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (ENOTTY);
}

static int
vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)
{
	struct disk *dp;
	struct vtblk_softc *sc;
	int error;

	dp = arg;
	error = 0;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	VTBLK_LOCK(sc);

	if ((sc->vtblk_flags & VTBLK_FLAG_DUMPING) == 0) {
		vtblk_prepare_dump(sc);
		sc->vtblk_flags |= VTBLK_FLAG_DUMPING;
	}

	if (length > 0)
		error = vtblk_write_dump(sc, virtual, offset, length);
	else if (virtual == NULL && offset == 0)
		error = vtblk_flush_dump(sc);
	else {
		error = EINVAL;
		sc->vtblk_flags &= ~VTBLK_FLAG_DUMPING;
	}

	VTBLK_UNLOCK(sc);

	return (error);
}

static void
vtblk_strategy(struct bio *bp)
{
	struct vtblk_softc *sc;

	if ((sc = bp->bio_disk->d_drv1) == NULL) {
		vtblk_finish_bio(bp, EINVAL);
		return;
	}

	/*
	 * Fail any write if RO. Unfortunately, there does not seem to
	 * be a better way to report our readonly'ness to GEOM above.
	 */
	if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
	    (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH)) {
		vtblk_finish_bio(bp, EROFS);
		return;
	}

#ifdef INVARIANTS
	/*
	 * Prevent read/write buffers spanning too many segments from
	 * getting into the queue. This should only trip if d_maxsize
	 * was incorrectly set.
	 */
	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
		int nsegs, max_nsegs;

		nsegs = sglist_count(bp->bio_data, bp->bio_bcount);
		max_nsegs = sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS;

		KASSERT(nsegs <= max_nsegs,
		    ("%s: bio %p spanned too many segments: %d, max: %d",
		    __func__, bp, nsegs, max_nsegs));
	}
#endif

	VTBLK_LOCK(sc);
	if (sc->vtblk_flags & VTBLK_FLAG_DETACH)
		vtblk_finish_bio(bp, ENXIO);
	else {
		bioq_disksort(&sc->vtblk_bioq, bp);

		if ((sc->vtblk_flags & VTBLK_FLAG_SUSPEND) == 0)
			vtblk_startio(sc);
	}
	VTBLK_UNLOCK(sc);
}
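
/*
 * Note: the bioq, the ready list, and the virtqueue are all protected
 * by the single per-device mutex; vtblk_startio() and
 * vtblk_execute_request() below assert that it is held.
 */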

static void
vtblk_negotiate_features(struct vtblk_softc *sc)
{
	device_t dev;
	uint64_t features;

	dev = sc->vtblk_dev;
	features = VTBLK_FEATURES;

	sc->vtblk_features = virtio_negotiate_features(dev, features);
}

static int
vtblk_maximum_segments(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
	device_t dev;
	int nsegs;

	dev = sc->vtblk_dev;
	nsegs = VTBLK_MIN_SEGMENTS;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
		nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
		if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
	} else
		nsegs += 1;

	return (nsegs);
}
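
/*
 * Why MAXPHYS / PAGE_SIZE + 1: a MAXPHYS-sized buffer that is not page
 * aligned touches one extra page, so that is the most data segments a
 * single transfer can ever need. There is no benefit in accepting a
 * larger seg_max from the host than the driver can use.
 */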

static int
vtblk_alloc_virtqueue(struct vtblk_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info;

	dev = sc->vtblk_dev;

	VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
	    vtblk_vq_intr, sc, &sc->vtblk_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
}

static void
vtblk_resize_disk(struct vtblk_softc *sc, uint64_t new_capacity)
{
	device_t dev;
	struct disk *dp;
	int error;

	dev = sc->vtblk_dev;
	dp = sc->vtblk_disk;

	dp->d_mediasize = new_capacity;
	if (bootverbose) {
		device_printf(dev, "resized to %juMB (%ju %u byte sectors)\n",
		    (uintmax_t) dp->d_mediasize >> 20,
		    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
		    dp->d_sectorsize);
	}

	error = disk_resize(dp, M_NOWAIT);
	if (error)
		device_printf(dev,
		    "disk_resize(9) failed, error: %d\n", error);
}

static void
vtblk_set_write_cache(struct vtblk_softc *sc, int wc)
{

	/* Set either writeback (1) or writethrough (0) mode. */
	virtio_write_dev_config_1(sc->vtblk_dev,
	    offsetof(struct virtio_blk_config, writeback), wc);
}
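
/*
 * Illustrative example (not part of the original source): when
 * VIRTIO_BLK_F_CONFIG_WCE was negotiated, the cache mode can be changed
 * at runtime through the sysctl created in vtblk_setup_sysctl(), e.g.
 *
 *	sysctl dev.vtblk.0.writecache_mode=1
 *
 * which lands in vtblk_write_cache_sysctl() and stores the new mode
 * into the config space via this helper.
 */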

static int
vtblk_write_cache_enabled(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
	int wc;

	if (sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) {
		wc = vtblk_tunable_int(sc, "writecache_mode",
		    vtblk_writecache_mode);
		if (wc >= 0 && wc < VTBLK_CACHE_MAX)
			vtblk_set_write_cache(sc, wc);
		else
			wc = blkcfg->writeback;
	} else
		wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE);

	return (wc);
}

static int
vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct vtblk_softc *sc;
	int wc, error;

	sc = oidp->oid_arg1;
	wc = sc->vtblk_write_cache;

	error = sysctl_handle_int(oidp, &wc, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
		return (EPERM);
	if (wc < 0 || wc >= VTBLK_CACHE_MAX)
		return (EINVAL);

	VTBLK_LOCK(sc);
	sc->vtblk_write_cache = wc;
	vtblk_set_write_cache(sc, sc->vtblk_write_cache);
	VTBLK_UNLOCK(sc);

	return (0);
}

static void
vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
	device_t dev;
	struct disk *dp;

	dev = sc->vtblk_dev;

	sc->vtblk_disk = dp = disk_alloc();
	dp->d_open = vtblk_open;
	dp->d_close = vtblk_close;
	dp->d_ioctl = vtblk_ioctl;
	dp->d_strategy = vtblk_strategy;
	dp->d_name = VTBLK_DISK_NAME;
	dp->d_unit = device_get_unit(dev);
	dp->d_drv1 = sc;
	dp->d_flags = DISKFLAG_CANFLUSHCACHE;
	dp->d_hba_vendor = virtio_get_vendor(dev);
	dp->d_hba_device = virtio_get_device(dev);
	dp->d_hba_subvendor = virtio_get_subvendor(dev);
	dp->d_hba_subdevice = virtio_get_subdevice(dev);

	if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
		dp->d_dump = vtblk_dump;

	/* Capacity is always in 512-byte units. */
	dp->d_mediasize = blkcfg->capacity * 512;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
		dp->d_sectorsize = blkcfg->blk_size;
	else
		dp->d_sectorsize = 512;

	/*
	 * The VirtIO maximum I/O size is given in terms of segments.
	 * However, FreeBSD limits I/O size by logical buffer size, not
	 * by physically contiguous pages. Therefore, we have to assume
	 * no pages are contiguous. This may impose an artificially low
	 * maximum I/O size. But in practice, since QEMU advertises 128
	 * segments, this gives us a maximum IO size of 125 * PAGE_SIZE,
	 * which is typically greater than MAXPHYS. Eventually we should
	 * just advertise MAXPHYS and split buffers that are too big.
	 *
	 * Note we must subtract one additional segment in case of non
	 * page aligned buffers.
	 */
	dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
	    PAGE_SIZE;
	if (dp->d_maxsize < PAGE_SIZE)
		dp->d_maxsize = PAGE_SIZE; /* XXX */

	if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
		dp->d_fwsectors = blkcfg->geometry.sectors;
		dp->d_fwheads = blkcfg->geometry.heads;
	}

	if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY)) {
		dp->d_stripesize = dp->d_sectorsize *
		    (1 << blkcfg->topology.physical_block_exp);
		dp->d_stripeoffset = (dp->d_stripesize -
		    blkcfg->topology.alignment_offset * dp->d_sectorsize) %
		    dp->d_stripesize;
	}

	if (vtblk_write_cache_enabled(sc, blkcfg) != 0)
		sc->vtblk_write_cache = VTBLK_CACHE_WRITEBACK;
	else
		sc->vtblk_write_cache = VTBLK_CACHE_WRITETHROUGH;
}

static void
vtblk_create_disk(struct vtblk_softc *sc)
{
	struct disk *dp;

	dp = sc->vtblk_disk;

	/*
	 * Retrieving the identification string must be done after
	 * the virtqueue interrupt is setup otherwise it will hang.
	 */
	vtblk_get_ident(sc);

	device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
	    (uintmax_t) dp->d_mediasize >> 20,
	    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
	    dp->d_sectorsize);

	disk_create(dp, DISK_VERSION);
}

static int
vtblk_quiesce(struct vtblk_softc *sc)
{
	int error;

	error = 0;

	VTBLK_LOCK_ASSERT(sc);

	while (!virtqueue_empty(sc->vtblk_vq)) {
		if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq",
		    VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) {
			error = EBUSY;
			break;
		}
	}

	return (error);
}

static void
vtblk_startio(struct vtblk_softc *sc)
{
	struct virtqueue *vq;
	struct vtblk_request *req;
	int enq;

	vq = sc->vtblk_vq;
	enq = 0;

	VTBLK_LOCK_ASSERT(sc);

	while (!virtqueue_full(vq)) {
		if ((req = vtblk_dequeue_ready(sc)) == NULL)
			req = vtblk_bio_request(sc);
		if (req == NULL)
			break;

		if (vtblk_execute_request(sc, req) != 0) {
			vtblk_enqueue_ready(sc, req);
			break;
		}

		enq++;
	}

	if (enq > 0)
		virtqueue_notify(vq);
}

static struct vtblk_request *
vtblk_bio_request(struct vtblk_softc *sc)
{
	struct bio_queue_head *bioq;
	struct vtblk_request *req;
	struct bio *bp;

	bioq = &sc->vtblk_bioq;

	if (bioq_first(bioq) == NULL)
		return (NULL);

	req = vtblk_dequeue_request(sc);
	if (req == NULL)
		return (NULL);

	bp = bioq_takefirst(bioq);
	req->vbr_bp = bp;
	req->vbr_ack = -1;
	req->vbr_hdr.ioprio = 1;

	switch (bp->bio_cmd) {
	case BIO_FLUSH:
		req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
		break;
	case BIO_READ:
		req->vbr_hdr.type = VIRTIO_BLK_T_IN;
		req->vbr_hdr.sector = bp->bio_offset / 512;
		break;
	case BIO_WRITE:
		req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
		req->vbr_hdr.sector = bp->bio_offset / 512;
		break;
	default:
		panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
	}

	return (req);
}

static int
vtblk_execute_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
	struct virtqueue *vq;
	struct sglist *sg;
	struct bio *bp;
	int ordered, readable, writable, error;

	vq = sc->vtblk_vq;
	sg = sc->vtblk_sglist;
	bp = req->vbr_bp;
	ordered = 0;
	writable = 0;

	VTBLK_LOCK_ASSERT(sc);

	/*
	 * Wait until the ordered request completes before
	 * executing subsequent requests.
	 */
	if (sc->vtblk_req_ordered != NULL)
		return (EBUSY);

	if (bp->bio_flags & BIO_ORDERED) {
		if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
			/*
			 * This request will be executed once all
			 * the in-flight requests are completed.
			 */
			if (!virtqueue_empty(vq))
				return (EBUSY);
			ordered = 1;
		} else
			req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;
	}

	sglist_reset(sg);
	sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));

	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
		error = sglist_append(sg, bp->bio_data, bp->bio_bcount);
		if (error || sg->sg_nseg == sg->sg_maxseg)
			panic("%s: data buffer too big bio:%p error:%d",
			    __func__, bp, error);

		/* BIO_READ means the host writes into our buffer. */
		if (bp->bio_cmd == BIO_READ)
			writable = sg->sg_nseg - 1;
	}

	writable++;
	sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
	readable = sg->sg_nseg - writable;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error == 0 && ordered)
		sc->vtblk_req_ordered = req;

	return (error);
}
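
/*
 * Descriptor accounting example: for an N segment BIO_READ, the header
 * is the only readable segment (readable = 1) while the data plus the
 * ack byte are writable (writable = N + 1); for a BIO_WRITE,
 * readable = N + 1 and only the ack is writable (writable = 1).
 */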

static void
vtblk_vq_intr(void *xsc)
{
	struct vtblk_softc *sc;
	struct virtqueue *vq;

	sc = xsc;
	vq = sc->vtblk_vq;

again:
	VTBLK_LOCK(sc);
	if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
		VTBLK_UNLOCK(sc);
		return;
	}

	vtblk_finish_completed(sc);

	if ((sc->vtblk_flags & VTBLK_FLAG_SUSPEND) == 0)
		vtblk_startio(sc);
	else
		wakeup(&sc->vtblk_vq);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTBLK_UNLOCK(sc);
		goto again;
	}

	VTBLK_UNLOCK(sc);
}

static void
vtblk_stop(struct vtblk_softc *sc)
{

	virtqueue_disable_intr(sc->vtblk_vq);
	virtio_stop(sc->vtblk_dev);
}

#define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg)			\
	if (virtio_with_feature(_dev, _feature)) {			\
		virtio_read_device_config(_dev,				\
		    offsetof(struct virtio_blk_config, _field),		\
		    &(_cfg)->_field, sizeof((_cfg)->_field));		\
	}

static void
vtblk_read_config(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
	device_t dev;

	dev = sc->vtblk_dev;

	bzero(blkcfg, sizeof(struct virtio_blk_config));

	/* The capacity is always available. */
	virtio_read_device_config(dev, offsetof(struct virtio_blk_config,
	    capacity), &blkcfg->capacity, sizeof(blkcfg->capacity));

	/* Read the configuration if the feature was negotiated. */
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, writeback, blkcfg);
}

#undef VTBLK_GET_CONFIG
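
/*
 * For example, VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max,
 * blkcfg) above expands to a feature check followed by a
 * virtio_read_device_config() of the size_max field at its offset
 * within struct virtio_blk_config; fields whose feature was not
 * negotiated are left zeroed by the earlier bzero().
 */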

static void
vtblk_get_ident(struct vtblk_softc *sc)
{
	struct bio buf;
	struct disk *dp;
	struct vtblk_request *req;
	int len, error;

	dp = sc->vtblk_disk;
	len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);

	if (vtblk_tunable_int(sc, "no_ident", vtblk_no_ident) != 0)
		return;

	req = vtblk_dequeue_request(sc);
	if (req == NULL)
		return;

	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = 0;

	req->vbr_bp = &buf;
	bzero(&buf, sizeof(struct bio));

	buf.bio_cmd = BIO_READ;
	buf.bio_data = dp->d_ident;
	buf.bio_bcount = len;

	VTBLK_LOCK(sc);
	error = vtblk_poll_request(sc, req);
	VTBLK_UNLOCK(sc);

	vtblk_enqueue_request(sc, req);

	if (error) {
		device_printf(sc->vtblk_dev,
		    "error getting device identifier: %d\n", error);
	}
}

static void
vtblk_prepare_dump(struct vtblk_softc *sc)
{
	device_t dev;
	struct virtqueue *vq;

	dev = sc->vtblk_dev;
	vq = sc->vtblk_vq;

	vtblk_stop(sc);

	/*
	 * Drain all requests caught in-flight in the virtqueue,
	 * skipping biodone(). When dumping, only one request is
	 * outstanding at a time, and we just poll the virtqueue
	 * for new completions.
	 */
	vtblk_drain_vq(sc, 1);

	if (virtio_reinit(dev, sc->vtblk_features) != 0) {
		panic("%s: cannot reinit VirtIO block device during dump",
		    device_get_nameunit(dev));
	}

	virtqueue_disable_intr(vq);
	virtio_reinit_complete(dev);
}

static int
vtblk_write_dump(struct vtblk_softc *sc, void *virtual, off_t offset,
    size_t length)
{
	struct bio buf;
	struct vtblk_request *req;

	req = &sc->vtblk_dump_request;
	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = offset / 512;

	req->vbr_bp = &buf;
	bzero(&buf, sizeof(struct bio));

	buf.bio_cmd = BIO_WRITE;
	buf.bio_data = virtual;
	buf.bio_bcount = length;

	return (vtblk_poll_request(sc, req));
}

static int
vtblk_flush_dump(struct vtblk_softc *sc)
{
	struct bio buf;
	struct vtblk_request *req;

	req = &sc->vtblk_dump_request;
	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = 0;

	req->vbr_bp = &buf;
	bzero(&buf, sizeof(struct bio));

	buf.bio_cmd = BIO_FLUSH;

	return (vtblk_poll_request(sc, req));
}
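
/*
 * The two dump helpers above reuse the vtblk_dump_request preallocated
 * in the softc together with a stack bio, so writing a kernel core dump
 * never has to allocate memory; each request is issued synchronously
 * through vtblk_poll_request() below.
 */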

static int
vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
	struct virtqueue *vq;
	int error;

	vq = sc->vtblk_vq;

	if (!virtqueue_empty(vq))
		return (EBUSY);

	error = vtblk_execute_request(sc, req);
	if (error)
		return (error);

	virtqueue_notify(vq);
	virtqueue_poll(vq, NULL);

	error = vtblk_request_error(req);
	if (error && bootverbose) {
		device_printf(sc->vtblk_dev,
		    "%s: IO error: %d\n", __func__, error);
	}

	return (error);
}

static void
vtblk_finish_completed(struct vtblk_softc *sc)
{
	struct vtblk_request *req;
	struct bio *bp;
	int error;

	while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
		bp = req->vbr_bp;

		if (sc->vtblk_req_ordered != NULL) {
			/* This should be the only outstanding request. */
			MPASS(sc->vtblk_req_ordered == req);
			sc->vtblk_req_ordered = NULL;
		}

		error = vtblk_request_error(req);
		if (error)
			disk_err(bp, "hard error", -1, 1);

		vtblk_finish_bio(bp, error);
		vtblk_enqueue_request(sc, req);
	}
}

static void
vtblk_drain_vq(struct vtblk_softc *sc, int skip_done)
{
	struct virtqueue *vq;
	struct vtblk_request *req;
	int last;

	vq = sc->vtblk_vq;
	last = 0;

	while ((req = virtqueue_drain(vq, &last)) != NULL) {
		if (!skip_done)
			vtblk_finish_bio(req->vbr_bp, ENXIO);

		vtblk_enqueue_request(sc, req);
	}

	sc->vtblk_req_ordered = NULL;
	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}

static void
vtblk_drain(struct vtblk_softc *sc)
{
	struct bio_queue_head *bioq;
	struct vtblk_request *req;
	struct bio *bp;

	bioq = &sc->vtblk_bioq;

	if (sc->vtblk_vq != NULL) {
		vtblk_finish_completed(sc);
		vtblk_drain_vq(sc, 0);
	}

	while ((req = vtblk_dequeue_ready(sc)) != NULL) {
		vtblk_finish_bio(req->vbr_bp, ENXIO);
		vtblk_enqueue_request(sc, req);
	}

	while (bioq_first(bioq) != NULL) {
		bp = bioq_takefirst(bioq);
		vtblk_finish_bio(bp, ENXIO);
	}

	vtblk_free_requests(sc);
}

#ifdef INVARIANTS
static void
vtblk_request_invariants(struct vtblk_request *req)
{
	int hdr_nsegs, ack_nsegs;

	hdr_nsegs = sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr));
	ack_nsegs = sglist_count(&req->vbr_ack, sizeof(req->vbr_ack));

	KASSERT(hdr_nsegs == 1, ("request header crossed page boundary"));
	KASSERT(ack_nsegs == 1, ("request ack crossed page boundary"));
}
#endif

static int
vtblk_alloc_requests(struct vtblk_softc *sc)
{
	struct vtblk_request *req;
	int i, nreqs;

	nreqs = virtqueue_size(sc->vtblk_vq);

	/*
	 * Preallocate sufficient requests to keep the virtqueue full. Each
	 * request consumes VTBLK_MIN_SEGMENTS or more descriptors so reduce
	 * the number allocated when indirect descriptors are not available.
	 */
	if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
		nreqs /= VTBLK_MIN_SEGMENTS;

	for (i = 0; i < nreqs; i++) {
		req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT);
		if (req == NULL)
			return (ENOMEM);

#ifdef INVARIANTS
		vtblk_request_invariants(req);
#endif

		sc->vtblk_request_count++;
		vtblk_enqueue_request(sc, req);
	}

	return (0);
}

static void
vtblk_free_requests(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	KASSERT(TAILQ_EMPTY(&sc->vtblk_req_ready),
	    ("%s: ready requests left on queue", __func__));

	while ((req = vtblk_dequeue_request(sc)) != NULL) {
		sc->vtblk_request_count--;
		free(req, M_DEVBUF);
	}

	KASSERT(sc->vtblk_request_count == 0,
	    ("%s: leaked %d requests", __func__, sc->vtblk_request_count));
}

static struct vtblk_request *
vtblk_dequeue_request(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = TAILQ_FIRST(&sc->vtblk_req_free);
	if (req != NULL)
		TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);

	return (req);
}

static void
vtblk_enqueue_request(struct vtblk_softc *sc, struct vtblk_request *req)
{

	bzero(req, sizeof(struct vtblk_request));
	TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
}
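
/*
 * The free list doubles as the request allocation cache; zeroing each
 * request on release ensures a stale header, ack, or bio pointer can
 * never leak into the next I/O.
 */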

static struct vtblk_request *
vtblk_dequeue_ready(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = TAILQ_FIRST(&sc->vtblk_req_ready);
	if (req != NULL)
		TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);

	return (req);
}

static void
vtblk_enqueue_ready(struct vtblk_softc *sc, struct vtblk_request *req)
{

	TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);
}

static int
vtblk_request_error(struct vtblk_request *req)
{
	int error;

	switch (req->vbr_ack) {
	case VIRTIO_BLK_S_OK:
		error = 0;
		break;
	case VIRTIO_BLK_S_UNSUPP:
		error = ENOTSUP;
		break;
	default:
		error = EIO;
		break;
	}

	return (error);
}

static void
vtblk_finish_bio(struct bio *bp, int error)
{

	if (error) {
		bp->bio_resid = bp->bio_bcount;
		bp->bio_error = error;
		bp->bio_flags |= BIO_ERROR;
	}

	biodone(bp);
}

static void
vtblk_setup_sysctl(struct vtblk_softc *sc)
{
	device_t dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtblk_dev;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "writecache_mode",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, vtblk_write_cache_sysctl,
	    "I", "Write cache mode (writethrough (0) or writeback (1))");
}

static int
vtblk_tunable_int(struct vtblk_softc *sc, const char *knob, int def)
{
	char path[64];

	snprintf(path, sizeof(path),
	    "hw.vtblk.%d.%s", device_get_unit(sc->vtblk_dev), knob);
	TUNABLE_INT_FETCH(path, &def);

	return (def);
}