/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO block devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <geom/geom.h>
#include <geom/geom_disk.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/block/virtio_blk.h>

#include "virtio_if.h"

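/*
 * A block request pairs the bio being serviced with the virtio-blk out
 * header and status (ack) byte that are exchanged with the host.
 */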
struct vtblk_request {
        struct virtio_blk_outhdr         vbr_hdr;
        struct bio                      *vbr_bp;
        uint8_t                          vbr_ack;
        TAILQ_ENTRY(vtblk_request)       vbr_link;
};

enum vtblk_cache_mode {
        VTBLK_CACHE_WRITETHROUGH,
        VTBLK_CACHE_WRITEBACK,
        VTBLK_CACHE_MAX
};

struct vtblk_softc {
        device_t                 vtblk_dev;
        struct mtx               vtblk_mtx;
        uint64_t                 vtblk_features;
        uint32_t                 vtblk_flags;
#define VTBLK_FLAG_INDIRECT     0x0001
#define VTBLK_FLAG_READONLY     0x0002
#define VTBLK_FLAG_DETACH       0x0004
#define VTBLK_FLAG_SUSPEND      0x0008
#define VTBLK_FLAG_BARRIER      0x0010
#define VTBLK_FLAG_WCE_CONFIG   0x0020

        struct virtqueue        *vtblk_vq;
        struct sglist           *vtblk_sglist;
        struct disk             *vtblk_disk;

        struct bio_queue_head    vtblk_bioq;
        TAILQ_HEAD(, vtblk_request)
                                 vtblk_req_free;
        TAILQ_HEAD(, vtblk_request)
                                 vtblk_req_ready;
        struct vtblk_request    *vtblk_req_ordered;

        int                      vtblk_max_nsegs;
        int                      vtblk_request_count;
        enum vtblk_cache_mode    vtblk_write_cache;

        struct bio_queue         vtblk_dump_queue;
        struct vtblk_request     vtblk_dump_request;
};

static struct virtio_feature_desc vtblk_feature_desc[] = {
        { VIRTIO_BLK_F_BARRIER,         "HostBarrier"   },
        { VIRTIO_BLK_F_SIZE_MAX,        "MaxSegSize"    },
        { VIRTIO_BLK_F_SEG_MAX,         "MaxNumSegs"    },
        { VIRTIO_BLK_F_GEOMETRY,        "DiskGeometry"  },
        { VIRTIO_BLK_F_RO,              "ReadOnly"      },
        { VIRTIO_BLK_F_BLK_SIZE,        "BlockSize"     },
        { VIRTIO_BLK_F_SCSI,            "SCSICmds"      },
        { VIRTIO_BLK_F_FLUSH,           "FlushCmd"      },
        { VIRTIO_BLK_F_TOPOLOGY,        "Topology"      },
        { VIRTIO_BLK_F_CONFIG_WCE,      "ConfigWCE"     },
        { VIRTIO_BLK_F_MQ,              "Multiqueue"    },
        { VIRTIO_BLK_F_DISCARD,         "Discard"       },
        { VIRTIO_BLK_F_WRITE_ZEROES,    "WriteZeros"    },

        { 0, NULL }
};

static int      vtblk_modevent(module_t, int, void *);

static int      vtblk_probe(device_t);
static int      vtblk_attach(device_t);
static int      vtblk_detach(device_t);
static int      vtblk_suspend(device_t);
static int      vtblk_resume(device_t);
static int      vtblk_shutdown(device_t);
static int      vtblk_config_change(device_t);

static int      vtblk_open(struct disk *);
static int      vtblk_close(struct disk *);
static int      vtblk_ioctl(struct disk *, u_long, void *, int,
                    struct thread *);
static int      vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
static void     vtblk_strategy(struct bio *);

static void     vtblk_negotiate_features(struct vtblk_softc *);
static void     vtblk_setup_features(struct vtblk_softc *);
static int      vtblk_maximum_segments(struct vtblk_softc *,
                    struct virtio_blk_config *);
static int      vtblk_alloc_virtqueue(struct vtblk_softc *);
static void     vtblk_resize_disk(struct vtblk_softc *, uint64_t);
static void     vtblk_alloc_disk(struct vtblk_softc *,
                    struct virtio_blk_config *);
static void     vtblk_create_disk(struct vtblk_softc *);

static int      vtblk_request_prealloc(struct vtblk_softc *);
static void     vtblk_request_free(struct vtblk_softc *);
static struct vtblk_request *
                vtblk_request_dequeue(struct vtblk_softc *);
static void     vtblk_request_enqueue(struct vtblk_softc *,
                    struct vtblk_request *);
static struct vtblk_request *
                vtblk_request_next_ready(struct vtblk_softc *);
static void     vtblk_request_requeue_ready(struct vtblk_softc *,
                    struct vtblk_request *);
static struct vtblk_request *
                vtblk_request_next(struct vtblk_softc *);
static struct vtblk_request *
                vtblk_request_bio(struct vtblk_softc *);
static int      vtblk_request_execute(struct vtblk_softc *,
                    struct vtblk_request *);
static int      vtblk_request_error(struct vtblk_request *);

static void     vtblk_queue_completed(struct vtblk_softc *,
                    struct bio_queue *);
static void     vtblk_done_completed(struct vtblk_softc *,
                    struct bio_queue *);
static void     vtblk_drain_vq(struct vtblk_softc *);
static void     vtblk_drain(struct vtblk_softc *);

static void     vtblk_startio(struct vtblk_softc *);
static void     vtblk_bio_done(struct vtblk_softc *, struct bio *, int);

static void     vtblk_read_config(struct vtblk_softc *,
                    struct virtio_blk_config *);
static void     vtblk_ident(struct vtblk_softc *);
static int      vtblk_poll_request(struct vtblk_softc *,
                    struct vtblk_request *);
static int      vtblk_quiesce(struct vtblk_softc *);
static void     vtblk_vq_intr(void *);
static void     vtblk_stop(struct vtblk_softc *);

static void     vtblk_dump_quiesce(struct vtblk_softc *);
static int      vtblk_dump_write(struct vtblk_softc *, void *, off_t, size_t);
static int      vtblk_dump_flush(struct vtblk_softc *);
static void     vtblk_dump_complete(struct vtblk_softc *);

static void     vtblk_set_write_cache(struct vtblk_softc *, int);
static int      vtblk_write_cache_enabled(struct vtblk_softc *sc,
                    struct virtio_blk_config *);
static int      vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS);

static void     vtblk_setup_sysctl(struct vtblk_softc *);
static int      vtblk_tunable_int(struct vtblk_softc *, const char *, int);

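/*
 * Legacy VirtIO devices use the guest's native endianness, while modern
 * (V1) devices are always little-endian. These wrappers pick the proper
 * conversion based on whether VIRTIO_F_VERSION_1 was negotiated.
 */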
#define vtblk_modern(_sc) (((_sc)->vtblk_features & VIRTIO_F_VERSION_1) != 0)
#define vtblk_htog16(_sc, _val) virtio_htog16(vtblk_modern(_sc), _val)
#define vtblk_htog32(_sc, _val) virtio_htog32(vtblk_modern(_sc), _val)
#define vtblk_htog64(_sc, _val) virtio_htog64(vtblk_modern(_sc), _val)
#define vtblk_gtoh16(_sc, _val) virtio_gtoh16(vtblk_modern(_sc), _val)
#define vtblk_gtoh32(_sc, _val) virtio_gtoh32(vtblk_modern(_sc), _val)
#define vtblk_gtoh64(_sc, _val) virtio_gtoh64(vtblk_modern(_sc), _val)

/* Tunables. */
static int vtblk_no_ident = 0;
TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
static int vtblk_writecache_mode = -1;
TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);

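/*
 * Features desired for both legacy and modern (V1) operation. The legacy
 * set additionally offers the deprecated barrier feature, which cannot be
 * negotiated on modern devices.
 */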
#define VTBLK_COMMON_FEATURES \
    (VIRTIO_BLK_F_SIZE_MAX              | \
     VIRTIO_BLK_F_SEG_MAX               | \
     VIRTIO_BLK_F_GEOMETRY              | \
     VIRTIO_BLK_F_RO                    | \
     VIRTIO_BLK_F_BLK_SIZE              | \
     VIRTIO_BLK_F_FLUSH                 | \
     VIRTIO_BLK_F_TOPOLOGY              | \
     VIRTIO_BLK_F_CONFIG_WCE            | \
     VIRTIO_BLK_F_DISCARD               | \
     VIRTIO_RING_F_INDIRECT_DESC)

#define VTBLK_MODERN_FEATURES   (VTBLK_COMMON_FEATURES)
#define VTBLK_LEGACY_FEATURES   (VIRTIO_BLK_F_BARRIER | VTBLK_COMMON_FEATURES)

#define VTBLK_MTX(_sc)          &(_sc)->vtblk_mtx
#define VTBLK_LOCK_INIT(_sc, _name) \
                                mtx_init(VTBLK_MTX((_sc)), (_name), \
                                    "VirtIO Block Lock", MTX_DEF)
#define VTBLK_LOCK(_sc)         mtx_lock(VTBLK_MTX((_sc)))
#define VTBLK_UNLOCK(_sc)       mtx_unlock(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_DESTROY(_sc) mtx_destroy(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_ASSERT(_sc)  mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
#define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
                                mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)

#define VTBLK_DISK_NAME         "vtbd"
#define VTBLK_QUIESCE_TIMEOUT   (30 * hz)
#define VTBLK_BSIZE             512

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VTBLK_MIN_SEGMENTS      2

static device_method_t vtblk_methods[] = {
        /* Device methods. */
        DEVMETHOD(device_probe,         vtblk_probe),
        DEVMETHOD(device_attach,        vtblk_attach),
        DEVMETHOD(device_detach,        vtblk_detach),
        DEVMETHOD(device_suspend,       vtblk_suspend),
        DEVMETHOD(device_resume,        vtblk_resume),
        DEVMETHOD(device_shutdown,      vtblk_shutdown),

        /* VirtIO methods. */
        DEVMETHOD(virtio_config_change, vtblk_config_change),

        DEVMETHOD_END
};

static driver_t vtblk_driver = {
        "vtblk",
        vtblk_methods,
        sizeof(struct vtblk_softc)
};
static devclass_t vtblk_devclass;

DRIVER_MODULE(virtio_blk, virtio_mmio, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
MODULE_VERSION(virtio_blk, 1);
MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);

VIRTIO_SIMPLE_PNPTABLE(virtio_blk, VIRTIO_ID_BLOCK, "VirtIO Block Adapter");
VIRTIO_SIMPLE_PNPINFO(virtio_mmio, virtio_blk);
VIRTIO_SIMPLE_PNPINFO(virtio_pci, virtio_blk);

static int
vtblk_modevent(module_t mod, int type, void *unused)
{
        int error;

        error = 0;

        switch (type) {
        case MOD_LOAD:
        case MOD_QUIESCE:
        case MOD_UNLOAD:
        case MOD_SHUTDOWN:
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }

        return (error);
}

static int
vtblk_probe(device_t dev)
{
        return (VIRTIO_SIMPLE_PROBE(dev, virtio_blk));
}

static int
vtblk_attach(device_t dev)
{
        struct vtblk_softc *sc;
        struct virtio_blk_config blkcfg;
        int error;

        virtio_set_feature_desc(dev, vtblk_feature_desc);

        sc = device_get_softc(dev);
        sc->vtblk_dev = dev;
        VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
        bioq_init(&sc->vtblk_bioq);
        TAILQ_INIT(&sc->vtblk_dump_queue);
        TAILQ_INIT(&sc->vtblk_req_free);
        TAILQ_INIT(&sc->vtblk_req_ready);

        vtblk_setup_sysctl(sc);
        vtblk_setup_features(sc);

        vtblk_read_config(sc, &blkcfg);

        /*
         * With the current sglist(9) implementation, it is not easy
         * for us to support a maximum segment size as adjacent
         * segments are coalesced. For now, just make sure it's larger
         * than the maximum supported transfer size.
         */
        if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
                if (blkcfg.size_max < maxphys) {
                        error = ENOTSUP;
                        device_printf(dev, "host requires unsupported "
                            "maximum segment size feature\n");
                        goto fail;
                }
        }

        sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
        if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
                error = EINVAL;
                device_printf(dev, "fewer than minimum number of segments "
                    "allowed: %d\n", sc->vtblk_max_nsegs);
                goto fail;
        }

        sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
        if (sc->vtblk_sglist == NULL) {
                error = ENOMEM;
                device_printf(dev, "cannot allocate sglist\n");
                goto fail;
        }

        error = vtblk_alloc_virtqueue(sc);
        if (error) {
                device_printf(dev, "cannot allocate virtqueue\n");
                goto fail;
        }

        error = vtblk_request_prealloc(sc);
        if (error) {
                device_printf(dev, "cannot preallocate requests\n");
                goto fail;
        }

        vtblk_alloc_disk(sc, &blkcfg);

        error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
        if (error) {
                device_printf(dev, "cannot setup virtqueue interrupt\n");
                goto fail;
        }

        vtblk_create_disk(sc);

        virtqueue_enable_intr(sc->vtblk_vq);

fail:
        if (error)
                vtblk_detach(dev);

        return (error);
}

static int
vtblk_detach(device_t dev)
{
        struct vtblk_softc *sc;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        sc->vtblk_flags |= VTBLK_FLAG_DETACH;
        if (device_is_attached(dev))
                vtblk_stop(sc);
        VTBLK_UNLOCK(sc);

        vtblk_drain(sc);

        if (sc->vtblk_disk != NULL) {
                disk_destroy(sc->vtblk_disk);
                sc->vtblk_disk = NULL;
        }

        if (sc->vtblk_sglist != NULL) {
                sglist_free(sc->vtblk_sglist);
                sc->vtblk_sglist = NULL;
        }

        VTBLK_LOCK_DESTROY(sc);

        return (0);
}

static int
vtblk_suspend(device_t dev)
{
        struct vtblk_softc *sc;
        int error;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
        /* XXX BMV: virtio_stop(), etc needed here? */
        error = vtblk_quiesce(sc);
        if (error)
                sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
        VTBLK_UNLOCK(sc);

        return (error);
}

static int
vtblk_resume(device_t dev)
{
        struct vtblk_softc *sc;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        /* XXX BMV: virtio_reinit(), etc needed here? */
        sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
        vtblk_startio(sc);
        VTBLK_UNLOCK(sc);

        return (0);
}

static int
vtblk_shutdown(device_t dev)
{

        return (0);
}

static int
vtblk_config_change(device_t dev)
{
        struct vtblk_softc *sc;
        struct virtio_blk_config blkcfg;
        uint64_t capacity;

        sc = device_get_softc(dev);

        vtblk_read_config(sc, &blkcfg);

        /* Capacity is always in 512-byte units. */
        capacity = blkcfg.capacity * VTBLK_BSIZE;

        if (sc->vtblk_disk->d_mediasize != capacity)
                vtblk_resize_disk(sc, capacity);

        return (0);
}

static int
vtblk_open(struct disk *dp)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? ENXIO : 0);
}

static int
vtblk_close(struct disk *dp)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (0);
}

static int
vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
    struct thread *td)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (ENOTTY);
}

static int
vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)
{
        struct disk *dp;
        struct vtblk_softc *sc;
        int error;

        dp = arg;
        error = 0;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        VTBLK_LOCK(sc);

        vtblk_dump_quiesce(sc);

        if (length > 0)
                error = vtblk_dump_write(sc, virtual, offset, length);
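        /*
         * Complete the dump on error, or when a NULL buffer at offset
         * zero marks the end of the dump.
         */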
        if (error || (virtual == NULL && offset == 0))
                vtblk_dump_complete(sc);

        VTBLK_UNLOCK(sc);

        return (error);
}

static void
vtblk_strategy(struct bio *bp)
{
        struct vtblk_softc *sc;

        if ((sc = bp->bio_disk->d_drv1) == NULL) {
                vtblk_bio_done(NULL, bp, EINVAL);
                return;
        }

        /*
         * Fail any write if RO. Unfortunately, there does not seem to
         * be a better way to report our read-only status to GEOM above.
         */
        if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
            (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH ||
            bp->bio_cmd == BIO_DELETE)) {
                vtblk_bio_done(sc, bp, EROFS);
                return;
        }

        if ((bp->bio_cmd != BIO_READ) && (bp->bio_cmd != BIO_WRITE) &&
            (bp->bio_cmd != BIO_FLUSH) && (bp->bio_cmd != BIO_DELETE)) {
                vtblk_bio_done(sc, bp, EOPNOTSUPP);
                return;
        }

        VTBLK_LOCK(sc);

        if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
                VTBLK_UNLOCK(sc);
                vtblk_bio_done(sc, bp, ENXIO);
                return;
        }

        bioq_insert_tail(&sc->vtblk_bioq, bp);
        vtblk_startio(sc);

        VTBLK_UNLOCK(sc);
}

static void
vtblk_negotiate_features(struct vtblk_softc *sc)
{
        device_t dev;
        uint64_t features;

        dev = sc->vtblk_dev;
        features = virtio_bus_is_modern(dev) ? VTBLK_MODERN_FEATURES :
            VTBLK_LEGACY_FEATURES;

        sc->vtblk_features = virtio_negotiate_features(dev, features);
        virtio_finalize_features(dev);
}

static void
vtblk_setup_features(struct vtblk_softc *sc)
{
        device_t dev;

        dev = sc->vtblk_dev;

        vtblk_negotiate_features(sc);

        if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
                sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
        if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
                sc->vtblk_flags |= VTBLK_FLAG_READONLY;
        if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
                sc->vtblk_flags |= VTBLK_FLAG_WCE_CONFIG;

        /* Legacy. */
        if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
                sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
}

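/*
 * Size requests to the VTBLK_MIN_SEGMENTS header/status segments plus
 * enough data segments for a maxphys-sized transfer (one extra page for
 * unaligned buffers), capped by the host's seg_max and, when indirect
 * descriptors are in use, by VIRTIO_MAX_INDIRECT.
 */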
static int
vtblk_maximum_segments(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
        device_t dev;
        int nsegs;

        dev = sc->vtblk_dev;
        nsegs = VTBLK_MIN_SEGMENTS;

        if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
                nsegs += MIN(blkcfg->seg_max, maxphys / PAGE_SIZE + 1);
                if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
                        nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
        } else
                nsegs += 1;

        return (nsegs);
}

static int
vtblk_alloc_virtqueue(struct vtblk_softc *sc)
{
        device_t dev;
        struct vq_alloc_info vq_info;

        dev = sc->vtblk_dev;

        VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
            vtblk_vq_intr, sc, &sc->vtblk_vq,
            "%s request", device_get_nameunit(dev));

        return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
}

static void
vtblk_resize_disk(struct vtblk_softc *sc, uint64_t new_capacity)
{
        device_t dev;
        struct disk *dp;
        int error;

        dev = sc->vtblk_dev;
        dp = sc->vtblk_disk;

        dp->d_mediasize = new_capacity;
        if (bootverbose) {
                device_printf(dev, "resized to %juMB (%ju %u byte sectors)\n",
                    (uintmax_t) dp->d_mediasize >> 20,
                    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
                    dp->d_sectorsize);
        }

        error = disk_resize(dp, M_NOWAIT);
        if (error) {
                device_printf(dev,
                    "disk_resize(9) failed, error: %d\n", error);
        }
}

static void
vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
        device_t dev;
        struct disk *dp;

        dev = sc->vtblk_dev;

        sc->vtblk_disk = dp = disk_alloc();
        dp->d_open = vtblk_open;
        dp->d_close = vtblk_close;
        dp->d_ioctl = vtblk_ioctl;
        dp->d_strategy = vtblk_strategy;
        dp->d_name = VTBLK_DISK_NAME;
        dp->d_unit = device_get_unit(dev);
        dp->d_drv1 = sc;
        dp->d_flags = DISKFLAG_UNMAPPED_BIO | DISKFLAG_DIRECT_COMPLETION;
        dp->d_hba_vendor = virtio_get_vendor(dev);
        dp->d_hba_device = virtio_get_device(dev);
        dp->d_hba_subvendor = virtio_get_subvendor(dev);
        dp->d_hba_subdevice = virtio_get_subdevice(dev);

        if (virtio_with_feature(dev, VIRTIO_BLK_F_FLUSH))
                dp->d_flags |= DISKFLAG_CANFLUSHCACHE;

        if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
                dp->d_dump = vtblk_dump;

        /* Capacity is always in 512-byte units. */
        dp->d_mediasize = blkcfg->capacity * VTBLK_BSIZE;

        if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
                dp->d_sectorsize = blkcfg->blk_size;
        else
                dp->d_sectorsize = VTBLK_BSIZE;

        /*
         * The VirtIO maximum I/O size is given in terms of segments.
         * However, FreeBSD limits I/O size by logical buffer size, not
         * by physically contiguous pages. Therefore, we have to assume
         * no pages are contiguous. This may impose an artificially low
         * maximum I/O size. But in practice, since QEMU advertises 128
         * segments, this gives us a maximum IO size of 125 * PAGE_SIZE,
         * which is typically greater than maxphys. Eventually we should
         * just advertise maxphys and split buffers that are too big.
         *
         * Note we must subtract one additional segment in case of
         * non-page-aligned buffers.
         */
        dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
            PAGE_SIZE;
        if (dp->d_maxsize < PAGE_SIZE)
                dp->d_maxsize = PAGE_SIZE; /* XXX */

        if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
                dp->d_fwsectors = blkcfg->geometry.sectors;
                dp->d_fwheads = blkcfg->geometry.heads;
        }

        if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY) &&
            blkcfg->topology.physical_block_exp > 0) {
                dp->d_stripesize = dp->d_sectorsize *
                    (1 << blkcfg->topology.physical_block_exp);
                dp->d_stripeoffset = (dp->d_stripesize -
                    blkcfg->topology.alignment_offset * dp->d_sectorsize) %
                    dp->d_stripesize;
        }

        if (virtio_with_feature(dev, VIRTIO_BLK_F_DISCARD)) {
                dp->d_flags |= DISKFLAG_CANDELETE;
                dp->d_delmaxsize = blkcfg->max_discard_sectors * VTBLK_BSIZE;
        }

        if (vtblk_write_cache_enabled(sc, blkcfg) != 0)
                sc->vtblk_write_cache = VTBLK_CACHE_WRITEBACK;
        else
                sc->vtblk_write_cache = VTBLK_CACHE_WRITETHROUGH;
}

static void
vtblk_create_disk(struct vtblk_softc *sc)
{
        struct disk *dp;

        dp = sc->vtblk_disk;

        vtblk_ident(sc);

        device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
            (uintmax_t) dp->d_mediasize >> 20,
            (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
            dp->d_sectorsize);

        disk_create(dp, DISK_VERSION);
}

static int
vtblk_request_prealloc(struct vtblk_softc *sc)
{
        struct vtblk_request *req;
        int i, nreqs;

        nreqs = virtqueue_size(sc->vtblk_vq);

        /*
         * Preallocate sufficient requests to keep the virtqueue full. Each
         * request consumes VTBLK_MIN_SEGMENTS or more descriptors so reduce
         * the number allocated when indirect descriptors are not available.
         */
        if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
                nreqs /= VTBLK_MIN_SEGMENTS;

        for (i = 0; i < nreqs; i++) {
                req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT);
                if (req == NULL)
                        return (ENOMEM);

                MPASS(sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr)) == 1);
                MPASS(sglist_count(&req->vbr_ack, sizeof(req->vbr_ack)) == 1);

                sc->vtblk_request_count++;
                vtblk_request_enqueue(sc, req);
        }

        return (0);
}

static void
vtblk_request_free(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        MPASS(TAILQ_EMPTY(&sc->vtblk_req_ready));

        while ((req = vtblk_request_dequeue(sc)) != NULL) {
                sc->vtblk_request_count--;
                free(req, M_DEVBUF);
        }

        KASSERT(sc->vtblk_request_count == 0,
            ("%s: leaked %d requests", __func__, sc->vtblk_request_count));
}

static struct vtblk_request *
vtblk_request_dequeue(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        req = TAILQ_FIRST(&sc->vtblk_req_free);
        if (req != NULL) {
                TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);
                bzero(req, sizeof(struct vtblk_request));
        }

        return (req);
}

static void
vtblk_request_enqueue(struct vtblk_softc *sc, struct vtblk_request *req)
{

        TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
}

static struct vtblk_request *
vtblk_request_next_ready(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        req = TAILQ_FIRST(&sc->vtblk_req_ready);
        if (req != NULL)
                TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);

        return (req);
}

static void
vtblk_request_requeue_ready(struct vtblk_softc *sc, struct vtblk_request *req)
{

        /* NOTE: Currently, there will be at most one request in the queue. */
        TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);
}

static struct vtblk_request *
vtblk_request_next(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        req = vtblk_request_next_ready(sc);
        if (req != NULL)
                return (req);

        return (vtblk_request_bio(sc));
}

static struct vtblk_request *
vtblk_request_bio(struct vtblk_softc *sc)
{
        struct bio_queue_head *bioq;
        struct vtblk_request *req;
        struct bio *bp;

        bioq = &sc->vtblk_bioq;

        if (bioq_first(bioq) == NULL)
                return (NULL);

        req = vtblk_request_dequeue(sc);
        if (req == NULL)
                return (NULL);

        bp = bioq_takefirst(bioq);
        req->vbr_bp = bp;
        req->vbr_ack = -1;
        req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);

        switch (bp->bio_cmd) {
        case BIO_FLUSH:
                req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_FLUSH);
                req->vbr_hdr.sector = 0;
                break;
        case BIO_READ:
                req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_IN);
                req->vbr_hdr.sector = vtblk_gtoh64(sc, bp->bio_offset / VTBLK_BSIZE);
                break;
        case BIO_WRITE:
                req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_OUT);
                req->vbr_hdr.sector = vtblk_gtoh64(sc, bp->bio_offset / VTBLK_BSIZE);
                break;
        case BIO_DELETE:
                req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_DISCARD);
                req->vbr_hdr.sector = vtblk_gtoh64(sc, bp->bio_offset / VTBLK_BSIZE);
                break;
        default:
                panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
        }

        if (bp->bio_flags & BIO_ORDERED)
                req->vbr_hdr.type |= vtblk_gtoh32(sc, VIRTIO_BLK_T_BARRIER);

        return (req);
}

static int
vtblk_request_execute(struct vtblk_softc *sc, struct vtblk_request *req)
{
        struct virtqueue *vq;
        struct sglist *sg;
        struct bio *bp;
        int ordered, readable, writable, error;

        vq = sc->vtblk_vq;
        sg = sc->vtblk_sglist;
        bp = req->vbr_bp;
        ordered = 0;
        writable = 0;

        /*
         * Some hosts (such as bhyve) do not implement the barrier feature,
         * so we emulate it in the driver by allowing the barrier request
         * to be the only one in flight.
         */
        if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
                if (sc->vtblk_req_ordered != NULL)
                        return (EBUSY);
                if (bp->bio_flags & BIO_ORDERED) {
                        if (!virtqueue_empty(vq))
                                return (EBUSY);
                        ordered = 1;
                        req->vbr_hdr.type &= vtblk_gtoh32(sc,
                                ~VIRTIO_BLK_T_BARRIER);
                }
        }

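        /*
         * Build the scatter/gather list for this request: a device-readable
         * header, then the data buffers (device-writable for reads), and
         * finally the device-writable status (ack) byte.
         */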
        sglist_reset(sg);
        sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));

        if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
                error = sglist_append_bio(sg, bp);
                if (error || sg->sg_nseg == sg->sg_maxseg) {
                        panic("%s: bio %p data buffer too big %d",
                            __func__, bp, error);
                }

                /* BIO_READ means the host writes into our buffer. */
                if (bp->bio_cmd == BIO_READ)
                        writable = sg->sg_nseg - 1;
        } else if (bp->bio_cmd == BIO_DELETE) {
                struct virtio_blk_discard_write_zeroes *discard;

                discard = malloc(sizeof(*discard), M_DEVBUF, M_NOWAIT | M_ZERO);
                if (discard == NULL)
                        return (ENOMEM);

                bp->bio_driver1 = discard;
                discard->sector = vtblk_gtoh64(sc, bp->bio_offset / VTBLK_BSIZE);
                discard->num_sectors = vtblk_gtoh32(sc, bp->bio_bcount / VTBLK_BSIZE);
                error = sglist_append(sg, discard, sizeof(*discard));
                if (error || sg->sg_nseg == sg->sg_maxseg) {
                        panic("%s: bio %p data buffer too big %d",
                            __func__, bp, error);
                }
        }

        writable++;
        sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
        readable = sg->sg_nseg - writable;

        error = virtqueue_enqueue(vq, req, sg, readable, writable);
        if (error == 0 && ordered)
                sc->vtblk_req_ordered = req;

        return (error);
}

static int
vtblk_request_error(struct vtblk_request *req)
{
        int error;

        switch (req->vbr_ack) {
        case VIRTIO_BLK_S_OK:
                error = 0;
                break;
        case VIRTIO_BLK_S_UNSUPP:
                error = ENOTSUP;
                break;
        default:
                error = EIO;
                break;
        }

        return (error);
}

static void
vtblk_queue_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
        struct vtblk_request *req;
        struct bio *bp;

        while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
                if (sc->vtblk_req_ordered != NULL) {
                        MPASS(sc->vtblk_req_ordered == req);
                        sc->vtblk_req_ordered = NULL;
                }

                bp = req->vbr_bp;
                bp->bio_error = vtblk_request_error(req);
                TAILQ_INSERT_TAIL(queue, bp, bio_queue);

                vtblk_request_enqueue(sc, req);
        }
}

static void
vtblk_done_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
        struct bio *bp, *tmp;

        TAILQ_FOREACH_SAFE(bp, queue, bio_queue, tmp) {
                if (bp->bio_error != 0)
                        disk_err(bp, "hard error", -1, 1);
                vtblk_bio_done(sc, bp, bp->bio_error);
        }
}

static void
vtblk_drain_vq(struct vtblk_softc *sc)
{
        struct virtqueue *vq;
        struct vtblk_request *req;
        int last;

        vq = sc->vtblk_vq;
        last = 0;

        while ((req = virtqueue_drain(vq, &last)) != NULL) {
                vtblk_bio_done(sc, req->vbr_bp, ENXIO);
                vtblk_request_enqueue(sc, req);
        }

        sc->vtblk_req_ordered = NULL;
        KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}

static void
vtblk_drain(struct vtblk_softc *sc)
{
        struct bio_queue_head *bioq;
        struct vtblk_request *req;
        struct bio *bp;

        bioq = &sc->vtblk_bioq;

        if (sc->vtblk_vq != NULL) {
                struct bio_queue queue;

                TAILQ_INIT(&queue);
                vtblk_queue_completed(sc, &queue);
                vtblk_done_completed(sc, &queue);

                vtblk_drain_vq(sc);
        }

        while ((req = vtblk_request_next_ready(sc)) != NULL) {
                vtblk_bio_done(sc, req->vbr_bp, ENXIO);
                vtblk_request_enqueue(sc, req);
        }

        while (bioq_first(bioq) != NULL) {
                bp = bioq_takefirst(bioq);
                vtblk_bio_done(sc, bp, ENXIO);
        }

        vtblk_request_free(sc);
}

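/*
 * Submit requests until the virtqueue fills or the queues empty, then
 * notify the host once if anything was enqueued.
 */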
static void
vtblk_startio(struct vtblk_softc *sc)
{
        struct virtqueue *vq;
        struct vtblk_request *req;
        int enq;

        VTBLK_LOCK_ASSERT(sc);
        vq = sc->vtblk_vq;
        enq = 0;

        if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
                return;

        while (!virtqueue_full(vq)) {
                req = vtblk_request_next(sc);
                if (req == NULL)
                        break;

                if (vtblk_request_execute(sc, req) != 0) {
                        vtblk_request_requeue_ready(sc, req);
                        break;
                }

                enq++;
        }

        if (enq > 0)
                virtqueue_notify(vq);
}

static void
vtblk_bio_done(struct vtblk_softc *sc, struct bio *bp, int error)
{

        /* Because of GEOM direct dispatch, we cannot hold any locks. */
        if (sc != NULL)
                VTBLK_LOCK_ASSERT_NOTOWNED(sc);

        if (error) {
                bp->bio_resid = bp->bio_bcount;
                bp->bio_error = error;
                bp->bio_flags |= BIO_ERROR;
        }

        if (bp->bio_driver1 != NULL) {
                free(bp->bio_driver1, M_DEVBUF);
                bp->bio_driver1 = NULL;
        }

        biodone(bp);
}

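/* Read a config field only if the corresponding feature was negotiated. */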
#define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg)                  \
        if (virtio_with_feature(_dev, _feature)) {                      \
                virtio_read_device_config(_dev,                         \
                    offsetof(struct virtio_blk_config, _field),         \
                    &(_cfg)->_field, sizeof((_cfg)->_field));           \
        }

static void
vtblk_read_config(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
        device_t dev;

        dev = sc->vtblk_dev;

        bzero(blkcfg, sizeof(struct virtio_blk_config));

        /* The capacity is always available. */
        virtio_read_device_config(dev, offsetof(struct virtio_blk_config,
            capacity), &blkcfg->capacity, sizeof(blkcfg->capacity));

        /* Read the configuration if the feature was negotiated. */
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
            geometry.cylinders, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
            geometry.heads, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
            geometry.sectors, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
            topology.physical_block_exp, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
            topology.alignment_offset, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
            topology.min_io_size, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
            topology.opt_io_size, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, wce, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_DISCARD, max_discard_sectors,
            blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_DISCARD, max_discard_seg, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_DISCARD, discard_sector_alignment,
            blkcfg);
}

#undef VTBLK_GET_CONFIG

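/*
 * Issue a VIRTIO_BLK_T_GET_ID request to fetch the device's identity
 * string, which is exported as the disk ident.
 */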
static void
vtblk_ident(struct vtblk_softc *sc)
{
        struct bio buf;
        struct disk *dp;
        struct vtblk_request *req;
        int len, error;

        dp = sc->vtblk_disk;
        len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);

        if (vtblk_tunable_int(sc, "no_ident", vtblk_no_ident) != 0)
                return;

        req = vtblk_request_dequeue(sc);
        if (req == NULL)
                return;

        req->vbr_ack = -1;
        req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_GET_ID);
        req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
        req->vbr_hdr.sector = 0;

        req->vbr_bp = &buf;
        g_reset_bio(&buf);

        buf.bio_cmd = BIO_READ;
        buf.bio_data = dp->d_ident;
        buf.bio_bcount = len;

        VTBLK_LOCK(sc);
        error = vtblk_poll_request(sc, req);
        VTBLK_UNLOCK(sc);

        vtblk_request_enqueue(sc, req);

        if (error) {
                device_printf(sc->vtblk_dev,
                    "error getting device identifier: %d\n", error);
        }
}

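/*
 * Synchronously execute a single request on an otherwise empty virtqueue,
 * polling until the host completes it. Used for the identify request at
 * attach time and for kernel dump I/O.
 */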
static int
vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
        struct virtqueue *vq;
        int error;

        vq = sc->vtblk_vq;

        if (!virtqueue_empty(vq))
                return (EBUSY);

        error = vtblk_request_execute(sc, req);
        if (error)
                return (error);

        virtqueue_notify(vq);
        virtqueue_poll(vq, NULL);

        error = vtblk_request_error(req);
        if (error && bootverbose) {
                device_printf(sc->vtblk_dev,
                    "%s: IO error: %d\n", __func__, error);
        }

        return (error);
}

static int
vtblk_quiesce(struct vtblk_softc *sc)
{
        int error;

        VTBLK_LOCK_ASSERT(sc);
        error = 0;

        while (!virtqueue_empty(sc->vtblk_vq)) {
                if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq",
                    VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) {
                        error = EBUSY;
                        break;
                }
        }

        return (error);
}

static void
vtblk_vq_intr(void *xsc)
{
        struct vtblk_softc *sc;
        struct virtqueue *vq;
        struct bio_queue queue;

        sc = xsc;
        vq = sc->vtblk_vq;
        TAILQ_INIT(&queue);

        VTBLK_LOCK(sc);

again:
        if (sc->vtblk_flags & VTBLK_FLAG_DETACH)
                goto out;

        vtblk_queue_completed(sc, &queue);
        vtblk_startio(sc);

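        /*
         * If more completions arrived while interrupts were disabled,
         * disable them again and reprocess the queue so none are missed.
         */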
        if (virtqueue_enable_intr(vq) != 0) {
                virtqueue_disable_intr(vq);
                goto again;
        }

        if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
                wakeup(&sc->vtblk_vq);

out:
        VTBLK_UNLOCK(sc);
        vtblk_done_completed(sc, &queue);
}

static void
vtblk_stop(struct vtblk_softc *sc)
{

        virtqueue_disable_intr(sc->vtblk_vq);
        virtio_stop(sc->vtblk_dev);
}

static void
vtblk_dump_quiesce(struct vtblk_softc *sc)
{

        /*
         * Spin here until all the requests in-flight at the time of the
         * dump are completed and queued. The queued requests will be
         * biodone'd once the dump is finished.
         */
        while (!virtqueue_empty(sc->vtblk_vq))
                vtblk_queue_completed(sc, &sc->vtblk_dump_queue);
}

static int
vtblk_dump_write(struct vtblk_softc *sc, void *virtual, off_t offset,
    size_t length)
{
        struct bio buf;
        struct vtblk_request *req;

        req = &sc->vtblk_dump_request;
        req->vbr_ack = -1;
        req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_OUT);
        req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
        req->vbr_hdr.sector = vtblk_gtoh64(sc, offset / VTBLK_BSIZE);

        req->vbr_bp = &buf;
        g_reset_bio(&buf);

        buf.bio_cmd = BIO_WRITE;
        buf.bio_data = virtual;
        buf.bio_bcount = length;

        return (vtblk_poll_request(sc, req));
}

static int
vtblk_dump_flush(struct vtblk_softc *sc)
{
        struct bio buf;
        struct vtblk_request *req;

        req = &sc->vtblk_dump_request;
        req->vbr_ack = -1;
        req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_FLUSH);
        req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
        req->vbr_hdr.sector = 0;

        req->vbr_bp = &buf;
        g_reset_bio(&buf);

        buf.bio_cmd = BIO_FLUSH;

        return (vtblk_poll_request(sc, req));
}

static void
vtblk_dump_complete(struct vtblk_softc *sc)
{

        vtblk_dump_flush(sc);

        VTBLK_UNLOCK(sc);
        vtblk_done_completed(sc, &sc->vtblk_dump_queue);
        VTBLK_LOCK(sc);
}

static void
vtblk_set_write_cache(struct vtblk_softc *sc, int wc)
{

        /* Set either writeback (1) or writethrough (0) mode. */
        virtio_write_dev_config_1(sc->vtblk_dev,
            offsetof(struct virtio_blk_config, wce), wc);
}

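/*
 * Determine the initial cache mode: an explicit writecache_mode tunable
 * wins when the cache is configurable via VIRTIO_BLK_F_CONFIG_WCE;
 * otherwise the config's wce field or, lacking that feature, the presence
 * of the flush feature implies write-back caching.
 */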
static int
vtblk_write_cache_enabled(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
        int wc;

        if (sc->vtblk_flags & VTBLK_FLAG_WCE_CONFIG) {
                wc = vtblk_tunable_int(sc, "writecache_mode",
                    vtblk_writecache_mode);
                if (wc >= 0 && wc < VTBLK_CACHE_MAX)
                        vtblk_set_write_cache(sc, wc);
                else
                        wc = blkcfg->wce;
        } else
                wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_FLUSH);

        return (wc);
}

static int
vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS)
{
        struct vtblk_softc *sc;
        int wc, error;

        sc = oidp->oid_arg1;
        wc = sc->vtblk_write_cache;

        error = sysctl_handle_int(oidp, &wc, 0, req);
        if (error || req->newptr == NULL)
                return (error);
        if ((sc->vtblk_flags & VTBLK_FLAG_WCE_CONFIG) == 0)
                return (EPERM);
        if (wc < 0 || wc >= VTBLK_CACHE_MAX)
                return (EINVAL);

        VTBLK_LOCK(sc);
        sc->vtblk_write_cache = wc;
        vtblk_set_write_cache(sc, sc->vtblk_write_cache);
        VTBLK_UNLOCK(sc);

        return (0);
}

static void
vtblk_setup_sysctl(struct vtblk_softc *sc)
{
        device_t dev;
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *tree;
        struct sysctl_oid_list *child;

        dev = sc->vtblk_dev;
        ctx = device_get_sysctl_ctx(dev);
        tree = device_get_sysctl_tree(dev);
        child = SYSCTL_CHILDREN(tree);

        SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "writecache_mode",
            CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
            vtblk_write_cache_sysctl, "I",
            "Write cache mode (writethrough (0) or writeback (1))");
}

static int
vtblk_tunable_int(struct vtblk_softc *sc, const char *knob, int def)
{
        char path[64];

        snprintf(path, sizeof(path),
            "hw.vtblk.%d.%s", device_get_unit(sc->vtblk_dev), knob);
        TUNABLE_INT_FETCH(path, &def);

        return (def);
}