/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO block devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <geom/geom_disk.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/block/virtio_blk.h>

#include "virtio_if.h"

struct vtblk_request {
        struct virtio_blk_outhdr         vbr_hdr;
        struct bio                      *vbr_bp;
        uint8_t                          vbr_ack;
        TAILQ_ENTRY(vtblk_request)       vbr_link;
};

enum vtblk_cache_mode {
        VTBLK_CACHE_WRITETHROUGH,
        VTBLK_CACHE_WRITEBACK,
        VTBLK_CACHE_MAX
};

struct vtblk_softc {
        device_t                 vtblk_dev;
        struct mtx               vtblk_mtx;
        uint64_t                 vtblk_features;
        uint32_t                 vtblk_flags;
#define VTBLK_FLAG_INDIRECT     0x0001
#define VTBLK_FLAG_READONLY     0x0002
#define VTBLK_FLAG_DETACH       0x0004
#define VTBLK_FLAG_SUSPEND      0x0008
#define VTBLK_FLAG_DUMPING      0x0010
#define VTBLK_FLAG_BARRIER      0x0020
#define VTBLK_FLAG_WC_CONFIG    0x0040

        struct virtqueue        *vtblk_vq;
        struct sglist           *vtblk_sglist;
        struct disk             *vtblk_disk;

        struct bio_queue_head    vtblk_bioq;
        TAILQ_HEAD(, vtblk_request)
                                 vtblk_req_free;
        TAILQ_HEAD(, vtblk_request)
                                 vtblk_req_ready;
        struct vtblk_request    *vtblk_req_ordered;

        int                      vtblk_max_nsegs;
        int                      vtblk_request_count;
        enum vtblk_cache_mode    vtblk_write_cache;

        struct vtblk_request     vtblk_dump_request;
};

static struct virtio_feature_desc vtblk_feature_desc[] = {
        { VIRTIO_BLK_F_BARRIER,         "HostBarrier"   },
        { VIRTIO_BLK_F_SIZE_MAX,        "MaxSegSize"    },
        { VIRTIO_BLK_F_SEG_MAX,         "MaxNumSegs"    },
        { VIRTIO_BLK_F_GEOMETRY,        "DiskGeometry"  },
        { VIRTIO_BLK_F_RO,              "ReadOnly"      },
        { VIRTIO_BLK_F_BLK_SIZE,        "BlockSize"     },
        { VIRTIO_BLK_F_SCSI,            "SCSICmds"      },
        { VIRTIO_BLK_F_WCE,             "WriteCache"    },
        { VIRTIO_BLK_F_TOPOLOGY,        "Topology"      },
        { VIRTIO_BLK_F_CONFIG_WCE,      "ConfigWCE"     },

        { 0, NULL }
};

static int      vtblk_modevent(module_t, int, void *);

static int      vtblk_probe(device_t);
static int      vtblk_attach(device_t);
static int      vtblk_detach(device_t);
static int      vtblk_suspend(device_t);
static int      vtblk_resume(device_t);
static int      vtblk_shutdown(device_t);
static int      vtblk_config_change(device_t);

static int      vtblk_open(struct disk *);
static int      vtblk_close(struct disk *);
static int      vtblk_ioctl(struct disk *, u_long, void *, int,
                    struct thread *);
static int      vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
static void     vtblk_strategy(struct bio *);

static void     vtblk_negotiate_features(struct vtblk_softc *);
static void     vtblk_setup_features(struct vtblk_softc *);
static int      vtblk_maximum_segments(struct vtblk_softc *,
                    struct virtio_blk_config *);
static int      vtblk_alloc_virtqueue(struct vtblk_softc *);
static void     vtblk_resize_disk(struct vtblk_softc *, uint64_t);
static void     vtblk_alloc_disk(struct vtblk_softc *,
                    struct virtio_blk_config *);
static void     vtblk_create_disk(struct vtblk_softc *);

static int      vtblk_request_prealloc(struct vtblk_softc *);
static void     vtblk_request_free(struct vtblk_softc *);
static struct vtblk_request *
                vtblk_request_dequeue(struct vtblk_softc *);
static void     vtblk_request_enqueue(struct vtblk_softc *,
                    struct vtblk_request *);
static struct vtblk_request *
                vtblk_request_next_ready(struct vtblk_softc *);
static void     vtblk_request_requeue_ready(struct vtblk_softc *,
                    struct vtblk_request *);
static struct vtblk_request *
                vtblk_request_next(struct vtblk_softc *);
static struct vtblk_request *
                vtblk_request_bio(struct vtblk_softc *);
static int      vtblk_request_execute(struct vtblk_softc *,
                    struct vtblk_request *);
static int      vtblk_request_error(struct vtblk_request *);

static void     vtblk_queue_completed(struct vtblk_softc *,
                    struct bio_queue *);
static void     vtblk_done_completed(struct vtblk_softc *,
                    struct bio_queue *);
static void     vtblk_drain_vq(struct vtblk_softc *, int);
static void     vtblk_drain(struct vtblk_softc *);

static void     vtblk_startio(struct vtblk_softc *);
static void     vtblk_bio_done(struct vtblk_softc *, struct bio *, int);

static void     vtblk_read_config(struct vtblk_softc *,
                    struct virtio_blk_config *);
static void     vtblk_ident(struct vtblk_softc *);
static int      vtblk_poll_request(struct vtblk_softc *,
                    struct vtblk_request *);
static int      vtblk_quiesce(struct vtblk_softc *);
static void     vtblk_vq_intr(void *);
static void     vtblk_stop(struct vtblk_softc *);

static void     vtblk_dump_prepare(struct vtblk_softc *);
static int      vtblk_dump_write(struct vtblk_softc *, void *, off_t, size_t);
static int      vtblk_dump_flush(struct vtblk_softc *);

static void     vtblk_set_write_cache(struct vtblk_softc *, int);
static int      vtblk_write_cache_enabled(struct vtblk_softc *sc,
                    struct virtio_blk_config *);
static int      vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS);

static void     vtblk_setup_sysctl(struct vtblk_softc *);
static int      vtblk_tunable_int(struct vtblk_softc *, const char *, int);

/* Tunables. */
static int vtblk_no_ident = 0;
TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
static int vtblk_writecache_mode = -1;
TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);

/* Features desired/implemented by this driver. */
#define VTBLK_FEATURES \
    (VIRTIO_BLK_F_BARRIER               | \
     VIRTIO_BLK_F_SIZE_MAX              | \
     VIRTIO_BLK_F_SEG_MAX               | \
     VIRTIO_BLK_F_GEOMETRY              | \
     VIRTIO_BLK_F_RO                    | \
     VIRTIO_BLK_F_BLK_SIZE              | \
     VIRTIO_BLK_F_WCE                   | \
     VIRTIO_BLK_F_TOPOLOGY              | \
     VIRTIO_BLK_F_CONFIG_WCE            | \
     VIRTIO_RING_F_INDIRECT_DESC)

#define VTBLK_MTX(_sc)          &(_sc)->vtblk_mtx
#define VTBLK_LOCK_INIT(_sc, _name) \
                                mtx_init(VTBLK_MTX((_sc)), (_name), \
                                    "VirtIO Block Lock", MTX_DEF)
#define VTBLK_LOCK(_sc)         mtx_lock(VTBLK_MTX((_sc)))
#define VTBLK_UNLOCK(_sc)       mtx_unlock(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_DESTROY(_sc) mtx_destroy(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_ASSERT(_sc)  mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
#define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
                                mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)

#define VTBLK_DISK_NAME         "vtbd"
#define VTBLK_QUIESCE_TIMEOUT   (30 * hz)

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VTBLK_MIN_SEGMENTS      2

static device_method_t vtblk_methods[] = {
        /* Device methods. */
        DEVMETHOD(device_probe,         vtblk_probe),
        DEVMETHOD(device_attach,        vtblk_attach),
        DEVMETHOD(device_detach,        vtblk_detach),
        DEVMETHOD(device_suspend,       vtblk_suspend),
        DEVMETHOD(device_resume,        vtblk_resume),
        DEVMETHOD(device_shutdown,      vtblk_shutdown),

        /* VirtIO methods. */
        DEVMETHOD(virtio_config_change, vtblk_config_change),

        DEVMETHOD_END
};

static driver_t vtblk_driver = {
        "vtblk",
        vtblk_methods,
        sizeof(struct vtblk_softc)
};
static devclass_t vtblk_devclass;

DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
MODULE_VERSION(virtio_blk, 1);
MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);

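/*
 * Module event handler. No event requires any driver-specific work, so
 * the supported events simply succeed and anything else is rejected.
 */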
static int
vtblk_modevent(module_t mod, int type, void *unused)
{
        int error;

        error = 0;

        switch (type) {
        case MOD_LOAD:
        case MOD_QUIESCE:
        case MOD_UNLOAD:
        case MOD_SHUTDOWN:
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }

        return (error);
}

static int
vtblk_probe(device_t dev)
{

        if (virtio_get_device_type(dev) != VIRTIO_ID_BLOCK)
                return (ENXIO);

        device_set_desc(dev, "VirtIO Block Adapter");

        return (BUS_PROBE_DEFAULT);
}

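/*
 * Attach the device: negotiate features, read the configuration space,
 * size and allocate the scatter/gather list, virtqueue, and request pool,
 * then create the disk(9) device. Any failure unwinds via vtblk_detach().
 */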
static int
vtblk_attach(device_t dev)
{
        struct vtblk_softc *sc;
        struct virtio_blk_config blkcfg;
        int error;

        virtio_set_feature_desc(dev, vtblk_feature_desc);

        sc = device_get_softc(dev);
        sc->vtblk_dev = dev;
        VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
        bioq_init(&sc->vtblk_bioq);
        TAILQ_INIT(&sc->vtblk_req_free);
        TAILQ_INIT(&sc->vtblk_req_ready);

        vtblk_setup_sysctl(sc);
        vtblk_setup_features(sc);

        vtblk_read_config(sc, &blkcfg);

        /*
         * With the current sglist(9) implementation, it is not easy
         * for us to support a maximum segment size as adjacent
         * segments are coalesced. For now, just make sure it's larger
         * than the maximum supported transfer size.
         */
        if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
                if (blkcfg.size_max < MAXPHYS) {
                        error = ENOTSUP;
                        device_printf(dev, "host requires unsupported "
                            "maximum segment size feature\n");
                        goto fail;
                }
        }

        sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
        if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
                error = EINVAL;
                device_printf(dev, "fewer than minimum number of segments "
                    "allowed: %d\n", sc->vtblk_max_nsegs);
                goto fail;
        }

        sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
        if (sc->vtblk_sglist == NULL) {
                error = ENOMEM;
                device_printf(dev, "cannot allocate sglist\n");
                goto fail;
        }

        error = vtblk_alloc_virtqueue(sc);
        if (error) {
                device_printf(dev, "cannot allocate virtqueue\n");
                goto fail;
        }

        error = vtblk_request_prealloc(sc);
        if (error) {
                device_printf(dev, "cannot preallocate requests\n");
                goto fail;
        }

        vtblk_alloc_disk(sc, &blkcfg);

        error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
        if (error) {
                device_printf(dev, "cannot setup virtqueue interrupt\n");
                goto fail;
        }

        vtblk_create_disk(sc);

        virtqueue_enable_intr(sc->vtblk_vq);

fail:
        if (error)
                vtblk_detach(dev);

        return (error);
}

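/*
 * Detach the device. This also serves as the error path for
 * vtblk_attach(), so each resource is released only if it was
 * actually allocated.
 */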
static int
vtblk_detach(device_t dev)
{
        struct vtblk_softc *sc;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        sc->vtblk_flags |= VTBLK_FLAG_DETACH;
        if (device_is_attached(dev))
                vtblk_stop(sc);
        VTBLK_UNLOCK(sc);

        vtblk_drain(sc);

        if (sc->vtblk_disk != NULL) {
                disk_destroy(sc->vtblk_disk);
                sc->vtblk_disk = NULL;
        }

        if (sc->vtblk_sglist != NULL) {
                sglist_free(sc->vtblk_sglist);
                sc->vtblk_sglist = NULL;
        }

        VTBLK_LOCK_DESTROY(sc);

        return (0);
}

static int
vtblk_suspend(device_t dev)
{
        struct vtblk_softc *sc;
        int error;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
        /* XXX BMV: virtio_stop(), etc needed here? */
        error = vtblk_quiesce(sc);
        if (error)
                sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
        VTBLK_UNLOCK(sc);

        return (error);
}

static int
vtblk_resume(device_t dev)
{
        struct vtblk_softc *sc;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        /* XXX BMV: virtio_reinit(), etc needed here? */
        sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
        vtblk_startio(sc);
        VTBLK_UNLOCK(sc);

        return (0);
}

static int
vtblk_shutdown(device_t dev)
{

        return (0);
}

static int
vtblk_config_change(device_t dev)
{
        struct vtblk_softc *sc;
        struct virtio_blk_config blkcfg;
        uint64_t capacity;

        sc = device_get_softc(dev);

        vtblk_read_config(sc, &blkcfg);

        /* Capacity is always in 512-byte units. */
        capacity = blkcfg.capacity * 512;

        if (sc->vtblk_disk->d_mediasize != capacity)
                vtblk_resize_disk(sc, capacity);

        return (0);
}

static int
vtblk_open(struct disk *dp)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? ENXIO : 0);
}

static int
vtblk_close(struct disk *dp)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (0);
}

static int
vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
    struct thread *td)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (ENOTTY);
}

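/*
 * disk(9) dump handler, used while the kernel writes a crash dump. The
 * first call switches the device into polled operation; calls with data
 * write it out, and a call with no data and offset zero requests a flush.
 */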
static int
vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)
{
        struct disk *dp;
        struct vtblk_softc *sc;
        int error;

        dp = arg;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        VTBLK_LOCK(sc);

        if ((sc->vtblk_flags & VTBLK_FLAG_DUMPING) == 0) {
                vtblk_dump_prepare(sc);
                sc->vtblk_flags |= VTBLK_FLAG_DUMPING;
        }

        if (length > 0)
                error = vtblk_dump_write(sc, virtual, offset, length);
        else if (virtual == NULL && offset == 0)
                error = vtblk_dump_flush(sc);
        else {
                error = EINVAL;
                sc->vtblk_flags &= ~VTBLK_FLAG_DUMPING;
        }

        VTBLK_UNLOCK(sc);

        return (error);
}

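/*
 * GEOM strategy routine: queue the bio and start I/O. Completion is
 * reported asynchronously through vtblk_bio_done().
 */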
static void
vtblk_strategy(struct bio *bp)
{
        struct vtblk_softc *sc;

        if ((sc = bp->bio_disk->d_drv1) == NULL) {
                vtblk_bio_done(NULL, bp, EINVAL);
                return;
        }

        /*
         * Fail any write if the device is read-only. Unfortunately, there
         * does not seem to be a better way to report our read-only status
         * to GEOM above.
         */
        if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
            (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH)) {
                vtblk_bio_done(sc, bp, EROFS);
                return;
        }

        VTBLK_LOCK(sc);

        if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
                VTBLK_UNLOCK(sc);
                vtblk_bio_done(sc, bp, ENXIO);
                return;
        }

        bioq_insert_tail(&sc->vtblk_bioq, bp);
        vtblk_startio(sc);

        VTBLK_UNLOCK(sc);
}

static void
vtblk_negotiate_features(struct vtblk_softc *sc)
{
        device_t dev;
        uint64_t features;

        dev = sc->vtblk_dev;
        features = VTBLK_FEATURES;

        sc->vtblk_features = virtio_negotiate_features(dev, features);
}

static void
vtblk_setup_features(struct vtblk_softc *sc)
{
        device_t dev;

        dev = sc->vtblk_dev;

        vtblk_negotiate_features(sc);

        if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
                sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
        if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
                sc->vtblk_flags |= VTBLK_FLAG_READONLY;
        if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
                sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
        if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
                sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;
}

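/*
 * Compute the maximum number of scatter/gather segments per request:
 * VTBLK_MIN_SEGMENTS for the header and status, plus whatever the host's
 * seg_max allows for data, capped by VIRTIO_MAX_INDIRECT when indirect
 * descriptors are used. For example, assuming a 128KB MAXPHYS, 4KB pages,
 * and a host advertising a seg_max of 128, this is 2 + min(128, 33) = 35.
 */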
static int
vtblk_maximum_segments(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
        device_t dev;
        int nsegs;

        dev = sc->vtblk_dev;
        nsegs = VTBLK_MIN_SEGMENTS;

        if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
                nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
                if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
                        nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
        } else
                nsegs += 1;

        return (nsegs);
}

static int
vtblk_alloc_virtqueue(struct vtblk_softc *sc)
{
        device_t dev;
        struct vq_alloc_info vq_info;

        dev = sc->vtblk_dev;

        VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
            vtblk_vq_intr, sc, &sc->vtblk_vq,
            "%s request", device_get_nameunit(dev));

        return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
}

static void
vtblk_resize_disk(struct vtblk_softc *sc, uint64_t new_capacity)
{
        device_t dev;
        struct disk *dp;
        int error;

        dev = sc->vtblk_dev;
        dp = sc->vtblk_disk;

        dp->d_mediasize = new_capacity;
        if (bootverbose) {
                device_printf(dev, "resized to %juMB (%ju %u byte sectors)\n",
                    (uintmax_t) dp->d_mediasize >> 20,
                    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
                    dp->d_sectorsize);
        }

        error = disk_resize(dp, M_NOWAIT);
        if (error) {
                device_printf(dev,
                    "disk_resize(9) failed, error: %d\n", error);
        }
}

static void
vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
        device_t dev;
        struct disk *dp;

        dev = sc->vtblk_dev;

        sc->vtblk_disk = dp = disk_alloc();
        dp->d_open = vtblk_open;
        dp->d_close = vtblk_close;
        dp->d_ioctl = vtblk_ioctl;
        dp->d_strategy = vtblk_strategy;
        dp->d_name = VTBLK_DISK_NAME;
        dp->d_unit = device_get_unit(dev);
        dp->d_drv1 = sc;
        dp->d_flags = DISKFLAG_CANFLUSHCACHE | DISKFLAG_UNMAPPED_BIO |
            DISKFLAG_DIRECT_COMPLETION;
        dp->d_hba_vendor = virtio_get_vendor(dev);
        dp->d_hba_device = virtio_get_device(dev);
        dp->d_hba_subvendor = virtio_get_subvendor(dev);
        dp->d_hba_subdevice = virtio_get_subdevice(dev);

        if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
                dp->d_dump = vtblk_dump;

        /* Capacity is always in 512-byte units. */
        dp->d_mediasize = blkcfg->capacity * 512;

        if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
                dp->d_sectorsize = blkcfg->blk_size;
        else
                dp->d_sectorsize = 512;

        /*
         * The VirtIO maximum I/O size is given in terms of segments.
         * However, FreeBSD limits I/O size by logical buffer size, not
         * by physically contiguous pages. Therefore, we have to assume
         * no pages are contiguous. This may impose an artificially low
         * maximum I/O size. But in practice, since QEMU advertises 128
         * segments, this gives us a maximum I/O size of 125 * PAGE_SIZE,
         * which is typically greater than MAXPHYS. Eventually we should
         * just advertise MAXPHYS and split buffers that are too big.
         *
         * Note we must subtract one additional segment in case of
         * non-page-aligned buffers.
         */
        dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
            PAGE_SIZE;
        if (dp->d_maxsize < PAGE_SIZE)
                dp->d_maxsize = PAGE_SIZE; /* XXX */

        if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
                dp->d_fwsectors = blkcfg->geometry.sectors;
                dp->d_fwheads = blkcfg->geometry.heads;
        }

        if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY)) {
                dp->d_stripesize = dp->d_sectorsize *
                    (1 << blkcfg->topology.physical_block_exp);
                dp->d_stripeoffset = (dp->d_stripesize -
                    blkcfg->topology.alignment_offset * dp->d_sectorsize) %
                    dp->d_stripesize;
        }

        if (vtblk_write_cache_enabled(sc, blkcfg) != 0)
                sc->vtblk_write_cache = VTBLK_CACHE_WRITEBACK;
        else
                sc->vtblk_write_cache = VTBLK_CACHE_WRITETHROUGH;
}

static void
vtblk_create_disk(struct vtblk_softc *sc)
{
        struct disk *dp;

        dp = sc->vtblk_disk;

        vtblk_ident(sc);

        device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
            (uintmax_t) dp->d_mediasize >> 20,
            (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
            dp->d_sectorsize);

        disk_create(dp, DISK_VERSION);
}

static int
vtblk_request_prealloc(struct vtblk_softc *sc)
{
        struct vtblk_request *req;
        int i, nreqs;

        nreqs = virtqueue_size(sc->vtblk_vq);

        /*
         * Preallocate sufficient requests to keep the virtqueue full. Each
         * request consumes VTBLK_MIN_SEGMENTS or more descriptors so reduce
         * the number allocated when indirect descriptors are not available.
         */
        if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
                nreqs /= VTBLK_MIN_SEGMENTS;

        for (i = 0; i < nreqs; i++) {
                req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT);
                if (req == NULL)
                        return (ENOMEM);

                MPASS(sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr)) == 1);
                MPASS(sglist_count(&req->vbr_ack, sizeof(req->vbr_ack)) == 1);

                sc->vtblk_request_count++;
                vtblk_request_enqueue(sc, req);
        }

        return (0);
}

static void
vtblk_request_free(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        MPASS(TAILQ_EMPTY(&sc->vtblk_req_ready));

        while ((req = vtblk_request_dequeue(sc)) != NULL) {
                sc->vtblk_request_count--;
                free(req, M_DEVBUF);
        }

        KASSERT(sc->vtblk_request_count == 0,
            ("%s: leaked %d requests", __func__, sc->vtblk_request_count));
}

static struct vtblk_request *
vtblk_request_dequeue(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        req = TAILQ_FIRST(&sc->vtblk_req_free);
        if (req != NULL) {
                TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);
                bzero(req, sizeof(struct vtblk_request));
        }

        return (req);
}

static void
vtblk_request_enqueue(struct vtblk_softc *sc, struct vtblk_request *req)
{

        TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
}

static struct vtblk_request *
vtblk_request_next_ready(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        req = TAILQ_FIRST(&sc->vtblk_req_ready);
        if (req != NULL)
                TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);

        return (req);
}

static void
vtblk_request_requeue_ready(struct vtblk_softc *sc, struct vtblk_request *req)
{

        /* NOTE: Currently, there will be at most one request in the queue. */
        TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);
}

static struct vtblk_request *
vtblk_request_next(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        req = vtblk_request_next_ready(sc);
        if (req != NULL)
                return (req);

        return (vtblk_request_bio(sc));
}

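/*
 * Turn the bio at the head of the bio queue into a block request,
 * mapping the bio command onto the corresponding VirtIO request type.
 * Returns NULL if no bio is pending or no free request is available.
 */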
static struct vtblk_request *
vtblk_request_bio(struct vtblk_softc *sc)
{
        struct bio_queue_head *bioq;
        struct vtblk_request *req;
        struct bio *bp;

        bioq = &sc->vtblk_bioq;

        if (bioq_first(bioq) == NULL)
                return (NULL);

        req = vtblk_request_dequeue(sc);
        if (req == NULL)
                return (NULL);

        bp = bioq_takefirst(bioq);
        req->vbr_bp = bp;
        req->vbr_ack = -1;
        req->vbr_hdr.ioprio = 1;

        switch (bp->bio_cmd) {
        case BIO_FLUSH:
                req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
                break;
        case BIO_READ:
                req->vbr_hdr.type = VIRTIO_BLK_T_IN;
                req->vbr_hdr.sector = bp->bio_offset / 512;
                break;
        case BIO_WRITE:
                req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
                req->vbr_hdr.sector = bp->bio_offset / 512;
                break;
        default:
                panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
        }

        if (bp->bio_flags & BIO_ORDERED)
                req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;

        return (req);
}

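/*
 * Enqueue a request on the virtqueue. The descriptor chain begins with
 * the request header (read-only to the host) and ends with the one-byte
 * ack (write-only to the host); any bio data sits in between, readable
 * by the host for writes and writable by it for reads.
 */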
static int
vtblk_request_execute(struct vtblk_softc *sc, struct vtblk_request *req)
{
        struct virtqueue *vq;
        struct sglist *sg;
        struct bio *bp;
        int ordered, readable, writable, error;

        vq = sc->vtblk_vq;
        sg = sc->vtblk_sglist;
        bp = req->vbr_bp;
        ordered = 0;
        writable = 0;

        /*
         * Some hosts (such as bhyve) do not implement the barrier feature,
         * so we emulate it in the driver by allowing the barrier request
         * to be the only one in flight.
         */
        if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
                if (sc->vtblk_req_ordered != NULL)
                        return (EBUSY);
                if (bp->bio_flags & BIO_ORDERED) {
                        if (!virtqueue_empty(vq))
                                return (EBUSY);
                        ordered = 1;
                        req->vbr_hdr.type &= ~VIRTIO_BLK_T_BARRIER;
                }
        }

        sglist_reset(sg);
        sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));

        if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
                error = sglist_append_bio(sg, bp);
                if (error || sg->sg_nseg == sg->sg_maxseg) {
                        panic("%s: bio %p data buffer too big %d",
                            __func__, bp, error);
                }

                /* BIO_READ means the host writes into our buffer. */
                if (bp->bio_cmd == BIO_READ)
                        writable = sg->sg_nseg - 1;
        }

        writable++;
        sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
        readable = sg->sg_nseg - writable;

        error = virtqueue_enqueue(vq, req, sg, readable, writable);
        if (error == 0 && ordered)
                sc->vtblk_req_ordered = req;

        return (error);
}

static int
vtblk_request_error(struct vtblk_request *req)
{
        int error;

        switch (req->vbr_ack) {
        case VIRTIO_BLK_S_OK:
                error = 0;
                break;
        case VIRTIO_BLK_S_UNSUPP:
                error = ENOTSUP;
                break;
        default:
                error = EIO;
                break;
        }

        return (error);
}

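/*
 * Collect completed requests from the virtqueue, saving each bio's error
 * status and gathering the bios on the caller's queue. The biodone()
 * calls happen later in vtblk_done_completed(), without the softc lock
 * held.
 */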
static void
vtblk_queue_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
        struct vtblk_request *req;
        struct bio *bp;

        while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
                if (sc->vtblk_req_ordered != NULL) {
                        MPASS(sc->vtblk_req_ordered == req);
                        sc->vtblk_req_ordered = NULL;
                }

                bp = req->vbr_bp;
                bp->bio_error = vtblk_request_error(req);
                TAILQ_INSERT_TAIL(queue, bp, bio_queue);

                vtblk_request_enqueue(sc, req);
        }
}

static void
vtblk_done_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
        struct bio *bp, *tmp;

        TAILQ_FOREACH_SAFE(bp, queue, bio_queue, tmp) {
                if (bp->bio_error != 0)
                        disk_err(bp, "hard error", -1, 1);
                vtblk_bio_done(sc, bp, bp->bio_error);
        }
}

static void
vtblk_drain_vq(struct vtblk_softc *sc, int skip_done)
{
        struct virtqueue *vq;
        struct vtblk_request *req;
        int last;

        vq = sc->vtblk_vq;
        last = 0;

        while ((req = virtqueue_drain(vq, &last)) != NULL) {
                if (!skip_done)
                        vtblk_bio_done(sc, req->vbr_bp, ENXIO);

                vtblk_request_enqueue(sc, req);
        }

        sc->vtblk_req_ordered = NULL;
        KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}

static void
vtblk_drain(struct vtblk_softc *sc)
{
        struct bio_queue queue;
        struct bio_queue_head *bioq;
        struct vtblk_request *req;
        struct bio *bp;

        bioq = &sc->vtblk_bioq;
        TAILQ_INIT(&queue);

        if (sc->vtblk_vq != NULL) {
                vtblk_queue_completed(sc, &queue);
                vtblk_done_completed(sc, &queue);

                vtblk_drain_vq(sc, 0);
        }

        while ((req = vtblk_request_next_ready(sc)) != NULL) {
                vtblk_bio_done(sc, req->vbr_bp, ENXIO);
                vtblk_request_enqueue(sc, req);
        }

        while (bioq_first(bioq) != NULL) {
                bp = bioq_takefirst(bioq);
                vtblk_bio_done(sc, bp, ENXIO);
        }

        vtblk_request_free(sc);
}

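/*
 * Issue as many pending requests as the virtqueue has room for. A
 * request that cannot be enqueued (e.g. a barrier waiting for the queue
 * to drain) is parked on the ready queue and retried later. The host is
 * notified only if at least one request was enqueued.
 */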
static void
vtblk_startio(struct vtblk_softc *sc)
{
        struct virtqueue *vq;
        struct vtblk_request *req;
        int enq;

        VTBLK_LOCK_ASSERT(sc);
        vq = sc->vtblk_vq;
        enq = 0;

        if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
                return;

        while (!virtqueue_full(vq)) {
                req = vtblk_request_next(sc);
                if (req == NULL)
                        break;

                if (vtblk_request_execute(sc, req) != 0) {
                        vtblk_request_requeue_ready(sc, req);
                        break;
                }

                enq++;
        }

        if (enq > 0)
                virtqueue_notify(vq);
}

static void
vtblk_bio_done(struct vtblk_softc *sc, struct bio *bp, int error)
{

        /* Because of GEOM direct dispatch, we cannot hold any locks. */
        if (sc != NULL)
                VTBLK_LOCK_ASSERT_NOTOWNED(sc);

        if (error) {
                bp->bio_resid = bp->bio_bcount;
                bp->bio_error = error;
                bp->bio_flags |= BIO_ERROR;
        }

        biodone(bp);
}

#define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg)                  \
        if (virtio_with_feature(_dev, _feature)) {                      \
                virtio_read_device_config(_dev,                         \
                    offsetof(struct virtio_blk_config, _field),         \
                    &(_cfg)->_field, sizeof((_cfg)->_field));           \
        }

static void
vtblk_read_config(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
        device_t dev;

        dev = sc->vtblk_dev;

        bzero(blkcfg, sizeof(struct virtio_blk_config));

        /* The capacity is always available. */
        virtio_read_device_config(dev, offsetof(struct virtio_blk_config,
            capacity), &blkcfg->capacity, sizeof(blkcfg->capacity));

        /* Read the configuration if the feature was negotiated. */
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, writeback, blkcfg);
}

#undef VTBLK_GET_CONFIG

static void
vtblk_ident(struct vtblk_softc *sc)
{
        struct bio buf;
        struct disk *dp;
        struct vtblk_request *req;
        int len, error;

        dp = sc->vtblk_disk;
        len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);

        if (vtblk_tunable_int(sc, "no_ident", vtblk_no_ident) != 0)
                return;

        req = vtblk_request_dequeue(sc);
        if (req == NULL)
                return;

        req->vbr_ack = -1;
        req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
        req->vbr_hdr.ioprio = 1;
        req->vbr_hdr.sector = 0;

        req->vbr_bp = &buf;
        bzero(&buf, sizeof(struct bio));

        buf.bio_cmd = BIO_READ;
        buf.bio_data = dp->d_ident;
        buf.bio_bcount = len;

        VTBLK_LOCK(sc);
        error = vtblk_poll_request(sc, req);
        VTBLK_UNLOCK(sc);

        vtblk_request_enqueue(sc, req);

        if (error) {
                device_printf(sc->vtblk_dev,
                    "error getting device identifier: %d\n", error);
        }
}

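/*
 * Execute a single request synchronously, polling the virtqueue for the
 * host's response. Used before virtqueue interrupts are enabled at
 * attach time and while writing a kernel dump.
 */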
static int
vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
        struct virtqueue *vq;
        int error;

        vq = sc->vtblk_vq;

        if (!virtqueue_empty(vq))
                return (EBUSY);

        error = vtblk_request_execute(sc, req);
        if (error)
                return (error);

        virtqueue_notify(vq);
        virtqueue_poll(vq, NULL);

        error = vtblk_request_error(req);
        if (error && bootverbose) {
                device_printf(sc->vtblk_dev,
                    "%s: IO error: %d\n", __func__, error);
        }

        return (error);
}

static int
vtblk_quiesce(struct vtblk_softc *sc)
{
        int error;

        VTBLK_LOCK_ASSERT(sc);
        error = 0;

        while (!virtqueue_empty(sc->vtblk_vq)) {
                if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq",
                    VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) {
                        error = EBUSY;
                        break;
                }
        }

        return (error);
}

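/*
 * Virtqueue interrupt handler. Completions are collected with the lock
 * held and the bios are completed after it is dropped. If a completion
 * raced with re-enabling the interrupt, loop and service it too.
 */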
static void
vtblk_vq_intr(void *xsc)
{
        struct vtblk_softc *sc;
        struct virtqueue *vq;
        struct bio_queue queue;

        sc = xsc;
        vq = sc->vtblk_vq;
        TAILQ_INIT(&queue);

        VTBLK_LOCK(sc);

again:
        if (sc->vtblk_flags & VTBLK_FLAG_DETACH)
                goto out;

        vtblk_queue_completed(sc, &queue);
        vtblk_startio(sc);

        if (virtqueue_enable_intr(vq) != 0) {
                virtqueue_disable_intr(vq);
                goto again;
        }

        if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
                wakeup(&sc->vtblk_vq);

out:
        VTBLK_UNLOCK(sc);
        vtblk_done_completed(sc, &queue);
}

static void
vtblk_stop(struct vtblk_softc *sc)
{

        virtqueue_disable_intr(sc->vtblk_vq);
        virtio_stop(sc->vtblk_dev);
}

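/*
 * Reset and reinitialize the device with interrupts disabled so the
 * dump code can drive the virtqueue by polling.
 */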
static void
vtblk_dump_prepare(struct vtblk_softc *sc)
{
        device_t dev;
        struct virtqueue *vq;

        dev = sc->vtblk_dev;
        vq = sc->vtblk_vq;

        vtblk_stop(sc);

        /*
         * Drain all requests caught in-flight in the virtqueue,
         * skipping biodone(). When dumping, only one request is
         * outstanding at a time, and we just poll the virtqueue
         * for the response.
         */
        vtblk_drain_vq(sc, 1);

        if (virtio_reinit(dev, sc->vtblk_features) != 0) {
                panic("%s: cannot reinit VirtIO block device during dump",
                    device_get_nameunit(dev));
        }

        virtqueue_disable_intr(vq);
        virtio_reinit_complete(dev);
}

static int
vtblk_dump_write(struct vtblk_softc *sc, void *virtual, off_t offset,
    size_t length)
{
        struct bio buf;
        struct vtblk_request *req;

        req = &sc->vtblk_dump_request;
        req->vbr_ack = -1;
        req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
        req->vbr_hdr.ioprio = 1;
        req->vbr_hdr.sector = offset / 512;

        req->vbr_bp = &buf;
        bzero(&buf, sizeof(struct bio));

        buf.bio_cmd = BIO_WRITE;
        buf.bio_data = virtual;
        buf.bio_bcount = length;

        return (vtblk_poll_request(sc, req));
}

static int
vtblk_dump_flush(struct vtblk_softc *sc)
{
        struct bio buf;
        struct vtblk_request *req;

        req = &sc->vtblk_dump_request;
        req->vbr_ack = -1;
        req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
        req->vbr_hdr.ioprio = 1;
        req->vbr_hdr.sector = 0;

        req->vbr_bp = &buf;
        bzero(&buf, sizeof(struct bio));

        buf.bio_cmd = BIO_FLUSH;

        return (vtblk_poll_request(sc, req));
}

static void
vtblk_set_write_cache(struct vtblk_softc *sc, int wc)
{

        /* Set either writeback (1) or writethrough (0) mode. */
        virtio_write_dev_config_1(sc->vtblk_dev,
            offsetof(struct virtio_blk_config, writeback), wc);
}

static int
vtblk_write_cache_enabled(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
        int wc;

        if (sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) {
                wc = vtblk_tunable_int(sc, "writecache_mode",
                    vtblk_writecache_mode);
                if (wc >= 0 && wc < VTBLK_CACHE_MAX)
                        vtblk_set_write_cache(sc, wc);
                else
                        wc = blkcfg->writeback;
        } else
                wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE);

        return (wc);
}

static int
vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS)
{
        struct vtblk_softc *sc;
        int wc, error;

        sc = oidp->oid_arg1;
        wc = sc->vtblk_write_cache;

        error = sysctl_handle_int(oidp, &wc, 0, req);
        if (error || req->newptr == NULL)
                return (error);
        if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
                return (EPERM);
        if (wc < 0 || wc >= VTBLK_CACHE_MAX)
                return (EINVAL);

        VTBLK_LOCK(sc);
        sc->vtblk_write_cache = wc;
        vtblk_set_write_cache(sc, sc->vtblk_write_cache);
        VTBLK_UNLOCK(sc);

        return (0);
}

static void
vtblk_setup_sysctl(struct vtblk_softc *sc)
{
        device_t dev;
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *tree;
        struct sysctl_oid_list *child;

        dev = sc->vtblk_dev;
        ctx = device_get_sysctl_ctx(dev);
        tree = device_get_sysctl_tree(dev);
        child = SYSCTL_CHILDREN(tree);

        SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "writecache_mode",
            CTLTYPE_INT | CTLFLAG_RW, sc, 0, vtblk_write_cache_sysctl,
            "I", "Write cache mode (writethrough (0) or writeback (1))");
}

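/*
 * Fetch a per-device tunable, e.g. hw.vtblk.0.no_ident, falling back to
 * the supplied driver-wide default when it is not set.
 */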
static int
vtblk_tunable_int(struct vtblk_softc *sc, const char *knob, int def)
{
        char path[64];

        snprintf(path, sizeof(path),
            "hw.vtblk.%d.%s", device_get_unit(sc->vtblk_dev), knob);
        TUNABLE_INT_FETCH(path, &def);

        return (def);
}