/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO block devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <geom/geom_disk.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/block/virtio_blk.h>

#include "virtio_if.h"

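/*
 * A preallocated request tracks one command through the virtqueue. The
 * out header and the ack byte bracket the bio's data: the header is
 * read by the host and the ack is written back with the request status.
 */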
struct vtblk_request {
        struct virtio_blk_outhdr         vbr_hdr;
        struct bio                      *vbr_bp;
        uint8_t                          vbr_ack;

        TAILQ_ENTRY(vtblk_request)       vbr_link;
};

enum vtblk_cache_mode {
        VTBLK_CACHE_WRITETHROUGH,
        VTBLK_CACHE_WRITEBACK,
        VTBLK_CACHE_MAX
};

struct vtblk_softc {
        device_t                 vtblk_dev;
        struct mtx               vtblk_mtx;
        uint64_t                 vtblk_features;
        uint32_t                 vtblk_flags;
#define VTBLK_FLAG_INDIRECT     0x0001
#define VTBLK_FLAG_READONLY     0x0002
#define VTBLK_FLAG_DETACH       0x0004
#define VTBLK_FLAG_SUSPEND      0x0008
#define VTBLK_FLAG_DUMPING      0x0010
#define VTBLK_FLAG_BARRIER      0x0020
#define VTBLK_FLAG_WC_CONFIG    0x0040

        struct virtqueue        *vtblk_vq;
        struct sglist           *vtblk_sglist;
        struct disk             *vtblk_disk;

        struct bio_queue_head    vtblk_bioq;
        TAILQ_HEAD(, vtblk_request)
                                 vtblk_req_free;
        TAILQ_HEAD(, vtblk_request)
                                 vtblk_req_ready;
        struct vtblk_request    *vtblk_req_ordered;

        int                      vtblk_max_nsegs;
        int                      vtblk_request_count;
        enum vtblk_cache_mode    vtblk_write_cache;

        struct vtblk_request     vtblk_dump_request;
};

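/* Feature names, used by the virtio bus when describing the device. */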
static struct virtio_feature_desc vtblk_feature_desc[] = {
        { VIRTIO_BLK_F_BARRIER,         "HostBarrier"   },
        { VIRTIO_BLK_F_SIZE_MAX,        "MaxSegSize"    },
        { VIRTIO_BLK_F_SEG_MAX,         "MaxNumSegs"    },
        { VIRTIO_BLK_F_GEOMETRY,        "DiskGeometry"  },
        { VIRTIO_BLK_F_RO,              "ReadOnly"      },
        { VIRTIO_BLK_F_BLK_SIZE,        "BlockSize"     },
        { VIRTIO_BLK_F_SCSI,            "SCSICmds"      },
        { VIRTIO_BLK_F_WCE,             "WriteCache"    },
        { VIRTIO_BLK_F_TOPOLOGY,        "Topology"      },
        { VIRTIO_BLK_F_CONFIG_WCE,      "ConfigWCE"     },

        { 0, NULL }
};

static int      vtblk_modevent(module_t, int, void *);

static int      vtblk_probe(device_t);
static int      vtblk_attach(device_t);
static int      vtblk_detach(device_t);
static int      vtblk_suspend(device_t);
static int      vtblk_resume(device_t);
static int      vtblk_shutdown(device_t);
static int      vtblk_config_change(device_t);

static int      vtblk_open(struct disk *);
static int      vtblk_close(struct disk *);
static int      vtblk_ioctl(struct disk *, u_long, void *, int,
                    struct thread *);
static int      vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
static void     vtblk_strategy(struct bio *);

static void     vtblk_negotiate_features(struct vtblk_softc *);
static int      vtblk_maximum_segments(struct vtblk_softc *,
                    struct virtio_blk_config *);
static int      vtblk_alloc_virtqueue(struct vtblk_softc *);
static void     vtblk_set_write_cache(struct vtblk_softc *, int);
static int      vtblk_write_cache_enabled(struct vtblk_softc *sc,
                    struct virtio_blk_config *);
static int      vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS);
static void     vtblk_alloc_disk(struct vtblk_softc *,
                    struct virtio_blk_config *);
static void     vtblk_create_disk(struct vtblk_softc *);

static int      vtblk_quiesce(struct vtblk_softc *);
static void     vtblk_startio(struct vtblk_softc *);
static struct vtblk_request * vtblk_bio_request(struct vtblk_softc *);
static int      vtblk_execute_request(struct vtblk_softc *,
                    struct vtblk_request *);

static void     vtblk_vq_intr(void *);

static void     vtblk_stop(struct vtblk_softc *);

static void     vtblk_read_config(struct vtblk_softc *,
                    struct virtio_blk_config *);
static void     vtblk_get_ident(struct vtblk_softc *);
static void     vtblk_prepare_dump(struct vtblk_softc *);
static int      vtblk_write_dump(struct vtblk_softc *, void *, off_t, size_t);
static int      vtblk_flush_dump(struct vtblk_softc *);
static int      vtblk_poll_request(struct vtblk_softc *,
                    struct vtblk_request *);

static void     vtblk_finish_completed(struct vtblk_softc *);
static void     vtblk_drain_vq(struct vtblk_softc *, int);
static void     vtblk_drain(struct vtblk_softc *);

static int      vtblk_alloc_requests(struct vtblk_softc *);
static void     vtblk_free_requests(struct vtblk_softc *);
static struct vtblk_request * vtblk_dequeue_request(struct vtblk_softc *);
static void     vtblk_enqueue_request(struct vtblk_softc *,
                    struct vtblk_request *);

static struct vtblk_request * vtblk_dequeue_ready(struct vtblk_softc *);
static void     vtblk_enqueue_ready(struct vtblk_softc *,
                    struct vtblk_request *);

static int      vtblk_request_error(struct vtblk_request *);
static void     vtblk_finish_bio(struct bio *, int);

static void     vtblk_setup_sysctl(struct vtblk_softc *);
static int      vtblk_tunable_int(struct vtblk_softc *, const char *, int);

/* Tunables. */
static int vtblk_no_ident = 0;
TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
static int vtblk_writecache_mode = -1;
TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);

/* Features desired/implemented by this driver. */
#define VTBLK_FEATURES \
    (VIRTIO_BLK_F_BARRIER               | \
     VIRTIO_BLK_F_SIZE_MAX              | \
     VIRTIO_BLK_F_SEG_MAX               | \
     VIRTIO_BLK_F_GEOMETRY              | \
     VIRTIO_BLK_F_RO                    | \
     VIRTIO_BLK_F_BLK_SIZE              | \
     VIRTIO_BLK_F_WCE                   | \
     VIRTIO_BLK_F_TOPOLOGY              | \
     VIRTIO_BLK_F_CONFIG_WCE            | \
     VIRTIO_RING_F_INDIRECT_DESC)

#define VTBLK_MTX(_sc)          &(_sc)->vtblk_mtx
#define VTBLK_LOCK_INIT(_sc, _name) \
                                mtx_init(VTBLK_MTX((_sc)), (_name), \
                                    "VirtIO Block Lock", MTX_DEF)
#define VTBLK_LOCK(_sc)         mtx_lock(VTBLK_MTX((_sc)))
#define VTBLK_UNLOCK(_sc)       mtx_unlock(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_DESTROY(_sc) mtx_destroy(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_ASSERT(_sc)  mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
#define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
                                mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)

#define VTBLK_DISK_NAME         "vtbd"
#define VTBLK_QUIESCE_TIMEOUT   (30 * hz)

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VTBLK_MIN_SEGMENTS      2

static device_method_t vtblk_methods[] = {
        /* Device methods. */
        DEVMETHOD(device_probe,         vtblk_probe),
        DEVMETHOD(device_attach,        vtblk_attach),
        DEVMETHOD(device_detach,        vtblk_detach),
        DEVMETHOD(device_suspend,       vtblk_suspend),
        DEVMETHOD(device_resume,        vtblk_resume),
        DEVMETHOD(device_shutdown,      vtblk_shutdown),

        /* VirtIO methods. */
        DEVMETHOD(virtio_config_change, vtblk_config_change),

        DEVMETHOD_END
};

static driver_t vtblk_driver = {
        "vtblk",
        vtblk_methods,
        sizeof(struct vtblk_softc)
};
static devclass_t vtblk_devclass;

DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
MODULE_VERSION(virtio_blk, 1);
MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);

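/*
 * Nothing to do at module load/unload time; all per-device setup and
 * teardown happens in the newbus methods below.
 */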
static int
vtblk_modevent(module_t mod, int type, void *unused)
{
        int error;

        error = 0;

        switch (type) {
        case MOD_LOAD:
        case MOD_QUIESCE:
        case MOD_UNLOAD:
        case MOD_SHUTDOWN:
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }

        return (error);
}

static int
vtblk_probe(device_t dev)
{

        if (virtio_get_device_type(dev) != VIRTIO_ID_BLOCK)
                return (ENXIO);

        device_set_desc(dev, "VirtIO Block Adapter");

        return (BUS_PROBE_DEFAULT);
}

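/*
 * Attach: negotiate features, size the scatter/gather list from the
 * device's config space, allocate the virtqueue and request pool, then
 * publish the disk to GEOM. On failure, vtblk_detach() unwinds any
 * partially constructed state.
 */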
static int
vtblk_attach(device_t dev)
{
        struct vtblk_softc *sc;
        struct virtio_blk_config blkcfg;
        int error;

        sc = device_get_softc(dev);
        sc->vtblk_dev = dev;

        VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));

        bioq_init(&sc->vtblk_bioq);
        TAILQ_INIT(&sc->vtblk_req_free);
        TAILQ_INIT(&sc->vtblk_req_ready);

        virtio_set_feature_desc(dev, vtblk_feature_desc);
        vtblk_negotiate_features(sc);

        if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
                sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
        if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
                sc->vtblk_flags |= VTBLK_FLAG_READONLY;
        if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
                sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
        if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
                sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;

        vtblk_setup_sysctl(sc);

        /* Get local copy of config. */
        vtblk_read_config(sc, &blkcfg);

        /*
         * With the current sglist(9) implementation, it is not easy
         * for us to support a maximum segment size as adjacent
         * segments are coalesced. For now, just make sure it's larger
         * than the maximum supported transfer size.
         */
        if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
                if (blkcfg.size_max < MAXPHYS) {
                        error = ENOTSUP;
                        device_printf(dev, "host requires unsupported "
                            "maximum segment size feature\n");
                        goto fail;
                }
        }

        sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
        if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
                error = EINVAL;
                device_printf(dev, "fewer than minimum number of segments "
                    "allowed: %d\n", sc->vtblk_max_nsegs);
                goto fail;
        }

        sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
        if (sc->vtblk_sglist == NULL) {
                error = ENOMEM;
                device_printf(dev, "cannot allocate sglist\n");
                goto fail;
        }

        error = vtblk_alloc_virtqueue(sc);
        if (error) {
                device_printf(dev, "cannot allocate virtqueue\n");
                goto fail;
        }

        error = vtblk_alloc_requests(sc);
        if (error) {
                device_printf(dev, "cannot preallocate requests\n");
                goto fail;
        }

        vtblk_alloc_disk(sc, &blkcfg);

        error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
        if (error) {
                device_printf(dev, "cannot setup virtqueue interrupt\n");
                goto fail;
        }

        vtblk_create_disk(sc);

        virtqueue_enable_intr(sc->vtblk_vq);

fail:
        if (error)
                vtblk_detach(dev);

        return (error);
}

static int
vtblk_detach(device_t dev)
{
        struct vtblk_softc *sc;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        sc->vtblk_flags |= VTBLK_FLAG_DETACH;
        if (device_is_attached(dev))
                vtblk_stop(sc);
        VTBLK_UNLOCK(sc);

        vtblk_drain(sc);

        if (sc->vtblk_disk != NULL) {
                disk_destroy(sc->vtblk_disk);
                sc->vtblk_disk = NULL;
        }

        if (sc->vtblk_sglist != NULL) {
                sglist_free(sc->vtblk_sglist);
                sc->vtblk_sglist = NULL;
        }

        VTBLK_LOCK_DESTROY(sc);

        return (0);
}

static int
vtblk_suspend(device_t dev)
{
        struct vtblk_softc *sc;
        int error;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
        /* XXX BMV: virtio_stop(), etc needed here? */
        error = vtblk_quiesce(sc);
        if (error)
                sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
        VTBLK_UNLOCK(sc);

        return (error);
}

static int
vtblk_resume(device_t dev)
{
        struct vtblk_softc *sc;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        /* XXX BMV: virtio_reinit(), etc needed here? */
        sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
        vtblk_startio(sc);
        VTBLK_UNLOCK(sc);

        return (0);
}

static int
vtblk_shutdown(device_t dev)
{

        return (0);
}

static int
vtblk_config_change(device_t dev)
{

        return (0);
}

static int
vtblk_open(struct disk *dp)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? ENXIO : 0);
}

static int
vtblk_close(struct disk *dp)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (0);
}

static int
vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
    struct thread *td)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (ENOTTY);
}

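/*
 * Kernel dump entry point. The first call resets the device into
 * polled operation (vtblk_prepare_dump()); each write, and the final
 * flush, is then issued synchronously via vtblk_poll_request().
 */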
static int
vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)
{
        struct disk *dp;
        struct vtblk_softc *sc;
        int error;

        dp = arg;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        VTBLK_LOCK(sc);

        if ((sc->vtblk_flags & VTBLK_FLAG_DUMPING) == 0) {
                vtblk_prepare_dump(sc);
                sc->vtblk_flags |= VTBLK_FLAG_DUMPING;
        }

        if (length > 0)
                error = vtblk_write_dump(sc, virtual, offset, length);
        else if (virtual == NULL && offset == 0)
                error = vtblk_flush_dump(sc);
        else {
                error = EINVAL;
                sc->vtblk_flags &= ~VTBLK_FLAG_DUMPING;
        }

        VTBLK_UNLOCK(sc);

        return (error);
}

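/*
 * GEOM strategy routine: validate and queue the bio, then try to
 * submit it. Actual submission happens in vtblk_startio() so queued
 * bios are also picked up as earlier requests complete.
 */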
static void
vtblk_strategy(struct bio *bp)
{
        struct vtblk_softc *sc;

        if ((sc = bp->bio_disk->d_drv1) == NULL) {
                vtblk_finish_bio(bp, EINVAL);
                return;
        }

        /*
         * Fail any write if RO. Unfortunately, there does not seem to
         * be a better way to report our readonly'ness to GEOM above.
         */
        if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
            (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH)) {
                vtblk_finish_bio(bp, EROFS);
                return;
        }

#ifdef INVARIANTS
        /*
         * Prevent read/write buffers spanning too many segments from
         * getting into the queue. This should only trip if d_maxsize
         * was incorrectly set.
         */
        if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
                int nsegs, max_nsegs;

                nsegs = sglist_count(bp->bio_data, bp->bio_bcount);
                max_nsegs = sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS;

                KASSERT(nsegs <= max_nsegs,
                    ("%s: bio %p spanned too many segments: %d, max: %d",
                    __func__, bp, nsegs, max_nsegs));
        }
#endif

        VTBLK_LOCK(sc);
        if (sc->vtblk_flags & VTBLK_FLAG_DETACH)
                vtblk_finish_bio(bp, ENXIO);
        else {
                bioq_disksort(&sc->vtblk_bioq, bp);

                if ((sc->vtblk_flags & VTBLK_FLAG_SUSPEND) == 0)
                        vtblk_startio(sc);
        }
        VTBLK_UNLOCK(sc);
}

static void
vtblk_negotiate_features(struct vtblk_softc *sc)
{
        device_t dev;
        uint64_t features;

        dev = sc->vtblk_dev;
        features = VTBLK_FEATURES;

        sc->vtblk_features = virtio_negotiate_features(dev, features);
}

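/*
 * Size a request's scatter/gather list: the two fixed segments (header
 * and ack) plus the data segments. With SEG_MAX, the data segments are
 * bounded by the host's seg_max and by what a MAXPHYS transfer can
 * span; with indirect descriptors, the total is further capped at
 * VIRTIO_MAX_INDIRECT. Without SEG_MAX, a single data segment is
 * assumed.
 */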
static int
vtblk_maximum_segments(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
        device_t dev;
        int nsegs;

        dev = sc->vtblk_dev;
        nsegs = VTBLK_MIN_SEGMENTS;

        if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
                nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
                if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
                        nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
        } else
                nsegs += 1;

        return (nsegs);
}

static int
vtblk_alloc_virtqueue(struct vtblk_softc *sc)
{
        device_t dev;
        struct vq_alloc_info vq_info;

        dev = sc->vtblk_dev;

        VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
            vtblk_vq_intr, sc, &sc->vtblk_vq,
            "%s request", device_get_nameunit(dev));

        return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
}

static void
vtblk_set_write_cache(struct vtblk_softc *sc, int wc)
{

        /* Set either writeback (1) or writethrough (0) mode. */
        virtio_write_dev_config_1(sc->vtblk_dev,
            offsetof(struct virtio_blk_config, writeback), wc);
}

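/*
 * Determine the initial write cache mode. With CONFIG_WCE, a valid
 * hw.vtblk.<unit>.writecache_mode tunable wins, otherwise the config
 * space value is used; without it, the negotiated WCE feature decides.
 */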
static int
vtblk_write_cache_enabled(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
        int wc;

        if (sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) {
                wc = vtblk_tunable_int(sc, "writecache_mode",
                    vtblk_writecache_mode);
                if (wc >= 0 && wc < VTBLK_CACHE_MAX)
                        vtblk_set_write_cache(sc, wc);
                else
                        wc = blkcfg->writeback;
        } else
                wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE);

        return (wc);
}

static int
vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS)
{
        struct vtblk_softc *sc;
        int wc, error;

        sc = oidp->oid_arg1;
        wc = sc->vtblk_write_cache;

        error = sysctl_handle_int(oidp, &wc, 0, req);
        if (error || req->newptr == NULL)
                return (error);
        if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
                return (EPERM);
        if (wc < 0 || wc >= VTBLK_CACHE_MAX)
                return (EINVAL);

        VTBLK_LOCK(sc);
        sc->vtblk_write_cache = wc;
        vtblk_set_write_cache(sc, sc->vtblk_write_cache);
        VTBLK_UNLOCK(sc);

        return (0);
}

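/*
 * Fill in the disk(9) structure from the negotiated features and the
 * config space. The disk is not visible to GEOM until
 * vtblk_create_disk() calls disk_create().
 */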
static void
vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
        device_t dev;
        struct disk *dp;

        dev = sc->vtblk_dev;

        sc->vtblk_disk = dp = disk_alloc();
        dp->d_open = vtblk_open;
        dp->d_close = vtblk_close;
        dp->d_ioctl = vtblk_ioctl;
        dp->d_strategy = vtblk_strategy;
        dp->d_name = VTBLK_DISK_NAME;
        dp->d_unit = device_get_unit(dev);
        dp->d_drv1 = sc;
        dp->d_flags = DISKFLAG_CANFLUSHCACHE | DISKFLAG_UNMAPPED_BIO;
        dp->d_hba_vendor = virtio_get_vendor(dev);
        dp->d_hba_device = virtio_get_device(dev);
        dp->d_hba_subvendor = virtio_get_subvendor(dev);
        dp->d_hba_subdevice = virtio_get_subdevice(dev);

        if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
                dp->d_dump = vtblk_dump;

        /* Capacity is always in 512-byte units. */
        dp->d_mediasize = blkcfg->capacity * 512;

        if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
                dp->d_sectorsize = blkcfg->blk_size;
        else
                dp->d_sectorsize = 512;

        /*
         * The VirtIO maximum I/O size is given in terms of segments.
         * However, FreeBSD limits I/O size by logical buffer size, not
         * by physically contiguous pages. Therefore, we have to assume
         * no pages are contiguous. This may impose an artificially low
         * maximum I/O size. But in practice, since QEMU advertises 128
         * segments, this gives us a maximum I/O size of 125 * PAGE_SIZE,
         * which is typically greater than MAXPHYS. Eventually we should
         * just advertise MAXPHYS and split buffers that are too big.
         *
         * Note we must subtract one additional segment in case of
         * non-page-aligned buffers.
         */
        dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
            PAGE_SIZE;
        if (dp->d_maxsize < PAGE_SIZE)
                dp->d_maxsize = PAGE_SIZE; /* XXX */

        if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
                dp->d_fwsectors = blkcfg->geometry.sectors;
                dp->d_fwheads = blkcfg->geometry.heads;
        }

        if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY)) {
                dp->d_stripesize = dp->d_sectorsize *
                    (1 << blkcfg->topology.physical_block_exp);
                dp->d_stripeoffset = (dp->d_stripesize -
                    blkcfg->topology.alignment_offset * dp->d_sectorsize) %
                    dp->d_stripesize;
        }

        if (vtblk_write_cache_enabled(sc, blkcfg) != 0)
                sc->vtblk_write_cache = VTBLK_CACHE_WRITEBACK;
        else
                sc->vtblk_write_cache = VTBLK_CACHE_WRITETHROUGH;
}

static void
vtblk_create_disk(struct vtblk_softc *sc)
{
        struct disk *dp;

        dp = sc->vtblk_disk;

        /*
         * Retrieving the identification string must be done after
         * the virtqueue interrupt is set up, otherwise it will hang.
         */
        vtblk_get_ident(sc);

        device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
            (uintmax_t) dp->d_mediasize >> 20,
            (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
            dp->d_sectorsize);

        disk_create(dp, DISK_VERSION);
}

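/*
 * Sleep, up to VTBLK_QUIESCE_TIMEOUT, until the virtqueue is empty.
 * While VTBLK_FLAG_SUSPEND is set, the interrupt handler wakes us
 * after each batch of completions instead of starting new I/O.
 */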
static int
vtblk_quiesce(struct vtblk_softc *sc)
{
        int error;

        error = 0;

        VTBLK_LOCK_ASSERT(sc);

        while (!virtqueue_empty(sc->vtblk_vq)) {
                if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq",
                    VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) {
                        error = EBUSY;
                        break;
                }
        }

        return (error);
}

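/*
 * Submit as many requests as the virtqueue will accept, preferring
 * previously deferred ("ready") requests over new bios. A request that
 * cannot be executed yet (e.g. an ordered request waiting for the
 * queue to drain) is put back on the ready queue and retried on the
 * next call.
 */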
static void
vtblk_startio(struct vtblk_softc *sc)
{
        struct virtqueue *vq;
        struct vtblk_request *req;
        int enq;

        vq = sc->vtblk_vq;
        enq = 0;

        VTBLK_LOCK_ASSERT(sc);

        while (!virtqueue_full(vq)) {
                if ((req = vtblk_dequeue_ready(sc)) == NULL)
                        req = vtblk_bio_request(sc);
                if (req == NULL)
                        break;

                if (vtblk_execute_request(sc, req) != 0) {
                        vtblk_enqueue_ready(sc, req);
                        break;
                }

                enq++;
        }

        if (enq > 0)
                virtqueue_notify(vq);
}

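/*
 * Turn the bio at the head of the queue into a block request. Sector
 * numbers are always expressed in 512-byte units, regardless of the
 * advertised sector size.
 */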
static struct vtblk_request *
vtblk_bio_request(struct vtblk_softc *sc)
{
        struct bio_queue_head *bioq;
        struct vtblk_request *req;
        struct bio *bp;

        bioq = &sc->vtblk_bioq;

        if (bioq_first(bioq) == NULL)
                return (NULL);

        req = vtblk_dequeue_request(sc);
        if (req == NULL)
                return (NULL);

        bp = bioq_takefirst(bioq);
        req->vbr_bp = bp;
        req->vbr_ack = -1;
        req->vbr_hdr.ioprio = 1;

        switch (bp->bio_cmd) {
        case BIO_FLUSH:
                req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
                break;
        case BIO_READ:
                req->vbr_hdr.type = VIRTIO_BLK_T_IN;
                req->vbr_hdr.sector = bp->bio_offset / 512;
                break;
        case BIO_WRITE:
                req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
                req->vbr_hdr.sector = bp->bio_offset / 512;
                break;
        default:
                panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
        }

        return (req);
}

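/*
 * Build a request's scatter/gather list and enqueue it: the header is
 * device-readable, the ack byte is device-writable, and the data
 * buffer is readable for writes and writable for reads. Without the
 * barrier feature, BIO_ORDERED is emulated by only issuing the ordered
 * request once the queue is empty, and holding back later requests
 * until it completes.
 */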
static int
vtblk_execute_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
        struct virtqueue *vq;
        struct sglist *sg;
        struct bio *bp;
        int ordered, readable, writable, error;

        vq = sc->vtblk_vq;
        sg = sc->vtblk_sglist;
        bp = req->vbr_bp;
        ordered = 0;
        writable = 0;

        VTBLK_LOCK_ASSERT(sc);

        /*
         * Wait until the ordered request completes before
         * executing subsequent requests.
         */
        if (sc->vtblk_req_ordered != NULL)
                return (EBUSY);

        if (bp->bio_flags & BIO_ORDERED) {
                if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
                        /*
                         * This request will be executed once all
                         * the in-flight requests are completed.
                         */
                        if (!virtqueue_empty(vq))
                                return (EBUSY);
                        ordered = 1;
                } else
                        req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;
        }

        sglist_reset(sg);
        sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));

        if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
                error = sglist_append_bio(sg, bp);
                if (error || sg->sg_nseg == sg->sg_maxseg) {
                        panic("%s: data buffer too big bio:%p error:%d",
                            __func__, bp, error);
                }

                /* BIO_READ means the host writes into our buffer. */
                if (bp->bio_cmd == BIO_READ)
                        writable = sg->sg_nseg - 1;
        }

        writable++;
        sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
        readable = sg->sg_nseg - writable;

        error = virtqueue_enqueue(vq, req, sg, readable, writable);
        if (error == 0 && ordered)
                sc->vtblk_req_ordered = req;

        return (error);
}

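/*
 * Virtqueue interrupt handler. Completions that race with re-enabling
 * the interrupt are caught by the virtqueue_enable_intr() return
 * value, in which case we loop and process them.
 */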
static void
vtblk_vq_intr(void *xsc)
{
        struct vtblk_softc *sc;
        struct virtqueue *vq;

        sc = xsc;
        vq = sc->vtblk_vq;

again:
        VTBLK_LOCK(sc);
        if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
                VTBLK_UNLOCK(sc);
                return;
        }

        vtblk_finish_completed(sc);

        if ((sc->vtblk_flags & VTBLK_FLAG_SUSPEND) == 0)
                vtblk_startio(sc);
        else
                wakeup(&sc->vtblk_vq);

        if (virtqueue_enable_intr(vq) != 0) {
                virtqueue_disable_intr(vq);
                VTBLK_UNLOCK(sc);
                goto again;
        }

        VTBLK_UNLOCK(sc);
}

static void
vtblk_stop(struct vtblk_softc *sc)
{

        virtqueue_disable_intr(sc->vtblk_vq);
        virtio_stop(sc->vtblk_dev);
}

#define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg)                  \
        if (virtio_with_feature(_dev, _feature)) {                      \
                virtio_read_device_config(_dev,                         \
                    offsetof(struct virtio_blk_config, _field),         \
                    &(_cfg)->_field, sizeof((_cfg)->_field));           \
        }

static void
vtblk_read_config(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
        device_t dev;

        dev = sc->vtblk_dev;

        bzero(blkcfg, sizeof(struct virtio_blk_config));

        /* The capacity is always available. */
        virtio_read_device_config(dev, offsetof(struct virtio_blk_config,
            capacity), &blkcfg->capacity, sizeof(blkcfg->capacity));

        /* Read the configuration if the feature was negotiated. */
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, writeback, blkcfg);
}

#undef VTBLK_GET_CONFIG

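/*
 * Fetch the device's identification string into d_ident using a
 * VIRTIO_BLK_T_GET_ID request built on a stack bio, polled to
 * completion. The hw.vtblk.no_ident tunable disables this.
 */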
static void
vtblk_get_ident(struct vtblk_softc *sc)
{
        struct bio buf;
        struct disk *dp;
        struct vtblk_request *req;
        int len, error;

        dp = sc->vtblk_disk;
        len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);

        if (vtblk_tunable_int(sc, "no_ident", vtblk_no_ident) != 0)
                return;

        req = vtblk_dequeue_request(sc);
        if (req == NULL)
                return;

        req->vbr_ack = -1;
        req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
        req->vbr_hdr.ioprio = 1;
        req->vbr_hdr.sector = 0;

        req->vbr_bp = &buf;
        bzero(&buf, sizeof(struct bio));

        buf.bio_cmd = BIO_READ;
        buf.bio_data = dp->d_ident;
        buf.bio_bcount = len;

        VTBLK_LOCK(sc);
        error = vtblk_poll_request(sc, req);
        VTBLK_UNLOCK(sc);

        vtblk_enqueue_request(sc, req);

        if (error) {
                device_printf(sc->vtblk_dev,
                    "error getting device identifier: %d\n", error);
        }
}

static void
vtblk_prepare_dump(struct vtblk_softc *sc)
{
        device_t dev;
        struct virtqueue *vq;

        dev = sc->vtblk_dev;
        vq = sc->vtblk_vq;

        vtblk_stop(sc);

        /*
         * Drain all requests caught in-flight in the virtqueue,
         * skipping biodone(). When dumping, only one request is
         * outstanding at a time, and we just poll the virtqueue
         * for the response.
         */
        vtblk_drain_vq(sc, 1);

        if (virtio_reinit(dev, sc->vtblk_features) != 0) {
                panic("%s: cannot reinit VirtIO block device during dump",
                    device_get_nameunit(dev));
        }

        virtqueue_disable_intr(vq);
        virtio_reinit_complete(dev);
}

static int
vtblk_write_dump(struct vtblk_softc *sc, void *virtual, off_t offset,
    size_t length)
{
        struct bio buf;
        struct vtblk_request *req;

        req = &sc->vtblk_dump_request;
        req->vbr_ack = -1;
        req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
        req->vbr_hdr.ioprio = 1;
        req->vbr_hdr.sector = offset / 512;

        req->vbr_bp = &buf;
        bzero(&buf, sizeof(struct bio));

        buf.bio_cmd = BIO_WRITE;
        buf.bio_data = virtual;
        buf.bio_bcount = length;

        return (vtblk_poll_request(sc, req));
}

static int
vtblk_flush_dump(struct vtblk_softc *sc)
{
        struct bio buf;
        struct vtblk_request *req;

        req = &sc->vtblk_dump_request;
        req->vbr_ack = -1;
        req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
        req->vbr_hdr.ioprio = 1;
        req->vbr_hdr.sector = 0;

        req->vbr_bp = &buf;
        bzero(&buf, sizeof(struct bio));

        buf.bio_cmd = BIO_FLUSH;

        return (vtblk_poll_request(sc, req));
}

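/*
 * Execute a single request synchronously, spinning on the virtqueue
 * instead of waiting for an interrupt. Used only when the queue is
 * otherwise idle: the identify request at attach time and dumps.
 */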
static int
vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
        struct virtqueue *vq;
        int error;

        vq = sc->vtblk_vq;

        if (!virtqueue_empty(vq))
                return (EBUSY);

        error = vtblk_execute_request(sc, req);
        if (error)
                return (error);

        virtqueue_notify(vq);
        virtqueue_poll(vq, NULL);

        error = vtblk_request_error(req);
        if (error && bootverbose) {
                device_printf(sc->vtblk_dev,
                    "%s: IO error: %d\n", __func__, error);
        }

        return (error);
}

static void
vtblk_finish_completed(struct vtblk_softc *sc)
{
        struct vtblk_request *req;
        struct bio *bp;
        int error;

        while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
                bp = req->vbr_bp;

                if (sc->vtblk_req_ordered != NULL) {
                        /* This should be the only outstanding request. */
                        MPASS(sc->vtblk_req_ordered == req);
                        sc->vtblk_req_ordered = NULL;
                }

                error = vtblk_request_error(req);
                if (error)
                        disk_err(bp, "hard error", -1, 1);

                vtblk_finish_bio(bp, error);
                vtblk_enqueue_request(sc, req);
        }
}

static void
vtblk_drain_vq(struct vtblk_softc *sc, int skip_done)
{
        struct virtqueue *vq;
        struct vtblk_request *req;
        int last;

        vq = sc->vtblk_vq;
        last = 0;

        while ((req = virtqueue_drain(vq, &last)) != NULL) {
                if (!skip_done)
                        vtblk_finish_bio(req->vbr_bp, ENXIO);

                vtblk_enqueue_request(sc, req);
        }

        sc->vtblk_req_ordered = NULL;
        KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}

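/*
 * Fail every outstanding and queued bio with ENXIO and release the
 * request pool; called from detach once the device has been stopped.
 */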
static void
vtblk_drain(struct vtblk_softc *sc)
{
        struct bio_queue_head *bioq;
        struct vtblk_request *req;
        struct bio *bp;

        bioq = &sc->vtblk_bioq;

        if (sc->vtblk_vq != NULL) {
                vtblk_finish_completed(sc);
                vtblk_drain_vq(sc, 0);
        }

        while ((req = vtblk_dequeue_ready(sc)) != NULL) {
                vtblk_finish_bio(req->vbr_bp, ENXIO);
                vtblk_enqueue_request(sc, req);
        }

        while (bioq_first(bioq) != NULL) {
                bp = bioq_takefirst(bioq);
                vtblk_finish_bio(bp, ENXIO);
        }

        vtblk_free_requests(sc);
}

#ifdef INVARIANTS
static void
vtblk_request_invariants(struct vtblk_request *req)
{
        int hdr_nsegs, ack_nsegs;

        hdr_nsegs = sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr));
        ack_nsegs = sglist_count(&req->vbr_ack, sizeof(req->vbr_ack));

        KASSERT(hdr_nsegs == 1, ("request header crossed page boundary"));
        KASSERT(ack_nsegs == 1, ("request ack crossed page boundary"));
}
#endif

static int
vtblk_alloc_requests(struct vtblk_softc *sc)
{
        struct vtblk_request *req;
        int i, nreqs;

        nreqs = virtqueue_size(sc->vtblk_vq);

        /*
         * Preallocate sufficient requests to keep the virtqueue full. Each
         * request consumes VTBLK_MIN_SEGMENTS or more descriptors so reduce
         * the number allocated when indirect descriptors are not available.
         */
        if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
                nreqs /= VTBLK_MIN_SEGMENTS;

        for (i = 0; i < nreqs; i++) {
                req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT);
                if (req == NULL)
                        return (ENOMEM);

#ifdef INVARIANTS
                vtblk_request_invariants(req);
#endif

                sc->vtblk_request_count++;
                vtblk_enqueue_request(sc, req);
        }

        return (0);
}

static void
vtblk_free_requests(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        KASSERT(TAILQ_EMPTY(&sc->vtblk_req_ready),
            ("%s: ready requests left on queue", __func__));

        while ((req = vtblk_dequeue_request(sc)) != NULL) {
                sc->vtblk_request_count--;
                free(req, M_DEVBUF);
        }

        KASSERT(sc->vtblk_request_count == 0,
            ("%s: leaked %d requests", __func__, sc->vtblk_request_count));
}

static struct vtblk_request *
vtblk_dequeue_request(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        req = TAILQ_FIRST(&sc->vtblk_req_free);
        if (req != NULL)
                TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);

        return (req);
}

static void
vtblk_enqueue_request(struct vtblk_softc *sc, struct vtblk_request *req)
{

        bzero(req, sizeof(struct vtblk_request));
        TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
}

static struct vtblk_request *
vtblk_dequeue_ready(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        req = TAILQ_FIRST(&sc->vtblk_req_ready);
        if (req != NULL)
                TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);

        return (req);
}

static void
vtblk_enqueue_ready(struct vtblk_softc *sc, struct vtblk_request *req)
{

        TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);
}

static int
vtblk_request_error(struct vtblk_request *req)
{
        int error;

        switch (req->vbr_ack) {
        case VIRTIO_BLK_S_OK:
                error = 0;
                break;
        case VIRTIO_BLK_S_UNSUPP:
                error = ENOTSUP;
                break;
        default:
                error = EIO;
                break;
        }

        return (error);
}

static void
vtblk_finish_bio(struct bio *bp, int error)
{

        if (error) {
                bp->bio_resid = bp->bio_bcount;
                bp->bio_error = error;
                bp->bio_flags |= BIO_ERROR;
        }

        biodone(bp);
}

static void
vtblk_setup_sysctl(struct vtblk_softc *sc)
{
        device_t dev;
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *tree;
        struct sysctl_oid_list *child;

        dev = sc->vtblk_dev;
        ctx = device_get_sysctl_ctx(dev);
        tree = device_get_sysctl_tree(dev);
        child = SYSCTL_CHILDREN(tree);

        SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "writecache_mode",
            CTLTYPE_INT | CTLFLAG_RW, sc, 0, vtblk_write_cache_sysctl,
            "I", "Write cache mode (writethrough (0) or writeback (1))");
}

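/*
 * Fetch a per-device tunable, e.g. hw.vtblk.0.writecache_mode, falling
 * back to the supplied global default.
 */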
static int
vtblk_tunable_int(struct vtblk_softc *sc, const char *knob, int def)
{
        char path[64];

        snprintf(path, sizeof(path),
            "hw.vtblk.%d.%s", device_get_unit(sc->vtblk_dev), knob);
        TUNABLE_INT_FETCH(path, &def);

        return (def);
}