sys/dev/virtio/block/virtio_blk.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO block devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <geom/geom.h>
#include <geom/geom_disk.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/block/virtio_blk.h>

#include "virtio_if.h"

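/*
 * Each vtblk_request pairs a bio with the virtio-blk wire format: a
 * device-readable header (type/ioprio/sector), an optional data payload,
 * and a single device-writable status byte (vbr_ack) that the host fills
 * in on completion.
 */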
struct vtblk_request {
        struct virtio_blk_outhdr         vbr_hdr;
        struct bio                      *vbr_bp;
        uint8_t                          vbr_ack;
        TAILQ_ENTRY(vtblk_request)       vbr_link;
};

enum vtblk_cache_mode {
        VTBLK_CACHE_WRITETHROUGH,
        VTBLK_CACHE_WRITEBACK,
        VTBLK_CACHE_MAX
};

struct vtblk_softc {
        device_t                 vtblk_dev;
        struct mtx               vtblk_mtx;
        uint64_t                 vtblk_features;
        uint32_t                 vtblk_flags;
#define VTBLK_FLAG_INDIRECT     0x0001
#define VTBLK_FLAG_READONLY     0x0002
#define VTBLK_FLAG_DETACH       0x0004
#define VTBLK_FLAG_SUSPEND      0x0008
#define VTBLK_FLAG_BARRIER      0x0010
#define VTBLK_FLAG_WC_CONFIG    0x0020

        struct virtqueue        *vtblk_vq;
        struct sglist           *vtblk_sglist;
        struct disk             *vtblk_disk;

        struct bio_queue_head    vtblk_bioq;
        TAILQ_HEAD(, vtblk_request)
                                 vtblk_req_free;
        TAILQ_HEAD(, vtblk_request)
                                 vtblk_req_ready;
        struct vtblk_request    *vtblk_req_ordered;

        int                      vtblk_max_nsegs;
        int                      vtblk_request_count;
        enum vtblk_cache_mode    vtblk_write_cache;

        struct bio_queue         vtblk_dump_queue;
        struct vtblk_request     vtblk_dump_request;
};

static struct virtio_feature_desc vtblk_feature_desc[] = {
        { VIRTIO_BLK_F_BARRIER,         "HostBarrier"   },
        { VIRTIO_BLK_F_SIZE_MAX,        "MaxSegSize"    },
        { VIRTIO_BLK_F_SEG_MAX,         "MaxNumSegs"    },
        { VIRTIO_BLK_F_GEOMETRY,        "DiskGeometry"  },
        { VIRTIO_BLK_F_RO,              "ReadOnly"      },
        { VIRTIO_BLK_F_BLK_SIZE,        "BlockSize"     },
        { VIRTIO_BLK_F_SCSI,            "SCSICmds"      },
        { VIRTIO_BLK_F_WCE,             "WriteCache"    },
        { VIRTIO_BLK_F_TOPOLOGY,        "Topology"      },
        { VIRTIO_BLK_F_CONFIG_WCE,      "ConfigWCE"     },

        { 0, NULL }
};

static int      vtblk_modevent(module_t, int, void *);

static int      vtblk_probe(device_t);
static int      vtblk_attach(device_t);
static int      vtblk_detach(device_t);
static int      vtblk_suspend(device_t);
static int      vtblk_resume(device_t);
static int      vtblk_shutdown(device_t);
static int      vtblk_config_change(device_t);

static int      vtblk_open(struct disk *);
static int      vtblk_close(struct disk *);
static int      vtblk_ioctl(struct disk *, u_long, void *, int,
                    struct thread *);
static int      vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
static void     vtblk_strategy(struct bio *);

static void     vtblk_negotiate_features(struct vtblk_softc *);
static void     vtblk_setup_features(struct vtblk_softc *);
static int      vtblk_maximum_segments(struct vtblk_softc *,
                    struct virtio_blk_config *);
static int      vtblk_alloc_virtqueue(struct vtblk_softc *);
static void     vtblk_resize_disk(struct vtblk_softc *, uint64_t);
static void     vtblk_alloc_disk(struct vtblk_softc *,
                    struct virtio_blk_config *);
static void     vtblk_create_disk(struct vtblk_softc *);

static int      vtblk_request_prealloc(struct vtblk_softc *);
static void     vtblk_request_free(struct vtblk_softc *);
static struct vtblk_request *
                vtblk_request_dequeue(struct vtblk_softc *);
static void     vtblk_request_enqueue(struct vtblk_softc *,
                    struct vtblk_request *);
static struct vtblk_request *
                vtblk_request_next_ready(struct vtblk_softc *);
static void     vtblk_request_requeue_ready(struct vtblk_softc *,
                    struct vtblk_request *);
static struct vtblk_request *
                vtblk_request_next(struct vtblk_softc *);
static struct vtblk_request *
                vtblk_request_bio(struct vtblk_softc *);
static int      vtblk_request_execute(struct vtblk_softc *,
                    struct vtblk_request *);
static int      vtblk_request_error(struct vtblk_request *);

static void     vtblk_queue_completed(struct vtblk_softc *,
                    struct bio_queue *);
static void     vtblk_done_completed(struct vtblk_softc *,
                    struct bio_queue *);
static void     vtblk_drain_vq(struct vtblk_softc *);
static void     vtblk_drain(struct vtblk_softc *);

static void     vtblk_startio(struct vtblk_softc *);
static void     vtblk_bio_done(struct vtblk_softc *, struct bio *, int);

static void     vtblk_read_config(struct vtblk_softc *,
                    struct virtio_blk_config *);
static void     vtblk_ident(struct vtblk_softc *);
static int      vtblk_poll_request(struct vtblk_softc *,
                    struct vtblk_request *);
static int      vtblk_quiesce(struct vtblk_softc *);
static void     vtblk_vq_intr(void *);
static void     vtblk_stop(struct vtblk_softc *);

static void     vtblk_dump_quiesce(struct vtblk_softc *);
static int      vtblk_dump_write(struct vtblk_softc *, void *, off_t, size_t);
static int      vtblk_dump_flush(struct vtblk_softc *);
static void     vtblk_dump_complete(struct vtblk_softc *);

static void     vtblk_set_write_cache(struct vtblk_softc *, int);
static int      vtblk_write_cache_enabled(struct vtblk_softc *sc,
                    struct virtio_blk_config *);
static int      vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS);

static void     vtblk_setup_sysctl(struct vtblk_softc *);
static int      vtblk_tunable_int(struct vtblk_softc *, const char *, int);

/* Tunables. */
static int vtblk_no_ident = 0;
TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
static int vtblk_writecache_mode = -1;
TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);

/* Features desired/implemented by this driver. */
#define VTBLK_FEATURES \
    (VIRTIO_BLK_F_BARRIER               | \
     VIRTIO_BLK_F_SIZE_MAX              | \
     VIRTIO_BLK_F_SEG_MAX               | \
     VIRTIO_BLK_F_GEOMETRY              | \
     VIRTIO_BLK_F_RO                    | \
     VIRTIO_BLK_F_BLK_SIZE              | \
     VIRTIO_BLK_F_WCE                   | \
     VIRTIO_BLK_F_TOPOLOGY              | \
     VIRTIO_BLK_F_CONFIG_WCE            | \
     VIRTIO_RING_F_INDIRECT_DESC)

#define VTBLK_MTX(_sc)          &(_sc)->vtblk_mtx
#define VTBLK_LOCK_INIT(_sc, _name) \
                                mtx_init(VTBLK_MTX((_sc)), (_name), \
                                    "VirtIO Block Lock", MTX_DEF)
#define VTBLK_LOCK(_sc)         mtx_lock(VTBLK_MTX((_sc)))
#define VTBLK_UNLOCK(_sc)       mtx_unlock(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_DESTROY(_sc) mtx_destroy(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_ASSERT(_sc)  mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
#define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
                                mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)

#define VTBLK_DISK_NAME         "vtbd"
#define VTBLK_QUIESCE_TIMEOUT   (30 * hz)

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VTBLK_MIN_SEGMENTS      2

static device_method_t vtblk_methods[] = {
        /* Device methods. */
        DEVMETHOD(device_probe,         vtblk_probe),
        DEVMETHOD(device_attach,        vtblk_attach),
        DEVMETHOD(device_detach,        vtblk_detach),
        DEVMETHOD(device_suspend,       vtblk_suspend),
        DEVMETHOD(device_resume,        vtblk_resume),
        DEVMETHOD(device_shutdown,      vtblk_shutdown),

        /* VirtIO methods. */
        DEVMETHOD(virtio_config_change, vtblk_config_change),

        DEVMETHOD_END
};

static driver_t vtblk_driver = {
        "vtblk",
        vtblk_methods,
        sizeof(struct vtblk_softc)
};
static devclass_t vtblk_devclass;

DRIVER_MODULE(virtio_blk, virtio_mmio, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
MODULE_VERSION(virtio_blk, 1);
MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);

static int
vtblk_modevent(module_t mod, int type, void *unused)
{
        int error;

        error = 0;

        switch (type) {
        case MOD_LOAD:
        case MOD_QUIESCE:
        case MOD_UNLOAD:
        case MOD_SHUTDOWN:
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }

        return (error);
}

static int
vtblk_probe(device_t dev)
{

        if (virtio_get_device_type(dev) != VIRTIO_ID_BLOCK)
                return (ENXIO);

        device_set_desc(dev, "VirtIO Block Adapter");

        return (BUS_PROBE_DEFAULT);
}

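/*
 * Attach order: negotiate features, read the device config, size and
 * allocate the scatter/gather list and the single request virtqueue,
 * preallocate requests, then create the disk(9) device and enable the
 * virtqueue interrupt.
 */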
static int
vtblk_attach(device_t dev)
{
        struct vtblk_softc *sc;
        struct virtio_blk_config blkcfg;
        int error;

        virtio_set_feature_desc(dev, vtblk_feature_desc);

        sc = device_get_softc(dev);
        sc->vtblk_dev = dev;
        VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
        bioq_init(&sc->vtblk_bioq);
        TAILQ_INIT(&sc->vtblk_dump_queue);
        TAILQ_INIT(&sc->vtblk_req_free);
        TAILQ_INIT(&sc->vtblk_req_ready);

        vtblk_setup_sysctl(sc);
        vtblk_setup_features(sc);

        vtblk_read_config(sc, &blkcfg);

        /*
         * With the current sglist(9) implementation, it is not easy
         * for us to support a maximum segment size as adjacent
         * segments are coalesced. For now, just make sure it's larger
         * than the maximum supported transfer size.
         */
        if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
                if (blkcfg.size_max < MAXPHYS) {
                        error = ENOTSUP;
                        device_printf(dev, "host requires unsupported "
                            "maximum segment size feature\n");
                        goto fail;
                }
        }

        sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
        if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
                error = EINVAL;
                device_printf(dev, "fewer than minimum number of segments "
                    "allowed: %d\n", sc->vtblk_max_nsegs);
                goto fail;
        }

        sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
        if (sc->vtblk_sglist == NULL) {
                error = ENOMEM;
                device_printf(dev, "cannot allocate sglist\n");
                goto fail;
        }

        error = vtblk_alloc_virtqueue(sc);
        if (error) {
                device_printf(dev, "cannot allocate virtqueue\n");
                goto fail;
        }

        error = vtblk_request_prealloc(sc);
        if (error) {
                device_printf(dev, "cannot preallocate requests\n");
                goto fail;
        }

        vtblk_alloc_disk(sc, &blkcfg);

        error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
        if (error) {
                device_printf(dev, "cannot setup virtqueue interrupt\n");
                goto fail;
        }

        vtblk_create_disk(sc);

        virtqueue_enable_intr(sc->vtblk_vq);

fail:
        if (error)
                vtblk_detach(dev);

        return (error);
}

static int
vtblk_detach(device_t dev)
{
        struct vtblk_softc *sc;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        sc->vtblk_flags |= VTBLK_FLAG_DETACH;
        if (device_is_attached(dev))
                vtblk_stop(sc);
        VTBLK_UNLOCK(sc);

        vtblk_drain(sc);

        if (sc->vtblk_disk != NULL) {
                disk_destroy(sc->vtblk_disk);
                sc->vtblk_disk = NULL;
        }

        if (sc->vtblk_sglist != NULL) {
                sglist_free(sc->vtblk_sglist);
                sc->vtblk_sglist = NULL;
        }

        VTBLK_LOCK_DESTROY(sc);

        return (0);
}

static int
vtblk_suspend(device_t dev)
{
        struct vtblk_softc *sc;
        int error;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
        /* XXX BMV: virtio_stop(), etc needed here? */
        error = vtblk_quiesce(sc);
        if (error)
                sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
        VTBLK_UNLOCK(sc);

        return (error);
}

static int
vtblk_resume(device_t dev)
{
        struct vtblk_softc *sc;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        /* XXX BMV: virtio_reinit(), etc needed here? */
        sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
        vtblk_startio(sc);
        VTBLK_UNLOCK(sc);

        return (0);
}

static int
vtblk_shutdown(device_t dev)
{

        return (0);
}

static int
vtblk_config_change(device_t dev)
{
        struct vtblk_softc *sc;
        struct virtio_blk_config blkcfg;
        uint64_t capacity;

        sc = device_get_softc(dev);

        vtblk_read_config(sc, &blkcfg);

        /* Capacity is always in 512-byte units. */
        capacity = blkcfg.capacity * 512;

        if (sc->vtblk_disk->d_mediasize != capacity)
                vtblk_resize_disk(sc, capacity);

        return (0);
}

static int
vtblk_open(struct disk *dp)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? ENXIO : 0);
}

static int
vtblk_close(struct disk *dp)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (0);
}

static int
vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
    struct thread *td)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (ENOTTY);
}

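/*
 * Entry point for kernel core dumps, typically reached from panic
 * context with the scheduler stopped. All I/O here goes through
 * vtblk_poll_request(), so no interrupts are required. A call with
 * virtual == NULL and offset == 0 signals the end of the dump.
 */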
static int
vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)
{
        struct disk *dp;
        struct vtblk_softc *sc;
        int error;

        dp = arg;
        error = 0;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        VTBLK_LOCK(sc);

        vtblk_dump_quiesce(sc);

        if (length > 0)
                error = vtblk_dump_write(sc, virtual, offset, length);
        if (error || (virtual == NULL && offset == 0))
                vtblk_dump_complete(sc);

        VTBLK_UNLOCK(sc);

        return (error);
}

static void
vtblk_strategy(struct bio *bp)
{
        struct vtblk_softc *sc;

        if ((sc = bp->bio_disk->d_drv1) == NULL) {
                vtblk_bio_done(NULL, bp, EINVAL);
                return;
        }

        /*
         * Fail any write if read-only. Unfortunately, there does not
         * seem to be a better way to report our read-only status to
         * GEOM above.
         */
        if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
            (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH)) {
                vtblk_bio_done(sc, bp, EROFS);
                return;
        }

        VTBLK_LOCK(sc);

        if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
                VTBLK_UNLOCK(sc);
                vtblk_bio_done(sc, bp, ENXIO);
                return;
        }

        bioq_insert_tail(&sc->vtblk_bioq, bp);
        vtblk_startio(sc);

        VTBLK_UNLOCK(sc);
}

static void
vtblk_negotiate_features(struct vtblk_softc *sc)
{
        device_t dev;
        uint64_t features;

        dev = sc->vtblk_dev;
        features = VTBLK_FEATURES;

        sc->vtblk_features = virtio_negotiate_features(dev, features);
}

static void
vtblk_setup_features(struct vtblk_softc *sc)
{
        device_t dev;

        dev = sc->vtblk_dev;

        vtblk_negotiate_features(sc);

        if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
                sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
        if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
                sc->vtblk_flags |= VTBLK_FLAG_READONLY;
        if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
                sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
        if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
                sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;
}

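/*
 * The returned count includes VTBLK_MIN_SEGMENTS for the header and
 * status descriptors. The data portion is bounded by the host's
 * seg_max and by the most pages a MAXPHYS-sized transfer can touch;
 * e.g., with the typical MAXPHYS of 128 KB and 4 KB pages, a
 * misaligned transfer can span at most 33 pages (128/4 + 1).
 */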
static int
vtblk_maximum_segments(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
        device_t dev;
        int nsegs;

        dev = sc->vtblk_dev;
        nsegs = VTBLK_MIN_SEGMENTS;

        if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
                nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
                if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
                        nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
        } else
                nsegs += 1;

        return (nsegs);
}

static int
vtblk_alloc_virtqueue(struct vtblk_softc *sc)
{
        device_t dev;
        struct vq_alloc_info vq_info;

        dev = sc->vtblk_dev;

        VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
            vtblk_vq_intr, sc, &sc->vtblk_vq,
            "%s request", device_get_nameunit(dev));

        return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
}

static void
vtblk_resize_disk(struct vtblk_softc *sc, uint64_t new_capacity)
{
        device_t dev;
        struct disk *dp;
        int error;

        dev = sc->vtblk_dev;
        dp = sc->vtblk_disk;

        dp->d_mediasize = new_capacity;
        if (bootverbose) {
                device_printf(dev, "resized to %juMB (%ju %u byte sectors)\n",
                    (uintmax_t) dp->d_mediasize >> 20,
                    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
                    dp->d_sectorsize);
        }

        error = disk_resize(dp, M_NOWAIT);
        if (error) {
                device_printf(dev,
                    "disk_resize(9) failed, error: %d\n", error);
        }
}

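/*
 * Populate the disk(9) structure from the negotiated features.
 * DISKFLAG_UNMAPPED_BIO is safe here because request construction uses
 * sglist_append_bio(), which handles both mapped and unmapped bios.
 */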
static void
vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
        device_t dev;
        struct disk *dp;

        dev = sc->vtblk_dev;

        sc->vtblk_disk = dp = disk_alloc();
        dp->d_open = vtblk_open;
        dp->d_close = vtblk_close;
        dp->d_ioctl = vtblk_ioctl;
        dp->d_strategy = vtblk_strategy;
        dp->d_name = VTBLK_DISK_NAME;
        dp->d_unit = device_get_unit(dev);
        dp->d_drv1 = sc;
        dp->d_flags = DISKFLAG_CANFLUSHCACHE | DISKFLAG_UNMAPPED_BIO |
            DISKFLAG_DIRECT_COMPLETION;
        dp->d_hba_vendor = virtio_get_vendor(dev);
        dp->d_hba_device = virtio_get_device(dev);
        dp->d_hba_subvendor = virtio_get_subvendor(dev);
        dp->d_hba_subdevice = virtio_get_subdevice(dev);

        if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
                dp->d_dump = vtblk_dump;

        /* Capacity is always in 512-byte units. */
        dp->d_mediasize = blkcfg->capacity * 512;

        if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
                dp->d_sectorsize = blkcfg->blk_size;
        else
                dp->d_sectorsize = 512;

        /*
         * The VirtIO maximum I/O size is given in terms of segments.
         * However, FreeBSD limits I/O size by logical buffer size, not
         * by physically contiguous pages. Therefore, we have to assume
         * no pages are contiguous. This may impose an artificially low
         * maximum I/O size. But in practice, since QEMU advertises 128
         * segments, this gives us a maximum I/O size of 125 * PAGE_SIZE,
         * which is typically greater than MAXPHYS. Eventually we should
         * just advertise MAXPHYS and split buffers that are too big.
         *
         * Note we must subtract one additional segment in case of
         * non-page-aligned buffers.
         */
        dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
            PAGE_SIZE;
        if (dp->d_maxsize < PAGE_SIZE)
                dp->d_maxsize = PAGE_SIZE; /* XXX */

        if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
                dp->d_fwsectors = blkcfg->geometry.sectors;
                dp->d_fwheads = blkcfg->geometry.heads;
        }

        if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY) &&
            blkcfg->topology.physical_block_exp > 0) {
                dp->d_stripesize = dp->d_sectorsize *
                    (1 << blkcfg->topology.physical_block_exp);
                dp->d_stripeoffset = (dp->d_stripesize -
                    blkcfg->topology.alignment_offset * dp->d_sectorsize) %
                    dp->d_stripesize;
        }

        if (vtblk_write_cache_enabled(sc, blkcfg) != 0)
                sc->vtblk_write_cache = VTBLK_CACHE_WRITEBACK;
        else
                sc->vtblk_write_cache = VTBLK_CACHE_WRITETHROUGH;
}

static void
vtblk_create_disk(struct vtblk_softc *sc)
{
        struct disk *dp;

        dp = sc->vtblk_disk;

        vtblk_ident(sc);

        device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
            (uintmax_t) dp->d_mediasize >> 20,
            (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
            dp->d_sectorsize);

        disk_create(dp, DISK_VERSION);
}

static int
vtblk_request_prealloc(struct vtblk_softc *sc)
{
        struct vtblk_request *req;
        int i, nreqs;

        nreqs = virtqueue_size(sc->vtblk_vq);

        /*
         * Preallocate sufficient requests to keep the virtqueue full. Each
         * request consumes VTBLK_MIN_SEGMENTS or more descriptors so reduce
         * the number allocated when indirect descriptors are not available.
         */
        if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
                nreqs /= VTBLK_MIN_SEGMENTS;

        for (i = 0; i < nreqs; i++) {
                req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT);
                if (req == NULL)
                        return (ENOMEM);

                MPASS(sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr)) == 1);
                MPASS(sglist_count(&req->vbr_ack, sizeof(req->vbr_ack)) == 1);

                sc->vtblk_request_count++;
                vtblk_request_enqueue(sc, req);
        }

        return (0);
}

static void
vtblk_request_free(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        MPASS(TAILQ_EMPTY(&sc->vtblk_req_ready));

        while ((req = vtblk_request_dequeue(sc)) != NULL) {
                sc->vtblk_request_count--;
                free(req, M_DEVBUF);
        }

        KASSERT(sc->vtblk_request_count == 0,
            ("%s: leaked %d requests", __func__, sc->vtblk_request_count));
}

static struct vtblk_request *
vtblk_request_dequeue(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        req = TAILQ_FIRST(&sc->vtblk_req_free);
        if (req != NULL) {
                TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);
                bzero(req, sizeof(struct vtblk_request));
        }

        return (req);
}

static void
vtblk_request_enqueue(struct vtblk_softc *sc, struct vtblk_request *req)
{

        TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
}

static struct vtblk_request *
vtblk_request_next_ready(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        req = TAILQ_FIRST(&sc->vtblk_req_ready);
        if (req != NULL)
                TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);

        return (req);
}

static void
vtblk_request_requeue_ready(struct vtblk_softc *sc, struct vtblk_request *req)
{

        /* NOTE: Currently, there will be at most one request in the queue. */
        TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);
}

static struct vtblk_request *
vtblk_request_next(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        req = vtblk_request_next_ready(sc);
        if (req != NULL)
                return (req);

        return (vtblk_request_bio(sc));
}

static struct vtblk_request *
vtblk_request_bio(struct vtblk_softc *sc)
{
        struct bio_queue_head *bioq;
        struct vtblk_request *req;
        struct bio *bp;

        bioq = &sc->vtblk_bioq;

        if (bioq_first(bioq) == NULL)
                return (NULL);

        req = vtblk_request_dequeue(sc);
        if (req == NULL)
                return (NULL);

        bp = bioq_takefirst(bioq);
        req->vbr_bp = bp;
        req->vbr_ack = -1;
        req->vbr_hdr.ioprio = 1;

        switch (bp->bio_cmd) {
        case BIO_FLUSH:
                req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
                break;
        case BIO_READ:
                req->vbr_hdr.type = VIRTIO_BLK_T_IN;
                req->vbr_hdr.sector = bp->bio_offset / 512;
                break;
        case BIO_WRITE:
                req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
                req->vbr_hdr.sector = bp->bio_offset / 512;
                break;
        default:
                panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
        }

        if (bp->bio_flags & BIO_ORDERED)
                req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;

        return (req);
}

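/*
 * Enqueue a request: descriptors the device only reads (the header,
 * plus the data for a write) are counted as readable; descriptors the
 * device writes (the data for a read, plus the status byte) are
 * counted as writable. The virtqueue requires readable descriptors to
 * precede writable ones, which the sglist layout here guarantees.
 */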
static int
vtblk_request_execute(struct vtblk_softc *sc, struct vtblk_request *req)
{
        struct virtqueue *vq;
        struct sglist *sg;
        struct bio *bp;
        int ordered, readable, writable, error;

        vq = sc->vtblk_vq;
        sg = sc->vtblk_sglist;
        bp = req->vbr_bp;
        ordered = 0;
        writable = 0;

        /*
         * Some hosts (such as bhyve) do not implement the barrier feature,
         * so we emulate it in the driver by allowing the barrier request
         * to be the only one in flight.
         */
        if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
                if (sc->vtblk_req_ordered != NULL)
                        return (EBUSY);
                if (bp->bio_flags & BIO_ORDERED) {
                        if (!virtqueue_empty(vq))
                                return (EBUSY);
                        ordered = 1;
                        req->vbr_hdr.type &= ~VIRTIO_BLK_T_BARRIER;
                }
        }

        sglist_reset(sg);
        sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));

        if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
                error = sglist_append_bio(sg, bp);
                if (error || sg->sg_nseg == sg->sg_maxseg) {
                        panic("%s: bio %p data buffer too big %d",
                            __func__, bp, error);
                }

                /* BIO_READ means the host writes into our buffer. */
                if (bp->bio_cmd == BIO_READ)
                        writable = sg->sg_nseg - 1;
        }

        writable++;
        sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
        readable = sg->sg_nseg - writable;

        error = virtqueue_enqueue(vq, req, sg, readable, writable);
        if (error == 0 && ordered)
                sc->vtblk_req_ordered = req;

        return (error);
}

static int
vtblk_request_error(struct vtblk_request *req)
{
        int error;

        switch (req->vbr_ack) {
        case VIRTIO_BLK_S_OK:
                error = 0;
                break;
        case VIRTIO_BLK_S_UNSUPP:
                error = ENOTSUP;
                break;
        default:
                error = EIO;
                break;
        }

        return (error);
}

static void
vtblk_queue_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
        struct vtblk_request *req;
        struct bio *bp;

        while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
                if (sc->vtblk_req_ordered != NULL) {
                        MPASS(sc->vtblk_req_ordered == req);
                        sc->vtblk_req_ordered = NULL;
                }

                bp = req->vbr_bp;
                bp->bio_error = vtblk_request_error(req);
                TAILQ_INSERT_TAIL(queue, bp, bio_queue);

                vtblk_request_enqueue(sc, req);
        }
}

static void
vtblk_done_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
        struct bio *bp, *tmp;

        TAILQ_FOREACH_SAFE(bp, queue, bio_queue, tmp) {
                if (bp->bio_error != 0)
                        disk_err(bp, "hard error", -1, 1);
                vtblk_bio_done(sc, bp, bp->bio_error);
        }
}

static void
vtblk_drain_vq(struct vtblk_softc *sc)
{
        struct virtqueue *vq;
        struct vtblk_request *req;
        int last;

        vq = sc->vtblk_vq;
        last = 0;

        while ((req = virtqueue_drain(vq, &last)) != NULL) {
                vtblk_bio_done(sc, req->vbr_bp, ENXIO);
                vtblk_request_enqueue(sc, req);
        }

        sc->vtblk_req_ordered = NULL;
        KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}

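/*
 * Tear-down path: complete anything the host already finished, fail
 * the requests still in the virtqueue and any ready or queued bios
 * with ENXIO, then release the request pool.
 */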
static void
vtblk_drain(struct vtblk_softc *sc)
{
        struct bio_queue queue;
        struct bio_queue_head *bioq;
        struct vtblk_request *req;
        struct bio *bp;

        bioq = &sc->vtblk_bioq;
        TAILQ_INIT(&queue);

        if (sc->vtblk_vq != NULL) {
                vtblk_queue_completed(sc, &queue);
                vtblk_done_completed(sc, &queue);

                vtblk_drain_vq(sc);
        }

        while ((req = vtblk_request_next_ready(sc)) != NULL) {
                vtblk_bio_done(sc, req->vbr_bp, ENXIO);
                vtblk_request_enqueue(sc, req);
        }

        while (bioq_first(bioq) != NULL) {
                bp = bioq_takefirst(bioq);
                vtblk_bio_done(sc, bp, ENXIO);
        }

        vtblk_request_free(sc);
}

static void
vtblk_startio(struct vtblk_softc *sc)
{
        struct virtqueue *vq;
        struct vtblk_request *req;
        int enq;

        VTBLK_LOCK_ASSERT(sc);
        vq = sc->vtblk_vq;
        enq = 0;

        if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
                return;

        while (!virtqueue_full(vq)) {
                req = vtblk_request_next(sc);
                if (req == NULL)
                        break;

                if (vtblk_request_execute(sc, req) != 0) {
                        vtblk_request_requeue_ready(sc, req);
                        break;
                }

                enq++;
        }

        if (enq > 0)
                virtqueue_notify(vq);
}

static void
vtblk_bio_done(struct vtblk_softc *sc, struct bio *bp, int error)
{

        /* Because of GEOM direct dispatch, we cannot hold any locks. */
        if (sc != NULL)
                VTBLK_LOCK_ASSERT_NOTOWNED(sc);

        if (error) {
                bp->bio_resid = bp->bio_bcount;
                bp->bio_error = error;
                bp->bio_flags |= BIO_ERROR;
        }

        biodone(bp);
}

#define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg)                  \
        if (virtio_with_feature(_dev, _feature)) {                      \
                virtio_read_device_config(_dev,                         \
                    offsetof(struct virtio_blk_config, _field),         \
                    &(_cfg)->_field, sizeof((_cfg)->_field));           \
        }

static void
vtblk_read_config(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
        device_t dev;

        dev = sc->vtblk_dev;

        bzero(blkcfg, sizeof(struct virtio_blk_config));

        /* The capacity is always available. */
        virtio_read_device_config(dev, offsetof(struct virtio_blk_config,
            capacity), &blkcfg->capacity, sizeof(blkcfg->capacity));

        /* Read the configuration if the feature was negotiated. */
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, writeback, blkcfg);
}

#undef VTBLK_GET_CONFIG

static void
vtblk_ident(struct vtblk_softc *sc)
{
        struct bio buf;
        struct disk *dp;
        struct vtblk_request *req;
        int len, error;

        dp = sc->vtblk_disk;
        len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);

        if (vtblk_tunable_int(sc, "no_ident", vtblk_no_ident) != 0)
                return;

        req = vtblk_request_dequeue(sc);
        if (req == NULL)
                return;

        req->vbr_ack = -1;
        req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
        req->vbr_hdr.ioprio = 1;
        req->vbr_hdr.sector = 0;

        req->vbr_bp = &buf;
        g_reset_bio(&buf);

        buf.bio_cmd = BIO_READ;
        buf.bio_data = dp->d_ident;
        buf.bio_bcount = len;

        VTBLK_LOCK(sc);
        error = vtblk_poll_request(sc, req);
        VTBLK_UNLOCK(sc);

        vtblk_request_enqueue(sc, req);

        if (error) {
                device_printf(sc->vtblk_dev,
                    "error getting device identifier: %d\n", error);
        }
}

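/*
 * Issue a single request synchronously: the virtqueue must be empty,
 * and virtqueue_poll() spins until the host posts the completion.
 * Used only for the identify request at attach time and for dumps.
 */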
static int
vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
        struct virtqueue *vq;
        int error;

        vq = sc->vtblk_vq;

        if (!virtqueue_empty(vq))
                return (EBUSY);

        error = vtblk_request_execute(sc, req);
        if (error)
                return (error);

        virtqueue_notify(vq);
        virtqueue_poll(vq, NULL);

        error = vtblk_request_error(req);
        if (error && bootverbose) {
                device_printf(sc->vtblk_dev,
                    "%s: IO error: %d\n", __func__, error);
        }

        return (error);
}

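/*
 * Wait for the in-flight requests to drain; the interrupt handler
 * wakes us via wakeup(&sc->vtblk_vq) while VTBLK_FLAG_SUSPEND is set.
 * Give up with EBUSY after VTBLK_QUIESCE_TIMEOUT.
 */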
static int
vtblk_quiesce(struct vtblk_softc *sc)
{
        int error;

        VTBLK_LOCK_ASSERT(sc);
        error = 0;

        while (!virtqueue_empty(sc->vtblk_vq)) {
                if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq",
                    VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) {
                        error = EBUSY;
                        break;
                }
        }

        return (error);
}

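/*
 * Virtqueue interrupt handler. Re-enabling interrupts can race with
 * the host posting new completions, so if virtqueue_enable_intr()
 * reports more work is pending, interrupts are disabled again and the
 * queue is re-scanned. Completed bios are biodone'd only after the
 * lock is dropped, as required for GEOM direct dispatch.
 */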
static void
vtblk_vq_intr(void *xsc)
{
        struct vtblk_softc *sc;
        struct virtqueue *vq;
        struct bio_queue queue;

        sc = xsc;
        vq = sc->vtblk_vq;
        TAILQ_INIT(&queue);

        VTBLK_LOCK(sc);

again:
        if (sc->vtblk_flags & VTBLK_FLAG_DETACH)
                goto out;

        vtblk_queue_completed(sc, &queue);
        vtblk_startio(sc);

        if (virtqueue_enable_intr(vq) != 0) {
                virtqueue_disable_intr(vq);
                goto again;
        }

        if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
                wakeup(&sc->vtblk_vq);

out:
        VTBLK_UNLOCK(sc);
        vtblk_done_completed(sc, &queue);
}

static void
vtblk_stop(struct vtblk_softc *sc)
{

        virtqueue_disable_intr(sc->vtblk_vq);
        virtio_stop(sc->vtblk_dev);
}

static void
vtblk_dump_quiesce(struct vtblk_softc *sc)
{

        /*
         * Spin here until all the requests in-flight at the time of the
         * dump are completed and queued. The queued requests will be
         * biodone'd once the dump is finished.
         */
        while (!virtqueue_empty(sc->vtblk_vq))
                vtblk_queue_completed(sc, &sc->vtblk_dump_queue);
}

static int
vtblk_dump_write(struct vtblk_softc *sc, void *virtual, off_t offset,
    size_t length)
{
        struct bio buf;
        struct vtblk_request *req;

        req = &sc->vtblk_dump_request;
        req->vbr_ack = -1;
        req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
        req->vbr_hdr.ioprio = 1;
        req->vbr_hdr.sector = offset / 512;

        req->vbr_bp = &buf;
        g_reset_bio(&buf);

        buf.bio_cmd = BIO_WRITE;
        buf.bio_data = virtual;
        buf.bio_bcount = length;

        return (vtblk_poll_request(sc, req));
}

static int
vtblk_dump_flush(struct vtblk_softc *sc)
{
        struct bio buf;
        struct vtblk_request *req;

        req = &sc->vtblk_dump_request;
        req->vbr_ack = -1;
        req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
        req->vbr_hdr.ioprio = 1;
        req->vbr_hdr.sector = 0;

        req->vbr_bp = &buf;
        g_reset_bio(&buf);

        buf.bio_cmd = BIO_FLUSH;

        return (vtblk_poll_request(sc, req));
}

static void
vtblk_dump_complete(struct vtblk_softc *sc)
{

        vtblk_dump_flush(sc);

        VTBLK_UNLOCK(sc);
        vtblk_done_completed(sc, &sc->vtblk_dump_queue);
        VTBLK_LOCK(sc);
}

static void
vtblk_set_write_cache(struct vtblk_softc *sc, int wc)
{

        /* Set either writeback (1) or writethrough (0) mode. */
        virtio_write_dev_config_1(sc->vtblk_dev,
            offsetof(struct virtio_blk_config, writeback), wc);
}

static int
vtblk_write_cache_enabled(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
        int wc;

        if (sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) {
                wc = vtblk_tunable_int(sc, "writecache_mode",
                    vtblk_writecache_mode);
                if (wc >= 0 && wc < VTBLK_CACHE_MAX)
                        vtblk_set_write_cache(sc, wc);
                else
                        wc = blkcfg->writeback;
        } else
                wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE);

        return (wc);
}

static int
vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS)
{
        struct vtblk_softc *sc;
        int wc, error;

        sc = oidp->oid_arg1;
        wc = sc->vtblk_write_cache;

        error = sysctl_handle_int(oidp, &wc, 0, req);
        if (error || req->newptr == NULL)
                return (error);
        if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
                return (EPERM);
        if (wc < 0 || wc >= VTBLK_CACHE_MAX)
                return (EINVAL);

        VTBLK_LOCK(sc);
        sc->vtblk_write_cache = wc;
        vtblk_set_write_cache(sc, sc->vtblk_write_cache);
        VTBLK_UNLOCK(sc);

        return (0);
}

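/*
 * Exposes dev.vtblk.<unit>.writecache_mode. Changing it at runtime
 * only succeeds when the ConfigWCE feature was negotiated (see the
 * handler above); otherwise writes return EPERM.
 */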
static void
vtblk_setup_sysctl(struct vtblk_softc *sc)
{
        device_t dev;
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *tree;
        struct sysctl_oid_list *child;

        dev = sc->vtblk_dev;
        ctx = device_get_sysctl_ctx(dev);
        tree = device_get_sysctl_tree(dev);
        child = SYSCTL_CHILDREN(tree);

        SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "writecache_mode",
            CTLTYPE_INT | CTLFLAG_RW, sc, 0, vtblk_write_cache_sysctl,
            "I", "Write cache mode (writethrough (0) or writeback (1))");
}

static int
vtblk_tunable_int(struct vtblk_softc *sc, const char *knob, int def)
{
        char path[64];

        snprintf(path, sizeof(path),
            "hw.vtblk.%d.%s", device_get_unit(sc->vtblk_dev), knob);
        TUNABLE_INT_FETCH(path, &def);

        return (def);
}