/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO block devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <geom/geom_disk.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/block/virtio_blk.h>

#include "virtio_if.h"

struct vtblk_request {
        struct virtio_blk_outhdr         vbr_hdr;
        struct bio                      *vbr_bp;
        uint8_t                          vbr_ack;
        TAILQ_ENTRY(vtblk_request)       vbr_link;
};

enum vtblk_cache_mode {
        VTBLK_CACHE_WRITETHROUGH,
        VTBLK_CACHE_WRITEBACK,
        VTBLK_CACHE_MAX
};

struct vtblk_softc {
        device_t                 vtblk_dev;
        struct mtx               vtblk_mtx;
        uint64_t                 vtblk_features;
        uint32_t                 vtblk_flags;
#define VTBLK_FLAG_INDIRECT     0x0001
#define VTBLK_FLAG_READONLY     0x0002
#define VTBLK_FLAG_DETACH       0x0004
#define VTBLK_FLAG_SUSPEND      0x0008
#define VTBLK_FLAG_BARRIER      0x0010
#define VTBLK_FLAG_WC_CONFIG    0x0020

        struct virtqueue        *vtblk_vq;
        struct sglist           *vtblk_sglist;
        struct disk             *vtblk_disk;

        struct bio_queue_head    vtblk_bioq;
        TAILQ_HEAD(, vtblk_request)
                                 vtblk_req_free;
        TAILQ_HEAD(, vtblk_request)
                                 vtblk_req_ready;
        struct vtblk_request    *vtblk_req_ordered;

        int                      vtblk_max_nsegs;
        int                      vtblk_request_count;
        enum vtblk_cache_mode    vtblk_write_cache;

        struct bio_queue         vtblk_dump_queue;
        struct vtblk_request     vtblk_dump_request;
};

static struct virtio_feature_desc vtblk_feature_desc[] = {
        { VIRTIO_BLK_F_BARRIER,         "HostBarrier"   },
        { VIRTIO_BLK_F_SIZE_MAX,        "MaxSegSize"    },
        { VIRTIO_BLK_F_SEG_MAX,         "MaxNumSegs"    },
        { VIRTIO_BLK_F_GEOMETRY,        "DiskGeometry"  },
        { VIRTIO_BLK_F_RO,              "ReadOnly"      },
        { VIRTIO_BLK_F_BLK_SIZE,        "BlockSize"     },
        { VIRTIO_BLK_F_SCSI,            "SCSICmds"      },
        { VIRTIO_BLK_F_WCE,             "WriteCache"    },
        { VIRTIO_BLK_F_TOPOLOGY,        "Topology"      },
        { VIRTIO_BLK_F_CONFIG_WCE,      "ConfigWCE"     },

        { 0, NULL }
};

static int      vtblk_modevent(module_t, int, void *);

static int      vtblk_probe(device_t);
static int      vtblk_attach(device_t);
static int      vtblk_detach(device_t);
static int      vtblk_suspend(device_t);
static int      vtblk_resume(device_t);
static int      vtblk_shutdown(device_t);
static int      vtblk_config_change(device_t);

static int      vtblk_open(struct disk *);
static int      vtblk_close(struct disk *);
static int      vtblk_ioctl(struct disk *, u_long, void *, int,
                    struct thread *);
static int      vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
static void     vtblk_strategy(struct bio *);

static void     vtblk_negotiate_features(struct vtblk_softc *);
static void     vtblk_setup_features(struct vtblk_softc *);
static int      vtblk_maximum_segments(struct vtblk_softc *,
                    struct virtio_blk_config *);
static int      vtblk_alloc_virtqueue(struct vtblk_softc *);
static void     vtblk_resize_disk(struct vtblk_softc *, uint64_t);
static void     vtblk_alloc_disk(struct vtblk_softc *,
                    struct virtio_blk_config *);
static void     vtblk_create_disk(struct vtblk_softc *);

static int      vtblk_request_prealloc(struct vtblk_softc *);
static void     vtblk_request_free(struct vtblk_softc *);
static struct vtblk_request *
                vtblk_request_dequeue(struct vtblk_softc *);
static void     vtblk_request_enqueue(struct vtblk_softc *,
                    struct vtblk_request *);
static struct vtblk_request *
                vtblk_request_next_ready(struct vtblk_softc *);
static void     vtblk_request_requeue_ready(struct vtblk_softc *,
                    struct vtblk_request *);
static struct vtblk_request *
                vtblk_request_next(struct vtblk_softc *);
static struct vtblk_request *
                vtblk_request_bio(struct vtblk_softc *);
static int      vtblk_request_execute(struct vtblk_softc *,
                    struct vtblk_request *);
static int      vtblk_request_error(struct vtblk_request *);

static void     vtblk_queue_completed(struct vtblk_softc *,
                    struct bio_queue *);
static void     vtblk_done_completed(struct vtblk_softc *,
                    struct bio_queue *);
static void     vtblk_drain_vq(struct vtblk_softc *);
static void     vtblk_drain(struct vtblk_softc *);

static void     vtblk_startio(struct vtblk_softc *);
static void     vtblk_bio_done(struct vtblk_softc *, struct bio *, int);

static void     vtblk_read_config(struct vtblk_softc *,
                    struct virtio_blk_config *);
static void     vtblk_ident(struct vtblk_softc *);
static int      vtblk_poll_request(struct vtblk_softc *,
                    struct vtblk_request *);
static int      vtblk_quiesce(struct vtblk_softc *);
static void     vtblk_vq_intr(void *);
static void     vtblk_stop(struct vtblk_softc *);

static void     vtblk_dump_quiesce(struct vtblk_softc *);
static int      vtblk_dump_write(struct vtblk_softc *, void *, off_t, size_t);
static int      vtblk_dump_flush(struct vtblk_softc *);
static void     vtblk_dump_complete(struct vtblk_softc *);

static void     vtblk_set_write_cache(struct vtblk_softc *, int);
static int      vtblk_write_cache_enabled(struct vtblk_softc *sc,
                    struct virtio_blk_config *);
static int      vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS);

static void     vtblk_setup_sysctl(struct vtblk_softc *);
static int      vtblk_tunable_int(struct vtblk_softc *, const char *, int);

/* Tunables. */
static int vtblk_no_ident = 0;
TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
static int vtblk_writecache_mode = -1;
TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);

/* Features desired/implemented by this driver. */
#define VTBLK_FEATURES \
    (VIRTIO_BLK_F_BARRIER               | \
     VIRTIO_BLK_F_SIZE_MAX              | \
     VIRTIO_BLK_F_SEG_MAX               | \
     VIRTIO_BLK_F_GEOMETRY              | \
     VIRTIO_BLK_F_RO                    | \
     VIRTIO_BLK_F_BLK_SIZE              | \
     VIRTIO_BLK_F_WCE                   | \
     VIRTIO_BLK_F_TOPOLOGY              | \
     VIRTIO_BLK_F_CONFIG_WCE            | \
     VIRTIO_RING_F_INDIRECT_DESC)

#define VTBLK_MTX(_sc)          &(_sc)->vtblk_mtx
#define VTBLK_LOCK_INIT(_sc, _name) \
                                mtx_init(VTBLK_MTX((_sc)), (_name), \
                                    "VirtIO Block Lock", MTX_DEF)
#define VTBLK_LOCK(_sc)         mtx_lock(VTBLK_MTX((_sc)))
#define VTBLK_UNLOCK(_sc)       mtx_unlock(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_DESTROY(_sc) mtx_destroy(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_ASSERT(_sc)  mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
#define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
                                mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)

#define VTBLK_DISK_NAME         "vtbd"
#define VTBLK_QUIESCE_TIMEOUT   (30 * hz)

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VTBLK_MIN_SEGMENTS      2

static device_method_t vtblk_methods[] = {
        /* Device methods. */
        DEVMETHOD(device_probe,         vtblk_probe),
        DEVMETHOD(device_attach,        vtblk_attach),
        DEVMETHOD(device_detach,        vtblk_detach),
        DEVMETHOD(device_suspend,       vtblk_suspend),
        DEVMETHOD(device_resume,        vtblk_resume),
        DEVMETHOD(device_shutdown,      vtblk_shutdown),

        /* VirtIO methods. */
        DEVMETHOD(virtio_config_change, vtblk_config_change),

        DEVMETHOD_END
};

static driver_t vtblk_driver = {
        "vtblk",
        vtblk_methods,
        sizeof(struct vtblk_softc)
};
static devclass_t vtblk_devclass;

DRIVER_MODULE(virtio_blk, virtio_mmio, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
MODULE_VERSION(virtio_blk, 1);
MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);

static int
vtblk_modevent(module_t mod, int type, void *unused)
{
        int error;

        error = 0;

        switch (type) {
        case MOD_LOAD:
        case MOD_QUIESCE:
        case MOD_UNLOAD:
        case MOD_SHUTDOWN:
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }

        return (error);
}

static int
vtblk_probe(device_t dev)
{

        if (virtio_get_device_type(dev) != VIRTIO_ID_BLOCK)
                return (ENXIO);

        device_set_desc(dev, "VirtIO Block Adapter");

        return (BUS_PROBE_DEFAULT);
}

static int
vtblk_attach(device_t dev)
{
        struct vtblk_softc *sc;
        struct virtio_blk_config blkcfg;
        int error;

        virtio_set_feature_desc(dev, vtblk_feature_desc);

        sc = device_get_softc(dev);
        sc->vtblk_dev = dev;
        VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
        bioq_init(&sc->vtblk_bioq);
        TAILQ_INIT(&sc->vtblk_dump_queue);
        TAILQ_INIT(&sc->vtblk_req_free);
        TAILQ_INIT(&sc->vtblk_req_ready);

        vtblk_setup_sysctl(sc);
        vtblk_setup_features(sc);

        vtblk_read_config(sc, &blkcfg);

        /*
         * With the current sglist(9) implementation, it is not easy
         * for us to support a maximum segment size as adjacent
         * segments are coalesced. For now, just make sure it's larger
         * than the maximum supported transfer size.
         */
        if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
                if (blkcfg.size_max < MAXPHYS) {
                        error = ENOTSUP;
                        device_printf(dev, "host requires unsupported "
                            "maximum segment size feature\n");
                        goto fail;
                }
        }

        sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
        if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
                error = EINVAL;
                device_printf(dev, "fewer than minimum number of segments "
                    "allowed: %d\n", sc->vtblk_max_nsegs);
                goto fail;
        }

        sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
        if (sc->vtblk_sglist == NULL) {
                error = ENOMEM;
                device_printf(dev, "cannot allocate sglist\n");
                goto fail;
        }

        error = vtblk_alloc_virtqueue(sc);
        if (error) {
                device_printf(dev, "cannot allocate virtqueue\n");
                goto fail;
        }

        error = vtblk_request_prealloc(sc);
        if (error) {
                device_printf(dev, "cannot preallocate requests\n");
                goto fail;
        }

        vtblk_alloc_disk(sc, &blkcfg);

        error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
        if (error) {
                device_printf(dev, "cannot setup virtqueue interrupt\n");
                goto fail;
        }

        vtblk_create_disk(sc);

        virtqueue_enable_intr(sc->vtblk_vq);

fail:
        if (error)
                vtblk_detach(dev);

        return (error);
}

static int
vtblk_detach(device_t dev)
{
        struct vtblk_softc *sc;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        sc->vtblk_flags |= VTBLK_FLAG_DETACH;
        if (device_is_attached(dev))
                vtblk_stop(sc);
        VTBLK_UNLOCK(sc);

        vtblk_drain(sc);

        if (sc->vtblk_disk != NULL) {
                disk_destroy(sc->vtblk_disk);
                sc->vtblk_disk = NULL;
        }

        if (sc->vtblk_sglist != NULL) {
                sglist_free(sc->vtblk_sglist);
                sc->vtblk_sglist = NULL;
        }

        VTBLK_LOCK_DESTROY(sc);

        return (0);
}

static int
vtblk_suspend(device_t dev)
{
        struct vtblk_softc *sc;
        int error;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
        /* XXX BMV: virtio_stop(), etc needed here? */
        error = vtblk_quiesce(sc);
        if (error)
                sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
        VTBLK_UNLOCK(sc);

        return (error);
}

static int
vtblk_resume(device_t dev)
{
        struct vtblk_softc *sc;

        sc = device_get_softc(dev);

        VTBLK_LOCK(sc);
        /* XXX BMV: virtio_reinit(), etc needed here? */
        sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
        vtblk_startio(sc);
        VTBLK_UNLOCK(sc);

        return (0);
}

static int
vtblk_shutdown(device_t dev)
{

        return (0);
}

static int
vtblk_config_change(device_t dev)
{
        struct vtblk_softc *sc;
        struct virtio_blk_config blkcfg;
        uint64_t capacity;

        sc = device_get_softc(dev);

        vtblk_read_config(sc, &blkcfg);

        /* Capacity is always in 512-byte units. */
        capacity = blkcfg.capacity * 512;

        if (sc->vtblk_disk->d_mediasize != capacity)
                vtblk_resize_disk(sc, capacity);

        return (0);
}

static int
vtblk_open(struct disk *dp)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? ENXIO : 0);
}

static int
vtblk_close(struct disk *dp)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (0);
}

static int
vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
    struct thread *td)
{
        struct vtblk_softc *sc;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        return (ENOTTY);
}

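/*
 * The kernel crash dump entry point. The dump runs polled via
 * vtblk_poll_request(), so no interrupts are required. A call with a
 * NULL virtual address and zero offset (or an error) completes the
 * dump: the cache is flushed and the bios queued while quiesced are
 * finally biodone'd.
 */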
static int
vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)
{
        struct disk *dp;
        struct vtblk_softc *sc;
        int error;

        dp = arg;
        error = 0;

        if ((sc = dp->d_drv1) == NULL)
                return (ENXIO);

        VTBLK_LOCK(sc);

        vtblk_dump_quiesce(sc);

        if (length > 0)
                error = vtblk_dump_write(sc, virtual, offset, length);
        if (error || (virtual == NULL && offset == 0))
                vtblk_dump_complete(sc);

        VTBLK_UNLOCK(sc);

        return (error);
}

static void
vtblk_strategy(struct bio *bp)
{
        struct vtblk_softc *sc;

        if ((sc = bp->bio_disk->d_drv1) == NULL) {
                vtblk_bio_done(NULL, bp, EINVAL);
                return;
        }

        /*
         * Fail any write if RO. Unfortunately, there does not seem to
         * be a better way to report our readonly'ness to GEOM above.
         */
        if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
            (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH)) {
                vtblk_bio_done(sc, bp, EROFS);
                return;
        }

        VTBLK_LOCK(sc);

        if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
                VTBLK_UNLOCK(sc);
                vtblk_bio_done(sc, bp, ENXIO);
                return;
        }

        bioq_insert_tail(&sc->vtblk_bioq, bp);
        vtblk_startio(sc);

        VTBLK_UNLOCK(sc);
}

static void
vtblk_negotiate_features(struct vtblk_softc *sc)
{
        device_t dev;
        uint64_t features;

        dev = sc->vtblk_dev;
        features = VTBLK_FEATURES;

        sc->vtblk_features = virtio_negotiate_features(dev, features);
}

static void
vtblk_setup_features(struct vtblk_softc *sc)
{
        device_t dev;

        dev = sc->vtblk_dev;

        vtblk_negotiate_features(sc);

        if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
                sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
        if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
                sc->vtblk_flags |= VTBLK_FLAG_READONLY;
        if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
                sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
        if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
                sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;
}

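/*
 * Compute the maximum number of scatter/gather segments per request:
 * VTBLK_MIN_SEGMENTS for the header and ack, plus enough data segments
 * for a MAXPHYS sized transfer. A transfer of MAXPHYS bytes that is
 * not page aligned may span MAXPHYS / PAGE_SIZE + 1 pages, hence the
 * extra segment. The host's seg_max and, when indirect descriptors are
 * in use, VIRTIO_MAX_INDIRECT cap the result.
 */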
static int
vtblk_maximum_segments(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
        device_t dev;
        int nsegs;

        dev = sc->vtblk_dev;
        nsegs = VTBLK_MIN_SEGMENTS;

        if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
                nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
                if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
                        nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
        } else
                nsegs += 1;

        return (nsegs);
}

static int
vtblk_alloc_virtqueue(struct vtblk_softc *sc)
{
        device_t dev;
        struct vq_alloc_info vq_info;

        dev = sc->vtblk_dev;

        VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
            vtblk_vq_intr, sc, &sc->vtblk_vq,
            "%s request", device_get_nameunit(dev));

        return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
}

static void
vtblk_resize_disk(struct vtblk_softc *sc, uint64_t new_capacity)
{
        device_t dev;
        struct disk *dp;
        int error;

        dev = sc->vtblk_dev;
        dp = sc->vtblk_disk;

        dp->d_mediasize = new_capacity;
        if (bootverbose) {
                device_printf(dev, "resized to %juMB (%ju %u byte sectors)\n",
                    (uintmax_t) dp->d_mediasize >> 20,
                    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
                    dp->d_sectorsize);
        }

        error = disk_resize(dp, M_NOWAIT);
        if (error) {
                device_printf(dev,
                    "disk_resize(9) failed, error: %d\n", error);
        }
}

static void
vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
        device_t dev;
        struct disk *dp;

        dev = sc->vtblk_dev;

        sc->vtblk_disk = dp = disk_alloc();
        dp->d_open = vtblk_open;
        dp->d_close = vtblk_close;
        dp->d_ioctl = vtblk_ioctl;
        dp->d_strategy = vtblk_strategy;
        dp->d_name = VTBLK_DISK_NAME;
        dp->d_unit = device_get_unit(dev);
        dp->d_drv1 = sc;
        dp->d_flags = DISKFLAG_CANFLUSHCACHE | DISKFLAG_UNMAPPED_BIO |
            DISKFLAG_DIRECT_COMPLETION;
        dp->d_hba_vendor = virtio_get_vendor(dev);
        dp->d_hba_device = virtio_get_device(dev);
        dp->d_hba_subvendor = virtio_get_subvendor(dev);
        dp->d_hba_subdevice = virtio_get_subdevice(dev);

        if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
                dp->d_dump = vtblk_dump;

        /* Capacity is always in 512-byte units. */
        dp->d_mediasize = blkcfg->capacity * 512;

        if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
                dp->d_sectorsize = blkcfg->blk_size;
        else
                dp->d_sectorsize = 512;

        /*
         * The VirtIO maximum I/O size is given in terms of segments.
         * However, FreeBSD limits I/O size by logical buffer size, not
         * by physically contiguous pages. Therefore, we have to assume
         * no pages are contiguous. This may impose an artificially low
         * maximum I/O size. But in practice, since QEMU advertises 128
         * segments, this gives us a maximum IO size of 125 * PAGE_SIZE,
         * which is typically greater than MAXPHYS. Eventually we should
         * just advertise MAXPHYS and split buffers that are too big.
         *
         * Note we must subtract one additional segment in case of non
         * page aligned buffers.
         */
        dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
            PAGE_SIZE;
        if (dp->d_maxsize < PAGE_SIZE)
                dp->d_maxsize = PAGE_SIZE; /* XXX */

        if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
                dp->d_fwsectors = blkcfg->geometry.sectors;
                dp->d_fwheads = blkcfg->geometry.heads;
        }

        if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY) &&
            blkcfg->topology.physical_block_exp > 0) {
                dp->d_stripesize = dp->d_sectorsize *
                    (1 << blkcfg->topology.physical_block_exp);
                dp->d_stripeoffset = (dp->d_stripesize -
                    blkcfg->topology.alignment_offset * dp->d_sectorsize) %
                    dp->d_stripesize;
        }

        if (vtblk_write_cache_enabled(sc, blkcfg) != 0)
                sc->vtblk_write_cache = VTBLK_CACHE_WRITEBACK;
        else
                sc->vtblk_write_cache = VTBLK_CACHE_WRITETHROUGH;
}

static void
vtblk_create_disk(struct vtblk_softc *sc)
{
        struct disk *dp;

        dp = sc->vtblk_disk;

        vtblk_ident(sc);

        device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
            (uintmax_t) dp->d_mediasize >> 20,
            (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
            dp->d_sectorsize);

        disk_create(dp, DISK_VERSION);
}

static int
vtblk_request_prealloc(struct vtblk_softc *sc)
{
        struct vtblk_request *req;
        int i, nreqs;

        nreqs = virtqueue_size(sc->vtblk_vq);

        /*
         * Preallocate sufficient requests to keep the virtqueue full. Each
         * request consumes VTBLK_MIN_SEGMENTS or more descriptors so reduce
         * the number allocated when indirect descriptors are not available.
         */
        if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
                nreqs /= VTBLK_MIN_SEGMENTS;

        for (i = 0; i < nreqs; i++) {
                req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT);
                if (req == NULL)
                        return (ENOMEM);

                MPASS(sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr)) == 1);
                MPASS(sglist_count(&req->vbr_ack, sizeof(req->vbr_ack)) == 1);

                sc->vtblk_request_count++;
                vtblk_request_enqueue(sc, req);
        }

        return (0);
}

static void
vtblk_request_free(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        MPASS(TAILQ_EMPTY(&sc->vtblk_req_ready));

        while ((req = vtblk_request_dequeue(sc)) != NULL) {
                sc->vtblk_request_count--;
                free(req, M_DEVBUF);
        }

        KASSERT(sc->vtblk_request_count == 0,
            ("%s: leaked %d requests", __func__, sc->vtblk_request_count));
}

static struct vtblk_request *
vtblk_request_dequeue(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        req = TAILQ_FIRST(&sc->vtblk_req_free);
        if (req != NULL) {
                TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);
                bzero(req, sizeof(struct vtblk_request));
        }

        return (req);
}

static void
vtblk_request_enqueue(struct vtblk_softc *sc, struct vtblk_request *req)
{

        TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
}

static struct vtblk_request *
vtblk_request_next_ready(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        req = TAILQ_FIRST(&sc->vtblk_req_ready);
        if (req != NULL)
                TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);

        return (req);
}

static void
vtblk_request_requeue_ready(struct vtblk_softc *sc, struct vtblk_request *req)
{

        /* NOTE: Currently, there will be at most one request in the queue. */
        TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);
}

static struct vtblk_request *
vtblk_request_next(struct vtblk_softc *sc)
{
        struct vtblk_request *req;

        req = vtblk_request_next_ready(sc);
        if (req != NULL)
                return (req);

        return (vtblk_request_bio(sc));
}

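/*
 * Turn the bio at the head of the queue into a virtio block request.
 * Note the header's sector field is always in 512-byte units,
 * regardless of the block size the host advertises.
 */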
static struct vtblk_request *
vtblk_request_bio(struct vtblk_softc *sc)
{
        struct bio_queue_head *bioq;
        struct vtblk_request *req;
        struct bio *bp;

        bioq = &sc->vtblk_bioq;

        if (bioq_first(bioq) == NULL)
                return (NULL);

        req = vtblk_request_dequeue(sc);
        if (req == NULL)
                return (NULL);

        bp = bioq_takefirst(bioq);
        req->vbr_bp = bp;
        req->vbr_ack = -1;
        req->vbr_hdr.ioprio = 1;

        switch (bp->bio_cmd) {
        case BIO_FLUSH:
                req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
                break;
        case BIO_READ:
                req->vbr_hdr.type = VIRTIO_BLK_T_IN;
                req->vbr_hdr.sector = bp->bio_offset / 512;
                break;
        case BIO_WRITE:
                req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
                req->vbr_hdr.sector = bp->bio_offset / 512;
                break;
        default:
                panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
        }

        if (bp->bio_flags & BIO_ORDERED)
                req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;

        return (req);
}

static int
vtblk_request_execute(struct vtblk_softc *sc, struct vtblk_request *req)
{
        struct virtqueue *vq;
        struct sglist *sg;
        struct bio *bp;
        int ordered, readable, writable, error;

        vq = sc->vtblk_vq;
        sg = sc->vtblk_sglist;
        bp = req->vbr_bp;
        ordered = 0;
        writable = 0;

        /*
         * Some hosts (such as bhyve) do not implement the barrier feature,
         * so we emulate it in the driver by allowing the barrier request
         * to be the only one in flight.
         */
        if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
                if (sc->vtblk_req_ordered != NULL)
                        return (EBUSY);
                if (bp->bio_flags & BIO_ORDERED) {
                        if (!virtqueue_empty(vq))
                                return (EBUSY);
                        ordered = 1;
                        req->vbr_hdr.type &= ~VIRTIO_BLK_T_BARRIER;
                }
        }

        sglist_reset(sg);
        sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));

        if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
                error = sglist_append_bio(sg, bp);
                if (error || sg->sg_nseg == sg->sg_maxseg) {
                        panic("%s: bio %p data buffer too big %d",
                            __func__, bp, error);
                }

                /* BIO_READ means the host writes into our buffer. */
                if (bp->bio_cmd == BIO_READ)
                        writable = sg->sg_nseg - 1;
        }

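        /*
         * The ack status byte is always written by the host, so count
         * it as a device-writable segment along with any BIO_READ data
         * segments appended above.
         */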
        writable++;
        sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
        readable = sg->sg_nseg - writable;

        error = virtqueue_enqueue(vq, req, sg, readable, writable);
        if (error == 0 && ordered)
                sc->vtblk_req_ordered = req;

        return (error);
}

static int
vtblk_request_error(struct vtblk_request *req)
{
        int error;

        switch (req->vbr_ack) {
        case VIRTIO_BLK_S_OK:
                error = 0;
                break;
        case VIRTIO_BLK_S_UNSUPP:
                error = ENOTSUP;
                break;
        default:
                error = EIO;
                break;
        }

        return (error);
}

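/*
 * Collect the completed requests from the virtqueue, recording each
 * bio's completion status and gathering the bios on the caller's
 * queue. The bios are biodone'd later by vtblk_done_completed(),
 * outside the softc lock, because of GEOM direct dispatch.
 */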
static void
vtblk_queue_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
        struct vtblk_request *req;
        struct bio *bp;

        while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
                if (sc->vtblk_req_ordered != NULL) {
                        MPASS(sc->vtblk_req_ordered == req);
                        sc->vtblk_req_ordered = NULL;
                }

                bp = req->vbr_bp;
                bp->bio_error = vtblk_request_error(req);
                TAILQ_INSERT_TAIL(queue, bp, bio_queue);

                vtblk_request_enqueue(sc, req);
        }
}

static void
vtblk_done_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
        struct bio *bp, *tmp;

        TAILQ_FOREACH_SAFE(bp, queue, bio_queue, tmp) {
                if (bp->bio_error != 0)
                        disk_err(bp, "hard error", -1, 1);
                vtblk_bio_done(sc, bp, bp->bio_error);
        }
}

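/*
 * Fail every request still sitting in the virtqueue with ENXIO and
 * return its vtblk_request to the free list; used when tearing the
 * device down.
 */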
static void
vtblk_drain_vq(struct vtblk_softc *sc)
{
        struct virtqueue *vq;
        struct vtblk_request *req;
        int last;

        vq = sc->vtblk_vq;
        last = 0;

        while ((req = virtqueue_drain(vq, &last)) != NULL) {
                vtblk_bio_done(sc, req->vbr_bp, ENXIO);
                vtblk_request_enqueue(sc, req);
        }

        sc->vtblk_req_ordered = NULL;
        KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}

static void
vtblk_drain(struct vtblk_softc *sc)
{
        struct bio_queue queue;
        struct bio_queue_head *bioq;
        struct vtblk_request *req;
        struct bio *bp;

        bioq = &sc->vtblk_bioq;
        TAILQ_INIT(&queue);

        if (sc->vtblk_vq != NULL) {
                vtblk_queue_completed(sc, &queue);
                vtblk_done_completed(sc, &queue);

                vtblk_drain_vq(sc);
        }

        while ((req = vtblk_request_next_ready(sc)) != NULL) {
                vtblk_bio_done(sc, req->vbr_bp, ENXIO);
                vtblk_request_enqueue(sc, req);
        }

        while (bioq_first(bioq) != NULL) {
                bp = bioq_takefirst(bioq);
                vtblk_bio_done(sc, bp, ENXIO);
        }

        vtblk_request_free(sc);
}

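/*
 * Submit as many queued bios as the virtqueue will hold, notifying the
 * host once if anything was enqueued. A request that cannot be
 * executed now (e.g. an emulated barrier waiting for the queue to
 * empty) is placed on the ready queue and retried first on the next
 * pass.
 */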
static void
vtblk_startio(struct vtblk_softc *sc)
{
        struct virtqueue *vq;
        struct vtblk_request *req;
        int enq;

        VTBLK_LOCK_ASSERT(sc);
        vq = sc->vtblk_vq;
        enq = 0;

        if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
                return;

        while (!virtqueue_full(vq)) {
                req = vtblk_request_next(sc);
                if (req == NULL)
                        break;

                if (vtblk_request_execute(sc, req) != 0) {
                        vtblk_request_requeue_ready(sc, req);
                        break;
                }

                enq++;
        }

        if (enq > 0)
                virtqueue_notify(vq);
}

static void
vtblk_bio_done(struct vtblk_softc *sc, struct bio *bp, int error)
{

        /* Because of GEOM direct dispatch, we cannot hold any locks. */
        if (sc != NULL)
                VTBLK_LOCK_ASSERT_NOTOWNED(sc);

        if (error) {
                bp->bio_resid = bp->bio_bcount;
                bp->bio_error = error;
                bp->bio_flags |= BIO_ERROR;
        }

        biodone(bp);
}

#define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg)                  \
        if (virtio_with_feature(_dev, _feature)) {                      \
                virtio_read_device_config(_dev,                         \
                    offsetof(struct virtio_blk_config, _field),         \
                    &(_cfg)->_field, sizeof((_cfg)->_field));           \
        }

static void
vtblk_read_config(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
        device_t dev;

        dev = sc->vtblk_dev;

        bzero(blkcfg, sizeof(struct virtio_blk_config));

        /* The capacity is always available. */
        virtio_read_device_config(dev, offsetof(struct virtio_blk_config,
            capacity), &blkcfg->capacity, sizeof(blkcfg->capacity));

        /* Read the configuration if the feature was negotiated. */
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
        VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, writeback, blkcfg);
}

#undef VTBLK_GET_CONFIG

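/*
 * Fetch the device's serial number with a VIRTIO_BLK_T_GET_ID request
 * and store it as the disk ident. This runs at attach time, before the
 * queue interrupt is enabled, so the request is polled. The "no_ident"
 * tunable skips the request entirely.
 */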
static void
vtblk_ident(struct vtblk_softc *sc)
{
        struct bio buf;
        struct disk *dp;
        struct vtblk_request *req;
        int len, error;

        dp = sc->vtblk_disk;
        len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);

        if (vtblk_tunable_int(sc, "no_ident", vtblk_no_ident) != 0)
                return;

        req = vtblk_request_dequeue(sc);
        if (req == NULL)
                return;

        req->vbr_ack = -1;
        req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
        req->vbr_hdr.ioprio = 1;
        req->vbr_hdr.sector = 0;

        req->vbr_bp = &buf;
        bzero(&buf, sizeof(struct bio));

        buf.bio_cmd = BIO_READ;
        buf.bio_data = dp->d_ident;
        buf.bio_bcount = len;

        VTBLK_LOCK(sc);
        error = vtblk_poll_request(sc, req);
        VTBLK_UNLOCK(sc);

        vtblk_request_enqueue(sc, req);

        if (error) {
                device_printf(sc->vtblk_dev,
                    "error getting device identifier: %d\n", error);
        }
}

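/*
 * Synchronously execute a single request, polling until the host
 * completes it. Only used where interrupts cannot be relied upon:
 * device identification during attach and crash dump writes.
 */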
static int
vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
        struct virtqueue *vq;
        int error;

        vq = sc->vtblk_vq;

        if (!virtqueue_empty(vq))
                return (EBUSY);

        error = vtblk_request_execute(sc, req);
        if (error)
                return (error);

        virtqueue_notify(vq);
        virtqueue_poll(vq, NULL);

        error = vtblk_request_error(req);
        if (error && bootverbose) {
                device_printf(sc->vtblk_dev,
                    "%s: IO error: %d\n", __func__, error);
        }

        return (error);
}

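/*
 * Wait, up to VTBLK_QUIESCE_TIMEOUT, for the in-flight requests to
 * complete. The interrupt handler performs the wakeup once the
 * virtqueue is empty and VTBLK_FLAG_SUSPEND is set.
 */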
static int
vtblk_quiesce(struct vtblk_softc *sc)
{
        int error;

        VTBLK_LOCK_ASSERT(sc);
        error = 0;

        while (!virtqueue_empty(sc->vtblk_vq)) {
                if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq",
                    VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) {
                        error = EBUSY;
                        break;
                }
        }

        return (error);
}

static void
vtblk_vq_intr(void *xsc)
{
        struct vtblk_softc *sc;
        struct virtqueue *vq;
        struct bio_queue queue;

        sc = xsc;
        vq = sc->vtblk_vq;
        TAILQ_INIT(&queue);

        VTBLK_LOCK(sc);

again:
        if (sc->vtblk_flags & VTBLK_FLAG_DETACH)
                goto out;

        vtblk_queue_completed(sc, &queue);
        vtblk_startio(sc);

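        /*
         * More requests may have completed between draining the queue
         * and re-enabling the interrupt, in which case loop back
         * rather than miss them.
         */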
        if (virtqueue_enable_intr(vq) != 0) {
                virtqueue_disable_intr(vq);
                goto again;
        }

        if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
                wakeup(&sc->vtblk_vq);

out:
        VTBLK_UNLOCK(sc);
        vtblk_done_completed(sc, &queue);
}

static void
vtblk_stop(struct vtblk_softc *sc)
{

        virtqueue_disable_intr(sc->vtblk_vq);
        virtio_stop(sc->vtblk_dev);
}

static void
vtblk_dump_quiesce(struct vtblk_softc *sc)
{

        /*
         * Spin here until all the requests in-flight at the time of the
         * dump are completed and queued. The queued requests will be
         * biodone'd once the dump is finished.
         */
        while (!virtqueue_empty(sc->vtblk_vq))
                vtblk_queue_completed(sc, &sc->vtblk_dump_queue);
}

static int
vtblk_dump_write(struct vtblk_softc *sc, void *virtual, off_t offset,
    size_t length)
{
        struct bio buf;
        struct vtblk_request *req;

        req = &sc->vtblk_dump_request;
        req->vbr_ack = -1;
        req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
        req->vbr_hdr.ioprio = 1;
        req->vbr_hdr.sector = offset / 512;

        req->vbr_bp = &buf;
        bzero(&buf, sizeof(struct bio));

        buf.bio_cmd = BIO_WRITE;
        buf.bio_data = virtual;
        buf.bio_bcount = length;

        return (vtblk_poll_request(sc, req));
}

static int
vtblk_dump_flush(struct vtblk_softc *sc)
{
        struct bio buf;
        struct vtblk_request *req;

        req = &sc->vtblk_dump_request;
        req->vbr_ack = -1;
        req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
        req->vbr_hdr.ioprio = 1;
        req->vbr_hdr.sector = 0;

        req->vbr_bp = &buf;
        bzero(&buf, sizeof(struct bio));

        buf.bio_cmd = BIO_FLUSH;

        return (vtblk_poll_request(sc, req));
}

static void
vtblk_dump_complete(struct vtblk_softc *sc)
{

        vtblk_dump_flush(sc);

        VTBLK_UNLOCK(sc);
        vtblk_done_completed(sc, &sc->vtblk_dump_queue);
        VTBLK_LOCK(sc);
}

static void
vtblk_set_write_cache(struct vtblk_softc *sc, int wc)
{

        /* Set either writeback (1) or writethrough (0) mode. */
        virtio_write_dev_config_1(sc->vtblk_dev,
            offsetof(struct virtio_blk_config, writeback), wc);
}

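/*
 * Determine the initial write cache mode. When the host lets us toggle
 * the cache (VIRTIO_BLK_F_CONFIG_WCE), a valid "writecache_mode"
 * tunable overrides the host's setting; otherwise the mode read from
 * the config space is used. Without CONFIG_WCE, the mode is fixed by
 * whether VIRTIO_BLK_F_WCE was negotiated.
 */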
static int
vtblk_write_cache_enabled(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
        int wc;

        if (sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) {
                wc = vtblk_tunable_int(sc, "writecache_mode",
                    vtblk_writecache_mode);
                if (wc >= 0 && wc < VTBLK_CACHE_MAX)
                        vtblk_set_write_cache(sc, wc);
                else
                        wc = blkcfg->writeback;
        } else
                wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE);

        return (wc);
}

static int
vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS)
{
        struct vtblk_softc *sc;
        int wc, error;

        sc = oidp->oid_arg1;
        wc = sc->vtblk_write_cache;

        error = sysctl_handle_int(oidp, &wc, 0, req);
        if (error || req->newptr == NULL)
                return (error);
        if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
                return (EPERM);
        if (wc < 0 || wc >= VTBLK_CACHE_MAX)
                return (EINVAL);

        VTBLK_LOCK(sc);
        sc->vtblk_write_cache = wc;
        vtblk_set_write_cache(sc, sc->vtblk_write_cache);
        VTBLK_UNLOCK(sc);

        return (0);
}

static void
vtblk_setup_sysctl(struct vtblk_softc *sc)
{
        device_t dev;
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *tree;
        struct sysctl_oid_list *child;

        dev = sc->vtblk_dev;
        ctx = device_get_sysctl_ctx(dev);
        tree = device_get_sysctl_tree(dev);
        child = SYSCTL_CHILDREN(tree);

        SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "writecache_mode",
            CTLTYPE_INT | CTLFLAG_RW, sc, 0, vtblk_write_cache_sysctl,
            "I", "Write cache mode (writethrough (0) or writeback (1))");
}

static int
vtblk_tunable_int(struct vtblk_softc *sc, const char *knob, int def)
{
        char path[64];

        snprintf(path, sizeof(path),
            "hw.vtblk.%d.%s", device_get_unit(sc->vtblk_dev), knob);
        TUNABLE_INT_FETCH(path, &def);

        return (def);
}