 * Copyright (c) 2011 NetApp, Inc.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/ioctl.h>

#define	VTBLK_RINGSZ	64

#define	VTBLK_CFGSZ	28
#define	VTBLK_R_CFG		VTCFG_R_CFG1
#define	VTBLK_R_CFG_END		(VTBLK_R_CFG + VTBLK_CFGSZ - 1)
#define	VTBLK_R_MAX		VTBLK_R_CFG_END

#define	VTBLK_REGSZ		(VTBLK_R_MAX + 1)
#define	VTBLK_MAXSEGS	32

#define	VTBLK_S_IOERR	1

#define	VTBLK_S_HOSTCAPS      \
  ( 0x00000004 |	/* host maximum request segments */ \
    0x10000000 )	/* supports indirect descriptors */
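/*
 * In standard virtio feature-bit terms these are VIRTIO_BLK_F_SEG_MAX
 * (bit 2) and VIRTIO_RING_F_INDIRECT_DESC (bit 28).
 */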
static int use_msix = 1;

	uint16_t	hq_cur_aidx;		/* trails behind 'avail_idx' */

	/* Host-context pointers to the queue */
	struct virtio_desc *hq_dtable;
	uint16_t	*hq_avail_flags;
	uint16_t	*hq_avail_idx;		/* monotonically increasing */
	uint16_t	*hq_avail_ring;

	uint16_t	*hq_used_flags;
	uint16_t	*hq_used_idx;		/* monotonically increasing */
	struct virtio_used *hq_used_ring;
	uint64_t	vbc_capacity;
	uint32_t	vbc_size_max;
	uint32_t	vbc_seg_max;
	uint32_t	vbc_blk_size;
	uint32_t	vbc_sectors_max;

CTASSERT(sizeof(struct vtblk_config) == VTBLK_CFGSZ);
 * Fixed-size block header
struct virtio_blk_hdr {
#define	VBH_OP_READ		0
#define	VBH_OP_WRITE		1
#define	VBH_FLAG_BARRIER	0x80000000	/* OR'ed into vbh_type */
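	/*
	 * Per the virtio block spec the fixed header carries the request
	 * type and the starting sector; they are consumed below as
	 * vbh_type and vbh_sector.
	 */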
static int pci_vtblk_debug;
#define	DPRINTF(params) if (pci_vtblk_debug) printf params
#define	WPRINTF(params) printf params

struct pci_vtblk_softc {
	struct pci_devinst *vbsc_pi;
	uint32_t	vbsc_features;
	struct vring_hqueue vbsc_q;
	struct vtblk_config vbsc_cfg;
	uint16_t	msix_table_idx_req;
	uint16_t	msix_table_idx_cfg;
#define	vtblk_ctx(sc)	((sc)->vbsc_pi->pi_vmctx)
 * Return the size of IO BAR that maps virtio header and device specific
 * region. The size would vary depending on whether MSI-X is enabled or
pci_vtblk_iosize(struct pci_devinst *pi)
	if (pci_msix_enabled(pi))
		return (VTBLK_REGSZ);
	return (VTBLK_REGSZ - (VTCFG_R_CFG1 - VTCFG_R_MSIX));
 * Return the number of available descriptors in the vring taking care
 * of the 16-bit index wraparound.
hq_num_avail(struct vring_hqueue *hq)
	 * We're just computing (a-b) in GF(2^16).
	 *
	 * The only glitch here is that in standard C,
	 * uint16_t promotes to (signed) int when int has
	 * more than 16 bits (pretty much always now), so
	 * we have to force it back to unsigned.
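	 *
	 * For example (illustrative values): with *hq_avail_idx at 0x0002
	 * after wrapping and hq_cur_aidx still at 0xfffe, the 16-bit
	 * modular difference is 4 pending descriptors.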
	ndesc = (unsigned)*hq->hq_avail_idx - (unsigned)hq->hq_cur_aidx;

	assert(ndesc <= hq->hq_size);
pci_vtblk_update_status(struct pci_vtblk_softc *sc, uint32_t value)
		DPRINTF(("vtblk: device reset requested !\n"));

	sc->vbsc_status = value;
pci_vtblk_proc(struct pci_vtblk_softc *sc, struct vring_hqueue *hq)
	struct iovec iov[VTBLK_MAXSEGS];
	struct virtio_blk_hdr *vbh;
	struct virtio_desc *vd, *vid;
	struct virtio_used *vu;
	int uidx, aidx, didx;

	uidx = *hq->hq_used_idx;
	aidx = hq->hq_cur_aidx;
	didx = hq->hq_avail_ring[aidx % hq->hq_size];
	assert(didx >= 0 && didx < hq->hq_size);

	vd = &hq->hq_dtable[didx];
	 * Verify that the descriptor is indirect, and obtain
	 * the pointer to the indirect descriptor.
	 * There has to be space for at least 3 descriptors
	 * in the indirect descriptor array: the block header,
	 * 1 or more data descriptors, and a status byte.
	assert(vd->vd_flags & VRING_DESC_F_INDIRECT);

	nsegs = vd->vd_len / sizeof(struct virtio_desc);
	assert(nsegs < VTBLK_MAXSEGS + 2);

	vid = paddr_guest2host(vtblk_ctx(sc), vd->vd_addr, vd->vd_len);
	assert((vid->vd_flags & VRING_DESC_F_INDIRECT) == 0);
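	/*
	 * Layout of the indirect descriptor table:
	 *   vid[0]           read-only fixed request header
	 *   vid[1..nsegs-2]  data buffers
	 *   vid[nsegs-1]     write-only status byte
	 */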
	 * The first descriptor will be the read-only fixed header
	vbh = paddr_guest2host(vtblk_ctx(sc), vid[0].vd_addr,
	    sizeof(struct virtio_blk_hdr));
	assert(vid[0].vd_len == sizeof(struct virtio_blk_hdr));
	assert(vid[0].vd_flags & VRING_DESC_F_NEXT);
	assert((vid[0].vd_flags & VRING_DESC_F_WRITE) == 0);
	 * The guest should not be setting the BARRIER flag because
	 * we don't advertise the capability.
	type = vbh->vbh_type & ~VBH_FLAG_BARRIER;
	writeop = (type == VBH_OP_WRITE);

	offset = vbh->vbh_sector * DEV_BSIZE;
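	/*
	 * virtio block sector numbers are always in 512-byte (DEV_BSIZE)
	 * units, independent of the backing device's sector size.
	 */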
	 * Build up the iovec based on the guest's data descriptors
	for (i = 1, iolen = 0; i < nsegs - 1; i++) {
		iov[i-1].iov_base = paddr_guest2host(vtblk_ctx(sc),
		    vid[i].vd_addr, vid[i].vd_len);
		iov[i-1].iov_len = vid[i].vd_len;
		iolen += vid[i].vd_len;

		assert(vid[i].vd_flags & VRING_DESC_F_NEXT);
		assert((vid[i].vd_flags & VRING_DESC_F_INDIRECT) == 0);

		 * - write op implies read-only descriptor,
		 * - read op implies write-only descriptor,
		 * therefore test the inverse of the descriptor bit
		assert(((vid[i].vd_flags & VRING_DESC_F_WRITE) == 0) ==
	/* Lastly, get the address of the status byte */
	status = paddr_guest2host(vtblk_ctx(sc), vid[nsegs - 1].vd_addr, 1);
	assert(vid[nsegs - 1].vd_len == 1);
	assert((vid[nsegs - 1].vd_flags & VRING_DESC_F_NEXT) == 0);
	assert(vid[nsegs - 1].vd_flags & VRING_DESC_F_WRITE);

	DPRINTF(("virtio-block: %s op, %d bytes, %d segs, offset %ld\n\r",
	    writeop ? "write" : "read", iolen, nsegs - 2, offset));
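	/*
	 * The whole scatter/gather list is handed to pwritev()/preadv()
	 * as a single request at the byte offset computed above.
	 */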
		err = pwritev(sc->vbsc_fd, iov, nsegs - 2, offset);
		err = preadv(sc->vbsc_fd, iov, nsegs - 2, offset);

	*status = err < 0 ? VTBLK_S_IOERR : VTBLK_S_OK;
	 * Return the single indirect descriptor back to the host
	vu = &hq->hq_used_ring[uidx % hq->hq_size];
	*hq->hq_used_idx += 1;
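	/*
	 * Advancing the used index is what lets the guest observe the
	 * request as complete; the status byte written above reports
	 * success or VTBLK_S_IOERR.
	 */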
pci_vtblk_qnotify(struct pci_vtblk_softc *sc)
	struct vring_hqueue *hq = &sc->vbsc_q;

	 * Calculate number of ring entries to process
	ndescs = hq_num_avail(hq);
	 * Run through all the available entries, processing each
	 * block request in turn
	for (i = 0; i < ndescs; i++)
		pci_vtblk_proc(sc, hq);

	 * Generate an interrupt if able
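	 *
	 * With MSI-X the request queue has its own table vector; otherwise
	 * a legacy MSI is raised and the ISR flag (cleared when the guest
	 * reads it) records the pending interrupt.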
	if ((*hq->hq_avail_flags & VRING_AVAIL_F_NO_INTERRUPT) == 0) {
			pci_generate_msix(sc->vbsc_pi, sc->msix_table_idx_req);
		} else if (sc->vbsc_isr == 0) {
			pci_generate_msi(sc->vbsc_pi, 0);
pci_vtblk_ring_init(struct pci_vtblk_softc *sc, uint64_t pfn)
	struct vring_hqueue *hq;

	sc->vbsc_pfn = pfn << VRING_PFN;

	 * Set up host pointers to the various parts of the
	hq->hq_size = VTBLK_RINGSZ;

	hq->hq_dtable = paddr_guest2host(vtblk_ctx(sc), pfn << VRING_PFN,
	    vring_size(VTBLK_RINGSZ));
	hq->hq_avail_flags = (uint16_t *)(hq->hq_dtable + hq->hq_size);
	hq->hq_avail_idx = hq->hq_avail_flags + 1;
	hq->hq_avail_ring = hq->hq_avail_flags + 2;
	hq->hq_used_flags = (uint16_t *)roundup2((uintptr_t)hq->hq_avail_ring,
	hq->hq_used_idx = hq->hq_used_flags + 1;
	hq->hq_used_ring = (struct virtio_used *)(hq->hq_used_flags + 2);
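	/*
	 * This carves the guest memory at 'pfn' into the legacy split
	 * ring layout: the descriptor table, followed by the avail ring
	 * (16-bit flags, index and ring entries), followed, after
	 * alignment, by the used ring of struct virtio_used entries.
	 */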
	 * Initialize queue indexes
pci_vtblk_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
	struct pci_vtblk_softc *sc;

		printf("virtio-block: backing device required\n");

	 * The supplied backing file has to exist
	fd = open(opts, O_RDWR);
		perror("Could not open backing file");

	if (fstat(fd, &sbuf) < 0) {
		perror("Could not stat backing file");
	 * Deal with raw devices
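	 * (fstat() does not report a useful size for a character device,
	 * so the media size and sector size are queried with disk ioctls)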
	if (S_ISCHR(sbuf.st_mode)) {
		if (ioctl(fd, DIOCGMEDIASIZE, &size) < 0 ||
		    ioctl(fd, DIOCGSECTORSIZE, &sectsz)) {
			perror("Could not fetch dev blk/sector size");
	sc = malloc(sizeof(struct pci_vtblk_softc));
	memset(sc, 0, sizeof(struct pci_vtblk_softc));

	/* setup virtio block config space */
	sc->vbsc_cfg.vbc_capacity = size / sectsz;
	sc->vbsc_cfg.vbc_seg_max = VTBLK_MAXSEGS;
	sc->vbsc_cfg.vbc_blk_size = sectsz;
	sc->vbsc_cfg.vbc_size_max = 0;	/* not negotiated */
	sc->vbsc_cfg.vbc_geom_c = 0;	/* no geometry */
	sc->vbsc_cfg.vbc_geom_h = 0;
	sc->vbsc_cfg.vbc_geom_s = 0;
	sc->vbsc_cfg.vbc_sectors_max = 0;

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_BLOCK);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_BLOCK);
	if ((env_msi = getenv("BHYVE_USE_MSI"))) {
		if (strcasecmp(env_msi, "yes") == 0)

		sc->msix_table_idx_req = VIRTIO_MSI_NO_VECTOR;
		sc->msix_table_idx_cfg = VIRTIO_MSI_NO_VECTOR;

		if (pci_emul_add_msixcap(pi, 2, 1))
		pci_emul_add_msicap(pi, 1);
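	/*
	 * When MSI-X is used, two table entries are allocated: one for
	 * configuration-change notifications and one for the request
	 * queue.  Otherwise a single legacy MSI message is advertised.
	 */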
	pci_emul_alloc_bar(pi, 0, PCIBAR_IO, VTBLK_REGSZ);
vtblk_adjust_offset(struct pci_devinst *pi, uint64_t offset)
	 * Device-specific offsets used by the guest shift depending on
	 * whether the MSI-X capability is enabled
	if (!pci_msix_enabled(pi)) {
		if (offset >= VTCFG_R_MSIX)
			return (offset + (VTCFG_R_CFG1 - VTCFG_R_MSIX));
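		/*
		 * e.g. with MSI-X disabled, a guest access at VTCFG_R_MSIX
		 * maps to VTCFG_R_CFG1, the start of the device-specific
		 * config region.
		 */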
pci_vtblk_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
		int baridx, uint64_t offset, int size, uint64_t value)
	struct pci_vtblk_softc *sc = pi->pi_arg;

	if (baridx == pci_msix_table_bar(pi) ||
	    baridx == pci_msix_pba_bar(pi)) {
		pci_emul_msix_twrite(pi, offset, size, value);

	if (offset + size > pci_vtblk_iosize(pi)) {
		DPRINTF(("vtblk_write: 2big, offset %ld size %d\n",

	offset = vtblk_adjust_offset(pi, offset);

	case VTCFG_R_GUESTCAP:
		sc->vbsc_features = value & VTBLK_S_HOSTCAPS;
		pci_vtblk_ring_init(sc, value);
		sc->vbsc_lastq = value;
	case VTCFG_R_QNOTIFY:
		pci_vtblk_qnotify(sc);
		pci_vtblk_update_status(sc, value);
		sc->msix_table_idx_cfg = value;
		sc->msix_table_idx_req = value;
	case VTCFG_R_HOSTCAP:
	case VTBLK_R_CFG ... VTBLK_R_CFG_END:
		DPRINTF(("vtblk: write to readonly reg %ld\n\r", offset));
		DPRINTF(("vtblk: unknown i/o write offset %ld\n\r", offset));
pci_vtblk_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
		int baridx, uint64_t offset, int size)
	struct pci_vtblk_softc *sc = pi->pi_arg;

	if (baridx == pci_msix_table_bar(pi) ||
	    baridx == pci_msix_pba_bar(pi)) {
		return (pci_emul_msix_tread(pi, offset, size));

	if (offset + size > pci_vtblk_iosize(pi)) {
		DPRINTF(("vtblk_read: 2big, offset %ld size %d\n",

	offset = vtblk_adjust_offset(pi, offset);

	case VTCFG_R_HOSTCAP:
		value = VTBLK_S_HOSTCAPS;
	case VTCFG_R_GUESTCAP:
		value = sc->vbsc_features;	/* XXX never read ? */
		value = sc->vbsc_pfn >> VRING_PFN;
		value = (sc->vbsc_lastq == 0) ? VTBLK_RINGSZ : 0;
		value = sc->vbsc_lastq;		/* XXX never read ? */
	case VTCFG_R_QNOTIFY:
		value = 0;			/* XXX never read ? */
		value = sc->vbsc_status;
		value = sc->vbsc_isr;
		sc->vbsc_isr = 0;		/* a read clears this flag */
		value = sc->msix_table_idx_cfg;
		value = sc->msix_table_idx_req;
	case VTBLK_R_CFG ... VTBLK_R_CFG_END:
		assert(size + offset <= (VTBLK_R_CFG_END + 1));
		ptr = (uint8_t *)&sc->vbsc_cfg + offset - VTBLK_R_CFG;
			value = *(uint8_t *)ptr;
		} else if (size == 2) {
			value = *(uint16_t *)ptr;
			value = *(uint32_t *)ptr;
		DPRINTF(("vtblk: unknown i/o read offset %ld\n\r", offset));
struct pci_devemu pci_de_vblk = {
	.pe_emu =	"virtio-blk",
	.pe_init =	pci_vtblk_init,
	.pe_barwrite =	pci_vtblk_write,
	.pe_barread =	pci_vtblk_read

PCI_EMUL_SET(pci_de_vblk);