/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013  Chris Torek <torek @ torek net>
 * All rights reserved.
 * Copyright (c) 2019 Joyent, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/uio.h>

#include <machine/atomic.h>

#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <pthread.h>
#include <pthread_np.h>

#include "bhyverun.h"
#include "pci_emul.h"
#include "virtio.h"
/*
 * Functions for dealing with generalized "virtual devices" as
 * defined by <https://www.google.com/#output=search&q=virtio+spec>
 */

/*
 * In case we decide to relax the "virtio softc comes at the
 * front of virtio-based device softc" constraint, let's use
 * this macro to convert.
 */
#define	DEV_SOFTC(vs) ((void *)(vs))
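/*
 * Illustrative sketch (not part of the original file): the DEV_SOFTC()
 * cast works because each device embeds the virtio_softc as the first
 * member of its own softc, e.g. for a hypothetical device "foo":
 */
#if 0
struct pci_vtfoo_softc {
	struct virtio_softc vsc_vs;	/* must be first: vs == dev_softc */
	pthread_mutex_t vsc_mtx;	/* guards device state */
	uint32_t vsc_config;		/* hypothetical device config reg */
};
#endif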
/*
 * Link a virtio_softc to its constants, the device softc, and
 * the PCI emulation.
 */
void
vi_softc_linkup(struct virtio_softc *vs, struct virtio_consts *vc,
		void *dev_softc, struct pci_devinst *pi,
		struct vqueue_info *queues)
{
	int i;

	/* vs and dev_softc addresses must match */
	assert((void *)vs == dev_softc);
	vs->vs_vc = vc;
	vs->vs_pi = pi;
	pi->pi_arg = vs;

	vs->vs_queues = queues;
	for (i = 0; i < vc->vc_nvq; i++) {
		queues[i].vq_vs = vs;
		queues[i].vq_num = i;
	}
}
/*
 * Reset device (device-wide).  This erases all queues, i.e.,
 * all the queues become invalid (though we don't wipe out the
 * internal pointers, we just clear the VQ_ALLOC flag).
 *
 * It resets negotiated features to "none".
 *
 * If MSI-X is enabled, this also resets all the vectors to NO_VECTOR.
 */
void
vi_reset_dev(struct virtio_softc *vs)
{
	struct vqueue_info *vq;
	int i, nvq;

	if (vs->vs_mtx)
		assert(pthread_mutex_isowned_np(vs->vs_mtx));

	nvq = vs->vs_vc->vc_nvq;
	for (vq = vs->vs_queues, i = 0; i < nvq; vq++, i++) {
		vq->vq_flags = 0;
		vq->vq_last_avail = 0;
		vq->vq_next_used = 0;
		vq->vq_save_used = 0;
		vq->vq_pfn = 0;
		vq->vq_msix_idx = VIRTIO_MSI_NO_VECTOR;
	}
	vs->vs_negotiated_caps = 0;
	vs->vs_curq = 0;
	/* vs->vs_status = 0; -- redundant */
	if (vs->vs_isr)
		pci_lintr_deassert(vs->vs_pi);
	vs->vs_isr = 0;
	vs->vs_msix_cfg_idx = VIRTIO_MSI_NO_VECTOR;
}
/*
 * Set I/O BAR (usually 0) to map PCI config registers.
 */
void
vi_set_io_bar(struct virtio_softc *vs, int barnum)
{
	size_t size;

	/*
	 * ??? should we use CFG0 if MSI-X is disabled?
	 * Existing code did not...
	 */
	size = VTCFG_R_CFG1 + vs->vs_vc->vc_cfgsize;
	pci_emul_alloc_bar(vs->vs_pi, barnum, PCIBAR_IO, size);
}
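/*
 * Worked example (assuming the legacy register layout, where the standard
 * header occupies VTCFG_R_CFG1 == 24 bytes once the two MSI-X vector
 * registers are counted): a device with vc_cfgsize = 8 requests an I/O
 * BAR of 24 + 8 = 32 bytes; pci_emul_alloc_bar() rounds BAR sizes up to
 * a power of two as PCI requires.
 */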
/*
 * Initialize MSI-X vector capabilities if we're to use MSI-X,
 * or MSI capabilities if not.
 *
 * We assume we want one MSI-X vector per queue, here, plus one
 * for the config vec.
 */
int
vi_intr_init(struct virtio_softc *vs, int barnum, int use_msix)
{
	int nvec;

	if (use_msix) {
		vs->vs_flags |= VIRTIO_USE_MSIX;
		VS_LOCK(vs);
		vi_reset_dev(vs); /* set all vectors to NO_VECTOR */
		VS_UNLOCK(vs);
		nvec = vs->vs_vc->vc_nvq + 1;
		if (pci_emul_add_msixcap(vs->vs_pi, nvec, barnum))
			return (1);
	} else
		vs->vs_flags &= ~VIRTIO_USE_MSIX;

	/* Only 1 MSI vector for bhyve */
	pci_emul_add_msicap(vs->vs_pi, 1);

	/* Legacy interrupts are mandatory for virtio devices */
	pci_lintr_request(vs->vs_pi);

	return (0);
}
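/*
 * Usage sketch (not from the original file): typical attach-time
 * bring-up for a hypothetical device "foo", assuming the conventional
 * bhyve arrangement of BAR 1 for the MSI-X table and BAR 0 for the
 * I/O registers; "sc" and "pi" are the device softc and pci_devinst.
 */
#if 0
	vi_softc_linkup(&sc->vsc_vs, &vtfoo_vi_consts, sc, pi, sc->vsc_queues);
	sc->vsc_vs.vs_mtx = &sc->vsc_mtx;
	if (vi_intr_init(&sc->vsc_vs, 1, fbsdrun_virtio_msix()))
		return (1);
	vi_set_io_bar(&sc->vsc_vs, 0);
#endif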
/*
 * Initialize the currently-selected virtio queue (vs->vs_curq).
 * The guest just gave us a page frame number, from which we can
 * calculate the addresses of the queue.
 */
void
vi_vq_init(struct virtio_softc *vs, uint32_t pfn)
{
	struct vqueue_info *vq;
	uint64_t phys;
	size_t size;
	char *base;

	vq = &vs->vs_queues[vs->vs_curq];
	vq->vq_pfn = pfn;
	phys = (uint64_t)pfn << VRING_PFN;
	size = vring_size(vq->vq_qsize);
	base = paddr_guest2host(vs->vs_pi->pi_vmctx, phys, size);

	/* First page(s) are descriptors... */
	vq->vq_desc = (struct virtio_desc *)base;
	base += vq->vq_qsize * sizeof(struct virtio_desc);

	/* ... immediately followed by "avail" ring (entirely uint16_t's) */
	vq->vq_avail = (struct vring_avail *)base;
	base += (2 + vq->vq_qsize + 1) * sizeof(uint16_t);

	/* Then it's rounded up to the next page... */
	base = (char *)roundup2((uintptr_t)base, VRING_ALIGN);

	/* ... and the last page(s) are the used ring. */
	vq->vq_used = (struct vring_used *)base;

	/* Mark queue as allocated, and start at 0 when we use it. */
	vq->vq_flags = VQ_ALLOC;
	vq->vq_last_avail = 0;
	vq->vq_next_used = 0;
	vq->vq_save_used = 0;
}
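/*
 * Worked example (assuming VRING_ALIGN is the 4096-byte page size): for a
 * 128-entry queue, the descriptor table is 128 * 16 = 2048 bytes and the
 * avail ring is (2 + 128 + 1) * 2 = 262 bytes; together they round up to
 * one page.  The used ring (4 + 128 * 8 + 2 = 1030 bytes) then begins on
 * the second page, so the whole vring spans 8192 bytes of guest memory.
 */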
/*
 * Helper inline for vq_getchain(): record the i'th "real"
 * descriptor.
 */
static inline void
_vq_record(int i, volatile struct virtio_desc *vd, struct vmctx *ctx,
	   struct iovec *iov, int n_iov, uint16_t *flags) {

	if (i >= n_iov)
		return;
	iov[i].iov_base = paddr_guest2host(ctx, vd->vd_addr, vd->vd_len);
	iov[i].iov_len = vd->vd_len;
	if (flags != NULL)
		flags[i] = vd->vd_flags;
}
#define	VQ_MAX_DESCRIPTORS	512	/* see below */
/*
 * Examine the chain of descriptors starting at the "next one" to
 * make sure that they describe a sensible request.  If so, return
 * the number of "real" descriptors that would be needed/used in
 * acting on this request.  This may be smaller than the number of
 * available descriptors, e.g., if there are two available but
 * they are two separate requests, this just returns 1.  Or, it
 * may be larger: if there are indirect descriptors involved,
 * there may only be one descriptor available but it may be an
 * indirect pointing to eight more.  We return 8 in this case,
 * i.e., we do not count the indirect descriptors, only the "real"
 * ones.
 *
 * Basically, this vets the vd_flags and vd_next fields of each
 * descriptor and tells you how many are involved.  Since some may
 * be indirect, this also needs the vmctx (in the pci_devinst
 * at vs->vs_pi) so that it can find indirect descriptors.
 *
 * As we process each descriptor, we copy and adjust it (guest to
 * host address wise, also using the vmctx) into the given iov[]
 * array (of the given size).  If the array overflows, we stop
 * placing values into the array but keep processing descriptors,
 * up to VQ_MAX_DESCRIPTORS, before giving up and returning -1.
 * So you, the caller, must not assume that iov[] is as big as the
 * return value (you can process the same thing twice to allocate
 * a larger iov array if needed, or supply a zero length to find
 * out how much space is needed).
 *
 * If you want to verify the WRITE flag on each descriptor, pass a
 * non-NULL "flags" pointer to an array of "uint16_t" of the same size
 * as n_iov and we'll copy each vd_flags field after unwinding any
 * indirects.
 *
 * If some descriptor(s) are invalid, this prints a diagnostic message
 * and returns -1.  If no descriptors are ready now it simply returns 0.
 *
 * You are assumed to have done a vq_ring_ready() if needed (note
 * that vq_has_descs() does one).
 */
int
vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
	    struct iovec *iov, int n_iov, uint16_t *flags)
{
	int i;
	u_int ndesc, n_indir;
	u_int idx, next;
	volatile struct virtio_desc *vdir, *vindir, *vp;
	struct vmctx *ctx;
	struct virtio_softc *vs;
	const char *name;

	vs = vq->vq_vs;
	name = vs->vs_vc->vc_name;

	/*
	 * Note: it's the responsibility of the guest not to
	 * update vq->vq_avail->va_idx until all of the descriptors
	 * the guest has written are valid (including all their
	 * vd_next fields and vd_flags).
	 *
	 * Compute (va_idx - last_avail) in integers mod 2**16.  This is
	 * the number of descriptors the device has made available
	 * since the last time we updated vq->vq_last_avail.
	 *
	 * We just need to do the subtraction as an unsigned int,
	 * then trim off excess bits.
	 */
	idx = vq->vq_last_avail;
	ndesc = (uint16_t)((u_int)vq->vq_avail->va_idx - idx);
	if (ndesc == 0)
		return (0);
	if (ndesc > vq->vq_qsize) {
		/* XXX need better way to diagnose issues */
		fprintf(stderr,
		    "%s: ndesc (%u) out of range, driver confused?\r\n",
		    name, (u_int)ndesc);
		return (-1);
	}

	/*
	 * Now count/parse "involved" descriptors starting from
	 * the head of the chain.
	 *
	 * To prevent loops, we could be more complicated and
	 * check whether we're re-visiting a previously visited
	 * index, but we just abort if the count gets excessive.
	 */
	ctx = vs->vs_pi->pi_vmctx;
	*pidx = next = vq->vq_avail->va_ring[idx & (vq->vq_qsize - 1)];
	vq->vq_last_avail++;
	for (i = 0; i < VQ_MAX_DESCRIPTORS; next = vdir->vd_next) {
		if (next >= vq->vq_qsize) {
			fprintf(stderr,
			    "%s: descriptor index %u out of range, "
			    "driver confused?\r\n",
			    name, next);
			return (-1);
		}
		vdir = &vq->vq_desc[next];
		if ((vdir->vd_flags & VRING_DESC_F_INDIRECT) == 0) {
			_vq_record(i, vdir, ctx, iov, n_iov, flags);
			i++;
		} else if ((vs->vs_vc->vc_hv_caps &
		    VIRTIO_RING_F_INDIRECT_DESC) == 0) {
			fprintf(stderr,
			    "%s: descriptor has forbidden INDIRECT flag, "
			    "driver confused?\r\n",
			    name);
			return (-1);
		} else {
			n_indir = vdir->vd_len / 16;
			if ((vdir->vd_len & 0xf) || n_indir == 0) {
				fprintf(stderr,
				    "%s: invalid indir len 0x%x, "
				    "driver confused?\r\n",
				    name, (u_int)vdir->vd_len);
				return (-1);
			}
			vindir = paddr_guest2host(ctx,
			    vdir->vd_addr, vdir->vd_len);
			/*
			 * Indirects start at the 0th, then follow
			 * their own embedded "next"s until those run
			 * out.  Each one's indirect flag must be off
			 * (we don't really have to check, could just
			 * ignore errors...).
			 */
			next = 0;
			for (;;) {
				vp = &vindir[next];
				if (vp->vd_flags & VRING_DESC_F_INDIRECT) {
					fprintf(stderr,
					    "%s: indirect desc has INDIR flag,"
					    " driver confused?\r\n",
					    name);
					return (-1);
				}
				_vq_record(i, vp, ctx, iov, n_iov, flags);
				if (++i > VQ_MAX_DESCRIPTORS)
					goto loopy;
				if ((vp->vd_flags & VRING_DESC_F_NEXT) == 0)
					break;
				next = vp->vd_next;
				if (next >= n_indir) {
					fprintf(stderr,
					    "%s: invalid next %u > %u, "
					    "driver confused?\r\n",
					    name, (u_int)next, n_indir);
					return (-1);
				}
			}
		}
		if ((vdir->vd_flags & VRING_DESC_F_NEXT) == 0)
			return (i);
	}
loopy:
	fprintf(stderr,
	    "%s: descriptor loop? count > %d - driver confused?\r\n",
	    name, i);
	return (-1);
}
/*
 * Return the first n_chain request chains back to the available queue.
 *
 * (These chains are the ones you handled when you called vq_getchain()
 * and used its positive return value.)
 */
void
vq_retchains(struct vqueue_info *vq, uint16_t n_chains)
{

	vq->vq_last_avail -= n_chains;
}
void
vq_relchain_prepare(struct vqueue_info *vq, uint16_t idx, uint32_t iolen)
{
	volatile struct vring_used *vuh;
	volatile struct virtio_used *vue;
	uint16_t mask;

	/*
	 * Notes:
	 *  - mask is N-1 where N is a power of 2 so computes x % N
	 *  - vuh points to the "used" data shared with guest
	 *  - vue points to the "used" ring entry we want to update
	 *
	 * (I apologize for the two fields named vu_idx; the
	 * virtio spec calls the one that vue points to, "id"...)
	 */
	mask = vq->vq_qsize - 1;
	vuh = vq->vq_used;

	vue = &vuh->vu_ring[vq->vq_next_used++ & mask];
	vue->vu_idx = idx;
	vue->vu_tlen = iolen;
}
void
vq_relchain_publish(struct vqueue_info *vq)
{
	/*
	 * Ensure the used descriptor is visible before updating the index.
	 * This is necessary on ISAs with memory ordering less strict than x86
	 * (and even on x86 to act as a compiler barrier).
	 */
	atomic_thread_fence_rel();
	vq->vq_used->vu_idx = vq->vq_next_used;
}
/*
 * Return specified request chain to the guest, setting its I/O length
 * to the provided value.
 *
 * (This chain is the one you handled when you called vq_getchain()
 * and used its positive return value.)
 */
void
vq_relchain(struct vqueue_info *vq, uint16_t idx, uint32_t iolen)
{

	vq_relchain_prepare(vq, idx, iolen);
	vq_relchain_publish(vq);
}
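/*
 * Sketch (not from the original file): the prepare/publish split lets a
 * device complete several chains and make them all visible with a single
 * index update, paying for one release fence instead of several:
 */
#if 0
	vq_relchain_prepare(vq, idx1, len1);
	vq_relchain_prepare(vq, idx2, len2);
	vq_relchain_publish(vq);	/* guest observes both at once */
#endif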
/*
 * Driver has finished processing "available" chains and calling
 * vq_relchain on each one.  If driver used all the available
 * chains, used_all should be set.
 *
 * If the "used" index moved we may need to inform the guest, i.e.,
 * deliver an interrupt.  Even if the used index did NOT move we
 * may need to deliver an interrupt, if the avail ring is empty and
 * we are supposed to interrupt on empty.
 *
 * Note that used_all_avail is provided by the caller because it's
 * a snapshot of the ring state when it decided to finish interrupt
 * processing -- it's possible that descriptors became available after
 * that point.  (It's also typically a constant 1/True as well.)
 */
void
vq_endchains(struct vqueue_info *vq, int used_all_avail)
{
	struct virtio_softc *vs;
	uint16_t event_idx, new_idx, old_idx;
	int intr;

	/*
	 * Interrupt generation: if we're using EVENT_IDX,
	 * interrupt if we've crossed the event threshold.
	 * Otherwise interrupt is generated if we added "used" entries,
	 * but suppressed by VRING_AVAIL_F_NO_INTERRUPT.
	 *
	 * In any case, though, if NOTIFY_ON_EMPTY is set and the
	 * entire avail was processed, we need to interrupt always.
	 */
	vs = vq->vq_vs;
	old_idx = vq->vq_save_used;
	vq->vq_save_used = new_idx = vq->vq_used->vu_idx;

	/*
	 * Use full memory barrier between vu_idx store from preceding
	 * vq_relchain() call and the loads from VQ_USED_EVENT_IDX() or
	 * va_flags below.
	 */
	atomic_thread_fence_seq_cst();
	if (used_all_avail &&
	    (vs->vs_negotiated_caps & VIRTIO_F_NOTIFY_ON_EMPTY))
		intr = 1;
	else if (vs->vs_negotiated_caps & VIRTIO_RING_F_EVENT_IDX) {
		event_idx = VQ_USED_EVENT_IDX(vq);
		/*
		 * This calculation is per docs and the kernel
		 * (see src/sys/dev/virtio/virtio_ring.h).
		 */
		intr = (uint16_t)(new_idx - event_idx - 1) <
		    (uint16_t)(new_idx - old_idx);
	} else {
		intr = new_idx != old_idx &&
		    !(vq->vq_avail->va_flags & VRING_AVAIL_F_NO_INTERRUPT);
	}
	if (intr)
		vq_interrupt(vs, vq);
}
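/*
 * Worked example of the EVENT_IDX test: suppose old_idx = 5, new_idx = 8,
 * and the guest set its event threshold to 6.  Then
 * (uint16_t)(8 - 6 - 1) = 1 < (uint16_t)(8 - 5) = 3, so we interrupt:
 * the used index stepped past the threshold.  Had the guest written 9,
 * the left side would be (uint16_t)(8 - 9 - 1) = 0xfffe, which is not
 * less than 3, and the interrupt is suppressed.  Doing the comparison
 * in unsigned 16-bit arithmetic makes it correct across index wrap.
 */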
/* Note: these are in sorted order to make for a fast search */
static struct config_reg {
	uint16_t	cr_offset;	/* register offset */
	uint8_t		cr_size;	/* size (bytes) */
	uint8_t		cr_ro;		/* true => reg is read only */
	const char	*cr_name;	/* name of reg */
} config_regs[] = {
	{ VTCFG_R_HOSTCAP,	4, 1, "HOSTCAP" },
	{ VTCFG_R_GUESTCAP,	4, 0, "GUESTCAP" },
	{ VTCFG_R_PFN,		4, 0, "PFN" },
	{ VTCFG_R_QNUM,		2, 1, "QNUM" },
	{ VTCFG_R_QSEL,		2, 0, "QSEL" },
	{ VTCFG_R_QNOTIFY,	2, 0, "QNOTIFY" },
	{ VTCFG_R_STATUS,	1, 0, "STATUS" },
	{ VTCFG_R_ISR,		1, 0, "ISR" },
	{ VTCFG_R_CFGVEC,	2, 0, "CFGVEC" },
	{ VTCFG_R_QVEC,		2, 0, "QVEC" },
};
static inline struct config_reg *
vi_find_cr(int offset) {
	u_int hi, lo, mid;
	struct config_reg *cr;

	lo = 0;
	hi = sizeof(config_regs) / sizeof(*config_regs) - 1;
	while (hi >= lo) {
		mid = (hi + lo) >> 1;
		cr = &config_regs[mid];
		if (cr->cr_offset == offset)
			return (cr);
		if (cr->cr_offset < offset)
			lo = mid + 1;
		else
			hi = mid - 1;
	}
	return (NULL);
}
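/*
 * Example (sketch, not in the original): looking up the one-byte STATUS
 * register; a NULL return means the offset names no standard register.
 */
#if 0
	struct config_reg *cr = vi_find_cr(VTCFG_R_STATUS);
	assert(cr != NULL && cr->cr_size == 1 && !cr->cr_ro);
#endif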
/*
 * Handle pci config space reads.
 * If it's to the MSI-X info, do that.
 * If it's part of the virtio standard stuff, do that.
 * Otherwise dispatch to the actual driver.
 */
uint64_t
vi_pci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
	    int baridx, uint64_t offset, int size)
{
	struct virtio_softc *vs = pi->pi_arg;
	struct virtio_consts *vc;
	struct config_reg *cr;
	uint64_t virtio_config_size, max;
	const char *name;
	uint32_t newoff;
	uint32_t value;
	int error;

	if (vs->vs_flags & VIRTIO_USE_MSIX) {
		if (baridx == pci_msix_table_bar(pi) ||
		    baridx == pci_msix_pba_bar(pi)) {
			return (pci_emul_msix_tread(pi, offset, size));
		}
	}

	/* XXX probably should do something better than just assert() */
	assert(baridx == 0);

	if (vs->vs_mtx)
		pthread_mutex_lock(vs->vs_mtx);

	vc = vs->vs_vc;
	name = vc->vc_name;
	value = size == 1 ? 0xff : size == 2 ? 0xffff : 0xffffffff;

	if (size != 1 && size != 2 && size != 4)
		goto bad;

	if (pci_msix_enabled(pi))
		virtio_config_size = VTCFG_R_CFG1;
	else
		virtio_config_size = VTCFG_R_CFG0;

	if (offset >= virtio_config_size) {
		/*
		 * Subtract off the standard size (including MSI-X
		 * registers if enabled) and dispatch to underlying driver.
		 * If that fails, fall into general code.
		 */
		newoff = offset - virtio_config_size;
		max = vc->vc_cfgsize ? vc->vc_cfgsize : 0x100000000;
		if (newoff + size > max)
			goto bad;
		error = (*vc->vc_cfgread)(DEV_SOFTC(vs), newoff, size, &value);
		if (!error)
			goto done;
	}

bad:
	cr = vi_find_cr(offset);
	if (cr == NULL || cr->cr_size != size) {
		if (cr != NULL) {
			/* offset must be OK, so size must be bad */
			fprintf(stderr,
			    "%s: read from %s: bad size %d\r\n",
			    name, cr->cr_name, size);
		} else {
			fprintf(stderr,
			    "%s: read from bad offset/size %jd/%d\r\n",
			    name, (uintmax_t)offset, size);
		}
		goto done;
	}

	switch (offset) {
	case VTCFG_R_HOSTCAP:
		value = vc->vc_hv_caps;
		break;
	case VTCFG_R_GUESTCAP:
		value = vs->vs_negotiated_caps;
		break;
	case VTCFG_R_PFN:
		if (vs->vs_curq < vc->vc_nvq)
			value = vs->vs_queues[vs->vs_curq].vq_pfn;
		break;
	case VTCFG_R_QNUM:
		value = vs->vs_curq < vc->vc_nvq ?
		    vs->vs_queues[vs->vs_curq].vq_qsize : 0;
		break;
	case VTCFG_R_QSEL:
		value = vs->vs_curq;
		break;
	case VTCFG_R_QNOTIFY:
		value = 0;	/* XXX */
		break;
	case VTCFG_R_STATUS:
		value = vs->vs_status;
		break;
	case VTCFG_R_ISR:
		value = vs->vs_isr;
		vs->vs_isr = 0;		/* a read clears this flag */
		if (value)
			pci_lintr_deassert(pi);
		break;
	case VTCFG_R_CFGVEC:
		value = vs->vs_msix_cfg_idx;
		break;
	case VTCFG_R_QVEC:
		value = vs->vs_curq < vc->vc_nvq ?
		    vs->vs_queues[vs->vs_curq].vq_msix_idx :
		    VIRTIO_MSI_NO_VECTOR;
		break;
	}
done:
	if (vs->vs_mtx)
		pthread_mutex_unlock(vs->vs_mtx);
	return (value);
}
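/*
 * Sketch (not from the original file): what "dispatch to the actual
 * driver" looks like on the device side.  A minimal vc_cfgread callback
 * for the hypothetical "foo" device; vi_pci_read() has already
 * subtracted the standard header, so offset is config-area relative.
 */
#if 0
static int
pci_vtfoo_cfgread(void *vsc, int offset, int size, uint32_t *retval)
{
	struct pci_vtfoo_softc *sc = vsc;

	if (offset + size > (int)sizeof(sc->vsc_config))
		return (1);	/* let the general code complain */
	memcpy(retval, (uint8_t *)&sc->vsc_config + offset, size);
	return (0);
}
#endif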
/*
 * Handle pci config space writes.
 * If it's to the MSI-X info, do that.
 * If it's part of the virtio standard stuff, do that.
 * Otherwise dispatch to the actual driver.
 */
void
vi_pci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
	     int baridx, uint64_t offset, int size, uint64_t value)
{
	struct virtio_softc *vs = pi->pi_arg;
	struct vqueue_info *vq;
	struct virtio_consts *vc;
	struct config_reg *cr;
	uint64_t virtio_config_size, max;
	const char *name;
	uint32_t newoff;
	int error;

	if (vs->vs_flags & VIRTIO_USE_MSIX) {
		if (baridx == pci_msix_table_bar(pi) ||
		    baridx == pci_msix_pba_bar(pi)) {
			pci_emul_msix_twrite(pi, offset, size, value);
			return;
		}
	}

	/* XXX probably should do something better than just assert() */
	assert(baridx == 0);

	if (vs->vs_mtx)
		pthread_mutex_lock(vs->vs_mtx);

	vc = vs->vs_vc;
	name = vc->vc_name;

	if (size != 1 && size != 2 && size != 4)
		goto bad;

	if (pci_msix_enabled(pi))
		virtio_config_size = VTCFG_R_CFG1;
	else
		virtio_config_size = VTCFG_R_CFG0;

	if (offset >= virtio_config_size) {
		/*
		 * Subtract off the standard size (including MSI-X
		 * registers if enabled) and dispatch to underlying driver.
		 */
		newoff = offset - virtio_config_size;
		max = vc->vc_cfgsize ? vc->vc_cfgsize : 0x100000000;
		if (newoff + size > max)
			goto bad;
		error = (*vc->vc_cfgwrite)(DEV_SOFTC(vs), newoff, size, value);
		if (!error)
			goto done;
	}

bad:
	cr = vi_find_cr(offset);
	if (cr == NULL || cr->cr_size != size || cr->cr_ro) {
		if (cr != NULL) {
			/* offset must be OK, wrong size and/or reg is R/O */
			if (cr->cr_size != size)
				fprintf(stderr,
				    "%s: write to %s: bad size %d\r\n",
				    name, cr->cr_name, size);
			if (cr->cr_ro)
				fprintf(stderr,
				    "%s: write to read-only reg %s\r\n",
				    name, cr->cr_name);
		} else {
			fprintf(stderr,
			    "%s: write to bad offset/size %jd/%d\r\n",
			    name, (uintmax_t)offset, size);
		}
		goto done;
	}

	switch (offset) {
	case VTCFG_R_GUESTCAP:
		vs->vs_negotiated_caps = value & vc->vc_hv_caps;
		if (vc->vc_apply_features)
			(*vc->vc_apply_features)(DEV_SOFTC(vs),
			    vs->vs_negotiated_caps);
		break;
	case VTCFG_R_PFN:
		if (vs->vs_curq >= vc->vc_nvq)
			goto bad_qindex;
		vi_vq_init(vs, value);
		break;
	case VTCFG_R_QSEL:
		/*
		 * Note that the guest is allowed to select an
		 * invalid queue; we just need to return a QNUM
		 * of 0 while the bad queue is selected.
		 */
		vs->vs_curq = value;
		break;
	case VTCFG_R_QNOTIFY:
		if (value >= vc->vc_nvq) {
			fprintf(stderr, "%s: queue %d notify out of range\r\n",
			    name, (int)value);
			goto done;
		}
		vq = &vs->vs_queues[value];
		if (vq->vq_notify)
			(*vq->vq_notify)(DEV_SOFTC(vs), vq);
		else if (vc->vc_qnotify)
			(*vc->vc_qnotify)(DEV_SOFTC(vs), vq);
		else
			fprintf(stderr,
			    "%s: qnotify queue %d: missing vq/vc notify\r\n",
			    name, (int)value);
		break;
	case VTCFG_R_STATUS:
		vs->vs_status = value;
		if (value == 0)
			(*vc->vc_reset)(DEV_SOFTC(vs));
		break;
	case VTCFG_R_CFGVEC:
		vs->vs_msix_cfg_idx = value;
		break;
	case VTCFG_R_QVEC:
		if (vs->vs_curq >= vc->vc_nvq)
			goto bad_qindex;
		vq = &vs->vs_queues[vs->vs_curq];
		vq->vq_msix_idx = value;
		break;
	}
	goto done;

bad_qindex:
	fprintf(stderr,
	    "%s: write config reg %s: curq %d >= max %d\r\n",
	    name, cr->cr_name, vs->vs_curq, vc->vc_nvq);
done:
	if (vs->vs_mtx)
		pthread_mutex_unlock(vs->vs_mtx);
}
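/*
 * Sketch (not from the original file): the notify path a device would
 * hang off vc_qnotify (or vq_notify).  This hypothetical handler drains
 * the queue with the helpers above, returning zero-length completions.
 */
#if 0
static void
pci_vtfoo_notify(void *vsc, struct vqueue_info *vq)
{
	struct iovec iov[8];
	uint16_t idx, flags[8];
	int n;

	while (vq_has_descs(vq)) {
		n = vq_getchain(vq, &idx, iov, 8, flags);
		if (n <= 0)
			break;
		/* ...consume iov[0 .. n-1] here... */
		vq_relchain(vq, idx, 0);
	}
	vq_endchains(vq, 1);	/* used all available chains */
}
#endif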