/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013  Chris Torek <torek @ torek net>
 * All rights reserved.
 * Copyright (c) 2019 Joyent, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/uio.h>

#include <machine/atomic.h>
#include <machine/vmm_snapshot.h>

#include <dev/virtio/pci/virtio_pci_legacy_var.h>

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <pthread.h>
#include <pthread_np.h>

#include "bhyverun.h"
#include "debug.h"
#include "pci_emul.h"
#include "virtio.h"
/*
 * Functions for dealing with generalized "virtual devices" as
 * defined by <https://www.google.com/#output=search&q=virtio+spec>
 */

/*
 * In case we decide to relax the "virtio softc comes at the
 * front of virtio-based device softc" constraint, let's use
 * this macro to convert.
 */
#define	DEV_SOFTC(vs) ((void *)(vs))
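
/*
 * Illustrative sketch (not part of the original file): the layout
 * constraint that DEV_SOFTC() relies on.  A device emulation embeds
 * the virtio softc as the *first* member of its own softc, so the two
 * pointers are interchangeable.  All names here are hypothetical.
 */
#if 0
struct example_dev_softc {
	struct virtio_softc exsc_vs;	/* must come first */
	pthread_mutex_t exsc_mtx;	/* lock shared via vs_mtx */
	/* ... device-specific state ... */
};
#endif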
/*
 * Link a virtio_softc to its constants, the device softc, and
 * the PCI emulation.
 */
void
vi_softc_linkup(struct virtio_softc *vs, struct virtio_consts *vc,
		void *dev_softc, struct pci_devinst *pi,
		struct vqueue_info *queues)
{
	int i;

	/* vs and dev_softc addresses must match */
	assert((void *)vs == dev_softc);
	vs->vs_vc = vc;
	vs->vs_pi = pi;
	pi->pi_arg = vs;

	vs->vs_queues = queues;
	for (i = 0; i < vc->vc_nvq; i++) {
		queues[i].vq_vs = vs;
		queues[i].vq_num = i;
	}
}
/*
 * Reset device (device-wide).  This erases all queues, i.e.,
 * all the queues become invalid (though we don't wipe out the
 * internal pointers, we just clear the VQ_ALLOC flag).
 *
 * It resets negotiated features to "none".
 *
 * If MSI-X is enabled, this also resets all the vectors to NO_VECTOR.
 */
void
vi_reset_dev(struct virtio_softc *vs)
{
	struct vqueue_info *vq;
	int i, nvq;

	if (vs->vs_mtx)
		assert(pthread_mutex_isowned_np(vs->vs_mtx));

	nvq = vs->vs_vc->vc_nvq;
	for (vq = vs->vs_queues, i = 0; i < nvq; vq++, i++) {
		vq->vq_flags = 0;
		vq->vq_last_avail = 0;
		vq->vq_next_used = 0;
		vq->vq_save_used = 0;
		vq->vq_pfn = 0;
		vq->vq_msix_idx = VIRTIO_MSI_NO_VECTOR;
	}
	vs->vs_negotiated_caps = 0;
	vs->vs_curq = 0;
	/* vs->vs_status = 0; -- redundant */
	if (vs->vs_isr)
		pci_lintr_deassert(vs->vs_pi);
	vs->vs_isr = 0;
	vs->vs_msix_cfg_idx = VIRTIO_MSI_NO_VECTOR;
}
/*
 * Set I/O BAR (usually 0) to map PCI config registers.
 */
void
vi_set_io_bar(struct virtio_softc *vs, int barnum)
{
	size_t size;

	/*
	 * ??? should we use VIRTIO_PCI_CONFIG_OFF(0) if MSI-X is disabled?
	 * Existing code did not...
	 */
	size = VIRTIO_PCI_CONFIG_OFF(1) + vs->vs_vc->vc_cfgsize;
	pci_emul_alloc_bar(vs->vs_pi, barnum, PCIBAR_IO, size);
}
/*
 * Initialize MSI-X vector capabilities if we're to use MSI-X,
 * or MSI capabilities if not.
 *
 * We assume we want one MSI-X vector per queue, here, plus one
 * for the config vector.
 */
int
vi_intr_init(struct virtio_softc *vs, int barnum, int use_msix)
{
	int nvec;

	if (use_msix) {
		vs->vs_flags |= VIRTIO_USE_MSIX;
		VS_LOCK(vs);
		vi_reset_dev(vs); /* set all vectors to NO_VECTOR */
		VS_UNLOCK(vs);
		nvec = vs->vs_vc->vc_nvq + 1;
		if (pci_emul_add_msixcap(vs->vs_pi, nvec, barnum))
			return (1);
	} else
		vs->vs_flags &= ~VIRTIO_USE_MSIX;

	/* Only 1 MSI vector for bhyve */
	pci_emul_add_msicap(vs->vs_pi, 1);

	/* Legacy interrupts are mandatory for virtio devices */
	pci_lintr_request(vs->vs_pi);

	return (0);
}
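
/*
 * Illustrative sketch (not part of the original file): the typical
 * call sequence in a device emulation's init routine.  example_init,
 * example_dev_softc, example_vi_consts and example_queues are
 * hypothetical; fbsdrun_virtio_msix() is assumed to report whether
 * MSI-X should be used.
 */
#if 0
static int
example_init(struct pci_devinst *pi)
{
	struct example_dev_softc *sc;

	sc = calloc(1, sizeof(struct example_dev_softc));
	vi_softc_linkup(&sc->exsc_vs, &example_vi_consts, sc, pi,
	    example_queues);
	sc->exsc_vs.vs_mtx = &sc->exsc_mtx;

	/* one vector per queue plus one for config changes */
	if (vi_intr_init(&sc->exsc_vs, 1, fbsdrun_virtio_msix()))
		return (1);

	/* BAR 0 maps the registers handled by vi_pci_read/vi_pci_write */
	vi_set_io_bar(&sc->exsc_vs, 0);
	return (0);
}
#endif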
/*
 * Initialize the currently-selected virtio queue (vs->vs_curq).
 * The guest just gave us a page frame number, from which we can
 * calculate the addresses of the queue.
 */
void
vi_vq_init(struct virtio_softc *vs, uint32_t pfn)
{
	struct vqueue_info *vq;
	uint64_t phys;
	size_t size;
	char *base;

	vq = &vs->vs_queues[vs->vs_curq];
	vq->vq_pfn = pfn;
	phys = (uint64_t)pfn << VRING_PFN;
	size = vring_size_aligned(vq->vq_qsize);
	base = paddr_guest2host(vs->vs_pi->pi_vmctx, phys, size);

	/* First page(s) are descriptors... */
	vq->vq_desc = (struct vring_desc *)base;
	base += vq->vq_qsize * sizeof(struct vring_desc);

	/* ... immediately followed by "avail" ring (entirely uint16_t's) */
	vq->vq_avail = (struct vring_avail *)base;
	base += (2 + vq->vq_qsize + 1) * sizeof(uint16_t);

	/* Then it's rounded up to the next page... */
	base = (char *)roundup2((uintptr_t)base, VRING_ALIGN);

	/* ... and the last page(s) are the used ring. */
	vq->vq_used = (struct vring_used *)base;

	/* Mark queue as allocated, and start at 0 when we use it. */
	vq->vq_flags = VQ_ALLOC;
	vq->vq_last_avail = 0;
	vq->vq_next_used = 0;
	vq->vq_save_used = 0;
}
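
/*
 * Worked example (illustrative): with a 64-entry queue and the usual
 * 4096-byte VRING_ALIGN, the guest ring parsed above occupies
 *	descriptors:	64 * 16 = 1024 bytes at offset 0,
 *	avail ring:	(2 + 64 + 1) * 2 = 134 bytes at offset 1024,
 *	used ring:	starting at offset 4096, the next page boundary.
 */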
/*
 * Helper inline for vq_getchain(): record the i'th "real"
 * descriptor.
 */
static inline void
_vq_record(int i, struct vring_desc *vd, struct vmctx *ctx, struct iovec *iov,
    int n_iov, struct vi_req *reqp)
{
	if (i >= n_iov)
		return;
	iov[i].iov_base = paddr_guest2host(ctx, vd->addr, vd->len);
	iov[i].iov_len = vd->len;
	if ((vd->flags & VRING_DESC_F_WRITE) == 0)
		reqp->readable++;
	else
		reqp->writable++;
}
#define	VQ_MAX_DESCRIPTORS	512	/* see below */
/*
 * Examine the chain of descriptors starting at the "next one" to
 * make sure that they describe a sensible request.  If so, return
 * the number of "real" descriptors that would be needed/used in
 * acting on this request.  This may be smaller than the number of
 * available descriptors, e.g., if there are two available but
 * they are two separate requests, this just returns 1.  Or, it
 * may be larger: if there are indirect descriptors involved,
 * there may only be one descriptor available but it may be an
 * indirect pointing to eight more.  We return 8 in this case,
 * i.e., we do not count the indirect descriptors, only the "real"
 * ones.
 *
 * Basically, this vets the "flags" and "next" field of each
 * descriptor and tells you how many are involved.  Since some may
 * be indirect, this also needs the vmctx (in the pci_devinst
 * at vs->vs_pi) so that it can find indirect descriptors.
 *
 * As we process each descriptor, we copy and adjust it (guest to
 * host address wise, also using the vmctx) into the given iov[]
 * array (of the given size).  If the array overflows, we stop
 * placing values into the array but keep processing descriptors,
 * up to VQ_MAX_DESCRIPTORS, before giving up and returning -1.
 * So you, the caller, must not assume that iov[] is as big as the
 * return value (you can process the same thing twice to allocate
 * a larger iov array if needed, or supply a zero length to find
 * out how much space is needed).
 *
 * If some descriptor(s) are invalid, this prints a diagnostic message
 * and returns -1.  If no descriptors are ready now it simply returns 0.
 *
 * You are assumed to have done a vq_ring_ready() if needed (note
 * that vq_has_descs() does one).
 */
int
vq_getchain(struct vqueue_info *vq, struct iovec *iov, int niov,
	    struct vi_req *reqp)
{
	int i;
	u_int ndesc, n_indir;
	u_int idx, next;
	struct vi_req req;
	struct vring_desc *vdir, *vindir, *vp;
	struct vmctx *ctx;
	struct virtio_softc *vs;
	const char *name;

	vs = vq->vq_vs;
	name = vs->vs_vc->vc_name;
	memset(&req, 0, sizeof(req));

	/*
	 * Note: it's the responsibility of the guest not to
	 * update vq->vq_avail->idx until all of the descriptors
	 * the guest has written are valid (including all their
	 * "next" fields and "flags").
	 *
	 * Compute (vq_avail->idx - last_avail) in integers mod 2**16.  This is
	 * the number of descriptors the device has made available
	 * since the last time we updated vq->vq_last_avail.
	 *
	 * We just need to do the subtraction as an unsigned int,
	 * then trim off excess bits.
	 */
	idx = vq->vq_last_avail;
	ndesc = (uint16_t)((u_int)vq->vq_avail->idx - idx);
	if (ndesc == 0)
		return (0);
	if (ndesc > vq->vq_qsize) {
		/* XXX need better way to diagnose issues */
		EPRINTLN(
		    "%s: ndesc (%u) out of range, driver confused?",
		    name, (u_int)ndesc);
		return (-1);
	}

	/*
	 * Now count/parse "involved" descriptors starting from
	 * the head of the chain.
	 *
	 * To prevent loops, we could be more complicated and
	 * check whether we're re-visiting a previously visited
	 * index, but we just abort if the count gets excessive.
	 */
	ctx = vs->vs_pi->pi_vmctx;
	req.idx = next = vq->vq_avail->ring[idx & (vq->vq_qsize - 1)];
	vq->vq_last_avail++;
	for (i = 0; i < VQ_MAX_DESCRIPTORS; next = vdir->next) {
		if (next >= vq->vq_qsize) {
			EPRINTLN(
			    "%s: descriptor index %u out of range, "
			    "driver confused?",
			    name, next);
			return (-1);
		}
		vdir = &vq->vq_desc[next];
		if ((vdir->flags & VRING_DESC_F_INDIRECT) == 0) {
			_vq_record(i, vdir, ctx, iov, niov, &req);
			i++;
		} else if ((vs->vs_vc->vc_hv_caps &
		    VIRTIO_RING_F_INDIRECT_DESC) == 0) {
			EPRINTLN(
			    "%s: descriptor has forbidden INDIRECT flag, "
			    "driver confused?",
			    name);
			return (-1);
		} else {
			n_indir = vdir->len / 16;
			if ((vdir->len & 0xf) || n_indir == 0) {
				EPRINTLN(
				    "%s: invalid indir len 0x%x, "
				    "driver confused?",
				    name, (u_int)vdir->len);
				return (-1);
			}
			vindir = paddr_guest2host(ctx,
			    vdir->addr, vdir->len);
			/*
			 * Indirects start at the 0th, then follow
			 * their own embedded "next"s until those run
			 * out.  Each one's indirect flag must be off
			 * (we don't really have to check, could just
			 * ignore errors...).
			 */
			next = 0;
			for (;;) {
				vp = &vindir[next];
				if (vp->flags & VRING_DESC_F_INDIRECT) {
					EPRINTLN(
					    "%s: indirect desc has INDIR flag,"
					    " driver confused?",
					    name);
					return (-1);
				}
				_vq_record(i, vp, ctx, iov, niov, &req);
				if (++i > VQ_MAX_DESCRIPTORS)
					goto loopy;
				if ((vp->flags & VRING_DESC_F_NEXT) == 0)
					break;
				next = vp->next;
				if (next >= n_indir) {
					EPRINTLN(
					    "%s: invalid next %u > %u, "
					    "driver confused?",
					    name, (u_int)next, n_indir);
					return (-1);
				}
			}
		}
		if ((vdir->flags & VRING_DESC_F_NEXT) == 0)
			goto done;
	}

loopy:
	EPRINTLN(
	    "%s: descriptor loop? count > %d - driver confused?",
	    name, i);
	return (-1);

done:
	*reqp = req;
	return (i);
}
/*
 * Return the first n_chains request chains back to the available queue.
 *
 * (These chains are the ones you handled when you called vq_getchain()
 * and used its positive return value.)
 */
void
vq_retchains(struct vqueue_info *vq, uint16_t n_chains)
{

	vq->vq_last_avail -= n_chains;
}
void
vq_relchain_prepare(struct vqueue_info *vq, uint16_t idx, uint32_t iolen)
{
	struct vring_used *vuh;
	struct vring_used_elem *vue;
	uint16_t mask;

	/*
	 * Notes:
	 *  - mask is N-1 where N is a power of 2 so computes x % N
	 *  - vuh points to the "used" data shared with guest
	 *  - vue points to the "used" ring entry we want to update
	 */
	mask = vq->vq_qsize - 1;
	vuh = vq->vq_used;

	vue = &vuh->ring[vq->vq_next_used++ & mask];
	vue->id = idx;
	vue->len = iolen;
}
void
vq_relchain_publish(struct vqueue_info *vq)
{
	/*
	 * Ensure the used descriptor is visible before updating the index.
	 * This is necessary on ISAs with memory ordering less strict than x86
	 * (and even on x86 to act as a compiler barrier).
	 */
	atomic_thread_fence_rel();
	vq->vq_used->idx = vq->vq_next_used;
}
/*
 * Return specified request chain to the guest, setting its I/O length
 * to the provided value.
 *
 * (This chain is the one you handled when you called vq_getchain()
 * and used its positive return value.)
 */
void
vq_relchain(struct vqueue_info *vq, uint16_t idx, uint32_t iolen)
{
	vq_relchain_prepare(vq, idx, iolen);
	vq_relchain_publish(vq);
}
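
/*
 * Illustrative sketch (not part of the original file): the split
 * prepare/publish pair above lets a device complete a batch of chains
 * with a single guest-visible index update.  done_idx, done_len and
 * n_done are hypothetical.
 */
#if 0
	for (i = 0; i < n_done; i++)
		vq_relchain_prepare(vq, done_idx[i], done_len[i]);
	vq_relchain_publish(vq);	/* one release-fenced "idx" store */
#endif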
/*
 * Driver has finished processing "available" chains and calling
 * vq_relchain on each one.  If driver used all the available
 * chains, used_all should be set.
 *
 * If the "used" index moved we may need to inform the guest, i.e.,
 * deliver an interrupt.  Even if the used index did NOT move we
 * may need to deliver an interrupt, if the avail ring is empty and
 * we are supposed to interrupt on empty.
 *
 * Note that used_all_avail is provided by the caller because it's
 * a snapshot of the ring state at the moment the caller decided to
 * finish interrupt processing -- it's possible that descriptors became
 * available after that point.  (It's also typically a constant 1/True.)
 */
void
vq_endchains(struct vqueue_info *vq, int used_all_avail)
{
	struct virtio_softc *vs;
	uint16_t event_idx, new_idx, old_idx;
	int intr;

	/*
	 * Interrupt generation: if we're using EVENT_IDX,
	 * interrupt if we've crossed the event threshold.
	 * Otherwise interrupt is generated if we added "used" entries,
	 * but suppressed by VRING_AVAIL_F_NO_INTERRUPT.
	 *
	 * In any case, though, if NOTIFY_ON_EMPTY is set and the
	 * entire avail was processed, we need to interrupt always.
	 */
	vs = vq->vq_vs;
	old_idx = vq->vq_save_used;
	vq->vq_save_used = new_idx = vq->vq_used->idx;

	/*
	 * Use full memory barrier between "idx" store from preceding
	 * vq_relchain() call and the loads from VQ_USED_EVENT_IDX() or
	 * "flags" field below.
	 */
	atomic_thread_fence_seq_cst();
	if (used_all_avail &&
	    (vs->vs_negotiated_caps & VIRTIO_F_NOTIFY_ON_EMPTY))
		intr = 1;
	else if (vs->vs_negotiated_caps & VIRTIO_RING_F_EVENT_IDX) {
		event_idx = VQ_USED_EVENT_IDX(vq);
		/*
		 * This calculation is per docs and the kernel
		 * (see src/sys/dev/virtio/virtio_ring.h).
		 */
		intr = (uint16_t)(new_idx - event_idx - 1) <
		    (uint16_t)(new_idx - old_idx);
	} else {
		intr = new_idx != old_idx &&
		    !(vq->vq_avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
	}
	if (intr)
		vq_interrupt(vs, vq);
}
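
/*
 * Worked example of the EVENT_IDX test above (illustrative numbers):
 * with old_idx = 5, new_idx = 8 and event_idx = 6,
 *	(uint16_t)(8 - 6 - 1) = 1  <  (uint16_t)(8 - 5) = 3,
 * so the guest's threshold was crossed by this batch and an interrupt
 * is delivered.  The uint16_t casts keep the comparison valid when the
 * 16-bit indices wrap around.
 */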
/* Note: these are in sorted order to make for a fast search */
static struct config_reg {
	uint16_t	cr_offset;	/* register offset */
	uint8_t		cr_size;	/* size (bytes) */
	uint8_t		cr_ro;		/* true => reg is read-only */
	const char	*cr_name;	/* name of reg */
} config_regs[] = {
	{ VIRTIO_PCI_HOST_FEATURES,	4, 1, "HOST_FEATURES" },
	{ VIRTIO_PCI_GUEST_FEATURES,	4, 0, "GUEST_FEATURES" },
	{ VIRTIO_PCI_QUEUE_PFN,		4, 0, "QUEUE_PFN" },
	{ VIRTIO_PCI_QUEUE_NUM,		2, 1, "QUEUE_NUM" },
	{ VIRTIO_PCI_QUEUE_SEL,		2, 0, "QUEUE_SEL" },
	{ VIRTIO_PCI_QUEUE_NOTIFY,	2, 0, "QUEUE_NOTIFY" },
	{ VIRTIO_PCI_STATUS,		1, 0, "STATUS" },
	{ VIRTIO_PCI_ISR,		1, 0, "ISR" },
	{ VIRTIO_MSI_CONFIG_VECTOR,	2, 0, "CONFIG_VECTOR" },
	{ VIRTIO_MSI_QUEUE_VECTOR,	2, 0, "QUEUE_VECTOR" },
};
static inline struct config_reg *
vi_find_cr(int offset)
{
	u_int hi, lo, mid;
	struct config_reg *cr;

	lo = 0;
	hi = sizeof(config_regs) / sizeof(*config_regs) - 1;
	while (hi >= lo) {
		mid = (hi + lo) >> 1;
		cr = &config_regs[mid];
		if (cr->cr_offset == offset)
			return (cr);
		if (cr->cr_offset < offset)
			lo = mid + 1;
		else
			hi = mid - 1;
	}
	return (NULL);
}
/*
 * Handle pci config space reads.
 * If it's to the MSI-X info, do that.
 * If it's part of the virtio standard stuff, do that.
 * Otherwise dispatch to the actual driver.
 */
uint64_t
vi_pci_read(struct pci_devinst *pi, int baridx, uint64_t offset, int size)
{
	struct virtio_softc *vs = pi->pi_arg;
	struct virtio_consts *vc;
	struct config_reg *cr;
	uint64_t virtio_config_size, max;
	const char *name;
	uint32_t newoff;
	uint32_t value;
	int error;

	if (vs->vs_flags & VIRTIO_USE_MSIX) {
		if (baridx == pci_msix_table_bar(pi) ||
		    baridx == pci_msix_pba_bar(pi)) {
			return (pci_emul_msix_tread(pi, offset, size));
		}
	}

	/* XXX probably should do something better than just assert() */
	assert(baridx == 0);

	if (vs->vs_mtx)
		pthread_mutex_lock(vs->vs_mtx);

	vc = vs->vs_vc;
	name = vc->vc_name;
	value = size == 1 ? 0xff : size == 2 ? 0xffff : 0xffffffff;

	if (size != 1 && size != 2 && size != 4)
		goto bad;

	virtio_config_size = VIRTIO_PCI_CONFIG_OFF(pci_msix_enabled(pi));

	if (offset >= virtio_config_size) {
		/*
		 * Subtract off the standard size (including MSI-X
		 * registers if enabled) and dispatch to underlying driver.
		 * If that fails, fall into general code.
		 */
		newoff = offset - virtio_config_size;
		max = vc->vc_cfgsize ? vc->vc_cfgsize : 0x100000000;
		if (newoff + size > max)
			goto bad;
		if (vc->vc_cfgread != NULL)
			error = (*vc->vc_cfgread)(DEV_SOFTC(vs), newoff,
			    size, &value);
		else
			error = 0;
		if (!error)
			goto done;
	}

bad:
	cr = vi_find_cr(offset);
	if (cr == NULL || cr->cr_size != size) {
		if (cr != NULL) {
			/* offset must be OK, so size must be bad */
			EPRINTLN(
			    "%s: read from %s: bad size %d",
			    name, cr->cr_name, size);
		} else {
			EPRINTLN(
			    "%s: read from bad offset/size %jd/%d",
			    name, (uintmax_t)offset, size);
		}
		goto done;
	}

	switch (offset) {
	case VIRTIO_PCI_HOST_FEATURES:
		value = vc->vc_hv_caps;
		break;
	case VIRTIO_PCI_GUEST_FEATURES:
		value = vs->vs_negotiated_caps;
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		if (vs->vs_curq < vc->vc_nvq)
			value = vs->vs_queues[vs->vs_curq].vq_pfn;
		break;
	case VIRTIO_PCI_QUEUE_NUM:
		value = vs->vs_curq < vc->vc_nvq ?
		    vs->vs_queues[vs->vs_curq].vq_qsize : 0;
		break;
	case VIRTIO_PCI_QUEUE_SEL:
		value = vs->vs_curq;
		break;
	case VIRTIO_PCI_QUEUE_NOTIFY:
		value = 0;	/* XXX */
		break;
	case VIRTIO_PCI_STATUS:
		value = vs->vs_status;
		break;
	case VIRTIO_PCI_ISR:
		value = vs->vs_isr;
		vs->vs_isr = 0;	/* a read clears this flag */
		if (value)
			pci_lintr_deassert(pi);
		break;
	case VIRTIO_MSI_CONFIG_VECTOR:
		value = vs->vs_msix_cfg_idx;
		break;
	case VIRTIO_MSI_QUEUE_VECTOR:
		value = vs->vs_curq < vc->vc_nvq ?
		    vs->vs_queues[vs->vs_curq].vq_msix_idx :
		    VIRTIO_MSI_NO_VECTOR;
		break;
	}

done:
	if (vs->vs_mtx)
		pthread_mutex_unlock(vs->vs_mtx);
	return (value);
}
/*
 * Handle pci config space writes.
 * If it's to the MSI-X info, do that.
 * If it's part of the virtio standard stuff, do that.
 * Otherwise dispatch to the actual driver.
 */
void
vi_pci_write(struct pci_devinst *pi, int baridx, uint64_t offset, int size,
    uint64_t value)
{
	struct virtio_softc *vs = pi->pi_arg;
	struct vqueue_info *vq;
	struct virtio_consts *vc;
	struct config_reg *cr;
	uint64_t virtio_config_size, max;
	const char *name;
	uint32_t newoff;
	int error;

	if (vs->vs_flags & VIRTIO_USE_MSIX) {
		if (baridx == pci_msix_table_bar(pi) ||
		    baridx == pci_msix_pba_bar(pi)) {
			pci_emul_msix_twrite(pi, offset, size, value);
			return;
		}
	}

	/* XXX probably should do something better than just assert() */
	assert(baridx == 0);

	if (vs->vs_mtx)
		pthread_mutex_lock(vs->vs_mtx);

	vc = vs->vs_vc;
	name = vc->vc_name;

	if (size != 1 && size != 2 && size != 4)
		goto bad;

	virtio_config_size = VIRTIO_PCI_CONFIG_OFF(pci_msix_enabled(pi));

	if (offset >= virtio_config_size) {
		/*
		 * Subtract off the standard size (including MSI-X
		 * registers if enabled) and dispatch to underlying driver.
		 */
		newoff = offset - virtio_config_size;
		max = vc->vc_cfgsize ? vc->vc_cfgsize : 0x100000000;
		if (newoff + size > max)
			goto bad;
		if (vc->vc_cfgwrite != NULL)
			error = (*vc->vc_cfgwrite)(DEV_SOFTC(vs), newoff,
			    size, value);
		else
			error = 0;
		if (!error)
			goto done;
	}

bad:
	cr = vi_find_cr(offset);
	if (cr == NULL || cr->cr_size != size || cr->cr_ro) {
		if (cr != NULL) {
			/* offset must be OK, wrong size and/or reg is R/O */
			if (cr->cr_size != size)
				EPRINTLN(
				    "%s: write to %s: bad size %d",
				    name, cr->cr_name, size);
			if (cr->cr_ro)
				EPRINTLN(
				    "%s: write to read-only reg %s",
				    name, cr->cr_name);
		} else {
			EPRINTLN(
			    "%s: write to bad offset/size %jd/%d",
			    name, (uintmax_t)offset, size);
		}
		goto done;
	}

	switch (offset) {
	case VIRTIO_PCI_GUEST_FEATURES:
		vs->vs_negotiated_caps = value & vc->vc_hv_caps;
		if (vc->vc_apply_features)
			(*vc->vc_apply_features)(DEV_SOFTC(vs),
			    vs->vs_negotiated_caps);
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		if (vs->vs_curq >= vc->vc_nvq)
			goto bad_qindex;
		vi_vq_init(vs, value);
		break;
	case VIRTIO_PCI_QUEUE_SEL:
		/*
		 * Note that the guest is allowed to select an
		 * invalid queue; we just need to return a QNUM
		 * of 0 while the bad queue is selected.
		 */
		vs->vs_curq = value;
		break;
	case VIRTIO_PCI_QUEUE_NOTIFY:
		if (value >= (unsigned int)vc->vc_nvq) {
			EPRINTLN("%s: queue %d notify out of range",
			    name, (int)value);
			goto done;
		}
		vq = &vs->vs_queues[value];
		if (vq->vq_notify)
			(*vq->vq_notify)(DEV_SOFTC(vs), vq);
		else if (vc->vc_qnotify)
			(*vc->vc_qnotify)(DEV_SOFTC(vs), vq);
		else
			EPRINTLN(
			    "%s: qnotify queue %d: missing vq/vc notify",
			    name, (int)value);
		break;
	case VIRTIO_PCI_STATUS:
		vs->vs_status = value;
		if (value == 0)
			(*vc->vc_reset)(DEV_SOFTC(vs));
		break;
	case VIRTIO_MSI_CONFIG_VECTOR:
		vs->vs_msix_cfg_idx = value;
		break;
	case VIRTIO_MSI_QUEUE_VECTOR:
		if (vs->vs_curq >= vc->vc_nvq)
			goto bad_qindex;
		vq = &vs->vs_queues[vs->vs_curq];
		vq->vq_msix_idx = value;
		break;
	}
	goto done;

bad_qindex:
	EPRINTLN(
	    "%s: write config reg %s: curq %d >= max %d",
	    name, cr->cr_name, vs->vs_curq, vc->vc_nvq);

done:
	if (vs->vs_mtx)
		pthread_mutex_unlock(vs->vs_mtx);
}
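
/*
 * Illustrative summary (not part of the original file): the legacy
 * register sequence a guest driver walks through, serviced by the
 * vi_pci_read/vi_pci_write handlers above:
 *
 *	write QUEUE_SEL = n		select queue n
 *	read  QUEUE_NUM			ring size (0 => no such queue)
 *	write QUEUE_PFN = pfn		place the ring; calls vi_vq_init()
 *	write GUEST_FEATURES = f	negotiate features
 *	write STATUS			driver state, ending in DRIVER_OK
 *	write QUEUE_NOTIFY = n		kick queue n after posting buffers
 */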
#ifdef BHYVE_SNAPSHOT
int
vi_pci_pause(struct pci_devinst *pi)
{
	struct virtio_softc *vs;
	struct virtio_consts *vc;

	vs = pi->pi_arg;
	vc = vs->vs_vc;

	assert(vc->vc_pause != NULL);
	(*vc->vc_pause)(DEV_SOFTC(vs));

	return (0);
}

int
vi_pci_resume(struct pci_devinst *pi)
{
	struct virtio_softc *vs;
	struct virtio_consts *vc;

	vs = pi->pi_arg;
	vc = vs->vs_vc;

	assert(vc->vc_resume != NULL);
	(*vc->vc_resume)(DEV_SOFTC(vs));

	return (0);
}
static int
vi_pci_snapshot_softc(struct virtio_softc *vs, struct vm_snapshot_meta *meta)
{
	int ret;

	SNAPSHOT_VAR_OR_LEAVE(vs->vs_flags, meta, ret, done);
	SNAPSHOT_VAR_OR_LEAVE(vs->vs_negotiated_caps, meta, ret, done);
	SNAPSHOT_VAR_OR_LEAVE(vs->vs_curq, meta, ret, done);
	SNAPSHOT_VAR_OR_LEAVE(vs->vs_status, meta, ret, done);
	SNAPSHOT_VAR_OR_LEAVE(vs->vs_isr, meta, ret, done);
	SNAPSHOT_VAR_OR_LEAVE(vs->vs_msix_cfg_idx, meta, ret, done);

done:
	return (ret);
}

static int
vi_pci_snapshot_consts(struct virtio_consts *vc, struct vm_snapshot_meta *meta)
{
	int ret;

	SNAPSHOT_VAR_CMP_OR_LEAVE(vc->vc_nvq, meta, ret, done);
	SNAPSHOT_VAR_CMP_OR_LEAVE(vc->vc_cfgsize, meta, ret, done);
	SNAPSHOT_VAR_CMP_OR_LEAVE(vc->vc_hv_caps, meta, ret, done);

done:
	return (ret);
}
static int
vi_pci_snapshot_queues(struct virtio_softc *vs, struct vm_snapshot_meta *meta)
{
	int i;
	int ret;
	struct virtio_consts *vc;
	struct vqueue_info *vq;
	uint64_t addr_size;

	vc = vs->vs_vc;

	/* Save virtio queue info */
	for (i = 0; i < vc->vc_nvq; i++) {
		vq = &vs->vs_queues[i];

		SNAPSHOT_VAR_CMP_OR_LEAVE(vq->vq_qsize, meta, ret, done);
		SNAPSHOT_VAR_CMP_OR_LEAVE(vq->vq_num, meta, ret, done);

		SNAPSHOT_VAR_OR_LEAVE(vq->vq_flags, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vq->vq_last_avail, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vq->vq_next_used, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vq->vq_save_used, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vq->vq_msix_idx, meta, ret, done);

		SNAPSHOT_VAR_OR_LEAVE(vq->vq_pfn, meta, ret, done);

		if (!vq_ring_ready(vq))
			continue;

		addr_size = vq->vq_qsize * sizeof(struct vring_desc);
		SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(vq->vq_desc, addr_size,
		    false, meta, ret, done);

		addr_size = (2 + vq->vq_qsize + 1) * sizeof(uint16_t);
		SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(vq->vq_avail, addr_size,
		    false, meta, ret, done);

		addr_size = (2 + 2 * vq->vq_qsize + 1) * sizeof(uint16_t);
		SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(vq->vq_used, addr_size,
		    false, meta, ret, done);

		SNAPSHOT_BUF_OR_LEAVE(vq->vq_desc,
		    vring_size_aligned(vq->vq_qsize), meta, ret, done);
	}

done:
	return (ret);
}
int
vi_pci_snapshot(struct vm_snapshot_meta *meta)
{
	int ret;
	struct pci_devinst *pi;
	struct virtio_softc *vs;
	struct virtio_consts *vc;

	pi = meta->dev_data;
	vs = pi->pi_arg;
	vc = vs->vs_vc;

	/* Save virtio softc */
	ret = vi_pci_snapshot_softc(vs, meta);
	if (ret != 0)
		goto done;

	/* Save virtio consts */
	ret = vi_pci_snapshot_consts(vc, meta);
	if (ret != 0)
		goto done;

	/* Save virtio queue info */
	ret = vi_pci_snapshot_queues(vs, meta);
	if (ret != 0)
		goto done;

	/* Save device softc, if needed */
	if (vc->vc_snapshot != NULL) {
		ret = (*vc->vc_snapshot)(DEV_SOFTC(vs), meta);
		if (ret != 0)
			goto done;
	}

done:
	return (ret);
}
#endif