/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Implements the virtqueue interface as basically described
 * in the original VirtIO paper.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sdt.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/atomic.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>

#include "virtio_bus_if.h"
struct virtqueue {
	device_t		 vq_dev;
	uint16_t		 vq_queue_index;
	uint16_t		 vq_nentries;
	uint32_t		 vq_flags;
#define	VIRTQUEUE_FLAG_MODERN	 0x0001
#define	VIRTQUEUE_FLAG_INDIRECT	 0x0002
#define	VIRTQUEUE_FLAG_EVENT_IDX 0x0004

	int			 vq_max_indirect_size;
	bus_size_t		 vq_notify_offset;
	virtqueue_intr_t	*vq_intrhand;
	void			*vq_intrhand_arg;

	struct vring		 vq_ring;
	uint16_t		 vq_free_cnt;
	uint16_t		 vq_queued_cnt;
	/*
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t		 vq_desc_head_idx;
	/*
	 * Last consumed descriptor in the used table,
	 * trails vq_ring.used->idx.
	 */
	uint16_t		 vq_used_cons_idx;

	void			*vq_ring_mem;
	int			 vq_indirect_mem_size;
	int			 vq_alignment;
	int			 vq_ring_size;
	char			 vq_name[VIRTQUEUE_MAX_NAME_SZ];

	struct vq_desc_extra {
		void		  *cookie;
		struct vring_desc *indirect;
		vm_paddr_t	   indirect_paddr;
		uint16_t	   ndescs;
	} vq_descx[0];
};
/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define	VQ_RING_DESC_CHAIN_END	32768
#define	VQASSERT(_vq, _exp, _msg, ...)				\
    KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name,	\
	##__VA_ARGS__))

#define	VQ_RING_ASSERT_VALID_IDX(_vq, _idx)			\
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,		\
	"invalid ring index: %d, max: %d", (_idx),		\
	(_vq)->vq_nentries)

#define	VQ_RING_ASSERT_CHAIN_TERM(_vq)				\
    VQASSERT((_vq), (_vq)->vq_desc_head_idx ==			\
	VQ_RING_DESC_CHAIN_END, "full ring terminated "		\
	"incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)
static int	virtqueue_init_indirect(struct virtqueue *vq, int);
static void	virtqueue_free_indirect(struct virtqueue *vq);
static void	virtqueue_init_indirect_list(struct virtqueue *,
		    struct vring_desc *);

static void	vq_ring_init(struct virtqueue *);
static void	vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t	vq_ring_enqueue_segments(struct virtqueue *,
		    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int	vq_ring_use_indirect(struct virtqueue *, int);
static void	vq_ring_enqueue_indirect(struct virtqueue *, void *,
		    struct sglist *, int, int);
static int	vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static int	vq_ring_must_notify_host(struct virtqueue *);
static void	vq_ring_notify_host(struct virtqueue *);
static void	vq_ring_free_chain(struct virtqueue *, uint16_t);
SDT_PROVIDER_DEFINE(virtqueue);
SDT_PROBE_DEFINE6(virtqueue, , enqueue_segments, entry, "struct virtqueue *",
    "struct vring_desc *", "uint16_t", "struct sglist *", "int", "int");
SDT_PROBE_DEFINE1(virtqueue, , enqueue_segments, return, "uint16_t");
#define	vq_modern(_vq)		(((_vq)->vq_flags & VIRTQUEUE_FLAG_MODERN) != 0)
#define	vq_htog16(_vq, _val)	virtio_htog16(vq_modern(_vq), _val)
#define	vq_htog32(_vq, _val)	virtio_htog32(vq_modern(_vq), _val)
#define	vq_htog64(_vq, _val)	virtio_htog64(vq_modern(_vq), _val)
#define	vq_gtoh16(_vq, _val)	virtio_gtoh16(vq_modern(_vq), _val)
#define	vq_gtoh32(_vq, _val)	virtio_gtoh32(vq_modern(_vq), _val)
#define	vq_gtoh64(_vq, _val)	virtio_gtoh64(vq_modern(_vq), _val)
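
/*
 * Allocate and initialize a virtqueue. The descriptor ring is backed by
 * physically contiguous, page-aligned memory bounded by highaddr. On
 * any failure the partially constructed queue is torn down.
 */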
int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size,
    bus_size_t notify_offset, int align, vm_paddr_t highaddr,
    struct vq_alloc_info *info, struct virtqueue **vqp)
{
	struct virtqueue *vq;
	int error;

	*vqp = NULL;
	error = 0;

	if (size == 0) {
		device_printf(dev,
		    "virtqueue %d (%s) does not exist (size is zero)\n",
		    queue, info->vqai_name);
		return (ENODEV);
	} else if (!powerof2(size)) {
		device_printf(dev,
		    "virtqueue %d (%s) size is not a power of 2: %d\n",
		    queue, info->vqai_name, size);
		return (ENXIO);
	} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
		device_printf(dev, "virtqueue %d (%s) requested too many "
		    "indirect descriptors: %d, max %d\n",
		    queue, info->vqai_name, info->vqai_maxindirsz,
		    VIRTIO_MAX_INDIRECT);
		return (EINVAL);
	}

	vq = malloc(sizeof(struct virtqueue) +
	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (vq == NULL) {
		device_printf(dev, "cannot allocate virtqueue\n");
		return (ENOMEM);
	}

	vq->vq_dev = dev;
	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
	vq->vq_queue_index = queue;
	vq->vq_notify_offset = notify_offset;
	vq->vq_alignment = align;
	vq->vq_nentries = size;
	vq->vq_free_cnt = size;
	vq->vq_intrhand = info->vqai_intr;
	vq->vq_intrhand_arg = info->vqai_intr_arg;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_F_VERSION_1) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_MODERN;
	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

	if (info->vqai_maxindirsz > 1) {
		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
		if (error)
			goto fail;
	}

	vq->vq_ring_size = round_page(vring_size(size, align));
	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
	if (vq->vq_ring_mem == NULL) {
		device_printf(dev,
		    "cannot allocate memory for virtqueue ring\n");
		error = ENOMEM;
		goto fail;
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	*vqp = vq;

fail:
	if (error)
		virtqueue_free(vq);

	return (error);
}
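
/*
 * Allocate one indirect descriptor list per ring entry. This is a no-op,
 * not an error, when VIRTIO_RING_F_INDIRECT_DESC was not negotiated.
 */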
static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
	device_t dev;
	struct vq_desc_extra *dxp;
	int i, size;

	dev = vq->vq_dev;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
		/*
		 * Indirect descriptors requested by the driver but not
		 * negotiated. Return zero to keep the initialization
		 * going: we'll run fine without.
		 */
		if (bootverbose)
			device_printf(dev, "virtqueue %d (%s) requested "
			    "indirect descriptors but not negotiated\n",
			    vq->vq_queue_index, vq->vq_name);
		return (0);
	}

	size = indirect_size * sizeof(struct vring_desc);
	vq->vq_max_indirect_size = indirect_size;
	vq->vq_indirect_mem_size = size;
	vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
		if (dxp->indirect == NULL) {
			device_printf(dev, "cannot allocate indirect list\n");
			return (ENOMEM);
		}

		dxp->indirect_paddr = vtophys(dxp->indirect);
		virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	return (0);
}
static void
virtqueue_free_indirect(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	int i;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		if (dxp->indirect == NULL)
			break;

		free(dxp->indirect, M_DEVBUF);
		dxp->indirect = NULL;
		dxp->indirect_paddr = 0;
	}

	vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
	vq->vq_indirect_mem_size = 0;
}
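
/*
 * Link every descriptor of an indirect list into a single chain
 * terminated by VQ_RING_DESC_CHAIN_END.
 */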
static void
virtqueue_init_indirect_list(struct virtqueue *vq,
    struct vring_desc *indirect)
{
	int i;

	bzero(indirect, vq->vq_indirect_mem_size);

	for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
		indirect[i].next = vq_gtoh16(vq, i + 1);
	indirect[i].next = vq_gtoh16(vq, VQ_RING_DESC_CHAIN_END);
}
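
/*
 * Reinitialize a virtqueue to its freshly allocated state, typically
 * after a device reset. The queue size must be unchanged.
 */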
int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
	struct vq_desc_extra *dxp;
	int i;

	if (vq->vq_nentries != size) {
		device_printf(vq->vq_dev,
		    "%s: '%s' changed size; old=%hu, new=%hu\n",
		    __func__, vq->vq_name, vq->vq_nentries, size);
		return (EINVAL);
	}

	/* Warn if the virtqueue was not properly cleaned up. */
	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev,
		    "%s: warning '%s' virtqueue not empty, "
		    "leaking %d entries\n", __func__, vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	vq->vq_desc_head_idx = 0;
	vq->vq_used_cons_idx = 0;
	vq->vq_queued_cnt = 0;
	vq->vq_free_cnt = vq->vq_nentries;

	/* To be safe, reset all our allocated memory. */
	bzero(vq->vq_ring_mem, vq->vq_ring_size);
	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];
		dxp->cookie = NULL;
		dxp->ndescs = 0;
		if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
			virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	return (0);
}
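
/*
 * Release all memory held by a virtqueue, warning if the driver leaked
 * outstanding entries.
 */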
void
virtqueue_free(struct virtqueue *vq)
{

	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
		    "leaking %d entries\n", vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
		virtqueue_free_indirect(vq);

	if (vq->vq_ring_mem != NULL) {
		contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
		vq->vq_ring_size = 0;
		vq->vq_ring_mem = NULL;
	}

	free(vq, M_DEVBUF);
}
vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring_mem));
}

vm_paddr_t
virtqueue_desc_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring.desc));
}

vm_paddr_t
virtqueue_avail_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring.avail));
}

vm_paddr_t
virtqueue_used_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring.used));
}

uint16_t
virtqueue_index(struct virtqueue *vq)
{

	return (vq->vq_queue_index);
}

int
virtqueue_size(struct virtqueue *vq)
{

	return (vq->vq_nentries);
}

int
virtqueue_nfree(struct virtqueue *vq)
{

	return (vq->vq_free_cnt);
}

int
virtqueue_empty(struct virtqueue *vq)
{

	return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{

	return (vq->vq_free_cnt == 0);
}
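
/*
 * Notify the host of newly queued buffers, unless it has suppressed
 * notifications.
 */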
void
virtqueue_notify(struct virtqueue *vq)
{

	/* Ensure updated avail->idx is visible to host. */
	mb();

	if (vq_ring_must_notify_host(vq))
		vq_ring_notify_host(vq);
	vq->vq_queued_cnt = 0;
}
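
/*
 * Return the number of used entries the host has returned but the
 * driver has not yet consumed.
 */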
int
virtqueue_nused(struct virtqueue *vq)
{
	uint16_t used_idx, nused;

	used_idx = vq_htog16(vq, vq->vq_ring.used->idx);

	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

	return (nused);
}
int
virtqueue_intr_filter(struct virtqueue *vq)
{

	if (vq->vq_used_cons_idx == vq_htog16(vq, vq->vq_ring.used->idx))
		return (0);

	virtqueue_disable_intr(vq);

	return (1);
}

void
virtqueue_intr(struct virtqueue *vq)
{

	vq->vq_intrhand(vq->vq_intrhand_arg);
}

int
virtqueue_enable_intr(struct virtqueue *vq)
{

	return (vq_ring_enable_interrupt(vq, 0));
}
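
/*
 * Postpone the used-ring interrupt until a portion of the currently
 * available descriptors has been consumed, scaled by the hint.
 */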
int
virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
{
	uint16_t ndesc, avail_idx;

	avail_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
	ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);

	switch (hint) {
	case VQ_POSTPONE_SHORT:
		ndesc = ndesc / 4;
		break;
	case VQ_POSTPONE_LONG:
		ndesc = (ndesc * 3) / 4;
		break;
	case VQ_POSTPONE_EMPTIED:
		break;
	}

	return (vq_ring_enable_interrupt(vq, ndesc));
}
/*
 * Note this is only considered a hint to the host.
 */
void
virtqueue_disable_intr(struct virtqueue *vq)
{

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		vring_used_event(&vq->vq_ring) = vq_gtoh16(vq,
		    vq->vq_used_cons_idx - vq->vq_nentries - 1);
		return;
	}

	vq->vq_ring.avail->flags |= vq_gtoh16(vq, VRING_AVAIL_F_NO_INTERRUPT);
}
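
/*
 * Enqueue a buffer described by an sglist: the 'readable' device-readable
 * segments must precede the 'writable' device-writable ones.
 */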
int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx, idx;

	needed = readable + writable;

	VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
	VQASSERT(vq, needed == sg->sg_nseg,
	    "segment count mismatch, %d, %d", needed, sg->sg_nseg);
	VQASSERT(vq,
	    needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
	    "too many segments to enqueue: %d, %d/%d", needed,
	    vq->vq_nentries, vq->vq_max_indirect_size);

	if (needed < 1)
		return (EINVAL);
	if (vq->vq_free_cnt == 0)
		return (ENOSPC);

	if (vq_ring_use_indirect(vq, needed)) {
		vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
		return (0);
	} else if (vq->vq_free_cnt < needed)
		return (EMSGSIZE);

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = needed;

	idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
	    sg, readable, writable);

	vq->vq_desc_head_idx = idx;
	vq->vq_free_cnt -= needed;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, idx);

	vq_ring_update_avail(vq, head_idx);

	return (0);
}
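
/*
 * Dequeue the next used buffer, returning its cookie and optionally its
 * length, or NULL if the used ring is empty.
 */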
void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	if (vq->vq_used_cons_idx == vq_htog16(vq, vq->vq_ring.used->idx))
		return (NULL);

	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

	rmb();
	desc_idx = (uint16_t) vq_htog32(vq, uep->id);
	if (len != NULL)
		*len = vq_htog32(vq, uep->len);

	vq_ring_free_chain(vq, desc_idx);

	cookie = vq->vq_descx[desc_idx].cookie;
	VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
	vq->vq_descx[desc_idx].cookie = NULL;

	return (cookie);
}
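
/*
 * Busy-wait until a used buffer becomes available, polling the bus
 * between checks.
 */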
void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
	void *cookie;

	VIRTIO_BUS_POLL(vq->vq_dev);
	while ((cookie = virtqueue_dequeue(vq, len)) == NULL) {
		cpu_spinwait();
		VIRTIO_BUS_POLL(vq->vq_dev);
	}

	return (cookie);
}
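
/*
 * Walk the descriptor table starting at *last and hand back any cookie
 * still outstanding, so a driver can reclaim buffers during teardown.
 */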
void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
	void *cookie;
	int idx;

	cookie = NULL;
	idx = *last;

	while (idx < vq->vq_nentries && cookie == NULL) {
		if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
			vq->vq_descx[idx].cookie = NULL;
			/* Free chain to keep free count consistent. */
			vq_ring_free_chain(vq, idx);
		}
		idx++;
	}

	*last = idx;

	return (cookie);
}
void
virtqueue_dump(struct virtqueue *vq)
{

	if (vq == NULL)
		return;

	printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
	    "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
	    "used.idx=%d; used_event_idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
	    vq->vq_name, vq->vq_nentries, vq->vq_free_cnt, virtqueue_nused(vq),
	    vq->vq_queued_cnt, vq->vq_desc_head_idx,
	    vq_htog16(vq, vq->vq_ring.avail->idx), vq->vq_used_cons_idx,
	    vq_htog16(vq, vq->vq_ring.used->idx),
	    vq_htog16(vq, vring_used_event(&vq->vq_ring)),
	    vq_htog16(vq, vq->vq_ring.avail->flags),
	    vq_htog16(vq, vq->vq_ring.used->flags));
}
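
/*
 * Lay out the vring in the allocated ring memory and link all
 * descriptors into the initial free chain.
 */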
static void
vq_ring_init(struct virtqueue *vq)
{
	struct vring *vr;
	char *ring_mem;
	int i, size;

	ring_mem = vq->vq_ring_mem;
	size = vq->vq_nentries;
	vr = &vq->vq_ring;

	vring_init(vr, size, ring_mem, vq->vq_alignment);

	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = vq_gtoh16(vq, i + 1);
	vr->desc[i].next = vq_gtoh16(vq, VQ_RING_DESC_CHAIN_END);
}
static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx, avail_ring_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
	avail_ring_idx = avail_idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[avail_ring_idx] = vq_gtoh16(vq, desc_idx);

	wmb();
	vq->vq_ring.avail->idx = vq_gtoh16(vq, avail_idx + 1);

	/* Keep pending count until virtqueue_notify(). */
	vq->vq_queued_cnt++;
}
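
/*
 * Fill a descriptor chain from the sglist segments, marking the
 * device-writable descriptors, and return the index following the
 * chain's tail.
 */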
static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
	struct sglist_seg *seg;
	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	SDT_PROBE6(virtqueue, , enqueue_segments, entry, vq, desc, head_idx,
	    sg, readable, writable);

	needed = readable + writable;

	for (i = 0, idx = head_idx, seg = sg->sg_segs;
	     i < needed;
	     i++, idx = vq_htog16(vq, dp->next), seg++) {
		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
		    "premature end of free desc chain");

		dp = &desc[idx];
		dp->addr = vq_gtoh64(vq, seg->ss_paddr);
		dp->len = vq_gtoh32(vq, seg->ss_len);
		dp->flags = 0;

		if (i < needed - 1)
			dp->flags |= vq_gtoh16(vq, VRING_DESC_F_NEXT);
		if (i >= readable)
			dp->flags |= vq_gtoh16(vq, VRING_DESC_F_WRITE);
	}

	SDT_PROBE1(virtqueue, , enqueue_segments, return, idx);
	return (idx);
}
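
/*
 * Decide whether an indirect descriptor should be used: they must be
 * supported, the request must fit, and chains of fewer than two
 * segments are not worth the indirection.
 */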
static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{

	if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
		return (0);

	if (vq->vq_max_indirect_size < needed)
		return (0);

	if (needed < 2)
		return (0);

	return (1);
}
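
/*
 * Enqueue a buffer through a single indirect descriptor, consuming only
 * one slot in the main ring regardless of segment count.
 */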
static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx;

	needed = readable + writable;
	VQASSERT(vq, needed <= vq->vq_max_indirect_size,
	    "enqueuing too many indirect descriptors");

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dp = &vq->vq_ring.desc[head_idx];
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = 1;

	dp->addr = vq_gtoh64(vq, dxp->indirect_paddr);
	dp->len = vq_gtoh32(vq, needed * sizeof(struct vring_desc));
	dp->flags = vq_gtoh16(vq, VRING_DESC_F_INDIRECT);

	vq_ring_enqueue_segments(vq, dxp->indirect, 0,
	    sg, readable, writable);

	vq->vq_desc_head_idx = vq_htog16(vq, dp->next);
	vq->vq_free_cnt--;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

	vq_ring_update_avail(vq, head_idx);
}
static int
vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{

	/*
	 * Enable interrupts, making sure we get the latest index of
	 * what's already been consumed.
	 */
	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		vring_used_event(&vq->vq_ring) =
		    vq_gtoh16(vq, vq->vq_used_cons_idx + ndesc);
	} else {
		vq->vq_ring.avail->flags &=
		    vq_gtoh16(vq, ~VRING_AVAIL_F_NO_INTERRUPT);
	}

	mb();

	/*
	 * Enough items may have already been consumed to meet our threshold
	 * since we last checked. Let our caller know so it processes the new
	 * entries.
	 */
	if (virtqueue_nused(vq) > ndesc)
		return (1);

	return (0);
}
static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
	uint16_t new_idx, prev_idx, event_idx, flags;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		new_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
		prev_idx = new_idx - vq->vq_queued_cnt;
		event_idx = vq_htog16(vq, vring_avail_event(&vq->vq_ring));

		return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
	}

	flags = vq->vq_ring.used->flags;
	return ((flags & vq_gtoh16(vq, VRING_USED_F_NO_NOTIFY)) == 0);
}
static void
vq_ring_notify_host(struct virtqueue *vq)
{

	VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index,
	    vq->vq_notify_offset);
}
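
/*
 * Return a descriptor chain to the free list, walking the links of a
 * direct chain to account for every descriptor.
 */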
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);

	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

	if ((dp->flags & vq_gtoh16(vq, VRING_DESC_F_INDIRECT)) == 0) {
		while (dp->flags & vq_gtoh16(vq, VRING_DESC_F_NEXT)) {
			uint16_t next_idx = vq_htog16(vq, dp->next);
			VQ_RING_ASSERT_VALID_IDX(vq, next_idx);
			dp = &vq->vq_ring.desc[next_idx];
			dxp->ndescs--;
		}
	}

	VQASSERT(vq, dxp->ndescs == 0,
	    "failed to free entire desc chain, remaining: %d", dxp->ndescs);

	/*
	 * We must append the existing free chain, if any, to the end of
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	dp->next = vq_gtoh16(vq, vq->vq_desc_head_idx);
	vq->vq_desc_head_idx = desc_idx;
}