/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implements the virtqueue interface as basically described
 * in the original VirtIO paper.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/atomic.h>
#include <machine/resource.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>

#include "virtio_bus_if.h"

struct virtqueue {
	device_t		 vq_dev;
	char			 vq_name[VIRTQUEUE_MAX_NAME_SZ];
	uint16_t		 vq_queue_index;
	uint16_t		 vq_nentries;
	uint32_t		 vq_flags;
#define	VIRTQUEUE_FLAG_INDIRECT	 0x0001
#define	VIRTQUEUE_FLAG_EVENT_IDX 0x0002
	int			 vq_alignment;
	int			 vq_ring_size;
	void			*vq_ring_mem;
	int			 vq_max_indirect_size;
	int			 vq_indirect_mem_size;
	virtqueue_intr_t	*vq_intrhand;
	void			*vq_intrhand_arg;

	struct vring		 vq_ring;
	uint16_t		 vq_free_cnt;
	uint16_t		 vq_queued_cnt;
	/*
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t		 vq_desc_head_idx;
	/*
	 * Last consumed descriptor in the used table,
	 * trails vq_ring.used->idx.
	 */
	uint16_t		 vq_used_cons_idx;

	struct vq_desc_extra {
		void		  *cookie;
		struct vring_desc *indirect;
		vm_paddr_t	   indirect_paddr;
		uint16_t	   ndescs;
	} vq_descx[0];
};

/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define	VQ_RING_DESC_CHAIN_END	32768

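/*
 * For example, in a four-entry ring the free list is initially threaded
 * as desc[0].next = 1, desc[1].next = 2, desc[2].next = 3 and
 * desc[3].next = VQ_RING_DESC_CHAIN_END, with vq_desc_head_idx = 0.
 * Once every descriptor is in use, vq_desc_head_idx itself becomes
 * VQ_RING_DESC_CHAIN_END, which the assertions below check for.
 */
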
#define	VQASSERT(_vq, _exp, _msg, ...)				\
    KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name,	\
	##__VA_ARGS__))

#define	VQ_RING_ASSERT_VALID_IDX(_vq, _idx)			\
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,		\
	"invalid ring index: %d, max: %d", (_idx),		\
	(_vq)->vq_nentries)

#define	VQ_RING_ASSERT_CHAIN_TERM(_vq)				\
    VQASSERT((_vq), (_vq)->vq_desc_head_idx ==			\
	VQ_RING_DESC_CHAIN_END, "full ring terminated "		\
	"incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)

static int	virtqueue_init_indirect(struct virtqueue *vq, int);
static void	virtqueue_free_indirect(struct virtqueue *vq);
static void	virtqueue_init_indirect_list(struct virtqueue *,
		    struct vring_desc *);

static void	vq_ring_init(struct virtqueue *);
static void	vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t	vq_ring_enqueue_segments(struct virtqueue *,
		    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int	vq_ring_use_indirect(struct virtqueue *, int);
static void	vq_ring_enqueue_indirect(struct virtqueue *, void *,
		    struct sglist *, int, int);
static int	vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static int	vq_ring_must_notify_host(struct virtqueue *);
static void	vq_ring_notify_host(struct virtqueue *);
static void	vq_ring_free_chain(struct virtqueue *, uint16_t);

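/*
 * Mask a device's advertised features down to those the generic
 * virtqueue code understands: the transport feature bits plus indirect
 * descriptors, the used/avail event index, and VirtIO V1.
 */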
uint64_t
virtqueue_filter_features(uint64_t features)
{
	uint64_t mask;

	mask = (1 << VIRTIO_TRANSPORT_F_START) - 1;
	mask |= VIRTIO_RING_F_INDIRECT_DESC;
	mask |= VIRTIO_RING_F_EVENT_IDX;
	mask |= VIRTIO_F_VERSION_1;

	return (features & mask);
}

int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
	struct virtqueue *vq;
	int error;

	*vqp = NULL;
	error = 0;

	if (size == 0) {
		device_printf(dev,
		    "virtqueue %d (%s) does not exist (size is zero)\n",
		    queue, info->vqai_name);
		return (ENODEV);
	} else if (!powerof2(size)) {
		device_printf(dev,
		    "virtqueue %d (%s) size is not a power of 2: %d\n",
		    queue, info->vqai_name, size);
		return (ENXIO);
	} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
		device_printf(dev, "virtqueue %d (%s) requested too many "
		    "indirect descriptors: %d, max %d\n",
		    queue, info->vqai_name, info->vqai_maxindirsz,
		    VIRTIO_MAX_INDIRECT);
		return (EINVAL);
	}

	vq = malloc(sizeof(struct virtqueue) +
	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (vq == NULL) {
		device_printf(dev, "cannot allocate virtqueue\n");
		return (ENOMEM);
	}

	vq->vq_dev = dev;
	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
	vq->vq_queue_index = queue;
	vq->vq_alignment = align;
	vq->vq_nentries = size;
	vq->vq_free_cnt = size;
	vq->vq_intrhand = info->vqai_intr;
	vq->vq_intrhand_arg = info->vqai_intr_arg;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

	if (info->vqai_maxindirsz > 1) {
		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
		if (error)
			goto fail;
	}

	vq->vq_ring_size = round_page(vring_size(size, align));
	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
	if (vq->vq_ring_mem == NULL) {
		device_printf(dev,
		    "cannot allocate memory for virtqueue ring\n");
		error = ENOMEM;
		goto fail;
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	*vqp = vq;

fail:
	if (error)
		virtqueue_free(vq);

	return (error);
}

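/*
 * Allocate one indirect descriptor table per ring slot when
 * VIRTIO_RING_F_INDIRECT_DESC was negotiated, so that a multi-segment
 * chain can later be enqueued while consuming only a single slot in
 * the main descriptor table.
 */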
static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
	device_t dev;
	struct vq_desc_extra *dxp;
	int i, size;

	dev = vq->vq_dev;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
		/*
		 * Indirect descriptors requested by the driver but not
		 * negotiated. Return zero to keep the initialization
		 * going: we'll run fine without.
		 */
		if (bootverbose)
			device_printf(dev, "virtqueue %d (%s) requested "
			    "indirect descriptors but not negotiated\n",
			    vq->vq_queue_index, vq->vq_name);
		return (0);
	}

	size = indirect_size * sizeof(struct vring_desc);
	vq->vq_max_indirect_size = indirect_size;
	vq->vq_indirect_mem_size = size;
	vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
		if (dxp->indirect == NULL) {
			device_printf(dev, "cannot allocate indirect list\n");
			return (ENOMEM);
		}

		dxp->indirect_paddr = vtophys(dxp->indirect);
		virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	return (0);
}

static void
virtqueue_free_indirect(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	int i;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		if (dxp->indirect == NULL)
			break;

		free(dxp->indirect, M_DEVBUF);
		dxp->indirect = NULL;
		dxp->indirect_paddr = 0;
	}

	vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
	vq->vq_indirect_mem_size = 0;
}

static void
virtqueue_init_indirect_list(struct virtqueue *vq,
    struct vring_desc *indirect)
{
	int i;

	bzero(indirect, vq->vq_indirect_mem_size);

	for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
		indirect[i].next = i + 1;
	indirect[i].next = VQ_RING_DESC_CHAIN_END;
}

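/*
 * Reset a virtqueue to its initial state, typically after a device
 * reset. The size must match the original allocation: the ring memory
 * and the vq_descx[] array are reused, not reallocated.
 */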
int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
	struct vq_desc_extra *dxp;
	int i;

	if (vq->vq_nentries != size) {
		device_printf(vq->vq_dev,
		    "%s: '%s' changed size; old=%hu, new=%hu\n",
		    __func__, vq->vq_name, vq->vq_nentries, size);
		return (EINVAL);
	}

	/* Warn if the virtqueue was not properly cleaned up. */
	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev,
		    "%s: warning '%s' virtqueue not empty, "
		    "leaking %d entries\n", __func__, vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	vq->vq_desc_head_idx = 0;
	vq->vq_used_cons_idx = 0;
	vq->vq_queued_cnt = 0;
	vq->vq_free_cnt = vq->vq_nentries;

	/* To be safe, reset all our allocated memory. */
	bzero(vq->vq_ring_mem, vq->vq_ring_size);
	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];
		dxp->cookie = NULL;
		dxp->ndescs = 0;
		if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
			virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	return (0);
}

void
virtqueue_free(struct virtqueue *vq)
{

	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
		    "leaking %d entries\n", vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
		virtqueue_free_indirect(vq);

	if (vq->vq_ring_mem != NULL) {
		contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
		vq->vq_ring_size = 0;
		vq->vq_ring_mem = NULL;
	}

	free(vq, M_DEVBUF);
}

vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{
	return (vtophys(vq->vq_ring_mem));
}

vm_paddr_t
virtqueue_desc_paddr(struct virtqueue *vq)
{
	return (vtophys(vq->vq_ring.desc));
}

vm_paddr_t
virtqueue_avail_paddr(struct virtqueue *vq)
{
	return (vtophys(vq->vq_ring.avail));
}

vm_paddr_t
virtqueue_used_paddr(struct virtqueue *vq)
{
	return (vtophys(vq->vq_ring.used));
}

uint16_t
virtqueue_index(struct virtqueue *vq)
{
	return (vq->vq_queue_index);
}

int
virtqueue_size(struct virtqueue *vq)
{
	return (vq->vq_nentries);
}

int
virtqueue_nfree(struct virtqueue *vq)
{
	return (vq->vq_free_cnt);
}

int
virtqueue_empty(struct virtqueue *vq)
{
	return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{
	return (vq->vq_free_cnt == 0);
}

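/*
 * Publish pending buffers to the host. The memory barrier ensures the
 * updated avail->idx is globally visible before we decide whether the
 * host actually needs to be notified.
 */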
void
virtqueue_notify(struct virtqueue *vq)
{

	/* Ensure updated avail->idx is visible to host. */
	mb();

	if (vq_ring_must_notify_host(vq))
		vq_ring_notify_host(vq);
	vq->vq_queued_cnt = 0;
}

int
virtqueue_nused(struct virtqueue *vq)
{
	uint16_t used_idx, nused;

	used_idx = vq->vq_ring.used->idx;

	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

	return (nused);
}

int
virtqueue_intr_filter(struct virtqueue *vq)
{

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (0);

	virtqueue_disable_intr(vq);

	return (1);
}

void
virtqueue_intr(struct virtqueue *vq)
{

	vq->vq_intrhand(vq->vq_intrhand_arg);
}

int
virtqueue_enable_intr(struct virtqueue *vq)
{

	return (vq_ring_enable_interrupt(vq, 0));
}

int
virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
{
	uint16_t ndesc, avail_idx;

	avail_idx = vq->vq_ring.avail->idx;
	ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);

	switch (hint) {
	case VQ_POSTPONE_SHORT:
		ndesc = ndesc / 4;
		break;
	case VQ_POSTPONE_LONG:
		ndesc = (ndesc * 3) / 4;
		break;
	case VQ_POSTPONE_EMPTIED:
		break;
	}

	return (vq_ring_enable_interrupt(vq, ndesc));
}

/*
 * Note this is only considered a hint to the host.
 */
void
virtqueue_disable_intr(struct virtqueue *vq)
{

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx -
		    vq->vq_nentries - 1;
	} else
		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx, idx;

	needed = readable + writable;

	VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
	VQASSERT(vq, needed == sg->sg_nseg,
	    "segment count mismatch, %d, %d", needed, sg->sg_nseg);
	VQASSERT(vq,
	    needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
	    "too many segments to enqueue: %d, %d/%d", needed,
	    vq->vq_nentries, vq->vq_max_indirect_size);

	if (needed < 1)
		return (EINVAL);
	if (vq->vq_free_cnt == 0)
		return (ENOSPC);

	if (vq_ring_use_indirect(vq, needed)) {
		vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
		return (0);
	} else if (vq->vq_free_cnt < needed)
		return (EMSGSIZE);

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = needed;

	idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
	    sg, readable, writable);

	vq->vq_desc_head_idx = idx;
	vq->vq_free_cnt -= needed;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, idx);

	vq_ring_update_avail(vq, head_idx);

	return (0);
}

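/*
 * A minimal usage sketch (hypothetical driver code: "req", its fields,
 * and the segment counts are illustrative only, and each sglist_append()
 * is assumed to produce a single physically contiguous segment):
 *
 *	struct sglist_seg segs[2];
 *	struct sglist sg;
 *	int error;
 *
 *	sglist_init(&sg, 2, segs);
 *	sglist_append(&sg, &req->hdr, sizeof(req->hdr));	(device-readable)
 *	sglist_append(&sg, &req->status, sizeof(req->status));	(device-writable)
 *
 *	error = virtqueue_enqueue(vq, req, &sg, 1, 1);
 *	if (error == 0)
 *		virtqueue_notify(vq);
 */
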
void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (NULL);

	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

	rmb();
	desc_idx = (uint16_t) uep->id;
	if (len != NULL)
		*len = uep->len;

	vq_ring_free_chain(vq, desc_idx);

	cookie = vq->vq_descx[desc_idx].cookie;
	VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
	vq->vq_descx[desc_idx].cookie = NULL;

	return (cookie);
}

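/*
 * Spin until a used buffer is returned, polling the transport on each
 * iteration. Intended for synchronous operation where interrupts are
 * not (or not yet) in use, e.g. during attach.
 */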
void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
	void *cookie;

	VIRTIO_BUS_POLL(vq->vq_dev);
	while ((cookie = virtqueue_dequeue(vq, len)) == NULL) {
		cpu_spinwait();
		VIRTIO_BUS_POLL(vq->vq_dev);
	}

	return (cookie);
}

void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
	void *cookie;
	int idx;

	cookie = NULL;
	idx = *last;

	while (idx < vq->vq_nentries && cookie == NULL) {
		if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
			vq->vq_descx[idx].cookie = NULL;
			/* Free chain to keep free count consistent. */
			vq_ring_free_chain(vq, idx);
		}
		idx++;
	}

	*last = idx;

	return (cookie);
}

void
virtqueue_dump(struct virtqueue *vq)
{

	if (vq == NULL)
		return;

	printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
	    "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
	    "used.idx=%d; used_event_idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
	    vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
	    virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
	    vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
	    vq->vq_ring.used->idx,
	    vring_used_event(&vq->vq_ring),
	    vq->vq_ring.avail->flags,
	    vq->vq_ring.used->flags);
}

static void
vq_ring_init(struct virtqueue *vq)
{
	struct vring *vr;
	char *ring_mem;
	int i, size;

	ring_mem = vq->vq_ring_mem;
	size = vq->vq_nentries;
	vr = &vq->vq_ring;

	vring_init(vr, size, ring_mem, vq->vq_alignment);

	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = i + 1;
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}

static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * entry.
	 */
	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[avail_idx] = desc_idx;

	wmb();
	vq->vq_ring.avail->idx++;

	/* Keep pending count until virtqueue_notify(). */
	vq->vq_queued_cnt++;
}

static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
	struct sglist_seg *seg;
	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	needed = readable + writable;

	for (i = 0, idx = head_idx, seg = sg->sg_segs;
	     i < needed;
	     i++, idx = dp->next, seg++) {
		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
		    "premature end of free desc chain");

		dp = &desc[idx];
		dp->addr = seg->ss_paddr;
		dp->len = seg->ss_len;
		dp->flags = 0;

		if (i < needed - 1)
			dp->flags |= VRING_DESC_F_NEXT;
		if (i >= readable)
			dp->flags |= VRING_DESC_F_WRITE;
	}

	return (idx);
}

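/*
 * Decide whether a chain should go through an indirect table: the
 * feature must have been negotiated, the chain must fit in the
 * preallocated table, and it must have more than one segment, since a
 * single descriptor gains nothing from indirection.
 */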
static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{

	if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
		return (0);

	if (vq->vq_max_indirect_size < needed)
		return (0);

	if (needed < 2)
		return (0);

	return (1);
}

static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx;

	needed = readable + writable;
	VQASSERT(vq, needed <= vq->vq_max_indirect_size,
	    "enqueuing too many indirect descriptors");

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dp = &vq->vq_ring.desc[head_idx];
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = 1;

	dp->addr = dxp->indirect_paddr;
	dp->len = needed * sizeof(struct vring_desc);
	dp->flags = VRING_DESC_F_INDIRECT;

	vq_ring_enqueue_segments(vq, dxp->indirect, 0,
	    sg, readable, writable);

	vq->vq_desc_head_idx = dp->next;
	vq->vq_free_cnt--;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

	vq_ring_update_avail(vq, head_idx);
}

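/*
 * Interrupt mitigation works one of two ways. With EVENT_IDX, the
 * driver publishes the used ring index at which it wants the next
 * interrupt; otherwise only the coarse VRING_AVAIL_F_NO_INTERRUPT hint
 * exists and the ndesc threshold cannot be communicated to the host.
 * Either way the caller is told if the threshold was already crossed.
 */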
static int
vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{

	/*
	 * Enable interrupts, making sure we get the latest index of
	 * what's already been consumed.
	 */
	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
	else
		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

	mb();

	/*
	 * Enough items may have already been consumed to meet our threshold
	 * since we last checked. Let our caller know so it processes the new
	 * entries.
	 */
	if (virtqueue_nused(vq) > ndesc)
		return (1);

	return (0);
}

static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
	uint16_t new_idx, prev_idx, event_idx;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		new_idx = vq->vq_ring.avail->idx;
		prev_idx = new_idx - vq->vq_queued_cnt;
		event_idx = vring_avail_event(&vq->vq_ring);

		return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
	}

	return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}

static void
vq_ring_notify_host(struct virtqueue *vq)
{

	VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
}

static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);

	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
			dp = &vq->vq_ring.desc[dp->next];
			dxp->ndescs--;
		}
	}

	VQASSERT(vq, dxp->ndescs == 0,
	    "failed to free entire desc chain, remaining: %d", dxp->ndescs);

	/*
	 * We must append the existing free chain, if any, to the end of
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	dp->next = vq->vq_desc_head_idx;
	vq->vq_desc_head_idx = desc_idx;
}