/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Implements the virtqueue interface as basically described
 * in the original VirtIO paper.
 */
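/*
 * Background (a summary of the split-ring layout from the VirtIO spec, added
 * here for orientation): a virtqueue of N entries is a single physically
 * contiguous allocation holding three structures:
 *
 *	struct vring_desc  desc[N];	descriptor table: addr/len/flags/next
 *	struct vring_avail avail;	driver -> device: ring of desc indices
 *	struct vring_used  used;	device -> driver: ring of used elements
 *
 * The driver chains descriptors with VRING_DESC_F_NEXT, publishes the chain
 * head index in the avail ring, and the host returns completed chain heads
 * (plus the number of bytes written) in the used ring.
 */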
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/atomic.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>

#include "virtio_bus_if.h"
struct virtqueue {
	device_t		 vq_dev;
	char			 vq_name[VIRTQUEUE_MAX_NAME_SZ];
	uint16_t		 vq_queue_index;
	uint16_t		 vq_nentries;
	uint32_t		 vq_flags;
#define	VIRTQUEUE_FLAG_INDIRECT	 0x0001
#define	VIRTQUEUE_FLAG_EVENT_IDX 0x0002

	int			 vq_alignment;
	int			 vq_ring_size;
	void			*vq_ring_mem;
	int			 vq_max_indirect_size;
	int			 vq_indirect_mem_size;
	virtqueue_intr_t	*vq_intrhand;
	void			*vq_intrhand_arg;

	struct vring		 vq_ring;
	uint16_t		 vq_free_cnt;
	uint16_t		 vq_queued_cnt;
	/*
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t		 vq_desc_head_idx;
	/*
	 * Last consumed descriptor in the used table,
	 * trails vq_ring.used->idx.
	 */
	uint16_t		 vq_used_cons_idx;

	struct vq_desc_extra {
		void		  *cookie;
		struct vring_desc *indirect;
		vm_paddr_t	   indirect_paddr;
		uint16_t	   ndescs;
	} vq_descx[0];
};
/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define	VQ_RING_DESC_CHAIN_END	32768
#define	VQASSERT(_vq, _exp, _msg, ...)				\
    KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name,	\
	##__VA_ARGS__))

#define	VQ_RING_ASSERT_VALID_IDX(_vq, _idx)			\
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,		\
	"invalid ring index: %d, max: %d", (_idx),		\
	(_vq)->vq_nentries)

#define	VQ_RING_ASSERT_CHAIN_TERM(_vq)				\
    VQASSERT((_vq), (_vq)->vq_desc_head_idx ==			\
	VQ_RING_DESC_CHAIN_END, "full ring terminated "		\
	"incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)
static int	virtqueue_init_indirect(struct virtqueue *vq, int);
static void	virtqueue_free_indirect(struct virtqueue *vq);
static void	virtqueue_init_indirect_list(struct virtqueue *,
		    struct vring_desc *);

static void	vq_ring_init(struct virtqueue *);
static void	vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t	vq_ring_enqueue_segments(struct virtqueue *,
		    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int	vq_ring_use_indirect(struct virtqueue *, int);
static void	vq_ring_enqueue_indirect(struct virtqueue *, void *,
		    struct sglist *, int, int);
static int	vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static int	vq_ring_must_notify_host(struct virtqueue *);
static void	vq_ring_notify_host(struct virtqueue *);
static void	vq_ring_free_chain(struct virtqueue *, uint16_t);
uint64_t
virtqueue_filter_features(uint64_t features)
{
	uint64_t mask;

	mask = (1 << VIRTIO_TRANSPORT_F_START) - 1;
	mask |= VIRTIO_RING_F_INDIRECT_DESC;
	mask |= VIRTIO_RING_F_EVENT_IDX;

	return (features & mask);
}
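/*
 * Example of the mask above (assuming the legacy value
 * VIRTIO_TRANSPORT_F_START == 28): (1 << 28) - 1 keeps the device-specific
 * feature bits 0..27, and the two ring features are the only transport
 * features this implementation understands, so anything else the device
 * offers is dropped before feature negotiation.
 */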
int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
	struct virtqueue *vq;
	int error;

	*vqp = NULL;
	error = 0;

	if (size == 0) {
		device_printf(dev,
		    "virtqueue %d (%s) does not exist (size is zero)\n",
		    queue, info->vqai_name);
		return (ENODEV);
	} else if (!powerof2(size)) {
		device_printf(dev,
		    "virtqueue %d (%s) size is not a power of 2: %d\n",
		    queue, info->vqai_name, size);
		return (ENXIO);
	} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
		device_printf(dev, "virtqueue %d (%s) requested too many "
		    "indirect descriptors: %d, max %d\n",
		    queue, info->vqai_name, info->vqai_maxindirsz,
		    VIRTIO_MAX_INDIRECT);
		return (EINVAL);
	}

	vq = malloc(sizeof(struct virtqueue) +
	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (vq == NULL) {
		device_printf(dev, "cannot allocate virtqueue\n");
		return (ENOMEM);
	}

	vq->vq_dev = dev;
	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
	vq->vq_queue_index = queue;
	vq->vq_alignment = align;
	vq->vq_nentries = size;
	vq->vq_free_cnt = size;
	vq->vq_intrhand = info->vqai_intr;
	vq->vq_intrhand_arg = info->vqai_intr_arg;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

	if (info->vqai_maxindirsz > 1) {
		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
		if (error)
			goto fail;
	}

	vq->vq_ring_size = round_page(vring_size(size, align));
	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
	if (vq->vq_ring_mem == NULL) {
		device_printf(dev,
		    "cannot allocate memory for virtqueue ring\n");
		error = ENOMEM;
		goto fail;
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	*vqp = vq;

fail:
	if (error)
		virtqueue_free(vq);

	return (error);
}
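/*
 * Usage sketch (hypothetical; a real driver normally goes through
 * VIRTIO_BUS_ALLOC_VIRTQUEUES() rather than calling virtqueue_alloc()
 * directly, and the "mydev" names below are invented for illustration):
 *
 *	struct vq_alloc_info vq_info;
 *	int error;
 *
 *	VQ_ALLOC_INFO_INIT(&vq_info, 0, mydev_vq_intr, sc, &sc->mydev_vq,
 *	    "%s request", device_get_nameunit(dev));
 *	error = virtqueue_alloc(dev, 0, 256, 4096, BUS_SPACE_MAXADDR,
 *	    &vq_info, &sc->mydev_vq);
 */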
static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
	device_t dev;
	struct vq_desc_extra *dxp;
	int i, size;

	dev = vq->vq_dev;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
		/*
		 * Indirect descriptors were requested by the driver but not
		 * negotiated. Return zero to keep the initialization
		 * going: we'll run fine without.
		 */
		if (bootverbose)
			device_printf(dev, "virtqueue %d (%s) requested "
			    "indirect descriptors but not negotiated\n",
			    vq->vq_queue_index, vq->vq_name);
		return (0);
	}

	size = indirect_size * sizeof(struct vring_desc);
	vq->vq_max_indirect_size = indirect_size;
	vq->vq_indirect_mem_size = size;
	vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
		if (dxp->indirect == NULL) {
			device_printf(dev, "cannot allocate indirect list\n");
			return (ENOMEM);
		}

		dxp->indirect_paddr = vtophys(dxp->indirect);
		virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	return (0);
}
static void
virtqueue_free_indirect(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	int i;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		if (dxp->indirect == NULL)
			break;

		free(dxp->indirect, M_DEVBUF);
		dxp->indirect = NULL;
		dxp->indirect_paddr = 0;
	}

	vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
	vq->vq_indirect_mem_size = 0;
}
static void
virtqueue_init_indirect_list(struct virtqueue *vq,
    struct vring_desc *indirect)
{
	int i;

	bzero(indirect, vq->vq_indirect_mem_size);

	for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
		indirect[i].next = i + 1;
	indirect[i].next = VQ_RING_DESC_CHAIN_END;
}
int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
	struct vq_desc_extra *dxp;
	int i;

	if (vq->vq_nentries != size) {
		device_printf(vq->vq_dev,
		    "%s: '%s' changed size; old=%hu, new=%hu\n",
		    __func__, vq->vq_name, vq->vq_nentries, size);
		return (EINVAL);
	}

	/* Warn if the virtqueue was not properly cleaned up. */
	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev,
		    "%s: warning '%s' virtqueue not empty, "
		    "leaking %d entries\n", __func__, vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	vq->vq_desc_head_idx = 0;
	vq->vq_used_cons_idx = 0;
	vq->vq_queued_cnt = 0;
	vq->vq_free_cnt = vq->vq_nentries;

	/* To be safe, reset all our allocated memory. */
	bzero(vq->vq_ring_mem, vq->vq_ring_size);
	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];
		dxp->cookie = NULL;
		dxp->ndescs = 0;
		if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
			virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	return (0);
}
void
virtqueue_free(struct virtqueue *vq)
{

	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
		    "leaking %d entries\n", vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
		virtqueue_free_indirect(vq);

	if (vq->vq_ring_mem != NULL) {
		contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
		vq->vq_ring_size = 0;
		vq->vq_ring_mem = NULL;
	}

	free(vq, M_DEVBUF);
}
vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring_mem));
}

int
virtqueue_size(struct virtqueue *vq)
{

	return (vq->vq_nentries);
}

int
virtqueue_nfree(struct virtqueue *vq)
{

	return (vq->vq_free_cnt);
}

int
virtqueue_empty(struct virtqueue *vq)
{

	return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{

	return (vq->vq_free_cnt == 0);
}
void
virtqueue_notify(struct virtqueue *vq)
{

	/* Ensure updated avail->idx is visible to host. */
	mb();

	if (vq_ring_must_notify_host(vq))
		vq_ring_notify_host(vq);
	vq->vq_queued_cnt = 0;
}
int
virtqueue_nused(struct virtqueue *vq)
{
	uint16_t used_idx, nused;

	used_idx = vq->vq_ring.used->idx;

	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

	return (nused);
}
int
virtqueue_intr_filter(struct virtqueue *vq)
{

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (0);

	virtqueue_disable_intr(vq);

	return (1);
}

void
virtqueue_intr(struct virtqueue *vq)
{

	vq->vq_intrhand(vq->vq_intrhand_arg);
}
int
virtqueue_enable_intr(struct virtqueue *vq)
{

	return (vq_ring_enable_interrupt(vq, 0));
}
int
virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
{
	uint16_t ndesc, avail_idx;

	avail_idx = vq->vq_ring.avail->idx;
	ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);

	switch (hint) {
	case VQ_POSTPONE_SHORT:
		ndesc = ndesc / 4;
		break;
	case VQ_POSTPONE_LONG:
		ndesc = (ndesc * 3) / 4;
		break;
	case VQ_POSTPONE_EMPTIED:
		break;
	}

	return (vq_ring_enable_interrupt(vq, ndesc));
}
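/*
 * Worked example (illustrative numbers, not from the source): with 8
 * descriptors outstanding, VQ_POSTPONE_SHORT asks for an interrupt after
 * 8 / 4 = 2 more completions, VQ_POSTPONE_LONG after 8 * 3 / 4 = 6, and
 * VQ_POSTPONE_EMPTIED leaves ndesc at 8, i.e. only once everything
 * currently queued has been consumed.
 */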
/*
 * Note this is only considered a hint to the host.
 */
void
virtqueue_disable_intr(struct virtqueue *vq)
{

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx -
		    vq->vq_nentries - 1;
	} else
		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
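/*
 * With EVENT_IDX there is no disable flag, so the used event index is
 * instead parked nentries + 1 slots behind the consumer index, a position
 * the host cannot legitimately reach before the driver re-arms it. For
 * example (hypothetical values), with a 256-entry queue and
 * vq_used_cons_idx == 10, used_event becomes (uint16_t)(10 - 257) = 65289,
 * more than one full ring away from any index the host can hit.
 */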
int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx, idx;

	needed = readable + writable;

	VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
	VQASSERT(vq, needed == sg->sg_nseg,
	    "segment count mismatch, %d, %d", needed, sg->sg_nseg);
	VQASSERT(vq,
	    needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
	    "too many segments to enqueue: %d, %d/%d", needed,
	    vq->vq_nentries, vq->vq_max_indirect_size);

	if (needed < 1)
		return (EINVAL);
	if (vq->vq_free_cnt == 0)
		return (ENOSPC);

	if (vq_ring_use_indirect(vq, needed)) {
		vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
		return (0);
	} else if (vq->vq_free_cnt < needed)
		return (EMSGSIZE);

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = needed;

	idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
	    sg, readable, writable);

	vq->vq_desc_head_idx = idx;
	vq->vq_free_cnt -= needed;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, idx);

	vq_ring_update_avail(vq, head_idx);

	return (0);
}
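/*
 * Usage sketch (hypothetical driver code; "req", "resp", and the stack-backed
 * sglist sizes are invented for illustration). A request with one buffer the
 * host reads and one it writes is enqueued, then the host is notified:
 *
 *	struct sglist_seg segs[2];
 *	struct sglist sg;
 *	int error;
 *
 *	sglist_init(&sg, 2, segs);
 *	error = sglist_append(&sg, req, sizeof(*req));
 *	error = sglist_append(&sg, resp, sizeof(*resp));
 *	error = virtqueue_enqueue(vq, req, &sg, 1, 1);
 *	if (error == 0)
 *		virtqueue_notify(vq);
 */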
void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (NULL);

	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

	rmb();
	desc_idx = (uint16_t) uep->id;
	if (len != NULL)
		*len = uep->len;

	vq_ring_free_chain(vq, desc_idx);

	cookie = vq->vq_descx[desc_idx].cookie;
	VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
	vq->vq_descx[desc_idx].cookie = NULL;

	return (cookie);
}
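/*
 * Typical consumer loop (sketch; "mydev_process" is an invented name). An
 * interrupt handler drains completions until the used ring is empty:
 *
 *	void *cookie;
 *	uint32_t len;
 *
 *	while ((cookie = virtqueue_dequeue(vq, &len)) != NULL)
 *		mydev_process(sc, cookie, len);
 */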
void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
	void *cookie;

	VIRTIO_BUS_POLL(vq->vq_dev);
	while ((cookie = virtqueue_dequeue(vq, len)) == NULL) {
		cpu_spinwait();
		VIRTIO_BUS_POLL(vq->vq_dev);
	}

	return (cookie);
}
void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
	void *cookie;
	int idx;

	cookie = NULL;
	idx = *last;

	while (idx < vq->vq_nentries && cookie == NULL) {
		if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
			vq->vq_descx[idx].cookie = NULL;
			/* Free chain to keep free count consistent. */
			vq_ring_free_chain(vq, idx);
		}
		idx++;
	}

	*last = idx;

	return (cookie);
}
void
virtqueue_dump(struct virtqueue *vq)
{

	if (vq == NULL)
		return;

	printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
	    "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
	    "used.idx=%d; used_event_idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
	    vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
	    virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
	    vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
	    vq->vq_ring.used->idx,
	    vring_used_event(&vq->vq_ring),
	    vq->vq_ring.avail->flags,
	    vq->vq_ring.used->flags);
}
static void
vq_ring_init(struct virtqueue *vq)
{
	struct vring *vr;
	char *ring_mem;
	int i, size;

	ring_mem = vq->vq_ring_mem;
	size = vq->vq_nentries;
	vr = &vq->vq_ring;

	vring_init(vr, size, ring_mem, vq->vq_alignment);

	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = i + 1;
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}
static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[avail_idx] = desc_idx;

	wmb();
	vq->vq_ring.avail->idx++;

	/* Keep pending count until virtqueue_notify(). */
	vq->vq_queued_cnt++;
}
static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
	struct sglist_seg *seg;
	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	needed = readable + writable;

	for (i = 0, idx = head_idx, seg = sg->sg_segs;
	     i < needed;
	     i++, idx = dp->next, seg++) {
		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
		    "premature end of free desc chain");

		dp = &desc[idx];
		dp->addr = seg->ss_paddr;
		dp->len = seg->ss_len;
		dp->flags = 0;

		if (i < needed - 1)
			dp->flags |= VRING_DESC_F_NEXT;
		if (i >= readable)
			dp->flags |= VRING_DESC_F_WRITE;
	}

	return (idx);
}
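/*
 * Flag layout example (follows directly from the loop above): for
 * readable == 1 and writable == 2, the three descriptors get
 *
 *	i == 0: VRING_DESC_F_NEXT			(device reads)
 *	i == 1: VRING_DESC_F_NEXT | VRING_DESC_F_WRITE	(device writes)
 *	i == 2: VRING_DESC_F_WRITE			(last, no NEXT)
 *
 * and the returned idx is the next free descriptor after the chain, which
 * the caller installs as the new vq_desc_head_idx.
 */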
static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{

	if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
		return (0);

	if (vq->vq_max_indirect_size < needed)
		return (0);

	if (needed < 2)
		return (0);

	return (1);
}
static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx;

	needed = readable + writable;
	VQASSERT(vq, needed <= vq->vq_max_indirect_size,
	    "enqueuing too many indirect descriptors");

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dp = &vq->vq_ring.desc[head_idx];
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = 1;

	dp->addr = dxp->indirect_paddr;
	dp->len = needed * sizeof(struct vring_desc);
	dp->flags = VRING_DESC_F_INDIRECT;

	vq_ring_enqueue_segments(vq, dxp->indirect, 0,
	    sg, readable, writable);

	vq->vq_desc_head_idx = dp->next;
	vq->vq_free_cnt--;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

	vq_ring_update_avail(vq, head_idx);
}
static int
vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{

	/*
	 * Enable interrupts, making sure we get the latest index of
	 * what's already been consumed.
	 */
	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
	else
		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

	mb();

	/*
	 * Enough items may have already been consumed to meet our threshold
	 * since we last checked. Let our caller know so it processes the new
	 * entries.
	 */
	if (virtqueue_nused(vq) > ndesc)
		return (1);

	return (0);
}
static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
	uint16_t new_idx, prev_idx, event_idx;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		new_idx = vq->vq_ring.avail->idx;
		prev_idx = new_idx - vq->vq_queued_cnt;
		event_idx = vring_avail_event(&vq->vq_ring);

		return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
	}

	return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}
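/*
 * vring_need_event() reduces to the modular comparison
 *
 *	(uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - prev_idx)
 *
 * i.e. notify only when the host's requested avail event index falls within
 * the batch of entries queued since the last notify. Example (hypothetical
 * indices): prev_idx == 10, new_idx == 14, event_idx == 12 gives
 * (14 - 12 - 1) = 1 < (14 - 10) = 4, so the host is notified; with
 * event_idx == 20 the comparison wraps to a huge value and no notify
 * is needed.
 */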
static void
vq_ring_notify_host(struct virtqueue *vq)
{

	VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
}
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);

	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
			dp = &vq->vq_ring.desc[dp->next];
			dxp->ndescs--;
		}
	}

	VQASSERT(vq, dxp->ndescs == 0,
	    "failed to free entire desc chain, remaining: %d", dxp->ndescs);

	/*
	 * We must append the existing free chain, if any, to the end of
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	dp->next = vq->vq_desc_head_idx;
	vq->vq_desc_head_idx = desc_idx;
}
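/*
 * Bookkeeping example (values invented): freeing a direct chain of three
 * descriptors with ndescs == 3 adds 3 back to vq_free_cnt up front; the
 * initial decrement accounts for the head and the NEXT-flag walk decrements
 * once per link, leaving ndescs == 0 at the asserted check. The walk also
 * leaves dp pointing at the chain's tail, which is then spliced onto the
 * old free list head.
 */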