/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implements the virtqueue interface as basically described
 * in the original VirtIO paper.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/atomic.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>

#include "virtio_bus_if.h"

struct virtqueue {
        device_t                 vq_dev;
        char                     vq_name[VIRTQUEUE_MAX_NAME_SZ];
        uint16_t                 vq_queue_index;
        uint16_t                 vq_nentries;
        uint32_t                 vq_flags;
#define VIRTQUEUE_FLAG_INDIRECT  0x0001
#define VIRTQUEUE_FLAG_EVENT_IDX 0x0002

        int                      vq_alignment;
        int                      vq_ring_size;
        void                    *vq_ring_mem;
        int                      vq_max_indirect_size;
        int                      vq_indirect_mem_size;
        virtqueue_intr_t        *vq_intrhand;
        void                    *vq_intrhand_arg;

        struct vring             vq_ring;
        uint16_t                 vq_free_cnt;
        uint16_t                 vq_queued_cnt;
        /*
         * Head of the free chain in the descriptor table. If
         * there are no free descriptors, this will be set to
         * VQ_RING_DESC_CHAIN_END.
         */
        uint16_t                 vq_desc_head_idx;
        /*
         * Last consumed descriptor in the used table,
         * trails vq_ring.used->idx.
         */
        uint16_t                 vq_used_cons_idx;

        struct vq_desc_extra {
                void              *cookie;
                struct vring_desc *indirect;
                vm_paddr_t         indirect_paddr;
                uint16_t           ndescs;
        } vq_descx[0];
};

/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END 32768

#define VQASSERT(_vq, _exp, _msg, ...)                          \
    KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name,  \
        ##__VA_ARGS__))

#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)                     \
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,                \
        "invalid ring index: %d, max: %d", (_idx),              \
        (_vq)->vq_nentries)

#define VQ_RING_ASSERT_CHAIN_TERM(_vq)                          \
    VQASSERT((_vq), (_vq)->vq_desc_head_idx ==                  \
        VQ_RING_DESC_CHAIN_END, "full ring terminated "         \
        "incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)

static int      virtqueue_init_indirect(struct virtqueue *vq, int);
static void     virtqueue_free_indirect(struct virtqueue *vq);
static void     virtqueue_init_indirect_list(struct virtqueue *,
                    struct vring_desc *);

static void     vq_ring_init(struct virtqueue *);
static void     vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t vq_ring_enqueue_segments(struct virtqueue *,
                    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int      vq_ring_use_indirect(struct virtqueue *, int);
static void     vq_ring_enqueue_indirect(struct virtqueue *, void *,
                    struct sglist *, int, int);
static int      vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static int      vq_ring_must_notify_host(struct virtqueue *);
static void     vq_ring_notify_host(struct virtqueue *);
static void     vq_ring_free_chain(struct virtqueue *, uint16_t);

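/*
 * Mask out the feature bits the virtqueue code does not understand:
 * everything below the transport feature range is kept, plus the two
 * ring features implemented here (indirect descriptors and the
 * used/avail event indices).
 */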
uint64_t
virtqueue_filter_features(uint64_t features)
{
        uint64_t mask;

        mask = (1 << VIRTIO_TRANSPORT_F_START) - 1;
        mask |= VIRTIO_RING_F_INDIRECT_DESC;
        mask |= VIRTIO_RING_F_EVENT_IDX;

        return (features & mask);
}

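/*
 * Allocate and initialize a virtqueue of 'size' entries. The ring is
 * backed by physically contiguous, page-aligned memory allocated below
 * 'highaddr'.
 */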
int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
        struct virtqueue *vq;
        int error;

        *vqp = NULL;
        error = 0;

        if (size == 0) {
                device_printf(dev,
                    "virtqueue %d (%s) does not exist (size is zero)\n",
                    queue, info->vqai_name);
                return (ENODEV);
        } else if (!powerof2(size)) {
                device_printf(dev,
                    "virtqueue %d (%s) size is not a power of 2: %d\n",
                    queue, info->vqai_name, size);
                return (ENXIO);
        } else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
                device_printf(dev, "virtqueue %d (%s) requested too many "
                    "indirect descriptors: %d, max %d\n",
                    queue, info->vqai_name, info->vqai_maxindirsz,
                    VIRTIO_MAX_INDIRECT);
                return (EINVAL);
        }

        vq = malloc(sizeof(struct virtqueue) +
            size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (vq == NULL) {
                device_printf(dev, "cannot allocate virtqueue\n");
                return (ENOMEM);
        }

        vq->vq_dev = dev;
        strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
        vq->vq_queue_index = queue;
        vq->vq_alignment = align;
        vq->vq_nentries = size;
        vq->vq_free_cnt = size;
        vq->vq_intrhand = info->vqai_intr;
        vq->vq_intrhand_arg = info->vqai_intr_arg;

        if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
                vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

        if (info->vqai_maxindirsz > 1) {
                error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
                if (error)
                        goto fail;
        }

        vq->vq_ring_size = round_page(vring_size(size, align));
        vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
            M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
        if (vq->vq_ring_mem == NULL) {
                device_printf(dev,
                    "cannot allocate memory for virtqueue ring\n");
                error = ENOMEM;
                goto fail;
        }

        vq_ring_init(vq);
        virtqueue_disable_intr(vq);

        *vqp = vq;

fail:
        if (error)
                virtqueue_free(vq);

        return (error);
}

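/*
 * Allocate one indirect descriptor list per ring entry. This is done
 * only when the host has negotiated VIRTIO_RING_F_INDIRECT_DESC;
 * otherwise the queue quietly falls back to direct descriptors.
 */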
static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
        device_t dev;
        struct vq_desc_extra *dxp;
        int i, size;

        dev = vq->vq_dev;

        if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
                /*
                 * Indirect descriptors were requested by the driver but
                 * not negotiated. Return zero to keep the initialization
                 * going: we will run fine without them.
                 */
                if (bootverbose)
                        device_printf(dev, "virtqueue %d (%s) requested "
                            "indirect descriptors but not negotiated\n",
                            vq->vq_queue_index, vq->vq_name);
                return (0);
        }

        size = indirect_size * sizeof(struct vring_desc);
        vq->vq_max_indirect_size = indirect_size;
        vq->vq_indirect_mem_size = size;
        vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

        for (i = 0; i < vq->vq_nentries; i++) {
                dxp = &vq->vq_descx[i];

                dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
                if (dxp->indirect == NULL) {
                        device_printf(dev, "cannot allocate indirect list\n");
                        return (ENOMEM);
                }

                dxp->indirect_paddr = vtophys(dxp->indirect);
                virtqueue_init_indirect_list(vq, dxp->indirect);
        }

        return (0);
}

static void
virtqueue_free_indirect(struct virtqueue *vq)
{
        struct vq_desc_extra *dxp;
        int i;

        for (i = 0; i < vq->vq_nentries; i++) {
                dxp = &vq->vq_descx[i];

                if (dxp->indirect == NULL)
                        break;

                free(dxp->indirect, M_DEVBUF);
                dxp->indirect = NULL;
                dxp->indirect_paddr = 0;
        }

        vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
        vq->vq_indirect_mem_size = 0;
}

static void
virtqueue_init_indirect_list(struct virtqueue *vq,
    struct vring_desc *indirect)
{
        int i;

        bzero(indirect, vq->vq_indirect_mem_size);

        for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
                indirect[i].next = i + 1;
        indirect[i].next = VQ_RING_DESC_CHAIN_END;
}

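/*
 * Return a virtqueue to its freshly allocated state, e.g. across a
 * device reset. The queue must be reinitialized at the same size it
 * was allocated with.
 */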
int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
        struct vq_desc_extra *dxp;
        int i;

        if (vq->vq_nentries != size) {
                device_printf(vq->vq_dev,
                    "%s: '%s' changed size; old=%hu, new=%hu\n",
                    __func__, vq->vq_name, vq->vq_nentries, size);
                return (EINVAL);
        }

        /* Warn if the virtqueue was not properly cleaned up. */
        if (vq->vq_free_cnt != vq->vq_nentries) {
                device_printf(vq->vq_dev,
                    "%s: warning '%s' virtqueue not empty, "
                    "leaking %d entries\n", __func__, vq->vq_name,
                    vq->vq_nentries - vq->vq_free_cnt);
        }

        vq->vq_desc_head_idx = 0;
        vq->vq_used_cons_idx = 0;
        vq->vq_queued_cnt = 0;
        vq->vq_free_cnt = vq->vq_nentries;

        /* To be safe, reset all our allocated memory. */
        bzero(vq->vq_ring_mem, vq->vq_ring_size);
        for (i = 0; i < vq->vq_nentries; i++) {
                dxp = &vq->vq_descx[i];
                dxp->cookie = NULL;
                dxp->ndescs = 0;
                if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
                        virtqueue_init_indirect_list(vq, dxp->indirect);
        }

        vq_ring_init(vq);
        virtqueue_disable_intr(vq);

        return (0);
}

void
virtqueue_free(struct virtqueue *vq)
{

        if (vq->vq_free_cnt != vq->vq_nentries) {
                device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
                    "leaking %d entries\n", vq->vq_name,
                    vq->vq_nentries - vq->vq_free_cnt);
        }

        if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
                virtqueue_free_indirect(vq);

        if (vq->vq_ring_mem != NULL) {
                contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
                vq->vq_ring_size = 0;
                vq->vq_ring_mem = NULL;
        }

        free(vq, M_DEVBUF);
}

vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{

        return (vtophys(vq->vq_ring_mem));
}

vm_paddr_t
virtqueue_desc_paddr(struct virtqueue *vq)
{

        return (vtophys(vq->vq_ring.desc));
}

vm_paddr_t
virtqueue_avail_paddr(struct virtqueue *vq)
{

        return (vtophys(vq->vq_ring.avail));
}

vm_paddr_t
virtqueue_used_paddr(struct virtqueue *vq)
{

        return (vtophys(vq->vq_ring.used));
}

uint16_t
virtqueue_index(struct virtqueue *vq)
{

        return (vq->vq_queue_index);
}

int
virtqueue_size(struct virtqueue *vq)
{

        return (vq->vq_nentries);
}

int
virtqueue_nfree(struct virtqueue *vq)
{

        return (vq->vq_free_cnt);
}

int
virtqueue_empty(struct virtqueue *vq)
{

        return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{

        return (vq->vq_free_cnt == 0);
}

void
virtqueue_notify(struct virtqueue *vq)
{

        /* Ensure updated avail->idx is visible to host. */
        mb();

        if (vq_ring_must_notify_host(vq))
                vq_ring_notify_host(vq);
        vq->vq_queued_cnt = 0;
}

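/*
 * Return the number of used entries the host has returned but we have
 * not yet consumed. The subtraction is safe across wraparound because
 * both indices are free-running uint16_t counters.
 */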
int
virtqueue_nused(struct virtqueue *vq)
{
        uint16_t used_idx, nused;

        used_idx = vq->vq_ring.used->idx;

        nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
        VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

        return (nused);
}

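/*
 * Filter-level interrupt handler: return zero when no work is pending,
 * otherwise disable further interrupts and report that the queue needs
 * servicing.
 */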
int
virtqueue_intr_filter(struct virtqueue *vq)
{

        if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
                return (0);

        virtqueue_disable_intr(vq);

        return (1);
}

void
virtqueue_intr(struct virtqueue *vq)
{

        vq->vq_intrhand(vq->vq_intrhand_arg);
}

int
virtqueue_enable_intr(struct virtqueue *vq)
{

        return (vq_ring_enable_interrupt(vq, 0));
}

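/*
 * Re-enable interrupts, but only after a portion of the outstanding
 * descriptors has been consumed: one quarter for VQ_POSTPONE_SHORT,
 * three quarters for VQ_POSTPONE_LONG, and all of them for
 * VQ_POSTPONE_EMPTIED. Without the event index feature this degrades
 * to an immediate enable.
 */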
int
virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
{
        uint16_t ndesc, avail_idx;

        avail_idx = vq->vq_ring.avail->idx;
        ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);

        switch (hint) {
        case VQ_POSTPONE_SHORT:
                ndesc = ndesc / 4;
                break;
        case VQ_POSTPONE_LONG:
                ndesc = (ndesc * 3) / 4;
                break;
        case VQ_POSTPONE_EMPTIED:
                break;
        }

        return (vq_ring_enable_interrupt(vq, ndesc));
}

/*
 * Note this is only considered a hint to the host.
 */
void
virtqueue_disable_intr(struct virtqueue *vq)
{

        if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
                vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx -
                    vq->vq_nentries - 1;
        } else
                vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

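/*
 * Enqueue the buffer described by 'sg': the first 'readable' segments
 * are device-readable and the remaining 'writable' segments are
 * device-writable. 'cookie' identifies the buffer and is handed back
 * by virtqueue_dequeue() once the host has consumed it.
 *
 * A minimal usage sketch for a hypothetical driver request 'req'
 * (locking and error handling omitted):
 *
 *      struct sglist_seg segs[2];
 *      struct sglist sg;
 *
 *      sglist_init(&sg, 2, segs);
 *      sglist_append(&sg, &req->hdr, sizeof(req->hdr));    readable
 *      sglist_append(&sg, &req->ack, sizeof(req->ack));    writable
 *      if (virtqueue_enqueue(vq, req, &sg, 1, 1) == 0)
 *              virtqueue_notify(vq);
 */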
int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
        struct vq_desc_extra *dxp;
        int needed;
        uint16_t head_idx, idx;

        needed = readable + writable;

        VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
        VQASSERT(vq, needed == sg->sg_nseg,
            "segment count mismatch, %d, %d", needed, sg->sg_nseg);
        VQASSERT(vq,
            needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
            "too many segments to enqueue: %d, %d/%d", needed,
            vq->vq_nentries, vq->vq_max_indirect_size);

        if (needed < 1)
                return (EINVAL);
        if (vq->vq_free_cnt == 0)
                return (ENOSPC);

        if (vq_ring_use_indirect(vq, needed)) {
                vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
                return (0);
        } else if (vq->vq_free_cnt < needed)
                return (EMSGSIZE);

        head_idx = vq->vq_desc_head_idx;
        VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
        dxp = &vq->vq_descx[head_idx];

        VQASSERT(vq, dxp->cookie == NULL,
            "cookie already exists for index %d", head_idx);
        dxp->cookie = cookie;
        dxp->ndescs = needed;

        idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
            sg, readable, writable);

        vq->vq_desc_head_idx = idx;
        vq->vq_free_cnt -= needed;
        if (vq->vq_free_cnt == 0)
                VQ_RING_ASSERT_CHAIN_TERM(vq);
        else
                VQ_RING_ASSERT_VALID_IDX(vq, idx);

        vq_ring_update_avail(vq, head_idx);

        return (0);
}

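/*
 * Dequeue the next completed buffer, returning its enqueue cookie and,
 * when 'len' is non-NULL, the length the host reported for it. Returns
 * NULL if the used ring is empty.
 */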
void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
        struct vring_used_elem *uep;
        void *cookie;
        uint16_t used_idx, desc_idx;

        if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
                return (NULL);

        used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
        uep = &vq->vq_ring.used->ring[used_idx];

        rmb();
        desc_idx = (uint16_t) uep->id;
        if (len != NULL)
                *len = uep->len;

        vq_ring_free_chain(vq, desc_idx);

        cookie = vq->vq_descx[desc_idx].cookie;
        VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
        vq->vq_descx[desc_idx].cookie = NULL;

        return (cookie);
}

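/*
 * Spin until the host returns a completed buffer, polling the bus
 * between iterations. Intended for contexts where interrupts are
 * unavailable or not yet enabled.
 */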
void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
        void *cookie;

        VIRTIO_BUS_POLL(vq->vq_dev);
        while ((cookie = virtqueue_dequeue(vq, len)) == NULL) {
                cpu_spinwait();
                VIRTIO_BUS_POLL(vq->vq_dev);
        }

        return (cookie);
}

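/*
 * Reclaim any buffers still owned by the queue, one per call, for use
 * when tearing a queue down. '*last' preserves the scan position
 * across calls; NULL is returned once the entire descriptor table has
 * been scanned.
 */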
void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
        void *cookie;
        int idx;

        cookie = NULL;
        idx = *last;

        while (idx < vq->vq_nentries && cookie == NULL) {
                if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
                        vq->vq_descx[idx].cookie = NULL;
                        /* Free chain to keep free count consistent. */
                        vq_ring_free_chain(vq, idx);
                }
                idx++;
        }

        *last = idx;

        return (cookie);
}

void
virtqueue_dump(struct virtqueue *vq)
{

        if (vq == NULL)
                return;

        printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
            "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
            "used.idx=%d; used_event_idx=%d; avail.flags=0x%x; "
            "used.flags=0x%x\n",
            vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
            virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
            vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
            vq->vq_ring.used->idx, vring_used_event(&vq->vq_ring),
            vq->vq_ring.avail->flags, vq->vq_ring.used->flags);
}

static void
vq_ring_init(struct virtqueue *vq)
{
        struct vring *vr;
        char *ring_mem;
        int i, size;

        ring_mem = vq->vq_ring_mem;
        size = vq->vq_nentries;
        vr = &vq->vq_ring;

        vring_init(vr, size, ring_mem, vq->vq_alignment);

        for (i = 0; i < size - 1; i++)
                vr->desc[i].next = i + 1;
        vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}

static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
        uint16_t avail_idx;

        /*
         * Place the head of the descriptor chain into the next slot and
         * make it usable to the host. The chain is made available now
         * rather than deferring to virtqueue_notify() in the hopes that
         * if the host is currently running on another CPU, we can keep
         * it processing the new descriptor.
         */
        avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
        vq->vq_ring.avail->ring[avail_idx] = desc_idx;

        wmb();
        vq->vq_ring.avail->idx++;

        /* Keep pending count until virtqueue_notify(). */
        vq->vq_queued_cnt++;
}

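/*
 * Fill a chain of descriptors from the scatter/gather list, marking
 * the device-writable segments, and return the index that follows the
 * end of the chain.
 */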
static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
        struct sglist_seg *seg;
        struct vring_desc *dp;
        int i, needed;
        uint16_t idx;

        needed = readable + writable;

        for (i = 0, idx = head_idx, seg = sg->sg_segs;
             i < needed;
             i++, idx = dp->next, seg++) {
                VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
                    "premature end of free desc chain");

                dp = &desc[idx];
                dp->addr = seg->ss_paddr;
                dp->len = seg->ss_len;
                dp->flags = 0;

                if (i < needed - 1)
                        dp->flags |= VRING_DESC_F_NEXT;
                if (i >= readable)
                        dp->flags |= VRING_DESC_F_WRITE;
        }

        return (idx);
}

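/*
 * Decide whether a request should use an indirect descriptor list:
 * the feature must have been negotiated, the request must fit in the
 * preallocated list, and single-segment requests are not worth the
 * extra indirection.
 */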
static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{

        if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
                return (0);

        if (vq->vq_max_indirect_size < needed)
                return (0);

        if (needed < 2)
                return (0);

        return (1);
}

static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
        struct vring_desc *dp;
        struct vq_desc_extra *dxp;
        int needed;
        uint16_t head_idx;

        needed = readable + writable;
        VQASSERT(vq, needed <= vq->vq_max_indirect_size,
            "enqueuing too many indirect descriptors");

        head_idx = vq->vq_desc_head_idx;
        VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
        dp = &vq->vq_ring.desc[head_idx];
        dxp = &vq->vq_descx[head_idx];

        VQASSERT(vq, dxp->cookie == NULL,
            "cookie already exists for index %d", head_idx);
        dxp->cookie = cookie;
        dxp->ndescs = 1;

        dp->addr = dxp->indirect_paddr;
        dp->len = needed * sizeof(struct vring_desc);
        dp->flags = VRING_DESC_F_INDIRECT;

        vq_ring_enqueue_segments(vq, dxp->indirect, 0,
            sg, readable, writable);

        vq->vq_desc_head_idx = dp->next;
        vq->vq_free_cnt--;
        if (vq->vq_free_cnt == 0)
                VQ_RING_ASSERT_CHAIN_TERM(vq);
        else
                VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

        vq_ring_update_avail(vq, head_idx);
}

static int
vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{

        /*
         * Enable interrupts, making sure we get the latest index of
         * what's already been consumed.
         */
        if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
                vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
        else
                vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

        mb();

        /*
         * Enough items may have already been consumed to meet our
         * threshold since we last checked. Let our caller know so it
         * processes the new entries.
         */
        if (virtqueue_nused(vq) > ndesc)
                return (1);

        return (0);
}

static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
        uint16_t new_idx, prev_idx, event_idx;

        if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
                new_idx = vq->vq_ring.avail->idx;
                prev_idx = new_idx - vq->vq_queued_cnt;
                event_idx = vring_avail_event(&vq->vq_ring);

                return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
        }

        return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}

static void
vq_ring_notify_host(struct virtqueue *vq)
{

        VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
}

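/*
 * Return the descriptor chain starting at 'desc_idx' to the free list.
 * An indirect chain occupies only a single slot in the main table.
 */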
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
        struct vring_desc *dp;
        struct vq_desc_extra *dxp;

        VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
        dp = &vq->vq_ring.desc[desc_idx];
        dxp = &vq->vq_descx[desc_idx];

        if (vq->vq_free_cnt == 0)
                VQ_RING_ASSERT_CHAIN_TERM(vq);

        vq->vq_free_cnt += dxp->ndescs;
        dxp->ndescs--;

        if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
                while (dp->flags & VRING_DESC_F_NEXT) {
                        VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
                        dp = &vq->vq_ring.desc[dp->next];
                        dxp->ndescs--;
                }
        }

        VQASSERT(vq, dxp->ndescs == 0,
            "failed to free entire desc chain, remaining: %d", dxp->ndescs);

        /*
         * We must append the existing free chain, if any, to the end of
         * the newly freed chain. If the virtqueue was completely used,
         * then head would be VQ_RING_DESC_CHAIN_END (asserted above).
         */
        dp->next = vq->vq_desc_head_idx;
        vq->vq_desc_head_idx = desc_idx;
}