/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implements the virtqueue interface as basically described
 * in the original VirtIO paper.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/atomic.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>

#include "virtio_bus_if.h"

struct virtqueue {
        device_t                 vq_dev;
        char                     vq_name[VIRTQUEUE_MAX_NAME_SZ];
        uint16_t                 vq_queue_index;
        uint16_t                 vq_nentries;
        uint32_t                 vq_flags;
#define VIRTQUEUE_FLAG_INDIRECT  0x0001
#define VIRTQUEUE_FLAG_EVENT_IDX 0x0002

        int                      vq_alignment;
        int                      vq_ring_size;
        void                    *vq_ring_mem;
        int                      vq_max_indirect_size;
        int                      vq_indirect_mem_size;
        virtqueue_intr_t        *vq_intrhand;
        void                    *vq_intrhand_arg;

        struct vring             vq_ring;
        uint16_t                 vq_free_cnt;
        uint16_t                 vq_queued_cnt;
        /*
         * Head of the free chain in the descriptor table. If
         * there are no free descriptors, this will be set to
         * VQ_RING_DESC_CHAIN_END.
         */
        uint16_t                 vq_desc_head_idx;
        /*
         * Last consumed descriptor in the used table,
         * trails vq_ring.used->idx.
         */
        uint16_t                 vq_used_cons_idx;

        struct vq_desc_extra {
                void              *cookie;
                struct vring_desc *indirect;
                vm_paddr_t         indirect_paddr;
                uint16_t           ndescs;
        } vq_descx[0];
};

/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END 32768

#define VQASSERT(_vq, _exp, _msg, ...)                          \
    KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name,  \
        ##__VA_ARGS__))

#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)                     \
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,                \
        "invalid ring index: %d, max: %d", (_idx),              \
        (_vq)->vq_nentries)

#define VQ_RING_ASSERT_CHAIN_TERM(_vq)                          \
    VQASSERT((_vq), (_vq)->vq_desc_head_idx ==                  \
        VQ_RING_DESC_CHAIN_END, "full ring terminated "         \
        "incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)

static int      virtqueue_init_indirect(struct virtqueue *vq, int);
static void     virtqueue_free_indirect(struct virtqueue *vq);
static void     virtqueue_init_indirect_list(struct virtqueue *,
                    struct vring_desc *);

static void     vq_ring_init(struct virtqueue *);
static void     vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t vq_ring_enqueue_segments(struct virtqueue *,
                    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int      vq_ring_use_indirect(struct virtqueue *, int);
static void     vq_ring_enqueue_indirect(struct virtqueue *, void *,
                    struct sglist *, int, int);
static int      vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static int      vq_ring_must_notify_host(struct virtqueue *);
static void     vq_ring_notify_host(struct virtqueue *);
static void     vq_ring_free_chain(struct virtqueue *, uint16_t);

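/*
 * Mask a set of device-offered features down to the transport feature
 * bits plus the ring features this implementation understands: indirect
 * descriptors and the used/avail event index.
 */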
uint64_t
virtqueue_filter_features(uint64_t features)
{
        uint64_t mask;

        /* Use a 64-bit shift; the feature mask is 64 bits wide. */
        mask = (1ULL << VIRTIO_TRANSPORT_F_START) - 1;
        mask |= VIRTIO_RING_F_INDIRECT_DESC;
        mask |= VIRTIO_RING_F_EVENT_IDX;

        return (features & mask);
}

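/*
 * Allocate and initialize a virtqueue of `size` entries. The ring memory
 * is physically contiguous, page aligned, and located below `highaddr`.
 * Indirect descriptor lists are preallocated when requested through
 * info->vqai_maxindirsz and negotiated with the host.
 */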
int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
        struct virtqueue *vq;
        int error;

        *vqp = NULL;
        error = 0;

        if (size == 0) {
                device_printf(dev,
                    "virtqueue %d (%s) does not exist (size is zero)\n",
                    queue, info->vqai_name);
                return (ENODEV);
        } else if (!powerof2(size)) {
                device_printf(dev,
                    "virtqueue %d (%s) size is not a power of 2: %d\n",
                    queue, info->vqai_name, size);
                return (ENXIO);
        } else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
                device_printf(dev, "virtqueue %d (%s) requested too many "
                    "indirect descriptors: %d, max %d\n",
                    queue, info->vqai_name, info->vqai_maxindirsz,
                    VIRTIO_MAX_INDIRECT);
                return (EINVAL);
        }

        vq = malloc(sizeof(struct virtqueue) +
            size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (vq == NULL) {
                device_printf(dev, "cannot allocate virtqueue\n");
                return (ENOMEM);
        }

        vq->vq_dev = dev;
        strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
        vq->vq_queue_index = queue;
        vq->vq_alignment = align;
        vq->vq_nentries = size;
        vq->vq_free_cnt = size;
        vq->vq_intrhand = info->vqai_intr;
        vq->vq_intrhand_arg = info->vqai_intr_arg;

        if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
                vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

        if (info->vqai_maxindirsz > 1) {
                error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
                if (error)
                        goto fail;
        }

        vq->vq_ring_size = round_page(vring_size(size, align));
        vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
            M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
        if (vq->vq_ring_mem == NULL) {
                device_printf(dev,
                    "cannot allocate memory for virtqueue ring\n");
                error = ENOMEM;
                goto fail;
        }

        vq_ring_init(vq);
        virtqueue_disable_intr(vq);

        *vqp = vq;

fail:
        if (error)
                virtqueue_free(vq);

        return (error);
}

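/*
 * Preallocate one indirect descriptor list per descriptor table entry.
 * Returns success without doing anything when the driver asked for
 * indirect descriptors but VIRTIO_RING_F_INDIRECT_DESC was not
 * negotiated; the queue simply runs without them.
 */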
static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
        device_t dev;
        struct vq_desc_extra *dxp;
        int i, size;

        dev = vq->vq_dev;

        if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
                /*
                 * Indirect descriptors requested by the driver but not
                 * negotiated. Return zero to keep the initialization
                 * going: we'll run fine without.
                 */
                if (bootverbose)
                        device_printf(dev, "virtqueue %d (%s) requested "
                            "indirect descriptors but not negotiated\n",
                            vq->vq_queue_index, vq->vq_name);
                return (0);
        }

        size = indirect_size * sizeof(struct vring_desc);
        vq->vq_max_indirect_size = indirect_size;
        vq->vq_indirect_mem_size = size;
        vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

        for (i = 0; i < vq->vq_nentries; i++) {
                dxp = &vq->vq_descx[i];

                dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
                if (dxp->indirect == NULL) {
                        device_printf(dev, "cannot allocate indirect list\n");
                        return (ENOMEM);
                }

                dxp->indirect_paddr = vtophys(dxp->indirect);
                virtqueue_init_indirect_list(vq, dxp->indirect);
        }

        return (0);
}

static void
virtqueue_free_indirect(struct virtqueue *vq)
{
        struct vq_desc_extra *dxp;
        int i;

        for (i = 0; i < vq->vq_nentries; i++) {
                dxp = &vq->vq_descx[i];

                if (dxp->indirect == NULL)
                        break;

                free(dxp->indirect, M_DEVBUF);
                dxp->indirect = NULL;
                dxp->indirect_paddr = 0;
        }

        vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
        vq->vq_indirect_mem_size = 0;
}

static void
virtqueue_init_indirect_list(struct virtqueue *vq,
    struct vring_desc *indirect)
{
        int i;

        bzero(indirect, vq->vq_indirect_mem_size);

        for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
                indirect[i].next = i + 1;
        indirect[i].next = VQ_RING_DESC_CHAIN_END;
}

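/*
 * Reset a virtqueue to its freshly allocated state, typically after a
 * device reset. The size must match the original allocation.
 */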
int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
        struct vq_desc_extra *dxp;
        int i;

        if (vq->vq_nentries != size) {
                device_printf(vq->vq_dev,
                    "%s: '%s' changed size; old=%hu, new=%hu\n",
                    __func__, vq->vq_name, vq->vq_nentries, size);
                return (EINVAL);
        }

        /* Warn if the virtqueue was not properly cleaned up. */
        if (vq->vq_free_cnt != vq->vq_nentries) {
                device_printf(vq->vq_dev,
                    "%s: warning '%s' virtqueue not empty, "
                    "leaking %d entries\n", __func__, vq->vq_name,
                    vq->vq_nentries - vq->vq_free_cnt);
        }

        vq->vq_desc_head_idx = 0;
        vq->vq_used_cons_idx = 0;
        vq->vq_queued_cnt = 0;
        vq->vq_free_cnt = vq->vq_nentries;

        /* To be safe, reset all our allocated memory. */
        bzero(vq->vq_ring_mem, vq->vq_ring_size);
        for (i = 0; i < vq->vq_nentries; i++) {
                dxp = &vq->vq_descx[i];
                dxp->cookie = NULL;
                dxp->ndescs = 0;
                if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
                        virtqueue_init_indirect_list(vq, dxp->indirect);
        }

        vq_ring_init(vq);
        virtqueue_disable_intr(vq);

        return (0);
}

void
virtqueue_free(struct virtqueue *vq)
{

        if (vq->vq_free_cnt != vq->vq_nentries) {
                device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
                    "leaking %d entries\n", vq->vq_name,
                    vq->vq_nentries - vq->vq_free_cnt);
        }

        if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
                virtqueue_free_indirect(vq);

        if (vq->vq_ring_mem != NULL) {
                contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
                vq->vq_ring_size = 0;
                vq->vq_ring_mem = NULL;
        }

        free(vq, M_DEVBUF);
}

vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{

        return (vtophys(vq->vq_ring_mem));
}

int
virtqueue_size(struct virtqueue *vq)
{

        return (vq->vq_nentries);
}

int
virtqueue_nfree(struct virtqueue *vq)
{

        return (vq->vq_free_cnt);
}

int
virtqueue_empty(struct virtqueue *vq)
{

        return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{

        return (vq->vq_free_cnt == 0);
}

void
virtqueue_notify(struct virtqueue *vq)
{

        /* Ensure updated avail->idx is visible to host. */
        mb();

        if (vq_ring_must_notify_host(vq))
                vq_ring_notify_host(vq);
        vq->vq_queued_cnt = 0;
}

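/*
 * Return the number of used entries the host has returned but the
 * driver has not yet consumed. Relies on unsigned wraparound of the
 * 16-bit ring indices.
 */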
int
virtqueue_nused(struct virtqueue *vq)
{
        uint16_t used_idx, nused;

        used_idx = vq->vq_ring.used->idx;

        nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
        VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

        return (nused);
}

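/*
 * Interrupt filter helper: returns non-zero, after disabling further
 * queue interrupts, when there is pending used work for this queue;
 * returns zero when the interrupt was spurious.
 */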
int
virtqueue_intr_filter(struct virtqueue *vq)
{

        if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
                return (0);

        virtqueue_disable_intr(vq);

        return (1);
}

void
virtqueue_intr(struct virtqueue *vq)
{

        vq->vq_intrhand(vq->vq_intrhand_arg);
}

int
virtqueue_enable_intr(struct virtqueue *vq)
{

        return (vq_ring_enable_interrupt(vq, 0));
}

int
virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
{
        uint16_t ndesc, avail_idx;

        avail_idx = vq->vq_ring.avail->idx;
        ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);

        switch (hint) {
        case VQ_POSTPONE_SHORT:
                ndesc = ndesc / 4;
                break;
        case VQ_POSTPONE_LONG:
                ndesc = (ndesc * 3) / 4;
                break;
        case VQ_POSTPONE_EMPTIED:
                break;
        }

        return (vq_ring_enable_interrupt(vq, ndesc));
}

/*
 * Note this is only considered a hint to the host.
 */
void
virtqueue_disable_intr(struct virtqueue *vq)
{

        if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
                vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx -
                    vq->vq_nentries - 1;
        } else
                vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

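/*
 * Enqueue a scatter/gather list, the first `readable` segments being
 * device-readable and the final `writable` segments device-writable.
 * The cookie is handed back by virtqueue_dequeue() when the host has
 * completed the request.
 */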
int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
        struct vq_desc_extra *dxp;
        int needed;
        uint16_t head_idx, idx;

        needed = readable + writable;

        VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
        VQASSERT(vq, needed == sg->sg_nseg,
            "segment count mismatch, %d, %d", needed, sg->sg_nseg);
        VQASSERT(vq,
            needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
            "too many segments to enqueue: %d, %d/%d", needed,
            vq->vq_nentries, vq->vq_max_indirect_size);

        if (needed < 1)
                return (EINVAL);
        if (vq->vq_free_cnt == 0)
                return (ENOSPC);

        if (vq_ring_use_indirect(vq, needed)) {
                vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
                return (0);
        } else if (vq->vq_free_cnt < needed)
                return (EMSGSIZE);

        head_idx = vq->vq_desc_head_idx;
        VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
        dxp = &vq->vq_descx[head_idx];

        VQASSERT(vq, dxp->cookie == NULL,
            "cookie already exists for index %d", head_idx);
        dxp->cookie = cookie;
        dxp->ndescs = needed;

        idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
            sg, readable, writable);

        vq->vq_desc_head_idx = idx;
        vq->vq_free_cnt -= needed;
        if (vq->vq_free_cnt == 0)
                VQ_RING_ASSERT_CHAIN_TERM(vq);
        else
                VQ_RING_ASSERT_VALID_IDX(vq, idx);

        vq_ring_update_avail(vq, head_idx);

        return (0);
}

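/*
 * Dequeue the next completed request, returning its cookie and, if
 * `len` is not NULL, the number of bytes the host wrote. Returns NULL
 * when no completions are pending.
 */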
void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
        struct vring_used_elem *uep;
        void *cookie;
        uint16_t used_idx, desc_idx;

        if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
                return (NULL);

        used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
        uep = &vq->vq_ring.used->ring[used_idx];

        rmb();
        desc_idx = (uint16_t) uep->id;
        if (len != NULL)
                *len = uep->len;

        vq_ring_free_chain(vq, desc_idx);

        cookie = vq->vq_descx[desc_idx].cookie;
        VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
        vq->vq_descx[desc_idx].cookie = NULL;

        return (cookie);
}

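/*
 * Busy-wait until a completed request can be dequeued, polling the bus
 * between attempts. Intended for contexts where interrupts are not
 * available.
 */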
void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
        void *cookie;

        VIRTIO_BUS_POLL(vq->vq_dev);
        while ((cookie = virtqueue_dequeue(vq, len)) == NULL) {
                cpu_spinwait();
                VIRTIO_BUS_POLL(vq->vq_dev);
        }

        return (cookie);
}

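/*
 * Walk the descriptor table, starting at *last, and return the cookie
 * of the next outstanding request, freeing its chain along the way.
 * Used to reclaim requests when tearing down a queue; returns NULL once
 * every entry has been visited.
 */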
void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
        void *cookie;
        int idx;

        cookie = NULL;
        idx = *last;

        while (idx < vq->vq_nentries && cookie == NULL) {
                if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
                        vq->vq_descx[idx].cookie = NULL;
                        /* Free chain to keep free count consistent. */
                        vq_ring_free_chain(vq, idx);
                }
                idx++;
        }

        *last = idx;

        return (cookie);
}

void
virtqueue_dump(struct virtqueue *vq)
{

        if (vq == NULL)
                return;

        printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
            "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
            "used.idx=%d; used_event_idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
            vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
            virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
            vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
            vq->vq_ring.used->idx, vring_used_event(&vq->vq_ring),
            vq->vq_ring.avail->flags, vq->vq_ring.used->flags);
}

static void
vq_ring_init(struct virtqueue *vq)
{
        struct vring *vr;
        char *ring_mem;
        int i, size;

        ring_mem = vq->vq_ring_mem;
        size = vq->vq_nentries;
        vr = &vq->vq_ring;

        vring_init(vr, size, ring_mem, vq->vq_alignment);

        for (i = 0; i < size - 1; i++)
                vr->desc[i].next = i + 1;
        vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}

static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
        uint16_t avail_idx;

        /*
         * Place the head of the descriptor chain into the next slot and make
         * it usable to the host. The chain is made available now rather than
         * deferring to virtqueue_notify() in the hopes that if the host is
         * currently running on another CPU, we can keep it processing the new
         * descriptor.
         */
        avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
        vq->vq_ring.avail->ring[avail_idx] = desc_idx;

        wmb();
        vq->vq_ring.avail->idx++;

        /* Keep pending count until virtqueue_notify(). */
        vq->vq_queued_cnt++;
}

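/*
 * Fill a chain of free descriptors, starting at head_idx, from the
 * scatter/gather list and return the index of the first descriptor
 * past the end of the chain (the new free head).
 */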
static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
        struct sglist_seg *seg;
        struct vring_desc *dp;
        int i, needed;
        uint16_t idx;

        needed = readable + writable;

        for (i = 0, idx = head_idx, seg = sg->sg_segs;
             i < needed;
             i++, idx = dp->next, seg++) {
                VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
                    "premature end of free desc chain");

                dp = &desc[idx];
                dp->addr = seg->ss_paddr;
                dp->len = seg->ss_len;
                dp->flags = 0;

                if (i < needed - 1)
                        dp->flags |= VRING_DESC_F_NEXT;
                if (i >= readable)
                        dp->flags |= VRING_DESC_F_WRITE;
        }

        return (idx);
}

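/*
 * Decide whether a request should go through an indirect descriptor
 * list: indirect descriptors must have been negotiated and the request
 * must fit; single-segment requests stay inline since indirection buys
 * nothing there.
 */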
static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{

        if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
                return (0);

        if (vq->vq_max_indirect_size < needed)
                return (0);

        if (needed < 2)
                return (0);

        return (1);
}

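/*
 * Enqueue a request through the entry's preallocated indirect
 * descriptor list, consuming only one slot in the main descriptor
 * table.
 */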
static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
        struct vring_desc *dp;
        struct vq_desc_extra *dxp;
        int needed;
        uint16_t head_idx;

        needed = readable + writable;
        VQASSERT(vq, needed <= vq->vq_max_indirect_size,
            "enqueuing too many indirect descriptors");

        head_idx = vq->vq_desc_head_idx;
        VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
        dp = &vq->vq_ring.desc[head_idx];
        dxp = &vq->vq_descx[head_idx];

        VQASSERT(vq, dxp->cookie == NULL,
            "cookie already exists for index %d", head_idx);
        dxp->cookie = cookie;
        dxp->ndescs = 1;

        dp->addr = dxp->indirect_paddr;
        dp->len = needed * sizeof(struct vring_desc);
        dp->flags = VRING_DESC_F_INDIRECT;

        vq_ring_enqueue_segments(vq, dxp->indirect, 0,
            sg, readable, writable);

        vq->vq_desc_head_idx = dp->next;
        vq->vq_free_cnt--;
        if (vq->vq_free_cnt == 0)
                VQ_RING_ASSERT_CHAIN_TERM(vq);
        else
                VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

        vq_ring_update_avail(vq, head_idx);
}

static int
vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{

        /*
         * Enable interrupts, making sure we get the latest index of
         * what's already been consumed.
         */
        if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
                vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
        else
                vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

        mb();

        /*
         * Enough items may have already been consumed to meet our threshold
         * since we last checked. Let our caller know so it processes the new
         * entries.
         */
        if (virtqueue_nused(vq) > ndesc)
                return (1);

        return (0);
}

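/*
 * Determine whether the host must be notified: either it has requested
 * notification through the avail event index, or it has not set
 * VRING_USED_F_NO_NOTIFY.
 */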
static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
        uint16_t new_idx, prev_idx, event_idx;

        if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
                new_idx = vq->vq_ring.avail->idx;
                prev_idx = new_idx - vq->vq_queued_cnt;
                event_idx = vring_avail_event(&vq->vq_ring);

                return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
        }

        return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}

static void
vq_ring_notify_host(struct virtqueue *vq)
{

        VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
}

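/*
 * Return a descriptor chain, starting at desc_idx, to the free list.
 * The freed chain becomes the new head of the free list.
 */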
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
        struct vring_desc *dp;
        struct vq_desc_extra *dxp;

        VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
        dp = &vq->vq_ring.desc[desc_idx];
        dxp = &vq->vq_descx[desc_idx];

        if (vq->vq_free_cnt == 0)
                VQ_RING_ASSERT_CHAIN_TERM(vq);

        vq->vq_free_cnt += dxp->ndescs;
        dxp->ndescs--;

        if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
                while (dp->flags & VRING_DESC_F_NEXT) {
                        VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
                        dp = &vq->vq_ring.desc[dp->next];
                        dxp->ndescs--;
                }
        }

        VQASSERT(vq, dxp->ndescs == 0,
            "failed to free entire desc chain, remaining: %d", dxp->ndescs);

        /*
         * We must append the existing free chain, if any, to the end of
         * the newly freed chain. If the virtqueue was completely used,
         * then head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
         */
        dp->next = vq->vq_desc_head_idx;
        vq->vq_desc_head_idx = desc_idx;
}