/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implements the virtqueue interface as basically described
 * in the original VirtIO paper.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/atomic.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>

#include "virtio_bus_if.h"

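/*
 * One of these per allocated virtqueue. The vring itself is shared
 * with the host; the remaining fields are driver-private bookkeeping.
 */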
struct virtqueue {
        device_t                 vq_dev;
        char                     vq_name[VIRTQUEUE_MAX_NAME_SZ];
        uint16_t                 vq_queue_index;
        uint16_t                 vq_nentries;
        uint32_t                 vq_flags;
#define VIRTQUEUE_FLAG_INDIRECT  0x0001
#define VIRTQUEUE_FLAG_EVENT_IDX 0x0002

        int                      vq_alignment;
        int                      vq_ring_size;
        void                    *vq_ring_mem;
        int                      vq_max_indirect_size;
        int                      vq_indirect_mem_size;
        virtqueue_intr_t        *vq_intrhand;
        void                    *vq_intrhand_arg;

        struct vring             vq_ring;
        uint16_t                 vq_free_cnt;
        uint16_t                 vq_queued_cnt;
        /*
         * Head of the free chain in the descriptor table. If
         * there are no free descriptors, this will be set to
         * VQ_RING_DESC_CHAIN_END.
         */
        uint16_t                 vq_desc_head_idx;
        /*
         * Last consumed descriptor in the used table,
         * trails vq_ring.used->idx.
         */
        uint16_t                 vq_used_cons_idx;

        struct vq_desc_extra {
                void              *cookie;
                struct vring_desc *indirect;
                vm_paddr_t         indirect_paddr;
                uint16_t           ndescs;
        } vq_descx[0];
};

/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END 32768

#define VQASSERT(_vq, _exp, _msg, ...)                          \
    KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name,  \
        ##__VA_ARGS__))

#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)                     \
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,                \
        "invalid ring index: %d, max: %d", (_idx),              \
        (_vq)->vq_nentries)

#define VQ_RING_ASSERT_CHAIN_TERM(_vq)                          \
    VQASSERT((_vq), (_vq)->vq_desc_head_idx ==                  \
        VQ_RING_DESC_CHAIN_END, "full ring terminated "         \
        "incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)

static int      virtqueue_init_indirect(struct virtqueue *vq, int);
static void     virtqueue_free_indirect(struct virtqueue *vq);
static void     virtqueue_init_indirect_list(struct virtqueue *,
                    struct vring_desc *);

static void     vq_ring_init(struct virtqueue *);
static void     vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t vq_ring_enqueue_segments(struct virtqueue *,
                    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int      vq_ring_use_indirect(struct virtqueue *, int);
static void     vq_ring_enqueue_indirect(struct virtqueue *, void *,
                    struct sglist *, int, int);
static int      vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static int      vq_ring_must_notify_host(struct virtqueue *);
static void     vq_ring_notify_host(struct virtqueue *);
static void     vq_ring_free_chain(struct virtqueue *, uint16_t);

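/*
 * Strip out transport feature bits the virtqueue code does not
 * implement: everything at or above VIRTIO_TRANSPORT_F_START is
 * dropped except indirect descriptors and the used event index.
 */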
uint64_t
virtqueue_filter_features(uint64_t features)
{
        uint64_t mask;

        mask = (1 << VIRTIO_TRANSPORT_F_START) - 1;
        mask |= VIRTIO_RING_F_INDIRECT_DESC;
        mask |= VIRTIO_RING_F_EVENT_IDX;

        return (features & mask);
}

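/*
 * Allocate and initialize a virtqueue. The size must match what the
 * device reports for this queue and must be a power of two. On
 * success, the new virtqueue is returned through 'vqp' with its
 * interrupt disabled; it is released with virtqueue_free().
 */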
int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
        struct virtqueue *vq;
        int error;

        *vqp = NULL;
        error = 0;

        if (size == 0) {
                device_printf(dev,
                    "virtqueue %d (%s) does not exist (size is zero)\n",
                    queue, info->vqai_name);
                return (ENODEV);
        } else if (!powerof2(size)) {
                device_printf(dev,
                    "virtqueue %d (%s) size is not a power of 2: %d\n",
                    queue, info->vqai_name, size);
                return (ENXIO);
        } else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
                device_printf(dev, "virtqueue %d (%s) requested too many "
                    "indirect descriptors: %d, max %d\n",
                    queue, info->vqai_name, info->vqai_maxindirsz,
                    VIRTIO_MAX_INDIRECT);
                return (EINVAL);
        }

        vq = malloc(sizeof(struct virtqueue) +
            size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (vq == NULL) {
                device_printf(dev, "cannot allocate virtqueue\n");
                return (ENOMEM);
        }

        vq->vq_dev = dev;
        strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
        vq->vq_queue_index = queue;
        vq->vq_alignment = align;
        vq->vq_nentries = size;
        vq->vq_free_cnt = size;
        vq->vq_intrhand = info->vqai_intr;
        vq->vq_intrhand_arg = info->vqai_intr_arg;

        if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
                vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

        if (info->vqai_maxindirsz > 1) {
                error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
                if (error)
                        goto fail;
        }

        vq->vq_ring_size = round_page(vring_size(size, align));
        vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
            M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
        if (vq->vq_ring_mem == NULL) {
                device_printf(dev,
                    "cannot allocate memory for virtqueue ring\n");
                error = ENOMEM;
                goto fail;
        }

        vq_ring_init(vq);
        virtqueue_disable_intr(vq);

        *vqp = vq;

fail:
        if (error)
                virtqueue_free(vq);

        return (error);
}

static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
        device_t dev;
        struct vq_desc_extra *dxp;
        int i, size;

        dev = vq->vq_dev;

        if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
                /*
                 * Indirect descriptors requested by the driver but not
                 * negotiated. Return zero to keep the initialization
                 * going: we'll run fine without.
                 */
                if (bootverbose)
                        device_printf(dev, "virtqueue %d (%s) requested "
                            "indirect descriptors but not negotiated\n",
                            vq->vq_queue_index, vq->vq_name);
                return (0);
        }

        size = indirect_size * sizeof(struct vring_desc);
        vq->vq_max_indirect_size = indirect_size;
        vq->vq_indirect_mem_size = size;
        vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

        for (i = 0; i < vq->vq_nentries; i++) {
                dxp = &vq->vq_descx[i];

                dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
                if (dxp->indirect == NULL) {
                        device_printf(dev, "cannot allocate indirect list\n");
                        return (ENOMEM);
                }

                dxp->indirect_paddr = vtophys(dxp->indirect);
                virtqueue_init_indirect_list(vq, dxp->indirect);
        }

        return (0);
}

static void
virtqueue_free_indirect(struct virtqueue *vq)
{
        struct vq_desc_extra *dxp;
        int i;

        for (i = 0; i < vq->vq_nentries; i++) {
                dxp = &vq->vq_descx[i];

                if (dxp->indirect == NULL)
                        break;

                free(dxp->indirect, M_DEVBUF);
                dxp->indirect = NULL;
                dxp->indirect_paddr = 0;
        }

        vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
        vq->vq_indirect_mem_size = 0;
}

static void
virtqueue_init_indirect_list(struct virtqueue *vq,
    struct vring_desc *indirect)
{
        int i;

        bzero(indirect, vq->vq_indirect_mem_size);

        for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
                indirect[i].next = i + 1;
        indirect[i].next = VQ_RING_DESC_CHAIN_END;
}

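/*
 * Return a virtqueue to its freshly allocated state, typically after
 * a device reset. The queue size cannot change across a reinit.
 */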
int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
        struct vq_desc_extra *dxp;
        int i;

        if (vq->vq_nentries != size) {
                device_printf(vq->vq_dev,
                    "%s: '%s' changed size; old=%hu, new=%hu\n",
                    __func__, vq->vq_name, vq->vq_nentries, size);
                return (EINVAL);
        }

        /* Warn if the virtqueue was not properly cleaned up. */
        if (vq->vq_free_cnt != vq->vq_nentries) {
                device_printf(vq->vq_dev,
                    "%s: warning '%s' virtqueue not empty, "
                    "leaking %d entries\n", __func__, vq->vq_name,
                    vq->vq_nentries - vq->vq_free_cnt);
        }

        vq->vq_desc_head_idx = 0;
        vq->vq_used_cons_idx = 0;
        vq->vq_queued_cnt = 0;
        vq->vq_free_cnt = vq->vq_nentries;

        /* To be safe, reset all our allocated memory. */
        bzero(vq->vq_ring_mem, vq->vq_ring_size);
        for (i = 0; i < vq->vq_nentries; i++) {
                dxp = &vq->vq_descx[i];
                dxp->cookie = NULL;
                dxp->ndescs = 0;
                if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
                        virtqueue_init_indirect_list(vq, dxp->indirect);
        }

        vq_ring_init(vq);
        virtqueue_disable_intr(vq);

        return (0);
}

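/*
 * Release everything held by the virtqueue: the indirect descriptor
 * lists, the ring memory, and the virtqueue structure itself.
 */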
void
virtqueue_free(struct virtqueue *vq)
{

        if (vq->vq_free_cnt != vq->vq_nentries) {
                device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
                    "leaking %d entries\n", vq->vq_name,
                    vq->vq_nentries - vq->vq_free_cnt);
        }

        if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
                virtqueue_free_indirect(vq);

        if (vq->vq_ring_mem != NULL) {
                contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
                vq->vq_ring_size = 0;
                vq->vq_ring_mem = NULL;
        }

        free(vq, M_DEVBUF);
}

vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{

        return (vtophys(vq->vq_ring_mem));
}

int
virtqueue_size(struct virtqueue *vq)
{

        return (vq->vq_nentries);
}

int
virtqueue_empty(struct virtqueue *vq)
{

        return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{

        return (vq->vq_free_cnt == 0);
}

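/*
 * Notify the host of newly available descriptors, unless it has
 * suppressed notifications (or, with EVENT_IDX negotiated, has not
 * yet reached the avail index it asked to be notified at).
 */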
void
virtqueue_notify(struct virtqueue *vq)
{

        /* Ensure updated avail->idx is visible to host. */
        mb();

        if (vq_ring_must_notify_host(vq))
                vq_ring_notify_host(vq);
        vq->vq_queued_cnt = 0;
}

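/* Return the number of completed chains not yet dequeued. */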
int
virtqueue_nused(struct virtqueue *vq)
{
        uint16_t used_idx, nused;

        used_idx = vq->vq_ring.used->idx;

        nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
        VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

        return (nused);
}

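/*
 * Intended to be called from an interrupt filter routine: returns
 * zero if there is no new work in the used ring, otherwise disables
 * further interrupts and returns nonzero so the handler can run.
 */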
int
virtqueue_intr_filter(struct virtqueue *vq)
{

        if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
                return (0);

        virtqueue_disable_intr(vq);

        return (1);
}

void
virtqueue_intr(struct virtqueue *vq)
{

        vq->vq_intrhand(vq->vq_intrhand_arg);
}

int
virtqueue_enable_intr(struct virtqueue *vq)
{

        return (vq_ring_enable_interrupt(vq, 0));
}

int
virtqueue_postpone_intr(struct virtqueue *vq)
{
        uint16_t ndesc, avail_idx;

        /*
         * Request the next interrupt be postponed until at least half
         * of the available descriptors have been consumed.
         */
        avail_idx = vq->vq_ring.avail->idx;
        ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx) / 2;

        return (vq_ring_enable_interrupt(vq, ndesc));
}

void
virtqueue_disable_intr(struct virtqueue *vq)
{

        /*
         * Note this is only considered a hint to the host.
         */
        if ((vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) == 0)
                vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

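/*
 * Enqueue a chain of 'readable + writable' buffers described by 'sg';
 * device-readable segments must come before device-writable ones. A
 * minimal, hypothetical driver sketch:
 *
 *	struct sglist_seg segs[2];
 *	struct sglist sg;
 *
 *	sglist_init(&sg, 2, segs);
 *	sglist_append(&sg, &req->hdr, sizeof(req->hdr));
 *	sglist_append(&sg, req->buf, req->buflen);
 *	error = virtqueue_enqueue(vq, req, &sg, 1, 1);
 *	if (error == 0)
 *		virtqueue_notify(vq);
 */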
int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
        struct vq_desc_extra *dxp;
        int needed;
        uint16_t head_idx, idx;

        needed = readable + writable;

        VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
        VQASSERT(vq, needed == sg->sg_nseg,
            "segment count mismatch, %d, %d", needed, sg->sg_nseg);
        VQASSERT(vq,
            needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
            "too many segments to enqueue: %d, %d/%d", needed,
            vq->vq_nentries, vq->vq_max_indirect_size);

        if (needed < 1)
                return (EINVAL);
        if (vq->vq_free_cnt == 0)
                return (ENOSPC);

        if (vq_ring_use_indirect(vq, needed)) {
                vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
                return (0);
        } else if (vq->vq_free_cnt < needed)
                return (EMSGSIZE);

        head_idx = vq->vq_desc_head_idx;
        VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
        dxp = &vq->vq_descx[head_idx];

        VQASSERT(vq, dxp->cookie == NULL,
            "cookie already exists for index %d", head_idx);
        dxp->cookie = cookie;
        dxp->ndescs = needed;

        idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
            sg, readable, writable);

        vq->vq_desc_head_idx = idx;
        vq->vq_free_cnt -= needed;
        if (vq->vq_free_cnt == 0)
                VQ_RING_ASSERT_CHAIN_TERM(vq);
        else
                VQ_RING_ASSERT_VALID_IDX(vq, idx);

        vq_ring_update_avail(vq, head_idx);

        return (0);
}

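/*
 * Dequeue one completed chain, returning the cookie passed to
 * virtqueue_enqueue() and, through 'len', the number of bytes the
 * host wrote into the writable segments. Returns NULL if no chain
 * has completed.
 */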
void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
        struct vring_used_elem *uep;
        void *cookie;
        uint16_t used_idx, desc_idx;

        if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
                return (NULL);

        used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
        uep = &vq->vq_ring.used->ring[used_idx];

        mb();
        desc_idx = (uint16_t) uep->id;
        if (len != NULL)
                *len = uep->len;

        vq_ring_free_chain(vq, desc_idx);

        cookie = vq->vq_descx[desc_idx].cookie;
        VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
        vq->vq_descx[desc_idx].cookie = NULL;

        return (cookie);
}

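/* Spin until a completed chain can be dequeued. */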
void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
        void *cookie;

        while ((cookie = virtqueue_dequeue(vq, len)) == NULL)
                cpu_spinwait();

        return (cookie);
}

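/*
 * Hand back every outstanding cookie, one per call, e.g. when
 * detaching a device. A hypothetical teardown loop, assuming the
 * cookies are mbufs:
 *
 *	int last = 0;
 *	void *cookie;
 *
 *	while ((cookie = virtqueue_drain(vq, &last)) != NULL)
 *		m_freem(cookie);
 */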
void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
        void *cookie;
        int idx;

        cookie = NULL;
        idx = *last;

        while (idx < vq->vq_nentries && cookie == NULL) {
                if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
                        vq->vq_descx[idx].cookie = NULL;
                        /* Free chain to keep free count consistent. */
                        vq_ring_free_chain(vq, idx);
                }
                idx++;
        }

        *last = idx;

        return (cookie);
}

void
virtqueue_dump(struct virtqueue *vq)
{

        if (vq == NULL)
                return;

        printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
            "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
            "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
            vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
            virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
            vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
            vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
            vq->vq_ring.used->flags);
}

static void
vq_ring_init(struct virtqueue *vq)
{
        struct vring *vr;
        char *ring_mem;
        int i, size;

        ring_mem = vq->vq_ring_mem;
        size = vq->vq_nentries;
        vr = &vq->vq_ring;

        vring_init(vr, size, ring_mem, vq->vq_alignment);

        for (i = 0; i < size - 1; i++)
                vr->desc[i].next = i + 1;
        vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}

static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
        uint16_t avail_idx;

        /*
         * Place the head of the descriptor chain into the next slot and make
         * it usable to the host. The chain is made available now rather than
         * deferring to virtqueue_notify() in the hopes that if the host is
         * currently running on another CPU, we can keep it processing the new
         * descriptor.
         */
        avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
        vq->vq_ring.avail->ring[avail_idx] = desc_idx;

        mb();
        vq->vq_ring.avail->idx++;

        /* Keep pending count until virtqueue_notify(). */
        vq->vq_queued_cnt++;
}

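/*
 * Fill 'readable + writable' descriptors from the free chain starting
 * at 'head_idx' with the scatter/gather segments, and return the index
 * of the descriptor following the chain (the new free head).
 */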
static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
        struct sglist_seg *seg;
        struct vring_desc *dp;
        int i, needed;
        uint16_t idx;

        needed = readable + writable;

        for (i = 0, idx = head_idx, seg = sg->sg_segs;
             i < needed;
             i++, idx = dp->next, seg++) {
                VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
                    "premature end of free desc chain");

                dp = &desc[idx];
                dp->addr = seg->ss_paddr;
                dp->len = seg->ss_len;
                dp->flags = 0;

                if (i < needed - 1)
                        dp->flags |= VRING_DESC_F_NEXT;
                if (i >= readable)
                        dp->flags |= VRING_DESC_F_WRITE;
        }

        return (idx);
}

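/*
 * Indirect descriptors are only worth using for chains of two or more
 * segments that fit within the preallocated indirect list; a lone
 * segment might as well occupy a ring descriptor directly.
 */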
static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{

        if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
                return (0);

        if (vq->vq_max_indirect_size < needed)
                return (0);

        if (needed < 2)
                return (0);

        return (1);
}

static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
        struct vring_desc *dp;
        struct vq_desc_extra *dxp;
        int needed;
        uint16_t head_idx;

        needed = readable + writable;
        VQASSERT(vq, needed <= vq->vq_max_indirect_size,
            "enqueuing too many indirect descriptors");

        head_idx = vq->vq_desc_head_idx;
        VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
        dp = &vq->vq_ring.desc[head_idx];
        dxp = &vq->vq_descx[head_idx];

        VQASSERT(vq, dxp->cookie == NULL,
            "cookie already exists for index %d", head_idx);
        dxp->cookie = cookie;
        dxp->ndescs = 1;

        dp->addr = dxp->indirect_paddr;
        dp->len = needed * sizeof(struct vring_desc);
        dp->flags = VRING_DESC_F_INDIRECT;

        vq_ring_enqueue_segments(vq, dxp->indirect, 0,
            sg, readable, writable);

        vq->vq_desc_head_idx = dp->next;
        vq->vq_free_cnt--;
        if (vq->vq_free_cnt == 0)
                VQ_RING_ASSERT_CHAIN_TERM(vq);
        else
                VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

        vq_ring_update_avail(vq, head_idx);
}

static int
vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{

        /*
         * Enable interrupts, making sure we get the latest index of
         * what's already been consumed.
         */
        if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
                vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
        else
                vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

        mb();

        /*
         * Enough items may have already been consumed to meet our threshold
         * since we last checked. Let our caller know so it processes the new
         * entries.
         */
        if (virtqueue_nused(vq) > ndesc)
                return (1);

        return (0);
}

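/*
 * With EVENT_IDX, the host publishes the avail index at which it next
 * wants a notification; vring_need_event() checks whether the
 * descriptors queued since the last notify crossed that index.
 * Otherwise, honor the host's VRING_USED_F_NO_NOTIFY hint.
 */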
static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
        uint16_t new_idx, prev_idx, event_idx;

        if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
                new_idx = vq->vq_ring.avail->idx;
                prev_idx = new_idx - vq->vq_queued_cnt;
                event_idx = vring_avail_event(&vq->vq_ring);

                return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
        }

        return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}

static void
vq_ring_notify_host(struct virtqueue *vq)
{

        VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
}

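/*
 * Return the descriptor chain starting at 'desc_idx' to the free
 * list. An indirect chain consumes only the one ring descriptor,
 * regardless of how many segments it carries.
 */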
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
        struct vring_desc *dp;
        struct vq_desc_extra *dxp;

        VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
        dp = &vq->vq_ring.desc[desc_idx];
        dxp = &vq->vq_descx[desc_idx];

        if (vq->vq_free_cnt == 0)
                VQ_RING_ASSERT_CHAIN_TERM(vq);

        vq->vq_free_cnt += dxp->ndescs;
        dxp->ndescs--;

        if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
                while (dp->flags & VRING_DESC_F_NEXT) {
                        VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
                        dp = &vq->vq_ring.desc[dp->next];
                        dxp->ndescs--;
                }
        }

        VQASSERT(vq, dxp->ndescs == 0,
            "failed to free entire desc chain, remaining: %d", dxp->ndescs);

        /*
         * We must append the existing free chain, if any, to the end of
         * the newly freed chain. If the virtqueue was completely used,
         * then head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
         */
        dp->next = vq->vq_desc_head_idx;
        vq->vq_desc_head_idx = desc_idx;
}