/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implements the virtqueue interface as basically described
 * in the original VirtIO paper.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/atomic.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtio_config.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>

#include "virtio_bus_if.h"

struct virtqueue {
        device_t                 vq_dev;
        char                     vq_name[VIRTQUEUE_MAX_NAME_SZ];
        uint16_t                 vq_queue_index;
        uint16_t                 vq_nentries;
        uint32_t                 vq_flags;
#define VIRTQUEUE_FLAG_INDIRECT  0x0001
#define VIRTQUEUE_FLAG_EVENT_IDX 0x0002

        int                      vq_alignment;
        int                      vq_ring_size;
        void                    *vq_ring_mem;
        int                      vq_max_indirect_size;
        int                      vq_indirect_mem_size;
        virtqueue_intr_t        *vq_intrhand;
        void                    *vq_intrhand_arg;

        struct vring             vq_ring;
        uint16_t                 vq_free_cnt;
        uint16_t                 vq_queued_cnt;
        /*
         * Head of the free chain in the descriptor table. If
         * there are no free descriptors, this will be set to
         * VQ_RING_DESC_CHAIN_END.
         */
        uint16_t                 vq_desc_head_idx;
        /*
         * Last consumed descriptor in the used table,
         * trails vq_ring.used->idx.
         */
        uint16_t                 vq_used_cons_idx;

        struct vq_desc_extra {
                void              *cookie;
                struct vring_desc *indirect;
                vm_paddr_t         indirect_paddr;
                uint16_t           ndescs;
        } vq_descx[0];
};

/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END 32768

#define VQASSERT(_vq, _exp, _msg, ...)                          \
    KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name,  \
        ##__VA_ARGS__))

#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)                     \
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,                \
        "invalid ring index: %d, max: %d", (_idx),              \
        (_vq)->vq_nentries)

#define VQ_RING_ASSERT_CHAIN_TERM(_vq)                          \
    VQASSERT((_vq), (_vq)->vq_desc_head_idx ==                  \
        VQ_RING_DESC_CHAIN_END, "full ring terminated "         \
        "incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)

static int      virtqueue_init_indirect(struct virtqueue *vq, int);
static void     virtqueue_free_indirect(struct virtqueue *vq);
static void     virtqueue_init_indirect_list(struct virtqueue *,
                    struct vring_desc *);

static void     vq_ring_init(struct virtqueue *);
static void     vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t vq_ring_enqueue_segments(struct virtqueue *,
                    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int      vq_ring_use_indirect(struct virtqueue *, int);
static void     vq_ring_enqueue_indirect(struct virtqueue *, void *,
                    struct sglist *, int, int);
static int      vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static int      vq_ring_must_notify_host(struct virtqueue *);
static void     vq_ring_notify_host(struct virtqueue *);
static void     vq_ring_free_chain(struct virtqueue *, uint16_t);

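/*
 * Filter the features the driver offers down to those this virtqueue
 * implementation understands: every device-specific bit below
 * VIRTIO_TRANSPORT_F_START, plus the two ring features handled here
 * (indirect descriptors and the used/avail event index).
 */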
uint64_t
virtqueue_filter_features(uint64_t features)
{
        uint64_t mask;

        mask = (1 << VIRTIO_TRANSPORT_F_START) - 1;
        mask |= VIRTIO_RING_F_INDIRECT_DESC;
        mask |= VIRTIO_RING_F_EVENT_IDX;

        return (features & mask);
}

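/*
 * Allocate and initialize a virtqueue for queue index 'queue'. The ring
 * memory is allocated physically contiguous below 'highaddr' and page
 * aligned. On success the new virtqueue, with interrupts disabled, is
 * returned through 'vqp'.
 */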
int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
        struct virtqueue *vq;
        int error;

        *vqp = NULL;
        error = 0;

        if (size == 0) {
                device_printf(dev,
                    "virtqueue %d (%s) does not exist (size is zero)\n",
                    queue, info->vqai_name);
                return (ENODEV);
        } else if (!powerof2(size)) {
                device_printf(dev,
                    "virtqueue %d (%s) size is not a power of 2: %d\n",
                    queue, info->vqai_name, size);
                return (ENXIO);
        } else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
                device_printf(dev, "virtqueue %d (%s) requested too many "
                    "indirect descriptors: %d, max %d\n",
                    queue, info->vqai_name, info->vqai_maxindirsz,
                    VIRTIO_MAX_INDIRECT);
                return (EINVAL);
        }

        vq = malloc(sizeof(struct virtqueue) +
            size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (vq == NULL) {
                device_printf(dev, "cannot allocate virtqueue\n");
                return (ENOMEM);
        }

        vq->vq_dev = dev;
        strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
        vq->vq_queue_index = queue;
        vq->vq_alignment = align;
        vq->vq_nentries = size;
        vq->vq_free_cnt = size;
        vq->vq_intrhand = info->vqai_intr;
        vq->vq_intrhand_arg = info->vqai_intr_arg;

        if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
                vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

        if (info->vqai_maxindirsz > 1) {
                error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
                if (error)
                        goto fail;
        }

        vq->vq_ring_size = round_page(vring_size(size, align));
        vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
            M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
        if (vq->vq_ring_mem == NULL) {
                device_printf(dev,
                    "cannot allocate memory for virtqueue ring\n");
                error = ENOMEM;
                goto fail;
        }

        vq_ring_init(vq);
        virtqueue_disable_intr(vq);

        *vqp = vq;

fail:
        if (error)
                virtqueue_free(vq);

        return (error);
}

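/*
 * Preallocate one indirect descriptor list per ring entry, each capable
 * of holding 'indirect_size' descriptors. Running without the indirect
 * feature is not an error, so returning zero when it was not negotiated
 * lets the initialization continue.
 */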
static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
        device_t dev;
        struct vq_desc_extra *dxp;
        int i, size;

        dev = vq->vq_dev;

        if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
                /*
                 * Indirect descriptors requested by the driver but not
                 * negotiated. Return zero to keep the initialization
                 * going: we'll run fine without.
                 */
                if (bootverbose)
                        device_printf(dev, "virtqueue %d (%s) requested "
                            "indirect descriptors but not negotiated\n",
                            vq->vq_queue_index, vq->vq_name);
                return (0);
        }

        size = indirect_size * sizeof(struct vring_desc);
        vq->vq_max_indirect_size = indirect_size;
        vq->vq_indirect_mem_size = size;
        vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

        for (i = 0; i < vq->vq_nentries; i++) {
                dxp = &vq->vq_descx[i];

                dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
                if (dxp->indirect == NULL) {
                        device_printf(dev, "cannot allocate indirect list\n");
                        return (ENOMEM);
                }

                dxp->indirect_paddr = vtophys(dxp->indirect);
                virtqueue_init_indirect_list(vq, dxp->indirect);
        }

        return (0);
}

static void
virtqueue_free_indirect(struct virtqueue *vq)
{
        struct vq_desc_extra *dxp;
        int i;

        for (i = 0; i < vq->vq_nentries; i++) {
                dxp = &vq->vq_descx[i];

                if (dxp->indirect == NULL)
                        break;

                free(dxp->indirect, M_DEVBUF);
                dxp->indirect = NULL;
                dxp->indirect_paddr = 0;
        }

        vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
        vq->vq_indirect_mem_size = 0;
}

static void
virtqueue_init_indirect_list(struct virtqueue *vq,
    struct vring_desc *indirect)
{
        int i;

        bzero(indirect, vq->vq_indirect_mem_size);

        for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
                indirect[i].next = i + 1;
        indirect[i].next = VQ_RING_DESC_CHAIN_END;
}

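/*
 * Return a virtqueue to its freshly allocated state, e.g. across a
 * device reset. The size must match the original allocation; any
 * entries still outstanding are reported as leaked.
 */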
int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
        struct vq_desc_extra *dxp;
        int i;

        if (vq->vq_nentries != size) {
                device_printf(vq->vq_dev,
                    "%s: '%s' changed size; old=%hu, new=%hu\n",
                    __func__, vq->vq_name, vq->vq_nentries, size);
                return (EINVAL);
        }

        /* Warn if the virtqueue was not properly cleaned up. */
        if (vq->vq_free_cnt != vq->vq_nentries) {
                device_printf(vq->vq_dev,
                    "%s: warning '%s' virtqueue not empty, "
                    "leaking %d entries\n", __func__, vq->vq_name,
                    vq->vq_nentries - vq->vq_free_cnt);
        }

        vq->vq_desc_head_idx = 0;
        vq->vq_used_cons_idx = 0;
        vq->vq_queued_cnt = 0;
        vq->vq_free_cnt = vq->vq_nentries;

        /* To be safe, reset all our allocated memory. */
        bzero(vq->vq_ring_mem, vq->vq_ring_size);
        for (i = 0; i < vq->vq_nentries; i++) {
                dxp = &vq->vq_descx[i];
                dxp->cookie = NULL;
                dxp->ndescs = 0;
                if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
                        virtqueue_init_indirect_list(vq, dxp->indirect);
        }

        vq_ring_init(vq);
        virtqueue_disable_intr(vq);

        return (0);
}

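/*
 * Release everything held by the virtqueue: the indirect descriptor
 * lists, the ring memory, and finally the virtqueue itself.
 */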
void
virtqueue_free(struct virtqueue *vq)
{

        if (vq->vq_free_cnt != vq->vq_nentries) {
                device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
                    "leaking %d entries\n", vq->vq_name,
                    vq->vq_nentries - vq->vq_free_cnt);
        }

        if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
                virtqueue_free_indirect(vq);

        if (vq->vq_ring_mem != NULL) {
                contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
                vq->vq_ring_size = 0;
                vq->vq_ring_mem = NULL;
        }

        free(vq, M_DEVBUF);
}

vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{

        return (vtophys(vq->vq_ring_mem));
}

int
virtqueue_size(struct virtqueue *vq)
{

        return (vq->vq_nentries);
}

int
virtqueue_empty(struct virtqueue *vq)
{

        return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{

        return (vq->vq_free_cnt == 0);
}

void
virtqueue_notify(struct virtqueue *vq)
{

        /* Ensure updated avail->idx is visible to host. */
        mb();

        if (vq_ring_must_notify_host(vq))
                vq_ring_notify_host(vq);
        vq->vq_queued_cnt = 0;
}

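/*
 * Return the number of used entries the host has posted but we have
 * not yet consumed: used->idx minus vq_used_cons_idx, modulo 2^16.
 */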
int
virtqueue_nused(struct virtqueue *vq)
{
        uint16_t used_idx, nused;

        used_idx = vq->vq_ring.used->idx;

        nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
        VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

        return (nused);
}

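/*
 * Filter-level interrupt handler: returns 1, with further interrupts
 * disabled, when used entries are pending; 0 for a spurious interrupt.
 */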
int
virtqueue_intr_filter(struct virtqueue *vq)
{

        if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
                return (0);

        virtqueue_disable_intr(vq);

        return (1);
}

void
virtqueue_intr(struct virtqueue *vq)
{

        vq->vq_intrhand(vq->vq_intrhand_arg);
}

int
virtqueue_enable_intr(struct virtqueue *vq)
{

        return (vq_ring_enable_interrupt(vq, 0));
}

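/*
 * Re-enable interrupts but, when EVENT_IDX was negotiated, ask the host
 * to defer them until a hinted fraction of the outstanding descriptors
 * has been consumed: one quarter for VQ_POSTPONE_SHORT, three quarters
 * for VQ_POSTPONE_LONG, all of them for VQ_POSTPONE_EMPTIED.
 */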
int
virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
{
        uint16_t ndesc, avail_idx;

        avail_idx = vq->vq_ring.avail->idx;
        ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);

        switch (hint) {
        case VQ_POSTPONE_SHORT:
                ndesc = ndesc / 4;
                break;
        case VQ_POSTPONE_LONG:
                ndesc = (ndesc * 3) / 4;
                break;
        case VQ_POSTPONE_EMPTIED:
                break;
        }

        return (vq_ring_enable_interrupt(vq, ndesc));
}

/*
 * Note this is only considered a hint to the host. With EVENT_IDX
 * negotiated, interrupts are suppressed by publishing a used event
 * index the host will not reach any time soon.
 */
void
virtqueue_disable_intr(struct virtqueue *vq)
{

        if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
                vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx -
                    vq->vq_nentries - 1;
        } else
                vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

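/*
 * Enqueue a buffer described by 'sg': 'readable' host-readable segments
 * followed by 'writable' host-writable segments, tagged with 'cookie'
 * for recovery at dequeue time. An indirect descriptor is used whenever
 * vq_ring_use_indirect() decides the chain is worth it.
 */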
int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
        struct vq_desc_extra *dxp;
        int needed;
        uint16_t head_idx, idx;

        needed = readable + writable;

        VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
        VQASSERT(vq, needed == sg->sg_nseg,
            "segment count mismatch, %d, %d", needed, sg->sg_nseg);
        VQASSERT(vq,
            needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
            "too many segments to enqueue: %d, %d/%d", needed,
            vq->vq_nentries, vq->vq_max_indirect_size);

        if (needed < 1)
                return (EINVAL);
        if (vq->vq_free_cnt == 0)
                return (ENOSPC);

        if (vq_ring_use_indirect(vq, needed)) {
                vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
                return (0);
        } else if (vq->vq_free_cnt < needed)
                return (EMSGSIZE);

        head_idx = vq->vq_desc_head_idx;
        VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
        dxp = &vq->vq_descx[head_idx];

        VQASSERT(vq, dxp->cookie == NULL,
            "cookie already exists for index %d", head_idx);
        dxp->cookie = cookie;
        dxp->ndescs = needed;

        idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
            sg, readable, writable);

        vq->vq_desc_head_idx = idx;
        vq->vq_free_cnt -= needed;
        if (vq->vq_free_cnt == 0)
                VQ_RING_ASSERT_CHAIN_TERM(vq);
        else
                VQ_RING_ASSERT_VALID_IDX(vq, idx);

        vq_ring_update_avail(vq, head_idx);

        return (0);
}

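/*
 * Example of a typical enqueue/notify cycle, sketched for illustration
 * only. The sglist describes one host-readable header followed by one
 * host-writable buffer; 'sc', 'hdr', 'buf' and 'buflen' are hypothetical
 * driver state, not part of this file, and error handling is elided:
 *
 *      struct sglist_seg segs[2];
 *      struct sglist sg;
 *      int error;
 *
 *      sglist_init(&sg, 2, segs);
 *      sglist_append(&sg, &sc->hdr, sizeof(sc->hdr));
 *      sglist_append(&sg, sc->buf, sc->buflen);
 *
 *      error = virtqueue_enqueue(vq, cookie, &sg, 1, 1);
 *      if (error == 0)
 *              virtqueue_notify(vq);
 */

/*
 * Dequeue the next completed buffer, if any. Returns the cookie passed
 * to virtqueue_enqueue() and, through 'len', the number of bytes the
 * host wrote into the buffer; NULL when no used entries are pending.
 */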
void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
        struct vring_used_elem *uep;
        void *cookie;
        uint16_t used_idx, desc_idx;

        if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
                return (NULL);

        used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
        uep = &vq->vq_ring.used->ring[used_idx];

        rmb();
        desc_idx = (uint16_t) uep->id;
        if (len != NULL)
                *len = uep->len;

        vq_ring_free_chain(vq, desc_idx);

        cookie = vq->vq_descx[desc_idx].cookie;
        VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
        vq->vq_descx[desc_idx].cookie = NULL;

        return (cookie);
}

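/*
 * Busy-wait, via cpu_spinwait(), until a completed buffer can be
 * dequeued. Intended for synchronous requests.
 */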
void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
        void *cookie;

        while ((cookie = virtqueue_dequeue(vq, len)) == NULL)
                cpu_spinwait();

        return (cookie);
}

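/*
 * Walk the descriptor array from '*last', returning the first
 * outstanding cookie found and advancing '*last' so that repeated
 * calls eventually drain every pending buffer. Returns NULL once the
 * entire ring has been scanned.
 */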
void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
        void *cookie;
        int idx;

        cookie = NULL;
        idx = *last;

        while (idx < vq->vq_nentries && cookie == NULL) {
                if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
                        vq->vq_descx[idx].cookie = NULL;
                        /* Free chain to keep free count consistent. */
                        vq_ring_free_chain(vq, idx);
                }
                idx++;
        }

        *last = idx;

        return (cookie);
}

void
virtqueue_dump(struct virtqueue *vq)
{

        if (vq == NULL)
                return;

        printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
            "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
            "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
            vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
            virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
            vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
            vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
            vq->vq_ring.used->flags);
}

static void
vq_ring_init(struct virtqueue *vq)
{
        struct vring *vr;
        char *ring_mem;
        int i, size;

        ring_mem = vq->vq_ring_mem;
        size = vq->vq_nentries;
        vr = &vq->vq_ring;

        vring_init(vr, size, ring_mem, vq->vq_alignment);

        for (i = 0; i < size - 1; i++)
                vr->desc[i].next = i + 1;
        vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}

static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
        uint16_t avail_idx;

        /*
         * Place the head of the descriptor chain into the next slot and make
         * it usable to the host. The chain is made available now rather than
         * deferring to virtqueue_notify() in the hopes that if the host is
         * currently running on another CPU, we can keep it processing the new
         * descriptor.
         */
        avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
        vq->vq_ring.avail->ring[avail_idx] = desc_idx;

        wmb();
        vq->vq_ring.avail->idx++;

        /* Keep pending count until virtqueue_notify(). */
        vq->vq_queued_cnt++;
}

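/*
 * Fill 'needed' descriptors from the free chain starting at 'head_idx'
 * with the sglist segments: the first 'readable' segments are left
 * host-readable, the rest are marked VRING_DESC_F_WRITE. Returns the
 * index following the chain, which becomes the new free list head.
 */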
static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
        struct sglist_seg *seg;
        struct vring_desc *dp;
        int i, needed;
        uint16_t idx;

        needed = readable + writable;

        for (i = 0, idx = head_idx, seg = sg->sg_segs;
             i < needed;
             i++, idx = dp->next, seg++) {
                VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
                    "premature end of free desc chain");

                dp = &desc[idx];
                dp->addr = seg->ss_paddr;
                dp->len = seg->ss_len;
                dp->flags = 0;

                if (i < needed - 1)
                        dp->flags |= VRING_DESC_F_NEXT;
                if (i >= readable)
                        dp->flags |= VRING_DESC_F_WRITE;
        }

        return (idx);
}

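/*
 * An indirect descriptor pays off only when the feature was negotiated,
 * the chain fits in the preallocated indirect list, and the request
 * spans at least two segments; a single segment gains nothing.
 */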
static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{

        if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
                return (0);

        if (vq->vq_max_indirect_size < needed)
                return (0);

        if (needed < 2)
                return (0);

        return (1);
}

static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
        struct vring_desc *dp;
        struct vq_desc_extra *dxp;
        int needed;
        uint16_t head_idx;

        needed = readable + writable;
        VQASSERT(vq, needed <= vq->vq_max_indirect_size,
            "enqueuing too many indirect descriptors");

        head_idx = vq->vq_desc_head_idx;
        VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
        dp = &vq->vq_ring.desc[head_idx];
        dxp = &vq->vq_descx[head_idx];

        VQASSERT(vq, dxp->cookie == NULL,
            "cookie already exists for index %d", head_idx);
        dxp->cookie = cookie;
        dxp->ndescs = 1;

        dp->addr = dxp->indirect_paddr;
        dp->len = needed * sizeof(struct vring_desc);
        dp->flags = VRING_DESC_F_INDIRECT;

        vq_ring_enqueue_segments(vq, dxp->indirect, 0,
            sg, readable, writable);

        vq->vq_desc_head_idx = dp->next;
        vq->vq_free_cnt--;
        if (vq->vq_free_cnt == 0)
                VQ_RING_ASSERT_CHAIN_TERM(vq);
        else
                VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

        vq_ring_update_avail(vq, head_idx);
}

static int
vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{

        /*
         * Enable interrupts, making sure we get the latest index of
         * what's already been consumed.
         */
        if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
                vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
        else
                vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

        mb();

        /*
         * Enough items may have already been consumed to meet our threshold
         * since we last checked. Let our caller know so it processes the new
         * entries.
         */
        if (virtqueue_nused(vq) > ndesc)
                return (1);

        return (0);
}

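/*
 * Decide whether the host must be notified: with EVENT_IDX, only when
 * the avail index has crossed the event index the host published;
 * otherwise, whenever the host has not set VRING_USED_F_NO_NOTIFY.
 */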
static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
        uint16_t new_idx, prev_idx, event_idx;

        if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
                new_idx = vq->vq_ring.avail->idx;
                prev_idx = new_idx - vq->vq_queued_cnt;
                event_idx = vring_avail_event(&vq->vq_ring);

                return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
        }

        return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}

static void
vq_ring_notify_host(struct virtqueue *vq)
{

        VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
}

static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
        struct vring_desc *dp;
        struct vq_desc_extra *dxp;

        VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
        dp = &vq->vq_ring.desc[desc_idx];
        dxp = &vq->vq_descx[desc_idx];

        if (vq->vq_free_cnt == 0)
                VQ_RING_ASSERT_CHAIN_TERM(vq);

        vq->vq_free_cnt += dxp->ndescs;
        dxp->ndescs--;

        if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
                while (dp->flags & VRING_DESC_F_NEXT) {
                        VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
                        dp = &vq->vq_ring.desc[dp->next];
                        dxp->ndescs--;
                }
        }

        VQASSERT(vq, dxp->ndescs == 0,
            "failed to free entire desc chain, remaining: %d", dxp->ndescs);

        /*
         * We must append the existing free chain, if any, to the end of
         * the newly freed chain. If the virtqueue was completely used,
         * then head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
         */
        dp->next = vq->vq_desc_head_idx;
        vq->vq_desc_head_idx = desc_idx;
}