/*
 * Copyright (C) 2014-2018 Vincenzo Maffione, Luigi Rizzo.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>            /* vtophys ? */
#include <dev/netmap/netmap_kern.h>

/*
 * Return 1 if the queue identified by 't' and 'idx' is in netmap mode.
 */
static int
vtnet_netmap_queue_on(struct vtnet_softc *sc, enum txrx t, int idx)
{
        struct netmap_adapter *na = NA(sc->vtnet_ifp);

        if (!nm_native_on(na))
                return 0;

        if (t == NR_RX)
                return !!(idx < na->num_rx_rings &&
                    na->rx_rings[idx]->nr_mode == NKR_NETMAP_ON);

        return !!(idx < na->num_tx_rings &&
            na->tx_rings[idx]->nr_mode == NKR_NETMAP_ON);
}

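/*
 * Illustrative usage sketch (an assumption about the surrounding driver, not
 * code from this file): the per-queue interrupt paths are expected to consult
 * a predicate like the one above to decide whether a completion belongs to
 * netmap rather than to the regular mbuf path, along these lines (the
 * surrounding handler is hypothetical, netmap_rx_irq() is a real netmap API):
 *
 *	if (vtnet_netmap_queue_on(sc, NR_RX, rxq->vtnrx_id)) {
 *		u_int work_done;
 *
 *		netmap_rx_irq(sc->vtnet_ifp, rxq->vtnrx_id, &work_done);
 *		return;
 *	}
 */
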
static void
vtnet_free_used(struct virtqueue *vq, int netmap_bufs, enum txrx t, int idx)
{
        void *cookie;
        int deq = 0;

        while ((cookie = virtqueue_dequeue(vq, NULL)) != NULL) {
                if (netmap_bufs) {
                        /* These are netmap buffers: there is nothing to do. */
                } else {
                        /* These are mbufs that we need to free. */
                        struct mbuf *m;

                        if (t == NR_TX) {
                                struct vtnet_tx_header *txhdr = cookie;

                                m = txhdr->vth_mbuf;
                                uma_zfree(vtnet_tx_header_zone, txhdr);
                        } else {
                                m = cookie;
                        }
                        m_freem(m);
                }
                deq++;
        }

        if (deq)
                nm_prinf("%d sgs dequeued from %s-%d (netmap=%d)",
                    deq, nm_txrx2str(t), idx, netmap_bufs);
}

/* Register and unregister. */
static int
vtnet_netmap_reg(struct netmap_adapter *na, int state)
{
        struct ifnet *ifp = na->ifp;
        struct vtnet_softc *sc = ifp->if_softc;
        int success;
        enum txrx t;
        int i;

        /* Drain the taskqueues to make sure that there are no worker threads
         * accessing the virtqueues. */
        vtnet_drain_taskqueues(sc);

        VTNET_CORE_LOCK(sc);

        /* We need nm_netmap_on() to return true when called by
         * vtnet_init_locked() below. */
        if (state)
                nm_set_native_flags(na);

        /* We need to trigger a device reset in order to unexpose guest buffers
         * published to the host. */
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
        /* Get pending used buffers. The way they are freed depends on whether
         * they are netmap buffers or mbufs. We can tell the two cases apart
         * by looking at kring->nr_mode, before it is possibly updated in the
         * loop below. */
        for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
                struct vtnet_txq *txq = &sc->vtnet_txqs[i];
                struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
                struct netmap_kring *kring;

                VTNET_TXQ_LOCK(txq);
                kring = NMR(na, NR_TX)[i];
                vtnet_free_used(txq->vtntx_vq,
                    kring->nr_mode == NKR_NETMAP_ON, NR_TX, i);
                VTNET_TXQ_UNLOCK(txq);

                VTNET_RXQ_LOCK(rxq);
                kring = NMR(na, NR_RX)[i];
                vtnet_free_used(rxq->vtnrx_vq,
                    kring->nr_mode == NKR_NETMAP_ON, NR_RX, i);
                VTNET_RXQ_UNLOCK(rxq);
        }
        vtnet_init_locked(sc);
        success = (ifp->if_drv_flags & IFF_DRV_RUNNING) ? 0 : ENXIO;

        if (state) {
                for_rx_tx(t) {
                        /* Hardware rings. */
                        for (i = 0; i < nma_get_nrings(na, t); i++) {
                                struct netmap_kring *kring = NMR(na, t)[i];

                                if (nm_kring_pending_on(kring))
                                        kring->nr_mode = NKR_NETMAP_ON;
                        }

                        /* Host rings. */
                        for (i = 0; i < nma_get_host_nrings(na, t); i++) {
                                struct netmap_kring *kring =
                                    NMR(na, t)[nma_get_nrings(na, t) + i];

                                if (nm_kring_pending_on(kring))
                                        kring->nr_mode = NKR_NETMAP_ON;
                        }
                }
        } else {
                nm_clear_native_flags(na);
                for_rx_tx(t) {
                        /* Hardware rings. */
                        for (i = 0; i < nma_get_nrings(na, t); i++) {
                                struct netmap_kring *kring = NMR(na, t)[i];

                                if (nm_kring_pending_off(kring))
                                        kring->nr_mode = NKR_NETMAP_OFF;
                        }

                        /* Host rings. */
                        for (i = 0; i < nma_get_host_nrings(na, t); i++) {
                                struct netmap_kring *kring =
                                    NMR(na, t)[nma_get_nrings(na, t) + i];

                                if (nm_kring_pending_off(kring))
                                        kring->nr_mode = NKR_NETMAP_OFF;
                        }
                }
        }

        VTNET_CORE_UNLOCK(sc);

        return success;
}

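/*
 * Illustrative userspace counterpart (a sketch, not part of this driver):
 * the nm_register callback above runs when an application opens or closes
 * the interface in netmap mode, e.g. with the legacy netmap_user.h helpers.
 * The interface name "vtnet0" is only an example.
 *
 *	struct nm_desc *d = nm_open("netmap:vtnet0", NULL, 0, NULL);
 *	if (d != NULL) {
 *		... poll() on d->fd, use NIOCTXSYNC/NIOCRXSYNC ...
 *		nm_close(d);
 *	}
 */
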
/* Reconcile kernel and user view of the transmit ring. */
static int
vtnet_netmap_txsync(struct netmap_kring *kring, int flags)
{
        struct netmap_adapter *na = kring->na;
        struct ifnet *ifp = na->ifp;
        struct netmap_ring *ring = kring->ring;
        u_int ring_nr = kring->ring_id;
        u_int nm_i;     /* index into the netmap ring */
        u_int const lim = kring->nkr_num_slots - 1;
        u_int const head = kring->rhead;
        u_int n;

        /* device-specific */
        struct vtnet_softc *sc = ifp->if_softc;
        struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr];
        struct virtqueue *vq = txq->vtntx_vq;
        int interrupts = !(kring->nr_kflags & NKR_NOINTR);

        /*
         * First part: process new packets to send.
         */
        nm_i = kring->nr_hwcur;
        if (nm_i != head) {     /* we have new packets to send */
                struct sglist *sg = txq->vtntx_sg;

                for (; nm_i != head; nm_i = nm_next(nm_i, lim)) {
                        /* we use an empty header here */
                        struct netmap_slot *slot = &ring->slot[nm_i];
                        u_int len = slot->len;
                        uint64_t paddr;
                        void *addr = PNMB(na, slot, &paddr);
                        int err;

                        NM_CHECK_ADDR_LEN(na, addr, len);

                        slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
                        /* Initialize the scatterlist, expose it to the hypervisor,
                         * and kick the hypervisor (if necessary).
                         */
                        sglist_reset(sg); // cheap
                        err = sglist_append(sg, &txq->vtntx_shrhdr, sc->vtnet_hdr_size);
                        err |= sglist_append_phys(sg, paddr, len);
                        KASSERT(err == 0, ("%s: cannot append to sglist %d",
                            __func__, err));
                        err = virtqueue_enqueue(vq, /*cookie=*/txq, sg,
                            /*readable=*/sg->sg_nseg,
                            /*writeable=*/0);
                        if (unlikely(err)) {
                                nm_prerr("virtqueue_enqueue(%s) failed: %d",
                                    kring->name, err);
                                break;
                        }
                }

                virtqueue_notify(vq);

                /* Update hwcur depending on where we stopped. */
                kring->nr_hwcur = nm_i; /* note we might break early */
        }

        /* Free used slots. We only consider our own used buffers, recognized
         * by the token we passed to virtqueue_enqueue.
         */
        n = 0;
        for (;;) {
                void *token = virtqueue_dequeue(vq, NULL);

                if (token == NULL)
                        break;
                if (unlikely(token != (void *)txq))
                        nm_prerr("BUG: TX token mismatch");
                else
                        n++;
        }
        if (n > 0) {
                kring->nr_hwtail += n;
                if (kring->nr_hwtail > lim)
                        kring->nr_hwtail -= lim + 1;
        }

        if (interrupts && virtqueue_nfree(vq) < 32)
                virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG);

        return 0;
}

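/*
 * Illustrative note on the accounting done in vtnet_netmap_txsync() above
 * (an example, not additional driver logic): every transmitted netmap slot
 * is exposed to the host as a 2-element sglist,
 *
 *	seg[0] = shared virtio-net header (vtntx_shrhdr, vtnet_hdr_size bytes)
 *	seg[1] = the netmap buffer itself (paddr, slot->len bytes)
 *
 * and nr_hwtail advances modulo the ring size: e.g. with nkr_num_slots = 256
 * (lim = 255), nr_hwtail = 250 and n = 10 reclaimed slots, 250 + 10 = 260
 * exceeds lim, so nr_hwtail wraps to 260 - 256 = 4.
 */
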
static int
vtnet_netmap_kring_refill(struct netmap_kring *kring, u_int nm_i, u_int head)
{
        struct netmap_adapter *na = kring->na;
        struct ifnet *ifp = na->ifp;
        struct netmap_ring *ring = kring->ring;
        u_int ring_nr = kring->ring_id;
        u_int const lim = kring->nkr_num_slots - 1;

        /* device-specific */
        struct vtnet_softc *sc = ifp->if_softc;
        struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
        struct virtqueue *vq = rxq->vtnrx_vq;

        /* use a local sglist, default might be short */
        struct sglist_seg ss[2];
        struct sglist sg = { ss, 0, 0, 2 };

        for (; nm_i != head; nm_i = nm_next(nm_i, lim)) {
                struct netmap_slot *slot = &ring->slot[nm_i];
                uint64_t paddr;
                void *addr = PNMB(na, slot, &paddr);
                int err;

                if (addr == NETMAP_BUF_BASE(na)) { /* bad buf */
                        if (netmap_ring_reinit(kring))
                                return -1;
                }

                slot->flags &= ~NS_BUF_CHANGED;
                sglist_reset(&sg);
                err = sglist_append(&sg, &rxq->vtnrx_shrhdr, sc->vtnet_hdr_size);
                err |= sglist_append_phys(&sg, paddr, NETMAP_BUF_SIZE(na));
                KASSERT(err == 0, ("%s: cannot append to sglist %d",
                    __func__, err));
                /* writable for the host */
                err = virtqueue_enqueue(vq, /*cookie=*/rxq, &sg,
                    /*readable=*/0, /*writeable=*/sg.sg_nseg);
                if (unlikely(err)) {
                        nm_prerr("virtqueue_enqueue(%s) failed: %d",
                            kring->name, err);
                        break;
                }
        }

        return nm_i;
}

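/*
 * Layout of each receive slot published above (illustrative):
 *
 *	seg[0] = shared virtio-net header (vtnrx_shrhdr), device-writable
 *	seg[1] = netmap buffer of NETMAP_BUF_SIZE(na) bytes, device-writable
 *
 * Both segments are passed as "writeable", since the host stores the
 * virtio-net header and the packet payload into them. The function returns
 * the first netmap index that was not published (or -1 after a failed ring
 * reinit), so callers can assign the return value to nr_hwcur.
 */
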
/*
 * Publish netmap buffers on a RX virtqueue.
 * Returns -1 if this virtqueue is not being opened in netmap mode.
 * If the virtqueue is being opened in netmap mode, returns 0 on success and
 * a positive error code on failure.
 */
static int
vtnet_netmap_rxq_populate(struct vtnet_rxq *rxq)
{
        struct netmap_adapter *na = NA(rxq->vtnrx_sc->vtnet_ifp);
        struct netmap_kring *kring;
        int error;

        if (!nm_native_on(na) || rxq->vtnrx_id >= na->num_rx_rings)
                return -1;

        kring = na->rx_rings[rxq->vtnrx_id];
        if (!(nm_kring_pending_on(kring) ||
                        kring->nr_pending_mode == NKR_NETMAP_ON))
                return -1;

        /* Expose all the RX netmap buffers. Note that the number of
         * netmap slots in the RX ring matches the maximum number of
         * 2-element sglists that the RX virtqueue can accommodate. */
        error = vtnet_netmap_kring_refill(kring, 0, na->num_rx_desc);
        virtqueue_notify(rxq->vtnrx_vq);

        return error < 0 ? ENXIO : 0;
}

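/*
 * Expected caller (a sketch of the integration point, assumed rather than
 * shown in this file): the driver's RX queue initialization path is expected
 * to try netmap first and fall back to mbuf allocation only when this
 * function returns -1, e.g.:
 *
 *	error = vtnet_netmap_rxq_populate(rxq);
 *	if (error >= 0)
 *		return (error);	// queue handled (or failed) as a netmap queue
 *	// otherwise populate the queue with mbufs as usual
 */
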
/* Reconcile kernel and user view of the receive ring. */
static int
vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
{
        struct netmap_adapter *na = kring->na;
        struct ifnet *ifp = na->ifp;
        struct netmap_ring *ring = kring->ring;
        u_int ring_nr = kring->ring_id;
        u_int nm_i;     /* index into the netmap ring */
        u_int const lim = kring->nkr_num_slots - 1;
        u_int const head = kring->rhead;
        int force_update = (flags & NAF_FORCE_READ) ||
            (kring->nr_kflags & NKR_PENDINTR);
        int interrupts = !(kring->nr_kflags & NKR_NOINTR);

        /* device-specific */
        struct vtnet_softc *sc = ifp->if_softc;
        struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
        struct virtqueue *vq = rxq->vtnrx_vq;

        /*
         * First part: import newly received packets.
         * Only accept our own buffers (matching the token). We should only get
         * matching buffers. We may need to stop early to avoid hwtail
         * overrunning hwcur.
         */
        if (netmap_no_pendintr || force_update) {
                uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim);
                void *token;

                vtnet_rxq_disable_intr(rxq);

                nm_i = kring->nr_hwtail;
                while (nm_i != hwtail_lim) {
                        int len;

                        token = virtqueue_dequeue(vq, &len);
                        if (token == NULL) {
                                if (interrupts && vtnet_rxq_enable_intr(rxq)) {
                                        vtnet_rxq_disable_intr(rxq);
                                        continue;
                                }
                                break;
                        }
                        if (unlikely(token != (void *)rxq)) {
                                nm_prerr("BUG: RX token mismatch");
                        } else {
                                /* Skip the virtio-net header. */
                                len -= sc->vtnet_hdr_size;
                                if (unlikely(len < 0)) {
                                        RD(1, "Truncated virtio-net-header, "
                                            "missing %d bytes", -len);
                                        len = 0;
                                }
                                ring->slot[nm_i].len = len;
                                ring->slot[nm_i].flags = 0;
                                nm_i = nm_next(nm_i, lim);
                        }
                }
                kring->nr_hwtail = nm_i;
                kring->nr_kflags &= ~NKR_PENDINTR;
        }
        ND("[B] h %d c %d hwcur %d hwtail %d", ring->head, ring->cur,
            kring->nr_hwcur, kring->nr_hwtail);

        /*
         * Second part: skip past packets that userspace has released.
         */
        nm_i = kring->nr_hwcur; /* netmap ring index */
        if (nm_i != head) {
                int nm_j = vtnet_netmap_kring_refill(kring, nm_i, head);

                if (nm_j < 0)
                        return nm_j;
                kring->nr_hwcur = nm_j;
                virtqueue_notify(vq);
        }

        ND("[C] h %d c %d t %d hwcur %d hwtail %d", ring->head, ring->cur,
            ring->tail, kring->nr_hwcur, kring->nr_hwtail);

        return 0;
}

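/*
 * Worked example (illustrative) of the hwtail_lim bound used above: with
 * nkr_num_slots = 256 (lim = 255) and nr_hwcur = 10, hwtail_lim is
 * nm_prev(10, 255) = 9, so the import loop stops before nr_hwtail could
 * collide with nr_hwcur and the ring always keeps at least one slot free,
 * as the netmap ring convention requires.
 */
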
/* Enable/disable interrupts on all virtqueues. */
static void
vtnet_netmap_intr(struct netmap_adapter *na, int state)
{
        struct vtnet_softc *sc = na->ifp->if_softc;
        int i;

        for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
                struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
                struct vtnet_txq *txq = &sc->vtnet_txqs[i];
                struct virtqueue *txvq = txq->vtntx_vq;

                if (state) {
                        vtnet_rxq_enable_intr(rxq);
                        virtqueue_enable_intr(txvq);
                } else {
                        vtnet_rxq_disable_intr(rxq);
                        virtqueue_disable_intr(txvq);
                }
        }
}

static int
vtnet_netmap_tx_slots(struct vtnet_softc *sc)
{
        int div;

        /* We need to prepend a virtio-net header to each netmap buffer to be
         * transmitted, therefore calling virtqueue_enqueue() with a two-element
         * sglist for each packet.
         * TX virtqueues use indirect descriptors if the feature was negotiated
         * with the host, and if sc->vtnet_tx_nsegs > 1. With indirect
         * descriptors, a single virtio descriptor is sufficient to reference
         * each TX sglist. Without them, we need two separate virtio descriptors
         * for each TX sglist. We therefore compute the number of netmap TX
         * slots according to these assumptions.
         */
        if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_tx_nsegs > 1)
                div = 1;
        else
                div = 2;

        return virtqueue_size(sc->vtnet_txqs[0].vtntx_vq) / div;
}

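/*
 * Numeric example (illustrative, assuming a 256-entry TX virtqueue): with
 * indirect descriptors negotiated, div = 1 and the netmap TX ring gets 256
 * slots; without them each packet consumes two descriptors (header +
 * payload), so div = 2 and the ring is sized to 256 / 2 = 128 slots. The
 * same reasoning applies to the RX sizing helper below.
 */
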
static int
vtnet_netmap_rx_slots(struct vtnet_softc *sc)
{
        int div;

        /* We need to prepend a virtio-net header to each netmap buffer to be
         * received, therefore calling virtqueue_enqueue() with a two-element
         * sglist for each packet.
         * RX virtqueues use indirect descriptors if the feature was negotiated
         * with the host, and if sc->vtnet_rx_nsegs > 1. With indirect
         * descriptors, a single virtio descriptor is sufficient to reference
         * each RX sglist. Without them, we need two separate virtio descriptors
         * for each RX sglist. We therefore compute the number of netmap RX
         * slots according to these assumptions.
         */
        if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_rx_nsegs > 1)
                div = 1;
        else
                div = 2;

        return virtqueue_size(sc->vtnet_rxqs[0].vtnrx_vq) / div;
}

static int
vtnet_netmap_config(struct netmap_adapter *na, struct nm_config_info *info)
{
        struct vtnet_softc *sc = na->ifp->if_softc;

        info->num_tx_rings = sc->vtnet_act_vq_pairs;
        info->num_rx_rings = sc->vtnet_act_vq_pairs;
        info->num_tx_descs = vtnet_netmap_tx_slots(sc);
        info->num_rx_descs = vtnet_netmap_rx_slots(sc);
        info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);

        return 0;
}

static void
vtnet_netmap_attach(struct vtnet_softc *sc)
{
        struct netmap_adapter na;

        bzero(&na, sizeof(na));

        na.ifp = sc->vtnet_ifp;
        na.num_tx_desc = vtnet_netmap_tx_slots(sc);
        na.num_rx_desc = vtnet_netmap_rx_slots(sc);
        na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs;
        na.rx_buf_maxsize = 0;
        na.nm_register = vtnet_netmap_reg;
        na.nm_txsync = vtnet_netmap_txsync;
        na.nm_rxsync = vtnet_netmap_rxsync;
        na.nm_intr = vtnet_netmap_intr;
        na.nm_config = vtnet_netmap_config;

        netmap_attach(&na);

        nm_prinf("vtnet attached txq=%d, txd=%d rxq=%d, rxd=%d",
            na.num_tx_rings, na.num_tx_desc,
            na.num_rx_rings, na.num_rx_desc);
}
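
/*
 * Integration sketch (an assumption about the surrounding driver, not code
 * in this file): vtnet is expected to call vtnet_netmap_attach(sc) at the
 * end of its device attach routine, and netmap_detach(sc->vtnet_ifp) on
 * detach, so that the adapter registered above is torn down together with
 * the interface.
 */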