 * Copyright (C) 2014-2018 Vincenzo Maffione, Luigi Rizzo.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/pmap.h>	/* vtophys ? */
#include <dev/netmap/netmap_kern.h>
/* Register and unregister. */
vtnet_netmap_reg(struct netmap_adapter *na, int state)
	struct ifnet *ifp = na->ifp;
	struct vtnet_softc *sc = ifp->if_softc;

	 * Trigger a device reinit, asking vtnet_init_locked() to
	 * also enter or exit netmap mode.
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vtnet_init_locked(sc, state ? VTNET_INIT_NETMAP_ENTER
	    : VTNET_INIT_NETMAP_EXIT);
	VTNET_CORE_UNLOCK(sc);
/* Reconcile kernel and user view of the transmit ring. */
vtnet_netmap_txsync(struct netmap_kring *kring, int flags)
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;

	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr];
	struct virtqueue *vq = txq->vtntx_vq;
	int interrupts = !(kring->nr_kflags & NKR_NOINTR);
	 * First part: process new packets to send.
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		struct sglist *sg = txq->vtntx_sg;

		for (; nm_i != head; nm_i = nm_next(nm_i, lim)) {
			/* we use an empty header here */
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			void *addr = PNMB(na, slot, &paddr);

			NM_CHECK_ADDR_LEN(na, addr, len);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			/* Initialize the scatterlist, expose it to the hypervisor,
			 * and kick the hypervisor (if necessary).
			sglist_reset(sg); // cheap
			err = sglist_append(sg, &txq->vtntx_shrhdr, sc->vtnet_hdr_size);
			err |= sglist_append_phys(sg, paddr, len);
			KASSERT(err == 0, ("%s: cannot append to sglist %d",
			err = virtqueue_enqueue(vq, /*cookie=*/txq, sg,
					/*readable=*/sg->sg_nseg,
				nm_prerr("virtqueue_enqueue(%s) failed: %d",
		virtqueue_notify(vq);

		/* Update hwcur depending on where we stopped. */
		kring->nr_hwcur = nm_i; /* note we might break early */
	/* Free used slots. We only consider our own used buffers, recognized
	 * by the token we passed to virtqueue_enqueue.
		void *token = virtqueue_dequeue(vq, NULL);
		if (unlikely(token != (void *)txq))
			nm_prerr("BUG: TX token mismatch");
		kring->nr_hwtail += n;
		if (kring->nr_hwtail > lim)
			kring->nr_hwtail -= lim + 1;

	if (interrupts && virtqueue_nfree(vq) < 32)
		virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG);
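	/* The check just above acts as a simple form of TX interrupt
	 * moderation: when fewer than 32 descriptors are left free, we ask
	 * the host to hold off the next used-buffer notification until a
	 * larger share of the ring has been consumed, so completed slots
	 * tend to be reclaimed in batches by subsequent txsync calls.
	 */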
 * Publish 'num' netmap receive buffers to the host, starting
 * from the next available one (rxq->vtnrx_nm_refill).
 * Return a positive error code on error, and 0 on success.
 * If we could not publish all of the buffers, that is an error,
 * since the netmap ring and the virtqueue would go out of sync.
vtnet_netmap_kring_refill(struct netmap_kring *kring, u_int num)
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int const lim = kring->nkr_num_slots - 1;

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	/* use a local sglist, default might be short */
	struct sglist_seg ss[2];
	struct sglist sg = { ss, 0, 0, 2 };

	for (nm_i = rxq->vtnrx_nm_refill; num > 0;
	    nm_i = nm_next(nm_i, lim), num--) {
		struct netmap_slot *slot = &ring->slot[nm_i];
		void *addr = PNMB(na, slot, &paddr);

		if (addr == NETMAP_BUF_BASE(na)) { /* bad buf */
			if (netmap_ring_reinit(kring))

		slot->flags &= ~NS_BUF_CHANGED;
		err = sglist_append(&sg, &rxq->vtnrx_shrhdr, sc->vtnet_hdr_size);
		err |= sglist_append_phys(&sg, paddr, NETMAP_BUF_SIZE(na));
		KASSERT(err == 0, ("%s: cannot append to sglist %d",
		/* writable for the host */
		err = virtqueue_enqueue(vq, /*cookie=*/rxq, &sg,
				/*readable=*/0, /*writeable=*/sg.sg_nseg);
			nm_prerr("virtqueue_enqueue(%s) failed: %d",
	rxq->vtnrx_nm_refill = nm_i;

	return num == 0 ? 0 : ENOSPC;
 * Publish netmap buffers on a RX virtqueue.
 * Returns -1 if this virtqueue is not being opened in netmap mode.
 * If the virtqueue is being opened in netmap mode, return 0 on success and
 * a positive error code on failure.
vtnet_netmap_rxq_populate(struct vtnet_rxq *rxq)
	struct netmap_adapter *na = NA(rxq->vtnrx_sc->vtnet_ifp);
	struct netmap_kring *kring;
	struct netmap_slot *slot;

	slot = netmap_reset(na, NR_RX, rxq->vtnrx_id, 0);
	kring = na->rx_rings[rxq->vtnrx_id];
	 * Expose all the RX netmap buffers we can. In case of no indirect
	 * buffers, the number of netmap slots in the RX ring matches the
	 * maximum number of 2-element sglists that the RX virtqueue can
	 * accommodate. We need to start from kring->nr_hwtail, which is 0
	 * on the first netmap register and may be different from 0 if a
	 * virtio re-init (caused by a netmap register or, e.g., an ifconfig
	 * command) happens while the device is in use by netmap.
	rxq->vtnrx_nm_refill = kring->nr_hwtail;
	num = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
	error = vtnet_netmap_kring_refill(kring, num);
	virtqueue_notify(rxq->vtnrx_vq);
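	/* For instance, on the first netmap register kring->nr_hwtail and
	 * nm_kr_rxspace(kring) are both 0, so a ring with N slots gets N - 1
	 * buffers published by the refill above. Keeping one slot unused is
	 * what guarantees that hwtail can never overrun hwcur (see the
	 * comment in vtnet_netmap_rxsync() below).
	 */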
/* Reconcile kernel and user view of the receive ring. */
vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) ||
	    (kring->nr_kflags & NKR_PENDINTR);
	int interrupts = !(kring->nr_kflags & NKR_NOINTR);

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;
	 * First part: import newly received packets.
	 * Only accept our own buffers (matching the token). We should only get
	 * matching buffers. The hwtail should never overrun hwcur, because
	 * we publish only N-1 receive buffers (and not N).
	 * In any case we must not leave this routine with the interrupts
	 * disabled, pending packets in the VQ and hwtail == (hwcur - 1),
	 * otherwise the pending packets could stall.
	if (netmap_no_pendintr || force_update) {
		uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim);

		vtnet_rxq_disable_intr(rxq);
		nm_i = kring->nr_hwtail;
			token = virtqueue_dequeue(vq, &len);
				 * Enable the interrupts again and double-check
				 * for more work. We can go on until we win the
				 * race condition, since we are not replenishing
				 * in the meanwhile, and thus we will process at
				if (interrupts && vtnet_rxq_enable_intr(rxq)) {
					vtnet_rxq_disable_intr(rxq);
			if (unlikely(token != (void *)rxq)) {
				nm_prerr("BUG: RX token mismatch");
				if (nm_i == hwtail_lim) {
					KASSERT(false, ("hwtail would "

				/* Skip the virtio-net header. */
				len -= sc->vtnet_hdr_size;
				if (unlikely(len < 0)) {
					nm_prlim(1, "Truncated virtio-net-header, "
						"missing %d bytes", -len);
				ring->slot[nm_i].len = len;
				ring->slot[nm_i].flags = 0;
				nm_i = nm_next(nm_i, lim);
		kring->nr_hwtail = nm_i;
		kring->nr_kflags &= ~NKR_PENDINTR;
	 * Second part: skip past packets that userspace has released.
	nm_i = kring->nr_hwcur; /* netmap ring index */
		released = head - nm_i;
			released += kring->nkr_num_slots;
		error = vtnet_netmap_kring_refill(kring, released);
			nm_prerr("Failed to replenish RX VQ with %u sgs",
		kring->nr_hwcur = head;
		virtqueue_notify(vq);

	nm_prdis("h %d c %d t %d hwcur %d hwtail %d", kring->rhead,
	    kring->rcur, kring->rtail, kring->nr_hwcur, kring->nr_hwtail);
/* Enable/disable interrupts on all virtqueues. */
vtnet_netmap_intr(struct netmap_adapter *na, int state)
	struct vtnet_softc *sc = na->ifp->if_softc;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];
		struct virtqueue *txvq = txq->vtntx_vq;

			vtnet_rxq_enable_intr(rxq);
			virtqueue_enable_intr(txvq);
			vtnet_rxq_disable_intr(rxq);
			virtqueue_disable_intr(txvq);
vtnet_netmap_tx_slots(struct vtnet_softc *sc)
	/* We need to prepend a virtio-net header to each netmap buffer to be
	 * transmitted, therefore calling virtqueue_enqueue() passing sglist
	 * TX virtqueues use indirect descriptors if the feature was negotiated
	 * with the host, and if sc->vtnet_tx_nsegs > 1. With indirect
	 * descriptors, a single virtio descriptor is sufficient to reference
	 * each TX sglist. Without them, we need two separate virtio descriptors
	 * for each TX sglist. We therefore compute the number of netmap TX
	 * slots according to these assumptions.
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_tx_nsegs > 1)
	return virtqueue_size(sc->vtnet_txqs[0].vtntx_vq) / div;
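	/* A worked example for the computation above, with a hypothetical
	 * queue size: a 256-descriptor TX virtqueue yields 256 netmap TX
	 * slots when indirect descriptors are in use (one descriptor per
	 * 2-element sglist, div == 1), and 128 slots otherwise (two
	 * descriptors per sglist, div == 2).
	 */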
vtnet_netmap_rx_slots(struct vtnet_softc *sc)
	/* We need to prepend a virtio-net header to each netmap buffer to be
	 * received, therefore calling virtqueue_enqueue() passing sglist
	 * RX virtqueues use indirect descriptors if the feature was negotiated
	 * with the host, and if sc->vtnet_rx_nsegs > 1. With indirect
	 * descriptors, a single virtio descriptor is sufficient to reference
	 * each RX sglist. Without them, we need two separate virtio descriptors
	 * for each RX sglist. We therefore compute the number of netmap RX
	 * slots according to these assumptions.
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_rx_nsegs > 1)
	return virtqueue_size(sc->vtnet_rxqs[0].vtnrx_vq) / div;
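	/* The same arithmetic as in vtnet_netmap_tx_slots() applies to the
	 * computation above: each RX sglist is the shared virtio-net header
	 * plus one netmap buffer of NETMAP_BUF_SIZE() bytes (see
	 * vtnet_netmap_kring_refill()), so the slot count is the virtqueue
	 * size divided by 1 or 2 depending on indirect descriptor support.
	 */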
vtnet_netmap_config(struct netmap_adapter *na, struct nm_config_info *info)
	struct vtnet_softc *sc = na->ifp->if_softc;

	info->num_tx_rings = sc->vtnet_act_vq_pairs;
	info->num_rx_rings = sc->vtnet_act_vq_pairs;
	info->num_tx_descs = vtnet_netmap_tx_slots(sc);
	info->num_rx_descs = vtnet_netmap_rx_slots(sc);
	info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);
vtnet_netmap_attach(struct vtnet_softc *sc)
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = sc->vtnet_ifp;
	na.num_tx_desc = vtnet_netmap_tx_slots(sc);
	na.num_rx_desc = vtnet_netmap_rx_slots(sc);
	na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs;
	na.rx_buf_maxsize = 0;
	na.nm_register = vtnet_netmap_reg;
	na.nm_txsync = vtnet_netmap_txsync;
	na.nm_rxsync = vtnet_netmap_rxsync;
	na.nm_intr = vtnet_netmap_intr;
	na.nm_config = vtnet_netmap_config;
	nm_prinf("vtnet attached txq=%d, txd=%d rxq=%d, rxd=%d",
	    na.num_tx_rings, na.num_tx_desc,
	    na.num_rx_rings, na.num_rx_desc);