/*
 * Copyright (C) 2014-2018 Vincenzo Maffione, Luigi Rizzo.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>	/* vtophys ? */
#include <dev/netmap/netmap_kern.h>
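
/*
 * Note (added for orientation): this file is meant to be #included by the
 * vtnet driver body (if_vtnet.c) when the kernel is built with netmap
 * support (DEV_NETMAP); it is not a standalone compilation unit.
 */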

/* Register and unregister. */
static int
vtnet_netmap_reg(struct netmap_adapter *na, int state)
{
	struct ifnet *ifp = na->ifp;
	struct vtnet_softc *sc = ifp->if_softc;

	/*
	 * Trigger a device reinit, asking vtnet_init_locked() to
	 * also enter or exit netmap mode.
	 */
	VTNET_CORE_LOCK(sc);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vtnet_init_locked(sc, state ? VTNET_INIT_NETMAP_ENTER
	    : VTNET_INIT_NETMAP_EXIT);
	VTNET_CORE_UNLOCK(sc);

	return (0);
}
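
/*
 * The netmap core invokes the nm_register callback with state != 0 when an
 * application opens the port in netmap mode, and with state == 0 when the
 * last netmap reference goes away; both transitions are implemented above
 * as a full device reinit.
 */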

/* Reconcile kernel and user view of the transmit ring. */
static int
vtnet_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr];
	struct virtqueue *vq = txq->vtntx_vq;
	int interrupts = !(kring->nr_kflags & NKR_NOINTR);
	u_int n;
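
	/*
	 * NKR_NOINTR means interrupts must not be used on this ring;
	 * when it is set we skip the interrupt moderation performed at
	 * the end of this function.
	 */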

	/*
	 * First part: process new packets to send.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		struct sglist *sg = txq->vtntx_sg;

		for (; nm_i != head; nm_i = nm_next(nm_i, lim)) {
			/* we use an empty header here */
			struct netmap_slot *slot = &ring->slot[nm_i];
			uint64_t offset = nm_get_offset(kring, slot);
			u_int len = slot->len;
			uint64_t paddr;
			int err;

			(void)PNMB(na, slot, &paddr);
			NM_CHECK_ADDR_LEN_OFF(na, len, offset);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			/* Initialize the scatterlist, expose it to the
			 * hypervisor, and kick the hypervisor (if necessary).
			 */
			sglist_reset(sg);	/* cheap */
			err = sglist_append(sg, &txq->vtntx_shrhdr,
			    sc->vtnet_hdr_size);
			err |= sglist_append_phys(sg, paddr + offset, len);
			KASSERT(err == 0, ("%s: cannot append to sglist %d",
			    __func__, err));
			err = virtqueue_enqueue(vq, /*cookie=*/txq, sg,
			    /*readable=*/sg->sg_nseg, /*writeable=*/0);
			if (unlikely(err)) {
				if (err != ENOSPC)
					nm_prerr("virtqueue_enqueue(%s) failed: %d",
					    kring->name, err);
				break;
			}
		}

		virtqueue_notify(vq);

		/* Update hwcur depending on where we stopped. */
		kring->nr_hwcur = nm_i; /* note we might break early */
	}
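
	/*
	 * Slots that could not be enqueued above (early break on ENOSPC)
	 * stay between nr_hwcur and rhead, and will be retried on the
	 * next txsync call.
	 */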

	/* Free used slots. We only consider our own used buffers, recognized
	 * by the token we passed to virtqueue_enqueue.
	 */
	n = 0;
	for (;;) {
		void *token = virtqueue_dequeue(vq, NULL);

		if (token == NULL)
			break;
		if (unlikely(token != (void *)txq))
			nm_prerr("BUG: TX token mismatch");
		else
			n++;
	}
	if (n > 0) {
		/* Advance hwtail by the completed slots, wrapping around
		 * the ring of nkr_num_slots (= lim + 1) entries. */
		kring->nr_hwtail += n;
		if (kring->nr_hwtail > lim)
			kring->nr_hwtail -= lim + 1;
	}
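
	/*
	 * Example of the wrap-around above: with 256 slots (lim = 255),
	 * nr_hwtail = 250 and n = 10 completions give 260, which wraps
	 * to 260 - 256 = 4.
	 */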

	/* If the virtqueue is running low on free slots, ask the host for
	 * a completion interrupt, postponed to moderate the interrupt rate. */
	if (interrupts && virtqueue_nfree(vq) < 32)
		virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG);

	return 0;
}

/*
 * Publish 'num' netmap receive buffers to the host, starting
 * from the next available one (rxq->vtnrx_nm_refill).
 * Return a positive error code on error, and 0 on success.
 * If we could not publish all of the buffers that's an error,
 * since the netmap ring and the virtqueue would go out of sync.
 */
static int
vtnet_netmap_kring_refill(struct netmap_kring *kring, u_int num)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int nm_i;

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	/* use a local sglist, default might be short */
	struct sglist_seg ss[2];
	struct sglist sg = { ss, 0, 0, 2 };
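
	/*
	 * Two segments per buffer are always enough here: one for the
	 * shared virtio-net header (vtnrx_shrhdr) and one for the netmap
	 * buffer itself.
	 */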

	for (nm_i = rxq->vtnrx_nm_refill; num > 0;
	    nm_i = nm_next(nm_i, lim), num--) {
		struct netmap_slot *slot = &ring->slot[nm_i];
		uint64_t offset = nm_get_offset(kring, slot);
		uint64_t paddr;
		void *addr = PNMB(na, slot, &paddr);
		int err;

		if (addr == NETMAP_BUF_BASE(na)) { /* bad buf */
			netmap_ring_reinit(kring);
			return EFAULT;
		}

		slot->flags &= ~NS_BUF_CHANGED;
		sglist_reset(&sg);
		err = sglist_append(&sg, &rxq->vtnrx_shrhdr,
		    sc->vtnet_hdr_size);
		err |= sglist_append_phys(&sg, paddr + offset,
		    NETMAP_BUF_SIZE(na) - offset);
		KASSERT(err == 0, ("%s: cannot append to sglist %d",
		    __func__, err));
		/* writable for the host */
		err = virtqueue_enqueue(vq, /*cookie=*/rxq, &sg,
		    /*readable=*/0, /*writeable=*/sg.sg_nseg);
		if (unlikely(err)) {
			nm_prerr("virtqueue_enqueue(%s) failed: %d",
			    kring->name, err);
			break;
		}
	}
	rxq->vtnrx_nm_refill = nm_i;

	return num == 0 ? 0 : ENOSPC;
}
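
/*
 * vtnet_netmap_kring_refill() is called by vtnet_netmap_rxq_populate()
 * when (re)initializing a receive ring, and by vtnet_netmap_rxsync() to
 * re-publish the buffers that userspace has released.
 */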

/*
 * Publish netmap buffers on a RX virtqueue.
 * Returns -1 if this virtqueue is not being opened in netmap mode.
 * If the virtqueue is being opened in netmap mode, return 0 on success and
 * a positive error code on failure.
 */
static int
vtnet_netmap_rxq_populate(struct vtnet_rxq *rxq)
{
	struct netmap_adapter *na = NA(rxq->vtnrx_sc->vtnet_ifp);
	struct netmap_kring *kring;
	struct netmap_slot *slot;
	int error;
	int num;

	slot = netmap_reset(na, NR_RX, rxq->vtnrx_id, 0);
	if (slot == NULL)
		return -1;
	kring = na->rx_rings[rxq->vtnrx_id];

	/*
	 * Expose all the RX netmap buffers we can. In case of no indirect
	 * buffers, the number of netmap slots in the RX ring matches the
	 * maximum number of 2-element sglists that the RX virtqueue can
	 * accommodate. We need to start from kring->nr_hwtail, which is 0
	 * on the first netmap register and may be different from 0 if a
	 * virtio re-init (caused by a netmap register or, e.g., an ifconfig
	 * command) happens while the device is in use by netmap.
	 */
	rxq->vtnrx_nm_refill = kring->nr_hwtail;
	/* Publish at most num_rx_desc - 1 buffers, so that nr_hwtail can
	 * never overrun nr_hwcur (see vtnet_netmap_rxsync()). */
	num = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
	error = vtnet_netmap_kring_refill(kring, num);
	virtqueue_notify(rxq->vtnrx_vq);

	return error;
}

/* Reconcile kernel and user view of the receive ring. */
static int
vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) ||
	    (kring->nr_kflags & NKR_PENDINTR);
	int interrupts = !(kring->nr_kflags & NKR_NOINTR);

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	/*
	 * First part: import newly received packets.
	 * Only accept our own buffers (matching the token). We should only get
	 * matching buffers. The hwtail should never overrun hwcur, because
	 * we publish only N-1 receive buffers (and not N).
	 * In any case we must not leave this routine with the interrupts
	 * disabled, pending packets in the VQ and hwtail == (hwcur - 1),
	 * otherwise the pending packets could stall.
	 */
	if (netmap_no_pendintr || force_update) {
		uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim);
		void *token;

		vtnet_rxq_disable_intr(rxq);

		nm_i = kring->nr_hwtail;
		for (;;) {
			int len;

			token = virtqueue_dequeue(vq, &len);
			if (token == NULL) {
				/*
				 * Enable the interrupts again and double-check
				 * for more work. We can go on until we win the
				 * race condition, since we are not replenishing
				 * in the meanwhile, and thus we will process at
				 * most N-1 slots.
				 */
				if (interrupts && vtnet_rxq_enable_intr(rxq)) {
					vtnet_rxq_disable_intr(rxq);
					continue;
				}
				break;
			}
			if (unlikely(token != (void *)rxq)) {
				nm_prerr("BUG: RX token mismatch");
			} else {
				if (nm_i == hwtail_lim) {
					KASSERT(false,
					    ("hwtail would overrun hwcur"));
				}

				/* Skip the virtio-net header. */
				len -= sc->vtnet_hdr_size;
				if (unlikely(len < 0)) {
					nm_prlim(1, "Truncated virtio-net header, "
					    "missing %d bytes", -len);
					len = 0;
				}
				ring->slot[nm_i].len = len;
				ring->slot[nm_i].flags = 0;
				nm_i = nm_next(nm_i, lim);
			}
		}
		kring->nr_hwtail = nm_i;
		kring->nr_kflags &= ~NKR_PENDINTR;
	}
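
	/*
	 * At this point nr_hwtail accounts for all the buffers returned by
	 * the host so far; the netmap core makes them visible to userspace
	 * once this rxsync call returns.
	 */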

	/*
	 * Second part: skip past packets that userspace has released.
	 */
	nm_i = kring->nr_hwcur; /* netmap ring index */
	if (nm_i != head) {
		int released;
		int error;

		released = head - nm_i;
		if (released < 0)
			released += kring->nkr_num_slots;
		error = vtnet_netmap_kring_refill(kring, released);
		if (error) {
			nm_prerr("Failed to replenish RX VQ with %u sgs",
			    released);
			return error;
		}
		kring->nr_hwcur = head;
		virtqueue_notify(vq);
	}

	nm_prdis("h %d c %d t %d hwcur %d hwtail %d", kring->rhead,
	    kring->rcur, kring->rtail, kring->nr_hwcur, kring->nr_hwtail);

	return 0;
}

/* Enable/disable interrupts on all virtqueues. */
static void
vtnet_netmap_intr(struct netmap_adapter *na, int state)
{
	struct vtnet_softc *sc = na->ifp->if_softc;
	int i;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];
		struct virtqueue *txvq = txq->vtntx_vq;

		if (state) {
			vtnet_rxq_enable_intr(rxq);
			virtqueue_enable_intr(txvq);
		} else {
			vtnet_rxq_disable_intr(rxq);
			virtqueue_disable_intr(txvq);
		}
	}
}

static int
vtnet_netmap_tx_slots(struct vtnet_softc *sc)
{
	int div;

	/* We need to prepend a virtio-net header to each netmap buffer to be
	 * transmitted, therefore calling virtqueue_enqueue() passing sglists
	 * with 2 elements.
	 * TX virtqueues use indirect descriptors if the feature was negotiated
	 * with the host, and if sc->vtnet_tx_nsegs > 1. With indirect
	 * descriptors, a single virtio descriptor is sufficient to reference
	 * each TX sglist. Without them, we need two separate virtio descriptors
	 * for each TX sglist. We therefore compute the number of netmap TX
	 * slots according to these assumptions.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_tx_nsegs > 1)
		div = 1;
	else
		div = 2;

	return virtqueue_size(sc->vtnet_txqs[0].vtntx_vq) / div;
}

static int
vtnet_netmap_rx_slots(struct vtnet_softc *sc)
{
	int div;

	/* We need to prepend a virtio-net header to each netmap buffer to be
	 * received, therefore calling virtqueue_enqueue() passing sglists
	 * with 2 elements.
	 * RX virtqueues use indirect descriptors if the feature was negotiated
	 * with the host, and if sc->vtnet_rx_nsegs > 1. With indirect
	 * descriptors, a single virtio descriptor is sufficient to reference
	 * each RX sglist. Without them, we need two separate virtio descriptors
	 * for each RX sglist. We therefore compute the number of netmap RX
	 * slots according to these assumptions.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_rx_nsegs > 1)
		div = 1;
	else
		div = 2;

	return virtqueue_size(sc->vtnet_rxqs[0].vtnrx_vq) / div;
}
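
/*
 * Example for the two helpers above: with a 256-entry virtqueue, they
 * report 256 netmap slots when indirect descriptors are in use (one
 * virtio descriptor per 2-element sglist), and 128 otherwise (two
 * virtio descriptors per sglist).
 */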

static int
vtnet_netmap_config(struct netmap_adapter *na, struct nm_config_info *info)
{
	struct vtnet_softc *sc = na->ifp->if_softc;

	info->num_tx_rings = sc->vtnet_act_vq_pairs;
	info->num_rx_rings = sc->vtnet_act_vq_pairs;
	info->num_tx_descs = vtnet_netmap_tx_slots(sc);
	info->num_rx_descs = vtnet_netmap_rx_slots(sc);
	info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);

	return 0;
}
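
/*
 * The nm_config callback above lets the netmap core re-read the ring
 * geometry, e.g. after the number of active virtqueue pairs changes.
 */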

static void
vtnet_netmap_attach(struct vtnet_softc *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = sc->vtnet_ifp;
	na.na_flags = NAF_OFFSETS;
	na.num_tx_desc = vtnet_netmap_tx_slots(sc);
	na.num_rx_desc = vtnet_netmap_rx_slots(sc);
	na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs;
	na.rx_buf_maxsize = 0;
	na.nm_register = vtnet_netmap_reg;
	na.nm_txsync = vtnet_netmap_txsync;
	na.nm_rxsync = vtnet_netmap_rxsync;
	na.nm_intr = vtnet_netmap_intr;
	na.nm_config = vtnet_netmap_config;

	netmap_attach(&na);

	nm_prinf("vtnet attached txq=%d, txd=%d rxq=%d, rxd=%d",
	    na.num_tx_rings, na.num_tx_desc,
	    na.num_rx_rings, na.num_rx_desc);
}