/*
 * Copyright (C) 2014 Vincenzo Maffione, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>	/* vtophys ? */
#include <dev/netmap/netmap_kern.h>

#define SOFTC_T	vtnet_softc
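
/* Like the other per-driver netmap glue (e.g. if_em_netmap.h), this
 * file is meant to be #included from the driver source (if_vtnet.c),
 * so that it can see the driver-private softc and queue structures;
 * SOFTC_T names the softc type for the generic code below.
 */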
/* Free all the unused buffers in all the RX and TX virtqueues.
 * This function is called when entering and exiting netmap mode.
 * - buffers queued by the virtio driver return an mbuf pointer
 *   and need to be freed;
 * - buffers queued by netmap return the rxq/txq cookie, and need
 *   no further work.
 */
static void
vtnet_netmap_free_bufs(struct SOFTC_T *sc)
{
	int i, nmb = 0, n = 0, last;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];
		struct vtnet_tx_header *txhdr;
		struct virtqueue *vq;
		struct mbuf *m;

		last = 0;
		vq = rxq->vtnrx_vq;
		while ((m = virtqueue_drain(vq, &last)) != NULL) {
			n++;
			if (m != (void *)rxq)
				m_freem(m);
			else
				nmb++;
		}

		last = 0;
		vq = txq->vtntx_vq;
		while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
			n++;
			if (txhdr != (void *)txq) {
				m_freem(txhdr->vth_mbuf);
				uma_zfree(vtnet_tx_header_zone, txhdr);
			} else
				nmb++;
		}
	}
	D("freed %d mbufs, %d netmap bufs on %d queues",
		n - nmb, nmb, i);
}
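
/* The cookie passed to virtqueue_enqueue() is what virtqueue_drain()
 * and virtqueue_dequeue() later return: netmap passes the rxq/txq
 * pointer itself, while the native driver passes mbuf-related
 * pointers. Comparing the returned token against the queue pointer
 * is therefore enough to tell the two kinds of buffers apart.
 */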
/* Register and unregister. */
static int
vtnet_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	struct SOFTC_T *sc = ifp->if_softc;

	VTNET_CORE_LOCK(sc);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	/* enable or disable flags and callbacks in na and ifp */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}
	/* drain queues so netmap and the native driver
	 * do not interfere with each other
	 */
	vtnet_netmap_free_bufs(sc);
	vtnet_init_locked(sc);	/* also enables intr */
	VTNET_CORE_UNLOCK(sc);
	return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
}
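
/* Note that vtnet_init_locked() is called on both enter and exit of
 * netmap mode: the virtqueues have just been drained, so the device
 * is re-initialized (with interrupts re-enabled) for whichever
 * datapath takes over. A nonzero return reports that the interface
 * failed to come back up.
 */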
/* Reconcile kernel and user view of the transmit ring. */
static int
vtnet_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;

	/* device-specific */
	struct SOFTC_T *sc = ifp->if_softc;
	struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr];
	struct virtqueue *vq = txq->vtntx_vq;

	/*
	 * First part: process new packets to send.
	 */
	rmb();

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		struct sglist *sg = txq->vtntx_sg;

		nic_i = netmap_idx_k2n(kring, nm_i);
		for (n = 0; nm_i != head; n++) {
			/* we use an empty header here */
			static struct virtio_net_hdr_mrg_rxbuf hdr;
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int err;

			NM_CHECK_ADDR_LEN(na, addr, len);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			/* Initialize the scatterlist, expose it to the
			 * hypervisor, and kick the hypervisor (if necessary).
			 */
			sglist_reset(sg); // cheap
			// if vtnet_hdr_size > 0 ...
			err = sglist_append(sg, &hdr, sc->vtnet_hdr_size);
			// XXX later, support multi segment
			err = sglist_append_phys(sg, paddr, len);
			/* use the txq as the cookie */
			err = virtqueue_enqueue(vq, txq, sg, sg->sg_nseg, 0);
			if (unlikely(err < 0)) {
				D("virtqueue_enqueue failed");
				break;
			}

			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		/* Update hwcur depending on where we stopped
		 * (note we might have broken out early). */
		kring->nr_hwcur = nm_i;

		/* No more free TX slots? Ask the hypervisor for
		 * notifications, possibly only when a considerable
		 * amount of work has been done.
		 */
		ND(3, "sent %d packets, hwcur %d", n, nm_i);
		virtqueue_disable_intr(vq);
		virtqueue_notify(vq);
	} else {
		if (ring->head != ring->tail)
			ND(5, "pure notify ? head %d tail %d nused %d %d",
				ring->head, ring->tail, virtqueue_nused(vq),
				(virtqueue_dump(vq), 1));
		virtqueue_notify(vq);
		virtqueue_enable_intr(vq); // like postpone with 0
	}
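
	/* TX interrupt policy: after queuing new packets we kick the
	 * host with interrupts disabled and reclaim completions
	 * synchronously below; when txsync is called with no new work
	 * we kick anyway and re-enable interrupts, so a completion
	 * from the host can wake up a thread sleeping in poll().
	 */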
	/* Free used slots. We only consider our own used buffers,
	 * recognized by the token we passed to virtqueue_enqueue.
	 */
	n = 0;
	for (;;) {
		struct vtnet_tx_header *txhdr = virtqueue_dequeue(vq, NULL);
		if (txhdr == NULL)
			break;
		if (likely(txhdr == (void *)txq)) {
			n++;
			if (virtqueue_nused(vq) < 32) { // XXX slow release
				break;
			}
		} else { /* leftover from previous transmission */
			m_freem(txhdr->vth_mbuf);
			uma_zfree(vtnet_tx_header_zone, txhdr);
		}
	}

	kring->nr_hwtail += n;
	if (kring->nr_hwtail > lim)
		kring->nr_hwtail -= lim + 1;

	if (nm_i != kring->nr_hwtail /* && vtnet_txq_below_threshold(txq) == 0 */) {
		ND(3, "disable intr, hwcur %d", nm_i);
		virtqueue_disable_intr(vq);
	} else {
		ND(3, "enable intr, hwcur %d", nm_i);
		virtqueue_postpone_intr(vq, VQ_POSTPONE_SHORT);
	}

	nm_txsync_finalize(kring);

	return 0;
}
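
/* Note on the reclaim loop above: completions are harvested only
 * until fewer than 32 descriptors remain in use, leaving the rest for
 * the next txsync call. This bounds the work done per call (hence the
 * "XXX slow release" remark); the threshold itself is arbitrary.
 */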
/* Publish netmap buffers [nm_i, head) to the RX virtqueue.
 * Returns the index of the first slot not published, or -1 if a bad
 * buffer forces a ring reinit.
 */
static int
vtnet_refill_rxq(struct netmap_kring *kring, u_int nm_i, u_int head)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int n;

	/* device-specific */
	struct SOFTC_T *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	/* use a local sglist, the default one might be short */
	struct sglist_seg ss[2];
	struct sglist sg = { ss, 0, 0, 2 };
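
	/* Each netmap RX buffer is published to the host as exactly
	 * two segments, the (empty) virtio-net header and the netmap
	 * buffer itself, so a two-entry sglist on the stack always
	 * suffices.
	 */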
	for (n = 0; nm_i != head; n++) {
		static struct virtio_net_hdr_mrg_rxbuf hdr;
		struct netmap_slot *slot = &ring->slot[nm_i];
		uint64_t paddr;
		void *addr = PNMB(na, slot, &paddr);
		int err = 0;

		if (addr == NETMAP_BUF_BASE(na)) { /* bad buf */
			if (netmap_ring_reinit(kring))
				return -1;
		}

		slot->flags &= ~NS_BUF_CHANGED;
		sglist_reset(&sg); // cheap
		err = sglist_append(&sg, &hdr, sc->vtnet_hdr_size);
		err = sglist_append_phys(&sg, paddr, NETMAP_BUF_SIZE(na));
		/* writable for the host */
		err = virtqueue_enqueue(vq, rxq, &sg, 0, sg.sg_nseg);
		if (err < 0) {
			D("virtqueue_enqueue failed");
			break;
		}
		nm_i = nm_next(nm_i, lim);
	}
	return nm_i;
}
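
/* In FreeBSD's virtqueue API the last two arguments of
 * virtqueue_enqueue() are the numbers of host-readable and
 * host-writable segments. TX above passes (sg->sg_nseg, 0), since the
 * host only reads outgoing buffers; the RX refill passes
 * (0, sg.sg_nseg), since both the header and the payload are written
 * by the host.
 */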
/* Reconcile kernel and user view of the receive ring. */
static int
vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = nm_rxsync_prologue(kring);
	int force_update = (flags & NAF_FORCE_READ) ||
			(kring->nr_kflags & NKR_PENDINTR);

	/* device-specific */
	struct SOFTC_T *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	/* XXX netif_carrier_ok ? */

	if (head > lim)
		return netmap_ring_reinit(kring);

	rmb();

	/*
	 * First part: import newly received packets.
	 * Only accept our own buffers (matching the token). We should
	 * only get matching buffers, because of
	 * vtnet_netmap_free_bufs() and vtnet_netmap_init_rx_buffers().
	 */
	if (netmap_no_pendintr || force_update) {
		uint16_t slot_flags = kring->nkr_slot_flags;
		void *token;
		uint32_t len;

		nm_i = kring->nr_hwtail;
		n = 0;
		for (;;) {
			token = virtqueue_dequeue(vq, &len);
			if (token == NULL)
				break;
			if (likely(token == (void *)rxq)) {
				ring->slot[nm_i].len = len;
				ring->slot[nm_i].flags = slot_flags;
				nm_i = nm_next(nm_i, lim);
				n++;
			} else {
				D("This should not happen");
			}
		}
		kring->nr_hwtail = nm_i;
		kring->nr_kflags &= ~NKR_PENDINTR;
	}
	ND("[B] h %d c %d hwcur %d hwtail %d",
		ring->head, ring->cur, kring->nr_hwcur, kring->nr_hwtail);
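
	/* Foreign tokens should never show up here: while in netmap
	 * mode every buffer in the RX virtqueue was queued by netmap
	 * itself, so the defensive branch in the import loop above
	 * only logs and intentionally does no cleanup.
	 */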
	/*
	 * Second part: skip past packets that userspace has released.
	 */
	nm_i = kring->nr_hwcur; /* netmap ring index */
	if (nm_i != head) {
		int err = vtnet_refill_rxq(kring, nm_i, head);
		if (err < 0)
			return 1;
		kring->nr_hwcur = err;
		virtqueue_notify(vq);
		/* After draining the queue we may need an intr
		 * from the hypervisor. */
		vtnet_rxq_enable_intr(rxq);
	}

	/* tell userspace that there might be new packets. */
	nm_rxsync_finalize(kring);

	ND("[C] h %d c %d t %d hwcur %d hwtail %d",
		ring->head, ring->cur, ring->tail,
		kring->nr_hwcur, kring->nr_hwtail);

	return 0;
}
/* Make the RX virtqueue buffers point to netmap buffers. */
static int
vtnet_netmap_init_rx_buffers(struct SOFTC_T *sc)
{
	struct ifnet *ifp = sc->vtnet_ifp;
	struct netmap_adapter *na = NA(ifp);
	unsigned int r;

	if (!nm_native_on(na))
		return 0;
	for (r = 0; r < na->num_rx_rings; r++) {
		struct netmap_kring *kring = &na->rx_rings[r];
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[r];
		struct virtqueue *vq = rxq->vtnrx_vq;
		struct netmap_slot *slot;
		int err = 0;

		slot = netmap_reset(na, NR_RX, r, 0);
		if (!slot) {
			D("strange, null netmap ring %d", r);
			return 0;
		}
		/* Add up to na->num_rx_desc-1 buffers to this RX
		 * virtqueue. It's important to leave one virtqueue
		 * slot free, otherwise we can run into
		 * ring->cur/ring->tail wraparounds.
		 */
		err = vtnet_refill_rxq(kring, 0, na->num_rx_desc - 1);
		if (err < 0)
			return 0;
		virtqueue_notify(vq);
	}

	return 1;
}
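
/* The return convention is presumably consumed by the patched driver
 * init path: 1 means the RX virtqueues were populated with netmap
 * buffers and the native mbuf refill must be skipped, 0 means netmap
 * is off (or initialization failed) and the native path should run.
 */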
/* Update the virtio-net device configuration. The number of queue
 * pairs can change dynamically, with
 * 'ethtool --set-channels $IFNAME combined $N'; this is currently
 * the only way virtio-net can enable multiqueue mode.
 * XXX note that we seem to lose packets if the netmap ring has more
 * slots than the virtqueue.
 */
static int
vtnet_netmap_config(struct netmap_adapter *na, u_int *txr, u_int *txd,
		u_int *rxr, u_int *rxd)
{
	struct ifnet *ifp = na->ifp;
	struct SOFTC_T *sc = ifp->if_softc;

	*txr = *rxr = sc->vtnet_max_vq_pairs;
	*rxd = 512; // sc->vtnet_rx_nmbufs;
	*txd = *rxd; // XXX
	D("vtnet config txq=%d, txd=%d rxq=%d, rxd=%d",
		*txr, *txd, *rxr, *rxd);

	return 0;
}
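
/* The netmap core calls nm_config whenever it needs to (re)validate
 * ring and slot counts; reporting sc->vtnet_max_vq_pairs here lets
 * the number of rings track the current virtqueue configuration
 * rather than the snapshot taken in vtnet_netmap_attach() below.
 */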
static void
vtnet_netmap_attach(struct SOFTC_T *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = sc->vtnet_ifp;
	na.num_tx_desc = 1024; // sc->vtnet_rx_nmbufs;
	na.num_rx_desc = 1024; // sc->vtnet_rx_nmbufs;
	na.nm_register = vtnet_netmap_reg;
	na.nm_txsync = vtnet_netmap_txsync;
	na.nm_rxsync = vtnet_netmap_rxsync;
	na.nm_config = vtnet_netmap_config;
	na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs;
	D("max rings %d", sc->vtnet_max_vq_pairs);
	netmap_attach(&na);

	D("virtio attached txq=%d, txd=%d rxq=%d, rxd=%d",
		na.num_tx_rings, na.num_tx_desc,
		na.num_rx_rings, na.num_rx_desc);
}
/* end of file */
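
/* In the companion patch to if_vtnet.c, vtnet_netmap_attach() is
 * expected to be called at the end of vtnet_attach(), with a matching
 * netmap_detach(ifp) in vtnet_detach().
 */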