2 * Copyright (C) 2013-2016 Vincenzo Maffione
3 * Copyright (C) 2013-2016 Luigi Rizzo
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * This module implements netmap support on top of standard,
30 * unmodified device drivers.
32 * A NIOCREGIF request is handled here if the device does not
33 * have native support. TX and RX rings are emulated as follows:
36 * We preallocate a block of TX mbufs (roughly as many as
37 * tx descriptors; the number is not critical) to speed up
38 * operation during transmissions. The refcount on most of
39 * these buffers is artificially bumped up so we can recycle
40 * them more easily. Also, the destructor is intercepted
41 * so that we can use it as an interrupt notification to wake up
42 * processes blocked on poll().
44 * For each receive ring we allocate one "struct mbq"
45 * (an mbuf tailq plus a spinlock). We intercept packets
47 * on the receive path and put them in the mbq from which
48 * netmap receive routines can grab them.
51 * In the generic_txsync() routine, netmap buffers are copied
52 * (or possibly linked, in the future) to the preallocated mbufs
53 * and pushed to the transmit queue. Some of these mbufs
54 * (those with NS_REPORT, or otherwise every half ring)
55 * have a refcount of 1, the others a refcount of 2.
56 * When the destructor is invoked, we take that as
57 * a notification that all mbufs up to that one in
58 * the specific ring have been completed, and generate
59 * the equivalent of a transmit interrupt.
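 *
 * A rough sketch of the recycling idea described above (illustrative only,
 * not meant to be compiled; it uses the MBUF_REFCNT()/SET_MBUF_REFCNT()
 * helpers from netmap_kern.h and nm_os_get_mbuf() defined below):
 *
 *	m = nm_os_get_mbuf(ifp, NETMAP_BUF_SIZE(na));
 *	SET_MBUF_REFCNT(m, 2);	// keep one reference for ourselves
 *	// ... hand m to the driver; the driver's "free" only drops the
 *	// refcount back to 1, so the mbuf stays in our tx pool ...
 *	if (MBUF_REFCNT(m) == 1)
 *		;	// transmission completed, the slot can be recycled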
67 #include <sys/cdefs.h> /* prerequisite */
68 __FBSDID("$FreeBSD$");
70 #include <sys/types.h>
71 #include <sys/errno.h>
72 #include <sys/malloc.h>
73 #include <sys/lock.h> /* PROT_EXEC */
74 #include <sys/rwlock.h>
75 #include <sys/socket.h> /* sockaddrs */
76 #include <sys/selinfo.h>
78 #include <net/if_var.h>
79 #include <machine/bus.h> /* bus_dmamap_* in netmap_kern.h */
81 // XXX temporary - D() defined here
82 #include <net/netmap.h>
83 #include <dev/netmap/netmap_kern.h>
84 #include <dev/netmap/netmap_mem2.h>
86 #define rtnl_lock() ND("rtnl_lock called")
87 #define rtnl_unlock() ND("rtnl_unlock called")
88 #define MBUF_RXQ(m) ((m)->m_pkthdr.flowid)
92 * FreeBSD mbuf allocator/deallocator in emulation mode:
94 #if __FreeBSD_version < 1100000
97 * For older versions of FreeBSD:
99 * We allocate EXT_PACKET mbuf+clusters, but need to set M_NOFREE
100 * so that the destructor, if invoked, will not free the packet.
101 * In principle we should set the destructor only on demand,
102 * but since there might be a race, it is safer to do it at allocation time.
103 * As a consequence, we also need to set the destructor or we
104 * would leak buffers.
107 /* mbuf destructor, also need to change the type to EXT_EXTREF,
108 * add an M_NOFREE flag, and then clear the flag and
109 * chain into uma_zfree(zone_pack, mf)
110 * (or reinstall the buffer ?)
112 #define SET_MBUF_DESTRUCTOR(m, fn) do { \
113 (m)->m_ext.ext_free = (void *)fn; \
114 (m)->m_ext.ext_type = EXT_EXTREF; \
118 void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2)
120 /* restore original mbuf */
121 m->m_ext.ext_buf = m->m_data = m->m_ext.ext_arg1;
122 m->m_ext.ext_arg1 = NULL;
123 m->m_ext.ext_type = EXT_PACKET;
124 m->m_ext.ext_free = NULL;
125 if (MBUF_REFCNT(m) == 0)
126 SET_MBUF_REFCNT(m, 1);
127 uma_zfree(zone_pack, m);
132 static inline struct mbuf *
133 nm_os_get_mbuf(struct ifnet *ifp, int len)
138 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
140 /* m_getcl() (mb_ctor_mbuf) has an assert that checks that
141 * the M_NOFREE flag is not specified in the third argument,
142 * so we have to set M_NOFREE after m_getcl(). */
143 m->m_flags |= M_NOFREE;
144 m->m_ext.ext_arg1 = m->m_ext.ext_buf; // XXX save
145 m->m_ext.ext_free = (void *)void_mbuf_dtor;
146 m->m_ext.ext_type = EXT_EXTREF;
147 ND(5, "create m %p refcnt %d", m, MBUF_REFCNT(m));
152 #else /* __FreeBSD_version >= 1100000 */
155 * Newer versions of FreeBSD use a straightforward scheme.
157 * We allocate mbufs with m_gethdr(), since the mbuf header is needed
158 * by the driver. We also attach custom external storage,
159 * which in this case is a netmap buffer. When calling m_extadd(), however,
160 * we pass a NULL address, since the real address (and length) will be
161 * filled in by nm_os_generic_xmit_frame() right before calling
164 * The dtor function does nothing; however, we need it because mb_free_ext()
165 * has a KASSERT(), checking that the mbuf dtor function is not NULL.
168 static void void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2) { }
170 #define SET_MBUF_DESTRUCTOR(m, fn) do { \
171 (m)->m_ext.ext_free = (fn != NULL) ? \
172 (void *)fn : (void *)void_mbuf_dtor; \
175 static inline struct mbuf *
176 nm_os_get_mbuf(struct ifnet *ifp, int len)
183 m = m_gethdr(M_NOWAIT, MT_DATA);
188 m_extadd(m, NULL /* buf */, 0 /* size */, void_mbuf_dtor,
189 NULL, NULL, 0, EXT_NET_DRV);
194 #endif /* __FreeBSD_version >= 1100000 */
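/*
 * Usage sketch (illustrative, not compiled): on either FreeBSD version the
 * helpers above are meant to be combined roughly as follows; 'ifp' and 'na'
 * are assumed to be in scope, and generic_mbuf_destructor() is the callback
 * defined later in this file.
 */
#if 0
	struct mbuf *m = nm_os_get_mbuf(ifp, NETMAP_BUF_SIZE(na));

	if (m != NULL) {
		/* Ask to be called back when the driver is done with m. */
		SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);
	}
#endif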
198 #include "win_glue.h"
200 #define rtnl_lock() ND("rtnl_lock called")
201 #define rtnl_unlock() ND("rtnl_unlock called")
202 #define MBUF_TXQ(m) 0//((m)->m_pkthdr.flowid)
203 #define MBUF_RXQ(m) 0//((m)->m_pkthdr.flowid)
204 #define smp_mb() //XXX: to be correctly defined
208 #include "bsd_glue.h"
210 #include <linux/rtnetlink.h> /* rtnl_[un]lock() */
211 #include <linux/ethtool.h> /* struct ethtool_ops, get_ringparam */
212 #include <linux/hrtimer.h>
214 static inline struct mbuf *
215 nm_os_get_mbuf(struct ifnet *ifp, int len)
217 return alloc_skb(ifp->needed_headroom + len +
218 ifp->needed_tailroom, GFP_ATOMIC);
224 /* Common headers. */
225 #include <net/netmap.h>
226 #include <dev/netmap/netmap_kern.h>
227 #include <dev/netmap/netmap_mem2.h>
230 #define for_each_kring_n(_i, _k, _karr, _n) \
231 for (_k=_karr, _i = 0; _i < _n; (_k)++, (_i)++)
233 #define for_each_tx_kring(_i, _k, _na) \
234 for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings)
235 #define for_each_tx_kring_h(_i, _k, _na) \
236 for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings + 1)
238 #define for_each_rx_kring(_i, _k, _na) \
239 for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings)
240 #define for_each_rx_kring_h(_i, _k, _na) \
241 for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings + 1)
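/*
 * Usage sketch (illustrative, not compiled): the iterators above simply walk
 * the kring arrays in index order. For a hypothetical adapter 'na':
 */
#if 0
	u_int r;
	struct netmap_kring *kring;

	for_each_tx_kring(r, kring, na) {
		/* Here kring == &na->tx_rings[r], with r < na->num_tx_rings;
		 * the _h variants additionally visit the host ring at index
		 * na->num_tx_rings. */
	}
#endif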
244 /* ======================== PERFORMANCE STATISTICS =========================== */
250 unsigned long txsync;
252 unsigned long txrepl;
253 unsigned long txdrop;
256 unsigned long rxsync;
259 struct rate_context {
261 struct timer_list timer;
262 struct rate_stats new;
263 struct rate_stats old;
266 #define RATE_PRINTK(_NAME_) \
267 printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD);
268 #define RATE_PERIOD 2
269 static void rate_callback(unsigned long arg)
271 struct rate_context * ctx = (struct rate_context *)arg;
272 struct rate_stats cur = ctx->new;
286 r = mod_timer(&ctx->timer, jiffies +
287 msecs_to_jiffies(RATE_PERIOD * 1000));
289 D("[v1000] Error: mod_timer()");
292 static struct rate_context rate_ctx;
294 void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi)
296 if (txp) rate_ctx.new.txpkt++;
297 if (txs) rate_ctx.new.txsync++;
298 if (txi) rate_ctx.new.txirq++;
299 if (rxp) rate_ctx.new.rxpkt++;
300 if (rxs) rate_ctx.new.rxsync++;
301 if (rxi) rate_ctx.new.rxirq++;
309 /* ========== GENERIC (EMULATED) NETMAP ADAPTER SUPPORT ============= */
312 * Wrapper used by the generic adapter layer to notify
313 * the poller threads. Unlike netmap_rx_irq(), we check
314 * only NAF_NETMAP_ON instead of NAF_NATIVE_ON to enable the irq.
317 netmap_generic_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
319 if (unlikely(!nm_netmap_on(na)))
322 netmap_common_irq(na, q, work_done);
325 rate_ctx.new.rxirq++;
327 rate_ctx.new.txirq++;
328 #endif /* RATE_GENERIC */
332 generic_netmap_unregister(struct netmap_adapter *na)
334 struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
335 struct netmap_kring *kring = NULL;
338 if (na->active_fds == 0) {
341 na->na_flags &= ~NAF_NETMAP_ON;
343 /* Release packet steering control. */
344 nm_os_catch_tx(gna, 0);
346 /* Stop intercepting packets on the RX path. */
347 nm_os_catch_rx(gna, 0);
352 for_each_rx_kring_h(r, kring, na) {
353 if (nm_kring_pending_off(kring)) {
354 D("Emulated adapter: ring '%s' deactivated", kring->name);
355 kring->nr_mode = NKR_NETMAP_OFF;
358 for_each_tx_kring_h(r, kring, na) {
359 if (nm_kring_pending_off(kring)) {
360 kring->nr_mode = NKR_NETMAP_OFF;
361 D("Emulated adapter: ring '%s' deactivated", kring->name);
365 for_each_rx_kring(r, kring, na) {
366 /* Free the mbufs still pending in the RX queues,
367 * which did not end up in the corresponding netmap
369 mbq_safe_purge(&kring->rx_queue);
370 nm_os_mitigation_cleanup(&gna->mit[r]);
373 /* Decrement the reference counter for the mbufs in the
374 * TX pools. These mbufs may still be pending in the drivers
375 * (e.g. this happens with the virtio-net driver, which
376 * does lazy reclaiming of transmitted mbufs). */
377 for_each_tx_kring(r, kring, na) {
378 /* We must remove the destructor from the TX event,
379 * because the destructor invokes netmap code, and
380 * the netmap module may disappear before the
381 * TX event is consumed. */
382 mtx_lock_spin(&kring->tx_event_lock);
383 if (kring->tx_event) {
384 SET_MBUF_DESTRUCTOR(kring->tx_event, NULL);
386 kring->tx_event = NULL;
387 mtx_unlock_spin(&kring->tx_event_lock);
390 if (na->active_fds == 0) {
391 nm_os_free(gna->mit);
393 for_each_rx_kring(r, kring, na) {
394 mbq_safe_fini(&kring->rx_queue);
397 for_each_tx_kring(r, kring, na) {
398 mtx_destroy(&kring->tx_event_lock);
399 if (kring->tx_pool == NULL) {
403 for (i=0; i<na->num_tx_desc; i++) {
404 if (kring->tx_pool[i]) {
405 m_freem(kring->tx_pool[i]);
408 nm_os_free(kring->tx_pool);
409 kring->tx_pool = NULL;
413 if (--rate_ctx.refcount == 0) {
415 del_timer(&rate_ctx.timer);
418 D("Emulated adapter for %s deactivated", na->name);
424 /* Enable/disable netmap mode for a generic network interface. */
426 generic_netmap_register(struct netmap_adapter *na, int enable)
428 struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
429 struct netmap_kring *kring = NULL;
438 /* This is actually an unregif. */
439 return generic_netmap_unregister(na);
442 if (na->active_fds == 0) {
443 D("Emulated adapter for %s activated", na->name);
444 /* Do all memory allocations when (na->active_fds == 0), to
445 * simplify error management. */
447 /* Allocate memory for mitigation support on all the rx queues. */
448 gna->mit = nm_os_malloc(na->num_rx_rings * sizeof(struct nm_generic_mit));
450 D("mitigation allocation failed");
455 for_each_rx_kring(r, kring, na) {
456 /* Init mitigation support. */
457 nm_os_mitigation_init(&gna->mit[r], r, na);
459 /* Initialize the rx queue, as generic_rx_handler() can
460 * be called as soon as nm_os_catch_rx() returns.
462 mbq_safe_init(&kring->rx_queue);
466 * Prepare mbuf pools (parallel to the tx rings) for packet
467 * transmission. Don't preallocate the mbufs here; it's simpler
468 * to leave this task to txsync.
470 for_each_tx_kring(r, kring, na) {
471 kring->tx_pool = NULL;
473 for_each_tx_kring(r, kring, na) {
475 nm_os_malloc(na->num_tx_desc * sizeof(struct mbuf *));
476 if (!kring->tx_pool) {
477 D("tx_pool allocation failed");
481 mtx_init(&kring->tx_event_lock, "tx_event_lock",
486 for_each_rx_kring_h(r, kring, na) {
487 if (nm_kring_pending_on(kring)) {
488 D("Emulated adapter: ring '%s' activated", kring->name);
489 kring->nr_mode = NKR_NETMAP_ON;
493 for_each_tx_kring_h(r, kring, na) {
494 if (nm_kring_pending_on(kring)) {
495 D("Emulated adapter: ring '%s' activated", kring->name);
496 kring->nr_mode = NKR_NETMAP_ON;
500 for_each_tx_kring(r, kring, na) {
501 /* Initialize tx_pool and tx_event. */
502 for (i=0; i<na->num_tx_desc; i++) {
503 kring->tx_pool[i] = NULL;
506 kring->tx_event = NULL;
509 if (na->active_fds == 0) {
512 /* Prepare to intercept incoming traffic. */
513 error = nm_os_catch_rx(gna, 1);
515 D("nm_os_catch_rx(1) failed (%d)", error);
516 goto register_handler;
519 /* Make netmap control the packet steering. */
520 error = nm_os_catch_tx(gna, 1);
522 D("nm_os_catch_tx(1) failed (%d)", error);
528 na->na_flags |= NAF_NETMAP_ON;
531 if (rate_ctx.refcount == 0) {
533 memset(&rate_ctx, 0, sizeof(rate_ctx));
534 setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx);
535 if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) {
536 D("Error: mod_timer()");
545 /* Here (na->active_fds == 0) holds. */
547 nm_os_catch_rx(gna, 0);
551 for_each_tx_kring(r, kring, na) {
552 mtx_destroy(&kring->tx_event_lock);
553 if (kring->tx_pool == NULL) {
556 nm_os_free(kring->tx_pool);
557 kring->tx_pool = NULL;
559 for_each_rx_kring(r, kring, na) {
560 mbq_safe_fini(&kring->rx_queue);
562 nm_os_free(gna->mit);
569 * Callback invoked when the device driver frees an mbuf used
570 * by netmap to transmit a packet. This usually happens when
571 * the NIC notifies the driver that the transmission has completed.
574 generic_mbuf_destructor(struct mbuf *m)
576 struct netmap_adapter *na = NA(GEN_TX_MBUF_IFP(m));
577 struct netmap_kring *kring;
578 unsigned int r = MBUF_TXQ(m);
579 unsigned int r_orig = r;
581 if (unlikely(!nm_netmap_on(na) || r >= na->num_tx_rings)) {
582 D("Error: no netmap adapter on device %p",
588 * First, clear the event mbuf.
589 * In principle, the event 'm' should match the one stored
590 * on ring 'r'. However, we check it explicitly to stay
591 * safe against lower layers (qdisc, driver, etc.) changing
592 * MBUF_TXQ(m) under our feet. If the match is not found
593 * on 'r', we try to see if it belongs to some other ring.
598 kring = &na->tx_rings[r];
599 mtx_lock_spin(&kring->tx_event_lock);
600 if (kring->tx_event == m) {
601 kring->tx_event = NULL;
604 mtx_unlock_spin(&kring->tx_event_lock);
608 RD(1, "event %p migrated: ring %u --> %u",
614 if (++r == na->num_tx_rings) r = 0;
617 RD(1, "Cannot match event %p", m);
622 /* Second, wake up clients. They will reclaim the event through
624 netmap_generic_irq(na, r, NULL);
626 void_mbuf_dtor(m, NULL, NULL);
630 /* Record completed transmissions and update hwtail.
632 * The oldest tx buffer not yet completed is at nr_hwtail + 1,
633 * while nr_hwcur is the first unsent buffer.
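 *
 * Index layout sketch (an illustrative addition, for orientation only):
 *
 *	[nr_hwtail+1 .. nr_hwcur-1]  handed to the driver, completion pending
 *	[nr_hwcur .. head-1]         queued by userspace, submitted by txsync
 *	remaining slots              available to userspace for new packets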
636 generic_netmap_tx_clean(struct netmap_kring *kring, int txqdisc)
638 u_int const lim = kring->nkr_num_slots - 1;
639 u_int nm_i = nm_next(kring->nr_hwtail, lim);
640 u_int hwcur = kring->nr_hwcur;
642 struct mbuf **tx_pool = kring->tx_pool;
644 ND("hwcur = %d, hwtail = %d", kring->nr_hwcur, kring->nr_hwtail);
646 while (nm_i != hwcur) { /* buffers not completed */
647 struct mbuf *m = tx_pool[nm_i];
651 /* Nothing to do, this is going
652 * to be replenished. */
653 RD(3, "Is this happening?");
655 } else if (MBUF_QUEUED(m)) {
656 break; /* Not dequeued yet. */
658 } else if (MBUF_REFCNT(m) != 1) {
659 /* This mbuf has been dequeued but is still busy
661 * Leave it to the driver and replenish. */
663 tx_pool[nm_i] = NULL;
667 if (unlikely(m == NULL)) {
670 /* This slot was used to place an event. */
671 mtx_lock_spin(&kring->tx_event_lock);
672 event_consumed = (kring->tx_event == NULL);
673 mtx_unlock_spin(&kring->tx_event_lock);
674 if (!event_consumed) {
675 /* The event has not been consumed yet,
676 * still busy in the driver. */
679 /* The event has been consumed, we can go
682 } else if (MBUF_REFCNT(m) != 1) {
683 /* This mbuf is still busy: its refcnt is 2. */
689 nm_i = nm_next(nm_i, lim);
691 kring->nr_hwtail = nm_prev(nm_i, lim);
692 ND("tx completed [%d] -> hwtail %d", n, kring->nr_hwtail);
697 /* Compute a slot index roughly halfway between inf and sup. */
699 ring_middle(u_int inf, u_int sup, u_int lim)
706 } else { /* wrap around */
707 e = (sup + n + inf) / 2;
713 if (unlikely(e >= n)) {
714 D("This cannot happen");
722 generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
724 u_int lim = kring->nkr_num_slots - 1;
727 u_int ntc = nm_next(kring->nr_hwtail, lim); /* next to clean */
730 return; /* all buffers are free */
734 * We have pending packets in the driver between hwtail+1
735 * and hwcur, and we have to choose one of these slots to
736 * generate a notification.
737 * There is a race, but this is only called within txsync, which
738 * does a double check.
741 /* Choose a slot in the middle, so that we don't risk ending
742 * up in a situation where the client continuously wakes up,
743 * fills one or a few TX slots and goes to sleep again. */
744 e = ring_middle(ntc, hwcur, lim);
746 /* Choose the first pending slot, to be safe against driver
747 * reordering mbuf transmissions. */
751 m = kring->tx_pool[e];
753 /* An event is already in place. */
757 mtx_lock_spin(&kring->tx_event_lock);
758 if (kring->tx_event) {
759 /* An event is already in place. */
760 mtx_unlock_spin(&kring->tx_event_lock);
764 SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);
766 mtx_unlock_spin(&kring->tx_event_lock);
768 kring->tx_pool[e] = NULL;
770 ND(5, "Request Event at %d mbuf %p refcnt %d", e, m, m ? MBUF_REFCNT(m) : -2 );
772 /* Decrement the refcount. This will free it if we lose the race
773 * with the driver. */
780 * generic_netmap_txsync() transforms netmap buffers into mbufs
781 * and passes them to the standard device driver
782 * (ndo_start_xmit() or ifp->if_transmit()).
783 * On Linux this is not done directly, but through dev_queue_xmit(),
784 * since it implements the TX flow control (and takes some locks).
787 generic_netmap_txsync(struct netmap_kring *kring, int flags)
789 struct netmap_adapter *na = kring->na;
790 struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
791 struct ifnet *ifp = na->ifp;
792 struct netmap_ring *ring = kring->ring;
793 u_int nm_i; /* index into the netmap ring */ // j
794 u_int const lim = kring->nkr_num_slots - 1;
795 u_int const head = kring->rhead;
796 u_int ring_nr = kring->ring_id;
798 IFRATE(rate_ctx.new.txsync++);
803 * First part: process new packets to send.
805 nm_i = kring->nr_hwcur;
806 if (nm_i != head) { /* we have new packets to send */
807 struct nm_os_gen_arg a;
810 if (gna->txqdisc && nm_kr_txempty(kring)) {
811 /* In txqdisc mode, we ask for a delayed notification,
812 * but only when cur == hwtail, which means that the
813 * client is going to block. */
814 event = ring_middle(nm_i, head, lim);
815 ND(3, "Place txqdisc event (hwcur=%u,event=%u,"
816 "head=%u,hwtail=%u)", nm_i, event, head,
822 a.head = a.tail = NULL;
824 while (nm_i != head) {
825 struct netmap_slot *slot = &ring->slot[nm_i];
826 u_int len = slot->len;
827 void *addr = NMB(na, slot);
828 /* device-specific */
832 NM_CHECK_ADDR_LEN(na, addr, len);
834 /* Take an mbuf from the tx pool (replenishing the pool
835 * entry if necessary) and copy in the user packet. */
836 m = kring->tx_pool[nm_i];
837 if (unlikely(m == NULL)) {
838 kring->tx_pool[nm_i] = m =
839 nm_os_get_mbuf(ifp, NETMAP_BUF_SIZE(na));
841 RD(2, "Failed to replenish mbuf");
842 /* Here we could schedule a timer which
843 * retries the replenishment after a while,
844 * and notifies the client when it
845 * manages to replenish some slots. In
846 * any case we break early to avoid
850 IFRATE(rate_ctx.new.txrepl++);
856 a.qevent = (nm_i == event);
857 /* When not in txqdisc mode, we should ask for
858 * notifications when NS_REPORT is set, or roughly
859 * every half ring. To optimize this, we set a
860 * notification event when the client runs out of
861 * TX ring space, or when transmission fails. In
862 * the latter case we also break early.
864 tx_ret = nm_os_generic_xmit_frame(&a);
865 if (unlikely(tx_ret)) {
868 * No room for this mbuf in the device driver.
869 * Request a notification FOR A PREVIOUS MBUF,
870 * then call generic_netmap_tx_clean(kring) to do the
871 * double check and see if we can free more buffers.
872 * If there is space, continue; else break.
873 * NOTE: the double check is necessary if the problem
874 * occurs in the txsync call after selrecord().
875 * Also, we need some way to tell the caller that not
876 * all buffers were queued onto the device (this was
877 * not a problem with the native netmap driver, where space
878 * is preallocated). The bridge has a similar problem
879 * and we solve it there by dropping the excess packets.
881 generic_set_tx_event(kring, nm_i);
882 if (generic_netmap_tx_clean(kring, gna->txqdisc)) {
883 /* space now available */
890 /* In txqdisc mode, the netmap-aware qdisc
891 * queue has the same length as the number of
892 * netmap slots (N). Since tail is advanced
893 * only when packets are dequeued, qdisc
894 * queue overrun cannot happen, so
895 * nm_os_generic_xmit_frame() did not fail
897 * However, packets can be dropped because
898 * the carrier is off, or because our qdisc is
899 * being deactivated, or possibly for other
900 * reasons. In these cases, we just let the
901 * packet be dropped. */
902 IFRATE(rate_ctx.new.txdrop++);
905 slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
906 nm_i = nm_next(nm_i, lim);
907 IFRATE(rate_ctx.new.txpkt++);
909 if (a.head != NULL) {
911 nm_os_generic_xmit_frame(&a);
913 /* Update hwcur to the next slot to transmit. Here nm_i
914 * is not necessarily head, since the loop above may have exited early. */
915 kring->nr_hwcur = nm_i;
919 * Second, reclaim completed buffers
921 if (!gna->txqdisc && (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring))) {
922 /* No more available slots? Set a notification event
923 * on a netmap slot that will be cleaned in the future.
924 * No double check is performed, since txsync() will be
925 * called twice by netmap_poll().
927 generic_set_tx_event(kring, nm_i);
930 generic_netmap_tx_clean(kring, gna->txqdisc);
937 * This handler is registered (through nm_os_catch_rx())
938 * within the attached network interface
939 * in the RX subsystem, so that every mbuf passed up by
940 * the driver can be intercepted before it reaches the network stack.
941 * Stolen packets are put in a queue where the
942 * generic_netmap_rxsync() callback can extract them.
943 * Returns 1 if the packet was stolen, 0 otherwise.
946 generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
948 struct netmap_adapter *na = NA(ifp);
949 struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
950 struct netmap_kring *kring;
952 u_int r = MBUF_RXQ(m); /* receive ring number */
954 if (r >= na->num_rx_rings) {
955 r = r % na->num_rx_rings;
958 kring = &na->rx_rings[r];
960 if (kring->nr_mode == NKR_NETMAP_OFF) {
961 /* We must not intercept this mbuf. */
965 /* limit the size of the queue */
966 if (unlikely(!gna->rxsg && MBUF_LEN(m) > NETMAP_BUF_SIZE(na))) {
967 /* This may happen when GRO/LRO features are enabled for
968 * the NIC driver while the generic adapter does not
969 * support RX scatter-gather. */
970 RD(2, "Warning: driver pushed up big packet "
971 "(size=%d)", (int)MBUF_LEN(m));
973 } else if (unlikely(mbq_len(&kring->rx_queue) > 1024)) {
976 mbq_safe_enqueue(&kring->rx_queue, m);
979 if (netmap_generic_mit < 32768) {
980 /* no rx mitigation, pass notification up */
981 netmap_generic_irq(na, r, &work_done);
983 /* same as send combining: filter the notification if there is a
984 * pending timer, otherwise pass it up and start a timer.
986 if (likely(nm_os_mitigation_active(&gna->mit[r]))) {
987 /* Record that there is some pending work. */
988 gna->mit[r].mit_pending = 1;
990 netmap_generic_irq(na, r, &work_done);
991 nm_os_mitigation_start(&gna->mit[r]);
995 /* We have intercepted the mbuf. */
1000 * generic_netmap_rxsync() extracts mbufs from the queue filled by
1001 * generic_rx_handler() and puts their content in the netmap
1003 * Access must be protected because the rx handler is asynchronous,
1006 generic_netmap_rxsync(struct netmap_kring *kring, int flags)
1008 struct netmap_ring *ring = kring->ring;
1009 struct netmap_adapter *na = kring->na;
1010 u_int nm_i; /* index into the netmap ring */ //j,
1012 u_int const lim = kring->nkr_num_slots - 1;
1013 u_int const head = kring->rhead;
1014 int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
1016 /* Adapter-specific variables. */
1017 uint16_t slot_flags = kring->nkr_slot_flags;
1018 u_int nm_buf_len = NETMAP_BUF_SIZE(na);
1021 int avail; /* in bytes */
1026 return netmap_ring_reinit(kring);
1028 IFRATE(rate_ctx.new.rxsync++);
1031 * First part: skip past packets that userspace has released.
1032 * This can possibly make room for the second part.
1034 nm_i = kring->nr_hwcur;
1036 /* Userspace has released some packets. */
1037 for (n = 0; nm_i != head; n++) {
1038 struct netmap_slot *slot = &ring->slot[nm_i];
1040 slot->flags &= ~NS_BUF_CHANGED;
1041 nm_i = nm_next(nm_i, lim);
1043 kring->nr_hwcur = head;
1047 * Second part: import newly received packets.
1049 if (!netmap_no_pendintr && !force_update) {
1053 nm_i = kring->nr_hwtail; /* First empty slot in the receive ring. */
1055 /* Compute the available space (in bytes) in this netmap ring.
1056 * The first slot that is not considered is the one before
1059 avail = nm_prev(kring->nr_hwcur, lim) - nm_i;
1062 avail *= nm_buf_len;
1064 /* First pass: While holding the lock on the RX mbuf queue,
1065 * extract as many mbufs as fit in the available space,
1066 * and put them in a temporary queue.
1067 * To avoid performing a per-mbuf division (mlen / nm_buf_len)
1068 * to update avail, we do the update in a while loop that we
1069 * also use to set the RX slots, but without performing the copy. */
1071 mbq_lock(&kring->rx_queue);
1073 m = mbq_peek(&kring->rx_queue);
1075 /* No more packets from the driver. */
1081 /* No more space in the ring. */
1085 mbq_dequeue(&kring->rx_queue);
1093 avail -= nm_buf_len;
1095 ring->slot[nm_i].len = copy;
1096 ring->slot[nm_i].flags = slot_flags | (mlen ? NS_MOREFRAG : 0);
1097 nm_i = nm_next(nm_i, lim);
1100 mbq_enqueue(&tmpq, m);
1102 mbq_unlock(&kring->rx_queue);
1104 /* Second pass: Drain the temporary queue, going over the used RX slots,
1105 * and perform the copy outside of the RX queue lock. */
1106 nm_i = kring->nr_hwtail;
1113 m = mbq_dequeue(&tmpq);
1119 nmaddr = NMB(na, &ring->slot[nm_i]);
1120 /* We only check the address here on generic rx rings. */
1121 if (nmaddr == NETMAP_BUF_BASE(na)) { /* Bad buffer */
1125 return netmap_ring_reinit(kring);
1128 copy = ring->slot[nm_i].len;
1129 m_copydata(m, ofs, copy, nmaddr);
1131 morefrag = ring->slot[nm_i].flags & NS_MOREFRAG;
1132 nm_i = nm_next(nm_i, lim);
1141 kring->nr_hwtail = nm_i;
1142 IFRATE(rate_ctx.new.rxpkt += n);
1144 kring->nr_kflags &= ~NKR_PENDINTR;
1150 generic_netmap_dtor(struct netmap_adapter *na)
1152 struct netmap_generic_adapter *gna = (struct netmap_generic_adapter*)na;
1153 struct ifnet *ifp = netmap_generic_getifp(gna);
1154 struct netmap_adapter *prev_na = gna->prev;
1156 if (prev_na != NULL) {
1157 netmap_adapter_put(prev_na);
1158 if (nm_iszombie(na)) {
1160 * The driver has been removed without releasing
1161 * the reference, so we need to do it here.
1163 netmap_adapter_put(prev_na);
1165 D("Native netmap adapter %p restored", prev_na);
1167 NM_ATTACH_NA(ifp, prev_na);
1169 * netmap_detach_common(), which is called after this function,
1170 * overrides WNA(ifp) if na->ifp is not NULL.
1173 D("Emulated netmap adapter for %s destroyed", na->name);
1177 na_is_generic(struct netmap_adapter *na)
1179 return na->nm_register == generic_netmap_register;
1183 * generic_netmap_attach() makes it possible to use netmap on
1184 * a device without native netmap support.
1185 * This is less performant than native support but potentially
1186 * faster than raw sockets or similar schemes.
1188 * In this "emulated" mode, netmap rings do not necessarily
1189 * have the same size as those in the NIC. We use a default
1190 * value and possibly override it if the OS has ways to fetch the
1191 * actual configuration.
1194 generic_netmap_attach(struct ifnet *ifp)
1196 struct netmap_adapter *na;
1197 struct netmap_generic_adapter *gna;
1199 u_int num_tx_desc, num_rx_desc;
1201 num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */
1203 nm_os_generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc); /* ignore errors */
1204 ND("Netmap ring size: TX = %d, RX = %d", num_tx_desc, num_rx_desc);
1205 if (num_tx_desc == 0 || num_rx_desc == 0) {
1206 D("Device has no hw slots (tx %u, rx %u)", num_tx_desc, num_rx_desc);
1210 gna = nm_os_malloc(sizeof(*gna));
1212 D("no memory on attach, give up");
1215 na = (struct netmap_adapter *)gna;
1216 strncpy(na->name, ifp->if_xname, sizeof(na->name));
1218 na->num_tx_desc = num_tx_desc;
1219 na->num_rx_desc = num_rx_desc;
1220 na->nm_register = &generic_netmap_register;
1221 na->nm_txsync = &generic_netmap_txsync;
1222 na->nm_rxsync = &generic_netmap_rxsync;
1223 na->nm_dtor = &generic_netmap_dtor;
1224 /* when using the generic adapter, NAF_NETMAP_ON is set, so we force
1225 * NAF_SKIP_INTR to use the regular interrupt handler
1227 na->na_flags = NAF_SKIP_INTR | NAF_HOST_RINGS;
1229 ND("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)",
1230 ifp->num_tx_queues, ifp->real_num_tx_queues,
1232 ND("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)",
1233 ifp->num_rx_queues, ifp->real_num_rx_queues);
1235 nm_os_generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);
1237 retval = netmap_attach_common(na);
1243 gna->prev = NA(ifp); /* save old na */
1244 if (gna->prev != NULL) {
1245 netmap_adapter_get(gna->prev);
1247 NM_ATTACH_NA(ifp, na);
1249 nm_os_generic_set_features(gna);
1251 D("Emulated adapter for %s created (prev was %p)", na->name, gna->prev);