/*
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2013-2016 Vincenzo Maffione
 * Copyright (C) 2013-2016 Luigi Rizzo
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module implements netmap support on top of standard,
 * unmodified device drivers.
 *
 * A NIOCREGIF request is handled here if the device does not
 * have native support. TX and RX rings are emulated as follows:
 *
 * TX:
 * We preallocate a block of TX mbufs (roughly as many as
 * tx descriptors; the number is not critical) to speed up
 * operation during transmissions. The refcount on most of
 * these buffers is artificially bumped up so we can recycle
 * them more easily. Also, the destructor is intercepted
 * so we use it as an interrupt notification to wake up
 * processes blocked on a poll().
 *
 * RX:
 * For each receive ring we allocate one "struct mbq"
 * (an mbuf tailq plus a spinlock). We intercept packets
 * on the receive path and put them in the mbq from which
 * netmap receive routines can grab them.
 *
 * TX:
 * In the generic_txsync() routine, netmap buffers are copied
 * (or linked, in the future) to the preallocated mbufs
 * and pushed to the transmit queue. Some of these mbufs
 * (those with NS_REPORT, or otherwise every half ring)
 * have a refcount of 1, the others of 2.
 * When the destructor is invoked, we take that as
 * a notification that all mbufs up to that one in
 * the specific ring have been completed, and generate
 * the equivalent of a transmit interrupt.
 */
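/*
 * Illustrative sketch of the TX notification trick described above
 * (not part of the driver; see generic_set_tx_event() and
 * generic_mbuf_destructor() below for the real code). Pool mbufs carry
 * an extra reference, so the driver's free only drops the refcount;
 * on the one slot chosen as the "event" we install a destructor and
 * drop our own reference, turning the driver's final free into a
 * completion notification:
 *
 *	SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);
 *	kring->tx_event = m;
 *	kring->tx_pool[e] = NULL;	// slot now owned by the driver
 *	m_freem(m);			// drop our ref; the last free
 *					// will run the destructor
 */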
#ifdef __FreeBSD__

#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/lock.h>   /* PROT_EXEC */
#include <sys/rwlock.h>
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <machine/bus.h>        /* bus_dmamap_* in netmap_kern.h */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>

#define MBUF_TXQ(m)	((m)->m_pkthdr.flowid)
#define MBUF_RXQ(m)	((m)->m_pkthdr.flowid)
#define smp_mb()

#elif defined(_WIN32)

#include "win_glue.h"

#define MBUF_TXQ(m)	0	/* (m)->m_pkthdr.flowid */
#define MBUF_RXQ(m)	0	/* (m)->m_pkthdr.flowid */
#define smp_mb()		/* XXX: to be correctly defined */

#else /* linux */

#include "bsd_glue.h"

#include <linux/ethtool.h>	/* struct ethtool_ops, get_ringparam */
#include <linux/hrtimer.h>

static inline struct mbuf *
nm_os_get_mbuf(struct ifnet *ifp, int len)
{
	return alloc_skb(ifp->needed_headroom + len +
			 ifp->needed_tailroom, GFP_ATOMIC);
}

#endif /* linux */
/* Common headers. */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>
#define for_each_kring_n(_i, _k, _karr, _n) \
	for ((_k)=*(_karr), (_i) = 0; (_i) < (_n); (_i)++, (_k) = (_karr)[(_i)])

#define for_each_tx_kring(_i, _k, _na) \
	for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings)
#define for_each_tx_kring_h(_i, _k, _na) \
	for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings + 1)

#define for_each_rx_kring(_i, _k, _na) \
	for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings)
#define for_each_rx_kring_h(_i, _k, _na) \
	for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings + 1)
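
/*
 * Example use of the iteration macros above (illustrative only):
 *
 *	u_int r;
 *	struct netmap_kring *kring;
 *
 *	for_each_tx_kring(r, kring, na) {
 *		nm_prinf("hw TX ring %u is '%s'", r, kring->name);
 *	}
 *
 * The _h variants also visit the host ring, i.e. one extra iteration.
 */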
/* ======================== PERFORMANCE STATISTICS =========================== */

#ifdef RATE_GENERIC
#define IFRATE(x) x
struct rate_stats {
	unsigned long txpkt;
	unsigned long txsync;
	unsigned long txrepl;
	unsigned long txdrop;
	unsigned long txirq;
	unsigned long rxpkt;
	unsigned long rxsync;
	unsigned long rxirq;
};

struct rate_context {
	unsigned int refcount;
	struct timer_list timer;
	struct rate_stats new;
	struct rate_stats old;
};

#define RATE_PRINTK(_NAME_) \
	printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD);
#define RATE_PERIOD  2
static void rate_callback(unsigned long arg)
{
	struct rate_context * ctx = (struct rate_context *)arg;
	struct rate_stats cur = ctx->new;
	int r;

	RATE_PRINTK(txpkt);
	RATE_PRINTK(txsync);
	RATE_PRINTK(txrepl);
	RATE_PRINTK(txdrop);
	RATE_PRINTK(txirq);
	RATE_PRINTK(rxpkt);
	RATE_PRINTK(rxsync);
	RATE_PRINTK(rxirq);
	printk("\n");

	ctx->old = cur;
	r = mod_timer(&ctx->timer, jiffies +
			msecs_to_jiffies(RATE_PERIOD * 1000));
	if (unlikely(r))
		nm_prerr("mod_timer() failed");
}

static struct rate_context rate_ctx;

void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi)
{
	if (txp) rate_ctx.new.txpkt++;
	if (txs) rate_ctx.new.txsync++;
	if (txi) rate_ctx.new.txirq++;
	if (rxp) rate_ctx.new.rxpkt++;
	if (rxs) rate_ctx.new.rxsync++;
	if (rxi) rate_ctx.new.rxirq++;
}
#else /* !RATE_GENERIC */
#define IFRATE(x)
#endif /* RATE_GENERIC */
/* ========== GENERIC (EMULATED) NETMAP ADAPTER SUPPORT ============= */

/*
 * Wrapper used by the generic adapter layer to notify
 * the poller threads. Unlike netmap_rx_irq(), we check
 * only NAF_NETMAP_ON instead of NAF_NATIVE_ON to enable the irq.
 */
void
netmap_generic_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
{
	if (unlikely(!nm_netmap_on(na)))
		return;

	netmap_common_irq(na, q, work_done);
#ifdef RATE_GENERIC
	if (work_done)
		rate_ctx.new.rxirq++;
	else
		rate_ctx.new.txirq++;
#endif /* RATE_GENERIC */
}
static int
generic_netmap_unregister(struct netmap_adapter *na)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct netmap_kring *kring = NULL;
	int r, i;

	if (na->active_fds == 0) {
		na->na_flags &= ~NAF_NETMAP_ON;

		/* Stop intercepting packets on the RX path. */
		nm_os_catch_rx(gna, 0);

		/* Release packet steering control. */
		nm_os_catch_tx(gna, 0);
	}

	for_each_rx_kring_h(r, kring, na) {
		if (nm_kring_pending_off(kring)) {
			nm_prinf("Emulated adapter: ring '%s' deactivated", kring->name);
			kring->nr_mode = NKR_NETMAP_OFF;
		}
	}
	for_each_tx_kring_h(r, kring, na) {
		if (nm_kring_pending_off(kring)) {
			kring->nr_mode = NKR_NETMAP_OFF;
			nm_prinf("Emulated adapter: ring '%s' deactivated", kring->name);
		}
	}

	for_each_rx_kring(r, kring, na) {
		/* Free the mbufs still pending in the RX queues that
		 * did not end up in the corresponding netmap RX rings. */
		mbq_safe_purge(&kring->rx_queue);
		nm_os_mitigation_cleanup(&gna->mit[r]);
	}

	/* Decrement the reference counter for the mbufs in the
	 * TX pools. These mbufs may still be pending in the drivers
	 * (e.g. this happens with the virtio-net driver, which
	 * does lazy reclaiming of transmitted mbufs). */
	for_each_tx_kring(r, kring, na) {
		/* We must remove the destructor on the TX event,
		 * because the destructor invokes netmap code, and
		 * the netmap module may disappear before the
		 * TX event is consumed. */
		mtx_lock_spin(&kring->tx_event_lock);
		if (kring->tx_event) {
			SET_MBUF_DESTRUCTOR(kring->tx_event, NULL);
		}
		kring->tx_event = NULL;
		mtx_unlock_spin(&kring->tx_event_lock);
	}

	if (na->active_fds == 0) {
		nm_os_free(gna->mit);

		for_each_rx_kring(r, kring, na) {
			mbq_safe_fini(&kring->rx_queue);
		}

		for_each_tx_kring(r, kring, na) {
			mtx_destroy(&kring->tx_event_lock);
			if (kring->tx_pool == NULL) {
				continue;
			}
			for (i = 0; i < na->num_tx_desc; i++) {
				if (kring->tx_pool[i]) {
					m_freem(kring->tx_pool[i]);
				}
			}
			nm_os_free(kring->tx_pool);
			kring->tx_pool = NULL;
		}

#ifdef RATE_GENERIC
		if (--rate_ctx.refcount == 0) {
			nm_prinf("del_timer()");
			del_timer(&rate_ctx.timer);
		}
#endif
		nm_prinf("Emulated adapter for %s deactivated", na->name);
	}

	return 0;
}
/* Enable/disable netmap mode for a generic network interface. */
static int
generic_netmap_register(struct netmap_adapter *na, int enable)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct netmap_kring *kring = NULL;
	int error;
	int i, r;

	if (!enable) {
		/* This is actually an unregif. */
		return generic_netmap_unregister(na);
	}

	if (na->active_fds == 0) {
		nm_prinf("Emulated adapter for %s activated", na->name);
		/* Do all memory allocations when (na->active_fds == 0), to
		 * simplify error management. */

		/* Allocate memory for mitigation support on all the rx queues. */
		gna->mit = nm_os_malloc(na->num_rx_rings * sizeof(struct nm_generic_mit));
		if (!gna->mit) {
			nm_prerr("mitigation allocation failed");
			error = ENOMEM;
			goto out;
		}

		for_each_rx_kring(r, kring, na) {
			/* Init mitigation support. */
			nm_os_mitigation_init(&gna->mit[r], r, na);

			/* Initialize the rx queue, as generic_rx_handler() can
			 * be called as soon as nm_os_catch_rx() returns.
			 */
			mbq_safe_init(&kring->rx_queue);
		}

		/*
		 * Prepare mbuf pools (parallel to the tx rings), for packet
		 * transmission. Don't preallocate the mbufs here, it's simpler
		 * to leave this task to txsync.
		 */
		for_each_tx_kring(r, kring, na) {
			kring->tx_pool = NULL;
		}
		for_each_tx_kring(r, kring, na) {
			kring->tx_pool =
				nm_os_malloc(na->num_tx_desc * sizeof(struct mbuf *));
			if (!kring->tx_pool) {
				nm_prerr("tx_pool allocation failed");
				error = ENOMEM;
				goto free_tx_pools;
			}
			mtx_init(&kring->tx_event_lock, "tx_event_lock",
				 NULL, MTX_SPIN);
		}
	}

	for_each_rx_kring_h(r, kring, na) {
		if (nm_kring_pending_on(kring)) {
			nm_prinf("Emulated adapter: ring '%s' activated", kring->name);
			kring->nr_mode = NKR_NETMAP_ON;
		}
	}
	for_each_tx_kring_h(r, kring, na) {
		if (nm_kring_pending_on(kring)) {
			nm_prinf("Emulated adapter: ring '%s' activated", kring->name);
			kring->nr_mode = NKR_NETMAP_ON;
		}
	}

	for_each_tx_kring(r, kring, na) {
		/* Initialize tx_pool and tx_event. */
		for (i = 0; i < na->num_tx_desc; i++) {
			kring->tx_pool[i] = NULL;
		}
		kring->tx_event = NULL;
	}

	if (na->active_fds == 0) {
		/* Prepare to intercept incoming traffic. */
		error = nm_os_catch_rx(gna, 1);
		if (error) {
			nm_prerr("nm_os_catch_rx(1) failed (%d)", error);
			goto free_tx_pools;
		}

		/* Let netmap control the packet steering. */
		error = nm_os_catch_tx(gna, 1);
		if (error) {
			nm_prerr("nm_os_catch_tx(1) failed (%d)", error);
			goto catch_rx;
		}
	}

	na->na_flags |= NAF_NETMAP_ON;

#ifdef RATE_GENERIC
	if (rate_ctx.refcount == 0) {
		nm_prinf("setup_timer()");
		memset(&rate_ctx, 0, sizeof(rate_ctx));
		setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx);
		if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) {
			nm_prerr("Error: mod_timer()");
		}
	}
	rate_ctx.refcount++;
#endif /* RATE_GENERIC */

	return 0;

	/* Here (na->active_fds == 0) holds. */
catch_rx:
	nm_os_catch_rx(gna, 0);
free_tx_pools:
	for_each_tx_kring(r, kring, na) {
		mtx_destroy(&kring->tx_event_lock);
		if (kring->tx_pool == NULL) {
			continue;
		}
		nm_os_free(kring->tx_pool);
		kring->tx_pool = NULL;
	}
	for_each_rx_kring(r, kring, na) {
		mbq_safe_fini(&kring->rx_queue);
	}
	nm_os_free(gna->mit);
out:
	return error;
}
/*
 * Callback invoked when the device driver frees an mbuf used
 * by netmap to transmit a packet. This usually happens when
 * the NIC notifies the driver that transmission is completed.
 */
static void
generic_mbuf_destructor(struct mbuf *m)
{
	struct netmap_adapter *na = NA(GEN_TX_MBUF_IFP(m));
	struct netmap_kring *kring;
	unsigned int r = MBUF_TXQ(m);
	unsigned int r_orig = r;

	if (unlikely(!nm_netmap_on(na) || r >= na->num_tx_rings)) {
		nm_prerr("Error: no netmap adapter on device %p",
			 GEN_TX_MBUF_IFP(m));
		return;
	}

	/*
	 * First, clear the event mbuf.
	 * In principle, the event 'm' should match the one stored
	 * on ring 'r'. However we check it explicitly to stay
	 * safe against lower layers (qdisc, driver, etc.) changing
	 * MBUF_TXQ(m) under our feet. If the match is not found
	 * on 'r', we try to see if it belongs to some other ring.
	 */
	for (;;) {
		bool match = false;

		kring = na->tx_rings[r];
		mtx_lock_spin(&kring->tx_event_lock);
		if (kring->tx_event == m) {
			kring->tx_event = NULL;
			match = true;
		}
		mtx_unlock_spin(&kring->tx_event_lock);

		if (match) {
			if (r != r_orig) {
				nm_prlim(1, "event %p migrated: ring %u --> %u",
					 m, r_orig, r);
			}
			break;
		}

		if (++r == na->num_tx_rings) r = 0;

		if (r == r_orig) {
			nm_prlim(1, "Cannot match event %p", m);
			return;
		}
	}

	/* Second, wake up clients. They will reclaim the event through
	 * txsync. */
	netmap_generic_irq(na, r, NULL);
#ifdef __FreeBSD__
#if __FreeBSD_version <= 1200050
	void_mbuf_dtor(m, NULL, NULL);
#else  /* __FreeBSD_version >= 1200051 */
	void_mbuf_dtor(m);
#endif /* __FreeBSD_version >= 1200051 */
#endif
}
/* Record completed transmissions and update hwtail.
 *
 * The oldest tx buffer not yet completed is at nr_hwtail + 1,
 * nr_hwcur is the first unsent buffer.
 */
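/*
 * Worked example (illustrative, assuming 8 slots so lim = 7):
 * with nr_hwtail = 2 and nr_hwcur = 6, slots 3, 4 and 5 hold mbufs
 * already handed to the driver; the scan starts at nm_next(2, 7) = 3
 * and stops at the first mbuf that is still busy (or at hwcur = 6),
 * then nr_hwtail is set just before the stopping point.
 */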
static int
generic_netmap_tx_clean(struct netmap_kring *kring, int txqdisc)
{
	u_int const lim = kring->nkr_num_slots - 1;
	u_int nm_i = nm_next(kring->nr_hwtail, lim);
	u_int hwcur = kring->nr_hwcur;
	u_int n = 0;
	struct mbuf **tx_pool = kring->tx_pool;

	nm_prdis("hwcur = %d, hwtail = %d", kring->nr_hwcur, kring->nr_hwtail);

	while (nm_i != hwcur) { /* buffers not completed */
		struct mbuf *m = tx_pool[nm_i];

		if (txqdisc) {
			if (m == NULL) {
				/* Nothing to do, this is going
				 * to be replenished. */
				nm_prlim(3, "Is this happening?");
			} else if (MBUF_QUEUED(m)) {
				break; /* Not dequeued yet. */
			} else if (MBUF_REFCNT(m) != 1) {
				/* This mbuf has been dequeued but is still busy
				 * (refcount is 2).
				 * Leave it to the driver and replenish. */
				m_freem(m);
				tx_pool[nm_i] = NULL;
			}
		} else {
			if (unlikely(m == NULL)) {
				int event_consumed;

				/* This slot was used to place an event. */
				mtx_lock_spin(&kring->tx_event_lock);
				event_consumed = (kring->tx_event == NULL);
				mtx_unlock_spin(&kring->tx_event_lock);
				if (!event_consumed) {
					/* The event has not been consumed yet,
					 * still busy in the driver. */
					break;
				}
				/* The event has been consumed, we can go
				 * ahead. */
			} else if (MBUF_REFCNT(m) != 1) {
				/* This mbuf is still busy: its refcnt is 2. */
				break;
			}
		}

		n++;
		nm_i = nm_next(nm_i, lim);
	}
	kring->nr_hwtail = nm_prev(nm_i, lim);
	nm_prdis("tx completed [%d] -> hwtail %d", n, kring->nr_hwtail);

	return n;
}
/* Compute a slot index in the middle between inf and sup. */
static u_int
ring_middle(u_int inf, u_int sup, u_int lim)
{
	u_int n = lim + 1;
	u_int e;

	if (sup >= inf) {
		e = (sup + inf) / 2;
	} else { /* wrap around */
		e = (sup + n + inf) / 2;
		if (e >= n)
			e -= n;
	}
	if (unlikely(e >= n)) {
		nm_prerr("This cannot happen");
		e = 0;
	}
	return e;
}
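
/*
 * Worked example (illustrative, assuming 8 slots so lim = 7):
 * ring_middle(2, 6, 7) == (6 + 2)/2 = 4; in the wrap-around case,
 * ring_middle(6, 2, 7) == (2 + 8 + 6)/2 = 8, which exceeds n = 8
 * and folds back to 8 - 8 = 0, the middle of slots 6, 7, 0, 1.
 */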
static void
generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
{
	u_int lim = kring->nkr_num_slots - 1;
	struct mbuf *m;
	u_int e;
	u_int ntc = nm_next(kring->nr_hwtail, lim); /* next to clean */

	if (ntc == hwcur) {
		return; /* all buffers are free */
	}

	/*
	 * We have pending packets in the driver between hwtail+1
	 * and hwcur, and we have to choose one of these slots to
	 * generate a notification.
	 * There is a race but this is only called within txsync which
	 * does a double check.
	 */
#if 0
	/* Choose a slot in the middle, so that we don't risk ending
	 * up in a situation where the client continuously wakes up,
	 * fills one or a few TX slots and goes to sleep again. */
	e = ring_middle(ntc, hwcur, lim);
#else
	/* Choose the first pending slot, to be safe against driver
	 * reordering mbuf transmissions. */
	e = ntc;
#endif

	m = kring->tx_pool[e];
	if (m == NULL) {
		/* An event is already in place. */
		return;
	}

	mtx_lock_spin(&kring->tx_event_lock);
	if (kring->tx_event) {
		/* An event is already in place. */
		mtx_unlock_spin(&kring->tx_event_lock);
		return;
	}

	SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);
	kring->tx_event = m;
	mtx_unlock_spin(&kring->tx_event_lock);

	kring->tx_pool[e] = NULL;

	nm_prdis("Request Event at %d mbuf %p refcnt %d", e, m, m ? MBUF_REFCNT(m) : -2);

	/* Decrement the refcount. This will free it if we lose the race
	 * with the driver. */
	m_freem(m);

	smp_mb();
}
/*
 * generic_netmap_txsync() transforms netmap buffers into mbufs
 * and passes them to the standard device driver
 * (ndo_start_xmit() or ifp->if_transmit()).
 * On linux this is not done directly, but using dev_queue_xmit(),
 * since it implements the TX flow control (and takes some locks).
 */
static int
generic_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	u_int ring_nr = kring->ring_id;

	IFRATE(rate_ctx.new.txsync++);

	rmb();

	/*
	 * First part: process new packets to send.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		struct nm_os_gen_arg a;
		u_int event = -1;

		if (gna->txqdisc && nm_kr_txempty(kring)) {
			/* In txqdisc mode, we ask for a delayed notification,
			 * but only when cur == hwtail, which means that the
			 * client is going to block. */
			event = ring_middle(nm_i, head, lim);
			nm_prdis("Place txqdisc event (hwcur=%u,event=%u,"
			      "head=%u,hwtail=%u)", nm_i, event, head,
			      kring->nr_hwtail);
		}

		a.ifp = ifp;
		a.ring_nr = ring_nr;
		a.head = a.tail = NULL;

		while (nm_i != head) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			void *addr = NMB(na, slot);
			/* device-specific */
			struct mbuf *m;
			int tx_ret;

			NM_CHECK_ADDR_LEN(na, addr, len);

			/* Take an mbuf from the tx pool (replenishing the pool
			 * entry if necessary) and copy in the user packet. */
			m = kring->tx_pool[nm_i];
			if (unlikely(m == NULL)) {
				kring->tx_pool[nm_i] = m =
					nm_os_get_mbuf(ifp, NETMAP_BUF_SIZE(na));
				if (m == NULL) {
					nm_prlim(2, "Failed to replenish mbuf");
					/* Here we could schedule a timer which
					 * retries to replenish after a while,
					 * and notifies the client when it
					 * manages to replenish some slots. In
					 * any case we break early to avoid
					 * crashes. */
					break;
				}
				IFRATE(rate_ctx.new.txrepl++);
			}

			a.m = m;
			a.addr = addr;
			a.len = len;
			a.qevent = (nm_i == event);
			/* When not in txqdisc mode, we should ask for
			 * notifications when NS_REPORT is set, or roughly
			 * every half ring. To optimize this, we set a
			 * notification event when the client runs out of
			 * TX ring space, or when transmission fails. In
			 * the latter case we also break early.
			 */
			tx_ret = nm_os_generic_xmit_frame(&a);
			if (unlikely(tx_ret)) {
				if (!gna->txqdisc) {
					/*
					 * No room for this mbuf in the device driver.
					 * Request a notification FOR A PREVIOUS MBUF,
					 * then call generic_netmap_tx_clean(kring) to do the
					 * double check and see if we can free more buffers.
					 * If there is space continue, else break;
					 * NOTE: the double check is necessary if the problem
					 * occurs in the txsync call after selrecord().
					 * Also, we need some way to tell the caller that not
					 * all buffers were queued onto the device (this was
					 * not a problem with the native netmap driver, where
					 * space is preallocated). The bridge has a similar
					 * problem and we solve it there by dropping the
					 * excess packets.
					 */
					generic_set_tx_event(kring, nm_i);
					if (generic_netmap_tx_clean(kring, gna->txqdisc)) {
						/* space now available */
						continue;
					} else {
						break;
					}
				}

				/* In txqdisc mode, the netmap-aware qdisc
				 * queue has the same length as the number of
				 * netmap slots (N). Since tail is advanced
				 * only when packets are dequeued, qdisc
				 * queue overrun cannot happen, so
				 * nm_os_generic_xmit_frame() did not fail
				 * because of that.
				 * However, packets can be dropped because
				 * carrier is off, or because our qdisc is
				 * being deactivated, or possibly for other
				 * reasons. In these cases, we just let the
				 * packet be dropped. */
				IFRATE(rate_ctx.new.txdrop++);
			}

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			nm_i = nm_next(nm_i, lim);
			IFRATE(rate_ctx.new.txpkt++);
		}
		if (a.head != NULL) {
			a.addr = NULL;
			nm_os_generic_xmit_frame(&a);
		}
		/* Update hwcur to the next slot to transmit. Here nm_i
		 * is not necessarily head, we could break early. */
		kring->nr_hwcur = nm_i;
	}

	/*
	 * Second, reclaim completed buffers.
	 */
	if (!gna->txqdisc && (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring))) {
		/* No more available slots? Set a notification event
		 * on a netmap slot that will be cleaned in the future.
		 * No doublecheck is performed, since txsync() will be
		 * called twice by netmap_poll().
		 */
		generic_set_tx_event(kring, nm_i);
	}

	generic_netmap_tx_clean(kring, gna->txqdisc);

	return 0;
}
/*
 * This handler is registered (through nm_os_catch_rx())
 * within the attached network interface
 * in the RX subsystem, so that every mbuf passed up by
 * the driver can be stolen from the network stack.
 * Stolen packets are put in a queue where the
 * generic_netmap_rxsync() callback can extract them.
 * Returns 1 if the packet was stolen, 0 otherwise.
 */
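/*
 * Illustrative sketch of the interception glue (the wrapper and the
 * 'saved_if_input' pointer below are hypothetical names; the real hook
 * is installed by the OS-specific nm_os_catch_rx() code, not here):
 *
 *	static void
 *	example_if_input(struct ifnet *ifp, struct mbuf *m)
 *	{
 *		if (generic_rx_handler(ifp, m))
 *			return;			// stolen by netmap
 *		saved_if_input(ifp, m);		// normal stack path
 *	}
 */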
static int
generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct netmap_kring *kring;
	u_int work_done;
	u_int r = MBUF_RXQ(m); /* receive ring number */

	if (r >= na->num_rx_rings) {
		r = r % na->num_rx_rings;
	}

	kring = na->rx_rings[r];

	if (kring->nr_mode == NKR_NETMAP_OFF) {
		/* We must not intercept this mbuf. */
		return 0;
	}

	/* limit the size of the queue */
	if (unlikely(!gna->rxsg && MBUF_LEN(m) > NETMAP_BUF_SIZE(na))) {
		/* This may happen when GRO/LRO features are enabled for
		 * the NIC driver when the generic adapter does not
		 * support RX scatter-gather. */
		nm_prlim(2, "Warning: driver pushed up big packet "
				"(size=%d)", (int)MBUF_LEN(m));
		m_freem(m);
	} else if (unlikely(mbq_len(&kring->rx_queue) > 1024)) {
		m_freem(m);
	} else {
		mbq_safe_enqueue(&kring->rx_queue, m);
	}

	if (netmap_generic_mit < 32768) {
		/* no rx mitigation, pass notification up */
		netmap_generic_irq(na, r, &work_done);
	} else {
		/* same as send combining, filter notification if there is a
		 * pending timer, otherwise pass it up and start a timer.
		 */
		if (likely(nm_os_mitigation_active(&gna->mit[r]))) {
			/* Record that there is some pending work. */
			gna->mit[r].mit_pending = 1;
		} else {
			netmap_generic_irq(na, r, &work_done);
			nm_os_mitigation_start(&gna->mit[r]);
		}
	}

	/* We have intercepted the mbuf. */
	return 1;
}
/*
 * generic_netmap_rxsync() extracts mbufs from the queue filled by
 * generic_rx_handler() and puts their content in the netmap
 * receive ring.
 * Access must be protected because the rx handler is asynchronous.
 */
static int
generic_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_adapter *na = kring->na;
	u_int nm_i;	/* index into the netmap ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	/* Adapter-specific variables. */
	u_int nm_buf_len = NETMAP_BUF_SIZE(na);
	struct mbq tmpq;
	struct mbuf *m;
	int avail; /* in bytes */
	int mlen;
	int copy;

	if (head > lim)
		return netmap_ring_reinit(kring);

	IFRATE(rate_ctx.new.rxsync++);

	/*
	 * First part: skip past packets that userspace has released.
	 * This can possibly make room for the second part.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {
		/* Userspace has released some packets. */
		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];

			slot->flags &= ~NS_BUF_CHANGED;
			nm_i = nm_next(nm_i, lim);
		}
		kring->nr_hwcur = head;
	}

	/*
	 * Second part: import newly received packets.
	 */
	if (!netmap_no_pendintr && !force_update) {
		return 0;
	}

	nm_i = kring->nr_hwtail; /* First empty slot in the receive ring. */

	/* Compute the available space (in bytes) in this netmap ring.
	 * The first slot that is not to be used is the one before
	 * nr_hwcur. */
	avail = nm_prev(kring->nr_hwcur, lim) - nm_i;
	if (avail < 0)
		avail += lim + 1;
	avail *= nm_buf_len;

	/* First pass: While holding the lock on the RX mbuf queue,
	 * extract as many mbufs as fit in the available space,
	 * and put them in a temporary queue.
	 * To avoid performing a per-mbuf division (mlen / nm_buf_len)
	 * to update avail, we do the update in a while loop that we
	 * also use to set the RX slots, but without performing the copy. */
	mbq_init(&tmpq);
	mbq_lock(&kring->rx_queue);
	for (n = 0;; n++) {
		m = mbq_peek(&kring->rx_queue);
		if (!m) {
			/* No more packets from the driver. */
			break;
		}

		mlen = MBUF_LEN(m);
		if (mlen > avail) {
			/* No more space in the ring. */
			break;
		}

		mbq_dequeue(&kring->rx_queue);

		while (mlen) {
			copy = nm_buf_len;
			if (mlen < copy) {
				copy = mlen;
			}
			mlen -= copy;
			avail -= nm_buf_len;

			ring->slot[nm_i].len = copy;
			ring->slot[nm_i].flags = (mlen ? NS_MOREFRAG : 0);
			nm_i = nm_next(nm_i, lim);
		}

		mbq_enqueue(&tmpq, m);
	}
	mbq_unlock(&kring->rx_queue);

	/* Second pass: Drain the temporary queue, going over the used RX slots,
	 * and perform the copy out of the RX queue lock. */
	nm_i = kring->nr_hwtail;

	for (;;) {
		void *nmaddr;
		int ofs = 0;
		int morefrag;

		m = mbq_dequeue(&tmpq);
		if (!m) {
			break;
		}

		do {
			nmaddr = NMB(na, &ring->slot[nm_i]);
			/* We only check the address here on generic rx rings. */
			if (nmaddr == NETMAP_BUF_BASE(na)) { /* Bad buffer */
				m_freem(m);
				mbq_purge(&tmpq);
				mbq_fini(&tmpq);
				return netmap_ring_reinit(kring);
			}

			copy = ring->slot[nm_i].len;
			m_copydata(m, ofs, copy, nmaddr);
			ofs += copy;
			morefrag = ring->slot[nm_i].flags & NS_MOREFRAG;
			nm_i = nm_next(nm_i, lim);
		} while (morefrag);

		m_freem(m);
	}

	mbq_fini(&tmpq);

	if (n) {
		kring->nr_hwtail = nm_i;
		IFRATE(rate_ctx.new.rxpkt += n);
	}
	kring->nr_kflags &= ~NKR_PENDINTR;

	return 0;
}
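
/*
 * Worked example for the space computation in generic_netmap_rxsync()
 * (illustrative, assuming 8 slots so lim = 7): with nr_hwtail = 5 and
 * nr_hwcur = 2, the first slot that must stay unused is
 * nm_prev(2, 7) = 1, so avail = 1 - 5 = -4, which wraps to
 * -4 + 8 = 4 free slots (5, 6, 7, 0), i.e. 4 * nm_buf_len bytes.
 */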
static void
generic_netmap_dtor(struct netmap_adapter *na)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct ifnet *ifp = netmap_generic_getifp(gna);
	struct netmap_adapter *prev_na = gna->prev;

	if (prev_na != NULL) {
		netmap_adapter_put(prev_na);
		if (nm_iszombie(na)) {
			/*
			 * The driver has been removed without releasing
			 * the reference so we need to do it here.
			 */
			netmap_adapter_put(prev_na);
		}
		nm_prinf("Native netmap adapter %p restored", prev_na);
	}
	NM_RESTORE_NA(ifp, prev_na);
	/*
	 * netmap_detach_common(), which is called after this function,
	 * overrides WNA(ifp) if na->ifp is not NULL.
	 */
	na->ifp = NULL;
	nm_prinf("Emulated netmap adapter for %s destroyed", na->name);
}

int
na_is_generic(struct netmap_adapter *na)
{
	return na->nm_register == generic_netmap_register;
}
/*
 * generic_netmap_attach() makes it possible to use netmap on
 * a device without native netmap support.
 * This is less performant than native support but potentially
 * faster than raw sockets or similar schemes.
 *
 * In this "emulated" mode, netmap rings do not necessarily
 * have the same size as those in the NIC. We use a default
 * value and possibly override it if the OS has ways to fetch the
 * actual configuration.
 */
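/*
 * Illustrative usage (assumed caller, not part of this file): the
 * netmap core can fall back to the emulated adapter when an interface
 * lacks native support, roughly:
 *
 *	if (!NM_NA_VALID(ifp)) {
 *		int error = generic_netmap_attach(ifp);
 *		if (error)
 *			return error;
 *	}
 */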
int
generic_netmap_attach(struct ifnet *ifp)
{
	struct netmap_adapter *na;
	struct netmap_generic_adapter *gna;
	int retval;
	u_int num_tx_desc, num_rx_desc;

	if (ifp->if_type == IFT_LOOP) {
		nm_prerr("if_loop is not supported by %s", __func__);
		return EINVAL;
	}

	if (NM_NA_CLASH(ifp)) {
		/* If NA(ifp) is not null but there is no valid netmap
		 * adapter it means that someone else is using the same
		 * pointer (e.g. ax25_ptr on linux). This happens for
		 * instance when also PF_RING is in use. */
		nm_prerr("Error: netmap adapter hook is busy");
		return EBUSY;
	}

	num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */

	nm_os_generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc); /* ignore errors */
	if (num_tx_desc == 0 || num_rx_desc == 0) {
		nm_prerr("Device has no hw slots (tx %u, rx %u)", num_tx_desc, num_rx_desc);
		return EINVAL;
	}

	gna = nm_os_malloc(sizeof(*gna));
	if (gna == NULL) {
		nm_prerr("no memory on attach, give up");
		return ENOMEM;
	}
	na = (struct netmap_adapter *)gna;
	strlcpy(na->name, ifp->if_xname, sizeof(na->name));
	na->ifp = ifp;
	na->num_tx_desc = num_tx_desc;
	na->num_rx_desc = num_rx_desc;
	na->rx_buf_maxsize = 32768;
	na->nm_register = &generic_netmap_register;
	na->nm_txsync = &generic_netmap_txsync;
	na->nm_rxsync = &generic_netmap_rxsync;
	na->nm_dtor = &generic_netmap_dtor;
	/* when using generic, NAF_NETMAP_ON is set so we force
	 * NAF_SKIP_INTR to use the regular interrupt handler
	 */
	na->na_flags = NAF_SKIP_INTR | NAF_HOST_RINGS;

	nm_prdis("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)",
			ifp->num_tx_queues, ifp->real_num_tx_queues,
			ifp->tx_queue_len);
	nm_prdis("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)",
			ifp->num_rx_queues, ifp->real_num_rx_queues);

	nm_os_generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);

	retval = netmap_attach_common(na);
	if (retval) {
		nm_os_free(gna);
		return retval;
	}

	if (NM_NA_VALID(ifp)) {
		gna->prev = NA(ifp); /* save old na */
		netmap_adapter_get(gna->prev);
	}
	NM_ATTACH_NA(ifp, na);

	nm_os_generic_set_features(gna);

	nm_prinf("Emulated adapter for %s created (prev was %p)", na->name, gna->prev);

	return retval;
}