2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo
5 * Copyright (C) 2013-2016 Universita` di Pisa
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * The header contains the definitions of constants and function
34 * prototypes used only in kernelspace.
37 #ifndef _NET_NETMAP_KERN_H_
38 #define _NET_NETMAP_KERN_H_
42 #if defined(CONFIG_NETMAP_EXTMEM)
45 #if defined(CONFIG_NETMAP_VALE)
48 #if defined(CONFIG_NETMAP_PIPE)
51 #if defined(CONFIG_NETMAP_MONITOR)
54 #if defined(CONFIG_NETMAP_GENERIC)
57 #if defined(CONFIG_NETMAP_PTNETMAP_GUEST)
58 #define WITH_PTNETMAP_GUEST
60 #if defined(CONFIG_NETMAP_PTNETMAP_HOST)
61 #define WITH_PTNETMAP_HOST
63 #if defined(CONFIG_NETMAP_SINK)
67 #elif defined (_WIN32)
68 #define WITH_VALE // comment out to disable VALE support
73 #else /* neither linux nor windows */
74 #define WITH_VALE // comment out to disable VALE support
78 #define WITH_PTNETMAP_HOST /* ptnetmap host support */
79 #define WITH_PTNETMAP_GUEST /* ptnetmap guest support */
83 #if defined(__FreeBSD__)
84 #include <sys/selinfo.h>
86 #define likely(x) __builtin_expect((long)!!(x), 1L)
87 #define unlikely(x) __builtin_expect((long)!!(x), 0L)
90 #define NM_LOCK_T struct mtx /* low level spinlock, used to protect queues */
92 #define NM_MTX_T struct sx /* OS-specific mutex (sleepable) */
93 #define NM_MTX_INIT(m) sx_init(&(m), #m)
94 #define NM_MTX_DESTROY(m) sx_destroy(&(m))
95 #define NM_MTX_LOCK(m) sx_xlock(&(m))
96 #define NM_MTX_SPINLOCK(m) while (!sx_try_xlock(&(m))) ;
97 #define NM_MTX_UNLOCK(m) sx_xunlock(&(m))
98 #define NM_MTX_ASSERT(m) sx_assert(&(m), SA_XLOCKED)
100 #define NM_SELINFO_T struct nm_selinfo
101 #define NM_SELRECORD_T struct thread
102 #define MBUF_LEN(m) ((m)->m_pkthdr.len)
103 #define MBUF_TXQ(m) ((m)->m_pkthdr.flowid)
104 #define MBUF_TRANSMIT(na, ifp, m) ((na)->if_transmit(ifp, m))
105 #define GEN_TX_MBUF_IFP(m) ((m)->m_pkthdr.rcvif)
107 #define NM_ATOMIC_T volatile int /* required by atomic/bitops.h */
108 /* atomic operations */
109 #include <machine/atomic.h>
110 #define NM_ATOMIC_TEST_AND_SET(p) (!atomic_cmpset_acq_int((p), 0, 1))
111 #define NM_ATOMIC_CLEAR(p) atomic_store_rel_int((p), 0)
113 #if __FreeBSD_version >= 1100030
114 #define WNA(_ifp) (_ifp)->if_netmap
115 #else /* older FreeBSD */
116 #define WNA(_ifp) (_ifp)->if_pspare[0]
117 #endif /* older FreeBSD */
119 #if __FreeBSD_version >= 1100005
120 struct netmap_adapter *netmap_getna(if_t ifp);
123 #if __FreeBSD_version >= 1100027
124 #define MBUF_REFCNT(m) ((m)->m_ext.ext_count)
125 #define SET_MBUF_REFCNT(m, x) (m)->m_ext.ext_count = x
127 #define MBUF_REFCNT(m) ((m)->m_ext.ref_cnt ? *((m)->m_ext.ref_cnt) : -1)
128 #define SET_MBUF_REFCNT(m, x) *((m)->m_ext.ref_cnt) = x
131 #define MBUF_QUEUED(m) 1
140 /* Not used in FreeBSD. */
143 #define NM_BNS_GET(b)
144 #define NM_BNS_PUT(b)
146 #elif defined (linux)
148 #define NM_LOCK_T safe_spinlock_t // see bsd_glue.h
149 #define NM_SELINFO_T wait_queue_head_t
150 #define MBUF_LEN(m) ((m)->len)
151 #define MBUF_TRANSMIT(na, ifp, m) \
153 /* Avoid infinite recursion with generic. */ \
154 m->priority = NM_MAGIC_PRIORITY_TX; \
155 (((struct net_device_ops *)(na)->if_transmit)->ndo_start_xmit(m, ifp)); \
159 /* See explanation in nm_os_generic_xmit_frame. */
160 #define GEN_TX_MBUF_IFP(m) ((struct ifnet *)skb_shinfo(m)->destructor_arg)
162 #define NM_ATOMIC_T volatile long unsigned int
164 #define NM_MTX_T struct mutex /* OS-specific sleepable lock */
165 #define NM_MTX_INIT(m) mutex_init(&(m))
166 #define NM_MTX_DESTROY(m) do { (void)(m); } while (0)
167 #define NM_MTX_LOCK(m) mutex_lock(&(m))
168 #define NM_MTX_UNLOCK(m) mutex_unlock(&(m))
169 #define NM_MTX_ASSERT(m) mutex_is_locked(&(m))
173 #endif /* DEV_NETMAP */
175 #elif defined (__APPLE__)
177 #warning apple support is incomplete.
178 #define likely(x) __builtin_expect(!!(x), 1)
179 #define unlikely(x) __builtin_expect(!!(x), 0)
180 #define NM_LOCK_T IOLock *
181 #define NM_SELINFO_T struct selinfo
182 #define MBUF_LEN(m) ((m)->m_pkthdr.len)
184 #elif defined (_WIN32)
185 #include "../../../WINDOWS/win_glue.h"
187 #define NM_SELRECORD_T IO_STACK_LOCATION
188 #define NM_SELINFO_T win_SELINFO // see win_glue.h
189 #define NM_LOCK_T win_spinlock_t // see win_glue.h
190 #define NM_MTX_T KGUARDED_MUTEX /* OS-specific mutex (sleepable) */
192 #define NM_MTX_INIT(m) KeInitializeGuardedMutex(&m);
193 #define NM_MTX_DESTROY(m) do { (void)(m); } while (0)
194 #define NM_MTX_LOCK(m) KeAcquireGuardedMutex(&(m))
195 #define NM_MTX_UNLOCK(m) KeReleaseGuardedMutex(&(m))
196 #define NM_MTX_ASSERT(m) assert(&m.Count>0)
198 //These linknames are for the NDIS driver
199 #define NETMAP_NDIS_LINKNAME_STRING L"\\DosDevices\\NMAPNDIS"
200 #define NETMAP_NDIS_NTDEVICE_STRING L"\\Device\\NMAPNDIS"
202 //Definition of internal driver-to-driver ioctl codes
203 #define NETMAP_KERNEL_XCHANGE_POINTERS _IO('i', 180)
204 #define NETMAP_KERNEL_SEND_SHUTDOWN_SIGNAL _IO_direct('i', 195)
206 typedef struct hrtimer{
212 /* MSVC does not have likely/unlikely support */
214 #define likely(x) (x)
215 #define unlikely(x) (x)
217 #define likely(x) __builtin_expect((long)!!(x), 1L)
218 #define unlikely(x) __builtin_expect((long)!!(x), 0L)
223 #error unsupported platform
225 #endif /* end - platform-specific code */
227 #ifndef _WIN32 /* support for emulated sysctl */
232 #define NM_ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))
234 #define NMG_LOCK_T NM_MTX_T
235 #define NMG_LOCK_INIT() NM_MTX_INIT(netmap_global_lock)
236 #define NMG_LOCK_DESTROY() NM_MTX_DESTROY(netmap_global_lock)
237 #define NMG_LOCK() NM_MTX_LOCK(netmap_global_lock)
238 #define NMG_UNLOCK() NM_MTX_UNLOCK(netmap_global_lock)
239 #define NMG_LOCK_ASSERT() NM_MTX_ASSERT(netmap_global_lock)
241 #if defined(__FreeBSD__)
242 #define nm_prerr printf
243 #define nm_prinf printf
244 #elif defined (_WIN32)
245 #define nm_prerr DbgPrint
246 #define nm_prinf DbgPrint
248 #define nm_prerr(fmt, arg...) printk(KERN_ERR fmt, ##arg)
249 #define nm_prinf(fmt, arg...) printk(KERN_INFO fmt, ##arg)
252 #define ND(format, ...)
253 #define D(format, ...) \
255 struct timeval __xxts; \
256 microtime(&__xxts); \
257 nm_prerr("%03d.%06d [%4d] %-25s " format "\n", \
258 (int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec, \
259 __LINE__, __FUNCTION__, ##__VA_ARGS__); \
262 /* rate limited, lps indicates how many per second */
263 #define RD(lps, format, ...) \
265 static int t0, __cnt; \
266 if (t0 != time_second) { \
271 D(format, ##__VA_ARGS__); \
274 struct netmap_adapter;
277 struct netmap_priv_d;
280 /* os-specific NM_SELINFO_T initialization/destruction functions */
281 void nm_os_selinfo_init(NM_SELINFO_T *);
282 void nm_os_selinfo_uninit(NM_SELINFO_T *);
284 const char *nm_dump_buf(char *p, int len, int lim, char *dst);
286 void nm_os_selwakeup(NM_SELINFO_T *si);
287 void nm_os_selrecord(NM_SELRECORD_T *sr, NM_SELINFO_T *si);
289 int nm_os_ifnet_init(void);
290 void nm_os_ifnet_fini(void);
291 void nm_os_ifnet_lock(void);
292 void nm_os_ifnet_unlock(void);
294 unsigned nm_os_ifnet_mtu(struct ifnet *ifp);
296 void nm_os_get_module(void);
297 void nm_os_put_module(void);
299 void netmap_make_zombie(struct ifnet *);
300 void netmap_undo_zombie(struct ifnet *);
302 /* os independent alloc/realloc/free */
303 void *nm_os_malloc(size_t);
304 void *nm_os_vmalloc(size_t);
305 void *nm_os_realloc(void *, size_t new_size, size_t old_size);
306 void nm_os_free(void *);
307 void nm_os_vfree(void *);
309 /* os specific attach/detach enter/exit-netmap-mode routines */
310 void nm_os_onattach(struct ifnet *);
311 void nm_os_ondetach(struct ifnet *);
312 void nm_os_onenter(struct ifnet *);
313 void nm_os_onexit(struct ifnet *);
315 /* passes a packet up to the host stack.
316 * If the packet is sent (or dropped) immediately it returns NULL,
317 * otherwise it links the packet to prev and returns m.
318 * In this case, a final call with m=NULL and prev != NULL will send up
319 * the entire chain to the host stack.
321 void *nm_os_send_up(struct ifnet *, struct mbuf *m, struct mbuf *prev);
323 int nm_os_mbuf_has_seg_offld(struct mbuf *m);
324 int nm_os_mbuf_has_csum_offld(struct mbuf *m);
326 #include "netmap_mbq.h"
328 extern NMG_LOCK_T netmap_global_lock;
330 enum txrx { NR_RX = 0, NR_TX = 1, NR_TXRX };
332 static __inline const char*
333 nm_txrx2str(enum txrx t)
335 return (t == NR_RX ? "RX" : "TX");
338 static __inline enum txrx
339 nm_txrx_swap(enum txrx t)
341 return (t == NR_RX ? NR_TX : NR_RX);
344 #define for_rx_tx(t) for ((t) = 0; (t) < NR_TXRX; (t)++)
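/*
 * Example (illustrative only): iterating over both directions with
 * for_rx_tx() and printing their names.
 *
 *	enum txrx t;
 *
 *	for_rx_tx(t) {
 *		D("direction %s", nm_txrx2str(t));
 *	}
 */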
347 struct netmap_zmon_list {
348 struct netmap_kring *next;
349 struct netmap_kring *prev;
351 #endif /* WITH_MONITOR */
354 * private, kernel view of a ring. Keeps track of the status of
355 * a ring across system calls.
357 * nr_hwcur index of the next buffer to refill.
358 * It corresponds to ring->head
359 * at the time the system call returns.
361 * nr_hwtail index of the first buffer owned by the kernel.
362 * On RX, hwcur->hwtail are receive buffers
363 * not yet released. hwcur is advanced following
364 * ring->head, hwtail is advanced on incoming packets,
365 * and a wakeup is generated when hwtail passes ring->cur
366 * On TX, hwcur->rcur have been filled by the sender
367 * but not sent yet to the NIC; rcur->hwtail are available
368 * for new transmissions, and hwtail->hwcur-1 are pending
369 * transmissions not yet acknowledged.
371 * The indexes in the NIC and netmap rings are offset by nkr_hwofs slots.
372 * This is so that, on a reset, buffers owned by userspace are not
373 * modified by the kernel. In particular:
374 * RX rings: the next empty buffer (hwtail + hwofs) coincides with
375 * the next empty buffer as known by the hardware (next_to_check or so).
376 * TX rings: hwcur + hwofs coincides with next_to_send
378 * The following fields are used to implement lock-free copy of packets
379 * from input to output ports in VALE switch:
380 * nkr_hwlease buffer after the last one being copied.
381 * A writer in nm_bdg_flush reserves N buffers
382 * from nr_hwlease, advances it, then does the
383 * copy outside the lock.
384 * In RX rings (used for VALE ports),
385 * nkr_hwtail <= nkr_hwlease < nkr_hwcur+N-1
386 * In TX rings (used for NIC or host stack ports)
387 * nkr_hwcur <= nkr_hwlease < nkr_hwtail
388 * nkr_leases array of nkr_num_slots where writers can report
389 * completion of their block. NR_NOSLOT (~0) indicates
390 * that the writer has not finished yet
391 * nkr_lease_idx index of next free slot in nr_leases, to be assigned
393 * The kring is manipulated by txsync/rxsync and generic netmap functions.
395 * Concurrent rxsync or txsync on the same ring are prevented
396 * by nm_kr_(try)lock() which in turn uses nr_busy. This is all we need
397 * for NIC rings, and for TX rings attached to the host stack.
399 * RX rings attached to the host stack use an mbq (rx_queue) on both
400 * rxsync_from_host() and netmap_transmit(). The mbq is protected
401 * by its internal lock.
403 * RX rings attached to the VALE switch are accessed by both senders
404 * and receiver. They are protected through the q_lock on the RX ring.
406 struct netmap_kring {
407 struct netmap_ring *ring;
409 uint32_t nr_hwcur; /* should be nr_hwhead */
413 * Copies of values in user rings, so we do not need to look
414 * at the ring (which could be modified). These are set in the
415 * *sync_prologue()/finalize() routines.
421 uint32_t nr_kflags; /* private driver flags */
422 #define NKR_PENDINTR 0x1 // Pending interrupt.
423 #define NKR_EXCLUSIVE 0x2 /* exclusive binding */
424 #define NKR_FORWARD 0x4 /* (host ring only) there are
427 #define NKR_NEEDRING 0x8 /* ring needed even if users==0
428 * (used internally by pipes and
429 * by ptnetmap host ports)
431 #define NKR_NOINTR 0x10 /* don't use interrupts on this ring */
432 #define NKR_FAKERING 0x20 /* don't allocate/free buffers */
435 uint32_t nr_pending_mode;
436 #define NKR_NETMAP_OFF 0x0
437 #define NKR_NETMAP_ON 0x1
439 uint32_t nkr_num_slots;
442 * On a NIC reset, the NIC ring indexes may be reset but the
443 * indexes in the netmap rings remain the same. nkr_hwofs
444 * keeps track of the offset between the two.
448 /* last_reclaim is an opaque marker to help reduce the frequency
449 * of operations such as reclaiming tx buffers. A possible use
450 * is to set it to ticks and do the reclaim only once per tick.
452 uint64_t last_reclaim;
455 NM_SELINFO_T si; /* poll/select wait queue */
456 NM_LOCK_T q_lock; /* protects kring and ring. */
457 NM_ATOMIC_T nr_busy; /* prevent concurrent syscalls */
459 /* the adapter that owns this kring */
460 struct netmap_adapter *na;
462 /* the adapter that wants to be notified when this kring has
463 * new slots available. This is usually the same as the above,
464 * but wrappers may let it point to themselves
466 struct netmap_adapter *notify_na;
468 /* The following fields are for VALE switch support */
469 struct nm_bdg_fwd *nkr_ft;
470 uint32_t *nkr_leases;
471 #define NR_NOSLOT ((uint32_t)~0) /* used in nkr_*lease* */
472 uint32_t nkr_hwlease;
473 uint32_t nkr_lease_idx;
475 /* while nkr_stopped is set, no new [tr]xsync operations can
476 * be started on this kring.
477 * This is used by netmap_disable_all_rings()
478 * to find a synchronization point where critical data
479 * structures pointed to by the kring can be added or removed
481 volatile int nkr_stopped;
483 /* Support for adapters without native netmap support.
484 * On tx rings we preallocate an array of tx buffers
485 * (same size as the netmap ring), on rx rings we
486 * store incoming mbufs in a queue that is drained by
489 struct mbuf **tx_pool;
490 struct mbuf *tx_event; /* TX event used as a notification */
491 NM_LOCK_T tx_event_lock; /* protects the tx_event mbuf */
492 struct mbq rx_queue; /* intercepted rx mbufs. */
494 uint32_t users; /* existing bindings for this ring */
496 uint32_t ring_id; /* kring identifier */
497 enum txrx tx; /* kind of ring (tx or rx) */
498 char name[64]; /* diagnostic */
500 /* [tx]sync callback for this kring.
501 * The default nm_kring_create callback (netmap_krings_create)
502 * sets the nm_sync callback of each hardware tx(rx) kring to
503 * the corresponding nm_txsync(nm_rxsync) taken from the
504 * netmap_adapter; moreover, it sets the sync callback
505 * of the host tx(rx) ring to netmap_txsync_to_host
506 * (netmap_rxsync_from_host).
508 * Overrides: the above configuration is not changed by
509 * any of the nm_krings_create callbacks.
511 int (*nm_sync)(struct netmap_kring *kring, int flags);
512 int (*nm_notify)(struct netmap_kring *kring, int flags);
515 struct netmap_kring *pipe; /* if this is a pipe ring,
516 * pointer to the other end
518 uint32_t pipe_tail; /* hwtail updated by the other end */
519 #endif /* WITH_PIPES */
521 int (*save_notify)(struct netmap_kring *kring, int flags);
524 /* array of krings that are monitoring this kring */
525 struct netmap_kring **monitors;
526 uint32_t max_monitors; /* current size of the monitors array */
527 uint32_t n_monitors; /* next unused entry in the monitor array */
528 uint32_t mon_pos[NR_TXRX]; /* index of this ring in the monitored ring array */
529 uint32_t mon_tail; /* last seen slot on rx */
531 /* circular list of zero-copy monitors */
532 struct netmap_zmon_list zmon_list[NR_TXRX];
535 * Monitors work by intercepting the sync and notify callbacks of the
536 * monitored krings. This is implemented by replacing the pointers
537 * above and saving the previous ones in mon_* pointers below
539 int (*mon_sync)(struct netmap_kring *kring, int flags);
540 int (*mon_notify)(struct netmap_kring *kring, int flags);
545 __declspec(align(64));
547 __attribute__((__aligned__(64)));
550 /* return 1 iff the kring needs to be turned on */
552 nm_kring_pending_on(struct netmap_kring *kring)
554 return kring->nr_pending_mode == NKR_NETMAP_ON &&
555 kring->nr_mode == NKR_NETMAP_OFF;
558 /* return 1 iff the kring needs to be turned off */
560 nm_kring_pending_off(struct netmap_kring *kring)
562 return kring->nr_pending_mode == NKR_NETMAP_OFF &&
563 kring->nr_mode == NKR_NETMAP_ON;
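/*
 * Example (illustrative sketch, not part of this header): a typical
 * nm_register() callback consumes the pending mode with the two helpers
 * above, updating nr_mode only for the krings that actually change state.
 * 'na' and 'onoff' are the callback arguments; everything else comes
 * from this header.
 *
 *	enum txrx t;
 *	u_int i;
 *
 *	for_rx_tx(t) {
 *		for (i = 0; i < nma_get_nrings(na, t); i++) {
 *			struct netmap_kring *kring = NMR(na, t)[i];
 *
 *			if (onoff && nm_kring_pending_on(kring))
 *				kring->nr_mode = NKR_NETMAP_ON;
 *			else if (!onoff && nm_kring_pending_off(kring))
 *				kring->nr_mode = NKR_NETMAP_OFF;
 *		}
 *	}
 */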
566 /* return the next index, with wraparound */
567 static inline uint32_t
568 nm_next(uint32_t i, uint32_t lim)
570 return unlikely(i == lim) ? 0 : i + 1;
574 /* return the previous index, with wraparound */
575 static inline uint32_t
576 nm_prev(uint32_t i, uint32_t lim)
578 return unlikely(i == 0) ? lim : i - 1;
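/*
 * Example (illustrative only): walking the slots between head and tail
 * of a netmap ring with nm_next(); 'ring' is assumed to be a valid
 * struct netmap_ring pointer and 'lim' its last valid index.
 *
 *	uint32_t lim = ring->num_slots - 1;
 *	uint32_t i = ring->head;
 *
 *	while (i != ring->tail) {
 *		// ... process ring->slot[i] ...
 *		i = nm_next(i, lim);
 *	}
 */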
584 * Here is the layout for the Rx and Tx rings.
588 +-----------------+ +-----------------+
591 +-----------------+ +-----------------+
592 head->| owned by user |<-hwcur | not sent to nic |<-hwcur
594 +-----------------+ | |
595 cur->| available to | | |
596 | user, not read | +-----------------+
597 | yet | cur->| (being |
600 +-----------------+ + ------ +
601 tail->| |<-hwtail | |<-hwlease
602 | (being | ... | | ...
603 | prepared) | ... | | ...
604 +-----------------+ ... | | ...
605 | |<-hwlease +-----------------+
606 | | tail->| |<-hwtail
610 +-----------------+ +-----------------+
612 * The cur/tail (user view) and hwcur/hwtail (kernel view)
613 * are used in the normal operation of the card.
615 * When a ring is the output of a switch port (Rx ring for
616 * a VALE port, Tx ring for the host stack or NIC), slots
617 * are reserved in blocks through 'hwlease' which points
618 * to the next unused slot.
619 * On an Rx ring, hwlease is always after hwtail,
620 * and completions cause hwtail to advance.
621 * On a Tx ring, hwlease is always between cur and hwtail,
622 * and completions cause cur to advance.
624 * nm_kr_space() returns the maximum number of slots that
626 * nm_kr_lease() reserves the required number of buffers,
627 * advances nkr_hwlease and also returns an entry in
628 * a circular array where completions should be reported.
633 #define plut_entry lut_entry
637 struct lut_entry *lut;
638 struct plut_entry *plut;
639 uint32_t objtotal; /* max buffer index */
640 uint32_t objsize; /* buffer size */
643 struct netmap_vp_adapter; // forward
646 /* Struct to be filled by nm_config callbacks. */
647 struct nm_config_info {
648 unsigned num_tx_rings;
649 unsigned num_rx_rings;
650 unsigned num_tx_descs;
651 unsigned num_rx_descs;
652 unsigned rx_buf_maxsize;
656 * default type for the magic field.
657 * May be overridden in glue code.
660 #define NM_OS_MAGIC uint32_t
661 #endif /* !NM_OS_MAGIC */
664 * The "struct netmap_adapter" extends the "struct adapter"
665 * (or equivalent) device descriptor.
666 * It contains all base fields needed to support netmap operation.
667 * There are in fact different types of netmap adapters
668 * (native, generic, VALE switch...) so a netmap_adapter is
669 * just the first field in the derived type.
671 struct netmap_adapter {
673 * On linux we do not have a good way to tell if an interface
674 * is netmap-capable. So we always use the following trick:
675 * NA(ifp) points here, and the first entry (which hopefully
676 * always exists and is at least 32 bits) contains a magic
677 * value which we can use to detect that the interface is good.
680 uint32_t na_flags; /* enabled, and other flags */
681 #define NAF_SKIP_INTR 1 /* use the regular interrupt handler.
682 * useful during initialization
684 #define NAF_SW_ONLY 2 /* forward packets only to sw adapter */
685 #define NAF_BDG_MAYSLEEP 4 /* the bridge is allowed to sleep when
686 * forwarding packets coming from this
689 #define NAF_MEM_OWNER 8 /* the adapter uses its own memory area
690 * that cannot be changed
692 #define NAF_NATIVE 16 /* the adapter is native.
693 * Virtual ports (non persistent vale ports,
694 * pipes, monitors...) should never use
697 #define NAF_NETMAP_ON 32 /* netmap is active (either native or
698 * emulated). Where possible (e.g. FreeBSD)
699 * IFCAP_NETMAP also mirrors this flag.
701 #define NAF_HOST_RINGS 64 /* the adapter supports the host rings */
702 #define NAF_FORCE_NATIVE 128 /* the adapter is always NATIVE */
703 #define NAF_PTNETMAP_HOST 256 /* the adapter supports ptnetmap in the host */
704 #define NAF_MOREFRAG 512 /* the adapter supports NS_MOREFRAG */
705 #define NAF_ZOMBIE (1U<<30) /* the nic driver has been unloaded */
706 #define NAF_BUSY (1U<<31) /* the adapter is used internally and
707 * cannot be registered from userspace
709 int active_fds; /* number of user-space descriptors using this
710 interface, which is equal to the number of
711 struct netmap_if objs in the mapped region. */
713 u_int num_rx_rings; /* number of adapter receive rings */
714 u_int num_tx_rings; /* number of adapter transmit rings */
715 u_int num_host_rx_rings; /* number of host receive rings */
716 u_int num_host_tx_rings; /* number of host transmit rings */
718 u_int num_tx_desc; /* number of descriptors in each queue */
721 /* tx_rings and rx_rings are private but allocated
722 * as a contiguous chunk of memory. Each array has
723 * N+1 entries, for the adapter queues and for the host queue.
725 struct netmap_kring **tx_rings; /* array of TX rings. */
726 struct netmap_kring **rx_rings; /* array of RX rings. */
728 void *tailroom; /* space below the rings array */
729 /* (used for leases) */
732 NM_SELINFO_T si[NR_TXRX]; /* global wait queues */
734 /* count users of the global wait queues */
735 int si_users[NR_TXRX];
737 void *pdev; /* used to store pci device */
739 /* copy of if_qflush and if_transmit pointers, to intercept
740 * packets from the network stack when netmap is active.
742 int (*if_transmit)(struct ifnet *, struct mbuf *);
744 /* copy of if_input for netmap_send_up() */
745 void (*if_input)(struct ifnet *, struct mbuf *);
747 /* Back reference to the parent ifnet struct. Used for
748 * hardware ports (emulated netmap included). */
749 struct ifnet *ifp; /* adapter is ifp->if_softc */
751 /*---- callbacks for this netmap adapter -----*/
753 * nm_dtor() is the cleanup routine called when destroying
755 * Called with NMG_LOCK held.
757 * nm_register() is called on NIOCREGIF and close() to enter
758 * or exit netmap mode on the NIC
759 * Called with NMG_LOCK held.
761 * nm_txsync() pushes packets to the underlying hw/switch
763 * nm_rxsync() collects packets from the underlying hw/switch
765 * nm_config() returns configuration information from the OS
766 * Called with NMG_LOCK held.
768 * nm_krings_create() create and init the tx_rings and
769 * rx_rings arrays of kring structures. In particular,
770 * set the nm_sync callbacks for each ring.
771 * There is no need to also allocate the corresponding
772 * netmap_rings, since netmap_mem_rings_create() will always
773 * be called to provide the missing ones.
774 * Called with NMG_LOCK held.
776 * nm_krings_delete() cleanup and delete the tx_rings and rx_rings
778 * Called with NMG_LOCK held.
780 * nm_notify() is used to act after data have become available
781 * (or the stopped state of the ring has changed)
782 * For hw devices this is typically a selwakeup(),
783 * but for NIC/host ports attached to a switch (or vice-versa)
784 * we also need to invoke the 'txsync' code downstream.
785 * This callback pointer is actually used only to initialize
787 * Return values are the same as for netmap_rx_irq().
789 void (*nm_dtor)(struct netmap_adapter *);
791 int (*nm_register)(struct netmap_adapter *, int onoff);
792 void (*nm_intr)(struct netmap_adapter *, int onoff);
794 int (*nm_txsync)(struct netmap_kring *kring, int flags);
795 int (*nm_rxsync)(struct netmap_kring *kring, int flags);
796 int (*nm_notify)(struct netmap_kring *kring, int flags);
797 #define NAF_FORCE_READ 1
798 #define NAF_FORCE_RECLAIM 2
799 #define NAF_CAN_FORWARD_DOWN 4
800 /* return configuration information */
801 int (*nm_config)(struct netmap_adapter *, struct nm_config_info *info);
802 int (*nm_krings_create)(struct netmap_adapter *);
803 void (*nm_krings_delete)(struct netmap_adapter *);
805 * nm_bdg_attach() initializes the na_vp field to point
806 * to an adapter that can be attached to a VALE switch. If the
807 * current adapter is already a VALE port, na_vp is simply a cast;
808 * otherwise, na_vp points to a netmap_bwrap_adapter.
809 * If applicable, this callback also initializes na_hostvp,
810 * that can be used to connect the adapter host rings to the
812 * Called with NMG_LOCK held.
814 * nm_bdg_ctl() is called on the actual attach/detach
815 * to/from the switch, to perform adapter-specific
817 * Called with NMG_LOCK held.
819 int (*nm_bdg_attach)(const char *bdg_name, struct netmap_adapter *,
821 int (*nm_bdg_ctl)(struct nmreq_header *, struct netmap_adapter *);
823 /* adapter used to attach this adapter to a VALE switch (if any) */
824 struct netmap_vp_adapter *na_vp;
825 /* adapter used to attach the host rings of this adapter
826 * to a VALE switch (if any) */
827 struct netmap_vp_adapter *na_hostvp;
829 /* standard refcount to control the lifetime of the adapter
830 * (it should be equal to the lifetime of the corresponding ifp)
834 /* memory allocator (opaque)
835 * We also cache a pointer to the lut_entry for translating
836 * buffer addresses, the total number of buffers and the buffer size.
838 struct netmap_mem_d *nm_mem;
839 struct netmap_mem_d *nm_mem_prev;
840 struct netmap_lut na_lut;
842 /* additional information attached to this adapter
843 * by other netmap subsystems. Currently used by
844 * bwrap, LINUX/v1000 and ptnetmap
848 /* array of pipes that have this adapter as a parent */
849 struct netmap_pipe_adapter **na_pipes;
850 int na_next_pipe; /* next free slot in the array */
851 int na_max_pipes; /* size of the array */
853 /* Offset of ethernet header for each packet. */
856 /* Max number of bytes that the NIC can store in the buffer
857 * referenced by each RX descriptor. This translates to the maximum
858 * bytes that a single netmap slot can reference. Larger packets
859 * require NS_MOREFRAG support. */
860 unsigned rx_buf_maxsize;
862 char name[NETMAP_REQ_IFNAMSIZ]; /* used at least by pipes */
865 unsigned long monitor_id; /* debugging */
869 static __inline u_int
870 nma_get_ndesc(struct netmap_adapter *na, enum txrx t)
872 return (t == NR_TX ? na->num_tx_desc : na->num_rx_desc);
876 nma_set_ndesc(struct netmap_adapter *na, enum txrx t, u_int v)
884 static __inline u_int
885 nma_get_nrings(struct netmap_adapter *na, enum txrx t)
887 return (t == NR_TX ? na->num_tx_rings : na->num_rx_rings);
890 static __inline u_int
891 nma_get_host_nrings(struct netmap_adapter *na, enum txrx t)
893 return (t == NR_TX ? na->num_host_tx_rings : na->num_host_rx_rings);
897 nma_set_nrings(struct netmap_adapter *na, enum txrx t, u_int v)
900 na->num_tx_rings = v;
902 na->num_rx_rings = v;
906 nma_set_host_nrings(struct netmap_adapter *na, enum txrx t, u_int v)
909 na->num_host_tx_rings = v;
911 na->num_host_rx_rings = v;
914 static __inline struct netmap_kring**
915 NMR(struct netmap_adapter *na, enum txrx t)
917 return (t == NR_TX ? na->tx_rings : na->rx_rings);
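/*
 * Example (illustrative only): when NAF_HOST_RINGS is set, the host
 * krings are stored right after the hardware ones, so the first host
 * RX kring of an adapter can be reached as
 *
 *	struct netmap_kring *host_rx = NMR(na, NR_RX)[nma_get_nrings(na, NR_RX)];
 */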
920 int nma_intr_enable(struct netmap_adapter *na, int onoff);
923 * If the NIC is owned by the kernel
924 * (i.e., bridge), neither another bridge nor user can use it;
925 * if the NIC is owned by a user, only users can share it.
926 * Evaluation must be done under NMG_LOCK().
928 #define NETMAP_OWNED_BY_KERN(na) ((na)->na_flags & NAF_BUSY)
929 #define NETMAP_OWNED_BY_ANY(na) \
930 (NETMAP_OWNED_BY_KERN(na) || ((na)->active_fds > 0))
933 * derived netmap adapters for various types of ports
935 struct netmap_vp_adapter { /* VALE software port */
936 struct netmap_adapter up;
941 * bdg_port is the port number used in the bridge;
942 * na_bdg points to the bridge this NA is attached to.
945 struct nm_bridge *na_bdg;
947 int autodelete; /* remove the ifp on last reference */
949 /* Maximum Frame Size, used in bdg_mismatch_datapath() */
951 /* Last source MAC on this port */
956 struct netmap_hw_adapter { /* physical device */
957 struct netmap_adapter up;
960 struct net_device_ops nm_ndo;
961 struct ethtool_ops nm_eto;
963 const struct ethtool_ops* save_ethtool;
965 int (*nm_hw_register)(struct netmap_adapter *, int onoff);
969 /* Mitigation support. */
970 struct nm_generic_mit {
971 struct hrtimer mit_timer;
973 int mit_ring_idx; /* index of the ring being mitigated */
974 struct netmap_adapter *mit_na; /* backpointer */
977 struct netmap_generic_adapter { /* emulated device */
978 struct netmap_hw_adapter up;
980 /* Pointer to a previously used netmap adapter. */
981 struct netmap_adapter *prev;
983 /* Emulated netmap adapters support:
984 * - save_if_input saves the if_input hook (FreeBSD);
985 * - mit implements rx interrupt mitigation;
987 void (*save_if_input)(struct ifnet *, struct mbuf *);
989 struct nm_generic_mit *mit;
991 netdev_tx_t (*save_start_xmit)(struct mbuf *, struct ifnet *);
993 /* Is the adapter able to use multiple RX slots to scatter
994 * each packet pushed up by the driver? */
997 /* Is the transmission path controlled by a netmap-aware
998 * device queue (i.e. qdisc on linux)? */
1001 #endif /* WITH_GENERIC */
1003 static __inline u_int
1004 netmap_real_rings(struct netmap_adapter *na, enum txrx t)
1006 return nma_get_nrings(na, t) +
1007 !!(na->na_flags & NAF_HOST_RINGS) * nma_get_host_nrings(na, t);
1010 /* account for fake rings */
1011 static __inline u_int
1012 netmap_all_rings(struct netmap_adapter *na, enum txrx t)
1014 return max(nma_get_nrings(na, t) + 1, netmap_real_rings(na, t));
1017 int netmap_default_bdg_attach(const char *name, struct netmap_adapter *na,
1018 struct nm_bridge *);
1019 struct nm_bdg_polling_state;
1021 * Bridge wrapper for non-VALE ports attached to a VALE switch.
1023 * The real device must already have its own netmap adapter (hwna).
1024 * The bridge wrapper and the hwna adapter share the same set of
1025 * netmap rings and buffers, but they have two separate sets of
1026 * kring descriptors, with tx/rx meanings swapped:
1029 * bwrap krings rings krings hwna
1030 * +------+ +------+ +-----+ +------+ +------+
1031 * |tx_rings->| |\ /| |----| |<-tx_rings|
1032 * | | +------+ \ / +-----+ +------+ | |
1035 * | | +------+/ \+-----+ +------+ | |
1036 * |rx_rings->| | | |----| |<-rx_rings|
1037 * | | +------+ +-----+ +------+ | |
1040 * - packets coming from the bridge go to the bwrap rx rings,
1041 * which are also the hwna tx rings. The bwrap notify callback
1042 * will then complete the hwna tx (see netmap_bwrap_notify).
1044 * - packets coming from the outside go to the hwna rx rings,
1045 * which are also the bwrap tx rings. The (overwritten) hwna
1046 * notify method will then complete the bridge tx
1047 * (see netmap_bwrap_intr_notify).
1049 * The bridge wrapper may optionally connect the hwna 'host' rings
1050 * to the bridge. This is done by using a second port in the
1051 * bridge and connecting it to the 'host' netmap_vp_adapter
1052 * contained in the netmap_bwrap_adapter. The bwrap host adapter
1053 * cross-links the hwna host rings in the same way as shown above.
1055 * - packets coming from the bridge and directed to the host stack
1056 * are handled by the bwrap host notify callback
1057 * (see netmap_bwrap_host_notify)
1059 * - packets coming from the host stack are still handled by the
1060 * overwritten hwna notify callback (netmap_bwrap_intr_notify),
1061 * but are diverted to the host adapter depending on the ring number.
1064 struct netmap_bwrap_adapter {
1065 struct netmap_vp_adapter up;
1066 struct netmap_vp_adapter host; /* for host rings */
1067 struct netmap_adapter *hwna; /* the underlying device */
1070 * When we attach a physical interface to the bridge, we
1071 * allow the controlling process to terminate, so we need
1072 * a place to store the netmap_priv_d data structure.
1073 * This is only done when physical interfaces
1074 * are attached to a bridge.
1076 struct netmap_priv_d *na_kpriv;
1077 struct nm_bdg_polling_state *na_polling_state;
1078 /* we overwrite the hwna->na_vp pointer, so we save
1079 * here its original value, to be restored at detach
1081 struct netmap_vp_adapter *saved_na_vp;
1083 int nm_bdg_ctl_attach(struct nmreq_header *hdr, void *auth_token);
1084 int nm_bdg_ctl_detach(struct nmreq_header *hdr, void *auth_token);
1085 int nm_bdg_polling(struct nmreq_header *hdr);
1086 int netmap_bdg_list(struct nmreq_header *hdr);
1089 int netmap_vi_create(struct nmreq_header *hdr, int);
1090 int nm_vi_create(struct nmreq_header *);
1091 int nm_vi_destroy(const char *name);
1092 #else /* !WITH_VALE */
1093 #define netmap_vi_create(hdr, a) (EOPNOTSUPP)
1094 #endif /* WITH_VALE */
1098 #define NM_MAXPIPES 64 /* max number of pipes per adapter */
1100 struct netmap_pipe_adapter {
1101 /* pipe identifier is up.name */
1102 struct netmap_adapter up;
1104 #define NM_PIPE_ROLE_MASTER 0x1
1105 #define NM_PIPE_ROLE_SLAVE 0x2
1106 int role; /* either NM_PIPE_ROLE_MASTER or NM_PIPE_ROLE_SLAVE */
1108 struct netmap_adapter *parent; /* adapter that owns the memory */
1109 struct netmap_pipe_adapter *peer; /* the other end of the pipe */
1110 int peer_ref; /* 1 iff we are holding a ref to the peer */
1111 struct ifnet *parent_ifp; /* maybe null */
1113 u_int parent_slot; /* index in the parent pipe array */
1116 #endif /* WITH_PIPES */
1119 /* return slots reserved to rx clients; used in drivers */
1120 static inline uint32_t
1121 nm_kr_rxspace(struct netmap_kring *k)
1123 int space = k->nr_hwtail - k->nr_hwcur;
1125 space += k->nkr_num_slots;
1126 ND("preserving %d rx slots %d -> %d", space, k->nr_hwcur, k->nr_hwtail);
1131 /* return slots reserved to tx clients */
1132 #define nm_kr_txspace(_k) nm_kr_rxspace(_k)
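/*
 * Worked example (illustrative only): with nkr_num_slots = 256,
 * nr_hwcur = 250 and nr_hwtail = 10, nm_kr_rxspace() computes
 * 10 - 250 = -240 and wraps it to -240 + 256 = 16, i.e. 16 slots
 * are still reserved to the rx clients.
 */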
1135 /* True if no space in the tx ring, only valid after txsync_prologue */
1137 nm_kr_txempty(struct netmap_kring *kring)
1139 return kring->rcur == kring->nr_hwtail;
1142 /* True if no more completed slots in the rx ring, only valid after
1143 * rxsync_prologue */
1144 #define nm_kr_rxempty(_k) nm_kr_txempty(_k)
1147 * protect against multiple threads using the same ring.
1148 * also check that the ring has not been stopped or locked
1150 #define NM_KR_BUSY 1 /* some other thread is syncing the ring */
1151 #define NM_KR_STOPPED 2 /* unbounded stop (ifconfig down or driver unload) */
1152 #define NM_KR_LOCKED 3 /* bounded, brief stop for mutual exclusion */
1155 /* release the previously acquired right to use the *sync() methods of the ring */
1156 static __inline void nm_kr_put(struct netmap_kring *kr)
1158 NM_ATOMIC_CLEAR(&kr->nr_busy);
1162 /* true if the ifp that backed the adapter has disappeared (e.g., the
1163 * driver has been unloaded)
1165 static inline int nm_iszombie(struct netmap_adapter *na);
1167 /* try to obtain exclusive right to issue the *sync() operations on the ring.
1168 * The right is obtained and must be later relinquished via nm_kr_put() if and
1169 * only if nm_kr_tryget() returns 0.
1170 * If can_sleep is 1 there are only two other possible outcomes:
1171 * - the function returns NM_KR_BUSY
1172 * - the function returns NM_KR_STOPPED and sets the POLLERR bit in *perr
1174 * In both cases the caller will typically skip the ring, possibly collecting
1175 * errors along the way.
1176 * If the calling context does not allow sleeping, the caller must pass 0 in can_sleep.
1177 * In the latter case, the function may also return NM_KR_LOCKED and leave *perr
1178 * untouched: ideally, the caller should try again at a later time.
1180 static __inline int nm_kr_tryget(struct netmap_kring *kr, int can_sleep, int *perr)
1182 int busy = 1, stopped;
1183 /* check a first time without taking the lock
1184 * to avoid starvation for nm_kr_get()
1187 stopped = kr->nkr_stopped;
1188 if (unlikely(stopped)) {
1191 busy = NM_ATOMIC_TEST_AND_SET(&kr->nr_busy);
1192 /* we should not return NM_KR_BUSY if the ring was
1193 * actually stopped, so check another time after
1194 * the barrier provided by the atomic operation
1196 stopped = kr->nkr_stopped;
1197 if (unlikely(stopped)) {
1201 if (unlikely(nm_iszombie(kr->na))) {
1202 stopped = NM_KR_STOPPED;
1206 return unlikely(busy) ? NM_KR_BUSY : 0;
1211 if (stopped == NM_KR_STOPPED) {
1212 /* if POLLERR is defined we want to use it to simplify netmap_poll().
1213 * Otherwise, any non-zero value will do.
1216 #define NM_POLLERR POLLERR
1218 #define NM_POLLERR 1
1219 #endif /* POLLERR */
1221 *perr |= NM_POLLERR;
1223 } else if (can_sleep) {
1224 tsleep(kr, 0, "NM_KR_TRYGET", 4);
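/*
 * Example (illustrative sketch): the usual pattern around a sync
 * operation; the ring is simply skipped if it cannot be acquired.
 *
 *	int revents = 0;
 *
 *	if (nm_kr_tryget(kring, 1, &revents) == 0) {	// 1: can_sleep
 *		kring->nm_sync(kring, 0);
 *		nm_kr_put(kring);
 *	}
 */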
1230 /* put the ring in the 'stopped' state and wait for the current user (if any) to
1231 * notice. stopped must be either NM_KR_STOPPED or NM_KR_LOCKED
1233 static __inline void nm_kr_stop(struct netmap_kring *kr, int stopped)
1235 kr->nkr_stopped = stopped;
1236 while (NM_ATOMIC_TEST_AND_SET(&kr->nr_busy))
1237 tsleep(kr, 0, "NM_KR_GET", 4);
1240 /* restart a ring after a stop */
1241 static __inline void nm_kr_start(struct netmap_kring *kr)
1243 kr->nkr_stopped = 0;
1249 * The following functions are used by individual drivers to
1250 * support netmap operation.
1252 * netmap_attach() initializes a struct netmap_adapter, allocating the
1253 * struct netmap_ring's and the struct selinfo.
1255 * netmap_detach() frees the memory allocated by netmap_attach().
1257 * netmap_transmit() replaces the if_transmit routine of the interface,
1258 * and is used to intercept packets coming from the stack.
1260 * netmap_load_map/netmap_reload_map are helper routines to set/reset
1261 * the dmamap for a packet buffer
1263 * netmap_reset() is a helper routine to be called in the hw driver
1264 * when reinitializing a ring. It should not be called by
1265 * virtual ports (vale, pipes, monitor)
1267 int netmap_attach(struct netmap_adapter *);
1268 int netmap_attach_ext(struct netmap_adapter *, size_t size, int override_reg);
1269 void netmap_detach(struct ifnet *);
1270 int netmap_transmit(struct ifnet *, struct mbuf *);
1271 struct netmap_slot *netmap_reset(struct netmap_adapter *na,
1272 enum txrx tx, u_int n, u_int new_cur);
1273 int netmap_ring_reinit(struct netmap_kring *);
1274 int netmap_rings_config_get(struct netmap_adapter *, struct nm_config_info *);
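/*
 * Example (illustrative sketch, not taken from a real driver): minimal
 * attach sequence for a hypothetical "foo" NIC. The foo_* callbacks and
 * the 'sc' softc fields are assumptions; the netmap_adapter fields and
 * netmap_attach() come from this header.
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = sc->ifp;
 *	na.num_tx_desc = sc->num_tx_desc;
 *	na.num_rx_desc = sc->num_rx_desc;
 *	na.num_tx_rings = na.num_rx_rings = sc->num_queues;
 *	na.nm_txsync = foo_netmap_txsync;
 *	na.nm_rxsync = foo_netmap_rxsync;
 *	na.nm_register = foo_netmap_register;
 *	netmap_attach(&na);
 */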
1276 /* Return codes for netmap_*x_irq. */
1278 /* Driver should do normal interrupt processing, e.g. because
1279 * the interface is not in netmap mode. */
1281 /* Port is in netmap mode, and the interrupt work has been
1282 * completed. The driver does not have to notify netmap
1283 * again before the next interrupt. */
1284 NM_IRQ_COMPLETED = -1,
1285 /* Port is in netmap mode, but the interrupt work has not been
1286 * completed. The driver has to make sure netmap will be
1287 * notified again soon, even if no more interrupts come (e.g.
1288 * on Linux the driver should not call napi_complete()). */
1289 NM_IRQ_RESCHED = -2,
1292 /* default functions to handle rx/tx interrupts */
1293 int netmap_rx_irq(struct ifnet *, u_int, u_int *);
1294 #define netmap_tx_irq(_n, _q) netmap_rx_irq(_n, _q, NULL)
1295 int netmap_common_irq(struct netmap_adapter *, u_int, u_int *work_done);
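/*
 * Example (illustrative sketch): an rx interrupt handler can defer to
 * netmap and fall back to normal processing only when netmap_rx_irq()
 * returns 0 (the pass-through code). 'foo_rxeof()' and the queue
 * index 'q' are assumptions.
 *
 *	u_int work_done;
 *
 *	if (netmap_rx_irq(ifp, q, &work_done))
 *		return;			// handled or rescheduled by netmap
 *	foo_rxeof(sc, q);		// interface not in netmap mode
 */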
1299 /* functions used by external modules to interface with VALE */
1300 #define netmap_vp_to_ifp(_vp) ((_vp)->up.ifp)
1301 #define netmap_ifp_to_vp(_ifp) (NA(_ifp)->na_vp)
1302 #define netmap_ifp_to_host_vp(_ifp) (NA(_ifp)->na_hostvp)
1303 #define netmap_bdg_idx(_vp) ((_vp)->bdg_port)
1304 const char *netmap_bdg_name(struct netmap_vp_adapter *);
1305 #else /* !WITH_VALE */
1306 #define netmap_vp_to_ifp(_vp) NULL
1307 #define netmap_ifp_to_vp(_ifp) NULL
1308 #define netmap_ifp_to_host_vp(_ifp) NULL
1309 #define netmap_bdg_idx(_vp) -1
1310 #endif /* WITH_VALE */
1313 nm_netmap_on(struct netmap_adapter *na)
1315 return na && na->na_flags & NAF_NETMAP_ON;
1319 nm_native_on(struct netmap_adapter *na)
1321 return nm_netmap_on(na) && (na->na_flags & NAF_NATIVE);
1325 nm_iszombie(struct netmap_adapter *na)
1327 return na == NULL || (na->na_flags & NAF_ZOMBIE);
1331 nm_update_hostrings_mode(struct netmap_adapter *na)
1333 /* Process nr_mode and nr_pending_mode for host rings. */
1334 na->tx_rings[na->num_tx_rings]->nr_mode =
1335 na->tx_rings[na->num_tx_rings]->nr_pending_mode;
1336 na->rx_rings[na->num_rx_rings]->nr_mode =
1337 na->rx_rings[na->num_rx_rings]->nr_pending_mode;
1340 void nm_set_native_flags(struct netmap_adapter *);
1341 void nm_clear_native_flags(struct netmap_adapter *);
1344 * nm_*sync_prologue() functions are used in ioctl/poll and ptnetmap
1346 * We need the netmap_ring* parameter, because in ptnetmap it is decoupled
1348 * The user-space ring pointers (head/cur/tail) are shared through
1349 * CSB between host and guest.
1353 * validates parameters in the ring/kring, returns a value for head
1354 * If any error, returns ring_size to force a reinit.
1356 uint32_t nm_txsync_prologue(struct netmap_kring *, struct netmap_ring *);
1360 * validates parameters in the ring/kring, returns a value for head
1361 * If any error, returns ring_size to force a reinit.
1363 uint32_t nm_rxsync_prologue(struct netmap_kring *, struct netmap_ring *);
1366 /* check/fix address and len in tx rings */
1367 #if 1 /* debug version */
1368 #define NM_CHECK_ADDR_LEN(_na, _a, _l) do { \
1369 if (_a == NETMAP_BUF_BASE(_na) || _l > NETMAP_BUF_SIZE(_na)) { \
1370 RD(5, "bad addr/len ring %d slot %d idx %d len %d", \
1371 kring->ring_id, nm_i, slot->buf_idx, len); \
1372 if (_l > NETMAP_BUF_SIZE(_na)) \
1373 _l = NETMAP_BUF_SIZE(_na); \
1375 #else /* no debug version */
1376 #define NM_CHECK_ADDR_LEN(_na, _a, _l) do { \
1377 if (_l > NETMAP_BUF_SIZE(_na)) \
1378 _l = NETMAP_BUF_SIZE(_na); \
1383 /*---------------------------------------------------------------*/
1385 * Support routines used by netmap subsystems
1386 * (native drivers, VALE, generic, pipes, monitors, ...)
1390 /* common routine for all functions that create a netmap adapter. It performs
1392 * - if the na points to an ifp, mark the ifp as netmap capable
1393 * using na as its native adapter;
1394 * - provide defaults for the setup callbacks and the memory allocator
1396 int netmap_attach_common(struct netmap_adapter *);
1397 /* fill priv->np_[tr]xq{first,last} using the ringid and flags information
1398 * coming from a struct nmreq_register
1400 int netmap_interp_ringid(struct netmap_priv_d *priv, uint32_t nr_mode,
1401 uint16_t nr_ringid, uint64_t nr_flags);
1402 /* update the ring parameters (number and size of tx and rx rings).
1403 * It calls the nm_config callback, if available.
1405 int netmap_update_config(struct netmap_adapter *na);
1406 /* create and initialize the common fields of the krings array,
1407 * using the information that must already be available in the na.
1408 * tailroom can be used to request the allocation of additional
1409 * tailroom bytes after the krings array. This is used by
1410 * netmap_vp_adapter's (i.e., VALE ports) to make room for
1411 * leasing-related data structures
1413 int netmap_krings_create(struct netmap_adapter *na, u_int tailroom);
1414 /* deletes the kring array of the adapter. The array must have
1415 * been created using netmap_krings_create
1417 void netmap_krings_delete(struct netmap_adapter *na);
1419 int netmap_hw_krings_create(struct netmap_adapter *na);
1420 void netmap_hw_krings_delete(struct netmap_adapter *na);
1422 /* set the stopped/enabled status of a ring.
1423 * When stopping, it also waits for all current activity on the ring to
1424 * terminate. The status change is then notified using the na nm_notify
1427 void netmap_set_ring(struct netmap_adapter *, u_int ring_id, enum txrx, int stopped);
1428 /* set the stopped/enabled status of all rings of the adapter. */
1429 void netmap_set_all_rings(struct netmap_adapter *, int stopped);
1430 /* convenience wrappers for netmap_set_all_rings */
1431 void netmap_disable_all_rings(struct ifnet *);
1432 void netmap_enable_all_rings(struct ifnet *);
1434 int netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
1435 uint32_t nr_mode, uint16_t nr_ringid, uint64_t nr_flags);
1436 void netmap_do_unregif(struct netmap_priv_d *priv);
1438 u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg);
1439 int netmap_get_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1440 struct ifnet **ifp, struct netmap_mem_d *nmd, int create);
1441 void netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp);
1442 int netmap_get_hw_na(struct ifnet *ifp,
1443 struct netmap_mem_d *nmd, struct netmap_adapter **na);
1447 * The following bridge-related functions are used by other
1450 * VALE only supports unicast or broadcast. The lookup
1451 * function can return 0 .. NM_BDG_MAXPORTS-1 for regular ports,
1452 * NM_BDG_MAXPORTS for broadcast, NM_BDG_MAXPORTS+1 to indicate
1455 typedef uint32_t (*bdg_lookup_fn_t)(struct nm_bdg_fwd *ft, uint8_t *ring_nr,
1456 struct netmap_vp_adapter *, void *private_data);
1457 typedef int (*bdg_config_fn_t)(struct nm_ifreq *);
1458 typedef void (*bdg_dtor_fn_t)(const struct netmap_vp_adapter *);
1459 typedef void *(*bdg_update_private_data_fn_t)(void *private_data, void *callback_data, int *error);
1460 typedef int (*bdg_vp_create_fn_t)(struct nmreq_header *hdr,
1461 struct ifnet *ifp, struct netmap_mem_d *nmd,
1462 struct netmap_vp_adapter **ret);
1463 typedef int (*bdg_bwrap_attach_fn_t)(const char *nr_name, struct netmap_adapter *hwna);
1464 struct netmap_bdg_ops {
1465 bdg_lookup_fn_t lookup;
1466 bdg_config_fn_t config;
1468 bdg_vp_create_fn_t vp_create;
1469 bdg_bwrap_attach_fn_t bwrap_attach;
1470 char name[IFNAMSIZ];
1472 int netmap_bwrap_attach(const char *name, struct netmap_adapter *, struct netmap_bdg_ops *);
1473 int netmap_bdg_regops(const char *name, struct netmap_bdg_ops *bdg_ops, void *private_data, void *auth_token);
1475 #define NM_BRIDGES 8 /* number of bridges */
1476 #define NM_BDG_MAXPORTS 254 /* up to 254 */
1477 #define NM_BDG_BROADCAST NM_BDG_MAXPORTS
1478 #define NM_BDG_NOPORT (NM_BDG_MAXPORTS+1)
1480 struct nm_bridge *netmap_init_bridges2(u_int);
1481 void netmap_uninit_bridges2(struct nm_bridge *, u_int);
1482 int netmap_init_bridges(void);
1483 void netmap_uninit_bridges(void);
1484 int nm_bdg_update_private_data(const char *name, bdg_update_private_data_fn_t callback,
1485 void *callback_data, void *auth_token);
1486 int netmap_bdg_config(struct nm_ifreq *nifr);
1489 uint32_t netmap_bdg_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
1490 struct netmap_vp_adapter *, void *private_data);
1492 /* these are redefined in case of no VALE support */
1493 int netmap_get_vale_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1494 struct netmap_mem_d *nmd, int create);
1495 void *netmap_vale_create(const char *bdg_name, int *return_status);
1496 int netmap_vale_destroy(const char *bdg_name, void *auth_token);
1498 #else /* !WITH_VALE */
1499 #define netmap_bdg_learning(_1, _2, _3, _4) 0
1500 #define netmap_get_vale_na(_1, _2, _3, _4) 0
1501 #define netmap_bdg_create(_1, _2) NULL
1502 #define netmap_bdg_destroy(_1, _2) 0
1503 #endif /* !WITH_VALE */
1506 /* max number of pipes per device */
1507 #define NM_MAXPIPES 64 /* XXX this should probably be a sysctl */
1508 void netmap_pipe_dealloc(struct netmap_adapter *);
1509 int netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1510 struct netmap_mem_d *nmd, int create);
1511 #else /* !WITH_PIPES */
1512 #define NM_MAXPIPES 0
1513 #define netmap_pipe_alloc(_1, _2) 0
1514 #define netmap_pipe_dealloc(_1)
1515 #define netmap_get_pipe_na(hdr, _2, _3, _4) \
1516 ((strchr(hdr->nr_name, '{') != NULL || strchr(hdr->nr_name, '}') != NULL) ? EOPNOTSUPP : 0)
1520 int netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1521 struct netmap_mem_d *nmd, int create);
1522 void netmap_monitor_stop(struct netmap_adapter *na);
1524 #define netmap_get_monitor_na(hdr, _2, _3, _4) \
1525 (((struct nmreq_register *)(uintptr_t)hdr->nr_body)->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX) ? EOPNOTSUPP : 0)
1528 #ifdef CONFIG_NET_NS
1529 struct net *netmap_bns_get(void);
1530 void netmap_bns_put(struct net *);
1531 void netmap_bns_getbridges(struct nm_bridge **, u_int *);
1533 #define netmap_bns_get()
1534 #define netmap_bns_put(_1)
1535 #define netmap_bns_getbridges(b, n) \
1536 do { *b = nm_bridges; *n = NM_BRIDGES; } while (0)
1539 /* Various prototypes */
1540 int netmap_poll(struct netmap_priv_d *, int events, NM_SELRECORD_T *td);
1541 int netmap_init(void);
1542 void netmap_fini(void);
1543 int netmap_get_memory(struct netmap_priv_d* p);
1544 void netmap_dtor(void *data);
1546 int netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
1547 struct thread *, int nr_body_is_user);
1548 int netmap_ioctl_legacy(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
1550 size_t nmreq_size_by_type(uint16_t nr_reqtype);
1552 /* netmap_adapter creation/destruction */
1554 // #define NM_DEBUG_PUTGET 1
1556 #ifdef NM_DEBUG_PUTGET
1558 #define NM_DBG(f) __##f
1560 void __netmap_adapter_get(struct netmap_adapter *na);
1562 #define netmap_adapter_get(na) \
1564 struct netmap_adapter *__na = na; \
1565 D("getting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \
1566 __netmap_adapter_get(__na); \
1569 int __netmap_adapter_put(struct netmap_adapter *na);
1571 #define netmap_adapter_put(na) \
1573 struct netmap_adapter *__na = na; \
1574 D("putting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \
1575 __netmap_adapter_put(__na); \
1578 #else /* !NM_DEBUG_PUTGET */
1581 void netmap_adapter_get(struct netmap_adapter *na);
1582 int netmap_adapter_put(struct netmap_adapter *na);
1584 #endif /* !NM_DEBUG_PUTGET */
1590 #define NETMAP_BUF_BASE(_na) ((_na)->na_lut.lut[0].vaddr)
1591 #define NETMAP_BUF_SIZE(_na) ((_na)->na_lut.objsize)
1592 extern int netmap_no_pendintr;
1593 extern int netmap_mitigate;
1594 extern int netmap_verbose; /* for debugging */
1595 enum { /* verbose flags */
1596 NM_VERB_ON = 1, /* generic verbose */
1597 NM_VERB_HOST = 0x2, /* verbose host stack */
1598 NM_VERB_RXSYNC = 0x10, /* verbose on rxsync/txsync */
1599 NM_VERB_TXSYNC = 0x20,
1600 NM_VERB_RXINTR = 0x100, /* verbose on rx/tx intr (driver) */
1601 NM_VERB_TXINTR = 0x200,
1602 NM_VERB_NIC_RXSYNC = 0x1000, /* verbose on rx/tx intr (driver) */
1603 NM_VERB_NIC_TXSYNC = 0x2000,
1606 extern int netmap_txsync_retry;
1607 extern int netmap_flags;
1608 extern int netmap_generic_hwcsum;
1609 extern int netmap_generic_mit;
1610 extern int netmap_generic_ringsize;
1611 extern int netmap_generic_rings;
1613 extern int netmap_generic_txqdisc;
1615 extern int ptnetmap_tx_workers;
1618 * NA returns a pointer to the struct netmap_adapter from the ifp.
1619 * WNA is os-specific and must be defined in glue code.
1621 #define NA(_ifp) ((struct netmap_adapter *)WNA(_ifp))
1624 * we provide a default implementation of NM_ATTACH_NA/NM_DETACH_NA
1625 * based on the WNA field.
1626 * Glue code may override this by defining its own NM_ATTACH_NA
1628 #ifndef NM_ATTACH_NA
1630 * On old versions of FreeBSD, NA(ifp) is a pspare. On linux we
1631 * overload another pointer in the netdev.
1633 * We check if NA(ifp) is set and its first element has a related
1634 * magic value. The capenable is within the struct netmap_adapter.
1636 #define NETMAP_MAGIC 0x52697a7a
1638 #define NM_NA_VALID(ifp) (NA(ifp) && \
1639 ((uint32_t)(uintptr_t)NA(ifp) ^ NA(ifp)->magic) == NETMAP_MAGIC )
1641 #define NM_ATTACH_NA(ifp, na) do { \
1645 ((uint32_t)(uintptr_t)NA(ifp)) ^ NETMAP_MAGIC; \
1647 #define NM_RESTORE_NA(ifp, na) WNA(ifp) = na;
1649 #define NM_DETACH_NA(ifp) do { WNA(ifp) = NULL; } while (0)
1650 #define NM_NA_CLASH(ifp) (NA(ifp) && !NM_NA_VALID(ifp))
1651 #endif /* !NM_ATTACH_NA */
1654 #define NM_IS_NATIVE(ifp) (NM_NA_VALID(ifp) && NA(ifp)->nm_dtor == netmap_hw_dtor)
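/*
 * Example (illustrative only): code that receives an arbitrary ifp
 * should check NM_NA_VALID() before dereferencing NA(ifp).
 *
 *	if (NM_NA_VALID(ifp))
 *		D("%s: %u tx rings", NA(ifp)->name, NA(ifp)->num_tx_rings);
 */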
1656 #if defined(__FreeBSD__)
1658 /* Assigns the device IOMMU domain to an allocator.
1659 * Returns -ENOMEM in case the domain is different */
1660 #define nm_iommu_group_id(dev) (0)
1662 /* Callback invoked by the dma machinery after a successful dmamap_load */
1663 static void netmap_dmamap_cb(__unused void *arg,
1664 __unused bus_dma_segment_t * segs, __unused int nseg, __unused int error)
1668 /* bus_dmamap_load wrapper: call aforementioned function if map != NULL.
1669 * XXX can we do it without a callback ?
1672 netmap_load_map(struct netmap_adapter *na,
1673 bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1676 bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na),
1677 netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
1682 netmap_unload_map(struct netmap_adapter *na,
1683 bus_dma_tag_t tag, bus_dmamap_t map)
1686 bus_dmamap_unload(tag, map);
1689 #define netmap_sync_map(na, tag, map, sz, t)
1691 /* update the map when a buffer changes. */
1693 netmap_reload_map(struct netmap_adapter *na,
1694 bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1697 bus_dmamap_unload(tag, map);
1698 bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na),
1699 netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
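/*
 * Example (illustrative sketch): in a FreeBSD driver txsync, the dma map
 * is reloaded only when userspace installed a new buffer in the slot.
 * 'txbuf' (the driver's per-slot state) and 'txr->txtag' are assumptions.
 *
 *	if (slot->flags & NS_BUF_CHANGED) {
 *		netmap_reload_map(na, txr->txtag, txbuf->map, addr);
 *		slot->flags &= ~NS_BUF_CHANGED;
 *	}
 */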
1703 #elif defined(_WIN32)
1707 int nm_iommu_group_id(bus_dma_tag_t dev);
1708 #include <linux/dma-mapping.h>
1712 * dma_map_single(&pdev->dev, virt_addr, len, direction)
1713 * dma_unmap_single(&adapter->pdev->dev, phys_addr, len, direction)
1716 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[l];
1717 /* set time_stamp *before* dma to help avoid a possible race */
1718 buffer_info->time_stamp = jiffies;
1719 buffer_info->mapped_as_page = false;
1720 buffer_info->length = len;
1721 //buffer_info->next_to_watch = l;
1722 /* reload dma map */
1723 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1724 NETMAP_BUF_SIZE, DMA_TO_DEVICE);
1725 buffer_info->dma = dma_map_single(&adapter->pdev->dev,
1726 addr, NETMAP_BUF_SIZE, DMA_TO_DEVICE);
1728 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1729 D("dma mapping error");
1730 /* goto dma_error; See e1000_put_txbuf() */
1733 tx_desc->buffer_addr = htole64(buffer_info->dma); //XXX
1738 netmap_load_map(struct netmap_adapter *na,
1739 bus_dma_tag_t tag, bus_dmamap_t map, void *buf, u_int size)
1742 *map = dma_map_single(na->pdev, buf, size,
1744 if (dma_mapping_error(na->pdev, *map)) {
1753 netmap_unload_map(struct netmap_adapter *na,
1754 bus_dma_tag_t tag, bus_dmamap_t map, u_int sz)
1757 dma_unmap_single(na->pdev, *map, sz,
1762 #ifdef NETMAP_LINUX_HAVE_DMASYNC
1764 netmap_sync_map_cpu(struct netmap_adapter *na,
1765 bus_dma_tag_t tag, bus_dmamap_t map, u_int sz, enum txrx t)
1768 dma_sync_single_for_cpu(na->pdev, *map, sz,
1769 (t == NR_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
1774 netmap_sync_map_dev(struct netmap_adapter *na,
1775 bus_dma_tag_t tag, bus_dmamap_t map, u_int sz, enum txrx t)
1778 dma_sync_single_for_device(na->pdev, *map, sz,
1779 (t == NR_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
1784 netmap_reload_map(struct netmap_adapter *na,
1785 bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1787 u_int sz = NETMAP_BUF_SIZE(na);
1790 dma_unmap_single(na->pdev, *map, sz,
1794 *map = dma_map_single(na->pdev, buf, sz,
1797 #else /* !NETMAP_LINUX_HAVE_DMASYNC */
1798 #define netmap_sync_map_cpu(na, tag, map, sz, t)
1799 #define netmap_sync_map_dev(na, tag, map, sz, t)
1800 #endif /* NETMAP_LINUX_HAVE_DMASYNC */
1806 * functions to map NIC to KRING indexes (n2k) and vice versa (k2n)
1809 netmap_idx_n2k(struct netmap_kring *kr, int idx)
1811 int n = kr->nkr_num_slots;
1812 idx += kr->nkr_hwofs;
1823 netmap_idx_k2n(struct netmap_kring *kr, int idx)
1825 int n = kr->nkr_num_slots;
1826 idx -= kr->nkr_hwofs;
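/*
 * Worked example (illustrative only): with nkr_num_slots = 256 and
 * nkr_hwofs = 3, netmap_idx_n2k(kr, 255) wraps 255 + 3 = 258 to 2,
 * and netmap_idx_k2n(kr, 2) maps 2 - 3 = -1 back to 255.
 */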
/* Entries of the look-up table. */
#ifdef __FreeBSD__
struct lut_entry {
	void *vaddr;		/* virtual address. */
	vm_paddr_t paddr;	/* physical address. */
};
#else /* linux & _WIN32 */
/* dma-mapping in linux can assign a buffer a different address
 * depending on the device, so we need to have a separate
 * physical-address look-up table for each na.
 * We can still share the vaddrs, though, therefore we split
 * the lut_entry structure.
 */
struct lut_entry {
	void *vaddr;		/* virtual address. */
};

struct plut_entry {
	vm_paddr_t paddr;	/* physical address. */
};
#endif /* linux & _WIN32 */
struct netmap_obj_pool;

/*
 * NMB returns the virtual address of a buffer (buffer 0 on bad index);
 * PNMB also fills the physical address.
 */
static inline void *
NMB(struct netmap_adapter *na, struct netmap_slot *slot)
{
	struct lut_entry *lut = na->na_lut.lut;
	uint32_t i = slot->buf_idx;
	return (unlikely(i >= na->na_lut.objtotal)) ?
		lut[0].vaddr : lut[i].vaddr;
}

static inline void *
PNMB(struct netmap_adapter *na, struct netmap_slot *slot, uint64_t *pp)
{
	uint32_t i = slot->buf_idx;
	struct lut_entry *lut = na->na_lut.lut;
	struct plut_entry *plut = na->na_lut.plut;
	void *ret = (i >= na->na_lut.objtotal) ? lut[0].vaddr : lut[i].vaddr;

#ifdef _WIN32
	*pp = (i >= na->na_lut.objtotal) ? (uint64_t)plut[0].paddr.QuadPart : (uint64_t)plut[i].paddr.QuadPart;
#else
	*pp = (i >= na->na_lut.objtotal) ? plut[0].paddr : plut[i].paddr;
#endif
	return ret;
}
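/*
 * Usage sketch (assumption): a txsync routine usually needs both views of
 * a slot's buffer, the kernel virtual address (for prefetch/copies) and
 * the physical/bus address to write into the NIC descriptor. "curr" and
 * "txd_flags" below are hypothetical driver names.
 */
#if 0
	struct netmap_slot *slot = &ring->slot[nm_i];
	uint64_t paddr;
	void *vaddr = PNMB(na, slot, &paddr);	/* also validates buf_idx */

	curr->buffer_addr = htole64(paddr);	/* NIC descriptor setup */
	curr->length = htole16(slot->len);
	curr->flags = txd_flags;
	__builtin_prefetch(vaddr);		/* NMB(na, slot) suffices if only
						 * the virtual address is needed */
#endif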
/*
 * Structure associated to each netmap file descriptor.
 * It is created on open and left unbound (np_nifp == NULL).
 * A successful NIOCREGIF will set np_nifp and the first few fields;
 * this is protected by a global lock (NMG_LOCK) due to low contention.
 *
 * np_refs counts the number of references to the structure: one for the fd,
 * plus (on FreeBSD) one for each active mmap which we track ourselves
 * (linux automatically tracks them, but FreeBSD does not).
 * np_refs is protected by NMG_LOCK.
 *
 * Read access to the structure is lock free, because np_nifp once set
 * can only go to 0 when nobody is using the entry anymore. Readers
 * must check that np_nifp != NULL before using the other fields.
 */
struct netmap_priv_d {
	struct netmap_if * volatile np_nifp;	/* netmap if descriptor. */

	struct netmap_adapter	*np_na;
	struct ifnet		*np_ifp;
	uint32_t	np_flags;	/* from the ioctl */
	u_int		np_qfirst[NR_TXRX],
			np_qlast[NR_TXRX]; /* range of tx/rx rings to scan */
	int		np_sync_flags;	/* to be passed to nm_sync */

	int		np_refs;	/* use with NMG_LOCK held */

	/* pointers to the selinfo to be used for selrecord.
	 * Either the local or the global one depending on the
	 * number of rings.
	 */
	NM_SELINFO_T	*np_si[NR_TXRX];
	struct thread	*np_td;		/* kqueue, just debugging */
};

struct netmap_priv_d *netmap_priv_new(void);
void netmap_priv_delete(struct netmap_priv_d *);

static inline int nm_kring_pending(struct netmap_priv_d *np)
{
	struct netmap_adapter *na = np->np_na;
	enum txrx t;
	int i;

	for_rx_tx(t) {
		for (i = np->np_qfirst[t]; i < np->np_qlast[t]; i++) {
			struct netmap_kring *kring = NMR(na, t)[i];
			if (kring->nr_mode != kring->nr_pending_mode) {
				return 1;
			}
		}
	}
	return 0;
}
#ifdef WITH_PIPES
int netmap_pipe_txsync(struct netmap_kring *txkring, int flags);
int netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags);
#endif /* WITH_PIPES */

#ifdef WITH_MONITOR
struct netmap_monitor_adapter {
	struct netmap_adapter up;

	struct netmap_priv_d priv;
};
#endif /* WITH_MONITOR */
#ifdef WITH_GENERIC
/*
 * generic netmap emulation for devices that do not have
 * native netmap support.
 */
int generic_netmap_attach(struct ifnet *ifp);
int generic_rx_handler(struct ifnet *ifp, struct mbuf *m);

int nm_os_catch_rx(struct netmap_generic_adapter *gna, int intercept);
int nm_os_catch_tx(struct netmap_generic_adapter *gna, int intercept);

int na_is_generic(struct netmap_adapter *na);
/*
 * the generic transmit routine is passed a structure to optionally
 * build a queue of descriptors, in an OS-specific way.
 * The payload is at addr, if non-null, and the routine should send or queue
 * the packet, returning 0 if successful, 1 on failure.
 *
 * At the end, if head is non-null, there will be an additional call
 * to the function with addr = NULL; this should tell the OS-specific
 * routine to send the queue and free any resources. Failure is ignored.
 */
struct nm_os_gen_arg {
	struct ifnet *ifp;
	void *m;	/* os-specific mbuf-like object */
	void *head, *tail; /* tailq, if the OS-specific routine needs to build one */
	void *addr;	/* payload of current packet */
	u_int len;	/* packet length */
	u_int ring_nr;	/* transmit ring index */
	u_int qevent;	/* in txqdisc mode, place an event on this mbuf */
};
int nm_os_generic_xmit_frame(struct nm_os_gen_arg *);
int nm_os_generic_find_num_desc(struct ifnet *ifp, u_int *tx, u_int *rx);
void nm_os_generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq);
void nm_os_generic_set_features(struct netmap_generic_adapter *gna);
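/*
 * Call-protocol sketch (assumption; the real callers live in the generic
 * txsync code and in the per-OS glue): the caller fills one nm_os_gen_arg
 * per packet and, if the OS routine built a queue in head/tail, issues a
 * final call with addr == NULL to flush it. "slot", "kring" and "m" are
 * hypothetical caller variables.
 */
#if 0
	struct nm_os_gen_arg a;

	memset(&a, 0, sizeof(a));	/* head/tail start out empty */
	a.ifp = ifp;
	a.m = m;			/* pre-allocated mbuf-like object */
	a.addr = NMB(na, slot);
	a.len = slot->len;
	a.ring_nr = kring->ring_id;
	a.qevent = 0;
	if (nm_os_generic_xmit_frame(&a)) {
		/* returned 1: not sent; retry this slot on the next sync */
	}

	/* ... more packets ... then flush the batch, if one was built */
	if (a.head != NULL) {
		a.addr = NULL;
		nm_os_generic_xmit_frame(&a);	/* failure is ignored */
	}
#endif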
static inline struct ifnet*
netmap_generic_getifp(struct netmap_generic_adapter *gna)
{
	if (gna->prev)
		return gna->prev->ifp;

	return gna->up.up.ifp;
}

void netmap_generic_irq(struct netmap_adapter *na, u_int q, u_int *work_done);
//#define RATE_GENERIC /* Enables communication statistics for generic. */
#ifdef RATE_GENERIC
void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi);
#else
#define generic_rate(txp, txs, txi, rxp, rxs, rxi)
#endif

/*
 * netmap_mitigation API. This is used by the generic adapter
 * to reduce the number of interrupt requests/selwakeup
 * to clients on incoming packets.
 */
void nm_os_mitigation_init(struct nm_generic_mit *mit, int idx,
		struct netmap_adapter *na);
void nm_os_mitigation_start(struct nm_generic_mit *mit);
void nm_os_mitigation_restart(struct nm_generic_mit *mit);
int nm_os_mitigation_active(struct nm_generic_mit *mit);
void nm_os_mitigation_cleanup(struct nm_generic_mit *mit);
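/*
 * Usage sketch (assumption, loosely mirroring the emulated RX path):
 * notify clients immediately for the first packet of a burst, then arm the
 * one-shot mitigation timer so packets arriving shortly afterwards do not
 * each trigger another wakeup. "gna" and "r" (ring index) are hypothetical
 * caller variables, and the per-ring "mit" array is an assumption.
 */
#if 0
	u_int work_done = 0;

	if (!nm_os_mitigation_active(&gna->mit[r])) {
		netmap_generic_irq(na, r, &work_done);	/* wake clients now */
		nm_os_mitigation_start(&gna->mit[r]);
	} else {
		nm_os_mitigation_restart(&gna->mit[r]);	/* extend the window */
	}
#endif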
#else /* !WITH_GENERIC */
#define generic_netmap_attach(ifp)	(EOPNOTSUPP)
#define na_is_generic(na)		(0)
#endif /* WITH_GENERIC */

/* Shared declarations for the VALE switch. */

/*
 * Each transmit queue accumulates a batch of packets into
 * a structure before forwarding. Packets to the same
 * destination are put in a list using ft_next as a link field.
 * ft_frags and ft_next are valid only on the first fragment.
 */
struct nm_bdg_fwd {	/* forwarding entry for a bridge */
	void *ft_buf;		/* netmap or indirect buffer */
	uint8_t ft_frags;	/* how many fragments (only on 1st frag) */
	uint16_t ft_offset;	/* dst port (unused) */
	uint16_t ft_flags;	/* flags, e.g. indirect */
	uint16_t ft_len;	/* src fragment len */
	uint16_t ft_next;	/* next packet to same destination */
};
/* struct 'virtio_net_hdr' from linux. */
struct nm_vnet_hdr {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	/* Use csum_start, csum_offset */
#define VIRTIO_NET_HDR_F_DATA_VALID	2	/* Csum is valid */
	uint8_t flags;
#define VIRTIO_NET_HDR_GSO_NONE		0	/* Not a GSO frame */
#define VIRTIO_NET_HDR_GSO_TCPV4	1	/* GSO frame, IPv4 TCP (TSO) */
#define VIRTIO_NET_HDR_GSO_UDP		3	/* GSO frame, IPv4 UDP (UFO) */
#define VIRTIO_NET_HDR_GSO_TCPV6	4	/* GSO frame, IPv6 TCP */
#define VIRTIO_NET_HDR_GSO_ECN		0x80	/* TCP has ECN set */
	uint8_t gso_type;
	uint16_t hdr_len;
	uint16_t gso_size;
	uint16_t csum_start;
	uint16_t csum_offset;
};

#define WORST_CASE_GSO_HEADER	(14+40+60)  /* ethernet + IPv6 + max TCP header */
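/*
 * Example (assumption, not from this file): a port that emits a large
 * TCPv4 frame with checksum and segmentation left to the receiver would
 * fill the header along these lines ("buf" points at the virtio-net
 * header preceding the ethernet frame, "mss" is the TCP MSS):
 */
#if 0
	struct nm_vnet_hdr *vh = (struct nm_vnet_hdr *)buf;

	vh->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
	vh->csum_start = 14 + 20;	/* ethernet + IPv4: TCP header start */
	vh->csum_offset = 16;		/* checksum field offset within TCP */
	vh->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
	vh->gso_size = mss;		/* payload bytes per segment */
	vh->hdr_len = 14 + 20 + 20;	/* headers preceding the TCP payload */
#endif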
/* Private definitions for IPv4, IPv6, UDP and TCP headers. */
	uint8_t		version_ihl;
	/* The options start here. */
	uint8_t		doff;	/* Data offset + Reserved */
	uint8_t		priority_version;
	uint8_t		flow_lbl[3];
	uint16_t	payload_len;
/* Type used to store a checksum (in host byte order) that hasn't been
 * folded yet.
 */
#define rawsum_t uint32_t

rawsum_t nm_os_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum);
uint16_t nm_os_csum_ipv4(struct nm_iphdr *iph);
void nm_os_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
		      size_t datalen, uint16_t *check);
void nm_os_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data,
		      size_t datalen, uint16_t *check);
uint16_t nm_os_csum_fold(rawsum_t cur_sum);
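/*
 * Usage sketch (assumption): recomputing the L4 checksum of a contiguous
 * IPv4/TCP frame with the helpers above, assuming a 14-byte ethernet
 * header and no IP options ("frame" and "len" are hypothetical variables):
 */
#if 0
	struct nm_iphdr *iph = (struct nm_iphdr *)(frame + 14);
	void *tcph = (char *)iph + 20;			/* no IP options */
	size_t tcplen = len - 14 - 20;
	uint16_t *check = (uint16_t *)((char *)tcph + 16); /* TCP csum field */

	*check = 0;
	nm_os_csum_tcpudp_ipv4(iph, tcph, tcplen, check);

	/* nm_os_csum_raw()/nm_os_csum_fold() can instead be chained to
	 * accumulate a plain one's-complement sum over scattered pieces:
	 *   rawsum_t sum = nm_os_csum_raw(p1, l1, 0);
	 *   sum = nm_os_csum_raw(p2, l2, sum);
	 *   uint16_t folded = nm_os_csum_fold(sum);
	 */
#endif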
void bdg_mismatch_datapath(struct netmap_vp_adapter *na,
			   struct netmap_vp_adapter *dst_na,
			   const struct nm_bdg_fwd *ft_p,
			   struct netmap_ring *dst_ring,
			   u_int *j, u_int lim, u_int *howmany);

/* persistent virtual port routines */
int nm_os_vi_persist(const char *, struct ifnet **);
void nm_os_vi_detach(struct ifnet *);
void nm_os_vi_init_index(void);
/*
 * kernel thread routines
 */
struct nm_kctx; /* OS-specific kernel context - opaque */
typedef void (*nm_kctx_worker_fn_t)(void *data, int is_kthread);
typedef void (*nm_kctx_notify_fn_t)(void *data);

/* kthread configuration */
struct nm_kctx_cfg {
	long			type;		/* kthread type/identifier */
	nm_kctx_worker_fn_t	worker_fn;	/* worker function */
	void			*worker_private;/* worker parameter */
	nm_kctx_notify_fn_t	notify_fn;	/* notify function */
	int			attach_user;	/* attach kthread to user process */
	int			use_kthread;	/* use a kthread for the context */
};
/* kthread configuration */
struct nm_kctx *nm_os_kctx_create(struct nm_kctx_cfg *cfg,
				  void *opaque);
int nm_os_kctx_worker_start(struct nm_kctx *);
void nm_os_kctx_worker_stop(struct nm_kctx *);
void nm_os_kctx_destroy(struct nm_kctx *);
void nm_os_kctx_worker_wakeup(struct nm_kctx *nmk);
void nm_os_kctx_send_irq(struct nm_kctx *);
void nm_os_kctx_worker_setaff(struct nm_kctx *, int);
u_int nm_os_ncpus(void);
#ifdef WITH_PTNETMAP_HOST
/*
 * netmap adapter for host ptnetmap ports
 */
struct netmap_pt_host_adapter {
	struct netmap_adapter up;

	/* the passed-through adapter */
	struct netmap_adapter *parent;
	/* parent->na_flags, saved at NETMAP_PT_HOST_CREATE time,
	 * and restored at NETMAP_PT_HOST_DELETE time */
	uint32_t parent_na_flags;

	int (*parent_nm_notify)(struct netmap_kring *kring, int flags);
};

/* ptnetmap host-side routines */
int netmap_get_pt_host_na(struct nmreq_header *hdr, struct netmap_adapter **na,
			struct netmap_mem_d *nmd, int create);
int ptnetmap_ctl(const char *nr_name, int create, struct netmap_adapter *na);

static inline int
nm_ptnetmap_host_on(struct netmap_adapter *na)
{
	return na && na->na_flags & NAF_PTNETMAP_HOST;
}
#else /* !WITH_PTNETMAP_HOST */
#define netmap_get_pt_host_na(hdr, _2, _3, _4) \
	(((struct nmreq_register *)(uintptr_t)hdr->nr_body)->nr_flags & (NR_PTNETMAP_HOST) ? EOPNOTSUPP : 0)
#define ptnetmap_ctl(_1, _2, _3)	EINVAL
#define nm_ptnetmap_host_on(_1)		EINVAL
#endif /* !WITH_PTNETMAP_HOST */
#ifdef WITH_PTNETMAP_GUEST
/* ptnetmap GUEST routines */

/*
 * netmap adapter for guest ptnetmap ports
 */
struct netmap_pt_guest_adapter {
	/* The netmap adapter to be used by netmap applications.
	 * This field must be the first, to allow upcast. */
	struct netmap_hw_adapter hwup;

	/* The netmap adapter to be used by the driver. */
	struct netmap_hw_adapter dr;

	/* Reference counter to track users of backend netmap port: the
	 * network stack and netmap clients.
	 * Used to decide when we need (de)allocate krings/rings and
	 * start (stop) ptnetmap kthreads. */
	int backend_regifs;
};
int netmap_pt_guest_attach(struct netmap_adapter *na,
			unsigned int nifp_offset,
			unsigned int memid);
struct ptnet_csb_gh;
struct ptnet_csb_hg;
bool netmap_pt_guest_txsync(struct ptnet_csb_gh *ptgh,
			struct ptnet_csb_hg *pthg,
			struct netmap_kring *kring,
			int flags);
bool netmap_pt_guest_rxsync(struct ptnet_csb_gh *ptgh,
			struct ptnet_csb_hg *pthg,
			struct netmap_kring *kring, int flags);
int ptnet_nm_krings_create(struct netmap_adapter *na);
void ptnet_nm_krings_delete(struct netmap_adapter *na);
void ptnet_nm_dtor(struct netmap_adapter *na);
#endif /* WITH_PTNETMAP_GUEST */
#ifdef __FreeBSD__
/*
 * FreeBSD mbuf allocator/deallocator in emulation mode:
 */
#if __FreeBSD_version < 1100000

/*
 * For older versions of FreeBSD:
 *
 * We allocate EXT_PACKET mbuf+clusters, but need to set M_NOFREE
 * so that the destructor, if invoked, will not free the packet.
 * In principle we should set the destructor only on demand,
 * but since there might be a race we better do it on allocation.
 * As a consequence, we also need to set the destructor or we
 * would leak buffers.
 */

/* mbuf destructor: also need to change the type to EXT_EXTREF,
 * add an M_NOFREE flag, and then clear the flag and
 * chain into uma_zfree(zone_pack, mf)
 * (or reinstall the buffer ?)
 */
#define SET_MBUF_DESTRUCTOR(m, fn)	do {		\
	(m)->m_ext.ext_free = (void *)fn;		\
	(m)->m_ext.ext_type = EXT_EXTREF;		\
} while (0)

static void
void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2)
{
	/* restore original mbuf */
	m->m_ext.ext_buf = m->m_data = m->m_ext.ext_arg1;
	m->m_ext.ext_arg1 = NULL;
	m->m_ext.ext_type = EXT_PACKET;
	m->m_ext.ext_free = NULL;
	if (MBUF_REFCNT(m) == 0)
		SET_MBUF_REFCNT(m, 1);
	uma_zfree(zone_pack, m);
}
static inline struct mbuf *
nm_os_get_mbuf(struct ifnet *ifp, int len)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m) {
		/* m_getcl() (mb_ctor_mbuf) has an assert that checks that
		 * the M_NOFREE flag is not specified as third argument,
		 * so we have to set M_NOFREE after m_getcl(). */
		m->m_flags |= M_NOFREE;
		m->m_ext.ext_arg1 = m->m_ext.ext_buf; // XXX save
		m->m_ext.ext_free = (void *)void_mbuf_dtor;
		m->m_ext.ext_type = EXT_EXTREF;
		ND(5, "create m %p refcnt %d", m, MBUF_REFCNT(m));
	}
	return m;
}
#else /* __FreeBSD_version >= 1100000 */

/*
 * Newer versions of FreeBSD use a straightforward scheme.
 *
 * We allocate mbufs with m_gethdr(), since the mbuf header is needed
 * by the driver. We also attach a customly-provided external storage,
 * which in this case is a netmap buffer. When calling m_extadd(), however,
 * we pass a NULL address, since the real address (and length) will be
 * filled in by nm_os_generic_xmit_frame() right before calling
 * if_transmit().
 *
 * The dtor function does nothing, however we need it since mb_free_ext()
 * has a KASSERT(), checking that the mbuf dtor function is not NULL.
 */

#if __FreeBSD_version <= 1200050
static void void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2) { }
#else /* __FreeBSD_version >= 1200051 */
/* The arg1 and arg2 pointer arguments were removed by r324446, which
 * is included since version 1200051. */
static void void_mbuf_dtor(struct mbuf *m) { }
#endif /* __FreeBSD_version >= 1200051 */

#define SET_MBUF_DESTRUCTOR(m, fn)	do {			\
	(m)->m_ext.ext_free = (fn != NULL) ?			\
	    (void *)fn : (void *)void_mbuf_dtor;		\
} while (0)
static inline struct mbuf *
nm_os_get_mbuf(struct ifnet *ifp, int len)
{
	struct mbuf *m;

	(void)ifp;
	(void)len;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		return m;
	}

	m_extadd(m, NULL /* buf */, 0 /* size */, void_mbuf_dtor,
		 NULL, NULL, 0, EXT_NET_DRV);

	return m;
}

#endif /* __FreeBSD_version >= 1100000 */
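/*
 * Usage sketch (assumption): the emulated TX path pre-allocates one of
 * these mbufs per TX slot and installs a destructor so it can detect when
 * the NIC (and the stack) are done with the attached buffer; the callback
 * name below is hypothetical.
 */
#if 0
	struct mbuf *m = nm_os_get_mbuf(ifp, NETMAP_BUF_SIZE(na));

	if (m == NULL)
		return ENOMEM;
	SET_MBUF_DESTRUCTOR(m, my_generic_mbuf_dtor);	/* completion callback */
#endif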
#endif /* __FreeBSD__ */

struct nmreq_option * nmreq_findoption(struct nmreq_option *, uint16_t);
int nmreq_checkduplicate(struct nmreq_option *);

#endif /* _NET_NETMAP_KERN_H_ */