2 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo
3 * Copyright (C) 2013-2016 Universita` di Pisa
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * The header contains the definitions of constants and function
32 * prototypes used only in kernelspace.
35 #ifndef _NET_NETMAP_KERN_H_
36 #define _NET_NETMAP_KERN_H_
40 #if defined(CONFIG_NETMAP_EXTMEM)
43 #if defined(CONFIG_NETMAP_VALE)
46 #if defined(CONFIG_NETMAP_PIPE)
49 #if defined(CONFIG_NETMAP_MONITOR)
52 #if defined(CONFIG_NETMAP_GENERIC)
55 #if defined(CONFIG_NETMAP_PTNETMAP)
58 #if defined(CONFIG_NETMAP_SINK)
61 #if defined(CONFIG_NETMAP_NULL)
65 #elif defined (_WIN32)
66 #define WITH_VALE // comment out to disable VALE support
72 #else /* neither linux nor windows */
73 #define WITH_VALE // comment out to disable VALE support
77 #define WITH_PTNETMAP /* ptnetmap guest support */
82 #if defined(__FreeBSD__)
83 #include <sys/selinfo.h>
85 #define likely(x) __builtin_expect((long)!!(x), 1L)
86 #define unlikely(x) __builtin_expect((long)!!(x), 0L)
89 #define NM_LOCK_T struct mtx /* low level spinlock, used to protect queues */
91 #define NM_MTX_T struct sx /* OS-specific mutex (sleepable) */
92 #define NM_MTX_INIT(m) sx_init(&(m), #m)
93 #define NM_MTX_DESTROY(m) sx_destroy(&(m))
94 #define NM_MTX_LOCK(m) sx_xlock(&(m))
95 #define NM_MTX_SPINLOCK(m) while (!sx_try_xlock(&(m))) ;
96 #define NM_MTX_UNLOCK(m) sx_xunlock(&(m))
97 #define NM_MTX_ASSERT(m) sx_assert(&(m), SA_XLOCKED)
99 #define NM_SELINFO_T struct nm_selinfo
100 #define NM_SELRECORD_T struct thread
101 #define MBUF_LEN(m) ((m)->m_pkthdr.len)
102 #define MBUF_TXQ(m) ((m)->m_pkthdr.flowid)
103 #define MBUF_TRANSMIT(na, ifp, m) ((na)->if_transmit(ifp, m))
104 #define GEN_TX_MBUF_IFP(m) ((m)->m_pkthdr.rcvif)
106 #define NM_ATOMIC_T volatile int /* required by atomic/bitops.h */
107 /* atomic operations */
108 #include <machine/atomic.h>
109 #define NM_ATOMIC_TEST_AND_SET(p) (!atomic_cmpset_acq_int((p), 0, 1))
110 #define NM_ATOMIC_CLEAR(p) atomic_store_rel_int((p), 0)
112 #if __FreeBSD_version >= 1100030
113 #define WNA(_ifp) (_ifp)->if_netmap
114 #else /* older FreeBSD */
115 #define WNA(_ifp) (_ifp)->if_pspare[0]
116 #endif /* older FreeBSD */
118 #if __FreeBSD_version >= 1100005
119 struct netmap_adapter *netmap_getna(if_t ifp);
122 #if __FreeBSD_version >= 1100027
123 #define MBUF_REFCNT(m) ((m)->m_ext.ext_count)
124 #define SET_MBUF_REFCNT(m, x) (m)->m_ext.ext_count = x
126 #define MBUF_REFCNT(m) ((m)->m_ext.ref_cnt ? *((m)->m_ext.ref_cnt) : -1)
127 #define SET_MBUF_REFCNT(m, x) *((m)->m_ext.ref_cnt) = x
130 #define MBUF_QUEUED(m) 1
133 /* Support for select(2) and poll(2). */
135 /* Support for kqueue(9). See comments in netmap_freebsd.c */
136 struct taskqueue *ntfytq;
137 struct task ntfytask;
145 /* Not used in FreeBSD. */
148 #define NM_BNS_GET(b)
149 #define NM_BNS_PUT(b)
151 #elif defined (linux)
153 #define NM_LOCK_T safe_spinlock_t // see bsd_glue.h
154 #define NM_SELINFO_T wait_queue_head_t
155 #define MBUF_LEN(m) ((m)->len)
156 #define MBUF_TRANSMIT(na, ifp, m) \
158 /* Avoid infinite recursion with generic. */ \
159 m->priority = NM_MAGIC_PRIORITY_TX; \
160 (((struct net_device_ops *)(na)->if_transmit)->ndo_start_xmit(m, ifp)); \
164 /* See explanation in nm_os_generic_xmit_frame. */
165 #define GEN_TX_MBUF_IFP(m) ((struct ifnet *)skb_shinfo(m)->destructor_arg)
167 #define NM_ATOMIC_T volatile long unsigned int
169 #define NM_MTX_T struct mutex /* OS-specific sleepable lock */
170 #define NM_MTX_INIT(m) mutex_init(&(m))
171 #define NM_MTX_DESTROY(m) do { (void)(m); } while (0)
172 #define NM_MTX_LOCK(m) mutex_lock(&(m))
173 #define NM_MTX_UNLOCK(m) mutex_unlock(&(m))
174 #define NM_MTX_ASSERT(m) mutex_is_locked(&(m))
178 #endif /* DEV_NETMAP */
180 #elif defined (__APPLE__)
182 #warning apple support is incomplete.
183 #define likely(x) __builtin_expect(!!(x), 1)
184 #define unlikely(x) __builtin_expect(!!(x), 0)
185 #define NM_LOCK_T IOLock *
186 #define NM_SELINFO_T struct selinfo
187 #define MBUF_LEN(m) ((m)->m_pkthdr.len)
189 #elif defined (_WIN32)
190 #include "../../../WINDOWS/win_glue.h"
192 #define NM_SELRECORD_T IO_STACK_LOCATION
193 #define NM_SELINFO_T win_SELINFO // see win_glue.h
194 #define NM_LOCK_T win_spinlock_t // see win_glue.h
195 #define NM_MTX_T KGUARDED_MUTEX /* OS-specific mutex (sleepable) */
197 #define NM_MTX_INIT(m) KeInitializeGuardedMutex(&m);
198 #define NM_MTX_DESTROY(m) do { (void)(m); } while (0)
199 #define NM_MTX_LOCK(m) KeAcquireGuardedMutex(&(m))
200 #define NM_MTX_UNLOCK(m) KeReleaseGuardedMutex(&(m))
201 #define NM_MTX_ASSERT(m) assert(&m.Count>0)
203 // These linknames are for the NDIS driver
204 #define NETMAP_NDIS_LINKNAME_STRING L"\\DosDevices\\NMAPNDIS"
205 #define NETMAP_NDIS_NTDEVICE_STRING L"\\Device\\NMAPNDIS"
207 // Definition of internal driver-to-driver ioctl codes
208 #define NETMAP_KERNEL_XCHANGE_POINTERS _IO('i', 180)
209 #define NETMAP_KERNEL_SEND_SHUTDOWN_SIGNAL _IO_direct('i', 195)
211 typedef struct hrtimer {
217 /* MSVC does not have likely/unlikely support */
219 #define likely(x) (x)
220 #define unlikely(x) (x)
222 #define likely(x) __builtin_expect((long)!!(x), 1L)
223 #define unlikely(x) __builtin_expect((long)!!(x), 0L)
228 #error unsupported platform
230 #endif /* end - platform-specific code */
232 #ifndef _WIN32 /* support for emulated sysctl */
237 #define NM_ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))
239 #define NMG_LOCK_T NM_MTX_T
240 #define NMG_LOCK_INIT() NM_MTX_INIT(netmap_global_lock)
241 #define NMG_LOCK_DESTROY() NM_MTX_DESTROY(netmap_global_lock)
242 #define NMG_LOCK() NM_MTX_LOCK(netmap_global_lock)
243 #define NMG_UNLOCK() NM_MTX_UNLOCK(netmap_global_lock)
244 #define NMG_LOCK_ASSERT() NM_MTX_ASSERT(netmap_global_lock)
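/*
 * Usage sketch (illustrative, not a verbatim excerpt from the code below):
 * configuration paths are expected to bracket their work with the global
 * lock, e.g.
 *
 *	NMG_LOCK();
 *	error = netmap_do_regif(priv, na, nr_mode, nr_ringid, nr_flags);
 *	NMG_UNLOCK();
 *
 * and callees can verify the invariant with NMG_LOCK_ASSERT().
 */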
246 #if defined(__FreeBSD__)
247 #define nm_prerr_int printf
248 #define nm_prinf_int printf
249 #elif defined (_WIN32)
250 #define nm_prerr_int DbgPrint
251 #define nm_prinf_int DbgPrint
253 #define nm_prerr_int(fmt, arg...) printk(KERN_ERR fmt, ##arg)
254 #define nm_prinf_int(fmt, arg...) printk(KERN_INFO fmt, ##arg)
257 #define nm_prinf(format, ...) \
259 struct timeval __xxts; \
260 microtime(&__xxts); \
261 nm_prinf_int("%03d.%06d [%4d] %-25s " format "\n",\
262 (int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec, \
263 __LINE__, __FUNCTION__, ##__VA_ARGS__); \
266 #define nm_prerr(format, ...) \
268 struct timeval __xxts; \
269 microtime(&__xxts); \
270 nm_prerr_int("%03d.%06d [%4d] %-25s " format "\n",\
271 (int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec, \
272 __LINE__, __FUNCTION__, ##__VA_ARGS__); \
275 /* Disabled printf (used to be nm_prdis). */
276 #define nm_prdis(format, ...)
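/*
 * Example (illustrative): all of the macros above take printf-like
 * arguments, e.g.
 *
 *	nm_prinf("kring %s: %u slots", kring->name, kring->nkr_num_slots);
 *	nm_prerr("invalid buffer index %u", i);
 *	nm_prdis("this expands to nothing unless re-enabled");
 */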
278 /* Rate limited, lps indicates how many per second. */
279 #define nm_prlim(lps, format, ...) \
281 static int t0, __cnt; \
282 if (t0 != time_second) { \
287 nm_prinf(format, ##__VA_ARGS__); \
290 struct netmap_adapter;
293 struct netmap_priv_d;
296 /* os-specific NM_SELINFO_T initialization/destruction functions */
297 int nm_os_selinfo_init(NM_SELINFO_T *, const char *name);
298 void nm_os_selinfo_uninit(NM_SELINFO_T *);
300 const char *nm_dump_buf(char *p, int len, int lim, char *dst);
302 void nm_os_selwakeup(NM_SELINFO_T *si);
303 void nm_os_selrecord(NM_SELRECORD_T *sr, NM_SELINFO_T *si);
305 int nm_os_ifnet_init(void);
306 void nm_os_ifnet_fini(void);
307 void nm_os_ifnet_lock(void);
308 void nm_os_ifnet_unlock(void);
310 unsigned nm_os_ifnet_mtu(struct ifnet *ifp);
312 void nm_os_get_module(void);
313 void nm_os_put_module(void);
315 void netmap_make_zombie(struct ifnet *);
316 void netmap_undo_zombie(struct ifnet *);
318 /* os independent alloc/realloc/free */
319 void *nm_os_malloc(size_t);
320 void *nm_os_vmalloc(size_t);
321 void *nm_os_realloc(void *, size_t new_size, size_t old_size);
322 void nm_os_free(void *);
323 void nm_os_vfree(void *);
325 /* os specific attach/detach enter/exit-netmap-mode routines */
326 void nm_os_onattach(struct ifnet *);
327 void nm_os_ondetach(struct ifnet *);
328 void nm_os_onenter(struct ifnet *);
329 void nm_os_onexit(struct ifnet *);
331 /* passes a packet up to the host stack.
332 * If the packet is sent (or dropped) immediately it returns NULL,
333 * otherwise it links the packet to prev and returns m.
334 * In this case, a final call with m=NULL and prev != NULL will send up
335 * the entire chain to the host stack.
337 void *nm_os_send_up(struct ifnet *, struct mbuf *m, struct mbuf *prev);
339 int nm_os_mbuf_has_seg_offld(struct mbuf *m);
340 int nm_os_mbuf_has_csum_offld(struct mbuf *m);
342 #include "netmap_mbq.h"
344 extern NMG_LOCK_T netmap_global_lock;
346 enum txrx { NR_RX = 0, NR_TX = 1, NR_TXRX };
348 static __inline const char*
349 nm_txrx2str(enum txrx t)
351 return (t== NR_RX ? "RX" : "TX");
354 static __inline enum txrx
355 nm_txrx_swap(enum txrx t)
357 return (t== NR_RX ? NR_TX : NR_RX);
360 #define for_rx_tx(t) for ((t) = 0; (t) < NR_TXRX; (t)++)
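/*
 * Example (illustrative): iterating over both directions of an adapter,
 * e.g. to dump the ring counts (nma_get_nrings() is defined further down
 * in this header):
 *
 *	enum txrx t;
 *
 *	for_rx_tx(t)
 *		nm_prinf("%s rings: %u", nm_txrx2str(t), nma_get_nrings(na, t));
 */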
363 struct netmap_zmon_list {
364 struct netmap_kring *next;
365 struct netmap_kring *prev;
367 #endif /* WITH_MONITOR */
370 * private, kernel view of a ring. Keeps track of the status of
371 * a ring across system calls.
373 * nr_hwcur index of the next buffer to refill.
374 * It corresponds to ring->head
375 * at the time the system call returns.
377 * nr_hwtail index of the first buffer owned by the kernel.
378 * On RX, hwcur->hwtail are receive buffers
379 * not yet released. hwcur is advanced following
380 * ring->head, hwtail is advanced on incoming packets,
381 * and a wakeup is generated when hwtail passes ring->cur
382 * On TX, hwcur->rcur have been filled by the sender
383 * but not sent yet to the NIC; rcur->hwtail are available
384 * for new transmissions, and hwtail->hwcur-1 are pending
385 * transmissions not yet acknowledged.
387 * The indexes in the NIC and netmap rings are offset by nkr_hwofs slots.
388 * This is so that, on a reset, buffers owned by userspace are not
389 * modified by the kernel. In particular:
390 * RX rings: the next empty buffer (hwtail + hwofs) coincides with
391 * the next empty buffer as known by the hardware (next_to_check or so).
392 * TX rings: hwcur + hwofs coincides with next_to_send
394 * The following fields are used to implement lock-free copy of packets
395 * from input to output ports in VALE switch:
396 * nkr_hwlease buffer after the last one being copied.
397 * A writer in nm_bdg_flush reserves N buffers
398 * from nr_hwlease, advances it, then does the
399 * copy outside the lock.
400 * In RX rings (used for VALE ports),
401 * nkr_hwtail <= nkr_hwlease < nkr_hwcur+N-1
402 * In TX rings (used for NIC or host stack ports)
403 * nkr_hwcur <= nkr_hwlease < nkr_hwtail
404 * nkr_leases array of nkr_num_slots where writers can report
405 * completion of their block. NR_NOSLOT (~0) indicates
406 * that the writer has not finished yet
407 * nkr_lease_idx index of next free slot in nr_leases, to be assigned
409 * The kring is manipulated by txsync/rxsync and the generic netmap functions.
411 * Concurrent rxsync or txsync on the same ring are prevented by
412 * nm_kr_(try)lock(), which in turn uses nr_busy. This is all we need
413 * for NIC rings, and for TX rings attached to the host stack.
415 * RX rings attached to the host stack use an mbq (rx_queue) on both
416 * rxsync_from_host() and netmap_transmit(). The mbq is protected
417 * by its internal lock.
419 * RX rings attached to the VALE switch are accessed by both senders
420 * and receivers. They are protected by the q_lock on the RX ring.
422 struct netmap_kring {
423 struct netmap_ring *ring;
425 uint32_t nr_hwcur; /* should be nr_hwhead */
429 * Copies of values in user rings, so we do not need to look
430 * at the ring (which could be modified). These are set in the
431 * *sync_prologue()/finalize() routines.
437 uint32_t nr_kflags; /* private driver flags */
438 #define NKR_PENDINTR 0x1 // Pending interrupt.
439 #define NKR_EXCLUSIVE 0x2 /* exclusive binding */
440 #define NKR_FORWARD 0x4 /* (host ring only) there are
443 #define NKR_NEEDRING 0x8 /* ring needed even if users==0
444 * (used internally by pipes and
445 * by ptnetmap host ports)
447 #define NKR_NOINTR 0x10 /* don't use interrupts on this ring */
448 #define NKR_FAKERING 0x20 /* don't allocate/free buffers */
451 uint32_t nr_pending_mode;
452 #define NKR_NETMAP_OFF 0x0
453 #define NKR_NETMAP_ON 0x1
455 uint32_t nkr_num_slots;
458 * On a NIC reset, the NIC ring indexes may be reset but the
459 * indexes in the netmap rings remain the same. nkr_hwofs
460 * keeps track of the offset between the two.
464 /* last_reclaim is an opaque marker to help reduce the frequency
465 * of operations such as reclaiming tx buffers. A possible use
466 * is to set it to ticks and do the reclaim only once per tick.
468 uint64_t last_reclaim;
471 NM_SELINFO_T si; /* poll/select wait queue */
472 NM_LOCK_T q_lock; /* protects kring and ring. */
473 NM_ATOMIC_T nr_busy; /* prevent concurrent syscalls */
475 /* the adapter that owns this kring */
476 struct netmap_adapter *na;
478 /* the adapter that wants to be notified when this kring has
479 * new slots available. This is usually the same as the above,
480 * but wrappers may let it point to themselves
482 struct netmap_adapter *notify_na;
484 /* The following fields are for VALE switch support */
485 struct nm_bdg_fwd *nkr_ft;
486 uint32_t *nkr_leases;
487 #define NR_NOSLOT ((uint32_t)~0) /* used in nkr_*lease* */
488 uint32_t nkr_hwlease;
489 uint32_t nkr_lease_idx;
491 /* while nkr_stopped is set, no new [tr]xsync operations can
492 * be started on this kring.
493 * This is used by netmap_disable_all_rings()
494 * to find a synchronization point where critical data
495 * structures pointed to by the kring can be added or removed
497 volatile int nkr_stopped;
499 /* Support for adapters without native netmap support.
500 * On tx rings we preallocate an array of tx buffers
501 * (same size as the netmap ring), on rx rings we
502 * store incoming mbufs in a queue that is drained by
505 struct mbuf **tx_pool;
506 struct mbuf *tx_event; /* TX event used as a notification */
507 NM_LOCK_T tx_event_lock; /* protects the tx_event mbuf */
508 struct mbq rx_queue; /* intercepted rx mbufs. */
510 uint32_t users; /* existing bindings for this ring */
512 uint32_t ring_id; /* kring identifier */
513 enum txrx tx; /* kind of ring (tx or rx) */
514 char name[64]; /* diagnostic */
516 /* [tx]sync callback for this kring.
517 * The default nm_krings_create callback (netmap_krings_create)
518 * sets the nm_sync callback of each hardware tx(rx) kring to
519 * the corresponding nm_txsync(nm_rxsync) taken from the
520 * netmap_adapter; moreover, it sets the sync callback
521 * of the host tx(rx) ring to netmap_txsync_to_host
522 * (netmap_rxsync_from_host).
524 * Overrides: the above configuration is not changed by
525 * any of the nm_krings_create callbacks.
527 int (*nm_sync)(struct netmap_kring *kring, int flags);
528 int (*nm_notify)(struct netmap_kring *kring, int flags);
531 struct netmap_kring *pipe; /* if this is a pipe ring,
532 * pointer to the other end
534 uint32_t pipe_tail; /* hwtail updated by the other end */
535 #endif /* WITH_PIPES */
537 int (*save_notify)(struct netmap_kring *kring, int flags);
540 /* array of krings that are monitoring this kring */
541 struct netmap_kring **monitors;
542 uint32_t max_monitors; /* current size of the monitors array */
543 uint32_t n_monitors; /* next unused entry in the monitor array */
544 uint32_t mon_pos[NR_TXRX]; /* index of this ring in the monitored ring array */
545 uint32_t mon_tail; /* last seen slot on rx */
547 /* circular list of zero-copy monitors */
548 struct netmap_zmon_list zmon_list[NR_TXRX];
551 * Monitors work by intercepting the sync and notify callbacks of the
552 * monitored krings. This is implemented by replacing the pointers
553 * above and saving the previous ones in mon_* pointers below
555 int (*mon_sync)(struct netmap_kring *kring, int flags);
556 int (*mon_notify)(struct netmap_kring *kring, int flags);
561 __declspec(align(64));
563 __attribute__((__aligned__(64)));
566 /* return 1 iff the kring needs to be turned on */
568 nm_kring_pending_on(struct netmap_kring *kring)
570 return kring->nr_pending_mode == NKR_NETMAP_ON &&
571 kring->nr_mode == NKR_NETMAP_OFF;
574 /* return 1 iff the kring needs to be turned off */
576 nm_kring_pending_off(struct netmap_kring *kring)
578 return kring->nr_pending_mode == NKR_NETMAP_OFF &&
579 kring->nr_mode == NKR_NETMAP_ON;
582 /* return the next index, with wraparound */
583 static inline uint32_t
584 nm_next(uint32_t i, uint32_t lim)
586 return unlikely (i == lim) ? 0 : i + 1;
590 /* return the previous index, with wraparound */
591 static inline uint32_t
592 nm_prev(uint32_t i, uint32_t lim)
594 return unlikely (i == 0) ? lim : i - 1;
600 * Here is the layout for the Rx and Tx rings.
604 +-----------------+ +-----------------+
607 +-----------------+ +-----------------+
608 head->| owned by user |<-hwcur | not sent to nic |<-hwcur
610 +-----------------+ | |
611 cur->| available to | | |
612 | user, not read | +-----------------+
613 | yet | cur->| (being |
616 +-----------------+ + ------ +
617 tail->| |<-hwtail | |<-hwlease
618 | (being | ... | | ...
619 | prepared) | ... | | ...
620 +-----------------+ ... | | ...
621 | |<-hwlease +-----------------+
622 | | tail->| |<-hwtail
626 +-----------------+ +-----------------+
628 * The cur/tail (user view) and hwcur/hwtail (kernel view)
629 * are used in the normal operation of the card.
631 * When a ring is the output of a switch port (Rx ring for
632 * a VALE port, Tx ring for the host stack or NIC), slots
633 * are reserved in blocks through 'hwlease' which points
634 * to the next unused slot.
635 * On an Rx ring, hwlease is always after hwtail,
636 * and completions cause hwtail to advance.
637 * On a Tx ring, hwlease is always between cur and hwtail,
638 * and completions cause cur to advance.
640 * nm_kr_space() returns the maximum number of slots that
642 * nm_kr_lease() reserves the required number of buffers,
643 * advances nkr_hwlease and also returns an entry in
644 * a circular array where completions should be reported.
649 #define plut_entry lut_entry
653 struct lut_entry *lut;
654 struct plut_entry *plut;
655 uint32_t objtotal; /* max buffer index */
656 uint32_t objsize; /* buffer size */
659 struct netmap_vp_adapter; // forward
662 /* Struct to be filled by nm_config callbacks. */
663 struct nm_config_info {
664 unsigned num_tx_rings;
665 unsigned num_rx_rings;
666 unsigned num_tx_descs;
667 unsigned num_rx_descs;
668 unsigned rx_buf_maxsize;
672 * default type for the magic field.
673 * May be overridden in glue code.
676 #define NM_OS_MAGIC uint32_t
677 #endif /* !NM_OS_MAGIC */
680 * The "struct netmap_adapter" extends the "struct adapter"
681 * (or equivalent) device descriptor.
682 * It contains all base fields needed to support netmap operation.
683 * There are in fact different types of netmap adapters
684 * (native, generic, VALE switch...) so a netmap_adapter is
685 * just the first field in the derived type.
687 struct netmap_adapter {
689 * On linux we do not have a good way to tell if an interface
690 * is netmap-capable. So we always use the following trick:
691 * NA(ifp) points here, and the first entry (which hopefully
692 * always exists and is at least 32 bits) contains a magic
693 * value which we can use to detect that the interface is good.
696 uint32_t na_flags; /* enabled, and other flags */
697 #define NAF_SKIP_INTR 1 /* use the regular interrupt handler.
698 * useful during initialization
700 #define NAF_SW_ONLY 2 /* forward packets only to sw adapter */
701 #define NAF_BDG_MAYSLEEP 4 /* the bridge is allowed to sleep when
702 * forwarding packets coming from this
705 #define NAF_MEM_OWNER 8 /* the adapter uses its own memory area
706 * that cannot be changed
708 #define NAF_NATIVE 16 /* the adapter is native.
709 * Virtual ports (non persistent vale ports,
710 * pipes, monitors...) should never use
713 #define NAF_NETMAP_ON 32 /* netmap is active (either native or
714 * emulated). Where possible (e.g. FreeBSD)
715 * IFCAP_NETMAP also mirrors this flag.
717 #define NAF_HOST_RINGS 64 /* the adapter supports the host rings */
718 #define NAF_FORCE_NATIVE 128 /* the adapter is always NATIVE */
720 #define NAF_MOREFRAG 512 /* the adapter supports NS_MOREFRAG */
721 #define NAF_ZOMBIE (1U<<30) /* the nic driver has been unloaded */
722 #define NAF_BUSY (1U<<31) /* the adapter is used internally and
723 * cannot be registered from userspace
725 int active_fds; /* number of user-space descriptors using this
726 interface, which is equal to the number of
727 struct netmap_if objs in the mapped region. */
729 u_int num_rx_rings; /* number of adapter receive rings */
730 u_int num_tx_rings; /* number of adapter transmit rings */
731 u_int num_host_rx_rings; /* number of host receive rings */
732 u_int num_host_tx_rings; /* number of host transmit rings */
734 u_int num_tx_desc; /* number of descriptors in each queue */
737 /* tx_rings and rx_rings are private but allocated as a
738 * contiguous chunk of memory. Each array has N+K entries,
739 * N for the hardware rings and K for the host rings.
741 struct netmap_kring **tx_rings; /* array of TX rings. */
742 struct netmap_kring **rx_rings; /* array of RX rings. */
744 void *tailroom; /* space below the rings array */
745 /* (used for leases) */
748 NM_SELINFO_T si[NR_TXRX]; /* global wait queues */
750 /* count users of the global wait queues */
751 int si_users[NR_TXRX];
753 void *pdev; /* used to store pci device */
755 /* copy of if_qflush and if_transmit pointers, to intercept
756 * packets from the network stack when netmap is active.
758 int (*if_transmit)(struct ifnet *, struct mbuf *);
760 /* copy of if_input for netmap_send_up() */
761 void (*if_input)(struct ifnet *, struct mbuf *);
763 /* Back reference to the parent ifnet struct. Used for
764 * hardware ports (emulated netmap included). */
765 struct ifnet *ifp; /* adapter is ifp->if_softc */
767 /*---- callbacks for this netmap adapter -----*/
769 * nm_dtor() is the cleanup routine called when destroying
771 * Called with NMG_LOCK held.
773 * nm_register() is called on NIOCREGIF and close() to enter
774 * or exit netmap mode on the NIC
775 * Called with NMG_LOCK held.
777 * nm_txsync() pushes packets to the underlying hw/switch
779 * nm_rxsync() collects packets from the underlying hw/switch
781 * nm_config() returns configuration information from the OS
782 * Called with NMG_LOCK held.
784 * nm_krings_create() create and init the tx_rings and
785 * rx_rings arrays of kring structures. In particular,
786 * set the nm_sync callbacks for each ring.
787 * There is no need to also allocate the corresponding
788 * netmap_rings, since netmap_mem_rings_create() will always
789 * be called to provide the missing ones.
790 * Called with NMG_LOCK held.
792 * nm_krings_delete() cleanup and delete the tx_rings and rx_rings
794 * Called with NMG_LOCK held.
796 * nm_notify() is used to act after data have become available
797 * (or the stopped state of the ring has changed)
798 * For hw devices this is typically a selwakeup(),
799 * but for NIC/host ports attached to a switch (or vice-versa)
800 * we also need to invoke the 'txsync' code downstream.
801 * This callback pointer is actually used only to initialize
803 * Return values are the same as for netmap_rx_irq().
805 void (*nm_dtor)(struct netmap_adapter *);
807 int (*nm_register)(struct netmap_adapter *, int onoff);
808 void (*nm_intr)(struct netmap_adapter *, int onoff);
810 int (*nm_txsync)(struct netmap_kring *kring, int flags);
811 int (*nm_rxsync)(struct netmap_kring *kring, int flags);
812 int (*nm_notify)(struct netmap_kring *kring, int flags);
813 #define NAF_FORCE_READ 1
814 #define NAF_FORCE_RECLAIM 2
815 #define NAF_CAN_FORWARD_DOWN 4
816 /* return configuration information */
817 int (*nm_config)(struct netmap_adapter *, struct nm_config_info *info);
818 int (*nm_krings_create)(struct netmap_adapter *);
819 void (*nm_krings_delete)(struct netmap_adapter *);
821 * nm_bdg_attach() initializes the na_vp field to point
822 * to an adapter that can be attached to a VALE switch. If the
823 * current adapter is already a VALE port, na_vp is simply a cast;
824 * otherwise, na_vp points to a netmap_bwrap_adapter.
825 * If applicable, this callback also initializes na_hostvp,
826 * that can be used to connect the adapter host rings to the
828 * Called with NMG_LOCK held.
830 * nm_bdg_ctl() is called on the actual attach/detach to/from
831 * the switch, to perform adapter-specific
833 * Called with NMG_LOCK held.
835 int (*nm_bdg_attach)(const char *bdg_name, struct netmap_adapter *,
837 int (*nm_bdg_ctl)(struct nmreq_header *, struct netmap_adapter *);
839 /* adapter used to attach this adapter to a VALE switch (if any) */
840 struct netmap_vp_adapter *na_vp;
841 /* adapter used to attach the host rings of this adapter
842 * to a VALE switch (if any) */
843 struct netmap_vp_adapter *na_hostvp;
845 /* standard refcount to control the lifetime of the adapter
846 * (it should be equal to the lifetime of the corresponding ifp)
850 /* memory allocator (opaque)
851 * We also cache a pointer to the lut_entry for translating
852 * buffer addresses, the total number of buffers and the buffer size.
854 struct netmap_mem_d *nm_mem;
855 struct netmap_mem_d *nm_mem_prev;
856 struct netmap_lut na_lut;
858 /* additional information attached to this adapter
859 * by other netmap subsystems. Currently used by
860 * bwrap, LINUX/v1000 and ptnetmap
864 /* array of pipes that have this adapter as a parent */
865 struct netmap_pipe_adapter **na_pipes;
866 int na_next_pipe; /* next free slot in the array */
867 int na_max_pipes; /* size of the array */
869 /* Offset of ethernet header for each packet. */
872 /* Max number of bytes that the NIC can store in the buffer
873 * referenced by each RX descriptor. This translates to the maximum
874 * bytes that a single netmap slot can reference. Larger packets
875 * require NS_MOREFRAG support. */
876 unsigned rx_buf_maxsize;
878 char name[NETMAP_REQ_IFNAMSIZ]; /* used at least by pipes */
881 unsigned long monitor_id; /* debugging */
885 static __inline u_int
886 nma_get_ndesc(struct netmap_adapter *na, enum txrx t)
888 return (t == NR_TX ? na->num_tx_desc : na->num_rx_desc);
892 nma_set_ndesc(struct netmap_adapter *na, enum txrx t, u_int v)
900 static __inline u_int
901 nma_get_nrings(struct netmap_adapter *na, enum txrx t)
903 return (t == NR_TX ? na->num_tx_rings : na->num_rx_rings);
906 static __inline u_int
907 nma_get_host_nrings(struct netmap_adapter *na, enum txrx t)
909 return (t == NR_TX ? na->num_host_tx_rings : na->num_host_rx_rings);
913 nma_set_nrings(struct netmap_adapter *na, enum txrx t, u_int v)
916 na->num_tx_rings = v;
918 na->num_rx_rings = v;
922 nma_set_host_nrings(struct netmap_adapter *na, enum txrx t, u_int v)
925 na->num_host_tx_rings = v;
927 na->num_host_rx_rings = v;
930 static __inline struct netmap_kring**
931 NMR(struct netmap_adapter *na, enum txrx t)
933 return (t == NR_TX ? na->tx_rings : na->rx_rings);
936 int nma_intr_enable(struct netmap_adapter *na, int onoff);
939 * If the NIC is owned by the kernel
940 * (i.e., bridge), neither another bridge nor user can use it;
941 * if the NIC is owned by a user, only users can share it.
942 * Evaluation must be done under NMG_LOCK().
944 #define NETMAP_OWNED_BY_KERN(na) ((na)->na_flags & NAF_BUSY)
945 #define NETMAP_OWNED_BY_ANY(na) \
946 (NETMAP_OWNED_BY_KERN(na) || ((na)->active_fds > 0))
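/*
 * Sketch (illustrative): a path that wants exclusive use of an adapter
 * would typically check, under the global lock,
 *
 *	NMG_LOCK_ASSERT();
 *	if (NETMAP_OWNED_BY_ANY(na))
 *		return EBUSY;
 */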
949 * derived netmap adapters for various types of ports
951 struct netmap_vp_adapter { /* VALE software port */
952 struct netmap_adapter up;
957 * bdg_port is the port number used in the bridge;
958 * na_bdg points to the bridge this NA is attached to.
961 struct nm_bridge *na_bdg;
963 int autodelete; /* remove the ifp on last reference */
965 /* Maximum Frame Size, used in bdg_mismatch_datapath() */
967 /* Last source MAC on this port */
972 struct netmap_hw_adapter { /* physical device */
973 struct netmap_adapter up;
976 struct net_device_ops nm_ndo;
977 struct ethtool_ops nm_eto;
979 const struct ethtool_ops* save_ethtool;
981 int (*nm_hw_register)(struct netmap_adapter *, int onoff);
985 /* Mitigation support. */
986 struct nm_generic_mit {
987 struct hrtimer mit_timer;
989 int mit_ring_idx; /* index of the ring being mitigated */
990 struct netmap_adapter *mit_na; /* backpointer */
993 struct netmap_generic_adapter { /* emulated device */
994 struct netmap_hw_adapter up;
996 /* Pointer to a previously used netmap adapter. */
997 struct netmap_adapter *prev;
999 /* Emulated netmap adapters support:
1000 * - save_if_input saves the if_input hook (FreeBSD);
1001 * - mit implements rx interrupt mitigation;
1003 void (*save_if_input)(struct ifnet *, struct mbuf *);
1005 struct nm_generic_mit *mit;
1007 netdev_tx_t (*save_start_xmit)(struct mbuf *, struct ifnet *);
1009 /* Is the adapter able to use multiple RX slots to scatter
1010 * each packet pushed up by the driver? */
1013 /* Is the transmission path controlled by a netmap-aware
1014 * device queue (i.e. qdisc on linux)? */
1017 #endif /* WITH_GENERIC */
1019 static __inline u_int
1020 netmap_real_rings(struct netmap_adapter *na, enum txrx t)
1022 return nma_get_nrings(na, t) +
1023 !!(na->na_flags & NAF_HOST_RINGS) * nma_get_host_nrings(na, t);
1026 /* account for fake rings */
1027 static __inline u_int
1028 netmap_all_rings(struct netmap_adapter *na, enum txrx t)
1030 return max(nma_get_nrings(na, t) + 1, netmap_real_rings(na, t));
1033 int netmap_default_bdg_attach(const char *name, struct netmap_adapter *na,
1034 struct nm_bridge *);
1035 struct nm_bdg_polling_state;
1037 * Bridge wrapper for non-VALE ports attached to a VALE switch.
1039 * The real device must already have its own netmap adapter (hwna).
1040 * The bridge wrapper and the hwna adapter share the same set of
1041 * netmap rings and buffers, but they have two separate sets of
1042 * krings descriptors, with tx/rx meanings swapped:
1045 * bwrap krings rings krings hwna
1046 * +------+ +------+ +-----+ +------+ +------+
1047 * |tx_rings->| |\ /| |----| |<-tx_rings|
1048 * | | +------+ \ / +-----+ +------+ | |
1051 * | | +------+/ \+-----+ +------+ | |
1052 * |rx_rings->| | | |----| |<-rx_rings|
1053 * | | +------+ +-----+ +------+ | |
1056 * - packets coming from the bridge go to the bwrap rx rings,
1057 * which are also the hwna tx rings. The bwrap notify callback
1058 * will then complete the hwna tx (see netmap_bwrap_notify).
1060 * - packets coming from the outside go to the hwna rx rings,
1061 * which are also the bwrap tx rings. The (overwritten) hwna
1062 * notify method will then complete the bridge tx
1063 * (see netmap_bwrap_intr_notify).
1065 * The bridge wrapper may optionally connect the hwna 'host' rings
1066 * to the bridge. This is done by using a second port in the
1067 * bridge and connecting it to the 'host' netmap_vp_adapter
1068 * contained in the netmap_bwrap_adapter. The bwrap host adapter
1069 * cross-links the hwna host rings in the same way as shown above.
1071 * - packets coming from the bridge and directed to the host stack
1072 * are handled by the bwrap host notify callback
1073 * (see netmap_bwrap_host_notify)
1075 * - packets coming from the host stack are still handled by the
1076 * overwritten hwna notify callback (netmap_bwrap_intr_notify),
1077 * but are diverted to the host adapter depending on the ring number.
1080 struct netmap_bwrap_adapter {
1081 struct netmap_vp_adapter up;
1082 struct netmap_vp_adapter host; /* for host rings */
1083 struct netmap_adapter *hwna; /* the underlying device */
1086 * When we attach a physical interface to the bridge, we
1087 * allow the controlling process to terminate, so we need
1088 * a place to store the netmap_priv_d data structure.
1089 * This is only done when physical interfaces
1090 * are attached to a bridge.
1092 struct netmap_priv_d *na_kpriv;
1093 struct nm_bdg_polling_state *na_polling_state;
1094 /* we overwrite the hwna->na_vp pointer, so we save
1095 * here its original value, to be restored at detach
1097 struct netmap_vp_adapter *saved_na_vp;
1099 int nm_bdg_polling(struct nmreq_header *hdr);
1102 int netmap_vale_attach(struct nmreq_header *hdr, void *auth_token);
1103 int netmap_vale_detach(struct nmreq_header *hdr, void *auth_token);
1104 int netmap_vale_list(struct nmreq_header *hdr);
1105 int netmap_vi_create(struct nmreq_header *hdr, int);
1106 int nm_vi_create(struct nmreq_header *);
1107 int nm_vi_destroy(const char *name);
1108 #else /* !WITH_VALE */
1109 #define netmap_vi_create(hdr, a) (EOPNOTSUPP)
1110 #endif /* WITH_VALE */
1114 #define NM_MAXPIPES 64 /* max number of pipes per adapter */
1116 struct netmap_pipe_adapter {
1117 /* pipe identifier is up.name */
1118 struct netmap_adapter up;
1120 #define NM_PIPE_ROLE_MASTER 0x1
1121 #define NM_PIPE_ROLE_SLAVE 0x2
1122 int role; /* either NM_PIPE_ROLE_MASTER or NM_PIPE_ROLE_SLAVE */
1124 struct netmap_adapter *parent; /* adapter that owns the memory */
1125 struct netmap_pipe_adapter *peer; /* the other end of the pipe */
1126 int peer_ref; /* 1 iff we are holding a ref to the peer */
1127 struct ifnet *parent_ifp; /* maybe null */
1129 u_int parent_slot; /* index in the parent pipe array */
1132 #endif /* WITH_PIPES */
1135 struct netmap_null_adapter {
1136 struct netmap_adapter up;
1138 #endif /* WITH_NMNULL */
1141 /* return slots reserved to rx clients; used in drivers */
1142 static inline uint32_t
1143 nm_kr_rxspace(struct netmap_kring *k)
1145 int space = k->nr_hwtail - k->nr_hwcur;
1147 space += k->nkr_num_slots;
1148 nm_prdis("preserving %d rx slots %d -> %d", space, k->nr_hwcur, k->nr_hwtail);
1153 /* return slots reserved to tx clients */
1154 #define nm_kr_txspace(_k) nm_kr_rxspace(_k)
1157 /* True if no space in the tx ring, only valid after txsync_prologue */
1159 nm_kr_txempty(struct netmap_kring *kring)
1161 return kring->rhead == kring->nr_hwtail;
1164 /* True if no more completed slots in the rx ring, only valid after
1165 * rxsync_prologue */
1166 #define nm_kr_rxempty(_k) nm_kr_txempty(_k)
1168 /* True if the application needs to wait for more space on the ring
1169 * (more received packets or more free tx slots).
1170 * Only valid after *xsync_prologue. */
1172 nm_kr_wouldblock(struct netmap_kring *kring)
1174 return kring->rcur == kring->nr_hwtail;
1178 * protect against multiple threads using the same ring.
1179 * also check that the ring has not been stopped or locked
1181 #define NM_KR_BUSY 1 /* some other thread is syncing the ring */
1182 #define NM_KR_STOPPED 2 /* unbounded stop (ifconfig down or driver unload) */
1183 #define NM_KR_LOCKED 3 /* bounded, brief stop for mutual exclusion */
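/*
 * Typical pattern (sketch only): callers bracket the *sync() methods with
 * nm_kr_tryget()/nm_kr_put(), defined below:
 *
 *	if (nm_kr_tryget(kring, 1, &revents))
 *		continue;	(ring busy or stopped: skip it)
 *	kring->nm_sync(kring, flags);
 *	nm_kr_put(kring);
 */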
1186 /* release the previously acquired right to use the *sync() methods of the ring */
1187 static __inline void nm_kr_put(struct netmap_kring *kr)
1189 NM_ATOMIC_CLEAR(&kr->nr_busy);
1193 /* true if the ifp that backed the adapter has disappeared (e.g., the
1194 * driver has been unloaded)
1196 static inline int nm_iszombie(struct netmap_adapter *na);
1198 /* try to obtain exclusive right to issue the *sync() operations on the ring.
1199 * The right is obtained and must be later relinquished via nm_kr_put() if and
1200 * only if nm_kr_tryget() returns 0.
1201 * If can_sleep is 1 there are only two other possible outcomes:
1202 * - the function returns NM_KR_BUSY
1203 * - the function returns NM_KR_STOPPED and sets the POLLERR bit in *perr
1205 * In both cases the caller will typically skip the ring, possibly collecting
1206 * errors along the way.
1207 * If the calling context does not allow sleeping, the caller must pass 0 in can_sleep.
1208 * In the latter case, the function may also return NM_KR_LOCKED and leave *perr
1209 * untouched: ideally, the caller should try again at a later time.
1211 static __inline int nm_kr_tryget(struct netmap_kring *kr, int can_sleep, int *perr)
1213 int busy = 1, stopped;
1214 /* check a first time without taking the lock
1215 * to avoid starvation for nm_kr_get()
1218 stopped = kr->nkr_stopped;
1219 if (unlikely(stopped)) {
1222 busy = NM_ATOMIC_TEST_AND_SET(&kr->nr_busy);
1223 /* we should not return NM_KR_BUSY if the ring was
1224 * actually stopped, so check another time after
1225 * the barrier provided by the atomic operation
1227 stopped = kr->nkr_stopped;
1228 if (unlikely(stopped)) {
1232 if (unlikely(nm_iszombie(kr->na))) {
1233 stopped = NM_KR_STOPPED;
1237 return unlikely(busy) ? NM_KR_BUSY : 0;
1242 if (stopped == NM_KR_STOPPED) {
1243 /* if POLLERR is defined we want to use it to simplify netmap_poll().
1244 * Otherwise, any non-zero value will do.
1247 #define NM_POLLERR POLLERR
1249 #define NM_POLLERR 1
1250 #endif /* POLLERR */
1252 *perr |= NM_POLLERR;
1254 } else if (can_sleep) {
1255 tsleep(kr, 0, "NM_KR_TRYGET", 4);
1261 /* put the ring in the 'stopped' state and wait for the current user (if any) to
1262 * notice. stopped must be either NM_KR_STOPPED or NM_KR_LOCKED
1264 static __inline void nm_kr_stop(struct netmap_kring *kr, int stopped)
1266 kr->nkr_stopped = stopped;
1267 while (NM_ATOMIC_TEST_AND_SET(&kr->nr_busy))
1268 tsleep(kr, 0, "NM_KR_GET", 4);
1271 /* restart a ring after a stop */
1272 static __inline void nm_kr_start(struct netmap_kring *kr)
1274 kr->nkr_stopped = 0;
1280 * The following functions are used by individual drivers to
1281 * support netmap operation.
1283 * netmap_attach() initializes a struct netmap_adapter, allocating the
1284 * struct netmap_ring's and the struct selinfo.
1286 * netmap_detach() frees the memory allocated by netmap_attach().
1288 * netmap_transmit() replaces the if_transmit routine of the interface,
1289 * and is used to intercept packets coming from the stack.
1291 * netmap_load_map/netmap_reload_map are helper routines to set/reset
1292 * the dmamap for a packet buffer
1294 * netmap_reset() is a helper routine to be called in the hw driver
1295 * when reinitializing a ring. It should not be called by
1296 * virtual ports (vale, pipes, monitor)
1298 int netmap_attach(struct netmap_adapter *);
1299 int netmap_attach_ext(struct netmap_adapter *, size_t size, int override_reg);
1300 void netmap_detach(struct ifnet *);
1301 int netmap_transmit(struct ifnet *, struct mbuf *);
1302 struct netmap_slot *netmap_reset(struct netmap_adapter *na,
1303 enum txrx tx, u_int n, u_int new_cur);
1304 int netmap_ring_reinit(struct netmap_kring *);
1305 int netmap_rings_config_get(struct netmap_adapter *, struct nm_config_info *);
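/*
 * Attach sketch (illustrative; the softc layout and the my_* callbacks are
 * hypothetical, not part of this API):
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = sc->ifp;
 *	na.num_tx_desc = sc->num_tx_desc;
 *	na.num_rx_desc = sc->num_rx_desc;
 *	na.num_tx_rings = na.num_rx_rings = sc->num_queues;
 *	na.nm_txsync = my_txsync;
 *	na.nm_rxsync = my_rxsync;
 *	na.nm_register = my_register;
 *	error = netmap_attach(&na);
 *
 * with a matching netmap_detach(sc->ifp) in the driver detach path.
 */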
1307 /* Return codes for netmap_*x_irq. */
1309 /* Driver should do normal interrupt processing, e.g. because
1310 * the interface is not in netmap mode. */
1312 /* Port is in netmap mode, and the interrupt work has been
1313 * completed. The driver does not have to notify netmap
1314 * again before the next interrupt. */
1315 NM_IRQ_COMPLETED = -1,
1316 /* Port is in netmap mode, but the interrupt work has not been
1317 * completed. The driver has to make sure netmap will be
1318 * notified again soon, even if no more interrupts come (e.g.
1319 * on Linux the driver should not call napi_complete()). */
1320 NM_IRQ_RESCHED = -2,
1323 /* default functions to handle rx/tx interrupts */
1324 int netmap_rx_irq(struct ifnet *, u_int, u_int *);
1325 #define netmap_tx_irq(_n, _q) netmap_rx_irq(_n, _q, NULL)
1326 int netmap_common_irq(struct netmap_adapter *, u_int, u_int *work_done);
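/*
 * Sketch (illustrative): in a driver rx interrupt (or NAPI poll) handler
 * one would typically do
 *
 *	if (netmap_rx_irq(ifp, ring_nr, &work_done) != NM_IRQ_PASS)
 *		return;		(netmap handled it, or asked to be rescheduled)
 *	... regular host-stack processing ...
 *
 * where NM_IRQ_PASS is the "do normal interrupt processing" value of the
 * enum above.
 */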
1330 /* functions used by external modules to interface with VALE */
1331 #define netmap_vp_to_ifp(_vp) ((_vp)->up.ifp)
1332 #define netmap_ifp_to_vp(_ifp) (NA(_ifp)->na_vp)
1333 #define netmap_ifp_to_host_vp(_ifp) (NA(_ifp)->na_hostvp)
1334 #define netmap_bdg_idx(_vp) ((_vp)->bdg_port)
1335 const char *netmap_bdg_name(struct netmap_vp_adapter *);
1336 #else /* !WITH_VALE */
1337 #define netmap_vp_to_ifp(_vp) NULL
1338 #define netmap_ifp_to_vp(_ifp) NULL
1339 #define netmap_ifp_to_host_vp(_ifp) NULL
1340 #define netmap_bdg_idx(_vp) -1
1341 #endif /* WITH_VALE */
1344 nm_netmap_on(struct netmap_adapter *na)
1346 return na && na->na_flags & NAF_NETMAP_ON;
1350 nm_native_on(struct netmap_adapter *na)
1352 return nm_netmap_on(na) && (na->na_flags & NAF_NATIVE);
1356 nm_iszombie(struct netmap_adapter *na)
1358 return na == NULL || (na->na_flags & NAF_ZOMBIE);
1362 nm_update_hostrings_mode(struct netmap_adapter *na)
1364 /* Process nr_mode and nr_pending_mode for host rings. */
1365 na->tx_rings[na->num_tx_rings]->nr_mode =
1366 na->tx_rings[na->num_tx_rings]->nr_pending_mode;
1367 na->rx_rings[na->num_rx_rings]->nr_mode =
1368 na->rx_rings[na->num_rx_rings]->nr_pending_mode;
1371 void nm_set_native_flags(struct netmap_adapter *);
1372 void nm_clear_native_flags(struct netmap_adapter *);
1374 void netmap_krings_mode_commit(struct netmap_adapter *na, int onoff);
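/*
 * Sketch of an nm_register callback (illustrative only; my_register and the
 * device start/stop steps are placeholders):
 *
 *	static int
 *	my_register(struct netmap_adapter *na, int onoff)
 *	{
 *		... stop the device and drain pending work ...
 *		if (onoff)
 *			nm_set_native_flags(na);
 *		else
 *			nm_clear_native_flags(na);
 *		... restart the device ...
 *		return 0;
 *	}
 */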
1377 * nm_*sync_prologue() functions are used in ioctl/poll and ptnetmap
1379 * We need the netmap_ring* parameter, because in ptnetmap it is decoupled
1381 * The user-space ring pointers (head/cur/tail) are shared through
1382 * CSB between host and guest.
1386 * validates parameters in the ring/kring, returns a value for head
1387 * If any error, returns ring_size to force a reinit.
1389 uint32_t nm_txsync_prologue(struct netmap_kring *, struct netmap_ring *);
1393 * validates parameters in the ring/kring, returns a value for head
1394 * If any error, returns ring_size to force a reinit.
1396 uint32_t nm_rxsync_prologue(struct netmap_kring *, struct netmap_ring *);
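/*
 * Very condensed nm_txsync skeleton (illustrative only; the descriptor
 * programming and reclaim steps are placeholders):
 *
 *	static int
 *	my_txsync(struct netmap_kring *kring, int flags)
 *	{
 *		struct netmap_adapter *na = kring->na;
 *		u_int lim = kring->nkr_num_slots - 1;
 *		u_int nm_i, head = kring->rhead;
 *
 *		for (nm_i = kring->nr_hwcur; nm_i != head;
 *		     nm_i = nm_next(nm_i, lim)) {
 *			struct netmap_slot *slot = &kring->ring->slot[nm_i];
 *			void *addr = NMB(na, slot);
 *			... program one NIC descriptor with addr, slot->len ...
 *		}
 *		kring->nr_hwcur = head;
 *		... reclaim completed slots and advance kring->nr_hwtail ...
 *		return 0;
 *	}
 *
 * rhead is the kernel copy of the user ring pointer filled in by
 * nm_txsync_prologue() before the callback runs.
 */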
1399 /* check/fix address and len in tx rings */
1400 #if 1 /* debug version */
1401 #define NM_CHECK_ADDR_LEN(_na, _a, _l) do { \
1402 if (_a == NETMAP_BUF_BASE(_na) || _l > NETMAP_BUF_SIZE(_na)) { \
1403 nm_prlim(5, "bad addr/len ring %d slot %d idx %d len %d", \
1404 kring->ring_id, nm_i, slot->buf_idx, len); \
1405 if (_l > NETMAP_BUF_SIZE(_na)) \
1406 _l = NETMAP_BUF_SIZE(_na); \
1408 #else /* no debug version */
1409 #define NM_CHECK_ADDR_LEN(_na, _a, _l) do { \
1410 if (_l > NETMAP_BUF_SIZE(_na)) \
1411 _l = NETMAP_BUF_SIZE(_na); \
1416 /*---------------------------------------------------------------*/
1418 * Support routines used by netmap subsystems
1419 * (native drivers, VALE, generic, pipes, monitors, ...)
1423 /* common routine for all functions that create a netmap adapter. It performs
1425 * - if the na points to an ifp, mark the ifp as netmap capable
1426 * using na as its native adapter;
1427 * - provide defaults for the setup callbacks and the memory allocator
1429 int netmap_attach_common(struct netmap_adapter *);
1430 /* fill priv->np_[tr]xq{first,last} using the ringid and flags information
1431 * coming from a struct nmreq_register
1433 int netmap_interp_ringid(struct netmap_priv_d *priv, uint32_t nr_mode,
1434 uint16_t nr_ringid, uint64_t nr_flags);
1435 /* update the ring parameters (number and size of tx and rx rings).
1436 * It calls the nm_config callback, if available.
1438 int netmap_update_config(struct netmap_adapter *na);
1439 /* create and initialize the common fields of the krings array,
1440 * using the information that must already be available in the na.
1441 * tailroom can be used to request the allocation of additional
1442 * tailroom bytes after the krings array. This is used by
1443 * netmap_vp_adapter's (i.e., VALE ports) to make room for
1444 * leasing-related data structures
1446 int netmap_krings_create(struct netmap_adapter *na, u_int tailroom);
1447 /* deletes the kring array of the adapter. The array must have
1448 * been created using netmap_krings_create
1450 void netmap_krings_delete(struct netmap_adapter *na);
1452 int netmap_hw_krings_create(struct netmap_adapter *na);
1453 void netmap_hw_krings_delete(struct netmap_adapter *na);
1455 /* set the stopped/enabled status of a ring.
1456 * When stopping, it also waits for all current activity on the ring to
1457 * terminate. The status change is then notified using the na nm_notify
1460 void netmap_set_ring(struct netmap_adapter *, u_int ring_id, enum txrx, int stopped);
1461 /* set the stopped/enabled status of all rings of the adapter. */
1462 void netmap_set_all_rings(struct netmap_adapter *, int stopped);
1463 /* convenience wrappers for netmap_set_all_rings */
1464 void netmap_disable_all_rings(struct ifnet *);
1465 void netmap_enable_all_rings(struct ifnet *);
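/*
 * Sketch (illustrative): a driver reinitialization path can quiesce netmap
 * around a hardware reset with
 *
 *	netmap_disable_all_rings(ifp);
 *	... reset and reconfigure the hardware ...
 *	netmap_enable_all_rings(ifp);
 */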
1467 int netmap_buf_size_validate(const struct netmap_adapter *na, unsigned mtu);
1468 int netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
1469 uint32_t nr_mode, uint16_t nr_ringid, uint64_t nr_flags);
1470 void netmap_do_unregif(struct netmap_priv_d *priv);
1472 u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg);
1473 int netmap_get_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1474 struct ifnet **ifp, struct netmap_mem_d *nmd, int create);
1475 void netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp);
1476 int netmap_get_hw_na(struct ifnet *ifp,
1477 struct netmap_mem_d *nmd, struct netmap_adapter **na);
1480 uint32_t netmap_vale_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
1481 struct netmap_vp_adapter *, void *private_data);
1483 /* these are redefined in case of no VALE support */
1484 int netmap_get_vale_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1485 struct netmap_mem_d *nmd, int create);
1486 void *netmap_vale_create(const char *bdg_name, int *return_status);
1487 int netmap_vale_destroy(const char *bdg_name, void *auth_token);
1489 #else /* !WITH_VALE */
1490 #define netmap_bdg_learning(_1, _2, _3, _4) 0
1491 #define netmap_get_vale_na(_1, _2, _3, _4) 0
1492 #define netmap_bdg_create(_1, _2) NULL
1493 #define netmap_bdg_destroy(_1, _2) 0
1494 #endif /* !WITH_VALE */
1497 /* max number of pipes per device */
1498 #define NM_MAXPIPES 64 /* XXX this should probably be a sysctl */
1499 void netmap_pipe_dealloc(struct netmap_adapter *);
1500 int netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1501 struct netmap_mem_d *nmd, int create);
1502 #else /* !WITH_PIPES */
1503 #define NM_MAXPIPES 0
1504 #define netmap_pipe_alloc(_1, _2) 0
1505 #define netmap_pipe_dealloc(_1)
1506 #define netmap_get_pipe_na(hdr, _2, _3, _4) \
1507 ((strchr(hdr->nr_name, '{') != NULL || strchr(hdr->nr_name, '}') != NULL) ? EOPNOTSUPP : 0)
1511 int netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1512 struct netmap_mem_d *nmd, int create);
1513 void netmap_monitor_stop(struct netmap_adapter *na);
1515 #define netmap_get_monitor_na(hdr, _2, _3, _4) \
1516 (((struct nmreq_register *)(uintptr_t)hdr->nr_body)->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX) ? EOPNOTSUPP : 0)
1520 int netmap_get_null_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1521 struct netmap_mem_d *nmd, int create);
1522 #else /* !WITH_NMNULL */
1523 #define netmap_get_null_na(hdr, _2, _3, _4) \
1524 (((struct nmreq_register *)(uintptr_t)hdr->nr_body)->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX) ? EOPNOTSUPP : 0)
1525 #endif /* WITH_NMNULL */
1527 #ifdef CONFIG_NET_NS
1528 struct net *netmap_bns_get(void);
1529 void netmap_bns_put(struct net *);
1530 void netmap_bns_getbridges(struct nm_bridge **, u_int *);
1532 extern struct nm_bridge *nm_bridges;
1533 #define netmap_bns_get()
1534 #define netmap_bns_put(_1)
1535 #define netmap_bns_getbridges(b, n) \
1536 do { *b = nm_bridges; *n = NM_BRIDGES; } while (0)
1539 /* Various prototypes */
1540 int netmap_poll(struct netmap_priv_d *, int events, NM_SELRECORD_T *td);
1541 int netmap_init(void);
1542 void netmap_fini(void);
1543 int netmap_get_memory(struct netmap_priv_d* p);
1544 void netmap_dtor(void *data);
1546 int netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
1547 struct thread *, int nr_body_is_user);
1548 int netmap_ioctl_legacy(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
1550 size_t nmreq_size_by_type(uint16_t nr_reqtype);
1552 /* netmap_adapter creation/destruction */
1554 // #define NM_DEBUG_PUTGET 1
1556 #ifdef NM_DEBUG_PUTGET
1558 #define NM_DBG(f) __##f
1560 void __netmap_adapter_get(struct netmap_adapter *na);
1562 #define netmap_adapter_get(na) \
1564 struct netmap_adapter *__na = na; \
1565 nm_prinf("getting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \
1566 __netmap_adapter_get(__na); \
1569 int __netmap_adapter_put(struct netmap_adapter *na);
1571 #define netmap_adapter_put(na) \
1573 struct netmap_adapter *__na = na; \
1574 nm_prinf("putting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \
1575 __netmap_adapter_put(__na); \
1578 #else /* !NM_DEBUG_PUTGET */
1581 void netmap_adapter_get(struct netmap_adapter *na);
1582 int netmap_adapter_put(struct netmap_adapter *na);
1584 #endif /* !NM_DEBUG_PUTGET */
1590 #define NETMAP_BUF_BASE(_na) ((_na)->na_lut.lut[0].vaddr)
1591 #define NETMAP_BUF_SIZE(_na) ((_na)->na_lut.objsize)
1592 extern int netmap_no_pendintr;
1593 extern int netmap_mitigate;
1594 extern int netmap_verbose;
1595 #ifdef CONFIG_NETMAP_DEBUG
1596 extern int netmap_debug; /* for debugging */
1597 #else /* !CONFIG_NETMAP_DEBUG */
1598 #define netmap_debug (0)
1599 #endif /* !CONFIG_NETMAP_DEBUG */
1600 enum { /* debug flags */
1601 NM_DEBUG_ON = 1, /* generic debug messages */
1602 NM_DEBUG_HOST = 0x2, /* debug host stack */
1603 NM_DEBUG_RXSYNC = 0x10, /* debug on rxsync/txsync */
1604 NM_DEBUG_TXSYNC = 0x20,
1605 NM_DEBUG_RXINTR = 0x100, /* debug on rx/tx intr (driver) */
1606 NM_DEBUG_TXINTR = 0x200,
1607 NM_DEBUG_NIC_RXSYNC = 0x1000, /* debug on rx/tx intr (driver) */
1608 NM_DEBUG_NIC_TXSYNC = 0x2000,
1609 NM_DEBUG_MEM = 0x4000, /* verbose memory allocations/deallocations */
1610 NM_DEBUG_VALE = 0x8000, /* debug messages from the VALE switch */
1611 NM_DEBUG_BDG = NM_DEBUG_VALE,
1614 extern int netmap_txsync_retry;
1615 extern int netmap_flags;
1616 extern int netmap_generic_hwcsum;
1617 extern int netmap_generic_mit;
1618 extern int netmap_generic_ringsize;
1619 extern int netmap_generic_rings;
1621 extern int netmap_generic_txqdisc;
1625 * NA returns a pointer to the struct netmap_adapter from the ifp.
1626 * WNA is os-specific and must be defined in glue code.
1628 #define NA(_ifp) ((struct netmap_adapter *)WNA(_ifp))
1631 * we provide a default implementation of NM_ATTACH_NA/NM_DETACH_NA
1632 * based on the WNA field.
1633 * Glue code may override this by defining its own NM_ATTACH_NA
1635 #ifndef NM_ATTACH_NA
1637 * On old versions of FreeBSD, NA(ifp) is a pspare. On linux we
1638 * overload another pointer in the netdev.
1640 * We check if NA(ifp) is set and its first element has a related
1641 * magic value. The capenable is within the struct netmap_adapter.
1643 #define NETMAP_MAGIC 0x52697a7a
1645 #define NM_NA_VALID(ifp) (NA(ifp) && \
1646 ((uint32_t)(uintptr_t)NA(ifp) ^ NA(ifp)->magic) == NETMAP_MAGIC )
1648 #define NM_ATTACH_NA(ifp, na) do { \
1652 ((uint32_t)(uintptr_t)NA(ifp)) ^ NETMAP_MAGIC; \
1654 #define NM_RESTORE_NA(ifp, na) WNA(ifp) = na;
1656 #define NM_DETACH_NA(ifp) do { WNA(ifp) = NULL; } while (0)
1657 #define NM_NA_CLASH(ifp) (NA(ifp) && !NM_NA_VALID(ifp))
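/*
 * Usage sketch (illustrative): glue code usually probes an interface with
 *
 *	if (NM_NA_VALID(ifp)) {
 *		struct netmap_adapter *na = NA(ifp);
 *		...
 *	}
 *
 * relying on the magic-number check above.
 */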
1658 #endif /* !NM_ATTACH_NA */
1661 #define NM_IS_NATIVE(ifp) (NM_NA_VALID(ifp) && NA(ifp)->nm_dtor == netmap_hw_dtor)
1663 #if defined(__FreeBSD__)
1665 /* Assigns the device IOMMU domain to an allocator.
1666 * Returns -ENOMEM in case the domain is different */
1667 #define nm_iommu_group_id(dev) (0)
1669 /* Callback invoked by the dma machinery after a successful dmamap_load */
1670 static void netmap_dmamap_cb(__unused void *arg,
1671 __unused bus_dma_segment_t * segs, __unused int nseg, __unused int error)
1675 /* bus_dmamap_load wrapper: call aforementioned function if map != NULL.
1676 * XXX can we do it without a callback ?
1679 netmap_load_map(struct netmap_adapter *na,
1680 bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1683 bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na),
1684 netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
1689 netmap_unload_map(struct netmap_adapter *na,
1690 bus_dma_tag_t tag, bus_dmamap_t map)
1693 bus_dmamap_unload(tag, map);
1696 #define netmap_sync_map(na, tag, map, sz, t)
1698 /* update the map when a buffer changes. */
1700 netmap_reload_map(struct netmap_adapter *na,
1701 bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1704 bus_dmamap_unload(tag, map);
1705 bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na),
1706 netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
1710 #elif defined(_WIN32)
1714 int nm_iommu_group_id(bus_dma_tag_t dev);
1715 #include <linux/dma-mapping.h>
1719 * dma_map_single(&pdev->dev, virt_addr, len, direction)
1720 * dma_unmap_single(&adapter->pdev->dev, phys_addr, len, direction)
1723 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[l];
1724 /* set time_stamp *before* dma to help avoid a possible race */
1725 buffer_info->time_stamp = jiffies;
1726 buffer_info->mapped_as_page = false;
1727 buffer_info->length = len;
1728 //buffer_info->next_to_watch = l;
1729 /* reload dma map */
1730 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1731 NETMAP_BUF_SIZE, DMA_TO_DEVICE);
1732 buffer_info->dma = dma_map_single(&adapter->pdev->dev,
1733 addr, NETMAP_BUF_SIZE, DMA_TO_DEVICE);
1735 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1736 nm_prerr("dma mapping error");
1737 /* goto dma_error; See e1000_put_txbuf() */
1740 tx_desc->buffer_addr = htole64(buffer_info->dma); //XXX
1745 netmap_load_map(struct netmap_adapter *na,
1746 bus_dma_tag_t tag, bus_dmamap_t map, void *buf, u_int size)
1749 *map = dma_map_single(na->pdev, buf, size,
1751 if (dma_mapping_error(na->pdev, *map)) {
1760 netmap_unload_map(struct netmap_adapter *na,
1761 bus_dma_tag_t tag, bus_dmamap_t map, u_int sz)
1764 dma_unmap_single(na->pdev, *map, sz,
1769 #ifdef NETMAP_LINUX_HAVE_DMASYNC
1771 netmap_sync_map_cpu(struct netmap_adapter *na,
1772 bus_dma_tag_t tag, bus_dmamap_t map, u_int sz, enum txrx t)
1775 dma_sync_single_for_cpu(na->pdev, *map, sz,
1776 (t == NR_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
1781 netmap_sync_map_dev(struct netmap_adapter *na,
1782 bus_dma_tag_t tag, bus_dmamap_t map, u_int sz, enum txrx t)
1785 dma_sync_single_for_device(na->pdev, *map, sz,
1786 (t == NR_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
1791 netmap_reload_map(struct netmap_adapter *na,
1792 bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1794 u_int sz = NETMAP_BUF_SIZE(na);
1797 dma_unmap_single(na->pdev, *map, sz,
1801 *map = dma_map_single(na->pdev, buf, sz,
1804 #else /* !NETMAP_LINUX_HAVE_DMASYNC */
1805 #define netmap_sync_map_cpu(na, tag, map, sz, t)
1806 #define netmap_sync_map_dev(na, tag, map, sz, t)
1807 #endif /* NETMAP_LINUX_HAVE_DMASYNC */
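/*
 * Sketch (illustrative): a slot-refill path on this platform would
 * typically do
 *
 *	if (slot->flags & NS_BUF_CHANGED)
 *		netmap_reload_map(na, tag, map, addr);
 *	netmap_sync_map_dev(na, tag, map, len, NR_TX);
 *
 * before handing the descriptor to the NIC (NS_BUF_CHANGED is a slot flag
 * from net/netmap.h).
 */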
1813 * functions to map NIC to KRING indexes (n2k) and vice versa (k2n)
1816 netmap_idx_n2k(struct netmap_kring *kr, int idx)
1818 int n = kr->nkr_num_slots;
1820 if (likely(kr->nkr_hwofs == 0)) {
1824 idx += kr->nkr_hwofs;
1835 netmap_idx_k2n(struct netmap_kring *kr, int idx)
1837 int n = kr->nkr_num_slots;
1839 if (likely(kr->nkr_hwofs == 0)) {
1843 idx -= kr->nkr_hwofs;
1853 /* Entries of the look-up table. */
1856 void *vaddr; /* virtual address. */
1857 vm_paddr_t paddr; /* physical address. */
1859 #else /* linux & _WIN32 */
1860 /* dma-mapping in linux can assign a buffer a different address
1861 * depending on the device, so we need to have a separate
1862 * physical-address look-up table for each na.
1863 * We can still share the vaddrs, though, therefore we split
1864 * the lut_entry structure.
1867 void *vaddr; /* virtual address. */
1871 vm_paddr_t paddr; /* physical address. */
1873 #endif /* linux & _WIN32 */
1875 struct netmap_obj_pool;
1878 * NMB returns the virtual address of a buffer (buffer 0 on bad index)
1879 * PNMB also fills the physical address
1881 static inline void *
1882 NMB(struct netmap_adapter *na, struct netmap_slot *slot)
1884 struct lut_entry *lut = na->na_lut.lut;
1885 uint32_t i = slot->buf_idx;
1886 return (unlikely(i >= na->na_lut.objtotal)) ?
1887 lut[0].vaddr : lut[i].vaddr;
1890 static inline void *
1891 PNMB(struct netmap_adapter *na, struct netmap_slot *slot, uint64_t *pp)
1893 uint32_t i = slot->buf_idx;
1894 struct lut_entry *lut = na->na_lut.lut;
1895 struct plut_entry *plut = na->na_lut.plut;
1896 void *ret = (i >= na->na_lut.objtotal) ? lut[0].vaddr : lut[i].vaddr;
1899 *pp = (i >= na->na_lut.objtotal) ? (uint64_t)plut[0].paddr.QuadPart : (uint64_t)plut[i].paddr.QuadPart;
1901 *pp = (i >= na->na_lut.objtotal) ? plut[0].paddr : plut[i].paddr;
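/*
 * Illustrative sketch (not from the original source): resolving both the
 * virtual and physical address of a slot, e.g. inside a driver txsync;
 * 'ring' and 'nm_i' are hypothetical names.
 */
#if 0
	struct netmap_slot *slot = &ring->slot[nm_i];
	uint64_t paddr;
	void *addr = PNMB(na, slot, &paddr);	/* fills paddr as well */
#endif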
1908 * Structure associated to each netmap file descriptor.
1909 * It is created on open and left unbound (np_nifp == NULL).
1910 * A successful NIOCREGIF will set np_nifp and the first few fields;
1911 * this is protected by a global lock (NMG_LOCK) due to low contention.
1913 * np_refs counts the number of references to the structure: one for the fd,
1914 * plus (on FreeBSD) one for each active mmap which we track ourselves
1915 * (linux automatically tracks them, but FreeBSD does not).
1916 * np_refs is protected by NMG_LOCK.
1918  * Read access to the structure is lock free, because np_nifp once set
1919 * can only go to 0 when nobody is using the entry anymore. Readers
1920 * must check that np_nifp != NULL before using the other fields.
1922 struct netmap_priv_d {
1923 struct netmap_if * volatile np_nifp; /* netmap if descriptor. */
1925 struct netmap_adapter *np_na;
1926 struct ifnet *np_ifp;
1927 uint32_t np_flags; /* from the ioctl */
1928 u_int np_qfirst[NR_TXRX],
1929 np_qlast[NR_TXRX]; /* range of tx/rx rings to scan */
1931 uint16_t np_kloop_state; /* use with NMG_LOCK held */
1932 #define NM_SYNC_KLOOP_RUNNING (1 << 0)
1933 #define NM_SYNC_KLOOP_STOPPING (1 << 1)
1934 int np_sync_flags; /* to be passed to nm_sync */
1936 int np_refs; /* use with NMG_LOCK held */
1938 /* pointers to the selinfo to be used for selrecord.
1939 * Either the local or the global one depending on the
1942 NM_SELINFO_T *np_si[NR_TXRX];
1944 /* In the optional CSB mode, the user must specify the start address
1945 * of two arrays of Communication Status Block (CSB) entries, for the
1946  * two directions (kernel reads / application writes, and kernel
1947  * writes / application reads).
1948 * The number of entries must agree with the number of rings bound to
1949 * the netmap file descriptor. The entries corresponding to the TX
1950 * rings are laid out before the ones corresponding to the RX rings.
1952 * Array of CSB entries for application --> kernel communication
1954 struct nm_csb_atok *np_csb_atok_base;
1955 /* Array of CSB entries for kernel --> application communication
1957 struct nm_csb_ktoa *np_csb_ktoa_base;
1960 struct file *np_filp; /* used by sync kloop */
1964 struct netmap_priv_d *netmap_priv_new(void);
1965 void netmap_priv_delete(struct netmap_priv_d *);
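/*
 * Illustrative sketch (an assumption, not part of the API): locating the
 * CSB pair of the i-th bound ring of direction t, given the layout
 * described in the netmap_priv_d comment above (all TX entries laid out
 * before the RX entries).
 */
#if 0
	u_int ntx = priv->np_qlast[NR_TX] - priv->np_qfirst[NR_TX];
	u_int off = (t == NR_TX ? 0 : ntx) + i;
	struct nm_csb_atok *atok = priv->np_csb_atok_base + off;
	struct nm_csb_ktoa *ktoa = priv->np_csb_ktoa_base + off;
#endif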
1967 static inline int nm_kring_pending(struct netmap_priv_d *np)
1969 struct netmap_adapter *na = np->np_na;
1974 for (i = np->np_qfirst[t]; i < np->np_qlast[t]; i++) {
1975 struct netmap_kring *kring = NMR(na, t)[i];
1976 if (kring->nr_mode != kring->nr_pending_mode) {
1984 /* call with NMG_LOCK held */
1986 nm_si_user(struct netmap_priv_d *priv, enum txrx t)
1988 return (priv->np_na != NULL &&
1989 (priv->np_qlast[t] - priv->np_qfirst[t] > 1));
1993 int netmap_pipe_txsync(struct netmap_kring *txkring, int flags);
1994 int netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags);
1995 int netmap_pipe_krings_create_both(struct netmap_adapter *na,
1996 struct netmap_adapter *ona);
1997 void netmap_pipe_krings_delete_both(struct netmap_adapter *na,
1998 struct netmap_adapter *ona);
1999 int netmap_pipe_reg_both(struct netmap_adapter *na,
2000 struct netmap_adapter *ona);
2001 #endif /* WITH_PIPES */
2005 struct netmap_monitor_adapter {
2006 struct netmap_adapter up;
2008 struct netmap_priv_d priv;
2012 #endif /* WITH_MONITOR */
2017 * generic netmap emulation for devices that do not have
2018 * native netmap support.
2020 int generic_netmap_attach(struct ifnet *ifp);
2021 int generic_rx_handler(struct ifnet *ifp, struct mbuf *m);
2023 int nm_os_catch_rx(struct netmap_generic_adapter *gna, int intercept);
2024 int nm_os_catch_tx(struct netmap_generic_adapter *gna, int intercept);
2026 int na_is_generic(struct netmap_adapter *na);
2029 * the generic transmit routine is passed a structure to optionally
2030 * build a queue of descriptors, in an OS-specific way.
2031 * The payload is at addr, if non-null, and the routine should send or queue
2032 * the packet, returning 0 if successful, 1 on failure.
2034 * At the end, if head is non-null, there will be an additional call
2035 * to the function with addr = NULL; this should tell the OS-specific
2036 * routine to send the queue and free any resources. Failure is ignored.
2038 struct nm_os_gen_arg {
2040 void *m; /* os-specific mbuf-like object */
2041 void *head, *tail; /* tailq, if the OS-specific routine needs to build one */
2042 void *addr; /* payload of current packet */
2043 u_int len; /* packet length */
2044 	u_int ring_nr;	/* transmit ring (queue) index */
2045 u_int qevent; /* in txqdisc mode, place an event on this mbuf */
2048 int nm_os_generic_xmit_frame(struct nm_os_gen_arg *);
2049 int nm_os_generic_find_num_desc(struct ifnet *ifp, u_int *tx, u_int *rx);
2050 void nm_os_generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq);
2051 void nm_os_generic_set_features(struct netmap_generic_adapter *gna);
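/*
 * Illustrative sketch of the calling protocol described above (not from
 * the original source; 'kring', 'ring', 'mbufs' and 'npkts' are
 * hypothetical): one call per packet with a.addr set, then, if a queue
 * was built (a.head != NULL), a final call with a.addr == NULL to flush it.
 */
#if 0
	struct nm_os_gen_arg a;
	u_int i;

	bzero(&a, sizeof(a));
	a.ring_nr = kring->ring_id;
	for (i = 0; i < npkts; i++) {
		a.m = mbufs[i];			/* hypothetical per-packet mbuf */
		a.addr = NMB(na, &ring->slot[i]);
		a.len = ring->slot[i].len;
		if (nm_os_generic_xmit_frame(&a))
			break;			/* 1 means failure */
	}
	if (a.head != NULL) {
		a.addr = NULL;			/* flush the queue; failure is ignored */
		(void)nm_os_generic_xmit_frame(&a);
	}
#endif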
2053 static inline struct ifnet*
2054 netmap_generic_getifp(struct netmap_generic_adapter *gna)
2057 return gna->prev->ifp;
2059 return gna->up.up.ifp;
2062 void netmap_generic_irq(struct netmap_adapter *na, u_int q, u_int *work_done);
2064 //#define RATE_GENERIC /* Enables communication statistics for generic. */
2066 void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi);
2068 #define generic_rate(txp, txs, txi, rxp, rxs, rxi)
2072 * netmap_mitigation API. This is used by the generic adapter
2073  * to reduce the number of interrupt requests/selwakeup calls
2074 * to clients on incoming packets.
2076 void nm_os_mitigation_init(struct nm_generic_mit *mit, int idx,
2077 struct netmap_adapter *na);
2078 void nm_os_mitigation_start(struct nm_generic_mit *mit);
2079 void nm_os_mitigation_restart(struct nm_generic_mit *mit);
2080 int nm_os_mitigation_active(struct nm_generic_mit *mit);
2081 void nm_os_mitigation_cleanup(struct nm_generic_mit *mit);
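/*
 * Illustrative sketch of the intended usage (not from the original source;
 * 'mit', 'q' and 'work_done' are hypothetical): on an incoming packet,
 * notify the client right away unless a mitigation interval is running.
 */
#if 0
	if (!nm_os_mitigation_active(mit)) {
		netmap_generic_irq(na, q, &work_done);
		nm_os_mitigation_start(mit);
	} else {
		/* already mitigating: the pending work is signalled
		 * when the mitigation interval expires */
	}
#endif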
2082 #else /* !WITH_GENERIC */
2083 #define generic_netmap_attach(ifp) (EOPNOTSUPP)
2084 #define na_is_generic(na) (0)
2085 #endif /* WITH_GENERIC */
2087 /* Shared declarations for the VALE switch. */
2090 * Each transmit queue accumulates a batch of packets into
2091 * a structure before forwarding. Packets to the same
2092 * destination are put in a list using ft_next as a link field.
2093 * ft_frags and ft_next are valid only on the first fragment.
2095 struct nm_bdg_fwd { /* forwarding entry for a bridge */
2096 void *ft_buf; /* netmap or indirect buffer */
2097 uint8_t ft_frags; /* how many fragments (only on 1st frag) */
2098 uint16_t ft_offset; /* dst port (unused) */
2099 uint16_t ft_flags; /* flags, e.g. indirect */
2100 uint16_t ft_len; /* src fragment len */
2101 uint16_t ft_next; /* next packet to same destination */
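/*
 * Illustrative sketch (not from the original source): walking the
 * per-destination list built with ft_next. 'ft' is the forwarding table
 * of the batch and 'first' the index of the first entry; the end-of-list
 * marker name (NM_FT_NULL) is an assumption.
 */
#if 0
	for (i = first; i != NM_FT_NULL; i = ft[i].ft_next) {
		const struct nm_bdg_fwd *p = &ft[i];
		/* p->ft_buf and p->ft_len describe one fragment */
	}
#endif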
2104 /* struct 'virtio_net_hdr' from linux. */
2105 struct nm_vnet_hdr {
2106 #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* Use csum_start, csum_offset */
2107 #define VIRTIO_NET_HDR_F_DATA_VALID 2 /* Csum is valid */
2109 #define VIRTIO_NET_HDR_GSO_NONE 0 /* Not a GSO frame */
2110 #define VIRTIO_NET_HDR_GSO_TCPV4 1 /* GSO frame, IPv4 TCP (TSO) */
2111 #define VIRTIO_NET_HDR_GSO_UDP 3 /* GSO frame, IPv4 UDP (UFO) */
2112 #define VIRTIO_NET_HDR_GSO_TCPV6 4 /* GSO frame, IPv6 TCP */
2113 #define VIRTIO_NET_HDR_GSO_ECN 0x80 /* TCP has ECN set */
2117 uint16_t csum_start;
2118 uint16_t csum_offset;
2121 #define WORST_CASE_GSO_HEADER	(14+40+60)  /* Ethernet + IPv6 + TCP with options */
2123 /* Private definitions for IPv4, IPv6, UDP and TCP headers. */
2126 uint8_t version_ihl;
2136 	/* The options start here. */
2144 uint8_t doff; /* Data offset + Reserved */
2159 uint8_t priority_version;
2160 uint8_t flow_lbl[3];
2162 uint16_t payload_len;
2170 /* Type used to store a checksum (in host byte order) that hasn't been
2173 #define rawsum_t uint32_t
2175 rawsum_t nm_os_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum);
2176 uint16_t nm_os_csum_ipv4(struct nm_iphdr *iph);
2177 void nm_os_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
2178 size_t datalen, uint16_t *check);
2179 void nm_os_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data,
2180 size_t datalen, uint16_t *check);
2181 uint16_t nm_os_csum_fold(rawsum_t cur_sum);
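/*
 * Illustrative sketch: accumulating a raw checksum over a buffer and then
 * folding it to 16 bits with the helpers above ('buf' and 'len' are
 * hypothetical names).
 */
#if 0
	rawsum_t sum = nm_os_csum_raw(buf, len, 0);
	uint16_t folded = nm_os_csum_fold(sum);
#endif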
2183 void bdg_mismatch_datapath(struct netmap_vp_adapter *na,
2184 struct netmap_vp_adapter *dst_na,
2185 const struct nm_bdg_fwd *ft_p,
2186 struct netmap_ring *dst_ring,
2187 u_int *j, u_int lim, u_int *howmany);
2189 /* persistent virtual port routines */
2190 int nm_os_vi_persist(const char *, struct ifnet **);
2191 void nm_os_vi_detach(struct ifnet *);
2192 void nm_os_vi_init_index(void);
2195 * kernel thread routines
2197 struct nm_kctx; /* OS-specific kernel context - opaque */
2198 typedef void (*nm_kctx_worker_fn_t)(void *data);
2200 /* kthread configuration */
2201 struct nm_kctx_cfg {
2202 long type; /* kthread type/identifier */
2203 nm_kctx_worker_fn_t worker_fn; /* worker function */
2204 void *worker_private;/* worker parameter */
2205 int attach_user; /* attach kthread to user process */
2207 /* kernel context (kthread) routines */
2208 struct nm_kctx *nm_os_kctx_create(struct nm_kctx_cfg *cfg,
2210 int nm_os_kctx_worker_start(struct nm_kctx *);
2211 void nm_os_kctx_worker_stop(struct nm_kctx *);
2212 void nm_os_kctx_destroy(struct nm_kctx *);
2213 void nm_os_kctx_worker_setaff(struct nm_kctx *, int);
2214 u_int nm_os_ncpus(void);
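/*
 * Illustrative lifecycle sketch (not from the original source): the worker
 * function and its argument are hypothetical, NULL is just assumed to be
 * acceptable as the second nm_os_kctx_create() argument, and a zero return
 * from nm_os_kctx_worker_start() is assumed to mean success.
 */
#if 0
	struct nm_kctx_cfg cfg;
	struct nm_kctx *kctx;

	bzero(&cfg, sizeof(cfg));
	cfg.worker_fn = my_worker;		/* hypothetical worker */
	cfg.worker_private = my_arg;		/* hypothetical argument */
	kctx = nm_os_kctx_create(&cfg, NULL);
	if (kctx != NULL && nm_os_kctx_worker_start(kctx) == 0) {
		/* ... worker running ... */
		nm_os_kctx_worker_stop(kctx);
	}
	if (kctx != NULL)
		nm_os_kctx_destroy(kctx);
#endif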
2216 int netmap_sync_kloop(struct netmap_priv_d *priv,
2217 struct nmreq_header *hdr);
2218 int netmap_sync_kloop_stop(struct netmap_priv_d *priv);
2220 #ifdef WITH_PTNETMAP
2221 /* ptnetmap guest routines */
2224 * ptnetmap_memdev routines used to talk with ptnetmap_memdev device driver
2226 struct ptnetmap_memdev;
2227 int nm_os_pt_memdev_iomap(struct ptnetmap_memdev *, vm_paddr_t *, void **,
2229 void nm_os_pt_memdev_iounmap(struct ptnetmap_memdev *);
2230 uint32_t nm_os_pt_memdev_ioread(struct ptnetmap_memdev *, unsigned int);
2233 * netmap adapter for guest ptnetmap ports
2235 struct netmap_pt_guest_adapter {
2236 /* The netmap adapter to be used by netmap applications.
2237 * This field must be the first, to allow upcast. */
2238 struct netmap_hw_adapter hwup;
2240 /* The netmap adapter to be used by the driver. */
2241 struct netmap_hw_adapter dr;
2243 /* Reference counter to track users of backend netmap port: the
2244 * network stack and netmap clients.
2245 	 * Used to decide when we need to (de)allocate krings/rings and
2246 * start (stop) ptnetmap kthreads. */
2251 int netmap_pt_guest_attach(struct netmap_adapter *na,
2252 unsigned int nifp_offset,
2253 unsigned int memid);
2254 bool netmap_pt_guest_txsync(struct nm_csb_atok *atok,
2255 struct nm_csb_ktoa *ktoa,
2256 struct netmap_kring *kring, int flags);
2257 bool netmap_pt_guest_rxsync(struct nm_csb_atok *atok,
2258 struct nm_csb_ktoa *ktoa,
2259 struct netmap_kring *kring, int flags);
2260 int ptnet_nm_krings_create(struct netmap_adapter *na);
2261 void ptnet_nm_krings_delete(struct netmap_adapter *na);
2262 void ptnet_nm_dtor(struct netmap_adapter *na);
2264 /* Helper function wrapping nm_sync_kloop_appl_read(). */
2266 ptnet_sync_tail(struct nm_csb_ktoa *ktoa, struct netmap_kring *kring)
2268 struct netmap_ring *ring = kring->ring;
2270 /* Update hwcur and hwtail as known by the host. */
2271 nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail, &kring->nr_hwcur);
2273 /* nm_sync_finalize */
2274 ring->tail = kring->rtail = kring->nr_hwtail;
2276 #endif /* WITH_PTNETMAP */
2280 * FreeBSD mbuf allocator/deallocator in emulation mode:
2282 #if __FreeBSD_version < 1100000
2285 * For older versions of FreeBSD:
2287 * We allocate EXT_PACKET mbuf+clusters, but need to set M_NOFREE
2288 * so that the destructor, if invoked, will not free the packet.
2289 * In principle we should set the destructor only on demand,
2290  * but since there might be a race we had better do it on allocation.
2291 * As a consequence, we also need to set the destructor or we
2292 * would leak buffers.
2295 /* The mbuf destructor also needs to change the type to EXT_EXTREF,
2296 * add an M_NOFREE flag, and then clear the flag and
2297 * chain into uma_zfree(zone_pack, mf)
2298 * (or reinstall the buffer ?)
2300 #define SET_MBUF_DESTRUCTOR(m, fn) do { \
2301 (m)->m_ext.ext_free = (void *)fn; \
2302 (m)->m_ext.ext_type = EXT_EXTREF; \
2306 void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2)
2308 /* restore original mbuf */
2309 m->m_ext.ext_buf = m->m_data = m->m_ext.ext_arg1;
2310 m->m_ext.ext_arg1 = NULL;
2311 m->m_ext.ext_type = EXT_PACKET;
2312 m->m_ext.ext_free = NULL;
2313 if (MBUF_REFCNT(m) == 0)
2314 SET_MBUF_REFCNT(m, 1);
2315 uma_zfree(zone_pack, m);
2320 static inline struct mbuf *
2321 nm_os_get_mbuf(struct ifnet *ifp, int len)
2326 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2328 /* m_getcl() (mb_ctor_mbuf) has an assert that checks that
2329 	 * the M_NOFREE flag is not specified as the third argument,
2330 * so we have to set M_NOFREE after m_getcl(). */
2331 m->m_flags |= M_NOFREE;
2332 m->m_ext.ext_arg1 = m->m_ext.ext_buf; // XXX save
2333 m->m_ext.ext_free = (void *)void_mbuf_dtor;
2334 m->m_ext.ext_type = EXT_EXTREF;
2335 nm_prdis(5, "create m %p refcnt %d", m, MBUF_REFCNT(m));
2340 #else /* __FreeBSD_version >= 1100000 */
2343  * Newer versions of FreeBSD use a straightforward scheme.
2345 * We allocate mbufs with m_gethdr(), since the mbuf header is needed
2346  * by the driver. We also attach caller-provided external storage,
2347  * which in this case is a netmap buffer. When calling m_extadd(), however,
2348 * we pass a NULL address, since the real address (and length) will be
2349 * filled in by nm_os_generic_xmit_frame() right before calling
2352  * The dtor function does nothing; however, we need it, since mb_free_ext()
2353  * has a KASSERT() checking that the mbuf dtor function is not NULL.
2356 #if __FreeBSD_version <= 1200050
2357 static void void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2) { }
2358 #else /* __FreeBSD_version >= 1200051 */
2359 /* The arg1 and arg2 pointer arguments were removed by r324446, which
2360  * is included since version 1200051. */
2361 static void void_mbuf_dtor(struct mbuf *m) { }
2362 #endif /* __FreeBSD_version >= 1200051 */
2364 #define SET_MBUF_DESTRUCTOR(m, fn) do { \
2365 (m)->m_ext.ext_free = (fn != NULL) ? \
2366 (void *)fn : (void *)void_mbuf_dtor; \
2369 static inline struct mbuf *
2370 nm_os_get_mbuf(struct ifnet *ifp, int len)
2377 m = m_gethdr(M_NOWAIT, MT_DATA);
2382 m_extadd(m, NULL /* buf */, 0 /* size */, void_mbuf_dtor,
2383 NULL, NULL, 0, EXT_NET_DRV);
2388 #endif /* __FreeBSD_version >= 1100000 */
2389 #endif /* __FreeBSD__ */
2391 struct nmreq_option * nmreq_findoption(struct nmreq_option *, uint16_t);
2392 int nmreq_checkduplicate(struct nmreq_option *);
2394 int netmap_init_bridges(void);
2395 void netmap_uninit_bridges(void);
2397 /* Functions to read and write CSB fields from the kernel. */
2399 #define CSB_READ(csb, field, r) (get_user(r, &csb->field))
2400 #define CSB_WRITE(csb, field, v) (put_user(v, &csb->field))
2402 #define CSB_READ(csb, field, r) (r = fuword32(&csb->field))
2403 #define CSB_WRITE(csb, field, v) (suword32(&csb->field, v))
2404 #endif /* ! linux */
2406 #endif /* _NET_NETMAP_KERN_H_ */