2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo
5 * Copyright (C) 2013-2016 Universita` di Pisa
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * The header contains the definitions of constants and function
34 * prototypes used only in kernelspace.
37 #ifndef _NET_NETMAP_KERN_H_
38 #define _NET_NETMAP_KERN_H_
42 #if defined(CONFIG_NETMAP_EXTMEM)
45 #if defined(CONFIG_NETMAP_VALE)
48 #if defined(CONFIG_NETMAP_PIPE)
51 #if defined(CONFIG_NETMAP_MONITOR)
54 #if defined(CONFIG_NETMAP_GENERIC)
57 #if defined(CONFIG_NETMAP_PTNETMAP_GUEST)
58 #define WITH_PTNETMAP_GUEST
60 #if defined(CONFIG_NETMAP_PTNETMAP_HOST)
61 #define WITH_PTNETMAP_HOST
63 #if defined(CONFIG_NETMAP_SINK)
67 #elif defined (_WIN32)
68 #define WITH_VALE // comment out to disable VALE support
73 #else /* neither linux nor windows */
74 #define WITH_VALE // comment out to disable VALE support
78 #define WITH_PTNETMAP_HOST /* ptnetmap host support */
79 #define WITH_PTNETMAP_GUEST /* ptnetmap guest support */
83 #if defined(__FreeBSD__)
84 #include <sys/selinfo.h>
86 #define likely(x) __builtin_expect((long)!!(x), 1L)
87 #define unlikely(x) __builtin_expect((long)!!(x), 0L)
90 #define NM_LOCK_T struct mtx /* low level spinlock, used to protect queues */
92 #define NM_MTX_T struct sx /* OS-specific mutex (sleepable) */
93 #define NM_MTX_INIT(m) sx_init(&(m), #m)
94 #define NM_MTX_DESTROY(m) sx_destroy(&(m))
95 #define NM_MTX_LOCK(m) sx_xlock(&(m))
96 #define NM_MTX_SPINLOCK(m) while (!sx_try_xlock(&(m))) ;
97 #define NM_MTX_UNLOCK(m) sx_xunlock(&(m))
98 #define NM_MTX_ASSERT(m) sx_assert(&(m), SA_XLOCKED)
100 #define NM_SELINFO_T struct nm_selinfo
101 #define NM_SELRECORD_T struct thread
102 #define MBUF_LEN(m) ((m)->m_pkthdr.len)
103 #define MBUF_TXQ(m) ((m)->m_pkthdr.flowid)
104 #define MBUF_TRANSMIT(na, ifp, m) ((na)->if_transmit(ifp, m))
105 #define GEN_TX_MBUF_IFP(m) ((m)->m_pkthdr.rcvif)
107 #define NM_ATOMIC_T volatile int /* required by atomic/bitops.h */
108 /* atomic operations */
109 #include <machine/atomic.h>
110 #define NM_ATOMIC_TEST_AND_SET(p) (!atomic_cmpset_acq_int((p), 0, 1))
111 #define NM_ATOMIC_CLEAR(p) atomic_store_rel_int((p), 0)
113 #if __FreeBSD_version >= 1100030
114 #define WNA(_ifp) (_ifp)->if_netmap
115 #else /* older FreeBSD */
116 #define WNA(_ifp) (_ifp)->if_pspare[0]
117 #endif /* older FreeBSD */
119 #if __FreeBSD_version >= 1100005
120 struct netmap_adapter *netmap_getna(if_t ifp);
123 #if __FreeBSD_version >= 1100027
124 #define MBUF_REFCNT(m) ((m)->m_ext.ext_count)
125 #define SET_MBUF_REFCNT(m, x) (m)->m_ext.ext_count = x
127 #define MBUF_REFCNT(m) ((m)->m_ext.ref_cnt ? *((m)->m_ext.ref_cnt) : -1)
128 #define SET_MBUF_REFCNT(m, x) *((m)->m_ext.ref_cnt) = x
131 #define MBUF_QUEUED(m) 1
140 /* Not used in FreeBSD. */
143 #define NM_BNS_GET(b)
144 #define NM_BNS_PUT(b)
146 #elif defined (linux)
148 #define NM_LOCK_T safe_spinlock_t // see bsd_glue.h
149 #define NM_SELINFO_T wait_queue_head_t
150 #define MBUF_LEN(m) ((m)->len)
151 #define MBUF_TRANSMIT(na, ifp, m) \
153 /* Avoid infinite recursion with generic. */ \
154 m->priority = NM_MAGIC_PRIORITY_TX; \
155 (((struct net_device_ops *)(na)->if_transmit)->ndo_start_xmit(m, ifp)); \
159 /* See explanation in nm_os_generic_xmit_frame. */
160 #define GEN_TX_MBUF_IFP(m) ((struct ifnet *)skb_shinfo(m)->destructor_arg)
162 #define NM_ATOMIC_T volatile long unsigned int
164 #define NM_MTX_T struct mutex /* OS-specific sleepable lock */
165 #define NM_MTX_INIT(m) mutex_init(&(m))
166 #define NM_MTX_DESTROY(m) do { (void)(m); } while (0)
167 #define NM_MTX_LOCK(m) mutex_lock(&(m))
168 #define NM_MTX_UNLOCK(m) mutex_unlock(&(m))
169 #define NM_MTX_ASSERT(m) mutex_is_locked(&(m))
173 #endif /* DEV_NETMAP */
175 #elif defined (__APPLE__)
177 #warning apple support is incomplete.
178 #define likely(x) __builtin_expect(!!(x), 1)
179 #define unlikely(x) __builtin_expect(!!(x), 0)
180 #define NM_LOCK_T IOLock *
181 #define NM_SELINFO_T struct selinfo
182 #define MBUF_LEN(m) ((m)->m_pkthdr.len)
184 #elif defined (_WIN32)
185 #include "../../../WINDOWS/win_glue.h"
187 #define NM_SELRECORD_T IO_STACK_LOCATION
188 #define NM_SELINFO_T win_SELINFO // see win_glue.h
189 #define NM_LOCK_T win_spinlock_t // see win_glue.h
190 #define NM_MTX_T KGUARDED_MUTEX /* OS-specific mutex (sleepable) */
192 #define NM_MTX_INIT(m) KeInitializeGuardedMutex(&m);
193 #define NM_MTX_DESTROY(m) do { (void)(m); } while (0)
194 #define NM_MTX_LOCK(m) KeAcquireGuardedMutex(&(m))
195 #define NM_MTX_UNLOCK(m) KeReleaseGuardedMutex(&(m))
196 #define NM_MTX_ASSERT(m) assert(&m.Count>0)
198 //These linknames are for the NDIS driver
199 #define NETMAP_NDIS_LINKNAME_STRING L"\\DosDevices\\NMAPNDIS"
200 #define NETMAP_NDIS_NTDEVICE_STRING L"\\Device\\NMAPNDIS"
202 //Definition of internal driver-to-driver ioctl codes
203 #define NETMAP_KERNEL_XCHANGE_POINTERS _IO('i', 180)
204 #define NETMAP_KERNEL_SEND_SHUTDOWN_SIGNAL _IO_direct('i', 195)
206 typedef struct hrtimer {
212 /* MSVC does not have likely/unlikely support */
214 #define likely(x) (x)
215 #define unlikely(x) (x)
217 #define likely(x) __builtin_expect((long)!!(x), 1L)
218 #define unlikely(x) __builtin_expect((long)!!(x), 0L)
223 #error unsupported platform
225 #endif /* end - platform-specific code */
227 #ifndef _WIN32 /* support for emulated sysctl */
232 #define NM_ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))
234 #define NMG_LOCK_T NM_MTX_T
235 #define NMG_LOCK_INIT() NM_MTX_INIT(netmap_global_lock)
236 #define NMG_LOCK_DESTROY() NM_MTX_DESTROY(netmap_global_lock)
237 #define NMG_LOCK() NM_MTX_LOCK(netmap_global_lock)
238 #define NMG_UNLOCK() NM_MTX_UNLOCK(netmap_global_lock)
239 #define NMG_LOCK_ASSERT() NM_MTX_ASSERT(netmap_global_lock)
241 #if defined(__FreeBSD__)
242 #define nm_prerr printf
243 #define nm_prinf printf
244 #elif defined (_WIN32)
245 #define nm_prerr DbgPrint
246 #define nm_prinf DbgPrint
248 #define nm_prerr(fmt, arg...) printk(KERN_ERR fmt, ##arg)
249 #define nm_prinf(fmt, arg...) printk(KERN_INFO fmt, ##arg)
252 #define ND(format, ...)
253 #define D(format, ...) \
255 struct timeval __xxts; \
256 microtime(&__xxts); \
257 nm_prerr("%03d.%06d [%4d] %-25s " format "\n", \
258 (int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec, \
259 __LINE__, __FUNCTION__, ##__VA_ARGS__); \
262 /* rate limited, lps indicates how many per second */
263 #define RD(lps, format, ...) \
265 static int t0, __cnt; \
266 if (t0 != time_second) { \
271 D(format, ##__VA_ARGS__); \
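/* Example (a sketch): rate-limit a noisy per-packet diagnostic to one
 * line per second; ndrop is a hypothetical counter kept by the caller:
 *
 *	RD(1, "ring %d: dropped %d packets", kring->ring_id, ndrop);
 */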
274 struct netmap_adapter;
277 struct netmap_priv_d;
279 /* os-specific NM_SELINFO_T initialization/destruction functions */
280 void nm_os_selinfo_init(NM_SELINFO_T *);
281 void nm_os_selinfo_uninit(NM_SELINFO_T *);
283 const char *nm_dump_buf(char *p, int len, int lim, char *dst);
285 void nm_os_selwakeup(NM_SELINFO_T *si);
286 void nm_os_selrecord(NM_SELRECORD_T *sr, NM_SELINFO_T *si);
288 int nm_os_ifnet_init(void);
289 void nm_os_ifnet_fini(void);
290 void nm_os_ifnet_lock(void);
291 void nm_os_ifnet_unlock(void);
293 unsigned nm_os_ifnet_mtu(struct ifnet *ifp);
295 void nm_os_get_module(void);
296 void nm_os_put_module(void);
298 void netmap_make_zombie(struct ifnet *);
299 void netmap_undo_zombie(struct ifnet *);
301 /* os independent alloc/realloc/free */
302 void *nm_os_malloc(size_t);
303 void *nm_os_vmalloc(size_t);
304 void *nm_os_realloc(void *, size_t new_size, size_t old_size);
305 void nm_os_free(void *);
306 void nm_os_vfree(void *);
308 /* passes a packet up to the host stack.
309 * If the packet is sent (or dropped) immediately it returns NULL,
310 * otherwise it links the packet to prev and returns m.
311 * In this case, a final call with m=NULL and prev != NULL will send up
312 * the entire chain to the host stack.
314 void *nm_os_send_up(struct ifnet *, struct mbuf *m, struct mbuf *prev);
316 int nm_os_mbuf_has_offld(struct mbuf *m);
318 #include "netmap_mbq.h"
320 extern NMG_LOCK_T netmap_global_lock;
322 enum txrx { NR_RX = 0, NR_TX = 1, NR_TXRX };
324 static __inline const char*
325 nm_txrx2str(enum txrx t)
327 	return (t == NR_RX ? "RX" : "TX");
330 static __inline enum txrx
331 nm_txrx_swap(enum txrx t)
333 	return (t == NR_RX ? NR_TX : NR_RX);
336 #define for_rx_tx(t) for ((t) = 0; (t) < NR_TXRX; (t)++)
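/* Example (illustrative): for_rx_tx() visits first the RX and then the
 * TX side of an adapter; nma_get_nrings() is defined later in this file:
 *
 *	enum txrx t;
 *	for_rx_tx(t)
 *		D("%s: %u %s rings", na->name,
 *		    nma_get_nrings(na, t), nm_txrx2str(t));
 */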
339 struct netmap_zmon_list {
340 struct netmap_kring *next;
341 struct netmap_kring *prev;
343 #endif /* WITH_MONITOR */
346 * private, kernel view of a ring. Keeps track of the status of
347 * a ring across system calls.
349 * nr_hwcur index of the next buffer to refill.
350 * It corresponds to ring->head
351 * at the time the system call returns.
353 * nr_hwtail index of the first buffer owned by the kernel.
354 * On RX, hwcur->hwtail are receive buffers
355 * not yet released. hwcur is advanced following
356 * ring->head, hwtail is advanced on incoming packets,
357 * and a wakeup is generated when hwtail passes ring->cur
358 * On TX, hwcur->rcur have been filled by the sender
359 * but not sent yet to the NIC; rcur->hwtail are available
360 * for new transmissions, and hwtail->hwcur-1 are pending
361 * transmissions not yet acknowledged.
363 * The indexes in the NIC and netmap rings are offset by nkr_hwofs slots.
364 * This is so that, on a reset, buffers owned by userspace are not
365 * modified by the kernel. In particular:
366 * RX rings: the next empty buffer (hwtail + hwofs) coincides with
367 * the next empty buffer as known by the hardware (next_to_check or so).
368 * TX rings: hwcur + hwofs coincides with next_to_send
370 * The following fields are used to implement lock-free copy of packets
371 * from input to output ports in VALE switch:
372 * nkr_hwlease buffer after the last one being copied.
373 * A writer in nm_bdg_flush reserves N buffers
374 * from nr_hwlease, advances it, then does the
375 * copy outside the lock.
376 * In RX rings (used for VALE ports),
377 * nkr_hwtail <= nkr_hwlease < nkr_hwcur+N-1
378 * In TX rings (used for NIC or host stack ports)
379 * nkr_hwcur <= nkr_hwlease < nkr_hwtail
380 * nkr_leases array of nkr_num_slots where writers can report
381 * completion of their block. NR_NOSLOT (~0) indicates
382 * that the writer has not finished yet
383 * nkr_lease_idx index of next free slot in nr_leases, to be assigned
385 * The kring is manipulated by txsync/rxsync and generic netmap function.
387 * Concurrent rxsync or txsync on the same ring are prevented
388 * by nm_kr_(try)lock() which in turn uses nr_busy. This is all we need
389 * for NIC rings, and for TX rings attached to the host stack.
391 * RX rings attached to the host stack use an mbq (rx_queue) on both
392 * rxsync_from_host() and netmap_transmit(). The mbq is protected
393 * by its internal lock.
395 * RX rings attached to the VALE switch are accessed by both senders
396 * and receiver. They are protected through the q_lock on the RX ring.
398 struct netmap_kring {
399 struct netmap_ring *ring;
401 uint32_t nr_hwcur; /* should be nr_hwhead */
405 * Copies of values in user rings, so we do not need to look
406 * at the ring (which could be modified). These are set in the
407 * *sync_prologue()/finalize() routines.
413 uint32_t nr_kflags; /* private driver flags */
414 #define NKR_PENDINTR 0x1 // Pending interrupt.
415 #define NKR_EXCLUSIVE 0x2 /* exclusive binding */
416 #define NKR_FORWARD 0x4 /* (host ring only) there are
419 #define NKR_NEEDRING 0x8 /* ring needed even if users==0
420 * (used internally by pipes and
421 * by ptnetmap host ports)
423 #define NKR_NOINTR 0x10 /* don't use interrupts on this ring */
424 #define NKR_FAKERING 0x20 /* don't allocate/free buffers */
427 uint32_t nr_pending_mode;
428 #define NKR_NETMAP_OFF 0x0
429 #define NKR_NETMAP_ON 0x1
431 uint32_t nkr_num_slots;
434 * On a NIC reset, the NIC ring indexes may be reset but the
435 * indexes in the netmap rings remain the same. nkr_hwofs
436 * keeps track of the offset between the two.
440 /* last_reclaim is opaque marker to help reduce the frequency
441 * of operations such as reclaiming tx buffers. A possible use
442 * is set it to ticks and do the reclaim only once per tick.
444 uint64_t last_reclaim;
447 NM_SELINFO_T si; /* poll/select wait queue */
448 NM_LOCK_T q_lock; /* protects kring and ring. */
449 NM_ATOMIC_T nr_busy; /* prevent concurrent syscalls */
451 	/* the adapter that owns this kring */
452 struct netmap_adapter *na;
454 /* the adapter that wants to be notified when this kring has
455 	 * new slots available. This is usually the same as the above,
456 * but wrappers may let it point to themselves
458 struct netmap_adapter *notify_na;
460 /* The following fields are for VALE switch support */
461 struct nm_bdg_fwd *nkr_ft;
462 uint32_t *nkr_leases;
463 #define NR_NOSLOT ((uint32_t)~0) /* used in nkr_*lease* */
464 uint32_t nkr_hwlease;
465 uint32_t nkr_lease_idx;
467 /* while nkr_stopped is set, no new [tr]xsync operations can
468 * be started on this kring.
469 * This is used by netmap_disable_all_rings()
470 * to find a synchronization point where critical data
471 * structures pointed to by the kring can be added or removed
473 volatile int nkr_stopped;
475 /* Support for adapters without native netmap support.
476 * On tx rings we preallocate an array of tx buffers
477 * (same size as the netmap ring), on rx rings we
478 * store incoming mbufs in a queue that is drained by
481 struct mbuf **tx_pool;
482 struct mbuf *tx_event; /* TX event used as a notification */
483 NM_LOCK_T tx_event_lock; /* protects the tx_event mbuf */
484 struct mbq rx_queue; /* intercepted rx mbufs. */
486 uint32_t users; /* existing bindings for this ring */
488 uint32_t ring_id; /* kring identifier */
489 enum txrx tx; /* kind of ring (tx or rx) */
490 char name[64]; /* diagnostic */
492 /* [tx]sync callback for this kring.
493 * The default nm_kring_create callback (netmap_krings_create)
494 * sets the nm_sync callback of each hardware tx(rx) kring to
495 * the corresponding nm_txsync(nm_rxsync) taken from the
496 * netmap_adapter; moreover, it sets the sync callback
497 * of the host tx(rx) ring to netmap_txsync_to_host
498 * (netmap_rxsync_from_host).
500 * Overrides: the above configuration is not changed by
501 * any of the nm_krings_create callbacks.
503 int (*nm_sync)(struct netmap_kring *kring, int flags);
504 int (*nm_notify)(struct netmap_kring *kring, int flags);
507 struct netmap_kring *pipe; /* if this is a pipe ring,
508 * pointer to the other end
510 #endif /* WITH_PIPES */
513 int (*save_notify)(struct netmap_kring *kring, int flags);
517 /* array of krings that are monitoring this kring */
518 struct netmap_kring **monitors;
519 uint32_t max_monitors; /* current size of the monitors array */
520 uint32_t n_monitors; /* next unused entry in the monitor array */
521 uint32_t mon_pos[NR_TXRX]; /* index of this ring in the monitored ring array */
522 uint32_t mon_tail; /* last seen slot on rx */
524 /* circular list of zero-copy monitors */
525 struct netmap_zmon_list zmon_list[NR_TXRX];
528 * Monitors work by intercepting the sync and notify callbacks of the
529 * monitored krings. This is implemented by replacing the pointers
530 * above and saving the previous ones in mon_* pointers below
532 int (*mon_sync)(struct netmap_kring *kring, int flags);
533 int (*mon_notify)(struct netmap_kring *kring, int flags);
538 __declspec(align(64));
540 __attribute__((__aligned__(64)));
543 /* return 1 iff the kring needs to be turned on */
545 nm_kring_pending_on(struct netmap_kring *kring)
547 return kring->nr_pending_mode == NKR_NETMAP_ON &&
548 kring->nr_mode == NKR_NETMAP_OFF;
551 /* return 1 iff the kring needs to be turned off */
553 nm_kring_pending_off(struct netmap_kring *kring)
555 return kring->nr_pending_mode == NKR_NETMAP_OFF &&
556 kring->nr_mode == NKR_NETMAP_ON;
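/* Sketch of how a driver's nm_register callback may use the two
 * helpers above to process its hardware rings (host rings omitted;
 * onoff is the nm_register argument):
 *
 *	enum txrx t;
 *	u_int i;
 *
 *	for_rx_tx(t) {
 *		for (i = 0; i < nma_get_nrings(na, t); i++) {
 *			struct netmap_kring *kring = NMR(na, t)[i];
 *
 *			if (onoff && nm_kring_pending_on(kring))
 *				kring->nr_mode = NKR_NETMAP_ON;
 *			else if (!onoff && nm_kring_pending_off(kring))
 *				kring->nr_mode = NKR_NETMAP_OFF;
 *		}
 *	}
 */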
559 /* return the next index, with wraparound */
560 static inline uint32_t
561 nm_next(uint32_t i, uint32_t lim)
563 	return unlikely(i == lim) ? 0 : i + 1;
567 /* return the previous index, with wraparound */
568 static inline uint32_t
569 nm_prev(uint32_t i, uint32_t lim)
571 	return unlikely(i == 0) ? lim : i - 1;
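/* Example (a sketch of a txsync main loop): walk the slots the user
 * has made available, from nr_hwcur to rhead (the kring copy of
 * ring->head), using nm_next() for the wraparound:
 *
 *	u_int nm_i = kring->nr_hwcur;
 *	u_int lim = kring->nkr_num_slots - 1;
 *
 *	while (nm_i != kring->rhead) {
 *		struct netmap_slot *slot = &kring->ring->slot[nm_i];
 *		... program the slot into a NIC descriptor ...
 *		nm_i = nm_next(nm_i, lim);
 *	}
 *	kring->nr_hwcur = nm_i;
 */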
577 * Here is the layout for the Rx and Tx rings.
581 +-----------------+ +-----------------+
584 +-----------------+ +-----------------+
585 head->| owned by user |<-hwcur | not sent to nic |<-hwcur
587 +-----------------+ | |
588 cur->| available to | | |
589 | user, not read | +-----------------+
590 | yet | cur->| (being |
593 +-----------------+ + ------ +
594 tail->| |<-hwtail | |<-hwlease
595 | (being | ... | | ...
596 | prepared) | ... | | ...
597 +-----------------+ ... | | ...
598 | |<-hwlease +-----------------+
599 | | tail->| |<-hwtail
603 +-----------------+ +-----------------+
605 * The cur/tail (user view) and hwcur/hwtail (kernel view)
606 * are used in the normal operation of the card.
608 * When a ring is the output of a switch port (Rx ring for
609 * a VALE port, Tx ring for the host stack or NIC), slots
610 * are reserved in blocks through 'hwlease' which points
611 * to the next unused slot.
612 * On an Rx ring, hwlease is always after hwtail,
613 * and completions cause hwtail to advance.
614 * On a Tx ring, hwlease is always between cur and hwtail,
615 * and completions cause cur to advance.
617 * nm_kr_space() returns the maximum number of slots that
619 * nm_kr_lease() reserves the required number of buffers,
620 * advances nkr_hwlease and also returns an entry in
621 * a circular array where completions should be reported.
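 *
 * Hypothetical pseudo-code of the lease protocol described above
 * (a sketch only; the real code lives in the VALE flush path):
 *
 *	lock(kring);
 *	j = nm_kr_lease(kring, n, is_rx);	// reserve n slots
 *	unlock(kring);
 *	... copy the n packets into the reserved slots ...
 *	lock(kring);
 *	kring->nkr_leases[j] = end_of_block;	// report completion
 *	... advance the kernel pointer over all completed leases ...
 *	unlock(kring);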
626 #define plut_entry lut_entry
630 struct lut_entry *lut;
631 struct plut_entry *plut;
632 uint32_t objtotal; /* max buffer index */
633 uint32_t objsize; /* buffer size */
636 struct netmap_vp_adapter; // forward
638 /* Struct to be filled by nm_config callbacks. */
639 struct nm_config_info {
640 unsigned num_tx_rings;
641 unsigned num_rx_rings;
642 unsigned num_tx_descs;
643 unsigned num_rx_descs;
644 unsigned rx_buf_maxsize;
648 * The "struct netmap_adapter" extends the "struct adapter"
649 * (or equivalent) device descriptor.
650 * It contains all base fields needed to support netmap operation.
651 * There are in fact different types of netmap adapters
652 * (native, generic, VALE switch...) so a netmap_adapter is
653 * just the first field in the derived type.
655 struct netmap_adapter {
657 * On linux we do not have a good way to tell if an interface
658 * is netmap-capable. So we always use the following trick:
659 * NA(ifp) points here, and the first entry (which hopefully
660 * always exists and is at least 32 bits) contains a magic
661 * value which we can use to detect that the interface is good.
664 uint32_t na_flags; /* enabled, and other flags */
665 #define NAF_SKIP_INTR 1 /* use the regular interrupt handler.
666 * useful during initialization
668 #define NAF_SW_ONLY 2 /* forward packets only to sw adapter */
669 #define NAF_BDG_MAYSLEEP 4 /* the bridge is allowed to sleep when
670 * forwarding packets coming from this
673 #define NAF_MEM_OWNER 8 /* the adapter uses its own memory area
674 * that cannot be changed
676 #define NAF_NATIVE 16 /* the adapter is native.
677 * Virtual ports (non persistent vale ports,
678 * pipes, monitors...) should never use
681 #define NAF_NETMAP_ON 32 /* netmap is active (either native or
682 * emulated). Where possible (e.g. FreeBSD)
683 * IFCAP_NETMAP also mirrors this flag.
685 #define NAF_HOST_RINGS 64 /* the adapter supports the host rings */
686 #define NAF_FORCE_NATIVE 128 /* the adapter is always NATIVE */
687 #define NAF_PTNETMAP_HOST 256 /* the adapter supports ptnetmap in the host */
688 #define NAF_MOREFRAG 512 /* the adapter supports NS_MOREFRAG */
689 #define NAF_ZOMBIE (1U<<30) /* the nic driver has been unloaded */
690 #define NAF_BUSY (1U<<31) /* the adapter is used internally and
691 * cannot be registered from userspace
693 int active_fds; /* number of user-space descriptors using this
694 interface, which is equal to the number of
695 struct netmap_if objs in the mapped region. */
697 u_int num_rx_rings; /* number of adapter receive rings */
698 u_int num_tx_rings; /* number of adapter transmit rings */
700 	u_int num_tx_desc;  /* number of descriptors in each queue */
703 /* tx_rings and rx_rings are private but allocated
704 * as a contiguous chunk of memory. Each array has
705 * N+1 entries, for the adapter queues and for the host queue.
707 struct netmap_kring **tx_rings; /* array of TX rings. */
708 struct netmap_kring **rx_rings; /* array of RX rings. */
710 void *tailroom; /* space below the rings array */
711 /* (used for leases) */
714 NM_SELINFO_T si[NR_TXRX]; /* global wait queues */
716 /* count users of the global wait queues */
717 int si_users[NR_TXRX];
719 void *pdev; /* used to store pci device */
721 /* copy of if_qflush and if_transmit pointers, to intercept
722 * packets from the network stack when netmap is active.
724 int (*if_transmit)(struct ifnet *, struct mbuf *);
726 /* copy of if_input for netmap_send_up() */
727 void (*if_input)(struct ifnet *, struct mbuf *);
729 /* Back reference to the parent ifnet struct. Used for
730 * hardware ports (emulated netmap included). */
731 struct ifnet *ifp; /* adapter is ifp->if_softc */
733 /*---- callbacks for this netmap adapter -----*/
735 * nm_dtor() is the cleanup routine called when destroying
737 * Called with NMG_LOCK held.
739 * nm_register() is called on NIOCREGIF and close() to enter
740 * or exit netmap mode on the NIC
741 	 *	Called with NMG_LOCK held.
743 * nm_txsync() pushes packets to the underlying hw/switch
745 * nm_rxsync() collects packets from the underlying hw/switch
747 * nm_config() returns configuration information from the OS
748 * Called with NMG_LOCK held.
750 * nm_krings_create() create and init the tx_rings and
751 * rx_rings arrays of kring structures. In particular,
752 * set the nm_sync callbacks for each ring.
753 * There is no need to also allocate the corresponding
754 * netmap_rings, since netmap_mem_rings_create() will always
755 * be called to provide the missing ones.
756 	 *	Called with NMG_LOCK held.
758 * nm_krings_delete() cleanup and delete the tx_rings and rx_rings
760 * Called with NMG_LOCK held.
762 * nm_notify() is used to act after data have become available
763 * (or the stopped state of the ring has changed)
764 * For hw devices this is typically a selwakeup(),
765 * but for NIC/host ports attached to a switch (or vice-versa)
766 * we also need to invoke the 'txsync' code downstream.
767 * This callback pointer is actually used only to initialize
769 * Return values are the same as for netmap_rx_irq().
771 void (*nm_dtor)(struct netmap_adapter *);
773 int (*nm_register)(struct netmap_adapter *, int onoff);
774 void (*nm_intr)(struct netmap_adapter *, int onoff);
776 int (*nm_txsync)(struct netmap_kring *kring, int flags);
777 int (*nm_rxsync)(struct netmap_kring *kring, int flags);
778 int (*nm_notify)(struct netmap_kring *kring, int flags);
779 #define NAF_FORCE_READ 1
780 #define NAF_FORCE_RECLAIM 2
781 #define NAF_CAN_FORWARD_DOWN 4
782 /* return configuration information */
783 int (*nm_config)(struct netmap_adapter *, struct nm_config_info *info);
784 int (*nm_krings_create)(struct netmap_adapter *);
785 void (*nm_krings_delete)(struct netmap_adapter *);
788 * nm_bdg_attach() initializes the na_vp field to point
789 * to an adapter that can be attached to a VALE switch. If the
790 * current adapter is already a VALE port, na_vp is simply a cast;
791 * otherwise, na_vp points to a netmap_bwrap_adapter.
792 * If applicable, this callback also initializes na_hostvp,
793 * that can be used to connect the adapter host rings to the
795 * Called with NMG_LOCK held.
797 	 * nm_bdg_ctl() is called on the actual attach/detach
798 	 * to/from the switch, to perform adapter-specific
800 * Called with NMG_LOCK held.
802 int (*nm_bdg_attach)(const char *bdg_name, struct netmap_adapter *);
803 int (*nm_bdg_ctl)(struct nmreq_header *, struct netmap_adapter *);
805 /* adapter used to attach this adapter to a VALE switch (if any) */
806 struct netmap_vp_adapter *na_vp;
807 /* adapter used to attach the host rings of this adapter
808 * to a VALE switch (if any) */
809 struct netmap_vp_adapter *na_hostvp;
812 /* standard refcount to control the lifetime of the adapter
813 * (it should be equal to the lifetime of the corresponding ifp)
817 /* memory allocator (opaque)
818 * We also cache a pointer to the lut_entry for translating
819 * buffer addresses, the total number of buffers and the buffer size.
821 struct netmap_mem_d *nm_mem;
822 struct netmap_mem_d *nm_mem_prev;
823 struct netmap_lut na_lut;
825 /* additional information attached to this adapter
826 * by other netmap subsystems. Currently used by
827 * bwrap, LINUX/v1000 and ptnetmap
831 /* array of pipes that have this adapter as a parent */
832 struct netmap_pipe_adapter **na_pipes;
833 int na_next_pipe; /* next free slot in the array */
834 int na_max_pipes; /* size of the array */
836 /* Offset of ethernet header for each packet. */
839 /* Max number of bytes that the NIC can store in the buffer
840 * referenced by each RX descriptor. This translates to the maximum
841 * bytes that a single netmap slot can reference. Larger packets
842 * require NS_MOREFRAG support. */
843 unsigned rx_buf_maxsize;
845 char name[NETMAP_REQ_IFNAMSIZ]; /* used at least by pipes */
848 static __inline u_int
849 nma_get_ndesc(struct netmap_adapter *na, enum txrx t)
851 return (t == NR_TX ? na->num_tx_desc : na->num_rx_desc);
855 nma_set_ndesc(struct netmap_adapter *na, enum txrx t, u_int v)
863 static __inline u_int
864 nma_get_nrings(struct netmap_adapter *na, enum txrx t)
866 return (t == NR_TX ? na->num_tx_rings : na->num_rx_rings);
870 nma_set_nrings(struct netmap_adapter *na, enum txrx t, u_int v)
873 na->num_tx_rings = v;
875 na->num_rx_rings = v;
878 static __inline struct netmap_kring**
879 NMR(struct netmap_adapter *na, enum txrx t)
881 return (t == NR_TX ? na->tx_rings : na->rx_rings);
884 int nma_intr_enable(struct netmap_adapter *na, int onoff);
887 * If the NIC is owned by the kernel
888 * (i.e., bridge), neither another bridge nor user can use it;
889 * if the NIC is owned by a user, only users can share it.
890 * Evaluation must be done under NMG_LOCK().
892 #define NETMAP_OWNED_BY_KERN(na) ((na)->na_flags & NAF_BUSY)
893 #define NETMAP_OWNED_BY_ANY(na) \
894 (NETMAP_OWNED_BY_KERN(na) || ((na)->active_fds > 0))
897 * derived netmap adapters for various types of ports
899 struct netmap_vp_adapter { /* VALE software port */
900 struct netmap_adapter up;
905 * bdg_port is the port number used in the bridge;
906 * na_bdg points to the bridge this NA is attached to.
909 struct nm_bridge *na_bdg;
911 int autodelete; /* remove the ifp on last reference */
913 /* Maximum Frame Size, used in bdg_mismatch_datapath() */
915 /* Last source MAC on this port */
920 struct netmap_hw_adapter { /* physical device */
921 struct netmap_adapter up;
924 struct net_device_ops nm_ndo;
925 struct ethtool_ops nm_eto;
927 const struct ethtool_ops* save_ethtool;
929 int (*nm_hw_register)(struct netmap_adapter *, int onoff);
933 /* Mitigation support. */
934 struct nm_generic_mit {
935 struct hrtimer mit_timer;
937 int mit_ring_idx; /* index of the ring being mitigated */
938 struct netmap_adapter *mit_na; /* backpointer */
941 struct netmap_generic_adapter { /* emulated device */
942 struct netmap_hw_adapter up;
944 /* Pointer to a previously used netmap adapter. */
945 struct netmap_adapter *prev;
947 /* Emulated netmap adapters support:
948 * - save_if_input saves the if_input hook (FreeBSD);
949 * - mit implements rx interrupt mitigation;
951 void (*save_if_input)(struct ifnet *, struct mbuf *);
953 struct nm_generic_mit *mit;
955 netdev_tx_t (*save_start_xmit)(struct mbuf *, struct ifnet *);
957 /* Is the adapter able to use multiple RX slots to scatter
958 * each packet pushed up by the driver? */
961 /* Is the transmission path controlled by a netmap-aware
962 * device queue (i.e. qdisc on linux)? */
965 #endif /* WITH_GENERIC */
968 netmap_real_rings(struct netmap_adapter *na, enum txrx t)
970 return nma_get_nrings(na, t) + !!(na->na_flags & NAF_HOST_RINGS);
974 struct nm_bdg_polling_state;
976 * Bridge wrapper for non VALE ports attached to a VALE switch.
978 * The real device must already have its own netmap adapter (hwna).
979 * The bridge wrapper and the hwna adapter share the same set of
980 * netmap rings and buffers, but they have two separate sets of
981 * krings descriptors, with tx/rx meanings swapped:
984 * bwrap krings rings krings hwna
985 * +------+ +------+ +-----+ +------+ +------+
986 * |tx_rings->| |\ /| |----| |<-tx_rings|
987 * | | +------+ \ / +-----+ +------+ | |
990 * | | +------+/ \+-----+ +------+ | |
991 * |rx_rings->| | | |----| |<-rx_rings|
992 * | | +------+ +-----+ +------+ | |
995 * - packets coming from the bridge go to the bwrap rx rings,
996 * which are also the hwna tx rings. The bwrap notify callback
997 * will then complete the hwna tx (see netmap_bwrap_notify).
999 * - packets coming from the outside go to the hwna rx rings,
1000 * which are also the bwrap tx rings. The (overwritten) hwna
1001 * notify method will then complete the bridge tx
1002 * (see netmap_bwrap_intr_notify).
1004 * The bridge wrapper may optionally connect the hwna 'host' rings
1005 * to the bridge. This is done by using a second port in the
1006 * bridge and connecting it to the 'host' netmap_vp_adapter
1007 * contained in the netmap_bwrap_adapter. The bwrap host adapter
1008 * cross-links the hwna host rings in the same way as shown above.
1010 * - packets coming from the bridge and directed to the host stack
1011 * are handled by the bwrap host notify callback
1012 * (see netmap_bwrap_host_notify)
1014 * - packets coming from the host stack are still handled by the
1015 * overwritten hwna notify callback (netmap_bwrap_intr_notify),
1016 * but are diverted to the host adapter depending on the ring number.
1019 struct netmap_bwrap_adapter {
1020 struct netmap_vp_adapter up;
1021 struct netmap_vp_adapter host; /* for host rings */
1022 struct netmap_adapter *hwna; /* the underlying device */
1025 * When we attach a physical interface to the bridge, we
1026 * allow the controlling process to terminate, so we need
1027 	 * a place to store the netmap_priv_d data structure.
1028 * This is only done when physical interfaces
1029 * are attached to a bridge.
1031 struct netmap_priv_d *na_kpriv;
1032 struct nm_bdg_polling_state *na_polling_state;
1033 /* we overwrite the hwna->na_vp pointer, so we save
1034 * here its original value, to be restored at detach
1036 struct netmap_vp_adapter *saved_na_vp;
1038 int nm_bdg_ctl_attach(struct nmreq_header *hdr, void *auth_token);
1039 int nm_bdg_ctl_detach(struct nmreq_header *hdr, void *auth_token);
1040 int nm_bdg_polling(struct nmreq_header *hdr);
1041 int netmap_bwrap_attach(const char *name, struct netmap_adapter *);
1042 int netmap_vi_create(struct nmreq_header *hdr, int);
1043 int nm_vi_create(struct nmreq_header *);
1044 int nm_vi_destroy(const char *name);
1045 int netmap_bdg_list(struct nmreq_header *hdr);
1047 #else /* !WITH_VALE */
1048 #define netmap_vi_create(hdr, a) (EOPNOTSUPP)
1049 #endif /* WITH_VALE */
1053 #define NM_MAXPIPES 64 /* max number of pipes per adapter */
1055 struct netmap_pipe_adapter {
1056 /* pipe identifier is up.name */
1057 struct netmap_adapter up;
1059 #define NM_PIPE_ROLE_MASTER 0x1
1060 #define NM_PIPE_ROLE_SLAVE 0x2
1061 int role; /* either NM_PIPE_ROLE_MASTER or NM_PIPE_ROLE_SLAVE */
1063 struct netmap_adapter *parent; /* adapter that owns the memory */
1064 struct netmap_pipe_adapter *peer; /* the other end of the pipe */
1065 int peer_ref; /* 1 iff we are holding a ref to the peer */
1066 struct ifnet *parent_ifp; /* maybe null */
1068 u_int parent_slot; /* index in the parent pipe array */
1071 #endif /* WITH_PIPES */
1074 /* return slots reserved to rx clients; used in drivers */
1075 static inline uint32_t
1076 nm_kr_rxspace(struct netmap_kring *k)
1078 int space = k->nr_hwtail - k->nr_hwcur;
1080 space += k->nkr_num_slots;
1081 ND("preserving %d rx slots %d -> %d", space, k->nr_hwcur, k->nr_hwtail);
1086 /* return slots reserved to tx clients */
1087 #define nm_kr_txspace(_k) nm_kr_rxspace(_k)
1090 /* True if no space in the tx ring, only valid after txsync_prologue */
1092 nm_kr_txempty(struct netmap_kring *kring)
1094 return kring->rcur == kring->nr_hwtail;
1097 /* True if no more completed slots in the rx ring, only valid after
1098 * rxsync_prologue */
1099 #define nm_kr_rxempty(_k) nm_kr_txempty(_k)
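/* Example (a sketch): since netmap rings always keep one empty slot,
 * the number of slots a driver may refill on an rx ring is:
 *
 *	u_int busy = nm_kr_rxspace(kring);		// owned by clients
 *	u_int avail = kring->nkr_num_slots - 1 - busy;	// free for refill
 */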
1102 * protect against multiple threads using the same ring.
1103 * also check that the ring has not been stopped or locked
1105 #define NM_KR_BUSY 1 /* some other thread is syncing the ring */
1106 #define NM_KR_STOPPED 2 /* unbounded stop (ifconfig down or driver unload) */
1107 #define NM_KR_LOCKED 3 /* bounded, brief stop for mutual exclusion */
1110 /* release the previously acquired right to use the *sync() methods of the ring */
1111 static __inline void nm_kr_put(struct netmap_kring *kr)
1113 NM_ATOMIC_CLEAR(&kr->nr_busy);
1117 /* true if the ifp that backed the adapter has disappeared (e.g., the
1118 * driver has been unloaded)
1120 static inline int nm_iszombie(struct netmap_adapter *na);
1122 /* try to obtain exclusive right to issue the *sync() operations on the ring.
1123 * The right is obtained and must be later relinquished via nm_kr_put() if and
1124 * only if nm_kr_tryget() returns 0.
1125 * If can_sleep is 1 there are only two other possible outcomes:
1126 * - the function returns NM_KR_BUSY
1127 * - the function returns NM_KR_STOPPED and sets the POLLERR bit in *perr
1129 * In both cases the caller will typically skip the ring, possibly collecting
1130 * errors along the way.
1131 * If the calling context does not allow sleeping, the caller must pass 0 in can_sleep.
1132 * In the latter case, the function may also return NM_KR_LOCKED and leave *perr
1133 * untouched: ideally, the caller should try again at a later time.
1135 static __inline int nm_kr_tryget(struct netmap_kring *kr, int can_sleep, int *perr)
1137 int busy = 1, stopped;
1138 /* check a first time without taking the lock
1139 * to avoid starvation for nm_kr_get()
1142 stopped = kr->nkr_stopped;
1143 if (unlikely(stopped)) {
1146 busy = NM_ATOMIC_TEST_AND_SET(&kr->nr_busy);
1147 /* we should not return NM_KR_BUSY if the ring was
1148 * actually stopped, so check another time after
1149 * the barrier provided by the atomic operation
1151 stopped = kr->nkr_stopped;
1152 if (unlikely(stopped)) {
1156 if (unlikely(nm_iszombie(kr->na))) {
1157 stopped = NM_KR_STOPPED;
1161 return unlikely(busy) ? NM_KR_BUSY : 0;
1166 if (stopped == NM_KR_STOPPED) {
1167 /* if POLLERR is defined we want to use it to simplify netmap_poll().
1168 * Otherwise, any non-zero value will do.
1171 #define NM_POLLERR POLLERR
1173 #define NM_POLLERR 1
1174 #endif /* POLLERR */
1176 *perr |= NM_POLLERR;
1178 } else if (can_sleep) {
1179 tsleep(kr, 0, "NM_KR_TRYGET", 4);
1185 /* put the ring in the 'stopped' state and wait for the current user (if any) to
1186 * notice. stopped must be either NM_KR_STOPPED or NM_KR_LOCKED
1188 static __inline void nm_kr_stop(struct netmap_kring *kr, int stopped)
1190 kr->nkr_stopped = stopped;
1191 while (NM_ATOMIC_TEST_AND_SET(&kr->nr_busy))
1192 tsleep(kr, 0, "NM_KR_GET", 4);
1195 /* restart a ring after a stop */
1196 static __inline void nm_kr_start(struct netmap_kring *kr)
1198 kr->nkr_stopped = 0;
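/* Example (a sketch): quiesce a ring around a brief reconfiguration:
 *
 *	nm_kr_stop(kring, NM_KR_LOCKED);	// bounded stop
 *	... update data structures reachable from the kring ...
 *	nm_kr_start(kring);
 */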
1204 * The following functions are used by individual drivers to
1205 * support netmap operation.
1207 * netmap_attach() initializes a struct netmap_adapter, allocating the
1208 * struct netmap_ring's and the struct selinfo.
1210 * netmap_detach() frees the memory allocated by netmap_attach().
1212 * netmap_transmit() replaces the if_transmit routine of the interface,
1213 * and is used to intercept packets coming from the stack.
1215 * netmap_load_map/netmap_reload_map are helper routines to set/reset
1216 * the dmamap for a packet buffer
1218 * netmap_reset() is a helper routine to be called in the hw driver
1219 * when reinitializing a ring. It should not be called by
1220 * virtual ports (vale, pipes, monitor)
1222 int netmap_attach(struct netmap_adapter *);
1223 int netmap_attach_ext(struct netmap_adapter *, size_t size, int override_reg);
1224 void netmap_detach(struct ifnet *);
1225 int netmap_transmit(struct ifnet *, struct mbuf *);
1226 struct netmap_slot *netmap_reset(struct netmap_adapter *na,
1227 enum txrx tx, u_int n, u_int new_cur);
1228 int netmap_ring_reinit(struct netmap_kring *);
1229 int netmap_rings_config_get(struct netmap_adapter *, struct nm_config_info *);
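/* Sketch of the attach sequence for a hypothetical driver "foo";
 * foo_netmap_*() and the sc (softc) fields are placeholders, not part
 * of the netmap API:
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = sc->ifp;
 *	na.num_tx_desc = sc->num_tx_desc;
 *	na.num_rx_desc = sc->num_rx_desc;
 *	na.num_tx_rings = na.num_rx_rings = sc->num_queues;
 *	na.nm_register = foo_netmap_reg;
 *	na.nm_txsync = foo_netmap_txsync;
 *	na.nm_rxsync = foo_netmap_rxsync;
 *	netmap_attach(&na);
 */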
1231 /* Return codes for netmap_*x_irq. */
1233 /* Driver should do normal interrupt processing, e.g. because
1234 * the interface is not in netmap mode. */
1236 /* Port is in netmap mode, and the interrupt work has been
1237 * completed. The driver does not have to notify netmap
1238 * again before the next interrupt. */
1239 NM_IRQ_COMPLETED = -1,
1240 /* Port is in netmap mode, but the interrupt work has not been
1241 * completed. The driver has to make sure netmap will be
1242 * notified again soon, even if no more interrupts come (e.g.
1243 * on Linux the driver should not call napi_complete()). */
1244 NM_IRQ_RESCHED = -2,
1247 /* default functions to handle rx/tx interrupts */
1248 int netmap_rx_irq(struct ifnet *, u_int, u_int *);
1249 #define netmap_tx_irq(_n, _q) netmap_rx_irq(_n, _q, NULL)
1250 int netmap_common_irq(struct netmap_adapter *, u_int, u_int *work_done);
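/* Example (a sketch): an RX interrupt handler defers to netmap when
 * the port is in netmap mode; ring_nr is the hypothetical queue index:
 *
 *	u_int work_done;
 *
 *	if (netmap_rx_irq(ifp, ring_nr, &work_done) != NM_IRQ_PASS)
 *		return;		// netmap took care of the ring
 *	... normal driver RX processing ...
 */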
1254 /* functions used by external modules to interface with VALE */
1255 #define netmap_vp_to_ifp(_vp) ((_vp)->up.ifp)
1256 #define netmap_ifp_to_vp(_ifp) (NA(_ifp)->na_vp)
1257 #define netmap_ifp_to_host_vp(_ifp) (NA(_ifp)->na_hostvp)
1258 #define netmap_bdg_idx(_vp) ((_vp)->bdg_port)
1259 const char *netmap_bdg_name(struct netmap_vp_adapter *);
1260 #else /* !WITH_VALE */
1261 #define netmap_vp_to_ifp(_vp) NULL
1262 #define netmap_ifp_to_vp(_ifp) NULL
1263 #define netmap_ifp_to_host_vp(_ifp) NULL
1264 #define netmap_bdg_idx(_vp) -1
1265 #define netmap_bdg_name(_vp) NULL
1266 #endif /* WITH_VALE */
1269 nm_netmap_on(struct netmap_adapter *na)
1271 return na && na->na_flags & NAF_NETMAP_ON;
1275 nm_native_on(struct netmap_adapter *na)
1277 return nm_netmap_on(na) && (na->na_flags & NAF_NATIVE);
1281 nm_iszombie(struct netmap_adapter *na)
1283 return na == NULL || (na->na_flags & NAF_ZOMBIE);
1287 nm_update_hostrings_mode(struct netmap_adapter *na)
1289 /* Process nr_mode and nr_pending_mode for host rings. */
1290 na->tx_rings[na->num_tx_rings]->nr_mode =
1291 na->tx_rings[na->num_tx_rings]->nr_pending_mode;
1292 na->rx_rings[na->num_rx_rings]->nr_mode =
1293 na->rx_rings[na->num_rx_rings]->nr_pending_mode;
1296 /* set/clear native flags and if_transmit/netdev_ops */
1298 nm_set_native_flags(struct netmap_adapter *na)
1300 struct ifnet *ifp = na->ifp;
1302 /* We do the setup for intercepting packets only if we are the
1303 	 * first user of this adapter. */
1304 if (na->active_fds > 0) {
1308 na->na_flags |= NAF_NETMAP_ON;
1309 #ifdef IFCAP_NETMAP /* or FreeBSD ? */
1310 ifp->if_capenable |= IFCAP_NETMAP;
1312 #if defined (__FreeBSD__)
1313 na->if_transmit = ifp->if_transmit;
1314 ifp->if_transmit = netmap_transmit;
1315 #elif defined (_WIN32)
1316 (void)ifp; /* prevent a warning */
1317 #elif defined (linux)
1318 na->if_transmit = (void *)ifp->netdev_ops;
1319 ifp->netdev_ops = &((struct netmap_hw_adapter *)na)->nm_ndo;
1320 ((struct netmap_hw_adapter *)na)->save_ethtool = ifp->ethtool_ops;
1321 ifp->ethtool_ops = &((struct netmap_hw_adapter*)na)->nm_eto;
1323 nm_update_hostrings_mode(na);
1327 nm_clear_native_flags(struct netmap_adapter *na)
1329 struct ifnet *ifp = na->ifp;
1331 /* We undo the setup for intercepting packets only if we are the
1332 	 * last user of this adapter. */
1333 if (na->active_fds > 0) {
1337 nm_update_hostrings_mode(na);
1339 #if defined(__FreeBSD__)
1340 ifp->if_transmit = na->if_transmit;
1341 #elif defined(_WIN32)
1342 (void)ifp; /* prevent a warning */
1344 ifp->netdev_ops = (void *)na->if_transmit;
1345 ifp->ethtool_ops = ((struct netmap_hw_adapter*)na)->save_ethtool;
1347 na->na_flags &= ~NAF_NETMAP_ON;
1348 #ifdef IFCAP_NETMAP /* or FreeBSD ? */
1349 ifp->if_capenable &= ~IFCAP_NETMAP;
1354 int netmap_linux_config(struct netmap_adapter *na,
1355 struct nm_config_info *info);
1359 * nm_*sync_prologue() functions are used in ioctl/poll and ptnetmap
1361 * We need netmap_ring* parameter, because in ptnetmap it is decoupled
1363 * The user-space ring pointers (head/cur/tail) are shared through
1364 * CSB between host and guest.
1368 * validates parameters in the ring/kring, returns a value for head
1369 * If any error, returns ring_size to force a reinit.
1371 uint32_t nm_txsync_prologue(struct netmap_kring *, struct netmap_ring *);
1375 * validates parameters in the ring/kring, returns a value for head
1376 * If any error, returns ring_size to force a reinit.
1378 uint32_t nm_rxsync_prologue(struct netmap_kring *, struct netmap_ring *);
1381 /* check/fix address and len in tx rings */
1382 #if 1 /* debug version */
1383 #define NM_CHECK_ADDR_LEN(_na, _a, _l) do { \
1384 if (_a == NETMAP_BUF_BASE(_na) || _l > NETMAP_BUF_SIZE(_na)) { \
1385 RD(5, "bad addr/len ring %d slot %d idx %d len %d", \
1386 kring->ring_id, nm_i, slot->buf_idx, len); \
1387 if (_l > NETMAP_BUF_SIZE(_na)) \
1388 _l = NETMAP_BUF_SIZE(_na); \
1390 #else /* no debug version */
1391 #define NM_CHECK_ADDR_LEN(_na, _a, _l) do { \
1392 if (_l > NETMAP_BUF_SIZE(_na)) \
1393 _l = NETMAP_BUF_SIZE(_na); \
1398 /*---------------------------------------------------------------*/
1400 * Support routines used by netmap subsystems
1401 * (native drivers, VALE, generic, pipes, monitors, ...)
1405 /* common routine for all functions that create a netmap adapter. It performs
1407 * - if the na points to an ifp, mark the ifp as netmap capable
1408 * using na as its native adapter;
1409 * - provide defaults for the setup callbacks and the memory allocator
1411 int netmap_attach_common(struct netmap_adapter *);
1412 /* fill priv->np_[tr]xq{first,last} using the ringid and flags information
1413 * coming from a struct nmreq_register
1415 int netmap_interp_ringid(struct netmap_priv_d *priv, uint32_t nr_mode,
1416 uint16_t nr_ringid, uint64_t nr_flags);
1417 /* update the ring parameters (number and size of tx and rx rings).
1418 * It calls the nm_config callback, if available.
1420 int netmap_update_config(struct netmap_adapter *na);
1421 /* create and initialize the common fields of the krings array.
1422 * using the information that must be already available in the na.
1423 * tailroom can be used to request the allocation of additional
1424 * tailroom bytes after the krings array. This is used by
1425 * netmap_vp_adapter's (i.e., VALE ports) to make room for
1426 * leasing-related data structures
1428 int netmap_krings_create(struct netmap_adapter *na, u_int tailroom);
1429 /* deletes the kring array of the adapter. The array must have
1430 * been created using netmap_krings_create
1432 void netmap_krings_delete(struct netmap_adapter *na);
1434 int netmap_hw_krings_create(struct netmap_adapter *na);
1435 void netmap_hw_krings_delete(struct netmap_adapter *na);
1437 /* set the stopped/enabled status of ring
1438 * When stopping, they also wait for all current activity on the ring to
1439 * terminate. The status change is then notified using the na nm_notify
1442 void netmap_set_ring(struct netmap_adapter *, u_int ring_id, enum txrx, int stopped);
1443 /* set the stopped/enabled status of all rings of the adapter. */
1444 void netmap_set_all_rings(struct netmap_adapter *, int stopped);
1445 /* convenience wrappers for netmap_set_all_rings */
1446 void netmap_disable_all_rings(struct ifnet *);
1447 void netmap_enable_all_rings(struct ifnet *);
1449 int netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
1450 uint32_t nr_mode, uint16_t nr_ringid, uint64_t nr_flags);
1451 void netmap_do_unregif(struct netmap_priv_d *priv);
1453 u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg);
1454 int netmap_get_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1455 struct ifnet **ifp, struct netmap_mem_d *nmd, int create);
1456 void netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp);
1457 int netmap_get_hw_na(struct ifnet *ifp,
1458 struct netmap_mem_d *nmd, struct netmap_adapter **na);
1463 * The following bridge-related functions are used by other
1466 * VALE only supports unicast or broadcast. The lookup
1467 * function can return 0 .. NM_BDG_MAXPORTS-1 for regular ports,
1468 * NM_BDG_MAXPORTS for broadcast, NM_BDG_MAXPORTS+1 to indicate
1471 typedef uint32_t (*bdg_lookup_fn_t)(struct nm_bdg_fwd *ft, uint8_t *ring_nr,
1472 struct netmap_vp_adapter *, void *private_data);
1473 typedef int (*bdg_config_fn_t)(struct nm_ifreq *);
1474 typedef void (*bdg_dtor_fn_t)(const struct netmap_vp_adapter *);
1475 typedef void *(*bdg_update_private_data_fn_t)(void *private_data, void *callback_data, int *error);
1476 struct netmap_bdg_ops {
1477 bdg_lookup_fn_t lookup;
1478 bdg_config_fn_t config;
1482 uint32_t netmap_bdg_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
1483 struct netmap_vp_adapter *, void *private_data);
1485 #define NM_BRIDGES 8 /* number of bridges */
1486 #define NM_BDG_MAXPORTS 254 /* up to 254 */
1487 #define NM_BDG_BROADCAST NM_BDG_MAXPORTS
1488 #define NM_BDG_NOPORT (NM_BDG_MAXPORTS+1)
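/* Example (a sketch): a trivial lookup function that floods every
 * packet; it can be installed in a struct netmap_bdg_ops and passed
 * to netmap_bdg_regops(), declared below:
 *
 *	static uint32_t
 *	my_lookup(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
 *	    struct netmap_vp_adapter *vpna, void *private_data)
 *	{
 *		(void)ft; (void)vpna; (void)private_data;
 *		*dst_ring = 0;
 *		return NM_BDG_BROADCAST;
 *	}
 */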
1490 /* these are redefined in case of no VALE support */
1491 int netmap_get_bdg_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1492 struct netmap_mem_d *nmd, int create);
1493 struct nm_bridge *netmap_init_bridges2(u_int);
1494 void netmap_uninit_bridges2(struct nm_bridge *, u_int);
1495 int netmap_init_bridges(void);
1496 void netmap_uninit_bridges(void);
1497 int netmap_bdg_regops(const char *name, struct netmap_bdg_ops *bdg_ops, void *private_data, void *auth_token);
1498 int nm_bdg_update_private_data(const char *name, bdg_update_private_data_fn_t callback,
1499 void *callback_data, void *auth_token);
1500 int netmap_bdg_config(struct nm_ifreq *nifr);
1501 void *netmap_bdg_create(const char *bdg_name, int *return_status);
1502 int netmap_bdg_destroy(const char *bdg_name, void *auth_token);
1504 #else /* !WITH_VALE */
1505 #define netmap_get_bdg_na(_1, _2, _3, _4) 0
1506 #define netmap_init_bridges(_1) 0
1507 #define netmap_uninit_bridges()
1508 #define netmap_bdg_regops(_1, _2) EINVAL
1509 #endif /* !WITH_VALE */
1512 /* max number of pipes per device */
1513 #define NM_MAXPIPES 64 /* XXX this should probably be a sysctl */
1514 void netmap_pipe_dealloc(struct netmap_adapter *);
1515 int netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1516 struct netmap_mem_d *nmd, int create);
1517 #else /* !WITH_PIPES */
1518 #define NM_MAXPIPES 0
1519 #define netmap_pipe_alloc(_1, _2) 0
1520 #define netmap_pipe_dealloc(_1)
1521 #define netmap_get_pipe_na(hdr, _2, _3, _4) \
1522 ((strchr(hdr->nr_name, '{') != NULL || strchr(hdr->nr_name, '}') != NULL) ? EOPNOTSUPP : 0)
1526 int netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1527 struct netmap_mem_d *nmd, int create);
1528 void netmap_monitor_stop(struct netmap_adapter *na);
1530 #define netmap_get_monitor_na(hdr, _2, _3, _4) \
1531 (((struct nmreq_register *)(uintptr_t)hdr->nr_body)->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX) ? EOPNOTSUPP : 0)
1534 #ifdef CONFIG_NET_NS
1535 struct net *netmap_bns_get(void);
1536 void netmap_bns_put(struct net *);
1537 void netmap_bns_getbridges(struct nm_bridge **, u_int *);
1539 #define netmap_bns_get()
1540 #define netmap_bns_put(_1)
1541 #define netmap_bns_getbridges(b, n) \
1542 do { *b = nm_bridges; *n = NM_BRIDGES; } while (0)
1545 /* Various prototypes */
1546 int netmap_poll(struct netmap_priv_d *, int events, NM_SELRECORD_T *td);
1547 int netmap_init(void);
1548 void netmap_fini(void);
1549 int netmap_get_memory(struct netmap_priv_d* p);
1550 void netmap_dtor(void *data);
1552 int netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
1553 struct thread *, int nr_body_is_user);
1554 int netmap_ioctl_legacy(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
1556 size_t nmreq_size_by_type(uint16_t nr_reqtype);
1558 /* netmap_adapter creation/destruction */
1560 // #define NM_DEBUG_PUTGET 1
1562 #ifdef NM_DEBUG_PUTGET
1564 #define NM_DBG(f) __##f
1566 void __netmap_adapter_get(struct netmap_adapter *na);
1568 #define netmap_adapter_get(na) \
1570 struct netmap_adapter *__na = na; \
1571 D("getting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \
1572 __netmap_adapter_get(__na); \
1575 int __netmap_adapter_put(struct netmap_adapter *na);
1577 #define netmap_adapter_put(na) \
1579 struct netmap_adapter *__na = na; \
1580 D("putting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \
1581 __netmap_adapter_put(__na); \
1584 #else /* !NM_DEBUG_PUTGET */
1587 void netmap_adapter_get(struct netmap_adapter *na);
1588 int netmap_adapter_put(struct netmap_adapter *na);
1590 #endif /* !NM_DEBUG_PUTGET */
1596 #define NETMAP_BUF_BASE(_na) ((_na)->na_lut.lut[0].vaddr)
1597 #define NETMAP_BUF_SIZE(_na) ((_na)->na_lut.objsize)
1598 extern int netmap_no_pendintr;
1599 extern int netmap_mitigate;
1600 extern int netmap_verbose; /* for debugging */
1601 enum { /* verbose flags */
1602 NM_VERB_ON = 1, /* generic verbose */
1603 NM_VERB_HOST = 0x2, /* verbose host stack */
1604 NM_VERB_RXSYNC = 0x10, /* verbose on rxsync/txsync */
1605 NM_VERB_TXSYNC = 0x20,
1606 NM_VERB_RXINTR = 0x100, /* verbose on rx/tx intr (driver) */
1607 NM_VERB_TXINTR = 0x200,
1608 NM_VERB_NIC_RXSYNC = 0x1000, /* verbose on rx/tx intr (driver) */
1609 NM_VERB_NIC_TXSYNC = 0x2000,
1612 extern int netmap_txsync_retry;
1613 extern int netmap_flags;
1614 extern int netmap_generic_mit;
1615 extern int netmap_generic_ringsize;
1616 extern int netmap_generic_rings;
1618 extern int netmap_generic_txqdisc;
1620 extern int ptnetmap_tx_workers;
1623 * NA returns a pointer to the struct netmap_adapter from the ifp,
1624 * WNA is used to write it.
1626 #define NA(_ifp) ((struct netmap_adapter *)WNA(_ifp))
1629 * On old versions of FreeBSD, NA(ifp) is a pspare. On linux we
1630 * overload another pointer in the netdev.
1632 * We check if NA(ifp) is set and its first element has a related
1633 * magic value. The capenable is within the struct netmap_adapter.
1635 #define NETMAP_MAGIC 0x52697a7a
1637 #define NM_NA_VALID(ifp) (NA(ifp) && \
1638 ((uint32_t)(uintptr_t)NA(ifp) ^ NA(ifp)->magic) == NETMAP_MAGIC )
1640 #define NM_ATTACH_NA(ifp, na) do { \
1644 ((uint32_t)(uintptr_t)NA(ifp)) ^ NETMAP_MAGIC; \
1647 #define NM_IS_NATIVE(ifp) (NM_NA_VALID(ifp) && NA(ifp)->nm_dtor == netmap_hw_dtor)
1649 #if defined(__FreeBSD__)
1651 /* Assigns the device IOMMU domain to an allocator.
1652 * Returns -ENOMEM in case the domain is different */
1653 #define nm_iommu_group_id(dev) (0)
1655 /* Callback invoked by the dma machinery after a successful dmamap_load */
1656 static void netmap_dmamap_cb(__unused void *arg,
1657 __unused bus_dma_segment_t * segs, __unused int nseg, __unused int error)
1661 /* bus_dmamap_load wrapper: call aforementioned function if map != NULL.
1662 * XXX can we do it without a callback ?
1665 netmap_load_map(struct netmap_adapter *na,
1666 bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1669 bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na),
1670 netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
1675 netmap_unload_map(struct netmap_adapter *na,
1676 bus_dma_tag_t tag, bus_dmamap_t map)
1679 bus_dmamap_unload(tag, map);
1682 #define netmap_sync_map(na, tag, map, sz, t)
1684 /* update the map when a buffer changes. */
1686 netmap_reload_map(struct netmap_adapter *na,
1687 bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1690 bus_dmamap_unload(tag, map);
1691 bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na),
1692 netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
1696 #elif defined(_WIN32)
1700 int nm_iommu_group_id(bus_dma_tag_t dev);
1701 #include <linux/dma-mapping.h>
1705 * dma_map_single(&pdev->dev, virt_addr, len, direction)
1706 * dma_unmap_single(&adapter->pdev->dev, phys_addr, len, direction)
1709 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[l];
1710 /* set time_stamp *before* dma to help avoid a possible race */
1711 buffer_info->time_stamp = jiffies;
1712 buffer_info->mapped_as_page = false;
1713 buffer_info->length = len;
1714 //buffer_info->next_to_watch = l;
1715 /* reload dma map */
1716 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1717 NETMAP_BUF_SIZE, DMA_TO_DEVICE);
1718 buffer_info->dma = dma_map_single(&adapter->pdev->dev,
1719 addr, NETMAP_BUF_SIZE, DMA_TO_DEVICE);
1721 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1722 D("dma mapping error");
1723 /* goto dma_error; See e1000_put_txbuf() */
1726 tx_desc->buffer_addr = htole64(buffer_info->dma); //XXX
1731 netmap_load_map(struct netmap_adapter *na,
1732 bus_dma_tag_t tag, bus_dmamap_t map, void *buf, u_int size)
1735 *map = dma_map_single(na->pdev, buf, size,
1737 if (dma_mapping_error(na->pdev, *map)) {
1746 netmap_unload_map(struct netmap_adapter *na,
1747 bus_dma_tag_t tag, bus_dmamap_t map, u_int sz)
1750 dma_unmap_single(na->pdev, *map, sz,
1756 netmap_sync_map(struct netmap_adapter *na,
1757 bus_dma_tag_t tag, bus_dmamap_t map, u_int sz, enum txrx t)
1761 dma_sync_single_for_cpu(na->pdev, *map, sz,
1764 dma_sync_single_for_device(na->pdev, *map, sz,
1770 netmap_reload_map(struct netmap_adapter *na,
1771 bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1773 u_int sz = NETMAP_BUF_SIZE(na);
1776 dma_unmap_single(na->pdev, *map, sz,
1780 *map = dma_map_single(na->pdev, buf, sz,
1788 * functions to map NIC to KRING indexes (n2k) and vice versa (k2n)
1791 netmap_idx_n2k(struct netmap_kring *kr, int idx)
1793 int n = kr->nkr_num_slots;
1794 idx += kr->nkr_hwofs;
1805 netmap_idx_k2n(struct netmap_kring *kr, int idx)
1807 int n = kr->nkr_num_slots;
1808 idx -= kr->nkr_hwofs;
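/* Example (illustrative): find the netmap slot that corresponds to
 * NIC descriptor j on a given kring:
 *
 *	u_int nm_i = netmap_idx_n2k(kring, j);
 *	struct netmap_slot *slot = &kring->ring->slot[nm_i];
 */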
1818 /* Entries of the look-up table. */
1821 void *vaddr; /* virtual address. */
1822 vm_paddr_t paddr; /* physical address. */
1824 #else /* linux & _WIN32 */
1825 /* dma-mapping in linux can assign a buffer a different address
1826 * depending on the device, so we need to have a separate
1827 * physical-address look-up table for each na.
1828 * We can still share the vaddrs, though, therefore we split
1829 * the lut_entry structure.
1832 void *vaddr; /* virtual address. */
1836 vm_paddr_t paddr; /* physical address. */
1838 #endif /* linux & _WIN32 */
1840 struct netmap_obj_pool;
1843 * NMB returns the virtual address of a buffer (buffer 0 on bad index)
1844 * PNMB also fills the physical address
1846 static inline void *
1847 NMB(struct netmap_adapter *na, struct netmap_slot *slot)
1849 struct lut_entry *lut = na->na_lut.lut;
1850 uint32_t i = slot->buf_idx;
1851 return (unlikely(i >= na->na_lut.objtotal)) ?
1852 lut[0].vaddr : lut[i].vaddr;
1855 static inline void *
1856 PNMB(struct netmap_adapter *na, struct netmap_slot *slot, uint64_t *pp)
1858 uint32_t i = slot->buf_idx;
1859 struct lut_entry *lut = na->na_lut.lut;
1860 struct plut_entry *plut = na->na_lut.plut;
1861 void *ret = (i >= na->na_lut.objtotal) ? lut[0].vaddr : lut[i].vaddr;
1864 *pp = (i >= na->na_lut.objtotal) ? (uint64_t)plut[0].paddr.QuadPart : (uint64_t)plut[i].paddr.QuadPart;
1866 *pp = (i >= na->na_lut.objtotal) ? plut[0].paddr : plut[i].paddr;
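/* Example (a sketch, inside a txsync loop): translate a slot into the
 * addresses to be programmed into a TX descriptor; error handling
 * omitted:
 *
 *	struct netmap_slot *slot = &ring->slot[nm_i];
 *	u_int len = slot->len;
 *	uint64_t paddr;
 *	void *addr = PNMB(na, slot, &paddr);
 *
 *	NM_CHECK_ADDR_LEN(na, addr, len);
 *	... write (paddr, len) into the descriptor ...
 */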
/*
 * Structure associated to each netmap file descriptor.
 * It is created on open and left unbound (np_nifp == NULL).
 * A successful NIOCREGIF will set np_nifp and the first few fields;
 * this is protected by a global lock (NMG_LOCK) due to low contention.
 *
 * np_refs counts the number of references to the structure: one for the fd,
 * plus (on FreeBSD) one for each active mmap which we track ourselves
 * (linux automatically tracks them, but FreeBSD does not).
 * np_refs is protected by NMG_LOCK.
 *
 * Read access to the structure is lock free, because np_nifp once set
 * can only go to 0 when nobody is using the entry anymore. Readers
 * must check that np_nifp != NULL before using the other fields.
 */
struct netmap_priv_d {
	struct netmap_if * volatile np_nifp;	/* netmap if descriptor. */

	struct netmap_adapter	*np_na;
	struct ifnet		*np_ifp;
	uint32_t		np_flags;	/* from the ioctl */
	u_int			np_qfirst[NR_TXRX],
				np_qlast[NR_TXRX]; /* range of tx/rx rings to scan */
	int			np_sync_flags;	/* to be passed to nm_sync */

	int			np_refs;	/* use with NMG_LOCK held */

	/* pointers to the selinfo to be used for selrecord.
	 * Either the local or the global one depending on the
	 * number of rings.
	 */
	NM_SELINFO_T *np_si[NR_TXRX];
	struct thread	*np_td;		/* kqueue, just debugging */
};
struct netmap_priv_d *netmap_priv_new(void);
void netmap_priv_delete(struct netmap_priv_d *);
static inline int nm_kring_pending(struct netmap_priv_d *np)
{
	struct netmap_adapter *na = np->np_na;
	enum txrx t;
	int i;

	for_rx_tx(t) {
		for (i = np->np_qfirst[t]; i < np->np_qlast[t]; i++) {
			struct netmap_kring *kring = NMR(na, t)[i];
			if (kring->nr_mode != kring->nr_pending_mode) {
				return 1;
			}
		}
	}
	return 0;
}
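/*
 * Illustrative sketch: on the register path, the core can use
 * nm_kring_pending() to decide whether the rings bound to this
 * file descriptor still have to switch mode:
 *
 *	if (nm_kring_pending(priv))
 *		error = na->nm_register(na, 1);	// 1 = on
 */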
#ifdef WITH_PIPES
int netmap_pipe_txsync(struct netmap_kring *txkring, int flags);
int netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags);
#endif /* WITH_PIPES */
#ifdef WITH_MONITOR

struct netmap_monitor_adapter {
	struct netmap_adapter up;

	struct netmap_priv_d priv;
	uint32_t flags;
};

#endif /* WITH_MONITOR */
#ifdef WITH_GENERIC
/*
 * generic netmap emulation for devices that do not have
 * native netmap support.
 */
int generic_netmap_attach(struct ifnet *ifp);
int generic_rx_handler(struct ifnet *ifp, struct mbuf *m);

int nm_os_catch_rx(struct netmap_generic_adapter *gna, int intercept);
int nm_os_catch_tx(struct netmap_generic_adapter *gna, int intercept);

int na_is_generic(struct netmap_adapter *na);
/*
 * the generic transmit routine is passed a structure to optionally
 * build a queue of descriptors, in an OS-specific way.
 * The payload is at addr, if non-null, and the routine should send or queue
 * the packet, returning 0 if successful, 1 on failure.
 *
 * At the end, if head is non-null, there will be an additional call
 * to the function with addr = NULL; this should tell the OS-specific
 * routine to send the queue and free any resources. Failure is ignored.
 */
struct nm_os_gen_arg {
	struct ifnet *ifp;
	void *m;	/* os-specific mbuf-like object */
	void *head, *tail; /* tailq, if the OS-specific routine needs to build one */
	void *addr;	/* payload of current packet */
	u_int len;	/* packet length */
	u_int ring_nr;	/* transmit ring index */
	u_int qevent;	/* in txqdisc mode, place an event on this mbuf */
};
int nm_os_generic_xmit_frame(struct nm_os_gen_arg *);
int nm_os_generic_find_num_desc(struct ifnet *ifp, u_int *tx, u_int *rx);
void nm_os_generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq);
void nm_os_generic_set_features(struct netmap_generic_adapter *gna);
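/*
 * Illustrative sketch of the protocol described above (caller side,
 * with placeholder names): one call per packet, then a final call
 * with addr == NULL to flush the OS-specific queue.
 *
 *	struct nm_os_gen_arg a;
 *	bzero(&a, sizeof(a));
 *	a.ifp = ifp;
 *	a.ring_nr = ring_nr;
 *	for (each slot to transmit) {
 *		a.m = mbuf; a.addr = NMB(na, slot); a.len = slot->len;
 *		if (nm_os_generic_xmit_frame(&a))
 *			break;			// failed, stop the batch
 *	}
 *	if (a.head != NULL) {
 *		a.addr = NULL;			// flush request
 *		nm_os_generic_xmit_frame(&a);	// failure is ignored
 *	}
 */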
static inline struct ifnet*
netmap_generic_getifp(struct netmap_generic_adapter *gna)
{
	if (gna->prev)
		return gna->prev->ifp;

	return gna->up.up.ifp;
}
void netmap_generic_irq(struct netmap_adapter *na, u_int q, u_int *work_done);

//#define RATE_GENERIC /* Enables communication statistics for generic. */
#ifdef RATE_GENERIC
void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi);
#else
#define generic_rate(txp, txs, txi, rxp, rxs, rxi)
#endif
/*
 * netmap_mitigation API. This is used by the generic adapter
 * to reduce the number of interrupt requests/selwakeup
 * to clients on incoming packets.
 */
void nm_os_mitigation_init(struct nm_generic_mit *mit, int idx,
				struct netmap_adapter *na);
void nm_os_mitigation_start(struct nm_generic_mit *mit);
void nm_os_mitigation_restart(struct nm_generic_mit *mit);
int nm_os_mitigation_active(struct nm_generic_mit *mit);
void nm_os_mitigation_cleanup(struct nm_generic_mit *mit);
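/*
 * Illustrative sketch: on the generic RX path, notifications are
 * delivered immediately only when no mitigation interval is pending:
 *
 *	if (!nm_os_mitigation_active(mit)) {
 *		netmap_generic_irq(na, q, &work_done);	// wake clients now
 *		nm_os_mitigation_start(mit);		// arm the interval
 *	}
 *	// else: the packet is only counted, and clients are woken up
 *	// when the mitigation callback fires.
 */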
#else /* !WITH_GENERIC */
#define generic_netmap_attach(ifp)	(EOPNOTSUPP)
#define na_is_generic(na)		(0)
#endif /* WITH_GENERIC */
/* Shared declarations for the VALE switch. */

/*
 * Each transmit queue accumulates a batch of packets into
 * a structure before forwarding. Packets to the same
 * destination are put in a list using ft_next as a link field.
 * ft_frags and ft_next are valid only on the first fragment.
 */
struct nm_bdg_fwd {	/* forwarding entry for a bridge */
	void *ft_buf;		/* netmap or indirect buffer */
	uint8_t ft_frags;	/* how many fragments (only on 1st frag) */
	uint16_t ft_offset;	/* dst port (unused) */
	uint16_t ft_flags;	/* flags, e.g. indirect */
	uint16_t ft_len;	/* src fragment len */
	uint16_t ft_next;	/* next packet to same destination */
};
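/*
 * Example of the chaining described above: if entries 0 and 5 of a
 * batch go to the same destination port, the lookup stage can set
 * ft[0].ft_next = 5, so the forwarding stage visits all packets for
 * one destination before moving to the next.
 */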
/* struct 'virtio_net_hdr' from linux. */
struct nm_vnet_hdr {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	/* Use csum_start, csum_offset */
#define VIRTIO_NET_HDR_F_DATA_VALID	2	/* Csum is valid */
	uint8_t flags;
#define VIRTIO_NET_HDR_GSO_NONE		0	/* Not a GSO frame */
#define VIRTIO_NET_HDR_GSO_TCPV4	1	/* GSO frame, IPv4 TCP (TSO) */
#define VIRTIO_NET_HDR_GSO_UDP		3	/* GSO frame, IPv4 UDP (UFO) */
#define VIRTIO_NET_HDR_GSO_TCPV6	4	/* GSO frame, IPv6 TCP */
#define VIRTIO_NET_HDR_GSO_ECN		0x80	/* TCP has ECN set */
	uint8_t gso_type;
	uint16_t hdr_len;
	uint16_t gso_size;
	uint16_t csum_start;
	uint16_t csum_offset;
};

#define WORST_CASE_GSO_HEADER	(14+40+60)  /* IPv6 + TCP */
/* Private definitions for IPv4, IPv6, UDP and TCP headers. */

struct nm_iphdr {
	uint8_t		version_ihl;
	uint8_t		tos;
	uint16_t	tot_len;
	uint16_t	id;
	uint16_t	frag_off;
	uint8_t		ttl;
	uint8_t		protocol;
	uint16_t	check;
	uint32_t	saddr;
	uint32_t	daddr;
	/* The options start here. */
};

struct nm_tcphdr {
	uint16_t	source;
	uint16_t	dest;
	uint32_t	seq;
	uint32_t	ack_seq;
	uint8_t		doff;	/* Data offset + Reserved */
	uint8_t		flags;
	uint16_t	window;
	uint16_t	check;
	uint16_t	urg_ptr;
};

struct nm_udphdr {
	uint16_t	source;
	uint16_t	dest;
	uint16_t	len;
	uint16_t	check;
};

struct nm_ipv6hdr {
	uint8_t		priority_version;
	uint8_t		flow_lbl[3];

	uint16_t	payload_len;
	uint8_t		nexthdr;
	uint8_t		hop_limit;

	uint8_t		saddr[16];
	uint8_t		daddr[16];
};
/* Type used to store a checksum (in host byte order) that hasn't been
 * folded yet.
 */
#define rawsum_t uint32_t

rawsum_t nm_os_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum);
uint16_t nm_os_csum_ipv4(struct nm_iphdr *iph);
void nm_os_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
		      size_t datalen, uint16_t *check);
void nm_os_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data,
		      size_t datalen, uint16_t *check);
uint16_t nm_os_csum_fold(rawsum_t cur_sum);
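/*
 * Illustrative sketch: recompute the IPv4 and TCP checksums of a
 * frame at 'buf' (assumes plain Ethernet framing, no VLAN tag and
 * no IP options; 'datalen' is the TCP header plus payload length):
 *
 *	struct nm_iphdr *iph = (struct nm_iphdr *)(buf + 14);
 *	struct nm_tcphdr *tcph = (struct nm_tcphdr *)(iph + 1);
 *	iph->check = 0;
 *	iph->check = nm_os_csum_ipv4(iph);
 *	tcph->check = 0;
 *	nm_os_csum_tcpudp_ipv4(iph, tcph, datalen, &tcph->check);
 */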
void bdg_mismatch_datapath(struct netmap_vp_adapter *na,
			   struct netmap_vp_adapter *dst_na,
			   const struct nm_bdg_fwd *ft_p,
			   struct netmap_ring *dst_ring,
			   u_int *j, u_int lim, u_int *howmany);
/* persistent virtual port routines */
int nm_os_vi_persist(const char *, struct ifnet **);
void nm_os_vi_detach(struct ifnet *);
void nm_os_vi_init_index(void);
/*
 * kernel thread routines
 */
struct nm_kctx; /* OS-specific kernel context - opaque */
typedef void (*nm_kctx_worker_fn_t)(void *data, int is_kthread);
typedef void (*nm_kctx_notify_fn_t)(void *data);
/* kthread configuration */
struct nm_kctx_cfg {
	long			type;		/* kthread type/identifier */
	nm_kctx_worker_fn_t	worker_fn;	/* worker function */
	void			*worker_private;/* worker parameter */
	nm_kctx_notify_fn_t	notify_fn;	/* notify function */
	int			attach_user;	/* attach kthread to user process */
	int			use_kthread;	/* use a kthread for the context */
};
/* kthread creation and management */
struct nm_kctx *nm_os_kctx_create(struct nm_kctx_cfg *cfg,
					void *opaque);
int nm_os_kctx_worker_start(struct nm_kctx *);
void nm_os_kctx_worker_stop(struct nm_kctx *);
void nm_os_kctx_destroy(struct nm_kctx *);
void nm_os_kctx_worker_wakeup(struct nm_kctx *nmk);
void nm_os_kctx_send_irq(struct nm_kctx *);
void nm_os_kctx_worker_setaff(struct nm_kctx *, int);
u_int nm_os_ncpus(void);
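/*
 * Illustrative sketch (my_worker/my_data are placeholder names):
 * create and start a worker in its own kthread, then kick it:
 *
 *	struct nm_kctx_cfg cfg;
 *	struct nm_kctx *kctx;
 *
 *	bzero(&cfg, sizeof(cfg));
 *	cfg.worker_fn = my_worker;	// void my_worker(void *d, int k)
 *	cfg.worker_private = my_data;
 *	cfg.use_kthread = 1;
 *	kctx = nm_os_kctx_create(&cfg, NULL);
 *	if (kctx != NULL && nm_os_kctx_worker_start(kctx) == 0)
 *		nm_os_kctx_worker_wakeup(kctx);
 */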
#ifdef WITH_PTNETMAP_HOST
/*
 * netmap adapter for host ptnetmap ports
 */
struct netmap_pt_host_adapter {
	struct netmap_adapter up;

	/* the passed-through adapter */
	struct netmap_adapter *parent;
	/* parent->na_flags, saved at NETMAP_PT_HOST_CREATE time,
	 * and restored at NETMAP_PT_HOST_DELETE time */
	uint32_t parent_na_flags;

	int (*parent_nm_notify)(struct netmap_kring *kring, int flags);
	void *ptns;
};
/* ptnetmap host-side routines */
int netmap_get_pt_host_na(struct nmreq_header *hdr, struct netmap_adapter **na,
	struct netmap_mem_d * nmd, int create);
int ptnetmap_ctl(const char *nr_name, int create, struct netmap_adapter *na);

static inline int
nm_ptnetmap_host_on(struct netmap_adapter *na)
{
	return na && (na->na_flags & NAF_PTNETMAP_HOST);
}
#else /* !WITH_PTNETMAP_HOST */
#define netmap_get_pt_host_na(hdr, _2, _3, _4) \
	(((struct nmreq_register *)(uintptr_t)hdr->nr_body)->nr_flags & \
		(NR_PTNETMAP_HOST) ? EOPNOTSUPP : 0)
#define ptnetmap_ctl(_1, _2, _3)	EINVAL
#define nm_ptnetmap_host_on(_1)		EINVAL
#endif /* !WITH_PTNETMAP_HOST */
#ifdef WITH_PTNETMAP_GUEST
/* ptnetmap GUEST routines */

/*
 * netmap adapter for guest ptnetmap ports
 */
struct netmap_pt_guest_adapter {
	/* The netmap adapter to be used by netmap applications.
	 * This field must be the first, to allow upcast. */
	struct netmap_hw_adapter hwup;

	/* The netmap adapter to be used by the driver. */
	struct netmap_hw_adapter dr;

	/* Reference counter to track users of backend netmap port: the
	 * network stack and netmap clients.
	 * Used to decide when we need (de)allocate krings/rings and
	 * start (stop) ptnetmap kthreads. */
	int backend_regifs;
};
int netmap_pt_guest_attach(struct netmap_adapter *na,
			unsigned int nifp_offset,
			unsigned int memid);
struct ptnet_csb_gh;
struct ptnet_csb_hg;
bool netmap_pt_guest_txsync(struct ptnet_csb_gh *ptgh,
			struct ptnet_csb_hg *pthg,
			struct netmap_kring *kring,
			int flags);
bool netmap_pt_guest_rxsync(struct ptnet_csb_gh *ptgh,
			struct ptnet_csb_hg *pthg,
			struct netmap_kring *kring, int flags);
int ptnet_nm_krings_create(struct netmap_adapter *na);
void ptnet_nm_krings_delete(struct netmap_adapter *na);
void ptnet_nm_dtor(struct netmap_adapter *na);
#endif /* WITH_PTNETMAP_GUEST */
struct nmreq_option * nmreq_findoption(struct nmreq_option *, uint16_t);
int nmreq_checkduplicate(struct nmreq_option *);
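/*
 * Illustrative sketch: scan the option list attached to a request
 * header for a given option type, rejecting duplicates
 * (NETMAP_REQ_OPT_EXTMEM is just an example type):
 *
 *	opt = nmreq_findoption((struct nmreq_option *)(uintptr_t)hdr->nr_options,
 *				NETMAP_REQ_OPT_EXTMEM);
 *	if (opt != NULL && nmreq_checkduplicate(opt) != 0)
 *		return EINVAL;		// option given more than once
 */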
#endif /* _NET_NETMAP_KERN_H_ */