2 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
3 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * The header contains the definitions of constants and function
31 * prototypes used only in kernelspace.
34 #ifndef _NET_NETMAP_KERN_H_
35 #define _NET_NETMAP_KERN_H_
39 #if defined(CONFIG_NETMAP_VALE)
42 #if defined(CONFIG_NETMAP_PIPE)
45 #if defined(CONFIG_NETMAP_MONITOR)
48 #if defined(CONFIG_NETMAP_GENERIC)
51 #if defined(CONFIG_NETMAP_V1000)
57 #define WITH_VALE // comment out to disable VALE support
64 #if defined(__FreeBSD__)
66 #define likely(x) __builtin_expect((long)!!(x), 1L)
67 #define unlikely(x) __builtin_expect((long)!!(x), 0L)
69 #define NM_LOCK_T struct mtx /* low level spinlock, used to protect queues */
71 #define NM_MTX_T struct sx /* OS-specific mutex (sleepable) */
72 #define NM_MTX_INIT(m) sx_init(&(m), #m)
73 #define NM_MTX_DESTROY(m) sx_destroy(&(m))
74 #define NM_MTX_LOCK(m) sx_xlock(&(m))
75 #define NM_MTX_UNLOCK(m) sx_xunlock(&(m))
76 #define NM_MTX_ASSERT(m) sx_assert(&(m), SA_XLOCKED)
78 #define NM_SELINFO_T struct nm_selinfo
79 #define MBUF_LEN(m) ((m)->m_pkthdr.len)
80 #define MBUF_IFP(m) ((m)->m_pkthdr.rcvif)
81 #define NM_SEND_UP(ifp, m) ((NA(ifp))->if_input)(ifp, m)
83 #define NM_ATOMIC_T volatile int // XXX ?
84 /* atomic operations */
85 #include <machine/atomic.h>
86 #define NM_ATOMIC_TEST_AND_SET(p) (!atomic_cmpset_acq_int((p), 0, 1))
87 #define NM_ATOMIC_CLEAR(p) atomic_store_rel_int((p), 0)
89 #if __FreeBSD_version >= 1100030
90 #define WNA(_ifp) (_ifp)->if_netmap
91 #else /* older FreeBSD */
92 #define WNA(_ifp) (_ifp)->if_pspare[0]
93 #endif /* older FreeBSD */
95 #if __FreeBSD_version >= 1100005
96 struct netmap_adapter *netmap_getna(if_t ifp);
99 #if __FreeBSD_version >= 1100027
100 #define GET_MBUF_REFCNT(m) ((m)->m_ext.ext_cnt ? *((m)->m_ext.ext_cnt) : -1)
101 #define SET_MBUF_REFCNT(m, x) *((m)->m_ext.ext_cnt) = x
102 #define PNT_MBUF_REFCNT(m) ((m)->m_ext.ext_cnt)
104 #define GET_MBUF_REFCNT(m) ((m)->m_ext.ref_cnt ? *((m)->m_ext.ref_cnt) : -1)
105 #define SET_MBUF_REFCNT(m, x) *((m)->m_ext.ref_cnt) = x
106 #define PNT_MBUF_REFCNT(m) ((m)->m_ext.ref_cnt)
109 MALLOC_DECLARE(M_NETMAP);
116 void freebsd_selwakeup(struct nm_selinfo *si, int pri);
118 // XXX linux struct, not used in FreeBSD
119 struct net_device_ops {
125 #define NM_BNS_GET(b)
126 #define NM_BNS_PUT(b)
128 #elif defined (linux)
130 #define NM_LOCK_T safe_spinlock_t // see bsd_glue.h
131 #define NM_SELINFO_T wait_queue_head_t
132 #define MBUF_LEN(m) ((m)->len)
133 #define MBUF_IFP(m) ((m)->dev)
134 #define NM_SEND_UP(ifp, m) \
136 m->priority = NM_MAGIC_PRIORITY_RX; \
140 #define NM_ATOMIC_T volatile long unsigned int
142 #define NM_MTX_T struct mutex /* OS-specific sleepable lock */
143 #define NM_MTX_INIT(m) mutex_init(&(m))
144 #define NM_MTX_DESTROY(m) do { (void)(m); } while (0)
145 #define NM_MTX_LOCK(m) mutex_lock(&(m))
146 #define NM_MTX_UNLOCK(m) mutex_unlock(&(m))
147 #define NM_MTX_ASSERT(m) mutex_is_locked(&(m))
151 #endif /* DEV_NETMAP */
153 #elif defined (__APPLE__)
155 #warning apple support is incomplete.
156 #define likely(x) __builtin_expect(!!(x), 1)
157 #define unlikely(x) __builtin_expect(!!(x), 0)
158 #define NM_LOCK_T IOLock *
159 #define NM_SELINFO_T struct selinfo
160 #define MBUF_LEN(m) ((m)->m_pkthdr.len)
161 #define NM_SEND_UP(ifp, m) ((ifp)->if_input)(ifp, m)
165 #error unsupported platform
167 #endif /* end - platform-specific code */
169 #define NMG_LOCK_T NM_MTX_T
170 #define NMG_LOCK_INIT() NM_MTX_INIT(netmap_global_lock)
171 #define NMG_LOCK_DESTROY() NM_MTX_DESTROY(netmap_global_lock)
172 #define NMG_LOCK() NM_MTX_LOCK(netmap_global_lock)
173 #define NMG_UNLOCK() NM_MTX_UNLOCK(netmap_global_lock)
174 #define NMG_LOCK_ASSERT() NM_MTX_ASSERT(netmap_global_lock)
176 #define ND(format, ...)
177 #define D(format, ...) \
179 struct timeval __xxts; \
180 microtime(&__xxts); \
181 printf("%03d.%06d [%4d] %-25s " format "\n", \
182 (int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec, \
183 __LINE__, __FUNCTION__, ##__VA_ARGS__); \
186 /* rate limited, lps indicates how many per second */
187 #define RD(lps, format, ...) \
189 static int t0, __cnt; \
190 if (t0 != time_second) { \
195 D(format, ##__VA_ARGS__); \
198 struct netmap_adapter;
201 struct netmap_priv_d;
203 const char *nm_dump_buf(char *p, int len, int lim, char *dst);
205 #include "netmap_mbq.h"
207 extern NMG_LOCK_T netmap_global_lock;
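
/*
 * Illustrative use of the debugging macros above (hypothetical driver
 * fragment, not part of the API): D() always prints, RD(lps, ...) prints
 * at most 'lps' messages per second. 'ring_id', 'nslots' and 'ndrop' are
 * made-up variables.
 */
#if 0
	D("ring %d reconfigured with %d slots", ring_id, nslots);
	RD(5, "ring %d: dropped %d packets", ring_id, ndrop);	/* rate limited */
#endif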
209 enum txrx { NR_RX = 0, NR_TX = 1, NR_TXRX };
211 static __inline const char*
212 nm_txrx2str(enum txrx t)
214 	return (t == NR_RX ? "RX" : "TX");
217 static __inline enum txrx
218 nm_txrx_swap(enum txrx t)
220 	return (t == NR_RX ? NR_TX : NR_RX);
223 #define for_rx_tx(t) for ((t) = 0; (t) < NR_TXRX; (t)++)
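
/*
 * Illustrative sketch (hypothetical fragment, shown only to clarify the
 * helpers above): for_rx_tx() visits NR_RX and NR_TX exactly once, and
 * nm_txrx_swap() maps each direction to the opposite one.
 */
#if 0
	enum txrx t;

	for_rx_tx(t)
		D("%s is the opposite of %s",
		    nm_txrx2str(t), nm_txrx2str(nm_txrx_swap(t)));
#endif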
227 * private, kernel view of a ring. Keeps track of the status of
228 * a ring across system calls.
230 * nr_hwcur index of the next buffer to refill.
231 * It corresponds to ring->head
232 * at the time the system call returns.
234 * nr_hwtail index of the first buffer owned by the kernel.
235 * On RX, hwcur->hwtail are receive buffers
236 * not yet released. hwcur is advanced following
237 * ring->head, hwtail is advanced on incoming packets,
238 * and a wakeup is generated when hwtail passes ring->cur
239 * On TX, hwcur->rcur have been filled by the sender
240 * but not sent yet to the NIC; rcur->hwtail are available
241 * for new transmissions, and hwtail->hwcur-1 are pending
242 * transmissions not yet acknowledged.
244 * The indexes in the NIC and netmap rings are offset by nkr_hwofs slots.
245 * This is so that, on a reset, buffers owned by userspace are not
246 * modified by the kernel. In particular:
247 * RX rings: the next empty buffer (hwtail + hwofs) coincides with
248 * the next empty buffer as known by the hardware (next_to_check or so).
249 * TX rings: hwcur + hwofs coincides with next_to_send
251 * For received packets, slot->flags is set to nkr_slot_flags
252 * so we can provide a proper initial value (e.g. set NS_FORWARD
253 * when operating in 'transparent' mode).
255 * The following fields are used to implement lock-free copy of packets
256 * from input to output ports in VALE switch:
257 * nkr_hwlease buffer after the last one being copied.
258 * A writer in nm_bdg_flush reserves N buffers
259 * from nr_hwlease, advances it, then does the
260 * copy outside the lock.
261 * In RX rings (used for VALE ports),
262 * nkr_hwtail <= nkr_hwlease < nkr_hwcur+N-1
263 * In TX rings (used for NIC or host stack ports)
264 * nkr_hwcur <= nkr_hwlease < nkr_hwtail
265 * nkr_leases array of nkr_num_slots where writers can report
266 * completion of their block. NR_NOSLOT (~0) indicates
267 * that the writer has not finished yet
268 * nkr_lease_idx index of next free slot in nr_leases, to be assigned
270 * The kring is manipulated by txsync/rxsync and generic netmap function.
272  * Concurrent rxsync or txsync on the same ring are prevented
273 * by nm_kr_(try)lock() which in turn uses nr_busy. This is all we need
274 * for NIC rings, and for TX rings attached to the host stack.
276 * RX rings attached to the host stack use an mbq (rx_queue) on both
277 * rxsync_from_host() and netmap_transmit(). The mbq is protected
278 * by its internal lock.
280 * RX rings attached to the VALE switch are accessed by both senders
281 * and receiver. They are protected through the q_lock on the RX ring.
283 struct netmap_kring {
284 struct netmap_ring *ring;
290 * Copies of values in user rings, so we do not need to look
291 * at the ring (which could be modified). These are set in the
292 * *sync_prologue()/finalize() routines.
298 uint32_t nr_kflags; /* private driver flags */
299 #define NKR_PENDINTR 0x1 // Pending interrupt.
300 #define NKR_EXCLUSIVE 0x2 /* exclusive binding */
301 uint32_t nkr_num_slots;
304 * On a NIC reset, the NIC ring indexes may be reset but the
305 * indexes in the netmap rings remain the same. nkr_hwofs
306 * keeps track of the offset between the two.
310 uint16_t nkr_slot_flags; /* initial value for flags */
312 	/* last_reclaim is an opaque marker to help reduce the frequency
313 * of operations such as reclaiming tx buffers. A possible use
314 	 * is to set it to ticks and do the reclaim only once per tick.
316 uint64_t last_reclaim;
319 NM_SELINFO_T si; /* poll/select wait queue */
320 NM_LOCK_T q_lock; /* protects kring and ring. */
321 NM_ATOMIC_T nr_busy; /* prevent concurrent syscalls */
323 struct netmap_adapter *na;
325 /* The following fields are for VALE switch support */
326 struct nm_bdg_fwd *nkr_ft;
327 uint32_t *nkr_leases;
328 #define NR_NOSLOT ((uint32_t)~0) /* used in nkr_*lease* */
329 uint32_t nkr_hwlease;
330 uint32_t nkr_lease_idx;
332 /* while nkr_stopped is set, no new [tr]xsync operations can
333 * be started on this kring.
334 * This is used by netmap_disable_all_rings()
335 * to find a synchronization point where critical data
336 * structures pointed to by the kring can be added or removed
338 volatile int nkr_stopped;
340 /* Support for adapters without native netmap support.
341 * On tx rings we preallocate an array of tx buffers
342 * (same size as the netmap ring), on rx rings we
343 * store incoming mbufs in a queue that is drained by
346 struct mbuf **tx_pool;
347 // u_int nr_ntc; /* Emulation of a next-to-clean RX ring pointer. */
348 struct mbq rx_queue; /* intercepted rx mbufs. */
350 uint32_t users; /* existing bindings for this ring */
352 uint32_t ring_id; /* debugging */
353 enum txrx tx; /* kind of ring (tx or rx) */
354 char name[64]; /* diagnostic */
356 /* [tx]sync callback for this kring.
357 * The default nm_kring_create callback (netmap_krings_create)
358 * sets the nm_sync callback of each hardware tx(rx) kring to
359 * the corresponding nm_txsync(nm_rxsync) taken from the
360 * netmap_adapter; moreover, it sets the sync callback
361 * of the host tx(rx) ring to netmap_txsync_to_host
362 * (netmap_rxsync_from_host).
364 * Overrides: the above configuration is not changed by
365 * any of the nm_krings_create callbacks.
367 int (*nm_sync)(struct netmap_kring *kring, int flags);
368 int (*nm_notify)(struct netmap_kring *kring, int flags);
371 struct netmap_kring *pipe; /* if this is a pipe ring,
372 * pointer to the other end
374 struct netmap_ring *save_ring; /* pointer to hidden rings
375 * (see netmap_pipe.c for details)
377 #endif /* WITH_PIPES */
380 int (*save_notify)(struct netmap_kring *kring, int flags);
384 /* array of krings that are monitoring this kring */
385 struct netmap_kring **monitors;
386 uint32_t max_monitors; /* current size of the monitors array */
387 uint32_t n_monitors; /* next unused entry in the monitor array */
389 * Monitors work by intercepting the sync and notify callbacks of the
390 * monitored krings. This is implemented by replacing the pointers
391 * above and saving the previous ones in mon_* pointers below
393 int (*mon_sync)(struct netmap_kring *kring, int flags);
394 int (*mon_notify)(struct netmap_kring *kring, int flags);
396 uint32_t mon_tail; /* last seen slot on rx */
397 uint32_t mon_pos; /* index of this ring in the monitored ring array */
399 } __attribute__((__aligned__(64)));
402 /* return the next index, with wraparound */
403 static inline uint32_t
404 nm_next(uint32_t i, uint32_t lim)
406 return unlikely (i == lim) ? 0 : i + 1;
410 /* return the previous index, with wraparound */
411 static inline uint32_t
412 nm_prev(uint32_t i, uint32_t lim)
414 return unlikely (i == 0) ? lim : i - 1;
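
/*
 * Illustrative sketch of a driver rxsync handler, to show how the kring
 * fields above are typically used together with nm_next(). This is a
 * hypothetical fragment: hw_receive_one()/hw_refill_one() stand for the
 * driver's own descriptor handling and do not exist in netmap;
 * kring->rhead is one of the "copies of values in user rings" mentioned
 * at the top of struct netmap_kring.
 */
#if 0
static int
example_rxsync(struct netmap_kring *kring, int flags)
{
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;	/* snapshot of ring->head */
	u_int nm_i;

	/* 1) import newly received packets, advancing nr_hwtail */
	nm_i = kring->nr_hwtail;
	while (hw_receive_one(kring, nm_i))
		nm_i = nm_next(nm_i, lim);
	kring->nr_hwtail = nm_i;

	/* 2) release buffers already consumed by userspace, up to head */
	for (nm_i = kring->nr_hwcur; nm_i != head; nm_i = nm_next(nm_i, lim))
		hw_refill_one(kring, nm_i);
	kring->nr_hwcur = head;

	return 0;
}
#endif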
420 * Here is the layout for the Rx and Tx rings.
424 +-----------------+ +-----------------+
426 |XXX free slot XXX| |XXX free slot XXX|
427 +-----------------+ +-----------------+
428 head->| owned by user |<-hwcur | not sent to nic |<-hwcur
430 +-----------------+ | |
431 cur->| available to | | |
432 | user, not read | +-----------------+
433 | yet | cur->| (being |
436 +-----------------+ + ------ +
437 tail->| |<-hwtail | |<-hwlease
438 | (being | ... | | ...
439 | prepared) | ... | | ...
440 +-----------------+ ... | | ...
441 | |<-hwlease +-----------------+
442 | | tail->| |<-hwtail
446 +-----------------+ +-----------------+
448 * The cur/tail (user view) and hwcur/hwtail (kernel view)
449 * are used in the normal operation of the card.
451 * When a ring is the output of a switch port (Rx ring for
452 * a VALE port, Tx ring for the host stack or NIC), slots
453 * are reserved in blocks through 'hwlease' which points
454 * to the next unused slot.
455 * On an Rx ring, hwlease is always after hwtail,
456 * and completions cause hwtail to advance.
457 * On a Tx ring, hwlease is always between cur and hwtail,
458 * and completions cause cur to advance.
460 * nm_kr_space() returns the maximum number of slots that
462 * nm_kr_lease() reserves the required number of buffers,
463 * advances nkr_hwlease and also returns an entry in
464 * a circular array where completions should be reported.
469 struct lut_entry *lut;
470 uint32_t objtotal; /* max buffer index */
471 uint32_t objsize; /* buffer size */
474 struct netmap_vp_adapter; // forward
477 * The "struct netmap_adapter" extends the "struct adapter"
478 * (or equivalent) device descriptor.
479 * It contains all base fields needed to support netmap operation.
480 * There are in fact different types of netmap adapters
481 * (native, generic, VALE switch...) so a netmap_adapter is
482 * just the first field in the derived type.
484 struct netmap_adapter {
486 * On linux we do not have a good way to tell if an interface
487 * is netmap-capable. So we always use the following trick:
488 * NA(ifp) points here, and the first entry (which hopefully
489 * always exists and is at least 32 bits) contains a magic
490 * value which we can use to detect that the interface is good.
493 uint32_t na_flags; /* enabled, and other flags */
494 #define NAF_SKIP_INTR 1 /* use the regular interrupt handler.
495 * useful during initialization
497 #define NAF_SW_ONLY 2 /* forward packets only to sw adapter */
498 #define NAF_BDG_MAYSLEEP 4 /* the bridge is allowed to sleep when
499 * forwarding packets coming from this
502 #define NAF_MEM_OWNER 8 /* the adapter uses its own memory area
503 * that cannot be changed
505 #define NAF_NATIVE 16 /* the adapter is native.
506 * Virtual ports (non persistent vale ports,
507 * pipes, monitors...) should never use
510 #define NAF_NETMAP_ON 32 /* netmap is active (either native or
511 * emulated). Where possible (e.g. FreeBSD)
512 * IFCAP_NETMAP also mirrors this flag.
514 #define NAF_HOST_RINGS 64 /* the adapter supports the host rings */
515 #define NAF_FORCE_NATIVE 128 /* the adapter is always NATIVE */
516 #define NAF_BUSY (1U<<31) /* the adapter is used internally and
517 * cannot be registered from userspace
519 int active_fds; /* number of user-space descriptors using this
520 interface, which is equal to the number of
521 struct netmap_if objs in the mapped region. */
523 u_int num_rx_rings; /* number of adapter receive rings */
524 u_int num_tx_rings; /* number of adapter transmit rings */
526 	u_int num_tx_desc;  /* number of descriptors in each queue */
529 /* tx_rings and rx_rings are private but allocated
530 * as a contiguous chunk of memory. Each array has
531 * N+1 entries, for the adapter queues and for the host queue.
533 struct netmap_kring *tx_rings; /* array of TX rings. */
534 struct netmap_kring *rx_rings; /* array of RX rings. */
536 void *tailroom; /* space below the rings array */
537 /* (used for leases) */
540 NM_SELINFO_T si[NR_TXRX]; /* global wait queues */
542 /* count users of the global wait queues */
543 int si_users[NR_TXRX];
545 void *pdev; /* used to store pci device */
547 /* copy of if_qflush and if_transmit pointers, to intercept
548 * packets from the network stack when netmap is active.
550 int (*if_transmit)(struct ifnet *, struct mbuf *);
552 /* copy of if_input for netmap_send_up() */
553 void (*if_input)(struct ifnet *, struct mbuf *);
555 /* references to the ifnet and device routines, used by
556 * the generic netmap functions.
558 struct ifnet *ifp; /* adapter is ifp->if_softc */
560 /*---- callbacks for this netmap adapter -----*/
562 * nm_dtor() is the cleanup routine called when destroying
564 * Called with NMG_LOCK held.
566 * nm_register() is called on NIOCREGIF and close() to enter
567 * or exit netmap mode on the NIC
568 	 *	Called with NMG_LOCK held.
570 * nm_txsync() pushes packets to the underlying hw/switch
572 * nm_rxsync() collects packets from the underlying hw/switch
574 * nm_config() returns configuration information from the OS
575 * Called with NMG_LOCK held.
577 * nm_krings_create() create and init the tx_rings and
578 * rx_rings arrays of kring structures. In particular,
579 * set the nm_sync callbacks for each ring.
580 * There is no need to also allocate the corresponding
581 * netmap_rings, since netmap_mem_rings_create() will always
582 * be called to provide the missing ones.
583 	 *	Called with NMG_LOCK held.
585 * nm_krings_delete() cleanup and delete the tx_rings and rx_rings
587 * Called with NMG_LOCK held.
589 * nm_notify() is used to act after data have become available
590 * (or the stopped state of the ring has changed)
591 * For hw devices this is typically a selwakeup(),
592 * but for NIC/host ports attached to a switch (or vice-versa)
593 * we also need to invoke the 'txsync' code downstream.
595 void (*nm_dtor)(struct netmap_adapter *);
597 int (*nm_register)(struct netmap_adapter *, int onoff);
599 int (*nm_txsync)(struct netmap_kring *kring, int flags);
600 int (*nm_rxsync)(struct netmap_kring *kring, int flags);
601 int (*nm_notify)(struct netmap_kring *kring, int flags);
602 #define NAF_FORCE_READ 1
603 #define NAF_FORCE_RECLAIM 2
604 /* return configuration information */
605 int (*nm_config)(struct netmap_adapter *,
606 u_int *txr, u_int *txd, u_int *rxr, u_int *rxd);
607 int (*nm_krings_create)(struct netmap_adapter *);
608 void (*nm_krings_delete)(struct netmap_adapter *);
611 * nm_bdg_attach() initializes the na_vp field to point
612 * to an adapter that can be attached to a VALE switch. If the
613 * current adapter is already a VALE port, na_vp is simply a cast;
614 * otherwise, na_vp points to a netmap_bwrap_adapter.
615 * If applicable, this callback also initializes na_hostvp,
616 * that can be used to connect the adapter host rings to the
618 * Called with NMG_LOCK held.
620 	 * nm_bdg_ctl() is called on the actual attach/detach
621 * to/from the switch, to perform adapter-specific
623 * Called with NMG_LOCK held.
625 int (*nm_bdg_attach)(const char *bdg_name, struct netmap_adapter *);
626 int (*nm_bdg_ctl)(struct netmap_adapter *, struct nmreq *, int);
628 /* adapter used to attach this adapter to a VALE switch (if any) */
629 struct netmap_vp_adapter *na_vp;
630 /* adapter used to attach the host rings of this adapter
631 * to a VALE switch (if any) */
632 struct netmap_vp_adapter *na_hostvp;
635 /* standard refcount to control the lifetime of the adapter
636 * (it should be equal to the lifetime of the corresponding ifp)
640 /* memory allocator (opaque)
641 * We also cache a pointer to the lut_entry for translating
642 * buffer addresses, and the total number of buffers.
644 struct netmap_mem_d *nm_mem;
645 struct netmap_lut na_lut;
647 /* additional information attached to this adapter
648 * by other netmap subsystems. Currently used by
649 * bwrap and LINUX/v1000.
653 /* array of pipes that have this adapter as a parent */
654 struct netmap_pipe_adapter **na_pipes;
655 int na_next_pipe; /* next free slot in the array */
656 int na_max_pipes; /* size of the array */
661 static __inline u_int
662 nma_get_ndesc(struct netmap_adapter *na, enum txrx t)
664 return (t == NR_TX ? na->num_tx_desc : na->num_rx_desc);
668 nma_set_ndesc(struct netmap_adapter *na, enum txrx t, u_int v)
676 static __inline u_int
677 nma_get_nrings(struct netmap_adapter *na, enum txrx t)
679 return (t == NR_TX ? na->num_tx_rings : na->num_rx_rings);
683 nma_set_nrings(struct netmap_adapter *na, enum txrx t, u_int v)
686 na->num_tx_rings = v;
688 na->num_rx_rings = v;
691 static __inline struct netmap_kring*
692 NMR(struct netmap_adapter *na, enum txrx t)
694 return (t == NR_TX ? na->tx_rings : na->rx_rings);
698 * If the NIC is owned by the kernel
699 * (i.e., bridge), neither another bridge nor user can use it;
700 * if the NIC is owned by a user, only users can share it.
701 * Evaluation must be done under NMG_LOCK().
703 #define NETMAP_OWNED_BY_KERN(na) ((na)->na_flags & NAF_BUSY)
704 #define NETMAP_OWNED_BY_ANY(na) \
705 (NETMAP_OWNED_BY_KERN(na) || ((na)->active_fds > 0))
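
/*
 * Illustrative sketch (hypothetical helper, not used by netmap): walk all
 * hardware krings of an adapter in both directions using the accessors
 * above, e.g. to sum the configured slots.
 */
#if 0
static u_int
example_count_slots(struct netmap_adapter *na)
{
	enum txrx t;
	u_int i, n = 0;

	for_rx_tx(t)
		for (i = 0; i < nma_get_nrings(na, t); i++)
			n += NMR(na, t)[i].nkr_num_slots;
	return n;
}
#endif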
708 * derived netmap adapters for various types of ports
710 struct netmap_vp_adapter { /* VALE software port */
711 struct netmap_adapter up;
716 * bdg_port is the port number used in the bridge;
717 * na_bdg points to the bridge this NA is attached to.
720 struct nm_bridge *na_bdg;
723 /* Offset of ethernet header for each packet. */
725 /* Maximum Frame Size, used in bdg_mismatch_datapath() */
727 /* Last source MAC on this port */
732 struct netmap_hw_adapter { /* physical device */
733 struct netmap_adapter up;
735 struct net_device_ops nm_ndo; // XXX linux only
736 struct ethtool_ops nm_eto; // XXX linux only
737 const struct ethtool_ops* save_ethtool;
739 int (*nm_hw_register)(struct netmap_adapter *, int onoff);
743 /* Mitigation support. */
744 struct nm_generic_mit {
745 struct hrtimer mit_timer;
747 int mit_ring_idx; /* index of the ring being mitigated */
748 struct netmap_adapter *mit_na; /* backpointer */
751 struct netmap_generic_adapter { /* emulated device */
752 struct netmap_hw_adapter up;
754 /* Pointer to a previously used netmap adapter. */
755 struct netmap_adapter *prev;
757 /* generic netmap adapters support:
758 * a net_device_ops struct overrides ndo_select_queue(),
759 * save_if_input saves the if_input hook (FreeBSD),
760 * mit implements rx interrupt mitigation,
762 struct net_device_ops generic_ndo;
763 void (*save_if_input)(struct ifnet *, struct mbuf *);
765 struct nm_generic_mit *mit;
767 netdev_tx_t (*save_start_xmit)(struct mbuf *, struct ifnet *);
770 #endif /* WITH_GENERIC */
773 netmap_real_rings(struct netmap_adapter *na, enum txrx t)
775 return nma_get_nrings(na, t) + !!(na->na_flags & NAF_HOST_RINGS);
781 * Bridge wrapper for non VALE ports attached to a VALE switch.
783 * The real device must already have its own netmap adapter (hwna).
784 * The bridge wrapper and the hwna adapter share the same set of
785 * netmap rings and buffers, but they have two separate sets of
786 * krings descriptors, with tx/rx meanings swapped:
789 * bwrap krings rings krings hwna
790 * +------+ +------+ +-----+ +------+ +------+
791 * |tx_rings->| |\ /| |----| |<-tx_rings|
792 * | | +------+ \ / +-----+ +------+ | |
795 * | | +------+/ \+-----+ +------+ | |
796 * |rx_rings->| | | |----| |<-rx_rings|
797 * | | +------+ +-----+ +------+ | |
800  * - packets coming from the bridge go to the bwrap rx rings,
801 * which are also the hwna tx rings. The bwrap notify callback
802 * will then complete the hwna tx (see netmap_bwrap_notify).
804 * - packets coming from the outside go to the hwna rx rings,
805 * which are also the bwrap tx rings. The (overwritten) hwna
806 * notify method will then complete the bridge tx
807 * (see netmap_bwrap_intr_notify).
809 * The bridge wrapper may optionally connect the hwna 'host' rings
810 * to the bridge. This is done by using a second port in the
811 * bridge and connecting it to the 'host' netmap_vp_adapter
812  * contained in the netmap_bwrap_adapter. The bwrap host adapter
813 * cross-links the hwna host rings in the same way as shown above.
815 * - packets coming from the bridge and directed to the host stack
816 * are handled by the bwrap host notify callback
817 * (see netmap_bwrap_host_notify)
819 * - packets coming from the host stack are still handled by the
820 * overwritten hwna notify callback (netmap_bwrap_intr_notify),
821 * but are diverted to the host adapter depending on the ring number.
824 struct netmap_bwrap_adapter {
825 struct netmap_vp_adapter up;
826 struct netmap_vp_adapter host; /* for host rings */
827 struct netmap_adapter *hwna; /* the underlying device */
829 /* backup of the hwna memory allocator */
830 struct netmap_mem_d *save_nmd;
833 * When we attach a physical interface to the bridge, we
834 * allow the controlling process to terminate, so we need
835 	 * a place to store the netmap_priv_d data structure.
836 * This is only done when physical interfaces
837 * are attached to a bridge.
839 struct netmap_priv_d *na_kpriv;
841 int netmap_bwrap_attach(const char *name, struct netmap_adapter *);
844 #endif /* WITH_VALE */
848 #define NM_MAXPIPES 64 /* max number of pipes per adapter */
850 struct netmap_pipe_adapter {
851 struct netmap_adapter up;
853 u_int id; /* pipe identifier */
854 int role; /* either NR_REG_PIPE_MASTER or NR_REG_PIPE_SLAVE */
856 struct netmap_adapter *parent; /* adapter that owns the memory */
857 struct netmap_pipe_adapter *peer; /* the other end of the pipe */
858 int peer_ref; /* 1 iff we are holding a ref to the peer */
860 u_int parent_slot; /* index in the parent pipe array */
863 #endif /* WITH_PIPES */
866 /* return slots reserved to rx clients; used in drivers */
867 static inline uint32_t
868 nm_kr_rxspace(struct netmap_kring *k)
870 int space = k->nr_hwtail - k->nr_hwcur;
872 space += k->nkr_num_slots;
873 ND("preserving %d rx slots %d -> %d", space, k->nr_hwcur, k->nr_hwtail);
879 /* True if no space in the tx ring. Only valid after txsync_prologue */
881 nm_kr_txempty(struct netmap_kring *kring)
883 return kring->rcur == kring->nr_hwtail;
888 * protect against multiple threads using the same ring.
889  * Also check that the ring has not been stopped.
890 * We only care for 0 or !=0 as a return code.
893 #define NM_KR_STOPPED 2
896 static __inline void nm_kr_put(struct netmap_kring *kr)
898 NM_ATOMIC_CLEAR(&kr->nr_busy);
902 static __inline int nm_kr_tryget(struct netmap_kring *kr)
904 /* check a first time without taking the lock
905 	 * to avoid starving nm_kr_get()
907 if (unlikely(kr->nkr_stopped)) {
908 ND("ring %p stopped (%d)", kr, kr->nkr_stopped);
909 return NM_KR_STOPPED;
911 if (unlikely(NM_ATOMIC_TEST_AND_SET(&kr->nr_busy)))
913 /* check a second time with lock held */
914 if (unlikely(kr->nkr_stopped)) {
915 ND("ring %p stopped (%d)", kr, kr->nkr_stopped);
917 return NM_KR_STOPPED;
922 static __inline void nm_kr_get(struct netmap_kring *kr)
924 while (NM_ATOMIC_TEST_AND_SET(&kr->nr_busy))
925 tsleep(kr, 0, "NM_KR_GET", 4);
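
/*
 * Illustrative sketch of the protocol above (simplified from the way the
 * poll/ioctl code uses it): try to own the kring, run the sync callback,
 * then release it. Stopped or busy rings are simply skipped. Hypothetical
 * fragment, 'kring' assumed in scope.
 */
#if 0
	if (nm_kr_tryget(kring))
		continue;	/* stopped or busy */
	kring->nm_sync(kring, 0);
	nm_kr_put(kring);
#endif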
930 * The following functions are used by individual drivers to
931 * support netmap operation.
933 * netmap_attach() initializes a struct netmap_adapter, allocating the
934 * struct netmap_ring's and the struct selinfo.
936 * netmap_detach() frees the memory allocated by netmap_attach().
938 * netmap_transmit() replaces the if_transmit routine of the interface,
939 * and is used to intercept packets coming from the stack.
941 * netmap_load_map/netmap_reload_map are helper routines to set/reset
942 * the dmamap for a packet buffer
944 * netmap_reset() is a helper routine to be called in the hw driver
945 * when reinitializing a ring. It should not be called by
946 * virtual ports (vale, pipes, monitor)
948 int netmap_attach(struct netmap_adapter *);
949 void netmap_detach(struct ifnet *);
950 int netmap_transmit(struct ifnet *, struct mbuf *);
951 struct netmap_slot *netmap_reset(struct netmap_adapter *na,
952 enum txrx tx, u_int n, u_int new_cur);
953 int netmap_ring_reinit(struct netmap_kring *);
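
/*
 * Illustrative sketch of driver attach code (hypothetical driver softc
 * fields and callbacks; values are examples only): fill a netmap_adapter
 * on the stack and register it with netmap_attach().
 */
#if 0
	struct netmap_adapter na;

	bzero(&na, sizeof(na));
	na.ifp = sc->ifp;			/* hypothetical softc */
	na.num_tx_desc = sc->num_tx_desc;
	na.num_rx_desc = sc->num_rx_desc;
	na.num_tx_rings = na.num_rx_rings = sc->num_queues;
	na.nm_register = example_netmap_reg;	/* hypothetical callbacks */
	na.nm_txsync = example_netmap_txsync;
	na.nm_rxsync = example_netmap_rxsync;
	netmap_attach(&na);
#endif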
955 /* default functions to handle rx/tx interrupts */
956 int netmap_rx_irq(struct ifnet *, u_int, u_int *);
957 #define netmap_tx_irq(_n, _q) netmap_rx_irq(_n, _q, NULL)
958 void netmap_common_irq(struct ifnet *, u_int, u_int *work_done);
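
/*
 * Illustrative fragment of a driver rx interrupt handler (hypothetical):
 * when the interface is in netmap mode, netmap_rx_irq() wakes up the
 * netmap clients and returns non-zero, so the regular rx path is skipped.
 */
#if 0
	u_int work_done;

	if (netmap_rx_irq(ifp, ring_nr, &work_done))
		return;		/* handled by netmap */
	/* ... regular driver rx processing ... */
#endif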
962 /* functions used by external modules to interface with VALE */
963 #define netmap_vp_to_ifp(_vp) ((_vp)->up.ifp)
964 #define netmap_ifp_to_vp(_ifp) (NA(_ifp)->na_vp)
965 #define netmap_ifp_to_host_vp(_ifp) (NA(_ifp)->na_hostvp)
966 #define netmap_bdg_idx(_vp) ((_vp)->bdg_port)
967 const char *netmap_bdg_name(struct netmap_vp_adapter *);
968 #else /* !WITH_VALE */
969 #define netmap_vp_to_ifp(_vp) NULL
970 #define netmap_ifp_to_vp(_ifp) NULL
971 #define netmap_ifp_to_host_vp(_ifp) NULL
972 #define netmap_bdg_idx(_vp) -1
973 #define netmap_bdg_name(_vp) NULL
974 #endif /* WITH_VALE */
977 nm_netmap_on(struct netmap_adapter *na)
979 return na && na->na_flags & NAF_NETMAP_ON;
983 nm_native_on(struct netmap_adapter *na)
985 return nm_netmap_on(na) && (na->na_flags & NAF_NATIVE);
988 /* set/clear native flags and if_transmit/netdev_ops */
990 nm_set_native_flags(struct netmap_adapter *na)
992 struct ifnet *ifp = na->ifp;
994 na->na_flags |= NAF_NETMAP_ON;
995 #ifdef IFCAP_NETMAP /* or FreeBSD ? */
996 ifp->if_capenable |= IFCAP_NETMAP;
999 na->if_transmit = ifp->if_transmit;
1000 ifp->if_transmit = netmap_transmit;
1002 na->if_transmit = (void *)ifp->netdev_ops;
1003 ifp->netdev_ops = &((struct netmap_hw_adapter *)na)->nm_ndo;
1004 ((struct netmap_hw_adapter *)na)->save_ethtool = ifp->ethtool_ops;
1005 ifp->ethtool_ops = &((struct netmap_hw_adapter*)na)->nm_eto;
1011 nm_clear_native_flags(struct netmap_adapter *na)
1013 struct ifnet *ifp = na->ifp;
1016 ifp->if_transmit = na->if_transmit;
1018 ifp->netdev_ops = (void *)na->if_transmit;
1019 ifp->ethtool_ops = ((struct netmap_hw_adapter*)na)->save_ethtool;
1021 na->na_flags &= ~NAF_NETMAP_ON;
1022 #ifdef IFCAP_NETMAP /* or FreeBSD ? */
1023 ifp->if_capenable &= ~IFCAP_NETMAP;
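
/*
 * Illustrative sketch of a driver nm_register() callback (hypothetical
 * driver code, error handling omitted): stop the device, flip the netmap
 * state with the helpers above, then reinitialize.
 */
#if 0
static int
example_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;

	example_stop(ifp);		/* hypothetical */
	if (onoff)
		nm_set_native_flags(na);
	else
		nm_clear_native_flags(na);
	example_init(ifp);		/* hypothetical */
	return 0;
}
#endif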
1028 /* check/fix address and len in tx rings */
1029 #if 1 /* debug version */
1030 #define NM_CHECK_ADDR_LEN(_na, _a, _l) do { \
1031 if (_a == NETMAP_BUF_BASE(_na) || _l > NETMAP_BUF_SIZE(_na)) { \
1032 RD(5, "bad addr/len ring %d slot %d idx %d len %d", \
1033 kring->ring_id, nm_i, slot->buf_idx, len); \
1034 if (_l > NETMAP_BUF_SIZE(_na)) \
1035 _l = NETMAP_BUF_SIZE(_na); \
1037 #else /* no debug version */
1038 #define NM_CHECK_ADDR_LEN(_na, _a, _l) do { \
1039 if (_l > NETMAP_BUF_SIZE(_na)) \
1040 _l = NETMAP_BUF_SIZE(_na); \
1045 /*---------------------------------------------------------------*/
1047 * Support routines used by netmap subsystems
1048 * (native drivers, VALE, generic, pipes, monitors, ...)
1052 /* common routine for all functions that create a netmap adapter. It performs
1054 * - if the na points to an ifp, mark the ifp as netmap capable
1055 * using na as its native adapter;
1056 * - provide defaults for the setup callbacks and the memory allocator
1058 int netmap_attach_common(struct netmap_adapter *);
1059 /* common actions to be performed on netmap adapter destruction */
1060 void netmap_detach_common(struct netmap_adapter *);
1061 /* fill priv->np_[tr]xq{first,last} using the ringid and flags information
1062 * coming from a struct nmreq
1064 int netmap_interp_ringid(struct netmap_priv_d *priv, uint16_t ringid, uint32_t flags);
1065 /* update the ring parameters (number and size of tx and rx rings).
1066 * It calls the nm_config callback, if available.
1068 int netmap_update_config(struct netmap_adapter *na);
1069 /* create and initialize the common fields of the krings array.
1070 * using the information that must be already available in the na.
1071 * tailroom can be used to request the allocation of additional
1072 * tailroom bytes after the krings array. This is used by
1073  * netmap_vp_adapters (i.e., VALE ports) to make room for
1074 * leasing-related data structures
1076 int netmap_krings_create(struct netmap_adapter *na, u_int tailroom);
1077 /* deletes the kring array of the adapter. The array must have
1078 * been created using netmap_krings_create
1080 void netmap_krings_delete(struct netmap_adapter *na);
1082 /* set the stopped/enabled status of a ring.
1083  * When stopping, it also waits for all current activity on the ring to
1084 * terminate. The status change is then notified using the na nm_notify
1087 void netmap_set_ring(struct netmap_adapter *, u_int ring_id, enum txrx, int stopped);
1088 /* set the stopped/enabled status of all rings of the adapter. */
1089 void netmap_set_all_rings(struct netmap_adapter *, int stopped);
1090 /* convenience wrappers for netmap_set_all_rings, used in drivers */
1091 void netmap_disable_all_rings(struct ifnet *);
1092 void netmap_enable_all_rings(struct ifnet *);
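
/*
 * Illustrative driver fragment (hypothetical): bracket a hardware reset
 * with the wrappers above, so that no txsync/rxsync can run while the
 * descriptor rings are being reinitialized.
 */
#if 0
	netmap_disable_all_rings(ifp);
	/* ... reset the NIC and rewrite the descriptor rings ... */
	netmap_enable_all_rings(ifp);
#endif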
1094 int netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
1095 uint16_t ringid, uint32_t flags);
1098 u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg);
1099 int netmap_get_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
1100 int netmap_get_hw_na(struct ifnet *ifp, struct netmap_adapter **na);
1105 * The following bridge-related functions are used by other
1108 * VALE only supports unicast or broadcast. The lookup
1109 * function can return 0 .. NM_BDG_MAXPORTS-1 for regular ports,
1110 * NM_BDG_MAXPORTS for broadcast, NM_BDG_MAXPORTS+1 for unknown.
1111  * XXX in practice "unknown" might be handled the same as broadcast.
1113 typedef u_int (*bdg_lookup_fn_t)(struct nm_bdg_fwd *ft, uint8_t *ring_nr,
1114 struct netmap_vp_adapter *);
1115 typedef int (*bdg_config_fn_t)(struct nm_ifreq *);
1116 typedef void (*bdg_dtor_fn_t)(const struct netmap_vp_adapter *);
1117 struct netmap_bdg_ops {
1118 bdg_lookup_fn_t lookup;
1119 bdg_config_fn_t config;
1123 u_int netmap_bdg_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
1124 struct netmap_vp_adapter *);
1126 #define NM_BDG_MAXPORTS 254 /* up to 254 */
1127 #define NM_BDG_BROADCAST NM_BDG_MAXPORTS
1128 #define NM_BDG_NOPORT (NM_BDG_MAXPORTS+1)
1130 #define NM_NAME "vale" /* prefix for bridge port name */
1132 /* these are redefined in case of no VALE support */
1133 int netmap_get_bdg_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
1134 struct nm_bridge *netmap_init_bridges2(u_int);
1135 void netmap_uninit_bridges2(struct nm_bridge *, u_int);
1136 int netmap_init_bridges(void);
1137 void netmap_uninit_bridges(void);
1138 int netmap_bdg_ctl(struct nmreq *nmr, struct netmap_bdg_ops *bdg_ops);
1139 int netmap_bdg_config(struct nmreq *nmr);
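
/*
 * Illustrative sketch (hypothetical): a trivial lookup function that
 * broadcasts every frame, installed on a bridge through netmap_bdg_ctl().
 * Real switches normally use netmap_bdg_learning() or their own logic.
 */
#if 0
static u_int
example_lookup(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
	struct netmap_vp_adapter *vpna)
{
	return NM_BDG_BROADCAST;
}

static struct netmap_bdg_ops example_ops = { .lookup = example_lookup };

	/* then, given an nmreq that names the bridge: */
	error = netmap_bdg_ctl(nmr, &example_ops);
#endif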
1141 #else /* !WITH_VALE */
1142 #define netmap_get_bdg_na(_1, _2, _3) 0
1143 #define netmap_init_bridges(_1) 0
1144 #define netmap_uninit_bridges()
1145 #define netmap_bdg_ctl(_1, _2) EINVAL
1146 #endif /* !WITH_VALE */
1149 /* max number of pipes per device */
1150 #define NM_MAXPIPES 64 /* XXX how many? */
1151 void netmap_pipe_dealloc(struct netmap_adapter *);
1152 int netmap_get_pipe_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
1153 #else /* !WITH_PIPES */
1154 #define NM_MAXPIPES 0
1155 #define netmap_pipe_alloc(_1, _2) 0
1156 #define netmap_pipe_dealloc(_1)
1157 #define netmap_get_pipe_na(nmr, _2, _3) \
1158 ({ int role__ = (nmr)->nr_flags & NR_REG_MASK; \
1159 (role__ == NR_REG_PIPE_MASTER || \
1160 role__ == NR_REG_PIPE_SLAVE) ? EOPNOTSUPP : 0; })
1164 int netmap_get_monitor_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
1165 void netmap_monitor_stop(struct netmap_adapter *na);
1167 #define netmap_get_monitor_na(nmr, _2, _3) \
1168 ((nmr)->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX) ? EOPNOTSUPP : 0)
1171 #ifdef CONFIG_NET_NS
1172 struct net *netmap_bns_get(void);
1173 void netmap_bns_put(struct net *);
1174 void netmap_bns_getbridges(struct nm_bridge **, u_int *);
1176 #define netmap_bns_get()
1177 #define netmap_bns_put(_1)
1178 #define netmap_bns_getbridges(b, n) \
1179 do { *b = nm_bridges; *n = NM_BRIDGES; } while (0)
1182 /* Various prototypes */
1183 int netmap_poll(struct cdev *dev, int events, struct thread *td);
1184 int netmap_init(void);
1185 void netmap_fini(void);
1186 int netmap_get_memory(struct netmap_priv_d* p);
1187 void netmap_dtor(void *data);
1188 int netmap_dtor_locked(struct netmap_priv_d *priv);
1190 int netmap_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td);
1192 /* netmap_adapter creation/destruction */
1194 // #define NM_DEBUG_PUTGET 1
1196 #ifdef NM_DEBUG_PUTGET
1198 #define NM_DBG(f) __##f
1200 void __netmap_adapter_get(struct netmap_adapter *na);
1202 #define netmap_adapter_get(na) \
1204 struct netmap_adapter *__na = na; \
1205 D("getting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \
1206 __netmap_adapter_get(__na); \
1209 int __netmap_adapter_put(struct netmap_adapter *na);
1211 #define netmap_adapter_put(na) \
1213 struct netmap_adapter *__na = na; \
1214 D("putting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \
1215 __netmap_adapter_put(__na); \
1218 #else /* !NM_DEBUG_PUTGET */
1221 void netmap_adapter_get(struct netmap_adapter *na);
1222 int netmap_adapter_put(struct netmap_adapter *na);
1224 #endif /* !NM_DEBUG_PUTGET */
1230 #define NETMAP_BUF_BASE(na) ((na)->na_lut.lut[0].vaddr)
1231 #define NETMAP_BUF_SIZE(na) ((na)->na_lut.objsize)
1232 extern int netmap_mitigate; // XXX not really used
1233 extern int netmap_no_pendintr;
1234 extern int netmap_verbose; // XXX debugging
1235 enum { /* verbose flags */
1236 NM_VERB_ON = 1, /* generic verbose */
1237 NM_VERB_HOST = 0x2, /* verbose host stack */
1238 NM_VERB_RXSYNC = 0x10, /* verbose on rxsync/txsync */
1239 NM_VERB_TXSYNC = 0x20,
1240 NM_VERB_RXINTR = 0x100, /* verbose on rx/tx intr (driver) */
1241 NM_VERB_TXINTR = 0x200,
1242 NM_VERB_NIC_RXSYNC = 0x1000, /* verbose on rx/tx intr (driver) */
1243 NM_VERB_NIC_TXSYNC = 0x2000,
1246 extern int netmap_txsync_retry;
1247 extern int netmap_generic_mit;
1248 extern int netmap_generic_ringsize;
1249 extern int netmap_generic_rings;
1250 extern int netmap_use_count;
1253  * NA returns a pointer to the struct netmap_adapter from the ifp;
1254 * WNA is used to write it.
1256 #define NA(_ifp) ((struct netmap_adapter *)WNA(_ifp))
1259 * Macros to determine if an interface is netmap capable or netmap enabled.
1260 * See the magic field in struct netmap_adapter.
1264 * on FreeBSD just use if_capabilities and if_capenable.
1266 #define NETMAP_CAPABLE(ifp) (NA(ifp) && \
1267 (ifp)->if_capabilities & IFCAP_NETMAP )
1269 #define NETMAP_SET_CAPABLE(ifp) \
1270 (ifp)->if_capabilities |= IFCAP_NETMAP
1276 * we check if NA(ifp) is set and its first element has a related
1277 * magic value. The capenable is within the struct netmap_adapter.
1279 #define NETMAP_MAGIC 0x52697a7a
1281 #define NETMAP_CAPABLE(ifp) (NA(ifp) && \
1282 ((uint32_t)(uintptr_t)NA(ifp) ^ NA(ifp)->magic) == NETMAP_MAGIC )
1284 #define NETMAP_SET_CAPABLE(ifp) \
1285 NA(ifp)->magic = ((uint32_t)(uintptr_t)NA(ifp)) ^ NETMAP_MAGIC
1291 /* Assigns the device IOMMU domain to an allocator.
1292 * Returns -ENOMEM in case the domain is different */
1293 #define nm_iommu_group_id(dev) (0)
1295 /* Callback invoked by the dma machinery after a successful dmamap_load */
1296 static void netmap_dmamap_cb(__unused void *arg,
1297 __unused bus_dma_segment_t * segs, __unused int nseg, __unused int error)
1301 /* bus_dmamap_load wrapper: call aforementioned function if map != NULL.
1302 * XXX can we do it without a callback ?
1305 netmap_load_map(struct netmap_adapter *na,
1306 bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1309 bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na),
1310 netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
1314 netmap_unload_map(struct netmap_adapter *na,
1315 bus_dma_tag_t tag, bus_dmamap_t map)
1318 bus_dmamap_unload(tag, map);
1321 /* update the map when a buffer changes. */
1323 netmap_reload_map(struct netmap_adapter *na,
1324 bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1327 bus_dmamap_unload(tag, map);
1328 bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na),
1329 netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
1335 int nm_iommu_group_id(bus_dma_tag_t dev);
1336 #include <linux/dma-mapping.h>
1339 netmap_load_map(struct netmap_adapter *na,
1340 bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1343 *map = dma_map_single(na->pdev, buf, na->na_lut.objsize,
1349 netmap_unload_map(struct netmap_adapter *na,
1350 bus_dma_tag_t tag, bus_dmamap_t map)
1352 u_int sz = na->na_lut.objsize;
1355 dma_unmap_single(na->pdev, *map, sz,
1361 netmap_reload_map(struct netmap_adapter *na,
1362 bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1364 u_int sz = na->na_lut.objsize;
1367 dma_unmap_single(na->pdev, *map, sz,
1371 *map = dma_map_single(na->pdev, buf, sz,
1376 * XXX How do we redefine these functions:
1379 * dma_map_single(&pdev->dev, virt_addr, len, direction)
1380 * dma_unmap_single(&adapter->pdev->dev, phys_addr, len, direction
1381 * The len can be implicit (on netmap it is NETMAP_BUF_SIZE)
1382 * unfortunately the direction is not, so we need to change
1383 * something to have a cross API
1387 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[l];
1388 /* set time_stamp *before* dma to help avoid a possible race */
1389 buffer_info->time_stamp = jiffies;
1390 buffer_info->mapped_as_page = false;
1391 buffer_info->length = len;
1392 //buffer_info->next_to_watch = l;
1393 /* reload dma map */
1394 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1395 NETMAP_BUF_SIZE, DMA_TO_DEVICE);
1396 buffer_info->dma = dma_map_single(&adapter->pdev->dev,
1397 addr, NETMAP_BUF_SIZE, DMA_TO_DEVICE);
1399 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1400 D("dma mapping error");
1401 /* goto dma_error; See e1000_put_txbuf() */
1404 tx_desc->buffer_addr = htole64(buffer_info->dma); //XXX
1409 * The bus_dmamap_sync() can be one of wmb() or rmb() depending on direction.
1411 #define bus_dmamap_sync(_a, _b, _c)
1417 * functions to map NIC to KRING indexes (n2k) and vice versa (k2n)
1420 netmap_idx_n2k(struct netmap_kring *kr, int idx)
1422 int n = kr->nkr_num_slots;
1423 idx += kr->nkr_hwofs;
1434 netmap_idx_k2n(struct netmap_kring *kr, int idx)
1436 int n = kr->nkr_num_slots;
1437 idx -= kr->nkr_hwofs;
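
/*
 * Worked example of the mapping above (illustrative): with nkr_hwofs == 2
 * and nkr_num_slots == 256, NIC slot 0 maps to netmap/kring slot 2
 * (netmap_idx_n2k), while kring slot 0 maps back to NIC slot 254
 * (netmap_idx_k2n); both functions wrap the result into
 * [0, nkr_num_slots - 1].
 */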
1447 /* Entries of the look-up table. */
1449 void *vaddr; /* virtual address. */
1450 vm_paddr_t paddr; /* physical address. */
1453 struct netmap_obj_pool;
1456  * NMB returns the virtual address of a buffer (buffer 0 on a bad index);
1457 * PNMB also fills the physical address
1459 static inline void *
1460 NMB(struct netmap_adapter *na, struct netmap_slot *slot)
1462 struct lut_entry *lut = na->na_lut.lut;
1463 uint32_t i = slot->buf_idx;
1464 return (unlikely(i >= na->na_lut.objtotal)) ?
1465 lut[0].vaddr : lut[i].vaddr;
1468 static inline void *
1469 PNMB(struct netmap_adapter *na, struct netmap_slot *slot, uint64_t *pp)
1471 uint32_t i = slot->buf_idx;
1472 struct lut_entry *lut = na->na_lut.lut;
1473 void *ret = (i >= na->na_lut.objtotal) ? lut[0].vaddr : lut[i].vaddr;
1475 *pp = (i >= na->na_lut.objtotal) ? lut[0].paddr : lut[i].paddr;
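
/*
 * Illustrative fragment of a driver txsync loop (hypothetical; 'kring',
 * 'nm_i', 'txtag' and 'txmap' are assumed to be in scope, descriptor
 * programming is omitted), showing PNMB() together with
 * NM_CHECK_ADDR_LEN() and netmap_reload_map() defined in this file:
 */
#if 0
	struct netmap_slot *slot = &kring->ring->slot[nm_i];
	u_int len = slot->len;
	uint64_t paddr;
	void *addr = PNMB(na, slot, &paddr);

	NM_CHECK_ADDR_LEN(na, addr, len);
	if (slot->flags & NS_BUF_CHANGED) {
		/* the buffer has changed: reload the DMA map */
		netmap_reload_map(na, txtag, txmap, addr);
		slot->flags &= ~NS_BUF_CHANGED;
	}
	/* ... program the NIC descriptor with paddr and len ... */
#endif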
1481 * Structure associated to each netmap file descriptor.
1482 * It is created on open and left unbound (np_nifp == NULL).
1483 * A successful NIOCREGIF will set np_nifp and the first few fields;
1484 * this is protected by a global lock (NMG_LOCK) due to low contention.
1486 * np_refs counts the number of references to the structure: one for the fd,
1487 * plus (on FreeBSD) one for each active mmap which we track ourselves
1488 * (linux automatically tracks them, but FreeBSD does not).
1489 * np_refs is protected by NMG_LOCK.
1491  * Read access to the structure is lock-free, because np_nifp once set
1492 * can only go to 0 when nobody is using the entry anymore. Readers
1493 * must check that np_nifp != NULL before using the other fields.
1495 struct netmap_priv_d {
1496 struct netmap_if * volatile np_nifp; /* netmap if descriptor. */
1498 struct netmap_adapter *np_na;
1499 uint32_t np_flags; /* from the ioctl */
1500 u_int np_qfirst[NR_TXRX],
1501 np_qlast[NR_TXRX]; /* range of tx/rx rings to scan */
1502 uint16_t np_txpoll; /* XXX and also np_rxpoll ? */
1504 int np_refs; /* use with NMG_LOCK held */
1506 /* pointers to the selinfo to be used for selrecord.
1507 * Either the local or the global one depending on the
1510 NM_SELINFO_T *np_si[NR_TXRX];
1511 struct thread *np_td; /* kqueue, just debugging */
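
/*
 * Illustrative fragment (hypothetical): the check that lock-free readers
 * must perform before using the other fields, as described above.
 */
#if 0
	if (priv->np_nifp == NULL)	/* not bound yet (no NIOCREGIF) */
		return ENXIO;
#endif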
1516 struct netmap_monitor_adapter {
1517 struct netmap_adapter up;
1519 struct netmap_priv_d priv;
1523 #endif /* WITH_MONITOR */
1528 * generic netmap emulation for devices that do not have
1529 * native netmap support.
1531 int generic_netmap_attach(struct ifnet *ifp);
1533 int netmap_catch_rx(struct netmap_generic_adapter *na, int intercept);
1534 void generic_rx_handler(struct ifnet *ifp, struct mbuf *m);
1535 void netmap_catch_tx(struct netmap_generic_adapter *na, int enable);
1536 int generic_xmit_frame(struct ifnet *ifp, struct mbuf *m, void *addr, u_int len, u_int ring_nr);
1537 int generic_find_num_desc(struct ifnet *ifp, u_int *tx, u_int *rx);
1538 void generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq);
1539 static inline struct ifnet*
1540 netmap_generic_getifp(struct netmap_generic_adapter *gna)
1543 return gna->prev->ifp;
1545 return gna->up.up.ifp;
1548 //#define RATE_GENERIC /* Enables communication statistics for generic. */
1550 void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi);
1552 #define generic_rate(txp, txs, txi, rxp, rxs, rxi)
1556 * netmap_mitigation API. This is used by the generic adapter
1557 * to reduce the number of interrupt requests/selwakeup
1558 * to clients on incoming packets.
1560 void netmap_mitigation_init(struct nm_generic_mit *mit, int idx,
1561 struct netmap_adapter *na);
1562 void netmap_mitigation_start(struct nm_generic_mit *mit);
1563 void netmap_mitigation_restart(struct nm_generic_mit *mit);
1564 int netmap_mitigation_active(struct nm_generic_mit *mit);
1565 void netmap_mitigation_cleanup(struct nm_generic_mit *mit);
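
/*
 * Illustrative sketch (hypothetical, simplified) of how the API above is
 * meant to be used on the emulated rx path: if a mitigation timer is
 * already pending, just record the new work; otherwise notify the clients
 * and start the timer. 'mit' and 'kring' are assumed in scope, and
 * mit_pending stands for a field of nm_generic_mit not shown here.
 */
#if 0
	if (netmap_mitigation_active(mit)) {
		mit->mit_pending = 1;	/* coalesce with the pending wakeup */
	} else {
		kring->nm_notify(kring, 0);
		netmap_mitigation_start(mit);
	}
#endif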
1566 #endif /* WITH_GENERIC */
1570 /* Shared declarations for the VALE switch. */
1573 * Each transmit queue accumulates a batch of packets into
1574 * a structure before forwarding. Packets to the same
1575 * destination are put in a list using ft_next as a link field.
1576 * ft_frags and ft_next are valid only on the first fragment.
1578 struct nm_bdg_fwd { /* forwarding entry for a bridge */
1579 void *ft_buf; /* netmap or indirect buffer */
1580 uint8_t ft_frags; /* how many fragments (only on 1st frag) */
1581 uint8_t _ft_port; /* dst port (unused) */
1582 uint16_t ft_flags; /* flags, e.g. indirect */
1583 uint16_t ft_len; /* src fragment len */
1584 uint16_t ft_next; /* next packet to same destination */
1587 /* struct 'virtio_net_hdr' from linux. */
1588 struct nm_vnet_hdr {
1589 #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* Use csum_start, csum_offset */
1590 #define VIRTIO_NET_HDR_F_DATA_VALID 2 /* Csum is valid */
1592 #define VIRTIO_NET_HDR_GSO_NONE 0 /* Not a GSO frame */
1593 #define VIRTIO_NET_HDR_GSO_TCPV4 1 /* GSO frame, IPv4 TCP (TSO) */
1594 #define VIRTIO_NET_HDR_GSO_UDP 3 /* GSO frame, IPv4 UDP (UFO) */
1595 #define VIRTIO_NET_HDR_GSO_TCPV6 4 /* GSO frame, IPv6 TCP */
1596 #define VIRTIO_NET_HDR_GSO_ECN 0x80 /* TCP has ECN set */
1600 uint16_t csum_start;
1601 uint16_t csum_offset;
1604 #define WORST_CASE_GSO_HEADER (14+40+60) /* IPv6 + TCP */
1606 /* Private definitions for IPv4, IPv6, UDP and TCP headers. */
1609 uint8_t version_ihl;
1619 	/* The options start here. */
1627 uint8_t doff; /* Data offset + Reserved */
1642 uint8_t priority_version;
1643 uint8_t flow_lbl[3];
1645 uint16_t payload_len;
1653 /* Type used to store a checksum (in host byte order) that hasn't been
1656 #define rawsum_t uint32_t
1658 rawsum_t nm_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum);
1659 uint16_t nm_csum_ipv4(struct nm_iphdr *iph);
1660 void nm_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
1661 size_t datalen, uint16_t *check);
1662 void nm_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data,
1663 size_t datalen, uint16_t *check);
1664 uint16_t nm_csum_fold(rawsum_t cur_sum);
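
/*
 * Illustrative fragment (hypothetical; 'iph' points to the IPv4 header,
 * 'data'/'datalen' to the TCP/UDP segment and 'check' to its checksum
 * field, and the 'check' member of nm_iphdr is assumed as in the full
 * structure) showing how the helpers above are combined:
 */
#if 0
	iph->check = 0;
	iph->check = nm_csum_ipv4(iph);			/* IPv4 header checksum */
	nm_csum_tcpudp_ipv4(iph, data, datalen, check);	/* TCP/UDP checksum */
#endif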
1666 void bdg_mismatch_datapath(struct netmap_vp_adapter *na,
1667 struct netmap_vp_adapter *dst_na,
1668 struct nm_bdg_fwd *ft_p, struct netmap_ring *ring,
1669 u_int *j, u_int lim, u_int *howmany);
1671 /* persistent virtual port routines */
1672 int nm_vi_persist(const char *, struct ifnet **);
1673 void nm_vi_detach(struct ifnet *);
1674 void nm_vi_init_index(void);
1676 #endif /* _NET_NETMAP_KERN_H_ */