/*
 * Copyright (C) 2011-2013 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module supports memory mapped access to network devices;
 * see netmap(4).
 *
 * The module uses a large memory pool allocated by the kernel
 * and accessible as mmapped memory by multiple userspace threads/processes.
 * The memory pool contains packet buffers and "netmap rings",
 * i.e. user-accessible copies of the interface's queues.
 *
 * Access to the network card works like this:
 * 1. a process/thread issues one or more open() on /dev/netmap, to create
 *    a select()able file descriptor on which events are reported.
 * 2. on each descriptor, the process issues an ioctl() to identify
 *    the interface that should report events to the file descriptor.
 * 3. on each descriptor, the process issues an mmap() request to
 *    map the shared memory region within the process' address space.
 *    The list of interesting queues is indicated by a location in
 *    the shared memory region.
 * 4. using the functions in the netmap(4) userspace API, a process
 *    can look up the occupation state of a queue, access memory buffers,
 *    and retrieve received packets or enqueue packets to transmit.
 * 5. using some ioctl()s the process can synchronize the userspace view
 *    of the queue with the actual status in the kernel. This includes both
 *    receiving the notification of new packets, and transmitting new
 *    packets on the output interface.
 * 6. select() or poll() can be used to wait for events on individual
 *    transmit or receive queues (or all queues for a given interface).
 */
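/*
 * A minimal userspace sketch of the sequence above (not part of this
 * module; error handling omitted). The NETMAP_IF()/NETMAP_TXRING()
 * accessors and the nmreq/ioctl names are those documented in netmap(4):
 *
 *	struct nmreq req = { .nr_version = NETMAP_API };
 *	int fd = open("/dev/netmap", O_RDWR);			// 1.
 *	strcpy(req.nr_name, "em0");
 *	ioctl(fd, NIOCREGIF, &req);				// 2.
 *	char *mem = mmap(0, req.nr_memsize, PROT_READ | PROT_WRITE,
 *		MAP_SHARED, fd, 0);				// 3.
 *	struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);
 *	struct netmap_ring *ring = NETMAP_TXRING(nifp, 0);	// 4.
 *	ioctl(fd, NIOCTXSYNC, NULL);				// 5.
 *	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *	poll(&pfd, 1, -1);					// 6.
 */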
#ifdef linux
#include "bsd_glue.h"
static netdev_tx_t linux_netmap_start(struct sk_buff *skb, struct net_device *dev);
#endif /* linux */

#ifdef __APPLE__
#include "osx_glue.h"
#endif /* __APPLE__ */
#ifdef __FreeBSD__
#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/jail.h>
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/conf.h>	/* cdevsw struct */
#include <sys/uio.h>	/* uio struct */
#include <sys/sockio.h>
#include <sys/socketvar.h>	/* struct socket */
#include <sys/malloc.h>
#include <sys/mman.h>	/* PROT_EXEC */
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <vm/vm.h>	/* vtophys */
#include <vm/pmap.h>	/* vtophys */
#include <sys/socket.h> /* sockaddrs */
#include <machine/bus.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/bpf.h>		/* BIOCIMMEDIATE */
#include <net/vnet.h>
#include <machine/bus.h>	/* bus_dmamap_* */

MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");
#endif /* __FreeBSD__ */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/* XXX the following variables must be deprecated and included in nm_mem */
u_int netmap_total_buffers;
u_int netmap_buf_size;
char *netmap_buffer_base;	/* address of an invalid buffer */

/* user-controlled variables */
int netmap_verbose;

static int netmap_no_timestamp; /* don't timestamp on rxsync */
SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args");
SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
    CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
    CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp");
int netmap_mitigate = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, mitigate, CTLFLAG_RW, &netmap_mitigate, 0, "");
int netmap_no_pendintr = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr,
    CTLFLAG_RW, &netmap_no_pendintr, 0, "Always look for new received packets.");
int netmap_txsync_retry = 2;
SYSCTL_INT(_dev_netmap, OID_AUTO, txsync_retry, CTLFLAG_RW,
    &netmap_txsync_retry, 0, "Number of txsync loops in bridge's flush.");

int netmap_drop = 0;	/* debugging */
int netmap_flags = 0;	/* debug flags */
int netmap_fwd = 0;	/* force transparent mode */

SYSCTL_INT(_dev_netmap, OID_AUTO, drop, CTLFLAG_RW, &netmap_drop, 0, "");
SYSCTL_INT(_dev_netmap, OID_AUTO, flags, CTLFLAG_RW, &netmap_flags, 0, "");
SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0, "");
#ifdef NM_BRIDGE /* support for netmap virtual switch, called VALE */

/*
 * system parameters (most of them in netmap_kern.h)
 * NM_NAME	prefix for switch port names, default "vale"
 * NM_MAXPORTS	number of ports
 * NM_BRIDGES	max number of switches in the system.
 *	XXX should become a sysctl or tunable
 *
 * Switch ports are named valeX:Y where X is the switch name and Y
 * is the port. If Y matches a physical interface name, the port is
 * connected to a physical device.
 *
 * Unlike physical interfaces, switch ports use their own memory region
 * for rings and buffers.
 * The virtual interfaces use per-queue locks instead of the core lock.
 * In the tx loop, we aggregate traffic in batches to make all operations
 * faster. The batch size is NM_BDG_BATCH.
 */
#define NM_BDG_MAXRINGS		16	/* XXX unclear how many. */
#define NM_BRIDGE_RINGSIZE	1024	/* in the device */
#define NM_BDG_HASH		1024	/* forwarding table entries */
#define NM_BDG_BATCH		1024	/* entries in the forwarding buffer */
#define NM_BRIDGES		8	/* number of bridges */

int netmap_bridge = NM_BDG_BATCH; /* bridge batch size */
SYSCTL_INT(_dev_netmap, OID_AUTO, bridge, CTLFLAG_RW, &netmap_bridge, 0, "");
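/*
 * Usage sketch: switch ports are created and attached on the fly by
 * passing a "vale" name to the NIOCREGIF ioctl described above, e.g.
 * "vale0:1" creates (if needed) switch vale0 with virtual port "1",
 * while a name whose suffix matches a NIC (e.g. "vale0:em0") attaches
 * the physical interface to the same switch (this path goes through
 * NETMAP_BDG_ATTACH, see netmap_bdg_ctl() below).
 */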
#ifdef linux
#define	refcount_acquire(_a)	atomic_add(1, (atomic_t *)_a)
#define	refcount_release(_a)	atomic_dec_and_test((atomic_t *)_a)
#else /* !linux */
#ifdef __FreeBSD__
#include <sys/endian.h>
#include <sys/refcount.h>
#endif /* __FreeBSD__ */
#define prefetch(x)	__builtin_prefetch(x)
#endif /* !linux */
/*
 * These are used to handle reference counters for bridge ports.
 */
#define	ADD_BDG_REF(ifp)	refcount_acquire(&NA(ifp)->na_bdg_refcount)
#define	DROP_BDG_REF(ifp)	refcount_release(&NA(ifp)->na_bdg_refcount)

static void bdg_netmap_attach(struct netmap_adapter *);
static int bdg_netmap_reg(struct ifnet *ifp, int onoff);
static int kern_netmap_regif(struct nmreq *nmr);
/* per-tx-queue entry */
struct nm_bdg_fwd {	/* forwarding entry for a bridge */
	void *ft_buf;
	uint16_t _ft_dst;	/* dst port, unused */
	uint16_t ft_flags;	/* flags, e.g. indirect */
	uint16_t ft_len;	/* src len */
	uint16_t ft_next;	/* next packet to same destination */
};

/* We need to build a list of buffers going to each destination.
 * Each buffer is in one entry of struct nm_bdg_fwd, we use ft_next
 * to build the list, and struct nm_bdg_q below for the queue.
 * The structure should be compact because potentially we have a lot
 * of them.
 */
struct nm_bdg_q {
	uint16_t bq_head;
	uint16_t bq_tail;
};

struct nm_hash_ent {
	uint64_t	mac;	/* the top 2 bytes are the epoch */
	uint64_t	ports;
};
/*
 * Interfaces for a bridge are all in bdg_ports[].
 * The array has fixed size, an empty entry does not terminate
 * the search. But lookups only occur on attach/detach so we
 * don't mind if they are slow.
 *
 * The bridge is non blocking on the transmit ports.
 *
 * bdg_lock protects accesses to the bdg_ports array.
 * This is a rw lock (or equivalent).
 */
struct nm_bridge {
	int namelen;	/* 0 means free */

	/* XXX what is the proper alignment/layout ? */
	NM_RWLOCK_T bdg_lock;	/* protects bdg_ports */
	struct netmap_adapter *bdg_ports[NM_BDG_MAXPORTS];

	char basename[IFNAMSIZ];
	/*
	 * The function to decide the destination port.
	 * It returns either an index of the destination port,
	 * NM_BDG_BROADCAST to broadcast this packet, or NM_BDG_NOPORT not to
	 * forward this packet. ring_nr is the source ring index, and the
	 * function may overwrite this value to forward this packet to a
	 * different ring index.
	 * This function must be set by netmap_bdgctl().
	 */
	bdg_lookup_fn_t nm_bdg_lookup;

	/* the forwarding table, MAC+ports */
	struct nm_hash_ent ht[NM_BDG_HASH];
};

struct nm_bridge nm_bridges[NM_BRIDGES];
NM_LOCK_T netmap_bridge_mutex;
/* other OSes will have these macros defined in their own glue code. */

#ifdef __FreeBSD__
#define BDG_LOCK()		mtx_lock(&netmap_bridge_mutex)
#define BDG_UNLOCK()		mtx_unlock(&netmap_bridge_mutex)
#define BDG_WLOCK(b)		rw_wlock(&(b)->bdg_lock)
#define BDG_WUNLOCK(b)		rw_wunlock(&(b)->bdg_lock)
#define BDG_RLOCK(b)		rw_rlock(&(b)->bdg_lock)
#define BDG_RUNLOCK(b)		rw_runlock(&(b)->bdg_lock)

/* set/get variables. OS-specific macros may wrap these
 * assignments into read/write lock or similar.
 */
#define BDG_SET_VAR(lval, p)	(lval = p)
#define BDG_GET_VAR(lval)	(lval)
#define BDG_FREE(p)		free(p, M_DEVBUF)
#endif /* __FreeBSD__ */
static __inline int
nma_is_vp(struct netmap_adapter *na)
{
	return na->nm_register == bdg_netmap_reg;
}

static __inline int
nma_is_host(struct netmap_adapter *na)
{
	return na->nm_register == NULL;
}

static __inline int
nma_is_hw(struct netmap_adapter *na)
{
	/* In case of sw adapter, nm_register is NULL */
	return !nma_is_vp(na) && !nma_is_host(na);
}
/*
 * Regarding holding a NIC: if the NIC is owned by the kernel
 * (i.e. a bridge), neither another bridge nor a user can use it;
 * if the NIC is owned by a user, only users can share it.
 * Evaluation must be done under NMA_LOCK().
 */
#define NETMAP_OWNED_BY_KERN(ifp)	(!nma_is_vp(NA(ifp)) && NA(ifp)->na_bdg)
#define NETMAP_OWNED_BY_ANY(ifp) \
	(NETMAP_OWNED_BY_KERN(ifp) || (NA(ifp)->refcount > 0))
/*
 * NA(ifp)->bdg_port	port index
 */

// XXX only for multiples of 64 bytes, non overlapped.
static inline void
pkt_copy(void *_src, void *_dst, int l)
{
	uint64_t *src = _src;
	uint64_t *dst = _dst;

	if (unlikely(l >= 1024)) {
		bcopy(src, dst, l);
		return;
	}
	/* copy 64 bytes (eight 8-byte words) per iteration */
	for (; likely(l > 0); l -= 64) {
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
	}
}
/*
 * locate a bridge among the existing ones.
 * a ':' in the name terminates the bridge name. Otherwise, just NM_NAME.
 * We assume that this is called with a name of at least NM_NAME chars.
 */
static struct nm_bridge *
nm_find_bridge(const char *name, int create)
{
	int i, l, namelen;
	struct nm_bridge *b = NULL;

	namelen = strlen(NM_NAME);	/* base length */
	l = strlen(name);		/* actual length */
	for (i = namelen + 1; i < l; i++) {
		if (name[i] == ':') {
			namelen = i;
			break;
		}
	}
	if (namelen >= IFNAMSIZ)
		namelen = IFNAMSIZ;
	ND("--- prefix is '%.*s' ---", namelen, name);

	BDG_LOCK();
	/* lookup the name, remember empty slot if there is one */
	for (i = 0; i < NM_BRIDGES; i++) {
		struct nm_bridge *x = nm_bridges + i;

		if (x->namelen == 0) {
			if (create && b == NULL)
				b = x;	/* record empty slot */
		} else if (x->namelen != namelen) {
			continue;
		} else if (strncmp(name, x->basename, namelen) == 0) {
			ND("found '%.*s' at %d", namelen, name, i);
			b = x;
			break;
		}
	}
	if (i == NM_BRIDGES && b) { /* name not found, can create entry */
		strncpy(b->basename, name, namelen);
		b->namelen = namelen;
		/* set the default function */
		b->nm_bdg_lookup = netmap_bdg_learning;
		/* reset the MAC address table */
		bzero(b->ht, sizeof(struct nm_hash_ent) * NM_BDG_HASH);
	}
	BDG_UNLOCK();
	return b;
}
/*
 * Free the forwarding tables for rings attached to switch ports.
 */
static void
nm_free_bdgfwd(struct netmap_adapter *na)
{
	int nrings, i;
	struct netmap_kring *kring;

	nrings = nma_is_vp(na) ? na->num_tx_rings : na->num_rx_rings;
	kring = nma_is_vp(na) ? na->tx_rings : na->rx_rings;
	for (i = 0; i < nrings; i++) {
		if (kring[i].nkr_ft) {
			free(kring[i].nkr_ft, M_DEVBUF);
			kring[i].nkr_ft = NULL; /* protect from freeing twice */
		}
	}
	if (nma_is_hw(na))
		nm_free_bdgfwd(SWNA(na->ifp));
}
/*
 * Allocate the forwarding tables for the rings attached to the bridge ports.
 */
static int
nm_alloc_bdgfwd(struct netmap_adapter *na)
{
	int nrings, l, i, num_dstq;
	struct netmap_kring *kring;

	/* all port:rings + broadcast */
	num_dstq = NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1;
	l = sizeof(struct nm_bdg_fwd) * NM_BDG_BATCH;
	l += sizeof(struct nm_bdg_q) * num_dstq;
	l += sizeof(uint16_t) * NM_BDG_BATCH;

	nrings = nma_is_vp(na) ? na->num_tx_rings : na->num_rx_rings;
	kring = nma_is_vp(na) ? na->tx_rings : na->rx_rings;
	for (i = 0; i < nrings; i++) {
		struct nm_bdg_fwd *ft;
		struct nm_bdg_q *dstq;
		int j;

		ft = malloc(l, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (!ft) {
			nm_free_bdgfwd(na);
			return ENOMEM;
		}
		dstq = (struct nm_bdg_q *)(ft + NM_BDG_BATCH);
		for (j = 0; j < num_dstq; j++)
			dstq[j].bq_head = dstq[j].bq_tail = NM_BDG_BATCH;
		kring[i].nkr_ft = ft;
	}
	if (nma_is_hw(na))
		nm_alloc_bdgfwd(SWNA(na->ifp));
	return 0;
}
#endif /* NM_BRIDGE */
/*
 * Fetch configuration from the device, to cope with dynamic
 * reconfigurations after loading the module.
 */
static int
netmap_update_config(struct netmap_adapter *na)
{
	struct ifnet *ifp = na->ifp;
	u_int txr, txd, rxr, rxd;

	txr = txd = rxr = rxd = 0;
	if (na->nm_config) {
		na->nm_config(ifp, &txr, &txd, &rxr, &rxd);
	} else {
		/* take whatever we had at init time */
		txr = na->num_tx_rings;
		txd = na->num_tx_desc;
		rxr = na->num_rx_rings;
		rxd = na->num_rx_desc;
	}

	if (na->num_tx_rings == txr && na->num_tx_desc == txd &&
	    na->num_rx_rings == rxr && na->num_rx_desc == rxd)
		return 0; /* nothing changed */
	if (netmap_verbose || na->refcount > 0) {
		D("stored config %s: txring %d x %d, rxring %d x %d",
			ifp->if_xname,
			na->num_tx_rings, na->num_tx_desc,
			na->num_rx_rings, na->num_rx_desc);
		D("new config %s: txring %d x %d, rxring %d x %d",
			ifp->if_xname, txr, txd, rxr, rxd);
	}
	if (na->refcount == 0) {
		D("configuration changed (but fine)");
		na->num_tx_rings = txr;
		na->num_tx_desc = txd;
		na->num_rx_rings = rxr;
		na->num_rx_desc = rxd;
		return 0;
	}
	D("configuration changed while active, this is bad...");
	return 1;
}
/*------------- memory allocator -----------------*/
#include "netmap_mem2.c"
/*------------ end of memory allocator ----------*/
/*
 * Structure associated with each thread that registered an interface.
 *
 * The first 4 fields of this structure are written by NIOCREGIF and
 * read by poll() and NIOC?XSYNC.
 * There is low contention among writers (actually, a correct user program
 * should have no contention among writers) and among writers and readers,
 * so we use a single global lock to protect the structure initialization.
 * Since initialization involves the allocation of memory, we reuse the memory
 * allocator lock.
 * Read access to the structure is lock free. Readers must check that
 * np_nifp is not NULL before using the other fields.
 * If np_nifp is NULL initialization has not been performed, so they should
 * return an error to userlevel.
 *
 * The ref_done field is used to regulate access to the refcount in the
 * memory allocator. The refcount must be incremented at most once for
 * each open("/dev/netmap"). The increment is performed by the first
 * function that calls netmap_get_memory() (currently called by
 * mmap(), NIOCGINFO and NIOCREGIF).
 * If the refcount is incremented, it is then decremented when the
 * private structure is destroyed.
 */
struct netmap_priv_d {
	struct netmap_if * volatile np_nifp;	/* netmap interface descriptor. */

	struct ifnet	*np_ifp;	/* device for which we hold a reference */
	int		np_ringid;	/* from the ioctl */
	u_int		np_qfirst, np_qlast;	/* range of rings to scan */
	uint16_t	np_txpoll;

	unsigned long	ref_done;	/* use with NMA_LOCK held */
};
static int
netmap_get_memory(struct netmap_priv_d* p)
{
	int error = 0;

	NMA_LOCK();
	/* the allocator refcount is incremented at most once per fd,
	 * see the comment above struct netmap_priv_d.
	 */
	if (!p->ref_done) {
		error = netmap_memory_finalize();
		if (!error)
			p->ref_done = 1;
	}
	NMA_UNLOCK();
	return error;
}
/*
 * File descriptor's private data destructor.
 *
 * Call nm_register(ifp,0) to stop netmap mode on the interface and
 * revert to normal operation. We expect that np_ifp has not gone.
 */
/* call with NMA_LOCK held */
static void
netmap_dtor_locked(void *data)
{
	struct netmap_priv_d *priv = data;
	struct ifnet *ifp = priv->np_ifp;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_if *nifp = priv->np_nifp;

	na->refcount--;
	if (na->refcount <= 0) {	/* last instance */
		u_int i, j, lim;

		if (netmap_verbose)
			D("deleting last instance for %s", ifp->if_xname);
		/*
		 * (TO CHECK) This function is only called
		 * when the last reference to this file descriptor goes
		 * away. This means we cannot have any pending poll()
		 * or interrupt routine operating on the structure.
		 */
		na->nm_register(ifp, 0); /* off, clear IFCAP_NETMAP */
		/* Wake up any sleeping threads. netmap_poll will
		 * then return POLLERR
		 */
		for (i = 0; i < na->num_tx_rings + 1; i++)
			selwakeuppri(&na->tx_rings[i].si, PI_NET);
		for (i = 0; i < na->num_rx_rings + 1; i++)
			selwakeuppri(&na->rx_rings[i].si, PI_NET);
		selwakeuppri(&na->tx_si, PI_NET);
		selwakeuppri(&na->rx_si, PI_NET);
#ifdef NM_BRIDGE
		nm_free_bdgfwd(na);
#endif /* NM_BRIDGE */
		/* release all buffers */
		for (i = 0; i < na->num_tx_rings + 1; i++) {
			struct netmap_ring *ring = na->tx_rings[i].ring;
			lim = na->tx_rings[i].nkr_num_slots;
			for (j = 0; j < lim; j++)
				netmap_free_buf(nifp, ring->slot[j].buf_idx);
			/* knlist_destroy(&na->tx_rings[i].si.si_note); */
			mtx_destroy(&na->tx_rings[i].q_lock);
		}
		for (i = 0; i < na->num_rx_rings + 1; i++) {
			struct netmap_ring *ring = na->rx_rings[i].ring;
			lim = na->rx_rings[i].nkr_num_slots;
			for (j = 0; j < lim; j++)
				netmap_free_buf(nifp, ring->slot[j].buf_idx);
			/* knlist_destroy(&na->rx_rings[i].si.si_note); */
			mtx_destroy(&na->rx_rings[i].q_lock);
		}
		/* XXX kqueue(9) needed; these will mirror knlist_init. */
		/* knlist_destroy(&na->tx_si.si_note); */
		/* knlist_destroy(&na->rx_si.si_note); */
		netmap_free_rings(na);
		if (nma_is_hw(na))
			SWNA(ifp)->tx_rings = SWNA(ifp)->rx_rings = NULL;
	}
	netmap_if_free(nifp);
}
/* we assume netmap adapter exists */
static void
nm_if_rele(struct ifnet *ifp)
{
#ifndef NM_BRIDGE
	if_rele(ifp);
#else /* NM_BRIDGE */
	int i, full = 0, is_hw;
	struct nm_bridge *b;
	struct netmap_adapter *na;

	/* I can be called not only for get_ifp()-ed references where netmap's
	 * capability is guaranteed, but also for non-netmap-capable NICs.
	 */
	if (!NETMAP_CAPABLE(ifp) || !NA(ifp)->na_bdg) {
		if_rele(ifp);
		return;
	}
	if (!DROP_BDG_REF(ifp))
		return;

	na = NA(ifp);
	b = na->na_bdg;
	is_hw = nma_is_hw(na);

	BDG_WLOCK(b);
	ND("want to disconnect %s from the bridge", ifp->if_xname);
	/* remove the entry from the bridge, also check
	 * if there are any leftover interfaces
	 * XXX we should optimize this code, e.g. going directly
	 * to na->bdg_port, and having a counter of ports that
	 * are connected. But it is not in a critical path.
	 * In NIC's case, index of sw na is always higher than hw na
	 */
	for (i = 0; i < NM_BDG_MAXPORTS; i++) {
		struct netmap_adapter *tmp = BDG_GET_VAR(b->bdg_ports[i]);

		if (tmp == na) {
			/* disconnect from bridge */
			BDG_SET_VAR(b->bdg_ports[i], NULL);
			na->na_bdg = NULL;
			if (is_hw && SWNA(ifp)->na_bdg) {
				/* disconnect sw adapter too */
				int j = SWNA(ifp)->bdg_port;
				BDG_SET_VAR(b->bdg_ports[j], NULL);
				SWNA(ifp)->na_bdg = NULL;
			}
		} else if (tmp != NULL) {
			full = 1;
		}
	}
	BDG_WUNLOCK(b);
	if (full == 0) {
		ND("marking bridge %d as free", b - nm_bridges);
		b->namelen = 0;
		b->nm_bdg_lookup = NULL;
	}
	if (na->na_bdg) { /* still attached to the bridge */
		D("ouch, cannot find ifp to remove");
	} else if (is_hw) {
		if_rele(ifp);
	} else {
		bzero(na, sizeof(*na));
		free(na, M_DEVBUF);
		bzero(ifp, sizeof(*ifp));
		free(ifp, M_DEVBUF);
	}
#endif /* NM_BRIDGE */
}
static void
netmap_dtor(void *data)
{
	struct netmap_priv_d *priv = data;
	struct ifnet *ifp = priv->np_ifp;

	NMA_LOCK();
	if (ifp) {
		struct netmap_adapter *na = NA(ifp);

		if (na->na_bdg)
			BDG_WLOCK(na->na_bdg);
		na->nm_lock(ifp, NETMAP_REG_LOCK, 0);
		netmap_dtor_locked(data);
		na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
		if (na->na_bdg)
			BDG_WUNLOCK(na->na_bdg);

		nm_if_rele(ifp); /* might also destroy *na */
	}
	if (priv->ref_done) {
		netmap_memory_deref();
	}
	NMA_UNLOCK();
	bzero(priv, sizeof(*priv));	/* XXX for safety */
	free(priv, M_DEVBUF);
}
#ifdef __FreeBSD__
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

/*
 * In order to track whether pages are still mapped, we hook into
 * the standard cdev_pager and intercept the constructor and
 * destructor.
 * XXX but then ? Do we really use the information ?
 * Need to investigate.
 */
static struct cdev_pager_ops saved_cdev_pager_ops;

static int
netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	D("first mmap for %p", handle);
	return saved_cdev_pager_ops.cdev_pg_ctor(handle,
			size, prot, foff, cred, color);
}

static void
netmap_dev_pager_dtor(void *handle)
{
	saved_cdev_pager_ops.cdev_pg_dtor(handle);
	ND("ready to release memory for %p", handle);
}

static struct cdev_pager_ops netmap_cdev_pager_ops = {
	.cdev_pg_ctor = netmap_dev_pager_ctor,
	.cdev_pg_dtor = netmap_dev_pager_dtor,
	.cdev_pg_fault = NULL,
};

// XXX check whether we need netmap_mmap_single _and_ netmap_mmap
static int
netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *foff,
	vm_size_t objsize, vm_object_t *objp, int prot)
{
	vm_object_t obj;

	ND("cdev %p foff %jd size %jd objp %p prot %d", cdev,
	    (intmax_t )*foff, (intmax_t )objsize, objp, prot);
	obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
	    curthread->td_ucred);
	ND("returns obj %p", obj);
	if (obj == NULL)
		return EINVAL;
	if (saved_cdev_pager_ops.cdev_pg_fault == NULL) {
		ND("initialize cdev_pager_ops");
		saved_cdev_pager_ops = *(obj->un_pager.devp.ops);
		netmap_cdev_pager_ops.cdev_pg_fault =
			saved_cdev_pager_ops.cdev_pg_fault;
	}
	obj->un_pager.devp.ops = &netmap_cdev_pager_ops;
	*objp = obj;
	return 0;
}
#endif /* __FreeBSD__ */
/*
 * mmap(2) support for the "netmap" device.
 *
 * Expose all the memory previously allocated by our custom memory
 * allocator: this way the user has only to issue a single mmap(2), and
 * can work on all the data structures flawlessly.
 *
 * Return 0 on success, -1 otherwise.
 */

static int
netmap_mmap(__unused struct cdev *dev,
#if __FreeBSD_version < 900000
	vm_offset_t offset, vm_paddr_t *paddr, int nprot
#else
	vm_ooffset_t offset, vm_paddr_t *paddr, int nprot,
	__unused vm_memattr_t *memattr
#endif
	)
{
	int error = 0;
	struct netmap_priv_d *priv;

	if (nprot & PROT_EXEC)
		return (-1);	// XXX -1 or EINVAL ?

	error = devfs_get_cdevpriv((void **)&priv);
	if (error == EBADF) {	/* called on fault, memory is initialized */
		ND(5, "handling fault at ofs 0x%x", offset);
		error = 0;
	} else if (error == 0)	/* make sure memory is set */
		error = netmap_get_memory(priv);
	if (error)
		return (error);

	ND("request for offset 0x%x", (uint32_t)offset);
	*paddr = netmap_ofstophys(offset);

	return (*paddr ? 0 : ENOMEM);
}
static int
netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	if (netmap_verbose)
		D("dev %p fflag 0x%x devtype %d td %p",
			dev, fflag, devtype, td);
	return 0;
}

static int
netmap_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct netmap_priv_d *priv;
	int error;

	priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF,
			      M_NOWAIT | M_ZERO);
	if (priv == NULL)
		return ENOMEM;

	error = devfs_set_cdevpriv(priv, netmap_dtor);
	if (error)
		return error;

	return 0;
}
#endif /* __FreeBSD__ */
/*
 * Handlers for synchronization of the queues from/to the host.
 * Netmap has two operating modes:
 * - in the default mode, the rings connected to the host stack are
 *   just another ring pair managed by userspace;
 * - in transparent mode (XXX to be defined) incoming packets
 *   (from the host or the NIC) are marked as NS_FORWARD upon
 *   arrival, and the user application has a chance to reset the
 *   flag for packets that should be dropped.
 *   On the RXSYNC or poll(), packets in RX rings between
 *   kring->nr_hwcur and ring->cur with NS_FORWARD still set are moved
 *   to the host stack.
 *
 * The transfer NIC --> host is relatively easy, just encapsulate
 * into mbufs and we are done. The host --> NIC side is slightly
 * harder because there might not be room in the tx ring so it
 * might take a while before releasing the buffer.
 */
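/*
 * Transparent mode from the application's point of view (hypothetical
 * snippet, not part of this module): after a poll()/RXSYNC the
 * application clears NS_FORWARD on the slots it wants dropped, and
 * whatever still carries the flag is passed to the host stack on the
 * next sync. should_drop() is a made-up application helper; the
 * NETMAP_BUF()/NETMAP_RING_NEXT() macros are those of netmap(4):
 *
 *	for (; ring->avail > 0; ring->avail--) {
 *		struct netmap_slot *slot = &ring->slot[ring->cur];
 *
 *		if (should_drop(NETMAP_BUF(ring, slot->buf_idx), slot->len))
 *			slot->flags &= ~NS_FORWARD;
 *		ring->cur = NETMAP_RING_NEXT(ring, ring->cur);
 *	}
 */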
/*
 * pass a chain of buffers to the host stack as coming from 'dst'
 */
static void
netmap_send_up(struct ifnet *dst, struct mbuf *head)
{
	struct mbuf *m;

	/* send packets up, outside the lock */
	while ((m = head) != NULL) {
		head = head->m_nextpkt;
		m->m_nextpkt = NULL;
		if (netmap_verbose & NM_VERB_HOST)
			D("sending up pkt %p size %d", m, MBUF_LEN(m));
		NM_SEND_UP(dst, m);
	}
}
/*
 * put a copy of the buffers marked NS_FORWARD into an mbuf chain.
 * Run from hwcur to cur - reserved
 */
static void
netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force)
{
	/* Take packets from hwcur to cur-reserved and pass them up.
	 * In case of no buffers we give up. At the end of the loop,
	 * the queue is drained in all cases.
	 * XXX handle reserved
	 */
	int k = kring->ring->cur - kring->ring->reserved;
	u_int n, lim = kring->nkr_num_slots - 1;
	struct mbuf *m, *tail = q->tail;

	if (k < 0)
		k = k + kring->nkr_num_slots;
	for (n = kring->nr_hwcur; n != k;) {
		struct netmap_slot *slot = &kring->ring->slot[n];

		n = (n == lim) ? 0 : n + 1;
		if ((slot->flags & NS_FORWARD) == 0 && !force)
			continue;
		if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE) {
			D("bad pkt at %d len %d", n, slot->len);
			continue;
		}
		slot->flags &= ~NS_FORWARD; // XXX needed ?
		m = m_devget(NMB(slot), slot->len, 0, kring->na->ifp, NULL);

		if (m == NULL)
			break;
		if (tail)
			tail->m_nextpkt = m;
		else
			q->head = m;
		tail = m;
		q->count++;
		m->m_nextpkt = NULL;
	}
	q->tail = tail;
}
/*
 * called under main lock to send packets from the host to the NIC
 * The host ring has packets from nr_hwcur to (cur - reserved)
 * to be sent down. We scan the tx rings, which have just been
 * flushed so nr_hwcur == cur. Pushing packets down means
 * increment cur and decrement avail.
 * XXX to be verified
 */
static void
netmap_sw_to_nic(struct netmap_adapter *na)
{
	struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings];
	struct netmap_kring *k1 = &na->tx_rings[0];
	int i, howmany, src_lim, dst_lim;

	howmany = kring->nr_hwavail;	/* XXX otherwise cur - reserved - nr_hwcur */

	src_lim = kring->nkr_num_slots;
	for (i = 0; howmany > 0 && i < na->num_tx_rings; i++, k1++) {
		ND("%d packets left to ring %d (space %d)", howmany, i, k1->nr_hwavail);
		dst_lim = k1->nkr_num_slots;
		while (howmany > 0 && k1->ring->avail > 0) {
			struct netmap_slot *src, *dst, tmp;
			src = &kring->ring->slot[kring->nr_hwcur];
			dst = &k1->ring->slot[k1->ring->cur];
			tmp = *src;
			/* swap buffers between the host slot and the tx slot */
			src->buf_idx = dst->buf_idx;
			src->flags = NS_BUF_CHANGED;

			dst->buf_idx = tmp.buf_idx;
			dst->len = tmp.len;
			dst->flags = NS_BUF_CHANGED;
			ND("out len %d buf %d from %d to %d",
				dst->len, dst->buf_idx,
				kring->nr_hwcur, k1->ring->cur);

			if (++kring->nr_hwcur >= src_lim)
				kring->nr_hwcur = 0;
			howmany--;
			kring->nr_hwavail--;
			if (++k1->ring->cur >= dst_lim)
				k1->ring->cur = 0;
			k1->ring->avail--;
		}
		kring->ring->cur = kring->nr_hwcur; // XXX
	}
}
/*
 * netmap_sync_to_host() passes packets up. We are called from a
 * system call in user process context, and the only contention
 * can be among multiple user threads erroneously calling
 * this routine concurrently.
 */
static void
netmap_sync_to_host(struct netmap_adapter *na)
{
	struct netmap_kring *kring = &na->tx_rings[na->num_tx_rings];
	struct netmap_ring *ring = kring->ring;
	u_int k, lim = kring->nkr_num_slots - 1;
	struct mbq q = { NULL, NULL };

	k = ring->cur;
	if (k > lim) {
		netmap_ring_reinit(kring);
		return;
	}
	// na->nm_lock(na->ifp, NETMAP_CORE_LOCK, 0);

	/* Take packets from hwcur to cur and pass them up.
	 * In case of no buffers we give up. At the end of the loop,
	 * the queue is drained in all cases.
	 */
	netmap_grab_packets(kring, &q, 1);
	kring->nr_hwcur = k;
	kring->nr_hwavail = ring->avail = lim;
	// na->nm_lock(na->ifp, NETMAP_CORE_UNLOCK, 0);

	netmap_send_up(na->ifp, q.head);
}
/* SWNA(ifp)->txrings[0] is always NA(ifp)->txrings[NA(ifp)->num_txrings] */
static int
netmap_bdg_to_host(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
	(void)ring_nr;
	(void)do_lock;
	netmap_sync_to_host(NA(ifp));
	return 0;
}
/*
 * rxsync backend for packets coming from the host stack.
 * They have been put in the queue by netmap_start() so we
 * need to protect access to the kring using a lock.
 *
 * This routine also does the selrecord if called from the poll handler
 * (we know because td != NULL).
 *
 * NOTE: on linux, selrecord() is defined as a macro and uses pwait
 * as an additional hidden argument.
 */
static void
netmap_sync_from_host(struct netmap_adapter *na, struct thread *td, void *pwait)
{
	struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings];
	struct netmap_ring *ring = kring->ring;
	u_int j, n, lim = kring->nkr_num_slots;
	u_int k = ring->cur, resvd = ring->reserved;

	(void)pwait;	/* disable unused warnings */
	na->nm_lock(na->ifp, NETMAP_CORE_LOCK, 0);
	if (k >= lim) {
		netmap_ring_reinit(kring);
		na->nm_lock(na->ifp, NETMAP_CORE_UNLOCK, 0); /* don't leak the lock */
		return;
	}
	/* new packets are already set in nr_hwavail */
	/* skip past packets that userspace has released */
	j = kring->nr_hwcur;
	if (resvd > 0) {
		if (resvd + ring->avail >= lim + 1) {
			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
			ring->reserved = resvd = 0; // XXX panic...
		}
		k = (k >= resvd) ? k - resvd : k + lim - resvd;
	}
	if (j != k) {
		n = k >= j ? k - j : k + lim - j;
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;
	}
	k = ring->avail = kring->nr_hwavail - resvd;
	if (k == 0 && td)
		selrecord(td, &kring->si);
	if (k && (netmap_verbose & NM_VERB_HOST))
		D("%d pkts from stack", k);
	na->nm_lock(na->ifp, NETMAP_CORE_UNLOCK, 0);
}
/*
 * get a refcounted reference to an interface.
 * Return ENXIO if the interface does not exist, EINVAL if netmap
 * is not supported by the interface.
 * If successful, hold a reference.
 *
 * While the NIC is attached to a bridge, the reference is managed
 * at na->na_bdg_refcount using ADD/DROP_BDG_REF(), as for
 * virtual ports. Hence, on the final DROP_BDG_REF(), the NIC
 * is detached from the bridge, then ifp's refcount is dropped (this
 * is equivalent to the ifp being destroyed in case of virtual ports).
 *
 * This function uses if_rele() when we want to prevent the NIC from
 * being detached from the bridge in error handling. But once the refcount
 * is acquired by this function, it must be released using nm_if_rele().
 */
static int
get_ifp(struct nmreq *nmr, struct ifnet **ifp)
{
	const char *name = nmr->nr_name;
	int namelen = strlen(name);
#ifdef NM_BRIDGE
	struct ifnet *iter = NULL;
	int no_prefix = 0;

	do {
		struct nm_bridge *b;
		struct netmap_adapter *na;
		int i, cand = -1, cand2 = -1;

		if (strncmp(name, NM_NAME, sizeof(NM_NAME) - 1)) {
			no_prefix = 1;
			break;
		}
		b = nm_find_bridge(name, 1 /* create a new one if no exist */ );
		if (b == NULL) {
			D("no bridges available for '%s'", name);
			return (ENXIO);
		}
		/* Now we are sure that name starts with the bridge's name */
		BDG_WLOCK(b);
		/* lookup in the local list of ports */
		for (i = 0; i < NM_BDG_MAXPORTS; i++) {
			na = BDG_GET_VAR(b->bdg_ports[i]);
			if (na == NULL) {
				if (cand == -1)
					cand = i; /* potential insert point */
				else if (cand2 == -1)
					cand2 = i; /* for host stack */
				continue;
			}
			iter = na->ifp;
			/* XXX make sure the name only contains one : */
			if (!strcmp(iter->if_xname, name) /* virtual port */ ||
			    (namelen > b->namelen && !strcmp(iter->if_xname,
			    name + b->namelen + 1)) /* NIC */) {
				ADD_BDG_REF(iter);
				ND("found existing interface");
				BDG_WUNLOCK(b);
				break;
			}
		}
		if (i < NM_BDG_MAXPORTS) /* already unlocked */
			break;
		if (cand == -1) {
			D("bridge full, cannot create new port");
no_port:
			BDG_WUNLOCK(b);
			*ifp = NULL;
			return EINVAL;
		}
		ND("create new bridge port %s", name);
		/*
		 * create a struct ifnet for the new port.
		 * The forwarding table is attached to the kring(s).
		 */
		/*
		 * try see if there is a matching NIC with this name
		 * (after the bridge's name)
		 */
		iter = ifunit_ref(name + b->namelen + 1);
		if (!iter) { /* this is a virtual port */
			/* Create a temporary NA with arguments, then
			 * bdg_netmap_attach() will allocate the real one
			 * and attach it to the ifp
			 */
			struct netmap_adapter tmp_na;

			if (nmr->nr_cmd) /* nr_cmd must be for a NIC */
				goto no_port;
			bzero(&tmp_na, sizeof(tmp_na));
			/* bound checking */
			if (nmr->nr_tx_rings < 1)
				nmr->nr_tx_rings = 1;
			if (nmr->nr_tx_rings > NM_BDG_MAXRINGS)
				nmr->nr_tx_rings = NM_BDG_MAXRINGS;
			tmp_na.num_tx_rings = nmr->nr_tx_rings;
			if (nmr->nr_rx_rings < 1)
				nmr->nr_rx_rings = 1;
			if (nmr->nr_rx_rings > NM_BDG_MAXRINGS)
				nmr->nr_rx_rings = NM_BDG_MAXRINGS;
			tmp_na.num_rx_rings = nmr->nr_rx_rings;

			iter = malloc(sizeof(*iter), M_DEVBUF, M_NOWAIT | M_ZERO);
			if (!iter)
				goto no_port;
			strcpy(iter->if_xname, name);
			tmp_na.ifp = iter;
			/* bdg_netmap_attach creates a struct netmap_adapter */
			bdg_netmap_attach(&tmp_na);
		} else if (NETMAP_CAPABLE(iter)) { /* this is a NIC */
			/* cannot attach the NIC that any user or another
			 * bridge already holds.
			 */
			if (NETMAP_OWNED_BY_ANY(iter) || cand2 == -1) {
ifunit_rele:
				if_rele(iter); /* don't detach from bridge */
				goto no_port;
			}
			/* bind the host stack to the bridge */
			if (nmr->nr_arg1 == NETMAP_BDG_HOST) {
				BDG_SET_VAR(b->bdg_ports[cand2], SWNA(iter));
				SWNA(iter)->bdg_port = cand2;
				SWNA(iter)->na_bdg = b;
			}
		} else /* not a netmap-capable NIC */
			goto ifunit_rele;
		na = NA(iter);
		na->bdg_port = cand;
		/* bind the port to the bridge (virtual ports are not active) */
		BDG_SET_VAR(b->bdg_ports[cand], na);
		na->na_bdg = b;
		ADD_BDG_REF(iter);
		BDG_WUNLOCK(b);
		ND("attaching virtual bridge %p", b);
	} while (0);
	*ifp = iter;
	if (! *ifp)
#endif /* NM_BRIDGE */
	*ifp = ifunit_ref(name);
	if (*ifp == NULL)
		return (ENXIO);
	/* can do this if the capability exists and if_pspare[0]
	 * points to the netmap descriptor.
	 */
	if (NETMAP_CAPABLE(*ifp)) {
#ifdef NM_BRIDGE
		/* Users cannot use the NIC attached to a bridge directly */
		if (no_prefix && NETMAP_OWNED_BY_KERN(*ifp)) {
			if_rele(*ifp); /* don't detach from bridge */
			return EINVAL;
		} else
#endif /* NM_BRIDGE */
		return 0;	/* valid pointer, we hold the refcount */
	}
	nm_if_rele(*ifp);
	return EINVAL;	// not NETMAP capable
}
/*
 * Error routine called when txsync/rxsync detects an error.
 * Can't do much more than resetting cur = hwcur, avail = hwavail.
 * Return 1 on reinit.
 *
 * This routine is only called by the upper half of the kernel.
 * It only reads hwcur (which is changed only by the upper half, too)
 * and hwavail (which may be changed by the lower half, but only on
 * a tx ring and only to increase it, so any error will be recovered
 * on the next call). For the above, we don't strictly need to call
 * it under lock.
 */
int
netmap_ring_reinit(struct netmap_kring *kring)
{
	struct netmap_ring *ring = kring->ring;
	u_int i, lim = kring->nkr_num_slots - 1;
	int errors = 0;

	RD(10, "called for %s", kring->na->ifp->if_xname);
	if (ring->cur > lim)
		errors++;
	for (i = 0; i <= lim; i++) {
		u_int idx = ring->slot[i].buf_idx;
		u_int len = ring->slot[i].len;
		if (idx < 2 || idx >= netmap_total_buffers) {
			if (!errors++)
				D("bad buffer at slot %d idx %d len %d ", i, idx, len);
			ring->slot[i].buf_idx = 0;
			ring->slot[i].len = 0;
		} else if (len > NETMAP_BUF_SIZE) {
			ring->slot[i].len = 0;
			if (!errors++)
				D("bad len %d at slot %d idx %d",
					len, i, idx);
		}
	}
	if (errors) {
		int pos = kring - kring->na->tx_rings;
		int n = kring->na->num_tx_rings + 1;

		RD(10, "total %d errors", errors);
		RD(10, "%s %s[%d] reinit, cur %d -> %d avail %d -> %d",
			kring->na->ifp->if_xname,
			pos < n ? "TX" : "RX", pos < n ? pos : pos - n,
			ring->cur, kring->nr_hwcur,
			ring->avail, kring->nr_hwavail);
		ring->cur = kring->nr_hwcur;
		ring->avail = kring->nr_hwavail;
	}
	return (errors ? 1 : 0);
}
/*
 * Set the ring ID. For devices with a single queue, a request
 * for all rings is the same as a single ring.
 */
static int
netmap_set_ringid(struct netmap_priv_d *priv, u_int ringid)
{
	struct ifnet *ifp = priv->np_ifp;
	struct netmap_adapter *na = NA(ifp);
	u_int i = ringid & NETMAP_RING_MASK;
	/* initially (np_qfirst == np_qlast) we don't want to lock */
	int need_lock = (priv->np_qfirst != priv->np_qlast);
	int lim = na->num_rx_rings;

	if (na->num_tx_rings > lim)
		lim = na->num_tx_rings;
	if ( (ringid & NETMAP_HW_RING) && i >= lim) {
		D("invalid ring id %d", i);
		return (EINVAL);
	}
	if (need_lock)
		na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
	priv->np_ringid = ringid;
	if (ringid & NETMAP_SW_RING) {
		priv->np_qfirst = NETMAP_SW_RING;
		priv->np_qlast = 0;
	} else if (ringid & NETMAP_HW_RING) {
		priv->np_qfirst = i;
		priv->np_qlast = i + 1;
	} else {
		priv->np_qfirst = 0;
		priv->np_qlast = NETMAP_HW_RING;
	}
	priv->np_txpoll = (ringid & NETMAP_NO_TX_POLL) ? 0 : 1;
	if (need_lock)
		na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);
	if (netmap_verbose) {
		if (ringid & NETMAP_SW_RING)
			D("ringid %s set to SW RING", ifp->if_xname);
		else if (ringid & NETMAP_HW_RING)
			D("ringid %s set to HW RING %d", ifp->if_xname,
				priv->np_qfirst);
		else
			D("ringid %s set to all %d HW RINGS", ifp->if_xname, lim);
	}
	return 0;
}
/*
 * possibly move the interface to netmap-mode.
 * If success it returns a pointer to netmap_if, otherwise NULL.
 * This must be called with NMA_LOCK held.
 */
static struct netmap_if *
netmap_do_regif(struct netmap_priv_d *priv, struct ifnet *ifp,
	uint16_t ringid, int *err)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_if *nifp = NULL;
	int i, error;

	if (na->na_bdg)
		BDG_WLOCK(na->na_bdg);
	na->nm_lock(ifp, NETMAP_REG_LOCK, 0);

	/* ring configuration may have changed, fetch from the card */
	netmap_update_config(na);
	priv->np_ifp = ifp;	/* store the reference */
	error = netmap_set_ringid(priv, ringid);
	if (error)
		goto out;
	nifp = netmap_if_new(ifp->if_xname, na);
	if (nifp == NULL) { /* allocation failed */
		error = ENOMEM;
	} else if (ifp->if_capenable & IFCAP_NETMAP) {
		/* was already set */
	} else {
		/* Otherwise set the card in netmap mode
		 * and make it use the shared buffers.
		 */
		for (i = 0 ; i < na->num_tx_rings + 1; i++)
			mtx_init(&na->tx_rings[i].q_lock, "nm_txq_lock",
			    MTX_NETWORK_LOCK, MTX_DEF);
		for (i = 0 ; i < na->num_rx_rings + 1; i++) {
			mtx_init(&na->rx_rings[i].q_lock, "nm_rxq_lock",
			    MTX_NETWORK_LOCK, MTX_DEF);
		}
		if (nma_is_hw(na)) {
			SWNA(ifp)->tx_rings = &na->tx_rings[na->num_tx_rings];
			SWNA(ifp)->rx_rings = &na->rx_rings[na->num_rx_rings];
		}
		error = na->nm_register(ifp, 1); /* mode on */
#ifdef NM_BRIDGE
		if (!error)
			error = nm_alloc_bdgfwd(na);
#endif /* NM_BRIDGE */
		if (error) {
			netmap_dtor_locked(priv);
			/* nifp is not yet in priv, so free it separately */
			netmap_if_free(nifp);
			nifp = NULL;
		}
	}
out:
	*err = error;
	na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
	if (na->na_bdg)
		BDG_WUNLOCK(na->na_bdg);
	return nifp;
}
/* Process NETMAP_BDG_ATTACH and NETMAP_BDG_DETACH */
static int
kern_netmap_regif(struct nmreq *nmr)
{
	struct ifnet *ifp;
	struct netmap_if *nifp;
	struct netmap_priv_d *npriv;
	int error;

	npriv = malloc(sizeof(*npriv), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (npriv == NULL)
		return ENOMEM;
	error = netmap_get_memory(npriv);
	if (error) {
free_exit:
		bzero(npriv, sizeof(*npriv));
		free(npriv, M_DEVBUF);
		return error;
	}

	NMA_LOCK();
	error = get_ifp(nmr, &ifp);
	if (error) { /* no device, or another bridge or user owns the device */
		NMA_UNLOCK();
		goto free_exit;
	} else if (!NETMAP_OWNED_BY_KERN(ifp)) {
		/* got reference to a virtual port or direct access to a NIC.
		 * perhaps specified no bridge's prefix or wrong NIC's name
		 */
		error = EINVAL;
unref_exit:
		nm_if_rele(ifp);
		NMA_UNLOCK();
		goto free_exit;
	}

	if (nmr->nr_cmd == NETMAP_BDG_DETACH) {
		if (NA(ifp)->refcount == 0) { /* not registered */
			error = EINVAL;
			goto unref_exit;
		}
		NMA_UNLOCK();

		netmap_dtor(NA(ifp)->na_kpriv); /* unregister */
		NA(ifp)->na_kpriv = NULL;
		nm_if_rele(ifp); /* detach from the bridge */
		goto free_exit;
	} else if (NA(ifp)->refcount > 0) { /* already registered */
		error = EBUSY;
		goto unref_exit;
	}

	nifp = netmap_do_regif(npriv, ifp, nmr->nr_ringid, &error);
	if (!nifp)
		goto unref_exit;
	wmb(); // XXX do we need it ?
	npriv->np_nifp = nifp;
	NA(ifp)->na_kpriv = npriv;
	NMA_UNLOCK();
	D("registered %s to netmap-mode", ifp->if_xname);
	return 0;
}
/* CORE_LOCK is not necessary */
static void
netmap_swlock_wrapper(struct ifnet *dev, int what, u_int queueid)
{
	struct netmap_adapter *na = SWNA(dev);

	switch (what) {
	case NETMAP_TX_LOCK:
		mtx_lock(&na->tx_rings[queueid].q_lock);
		break;

	case NETMAP_TX_UNLOCK:
		mtx_unlock(&na->tx_rings[queueid].q_lock);
		break;

	case NETMAP_RX_LOCK:
		mtx_lock(&na->rx_rings[queueid].q_lock);
		break;

	case NETMAP_RX_UNLOCK:
		mtx_unlock(&na->rx_rings[queueid].q_lock);
		break;
	}
}
/* Initialize necessary fields of the sw adapter located right after the
 * hw one. The sw adapter attaches a pair of sw rings of the netmap-mode
 * NIC. It is always activated and deactivated at the same time as the hw
 * one, so we don't need refcounting on the sw adapter.
 * Regardless of the NIC's features we use a separate lock so that anybody
 * can lock me independently from the hw adapter.
 * Make sure nm_register is NULL to be handled as FALSE in nma_is_hw.
 */
static void
netmap_attach_sw(struct ifnet *ifp)
{
	struct netmap_adapter *hw_na = NA(ifp);
	struct netmap_adapter *na = SWNA(ifp);

	na->ifp = ifp;
	na->separate_locks = 1;
	na->nm_lock = netmap_swlock_wrapper;
	na->num_rx_rings = na->num_tx_rings = 1;
	na->num_tx_desc = hw_na->num_tx_desc;
	na->num_rx_desc = hw_na->num_rx_desc;
	na->nm_txsync = netmap_bdg_to_host;
}
/* exported to kernel callers */
int
netmap_bdg_ctl(struct nmreq *nmr, bdg_lookup_fn_t func)
{
	struct nm_bridge *b;
	struct netmap_adapter *na;
	struct ifnet *iter;
	char *name = nmr->nr_name;
	int cmd = nmr->nr_cmd, namelen = strlen(name);
	int error = 0, i, j;

	switch (cmd) {
	case NETMAP_BDG_ATTACH:
	case NETMAP_BDG_DETACH:
		error = kern_netmap_regif(nmr);
		break;

	case NETMAP_BDG_LIST:
		/* this is used to enumerate bridges and ports */
		if (namelen) { /* look up indexes of bridge and port */
			if (strncmp(name, NM_NAME, strlen(NM_NAME))) {
				error = EINVAL;
				break;
			}
			b = nm_find_bridge(name, 0 /* don't create */);
			if (!b) {
				error = ENOENT;
				break;
			}

			BDG_RLOCK(b);
			error = ENOENT;
			for (i = 0; i < NM_BDG_MAXPORTS; i++) {
				na = BDG_GET_VAR(b->bdg_ports[i]);
				if (na == NULL)
					continue;
				iter = na->ifp;
				/* the former and the latter identify a
				 * virtual port and a NIC, respectively
				 */
				if (!strcmp(iter->if_xname, name) ||
				    (namelen > b->namelen &&
				    !strcmp(iter->if_xname,
				    name + b->namelen + 1))) {
					nmr->nr_arg1 = b - nm_bridges; /* bridge index */
					nmr->nr_arg2 = i; /* port index */
					error = 0;
					break;
				}
			}
			BDG_RUNLOCK(b);
		} else {
			/* return the first non-empty entry starting from
			 * bridge nr_arg1 and port nr_arg2.
			 *
			 * Users can detect the end of the same bridge by
			 * seeing the new and old value of nr_arg1, and can
			 * detect the end of all the bridges by error != 0
			 */
			i = nmr->nr_arg1;
			j = nmr->nr_arg2;
			for (error = ENOENT; error && i < NM_BRIDGES; i++) {
				b = nm_bridges + i;
				BDG_RLOCK(b);
				for (; j < NM_BDG_MAXPORTS; j++) {
					na = BDG_GET_VAR(b->bdg_ports[j]);
					if (na == NULL)
						continue;
					iter = na->ifp;
					nmr->nr_arg1 = i;
					nmr->nr_arg2 = j;
					strncpy(name, iter->if_xname, IFNAMSIZ);
					error = 0;
					break;
				}
				BDG_RUNLOCK(b);
				j = 0; /* following bridges scan from 0 */
			}
		}
		break;

	case NETMAP_BDG_LOOKUP_REG:
		/* register a lookup function to the given bridge.
		 * nmr->nr_name may be just bridge's name (including ':'
		 * if it is not just NM_NAME).
		 */
		if (!func) {
			error = EINVAL;
			break;
		}
		b = nm_find_bridge(name, 0 /* don't create */);
		if (!b) {
			error = EINVAL;
			break;
		}
		BDG_WLOCK(b);
		b->nm_bdg_lookup = func;
		BDG_WUNLOCK(b);
		break;

	default:
		D("invalid cmd (nmr->nr_cmd) (0x%x)", cmd);
		error = EINVAL;
		break;
	}
	return error;
}
/*
 * ioctl(2) support for the "netmap" device.
 *
 * Following is the list of accepted commands:
 * - NIOCGINFO
 * - SIOCGIFADDR	just for convenience
 * - NIOCREGIF
 * - NIOCUNREGIF
 * - NIOCTXSYNC
 * - NIOCRXSYNC
 *
 * Return 0 on success, errno otherwise.
 */
static int
netmap_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
	int fflag, struct thread *td)
{
	struct netmap_priv_d *priv = NULL;
	struct ifnet *ifp;
	struct nmreq *nmr = (struct nmreq *) data;
	struct netmap_adapter *na;
	int error;
	u_int i, lim;
	struct netmap_if *nifp;

	(void)dev;	/* UNUSED */
	(void)fflag;	/* UNUSED */
#ifdef linux
#define devfs_get_cdevpriv(pp)				\
	({ *(struct netmap_priv_d **)pp = ((struct file *)td)->private_data; \
		(*pp ? 0 : ENOENT); })

/* devfs_set_cdevpriv cannot fail on linux */
#define devfs_set_cdevpriv(p, fn)				\
	({ ((struct file *)td)->private_data = p; (p ? 0 : EINVAL); })

#define devfs_clear_cdevpriv()	do {				\
		netmap_dtor(priv); ((struct file *)td)->private_data = 0; \
	} while (0)
#endif /* linux */

	CURVNET_SET(TD_TO_VNET(td));

	error = devfs_get_cdevpriv((void **)&priv);
	if (error) {
		CURVNET_RESTORE();
		/* XXX ENOENT should be impossible, since the priv
		 * is now created in the open */
		return (error == ENOENT ? ENXIO : error);
	}

	nmr->nr_name[sizeof(nmr->nr_name) - 1] = '\0';	/* truncate name */
	switch (cmd) {
	case NIOCGINFO:		/* return capabilities etc */
		if (nmr->nr_version != NETMAP_API) {
			D("API mismatch got %d have %d",
				nmr->nr_version, NETMAP_API);
			nmr->nr_version = NETMAP_API;
			error = EINVAL;
			break;
		}
		if (nmr->nr_cmd == NETMAP_BDG_LIST) {
			error = netmap_bdg_ctl(nmr, NULL);
			break;
		}
		/* update configuration */
		error = netmap_get_memory(priv);
		ND("get_memory returned %d", error);
		if (error)
			break;
		/* memsize is always valid */
		nmr->nr_memsize = nm_mem.nm_totalsize;
		nmr->nr_offset = 0;
		nmr->nr_rx_slots = nmr->nr_tx_slots = 0;
		if (nmr->nr_name[0] == '\0')	/* just get memory info */
			break;
		/* lock because get_ifp and update_config see na->refcount */
		NMA_LOCK();
		error = get_ifp(nmr, &ifp); /* get a refcount */
		if (error) {
			NMA_UNLOCK();
			break;
		}
		na = NA(ifp); /* retrieve netmap_adapter */
		netmap_update_config(na);
		NMA_UNLOCK();
		nmr->nr_rx_rings = na->num_rx_rings;
		nmr->nr_tx_rings = na->num_tx_rings;
		nmr->nr_rx_slots = na->num_rx_desc;
		nmr->nr_tx_slots = na->num_tx_desc;
		nm_if_rele(ifp);	/* return the refcount */
		break;
	case NIOCREGIF:
		if (nmr->nr_version != NETMAP_API) {
			nmr->nr_version = NETMAP_API;
			error = EINVAL;
			break;
		}
		/* possibly attach/detach NIC and VALE switch */
		i = nmr->nr_cmd;
		if (i == NETMAP_BDG_ATTACH || i == NETMAP_BDG_DETACH) {
			error = netmap_bdg_ctl(nmr, NULL);
			break;
		} else if (i != 0) {
			D("nr_cmd must be 0 not %d", i);
			error = EINVAL;
			break;
		}

		/* ensure allocators are ready */
		error = netmap_get_memory(priv);
		ND("get_memory returned %d", error);
		if (error)
			break;

		/* protect access to priv from concurrent NIOCREGIF */
		NMA_LOCK();
		if (priv->np_ifp != NULL) {	/* thread already registered */
			error = netmap_set_ringid(priv, nmr->nr_ringid);
			NMA_UNLOCK();
			break;
		}
		/* find the interface and a reference */
		error = get_ifp(nmr, &ifp); /* keep reference */
		if (error) {
			NMA_UNLOCK();
			break;
		}
		else if (NETMAP_OWNED_BY_KERN(ifp)) {
			nm_if_rele(ifp);
			NMA_UNLOCK();
			error = EBUSY;
			break;
		}
		nifp = netmap_do_regif(priv, ifp, nmr->nr_ringid, &error);
		if (!nifp) {	/* reg. failed, release priv and ref */
			nm_if_rele(ifp);	/* return the refcount */
			priv->np_ifp = NULL;
			priv->np_nifp = NULL;
			NMA_UNLOCK();
			break;
		}

		/* the following assignment is a commitment.
		 * Readers (i.e., poll and *SYNC) check for
		 * np_nifp != NULL without locking
		 */
		wmb(); /* make sure previous writes are visible to all CPUs */
		priv->np_nifp = nifp;
		NMA_UNLOCK();

		/* return the offset of the netmap_if object */
		na = NA(ifp); /* retrieve netmap adapter */
		nmr->nr_rx_rings = na->num_rx_rings;
		nmr->nr_tx_rings = na->num_tx_rings;
		nmr->nr_rx_slots = na->num_rx_desc;
		nmr->nr_tx_slots = na->num_tx_desc;
		nmr->nr_memsize = nm_mem.nm_totalsize;
		nmr->nr_offset = netmap_if_offset(nifp);
		break;
	case NIOCUNREGIF:
		// XXX we have no data here ?
		D("deprecated, data is %p", nmr);
		error = EINVAL;
		break;

	case NIOCTXSYNC:
	case NIOCRXSYNC:
		nifp = priv->np_nifp;

		if (nifp == NULL) {
			error = ENXIO;
			break;
		}
		rmb(); /* make sure following reads are not from cache */

		ifp = priv->np_ifp;	/* we have a reference */

		if (ifp == NULL) {
			D("Internal error: nifp != NULL && ifp == NULL");
			error = ENXIO;
			break;
		}

		na = NA(ifp); /* retrieve netmap adapter */
		if (priv->np_qfirst == NETMAP_SW_RING) { /* host rings */
			if (cmd == NIOCTXSYNC)
				netmap_sync_to_host(na);
			else
				netmap_sync_from_host(na, NULL, NULL);
			break;
		}
		/* find the last ring to scan */
		lim = priv->np_qlast;
		if (lim == NETMAP_HW_RING)
			lim = (cmd == NIOCTXSYNC) ?
			    na->num_tx_rings : na->num_rx_rings;

		for (i = priv->np_qfirst; i < lim; i++) {
			if (cmd == NIOCTXSYNC) {
				struct netmap_kring *kring = &na->tx_rings[i];
				if (netmap_verbose & NM_VERB_TXSYNC)
					D("pre txsync ring %d cur %d hwcur %d",
					    i, kring->ring->cur,
					    kring->nr_hwcur);
				na->nm_txsync(ifp, i, 1 /* do lock */);
				if (netmap_verbose & NM_VERB_TXSYNC)
					D("post txsync ring %d cur %d hwcur %d",
					    i, kring->ring->cur,
					    kring->nr_hwcur);
			} else {
				na->nm_rxsync(ifp, i, 1 /* do lock */);
				microtime(&na->rx_rings[i].ring->ts);
			}
		}
		break;
1855 D("ignore BIOCIMMEDIATE/BIOCSHDRCMPLT/BIOCSHDRCMPLT/BIOCSSEESENT");
1858 default: /* allow device-specific ioctls */
1861 bzero(&so, sizeof(so));
1862 error = get_ifp(nmr, &ifp); /* keep reference */
1865 so.so_vnet = ifp->if_vnet;
1866 // so->so_proto not null.
1867 error = ifioctl(&so, cmd, data, td);
/*
 * select(2) and poll(2) handlers for the "netmap" device.
 *
 * Can be called for one or more queues.
 * Return the event mask corresponding to ready events.
 * If there are no ready events, do a selrecord on either individual
 * selfd or on the global one.
 * Device-dependent parts (locking and sync of tx/rx rings)
 * are done through callbacks.
 *
 * On linux, arguments are really pwait, the poll table, and 'td' is
 * struct file * . The first one is remapped to pwait as selrecord()
 * uses the name as an additional hidden argument.
 */
static int
netmap_poll(struct cdev *dev, int events, struct thread *td)
{
	struct netmap_priv_d *priv = NULL;
	struct netmap_adapter *na;
	struct ifnet *ifp;
	struct netmap_kring *kring;
	u_int core_lock, i, check_all, want_tx, want_rx, revents = 0;
	u_int lim_tx, lim_rx, host_forwarded = 0;
	struct mbq q = { NULL, NULL, 0 };
	enum {NO_CL, NEED_CL, LOCKED_CL }; /* see below */
	void *pwait = dev;	/* linux compatibility */

	(void)pwait;

	if (devfs_get_cdevpriv((void **)&priv) != 0 || priv == NULL)
		return POLLERR;

	if (priv->np_nifp == NULL) {
		D("No if registered");
		return POLLERR;
	}
	rmb(); /* make sure following reads are not from cache */

	ifp = priv->np_ifp;
	// XXX check for deleting() ?
	if ( (ifp->if_capenable & IFCAP_NETMAP) == 0)
		return POLLERR;

	if (netmap_verbose & 0x8000)
		D("device %s events 0x%x", ifp->if_xname, events);
	want_tx = events & (POLLOUT | POLLWRNORM);
	want_rx = events & (POLLIN | POLLRDNORM);

	na = NA(ifp); /* retrieve netmap adapter */

	lim_tx = na->num_tx_rings;
	lim_rx = na->num_rx_rings;
	/* how many queues we are scanning */
	if (priv->np_qfirst == NETMAP_SW_RING) {
		if (priv->np_txpoll || want_tx) {
			/* push any packets up, then we are always ready */
			netmap_sync_to_host(na);
			revents |= want_tx;
		}
		if (want_rx) {
			kring = &na->rx_rings[lim_rx];
			if (kring->ring->avail == 0)
				netmap_sync_from_host(na, td, dev);
			if (kring->ring->avail > 0) {
				revents |= want_rx;
			}
		}
		return (revents);
	}
	/* if we are in transparent mode, check also the host rx ring */
	kring = &na->rx_rings[lim_rx];
	if ( (priv->np_qlast == NETMAP_HW_RING) // XXX check_all
			&& want_rx
			&& (netmap_fwd || kring->ring->flags & NR_FORWARD) ) {
		if (kring->ring->avail == 0)
			netmap_sync_from_host(na, td, dev);
		if (kring->ring->avail > 0)
			revents |= want_rx;
	}
	/*
	 * check_all is set if the card has more than one queue and
	 * the client is polling all of them. If true, we sleep on
	 * the "global" selfd, otherwise we sleep on individual selfd
	 * (we can only sleep on one of them per direction).
	 * The interrupt routine in the driver should always wake on
	 * the individual selfd, and also on the global one if the card
	 * has more than one ring.
	 *
	 * If the card has only one lock, we just use that.
	 * If the card has separate ring locks, we just use those
	 * unless we are doing check_all, in which case the whole
	 * loop is wrapped by the global lock.
	 * We acquire locks only when necessary: if poll is called
	 * when buffers are available, we can just return without locks.
	 *
	 * rxsync() is only called if we run out of buffers on a POLLIN.
	 * txsync() is called if we run out of buffers on POLLOUT, or
	 * there are pending packets to send. The latter can be disabled
	 * passing NETMAP_NO_TX_POLL in the NIOCREG call.
	 */
	check_all = (priv->np_qlast == NETMAP_HW_RING) && (lim_tx > 1 || lim_rx > 1);

	/*
	 * core_lock indicates what to do with the core lock.
	 * The core lock is used when either the card has no individual
	 * locks, or it has individual locks but we are checking all
	 * rings so we need the core lock to avoid missing wakeup events.
	 *
	 * It has three possible states:
	 * NO_CL	we don't need to use the core lock, e.g.
	 *		because we are protected by individual locks.
	 * NEED_CL	we need the core lock. In this case, when we
	 *		call the lock routine, move to LOCKED_CL
	 *		to remember to release the lock once done.
	 * LOCKED_CL	core lock is set, so we need to release it.
	 */
	core_lock = (check_all || !na->separate_locks) ? NEED_CL : NO_CL;
#ifdef NM_BRIDGE
	/* the bridge uses separate locks */
	if (na->nm_register == bdg_netmap_reg) {
		ND("not using core lock for %s", ifp->if_xname);
		core_lock = NO_CL;
	}
#endif /* NM_BRIDGE */
	if (priv->np_qlast != NETMAP_HW_RING) {
		lim_tx = lim_rx = priv->np_qlast;
	}
	/*
	 * We start with a lock free round which is good if we have
	 * data available. If this fails, then lock and call the sync
	 * routines.
	 */
	for (i = priv->np_qfirst; want_rx && i < lim_rx; i++) {
		kring = &na->rx_rings[i];
		if (kring->ring->avail > 0) {
			revents |= want_rx;
			want_rx = 0;	/* also breaks the loop */
		}
	}
	for (i = priv->np_qfirst; want_tx && i < lim_tx; i++) {
		kring = &na->tx_rings[i];
		if (kring->ring->avail > 0) {
			revents |= want_tx;
			want_tx = 0;	/* also breaks the loop */
		}
	}
	/*
	 * If we need to push packets out (priv->np_txpoll) or want_tx is
	 * still set, we do need to run the txsync calls (on all rings,
	 * to avoid that the tx rings stall).
	 */
	if (priv->np_txpoll || want_tx) {
flush_tx:
		for (i = priv->np_qfirst; i < lim_tx; i++) {
			kring = &na->tx_rings[i];
			/*
			 * Skip the current ring if want_tx == 0
			 * (we have already done a successful sync on
			 * a previous ring) AND kring->cur == kring->hwcur
			 * (there are no pending transmissions for this ring).
			 */
			if (!want_tx && kring->ring->cur == kring->nr_hwcur)
				continue;
			if (core_lock == NEED_CL) {
				na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
				core_lock = LOCKED_CL;
			}
			if (na->separate_locks)
				na->nm_lock(ifp, NETMAP_TX_LOCK, i);
			if (netmap_verbose & NM_VERB_TXSYNC)
				D("send %d on %s %d",
					kring->ring->cur, ifp->if_xname, i);
			if (na->nm_txsync(ifp, i, 0 /* no lock */))
				revents |= POLLERR;

			/* Check avail/call selrecord only if called with POLLOUT */
			if (want_tx) {
				if (kring->ring->avail > 0) {
					/* stop at the first ring. We don't risk
					 * starvation.
					 */
					revents |= want_tx;
					want_tx = 0;
				} else if (!check_all)
					selrecord(td, &kring->si);
			}
			if (na->separate_locks)
				na->nm_lock(ifp, NETMAP_TX_UNLOCK, i);
		}
	}
	/*
	 * now if want_rx is still set we need to lock and rxsync.
	 * Do it on all rings because otherwise we starve.
	 */
	if (want_rx) {
		for (i = priv->np_qfirst; i < lim_rx; i++) {
			kring = &na->rx_rings[i];
			if (core_lock == NEED_CL) {
				na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
				core_lock = LOCKED_CL;
			}
			if (na->separate_locks)
				na->nm_lock(ifp, NETMAP_RX_LOCK, i);
			if (netmap_fwd || kring->ring->flags & NR_FORWARD) {
				ND(10, "forwarding some buffers up %d to %d",
				    kring->nr_hwcur, kring->ring->cur);
				netmap_grab_packets(kring, &q, netmap_fwd);
			}

			if (na->nm_rxsync(ifp, i, 0 /* no lock */))
				revents |= POLLERR;
			if (netmap_no_timestamp == 0 ||
					kring->ring->flags & NR_TIMESTAMP) {
				microtime(&kring->ring->ts);
			}

			if (kring->ring->avail > 0)
				revents |= want_rx;
			else if (!check_all)
				selrecord(td, &kring->si);
			if (na->separate_locks)
				na->nm_lock(ifp, NETMAP_RX_UNLOCK, i);
		}
	}
	if (check_all && revents == 0) { /* signal on the global queue */
		if (want_tx)
			selrecord(td, &na->tx_si);
		if (want_rx)
			selrecord(td, &na->rx_si);
	}

	/* forward host to the netmap ring */
	kring = &na->rx_rings[lim_rx];
	if (kring->nr_hwavail > 0)
		ND("host rx %d has %d packets", lim_rx, kring->nr_hwavail);
	if ( (priv->np_qlast == NETMAP_HW_RING) // XXX check_all
			&& (netmap_fwd || kring->ring->flags & NR_FORWARD)
			&& kring->nr_hwavail > 0 && !host_forwarded) {
		if (core_lock == NEED_CL) {
			na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
			core_lock = LOCKED_CL;
		}
		netmap_sw_to_nic(na);
		host_forwarded = 1; /* prevent another pass */
		want_rx = 0;
		goto flush_tx;
	}

	if (core_lock == LOCKED_CL)
		na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);
	if (q.head)
		netmap_send_up(na->ifp, q.head);

	return (revents);
}
/*------- driver support routines ------*/

/*
 * default lock wrapper.
 */
static void
netmap_lock_wrapper(struct ifnet *dev, int what, u_int queueid)
{
	struct netmap_adapter *na = NA(dev);

	switch (what) {
#ifdef linux	/* some systems do not need lock on register */
	case NETMAP_REG_LOCK:
	case NETMAP_REG_UNLOCK:
		break;
#endif /* linux */

	case NETMAP_CORE_LOCK:
		mtx_lock(&na->core_lock);
		break;

	case NETMAP_CORE_UNLOCK:
		mtx_unlock(&na->core_lock);
		break;

	case NETMAP_TX_LOCK:
		mtx_lock(&na->tx_rings[queueid].q_lock);
		break;

	case NETMAP_TX_UNLOCK:
		mtx_unlock(&na->tx_rings[queueid].q_lock);
		break;

	case NETMAP_RX_LOCK:
		mtx_lock(&na->rx_rings[queueid].q_lock);
		break;

	case NETMAP_RX_UNLOCK:
		mtx_unlock(&na->rx_rings[queueid].q_lock);
		break;
	}
}
/*
 * Initialize a ``netmap_adapter`` object created by driver on attach.
 * We allocate a block of memory with room for a struct netmap_adapter
 * plus two sets of N+2 struct netmap_kring (where N is the number
 * of hardware rings):
 * krings	0..N-1	are for the hardware queues.
 * kring	N	is for the host stack queue
 * kring	N+1	is only used for the selinfo for all queues.
 * Return 0 on success, ENOMEM otherwise.
 *
 * By default the receive and transmit adapter ring counts are both initialized
 * to num_queues. na->num_tx_rings can be set for cards with different tx/rx
 * ring counts.
 */
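/*
 * Sketch of a driver calling netmap_attach() at the end of its own
 * attach routine; the "sc" softc fields and the foo_netmap_* callbacks
 * are made-up names, while the struct netmap_adapter fields are real:
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = sc->ifp;
 *	na.separate_locks = 0;
 *	na.num_tx_desc = sc->num_tx_desc;
 *	na.num_rx_desc = sc->num_rx_desc;
 *	na.nm_txsync = foo_netmap_txsync;
 *	na.nm_rxsync = foo_netmap_rxsync;
 *	na.nm_register = foo_netmap_reg;
 *	netmap_attach(&na, sc->num_queues);
 */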
2205 netmap_attach(struct netmap_adapter *arg, int num_queues)
2207 struct netmap_adapter *na = NULL;
2208 struct ifnet *ifp = arg ? arg->ifp : NULL;
2211 if (arg == NULL || ifp == NULL)
2213 len = nma_is_vp(arg) ? sizeof(*na) : sizeof(*na) * 2;
2214 na = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
2218 *na = *arg; /* copy everything, trust the driver to not pass junk */
2219 NETMAP_SET_CAPABLE(ifp);
2220 if (na->num_tx_rings == 0)
2221 na->num_tx_rings = num_queues;
2222 na->num_rx_rings = num_queues;
2223 na->refcount = na->na_single = na->na_multi = 0;
2224 /* Core lock initialized here, others after netmap_if_new. */
2225 mtx_init(&na->core_lock, "netmap core lock", MTX_NETWORK_LOCK, MTX_DEF);
2226 if (na->nm_lock == NULL) {
2227 ND("using default locks for %s", ifp->if_xname);
2228 na->nm_lock = netmap_lock_wrapper;
2231 if (ifp->netdev_ops) {
2232 ND("netdev_ops %p", ifp->netdev_ops);
2233 /* prepare a clone of the netdev ops */
2234 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
2235 na->nm_ndo.ndo_start_xmit = ifp->netdev_ops;
2237 na->nm_ndo = *ifp->netdev_ops;
2240 na->nm_ndo.ndo_start_xmit = linux_netmap_start;
2242 if (!nma_is_vp(arg))
2243 netmap_attach_sw(ifp);
2244 D("success for %s", ifp->if_xname);
2248 D("fail, arg %p ifp %p na %p", arg, ifp, na);
2250 return (na ? EINVAL : ENOMEM);
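/*
 * Usage sketch (hypothetical driver code, not from this file): a NIC
 * driver fills a netmap_adapter on the stack at attach time and hands
 * it to netmap_attach(). foo_txsync/foo_rxsync/foo_register are
 * placeholder names for the driver's handlers; compare with
 * bdg_netmap_attach() below, which does the same for virtual ports.
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = ifp;
 *	na.num_tx_desc = adapter->num_tx_desc;
 *	na.num_rx_desc = adapter->num_rx_desc;
 *	na.nm_txsync = foo_txsync;
 *	na.nm_rxsync = foo_rxsync;
 *	na.nm_register = foo_register;
 *	netmap_attach(&na, 1);	/* one tx/rx ring pair */
 */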
2255 * Free the allocated memory linked to the given ``netmap_adapter`` object.
2259 netmap_detach(struct ifnet *ifp)
2261 struct netmap_adapter *na = NA(ifp);
2266 mtx_destroy(&na->core_lock);
2268 if (na->tx_rings) { /* XXX should not happen */
2269 D("freeing leftover tx_rings");
2270 free(na->tx_rings, M_DEVBUF);
2272 bzero(na, sizeof(*na));
2279 nm_bdg_flush(struct nm_bdg_fwd *ft, int n, struct netmap_adapter *na, u_int ring_nr);
2281 /* we don't need to lock ourselves */
2283 bdg_netmap_start(struct ifnet *ifp, struct mbuf *m)
2285 struct netmap_adapter *na = SWNA(ifp);
2286 struct nm_bdg_fwd *ft = na->rx_rings[0].nkr_ft;
2287 char *buf = NMB(&na->rx_rings[0].ring->slot[0]);
2288 u_int len = MBUF_LEN(m);
2290 if (!na->na_bdg) /* SWNA is not configured to be attached */
2292 m_copydata(m, 0, len, buf);
2293 ft->ft_flags = 0; // XXX could be indirect ?
2296 ft->ft_next = NM_BDG_BATCH; // XXX is it needed ?
2297 nm_bdg_flush(ft, 1, na, 0);
2299 /* release the mbuf in either case, success or failure. As an
2300 * alternative, put the mbuf in a free list and free the list
2301 * only when really necessary.
2310 * Intercept packets from the network stack and pass them
2311 * to netmap as incoming packets on the 'software' ring.
2312 * We are not locked when called.
2315 netmap_start(struct ifnet *ifp, struct mbuf *m)
2317 struct netmap_adapter *na = NA(ifp);
2318 struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings];
2319 u_int i, len = MBUF_LEN(m);
2320 u_int error = EBUSY, lim = kring->nkr_num_slots - 1;
2321 struct netmap_slot *slot;
2323 if (netmap_verbose & NM_VERB_HOST)
2324 D("%s packet %d len %d from the stack", ifp->if_xname,
2325 kring->nr_hwcur + kring->nr_hwavail, len);
2326 if (len > NETMAP_BUF_SIZE) { /* too long for us */
2327 D("%s from_host, drop packet size %d > %d", ifp->if_xname,
2328 len, NETMAP_BUF_SIZE);
2333 return bdg_netmap_start(ifp, m);
2335 na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
2336 if (kring->nr_hwavail >= lim) {
2338 D("stack ring %s full\n", ifp->if_xname);
2339 goto done; /* no space */
2342 /* compute the insert position */
2343 i = kring->nr_hwcur + kring->nr_hwavail;
2346 slot = &kring->ring->slot[i];
2347 m_copydata(m, 0, len, NMB(slot));
2349 slot->flags = kring->nkr_slot_flags;
2350 kring->nr_hwavail++;
2351 if (netmap_verbose & NM_VERB_HOST)
2352 D("wake up host ring %s %d", na->ifp->if_xname, na->num_rx_rings);
2353 selwakeuppri(&kring->si, PI_NET);
2356 na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);
2358 /* release the mbuf in either case, success or failure. As an
2359 * alternative, put the mbuf in a free list and free the list
2360 * only when really necessary.
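/*
 * How packets reach netmap_start() (sketch): on Linux the hook is
 * linux_netmap_start() below, installed as ndo_start_xmit when the
 * adapter is attached. On FreeBSD, registration is assumed to save
 * and repoint the interface transmit hook, roughly:
 *
 *	na->if_transmit = ifp->if_transmit;	// save the stack hook
 *	ifp->if_transmit = netmap_start;	// divert mbufs to netmap
 */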
2369 * netmap_reset() is called by the driver routines when reinitializing
2370 * a ring. The driver is in charge of locking to protect the kring.
2371 * If netmap mode is not set just return NULL.
2373 struct netmap_slot *
2374 netmap_reset(struct netmap_adapter *na, enum txrx tx, int n,
2377 struct netmap_kring *kring;
2381 return NULL; /* no netmap support here */
2382 if (!(na->ifp->if_capenable & IFCAP_NETMAP))
2383 return NULL; /* nothing to reinitialize */
2386 if (n >= na->num_tx_rings)
2388 kring = na->tx_rings + n;
2389 new_hwofs = kring->nr_hwcur - new_cur;
2391 if (n >= na->num_rx_rings)
2393 kring = na->rx_rings + n;
2394 new_hwofs = kring->nr_hwcur + kring->nr_hwavail - new_cur;
2396 lim = kring->nkr_num_slots - 1;
2397 if (new_hwofs > lim)
2398 new_hwofs -= lim + 1;
2400 /* Always set the new offset value and realign the ring. */
2401 kring->nkr_hwofs = new_hwofs;
2403 kring->nr_hwavail = kring->nkr_num_slots - 1;
2404 ND(10, "new hwofs %d on %s %s[%d]",
2405 kring->nkr_hwofs, na->ifp->if_xname,
2406 tx == NR_TX ? "TX" : "RX", n);
2409 /* XXX check that the mappings are correct */
2410 /* need ring_nr, adapter->pdev, direction */
2411 buffer_info->dma = dma_map_single(&pdev->dev, addr, adapter->rx_buffer_len, DMA_FROM_DEVICE);
2412 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
2413 D("error mapping rx netmap buffer %d", i);
2414 // XXX fix error handling
2419 * Wakeup on the individual and the global selinfo (wait queues).
2420 * We do the wakeup here, but the ring is not yet reconfigured.
2421 * However, we are under lock so there are no races.
2423 selwakeuppri(&kring->si, PI_NET);
2424 selwakeuppri(tx == NR_TX ? &na->tx_si : &na->rx_si, PI_NET);
2425 return kring->ring->slot;
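/*
 * Usage sketch (hypothetical driver ring-init code): the driver asks
 * netmap for the slot array and, if non-NULL (netmap mode), points
 * the NIC descriptors at the netmap buffers instead of its own
 * mbufs/skbs. 'nslots' and the descriptor-programming step are
 * driver specific.
 *
 *	struct netmap_adapter *na = NA(ifp);
 *	struct netmap_slot *slot = netmap_reset(na, NR_RX, ring_nr, 0);
 *
 *	if (slot) {	/* we are in netmap mode */
 *		for (l = 0; l < nslots; l++) {
 *			void *addr = NMB(slot + l);
 *			... program rx descriptor l with addr ...
 *		}
 *	}
 */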
2429 /* returns the next position in the ring */
2431 nm_bdg_preflush(struct netmap_adapter *na, u_int ring_nr,
2432 struct netmap_kring *kring, u_int end)
2434 struct netmap_ring *ring = kring->ring;
2435 struct nm_bdg_fwd *ft = kring->nkr_ft;
2436 u_int j = kring->nr_hwcur, lim = kring->nkr_num_slots - 1;
2437 u_int ft_i = 0; /* start from 0 */
2439 for (; likely(j != end); j = unlikely(j == lim) ? 0 : j+1) {
2440 struct netmap_slot *slot = &ring->slot[j];
2441 char *buf = NMB(slot);
2442 int len = ft[ft_i].ft_len = slot->len;
2444 ft[ft_i].ft_flags = slot->flags;
2446 ND("flags is 0x%x", slot->flags);
2447 /* this slot goes into a list so initialize the link field */
2448 ft[ft_i].ft_next = NM_BDG_BATCH; /* equivalent to NULL */
2449 if (unlikely(len < 14)) /* drop frames shorter than an Ethernet header */
2451 buf = ft[ft_i].ft_buf = (slot->flags & NS_INDIRECT) ?
2452 *((void **)buf) : buf;
2454 if (unlikely(++ft_i == netmap_bridge))
2455 ft_i = nm_bdg_flush(ft, ft_i, na, ring_nr);
2458 ft_i = nm_bdg_flush(ft, ft_i, na, ring_nr);
2464 * Pass packets from nic to the bridge. Must be called with
2465 * proper locks on the source interface.
2466 * Note, no user process can access this NIC so we can ignore
2467 * the info in the 'ring'.
2470 netmap_nic_to_bdg(struct ifnet *ifp, u_int ring_nr)
2472 struct netmap_adapter *na = NA(ifp);
2473 struct netmap_kring *kring = &na->rx_rings[ring_nr];
2474 struct netmap_ring *ring = kring->ring;
2475 int j, k, lim = kring->nkr_num_slots - 1;
2477 /* fetch packets that have arrived */
2478 na->nm_rxsync(ifp, ring_nr, 0);
2479 /* XXX we don't count reserved, but it should be 0 */
2480 j = kring->nr_hwcur;
2481 k = j + kring->nr_hwavail;
2484 if (k == j && netmap_verbose) {
2485 D("how strange, interrupt with no packets on %s",
2490 j = nm_bdg_preflush(na, ring_nr, kring, k);
2492 /* we consume everything, but we cannot update kring directly
2493 * because the NIC may have destroyed the info in the NIC ring.
2494 * So we need to call rxsync again to restore it.
2498 na->nm_rxsync(ifp, ring_nr, 0);
2504 * Default functions to handle rx/tx interrupts
2506 * 1 ring, single lock:
2507 * lock(core); wake(i=0); unlock(core)
2508 * N rings, single lock:
2509 * lock(core); wake(i); wake(N+1) unlock(core)
2510 * 1 ring, separate locks: (i=0)
2511 * lock(i); wake(i); unlock(i)
2512 * N rings, separate locks:
2513 * lock(i); wake(i); unlock(i); lock(core) wake(N+1) unlock(core)
2514 * work_done is non-null on the RX path.
2516 * The 'q' argument also carries flags that tell whether the queue is
2517 * already locked on enter, and whether it should remain locked on exit.
2518 * This helps adapting to different defaults in drivers and OSes.
2521 netmap_rx_irq(struct ifnet *ifp, int q, int *work_done)
2523 struct netmap_adapter *na;
2524 struct netmap_kring *r;
2525 NM_SELINFO_T *main_wq;
2526 int locktype, unlocktype, nic_to_bridge, lock;
2528 if (!(ifp->if_capenable & IFCAP_NETMAP))
2531 lock = q & (NETMAP_LOCKED_ENTER | NETMAP_LOCKED_EXIT);
2532 q = q & NETMAP_RING_MASK;
2534 ND(5, "received %s queue %d", work_done ? "RX" : "TX", q);
2536 if (na->na_flags & NAF_SKIP_INTR) {
2537 ND("use regular interrupt");
2541 if (work_done) { /* RX path */
2542 if (q >= na->num_rx_rings)
2543 return 0; // not a physical queue
2544 r = na->rx_rings + q;
2545 r->nr_kflags |= NKR_PENDINTR;
2546 main_wq = (na->num_rx_rings > 1) ? &na->rx_si : NULL;
2547 /* set a flag if the NIC is attached to a VALE switch */
2548 nic_to_bridge = (na->na_bdg != NULL);
2549 locktype = NETMAP_RX_LOCK;
2550 unlocktype = NETMAP_RX_UNLOCK;
2551 } else { /* TX path */
2552 if (q >= na->num_tx_rings)
2553 return 0; // not a physical queue
2554 r = na->tx_rings + q;
2555 main_wq = (na->num_tx_rings > 1) ? &na->tx_si : NULL;
2556 work_done = &q; /* dummy */
2558 locktype = NETMAP_TX_LOCK;
2559 unlocktype = NETMAP_TX_UNLOCK;
2561 if (na->separate_locks) {
2562 if (!(lock & NETMAP_LOCKED_ENTER))
2563 na->nm_lock(ifp, locktype, q);
2564 /* If a NIC is attached to a bridge, flush packets
2565 * (and no need to wakeup anyone). Otherwise, wakeup
2566 * possible processes waiting for packets.
2569 netmap_nic_to_bdg(ifp, q);
2571 selwakeuppri(&r->si, PI_NET);
2572 na->nm_lock(ifp, unlocktype, q);
2573 if (main_wq && !nic_to_bridge) {
2574 na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
2575 selwakeuppri(main_wq, PI_NET);
2576 na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);
2578 /* lock the queue again if requested */
2579 if (lock & NETMAP_LOCKED_EXIT)
2580 na->nm_lock(ifp, locktype, q);
2582 if (!(lock & NETMAP_LOCKED_ENTER))
2583 na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
2585 netmap_nic_to_bdg(ifp, q);
2587 selwakeuppri(&r->si, PI_NET);
2589 selwakeuppri(main_wq, PI_NET);
2591 if (!(lock & NETMAP_LOCKED_EXIT))
2592 na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);
2594 *work_done = 1; /* do not fire napi again */
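/*
 * Usage sketch (hypothetical driver interrupt handler): call this
 * early and skip the normal rx processing when netmap takes the
 * event. 'rxr->me' stands for the driver's queue index.
 *
 *	if (netmap_rx_irq(adapter->ifp, rxr->me, &work_done))
 *		return;	/* netmap consumed the interrupt */
 */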
2599 #ifdef linux /* linux-specific routines */
2603 * Remap linux arguments into the FreeBSD call.
2604 * - pwait is the poll table, passed as 'dev';
2605 * If pwait == NULL someone else already woke up before. We can report
2606 * events but they are filtered upstream.
2607 * If pwait != NULL, then pwait->key contains the list of events.
2608 * - events is computed from pwait as above.
2609 * - file is passed as 'td';
2612 linux_netmap_poll(struct file * file, struct poll_table_struct *pwait)
2614 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
2615 int events = POLLIN | POLLOUT; /* XXX maybe... */
2616 #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)
2617 int events = pwait ? pwait->key : POLLIN | POLLOUT;
2618 #else /* in 3.4.0 field 'key' was renamed to '_key' */
2619 int events = pwait ? pwait->_key : POLLIN | POLLOUT;
2621 return netmap_poll((void *)pwait, events, (void *)file);
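/*
 * Userspace counterpart (sketch, netmap(4) API): block on the file
 * descriptor, then drain the rx ring using cur/avail. NETMAP_RXRING,
 * NETMAP_BUF and NETMAP_RING_NEXT come from the public headers;
 * nifp is the netmap_if obtained after NIOCREGIF + mmap.
 *
 *	struct netmap_ring *ring = NETMAP_RXRING(nifp, 0);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	for (;;) {
 *		poll(&pfd, 1, 1000);
 *		while (ring->avail > 0) {
 *			struct netmap_slot *slot = &ring->slot[ring->cur];
 *			char *p = NETMAP_BUF(ring, slot->buf_idx);
 *			/* ... process slot->len bytes at p ... */
 *			ring->cur = NETMAP_RING_NEXT(ring, ring->cur);
 *			ring->avail--;
 *		}
 *	}
 */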
2626 linux_netmap_mmap(struct file *f, struct vm_area_struct *vma)
2630 struct lut_entry *l_entry;
2632 unsigned long off, tomap;
2634 * vma->vm_start: start of mapping user address space
2635 * vma->vm_end: end of the mapping user address space
2636 * vma->vm_pgoff: offset of the first page in the device
2639 // XXX security checks
2641 error = netmap_get_memory(f->private_data);
2642 ND("get_memory returned %d", error);
2646 off = vma->vm_pgoff << PAGE_SHIFT; /* offset in bytes */
2647 tomap = vma->vm_end - vma->vm_start;
2648 for (i = 0; i < NETMAP_POOLS_NR; i++) { /* loop through obj_pools */
2649 const struct netmap_obj_pool *p = &nm_mem.pools[i];
2651 * In each pool memory is allocated in clusters
2652 * of size _clustsize, each containing clustentries
2653 * entries. For each object k we already store the
2654 * vtophys mapping in lut[k] so we use that, scanning
2655 * the lut[] array in steps of clustentries,
2656 * and we map each cluster (not individual pages,
2657 * it would be overkill -- XXX slow ? 20130415).
2661 * We interpret vm_pgoff as an offset into the whole
2662 * netmap memory, as if all clusters were contiguous.
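 * Worked example with hypothetical sizes: given a first pool with
 * two 4KB clusters and a second pool with 8KB clusters, an initial
 * off of 14KB skips both 4KB clusters (leaving off = 6KB), then the
 * first 8KB cluster of the second pool is mapped starting at
 * l_entry->paddr + 6KB with mapsize = 8KB - 6KB = 2KB; all later
 * clusters are mapped from offset 0.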
2664 for (lut_skip = 0, j = 0; j < p->_numclusters; j++, lut_skip += p->clustentries) {
2665 unsigned long paddr, mapsize;
2666 if (p->_clustsize <= off) {
2667 off -= p->_clustsize;
2670 l_entry = &p->lut[lut_skip]; /* first obj in the cluster */
2671 paddr = l_entry->paddr + off;
2672 mapsize = p->_clustsize - off;
2674 if (mapsize > tomap)
2676 ND("remap_pfn_range(%lx, %lx, %lx)",
2677 vma->vm_start + user_skip,
2678 paddr >> PAGE_SHIFT, mapsize);
2679 if (remap_pfn_range(vma, vma->vm_start + user_skip,
2680 paddr >> PAGE_SHIFT, mapsize,
2682 return -EAGAIN; // XXX check return value
2683 user_skip += mapsize;
2696 linux_netmap_start(struct sk_buff *skb, struct net_device *dev)
2698 netmap_start(dev, skb);
2699 return (NETDEV_TX_OK);
2703 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) // XXX was 38
2704 #define LIN_IOCTL_NAME .ioctl
2706 linux_netmap_ioctl(struct inode *inode, struct file *file, u_int cmd, u_long data /* arg */)
2708 #define LIN_IOCTL_NAME .unlocked_ioctl
2710 linux_netmap_ioctl(struct file *file, u_int cmd, u_long data /* arg */)
2715 bzero(&nmr, sizeof(nmr));
2717 if (data && copy_from_user(&nmr, (void *)data, sizeof(nmr) ) != 0)
2719 ret = netmap_ioctl(NULL, cmd, (caddr_t)&nmr, 0, (void *)file);
2720 if (data && copy_to_user((void*)data, &nmr, sizeof(nmr) ) != 0)
2727 netmap_release(struct inode *inode, struct file *file)
2729 (void)inode; /* UNUSED */
2730 if (file->private_data)
2731 netmap_dtor(file->private_data);
2737 linux_netmap_open(struct inode *inode, struct file *file)
2739 struct netmap_priv_d *priv;
2740 (void)inode; /* UNUSED */
2742 priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF,
2747 file->private_data = priv;
2753 static struct file_operations netmap_fops = {
2754 .owner = THIS_MODULE,
2755 .open = linux_netmap_open,
2756 .mmap = linux_netmap_mmap,
2757 LIN_IOCTL_NAME = linux_netmap_ioctl,
2758 .poll = linux_netmap_poll,
2759 .release = netmap_release,
2763 static struct miscdevice netmap_cdevsw = { /* same name as FreeBSD */
2769 static int netmap_init(void);
2770 static void netmap_fini(void);
2773 /* Errors have negative values on Linux */
2774 static int linux_netmap_init(void)
2776 return -netmap_init();
2779 module_init(linux_netmap_init);
2780 module_exit(netmap_fini);
2781 /* export certain symbols to other modules */
2782 EXPORT_SYMBOL(netmap_attach); // driver attach routines
2783 EXPORT_SYMBOL(netmap_detach); // driver detach routines
2784 EXPORT_SYMBOL(netmap_ring_reinit); // ring init on error
2785 EXPORT_SYMBOL(netmap_buffer_lut);
2786 EXPORT_SYMBOL(netmap_total_buffers); // index check
2787 EXPORT_SYMBOL(netmap_buffer_base);
2788 EXPORT_SYMBOL(netmap_reset); // ring init routines
2789 EXPORT_SYMBOL(netmap_buf_size);
2790 EXPORT_SYMBOL(netmap_rx_irq); // default irq handler
2791 EXPORT_SYMBOL(netmap_no_pendintr); // XXX mitigation - should go away
2792 EXPORT_SYMBOL(netmap_bdg_ctl); // bridge configuration routine
2793 EXPORT_SYMBOL(netmap_bdg_learning); // the default lookup function
2796 MODULE_AUTHOR("http://info.iet.unipi.it/~luigi/netmap/");
2797 MODULE_DESCRIPTION("The netmap packet I/O framework");
2798 MODULE_LICENSE("Dual BSD/GPL"); /* the code here is all BSD. */
2800 #else /* __FreeBSD__ */
2803 static struct cdevsw netmap_cdevsw = {
2804 .d_version = D_VERSION,
2806 .d_open = netmap_open,
2807 .d_mmap = netmap_mmap,
2808 .d_mmap_single = netmap_mmap_single,
2809 .d_ioctl = netmap_ioctl,
2810 .d_poll = netmap_poll,
2811 .d_close = netmap_close,
2813 #endif /* __FreeBSD__ */
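/*
 * Userspace side of this device (sketch, netmap(4) API; "em0" is a
 * hypothetical interface name):
 *
 *	int fd = open("/dev/netmap", O_RDWR);
 *	struct nmreq req;
 *
 *	bzero(&req, sizeof(req));
 *	req.nr_version = NETMAP_API;
 *	strcpy(req.nr_name, "em0");
 *	ioctl(fd, NIOCREGIF, &req);	/* bind the fd to the interface */
 *	mem = mmap(0, req.nr_memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 */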
2817 *---- support for virtual bridge -----
2820 /* ----- FreeBSD if_bridge hash function ------- */
2823 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2824 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
2826 * http://www.burtleburtle.net/bob/hash/spooky.html
2828 #define mix(a, b, c) \
2830 a -= b; a -= c; a ^= (c >> 13); \
2831 b -= c; b -= a; b ^= (a << 8); \
2832 c -= a; c -= b; c ^= (b >> 13); \
2833 a -= b; a -= c; a ^= (c >> 12); \
2834 b -= c; b -= a; b ^= (a << 16); \
2835 c -= a; c -= b; c ^= (b >> 5); \
2836 a -= b; a -= c; a ^= (c >> 3); \
2837 b -= c; b -= a; b ^= (a << 10); \
2838 c -= a; c -= b; c ^= (b >> 15); \
2839 } while (/*CONSTCOND*/0)
2841 static __inline uint32_t
2842 nm_bridge_rthash(const uint8_t *addr)
2844 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; // hash key
2854 #define BRIDGE_RTHASH_MASK (NM_BDG_HASH-1)
2855 return (c & BRIDGE_RTHASH_MASK);
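/*
 * Usage sketch: hash the 6 bytes of a MAC address into a table
 * index; the mask above is only correct because NM_BDG_HASH is a
 * power of two.
 *
 *	uint32_t i = nm_bridge_rthash(buf);	/* buf points at a MAC */
 *	struct nm_hash_ent *he = &na->na_bdg->ht[i];
 */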
2862 bdg_netmap_reg(struct ifnet *ifp, int onoff)
2864 // struct nm_bridge *b = NA(ifp)->na_bdg;
2866 /* the interface is already attached to the bridge,
2867 * so we only need to toggle IFCAP_NETMAP.
2868 * Locking is not necessary (we are already under
2869 * NMA_LOCK, and the port is not in use during this call).
2873 ifp->if_capenable |= IFCAP_NETMAP;
2875 ifp->if_capenable &= ~IFCAP_NETMAP;
2877 /* BDG_WUNLOCK(b); */
2883 * Lookup function for a learning bridge.
2884 * Update the hash table with the source address,
2885 * and then return the destination port index and the
2886 * ring in *dst_ring (at the moment, ring 0 is always used).
2889 netmap_bdg_learning(char *buf, u_int len, uint8_t *dst_ring,
2890 struct netmap_adapter *na)
2892 struct nm_hash_ent *ht = na->na_bdg->ht;
2894 u_int dst, mysrc = na->bdg_port;
2895 uint64_t smac, dmac;
2897 dmac = le64toh(*(uint64_t *)(buf)) & 0xffffffffffff;
2898 smac = le64toh(*(uint64_t *)(buf + 4));
2902 * The hash is somewhat expensive, there might be some
2903 * worthwhile optimizations here.
2905 if ((buf[6] & 1) == 0) { /* valid src */
2907 sh = nm_bridge_rthash(buf+6); // XXX hash of source
2908 /* update source port forwarding entry */
2909 ht[sh].mac = smac; /* XXX expire ? */
2910 ht[sh].ports = mysrc;
2912 D("src %02x:%02x:%02x:%02x:%02x:%02x on port %d",
2913 s[0], s[1], s[2], s[3], s[4], s[5], mysrc);
2915 dst = NM_BDG_BROADCAST;
2916 if ((buf[0] & 1) == 0) { /* unicast */
2917 dh = nm_bridge_rthash(buf); // XXX hash of dst
2918 if (ht[dh].mac == dmac) { /* found dst */
2921 /* XXX otherwise return NM_BDG_UNKNOWN ? */
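/*
 * Note: this learning function is only the default. A bridge can
 * point b->nm_bdg_lookup at any function with the same signature
 * (see the call in nm_bdg_flush() below); returning NM_BDG_NOPORT
 * drops the packet, NM_BDG_BROADCAST floods it.
 *
 *	b->nm_bdg_lookup = my_lookup;	/* hypothetical override */
 */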
2929 * This flush routine supports only unicast and broadcast but a large
2930 * number of ports, and lets us replace the learn and dispatch functions.
2933 nm_bdg_flush(struct nm_bdg_fwd *ft, int n, struct netmap_adapter *na,
2936 struct nm_bdg_q *dst_ents, *brddst;
2937 uint16_t num_dsts = 0, *dsts;
2938 struct nm_bridge *b = na->na_bdg;
2939 u_int i, me = na->bdg_port;
2941 dst_ents = (struct nm_bdg_q *)(ft + NM_BDG_BATCH);
2942 dsts = (uint16_t *)(dst_ents + NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1);
2946 /* first pass: find a destination */
2947 for (i = 0; likely(i < n); i++) {
2948 uint8_t *buf = ft[i].ft_buf;
2949 uint8_t dst_ring = ring_nr;
2950 uint16_t dst_port, d_i;
2953 dst_port = b->nm_bdg_lookup(buf, ft[i].ft_len, &dst_ring, na);
2954 if (dst_port == NM_BDG_NOPORT) {
2955 continue; /* the lookup marked this packet to be dropped */
2956 } else if (unlikely(dst_port > NM_BDG_MAXPORTS)) {
2958 } else if (dst_port == NM_BDG_BROADCAST) {
2959 dst_ring = 0; /* broadcasts always go to ring 0 */
2960 } else if (unlikely(dst_port == me ||
2961 !BDG_GET_VAR(b->bdg_ports[dst_port]))) {
2965 /* get a position in the scratch pad */
2966 d_i = dst_port * NM_BDG_MAXRINGS + dst_ring;
2968 if (d->bq_head == NM_BDG_BATCH) { /* new destination */
2969 d->bq_head = d->bq_tail = i;
2970 /* remember this position to be scanned later */
2971 if (dst_port != NM_BDG_BROADCAST)
2972 dsts[num_dsts++] = d_i;
2974 ft[d->bq_tail].ft_next = i;
2979 /* if there is a broadcast, set ring 0 of all ports to be scanned
2980 * XXX This could be optimized by recording the highest index of active ports.
2983 brddst = dst_ents + NM_BDG_BROADCAST * NM_BDG_MAXRINGS;
2984 if (brddst->bq_head != NM_BDG_BATCH) {
2985 for (i = 0; likely(i < NM_BDG_MAXPORTS); i++) {
2986 uint16_t d_i = i * NM_BDG_MAXRINGS;
2987 if (unlikely(i == me) || !BDG_GET_VAR(b->bdg_ports[i]))
2989 else if (dst_ents[d_i].bq_head == NM_BDG_BATCH)
2990 dsts[num_dsts++] = d_i;
2994 /* second pass: scan destinations (XXX will be modular somehow) */
2995 for (i = 0; i < num_dsts; i++) {
2996 struct ifnet *dst_ifp;
2997 struct netmap_adapter *dst_na;
2998 struct netmap_kring *kring;
2999 struct netmap_ring *ring;
3000 u_int dst_nr, is_vp, lim, j, sent = 0, d_i, next, brd_next;
3001 int howmany, retry = netmap_txsync_retry;
3006 dst_na = BDG_GET_VAR(b->bdg_ports[d_i/NM_BDG_MAXRINGS]);
3007 /* protect from the lookup function returning an inactive destination port */
3010 if (unlikely(dst_na == NULL))
3012 else if (dst_na->na_flags & NAF_SW_ONLY)
3014 dst_ifp = dst_na->ifp;
3016 * The interface may be in !netmap mode in two cases:
3017 * - when na is attached but not activated yet;
3018 * - when na is being deactivated but is still attached.
3020 if (unlikely(!(dst_ifp->if_capenable & IFCAP_NETMAP)))
3023 /* there is at least one either unicast or broadcast packet */
3024 brd_next = brddst->bq_head;
3027 is_vp = nma_is_vp(dst_na);
3028 dst_nr = d_i & (NM_BDG_MAXRINGS-1);
3029 if (is_vp) { /* virtual port */
3030 if (dst_nr >= dst_na->num_rx_rings)
3031 dst_nr = dst_nr % dst_na->num_rx_rings;
3032 kring = &dst_na->rx_rings[dst_nr];
3034 lim = kring->nkr_num_slots - 1;
3035 dst_na->nm_lock(dst_ifp, NETMAP_RX_LOCK, dst_nr);
3036 j = kring->nr_hwcur + kring->nr_hwavail;
3038 j -= kring->nkr_num_slots;
3039 howmany = lim - kring->nr_hwavail;
3040 } else { /* hw or sw adapter */
3041 if (dst_nr >= dst_na->num_tx_rings)
3042 dst_nr = dst_nr % dst_na->num_tx_rings;
3043 kring = &dst_na->tx_rings[dst_nr];
3045 lim = kring->nkr_num_slots - 1;
3046 dst_na->nm_lock(dst_ifp, NETMAP_TX_LOCK, dst_nr);
3048 dst_na->nm_txsync(dst_ifp, dst_nr, 0);
3049 /* see nm_bdg_flush() */
3050 j = kring->nr_hwcur;
3051 howmany = kring->nr_hwavail;
3053 while (howmany-- > 0) {
3054 struct netmap_slot *slot;
3055 struct nm_bdg_fwd *ft_p;
3057 /* our 'NULL' is always higher than valid indexes
3058 * so we never dereference it if the other list
3059 * has packets (and if both are NULL we never get here).
3062 if (next < brd_next) {
3064 next = ft_p->ft_next;
3065 ND("j %d uni %d next %d %d",
3066 j, ft_p - ft, next, brd_next);
3067 } else { /* insert broadcast */
3068 ft_p = ft + brd_next;
3069 brd_next = ft_p->ft_next;
3070 ND("j %d brd %d next %d %d",
3071 j, ft_p - ft, next, brd_next);
3073 slot = &ring->slot[j];
3074 ND("send %d %d bytes at %s:%d", i, ft_p->ft_len, dst_ifp->if_xname, j);
3075 if (ft_p->ft_flags & NS_INDIRECT) {
3076 ND("copying from INDIRECT source");
3077 copyin(ft_p->ft_buf, NMB(slot),
3078 (ft_p->ft_len + 63) & ~63);
3080 pkt_copy(ft_p->ft_buf, NMB(slot), ft_p->ft_len);
3082 slot->len = ft_p->ft_len;
3083 j = unlikely(j == lim) ? 0 : j + 1; /* XXX to be macro-ed */
3086 if (next == NM_BDG_BATCH && brd_next == NM_BDG_BATCH)
3089 if (netmap_verbose && (howmany < 0))
3090 D("rx ring full on %s", dst_ifp->if_xname);
3093 kring->nr_hwavail += sent;
3094 selwakeuppri(&kring->si, PI_NET);
3096 dst_na->nm_lock(dst_ifp, NETMAP_RX_UNLOCK, dst_nr);
3099 ring->avail -= sent;
3101 dst_na->nm_txsync(dst_ifp, dst_nr, 0);
3103 /* retry to send more packets */
3104 if (nma_is_hw(dst_na) && howmany < 0 && retry--)
3106 dst_na->nm_lock(dst_ifp, NETMAP_TX_UNLOCK, dst_nr);
3108 /* NM_BDG_BATCH means 'no packet' */
3109 d->bq_head = d->bq_tail = NM_BDG_BATCH; /* cleanup */
3111 brddst->bq_head = brddst->bq_tail = NM_BDG_BATCH; /* cleanup */
3118 * main dispatch routine
3121 bdg_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
3123 struct netmap_adapter *na = NA(ifp);
3124 struct netmap_kring *kring = &na->tx_rings[ring_nr];
3125 struct netmap_ring *ring = kring->ring;
3126 int i, j, k, lim = kring->nkr_num_slots - 1;
3130 return netmap_ring_reinit(kring);
3132 na->nm_lock(ifp, NETMAP_TX_LOCK, ring_nr);
3134 if (netmap_bridge <= 0) { /* testing only */
3138 if (netmap_bridge > NM_BDG_BATCH)
3139 netmap_bridge = NM_BDG_BATCH;
3141 j = nm_bdg_preflush(na, ring_nr, kring, k);
3144 i += kring->nkr_num_slots;
3145 kring->nr_hwavail = kring->nkr_num_slots - 1 - i;
3147 D("early break at %d/ %d, avail %d", j, k, kring->nr_hwavail);
3150 kring->nr_hwcur = j;
3151 ring->avail = kring->nr_hwavail;
3153 na->nm_lock(ifp, NETMAP_TX_UNLOCK, ring_nr);
3156 D("%s ring %d lock %d", ifp->if_xname, ring_nr, do_lock);
3162 bdg_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
3164 struct netmap_adapter *na = NA(ifp);
3165 struct netmap_kring *kring = &na->rx_rings[ring_nr];
3166 struct netmap_ring *ring = kring->ring;
3167 u_int j, lim = kring->nkr_num_slots - 1;
3168 u_int k = ring->cur, resvd = ring->reserved;
3171 ND("%s ring %d lock %d avail %d",
3172 ifp->if_xname, ring_nr, do_lock, kring->nr_hwavail);
3175 return netmap_ring_reinit(kring);
3177 na->nm_lock(ifp, NETMAP_RX_LOCK, ring_nr);
3179 /* skip past packets that userspace has released */
3180 j = kring->nr_hwcur; /* netmap ring index */
3182 if (resvd + ring->avail >= lim + 1) {
3183 D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
3184 ring->reserved = resvd = 0; // XXX panic...
3186 k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
3189 if (j != k) { /* userspace has released some packets. */
3192 n += kring->nkr_num_slots;
3193 ND("userspace releases %d packets", n);
3194 for (n = 0; likely(j != k); n++) {
3195 struct netmap_slot *slot = &ring->slot[j];
3196 void *addr = NMB(slot);
3198 if (addr == netmap_buffer_base) { /* bad buf */
3200 na->nm_lock(ifp, NETMAP_RX_UNLOCK, ring_nr);
3201 return netmap_ring_reinit(kring);
3203 /* decrease refcount for buffer */
3205 slot->flags &= ~NS_BUF_CHANGED;
3206 j = unlikely(j == lim) ? 0 : j + 1;
3208 kring->nr_hwavail -= n;
3209 kring->nr_hwcur = k;
3211 /* tell userspace that there are new packets */
3212 ring->avail = kring->nr_hwavail - resvd;
3215 na->nm_lock(ifp, NETMAP_RX_UNLOCK, ring_nr);
3221 bdg_netmap_attach(struct netmap_adapter *arg)
3223 struct netmap_adapter na;
3225 ND("attaching virtual bridge");
3226 bzero(&na, sizeof(na));
3229 na.separate_locks = 1;
3230 na.num_tx_rings = arg->num_tx_rings;
3231 na.num_rx_rings = arg->num_rx_rings;
3232 na.num_tx_desc = NM_BRIDGE_RINGSIZE;
3233 na.num_rx_desc = NM_BRIDGE_RINGSIZE;
3234 na.nm_txsync = bdg_netmap_txsync;
3235 na.nm_rxsync = bdg_netmap_rxsync;
3236 na.nm_register = bdg_netmap_reg;
3237 netmap_attach(&na, na.num_tx_rings);
3240 #endif /* NM_BRIDGE */
3242 static struct cdev *netmap_dev; /* /dev/netmap character device. */
3248 * Create the /dev/netmap device and initialize all global variables.
3251 * Return 0 on success, errno on failure.
3258 error = netmap_memory_init();
3260 printf("netmap: unable to initialize the memory allocator.\n");
3263 printf("netmap: loaded module\n");
3264 netmap_dev = make_dev(&netmap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660,
3270 mtx_init(&netmap_bridge_mutex, "netmap_bridge_mutex",
3271 MTX_NETWORK_LOCK, MTX_DEF);
3272 bzero(nm_bridges, sizeof(struct nm_bridge) * NM_BRIDGES); /* safety */
3273 for (i = 0; i < NM_BRIDGES; i++)
3274 rw_init(&nm_bridges[i].bdg_lock, "bdg lock");
3284 * Free all the memory, and destroy the ``/dev/netmap`` device.
3289 destroy_dev(netmap_dev);
3290 netmap_memory_fini();
3291 printf("netmap: unloaded module.\n");
3297 * Kernel entry point.
3299 * Initialize/finalize the module and return.
3301 * Return 0 on success, errno on failure.
3304 netmap_loader(__unused struct module *module, int event, __unused void *arg)
3310 error = netmap_init();
3326 DEV_MODULE(netmap, netmap_loader, NULL);
3327 #endif /* __FreeBSD__ */