/*
 * Copyright (C) 2011-2013 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module supports memory mapped access to network devices,
 * see netmap(4).
 *
 * The module uses a large memory pool allocated by the kernel
 * and accessible as mmapped memory by multiple userspace threads/processes.
 * The memory pool contains packet buffers and "netmap rings",
 * i.e. user-accessible copies of the interface's queues.
 *
 * Access to the network card works like this:
 * 1. a process/thread issues one or more open() on /dev/netmap, to create
 *    select()able file descriptors on which events are reported.
 * 2. on each descriptor, the process issues an ioctl() to identify
 *    the interface that should report events to the file descriptor.
 * 3. on each descriptor, the process issues an mmap() request to
 *    map the shared memory region within the process' address space.
 *    The list of interesting queues is indicated by a location in
 *    the shared memory region.
 * 4. using the functions in the netmap(4) userspace API, a process
 *    can look up the occupation state of a queue, access memory buffers,
 *    and retrieve received packets or enqueue packets to transmit.
 * 5. using some ioctl()s the process can synchronize the userspace view
 *    of the queue with the actual status in the kernel. This includes both
 *    receiving the notification of new packets, and transmitting new
 *    packets on the output interface.
 * 6. select() or poll() can be used to wait for events on individual
 *    transmit or receive queues (or all queues for a given interface).
 */
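/*
 * As a concrete illustration of steps 1-6 above, a minimal userspace
 * session might look like the sketch below (error handling omitted;
 * the interface name "em0" is just a placeholder):
 *
 *	int fd = open("/dev/netmap", O_RDWR);
 *	struct nmreq req;
 *	bzero(&req, sizeof(req));
 *	req.nr_version = NETMAP_API;
 *	strncpy(req.nr_name, "em0", sizeof(req.nr_name));
 *	ioctl(fd, NIOCREGIF, &req);		// bind fd to the interface
 *	void *mem = mmap(0, req.nr_memsize,
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);
 *	struct netmap_ring *rxring = NETMAP_RXRING(nifp, 0);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	for (;;) {
 *		poll(&pfd, 1, -1);		// wait for packets
 *		while (rxring->avail > 0) {
 *			struct netmap_slot *slot = &rxring->slot[rxring->cur];
 *			// payload is at NETMAP_BUF(rxring, slot->buf_idx),
 *			// slot->len bytes long
 *			rxring->cur = NETMAP_RING_NEXT(rxring, rxring->cur);
 *			rxring->avail--;
 *		}
 *	}
 */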
static netdev_tx_t linux_netmap_start(struct sk_buff *skb, struct net_device *dev);
#endif /* __APPLE__ */
#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/conf.h>	/* cdevsw struct */
#include <sys/uio.h>	/* uio struct */
#include <sys/sockio.h>
#include <sys/socketvar.h>	/* struct socket */
#include <sys/malloc.h>
#include <sys/mman.h>	/* PROT_EXEC */
#include <sys/rwlock.h>
#include <vm/vm.h>	/* vtophys */
#include <vm/pmap.h>	/* vtophys */
#include <sys/socket.h>	/* sockaddrs */
#include <machine/bus.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <net/bpf.h>	/* BIOCIMMEDIATE */
#include <machine/bus.h>	/* bus_dmamap_* */

MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");
#endif /* __FreeBSD__ */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
/* XXX the following variables must be deprecated and included in nm_mem */
u_int netmap_total_buffers;
u_int netmap_buf_size;
char *netmap_buffer_base;	/* address of an invalid buffer */

/* user-controlled variables */
int netmap_verbose;

static int netmap_no_timestamp; /* don't timestamp on rxsync */

SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args");
SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
    CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
    CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp");
int netmap_mitigate = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, mitigate, CTLFLAG_RW, &netmap_mitigate, 0, "");
int netmap_no_pendintr = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr,
    CTLFLAG_RW, &netmap_no_pendintr, 0, "Always look for new received packets.");

int netmap_drop = 0;	/* debugging */
int netmap_flags = 0;	/* debug flags */
int netmap_fwd = 0;	/* force transparent mode */

SYSCTL_INT(_dev_netmap, OID_AUTO, drop, CTLFLAG_RW, &netmap_drop, 0, "");
SYSCTL_INT(_dev_netmap, OID_AUTO, flags, CTLFLAG_RW, &netmap_flags, 0, "");
SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0, "");
#ifdef NM_BRIDGE /* support for netmap bridge */

/*
 * All switched ports have prefix NM_NAME.
 * The switch has a max of NM_BDG_MAXPORTS ports (often stored in a bitmap,
 * so a practical upper bound is 64).
 * Each tx ring is read-write, whereas rx rings are readonly (XXX not done yet).
 * The virtual interfaces use per-queue locks instead of the core lock.
 * In the tx loop, we aggregate traffic in batches to make all operations
 * faster. The batch size is NM_BDG_BATCH.
 */
#define	NM_NAME			"vale"	/* prefix for the interface */
#define	NM_BDG_MAXPORTS		16	/* up to 64 ? */
#define	NM_BRIDGE_RINGSIZE	1024	/* in the device */
#define	NM_BDG_HASH		1024	/* forwarding table entries */
#define	NM_BDG_BATCH		1024	/* entries in the forwarding buffer */
#define	NM_BRIDGES		4	/* number of bridges */

int netmap_bridge = NM_BDG_BATCH; /* bridge batch size */
SYSCTL_INT(_dev_netmap, OID_AUTO, bridge, CTLFLAG_RW, &netmap_bridge, 0, "");
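/*
 * As an illustration (a sketch, not code from this file): a process
 * attaches to a port of a VALE switch simply by passing a name with
 * the NM_NAME prefix to NIOCREGIF; the part before the ':' selects
 * the bridge, the rest names the port:
 *
 *	strncpy(req.nr_name, "vale0:p0", sizeof(req.nr_name));
 *	ioctl(fd, NIOCREGIF, &req);	// creates the port if needed
 */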
#define	refcount_acquire(_a)	atomic_add(1, (atomic_t *)_a)
#define	refcount_release(_a)	atomic_dec_and_test((atomic_t *)_a)

#include <sys/endian.h>
#include <sys/refcount.h>
#endif /* __FreeBSD__ */

#define prefetch(x)	__builtin_prefetch(x)
/*
 * These are used to handle reference counters for bridge ports.
 */
#define	ADD_BDG_REF(ifp)	refcount_acquire(&NA(ifp)->na_bdg_refcount)
#define	DROP_BDG_REF(ifp)	refcount_release(&NA(ifp)->na_bdg_refcount)

static void bdg_netmap_attach(struct ifnet *ifp);
static int bdg_netmap_reg(struct ifnet *ifp, int onoff);

/* per-tx-queue entry */
struct nm_bdg_fwd {	/* forwarding entry for a bridge */
	void *buf;	/* pointer to the packet data */
	uint64_t dst;	/* dst mask */
	uint32_t src;	/* src index ? */
	uint16_t len;	/* src len */
};

/* forwarding table entry */
struct nm_hash_ent {
	uint64_t mac;	/* the top 2 bytes are the epoch */
	uint64_t ports;
};
/*
 * Interfaces for a bridge are all in bdg_ports[].
 * The array has fixed size, an empty entry does not terminate
 * the search. But lookups only occur on attach/detach so we
 * don't mind if they are slow.
 *
 * The bridge is non blocking on the transmit ports.
 *
 * bdg_lock protects accesses to the bdg_ports array.
 */
struct nm_bridge {
	struct ifnet *bdg_ports[NM_BDG_MAXPORTS];
	int freelist;		/* first buffer index */
	NM_SELINFO_T si;	/* poll/select wait queue */
	NM_LOCK_T bdg_lock;	/* protect the selinfo ? */

	/* the forwarding table, MAC+ports */
	struct nm_hash_ent ht[NM_BDG_HASH];

	int namelen;	/* 0 means free */
	char basename[IFNAMSIZ];
};
struct nm_bridge nm_bridges[NM_BRIDGES];

#define BDG_LOCK(b)	mtx_lock(&(b)->bdg_lock)
#define BDG_UNLOCK(b)	mtx_unlock(&(b)->bdg_lock)

/*
 * NA(ifp)->bdg_port	port index
 */
// XXX only for multiples of 64 bytes, non overlapped.
static inline void
pkt_copy(void *_src, void *_dst, int l)
{
	uint64_t *src = _src;
	uint64_t *dst = _dst;
	if (unlikely(l >= 1024)) {
		bcopy(src, dst, l);
		return;
	}
	for (; likely(l > 0); l -= 64) {	/* copy 64 bytes per iteration */
		*dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++;
		*dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++;
	}
}
/*
 * locate a bridge among the existing ones.
 * a ':' in the name terminates the bridge name (e.g. in "vale0:p0"
 * the bridge name is "vale0"). Otherwise, just NM_NAME.
 * We assume that this is called with a name of at least NM_NAME chars.
 */
static struct nm_bridge *
nm_find_bridge(const char *name)
{
	int i, l, namelen, e;
	struct nm_bridge *b = NULL;

	namelen = strlen(NM_NAME);	/* base length */
	l = strlen(name);		/* actual length */
	for (i = namelen + 1; i < l; i++) {
		if (name[i] == ':') {
			namelen = i;
			break;
		}
	}
	if (namelen >= IFNAMSIZ)
		namelen = IFNAMSIZ;
	ND("--- prefix is '%.*s' ---", namelen, name);

	/* use the first entry for locking */
	BDG_LOCK(nm_bridges); // XXX do better
	for (e = -1, i = 1; i < NM_BRIDGES; i++) {
		b = nm_bridges + i;
		if (b->namelen == 0)
			e = i;	/* record empty slot */
		else if (strncmp(name, b->basename, namelen) == 0) {
			ND("found '%.*s' at %d", namelen, name, i);
			break;
		}
	}
	if (i == NM_BRIDGES) { /* all full */
		if (e == -1) { /* no empty slot */
			b = NULL;
		} else {
			b = nm_bridges + e;
			strncpy(b->basename, name, namelen);
			b->namelen = namelen;
		}
	}
	BDG_UNLOCK(nm_bridges);
	return b;
}
#endif /* NM_BRIDGE */
/*
 * Fetch configuration from the device, to cope with dynamic
 * reconfigurations after loading the module.
 */
static int
netmap_update_config(struct netmap_adapter *na)
{
	struct ifnet *ifp = na->ifp;
	u_int txr, txd, rxr, rxd;

	txr = txd = rxr = rxd = 0;
	if (na->nm_config) {
		na->nm_config(ifp, &txr, &txd, &rxr, &rxd);
	} else {
		/* take whatever we had at init time */
		txr = na->num_tx_rings;
		txd = na->num_tx_desc;
		rxr = na->num_rx_rings;
		rxd = na->num_rx_desc;
	}

	if (na->num_tx_rings == txr && na->num_tx_desc == txd &&
	    na->num_rx_rings == rxr && na->num_rx_desc == rxd)
		return 0; /* nothing changed */
	if (netmap_verbose || na->refcount > 0) {
		D("stored config %s: txring %d x %d, rxring %d x %d",
			ifp->if_xname,
			na->num_tx_rings, na->num_tx_desc,
			na->num_rx_rings, na->num_rx_desc);
		D("new config %s: txring %d x %d, rxring %d x %d",
			ifp->if_xname, txr, txd, rxr, rxd);
	}
	if (na->refcount == 0) {
		D("configuration changed (but fine)");
		na->num_tx_rings = txr;
		na->num_tx_desc = txd;
		na->num_rx_rings = rxr;
		na->num_rx_desc = rxd;
		return 0;
	}
	D("configuration changed while active, this is bad...");
	return 1;
}
/*------------- memory allocator -----------------*/
#include "netmap_mem2.c"
/*------------ end of memory allocator ----------*/
/*
 * Structure associated to each thread which registered an interface.
 *
 * The first 4 fields of this structure are written by NIOCREGIF and
 * read by poll() and NIOC?XSYNC.
 * There is low contention among writers (actually, a correct user program
 * should have no contention among writers) and among writers and readers,
 * so we use a single global lock to protect the structure initialization.
 * Since initialization involves the allocation of memory, we reuse the
 * memory allocator lock.
 *
 * Read access to the structure is lock free. Readers must check that
 * np_nifp is not NULL before using the other fields.
 * If np_nifp is NULL initialization has not been performed, so they should
 * return an error to userlevel.
 *
 * The ref_done field is used to regulate access to the refcount in the
 * memory allocator. The refcount must be incremented at most once for
 * each open("/dev/netmap"). The increment is performed by the first
 * function that calls netmap_get_memory() (currently called by
 * mmap(), NIOCGINFO and NIOCREGIF).
 * If the refcount is incremented, it is then decremented when the
 * private structure is destroyed.
 */
struct netmap_priv_d {
	struct netmap_if * volatile np_nifp;	/* netmap interface descriptor. */

	struct ifnet	*np_ifp;	/* device for which we hold a reference */
	int		np_ringid;	/* from the ioctl */
	u_int		np_qfirst, np_qlast;	/* range of rings to scan */
	uint16_t	np_txpoll;

	unsigned long	ref_done;	/* use with NMA_LOCK held */
};
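/*
 * A minimal sketch of the reader-side protocol described above;
 * the poll() and *SYNC paths below do exactly this before touching
 * any other field:
 *
 *	struct netmap_if *nifp = priv->np_nifp;
 *	if (nifp == NULL)	// initialization not yet committed
 *		return ENXIO;
 *	rmb();			// pairs with the wmb() in NIOCREGIF
 *	// now priv->np_ifp, np_qfirst, np_qlast are safe to read
 */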
static int
netmap_get_memory(struct netmap_priv_d* p)
{
	int error = 0;

	NMA_LOCK();
	if (!p->ref_done) {
		error = netmap_memory_finalize();
		if (!error)
			p->ref_done = 1;
	}
	NMA_UNLOCK();
	return error;
}
/*
 * File descriptor's private data destructor.
 *
 * Call nm_register(ifp,0) to stop netmap mode on the interface and
 * revert to normal operation. We expect that np_ifp has not gone away.
 */
/* call with NMA_LOCK held */
static void
netmap_dtor_locked(void *data)
{
	struct netmap_priv_d *priv = data;
	struct ifnet *ifp = priv->np_ifp;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_if *nifp = priv->np_nifp;

	na->refcount--;
	if (na->refcount <= 0) {	/* last instance */
		u_int i, j, lim;

		D("deleting last instance for %s", ifp->if_xname);
		/*
		 * there is a race here with *_netmap_task() and
		 * netmap_poll(), which don't run under NETMAP_REG_LOCK.
		 * na->refcount == 0 && na->ifp->if_capenable & IFCAP_NETMAP
		 * (aka NETMAP_DELETING(na)) are a unique marker that the
		 * device is dying.
		 * Before destroying stuff we sleep a bit, and then complete
		 * the job. NIOCREG should realize the condition and
		 * loop until they can continue; the other routines
		 * should check the condition at entry and quit if
		 * not in netmap mode.
		 */
		na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
		tsleep(na, 0, "NIOCUNREG", 4);
		na->nm_lock(ifp, NETMAP_REG_LOCK, 0);
		na->nm_register(ifp, 0); /* off, clear IFCAP_NETMAP */
		/* Wake up any sleeping threads. netmap_poll will
		 * then return POLLERR
		 */
		for (i = 0; i < na->num_tx_rings + 1; i++)
			selwakeuppri(&na->tx_rings[i].si, PI_NET);
		for (i = 0; i < na->num_rx_rings + 1; i++)
			selwakeuppri(&na->rx_rings[i].si, PI_NET);
		selwakeuppri(&na->tx_si, PI_NET);
		selwakeuppri(&na->rx_si, PI_NET);
		/* release all buffers */
		for (i = 0; i < na->num_tx_rings + 1; i++) {
			struct netmap_ring *ring = na->tx_rings[i].ring;
			lim = na->tx_rings[i].nkr_num_slots;
			for (j = 0; j < lim; j++)
				netmap_free_buf(nifp, ring->slot[j].buf_idx);
			/* knlist_destroy(&na->tx_rings[i].si.si_note); */
			mtx_destroy(&na->tx_rings[i].q_lock);
		}
		for (i = 0; i < na->num_rx_rings + 1; i++) {
			struct netmap_ring *ring = na->rx_rings[i].ring;
			lim = na->rx_rings[i].nkr_num_slots;
			for (j = 0; j < lim; j++)
				netmap_free_buf(nifp, ring->slot[j].buf_idx);
			/* knlist_destroy(&na->rx_rings[i].si.si_note); */
			mtx_destroy(&na->rx_rings[i].q_lock);
		}
		/* XXX kqueue(9) needed; these will mirror knlist_init. */
		/* knlist_destroy(&na->tx_si.si_note); */
		/* knlist_destroy(&na->rx_si.si_note); */
		netmap_free_rings(na);
	}
	netmap_if_free(nifp);
}
static void
nm_if_rele(struct ifnet *ifp)
{
#ifndef NM_BRIDGE
	if_rele(ifp);
#else /* NM_BRIDGE */
	int i, full;
	struct nm_bridge *b;

	if (strncmp(ifp->if_xname, NM_NAME, sizeof(NM_NAME) - 1)) {
		if_rele(ifp);
		return;
	}
	if (!DROP_BDG_REF(ifp))
		return;
	b = ifp->if_bridge;
	BDG_LOCK(nm_bridges);

	ND("want to disconnect %s from the bridge", ifp->if_xname);
	full = 0;
	for (i = 0; i < NM_BDG_MAXPORTS; i++) {
		if (b->bdg_ports[i] == ifp) {
			b->bdg_ports[i] = NULL;
			bzero(ifp, sizeof(*ifp));
			free(ifp, M_DEVBUF);
			break;
		}
		else if (b->bdg_ports[i] != NULL)
			full = 1;
	}
	if (full == 0) {
		ND("freeing bridge %d", b - nm_bridges);
		b->namelen = 0;
	}
	BDG_UNLOCK(nm_bridges);
	if (i == NM_BDG_MAXPORTS)
		D("ouch, cannot find ifp to remove");
#endif /* NM_BRIDGE */
}
static void
netmap_dtor(void *data)
{
	struct netmap_priv_d *priv = data;
	struct ifnet *ifp = priv->np_ifp;

	NMA_LOCK();
	if (ifp) {
		struct netmap_adapter *na = NA(ifp);

		na->nm_lock(ifp, NETMAP_REG_LOCK, 0);
		netmap_dtor_locked(data);
		na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);

		nm_if_rele(ifp); /* might also destroy *na */
	}
	if (priv->ref_done) {
		netmap_memory_deref();
	}
	NMA_UNLOCK();
	bzero(priv, sizeof(*priv));	/* XXX for safety */
	free(priv, M_DEVBUF);
}
#ifdef __FreeBSD__
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static struct cdev_pager_ops saved_cdev_pager_ops;

static int
netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	D("first mmap for %p", handle);
	return saved_cdev_pager_ops.cdev_pg_ctor(handle,
			size, prot, foff, cred, color);
}

static void
netmap_dev_pager_dtor(void *handle)
{
	saved_cdev_pager_ops.cdev_pg_dtor(handle);
	ND("ready to release memory for %p", handle);
}

static struct cdev_pager_ops netmap_cdev_pager_ops = {
	.cdev_pg_ctor = netmap_dev_pager_ctor,
	.cdev_pg_dtor = netmap_dev_pager_dtor,
	.cdev_pg_fault = NULL,
};

static int
netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *foff,
    vm_size_t objsize, vm_object_t *objp, int prot)
{
	vm_object_t obj;

	ND("cdev %p foff %jd size %jd objp %p prot %d", cdev,
	    (intmax_t )*foff, (intmax_t )objsize, objp, prot);
	obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
	    curthread->td_ucred);
	ND("returns obj %p", obj);
	if (obj == NULL)
		return EINVAL;
	if (saved_cdev_pager_ops.cdev_pg_fault == NULL) {
		ND("initialize cdev_pager_ops");
		saved_cdev_pager_ops = *(obj->un_pager.devp.ops);
		netmap_cdev_pager_ops.cdev_pg_fault =
			saved_cdev_pager_ops.cdev_pg_fault;
	}
	obj->un_pager.devp.ops = &netmap_cdev_pager_ops;
	*objp = obj;
	return 0;
}
#endif /* __FreeBSD__ */
/*
 * mmap(2) support for the "netmap" device.
 *
 * Expose all the memory previously allocated by our custom memory
 * allocator: this way the user has only to issue a single mmap(2), and
 * can work on all the data structures flawlessly.
 *
 * Return 0 on success, -1 otherwise.
 */
static int
netmap_mmap(__unused struct cdev *dev,
#if __FreeBSD_version < 900000
	vm_offset_t offset, vm_paddr_t *paddr, int nprot
#else
	vm_ooffset_t offset, vm_paddr_t *paddr, int nprot,
	__unused vm_memattr_t *memattr
#endif
	)
{
	int error = 0;
	struct netmap_priv_d *priv;

	if (nprot & PROT_EXEC)
		return (-1);	// XXX -1 or EINVAL ?

	error = devfs_get_cdevpriv((void **)&priv);
	if (error == EBADF) {	/* called on fault, memory is initialized */
		ND(5, "handling fault at ofs 0x%x", offset);
		error = 0;
	} else if (error == 0)	/* make sure memory is set */
		error = netmap_get_memory(priv);
	if (error)
		return (error);

	ND("request for offset 0x%x", (uint32_t)offset);
	*paddr = netmap_ofstophys(offset);

	return (*paddr ? 0 : ENOMEM);
}
static int
netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	if (netmap_verbose)
		D("dev %p fflag 0x%x devtype %d td %p",
			dev, fflag, devtype, td);
	return 0;
}

static int
netmap_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct netmap_priv_d *priv;
	int error;

	priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF,
			      M_NOWAIT | M_ZERO);
	if (priv == NULL)
		return ENOMEM;

	error = devfs_set_cdevpriv(priv, netmap_dtor);
	if (error)
		return error;

	return 0;
}
#endif /* __FreeBSD__ */
/*
 * Handlers for synchronization of the queues from/to the host.
 * Netmap has two operating modes:
 * - in the default mode, the rings connected to the host stack are
 *   just another ring pair managed by userspace;
 * - in transparent mode (XXX to be defined) incoming packets
 *   (from the host or the NIC) are marked as NS_FORWARD upon
 *   arrival, and the user application has a chance to reset the
 *   flag for packets that should be dropped.
 *   On the RXSYNC or poll(), packets in RX rings between
 *   kring->nr_hwcur and ring->cur with NS_FORWARD still set are moved
 *   to the other side.
 * The transfer NIC --> host is relatively easy, just encapsulate
 * into mbufs and we are done. The host --> NIC side is slightly
 * harder because there might not be room in the tx ring so it
 * might take a while before releasing the buffer.
 */
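/*
 * For instance (a sketch, not code from this file): with NR_FORWARD
 * set on the host ring, a userspace filter clears NS_FORWARD on the
 * packets it wants dropped, then invokes an RXSYNC to move the rest;
 * should_forward() below is a hypothetical user callback:
 *
 *	uint32_t i;
 *	for (i = ring->cur; ring->avail > 0;
 *	     i = NETMAP_RING_NEXT(ring, i), ring->avail--) {
 *		struct netmap_slot *slot = &ring->slot[i];
 *		if (!should_forward(NETMAP_BUF(ring, slot->buf_idx), slot->len))
 *			slot->flags &= ~NS_FORWARD;	// drop this one
 *	}
 *	ring->cur = i;
 *	ioctl(fd, NIOCRXSYNC, NULL);
 */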
/*
 * pass a chain of buffers to the host stack as coming from 'dst'
 */
static void
netmap_send_up(struct ifnet *dst, struct mbuf *head)
{
	struct mbuf *m;

	/* send packets up, outside the lock */
	while ((m = head) != NULL) {
		head = head->m_nextpkt;
		m->m_nextpkt = NULL;
		if (netmap_verbose & NM_VERB_HOST)
			D("sending up pkt %p size %d", m, MBUF_LEN(m));
		NM_SEND_UP(dst, m);
	}
}
/*
 * put a copy of the buffers marked NS_FORWARD into an mbuf chain.
 * Run from hwcur to cur - reserved
 */
static void
netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force)
{
	/* Take packets from hwcur to cur-reserved and pass them up.
	 * In case of no buffers we give up. At the end of the loop,
	 * the queue is drained in all cases.
	 * XXX handle reserved
	 */
	int k = kring->ring->cur - kring->ring->reserved;
	u_int n, lim = kring->nkr_num_slots - 1;
	struct mbuf *m, *tail = q->tail;

	if (k < 0)
		k = k + kring->nkr_num_slots;
	for (n = kring->nr_hwcur; n != k;) {
		struct netmap_slot *slot = &kring->ring->slot[n];

		n = (n == lim) ? 0 : n + 1;
		if ((slot->flags & NS_FORWARD) == 0 && !force)
			continue;
		if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE) {
			D("bad pkt at %d len %d", n, slot->len);
			continue;
		}
		slot->flags &= ~NS_FORWARD; // XXX needed ?
		m = m_devget(NMB(slot), slot->len, 0, kring->na->ifp, NULL);

		if (m == NULL)
			break;
		if (tail)
			tail->m_nextpkt = m;
		else
			q->head = m;
		tail = m;
		m->m_nextpkt = NULL;
	}
	q->tail = tail;
}
/*
 * called under main lock to send packets from the host to the NIC
 * The host ring has packets from nr_hwcur to (cur - reserved)
 * to be sent down. We scan the tx rings, which have just been
 * flushed so nr_hwcur == cur. Pushing packets down means
 * increment cur and decrement avail.
 */
static void
netmap_sw_to_nic(struct netmap_adapter *na)
{
	struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings];
	struct netmap_kring *k1 = &na->tx_rings[0];
	int i, howmany, src_lim, dst_lim;

	howmany = kring->nr_hwavail;	/* XXX otherwise cur - reserved - nr_hwcur */

	src_lim = kring->nkr_num_slots;
	for (i = 0; howmany > 0 && i < na->num_tx_rings; i++, k1++) {
		ND("%d packets left to ring %d (space %d)", howmany, i, k1->nr_hwavail);
		dst_lim = k1->nkr_num_slots;
		while (howmany > 0 && k1->ring->avail > 0) {
			struct netmap_slot *src, *dst, tmp;
			src = &kring->ring->slot[kring->nr_hwcur];
			dst = &k1->ring->slot[k1->ring->cur];
			tmp = *src;		/* swap the buffers */
			src->buf_idx = dst->buf_idx;
			src->flags = NS_BUF_CHANGED;

			dst->buf_idx = tmp.buf_idx;
			dst->len = tmp.len;
			dst->flags = NS_BUF_CHANGED;
			ND("out len %d buf %d from %d to %d",
				dst->len, dst->buf_idx,
				kring->nr_hwcur, k1->ring->cur);

			if (++kring->nr_hwcur >= src_lim)
				kring->nr_hwcur = 0;
			howmany--;
			kring->nr_hwavail--;
			if (++k1->ring->cur >= dst_lim)
				k1->ring->cur = 0;
			k1->ring->avail--;
		}
		kring->ring->cur = kring->nr_hwcur; // XXX
	}
}
/*
 * netmap_sync_to_host() passes packets up. We are called from a
 * system call in user process context, and the only contention
 * can be among multiple user threads erroneously calling
 * this routine concurrently.
 */
static void
netmap_sync_to_host(struct netmap_adapter *na)
{
	struct netmap_kring *kring = &na->tx_rings[na->num_tx_rings];
	struct netmap_ring *ring = kring->ring;
	u_int k, lim = kring->nkr_num_slots - 1;
	struct mbq q = { NULL, NULL };

	k = ring->cur;
	if (k > lim) {
		netmap_ring_reinit(kring);
		return;
	}
	// na->nm_lock(na->ifp, NETMAP_CORE_LOCK, 0);

	/* Take packets from hwcur to cur and pass them up.
	 * In case of no buffers we give up. At the end of the loop,
	 * the queue is drained in all cases.
	 */
	netmap_grab_packets(kring, &q, 1);
	kring->nr_hwcur = k;
	kring->nr_hwavail = ring->avail = lim;
	// na->nm_lock(na->ifp, NETMAP_CORE_UNLOCK, 0);

	netmap_send_up(na->ifp, q.head);
}
/*
 * rxsync backend for packets coming from the host stack.
 * They have been put in the queue by netmap_start() so we
 * need to protect access to the kring using a lock.
 *
 * This routine also does the selrecord if called from the poll handler
 * (we know because td != NULL).
 *
 * NOTE: on linux, selrecord() is defined as a macro and uses pwait
 * as an additional hidden argument.
 */
static void
netmap_sync_from_host(struct netmap_adapter *na, struct thread *td, void *pwait)
{
	struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings];
	struct netmap_ring *ring = kring->ring;
	u_int j, n, lim = kring->nkr_num_slots;
	u_int k = ring->cur, resvd = ring->reserved;

	(void)pwait;	/* disable unused warnings */
	na->nm_lock(na->ifp, NETMAP_CORE_LOCK, 0);
	if (k >= lim) {
		netmap_ring_reinit(kring);
		na->nm_lock(na->ifp, NETMAP_CORE_UNLOCK, 0);
		return;
	}
	/* new packets are already set in nr_hwavail */
	/* skip past packets that userspace has released */
	j = kring->nr_hwcur;
	if (resvd > 0) {
		if (resvd + ring->avail >= lim + 1) {
			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
			ring->reserved = resvd = 0; // XXX panic...
		}
		k = (k >= resvd) ? k - resvd : k + lim - resvd;
	}
	if (j != k) {
		n = k >= j ? k - j : k + lim - j;
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;
	}
	k = ring->avail = kring->nr_hwavail - resvd;
	if (k == 0 && td)
		selrecord(td, &kring->si);
	if (k && (netmap_verbose & NM_VERB_HOST))
		D("%d pkts from stack", k);
	na->nm_lock(na->ifp, NETMAP_CORE_UNLOCK, 0);
}
/*
 * get a refcounted reference to an interface.
 * Return ENXIO if the interface does not exist, EINVAL if netmap
 * is not supported by the interface.
 * If successful, hold a reference.
 */
static int
get_ifp(const char *name, struct ifnet **ifp)
{
#ifdef NM_BRIDGE
	struct ifnet *iter = NULL;

	do {
		struct nm_bridge *b;
		int i, l, cand = -1;

		if (strncmp(name, NM_NAME, sizeof(NM_NAME) - 1))
			break;
		b = nm_find_bridge(name);
		if (b == NULL) {
			D("no bridges available for '%s'", name);
			return (ENXIO);
		}
		BDG_LOCK(b);
		/* lookup in the local list of ports */
		for (i = 0; i < NM_BDG_MAXPORTS; i++) {
			iter = b->bdg_ports[i];
			if (iter == NULL) {
				if (cand == -1)
					cand = i; /* potential insert point */
				continue;
			}
			if (!strcmp(iter->if_xname, name)) {
				ADD_BDG_REF(iter);
				ND("found existing interface");
				BDG_UNLOCK(b);
				break;
			}
		}
		if (i < NM_BDG_MAXPORTS) /* already unlocked */
			break;
		if (cand == -1) {
			D("bridge full, cannot create new port");
			BDG_UNLOCK(b);
			*ifp = NULL;
			return EINVAL;
		}
		ND("create new bridge port %s", name);
		/* space for forwarding list after the ifnet */
		l = sizeof(*iter) +
			sizeof(struct nm_bdg_fwd)*NM_BDG_BATCH;
		iter = malloc(l, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (!iter) {
			BDG_UNLOCK(b);
			return ENOMEM;
		}
		strcpy(iter->if_xname, name);
		bdg_netmap_attach(iter);
		b->bdg_ports[cand] = iter;
		iter->if_bridge = b;
		ADD_BDG_REF(iter);
		BDG_UNLOCK(b);
		ND("attaching virtual bridge %p", b);
	} while (0);
	if (iter != NULL) {
		*ifp = iter;
		return 0;
	}
#endif /* NM_BRIDGE */
	*ifp = ifunit_ref(name);
	if (*ifp == NULL)
		return (ENXIO);
	/* can do this if the capability exists and if_pspare[0]
	 * points to the netmap descriptor.
	 */
	if (NETMAP_CAPABLE(*ifp))
		return 0;	/* valid pointer, we hold the refcount */
	nm_if_rele(*ifp);
	return EINVAL;	// not NETMAP capable
}
/*
 * Error routine called when txsync/rxsync detects an error.
 * Can't do much more than resetting cur = hwcur, avail = hwavail.
 * Return 1 on reinit.
 *
 * This routine is only called by the upper half of the kernel.
 * It only reads hwcur (which is changed only by the upper half, too)
 * and hwavail (which may be changed by the lower half, but only on
 * a tx ring and only to increase it, so any error will be recovered
 * on the next call). For the above, we don't strictly need to call
 * it under lock.
 */
int
netmap_ring_reinit(struct netmap_kring *kring)
{
	struct netmap_ring *ring = kring->ring;
	u_int i, lim = kring->nkr_num_slots - 1;
	int errors = 0;

	RD(10, "called for %s", kring->na->ifp->if_xname);
	if (ring->cur > lim)
		errors++;
	for (i = 0; i <= lim; i++) {
		u_int idx = ring->slot[i].buf_idx;
		u_int len = ring->slot[i].len;
		if (idx < 2 || idx >= netmap_total_buffers) {
			if (!errors++)
				D("bad buffer at slot %d idx %d len %d ", i, idx, len);
			ring->slot[i].buf_idx = 0;
			ring->slot[i].len = 0;
		} else if (len > NETMAP_BUF_SIZE) {
			ring->slot[i].len = 0;
			if (!errors++)
				D("bad len %d at slot %d idx %d",
					len, i, idx);
		}
	}
	if (errors) {
		int pos = kring - kring->na->tx_rings;
		int n = kring->na->num_tx_rings + 1;

		RD(10, "total %d errors", errors);
		RD(10, "%s %s[%d] reinit, cur %d -> %d avail %d -> %d",
			kring->na->ifp->if_xname,
			pos < n ? "TX" : "RX", pos < n ? pos : pos - n,
			ring->cur, kring->nr_hwcur,
			ring->avail, kring->nr_hwavail);
		ring->cur = kring->nr_hwcur;
		ring->avail = kring->nr_hwavail;
	}
	return (errors ? 1 : 0);
}
/*
 * Set the ring ID. For devices with a single queue, a request
 * for all rings is the same as a single ring.
 */
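/*
 * Examples of ringid encodings (a sketch of the conventions used below):
 *
 *	req.nr_ringid = 0;			// all hardware rings
 *	req.nr_ringid = NETMAP_HW_RING | 2;	// hardware ring 2 only
 *	req.nr_ringid = NETMAP_SW_RING;		// host (stack) rings
 *	req.nr_ringid |= NETMAP_NO_TX_POLL;	// don't txsync on poll()
 */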
static int
netmap_set_ringid(struct netmap_priv_d *priv, u_int ringid)
{
	struct ifnet *ifp = priv->np_ifp;
	struct netmap_adapter *na = NA(ifp);
	u_int i = ringid & NETMAP_RING_MASK;
	/* initially (np_qfirst == np_qlast) we don't want to lock */
	int need_lock = (priv->np_qfirst != priv->np_qlast);
	int lim = na->num_rx_rings;

	if (na->num_tx_rings > lim)
		lim = na->num_tx_rings;
	if ( (ringid & NETMAP_HW_RING) && i >= lim) {
		D("invalid ring id %d", i);
		return (EINVAL);
	}
	if (need_lock)
		na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
	priv->np_ringid = ringid;
	if (ringid & NETMAP_SW_RING) {
		priv->np_qfirst = NETMAP_SW_RING;
		priv->np_qlast = NETMAP_SW_RING;
	} else if (ringid & NETMAP_HW_RING) {
		priv->np_qfirst = i;
		priv->np_qlast = i + 1;
	} else {
		priv->np_qfirst = 0;
		priv->np_qlast = NETMAP_HW_RING;
	}
	priv->np_txpoll = (ringid & NETMAP_NO_TX_POLL) ? 0 : 1;
	if (need_lock)
		na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);
	if (netmap_verbose) {
		if (ringid & NETMAP_SW_RING)
			D("ringid %s set to SW RING", ifp->if_xname);
		else if (ringid & NETMAP_HW_RING)
			D("ringid %s set to HW RING %d", ifp->if_xname,
				priv->np_qfirst);
		else
			D("ringid %s set to all %d HW RINGS", ifp->if_xname, lim);
	}
	return 0;
}
/*
 * ioctl(2) support for the "netmap" device.
 *
 * The following is a list of accepted commands:
 * - NIOCGINFO
 * - SIOCGIFADDR	just for convenience
 * - NIOCREGIF
 * - NIOCUNREGIF
 * - NIOCTXSYNC
 * - NIOCRXSYNC
 *
 * Return 0 on success, errno otherwise.
 */
static int
netmap_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
	int fflag, struct thread *td)
{
	struct netmap_priv_d *priv = NULL;
	struct ifnet *ifp;
	struct nmreq *nmr = (struct nmreq *) data;
	struct netmap_adapter *na;
	int error;
	u_int i, lim;
	struct netmap_if *nifp;

	(void)dev;	/* UNUSED */
	(void)fflag;	/* UNUSED */
#ifdef linux
#define devfs_get_cdevpriv(pp)				\
	({ *(struct netmap_priv_d **)pp = ((struct file *)td)->private_data; \
		(*pp ? 0 : ENOENT); })

/* devfs_set_cdevpriv cannot fail on linux */
#define devfs_set_cdevpriv(p, fn)				\
	({ ((struct file *)td)->private_data = p; (p ? 0 : EINVAL); })

#define devfs_clear_cdevpriv()	do {				\
		netmap_dtor(priv); ((struct file *)td)->private_data = 0;	\
	} while (0)
#endif /* linux */

	CURVNET_SET(TD_TO_VNET(td));

	error = devfs_get_cdevpriv((void **)&priv);
	if (error) {
		CURVNET_RESTORE();
		/* XXX ENOENT should be impossible, since the priv
		 * is now created in the open */
		return (error == ENOENT ? ENXIO : error);
	}

	nmr->nr_name[sizeof(nmr->nr_name) - 1] = '\0';	/* truncate name */
	switch (cmd) {
	case NIOCGINFO:		/* return capabilities etc */
		if (nmr->nr_version != NETMAP_API) {
			D("API mismatch got %d have %d",
				nmr->nr_version, NETMAP_API);
			nmr->nr_version = NETMAP_API;
			error = EINVAL;
			break;
		}
		/* update configuration */
		error = netmap_get_memory(priv);
		ND("get_memory returned %d", error);
		if (error)
			break;
		/* memsize is always valid */
		nmr->nr_memsize = nm_mem.nm_totalsize;
		nmr->nr_offset = 0;
		nmr->nr_rx_rings = nmr->nr_tx_rings = 0;
		nmr->nr_rx_slots = nmr->nr_tx_slots = 0;
		if (nmr->nr_name[0] == '\0')	/* just get memory info */
			break;
		error = get_ifp(nmr->nr_name, &ifp); /* get a refcount */
		if (error)
			break;
		na = NA(ifp); /* retrieve netmap_adapter */
		netmap_update_config(na);
		nmr->nr_rx_rings = na->num_rx_rings;
		nmr->nr_tx_rings = na->num_tx_rings;
		nmr->nr_rx_slots = na->num_rx_desc;
		nmr->nr_tx_slots = na->num_tx_desc;
		nm_if_rele(ifp);	/* return the refcount */
		break;
	case NIOCREGIF:
		if (nmr->nr_version != NETMAP_API) {
			nmr->nr_version = NETMAP_API;
			error = EINVAL;
			break;
		}
		/* ensure allocators are ready */
		error = netmap_get_memory(priv);
		ND("get_memory returned %d", error);
		if (error)
			break;

		/* protect access to priv from concurrent NIOCREGIF */
		NMA_LOCK();
		if (priv->np_ifp != NULL) {	/* thread already registered */
			error = netmap_set_ringid(priv, nmr->nr_ringid);
			NMA_UNLOCK();
			break;
		}
		/* find the interface and a reference */
		error = get_ifp(nmr->nr_name, &ifp); /* keep reference */
		if (error) {
			NMA_UNLOCK();
			break;
		}
		na = NA(ifp); /* retrieve netmap adapter */

		for (i = 10; i > 0; i--) {
			na->nm_lock(ifp, NETMAP_REG_LOCK, 0);
			if (!NETMAP_DELETING(na))
				break;
			na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
			tsleep(na, 0, "NIOCREGIF", hz/10);
		}
		if (i == 0) {
			D("too many NIOCREGIF attempts, give up");
			error = EINVAL;
			nm_if_rele(ifp);	/* return the refcount */
			NMA_UNLOCK();
			break;
		}

		/* ring configuration may have changed, fetch from the card */
		netmap_update_config(na);
		priv->np_ifp = ifp;	/* store the reference */
		error = netmap_set_ringid(priv, nmr->nr_ringid);
		if (error)
			goto error;
		nifp = netmap_if_new(nmr->nr_name, na);
		if (nifp == NULL) { /* allocation failed */
			error = ENOMEM;
		} else if (ifp->if_capenable & IFCAP_NETMAP) {
			/* was already set */
		} else {
			/* Otherwise set the card in netmap mode
			 * and make it use the shared buffers.
			 */
			for (i = 0 ; i < na->num_tx_rings + 1; i++)
				mtx_init(&na->tx_rings[i].q_lock, "nm_txq_lock",
				    MTX_NETWORK_LOCK, MTX_DEF);
			for (i = 0 ; i < na->num_rx_rings + 1; i++) {
				mtx_init(&na->rx_rings[i].q_lock, "nm_rxq_lock",
				    MTX_NETWORK_LOCK, MTX_DEF);
			}
			error = na->nm_register(ifp, 1); /* mode on */
			if (error) {
				netmap_dtor_locked(priv);
				netmap_if_free(nifp);
			}
		}

		if (error) {	/* reg. failed, release priv and ref */
error:
			na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
			nm_if_rele(ifp);	/* return the refcount */
			priv->np_ifp = NULL;
			priv->np_nifp = NULL;
			NMA_UNLOCK();
			break;
		}

		na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);

		/* the following assignment is a commitment.
		 * Readers (i.e., poll and *SYNC) check for
		 * np_nifp != NULL without locking
		 */
		wmb(); /* make sure previous writes are visible to all CPUs */
		priv->np_nifp = nifp;
		NMA_UNLOCK();

		/* return the offset of the netmap_if object */
		nmr->nr_rx_rings = na->num_rx_rings;
		nmr->nr_tx_rings = na->num_tx_rings;
		nmr->nr_rx_slots = na->num_rx_desc;
		nmr->nr_tx_slots = na->num_tx_desc;
		nmr->nr_memsize = nm_mem.nm_totalsize;
		nmr->nr_offset = netmap_if_offset(nifp);
		break;
	case NIOCUNREGIF:
		// XXX we have no data here ?
		D("deprecated, data is %p", nmr);
		error = EINVAL;
		break;

	case NIOCTXSYNC:
	case NIOCRXSYNC:
		nifp = priv->np_nifp;
		if (nifp == NULL) {
			error = ENXIO;
			break;
		}
		rmb(); /* make sure following reads are not from cache */

		ifp = priv->np_ifp;	/* we have a reference */
		if (ifp == NULL) {
			D("Internal error: nifp != NULL && ifp == NULL");
			error = ENXIO;
			break;
		}
		na = NA(ifp); /* retrieve netmap adapter */
		if (priv->np_qfirst == NETMAP_SW_RING) { /* host rings */
			if (cmd == NIOCTXSYNC)
				netmap_sync_to_host(na);
			else
				netmap_sync_from_host(na, NULL, NULL);
			break;
		}
		/* find the last ring to scan */
		lim = priv->np_qlast;
		if (lim == NETMAP_HW_RING)
			lim = (cmd == NIOCTXSYNC) ?
			    na->num_tx_rings : na->num_rx_rings;

		for (i = priv->np_qfirst; i < lim; i++) {
			if (cmd == NIOCTXSYNC) {
				struct netmap_kring *kring = &na->tx_rings[i];
				if (netmap_verbose & NM_VERB_TXSYNC)
					D("pre txsync ring %d cur %d hwcur %d",
					    i, kring->ring->cur,
					    kring->nr_hwcur);
				na->nm_txsync(ifp, i, 1 /* do lock */);
				if (netmap_verbose & NM_VERB_TXSYNC)
					D("post txsync ring %d cur %d hwcur %d",
					    i, kring->ring->cur,
					    kring->nr_hwcur);
			} else {
				na->nm_rxsync(ifp, i, 1 /* do lock */);
				microtime(&na->rx_rings[i].ring->ts);
			}
		}
		break;

#ifdef __FreeBSD__
	case BIOCIMMEDIATE:
	case BIOCGHDRCMPLT:
	case BIOCSHDRCMPLT:
	case BIOCSSEESENT:
		D("ignore BIOCIMMEDIATE/BIOCGHDRCMPLT/BIOCSHDRCMPLT/BIOCSSEESENT");
		break;
	default:	/* allow device-specific ioctls */
	    {
		struct socket so;

		bzero(&so, sizeof(so));
		error = get_ifp(nmr->nr_name, &ifp); /* keep reference */
		if (error)
			break;
		so.so_vnet = ifp->if_vnet;
		// so->so_proto not null.
		error = ifioctl(&so, cmd, data, td);
		nm_if_rele(ifp);	/* return the refcount */
		break;
	    }
	}

	CURVNET_RESTORE();
	return (error);
}
/*
 * select(2) and poll(2) handlers for the "netmap" device.
 *
 * Can be called for one or more queues.
 * Return the event mask corresponding to ready events.
 * If there are no ready events, do a selrecord on either individual
 * selfd or on the global one.
 * Device-dependent parts (locking and sync of tx/rx rings)
 * are done through callbacks.
 *
 * On linux, arguments are really pwait, the poll table, and 'td' is struct file *
 * The first one is remapped to pwait as selrecord() uses the name as a
 * hidden argument.
 */
static int
netmap_poll(struct cdev *dev, int events, struct thread *td)
{
	struct netmap_priv_d *priv = NULL;
	struct netmap_adapter *na;
	struct ifnet *ifp;
	struct netmap_kring *kring;
	u_int core_lock, i, check_all, want_tx, want_rx, revents = 0;
	u_int lim_tx, lim_rx, host_forwarded = 0;
	struct mbq q = { NULL, NULL, 0 };
	enum {NO_CL, NEED_CL, LOCKED_CL }; /* see below */
	void *pwait = dev;	/* linux compatibility */

	(void)pwait;

	if (devfs_get_cdevpriv((void **)&priv) != 0 || priv == NULL)
		return POLLERR;

	if (priv->np_nifp == NULL) {
		D("No if registered");
		return POLLERR;
	}
	rmb(); /* make sure following reads are not from cache */

	ifp = priv->np_ifp;
	// XXX check for deleting() ?
	if ( (ifp->if_capenable & IFCAP_NETMAP) == 0)
		return POLLERR;

	if (netmap_verbose & 0x8000)
		D("device %s events 0x%x", ifp->if_xname, events);
	want_tx = events & (POLLOUT | POLLWRNORM);
	want_rx = events & (POLLIN | POLLRDNORM);

	na = NA(ifp); /* retrieve netmap adapter */

	lim_tx = na->num_tx_rings;
	lim_rx = na->num_rx_rings;
	/* how many queues we are scanning */
	if (priv->np_qfirst == NETMAP_SW_RING) {
		if (priv->np_txpoll || want_tx) {
			/* push any packets up, then we are always ready */
			kring = &na->tx_rings[lim_tx];
			netmap_sync_to_host(na);
			revents |= want_tx;
		}
		if (want_rx) {
			kring = &na->rx_rings[lim_rx];
			if (kring->ring->avail == 0)
				netmap_sync_from_host(na, td, dev);
			if (kring->ring->avail > 0) {
				revents |= want_rx;
			}
		}
		return (revents);
	}

	/* if we are in transparent mode, check also the host rx ring */
	kring = &na->rx_rings[lim_rx];
	if ( (priv->np_qlast == NETMAP_HW_RING) // XXX check_all
			&& want_rx
			&& (netmap_fwd || kring->ring->flags & NR_FORWARD) ) {
		if (kring->ring->avail == 0)
			netmap_sync_from_host(na, td, dev);
		if (kring->ring->avail > 0)
			revents |= want_rx;
	}
	/*
	 * check_all is set if the card has more than one queue and
	 * the client is polling all of them. If true, we sleep on
	 * the "global" selfd, otherwise we sleep on individual selfd
	 * (we can only sleep on one of them per direction).
	 * The interrupt routine in the driver should always wake on
	 * the individual selfd, and also on the global one if the card
	 * has more than one ring.
	 *
	 * If the card has only one lock, we just use that.
	 * If the card has separate ring locks, we just use those
	 * unless we are doing check_all, in which case the whole
	 * loop is wrapped by the global lock.
	 * We acquire locks only when necessary: if poll is called
	 * when buffers are available, we can just return without locks.
	 *
	 * rxsync() is only called if we run out of buffers on a POLLIN.
	 * txsync() is called if we run out of buffers on POLLOUT, or
	 * there are pending packets to send. The latter can be disabled
	 * passing NETMAP_NO_TX_POLL in the NIOCREG call.
	 */
	check_all = (priv->np_qlast == NETMAP_HW_RING) && (lim_tx > 1 || lim_rx > 1);

	/*
	 * core_lock indicates what to do with the core lock.
	 * The core lock is used when either the card has no individual
	 * locks, or it has individual locks but we are checking all
	 * rings so we need the core lock to avoid missing wakeup events.
	 *
	 * It has three possible states:
	 * NO_CL	we don't need to use the core lock, e.g.
	 *		because we are protected by individual locks.
	 * NEED_CL	we need the core lock. In this case, when we
	 *		call the lock routine, move to LOCKED_CL
	 *		to remember to release the lock once done.
	 * LOCKED_CL	core lock is set, so we need to release it.
	 */
	core_lock = (check_all || !na->separate_locks) ? NEED_CL : NO_CL;
#ifdef NM_BRIDGE
	/* the bridge uses separate locks */
	if (na->nm_register == bdg_netmap_reg) {
		ND("not using core lock for %s", ifp->if_xname);
		core_lock = NO_CL;
	}
#endif /* NM_BRIDGE */
	if (priv->np_qlast != NETMAP_HW_RING) {
		lim_tx = lim_rx = priv->np_qlast;
	}
	/*
	 * We start with a lock free round which is good if we have
	 * data available. If this fails, then lock and call the sync
	 * routines.
	 */
	for (i = priv->np_qfirst; want_rx && i < lim_rx; i++) {
		kring = &na->rx_rings[i];
		if (kring->ring->avail > 0) {
			revents |= want_rx;
			want_rx = 0;	/* also breaks the loop */
		}
	}
	for (i = priv->np_qfirst; want_tx && i < lim_tx; i++) {
		kring = &na->tx_rings[i];
		if (kring->ring->avail > 0) {
			revents |= want_tx;
			want_tx = 0;	/* also breaks the loop */
		}
	}

	/*
	 * If we want to push packets out (priv->np_txpoll) or want_tx is
	 * still set, we do need to run the txsync calls (on all rings,
	 * to avoid that the tx rings stall).
	 */
	if (priv->np_txpoll || want_tx) {
flush_tx:
		for (i = priv->np_qfirst; i < lim_tx; i++) {
			kring = &na->tx_rings[i];
			/*
			 * Skip the current ring if want_tx == 0
			 * (we have already done a successful sync on
			 * a previous ring) AND kring->cur == kring->hwcur
			 * (there are no pending transmissions for this ring).
			 */
			if (!want_tx && kring->ring->cur == kring->nr_hwcur)
				continue;
			if (core_lock == NEED_CL) {
				na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
				core_lock = LOCKED_CL;
			}
			if (na->separate_locks)
				na->nm_lock(ifp, NETMAP_TX_LOCK, i);
			if (netmap_verbose & NM_VERB_TXSYNC)
				D("send %d on %s %d",
					kring->ring->cur, ifp->if_xname, i);
			if (na->nm_txsync(ifp, i, 0 /* no lock */))
				revents |= POLLERR;

			/* Check avail/call selrecord only if called with POLLOUT */
			if (want_tx) {
				if (kring->ring->avail > 0) {
					/* stop at the first ring. We don't risk
					 * starvation.
					 */
					revents |= want_tx;
					want_tx = 0;
				} else if (!check_all)
					selrecord(td, &kring->si);
			}
			if (na->separate_locks)
				na->nm_lock(ifp, NETMAP_TX_UNLOCK, i);
		}
	}
	/*
	 * now if want_rx is still set we need to lock and rxsync.
	 * Do it on all rings because otherwise we starve.
	 */
	if (want_rx) {
		for (i = priv->np_qfirst; i < lim_rx; i++) {
			kring = &na->rx_rings[i];
			if (core_lock == NEED_CL) {
				na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
				core_lock = LOCKED_CL;
			}
			if (na->separate_locks)
				na->nm_lock(ifp, NETMAP_RX_LOCK, i);
			if (netmap_fwd || kring->ring->flags & NR_FORWARD) {
				ND(10, "forwarding some buffers up %d to %d",
				    kring->nr_hwcur, kring->ring->cur);
				netmap_grab_packets(kring, &q, netmap_fwd);
			}

			if (na->nm_rxsync(ifp, i, 0 /* no lock */))
				revents |= POLLERR;
			if (netmap_no_timestamp == 0 ||
					kring->ring->flags & NR_TIMESTAMP) {
				microtime(&kring->ring->ts);
			}

			if (kring->ring->avail > 0)
				revents |= want_rx;
			else if (!check_all)
				selrecord(td, &kring->si);
			if (na->separate_locks)
				na->nm_lock(ifp, NETMAP_RX_UNLOCK, i);
		}
	}
	if (check_all && revents == 0) { /* signal on the global queue */
		if (want_tx)
			selrecord(td, &na->tx_si);
		if (want_rx)
			selrecord(td, &na->rx_si);
	}

	/* forward host to the netmap ring */
	kring = &na->rx_rings[lim_rx];
	if (kring->nr_hwavail > 0)
		ND("host rx %d has %d packets", lim_rx, kring->nr_hwavail);
	if ( (priv->np_qlast == NETMAP_HW_RING) // XXX check_all
			&& (netmap_fwd || kring->ring->flags & NR_FORWARD)
			&& kring->nr_hwavail > 0 && !host_forwarded) {
		if (core_lock == NEED_CL) {
			na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
			core_lock = LOCKED_CL;
		}
		netmap_sw_to_nic(na);
		host_forwarded = 1; /* prevent another pass */
		want_rx = 0;
		goto flush_tx;
	}

	if (core_lock == LOCKED_CL)
		na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);
	if (q.head)
		netmap_send_up(na->ifp, q.head);

	return (revents);
}
/*------- driver support routines ------*/

/*
 * default lock wrapper.
 */
static void
netmap_lock_wrapper(struct ifnet *dev, int what, u_int queueid)
{
	struct netmap_adapter *na = NA(dev);

	switch (what) {
#ifdef linux	/* some systems do not need a lock on register */
	case NETMAP_REG_LOCK:
	case NETMAP_REG_UNLOCK:
		break;
#endif /* linux */

	case NETMAP_CORE_LOCK:
		mtx_lock(&na->core_lock);
		break;

	case NETMAP_CORE_UNLOCK:
		mtx_unlock(&na->core_lock);
		break;

	case NETMAP_TX_LOCK:
		mtx_lock(&na->tx_rings[queueid].q_lock);
		break;

	case NETMAP_TX_UNLOCK:
		mtx_unlock(&na->tx_rings[queueid].q_lock);
		break;

	case NETMAP_RX_LOCK:
		mtx_lock(&na->rx_rings[queueid].q_lock);
		break;

	case NETMAP_RX_UNLOCK:
		mtx_unlock(&na->rx_rings[queueid].q_lock);
		break;
	}
}
/*
 * Initialize a ``netmap_adapter`` object created by driver on attach.
 * We allocate a block of memory with room for a struct netmap_adapter
 * plus two sets of N+2 struct netmap_kring (where N is the number
 * of hardware rings):
 *   krings	0..N-1	are for the hardware queues.
 *   kring	N	is for the host stack queue
 *   kring	N+1	is only used for the selinfo for all queues.
 * Return 0 on success, ENOMEM otherwise.
 *
 * By default the receive and transmit adapter ring counts are both initialized
 * to num_queues. na->num_tx_rings can be set for cards with different tx/rx
 * setups.
 */
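/*
 * A driver typically invokes netmap_attach() at the end of its own
 * attach routine, along the lines of the sketch below (the foo_*
 * names and the adapter fields are placeholders for the driver's
 * own structures and callbacks):
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = adapter->ifp;
 *	na.separate_locks = 0;
 *	na.num_tx_desc = adapter->num_tx_desc;
 *	na.num_rx_desc = adapter->num_rx_desc;
 *	na.nm_txsync = foo_netmap_txsync;
 *	na.nm_rxsync = foo_netmap_rxsync;
 *	na.nm_register = foo_netmap_reg;
 *	netmap_attach(&na, adapter->num_queues);
 */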
int
netmap_attach(struct netmap_adapter *arg, int num_queues)
{
	struct netmap_adapter *na = NULL;
	struct ifnet *ifp = arg ? arg->ifp : NULL;

	if (arg == NULL || ifp == NULL)
		goto fail;
	na = malloc(sizeof(*na), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (na == NULL)
		goto fail;
	WNA(ifp) = na;
	*na = *arg; /* copy everything, trust the driver to not pass junk */
	NETMAP_SET_CAPABLE(ifp);
	if (na->num_tx_rings == 0)
		na->num_tx_rings = num_queues;
	na->num_rx_rings = num_queues;
	na->refcount = na->na_single = na->na_multi = 0;
	/* Core lock initialized here, others after netmap_if_new. */
	mtx_init(&na->core_lock, "netmap core lock", MTX_NETWORK_LOCK, MTX_DEF);
	if (na->nm_lock == NULL) {
		ND("using default locks for %s", ifp->if_xname);
		na->nm_lock = netmap_lock_wrapper;
	}
#ifdef linux
	if (!ifp->netdev_ops) {
		D("ouch, we cannot override netdev_ops");
		goto fail;
	}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
	/* if needed, prepare a clone of the entire netdev ops */
	na->nm_ndo = *ifp->netdev_ops;
#endif /* 2.6.28 and above */
	na->nm_ndo.ndo_start_xmit = linux_netmap_start;
#endif /* linux */

	D("success for %s", ifp->if_xname);
	return 0;

fail:
	D("fail, arg %p ifp %p na %p", arg, ifp, na);
	return (na ? EINVAL : ENOMEM);
}
/*
 * Free the allocated memory linked to the given ``netmap_adapter``
 * object.
 */
void
netmap_detach(struct ifnet *ifp)
{
	struct netmap_adapter *na = NA(ifp);

	if (!na)
		return;

	mtx_destroy(&na->core_lock);

	if (na->tx_rings) { /* XXX should not happen */
		D("freeing leftover tx_rings");
		free(na->tx_rings, M_DEVBUF);
	}
	bzero(na, sizeof(*na));
	WNA(ifp) = NULL;
	free(na, M_DEVBUF);
}
/*
 * Intercept packets from the network stack and pass them
 * to netmap as incoming packets on the 'software' ring.
 * We are not locked when called.
 */
int
netmap_start(struct ifnet *ifp, struct mbuf *m)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings];
	u_int i, len = MBUF_LEN(m);
	u_int error = EBUSY, lim = kring->nkr_num_slots - 1;
	struct netmap_slot *slot;

	if (netmap_verbose & NM_VERB_HOST)
		D("%s packet %d len %d from the stack", ifp->if_xname,
			kring->nr_hwcur + kring->nr_hwavail, len);
	if (len > NETMAP_BUF_SIZE) { /* too long for us */
		D("%s from_host, drop packet size %d > %d", ifp->if_xname,
			len, NETMAP_BUF_SIZE);
		m_freem(m);
		return EINVAL;
	}
	na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
	if (kring->nr_hwavail >= lim) {
		if (netmap_verbose)
			D("stack ring %s full\n", ifp->if_xname);
		goto done;	/* no space */
	}

	/* compute the insert position */
	i = kring->nr_hwcur + kring->nr_hwavail;
	if (i > lim)
		i -= lim + 1;
	slot = &kring->ring->slot[i];
	m_copydata(m, 0, len, NMB(slot));
	slot->len = len;
	slot->flags = kring->nkr_slot_flags;
	kring->nr_hwavail++;
	if (netmap_verbose & NM_VERB_HOST)
		D("wake up host ring %s %d", na->ifp->if_xname, na->num_rx_rings);
	selwakeuppri(&kring->si, PI_NET);
	error = 0;
done:
	na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);

	/* release the mbuf in either case of success or failure. As an
	 * alternative, put the mbuf in a free list and free the list
	 * only when really necessary.
	 */
	m_freem(m);

	return (error);
}
/*
 * netmap_reset() is called by the driver routines when reinitializing
 * a ring. The driver is in charge of locking to protect the kring.
 * If netmap mode is not set just return NULL.
 */
struct netmap_slot *
netmap_reset(struct netmap_adapter *na, enum txrx tx, int n,
	u_int new_cur)
{
	struct netmap_kring *kring;
	int new_hwofs, lim;

	if (na == NULL)
		return NULL;	/* no netmap support here */
	if (!(na->ifp->if_capenable & IFCAP_NETMAP))
		return NULL;	/* nothing to reinitialize */

	if (tx == NR_TX) {
		if (n >= na->num_tx_rings)
			return NULL;
		kring = na->tx_rings + n;
		new_hwofs = kring->nr_hwcur - new_cur;
	} else {
		if (n >= na->num_rx_rings)
			return NULL;
		kring = na->rx_rings + n;
		new_hwofs = kring->nr_hwcur + kring->nr_hwavail - new_cur;
	}
	lim = kring->nkr_num_slots - 1;
	if (new_hwofs > lim)
		new_hwofs -= lim + 1;

	/* Always set the new offset value and realign the ring. */
	kring->nkr_hwofs = new_hwofs;
	if (tx == NR_TX)
		kring->nr_hwavail = kring->nkr_num_slots - 1;
	ND(10, "new hwofs %d on %s %s[%d]",
			kring->nkr_hwofs, na->ifp->if_xname,
			tx == NR_TX ? "TX" : "RX", n);

#if 0 // def linux
	/* XXX check that the mappings are correct */
	/* need ring_nr, adapter->pdev, direction */
	buffer_info->dma = dma_map_single(&pdev->dev, addr, adapter->rx_buffer_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
		D("error mapping rx netmap buffer %d", i);
		// XXX fix error handling
	}
#endif
	/*
	 * Wakeup on the individual and global lock
	 * We do the wakeup here, but the ring is not yet reconfigured.
	 * However, we are under lock so there are no races.
	 */
	selwakeuppri(&kring->si, PI_NET);
	selwakeuppri(tx == NR_TX ? &na->tx_si : &na->rx_si, PI_NET);
	return kring->ring->slot;
}
/*
 * Default functions to handle rx/tx interrupts.
 * We have 4 cases:
 * 1 ring, single lock:
 *	lock(core); wake(i=0); unlock(core)
 * N rings, single lock:
 *	lock(core); wake(i); wake(N+1) unlock(core)
 * 1 ring, separate locks: (i=0)
 *	lock(i); wake(i); unlock(i)
 * N rings, separate locks:
 *	lock(i); wake(i); unlock(i); lock(core) wake(N+1) unlock(core)
 * work_done is non-null on the RX path.
 *
 * The 'q' argument also includes flags to tell whether the queue is
 * already locked on enter, and whether it should remain locked on exit.
 * This helps adapting to different defaults in drivers and OSes.
 */
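/*
 * A driver interrupt handler typically uses this routine as in the
 * sketch below (ring_nr is a placeholder for the driver's queue index;
 * the NETMAP_LOCKED_* flags would be OR-ed into q only when the queue
 * lock is already held):
 *
 *	// in the RX interrupt / napi routine:
 *	int work_done = 0;
 *	if (netmap_rx_irq(ifp, ring_nr, &work_done))
 *		return;		// netmap consumed the event
 *
 *	// in the TX interrupt routine, work_done is NULL:
 *	if (netmap_rx_irq(ifp, ring_nr, NULL))
 *		return;
 */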
int
netmap_rx_irq(struct ifnet *ifp, int q, int *work_done)
{
	struct netmap_adapter *na;
	struct netmap_kring *r;
	NM_SELINFO_T *main_wq;
	int locktype, unlocktype, lock;

	if (!(ifp->if_capenable & IFCAP_NETMAP))
		return 0;

	lock = q & (NETMAP_LOCKED_ENTER | NETMAP_LOCKED_EXIT);
	q = q & NETMAP_RING_MASK;

	ND(5, "received %s queue %d", work_done ? "RX" : "TX" , q);
	na = NA(ifp);
	if (na->na_flags & NAF_SKIP_INTR) {
		ND("use regular interrupt");
		return 0;
	}

	if (work_done) { /* RX path */
		if (q >= na->num_rx_rings)
			return 0;	// not a physical queue
		r = na->rx_rings + q;
		r->nr_kflags |= NKR_PENDINTR;
		main_wq = (na->num_rx_rings > 1) ? &na->rx_si : NULL;
		locktype = NETMAP_RX_LOCK;
		unlocktype = NETMAP_RX_UNLOCK;
	} else { /* TX path */
		if (q >= na->num_tx_rings)
			return 0;	// not a physical queue
		r = na->tx_rings + q;
		main_wq = (na->num_tx_rings > 1) ? &na->tx_si : NULL;
		work_done = &q; /* dummy */
		locktype = NETMAP_TX_LOCK;
		unlocktype = NETMAP_TX_UNLOCK;
	}
	if (na->separate_locks) {
		if (!(lock & NETMAP_LOCKED_ENTER))
			na->nm_lock(ifp, locktype, q);
		selwakeuppri(&r->si, PI_NET);
		na->nm_lock(ifp, unlocktype, q);
		if (main_wq) {
			na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
			selwakeuppri(main_wq, PI_NET);
			na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);
		}
		/* lock the queue again if requested */
		if (lock & NETMAP_LOCKED_EXIT)
			na->nm_lock(ifp, locktype, q);
	} else {
		if (!(lock & NETMAP_LOCKED_ENTER))
			na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
		selwakeuppri(&r->si, PI_NET);
		if (main_wq)
			selwakeuppri(main_wq, PI_NET);
		if (!(lock & NETMAP_LOCKED_EXIT))
			na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);
	}
	*work_done = 1; /* do not fire napi again */
	return 1;
}
#ifdef linux	/* linux-specific routines */

/*
 * Remap linux arguments into the FreeBSD call.
 * - pwait is the poll table, passed as 'dev';
 *   If pwait == NULL someone else already woke up before. We can report
 *   events but they are filtered upstream.
 *   If pwait != NULL, then pwait->key contains the list of events.
 * - events is computed from pwait as above.
 * - file is passed as 'td';
 */
static u_int
linux_netmap_poll(struct file * file, struct poll_table_struct *pwait)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
	int events = POLLIN | POLLOUT; /* XXX maybe... */
#elif LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)
	int events = pwait ? pwait->key : POLLIN | POLLOUT;
#else /* in 3.4.0 field 'key' was renamed to '_key' */
	int events = pwait ? pwait->_key : POLLIN | POLLOUT;
#endif
	return netmap_poll((void *)pwait, events, (void *)file);
}
static int
linux_netmap_mmap(struct file *f, struct vm_area_struct *vma)
{
	int lut_skip, i, j;
	int user_skip = 0;
	struct lut_entry *l_entry;
	int error = 0;
	unsigned long off, tomap;
	/*
	 * vma->vm_start: start of mapping user address space
	 * vma->vm_end: end of the mapping user address space
	 * vma->vm_pgoff: offset of first page in the device
	 */

	// XXX security checks

	error = netmap_get_memory(f->private_data);
	ND("get_memory returned %d", error);
	if (error)
		return -error;

	off = vma->vm_pgoff << PAGE_SHIFT; /* offset in bytes */
	tomap = vma->vm_end - vma->vm_start;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {  /* loop through obj_pools */
		const struct netmap_obj_pool *p = &nm_mem.pools[i];
		/*
		 * In each pool memory is allocated in clusters
		 * of size _clustsize, each containing clustentries
		 * entries. For each object k we already store the
		 * vtophys mapping in lut[k] so we use that, scanning
		 * the lut[] array in steps of clustentries,
		 * and we map each cluster (not individual pages,
		 * it would be overkill -- XXX slow ? 20130415).
		 */

		/*
		 * We interpret vm_pgoff as an offset into the whole
		 * netmap memory, as if all clusters were contiguous.
		 */
		for (lut_skip = 0, j = 0; j < p->_numclusters; j++, lut_skip += p->clustentries) {
			unsigned long paddr, mapsize;
			if (p->_clustsize <= off) {
				off -= p->_clustsize;
				continue;
			}
			l_entry = &p->lut[lut_skip]; /* first obj in the cluster */
			paddr = l_entry->paddr + off;
			mapsize = p->_clustsize - off;
			off = 0;
			if (mapsize > tomap)
				mapsize = tomap;
			ND("remap_pfn_range(%lx, %lx, %lx)",
				vma->vm_start + user_skip,
				paddr >> PAGE_SHIFT, mapsize);
			if (remap_pfn_range(vma, vma->vm_start + user_skip,
					paddr >> PAGE_SHIFT, mapsize,
					vma->vm_page_prot))
				return -EAGAIN; // XXX check return value
			user_skip += mapsize;
			tomap -= mapsize;
			if (tomap == 0)
				goto done;
		}
	}

done:
	return 0;
}

static netdev_tx_t
linux_netmap_start(struct sk_buff *skb, struct net_device *dev)
{
	netmap_start(dev, skb);
	return (NETDEV_TX_OK);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)	// XXX was 38
#define LIN_IOCTL_NAME	.ioctl
static int
linux_netmap_ioctl(struct inode *inode, struct file *file, u_int cmd, u_long data /* arg */)
#else
#define LIN_IOCTL_NAME	.unlocked_ioctl
static long
linux_netmap_ioctl(struct file *file, u_int cmd, u_long data /* arg */)
#endif
{
	int ret;
	struct nmreq nmr;

	bzero(&nmr, sizeof(nmr));

	if (data && copy_from_user(&nmr, (void *)data, sizeof(nmr) ) != 0)
		return -EFAULT;
	ret = netmap_ioctl(NULL, cmd, (caddr_t)&nmr, 0, (void *)file);
	if (data && copy_to_user((void*)data, &nmr, sizeof(nmr) ) != 0)
		return -EFAULT;
	return -ret;
}

static int
netmap_release(struct inode *inode, struct file *file)
{
	(void)inode;	/* UNUSED */
	if (file->private_data)
		netmap_dtor(file->private_data);
	return (0);
}

static int
linux_netmap_open(struct inode *inode, struct file *file)
{
	struct netmap_priv_d *priv;
	(void)inode;	/* UNUSED */

	priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF,
			      M_NOWAIT | M_ZERO);
	if (priv == NULL)
		return -ENOMEM;

	file->private_data = priv;

	return (0);
}

static struct file_operations netmap_fops = {
	.open = linux_netmap_open,
	.mmap = linux_netmap_mmap,
	LIN_IOCTL_NAME = linux_netmap_ioctl,
	.poll = linux_netmap_poll,
	.release = netmap_release,
};

static struct miscdevice netmap_cdevsw = {	/* same name as FreeBSD */
	MISC_DYNAMIC_MINOR,
	"netmap",
	&netmap_fops,
};
static int netmap_init(void);
static void netmap_fini(void);

/* Errors have negative values on linux */
static int linux_netmap_init(void)
{
	return -netmap_init();
}

module_init(linux_netmap_init);
module_exit(netmap_fini);
/* export certain symbols to other modules */
EXPORT_SYMBOL(netmap_attach);		// driver attach routines
EXPORT_SYMBOL(netmap_detach);		// driver detach routines
EXPORT_SYMBOL(netmap_ring_reinit);	// ring init on error
EXPORT_SYMBOL(netmap_buffer_lut);
EXPORT_SYMBOL(netmap_total_buffers);	// index check
EXPORT_SYMBOL(netmap_buffer_base);
EXPORT_SYMBOL(netmap_reset);		// ring init routines
EXPORT_SYMBOL(netmap_buf_size);
EXPORT_SYMBOL(netmap_rx_irq);		// default irq handler
EXPORT_SYMBOL(netmap_no_pendintr);	// XXX mitigation - should go away

MODULE_AUTHOR("http://info.iet.unipi.it/~luigi/netmap/");
MODULE_DESCRIPTION("The netmap packet I/O framework");
MODULE_LICENSE("Dual BSD/GPL"); /* the code here is all BSD. */
#else /* __FreeBSD__ */

static struct cdevsw netmap_cdevsw = {
	.d_version = D_VERSION,
	.d_name = "netmap",
	.d_open = netmap_open,
	.d_mmap = netmap_mmap,
	.d_mmap_single = netmap_mmap_single,
	.d_ioctl = netmap_ioctl,
	.d_poll = netmap_poll,
	.d_close = netmap_close,
};
#endif /* __FreeBSD__ */
/*
 *---- support for virtual bridge -----
 */

/* ----- FreeBSD if_bridge hash function ------- */

/*
 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
 *
 * http://www.burtleburtle.net/bob/hash/spooky.html
 */
#define mix(a, b, c)						\
do {								\
	a -= b; a -= c; a ^= (c >> 13);				\
	b -= c; b -= a; b ^= (a << 8);				\
	c -= a; c -= b; c ^= (b >> 13);				\
	a -= b; a -= c; a ^= (c >> 12);				\
	b -= c; b -= a; b ^= (a << 16);				\
	c -= a; c -= b; c ^= (b >> 5);				\
	a -= b; a -= c; a ^= (c >> 3);				\
	b -= c; b -= a; b ^= (a << 10);				\
	c -= a; c -= b; c ^= (b >> 15);				\
} while (/*CONSTCOND*/0)

static __inline uint32_t
nm_bridge_rthash(const uint8_t *addr)
{
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; // hash key

	b += addr[5] << 8;
	b += addr[4];
	a += addr[3] << 24;
	a += addr[2] << 16;
	a += addr[1] << 8;
	a += addr[0];

	mix(a, b, c);
#define BRIDGE_RTHASH_MASK	(NM_BDG_HASH-1)
	return (c & BRIDGE_RTHASH_MASK);
}
#undef mix
static int
bdg_netmap_reg(struct ifnet *ifp, int onoff)
{
	int i, err = 0;
	struct nm_bridge *b = ifp->if_bridge;

	BDG_LOCK(b);
	if (onoff) {
		/* the interface must be already in the list.
		 * only need to mark the port as active
		 */
		ND("should attach %s to the bridge", ifp->if_xname);
		for (i = 0; i < NM_BDG_MAXPORTS; i++)
			if (b->bdg_ports[i] == ifp)
				break;
		if (i == NM_BDG_MAXPORTS) {
			D("no more ports available");
			err = EINVAL;
			goto done;
		}
		ND("setting %s in netmap mode", ifp->if_xname);
		ifp->if_capenable |= IFCAP_NETMAP;
		NA(ifp)->bdg_port = i;
		b->act_ports |= (1<<i);
		b->bdg_ports[i] = ifp;
	} else {
		/* should be in the list, too -- remove from the mask */
		ND("removing %s from netmap mode", ifp->if_xname);
		ifp->if_capenable &= ~IFCAP_NETMAP;
		i = NA(ifp)->bdg_port;
		b->act_ports &= ~(1<<i);
	}
done:
	BDG_UNLOCK(b);
	return err;
}
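
/*
 * Userspace view (illustrative sketch, error handling omitted): a
 * bridge port is attached like any other interface, and the NIOCREGIF
 * below eventually invokes bdg_netmap_reg(). The port name "vale0a"
 * is a hypothetical placeholder; it must carry the NM_NAME prefix so
 * that the request is routed to a virtual bridge.
 *
 *	struct nmreq req;
 *	int fd = open("/dev/netmap", O_RDWR);
 *
 *	bzero(&req, sizeof(req));
 *	req.nr_version = NETMAP_API;
 *	strcpy(req.nr_name, "vale0a");	// NM_NAME prefix + bridge id
 *	ioctl(fd, NIOCREGIF, &req);	// attach to the virtual bridge
 */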
static int
nm_bdg_flush(struct nm_bdg_fwd *ft, int n, struct ifnet *ifp)
{
	int i, ifn;
	uint64_t all_dst, dst;
	uint32_t sh, dh;
	uint64_t mysrc = 1 << NA(ifp)->bdg_port;
	uint64_t smac, dmac;
	struct netmap_slot *slot;
	struct nm_bridge *b = ifp->if_bridge;
2229 ND("prepare to send %d packets, act_ports 0x%x", n, b->act_ports);
2230 /* only consider valid destinations */
2231 all_dst = (b->act_ports & ~mysrc);
2232 /* first pass: hash and find destinations */
2233 for (i = 0; likely(i < n); i++) {
2234 uint8_t *buf = ft[i].buf;
2235 dmac = le64toh(*(uint64_t *)(buf)) & 0xffffffffffff;
2236 smac = le64toh(*(uint64_t *)(buf + 4));
2238 if (unlikely(netmap_verbose)) {
2239 uint8_t *s = buf+6, *d = buf;
2240 D("%d len %4d %02x:%02x:%02x:%02x:%02x:%02x -> %02x:%02x:%02x:%02x:%02x:%02x",
2243 s[0], s[1], s[2], s[3], s[4], s[5],
2244 d[0], d[1], d[2], d[3], d[4], d[5]);
2247 * The hash is somewhat expensive, there might be some
2248 * worthwhile optimizations here.
2250 if ((buf[6] & 1) == 0) { /* valid src */
2252 sh = nm_bridge_rthash(buf+6); // XXX hash of source
2253 /* update source port forwarding entry */
2254 b->ht[sh].mac = smac; /* XXX expire ? */
2255 b->ht[sh].ports = mysrc;
2257 D("src %02x:%02x:%02x:%02x:%02x:%02x on port %d",
2258 s[0], s[1], s[2], s[3], s[4], s[5], NA(ifp)->bdg_port);
2261 if ( (buf[0] & 1) == 0) { /* unicast */
2263 dh = nm_bridge_rthash(buf); // XXX hash of dst
2264 if (b->ht[dh].mac == dmac) { /* found dst */
2265 dst = b->ht[dh].ports;
2267 D("dst %02x:%02x:%02x:%02x:%02x:%02x to port %x",
2268 d[0], d[1], d[2], d[3], d[4], d[5], (uint32_t)(dst >> 16));
2273 dst &= all_dst; /* only consider valid ports */
2274 if (unlikely(netmap_verbose))
2275 D("pkt goes to ports 0x%x", (uint32_t)dst);
	/* second pass, scan interfaces and forward */
	all_dst = (b->act_ports & ~mysrc);
	for (ifn = 0; all_dst; ifn++) {
		struct ifnet *dst_ifp = b->bdg_ports[ifn];
		struct netmap_adapter *na;
		struct netmap_kring *kring;
		struct netmap_ring *ring;
		int j, lim, sent, locked;

		if (!dst_ifp)
			continue;
		ND("scan port %d %s", ifn, dst_ifp->if_xname);
		dst = 1 << ifn;
		if ((dst & all_dst) == 0)	/* skip if not set */
			continue;
		all_dst &= ~dst;	/* clear current node */
		na = NA(dst_ifp);

		ring = NULL;
		kring = NULL;
		lim = sent = locked = 0;
		/* inside, scan slots */
		for (i = 0; likely(i < n); i++) {
			if ((ft[i].dst & dst) == 0)
				continue;	/* not here */
			if (!locked) {
				kring = &na->rx_rings[0];
				ring = kring->ring;
				lim = kring->nkr_num_slots - 1;
				na->nm_lock(dst_ifp, NETMAP_RX_LOCK, 0);
				locked = 1;
			}
			if (unlikely(kring->nr_hwavail >= lim)) {
				if (netmap_verbose)
					D("rx ring full on %s", ifp->if_xname);
				break;
			}
			j = kring->nr_hwcur + kring->nr_hwavail;
			if (j > lim)
				j -= kring->nkr_num_slots;
			slot = &ring->slot[j];
			ND("send %d %d bytes at %s:%d", i, ft[i].len, dst_ifp->if_xname, j);
			pkt_copy(ft[i].buf, NMB(slot), ft[i].len);
			slot->len = ft[i].len;
			kring->nr_hwavail++;
			sent++;
		}
		if (locked) {
			ND("sent %d on %s", sent, dst_ifp->if_xname);
			if (sent)
				selwakeuppri(&kring->si, PI_NET);
			na->nm_lock(dst_ifp, NETMAP_RX_UNLOCK, 0);
		}
	}
	return 0;
}
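
/*
 * Worked example for the slot arithmetic above: with nkr_num_slots =
 * 1024 (lim = 1023), nr_hwcur = 1000 and nr_hwavail = 50, the next
 * free rx slot is j = 1000 + 50 = 1050; since j > lim, j becomes
 * 1050 - 1024 = 26, so the ring wraps around without any modulo
 * operation in the forwarding fast path.
 */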
/*
 * main dispatch routine
 */
static int
bdg_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	int i, j, k, lim = kring->nkr_num_slots - 1;
	struct nm_bdg_fwd *ft = (struct nm_bdg_fwd *)(ifp + 1);
	int ft_i;	/* position in the forwarding table */

	k = ring->cur;
	if (k > lim)
		return netmap_ring_reinit(kring);
	if (do_lock)
		na->nm_lock(ifp, NETMAP_TX_LOCK, ring_nr);

	if (netmap_bridge <= 0) { /* testing only */
		j = k;	// used all
		goto done;
	}
	if (netmap_bridge > NM_BDG_BATCH)
		netmap_bridge = NM_BDG_BATCH;

	ft_i = 0;	/* start from 0 */
	for (j = kring->nr_hwcur; likely(j != k); j = unlikely(j == lim) ? 0 : j+1) {
		struct netmap_slot *slot = &ring->slot[j];
		int len = ft[ft_i].len = slot->len;
		char *buf = ft[ft_i].buf = NMB(slot);

		prefetch(buf);
		if (unlikely(len < 14))
			continue;	/* runt frame, skip */
		if (unlikely(++ft_i == netmap_bridge))
			ft_i = nm_bdg_flush(ft, ft_i, ifp);
	}
	if (ft_i)
		ft_i = nm_bdg_flush(ft, ft_i, ifp);
done:
	/* count how many packets we sent */
	i = k - j;	/* cur - hwcur */
	if (i < 0)
		i += kring->nkr_num_slots;
	kring->nr_hwavail = kring->nkr_num_slots - 1 - i;
	if (j != k)
		D("early break at %d/%d, avail %d", j, k, kring->nr_hwavail);

	kring->nr_hwcur = j;
	ring->avail = kring->nr_hwavail;
	if (do_lock)
		na->nm_lock(ifp, NETMAP_TX_UNLOCK, ring_nr);

	if (netmap_verbose)
		D("%s ring %d lock %d", ifp->if_xname, ring_nr, do_lock);
	return 0;
}
static int
bdg_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, n, lim = kring->nkr_num_slots - 1;
	u_int k = ring->cur, resvd = ring->reserved;

	ND("%s ring %d lock %d avail %d",
		ifp->if_xname, ring_nr, do_lock, kring->nr_hwavail);

	if (k > lim)
		return netmap_ring_reinit(kring);
	if (do_lock)
		na->nm_lock(ifp, NETMAP_RX_LOCK, ring_nr);
	/* skip past packets that userspace has released */
	j = kring->nr_hwcur;	/* netmap ring index */
	if (resvd > 0) {
		if (resvd + ring->avail >= lim + 1) {
			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
			ring->reserved = resvd = 0; // XXX panic...
		}
		k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
	}
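
	/*
	 * Worked example for the adjustment above: with lim = 1023,
	 * cur k = 2 and resvd = 5, userspace still holds the 5 slots
	 * preceding cur, so k becomes 2 + 1024 - 5 = 1021 and those
	 * buffers are not recycled yet.
	 */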
	if (j != k) { /* userspace has released some packets. */
		/* n is unsigned, so compute the wraparound explicitly */
		n = (k >= j) ? k - j : k + kring->nkr_num_slots - j;
		ND("userspace releases %d packets", n);
		for (n = 0; likely(j != k); n++) {
			struct netmap_slot *slot = &ring->slot[j];
			void *addr = NMB(slot);

			if (addr == netmap_buffer_base) { /* bad buf */
				if (do_lock)
					na->nm_lock(ifp, NETMAP_RX_UNLOCK, ring_nr);
				return netmap_ring_reinit(kring);
			}
			/* decrease refcount for buffer */

			slot->flags &= ~NS_BUF_CHANGED;
			j = unlikely(j == lim) ? 0 : j + 1;
		}
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;
	}
	/* tell userspace that there are new packets */
	ring->avail = kring->nr_hwavail - resvd;

	if (do_lock)
		na->nm_lock(ifp, NETMAP_RX_UNLOCK, ring_nr);
	return 0;
}
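
/*
 * Userspace counterpart (illustrative sketch): draining the rx ring
 * after a NIOCRXSYNC, which lands in bdg_netmap_rxsync() for a bridge
 * port. consume_packet() is a hypothetical application routine.
 *
 *	ioctl(fd, NIOCRXSYNC, NULL);
 *	struct netmap_ring *ring = NETMAP_RXRING(nifp, 0);
 *
 *	while (ring->avail > 0) {
 *		struct netmap_slot *slot = &ring->slot[ring->cur];
 *		consume_packet(NETMAP_BUF(ring, slot->buf_idx), slot->len);
 *		ring->cur = NETMAP_RING_NEXT(ring, ring->cur);
 *		ring->avail--;
 *	}
 */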
static void
bdg_netmap_attach(struct ifnet *ifp)
{
	struct netmap_adapter na;

	ND("attaching virtual bridge");
	bzero(&na, sizeof(na));
	na.ifp = ifp;
	na.separate_locks = 1;
	na.num_tx_desc = NM_BRIDGE_RINGSIZE;
	na.num_rx_desc = NM_BRIDGE_RINGSIZE;
	na.nm_txsync = bdg_netmap_txsync;
	na.nm_rxsync = bdg_netmap_rxsync;
	na.nm_register = bdg_netmap_reg;
	netmap_attach(&na, 1);
}
#endif /* NM_BRIDGE */
static struct cdev *netmap_dev; /* /dev/netmap character device. */

/*
 * Module loader.
 *
 * Create the /dev/netmap device and initialize all global
 * variables.
 *
 * Return 0 on success, errno on failure.
 */
static int
netmap_init(void)
{
	int error;

	error = netmap_memory_init();
	if (error != 0) {
		printf("netmap: unable to initialize the memory allocator.\n");
		return (error);
	}
	printf("netmap: loaded module\n");
	netmap_dev = make_dev(&netmap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660,
			      "netmap");
#ifdef NM_BRIDGE
	{
	int i;
	for (i = 0; i < NM_BRIDGES; i++)
		mtx_init(&nm_bridges[i].bdg_lock, "bdg lock", "bdg_lock", MTX_DEF);
	}
#endif
	return (error);
}
/*
 * Module unloader.
 *
 * Free all the memory, and destroy the ``/dev/netmap`` device.
 */
static void
netmap_fini(void)
{
	destroy_dev(netmap_dev);
	netmap_memory_fini();
	printf("netmap: unloaded module.\n");
}
#ifdef __FreeBSD__
/*
 * Kernel entry point.
 *
 * Initialize/finalize the module and return.
 *
 * Return 0 on success, errno on failure.
 */
static int
netmap_loader(__unused struct module *module, int event, __unused void *arg)
{
	int error = 0;

	switch (event) {
	case MOD_LOAD:
		error = netmap_init();
		break;
	case MOD_UNLOAD:
		netmap_fini();
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}

DEV_MODULE(netmap, netmap_loader, NULL);
#endif /* __FreeBSD__ */