/*
 * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module supports memory mapped access to network devices,
 * see netmap(4).
 *
 * The module uses a large memory pool allocated by the kernel
 * and accessible as mmapped memory by multiple userspace threads/processes.
 * The memory pool contains packet buffers and "netmap rings",
 * i.e. user-accessible copies of the interface's queues.
 *
 * Access to the network card works like this:
 * 1. a process/thread issues one or more open() on /dev/netmap, to create
 *    a select()able file descriptor on which events are reported.
 * 2. on each descriptor, the process issues an ioctl() to identify
 *    the interface that should report events to the file descriptor.
 * 3. on each descriptor, the process issues an mmap() request to
 *    map the shared memory region within the process' address space.
 *    The list of interesting queues is indicated by a location in
 *    the shared memory region.
 * 4. using the functions in the netmap(4) userspace API, a process
 *    can look up the occupation state of a queue, access memory buffers,
 *    and retrieve received packets or enqueue packets to transmit.
 * 5. using some ioctl()s the process can synchronize the userspace view
 *    of the queue with the actual status in the kernel. This includes both
 *    receiving the notification of new packets, and transmitting new
 *    packets on the output interface.
 * 6. select() or poll() can be used to wait for events on individual
 *    transmit or receive queues (or all queues for a given interface).
 * An illustrative usage sketch follows this comment.
 */
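/*
 * Illustrative only: a minimal userspace sketch of the sequence above.
 * Error handling is omitted, "em0" is just an example interface name,
 * and the ring-walking details are simplified; netmap(4) is the
 * authoritative reference for the API used here.
 *
 *	struct nmreq req;
 *	struct netmap_if *nifp;
 *	struct netmap_ring *ring;
 *	struct pollfd pfd;
 *	char *mem;
 *	int fd;
 *
 *	fd = open("/dev/netmap", O_RDWR);		// step 1
 *	bzero(&req, sizeof(req));
 *	req.nr_version = NETMAP_API;
 *	strncpy(req.nr_name, "em0", sizeof(req.nr_name));
 *	ioctl(fd, NIOCREGIF, &req);			// step 2
 *	mem = mmap(0, req.nr_memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);				// step 3
 *	nifp = NETMAP_IF(mem, req.nr_offset);		// step 4
 *	ring = NETMAP_RXRING(nifp, 0);
 *	for (;;) {
 *		pfd.fd = fd;
 *		pfd.events = POLLIN;
 *		poll(&pfd, 1, -1);			// step 6
 *		while (ring->avail > 0) {		// consume packets
 *			char *buf = NETMAP_BUF(ring,
 *			    ring->slot[ring->cur].buf_idx);
 *			// ... process ring->slot[ring->cur].len bytes at buf
 *			ring->cur = NETMAP_RING_NEXT(ring, ring->cur);
 *			ring->avail--;
 *		}
 *		// ioctl(fd, NIOCRXSYNC, NULL) would perform step 5
 *		// without blocking in poll().
 *	}
 */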
#include <sys/cdefs.h>	/* prerequisite */
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/conf.h>	/* cdevsw struct */
#include <sys/uio.h>	/* uio struct */
#include <sys/sockio.h>
#include <sys/socketvar.h>	/* struct socket */
#include <sys/malloc.h>
#include <sys/mman.h>	/* PROT_EXEC */
#include <vm/vm.h>	/* vtophys */
#include <vm/pmap.h>	/* vtophys */
#include <sys/socket.h>	/* sockaddrs */
#include <machine/bus.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/bpf.h>	/* BIOCIMMEDIATE */
#include <net/vnet.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <machine/bus.h>	/* bus_dmamap_* */

MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");
/*
 * lock and unlock for the netmap memory allocator
 */
#define NMA_LOCK()	mtx_lock(&nm_mem->nm_mtx);
#define NMA_UNLOCK()	mtx_unlock(&nm_mem->nm_mtx);

static struct netmap_mem_d *nm_mem;	/* Our memory allocator. */

u_int netmap_total_buffers;
char *netmap_buffer_base;	/* address of an invalid buffer */

/* user-controlled variables */
int netmap_verbose;

static int netmap_no_timestamp; /* don't timestamp on rxsync */

SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args");
SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
    CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
    CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp");
int netmap_buf_size = 2048;
TUNABLE_INT("hw.netmap.buf_size", &netmap_buf_size);
SYSCTL_INT(_dev_netmap, OID_AUTO, buf_size,
    CTLFLAG_RD, &netmap_buf_size, 0, "Size of packet buffers");
int netmap_mitigate = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, mitigate, CTLFLAG_RW, &netmap_mitigate, 0, "");
int netmap_no_pendintr = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr,
    CTLFLAG_RW, &netmap_no_pendintr, 0, "Always look for new received packets.");

/*------------- memory allocator -----------------*/
#ifdef NETMAP_MEM2
#include "netmap_mem2.c"
#else /* !NETMAP_MEM2 */
#include "netmap_mem1.c"
#endif /* !NETMAP_MEM2 */
/*------------ end of memory allocator ----------*/
/* Structure associated to each thread which registered an interface. */
struct netmap_priv_d {
	struct netmap_if *np_nifp;	/* netmap interface descriptor. */

	struct ifnet	*np_ifp;	/* device for which we hold a reference */
	int		np_ringid;	/* from the ioctl */
	u_int		np_qfirst, np_qlast;	/* range of rings to scan */
	uint16_t	np_txpoll;	/* nonzero unless NETMAP_NO_TX_POLL was set */
};
/*
 * File descriptor's private data destructor.
 *
 * Call nm_register(ifp,0) to stop netmap mode on the interface and
 * revert to normal operation. We expect that np_ifp has not gone away.
 */
static void
netmap_dtor_locked(void *data)
{
	struct netmap_priv_d *priv = data;
	struct ifnet *ifp = priv->np_ifp;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_if *nifp = priv->np_nifp;

	na->refcount--;
	if (na->refcount <= 0) {	/* last instance */
		u_int i, j, lim;

		D("deleting last netmap instance for %s", ifp->if_xname);
		/*
		 * there is a race here with *_netmap_task() and
		 * netmap_poll(), which don't run under NETMAP_REG_LOCK.
		 * na->refcount == 0 && na->ifp->if_capenable & IFCAP_NETMAP
		 * (aka NETMAP_DELETING(na)) are a unique marker that the
		 * device is dying.
		 * Before destroying stuff we sleep a bit, and then complete
		 * the job. NIOCREG should realize the condition and
		 * loop until they can continue; the other routines
		 * should check the condition at entry and quit if
		 * they cannot proceed.
		 */
		na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
		tsleep(na, 0, "NIOCUNREG", 4);
		na->nm_lock(ifp, NETMAP_REG_LOCK, 0);
		na->nm_register(ifp, 0); /* off, clear IFCAP_NETMAP */
		/* Wake up any sleeping threads. netmap_poll will
		 * then return POLLERR.
		 */
		for (i = 0; i < na->num_tx_rings + 1; i++)
			selwakeuppri(&na->tx_rings[i].si, PI_NET);
		for (i = 0; i < na->num_rx_rings + 1; i++)
			selwakeuppri(&na->rx_rings[i].si, PI_NET);
		selwakeuppri(&na->tx_si, PI_NET);
		selwakeuppri(&na->rx_si, PI_NET);
		/* release all buffers */
		for (i = 0; i < na->num_tx_rings + 1; i++) {
			struct netmap_ring *ring = na->tx_rings[i].ring;

			lim = na->tx_rings[i].nkr_num_slots;
			for (j = 0; j < lim; j++)
				netmap_free_buf(nifp, ring->slot[j].buf_idx);
			/* knlist_destroy(&na->tx_rings[i].si.si_note); */
			mtx_destroy(&na->tx_rings[i].q_lock);
		}
		for (i = 0; i < na->num_rx_rings + 1; i++) {
			struct netmap_ring *ring = na->rx_rings[i].ring;

			lim = na->rx_rings[i].nkr_num_slots;
			for (j = 0; j < lim; j++)
				netmap_free_buf(nifp, ring->slot[j].buf_idx);
			/* knlist_destroy(&na->rx_rings[i].si.si_note); */
			mtx_destroy(&na->rx_rings[i].q_lock);
		}
		/* XXX kqueue(9) needed; these will mirror knlist_init. */
		/* knlist_destroy(&na->tx_si.si_note); */
		/* knlist_destroy(&na->rx_si.si_note); */
		netmap_free_rings(na);
		wakeup(na);
	}
	netmap_if_free(nifp);
}
static void
netmap_dtor(void *data)
{
	struct netmap_priv_d *priv = data;
	struct ifnet *ifp = priv->np_ifp;
	struct netmap_adapter *na = NA(ifp);

	na->nm_lock(ifp, NETMAP_REG_LOCK, 0);
	netmap_dtor_locked(data);
	na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);

	if_rele(ifp);
	bzero(priv, sizeof(*priv));	/* XXX for safety */
	free(priv, M_DEVBUF);
}
/*
 * mmap(2) support for the "netmap" device.
 *
 * Expose all the memory previously allocated by our custom memory
 * allocator: this way the user has only to issue a single mmap(2), and
 * can work on all the data structures flawlessly.
 *
 * Return 0 on success, -1 otherwise.
 */
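/*
 * For example (userspace sketch, error checks omitted), once NIOCREGIF
 * or NIOCGINFO has filled in nr_memsize, a single call maps everything:
 *
 *	void *mem = mmap(0, req.nr_memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *
 * All offsets exchanged with the kernel (e.g. req.nr_offset) are then
 * interpreted relative to this one mapping.
 */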
static int
netmap_mmap(__unused struct cdev *dev,
#if __FreeBSD_version < 900000
	vm_offset_t offset, vm_paddr_t *paddr, int nprot
#else
	vm_ooffset_t offset, vm_paddr_t *paddr, int nprot,
	__unused vm_memattr_t *memattr
#endif
	)
{
	if (nprot & PROT_EXEC)
		return (-1);	// XXX -1 or EINVAL ?

	ND("request for offset 0x%x", (uint32_t)offset);
	*paddr = netmap_ofstophys(offset);

	return (0);
}
/*
 * Handlers for synchronization of the queues from/to the host.
 *
 * netmap_sync_to_host() passes packets up. We are called from a
 * system call in user process context, and the only contention
 * can be among multiple user threads erroneously calling
 * this routine concurrently. In principle we should not even
 * need to lock.
 */
static void
netmap_sync_to_host(struct netmap_adapter *na)
{
	struct netmap_kring *kring = &na->tx_rings[na->num_tx_rings];
	struct netmap_ring *ring = kring->ring;
	struct mbuf *head = NULL, *tail = NULL, *m;
	u_int k, n, lim = kring->nkr_num_slots - 1;

	k = ring->cur;
	if (k > lim) {
		netmap_ring_reinit(kring);
		return;
	}
	// na->nm_lock(na->ifp, NETMAP_CORE_LOCK, 0);

	/* Take packets from hwcur to cur and pass them up.
	 * In case of no buffers we give up. At the end of the loop,
	 * the queue is drained in all cases.
	 */
	for (n = kring->nr_hwcur; n != k;) {
		struct netmap_slot *slot = &ring->slot[n];

		n = (n == lim) ? 0 : n + 1;
		if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE) {
			D("bad pkt at %d len %d", n, slot->len);
			continue;
		}
		m = m_devget(NMB(slot), slot->len, 0, na->ifp, NULL);
		if (m == NULL)
			break;
		if (tail)
			tail->m_nextpkt = m;
		else
			head = m;
		tail = m;
		m->m_nextpkt = NULL;
	}
	kring->nr_hwcur = k;
	kring->nr_hwavail = ring->avail = lim;
	// na->nm_lock(na->ifp, NETMAP_CORE_UNLOCK, 0);

	/* send packets up, outside the lock */
	while ((m = head) != NULL) {
		head = head->m_nextpkt;
		m->m_nextpkt = NULL;
		if (netmap_verbose & NM_VERB_HOST)
			D("sending up pkt %p size %d", m, MBUF_LEN(m));
		NM_SEND_UP(na->ifp, m);
	}
}
/*
 * rxsync backend for packets coming from the host stack.
 * They have been put in the queue by netmap_start() so we
 * need to protect access to the kring using a lock.
 *
 * This routine also does the selrecord if called from the poll handler
 * (we know because td != NULL).
 */
static void
netmap_sync_from_host(struct netmap_adapter *na, struct thread *td)
{
	struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings];
	struct netmap_ring *ring = kring->ring;
	u_int j, n, lim = kring->nkr_num_slots;
	u_int k = ring->cur, resvd = ring->reserved;

	na->nm_lock(na->ifp, NETMAP_CORE_LOCK, 0);
	if (k >= lim) {
		netmap_ring_reinit(kring);
		return;
	}
	/* new packets are already set in nr_hwavail */
	/* skip past packets that userspace has released */
	j = kring->nr_hwcur;
	if (resvd > 0) {
		if (resvd + ring->avail >= lim + 1) {
			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
			ring->reserved = resvd = 0; // XXX panic...
		}
		k = (k >= resvd) ? k - resvd : k + lim - resvd;
	}
	if (j != k) {
		n = k >= j ? k - j : k + lim - j;
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;
	}
	k = ring->avail = kring->nr_hwavail - resvd;
	if (k == 0 && td)
		selrecord(td, &kring->si);
	if (k && (netmap_verbose & NM_VERB_HOST))
		D("%d pkts from stack", k);
	na->nm_lock(na->ifp, NETMAP_CORE_UNLOCK, 0);
}
/*
 * Get a refcounted reference to an interface.
 * Return ENXIO if the interface does not exist, EINVAL if netmap
 * is not supported by the interface.
 * If successful, hold a reference.
 */
static int
get_ifp(const char *name, struct ifnet **ifp)
{
	*ifp = ifunit_ref(name);
	if (*ifp == NULL)
		return (ENXIO);
	/* can do this if the capability exists and if_pspare[0]
	 * points to the netmap descriptor.
	 */
	if ((*ifp)->if_capabilities & IFCAP_NETMAP && NA(*ifp))
		return 0;	/* valid pointer, we hold the refcount */
	if_rele(*ifp);
	return EINVAL;	// not NETMAP capable
}
/*
 * Error routine called when txsync/rxsync detects an error.
 * Can't do much more than resetting cur = hwcur, avail = hwavail.
 * Return 1 on reinit.
 *
 * This routine is only called by the upper half of the kernel.
 * It only reads hwcur (which is changed only by the upper half, too)
 * and hwavail (which may be changed by the lower half, but only on
 * a tx ring and only to increase it, so any error will be recovered
 * on the next call). For the above reasons, we don't strictly need
 * to take the lock.
 */
int
netmap_ring_reinit(struct netmap_kring *kring)
{
	struct netmap_ring *ring = kring->ring;
	u_int i, lim = kring->nkr_num_slots - 1;
	int errors = 0;

	D("called for %s", kring->na->ifp->if_xname);
	for (i = 0; i <= lim; i++) {
		u_int idx = ring->slot[i].buf_idx;
		u_int len = ring->slot[i].len;
		if (idx < 2 || idx >= netmap_total_buffers) {
			if (!errors++)
				D("bad buffer at slot %d idx %d len %d", i, idx, len);
			ring->slot[i].buf_idx = 0;
			ring->slot[i].len = 0;
		} else if (len > NETMAP_BUF_SIZE) {
			ring->slot[i].len = 0;
			if (!errors++)
				D("bad len %d at slot %d idx %d",
					len, i, idx);
		}
	}
	if (errors) {
		int pos = kring - kring->na->tx_rings;
		int n = kring->na->num_tx_rings + 1;

		D("total %d errors", errors);
		D("%s %s[%d] reinit, cur %d -> %d avail %d -> %d",
			kring->na->ifp->if_xname,
			pos < n ? "TX" : "RX", pos < n ? pos : pos - n,
			ring->cur, kring->nr_hwcur,
			ring->avail, kring->nr_hwavail);
		ring->cur = kring->nr_hwcur;
		ring->avail = kring->nr_hwavail;
	}
	return (errors ? 1 : 0);
}
/*
 * Set the ring ID. For devices with a single queue, a request
 * for all rings is the same as a single ring.
 */
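/*
 * Example nr_ringid encodings in the NIOCREGIF request, matching the
 * cases handled below (a sketch, not an exhaustive list):
 *
 *	req.nr_ringid = 0;			// all hardware rings
 *	req.nr_ringid = NETMAP_HW_RING | 2;	// hardware ring 2 only
 *	req.nr_ringid = NETMAP_SW_RING;		// host (stack) rings only
 *	req.nr_ringid |= NETMAP_NO_TX_POLL;	// don't txsync on poll()
 */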
static int
netmap_set_ringid(struct netmap_priv_d *priv, u_int ringid)
{
	struct ifnet *ifp = priv->np_ifp;
	struct netmap_adapter *na = NA(ifp);
	u_int i = ringid & NETMAP_RING_MASK;
	/* initially (np_qfirst == np_qlast) we don't want to lock */
	int need_lock = (priv->np_qfirst != priv->np_qlast);
	int lim = na->num_rx_rings;

	if (na->num_tx_rings > lim)
		lim = na->num_tx_rings;
	if ((ringid & NETMAP_HW_RING) && i >= lim) {
		D("invalid ring id %d", i);
		return (EINVAL);
	}
	if (need_lock)
		na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
	priv->np_ringid = ringid;
	if (ringid & NETMAP_SW_RING) {
		priv->np_qfirst = NETMAP_SW_RING;
		priv->np_qlast = NETMAP_SW_RING + 1;
	} else if (ringid & NETMAP_HW_RING) {
		priv->np_qfirst = i;
		priv->np_qlast = i + 1;
	} else {
		priv->np_qfirst = 0;
		priv->np_qlast = NETMAP_HW_RING;
	}
	priv->np_txpoll = (ringid & NETMAP_NO_TX_POLL) ? 0 : 1;
	if (need_lock)
		na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);
	if (ringid & NETMAP_SW_RING)
		D("ringid %s set to SW RING", ifp->if_xname);
	else if (ringid & NETMAP_HW_RING)
		D("ringid %s set to HW RING %d", ifp->if_xname,
			priv->np_qfirst);
	else
		D("ringid %s set to all %d HW RINGS", ifp->if_xname, lim);
	return 0;
}
/*
 * ioctl(2) support for the "netmap" device.
 *
 * Following is a list of accepted commands:
 * - NIOCGINFO
 * - SIOCGIFADDR	just for convenience
 * - NIOCREGIF
 * - NIOCUNREGIF
 * - NIOCTXSYNC
 * - NIOCRXSYNC
 *
 * Return 0 on success, errno otherwise.
 */
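/*
 * For instance, a userspace query of the memory area size (a sketch,
 * error handling omitted):
 *
 *	struct nmreq req;
 *
 *	bzero(&req, sizeof(req));
 *	req.nr_version = NETMAP_API;	// mismatches fail with EINVAL
 *	ioctl(fd, NIOCGINFO, &req);	// on return, req.nr_memsize is valid
 */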
static int
netmap_ioctl(__unused struct cdev *dev, u_long cmd, caddr_t data,
	__unused int fflag, struct thread *td)
{
	struct netmap_priv_d *priv = NULL;
	struct ifnet *ifp = NULL;
	struct nmreq *nmr = (struct nmreq *) data;
	struct netmap_adapter *na;
	int error;
	u_int i, lim;
	struct netmap_if *nifp;

	CURVNET_SET(TD_TO_VNET(td));
	error = devfs_get_cdevpriv((void **)&priv);
	if (error != ENOENT && error != 0) {
		CURVNET_RESTORE();
		return (error);
	}

	error = 0;	/* Could be ENOENT */
	switch (cmd) {
	case NIOCGINFO:		/* return capabilities etc */
		/* memsize is always valid */
		nmr->nr_memsize = nm_mem->nm_totalsize;
		nmr->nr_offset = 0;
		nmr->nr_rx_rings = nmr->nr_tx_rings = 0;
		nmr->nr_rx_slots = nmr->nr_tx_slots = 0;
		if (nmr->nr_version != NETMAP_API) {
			D("API mismatch got %d have %d",
				nmr->nr_version, NETMAP_API);
			nmr->nr_version = NETMAP_API;
			error = EINVAL;
			break;
		}
		if (nmr->nr_name[0] == '\0')	/* just get memory info */
			break;
		error = get_ifp(nmr->nr_name, &ifp); /* get a refcount */
		if (error)
			break;
		na = NA(ifp); /* retrieve netmap_adapter */
		nmr->nr_rx_rings = na->num_rx_rings;
		nmr->nr_tx_rings = na->num_tx_rings;
		nmr->nr_rx_slots = na->num_rx_desc;
		nmr->nr_tx_slots = na->num_tx_desc;
		if_rele(ifp);	/* return the refcount */
		break;

	case NIOCREGIF:
		if (nmr->nr_version != NETMAP_API) {
			nmr->nr_version = NETMAP_API;
			error = EINVAL;
			break;
		}
		if (priv != NULL) {	/* thread already registered */
			error = netmap_set_ringid(priv, nmr->nr_ringid);
			break;
		}
		/* find the interface and a reference */
		error = get_ifp(nmr->nr_name, &ifp); /* keep reference */
		if (error)
			break;
		na = NA(ifp); /* retrieve netmap adapter */
		/*
		 * Allocate the private per-thread structure.
		 * XXX perhaps we can use a blocking malloc ?
		 */
		priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF,
			M_NOWAIT | M_ZERO);
		if (priv == NULL) {
			error = ENOMEM;
			if_rele(ifp);	/* return the refcount */
			break;
		}

		for (i = 10; i > 0; i--) {
			na->nm_lock(ifp, NETMAP_REG_LOCK, 0);
			if (!NETMAP_DELETING(na))
				break;
			na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
			tsleep(na, 0, "NIOCREGIF", hz/10);
		}
		if (i == 0) {
			D("too many NIOCREGIF attempts, give up");
			error = EINVAL;
			free(priv, M_DEVBUF);
			if_rele(ifp);	/* return the refcount */
			break;
		}

		priv->np_ifp = ifp;	/* store the reference */
		error = netmap_set_ringid(priv, nmr->nr_ringid);
		if (error)
			goto error;
		priv->np_nifp = nifp = netmap_if_new(nmr->nr_name, na);
		if (nifp == NULL) { /* allocation failed */
			error = ENOMEM;
		} else if (ifp->if_capenable & IFCAP_NETMAP) {
			/* was already set */
		} else {
			/* Otherwise set the card in netmap mode
			 * and make it use the shared buffers.
			 */
			for (i = 0 ; i < na->num_tx_rings + 1; i++)
				mtx_init(&na->tx_rings[i].q_lock, "nm_txq_lock", NULL, MTX_DEF);
			for (i = 0 ; i < na->num_rx_rings + 1; i++)
				mtx_init(&na->rx_rings[i].q_lock, "nm_rxq_lock", NULL, MTX_DEF);
			error = na->nm_register(ifp, 1); /* mode on */
			if (error)
				netmap_dtor_locked(priv);
		}

		if (error) {	/* reg. failed, release priv and ref */
error:
			na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
			if_rele(ifp);	/* return the refcount */
			bzero(priv, sizeof(*priv));
			free(priv, M_DEVBUF);
			break;
		}

		na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
		error = devfs_set_cdevpriv(priv, netmap_dtor);

		if (error != 0) {
			/* could not assign the private storage for the
			 * thread, call the destructor explicitly.
			 */
			netmap_dtor(priv);
			break;
		}

		/* return the offset of the netmap_if object */
		nmr->nr_rx_rings = na->num_rx_rings;
		nmr->nr_tx_rings = na->num_tx_rings;
		nmr->nr_rx_slots = na->num_rx_desc;
		nmr->nr_tx_slots = na->num_tx_desc;
		nmr->nr_memsize = nm_mem->nm_totalsize;
		nmr->nr_offset = netmap_if_offset(nifp);
		break;

	case NIOCUNREGIF:
		if (priv == NULL) {
			error = ENXIO;
			break;
		}

		/* the interface is unregistered inside the
		   destructor of the private data. */
		devfs_clear_cdevpriv();
		break;

	case NIOCTXSYNC:
	case NIOCRXSYNC:
		if (priv == NULL) {
			error = ENXIO;
			break;
		}
		ifp = priv->np_ifp;	/* we have a reference */
		na = NA(ifp); /* retrieve netmap adapter */

		if (priv->np_qfirst == NETMAP_SW_RING) { /* host rings */
			if (cmd == NIOCTXSYNC)
				netmap_sync_to_host(na);
			else
				netmap_sync_from_host(na, NULL);
			break;
		}
		/* find the last ring to scan */
		lim = priv->np_qlast;
		if (lim == NETMAP_HW_RING)
			lim = (cmd == NIOCTXSYNC) ?
				na->num_tx_rings : na->num_rx_rings;

		for (i = priv->np_qfirst; i < lim; i++) {
			if (cmd == NIOCTXSYNC) {
				struct netmap_kring *kring = &na->tx_rings[i];
				if (netmap_verbose & NM_VERB_TXSYNC)
					D("pre txsync ring %d cur %d hwcur %d",
						i, kring->ring->cur,
						kring->nr_hwcur);
				na->nm_txsync(ifp, i, 1 /* do lock */);
				if (netmap_verbose & NM_VERB_TXSYNC)
					D("post txsync ring %d cur %d hwcur %d",
						i, kring->ring->cur,
						kring->nr_hwcur);
			} else {
				na->nm_rxsync(ifp, i, 1 /* do lock */);
				microtime(&na->rx_rings[i].ring->ts);
			}
		}
		break;

	case BIOCIMMEDIATE:
	case BIOCGHDRCMPLT:
	case BIOCSHDRCMPLT:
	case BIOCSSEESENT:
		D("ignore BIOCIMMEDIATE/BIOCGHDRCMPLT/BIOCSHDRCMPLT/BIOCSSEESENT");
		break;
	default:	/* allow device-specific ioctls */
	    {
		struct socket so;

		bzero(&so, sizeof(so));
		error = get_ifp(nmr->nr_name, &ifp); /* keep reference */
		if (error)
			break;
		so.so_vnet = ifp->if_vnet;
		// so->so_proto not null.
		error = ifioctl(&so, cmd, data, td);
		if_rele(ifp);
		break;
	    }
	}	/* end of switch (cmd) */

	CURVNET_RESTORE();
	return (error);
}
/*
 * select(2) and poll(2) handlers for the "netmap" device.
 *
 * Can be called for one or more queues.
 * Return the event mask corresponding to ready events.
 * If there are no ready events, do a selrecord on either individual
 * selfd or on the global one.
 * Device-dependent parts (locking and sync of tx/rx rings)
 * are done through callbacks.
 */
static int
netmap_poll(__unused struct cdev *dev, int events, struct thread *td)
{
	struct netmap_priv_d *priv = NULL;
	struct netmap_adapter *na;
	struct ifnet *ifp;
	struct netmap_kring *kring;
	u_int core_lock, i, check_all, want_tx, want_rx, revents = 0;
	u_int lim_tx, lim_rx;
	enum { NO_CL, NEED_CL, LOCKED_CL };	/* see below */

	if (devfs_get_cdevpriv((void **)&priv) != 0 || priv == NULL)
		return POLLERR;

	ifp = priv->np_ifp;
	// XXX check for deleting() ?
	if ((ifp->if_capenable & IFCAP_NETMAP) == 0)
		return POLLERR;

	if (netmap_verbose & 0x8000)
		D("device %s events 0x%x", ifp->if_xname, events);
	want_tx = events & (POLLOUT | POLLWRNORM);
	want_rx = events & (POLLIN | POLLRDNORM);

	na = NA(ifp); /* retrieve netmap adapter */

	lim_tx = na->num_tx_rings;
	lim_rx = na->num_rx_rings;
	/* how many queues we are scanning */
	if (priv->np_qfirst == NETMAP_SW_RING) {
		if (priv->np_txpoll || want_tx) {
			/* push any packets up, then we are always ready */
			kring = &na->tx_rings[lim_tx];
			netmap_sync_to_host(na);
			revents |= want_tx;
		}
		if (want_rx) {
			kring = &na->rx_rings[lim_rx];
			if (kring->ring->avail == 0)
				netmap_sync_from_host(na, td);
			if (kring->ring->avail > 0) {
				revents |= want_rx;
			}
		}
		return (revents);
	}
	/*
	 * check_all is set if the card has more than one queue and
	 * the client is polling all of them. If true, we sleep on
	 * the "global" selfd, otherwise we sleep on individual selfd
	 * (we can only sleep on one of them per direction).
	 * The interrupt routine in the driver should always wake on
	 * the individual selfd, and also on the global one if the card
	 * has more than one ring.
	 *
	 * If the card has only one lock, we just use that.
	 * If the card has separate ring locks, we just use those
	 * unless we are doing check_all, in which case the whole
	 * loop is wrapped by the global lock.
	 * We acquire locks only when necessary: if poll is called
	 * when buffers are available, we can just return without locks.
	 *
	 * rxsync() is only called if we run out of buffers on a POLLIN.
	 * txsync() is called if we run out of buffers on POLLOUT, or
	 * there are pending packets to send. The latter can be disabled
	 * passing NETMAP_NO_TX_POLL in the NIOCREG call.
	 */
	check_all = (priv->np_qlast == NETMAP_HW_RING) && (lim_tx > 1 || lim_rx > 1);
	/*
	 * core_lock indicates what to do with the core lock.
	 * The core lock is used when either the card has no individual
	 * locks, or it has individual locks but we are checking all
	 * rings so we need the core lock to avoid missing wakeup events.
	 *
	 * It has three possible states:
	 * NO_CL	we don't need to use the core lock, e.g.
	 *		because we are protected by individual locks.
	 * NEED_CL	we need the core lock. In this case, when we
	 *		call the lock routine, move to LOCKED_CL
	 *		to remember to release the lock once done.
	 * LOCKED_CL	core lock is set, so we need to release it.
	 */
	core_lock = (check_all || !na->separate_locks) ? NEED_CL : NO_CL;
	if (priv->np_qlast != NETMAP_HW_RING) {
		lim_tx = lim_rx = priv->np_qlast;
	}
	/*
	 * We start with a lock free round which is good if we have
	 * data available. If this fails, then lock and call the sync
	 * routines.
	 */
	for (i = priv->np_qfirst; want_rx && i < lim_rx; i++) {
		kring = &na->rx_rings[i];
		if (kring->ring->avail > 0) {
			revents |= want_rx;
			want_rx = 0;	/* also breaks the loop */
		}
	}
	for (i = priv->np_qfirst; want_tx && i < lim_tx; i++) {
		kring = &na->tx_rings[i];
		if (kring->ring->avail > 0) {
			revents |= want_tx;
			want_tx = 0;	/* also breaks the loop */
		}
	}
	/*
	 * If we need to push packets out (priv->np_txpoll) or want_tx is
	 * still set, we do need to run the txsync calls (on all rings,
	 * to avoid that the tx rings stall).
	 */
	if (priv->np_txpoll || want_tx) {
		for (i = priv->np_qfirst; i < lim_tx; i++) {
			kring = &na->tx_rings[i];
			/*
			 * Skip the current ring if want_tx == 0
			 * (we have already done a successful sync on
			 * a previous ring) AND kring->cur == kring->hwcur
			 * (there are no pending transmissions for this ring).
			 */
			if (!want_tx && kring->ring->cur == kring->nr_hwcur)
				continue;
			if (core_lock == NEED_CL) {
				na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
				core_lock = LOCKED_CL;
			}
			if (na->separate_locks)
				na->nm_lock(ifp, NETMAP_TX_LOCK, i);
			if (netmap_verbose & NM_VERB_TXSYNC)
				D("send %d on %s %d",
					kring->ring->cur, ifp->if_xname, i);
			if (na->nm_txsync(ifp, i, 0 /* no lock */))
				revents |= POLLERR;

			/* Check avail/call selrecord only if called with POLLOUT */
			if (want_tx) {
				if (kring->ring->avail > 0) {
					/* stop at the first ring. We don't risk
					 * starvation.
					 */
					revents |= want_tx;
					want_tx = 0;
				} else if (!check_all)
					selrecord(td, &kring->si);
			}
			if (na->separate_locks)
				na->nm_lock(ifp, NETMAP_TX_UNLOCK, i);
		}
	}
	/*
	 * now if want_rx is still set we need to lock and rxsync.
	 * Do it on all rings because otherwise we starve.
	 */
	if (want_rx) {
		for (i = priv->np_qfirst; i < lim_rx; i++) {
			kring = &na->rx_rings[i];
			if (core_lock == NEED_CL) {
				na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
				core_lock = LOCKED_CL;
			}
			if (na->separate_locks)
				na->nm_lock(ifp, NETMAP_RX_LOCK, i);

			if (na->nm_rxsync(ifp, i, 0 /* no lock */))
				revents |= POLLERR;
			if (netmap_no_timestamp == 0 ||
			    kring->ring->flags & NR_TIMESTAMP) {
				microtime(&kring->ring->ts);
			}

			if (kring->ring->avail > 0)
				revents |= want_rx;
			else if (!check_all)
				selrecord(td, &kring->si);
			if (na->separate_locks)
				na->nm_lock(ifp, NETMAP_RX_UNLOCK, i);
		}
	}
	if (check_all && revents == 0) { /* signal on the global queue */
		if (want_tx)
			selrecord(td, &na->tx_si);
		if (want_rx)
			selrecord(td, &na->rx_si);
	}
	if (core_lock == LOCKED_CL)
		na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);

	return (revents);
}
/*------- driver support routines ------*/

/*
 * default lock wrapper.
 */
static void
netmap_lock_wrapper(struct ifnet *dev, int what, u_int queueid)
{
	struct netmap_adapter *na = NA(dev);

	switch (what) {
#ifdef linux	/* some systems do not need lock on register */
	case NETMAP_REG_LOCK:
	case NETMAP_REG_UNLOCK:
		break;
#endif /* linux */

	case NETMAP_CORE_LOCK:
		mtx_lock(&na->core_lock);
		break;

	case NETMAP_CORE_UNLOCK:
		mtx_unlock(&na->core_lock);
		break;

	case NETMAP_TX_LOCK:
		mtx_lock(&na->tx_rings[queueid].q_lock);
		break;

	case NETMAP_TX_UNLOCK:
		mtx_unlock(&na->tx_rings[queueid].q_lock);
		break;

	case NETMAP_RX_LOCK:
		mtx_lock(&na->rx_rings[queueid].q_lock);
		break;

	case NETMAP_RX_UNLOCK:
		mtx_unlock(&na->rx_rings[queueid].q_lock);
		break;
	}
}
/*
 * Initialize a ``netmap_adapter`` object created by driver on attach.
 * We allocate a block of memory with room for a struct netmap_adapter
 * plus two sets of N+2 struct netmap_kring (where N is the number
 * of hardware rings):
 * krings	0..N-1	are for the hardware queues.
 * kring	N	is for the host stack queue
 * kring	N+1	is only used for the selinfo for all queues.
 * Return 0 on success, ENOMEM otherwise.
 *
 * By default the receive and transmit adapter ring counts are both initialized
 * to num_queues. na->num_tx_rings can be set for cards with different tx/rx
 * ring counts. (A driver-side usage sketch follows the function body.)
 */
int
netmap_attach(struct netmap_adapter *na, int num_queues)
{
	int n, size;
	void *buf;
	struct ifnet *ifp = na->ifp;

	if (ifp == NULL) {
		D("ifp not set, giving up");
		return EINVAL;
	}
	/* clear other fields ? */
	if (na->num_tx_rings == 0)
		na->num_tx_rings = num_queues;
	na->num_rx_rings = num_queues;
	/* on each direction we have N+1 resources
	 * 0..n-1	are the hardware rings
	 * n		is the ring attached to the stack.
	 */
	n = na->num_rx_rings + na->num_tx_rings + 2;
	size = sizeof(*na) + n * sizeof(struct netmap_kring);

	buf = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (buf) {
		WNA(ifp) = buf;
		na->tx_rings = (void *)((char *)buf + sizeof(*na));
		na->rx_rings = na->tx_rings + na->num_tx_rings + 1;
		bcopy(na, buf, sizeof(*na));
		ifp->if_capabilities |= IFCAP_NETMAP;

		na = buf;
		/* Core lock initialized here. Others are initialized after
		 * netmap_if_new.
		 */
		mtx_init(&na->core_lock, "netmap core lock", NULL, MTX_DEF);
		if (na->nm_lock == NULL) {
			ND("using default locks for %s", ifp->if_xname);
			na->nm_lock = netmap_lock_wrapper;
		}
#ifdef linux
		D("netdev_ops %p", ifp->netdev_ops);
		/* prepare a clone of the netdev ops */
		na->nm_ndo = *ifp->netdev_ops;
		na->nm_ndo.ndo_start_xmit = netmap_start_linux;
#endif /* linux */
	}
	D("%s for %s", buf ? "ok" : "failed", ifp->if_xname);

	return (buf ? 0 : ENOMEM);
}
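/*
 * A driver typically invokes netmap_attach() at the end of its own
 * attach routine. A sketch, where the foo_* callbacks and the adapter
 * fields are hypothetical placeholders rather than code from a real
 * driver:
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = adapter->ifp;
 *	na.separate_locks = 0;
 *	na.num_tx_desc = adapter->num_tx_desc;
 *	na.num_rx_desc = adapter->num_rx_desc;
 *	na.nm_register = foo_netmap_reg;
 *	na.nm_txsync = foo_netmap_txsync;
 *	na.nm_rxsync = foo_netmap_rxsync;
 *	netmap_attach(&na, adapter->num_queues);
 *
 * Since netmap_attach() bcopies the structure into the block it
 * allocates, the local copy above can safely live on the stack.
 */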
/*
 * Free the allocated memory linked to the given ``netmap_adapter``
 * object.
 */
void
netmap_detach(struct ifnet *ifp)
{
	struct netmap_adapter *na = NA(ifp);

	if (!na)
		return;

	mtx_destroy(&na->core_lock);

	bzero(na, sizeof(*na));
	free(na, M_DEVBUF);
}
/*
 * Intercept packets from the network stack and pass them
 * to netmap as incoming packets on the 'software' ring.
 * We are not locked when called.
 */
int
netmap_start(struct ifnet *ifp, struct mbuf *m)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings];
	u_int i, len = MBUF_LEN(m);
	int error = EBUSY, lim = kring->nkr_num_slots - 1;
	struct netmap_slot *slot;

	if (netmap_verbose & NM_VERB_HOST)
		D("%s packet %d len %d from the stack", ifp->if_xname,
			kring->nr_hwcur + kring->nr_hwavail, len);
	na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
	if (kring->nr_hwavail >= lim) {
		if (netmap_verbose)
			D("stack ring %s full\n", ifp->if_xname);
		goto done;	/* no space */
	}
	if (len > NETMAP_BUF_SIZE) {
		D("drop packet size %d > %d", len, NETMAP_BUF_SIZE);
		goto done;	/* too long for us */
	}

	/* compute the insert position */
	i = kring->nr_hwcur + kring->nr_hwavail;
	if (i > lim)
		i -= lim + 1;
	slot = &kring->ring->slot[i];
	m_copydata(m, 0, len, NMB(slot));
	slot->len = len;
	kring->nr_hwavail++;
	if (netmap_verbose & NM_VERB_HOST)
		D("wake up host ring %s %d", na->ifp->if_xname, na->num_rx_rings);
	selwakeuppri(&kring->si, PI_NET);
	error = 0;
done:
	na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);

	/* release the mbuf in either case of success or failure. As an
	 * alternative, put the mbuf in a free list and free the list
	 * only when really necessary.
	 */
	m_freem(m);

	return (error);
}
/*
 * netmap_reset() is called by the driver routines when reinitializing
 * a ring. The driver is in charge of locking to protect the kring.
 * If netmap mode is not set just return NULL.
 * (A driver-side usage sketch follows the function body.)
 */
struct netmap_slot *
netmap_reset(struct netmap_adapter *na, enum txrx tx, int n,
	u_int new_cur)
{
	struct netmap_kring *kring;
	int new_hwofs, lim;

	if (na == NULL)
		return NULL;	/* no netmap support here */
	if (!(na->ifp->if_capenable & IFCAP_NETMAP))
		return NULL;	/* nothing to reinitialize */

	if (tx == NR_TX) {
		kring = na->tx_rings + n;
		new_hwofs = kring->nr_hwcur - new_cur;
	} else {
		kring = na->rx_rings + n;
		new_hwofs = kring->nr_hwcur + kring->nr_hwavail - new_cur;
	}
	lim = kring->nkr_num_slots - 1;
	if (new_hwofs > lim)
		new_hwofs -= lim + 1;

	/* Always set the new offset value and realign the ring. */
	kring->nkr_hwofs = new_hwofs;
	if (tx == NR_TX)
		kring->nr_hwavail = kring->nkr_num_slots - 1;
	D("new hwofs %d on %s %s[%d]",
		kring->nkr_hwofs, na->ifp->if_xname,
		tx == NR_TX ? "TX" : "RX", n);

	/*
	 * Wake up on the individual and global selinfo.
	 * We do the wakeup here, but the ring is not yet reconfigured.
	 * However, we are under lock so there are no races.
	 */
	selwakeuppri(&kring->si, PI_NET);
	selwakeuppri(tx == NR_TX ? &na->tx_si : &na->rx_si, PI_NET);
	return kring->ring->slot;
}
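/*
 * Sketch of the intended use from a driver's ring-init path (names are
 * hypothetical): if the call returns non-NULL the ring is in netmap
 * mode, and netmap buffers are programmed into the NIC instead of
 * mbufs.
 *
 *	struct netmap_slot *slot;
 *
 *	slot = netmap_reset(na, NR_RX, ring_nr, 0);
 *	if (slot != NULL) {
 *		// for each descriptor j, load the DMA map with
 *		// NMB(slot + j) instead of an mbuf cluster
 *	}
 */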
/*
 * Default functions to handle rx/tx interrupts.
 *
 * 1 ring, single lock:
 *	lock(core); wake(i=0); unlock(core)
 * N rings, single lock:
 *	lock(core); wake(i); wake(N+1) unlock(core)
 * 1 ring, separate locks: (i=0)
 *	lock(i); wake(i); unlock(i)
 * N rings, separate locks:
 *	lock(i); wake(i); unlock(i); lock(core) wake(N+1) unlock(core)
 * work_done is non-null on the RX path.
 * (A driver-side usage sketch follows the function body.)
 */
int
netmap_rx_irq(struct ifnet *ifp, int q, int *work_done)
{
	struct netmap_adapter *na;
	struct netmap_kring *r;
	NM_SELINFO_T *main_wq;

	if (!(ifp->if_capenable & IFCAP_NETMAP))
		return 0;
	na = NA(ifp);
	if (work_done) { /* RX path */
		r = na->rx_rings + q;
		r->nr_kflags |= NKR_PENDINTR;
		main_wq = (na->num_rx_rings > 1) ? &na->rx_si : NULL;
	} else { /* tx path */
		r = na->tx_rings + q;
		main_wq = (na->num_tx_rings > 1) ? &na->tx_si : NULL;
		work_done = &q; /* dummy */
	}
	if (na->separate_locks) {
		mtx_lock(&r->q_lock);
		selwakeuppri(&r->si, PI_NET);
		mtx_unlock(&r->q_lock);
		if (main_wq) {
			mtx_lock(&na->core_lock);
			selwakeuppri(main_wq, PI_NET);
			mtx_unlock(&na->core_lock);
		}
	} else {
		mtx_lock(&na->core_lock);
		selwakeuppri(&r->si, PI_NET);
		if (main_wq)
			selwakeuppri(main_wq, PI_NET);
		mtx_unlock(&na->core_lock);
	}
	*work_done = 1; /* do not fire napi again */

	return (1);
}
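/*
 * A driver's interrupt service routine would typically defer to this
 * function along these lines (a sketch with hypothetical names):
 *
 *	if (netmap_rx_irq(adapter->ifp, rxr->me, &work_done))
 *		return;		// netmap consumed the event
 *
 * On the TX path the same call is made with a NULL work_done.
 */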
static struct cdevsw netmap_cdevsw = {
	.d_version = D_VERSION,
	.d_name = "netmap",
	.d_mmap = netmap_mmap,
	.d_ioctl = netmap_ioctl,
	.d_poll = netmap_poll,
};

static struct cdev *netmap_dev; /* /dev/netmap character device. */
/*
 * Module loader.
 *
 * Create the /dev/netmap device and initialize all global
 * variables.
 *
 * Return 0 on success, errno on failure.
 */
static int
netmap_init(void)
{
	int error;

	error = netmap_memory_init();
	if (error != 0) {
		printf("netmap: unable to initialize the memory allocator.\n");
		return (error);
	}
	printf("netmap: loaded module with %d Mbytes\n",
		(int)(nm_mem->nm_totalsize >> 20));
	netmap_dev = make_dev(&netmap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660,
		"netmap");
	return (error);
}
/*
 * Module unloader.
 *
 * Free all the memory, and destroy the ``/dev/netmap`` device.
 */
static void
netmap_fini(void)
{
	destroy_dev(netmap_dev);
	netmap_memory_fini();
	printf("netmap: unloaded module.\n");
}
/*
 * Kernel entry point.
 *
 * Initialize/finalize the module and return.
 *
 * Return 0 on success, errno on failure.
 */
static int
netmap_loader(__unused struct module *module, int event, __unused void *arg)
{
	int error = 0;

	switch (event) {
	case MOD_LOAD:
		error = netmap_init();
		break;
	case MOD_UNLOAD:
		netmap_fini();
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

DEV_MODULE(netmap, netmap_loader, NULL);