/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2011-2014 Matteo Landi
 * Copyright (C) 2011-2016 Luigi Rizzo
 * Copyright (C) 2011-2016 Giuseppe Lettieri
 * Copyright (C) 2011-2016 Vincenzo Maffione
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module supports memory mapped access to network devices,
 * see netmap(4).
 *
 * The module uses a large memory pool allocated by the kernel
 * and accessible as mmapped memory by multiple userspace threads/processes.
 * The memory pool contains packet buffers and "netmap rings",
 * i.e. user-accessible copies of the interface's queues.
 *
 * Access to the network card works like this:
 * 1. a process/thread issues one or more open() on /dev/netmap, to create
 *    select()able file descriptors on which events are reported.
 * 2. on each descriptor, the process issues an ioctl() to identify
 *    the interface that should report events to the file descriptor.
 * 3. on each descriptor, the process issues an mmap() request to
 *    map the shared memory region within the process' address space.
 *    The list of interesting queues is indicated by a location in
 *    the shared memory region.
 * 4. using the functions in the netmap(4) userspace API, a process
 *    can look up the occupation state of a queue, access memory buffers,
 *    and retrieve received packets or enqueue packets to transmit.
 * 5. using some ioctl()s the process can synchronize the userspace view
 *    of the queue with the actual status in the kernel. This includes both
 *    receiving the notification of new packets, and transmitting new
 *    packets on the output interface.
 * 6. select() or poll() can be used to wait for events on individual
 *    transmit or receive queues (or all queues for a given interface).
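 *
 * As an illustrative sketch of steps 1-6 (assuming the nm_open()/nm_ring
 * helpers from net/netmap_user.h, compiled with NETMAP_WITH_LIBS; this
 * snippet is not part of the module itself):
 *
 *	struct nm_desc *d = nm_open("netmap:em0", NULL, 0, NULL); // 1-3
 *	struct pollfd pfd = { .fd = NETMAP_FD(d), .events = POLLIN };
 *	for (;;) {
 *		poll(&pfd, 1, -1);				// 5-6
 *		struct netmap_ring *r = NETMAP_RXRING(d->nifp, 0);
 *		while (!nm_ring_empty(r)) {			// 4
 *			struct netmap_slot *slot = &r->slot[r->cur];
 *			char *buf = NETMAP_BUF(r, slot->buf_idx);
 *			// consume slot->len bytes at buf, then release:
 *			r->head = r->cur = nm_ring_next(r, r->cur);
 *		}
 *	}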
 *
 *		SYNCHRONIZATION (USER)
 *
 * The netmap rings and data structures may be shared among multiple
 * user threads or even independent processes.
 * Any synchronization among those threads/processes is delegated
 * to the threads themselves. Only one thread at a time can be in
 * a system call on the same netmap ring. The OS does not enforce
 * this and only guarantees against system crashes in case of
 * invalid usage.
 *
 *		LOCKING (INTERNAL)
 *
 * Within the kernel, access to the netmap rings is protected as follows:
 *
 * - a spinlock on each ring, to handle producer/consumer races on
 *   RX rings attached to the host stack (against multiple host
 *   threads writing from the host stack to the same ring),
 *   and on 'destination' rings attached to a VALE switch
 *   (i.e. RX rings in VALE ports, and TX rings in NIC/host ports),
 *   protecting multiple active senders for the same destination.
 *
 * - an atomic variable to guarantee that there is at most one
 *   instance of *_*xsync() on the ring at any time.
 *   For rings connected to user file
 *   descriptors, an atomic_test_and_set() protects this, and the
 *   lock on the ring is not actually used.
 *   For NIC RX rings connected to a VALE switch, an atomic_test_and_set()
 *   is also used to prevent multiple executions (the driver might indeed
 *   already guarantee this).
 *   For NIC TX rings connected to a VALE switch, the lock arbitrates
 *   access to the queue (both when allocating buffers and when pushing
 *   them out). A sketch of this guard follows the list below.
 *
 * - *xsync() should be protected against initializations of the card.
 *   On FreeBSD most devices have the reset routine protected by
 *   a RING lock (ixgbe, igb, em) or core lock (re). lem is missing
 *   the RING protection on rx_reset(); this should be added.
 *
 *   On linux there is an external lock on the tx path, which probably
 *   also arbitrates access to the reset routine. XXX to be revised
 *
 * - a per-interface core_lock protecting access from the host stack
 *   while interfaces may be detached from netmap mode.
 *   XXX there should be no need for this lock if we detach the interfaces
 *   only while they are down.
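 *
 * As an illustrative sketch only (the flag name below is hypothetical;
 * the real guard lives in nm_kr_tryget()/nm_kr_put() in netmap_kern.h),
 * the "at most one *_*xsync() per ring" rule amounts to:
 *
 *	if (atomic_test_and_set(&kr->busy))	// hypothetical flag
 *		return EBUSY;		// another sync is already running
 *	kr->nm_sync(kr, flags);		// the actual tx/rx sync work
 *	atomic_clear(&kr->busy);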
 *
 *		--- VALE SWITCH ---
 *
 * NMG_LOCK() serializes all modifications to switches and ports.
 * A switch cannot be deleted until all ports are gone.
 *
 * For each switch, an SX lock (RWlock on linux) protects
 * deletion of ports. When configuring or deleting a new port, the
 * lock is acquired in exclusive mode (after holding NMG_LOCK).
 * When forwarding, the lock is acquired in shared mode (without NMG_LOCK).
 * The lock is held throughout the entire forwarding cycle,
 * during which the thread may incur a page fault.
 * Hence it is important that sleepable shared locks are used.
 *
 * On the rx ring, the per-port lock is grabbed initially to reserve
 * a number of slots in the ring, then the lock is released,
 * packets are copied from source to destination, and then
 * the lock is acquired again and the receive ring is updated.
 * (A similar thing is done on the tx ring for NIC and host stack
 * ports attached to the switch; see the sketch right below.)
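 *
 * In pseudo-code (the helper names here are illustrative, not the
 * actual VALE functions), the rx-ring pattern just described is:
 *
 *	lock(port);			// reserve slots
 *	first = reserve_slots(ring, n);
 *	unlock(port);
 *	copy_packets(src, ring, first, n);	// no lock held
 *	lock(port);
 *	update_ring(ring);		// make the new slots visible
 *	unlock(port);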
 */


/* --- internals ----
 *
 * Roadmap to the code that implements the above.
 *
 * > 1. a process/thread issues one or more open() on /dev/netmap, to create
 * >    select()able file descriptors on which events are reported.
 *
 *	Internally, we allocate a netmap_priv_d structure, that will be
 *	initialized on ioctl(NIOCREGIF). There is one netmap_priv_d
 *	structure for each open().
 *
 *	os-specific:
 *	    FreeBSD: see netmap_open() (netmap_freebsd.c)
 *	    linux:   see linux_netmap_open() (netmap_linux.c)
 *
 * > 2. on each descriptor, the process issues an ioctl() to identify
 * >    the interface that should report events to the file descriptor.
 *
 *	Implemented by netmap_ioctl(), NIOCREGIF case, with nmr->nr_cmd==0.
 *	Most important things happen in netmap_get_na() and
 *	netmap_do_regif(), called from there. Additional details can be
 *	found in the comments above those functions.
 *
 *	In all cases, this action creates/takes-a-reference-to a
 *	netmap_*_adapter describing the port, and allocates a netmap_if
 *	and all necessary netmap rings, filling them with netmap buffers.
 *
 *	In this phase, the sync callbacks for each ring are set (these are used
 *	in steps 5 and 6 below). The callbacks depend on the type of adapter.
 *	The adapter creation/initialization code puts them in the
 *	netmap_adapter (fields na->nm_txsync and na->nm_rxsync). Then, they
 *	are copied from there to the netmap_kring's during netmap_do_regif(), by
 *	the nm_krings_create() callback. All the nm_krings_create callbacks
 *	actually call netmap_krings_create() to perform this and the other
 *	common stuff. netmap_krings_create() also takes care of the host rings,
 *	if needed, by setting their sync callbacks appropriately.
 *
 *	Additional actions depend on the kind of netmap_adapter that has been
 *	registered:
 *
 *	- netmap_hw_adapter:		[netmap.c]
 *	      This is a system netdev/ifp with native netmap support.
 *	      The ifp is detached from the host stack by redirecting:
 *	        - transmissions (from the network stack) to netmap_transmit()
 *	        - receive notifications to the nm_notify() callback for
 *	          this adapter. The callback is normally netmap_notify(), unless
 *	          the ifp is attached to a bridge using bwrap, in which case it
 *	          is netmap_bwrap_intr_notify().
 *
 *	- netmap_generic_adapter:	[netmap_generic.c]
 *	      A system netdev/ifp without native netmap support.
 *
 *	      (the decision about native/non-native support is taken in
 *	       netmap_get_hw_na(), called by netmap_get_na())
 *
 *	- netmap_vp_adapter		[netmap_vale.c]
 *	      Returned by netmap_get_bdg_na().
 *	      This is a persistent or ephemeral VALE port. Ephemeral ports
 *	      are created on the fly if they don't already exist, and are
 *	      always attached to a bridge.
 *	      Persistent VALE ports must be created separately, and then
 *	      attached like normal NICs. The NIOCREGIF we are examining
 *	      will find them only if they had previously been created and
 *	      attached (see VALE_CTL below).
 *
 *	- netmap_pipe_adapter		[netmap_pipe.c]
 *	      Returned by netmap_get_pipe_na().
 *	      Both pipe ends are created, if they didn't already exist.
 *
 *	- netmap_monitor_adapter	[netmap_monitor.c]
 *	      Returned by netmap_get_monitor_na().
 *	      If successful, the nm_sync callbacks of the monitored adapter
 *	      will be intercepted by the returned monitor.
 *
 *	- netmap_bwrap_adapter		[netmap_vale.c]
 *	      Cannot be obtained in this way, see VALE_CTL below.
 *
 *	os-specific:
 *	    linux: we first go through linux_netmap_ioctl() to
 *	           adapt the FreeBSD interface to the linux one.
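 *
 *	From userspace, a minimal registration sketch with the nmreq_*
 *	control API looks like this (illustrative only; the structures
 *	are declared in net/netmap.h):
 *
 *		struct nmreq_header hdr = { 0 };
 *		struct nmreq_register req = { 0 };
 *		hdr.nr_version = NETMAP_API;
 *		hdr.nr_reqtype = NETMAP_REQ_REGISTER;
 *		strlcpy(hdr.nr_name, "em0", sizeof(hdr.nr_name));
 *		hdr.nr_body = (uintptr_t)&req;
 *		req.nr_mode = NR_REG_ALL_NIC;
 *		ioctl(fd, NIOCCTRL, &hdr);	// fd from open("/dev/netmap")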
 *
 * > 3. on each descriptor, the process issues an mmap() request to
 * >    map the shared memory region within the process' address space.
 * >    The list of interesting queues is indicated by a location in
 * >    the shared memory region.
 *
 *	os-specific:
 *	    FreeBSD: netmap_mmap_single (netmap_freebsd.c).
 *	    linux:   linux_netmap_mmap (netmap_linux.c).
 *
 * > 4. using the functions in the netmap(4) userspace API, a process
 * >    can look up the occupation state of a queue, access memory buffers,
 * >    and retrieve received packets or enqueue packets to transmit.
 *
 *	These actions do not involve the kernel.
 *
 * > 5. using some ioctl()s the process can synchronize the userspace view
 * >    of the queue with the actual status in the kernel. This includes both
 * >    receiving the notification of new packets, and transmitting new
 * >    packets on the output interface.
 *
 *	These are implemented in netmap_ioctl(), NIOCTXSYNC and NIOCRXSYNC
 *	cases. They invoke the nm_sync callbacks on the netmap_kring
 *	structures, as initialized in step 2 and maybe later modified
 *	by a monitor. Monitors, however, will always call the original
 *	callback before doing anything else.
 *
 * > 6. select() or poll() can be used to wait for events on individual
 * >    transmit or receive queues (or all queues for a given interface).
 *
 *	Implemented in netmap_poll(). This will call the same nm_sync()
 *	callbacks as in step 5 above.
 *
 *	os-specific:
 *	    linux: we first go through linux_netmap_poll() to adapt
 *	           the FreeBSD interface to the linux one.
 *
 *	---- VALE_CTL -----
 *
 *   VALE switches are controlled by issuing a NIOCREGIF with a non-null
 *   nr_cmd in the nmreq structure. These subcommands are handled by
 *   netmap_bdg_ctl() in netmap_vale.c. Persistent VALE ports are created
 *   and destroyed by issuing the NETMAP_BDG_NEWIF and NETMAP_BDG_DELIF
 *   subcommands, respectively.
 *
 *   Any network interface known to the system (including a persistent VALE
 *   port) can be attached to a VALE switch by issuing the
 *   NETMAP_REQ_VALE_ATTACH command. After the attachment, persistent VALE ports
 *   look exactly like ephemeral VALE ports (as created in step 2 above). The
 *   attachment of other interfaces, instead, requires the creation of a
 *   netmap_bwrap_adapter. Moreover, the attached interface must be put in
 *   netmap mode. This may require the creation of a netmap_generic_adapter if
 *   we have no native support for the interface, or if generic adapters have
 *   been forced by sysctl.
 *
 *   Both persistent VALE ports and bwraps are handled by netmap_get_bdg_na(),
 *   called by nm_bdg_ctl_attach(), and discriminated by the nm_bdg_attach()
 *   callback. In the case of the bwrap, the callback creates the
 *   netmap_bwrap_adapter. The initialization of the bwrap is then
 *   completed by calling netmap_do_regif() on it, in the nm_bdg_ctl()
 *   callback (netmap_bwrap_bdg_ctl in netmap_vale.c).
 *   A generic adapter for the wrapped ifp will be created if needed, when
 *   netmap_get_bdg_na() calls netmap_get_hw_na().
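 *
 *   As an illustrative sketch, attaching an interface to a switch with
 *   the nmreq_* control API (struct layouts from net/netmap.h) looks like:
 *
 *	struct nmreq_header hdr = { 0 };
 *	struct nmreq_vale_attach req = { 0 };
 *	hdr.nr_version = NETMAP_API;
 *	hdr.nr_reqtype = NETMAP_REQ_VALE_ATTACH;
 *	strlcpy(hdr.nr_name, "vale0:em0", sizeof(hdr.nr_name));
 *	hdr.nr_body = (uintptr_t)&req;
 *	req.reg.nr_mode = NR_REG_ALL_NIC;
 *	ioctl(fd, NIOCCTRL, &hdr);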
 *
 *	---- DATAPATHS -----
 *
 *              -= SYSTEM DEVICE WITH NATIVE SUPPORT =-
 *
 *    na == NA(ifp) == netmap_hw_adapter created in DEVICE_netmap_attach()
 *
 *    - tx from netmap userspace:
 *	 concurrently:
 *           1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == DEVICE_netmap_txsync()
 *           2) device interrupt handler
 *                na->nm_notify()  == netmap_notify()
 *    - rx from netmap userspace:
 *       concurrently:
 *           1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == DEVICE_netmap_rxsync()
 *           2) device interrupt handler
 *                na->nm_notify()  == netmap_notify()
 *    - rx from host stack
 *       concurrently:
 *           1) host stack
 *                netmap_transmit()
 *                  na->nm_notify  == netmap_notify()
 *           2) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == netmap_rxsync_from_host
 *                  netmap_rxsync_from_host(na, NULL, NULL)
 *    - tx to host stack
 *           ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *             kring->nm_sync() == netmap_txsync_to_host
 *               netmap_txsync_to_host(na)
 *                 nm_os_send_up()
 *                   FreeBSD: na->if_input() == ether_input()
 *                   linux: netif_rx() with NM_MAGIC_PRIORITY_RX
 *
 *
 *              -= SYSTEM DEVICE WITH GENERIC SUPPORT =-
 *
 *    na == NA(ifp) == generic_netmap_adapter created in generic_netmap_attach()
 *
 *    - tx from netmap userspace:
 *       concurrently:
 *           1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *               kring->nm_sync() == generic_netmap_txsync()
 *                   nm_os_generic_xmit_frame()
 *                       linux:   dev_queue_xmit() with NM_MAGIC_PRIORITY_TX
 *                           ifp->ndo_start_xmit == generic_ndo_start_xmit()
 *                               gna->save_start_xmit == orig. dev. start_xmit
 *                       FreeBSD: na->if_transmit() == orig. dev if_transmit
 *           2) generic_mbuf_destructor()
 *                   na->nm_notify() == netmap_notify()
 *    - rx from netmap userspace:
 *           1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *               kring->nm_sync() == generic_netmap_rxsync()
 *                   mbq_safe_dequeue()
 *           2) device driver
 *               generic_rx_handler()
 *                   mbq_safe_enqueue()
 *                   na->nm_notify() == netmap_notify()
 *    - rx from host stack
 *        FreeBSD: same as native
 *        Linux: same as native except:
 *           1) host stack
 *               dev_queue_xmit() without NM_MAGIC_PRIORITY_TX
 *                   ifp->ndo_start_xmit == generic_ndo_start_xmit()
 *                       netmap_transmit()
 *                           na->nm_notify() == netmap_notify()
 *    - tx to host stack (same as native):
 *
 *
 *                           -= VALE =-
 *
 *   INCOMING:
 *
 *      - VALE ports:
 *         ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *            kring->nm_sync() == netmap_vp_txsync()
 *
 *      - system device with native support:
 *         from cable:
 *            interrupt
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
 *                   kring->nm_sync() == DEVICE_netmap_rxsync()
 *                   netmap_vp_txsync()
 *                      kring->nm_sync() == DEVICE_netmap_rxsync()
 *         from host stack:
 *            netmap_transmit()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
 *                   kring->nm_sync() == netmap_rxsync_from_host()
 *                   netmap_vp_txsync()
 *
 *      - system device with generic support:
 *         from device driver:
 *            generic_rx_handler()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
 *                   kring->nm_sync() == generic_netmap_rxsync()
 *                   netmap_vp_txsync()
 *                      kring->nm_sync() == generic_netmap_rxsync()
 *         from host stack:
 *            netmap_transmit()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
 *                   kring->nm_sync() == netmap_rxsync_from_host()
 *                   netmap_vp_txsync()
 *
 *   (all cases) --> nm_bdg_flush()
 *                      dest_na->nm_notify() == (see below)
 *
 *   OUTGOING:
 *
 *      - VALE ports:
 *         concurrently:
 *             1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                    kring->nm_sync() == netmap_vp_rxsync()
 *             2) from nm_bdg_flush()
 *                    na->nm_notify() == netmap_notify()
 *
 *      - system device with native support:
 *          to cable:
 *             na->nm_notify() == netmap_bwrap_notify()
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == DEVICE_netmap_txsync()
 *                 netmap_vp_rxsync()
 *          to host stack:
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == netmap_txsync_to_host
 *                 netmap_vp_rxsync_locked()
 *
 *      - system device with generic adapter:
 *          to device driver:
 *             na->nm_notify() == netmap_bwrap_notify()
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == generic_netmap_txsync()
 *                 netmap_vp_rxsync()
 *          to host stack:
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == netmap_txsync_to_host
 *                 netmap_vp_rxsync_locked()
 *
 */

/*
 * OS-specific code that is used only within this file.
 * Other OS-specific code that must be accessed by drivers
 * is present in netmap_kern.h
 */

#if defined(__FreeBSD__)
#include <sys/cdefs.h>		/* prerequisite */
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/param.h>		/* defines used in kernel.h */
#include <sys/kernel.h>		/* types used in module initialization */
#include <sys/conf.h>		/* cdevsw struct, UID, GID */
#include <sys/filio.h>		/* FIONBIO */
#include <sys/sockio.h>
#include <sys/socketvar.h>	/* struct socket */
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/rwlock.h>
#include <sys/socket.h>		/* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/jail.h>
#include <net/vnet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/bpf.h>		/* BIOCIMMEDIATE */
#include <machine/bus.h>	/* bus_dmamap_* */
#include <sys/endian.h>
#include <sys/refcount.h>
#include <net/ethernet.h>	/* ETHER_BPF_MTAP */

#elif defined(linux)

#include "bsd_glue.h"

#elif defined(__APPLE__)

#warning OSX support is only partial
#include "osx_glue.h"

#elif defined (_WIN32)

#include "win_glue.h"

#else

#error	Unsupported platform

#endif /* unsupported */

/*
 * common headers
 */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>

/* user-controlled variables */
int netmap_verbose;
#ifdef CONFIG_NETMAP_DEBUG
int netmap_debug;
#endif /* CONFIG_NETMAP_DEBUG */

static int netmap_no_timestamp;	/* don't timestamp on rxsync */
int netmap_no_pendintr = 1;
int netmap_txsync_retry = 2;
static int netmap_fwd = 0;	/* force transparent forwarding */

/*
 * netmap_admode selects the netmap mode to use.
 * Invalid values are reset to NETMAP_ADMODE_BEST
 */
enum {	NETMAP_ADMODE_BEST = 0,	/* use native, fallback to generic */
	NETMAP_ADMODE_NATIVE,	/* either native or none */
	NETMAP_ADMODE_GENERIC,	/* force generic */
	NETMAP_ADMODE_LAST };
static int netmap_admode = NETMAP_ADMODE_BEST;

/* netmap_generic_mit controls mitigation of RX notifications for
 * the generic netmap adapter. The value is a time interval in
 * nanoseconds. */
int netmap_generic_mit = 100*1000;

/* We use by default netmap-aware qdiscs with generic netmap adapters,
 * even if there can be a little performance hit with hardware NICs.
 * However, using the qdisc is the safer approach, for two reasons:
 * 1) it prevents non-fifo qdiscs from breaking the TX notification
 *    scheme, which is based on mbuf destructors when txqdisc is
 *    used.
 * 2) it makes it possible to transmit over software devices that
 *    change skb->dev, like bridge, veth, ...
 *
 * Anyway, users looking for the best performance should
 * use native adapters.
 */
int netmap_generic_txqdisc = 1;

/* Default number of slots and queues for generic adapters. */
int netmap_generic_ringsize = 1024;
int netmap_generic_rings = 1;

/* Non-zero to enable checksum offloading in NIC drivers */
int netmap_generic_hwcsum = 0;

/* Non-zero if ptnet devices are allowed to use virtio-net headers. */
int ptnet_vnet_hdr = 1;

/*
 * SYSCTL calls are grouped between SYSBEGIN and SYSEND to be emulated
 * in some other operating systems
 */
SYSBEGIN(main_init);

SYSCTL_DECL(_dev_netmap);
SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args");
SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
		CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
#ifdef CONFIG_NETMAP_DEBUG
SYSCTL_INT(_dev_netmap, OID_AUTO, debug,
		CTLFLAG_RW, &netmap_debug, 0, "Debug messages");
#endif /* CONFIG_NETMAP_DEBUG */
SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
		CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp");
SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, CTLFLAG_RW, &netmap_no_pendintr,
		0, "Always look for new received packets.");
SYSCTL_INT(_dev_netmap, OID_AUTO, txsync_retry, CTLFLAG_RW,
		&netmap_txsync_retry, 0, "Number of txsync loops in bridge's flush.");

SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0,
		"Force NR_FORWARD mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, admode, CTLFLAG_RW, &netmap_admode, 0,
		"Adapter mode. 0 selects the best option available, "
		"1 forces native adapter, 2 forces emulated adapter");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_hwcsum, CTLFLAG_RW, &netmap_generic_hwcsum,
		0, "Hardware checksums. 0 to disable checksum generation by the NIC (default), "
		"1 to enable checksum generation by the NIC");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_mit, CTLFLAG_RW, &netmap_generic_mit,
		0, "RX notification interval in nanoseconds");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_ringsize, CTLFLAG_RW,
		&netmap_generic_ringsize, 0,
		"Number of per-ring slots for emulated netmap mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_rings, CTLFLAG_RW,
		&netmap_generic_rings, 0,
		"Number of TX/RX queues for emulated netmap adapters");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_txqdisc, CTLFLAG_RW,
		&netmap_generic_txqdisc, 0, "Use qdisc for generic adapters");
SYSCTL_INT(_dev_netmap, OID_AUTO, ptnet_vnet_hdr, CTLFLAG_RW, &ptnet_vnet_hdr,
		0, "Allow ptnet devices to use virtio-net headers");
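
/*
 * These knobs can be tuned at runtime with sysctl(8), e.g. (illustrative):
 *
 *	sysctl dev.netmap.admode=1		# force native adapters
 *	sysctl dev.netmap.generic_ringsize=4096
 */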

SYSEND;

NMG_LOCK_T	netmap_global_lock;

/*
 * mark the ring as stopped, and run through the locks
 * to make sure other users get to see it.
 * stopped must be either NM_KR_STOPPED (for unbounded stop)
 * or NM_KR_LOCKED (brief stop for mutual exclusion purposes)
 */
static void
netmap_disable_ring(struct netmap_kring *kr, int stopped)
{
	nm_kr_stop(kr, stopped);
	// XXX check if nm_kr_stop is sufficient
	mtx_lock(&kr->q_lock);
	mtx_unlock(&kr->q_lock);
	nm_kr_put(kr);
}

/* stop or enable a single ring */
void
netmap_set_ring(struct netmap_adapter *na, u_int ring_id, enum txrx t, int stopped)
{
	if (stopped)
		netmap_disable_ring(NMR(na, t)[ring_id], stopped);
	else
		NMR(na, t)[ring_id]->nkr_stopped = 0;
}

/* stop or enable all the rings of na */
void
netmap_set_all_rings(struct netmap_adapter *na, int stopped)
{
	int i;
	enum txrx t;

	if (!nm_netmap_on(na))
		return;

	if (netmap_verbose) {
		nm_prinf("%s: %sable all rings", na->name,
		    (stopped ? "dis" : "en"));
	}
	for_rx_tx(t) {
		for (i = 0; i < netmap_real_rings(na, t); i++) {
			netmap_set_ring(na, i, t, stopped);
		}
	}
}

/*
 * Convenience function used in drivers. Waits for current txsync()s/rxsync()s
 * to finish and prevents any new one from starting. Call this before turning
 * netmap mode off, or before removing the hardware rings (e.g., on module
 * unload).
 */
void
netmap_disable_all_rings(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		netmap_set_all_rings(NA(ifp), NM_KR_LOCKED);
	}
}

/*
 * Convenience function used in drivers. Re-enables rxsync and txsync on the
 * adapter's rings. In Linux drivers, this should be placed near each
 * napi_enable().
 */
void
netmap_enable_all_rings(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		netmap_set_all_rings(NA(ifp), 0 /* enabled */);
	}
}
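
/*
 * Typical driver-side usage of the two helpers above (a sketch, not taken
 * from any specific driver): bracket a hardware ring reinitialization so
 * that no *_*xsync() can run while the rings are being reconfigured.
 *
 *	netmap_disable_all_rings(ifp);
 *	// ... reset/reprogram the hardware rings ...
 *	netmap_enable_all_rings(ifp);
 */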

void
netmap_make_zombie(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		struct netmap_adapter *na = NA(ifp);
		netmap_set_all_rings(na, NM_KR_LOCKED);
		na->na_flags |= NAF_ZOMBIE;
		netmap_set_all_rings(na, 0);
	}
}

void
netmap_undo_zombie(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		struct netmap_adapter *na = NA(ifp);
		if (na->na_flags & NAF_ZOMBIE) {
			netmap_set_all_rings(na, NM_KR_LOCKED);
			na->na_flags &= ~NAF_ZOMBIE;
			netmap_set_all_rings(na, 0);
		}
	}
}

/*
 * generic bound_checking function
 */
u_int
nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg)
{
	u_int oldv = *v;
	const char *op = NULL;

	if (dflt < lo)
		dflt = lo;
	if (dflt > hi)
		dflt = hi;
	if (oldv < lo) {
		*v = dflt;
		op = "Bump";
	} else if (oldv > hi) {
		*v = dflt;
		op = "Clamp";
	}
	if (op && msg)
		nm_prinf("%s %s to %d (was %d)", op, msg, *v, oldv);
	return *v;
}

/*
 * packet-dump function, user-supplied or static buffer.
 * The destination buffer must be at least 30+4*len
 */
const char *
nm_dump_buf(char *p, int len, int lim, char *dst)
{
	static char _dst[8192];
	int i, j, i0;
	static char hex[] = "0123456789abcdef";
	char *o;	/* output position */

#define P_HI(x)	hex[((x) & 0xf0)>>4]
#define P_LO(x)	hex[((x) & 0xf)]
#define P_C(x)	((x) >= 0x20 && (x) <= 0x7e ? (x) : '.')
	if (!dst)
		dst = _dst;
	if (lim <= 0 || lim > len)
		lim = len;
	o = dst;
	sprintf(o, "buf 0x%p len %d lim %d\n", p, len, lim);
	o += strlen(o);
	/* hexdump routine */
	for (i = 0; i < lim; ) {
		sprintf(o, "%5d: ", i);
		o += strlen(o);
		memset(o, ' ', 48);
		i0 = i;
		for (j = 0; j < 16 && i < lim; i++, j++) {
			o[j*3] = P_HI(p[i]);
			o[j*3+1] = P_LO(p[i]);
		}
		i = i0;
		for (j = 0; j < 16 && i < lim; i++, j++)
			o[j + 48] = P_C(p[i]);
		o[j+48] = '\n';
		o += j+49;
	}
	*o = '\0';
#undef P_HI
#undef P_LO
#undef P_C
	return dst;
}

/*
 * Fetch configuration from the device, to cope with dynamic
 * reconfigurations after loading the module.
 */
/* call with NMG_LOCK held */
int
netmap_update_config(struct netmap_adapter *na)
{
	struct nm_config_info info;

	bzero(&info, sizeof(info));
	if (na->nm_config == NULL ||
	    na->nm_config(na, &info)) {
		/* take whatever we had at init time */
		info.num_tx_rings = na->num_tx_rings;
		info.num_tx_descs = na->num_tx_desc;
		info.num_rx_rings = na->num_rx_rings;
		info.num_rx_descs = na->num_rx_desc;
		info.rx_buf_maxsize = na->rx_buf_maxsize;
	}

	if (na->num_tx_rings == info.num_tx_rings &&
	    na->num_tx_desc == info.num_tx_descs &&
	    na->num_rx_rings == info.num_rx_rings &&
	    na->num_rx_desc == info.num_rx_descs &&
	    na->rx_buf_maxsize == info.rx_buf_maxsize)
		return 0; /* nothing changed */
	if (na->active_fds == 0) {
		na->num_tx_rings = info.num_tx_rings;
		na->num_tx_desc = info.num_tx_descs;
		na->num_rx_rings = info.num_rx_rings;
		na->num_rx_desc = info.num_rx_descs;
		na->rx_buf_maxsize = info.rx_buf_maxsize;
		if (netmap_verbose)
			nm_prinf("configuration changed for %s: txring %d x %d, "
				"rxring %d x %d, rxbufsz %d",
				na->name, na->num_tx_rings, na->num_tx_desc,
				na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize);
		return 0;
	}
	nm_prerr("WARNING: configuration changed for %s while active: "
		"txring %d x %d, rxring %d x %d, rxbufsz %d",
		na->name, info.num_tx_rings, info.num_tx_descs,
		info.num_rx_rings, info.num_rx_descs,
		info.rx_buf_maxsize);
	return 1;
}

/* nm_sync callbacks for the host rings */
static int netmap_txsync_to_host(struct netmap_kring *kring, int flags);
static int netmap_rxsync_from_host(struct netmap_kring *kring, int flags);

/* create the krings array and initialize the fields common to all adapters.
 * The array layout is this:
 *
 *                    +----------+
 * na->tx_rings ----->|          | \
 *                    |          |  } na->num_tx_rings
 *                    |          | /
 *                    +----------+
 *                    |          |    host tx kring
 * na->rx_rings ----> +----------+
 *                    |          | \
 *                    |          |  } na->num_rx_rings
 *                    |          | /
 *                    +----------+
 *                    |          |    host rx kring
 *                    +----------+
 * na->tailroom ----->|          | \
 *                    |          |  } tailroom bytes
 *                    |          | /
 *                    +----------+
 *
 * Note: for compatibility, host krings are created even when not needed.
 * The tailroom space is currently used by vale ports for allocating leases.
 */
/* call with NMG_LOCK held */
int
netmap_krings_create(struct netmap_adapter *na, u_int tailroom)
{
	u_int i, len, ndesc;
	struct netmap_kring *kring;
	u_int n[NR_TXRX];
	enum txrx t;
	int err = 0;

	if (na->tx_rings != NULL) {
		if (netmap_debug & NM_DEBUG_ON)
			nm_prerr("warning: krings were already created");
		return 0;
	}

	/* account for the (possibly fake) host rings */
	n[NR_TX] = netmap_all_rings(na, NR_TX);
	n[NR_RX] = netmap_all_rings(na, NR_RX);

	len = (n[NR_TX] + n[NR_RX]) *
		(sizeof(struct netmap_kring) + sizeof(struct netmap_kring *))
		+ tailroom;

	na->tx_rings = nm_os_malloc((size_t)len);
	if (na->tx_rings == NULL) {
		nm_prerr("Cannot allocate krings");
		return ENOMEM;
	}
	na->rx_rings = na->tx_rings + n[NR_TX];
	na->tailroom = na->rx_rings + n[NR_RX];

	/* link the krings in the krings array */
	kring = (struct netmap_kring *)((char *)na->tailroom + tailroom);
	for (i = 0; i < n[NR_TX] + n[NR_RX]; i++) {
		na->tx_rings[i] = kring;
		kring++;
	}

	/*
	 * All fields in krings are 0 except the ones initialized below.
	 * But better be explicit on important kring fields.
	 */
	for_rx_tx(t) {
		ndesc = nma_get_ndesc(na, t);
		for (i = 0; i < n[t]; i++) {
			kring = NMR(na, t)[i];
			bzero(kring, sizeof(*kring));
			kring->notify_na = na;
			kring->ring_id = i;
			kring->tx = t;
			kring->nkr_num_slots = ndesc;
			kring->nr_mode = NKR_NETMAP_OFF;
			kring->nr_pending_mode = NKR_NETMAP_OFF;
			if (i < nma_get_nrings(na, t)) {
				kring->nm_sync = (t == NR_TX ? na->nm_txsync : na->nm_rxsync);
			} else {
				if (!(na->na_flags & NAF_HOST_RINGS))
					kring->nr_kflags |= NKR_FAKERING;
				kring->nm_sync = (t == NR_TX ?
						netmap_txsync_to_host :
						netmap_rxsync_from_host);
			}
			kring->nm_notify = na->nm_notify;
			kring->rhead = kring->rcur = kring->nr_hwcur = 0;
			/*
			 * IMPORTANT: Always keep one slot empty.
			 */
			kring->rtail = kring->nr_hwtail = (t == NR_TX ? ndesc - 1 : 0);
			snprintf(kring->name, sizeof(kring->name) - 1, "%s %s%d", na->name,
					nm_txrx2str(t), i);
			nm_prdis("ktx %s h %d c %d t %d",
				kring->name, kring->rhead, kring->rcur, kring->rtail);
			err = nm_os_selinfo_init(&kring->si, kring->name);
			if (err) {
				netmap_krings_delete(na);
				return err;
			}
			mtx_init(&kring->q_lock, (t == NR_TX ? "nm_txq_lock" : "nm_rxq_lock"), NULL, MTX_DEF);
			kring->na = na;	/* setting this field marks the mutex as initialized */
		}
		err = nm_os_selinfo_init(&na->si[t], na->name);
		if (err) {
			netmap_krings_delete(na);
			return err;
		}
	}

	return 0;
}

/* undo the actions performed by netmap_krings_create */
/* call with NMG_LOCK held */
void
netmap_krings_delete(struct netmap_adapter *na)
{
	struct netmap_kring **kring = na->tx_rings;
	enum txrx t;

	if (na->tx_rings == NULL) {
		if (netmap_debug & NM_DEBUG_ON)
			nm_prerr("warning: krings were already deleted");
		return;
	}

	for_rx_tx(t)
		nm_os_selinfo_uninit(&na->si[t]);

	/* we rely on the krings layout described above */
	for ( ; kring != na->tailroom; kring++) {
		if ((*kring)->na != NULL)
			mtx_destroy(&(*kring)->q_lock);
		nm_os_selinfo_uninit(&(*kring)->si);
	}
	nm_os_free(na->tx_rings);
	na->tx_rings = na->rx_rings = na->tailroom = NULL;
}

/*
 * Destructor for NIC ports. They also have an mbuf queue
 * on the rings connected to the host so we need to purge
 * them first.
 */
/* call with NMG_LOCK held */
void
netmap_hw_krings_delete(struct netmap_adapter *na)
{
	u_int lim = netmap_real_rings(na, NR_RX), i;

	for (i = nma_get_nrings(na, NR_RX); i < lim; i++) {
		struct mbq *q = &NMR(na, NR_RX)[i]->rx_queue;
		nm_prdis("destroy sw mbq with len %d", mbq_len(q));
		mbq_purge(q);
		mbq_safe_fini(q);
	}
	netmap_krings_delete(na);
}

void
netmap_mem_drop(struct netmap_adapter *na)
{
	int last = netmap_mem_deref(na->nm_mem, na);
	/* if the native allocator had been overridden on regif,
	 * restore it now and drop the temporary one
	 */
	if (last && na->nm_mem_prev) {
		netmap_mem_put(na->nm_mem);
		na->nm_mem = na->nm_mem_prev;
		na->nm_mem_prev = NULL;
	}
}

/*
 * Undo everything that was done in netmap_do_regif(). In particular,
 * call nm_register(ifp,0) to stop netmap mode on the interface and
 * revert to normal operation.
 */
/* call with NMG_LOCK held */
static void netmap_unset_ringid(struct netmap_priv_d *);
static void netmap_krings_put(struct netmap_priv_d *);
void
netmap_do_unregif(struct netmap_priv_d *priv)
{
	struct netmap_adapter *na = priv->np_na;

	NMG_LOCK_ASSERT();
	na->active_fds--;
	/* unset nr_pending_mode and possibly release exclusive mode */
	netmap_krings_put(priv);

#ifdef	WITH_MONITOR
	/* XXX check whether we have to do something with monitor
	 * when rings change nr_mode. */
	if (na->active_fds <= 0) {
		/* walk through all the rings and tell any monitor
		 * that the port is going to exit netmap mode
		 */
		netmap_monitor_stop(na);
	}
#endif

	if (na->active_fds <= 0 || nm_kring_pending(priv)) {
		na->nm_register(na, 0);
	}

	/* delete rings and buffers that are no longer needed */
	netmap_mem_rings_delete(na);

	if (na->active_fds <= 0) {	/* last instance */
		/*
		 * (TO CHECK) We enter here
		 * when the last reference to this file descriptor goes
		 * away. This means we cannot have any pending poll()
		 * or interrupt routine operating on the structure.
		 * XXX The file may be closed in a thread while
		 * another thread is using it.
		 * Linux keeps the file opened until the last reference
		 * by any outstanding ioctl/poll or mmap is gone.
		 * FreeBSD does not track mmap()s (but we do) and
		 * wakes up any sleeping poll(). Need to check what
		 * happens if the close() occurs while a concurrent
		 * syscall is running.
		 */
		if (netmap_debug & NM_DEBUG_ON)
			nm_prinf("deleting last instance for %s", na->name);

		if (nm_netmap_on(na)) {
			nm_prerr("BUG: netmap on while going to delete the krings");
		}

		na->nm_krings_delete(na);

		/* restore the default number of host tx and rx rings */
		if (na->na_flags & NAF_HOST_RINGS) {
			na->num_host_tx_rings = 1;
			na->num_host_rx_rings = 1;
		} else {
			na->num_host_tx_rings = 0;
			na->num_host_rx_rings = 0;
		}
	}

	/* possibly decrement counter of tx_si/rx_si users */
	netmap_unset_ringid(priv);
	/* delete the nifp */
	netmap_mem_if_delete(na, priv->np_nifp);
	/* drop the allocator */
	netmap_mem_drop(na);
	/* mark the priv as unregistered */
	priv->np_na = NULL;
	priv->np_nifp = NULL;
}

struct netmap_priv_d*
netmap_priv_new(void)
{
	struct netmap_priv_d *priv;

	priv = nm_os_malloc(sizeof(struct netmap_priv_d));
	if (priv == NULL)
		return NULL;
	priv->np_refs = 1;
	nm_os_get_module();
	return priv;
}

/*
 * Destructor of the netmap_priv_d, called when the fd is closed
 * Action: undo all the things done by NIOCREGIF,
 * On FreeBSD we need to track whether there are active mmap()s,
 * and we use np_active_mmaps for that. On linux, the field is always 0.
 * Return: 1 if we can free priv, 0 otherwise.
 */
/* call with NMG_LOCK held */
int
netmap_priv_delete(struct netmap_priv_d *priv)
{
	struct netmap_adapter *na = priv->np_na;

	/* number of active references to this fd */
	if (--priv->np_refs > 0) {
		return 0;
	}
	nm_os_put_module();
	if (na) {
		netmap_do_unregif(priv);
	}
	netmap_unget_na(na, priv->np_ifp);
	bzero(priv, sizeof(*priv));	/* for safety */
	nm_os_free(priv);
	return 1;
}

/* call with NMG_LOCK *not* held */
void
netmap_dtor(void *data)
{
	struct netmap_priv_d *priv = data;

	NMG_LOCK();
	netmap_priv_delete(priv);
	NMG_UNLOCK();
}

/*
 * Handlers for synchronization of the rings from/to the host stack.
 * These are associated to a network interface and are just another
 * ring pair managed by userspace.
 *
 * Netmap also supports transparent forwarding (NS_FORWARD and NR_FORWARD
 * flags):
 *
 * - Before releasing buffers on hw RX rings, the application can mark
 *   them with the NS_FORWARD flag. During the next RXSYNC or poll(), they
 *   will be forwarded to the host stack, similarly to what happened if
 *   the application moved them to the host TX ring.
 *
 * - Before releasing buffers on the host RX ring, the application can
 *   mark them with the NS_FORWARD flag. During the next RXSYNC or poll(),
 *   they will be forwarded to the hw TX rings, saving the application
 *   from doing the same task in user-space.
 *
 * Transparent forwarding can be enabled per-ring, by setting the NR_FORWARD
 * flag, or globally with the netmap_fwd sysctl.
 *
 * The transfer NIC --> host is relatively easy, just encapsulate
 * into mbufs and we are done. The host --> NIC side is slightly
 * harder because there might not be room in the tx ring so it
 * might take a while before releasing the buffer.
 */

/*
 * Pass a whole queue of mbufs to the host stack as coming from 'dst'
 * We do not need to lock because the queue is private.
 * After this call the queue is empty.
 */
static void
netmap_send_up(struct ifnet *dst, struct mbq *q)
{
	struct mbuf *m;
	struct mbuf *head = NULL, *prev = NULL;

	/* Send packets up, outside the lock; head/prev machinery
	 * is only useful for Windows. */
	while ((m = mbq_dequeue(q)) != NULL) {
		if (netmap_debug & NM_DEBUG_HOST)
			nm_prinf("sending up pkt %p size %d", m, MBUF_LEN(m));
		prev = nm_os_send_up(dst, m, prev);
		if (head == NULL)
			head = prev;
	}
	if (head)
		nm_os_send_up(dst, NULL, head);
	mbq_fini(q);
}

/*
 * Scan the buffers from hwcur to ring->head, and put a copy of those
 * marked NS_FORWARD (or all of them if forced) into a queue of mbufs.
 * Drop remaining packets in the unlikely event
 * of an mbuf shortage.
 */
static void
netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force)
{
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	u_int n;
	struct netmap_adapter *na = kring->na;

	for (n = kring->nr_hwcur; n != head; n = nm_next(n, lim)) {
		struct mbuf *m;
		struct netmap_slot *slot = &kring->ring->slot[n];

		if ((slot->flags & NS_FORWARD) == 0 && !force)
			continue;
		if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE(na)) {
			nm_prlim(5, "bad pkt at %d len %d", n, slot->len);
			continue;
		}
		slot->flags &= ~NS_FORWARD; // XXX needed ?
		/* XXX TODO: adapt to the case of a multisegment packet */
		m = m_devget(NMB(na, slot), slot->len, 0, na->ifp, NULL);

		if (m == NULL)
			break;
		mbq_enqueue(q, m);
	}
}

static inline int
_nm_may_forward(struct netmap_kring *kring)
{
	return	((netmap_fwd || kring->ring->flags & NR_FORWARD) &&
		 kring->na->na_flags & NAF_HOST_RINGS &&
		 kring->tx == NR_RX);
}

static inline int
nm_may_forward_up(struct netmap_kring *kring)
{
	return	_nm_may_forward(kring) &&
		 kring->ring_id != kring->na->num_rx_rings;
}

static inline int
nm_may_forward_down(struct netmap_kring *kring, int sync_flags)
{
	return	_nm_may_forward(kring) &&
		 (sync_flags & NAF_CAN_FORWARD_DOWN) &&
		 kring->ring_id == kring->na->num_rx_rings;
}

/*
 * Send to the NIC rings packets marked NS_FORWARD between
 * kring->nr_hwcur and kring->rhead.
 * Called under kring->rx_queue.lock on the sw rx ring.
 *
 * It can only be called if the user opened all the TX hw rings,
 * see NAF_CAN_FORWARD_DOWN flag.
 * We can touch the TX netmap rings (slots, head and cur) since
 * we are in poll/ioctl system call context, and the application
 * is not supposed to touch the ring (using a different thread)
 * during the execution of the system call.
 */
static u_int
netmap_sw_to_nic(struct netmap_adapter *na)
{
	struct netmap_kring *kring = na->rx_rings[na->num_rx_rings];
	struct netmap_slot *rxslot = kring->ring->slot;
	u_int i, rxcur = kring->nr_hwcur;
	u_int const head = kring->rhead;
	u_int const src_lim = kring->nkr_num_slots - 1;
	u_int sent = 0;

	/* scan rings to find space, then fill as much as possible */
	for (i = 0; i < na->num_tx_rings; i++) {
		struct netmap_kring *kdst = na->tx_rings[i];
		struct netmap_ring *rdst = kdst->ring;
		u_int const dst_lim = kdst->nkr_num_slots - 1;

		/* XXX do we trust ring or kring->rcur,rtail ? */
		for (; rxcur != head && !nm_ring_empty(rdst);
		     rxcur = nm_next(rxcur, src_lim) ) {
			struct netmap_slot *src, *dst, tmp;
			u_int dst_head = rdst->head;

			src = &rxslot[rxcur];
			if ((src->flags & NS_FORWARD) == 0 && !netmap_fwd)
				continue;

			sent++;

			dst = &rdst->slot[dst_head];

			tmp = *src;

			src->buf_idx = dst->buf_idx;
			src->flags = NS_BUF_CHANGED;

			dst->buf_idx = tmp.buf_idx;
			dst->len = tmp.len;
			dst->flags = NS_BUF_CHANGED;

			rdst->head = rdst->cur = nm_next(dst_head, dst_lim);
		}
		/* if (sent) XXX txsync ? it would be just an optimization */
	}
	return sent;
}

/*
 * netmap_txsync_to_host() passes packets up. We are called from a
 * system call in user process context, and the only contention
 * can be among multiple user threads erroneously calling
 * this routine concurrently.
 */
static int
netmap_txsync_to_host(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	struct mbq q;

	/* Take packets from hwcur to head and pass them up.
	 * Force hwcur = head since netmap_grab_packets() stops at head
	 */
	mbq_init(&q);
	netmap_grab_packets(kring, &q, 1 /* force */);
	nm_prdis("have %d pkts in queue", mbq_len(&q));
	kring->nr_hwcur = head;
	kring->nr_hwtail = head + lim;
	if (kring->nr_hwtail > lim)
		kring->nr_hwtail -= lim + 1;

	netmap_send_up(na->ifp, &q);
	return 0;
}
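
/*
 * Example of the nr_hwtail arithmetic above: with nkr_num_slots == 256
 * (lim == 255) and head == 10, hwtail becomes 10 + 255 == 265, which
 * wraps to 265 - 256 == 9, i.e. one slot behind head: the whole ring is
 * offered to userspace while the "keep one slot empty" rule is preserved.
 */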

/*
 * rxsync backend for packets coming from the host stack.
 * They have been put in kring->rx_queue by netmap_transmit().
 * We protect access to the kring using kring->rx_queue.lock
 *
 * also moves to the nic hw rings any packet the user has marked
 * for transparent-mode forwarding, then sets the NR_FORWARD
 * flag in the kring to let the caller push them out
 */
static int
netmap_rxsync_from_host(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i, n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int ret = 0;
	struct mbq *q = &kring->rx_queue, fq;

	mbq_init(&fq); /* fq holds packets to be freed */

	mbq_lock(q);

	/* First part: import newly received packets */
	n = mbq_len(q);
	if (n) { /* grab packets from the queue */
		struct mbuf *m;
		uint32_t stop_i;

		nm_i = kring->nr_hwtail;
		stop_i = nm_prev(kring->nr_hwcur, lim);
		while ( nm_i != stop_i && (m = mbq_dequeue(q)) != NULL ) {
			int len = MBUF_LEN(m);
			struct netmap_slot *slot = &ring->slot[nm_i];

			m_copydata(m, 0, len, NMB(na, slot));
			nm_prdis("nm %d len %d", nm_i, len);
			if (netmap_debug & NM_DEBUG_HOST)
				nm_prinf("%s", nm_dump_buf(NMB(na, slot), len, 128, NULL));

			slot->len = len;
			slot->flags = 0;
			nm_i = nm_next(nm_i, lim);
			mbq_enqueue(&fq, m);
		}
		kring->nr_hwtail = nm_i;
	}

	/*
	 * Second part: skip past packets that userspace has released.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) { /* something was released */
		if (nm_may_forward_down(kring, flags)) {
			ret = netmap_sw_to_nic(na);
			if (ret > 0) {
				kring->nr_kflags |= NR_FORWARD;
				ret = 0;
			}
		}
		kring->nr_hwcur = head;
	}

	mbq_unlock(q);

	mbq_purge(&fq);
	mbq_fini(&fq);

	return ret;
}

/* Get a netmap adapter for the port.
 *
 * If it is possible to satisfy the request, return 0
 * with *na containing the netmap adapter found.
 * Otherwise return an error code, with *na containing NULL.
 *
 * When the port is attached to a bridge, we always return
 * EBUSY.
 * Otherwise, if the port is already bound to a file descriptor,
 * then we unconditionally return the existing adapter into *na.
 * In all the other cases, we return (into *na) either native,
 * generic or NULL, according to the following table:
 *
 *					native_support
 * active_fds   dev.netmap.admode         YES     NO
 * -------------------------------------------------------
 *    >0              *                 NA(ifp) NA(ifp)
 *
 *     0        NETMAP_ADMODE_BEST      NATIVE  GENERIC
 *     0        NETMAP_ADMODE_NATIVE    NATIVE   NULL
 *     0        NETMAP_ADMODE_GENERIC   GENERIC GENERIC
 *
 */
static void netmap_hw_dtor(struct netmap_adapter *); /* needed by NM_IS_NATIVE() */
int
netmap_get_hw_na(struct ifnet *ifp, struct netmap_mem_d *nmd, struct netmap_adapter **na)
{
	/* generic support */
	int i = netmap_admode;	/* Take a snapshot. */
	struct netmap_adapter *prev_na;
	int error = 0;

	*na = NULL; /* default */

	/* reset in case of invalid value */
	if (i < NETMAP_ADMODE_BEST || i >= NETMAP_ADMODE_LAST)
		i = netmap_admode = NETMAP_ADMODE_BEST;

	if (NM_NA_VALID(ifp)) {
		prev_na = NA(ifp);
		/* If an adapter already exists, return it if
		 * there are active file descriptors or if
		 * netmap is not forced to use generic
		 * adapters.
		 */
		if (NETMAP_OWNED_BY_ANY(prev_na)
			|| i != NETMAP_ADMODE_GENERIC
			|| prev_na->na_flags & NAF_FORCE_NATIVE
#ifdef WITH_PIPES
			/* ugly, but we cannot allow an adapter switch
			 * if some pipe is referring to this one
			 */
			|| prev_na->na_next_pipe > 0
#endif
		) {
			*na = prev_na;
			goto assign_mem;
		}
	}

	/* If there isn't native support and netmap is not allowed
	 * to use generic adapters, we cannot satisfy the request.
	 */
	if (!NM_IS_NATIVE(ifp) && i == NETMAP_ADMODE_NATIVE)
		return EOPNOTSUPP;

	/* Otherwise, create a generic adapter and return it,
	 * saving the previously used netmap adapter, if any.
	 *
	 * Note that here 'prev_na', if not NULL, MUST be a
	 * native adapter, and CANNOT be a generic one. This is
	 * true because generic adapters are created on demand, and
	 * destroyed when not used anymore. Therefore, if the adapter
	 * currently attached to an interface 'ifp' is generic, it
	 * must be that
	 * (NA(ifp)->active_fds > 0 || NETMAP_OWNED_BY_KERN(NA(ifp))).
	 * Consequently, if NA(ifp) is generic, we will enter one of
	 * the branches above. This ensures that we never override
	 * a generic adapter with another generic adapter.
	 */
	error = generic_netmap_attach(ifp);
	if (error)
		return error;

	*na = NA(ifp);

assign_mem:
	if (nmd != NULL && !((*na)->na_flags & NAF_MEM_OWNER) &&
	    (*na)->active_fds == 0 && ((*na)->nm_mem != nmd)) {
		(*na)->nm_mem_prev = (*na)->nm_mem;
		(*na)->nm_mem = netmap_mem_get(nmd);
	}

	return 0;
}

/*
 * MUST BE CALLED UNDER NMG_LOCK()
 *
 * Get a refcounted reference to a netmap adapter attached
 * to the interface specified by req.
 * This is always called in the execution of an ioctl().
 *
 * Return ENXIO if the interface specified by the request does
 * not exist, ENOTSUP if netmap is not supported by the interface,
 * EBUSY if the interface is already attached to a bridge,
 * EINVAL if parameters are invalid, ENOMEM if needed resources
 * could not be allocated.
 * If successful, hold a reference to the netmap adapter.
 *
 * If the interface specified by req is a system one, also keep
 * a reference to it and return a valid *ifp.
 */
int
netmap_get_na(struct nmreq_header *hdr,
	      struct netmap_adapter **na, struct ifnet **ifp,
	      struct netmap_mem_d *nmd, int create)
{
	struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
	int error = 0;
	struct netmap_adapter *ret = NULL;
	int nmd_ref = 0;

	*na = NULL;     /* default return value */
	*ifp = NULL;

	if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) {
		return EINVAL;
	}

	if (req->nr_mode == NR_REG_PIPE_MASTER ||
			req->nr_mode == NR_REG_PIPE_SLAVE) {
		/* Do not accept deprecated pipe modes. */
		nm_prerr("Deprecated pipe nr_mode, use xx{yy or xx}yy syntax");
		return EINVAL;
	}

	NMG_LOCK_ASSERT();

	/* if the request contains a memid, try to find the
	 * corresponding memory region
	 */
	if (nmd == NULL && req->nr_mem_id) {
		nmd = netmap_mem_find(req->nr_mem_id);
		if (nmd == NULL)
			return EINVAL;
		/* keep the reference */
		nmd_ref = 1;
	}

	/* We cascade through all possible types of netmap adapter.
	 * All netmap_get_*_na() functions return an error and an na,
	 * with the following combinations:
	 *
	 * error    na
	 *   0	   NULL		type doesn't match
	 *  !0	   NULL		type matches, but na creation/lookup failed
	 *   0	  !NULL		type matches and na created/found
	 *  !0	  !NULL		impossible
	 */
	error = netmap_get_null_na(hdr, na, nmd, create);
	if (error || *na != NULL)
		goto out;

	/* try to see if this is a monitor port */
	error = netmap_get_monitor_na(hdr, na, nmd, create);
	if (error || *na != NULL)
		goto out;

	/* try to see if this is a pipe port */
	error = netmap_get_pipe_na(hdr, na, nmd, create);
	if (error || *na != NULL)
		goto out;

	/* try to see if this is a bridge port */
	error = netmap_get_vale_na(hdr, na, nmd, create);
	if (error)
		goto out;

	if (*na != NULL) /* valid match in netmap_get_bdg_na() */
		goto out;

	/*
	 * This must be a hardware na, lookup the name in the system.
	 * Note that by hardware we actually mean "it shows up in ifconfig".
	 * This may still be a tap, a veth/epair, or even a
	 * persistent VALE port.
	 */
	*ifp = ifunit_ref(hdr->nr_name);
	if (*ifp == NULL) {
		error = ENXIO;
		goto out;
	}

	error = netmap_get_hw_na(*ifp, nmd, &ret);
	if (error)
		goto out;

	*na = ret;
	netmap_adapter_get(ret);

	/*
	 * if the adapter supports the host rings and it is not already open,
	 * try to set the number of host rings as requested by the user
	 */
	if (((*na)->na_flags & NAF_HOST_RINGS) && (*na)->active_fds == 0) {
		if (req->nr_host_tx_rings)
			(*na)->num_host_tx_rings = req->nr_host_tx_rings;
		if (req->nr_host_rx_rings)
			(*na)->num_host_rx_rings = req->nr_host_rx_rings;
	}
	nm_prdis("%s: host tx %d rx %u", (*na)->name, (*na)->num_host_tx_rings,
			(*na)->num_host_rx_rings);

out:
	if (error) {
		if (ret)
			netmap_adapter_put(ret);
		if (*ifp) {
			if_rele(*ifp);
			*ifp = NULL;
		}
	}
	if (nmd_ref)
		netmap_mem_put(nmd);

	return error;
}

/* undo netmap_get_na() */
void
netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp)
{
	if (ifp)
		if_rele(ifp);
	if (na)
		netmap_adapter_put(na);
}

#define NM_FAIL_ON(t) do {						\
	if (unlikely(t)) {						\
		nm_prlim(5, "%s: fail '" #t "' "			\
			"h %d c %d t %d "				\
			"rh %d rc %d rt %d "				\
			"hc %d ht %d",					\
			kring->name,					\
			head, cur, ring->tail,				\
			kring->rhead, kring->rcur, kring->rtail,	\
			kring->nr_hwcur, kring->nr_hwtail);		\
		return kring->nkr_num_slots;				\
	}								\
} while (0)

/*
 * validate parameters on entry for *_txsync()
 * Returns ring->cur if ok, or something >= kring->nkr_num_slots
 * in case of error.
 *
 * rhead, rcur and rtail=hwtail are stored from previous round.
 * hwcur is the next packet to send to the ring.
 *
 * We want
 *    hwcur <= *rhead <= head <= cur <= tail = *rtail <= hwtail
 *
 * hwcur, rhead, rtail and hwtail are reliable
 */
u_int
nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
{
	u_int head = ring->head; /* read only once */
	u_int cur = ring->cur; /* read only once */
	u_int n = kring->nkr_num_slots;

	nm_prdis(5, "%s kcur %d ktail %d head %d cur %d tail %d",
		kring->name,
		kring->nr_hwcur, kring->nr_hwtail,
		ring->head, ring->cur, ring->tail);
#if 1 /* kernel sanity checks; but we can trust the kring. */
	NM_FAIL_ON(kring->nr_hwcur >= n || kring->rhead >= n ||
	    kring->rtail >= n || kring->nr_hwtail >= n);
#endif /* kernel sanity checks */
	/*
	 * user sanity checks. We only use head,
	 * A, B, ... are possible positions for head:
	 *
	 *  0    A  rhead   B  rtail   C  n-1
	 *  0    D  rtail   E  rhead   F  n-1
	 *
	 * B, F, D are valid. A, C, E are wrong
	 */
	if (kring->rtail >= kring->rhead) {
		/* want rhead <= head <= rtail */
		NM_FAIL_ON(head < kring->rhead || head > kring->rtail);
		/* and also head <= cur <= rtail */
		NM_FAIL_ON(cur < head || cur > kring->rtail);
	} else { /* here rtail < rhead */
		/* we need head outside rtail .. rhead */
		NM_FAIL_ON(head > kring->rtail && head < kring->rhead);

		/* two cases now: head <= rtail or head >= rhead  */
		if (head <= kring->rtail) {
			/* want head <= cur <= rtail */
			NM_FAIL_ON(cur < head || cur > kring->rtail);
		} else { /* head >= rhead */
			/* cur must be outside rtail..head */
			NM_FAIL_ON(cur > kring->rtail && cur < head);
		}
	}
	if (ring->tail != kring->rtail) {
		nm_prlim(5, "%s tail overwritten was %d need %d", kring->name,
			ring->tail, kring->rtail);
		ring->tail = kring->rtail;
	}
	kring->rhead = head;
	kring->rcur = cur;
	return head;
}
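
/*
 * Worked example of the checks above: with n == 8, rhead == 2 and
 * rtail == 6, head == 4 is accepted (position B), while head == 1 (A)
 * or head == 7 (C) makes nm_txsync_prologue() return nkr_num_slots == 8,
 * which the caller treats as an error.
 */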

/*
 * validate parameters on entry for *_rxsync()
 * Returns ring->head if ok, kring->nkr_num_slots on error.
 *
 * For a valid configuration,
 * hwcur <= head <= cur <= tail <= hwtail
 *
 * We only consider head and cur.
 * hwcur and hwtail are reliable.
 */
u_int
nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
{
	uint32_t const n = kring->nkr_num_slots;
	uint32_t head, cur;

	nm_prdis(5,"%s kc %d kt %d h %d c %d t %d",
		kring->name,
		kring->nr_hwcur, kring->nr_hwtail,
		ring->head, ring->cur, ring->tail);
	/*
	 * Before storing the new values, we should check they do not
	 * move backwards. However:
	 * - head is not an issue because the previous value is hwcur;
	 * - cur could in principle go back, however it does not matter
	 *   because we are processing a brand new rxsync()
	 */
	cur = kring->rcur = ring->cur;	/* read only once */
	head = kring->rhead = ring->head;	/* read only once */
#if 1 /* kernel sanity checks */
	NM_FAIL_ON(kring->nr_hwcur >= n || kring->nr_hwtail >= n);
#endif /* kernel sanity checks */
	/* user sanity checks */
	if (kring->nr_hwtail >= kring->nr_hwcur) {
		/* want hwcur <= rhead <= hwtail */
		NM_FAIL_ON(head < kring->nr_hwcur || head > kring->nr_hwtail);
		/* and also rhead <= rcur <= hwtail */
		NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
	} else {
		/* we need rhead outside hwtail..hwcur */
		NM_FAIL_ON(head < kring->nr_hwcur && head > kring->nr_hwtail);
		/* two cases now: head <= hwtail or head >= hwcur  */
		if (head <= kring->nr_hwtail) {
			/* want head <= cur <= hwtail */
			NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
		} else {
			/* cur must be outside hwtail..head */
			NM_FAIL_ON(cur < head && cur > kring->nr_hwtail);
		}
	}
	if (ring->tail != kring->rtail) {
		nm_prlim(5, "%s tail overwritten was %d need %d",
			kring->name,
			ring->tail, kring->rtail);
		ring->tail = kring->rtail;
	}
	return head;
}

/*
 * Error routine called when txsync/rxsync detects an error.
 * Can't do much more than resetting head = cur = hwcur, tail = hwtail
 * Return 1 on reinit.
 *
 * This routine is only called by the upper half of the kernel.
 * It only reads hwcur (which is changed only by the upper half, too)
 * and hwtail (which may be changed by the lower half, but only on
 * a tx ring and only to increase it, so any error will be recovered
 * on the next call). For the above, we don't strictly need to call
 * it under lock.
 */
int
netmap_ring_reinit(struct netmap_kring *kring)
{
	struct netmap_ring *ring = kring->ring;
	u_int i, lim = kring->nkr_num_slots - 1;
	int errors = 0;

	// XXX KASSERT nm_kr_tryget
	nm_prlim(10, "called for %s", kring->name);
	// XXX probably wrong to trust userspace
	kring->rhead = ring->head;
	kring->rcur  = ring->cur;
	kring->rtail = ring->tail;

	if (ring->cur > lim)
		errors++;
	if (ring->head > lim)
		errors++;
	if (ring->tail > lim)
		errors++;
	for (i = 0; i <= lim; i++) {
		u_int idx = ring->slot[i].buf_idx;
		u_int len = ring->slot[i].len;
		if (idx < 2 || idx >= kring->na->na_lut.objtotal) {
			nm_prlim(5, "bad index at slot %d idx %d len %d ", i, idx, len);
			ring->slot[i].buf_idx = 0;
			ring->slot[i].len = 0;
		} else if (len > NETMAP_BUF_SIZE(kring->na)) {
			ring->slot[i].len = 0;
			nm_prlim(5, "bad len at slot %d idx %d len %d", i, idx, len);
		}
	}
	if (errors) {
		nm_prlim(10, "total %d errors", errors);
		nm_prlim(10, "%s reinit, cur %d -> %d tail %d -> %d",
			kring->name,
			ring->cur, kring->nr_hwcur,
			ring->tail, kring->nr_hwtail);
		ring->head = kring->rhead = kring->nr_hwcur;
		ring->cur  = kring->rcur  = kring->nr_hwcur;
		ring->tail = kring->rtail = kring->nr_hwtail;
	}
	return (errors ? 1 : 0);
}

/* interpret the ringid and flags fields of an nmreq, by translating them
 * into a pair of intervals of ring indices:
 *
 * [priv->np_txqfirst, priv->np_txqlast) and
 * [priv->np_rxqfirst, priv->np_rxqlast)
 *
 */
int
netmap_interp_ringid(struct netmap_priv_d *priv, struct nmreq_header *hdr)
{
	struct netmap_adapter *na = priv->np_na;
	struct nmreq_register *reg = (struct nmreq_register *)hdr->nr_body;
	int excluded_direction[] = { NR_TX_RINGS_ONLY, NR_RX_RINGS_ONLY };
	enum txrx t;
	u_int j;
	u_int nr_flags = reg->nr_flags, nr_mode = reg->nr_mode,
	      nr_ringid = reg->nr_ringid;

	for_rx_tx(t) {
		if (nr_flags & excluded_direction[t]) {
			priv->np_qfirst[t] = priv->np_qlast[t] = 0;
			continue;
		}
		switch (nr_mode) {
		case NR_REG_ALL_NIC:
		case NR_REG_NULL:
			priv->np_qfirst[t] = 0;
			priv->np_qlast[t] = nma_get_nrings(na, t);
			nm_prdis("ALL/PIPE: %s %d %d", nm_txrx2str(t),
				priv->np_qfirst[t], priv->np_qlast[t]);
			break;
		case NR_REG_SW:
		case NR_REG_NIC_SW:
			if (!(na->na_flags & NAF_HOST_RINGS)) {
				nm_prerr("host rings not supported");
				return EINVAL;
			}
			priv->np_qfirst[t] = (nr_mode == NR_REG_SW ?
				nma_get_nrings(na, t) : 0);
			priv->np_qlast[t] = netmap_all_rings(na, t);
			nm_prdis("%s: %s %d %d", nr_mode == NR_REG_SW ? "SW" : "NIC+SW",
				nm_txrx2str(t),
				priv->np_qfirst[t], priv->np_qlast[t]);
			break;
		case NR_REG_ONE_NIC:
			if (nr_ringid >= na->num_tx_rings &&
					nr_ringid >= na->num_rx_rings) {
				nm_prerr("invalid ring id %d", nr_ringid);
				return EINVAL;
			}
			/* if not enough rings, use the first one */
			j = nr_ringid;
			if (j >= nma_get_nrings(na, t))
				j = 0;
			priv->np_qfirst[t] = j;
			priv->np_qlast[t] = j + 1;
			nm_prdis("ONE_NIC: %s %d %d", nm_txrx2str(t),
				priv->np_qfirst[t], priv->np_qlast[t]);
			break;
		case NR_REG_ONE_SW:
			if (!(na->na_flags & NAF_HOST_RINGS)) {
				nm_prerr("host rings not supported");
				return EINVAL;
			}
			if (nr_ringid >= na->num_host_tx_rings &&
					nr_ringid >= na->num_host_rx_rings) {
				nm_prerr("invalid ring id %d", nr_ringid);
				return EINVAL;
			}
			/* if not enough rings, use the first one */
			j = nr_ringid;
			if (j >= nma_get_host_nrings(na, t))
				j = 0;
			priv->np_qfirst[t] = nma_get_nrings(na, t) + j;
			priv->np_qlast[t] = nma_get_nrings(na, t) + j + 1;
			nm_prdis("ONE_SW: %s %d %d", nm_txrx2str(t),
				priv->np_qfirst[t], priv->np_qlast[t]);
			break;
		default:
			nm_prerr("invalid regif type %d", nr_mode);
			return EINVAL;
		}
	}
	priv->np_flags = nr_flags;

	/* Allow transparent forwarding mode in the host --> nic
	 * direction only if all the TX hw rings have been opened. */
	if (priv->np_qfirst[NR_TX] == 0 &&
			priv->np_qlast[NR_TX] >= na->num_tx_rings) {
		priv->np_sync_flags |= NAF_CAN_FORWARD_DOWN;
	}

	if (netmap_verbose) {
		nm_prinf("%s: tx [%d,%d) rx [%d,%d) id %d",
			na->name,
			priv->np_qfirst[NR_TX],
			priv->np_qlast[NR_TX],
			priv->np_qfirst[NR_RX],
			priv->np_qlast[NR_RX],
			nr_ringid);
	}
	return 0;
}
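
/*
 * Example: on an adapter with 4 TX and 4 RX rings, NR_REG_ALL_NIC yields
 * np_qfirst == 0 and np_qlast == 4 in both directions, while NR_REG_ONE_NIC
 * with nr_ringid == 2 yields the single interval [2, 3).
 */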

/*
 * Set the ring ID. For devices with a single queue, a request
 * for all rings is the same as a single ring.
 */
static int
netmap_set_ringid(struct netmap_priv_d *priv, struct nmreq_header *hdr)
{
	struct netmap_adapter *na = priv->np_na;
	struct nmreq_register *reg = (struct nmreq_register *)hdr->nr_body;
	int error;
	enum txrx t;

	error = netmap_interp_ringid(priv, hdr);
	if (error) {
		return error;
	}

	priv->np_txpoll = (reg->nr_flags & NR_NO_TX_POLL) ? 0 : 1;

	/* optimization: count the users registered for more than
	 * one ring, which are the ones sleeping on the global queue.
	 * The default netmap_notify() callback will then
	 * avoid signaling the global queue if nobody is using it
	 */
	for_rx_tx(t) {
		if (nm_si_user(priv, t))
			na->si_users[t]++;
	}
	return 0;
}

static void
netmap_unset_ringid(struct netmap_priv_d *priv)
{
	struct netmap_adapter *na = priv->np_na;
	enum txrx t;

	for_rx_tx(t) {
		if (nm_si_user(priv, t))
			na->si_users[t]--;
		priv->np_qfirst[t] = priv->np_qlast[t] = 0;
	}
	priv->np_flags = 0;
	priv->np_txpoll = 0;
	priv->np_kloop_state = 0;
}
1980 #define within_sel(p_, t_, i_) \
1981 ((i_) < (p_)->np_qlast[(t_)])
1982 #define nonempty_sel(p_, t_) \
1983 (within_sel((p_), (t_), (p_)->np_qfirst[(t_)]))
1984 #define foreach_selected_ring(p_, t_, i_, kring_) \
1985 for ((t_) = nonempty_sel((p_), NR_RX) ? NR_RX : NR_TX, \
1986 (i_) = (p_)->np_qfirst[(t_)]; \
1988 ((t_) == NR_TX && within_sel((p_), (t_), (i_)))) && \
1989 ((kring_) = NMR((p_)->np_na, (t_))[(i_)]); \
1990 (i_) = within_sel((p_), (t_), (i_) + 1) ? (i_) + 1 : \
1991 (++(t_) < NR_TXRX ? (p_)->np_qfirst[(t_)] : (i_)))
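/*
 * Usage sketch for the iterator above (illustrative only): visit every
 * ring selected by 'priv', covering both the TX and RX intervals.
 *
 *	enum txrx t;
 *	u_int i;
 *	struct netmap_kring *kring;
 *
 *	foreach_selected_ring(priv, t, i, kring) {
 *		... operate on kring ...
 *	}
 */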
1994 /* Set the nr_pending_mode for the requested rings.
1995 * If requested, also try to get exclusive access to the rings, provided
1996 * the rings we want to bind are not exclusively owned by a previous bind.
1999 netmap_krings_get(struct netmap_priv_d *priv)
2001 struct netmap_adapter *na = priv->np_na;
2003 struct netmap_kring *kring;
2004 int excl = (priv->np_flags & NR_EXCLUSIVE);
2007 if (netmap_debug & NM_DEBUG_ON)
2008 nm_prinf("%s: grabbing tx [%d, %d) rx [%d, %d)",
2010 priv->np_qfirst[NR_TX],
2011 priv->np_qlast[NR_TX],
2012 priv->np_qfirst[NR_RX],
2013 priv->np_qlast[NR_RX]);
2015 /* first round: check that all the requested rings
2016 * are neither already exclusively owned, nor do we
2017 * want exclusive ownership when they are already in use. */
2019 foreach_selected_ring(priv, t, i, kring) {
2020 if ((kring->nr_kflags & NKR_EXCLUSIVE) ||
2021 (kring->users && excl))
2023 nm_prdis("ring %s busy", kring->name);
2028 /* second round: increment usage count (possibly marking them
2029 * as exclusive) and set the nr_pending_mode
2031 foreach_selected_ring(priv, t, i, kring) {
2034 kring->nr_kflags |= NKR_EXCLUSIVE;
2035 kring->nr_pending_mode = NKR_NETMAP_ON;
2042 /* Undo netmap_krings_get(). This is done by clearing the exclusive mode
2043 * if it was requested on regif, and unsetting nr_pending_mode if we are
2044 * the last users of the involved rings. */
2046 netmap_krings_put(struct netmap_priv_d *priv)
2049 struct netmap_kring *kring;
2050 int excl = (priv->np_flags & NR_EXCLUSIVE);
2053 nm_prdis("%s: releasing tx [%d, %d) rx [%d, %d)",
2055 priv->np_qfirst[NR_TX],
2056 priv->np_qlast[NR_TX],
2057 priv->np_qfirst[NR_RX],
2058 priv->np_qlast[NR_RX]);
2060 foreach_selected_ring(priv, t, i, kring) {
2062 kring->nr_kflags &= ~NKR_EXCLUSIVE;
2064 if (kring->users == 0)
2065 kring->nr_pending_mode = NKR_NETMAP_OFF;
2070 nm_priv_rx_enabled(struct netmap_priv_d *priv)
2072 return (priv->np_qfirst[NR_RX] != priv->np_qlast[NR_RX]);
2075 /* Validate the CSB entries for both directions (atok and ktoa).
2076 * To be called under NMG_LOCK(). */
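/*
 * CSB layout expected from the application (sketch, derived from the
 * initialization code below): each array has one entry per bound ring,
 * TX entries first:
 *
 *	csb_atok[0 .. ntx-1]		TX rings, application --> kernel
 *	csb_atok[ntx .. ntx+nrx-1]	RX rings, application --> kernel
 *	csb_ktoa[0 .. ntx-1]		TX rings, kernel --> application
 *	csb_ktoa[ntx .. ntx+nrx-1]	RX rings, kernel --> application
 *
 * where ntx and nrx are the numbers of bound TX and RX rings.
 */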
2078 netmap_csb_validate(struct netmap_priv_d *priv, struct nmreq_opt_csb *csbo)
2080 struct nm_csb_atok *csb_atok_base =
2081 (struct nm_csb_atok *)(uintptr_t)csbo->csb_atok;
2082 struct nm_csb_ktoa *csb_ktoa_base =
2083 (struct nm_csb_ktoa *)(uintptr_t)csbo->csb_ktoa;
2085 int num_rings[NR_TXRX], tot_rings;
2086 size_t entry_size[2];
2090 if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) {
2091 nm_prerr("Cannot update CSB while kloop is running");
2097 num_rings[t] = priv->np_qlast[t] - priv->np_qfirst[t];
2098 tot_rings += num_rings[t];
2103 if (!(priv->np_flags & NR_EXCLUSIVE)) {
2104 nm_prerr("CSB mode requires NR_EXCLUSIVE");
2108 entry_size[0] = sizeof(*csb_atok_base);
2109 entry_size[1] = sizeof(*csb_ktoa_base);
2110 csb_start[0] = (void *)csb_atok_base;
2111 csb_start[1] = (void *)csb_ktoa_base;
2113 for (i = 0; i < 2; i++) {
2114 /* On Linux we could use access_ok() to simplify
2115 * the validation. However, the advantage of
2116 * this copy-based approach is that it also
2117 * works on non-Linux platforms. */
2118 size_t csb_size = tot_rings * entry_size[i];
2122 if ((uintptr_t)csb_start[i] & (entry_size[i]-1)) {
2123 nm_prerr("Unaligned CSB address");
2127 tmp = nm_os_malloc(csb_size);
2131 /* Application --> kernel direction. */
2132 err = copyin(csb_start[i], tmp, csb_size);
2134 /* Kernel --> application direction. */
2135 memset(tmp, 0, csb_size);
2136 err = copyout(tmp, csb_start[i], csb_size);
2140 nm_prerr("Invalid CSB address");
2145 priv->np_csb_atok_base = csb_atok_base;
2146 priv->np_csb_ktoa_base = csb_ktoa_base;
2148 /* Initialize the CSB. */
2150 for (i = 0; i < num_rings[t]; i++) {
2151 struct netmap_kring *kring =
2152 NMR(priv->np_na, t)[i + priv->np_qfirst[t]];
2153 struct nm_csb_atok *csb_atok = csb_atok_base + i;
2154 struct nm_csb_ktoa *csb_ktoa = csb_ktoa_base + i;
2157 csb_atok += num_rings[NR_TX];
2158 csb_ktoa += num_rings[NR_TX];
2161 CSB_WRITE(csb_atok, head, kring->rhead);
2162 CSB_WRITE(csb_atok, cur, kring->rcur);
2163 CSB_WRITE(csb_atok, appl_need_kick, 1);
2164 CSB_WRITE(csb_atok, sync_flags, 1);
2165 CSB_WRITE(csb_ktoa, hwcur, kring->nr_hwcur);
2166 CSB_WRITE(csb_ktoa, hwtail, kring->nr_hwtail);
2167 CSB_WRITE(csb_ktoa, kern_need_kick, 1);
2169 nm_prinf("csb_init for kring %s: head %u, cur %u, "
2170 "hwcur %u, hwtail %u", kring->name,
2171 kring->rhead, kring->rcur, kring->nr_hwcur,
2179 /* Ensure that the netmap adapter can support the given MTU.
2180 * @return EINVAL if the na cannot be set to mtu, 0 otherwise.
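 *
 * Worked example (hypothetical numbers): with nbs == 2048 and
 * na->rx_buf_maxsize == 2048, an MTU of 1500 fits a single slot and
 * validates; an MTU of 9000 additionally requires NAF_MOREFRAG, since
 * a packet may then span several NS_MOREFRAG-chained slots.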
2183 netmap_buf_size_validate(const struct netmap_adapter *na, unsigned mtu) {
2184 unsigned nbs = NETMAP_BUF_SIZE(na);
2186 if (mtu <= na->rx_buf_maxsize) {
2187 /* The MTU fits a single NIC slot. We only
2188 * need to check that netmap buffers are
2189 * large enough to hold an MTU. NS_MOREFRAG
2190 * cannot be used in this case. */
2192 nm_prerr("error: netmap buf size (%u) "
2193 "< device MTU (%u)", nbs, mtu);
2197 /* More NIC slots may be needed to receive
2198 * or transmit a single packet. Check that
2199 * the adapter supports NS_MOREFRAG and that
2200 * netmap buffers are large enough to hold
2201 * the maximum per-slot size. */
2202 if (!(na->na_flags & NAF_MOREFRAG)) {
2203 nm_prerr("error: large MTU (%d) needed "
2204 "but %s does not support "
2205 "NS_MOREFRAG", mtu, na->ifp->if_xname);
2206 return EINVAL;
2208 } else if (nbs < na->rx_buf_maxsize) {
2209 nm_prerr("error: using NS_MOREFRAG on "
2210 "%s requires netmap buf size "
2211 ">= %u", na->ifp->if_xname,
2212 na->rx_buf_maxsize);
2215 nm_prinf("info: netmap application on "
2216 "%s needs to support "
2217 "NS_MOREFRAG "
2218 "(MTU=%u,netmap_buf_size=%u)",
2219 na->ifp->if_xname, mtu, nbs);
2227 * possibly move the interface to netmap-mode.
2228 * On success it returns a pointer to the netmap_if, otherwise NULL.
2229 * This must be called with NMG_LOCK held.
2231 * The following na callbacks are called in the process:
2233 * na->nm_config() [by netmap_update_config]
2234 * (get current number and size of rings)
2236 * We have a generic one for Linux (netmap_linux_config).
2237 * The bwrap has to override this, since it has to forward
2238 * the request to the wrapped adapter (netmap_bwrap_config).
2241 * na->nm_krings_create()
2242 * (create and init the krings array)
2244 * One of the following:
2246 * * netmap_hw_krings_create, (hw ports)
2247 * creates the standard layout for the krings
2248 * and adds the mbq (used for the host rings).
2250 * * netmap_vp_krings_create (VALE ports)
2251 * add leases and scratchpads
2253 * * netmap_pipe_krings_create (pipes)
2254 * create the krings and rings of both ends and
2257 * * netmap_monitor_krings_create (monitors)
2258 * avoid allocating the mbq
2260 * * netmap_bwrap_krings_create (bwraps)
2261 * create both the bwrap krings array,
2262 * the krings array of the wrapped adapter, and
2263 * (if needed) the fake array for the host adapter
2265 * na->nm_register(, 1)
2266 * (put the adapter in netmap mode)
2268 * This may be one of the following:
2270 * * netmap_hw_reg (hw ports)
2271 * checks that the ifp is still there, then calls
2272 * the hardware specific callback;
2274 * * netmap_vp_reg (VALE ports)
2275 * If the port is connected to a bridge,
2276 * set the NAF_NETMAP_ON flag under the
2277 * bridge write lock.
2279 * * netmap_pipe_reg (pipes)
2280 * inform the other pipe end that it is no
2281 * longer responsible for the lifetime of this pipe end.
2284 * * netmap_monitor_reg (monitors)
2285 * intercept the sync callbacks of the monitored rings;
2288 * * netmap_bwrap_reg (bwraps)
2289 * cross-link the bwrap and hwna rings,
2290 * forward the request to the hwna, override
2291 * the hwna notify callback (so that frames
2292 * coming from outside go through the bridge).
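 *
 * Sketch of the resulting call order in netmap_do_regif() (error
 * handling and sanity checks omitted; the code below is authoritative):
 *
 *	netmap_mem_finalize(na->nm_mem, na);
 *	if (na->active_fds == 0) {
 *		netmap_mem_get_lut(na->nm_mem, &na->na_lut);
 *		netmap_update_config(na);	(invokes na->nm_config())
 *	}
 *	netmap_set_ringid(priv, hdr);
 *	if (na->active_fds == 0)
 *		na->nm_krings_create(na);
 *	netmap_krings_get(priv);
 *	netmap_mem_rings_create(na);
 *	nifp = netmap_mem_if_new(na, priv);
 *	if (nm_kring_pending(priv))
 *		na->nm_register(na, 1);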
2297 netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
2298 struct nmreq_header *hdr)
2300 struct netmap_if *nifp = NULL;
2304 priv->np_na = na; /* store the reference */
2305 error = netmap_mem_finalize(na->nm_mem, na);
2309 if (na->active_fds == 0) {
2311 /* cache the allocator info in the na */
2312 error = netmap_mem_get_lut(na->nm_mem, &na->na_lut);
2315 nm_prdis("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal,
2316 na->na_lut.objsize);
2318 /* ring configuration may have changed, fetch from the card */
2319 netmap_update_config(na);
2322 /* compute the range of tx and rx rings to monitor */
2323 error = netmap_set_ringid(priv, hdr);
2327 if (na->active_fds == 0) {
2329 * If this is the first registration of the adapter,
2330 * perform sanity checks and create the in-kernel view
2331 * of the netmap rings (the netmap krings).
2333 if (na->ifp && nm_priv_rx_enabled(priv)) {
2334 /* This netmap adapter is attached to an ifnet. */
2335 unsigned mtu = nm_os_ifnet_mtu(na->ifp);
2337 nm_prdis("%s: mtu %d rx_buf_maxsize %d netmap_buf_size %d",
2338 na->name, mtu, na->rx_buf_maxsize, NETMAP_BUF_SIZE(na));
2340 if (na->rx_buf_maxsize == 0) {
2341 nm_prerr("%s: error: rx_buf_maxsize == 0", na->name);
2346 error = netmap_buf_size_validate(na, mtu);
2352 * Depending on the adapter, this may also create
2353 * the netmap rings themselves
2355 error = na->nm_krings_create(na);
2361 /* now the krings must exist and we can check whether some
2362 * previous bind has exclusive ownership on them, and set
2365 error = netmap_krings_get(priv);
2367 goto err_del_krings;
2369 /* create all needed missing netmap rings */
2370 error = netmap_mem_rings_create(na);
2374 /* in all cases, create a new netmap if */
2375 nifp = netmap_mem_if_new(na, priv);
2381 if (nm_kring_pending(priv)) {
2382 /* Some kring is switching mode, tell the adapter to
2383 * react accordingly. */
2384 error = na->nm_register(na, 1);
2389 /* Commit the reference. */
2393 * advertise that the interface is ready by setting np_nifp.
2394 * The barrier is needed because readers (poll, *SYNC and mmap)
2395 * check for priv->np_nifp != NULL without locking
2397 mb(); /* make sure previous writes are visible to all CPUs */
2398 priv->np_nifp = nifp;
2403 netmap_mem_if_delete(na, nifp);
2405 netmap_krings_put(priv);
2406 netmap_mem_rings_delete(na);
2408 if (na->active_fds == 0)
2409 na->nm_krings_delete(na);
2411 if (na->active_fds == 0)
2412 memset(&na->na_lut, 0, sizeof(na->na_lut));
2414 netmap_mem_drop(na);
2422 * update kring and ring at the end of rxsync/txsync.
2425 nm_sync_finalize(struct netmap_kring *kring)
2428 * Update ring tail to what the kernel knows
2429 * After txsync: head/rhead/hwcur might be behind cur/rcur
2432 kring->ring->tail = kring->rtail = kring->nr_hwtail;
2434 nm_prdis(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
2435 kring->name, kring->nr_hwcur, kring->nr_hwtail,
2436 kring->rhead, kring->rcur, kring->rtail);
2439 /* set ring timestamp */
2441 ring_timestamp_set(struct netmap_ring *ring)
2443 if (netmap_no_timestamp == 0 || ring->flags & NR_TIMESTAMP) {
2444 microtime(&ring->ts);
2448 static int nmreq_copyin(struct nmreq_header *, int);
2449 static int nmreq_copyout(struct nmreq_header *, int);
2450 static int nmreq_checkoptions(struct nmreq_header *);
2453 * ioctl(2) support for the "netmap" device.
2455 * The following is the list of accepted commands:
2456 * - NIOCCTRL device control API
2457 * - NIOCTXSYNC sync TX rings
2458 * - NIOCRXSYNC sync RX rings
2459 * - SIOCGIFADDR just for convenience
2460 * - NIOCGINFO deprecated (legacy API)
2461 * - NIOCREGIF deprecated (legacy API)
2463 * Return 0 on success, errno otherwise.
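 *
 * Illustrative userspace sketch (not part of this module); 'fd' is a
 * hypothetical descriptor open on /dev/netmap and "em0" a hypothetical
 * interface name:
 *
 *	struct nmreq_header hdr;
 *	struct nmreq_register req;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	memset(&req, 0, sizeof(req));
 *	hdr.nr_version = NETMAP_API;
 *	hdr.nr_reqtype = NETMAP_REQ_REGISTER;
 *	strlcpy(hdr.nr_name, "em0", sizeof(hdr.nr_name));
 *	hdr.nr_body = (uintptr_t)&req;
 *	req.nr_mode = NR_REG_ALL_NIC;
 *	if (ioctl(fd, NIOCCTRL, &hdr) < 0)
 *		perror("NIOCCTRL");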
2466 netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
2467 struct thread *td, int nr_body_is_user)
2469 struct mbq q; /* packets from RX hw queues to host stack */
2470 struct netmap_adapter *na = NULL;
2471 struct netmap_mem_d *nmd = NULL;
2472 struct ifnet *ifp = NULL;
2474 u_int i, qfirst, qlast;
2475 struct netmap_kring **krings;
2481 struct nmreq_header *hdr = (struct nmreq_header *)data;
2483 if (hdr->nr_version < NETMAP_MIN_API ||
2484 hdr->nr_version > NETMAP_MAX_API) {
2485 nm_prerr("API mismatch: got %d need %d",
2486 hdr->nr_version, NETMAP_API);
2490 /* Make a kernel-space copy of the user-space nr_body.
2491 * For convenience, the nr_body pointer and the pointers
2492 * in the options list will be replaced with their
2493 * kernel-space counterparts. The original pointers are
2494 * saved internally and later restored by nmreq_copyout
2496 error = nmreq_copyin(hdr, nr_body_is_user);
2501 /* Sanitize hdr->nr_name. */
2502 hdr->nr_name[sizeof(hdr->nr_name) - 1] = '\0';
2504 switch (hdr->nr_reqtype) {
2505 case NETMAP_REQ_REGISTER: {
2506 struct nmreq_register *req =
2507 (struct nmreq_register *)(uintptr_t)hdr->nr_body;
2508 struct netmap_if *nifp;
2510 /* Protect access to priv from concurrent requests. */
2513 struct nmreq_option *opt;
2516 if (priv->np_nifp != NULL) { /* thread already registered */
2522 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_EXTMEM);
2524 struct nmreq_opt_extmem *e =
2525 (struct nmreq_opt_extmem *)opt;
2527 nmd = netmap_mem_ext_create(e->nro_usrptr,
2528 &e->nro_info, &error);
2529 opt->nro_status = error;
2533 #endif /* WITH_EXTMEM */
2535 if (nmd == NULL && req->nr_mem_id) {
2536 /* find the allocator and get a reference */
2537 nmd = netmap_mem_find(req->nr_mem_id);
2539 if (netmap_verbose) {
2540 nm_prerr("%s: failed to find mem_id %u",
2541 hdr->nr_name, req->nr_mem_id);
2547 /* find the interface and a reference */
2548 error = netmap_get_na(hdr, &na, &ifp, nmd,
2549 1 /* create */); /* keep reference */
2552 if (NETMAP_OWNED_BY_KERN(na)) {
2557 if (na->virt_hdr_len && !(req->nr_flags & NR_ACCEPT_VNET_HDR)) {
2558 nm_prerr("virt_hdr_len=%d, but application does "
2559 "not accept it", na->virt_hdr_len);
2564 error = netmap_do_regif(priv, na, hdr);
2565 if (error) { /* reg. failed, release priv and ref */
2569 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
2571 struct nmreq_opt_csb *csbo =
2572 (struct nmreq_opt_csb *)opt;
2573 error = netmap_csb_validate(priv, csbo);
2574 opt->nro_status = error;
2576 netmap_do_unregif(priv);
2581 nifp = priv->np_nifp;
2583 /* return the offset of the netmap_if object */
2584 req->nr_rx_rings = na->num_rx_rings;
2585 req->nr_tx_rings = na->num_tx_rings;
2586 req->nr_rx_slots = na->num_rx_desc;
2587 req->nr_tx_slots = na->num_tx_desc;
2588 req->nr_host_tx_rings = na->num_host_tx_rings;
2589 req->nr_host_rx_rings = na->num_host_rx_rings;
2590 error = netmap_mem_get_info(na->nm_mem, &req->nr_memsize, &memflags,
2593 netmap_do_unregif(priv);
2596 if (memflags & NETMAP_MEM_PRIVATE) {
2597 *(uint32_t *)(uintptr_t)&nifp->ni_flags |= NI_PRIV_MEM;
2600 priv->np_si[t] = nm_si_user(priv, t) ?
2601 &na->si[t] : &NMR(na, t)[priv->np_qfirst[t]]->si;
2604 if (req->nr_extra_bufs) {
2606 nm_prinf("requested %d extra buffers",
2607 req->nr_extra_bufs);
2608 req->nr_extra_bufs = netmap_extra_alloc(na,
2609 &nifp->ni_bufs_head, req->nr_extra_bufs);
2611 nm_prinf("got %d extra buffers", req->nr_extra_bufs);
2613 req->nr_offset = netmap_mem_if_offset(na->nm_mem, nifp);
2615 error = nmreq_checkoptions(hdr);
2617 netmap_do_unregif(priv);
2621 /* store ifp reference so that priv destructor may release it */
2625 netmap_unget_na(na, ifp);
2627 /* release the reference from netmap_mem_find() or
2628 * netmap_mem_ext_create()
2631 netmap_mem_put(nmd);
2636 case NETMAP_REQ_PORT_INFO_GET: {
2637 struct nmreq_port_info_get *req =
2638 (struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body;
2645 if (hdr->nr_name[0] != '\0') {
2646 /* Build a nmreq_register out of the nmreq_port_info_get,
2647 * so that we can call netmap_get_na(). */
2648 struct nmreq_register regreq;
2649 bzero(&regreq, sizeof(regreq));
2650 regreq.nr_mode = NR_REG_ALL_NIC;
2651 regreq.nr_tx_slots = req->nr_tx_slots;
2652 regreq.nr_rx_slots = req->nr_rx_slots;
2653 regreq.nr_tx_rings = req->nr_tx_rings;
2654 regreq.nr_rx_rings = req->nr_rx_rings;
2655 regreq.nr_host_tx_rings = req->nr_host_tx_rings;
2656 regreq.nr_host_rx_rings = req->nr_host_rx_rings;
2657 regreq.nr_mem_id = req->nr_mem_id;
2659 /* get a refcount */
2660 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2661 hdr->nr_body = (uintptr_t)&regreq;
2662 error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
2663 hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET; /* reset type */
2664 hdr->nr_body = (uintptr_t)req; /* reset nr_body */
2670 nmd = na->nm_mem; /* get memory allocator */
2672 nmd = netmap_mem_find(req->nr_mem_id ? req->nr_mem_id : 1);
2675 nm_prerr("%s: failed to find mem_id %u",
2677 req->nr_mem_id ? req->nr_mem_id : 1);
2684 error = netmap_mem_get_info(nmd, &req->nr_memsize, &memflags,
2688 if (na == NULL) /* only memory info */
2690 netmap_update_config(na);
2691 req->nr_rx_rings = na->num_rx_rings;
2692 req->nr_tx_rings = na->num_tx_rings;
2693 req->nr_rx_slots = na->num_rx_desc;
2694 req->nr_tx_slots = na->num_tx_desc;
2695 req->nr_host_tx_rings = na->num_host_tx_rings;
2696 req->nr_host_rx_rings = na->num_host_rx_rings;
2698 netmap_unget_na(na, ifp);
2700 netmap_mem_put(nmd);
2705 case NETMAP_REQ_VALE_ATTACH: {
2706 error = netmap_vale_attach(hdr, NULL /* userspace request */);
2710 case NETMAP_REQ_VALE_DETACH: {
2711 error = netmap_vale_detach(hdr, NULL /* userspace request */);
2715 case NETMAP_REQ_VALE_LIST: {
2716 error = netmap_vale_list(hdr);
2720 case NETMAP_REQ_PORT_HDR_SET: {
2721 struct nmreq_port_hdr *req =
2722 (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2723 /* Build a nmreq_register out of the nmreq_port_hdr,
2724 * so that we can call netmap_get_bdg_na(). */
2725 struct nmreq_register regreq;
2726 bzero(&regreq, sizeof(regreq));
2727 regreq.nr_mode = NR_REG_ALL_NIC;
2729 /* For now we only support virtio-net headers, and only for
2730 * VALE ports, but this may change in future. Valid lengths
2731 * for the virtio-net header are 0 (no header), 10 and 12. */
2732 if (req->nr_hdr_len != 0 &&
2733 req->nr_hdr_len != sizeof(struct nm_vnet_hdr) &&
2734 req->nr_hdr_len != 12) {
2736 nm_prerr("invalid hdr_len %u", req->nr_hdr_len);
2741 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2742 hdr->nr_body = (uintptr_t)&regreq;
2743 error = netmap_get_vale_na(hdr, &na, NULL, 0);
2744 hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_SET;
2745 hdr->nr_body = (uintptr_t)req;
2747 struct netmap_vp_adapter *vpna =
2748 (struct netmap_vp_adapter *)na;
2749 na->virt_hdr_len = req->nr_hdr_len;
2750 if (na->virt_hdr_len) {
2751 vpna->mfs = NETMAP_BUF_SIZE(na);
2754 nm_prinf("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na);
2755 netmap_adapter_put(na);
2763 case NETMAP_REQ_PORT_HDR_GET: {
2764 /* Get vnet-header length for this netmap port */
2765 struct nmreq_port_hdr *req =
2766 (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2767 /* Build a nmreq_register out of the nmreq_port_hdr,
2768 * so that we can call netmap_get_bdg_na(). */
2769 struct nmreq_register regreq;
2772 bzero(&regreq, sizeof(regreq));
2773 regreq.nr_mode = NR_REG_ALL_NIC;
2775 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2776 hdr->nr_body = (uintptr_t)&regreq;
2777 error = netmap_get_na(hdr, &na, &ifp, NULL, 0);
2778 hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_GET;
2779 hdr->nr_body = (uintptr_t)req;
2781 req->nr_hdr_len = na->virt_hdr_len;
2783 netmap_unget_na(na, ifp);
2788 case NETMAP_REQ_VALE_NEWIF: {
2789 error = nm_vi_create(hdr);
2793 case NETMAP_REQ_VALE_DELIF: {
2794 error = nm_vi_destroy(hdr->nr_name);
2798 case NETMAP_REQ_VALE_POLLING_ENABLE:
2799 case NETMAP_REQ_VALE_POLLING_DISABLE: {
2800 error = nm_bdg_polling(hdr);
2803 #endif /* WITH_VALE */
2804 case NETMAP_REQ_POOLS_INFO_GET: {
2805 /* Get information from the memory allocator used for
2806 * this netmap port (rings and buffers). */
2807 struct nmreq_pools_info *req =
2808 (struct nmreq_pools_info *)(uintptr_t)hdr->nr_body;
2811 /* Build a nmreq_register out of the nmreq_pools_info,
2812 * so that we can call netmap_get_na(). */
2813 struct nmreq_register regreq;
2814 bzero(&regreq, sizeof(regreq));
2815 regreq.nr_mem_id = req->nr_mem_id;
2816 regreq.nr_mode = NR_REG_ALL_NIC;
2818 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2819 hdr->nr_body = (uintptr_t)&regreq;
2820 error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
2821 hdr->nr_reqtype = NETMAP_REQ_POOLS_INFO_GET; /* reset type */
2822 hdr->nr_body = (uintptr_t)req; /* reset nr_body */
2828 nmd = na->nm_mem; /* grab the memory allocator */
2834 /* Finalize the memory allocator, get the pools
2835 * information and release the allocator. */
2836 error = netmap_mem_finalize(nmd, na);
2840 error = netmap_mem_pools_info_get(req, nmd);
2841 netmap_mem_drop(na);
2843 netmap_unget_na(na, ifp);
2848 case NETMAP_REQ_CSB_ENABLE: {
2849 struct nmreq_option *opt;
2851 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
2855 struct nmreq_opt_csb *csbo =
2856 (struct nmreq_opt_csb *)opt;
2858 error = netmap_csb_validate(priv, csbo);
2860 opt->nro_status = error;
2865 case NETMAP_REQ_SYNC_KLOOP_START: {
2866 error = netmap_sync_kloop(priv, hdr);
2870 case NETMAP_REQ_SYNC_KLOOP_STOP: {
2871 error = netmap_sync_kloop_stop(priv);
2880 /* Write back request body to userspace and reset the
2881 * user-space pointer. */
2882 error = nmreq_copyout(hdr, error);
2888 if (unlikely(priv->np_nifp == NULL)) {
2892 mb(); /* make sure following reads are not from cache */
2894 if (unlikely(priv->np_csb_atok_base)) {
2895 nm_prerr("Invalid sync in CSB mode");
2900 na = priv->np_na; /* we have a reference */
2903 t = (cmd == NIOCTXSYNC ? NR_TX : NR_RX);
2904 krings = NMR(na, t);
2905 qfirst = priv->np_qfirst[t];
2906 qlast = priv->np_qlast[t];
2907 sync_flags = priv->np_sync_flags;
2909 for (i = qfirst; i < qlast; i++) {
2910 struct netmap_kring *kring = krings[i];
2911 struct netmap_ring *ring = kring->ring;
2913 if (unlikely(nm_kr_tryget(kring, 1, &error))) {
2914 error = (error ? EIO : 0);
2918 if (cmd == NIOCTXSYNC) {
2919 if (netmap_debug & NM_DEBUG_TXSYNC)
2920 nm_prinf("pre txsync ring %d cur %d hwcur %d",
2923 if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
2924 netmap_ring_reinit(kring);
2925 } else if (kring->nm_sync(kring, sync_flags | NAF_FORCE_RECLAIM) == 0) {
2926 nm_sync_finalize(kring);
2928 if (netmap_debug & NM_DEBUG_TXSYNC)
2929 nm_prinf("post txsync ring %d cur %d hwcur %d",
2933 if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
2934 netmap_ring_reinit(kring);
2936 if (nm_may_forward_up(kring)) {
2937 /* transparent forwarding, see netmap_poll() */
2938 netmap_grab_packets(kring, &q, netmap_fwd);
2940 if (kring->nm_sync(kring, sync_flags | NAF_FORCE_READ) == 0) {
2941 nm_sync_finalize(kring);
2943 ring_timestamp_set(ring);
2949 netmap_send_up(na->ifp, &q);
2956 return netmap_ioctl_legacy(priv, cmd, data, td);
2965 nmreq_size_by_type(uint16_t nr_reqtype)
2967 switch (nr_reqtype) {
2968 case NETMAP_REQ_REGISTER:
2969 return sizeof(struct nmreq_register);
2970 case NETMAP_REQ_PORT_INFO_GET:
2971 return sizeof(struct nmreq_port_info_get);
2972 case NETMAP_REQ_VALE_ATTACH:
2973 return sizeof(struct nmreq_vale_attach);
2974 case NETMAP_REQ_VALE_DETACH:
2975 return sizeof(struct nmreq_vale_detach);
2976 case NETMAP_REQ_VALE_LIST:
2977 return sizeof(struct nmreq_vale_list);
2978 case NETMAP_REQ_PORT_HDR_SET:
2979 case NETMAP_REQ_PORT_HDR_GET:
2980 return sizeof(struct nmreq_port_hdr);
2981 case NETMAP_REQ_VALE_NEWIF:
2982 return sizeof(struct nmreq_vale_newif);
2983 case NETMAP_REQ_VALE_DELIF:
2984 case NETMAP_REQ_SYNC_KLOOP_STOP:
2985 case NETMAP_REQ_CSB_ENABLE:
2987 case NETMAP_REQ_VALE_POLLING_ENABLE:
2988 case NETMAP_REQ_VALE_POLLING_DISABLE:
2989 return sizeof(struct nmreq_vale_polling);
2990 case NETMAP_REQ_POOLS_INFO_GET:
2991 return sizeof(struct nmreq_pools_info);
2992 case NETMAP_REQ_SYNC_KLOOP_START:
2993 return sizeof(struct nmreq_sync_kloop_start);
2999 nmreq_opt_size_by_type(uint32_t nro_reqtype, uint64_t nro_size)
3001 size_t rv = sizeof(struct nmreq_option);
3002 #ifdef NETMAP_REQ_OPT_DEBUG
3003 if (nro_reqtype & NETMAP_REQ_OPT_DEBUG)
3004 return (nro_reqtype & ~NETMAP_REQ_OPT_DEBUG);
3005 #endif /* NETMAP_REQ_OPT_DEBUG */
3006 switch (nro_reqtype) {
3008 case NETMAP_REQ_OPT_EXTMEM:
3009 rv = sizeof(struct nmreq_opt_extmem);
3011 #endif /* WITH_EXTMEM */
3012 case NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS:
3016 case NETMAP_REQ_OPT_CSB:
3017 rv = sizeof(struct nmreq_opt_csb);
3019 case NETMAP_REQ_OPT_SYNC_KLOOP_MODE:
3020 rv = sizeof(struct nmreq_opt_sync_kloop_mode);
3023 /* subtract the common header */
3024 return rv - sizeof(struct nmreq_option);
3028 * nmreq_copyin: create an in-kernel version of the request.
3030 * We build the following data structure:
 * hdr -> +-------+                  buf
 *        |       |        +---------------+
 *        +-------+        |usr body ptr   |
 *        |options|-.      +---------------+
 *        +-------+ |      |usr options ptr|
 *        |body   |------->+---------------+
 *                  |      |               |
 *                  |      | copy of body  |
 *                  |      |               |
 *                  |      +---------------+
 *                  |      |               |\
 *                  |      |      ...      | > option table, indexed by
 *                  |      |               |/  option type (one entry
 *                  |      +---------------+   per available option)
 *                  |      |usr next ptr 1 |
 *                  `----->+---------------+
 *                         | copy of opt 1 |
 *                         |   nro_next  --|-.
 *                         +---------------+ |
 *                         |usr next ptr 2 | |
 *                         +---------------+<'
 *                         | copy of opt 2 |
 *                         |   nro_next  --|--> ... --> NULL
 *                         +---------------+
3075 * The options and body fields of the hdr structure are overwritten
3076 * with in-kernel valid pointers inside the buf. The original user
3077 * pointers are saved in the buf and restored on copyout.
3078 * The list of options is copied and the pointers adjusted. The
3079 * original pointers are saved just before the option they belong to.
3081 * The option table has an entry for every available option. Entries
3082 * for options that have not been passed contain NULL.
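 *
 * Illustrative consequence of this layout: after nmreq_copyin() the
 * kernel can look up a specific option in O(1) via nmreq_getoption(),
 * e.g.
 *
 *	struct nmreq_option *opt;
 *
 *	opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
 *	if (opt != NULL)
 *		... the application passed a CSB option ...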
3087 nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user)
3089 size_t rqsz, optsz, bufsz, optbodysz;
3091 char *ker = NULL, *p;
3092 struct nmreq_option **next, *src, **opt_tab;
3093 struct nmreq_option buf;
3096 if (hdr->nr_reserved) {
3098 nm_prerr("nr_reserved must be zero");
3102 if (!nr_body_is_user)
3105 hdr->nr_reserved = nr_body_is_user;
3107 /* compute the total size of the buffer */
3108 rqsz = nmreq_size_by_type(hdr->nr_reqtype);
3109 if (rqsz > NETMAP_REQ_MAXSIZE) {
3113 if ((rqsz && hdr->nr_body == (uintptr_t)NULL) ||
3114 (!rqsz && hdr->nr_body != (uintptr_t)NULL)) {
3115 /* Request body expected, but not found; or
3116 * request body found but unexpected. */
3118 nm_prerr("nr_body expected but not found, or vice versa");
3123 bufsz = 2 * sizeof(void *) + rqsz +
3124 NETMAP_REQ_OPT_MAX * sizeof(opt_tab);
3125 /* compute the size of the buf below the option table.
3126 * It must contain a copy of every received option structure.
3127 * For every option we also need to store a copy of the user
3131 for (src = (struct nmreq_option *)(uintptr_t)hdr->nr_options; src;
3132 src = (struct nmreq_option *)(uintptr_t)buf.nro_next)
3134 error = copyin(src, &buf, sizeof(*src));
3137 /* Validate nro_size to avoid integer overflow of optsz and bufsz. */
3138 if (buf.nro_size > NETMAP_REQ_MAXSIZE) {
3142 optsz += sizeof(*src);
3143 optbodysz = nmreq_opt_size_by_type(buf.nro_reqtype, buf.nro_size);
3144 if (optbodysz > NETMAP_REQ_MAXSIZE) {
3149 if (rqsz + optsz > NETMAP_REQ_MAXSIZE) {
3153 bufsz += sizeof(void *);
3157 ker = nm_os_malloc(bufsz);
3162 p = ker; /* write pointer into the buffer */
3164 /* make a copy of the user pointers */
3165 ptrs = (uint64_t*)p;
3166 *ptrs++ = hdr->nr_body;
3167 *ptrs++ = hdr->nr_options;
3171 error = copyin((void *)(uintptr_t)hdr->nr_body, p, rqsz);
3174 /* overwrite the user pointer with the in-kernel one */
3175 hdr->nr_body = (uintptr_t)p;
3177 /* start of the options table */
3178 opt_tab = (struct nmreq_option **)p;
3179 p += sizeof(opt_tab) * NETMAP_REQ_OPT_MAX;
3181 /* copy the options */
3182 next = (struct nmreq_option **)&hdr->nr_options;
3185 struct nmreq_option *opt;
3187 /* copy the option header */
3188 ptrs = (uint64_t *)p;
3189 opt = (struct nmreq_option *)(ptrs + 1);
3190 error = copyin(src, opt, sizeof(*src));
3193 /* make a copy of the user next pointer */
3194 *ptrs = opt->nro_next;
3195 /* overwrite the user pointer with the in-kernel one */
3198 /* initialize the option as not supported.
3199 * Recognized options will update this field.
3201 opt->nro_status = EOPNOTSUPP;
3203 /* check for invalid types */
3204 if (opt->nro_reqtype < 1) {
3206 nm_prinf("invalid option type: %u", opt->nro_reqtype);
3207 opt->nro_status = EINVAL;
3212 if (opt->nro_reqtype >= NETMAP_REQ_OPT_MAX) {
3213 /* opt->nro_status is already EOPNOTSUPP */
3218 /* if the type is valid, index the option in the table
3219 * unless it is a duplicate.
3221 if (opt_tab[opt->nro_reqtype] != NULL) {
3223 nm_prinf("duplicate option: %u", opt->nro_reqtype);
3224 opt->nro_status = EINVAL;
3225 opt_tab[opt->nro_reqtype]->nro_status = EINVAL;
3229 opt_tab[opt->nro_reqtype] = opt;
3231 p = (char *)(opt + 1);
3233 /* copy the option body */
3234 optsz = nmreq_opt_size_by_type(opt->nro_reqtype,
3237 /* the option body follows the option header */
3238 error = copyin(src + 1, p, optsz);
3245 /* move to next option */
3246 next = (struct nmreq_option **)&opt->nro_next;
3250 nmreq_copyout(hdr, error);
3254 ptrs = (uint64_t *)ker;
3255 hdr->nr_body = *ptrs++;
3256 hdr->nr_options = *ptrs++;
3257 hdr->nr_reserved = 0;
3264 nmreq_copyout(struct nmreq_header *hdr, int rerror)
3266 struct nmreq_option *src, *dst;
3267 void *ker = (void *)(uintptr_t)hdr->nr_body, *bufstart;
3272 if (!hdr->nr_reserved)
3275 /* restore the user pointers in the header */
3276 ptrs = (uint64_t *)ker - 2;
3278 hdr->nr_body = *ptrs++;
3279 src = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3280 hdr->nr_options = *ptrs;
3284 bodysz = nmreq_size_by_type(hdr->nr_reqtype);
3285 error = copyout(ker, (void *)(uintptr_t)hdr->nr_body, bodysz);
3292 /* copy the options */
3293 dst = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3298 /* restore the user pointer */
3299 next = src->nro_next;
3300 ptrs = (uint64_t *)src - 1;
3301 src->nro_next = *ptrs;
3303 /* always copy the option header */
3304 error = copyout(src, dst, sizeof(*src));
3310 /* copy the option body only if there was no error */
3311 if (!rerror && !src->nro_status) {
3312 optsz = nmreq_opt_size_by_type(src->nro_reqtype,
3315 error = copyout(src + 1, dst + 1, optsz);
3322 src = (struct nmreq_option *)(uintptr_t)next;
3323 dst = (struct nmreq_option *)(uintptr_t)*ptrs;
3328 hdr->nr_reserved = 0;
3329 nm_os_free(bufstart);
3333 struct nmreq_option *
3334 nmreq_getoption(struct nmreq_header *hdr, uint16_t reqtype)
3336 struct nmreq_option **opt_tab;
3338 if (!hdr->nr_options)
3341 opt_tab = (struct nmreq_option **)((uintptr_t)hdr->nr_options) -
3342 (NETMAP_REQ_OPT_MAX + 1);
3343 return opt_tab[reqtype];
3347 nmreq_checkoptions(struct nmreq_header *hdr)
3349 struct nmreq_option *opt;
3350 /* return an error if there is still any option
3351 * marked as not supported
3354 for (opt = (struct nmreq_option *)(uintptr_t)hdr->nr_options; opt;
3355 opt = (struct nmreq_option *)(uintptr_t)opt->nro_next)
3356 if (opt->nro_status == EOPNOTSUPP)
3363 * select(2) and poll(2) handlers for the "netmap" device.
3365 * Can be called for one or more queues.
3366 * Return the event mask corresponding to ready events.
3367 * If there are no ready events (and 'sr' is not NULL), do a
3368 * selrecord on either individual selinfo or on the global one.
3369 * Device-dependent parts (locking and sync of tx/rx rings)
3370 * are done through callbacks.
3372 * On Linux, arguments are really pwait, the poll table, and 'td' is struct file *.
3373 * The first one is remapped to pwait as selrecord() uses the name as an
3374 * argument.
3377 netmap_poll(struct netmap_priv_d *priv, int events, NM_SELRECORD_T *sr)
3379 struct netmap_adapter *na;
3380 struct netmap_kring *kring;
3381 struct netmap_ring *ring;
3382 u_int i, want[NR_TXRX], revents = 0;
3383 NM_SELINFO_T *si[NR_TXRX];
3384 #define want_tx want[NR_TX]
3385 #define want_rx want[NR_RX]
3386 struct mbq q; /* packets from RX hw queues to host stack */
3389 * In order to avoid nested locks, we need to "double check"
3390 * txsync and rxsync if we decide to do a selrecord().
3391 * retry_tx (and retry_rx, later) prevent looping forever.
3393 int retry_tx = 1, retry_rx = 1;
3395 /* Transparent mode: send_down is 1 if we have found some
3396 * packets to forward (host RX ring --> NIC) during the rx
3397 * scan and we have not sent them down to the NIC yet.
3398 * Transparent mode requires binding all rings to a single
3399 * file descriptor. */
3402 int sync_flags = priv->np_sync_flags;
3406 if (unlikely(priv->np_nifp == NULL)) {
3409 mb(); /* make sure following reads are not from cache */
3413 if (unlikely(!nm_netmap_on(na)))
3416 if (unlikely(priv->np_csb_atok_base)) {
3417 nm_prerr("Invalid poll in CSB mode");
3421 if (netmap_debug & NM_DEBUG_ON)
3422 nm_prinf("device %s events 0x%x", na->name, events);
3423 want_tx = events & (POLLOUT | POLLWRNORM);
3424 want_rx = events & (POLLIN | POLLRDNORM);
3427 * If the card has more than one queue AND the file descriptor is
3428 * bound to all of them, we sleep on the "global" selinfo, otherwise
3429 * we sleep on individual selinfo (FreeBSD only allows two selinfo's
3430 * per file descriptor).
3431 * The interrupt routine in the driver wakes one or the other
3432 * (or both) depending on which clients are active.
3434 * rxsync() is only called if we run out of buffers on a POLLIN.
3435 * txsync() is called if we run out of buffers on POLLOUT, or
3436 * there are pending packets to send. The latter can be disabled
3437 * passing NETMAP_NO_TX_POLL in the NIOCREG call.
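 *
 * Example (hypothetical): with 4 rx rings, a descriptor bound with
 * NR_REG_ALL_NIC sleeps on the global RX selinfo, while one bound
 * with NR_REG_ONE_NIC and nr_ringid == 2 sleeps on the selinfo of
 * ring 2 only.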
3439 si[NR_RX] = priv->np_si[NR_RX];
3440 si[NR_TX] = priv->np_si[NR_TX];
3444 * We start with a lock free round which is cheap if we have
3445 * slots available. If this fails, then lock and call the sync
3446 * routines. We can't do this on Linux, as the contract says
3447 * that we must call nm_os_selrecord() unconditionally.
3450 const enum txrx t = NR_TX;
3451 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3452 kring = NMR(na, t)[i];
3453 if (kring->ring->cur != kring->ring->tail) {
3454 /* Some unseen TX space is available, so
3455 * we don't need to run txsync. */
3463 const enum txrx t = NR_RX;
3464 int rxsync_needed = 0;
3466 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3467 kring = NMR(na, t)[i];
3468 if (kring->ring->cur == kring->ring->tail
3469 || kring->rhead != kring->ring->head) {
3470 /* There are no unseen packets on this ring,
3471 * or there are some buffers to be returned
3472 * to the netmap port. We therefore go ahead
3473 * and run rxsync. */
3478 if (!rxsync_needed) {
3486 /* The selrecord must be unconditional on linux. */
3487 nm_os_selrecord(sr, si[NR_RX]);
3488 nm_os_selrecord(sr, si[NR_TX]);
3492 * If we want to push packets out (priv->np_txpoll) or
3493 * want_tx is still set, we must issue txsync calls
3494 * (on all rings, to avoid that the tx rings stall).
3495 * Fortunately, normal tx mode has np_txpoll set.
3497 if (priv->np_txpoll || want_tx) {
3499 * The first round checks if anyone is ready, if not
3500 * do a selrecord and another round to handle races.
3501 * want_tx goes to 0 if any space is found, and is
3502 * used to skip rings with no pending transmissions.
3505 for (i = priv->np_qfirst[NR_TX]; i < priv->np_qlast[NR_TX]; i++) {
3508 kring = na->tx_rings[i];
3512 * Don't try to txsync this TX ring if we already found some
3513 * space in some of the TX rings (want_tx == 0) and there are no
3514 * TX slots in this ring that need to be flushed to the NIC
3517 if (!send_down && !want_tx && ring->head == kring->nr_hwcur)
3520 if (nm_kr_tryget(kring, 1, &revents))
3523 if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3524 netmap_ring_reinit(kring);
3527 if (kring->nm_sync(kring, sync_flags))
3530 nm_sync_finalize(kring);
3534 * If we found new slots, notify potential
3535 * listeners on the same ring.
3536 * Since we just did a txsync, look at the copies
3537 * of cur,tail in the kring.
3539 found = kring->rcur != kring->rtail;
3541 if (found) { /* notify other listeners */
3545 kring->nm_notify(kring, 0);
3549 /* if there were any packets to forward, we must have handled them by now */
3551 if (want_tx && retry_tx && sr) {
3553 nm_os_selrecord(sr, si[NR_TX]);
3561 * If want_rx is still set scan receive rings.
3562 * Do it on all rings because otherwise we starve.
3565 /* two rounds here for race avoidance */
3567 for (i = priv->np_qfirst[NR_RX]; i < priv->np_qlast[NR_RX]; i++) {
3570 kring = na->rx_rings[i];
3573 if (unlikely(nm_kr_tryget(kring, 1, &revents)))
3576 if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3577 netmap_ring_reinit(kring);
3580 /* now we can use kring->rcur, rtail */
3583 * transparent mode support: collect packets from
3584 * hw rxring(s) that have been released by the user
3586 if (nm_may_forward_up(kring)) {
3587 netmap_grab_packets(kring, &q, netmap_fwd);
3590 /* Clear the NR_FORWARD flag anyway; it may be set by
3591 * the nm_sync() below, but only for the host RX ring (see
3592 * netmap_rxsync_from_host()). */
3593 kring->nr_kflags &= ~NR_FORWARD;
3594 if (kring->nm_sync(kring, sync_flags))
3597 nm_sync_finalize(kring);
3598 send_down |= (kring->nr_kflags & NR_FORWARD);
3599 ring_timestamp_set(ring);
3600 found = kring->rcur != kring->rtail;
3606 kring->nm_notify(kring, 0);
3612 if (retry_rx && sr) {
3613 nm_os_selrecord(sr, si[NR_RX]);
3616 if (send_down || retry_rx) {
3619 goto flush_tx; /* and retry_rx */
3626 * Transparent mode: released bufs (i.e. between kring->nr_hwcur and
3627 * ring->head) marked with NS_FORWARD on hw rx rings are passed up
3628 * to the host stack.
3632 netmap_send_up(na->ifp, &q);
3641 nma_intr_enable(struct netmap_adapter *na, int onoff)
3643 bool changed = false;
3648 for (i = 0; i < nma_get_nrings(na, t); i++) {
3649 struct netmap_kring *kring = NMR(na, t)[i];
3650 int on = !(kring->nr_kflags & NKR_NOINTR);
3652 if (!!onoff != !!on) {
3656 kring->nr_kflags &= ~NKR_NOINTR;
3658 kring->nr_kflags |= NKR_NOINTR;
3664 return 0; /* nothing to do */
3668 nm_prerr("Cannot %s interrupts for %s", onoff ? "enable" : "disable",
3673 na->nm_intr(na, onoff);
3679 /*-------------------- driver support routines -------------------*/
3681 /* default notify callback */
3683 netmap_notify(struct netmap_kring *kring, int flags)
3685 struct netmap_adapter *na = kring->notify_na;
3686 enum txrx t = kring->tx;
3688 nm_os_selwakeup(&kring->si);
3689 /* optimization: avoid a wake up on the global
3690 * queue if nobody has registered for more
3691 * than one ring. */
3693 if (na->si_users[t] > 0)
3694 nm_os_selwakeup(&na->si[t]);
3696 return NM_IRQ_COMPLETED;
3699 /* called by all routines that create netmap_adapters.
3700 * provide some defaults and get a reference to the
3704 netmap_attach_common(struct netmap_adapter *na)
3706 if (!na->rx_buf_maxsize) {
3707 /* Set a conservative default (larger is safer). */
3708 na->rx_buf_maxsize = PAGE_SIZE;
3712 if (na->na_flags & NAF_HOST_RINGS && na->ifp) {
3713 na->if_input = na->ifp->if_input; /* for netmap_send_up */
3715 na->pdev = na; /* make sure netmap_mem_map() is called */
3716 #endif /* __FreeBSD__ */
3717 if (na->na_flags & NAF_HOST_RINGS) {
3718 if (na->num_host_rx_rings == 0)
3719 na->num_host_rx_rings = 1;
3720 if (na->num_host_tx_rings == 0)
3721 na->num_host_tx_rings = 1;
3723 if (na->nm_krings_create == NULL) {
3724 /* we assume that we have been called by a driver,
3725 * since other port types all provide their own
3726 * nm_krings_create callbacks. */
3728 na->nm_krings_create = netmap_hw_krings_create;
3729 na->nm_krings_delete = netmap_hw_krings_delete;
3731 if (na->nm_notify == NULL)
3732 na->nm_notify = netmap_notify;
3735 if (na->nm_mem == NULL) {
3736 /* use the global allocator */
3737 na->nm_mem = netmap_mem_get(&nm_mem);
3740 if (na->nm_bdg_attach == NULL)
3741 /* no special nm_bdg_attach callback. On VALE
3742 * attach, we need to interpose a bwrap
3744 na->nm_bdg_attach = netmap_default_bdg_attach;
3750 /* Wrapper for the register callback provided by netmap-enabled
3751 * hardware drivers.
3752 * nm_iszombie(na) means that the driver module has been
3753 * unloaded, so we cannot call into it.
3754 * nm_os_ifnet_lock() must guarantee mutual exclusion with
3755 * module unloading. */
3758 netmap_hw_reg(struct netmap_adapter *na, int onoff)
3760 struct netmap_hw_adapter *hwna =
3761 (struct netmap_hw_adapter*)na;
3766 if (nm_iszombie(na)) {
3769 } else if (na != NULL) {
3770 na->na_flags &= ~NAF_NETMAP_ON;
3775 error = hwna->nm_hw_register(na, onoff);
3778 nm_os_ifnet_unlock();
3784 netmap_hw_dtor(struct netmap_adapter *na)
3786 if (na->ifp == NULL)
3789 NM_DETACH_NA(na->ifp);
3794 * Allocate a netmap_adapter object, and initialize it from the
3795 * 'arg' passed by the driver on attach.
3796 * We allocate a block of memory of 'size' bytes, which has room
3797 * for struct netmap_adapter plus additional room private to
3799 * Return 0 on success, ENOMEM otherwise.
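 *
 * Illustrative attach sketch for a hypothetical driver (the my_*
 * callbacks are placeholders for driver-provided functions):
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = ifp;
 *	na.num_tx_rings = na.num_rx_rings = 4;
 *	na.num_tx_desc = na.num_rx_desc = 1024;
 *	na.nm_txsync = my_txsync;
 *	na.nm_rxsync = my_rxsync;
 *	na.nm_register = my_register;
 *	netmap_attach(&na);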
3802 netmap_attach_ext(struct netmap_adapter *arg, size_t size, int override_reg)
3804 struct netmap_hw_adapter *hwna = NULL;
3805 struct ifnet *ifp = NULL;
3807 if (size < sizeof(struct netmap_hw_adapter)) {
3808 if (netmap_debug & NM_DEBUG_ON)
3809 nm_prerr("Invalid netmap adapter size %d", (int)size);
3813 if (arg == NULL || arg->ifp == NULL) {
3814 if (netmap_debug & NM_DEBUG_ON)
3815 nm_prerr("either arg or arg->ifp is NULL");
3819 if (arg->num_tx_rings == 0 || arg->num_rx_rings == 0) {
3820 if (netmap_debug & NM_DEBUG_ON)
3821 nm_prerr("%s: invalid rings tx %d rx %d",
3822 arg->name, arg->num_tx_rings, arg->num_rx_rings);
3827 if (NM_NA_CLASH(ifp)) {
3828 /* If NA(ifp) is not null but there is no valid netmap
3829 * adapter it means that someone else is using the same
3830 * pointer (e.g. ax25_ptr on linux). This happens for
3831 * instance when also PF_RING is in use. */
3832 nm_prerr("Error: netmap adapter hook is busy");
3836 hwna = nm_os_malloc(size);
3840 hwna->up.na_flags |= NAF_HOST_RINGS | NAF_NATIVE;
3841 strlcpy(hwna->up.name, ifp->if_xname, sizeof(hwna->up.name));
3843 hwna->nm_hw_register = hwna->up.nm_register;
3844 hwna->up.nm_register = netmap_hw_reg;
3846 if (netmap_attach_common(&hwna->up)) {
3850 netmap_adapter_get(&hwna->up);
3852 NM_ATTACH_NA(ifp, &hwna->up);
3854 nm_os_onattach(ifp);
3856 if (arg->nm_dtor == NULL) {
3857 hwna->up.nm_dtor = netmap_hw_dtor;
3860 if_printf(ifp, "netmap queues/slots: TX %d/%d, RX %d/%d\n",
3861 hwna->up.num_tx_rings, hwna->up.num_tx_desc,
3862 hwna->up.num_rx_rings, hwna->up.num_rx_desc);
3866 nm_prerr("fail, arg %p ifp %p na %p", arg, ifp, hwna);
3867 return (hwna ? EINVAL : ENOMEM);
3872 netmap_attach(struct netmap_adapter *arg)
3874 return netmap_attach_ext(arg, sizeof(struct netmap_hw_adapter),
3875 1 /* override nm_reg */);
3880 NM_DBG(netmap_adapter_get)(struct netmap_adapter *na)
3886 refcount_acquire(&na->na_refcount);
3890 /* returns 1 iff the netmap_adapter is destroyed */
3892 NM_DBG(netmap_adapter_put)(struct netmap_adapter *na)
3897 if (!refcount_release(&na->na_refcount))
3903 if (na->tx_rings) { /* XXX should not happen */
3904 if (netmap_debug & NM_DEBUG_ON)
3905 nm_prerr("freeing leftover tx_rings");
3906 na->nm_krings_delete(na);
3908 netmap_pipe_dealloc(na);
3910 netmap_mem_put(na->nm_mem);
3911 bzero(na, sizeof(*na));
3917 /* nm_krings_create callback for all hardware native adapters */
3919 netmap_hw_krings_create(struct netmap_adapter *na)
3921 int ret = netmap_krings_create(na, 0);
3923 /* initialize the mbq for the sw rx ring */
3924 u_int lim = netmap_real_rings(na, NR_RX), i;
3925 for (i = na->num_rx_rings; i < lim; i++) {
3926 mbq_safe_init(&NMR(na, NR_RX)[i]->rx_queue);
3928 nm_prdis("initialized sw rx queue %d", na->num_rx_rings);
3936 * Called on module unload by the netmap-enabled drivers
3939 netmap_detach(struct ifnet *ifp)
3941 struct netmap_adapter *na = NA(ifp);
3947 netmap_set_all_rings(na, NM_KR_LOCKED);
3949 * if the netmap adapter is not native, somebody
3950 * changed it, so we cannot release it here.
3951 * The NAF_ZOMBIE flag will notify the new owner that
3952 * the driver is gone.
3954 if (!(na->na_flags & NAF_NATIVE) || !netmap_adapter_put(na)) {
3955 na->na_flags |= NAF_ZOMBIE;
3957 /* give active users a chance to notice that NAF_ZOMBIE has been
3958 * turned on, so that they can stop and return an error to userspace.
3959 * Note that this becomes a NOP if there are no active users and,
3960 * therefore, the put() above has deleted the na, since now NA(ifp) is
3963 netmap_enable_all_rings(ifp);
3969 * Intercept packets from the network stack and pass them
3970 * to netmap as incoming packets on the 'software' ring.
3972 * We only store packets in a bounded mbq and then copy them
3973 * in the relevant rxsync routine.
3975 * We rely on the OS to make sure that the ifp and na do not go
3976 * away (typically the caller checks for IFF_DRV_RUNNING or the like).
3977 * In nm_register() or whenever there is a reinitialization,
3978 * we make sure to make the mode change visible here.
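 *
 * Enqueue policy (sketch; the code below is authoritative): the mbuf
 * is queued on the host RX kring only if there is guaranteed room,
 * roughly
 *
 *	busy = hwtail - hwcur		(modulo the ring size)
 *	drop if busy + mbq_len(q) >= nkr_num_slots - 1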
3981 netmap_transmit(struct ifnet *ifp, struct mbuf *m)
3983 struct netmap_adapter *na = NA(ifp);
3984 struct netmap_kring *kring, *tx_kring;
3985 u_int len = MBUF_LEN(m);
3986 u_int error = ENOBUFS;
3993 if (i >= na->num_host_rx_rings) {
3994 i = i % na->num_host_rx_rings;
3996 kring = NMR(na, NR_RX)[nma_get_nrings(na, NR_RX) + i];
3998 // XXX [Linux] we do not need this lock
3999 // if we follow the down/configure/up protocol -gl
4000 // mtx_lock(&na->core_lock);
4002 if (!nm_netmap_on(na)) {
4003 nm_prerr("%s not in netmap mode anymore", na->name);
4009 if (txr >= na->num_tx_rings) {
4010 txr %= na->num_tx_rings;
4012 tx_kring = NMR(na, NR_TX)[txr];
4014 if (tx_kring->nr_mode == NKR_NETMAP_OFF) {
4015 return MBUF_TRANSMIT(na, ifp, m);
4018 q = &kring->rx_queue;
4020 // XXX reconsider long packets if we handle fragments
4021 if (len > NETMAP_BUF_SIZE(na)) { /* too long for us */
4022 nm_prerr("%s from_host, drop packet size %d > %d", na->name,
4023 len, NETMAP_BUF_SIZE(na));
4027 if (!netmap_generic_hwcsum) {
4028 if (nm_os_mbuf_has_csum_offld(m)) {
4029 nm_prlim(1, "%s drop mbuf that needs checksum offload", na->name);
4034 if (nm_os_mbuf_has_seg_offld(m)) {
4035 nm_prlim(1, "%s drop mbuf that needs generic segmentation offload", na->name);
4040 ETHER_BPF_MTAP(ifp, m);
4041 #endif /* __FreeBSD__ */
4043 /* protect against netmap_rxsync_from_host(), netmap_sw_to_nic()
4044 * and maybe other instances of netmap_transmit (the latter
4045 * not possible on Linux).
4046 * We enqueue the mbuf only if we are sure there is going to be
4047 * enough room in the host RX ring, otherwise we drop it.
4051 busy = kring->nr_hwtail - kring->nr_hwcur;
4053 busy += kring->nkr_num_slots;
4054 if (busy + mbq_len(q) >= kring->nkr_num_slots - 1) {
4055 nm_prlim(2, "%s full hwcur %d hwtail %d qlen %d", na->name,
4056 kring->nr_hwcur, kring->nr_hwtail, mbq_len(q));
4059 nm_prdis(2, "%s %d bufs in queue", na->name, mbq_len(q));
4060 /* notify outside the lock */
4069 /* unconditionally wake up listeners */
4070 kring->nm_notify(kring, 0);
4071 /* this is normally netmap_notify(), but for NICs
4072 * connected to a bridge it is netmap_bwrap_intr_notify(),
4073 * which possibly forwards the frames through the switch. */
4081 * Reset function to be called by the driver routines when reinitializing
4082 * a hardware ring. The driver is in charge of locking to protect the kring
4083 * while this operation is being performed. This is normally achieved by
4084 * calling netmap_disable_all_rings() before triggering a reset.
4085 * If the kring is not in netmap mode, return NULL to inform the caller
4086 * that this is the case.
4087 * If the kring is in netmap mode, set hwofs so that the netmap indices
4088 * seen by userspace (head/cur/tail) do not change, although the internal
4089 * NIC indices have been reset to 0.
4090 * In any case, adjust kring->nr_mode.
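 *
 * Typical driver usage (illustrative; 'ring_nr' is a hypothetical ring
 * index, and the last argument, new_cur, is 0 as in common drivers):
 *
 *	struct netmap_slot *slot;
 *
 *	slot = netmap_reset(na, NR_TX, ring_nr, 0);
 *	if (slot == NULL)
 *		return;	(ring not in netmap mode, nothing to do)
 *
 * after which the driver reloads the NIC descriptors from the
 * returned slot array.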
4092 struct netmap_slot *
4093 netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n,
4096 struct netmap_kring *kring;
4097 u_int new_hwtail, new_hwofs;
4099 if (!nm_native_on(na)) {
4100 nm_prdis("interface not in native netmap mode");
4101 return NULL; /* nothing to reinitialize */
4105 if (n >= na->num_tx_rings)
4107 kring = na->tx_rings[n];
4109 * Set hwofs to rhead, so that slots[rhead] is mapped to
4110 * the NIC internal slot 0, and thus the netmap buffer
4111 * at rhead is the next to be transmitted. Transmissions
4112 * that were pending before the reset are considered as
4113 * sent, so that we can have hwcur = rhead. All the slots
4114 * are now owned by the user, so we can also reinit hwtail.
4116 new_hwofs = kring->rhead;
4117 new_hwtail = nm_prev(kring->rhead, kring->nkr_num_slots - 1);
4119 if (n >= na->num_rx_rings)
4121 kring = na->rx_rings[n];
4123 * Set hwofs to hwtail, so that slots[hwtail] is mapped to
4124 * the NIC internal slot 0, and thus the netmap buffer
4125 * at hwtail is the next to be given to the NIC.
4126 * Unread slots (the ones in [rhead, hwtail)) are owned by
4127 * the user, and thus the caller cannot give them
4128 * to the NIC right now.
4130 new_hwofs = kring->nr_hwtail;
4131 new_hwtail = kring->nr_hwtail;
4133 if (kring->nr_pending_mode == NKR_NETMAP_OFF) {
4134 kring->nr_mode = NKR_NETMAP_OFF;
4137 if (netmap_verbose) {
4138 nm_prinf("%s, hc %u->%u, ht %u->%u, ho %u->%u", kring->name,
4139 kring->nr_hwcur, kring->rhead,
4140 kring->nr_hwtail, new_hwtail,
4141 kring->nkr_hwofs, new_hwofs);
4143 kring->nr_hwcur = kring->rhead;
4144 kring->nr_hwtail = new_hwtail;
4145 kring->nkr_hwofs = new_hwofs;
4148 * Wakeup on the individual and global selwait
4149 * We do the wakeup here, but the ring is not yet reconfigured.
4150 * However, we are under lock so there are no races.
4152 kring->nr_mode = NKR_NETMAP_ON;
4153 kring->nm_notify(kring, 0);
4154 return kring->ring->slot;
4159 * Dispatch rx/tx interrupts to the netmap rings.
4161 * "work_done" is non-null on the RX path, NULL for the TX path.
4162 * We rely on the OS to make sure that there is only one active
4163 * instance per queue, and that there is appropriate locking.
4165 * The 'notify' routine depends on what the ring is attached to.
4166 * - for a netmap file descriptor, do a selwakeup on the individual
4167 * waitqueue, plus one on the global one if needed
4168 * (see netmap_notify)
4169 * - for a nic connected to a switch, call the proper forwarding routine
4170 * (see netmap_bwrap_intr_notify)
4173 netmap_common_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
4175 struct netmap_kring *kring;
4176 enum txrx t = (work_done ? NR_RX : NR_TX);
4178 q &= NETMAP_RING_MASK;
4180 if (netmap_debug & (NM_DEBUG_RXINTR|NM_DEBUG_TXINTR)) {
4181 nm_prlim(5, "received %s queue %d", work_done ? "RX" : "TX", q);
4184 if (q >= nma_get_nrings(na, t))
4185 return NM_IRQ_PASS; // not a physical queue
4187 kring = NMR(na, t)[q];
4189 if (kring->nr_mode == NKR_NETMAP_OFF) {
4194 kring->nr_kflags |= NKR_PENDINTR; // XXX atomic ?
4195 *work_done = 1; /* do not fire napi again */
4198 return kring->nm_notify(kring, 0);
4203 * Default functions to handle rx/tx interrupts from a physical device.
4204 * "work_done" is non-null on the RX path, NULL for the TX path.
4206 * If the card is not in netmap mode, simply return NM_IRQ_PASS,
4207 * so that the caller proceeds with regular processing.
4208 * Otherwise call netmap_common_irq().
4210 * If the card is connected to a netmap file descriptor,
4211 * do a selwakeup on the individual queue, plus one on the global one
4212 * if needed (multiqueue card _and_ there are multiqueue listeners),
4213 * and return NR_IRQ_COMPLETED.
4215 * Finally, if called on rx from an interface connected to a switch,
4216 * calls the proper forwarding routine.
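 *
 * Illustrative driver-side usage (sketch; 'ring_nr' is a hypothetical
 * ring index):
 *
 *	u_int work_done;
 *
 *	if (netmap_rx_irq(ifp, ring_nr, &work_done) != NM_IRQ_PASS)
 *		return;	(handled by netmap, skip the regular path)
 *
 * The TX path is analogous, with a NULL 'work_done'.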
4219 netmap_rx_irq(struct ifnet *ifp, u_int q, u_int *work_done)
4221 struct netmap_adapter *na = NA(ifp);
4224 * XXX emulated netmap mode sets NAF_SKIP_INTR so
4225 * we still use the regular driver even though the previous
4226 * check fails. It is unclear whether we should use
4227 * nm_native_on() here.
4229 if (!nm_netmap_on(na))
4232 if (na->na_flags & NAF_SKIP_INTR) {
4233 nm_prdis("use regular interrupt");
4237 return netmap_common_irq(na, q, work_done);
4240 /* set/clear native flags and if_transmit/netdev_ops */
4242 nm_set_native_flags(struct netmap_adapter *na)
4244 struct ifnet *ifp = na->ifp;
4246 /* We do the setup for intercepting packets only if we are the
4247 * first user of this adapter. */
4248 if (na->active_fds > 0) {
4252 na->na_flags |= NAF_NETMAP_ON;
4254 nm_update_hostrings_mode(na);
4258 nm_clear_native_flags(struct netmap_adapter *na)
4260 struct ifnet *ifp = na->ifp;
4262 /* We undo the setup for intercepting packets only if we are the
4263 * last user of this adapter. */
4264 if (na->active_fds > 0) {
4268 nm_update_hostrings_mode(na);
4271 na->na_flags &= ~NAF_NETMAP_ON;
4275 netmap_krings_mode_commit(struct netmap_adapter *na, int onoff)
4282 for (i = 0; i < netmap_real_rings(na, t); i++) {
4283 struct netmap_kring *kring = NMR(na, t)[i];
4285 if (onoff && nm_kring_pending_on(kring))
4286 kring->nr_mode = NKR_NETMAP_ON;
4287 else if (!onoff && nm_kring_pending_off(kring))
4288 kring->nr_mode = NKR_NETMAP_OFF;
4294 * Module loader and unloader
4296 * netmap_init() creates the /dev/netmap device and initializes
4297 * all global variables. Returns 0 on success, errno on failure
4298 * (though in practice failure is not expected)
4300 * netmap_fini() destroys everything.
4303 static struct cdev *netmap_dev; /* /dev/netmap character device. */
4304 extern struct cdevsw netmap_cdevsw;
4311 destroy_dev(netmap_dev);
4312 /* we assume that there are no longer netmap users */
4314 netmap_uninit_bridges();
4317 nm_prinf("netmap: unloaded module.");
4328 error = netmap_mem_init();
4332 * MAKEDEV_ETERNAL_KLD avoids an expensive check on syscalls
4333 * when the module is compiled in.
4334 * XXX could use make_dev_credv() to get error number
4336 netmap_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD,
4337 &netmap_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600,
4342 error = netmap_init_bridges();
4347 nm_os_vi_init_index();
4350 error = nm_os_ifnet_init();
4354 nm_prinf("netmap: loaded module");
4358 return (EINVAL); /* may be incorrect */