/*
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2011-2014 Matteo Landi
 * Copyright (C) 2011-2016 Luigi Rizzo
 * Copyright (C) 2011-2016 Giuseppe Lettieri
 * Copyright (C) 2011-2016 Vincenzo Maffione
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module supports memory mapped access to network devices; see
 * netmap(4) for the user-visible API.
 *
 * The module uses a large memory pool allocated by the kernel
 * and accessible as mmapped memory by multiple userspace threads/processes.
 * The memory pool contains packet buffers and "netmap rings",
 * i.e. user-accessible copies of the interface's queues.
 *
 * Access to the network card works like this:
 * 1. a process/thread issues one or more open() on /dev/netmap, to create
 *    a select()able file descriptor on which events are reported.
 * 2. on each descriptor, the process issues an ioctl() to identify
 *    the interface that should report events to the file descriptor.
 * 3. on each descriptor, the process issues an mmap() request to
 *    map the shared memory region within the process' address space.
 *    The list of interesting queues is indicated by a location in
 *    the shared memory region.
 * 4. using the functions in the netmap(4) userspace API, a process
 *    can look up the occupation state of a queue, access memory buffers,
 *    and retrieve received packets or enqueue packets to transmit.
 * 5. using some ioctl()s the process can synchronize the userspace view
 *    of the queue with the actual status in the kernel. This includes both
 *    receiving the notification of new packets, and transmitting new
 *    packets on the output interface.
 * 6. select() or poll() can be used to wait for events on individual
 *    transmit or receive queues (or all queues for a given interface).
 *    A minimal sketch of these six steps is given below.
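 *
 * As an illustration only (not part of this module), the six steps
 * roughly map to the following userspace sketch, using the macros from
 * net/netmap_user.h; error handling and most of the nmreq setup are
 * omitted:
 *
 *	int fd = open("/dev/netmap", O_RDWR);		// step 1
 *	struct nmreq req;
 *	bzero(&req, sizeof(req));
 *	req.nr_version = NETMAP_API;
 *	strlcpy(req.nr_name, "em0", sizeof(req.nr_name));
 *	ioctl(fd, NIOCREGIF, &req);			// step 2
 *	void *mem = mmap(0, req.nr_memsize,
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);	// step 3
 *	struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);
 *	struct netmap_ring *txr = NETMAP_TXRING(nifp, 0); // step 4
 *	// ... fill slots, advance txr->head and txr->cur ...
 *	ioctl(fd, NIOCTXSYNC, NULL);			// step 5
 *	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *	poll(&pfd, 1, -1);				// step 6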

				SYNCHRONIZATION (USER)

The netmap rings and data structures may be shared among multiple
user threads or even independent processes.
Any synchronization among those threads/processes is delegated
to the threads themselves. Only one thread at a time can be in
a system call on the same netmap ring. The OS does not enforce
this and only guarantees against system crashes in case of
invalid usage.

				LOCKING (INTERNAL)

Within the kernel, access to the netmap rings is protected as follows:

- a spinlock on each ring, to handle producer/consumer races on
  RX rings attached to the host stack (against multiple host
  threads writing from the host stack to the same ring),
  and on 'destination' rings attached to a VALE switch
  (i.e. RX rings in VALE ports, and TX rings in NIC/host ports)
  protecting multiple active senders for the same destination;

- an atomic variable to guarantee that there is at most one
  instance of *_*xsync() on the ring at any time.
  For rings connected to user file
  descriptors, an atomic_test_and_set() protects this, and the
  lock on the ring is not actually used.
  For NIC RX rings connected to a VALE switch, an atomic_test_and_set()
  is also used to prevent multiple executions (the driver might indeed
  already guarantee this).
  For NIC TX rings connected to a VALE switch, the lock arbitrates
  access to the queue (both when allocating buffers and when pushing
  them out);

- *xsync() should be protected against initializations of the card.
  On FreeBSD most devices have the reset routine protected by
  a RING lock (ixgbe, igb, em) or core lock (re). lem is missing
  the RING protection on rx_reset(); this should be added.

  On linux there is an external lock on the tx path, which probably
  also arbitrates access to the reset routine. XXX to be revised

- a per-interface core_lock protecting access from the host stack
  while interfaces may be detached from netmap mode.
  XXX there should be no need for this lock if we detach the interfaces
  only while they are down.

				--- VALE SWITCH ---

NMG_LOCK() serializes all modifications to switches and ports.
A switch cannot be deleted until all ports are gone.

For each switch, an SX lock (RWlock on linux) protects
deletion of ports. When configuring or deleting a new port, the
lock is acquired in exclusive mode (after holding NMG_LOCK).
When forwarding, the lock is acquired in shared mode (without NMG_LOCK).
The lock is held throughout the entire forwarding cycle,
during which the thread may incur a page fault.
Hence it is important that sleepable shared locks are used.

On the rx ring, the per-port lock is grabbed initially to reserve
a number of slots in the ring, then the lock is released,
packets are copied from source to destination, and then
the lock is acquired again and the receive ring is updated.
(A similar thing is done on the tx ring for NIC and host stack
ports attached to the switch; a sketch of this pattern follows.)

 */
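
/*
 * The two-phase update described above is only a sketch of the idea;
 * the names below are illustrative pseudocode, not functions of this
 * module:
 *
 *	lock(port);
 *	lease = reserve_slots(ring, n);	// phase 1: reserve under lock
 *	unlock(port);
 *	copy_packets(src, lease);	// may sleep on a page fault
 *	lock(port);
 *	advance_ring(ring, lease);	// phase 2: publish under lock
 *	unlock(port);
 */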

/* --- internals ----
 *
 * Roadmap to the code that implements the above.
 *
 * > 1. a process/thread issues one or more open() on /dev/netmap, to create
 * >    a select()able file descriptor on which events are reported.
 *
 * Internally, we allocate a netmap_priv_d structure, that will be
 * initialized on ioctl(NIOCREGIF). There is one netmap_priv_d
 * structure for each open().
 *
 * os-specific:
 *	FreeBSD: see netmap_open() (netmap_freebsd.c)
 *	linux:   see linux_netmap_open() (netmap_linux.c)
 *
 * > 2. on each descriptor, the process issues an ioctl() to identify
 * >    the interface that should report events to the file descriptor.
 *
 * Implemented by netmap_ioctl(), NIOCREGIF case, with nmr->nr_cmd==0.
 * Most important things happen in netmap_get_na() and
 * netmap_do_regif(), called from there. Additional details can be
 * found in the comments above those functions.
 *
 * In all cases, this action creates/takes-a-reference-to a
 * netmap_*_adapter describing the port, and allocates a netmap_if
 * and all necessary netmap rings, filling them with netmap buffers.
 *
 * In this phase, the sync callbacks for each ring are set (these are used
 * in steps 5 and 6 below). The callbacks depend on the type of adapter.
 * The adapter creation/initialization code puts them in the
 * netmap_adapter (fields na->nm_txsync and na->nm_rxsync). Then, they
 * are copied from there to the netmap_kring's during netmap_do_regif(), by
 * the nm_krings_create() callback. All the nm_krings_create callbacks
 * actually call netmap_krings_create() to perform this and the other
 * common stuff. netmap_krings_create() also takes care of the host rings,
 * if needed, by setting their sync callbacks appropriately.
 *
 * Additional actions depend on the kind of netmap_adapter that has been
 * registered:
 *
 * - netmap_hw_adapter:			[netmap.c]
 *      This is a system netdev/ifp with native netmap support.
 *      The ifp is detached from the host stack by redirecting:
 *        - transmissions (from the network stack) to netmap_transmit()
 *        - receive notifications to the nm_notify() callback for
 *          this adapter. The callback is normally netmap_notify(), unless
 *          the ifp is attached to a bridge using bwrap, in which case it
 *          is netmap_bwrap_intr_notify().
 *
 * - netmap_generic_adapter:		[netmap_generic.c]
 *      A system netdev/ifp without native netmap support.
 *      (the decision about native/non native support is taken in
 *       netmap_get_hw_na(), called by netmap_get_na())
 *
 * - netmap_vp_adapter			[netmap_vale.c]
 *      Returned by netmap_get_bdg_na().
 *      This is a persistent or ephemeral VALE port. Ephemeral ports
 *      are created on the fly if they don't already exist, and are
 *      always attached to a bridge.
 *      Persistent VALE ports must be created separately, and are
 *      then attached like normal NICs. The NIOCREGIF we are examining
 *      will find them only if they had previously been created and
 *      attached (see VALE_CTL below).
 *
 * - netmap_pipe_adapter		[netmap_pipe.c]
 *      Returned by netmap_get_pipe_na().
 *      Both pipe ends are created, if they didn't already exist.
 *
 * - netmap_monitor_adapter		[netmap_monitor.c]
 *      Returned by netmap_get_monitor_na().
 *      If successful, the nm_sync callbacks of the monitored adapter
 *      will be intercepted by the returned monitor.
 *
 * - netmap_bwrap_adapter		[netmap_vale.c]
 *      Cannot be obtained in this way, see VALE_CTL below.
 *
 * os-specific:
 *	linux: we first go through linux_netmap_ioctl() to
 *	       adapt the FreeBSD interface to the linux one.
 *
 * > 3. on each descriptor, the process issues an mmap() request to
 * >    map the shared memory region within the process' address space.
 * >    The list of interesting queues is indicated by a location in
 * >    the shared memory region.
 *
 * os-specific:
 *	FreeBSD: netmap_mmap_single (netmap_freebsd.c).
 *	linux:   linux_netmap_mmap (netmap_linux.c).
 *
 * > 4. using the functions in the netmap(4) userspace API, a process
 * >    can look up the occupation state of a queue, access memory buffers,
 * >    and retrieve received packets or enqueue packets to transmit.
 *
 * These actions do not involve the kernel; an example of such a
 * kernel-free receive loop is sketched below.
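 *
 * As an illustration only (not part of this module), draining an RX
 * ring entirely in userspace may look like the following sketch, using
 * the helpers from net/netmap_user.h ('nifp' obtained as in the sketch
 * for steps 1-6 above; consume_packet() is hypothetical user code):
 *
 *	struct netmap_ring *rxr = NETMAP_RXRING(nifp, 0);
 *	while (!nm_ring_empty(rxr)) {
 *		struct netmap_slot *slot = &rxr->slot[rxr->cur];
 *		char *buf = NETMAP_BUF(rxr, slot->buf_idx);
 *		consume_packet(buf, slot->len);
 *		rxr->cur = nm_ring_next(rxr, rxr->cur);
 *	}
 *	rxr->head = rxr->cur;	// return the slots to the kernel
 *
 * No system call is involved until the next sync/poll.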
 *
 * > 5. using some ioctl()s the process can synchronize the userspace view
 * >    of the queue with the actual status in the kernel. This includes both
 * >    receiving the notification of new packets, and transmitting new
 * >    packets on the output interface.
 *
 * These are implemented in netmap_ioctl(), NIOCTXSYNC and NIOCRXSYNC
 * cases. They invoke the nm_sync callbacks on the netmap_kring
 * structures, as initialized in step 2 and maybe later modified
 * by a monitor. Monitors, however, will always call the original
 * callback before doing anything else.
 *
 * > 6. select() or poll() can be used to wait for events on individual
 * >    transmit or receive queues (or all queues for a given interface).
 *
 * Implemented in netmap_poll(). This will call the same nm_sync()
 * callbacks as in step 5 above.
 *
 * os-specific:
 *	linux: we first go through linux_netmap_poll() to adapt
 *	       the FreeBSD interface to the linux one.
 *
 *	----  VALE_CTL -----
 *
 * VALE switches are controlled by issuing a NIOCREGIF with a non-null
 * nr_cmd in the nmreq structure. These subcommands are handled by
 * netmap_bdg_ctl() in netmap_vale.c. Persistent VALE ports are created
 * and destroyed by issuing the NETMAP_BDG_NEWIF and NETMAP_BDG_DELIF
 * subcommands, respectively.
 *
 * Any network interface known to the system (including a persistent VALE
 * port) can be attached to a VALE switch by issuing the
 * NETMAP_REQ_VALE_ATTACH command (a sketch is given below). After the
 * attachment, persistent VALE ports look exactly like ephemeral VALE
 * ports (as created in step 2 above). The attachment of other interfaces,
 * instead, requires the creation of a netmap_bwrap_adapter. Moreover, the
 * attached interface must be put in netmap mode. This may require the
 * creation of a netmap_generic_adapter if we have no native support for
 * the interface, or if generic adapters have been forced by sysctl.
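 *
 * As an illustration only, attaching an interface to a VALE switch with
 * the newer nmreq_header API may look like the following sketch (field
 * setup abridged; see net/netmap.h for the exact structures):
 *
 *	struct nmreq_header hdr;
 *	struct nmreq_vale_attach req;
 *	bzero(&hdr, sizeof(hdr));
 *	bzero(&req, sizeof(req));
 *	hdr.nr_version = NETMAP_API;
 *	hdr.nr_reqtype = NETMAP_REQ_VALE_ATTACH;
 *	strlcpy(hdr.nr_name, "valeA:em0", sizeof(hdr.nr_name));
 *	hdr.nr_body = (uintptr_t)&req;
 *	req.reg.nr_mode = NR_REG_ALL_NIC;
 *	ioctl(fd, NIOCCTRL, &hdr);	// fd open on /dev/netmap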
 *
 * Both persistent VALE ports and bwraps are handled by netmap_get_bdg_na(),
 * called by nm_bdg_ctl_attach(), and discriminated by the nm_bdg_attach()
 * callback. In the case of the bwrap, the callback creates the
 * netmap_bwrap_adapter. The initialization of the bwrap is then
 * completed by calling netmap_do_regif() on it, in the nm_bdg_ctl()
 * callback (netmap_bwrap_bdg_ctl in netmap_vale.c).
 * A generic adapter for the wrapped ifp will be created if needed, when
 * netmap_get_bdg_na() calls netmap_get_hw_na().
 *
 *
 *	---- DATAPATHS -----
 *
 *              -= SYSTEM DEVICE WITH NATIVE SUPPORT =-
 *
 *    na == NA(ifp) == netmap_hw_adapter created in DEVICE_netmap_attach()
 *
 *    - tx from netmap userspace:
 *	 concurrently:
 *           1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == DEVICE_netmap_txsync()
 *           2) device interrupt handler
 *                na->nm_notify()  == netmap_notify()
 *    - rx from netmap userspace:
 *       concurrently:
 *           1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == DEVICE_netmap_rxsync()
 *           2) device interrupt handler
 *                na->nm_notify()  == netmap_notify()
 *    - rx from host stack
 *       concurrently:
 *           1) host stack
 *                netmap_transmit()
 *                  na->nm_notify  == netmap_notify()
 *           2) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == netmap_rxsync_from_host
 *                  netmap_rxsync_from_host(na, NULL, NULL)
 *    - tx to host stack
 *           ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *             kring->nm_sync() == netmap_txsync_to_host
 *               netmap_txsync_to_host(na)
 *                 nm_os_send_up()
 *                   FreeBSD: na->if_input() == ether_input()
 *                   linux: netif_rx() with NM_MAGIC_PRIORITY_RX
 *
 *
 *              -= SYSTEM DEVICE WITH GENERIC SUPPORT =-
 *
 *    na == NA(ifp) == generic_netmap_adapter created in generic_netmap_attach()
 *
 *    - tx from netmap userspace:
 *       concurrently:
 *           1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *               kring->nm_sync() == generic_netmap_txsync()
 *                   nm_os_generic_xmit_frame()
 *                       linux:   dev_queue_xmit() with NM_MAGIC_PRIORITY_TX
 *                           ifp->ndo_start_xmit == generic_ndo_start_xmit()
 *                               gna->save_start_xmit == orig. dev. start_xmit
 *                       FreeBSD: na->if_transmit() == orig. dev if_transmit
 *           2) generic_mbuf_destructor()
 *                   na->nm_notify() == netmap_notify()
 *    - rx from netmap userspace:
 *           1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                   kring->nm_sync() == generic_netmap_rxsync()
 *           2) device driver hook
 *                   generic_rx_handler()
 *                           mbq_safe_enqueue()
 *                   na->nm_notify() == netmap_notify()
 *    - rx from host stack
 *        FreeBSD: same as native
 *        Linux: same as native except:
 *           1) host stack
 *                dev_queue_xmit() without NM_MAGIC_PRIORITY_TX
 *                    ifp->ndo_start_xmit == generic_ndo_start_xmit()
 *                        netmap_transmit()
 *                            na->nm_notify() == netmap_notify()
 *    - tx to host stack (same as native):
 *
 *
 *                           -= VALE =-
 *
 *   INCOMING:
 *
 *      - VALE ports:
 *            ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == netmap_vp_txsync()
 *
 *      - system device with native support:
 *         from cable:
 *             interrupt
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
 *                     kring->nm_sync() == DEVICE_netmap_rxsync()
 *                     netmap_vp_txsync()
 *                     kring->nm_sync() == DEVICE_netmap_rxsync()
 *         from host stack:
 *             netmap_transmit()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
 *                     kring->nm_sync() == netmap_rxsync_from_host()
 *                     netmap_vp_txsync()
 *
 *      - system device with generic support:
 *         from device driver:
 *             generic_rx_handler()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
 *                     kring->nm_sync() == generic_netmap_rxsync()
 *                     netmap_vp_txsync()
 *                     kring->nm_sync() == generic_netmap_rxsync()
 *         from host stack:
 *             netmap_transmit()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
 *                     kring->nm_sync() == netmap_rxsync_from_host()
 *                     netmap_vp_txsync()
 *
 *      (all cases) --> nm_bdg_flush()
 *                         dest_na->nm_notify() == (see below)
 *
 *   OUTGOING:
 *
 *      - VALE ports:
 *         concurrently:
 *             1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == netmap_vp_rxsync()
 *             2) from nm_bdg_flush()
 *                na->nm_notify() == netmap_notify()
 *
 *      - system device with native support:
 *          to cable:
 *             na->nm_notify() == netmap_bwrap_notify()
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == DEVICE_netmap_txsync()
 *                 netmap_vp_rxsync()
 *          to host stack:
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == netmap_txsync_to_host
 *                 netmap_vp_rxsync_locked()
 *
 *      - system device with generic adapter:
 *          to device driver:
 *             na->nm_notify() == netmap_bwrap_notify()
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == generic_netmap_txsync()
 *                 netmap_vp_rxsync()
 *          to host stack:
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == netmap_txsync_to_host
 *                 netmap_vp_rxsync_locked()
 *
 */

/*
 * OS-specific code that is used only within this file.
 * Other OS-specific code that must be accessed by drivers
 * is present in netmap_kern.h
 */

#if defined(__FreeBSD__)
#include <sys/cdefs.h> /* prerequisite */
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/conf.h>	/* cdevsw struct, UID, GID */
#include <sys/filio.h>	/* FIONBIO */
#include <sys/sockio.h>
#include <sys/socketvar.h>	/* struct socket */
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/jail.h>
#include <sys/epoch.h>
#include <net/vnet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/bpf.h>		/* BIOCIMMEDIATE */
#include <machine/bus.h>	/* bus_dmamap_* */
#include <sys/endian.h>
#include <sys/refcount.h>
#include <net/ethernet.h>	/* ETHER_BPF_MTAP */

#elif defined(linux)

#include "bsd_glue.h"

#elif defined(__APPLE__)

#warning OSX support is only partial
#include "osx_glue.h"

#elif defined (_WIN32)

#include "win_glue.h"

#else

#error	Unsupported platform

#endif /* unsupported */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>

/* user-controlled variables */
int netmap_verbose;
#ifdef CONFIG_NETMAP_DEBUG
int netmap_debug;
#endif /* CONFIG_NETMAP_DEBUG */

static int netmap_no_timestamp; /* don't timestamp on rxsync */
int netmap_no_pendintr = 1;
int netmap_txsync_retry = 2;
static int netmap_fwd = 0;	/* force transparent forwarding */

/*
 * netmap_admode selects the netmap mode to use.
 * Invalid values are reset to NETMAP_ADMODE_BEST
 */
enum {	NETMAP_ADMODE_BEST = 0,	/* use native, fallback to generic */
	NETMAP_ADMODE_NATIVE,	/* either native or none */
	NETMAP_ADMODE_GENERIC,	/* force generic */
	NETMAP_ADMODE_LAST };
static int netmap_admode = NETMAP_ADMODE_BEST;

/* netmap_generic_mit controls mitigation of RX notifications for
 * the generic netmap adapter. The value is a time interval in
 * nanoseconds. */
int netmap_generic_mit = 100*1000;

/* We use by default netmap-aware qdiscs with generic netmap adapters,
 * even if there can be a little performance hit with hardware NICs.
 * However, using the qdisc is the safer approach, for two reasons:
 * 1) it prevents non-fifo qdiscs from breaking the TX notification
 *    scheme, which is based on mbuf destructors when txqdisc is
 *    used.
 * 2) it makes it possible to transmit over software devices that
 *    change skb->dev, like bridge, veth, ...
 *
 * Anyway, users looking for the best performance should
 * use native adapters.
 */
int netmap_generic_txqdisc = 1;

/* Default number of slots and queues for generic adapters. */
int netmap_generic_ringsize = 1024;
int netmap_generic_rings = 1;

/* Non-zero to enable checksum offloading in NIC drivers */
int netmap_generic_hwcsum = 0;

/* Non-zero if ptnet devices are allowed to use virtio-net headers. */
int ptnet_vnet_hdr = 1;

/*
 * SYSCTL calls are grouped between SYSBEGIN and SYSEND to be emulated
 * in some other operating systems
 */
SYSBEGIN(main_init);

SYSCTL_DECL(_dev_netmap);
SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Netmap args");
SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
    CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
#ifdef CONFIG_NETMAP_DEBUG
SYSCTL_INT(_dev_netmap, OID_AUTO, debug,
    CTLFLAG_RW, &netmap_debug, 0, "Debug messages");
#endif /* CONFIG_NETMAP_DEBUG */
SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
    CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp");
SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, CTLFLAG_RW, &netmap_no_pendintr,
    0, "Always look for new received packets.");
SYSCTL_INT(_dev_netmap, OID_AUTO, txsync_retry, CTLFLAG_RW,
    &netmap_txsync_retry, 0, "Number of txsync loops in bridge's flush.");

SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0,
    "Force NR_FORWARD mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, admode, CTLFLAG_RW, &netmap_admode, 0,
    "Adapter mode. 0 selects the best option available, "
    "1 forces native adapter, 2 forces emulated adapter");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_hwcsum, CTLFLAG_RW, &netmap_generic_hwcsum,
    0, "Hardware checksums. 0 to disable checksum generation by the NIC (default), "
    "1 to enable checksum generation by the NIC");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_mit, CTLFLAG_RW, &netmap_generic_mit,
    0, "RX notification interval in nanoseconds");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_ringsize, CTLFLAG_RW,
    &netmap_generic_ringsize, 0,
    "Number of per-ring slots for emulated netmap mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_rings, CTLFLAG_RW,
    &netmap_generic_rings, 0,
    "Number of TX/RX queues for emulated netmap adapters");
#ifdef linux
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_txqdisc, CTLFLAG_RW,
    &netmap_generic_txqdisc, 0, "Use qdisc for generic adapters");
#endif
SYSCTL_INT(_dev_netmap, OID_AUTO, ptnet_vnet_hdr, CTLFLAG_RW, &ptnet_vnet_hdr,
    0, "Allow ptnet devices to use virtio-net headers");

SYSEND;

NMG_LOCK_T	netmap_global_lock;

/*
 * mark the ring as stopped, and run through the locks
 * to make sure other users get to see it.
 * stopped must be either NR_KR_STOPPED (for unbounded stop)
 * or NR_KR_LOCKED (brief stop for mutual exclusion purposes)
 */
static void
netmap_disable_ring(struct netmap_kring *kr, int stopped)
{
	nm_kr_stop(kr, stopped);
	// XXX check if nm_kr_stop is sufficient
	mtx_lock(&kr->q_lock);
	mtx_unlock(&kr->q_lock);
	nm_kr_put(kr);
}

/* stop or enable a single ring */
void
netmap_set_ring(struct netmap_adapter *na, u_int ring_id, enum txrx t, int stopped)
{
	if (stopped)
		netmap_disable_ring(NMR(na, t)[ring_id], stopped);
	else
		NMR(na, t)[ring_id]->nkr_stopped = 0;
}

/* stop or enable all the rings of na */
void
netmap_set_all_rings(struct netmap_adapter *na, int stopped)
{
	int i;
	enum txrx t;

	if (!nm_netmap_on(na))
		return;

	for_rx_tx(t) {
		for (i = 0; i < netmap_real_rings(na, t); i++) {
			netmap_set_ring(na, i, t, stopped);
		}
	}
}

/*
 * Convenience function used in drivers. Waits for current txsync()s/rxsync()s
 * to finish and prevents any new one from starting. Call this before turning
 * netmap mode off, or before removing the hardware rings (e.g., on module
 * unload).
 */
void
netmap_disable_all_rings(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		netmap_set_all_rings(NA(ifp), NM_KR_STOPPED);
	}
}

/*
 * Convenience function used in drivers. Re-enables rxsync and txsync on the
 * adapter's rings. In linux drivers, this should be placed near each
 * napi_enable().
 */
void
netmap_enable_all_rings(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		netmap_set_all_rings(NA(ifp), 0 /* enabled */);
	}
}

void
netmap_make_zombie(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		struct netmap_adapter *na = NA(ifp);
		netmap_set_all_rings(na, NM_KR_LOCKED);
		na->na_flags |= NAF_ZOMBIE;
		netmap_set_all_rings(na, 0);
	}
}

void
netmap_undo_zombie(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		struct netmap_adapter *na = NA(ifp);
		if (na->na_flags & NAF_ZOMBIE) {
			netmap_set_all_rings(na, NM_KR_LOCKED);
			na->na_flags &= ~NAF_ZOMBIE;
			netmap_set_all_rings(na, 0);
		}
	}
}

/*
 * generic bound_checking function
 */
u_int
nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg)
{
	u_int oldv = *v;
	const char *op = NULL;

	if (dflt < lo)
		dflt = lo;
	if (dflt > hi)
		dflt = hi;
	if (oldv < lo) {
		*v = dflt;
		op = "Bump";
	} else if (oldv > hi) {
		*v = dflt;
		op = "Clamp";
	}
	if (op && msg)
		nm_prinf("%s %s to %d (was %d)", op, msg, *v, oldv);
	return *v;
}
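
/*
 * For instance (illustrative values only), a caller could sanitize a
 * user-supplied ring size like this:
 *
 *	u_int ringsize = 200000;	// out of range
 *	nm_bound_var(&ringsize, 1024, 64, 16384, "ringsize");
 *	// ringsize is now 1024 and a "Clamp ringsize" message is logged
 */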

/*
 * packet-dump function, user-supplied or static buffer.
 * The destination buffer must be at least 30+4*len
 */
const char *
nm_dump_buf(char *p, int len, int lim, char *dst)
{
	static char _dst[8192];
	int i, j, i0;
	static char hex[] ="0123456789abcdef";
	char *o;	/* output position */

#define P_HI(x)	hex[((x) & 0xf0)>>4]
#define P_LO(x)	hex[((x) & 0xf)]
#define P_C(x)	((x) >= 0x20 && (x) <= 0x7e ? (x) : '.')
	if (!dst)
		dst = _dst;
	if (lim <= 0 || lim > len)
		lim = len;
	o = dst;
	sprintf(o, "buf 0x%p len %d lim %d\n", p, len, lim);
	o += strlen(o);
	/* hexdump routine */
	for (i = 0; i < lim; ) {
		sprintf(o, "%5d: ", i);
		o += strlen(o);
		memset(o, ' ', 48);
		i0 = i;
		for (j=0; j < 16 && i < lim; i++, j++) {
			o[j*3] = P_HI(p[i]);
			o[j*3+1] = P_LO(p[i]);
		}
		i = i0;
		for (j=0; j < 16 && i < lim; i++, j++)
			o[j + 48] = P_C(p[i]);
		o[j+48] = '\n';
		o += j+49;
	}
	*o = '\0';
#undef P_HI
#undef P_LO
#undef P_C
	return dst;
}

/*
 * Fetch configuration from the device, to cope with dynamic
 * reconfigurations after loading the module.
 */
/* call with NMG_LOCK held */
int
netmap_update_config(struct netmap_adapter *na)
{
	struct nm_config_info info;

	bzero(&info, sizeof(info));
	if (na->nm_config == NULL ||
	    na->nm_config(na, &info)) {
		/* take whatever we had at init time */
		info.num_tx_rings = na->num_tx_rings;
		info.num_tx_descs = na->num_tx_desc;
		info.num_rx_rings = na->num_rx_rings;
		info.num_rx_descs = na->num_rx_desc;
		info.rx_buf_maxsize = na->rx_buf_maxsize;
	}

	if (na->num_tx_rings == info.num_tx_rings &&
	    na->num_tx_desc == info.num_tx_descs &&
	    na->num_rx_rings == info.num_rx_rings &&
	    na->num_rx_desc == info.num_rx_descs &&
	    na->rx_buf_maxsize == info.rx_buf_maxsize)
		return 0; /* nothing changed */
	if (na->active_fds == 0) {
		na->num_tx_rings = info.num_tx_rings;
		na->num_tx_desc = info.num_tx_descs;
		na->num_rx_rings = info.num_rx_rings;
		na->num_rx_desc = info.num_rx_descs;
		na->rx_buf_maxsize = info.rx_buf_maxsize;
		if (netmap_verbose)
			nm_prinf("configuration changed for %s: txring %d x %d, "
				"rxring %d x %d, rxbufsz %d",
				na->name, na->num_tx_rings, na->num_tx_desc,
				na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize);
		return 0;
	}
	nm_prerr("WARNING: configuration changed for %s while active: "
		"txring %d x %d, rxring %d x %d, rxbufsz %d",
		na->name, info.num_tx_rings, info.num_tx_descs,
		info.num_rx_rings, info.num_rx_descs,
		info.rx_buf_maxsize);
	return 1;
}

/* nm_sync callbacks for the host rings */
static int netmap_txsync_to_host(struct netmap_kring *kring, int flags);
static int netmap_rxsync_from_host(struct netmap_kring *kring, int flags);

/* create the krings array and initialize the fields common to all adapters.
 * The array layout is this:
 *
 *                    +----------+
 * na->tx_rings ----->|          | \
 *                    |          |  } na->num_tx_rings
 *                    |          | /
 *                    +----------+
 *                    |          |    host tx kring
 * na->rx_rings ----> +----------+
 *                    |          | \
 *                    |          |  } na->num_rx_rings
 *                    |          | /
 *                    +----------+
 *                    |          |    host rx kring
 *                    +----------+
 * na->tailroom ----->|          | \
 *                    |          |  } tailroom bytes
 *                    |          | /
 *                    +----------+
 *
 * Note: for compatibility, host krings are created even when not needed.
 * The tailroom space is currently used by vale ports for allocating leases.
 */
/* call with NMG_LOCK held */
int
netmap_krings_create(struct netmap_adapter *na, u_int tailroom)
{
	u_int i, len, ndesc;
	struct netmap_kring *kring;
	u_int n[NR_TXRX];
	enum txrx t;
	int err = 0;

	if (na->tx_rings != NULL) {
		if (netmap_debug & NM_DEBUG_ON)
			nm_prerr("warning: krings were already created");
		return 0;
	}

	/* account for the (possibly fake) host rings */
	n[NR_TX] = netmap_all_rings(na, NR_TX);
	n[NR_RX] = netmap_all_rings(na, NR_RX);

	len = (n[NR_TX] + n[NR_RX]) *
		(sizeof(struct netmap_kring) + sizeof(struct netmap_kring *))
		+ tailroom;

	na->tx_rings = nm_os_malloc((size_t)len);
	if (na->tx_rings == NULL) {
		nm_prerr("Cannot allocate krings");
		return ENOMEM;
	}
	na->rx_rings = na->tx_rings + n[NR_TX];
	na->tailroom = na->rx_rings + n[NR_RX];

	/* link the krings in the krings array */
	kring = (struct netmap_kring *)((char *)na->tailroom + tailroom);
	for (i = 0; i < n[NR_TX] + n[NR_RX]; i++) {
		na->tx_rings[i] = kring;
		kring++;
	}

	/*
	 * All fields in krings are 0 except the one initialized below.
	 * but better be explicit on important kring fields.
	 */
	for_rx_tx(t) {
		ndesc = nma_get_ndesc(na, t);
		for (i = 0; i < n[t]; i++) {
			kring = NMR(na, t)[i];
			bzero(kring, sizeof(*kring));
			kring->notify_na = na;
			kring->ring_id = i;
			kring->tx = t;
			kring->nkr_num_slots = ndesc;
			kring->nr_mode = NKR_NETMAP_OFF;
			kring->nr_pending_mode = NKR_NETMAP_OFF;
			if (i < nma_get_nrings(na, t)) {
				kring->nm_sync = (t == NR_TX ? na->nm_txsync : na->nm_rxsync);
			} else {
				if (!(na->na_flags & NAF_HOST_RINGS))
					kring->nr_kflags |= NKR_FAKERING;
				kring->nm_sync = (t == NR_TX ?
						netmap_txsync_to_host:
						netmap_rxsync_from_host);
			}
			kring->nm_notify = na->nm_notify;
			kring->rhead = kring->rcur = kring->nr_hwcur = 0;
			/*
			 * IMPORTANT: Always keep one slot empty.
			 */
			kring->rtail = kring->nr_hwtail = (t == NR_TX ? ndesc - 1 : 0);
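			/*
			 * For example, with ndesc == 512 a TX kring starts
			 * with hwcur == 0 and hwtail == 511: userspace owns
			 * 511 slots and one is kept empty, so that a full
			 * ring can be told apart from an empty one.
			 */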
			snprintf(kring->name, sizeof(kring->name) - 1, "%s %s%d", na->name,
					nm_txrx2str(t), i);
			nm_prdis("ktx %s h %d c %d t %d",
				kring->name, kring->rhead, kring->rcur, kring->rtail);
			err = nm_os_selinfo_init(&kring->si, kring->name);
			if (err) {
				netmap_krings_delete(na);
				return err;
			}
			mtx_init(&kring->q_lock, (t == NR_TX ? "nm_txq_lock" : "nm_rxq_lock"), NULL, MTX_DEF);
			kring->na = na;	/* setting this field marks the mutex as initialized */
		}
		err = nm_os_selinfo_init(&na->si[t], na->name);
		if (err) {
			netmap_krings_delete(na);
			return err;
		}
	}

	return 0;
}

/* undo the actions performed by netmap_krings_create */
/* call with NMG_LOCK held */
void
netmap_krings_delete(struct netmap_adapter *na)
{
	struct netmap_kring **kring = na->tx_rings;
	enum txrx t;

	if (na->tx_rings == NULL) {
		if (netmap_debug & NM_DEBUG_ON)
			nm_prerr("warning: krings were already deleted");
		return;
	}

	for_rx_tx(t)
		nm_os_selinfo_uninit(&na->si[t]);

	/* we rely on the krings layout described above */
	for ( ; kring != na->tailroom; kring++) {
		if ((*kring)->na != NULL)
			mtx_destroy(&(*kring)->q_lock);
		nm_os_selinfo_uninit(&(*kring)->si);
	}
	nm_os_free(na->tx_rings);
	na->tx_rings = na->rx_rings = na->tailroom = NULL;
}

/*
 * Destructor for NIC ports. They also have an mbuf queue
 * on the rings connected to the host, so we need to purge
 * them first.
 */
/* call with NMG_LOCK held */
void
netmap_hw_krings_delete(struct netmap_adapter *na)
{
	u_int lim = netmap_real_rings(na, NR_RX), i;

	for (i = nma_get_nrings(na, NR_RX); i < lim; i++) {
		struct mbq *q = &NMR(na, NR_RX)[i]->rx_queue;
		nm_prdis("destroy sw mbq with len %d", mbq_len(q));
		mbq_purge(q);
		mbq_safe_fini(q);
	}
	netmap_krings_delete(na);
}

static void
netmap_mem_drop(struct netmap_adapter *na)
{
	int last = netmap_mem_deref(na->nm_mem, na);
	/* if the native allocator had been overridden on regif,
	 * restore it now and drop the temporary one
	 */
	if (last && na->nm_mem_prev) {
		netmap_mem_put(na->nm_mem);
		na->nm_mem = na->nm_mem_prev;
		na->nm_mem_prev = NULL;
	}
}

/*
 * Undo everything that was done in netmap_do_regif(). In particular,
 * call nm_register(ifp,0) to stop netmap mode on the interface and
 * revert to normal operation.
 */
/* call with NMG_LOCK held */
static void netmap_unset_ringid(struct netmap_priv_d *);
static void netmap_krings_put(struct netmap_priv_d *);
void
netmap_do_unregif(struct netmap_priv_d *priv)
{
	struct netmap_adapter *na = priv->np_na;

	NMG_LOCK_ASSERT();
	na->active_fds--;
	/* unset nr_pending_mode and possibly release exclusive mode */
	netmap_krings_put(priv);

#ifdef	WITH_MONITOR
	/* XXX check whether we have to do something with monitor
	 * when rings change nr_mode. */
	if (na->active_fds <= 0) {
		/* walk through all the rings and tell any monitor
		 * that the port is going to exit netmap mode
		 */
		netmap_monitor_stop(na);
	}
#endif

	if (na->active_fds <= 0 || nm_kring_pending(priv)) {
		na->nm_register(na, 0);
	}

	/* delete rings and buffers that are no longer needed */
	netmap_mem_rings_delete(na);

	if (na->active_fds <= 0) {	/* last instance */
		/*
		 * (TO CHECK) We enter here
		 * when the last reference to this file descriptor goes
		 * away. This means we cannot have any pending poll()
		 * or interrupt routine operating on the structure.
		 * XXX The file may be closed in a thread while
		 * another thread is using it.
		 * Linux keeps the file opened until the last reference
		 * by any outstanding ioctl/poll or mmap is gone.
		 * FreeBSD does not track mmap()s (but we do) and
		 * wakes up any sleeping poll(). Need to check what
		 * happens if the close() occurs while a concurrent
		 * syscall is running.
		 */
		if (netmap_debug & NM_DEBUG_ON)
			nm_prinf("deleting last instance for %s", na->name);

		if (nm_netmap_on(na)) {
			nm_prerr("BUG: netmap on while going to delete the krings");
		}

		na->nm_krings_delete(na);

		/* restore the default number of host tx and rx rings */
		if (na->na_flags & NAF_HOST_RINGS) {
			na->num_host_tx_rings = 1;
			na->num_host_rx_rings = 1;
		} else {
			na->num_host_tx_rings = 0;
			na->num_host_rx_rings = 0;
		}
	}

	/* possibly decrement counter of tx_si/rx_si users */
	netmap_unset_ringid(priv);
	/* delete the nifp */
	netmap_mem_if_delete(na, priv->np_nifp);
	/* drop the allocator */
	netmap_mem_drop(na);
	/* mark the priv as unregistered */
	priv->np_na = NULL;
	priv->np_nifp = NULL;
}

struct netmap_priv_d*
netmap_priv_new(void)
{
	struct netmap_priv_d *priv;

	priv = nm_os_malloc(sizeof(struct netmap_priv_d));
	if (priv == NULL)
		return NULL;
	priv->np_refs = 1;
	nm_os_get_module();
	return priv;
}

/*
 * Destructor of the netmap_priv_d, called when the fd is closed
 * Action: undo all the things done by NIOCREGIF,
 * On FreeBSD we need to track whether there are active mmap()s,
 * and we use np_active_mmaps for that. On linux, the field is always 0.
 * Return: 1 if we can free priv, 0 otherwise.
 */
/* call with NMG_LOCK held */
void
netmap_priv_delete(struct netmap_priv_d *priv)
{
	struct netmap_adapter *na = priv->np_na;

	/* number of active references to this fd */
	if (--priv->np_refs > 0) {
		return;
	}
	nm_os_put_module();
	if (na) {
		netmap_do_unregif(priv);
	}
	netmap_unget_na(na, priv->np_ifp);
	bzero(priv, sizeof(*priv));	/* for safety */
	nm_os_free(priv);
}

/* call with NMG_LOCK *not* held */
void
netmap_dtor(void *data)
{
	struct netmap_priv_d *priv = data;

	NMG_LOCK();
	netmap_priv_delete(priv);
	NMG_UNLOCK();
}

/*
 * Handlers for synchronization of the rings from/to the host stack.
 * These are associated to a network interface and are just another
 * ring pair managed by userspace.
 *
 * Netmap also supports transparent forwarding (NS_FORWARD and NR_FORWARD
 * flags):
 *
 * - Before releasing buffers on hw RX rings, the application can mark
 *   them with the NS_FORWARD flag. During the next RXSYNC or poll(), they
 *   will be forwarded to the host stack, similarly to what would happen if
 *   the application moved them to the host TX ring.
 *
 * - Before releasing buffers on the host RX ring, the application can
 *   mark them with the NS_FORWARD flag. During the next RXSYNC or poll(),
 *   they will be forwarded to the hw TX rings, saving the application
 *   from doing the same task in user-space.
 *
 * Transparent forwarding can be enabled per-ring, by setting the NR_FORWARD
 * flag, or globally with the netmap_fwd sysctl; a userspace sketch is
 * given below.
 *
 * The transfer NIC --> host is relatively easy, just encapsulate
 * into mbufs and we are done. The host --> NIC side is slightly
 * harder because there might not be room in the tx ring so it
 * might take a while before releasing the buffer.
 */
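
/*
 * As an illustration only, marking host-bound packets on a hw RX ring
 * from userspace may look like the following sketch (helpers from
 * net/netmap_user.h; the per-packet policy is hypothetical user code):
 *
 *	struct netmap_ring *rxr = NETMAP_RXRING(nifp, 0);
 *	while (!nm_ring_empty(rxr)) {
 *		struct netmap_slot *slot = &rxr->slot[rxr->cur];
 *		if (wants_host_stack(slot))	// user policy
 *			slot->flags |= NS_FORWARD;
 *		rxr->cur = nm_ring_next(rxr, rxr->cur);
 *	}
 *	rxr->head = rxr->cur;		// release the slots
 *	ioctl(fd, NIOCRXSYNC, NULL);	// marked buffers go up
 *
 * This requires NR_FORWARD on the ring (or the netmap_fwd sysctl).
 */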

/*
 * Pass a whole queue of mbufs to the host stack as coming from 'dst'
 * We do not need to lock because the queue is private.
 * After this call the queue is empty.
 */
static void
netmap_send_up(struct ifnet *dst, struct mbq *q)
{
	struct mbuf *m;
	struct mbuf *head = NULL, *prev = NULL;
#ifdef __FreeBSD__
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);
#endif /* __FreeBSD__ */
	/* Send packets up, outside the lock; head/prev machinery
	 * is only useful for Windows. */
	while ((m = mbq_dequeue(q)) != NULL) {
		if (netmap_debug & NM_DEBUG_HOST)
			nm_prinf("sending up pkt %p size %d", m, MBUF_LEN(m));
		prev = nm_os_send_up(dst, m, prev);
		if (head == NULL)
			head = prev;
	}
	if (head)
		nm_os_send_up(dst, NULL, head);
#ifdef __FreeBSD__
	NET_EPOCH_EXIT(et);
#endif /* __FreeBSD__ */
	mbq_fini(q);
}

/*
 * Scan the buffers from hwcur to ring->head, and put a copy of those
 * marked NS_FORWARD (or all of them if forced) into a queue of mbufs.
 * Drop remaining packets in the unlikely event
 * of an mbuf shortage.
 */
static void
netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force)
{
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	u_int n;
	struct netmap_adapter *na = kring->na;

	for (n = kring->nr_hwcur; n != head; n = nm_next(n, lim)) {
		struct mbuf *m;
		struct netmap_slot *slot = &kring->ring->slot[n];

		if ((slot->flags & NS_FORWARD) == 0 && !force)
			continue;
		if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE(na)) {
			nm_prlim(5, "bad pkt at %d len %d", n, slot->len);
			continue;
		}
		slot->flags &= ~NS_FORWARD; // XXX needed ?
		/* XXX TODO: adapt to the case of a multisegment packet */
		m = m_devget(NMB(na, slot), slot->len, 0, na->ifp, NULL);

		if (m == NULL)
			break;
		mbq_enqueue(q, m);
	}
}

static inline int
_nm_may_forward(struct netmap_kring *kring)
{
	return	((netmap_fwd || kring->ring->flags & NR_FORWARD) &&
		 kring->na->na_flags & NAF_HOST_RINGS &&
		 kring->tx == NR_RX);
}

static inline int
nm_may_forward_up(struct netmap_kring *kring)
{
	return	_nm_may_forward(kring) &&
		 kring->ring_id != kring->na->num_rx_rings;
}

static inline int
nm_may_forward_down(struct netmap_kring *kring, int sync_flags)
{
	return	_nm_may_forward(kring) &&
		 (sync_flags & NAF_CAN_FORWARD_DOWN) &&
		 kring->ring_id == kring->na->num_rx_rings;
}

/*
 * Send to the NIC rings packets marked NS_FORWARD between
 * kring->nr_hwcur and kring->rhead.
 * Called under kring->rx_queue.lock on the sw rx ring.
 *
 * It can only be called if the user opened all the TX hw rings,
 * see NAF_CAN_FORWARD_DOWN flag.
 * We can touch the TX netmap rings (slots, head and cur) since
 * we are in poll/ioctl system call context, and the application
 * is not supposed to touch the ring (using a different thread)
 * during the execution of the system call.
 */
static u_int
netmap_sw_to_nic(struct netmap_adapter *na)
{
	struct netmap_kring *kring = na->rx_rings[na->num_rx_rings];
	struct netmap_slot *rxslot = kring->ring->slot;
	u_int i, rxcur = kring->nr_hwcur;
	u_int const head = kring->rhead;
	u_int const src_lim = kring->nkr_num_slots - 1;
	u_int sent = 0;

	/* scan rings to find space, then fill as much as possible */
	for (i = 0; i < na->num_tx_rings; i++) {
		struct netmap_kring *kdst = na->tx_rings[i];
		struct netmap_ring *rdst = kdst->ring;
		u_int const dst_lim = kdst->nkr_num_slots - 1;

		/* XXX do we trust ring or kring->rcur,rtail ? */
		for (; rxcur != head && !nm_ring_empty(rdst);
		     rxcur = nm_next(rxcur, src_lim) ) {
			struct netmap_slot *src, *dst, tmp;
			u_int dst_head = rdst->head;

			src = &rxslot[rxcur];
			if ((src->flags & NS_FORWARD) == 0 && !netmap_fwd)
				continue;

			if (dst_head == rdst->tail) /* no more slots */
				break;

			dst = &rdst->slot[dst_head];
			tmp = *src;

			src->buf_idx = dst->buf_idx;
			src->flags = NS_BUF_CHANGED;

			dst->buf_idx = tmp.buf_idx;
			dst->len = tmp.len;
			dst->flags = NS_BUF_CHANGED;

			rdst->head = rdst->cur = nm_next(dst_head, dst_lim);
			sent++;
		}
		/* if (sent) XXX txsync ? it would be just an optimization */
	}
	return sent;
}

/*
 * netmap_txsync_to_host() passes packets up. We are called from a
 * system call in user process context, and the only contention
 * can be among multiple user threads erroneously calling
 * this routine concurrently.
 */
static int
netmap_txsync_to_host(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	struct mbq q;

	/* Take packets from hwcur to head and pass them up.
	 * Force hwcur = head since netmap_grab_packets() stops at head
	 */
	mbq_init(&q);
	netmap_grab_packets(kring, &q, 1 /* force */);
	nm_prdis("have %d pkts in queue", mbq_len(&q));
	kring->nr_hwcur = head;
	kring->nr_hwtail = head + lim;
	if (kring->nr_hwtail > lim)
		kring->nr_hwtail -= lim + 1;

	netmap_send_up(na->ifp, &q);
	return 0;
}

/*
 * rxsync backend for packets coming from the host stack.
 * They have been put in kring->rx_queue by netmap_transmit().
 * We protect access to the kring using kring->rx_queue.lock
 *
 * also moves to the nic hw rings any packet the user has marked
 * for transparent-mode forwarding, then sets the NR_FORWARD
 * flag in the kring to let the caller push them out
 */
static int
netmap_rxsync_from_host(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i, n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int ret = 0;
	struct mbq *q = &kring->rx_queue, fq;

	mbq_init(&fq); /* fq holds packets to be freed */

	mbq_lock(q);

	/* First part: import newly received packets */
	n = mbq_len(q);
	if (n) { /* grab packets from the queue */
		struct mbuf *m;
		uint32_t stop_i;

		nm_i = kring->nr_hwtail;
		stop_i = nm_prev(kring->nr_hwcur, lim);
		while ( nm_i != stop_i && (m = mbq_dequeue(q)) != NULL ) {
			int len = MBUF_LEN(m);
			struct netmap_slot *slot = &ring->slot[nm_i];

			m_copydata(m, 0, len, NMB(na, slot));
			nm_prdis("nm %d len %d", nm_i, len);
			if (netmap_debug & NM_DEBUG_HOST)
				nm_prinf("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL));

			slot->len = len;
			slot->flags = 0;
			nm_i = nm_next(nm_i, lim);
			mbq_enqueue(&fq, m);
		}
		kring->nr_hwtail = nm_i;
	}

	/*
	 * Second part: skip past packets that userspace has released.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) { /* something was released */
		if (nm_may_forward_down(kring, flags)) {
			ret = netmap_sw_to_nic(na);
			if (ret > 0) {
				kring->nr_kflags |= NR_FORWARD;
				ret = 0;
			}
		}
		kring->nr_hwcur = head;
	}

	mbq_unlock(q);

	mbq_purge(&fq);
	mbq_fini(&fq);

	return ret;
}

/* Get a netmap adapter for the port.
 *
 * If it is possible to satisfy the request, return 0
 * with *na containing the netmap adapter found.
 * Otherwise return an error code, with *na containing NULL.
 *
 * When the port is attached to a bridge, we always return
 * EBUSY.
 * Otherwise, if the port is already bound to a file descriptor,
 * then we unconditionally return the existing adapter into *na.
 * In all the other cases, we return (into *na) either native,
 * generic or NULL, according to the following table:
 *
 *					native_support
 * active_fds   dev.netmap.admode         YES     NO
 * -------------------------------------------------------
 *    >0              *                 NA(ifp) NA(ifp)
 *
 *     0        NETMAP_ADMODE_BEST      NATIVE  GENERIC
 *     0        NETMAP_ADMODE_NATIVE    NATIVE   NULL
 *     0        NETMAP_ADMODE_GENERIC   GENERIC GENERIC
 *
 */
static void netmap_hw_dtor(struct netmap_adapter *); /* needed by NM_IS_NATIVE() */
int
netmap_get_hw_na(struct ifnet *ifp, struct netmap_mem_d *nmd, struct netmap_adapter **na)
{
	/* generic support */
	int i = netmap_admode;	/* Take a snapshot. */
	struct netmap_adapter *prev_na;
	int error = 0;

	*na = NULL; /* default */

	/* reset in case of invalid value */
	if (i < NETMAP_ADMODE_BEST || i >= NETMAP_ADMODE_LAST)
		i = netmap_admode = NETMAP_ADMODE_BEST;

	if (NM_NA_VALID(ifp)) {
		prev_na = NA(ifp);
		/* If an adapter already exists, return it if
		 * there are active file descriptors or if
		 * netmap is not forced to use generic
		 * adapters.
		 */
		if (NETMAP_OWNED_BY_ANY(prev_na)
			|| i != NETMAP_ADMODE_GENERIC
			|| prev_na->na_flags & NAF_FORCE_NATIVE
#ifdef WITH_PIPES
			/* ugly, but we cannot allow an adapter switch
			 * if some pipe is referring to this one
			 */
			|| prev_na->na_next_pipe > 0
#endif
		) {
			*na = prev_na;
			goto assign_mem;
		}
	}

	/* If there isn't native support and netmap is not allowed
	 * to use generic adapters, we cannot satisfy the request.
	 */
	if (!NM_IS_NATIVE(ifp) && i == NETMAP_ADMODE_NATIVE)
		return EOPNOTSUPP;

	/* Otherwise, create a generic adapter and return it,
	 * saving the previously used netmap adapter, if any.
	 *
	 * Note that here 'prev_na', if not NULL, MUST be a
	 * native adapter, and CANNOT be a generic one. This is
	 * true because generic adapters are created on demand, and
	 * destroyed when not used anymore. Therefore, if the adapter
	 * currently attached to an interface 'ifp' is generic, it
	 * must be that
	 * (NA(ifp)->active_fds > 0 || NETMAP_OWNED_BY_KERN(NA(ifp))).
	 * Consequently, if NA(ifp) is generic, we will enter one of
	 * the branches above. This ensures that we never override
	 * a generic adapter with another generic adapter.
	 */
	error = generic_netmap_attach(ifp);
	if (error)
		return error;

	*na = NA(ifp);

assign_mem:
	if (nmd != NULL && !((*na)->na_flags & NAF_MEM_OWNER) &&
	    (*na)->active_fds == 0 && ((*na)->nm_mem != nmd)) {
		(*na)->nm_mem_prev = (*na)->nm_mem;
		(*na)->nm_mem = netmap_mem_get(nmd);
	}

	return 0;
}

/*
 * MUST BE CALLED UNDER NMG_LOCK()
 *
 * Get a refcounted reference to a netmap adapter attached
 * to the interface specified by req.
 * This is always called in the execution of an ioctl().
 *
 * Return ENXIO if the interface specified by the request does
 * not exist, ENOTSUP if netmap is not supported by the interface,
 * EBUSY if the interface is already attached to a bridge,
 * EINVAL if parameters are invalid, ENOMEM if needed resources
 * could not be allocated.
 * If successful, hold a reference to the netmap adapter.
 *
 * If the interface specified by req is a system one, also keep
 * a reference to it and return a valid *ifp.
 */
int
netmap_get_na(struct nmreq_header *hdr,
	      struct netmap_adapter **na, struct ifnet **ifp,
	      struct netmap_mem_d *nmd, int create)
{
	struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
	int error = 0;
	struct netmap_adapter *ret = NULL;
	int nmd_ref = 0;

	*na = NULL;     /* default return value */
	*ifp = NULL;

	if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) {
		return EINVAL;
	}

	if (req->nr_mode == NR_REG_PIPE_MASTER ||
			req->nr_mode == NR_REG_PIPE_SLAVE) {
		/* Do not accept deprecated pipe modes. */
		nm_prerr("Deprecated pipe nr_mode, use xx{yy or xx}yy syntax");
		return EINVAL;
	}

	NMG_LOCK_ASSERT();

	/* if the request contain a memid, try to find the
	 * corresponding memory region
	 */
	if (nmd == NULL && req->nr_mem_id) {
		nmd = netmap_mem_find(req->nr_mem_id);
		if (nmd == NULL)
			return EINVAL;
		/* keep the reference */
		nmd_ref = 1;
	}

	/* We cascade through all possible types of netmap adapter.
	 * All netmap_get_*_na() functions return an error and an na,
	 * with the following combinations:
	 *
	 * error    na
	 *   0	   NULL		type doesn't match
	 *  !0	   NULL		type matches, but na creation/lookup failed
	 *   0	  !NULL		type matches and na created/found
	 *  !0	  !NULL		impossible
	 */
	error = netmap_get_null_na(hdr, na, nmd, create);
	if (error || *na != NULL)
		goto out;

	/* try to see if this is a monitor port */
	error = netmap_get_monitor_na(hdr, na, nmd, create);
	if (error || *na != NULL)
		goto out;

	/* try to see if this is a pipe port */
	error = netmap_get_pipe_na(hdr, na, nmd, create);
	if (error || *na != NULL)
		goto out;

	/* try to see if this is a bridge port */
	error = netmap_get_vale_na(hdr, na, nmd, create);
	if (error)
		goto out;

	if (*na != NULL) /* valid match in netmap_get_bdg_na() */
		goto out;

	/*
	 * This must be a hardware na, lookup the name in the system.
	 * Note that by hardware we actually mean "it shows up in ifconfig".
	 * This may still be a tap, a veth/epair, or even a
	 * persistent VALE port.
	 */
	*ifp = ifunit_ref(hdr->nr_name);
	if (*ifp == NULL) {
		error = ENXIO;
		goto out;
	}

	error = netmap_get_hw_na(*ifp, nmd, &ret);
	if (error)
		goto out;

	*na = ret;
	netmap_adapter_get(ret);

	/*
	 * if the adapter supports the host rings and it is not already open,
	 * try to set the number of host rings as requested by the user
	 */
	if (((*na)->na_flags & NAF_HOST_RINGS) && (*na)->active_fds == 0) {
		if (req->nr_host_tx_rings)
			(*na)->num_host_tx_rings = req->nr_host_tx_rings;
		if (req->nr_host_rx_rings)
			(*na)->num_host_rx_rings = req->nr_host_rx_rings;
	}
	nm_prdis("%s: host tx %d rx %u", (*na)->name, (*na)->num_host_tx_rings,
			(*na)->num_host_rx_rings);

out:
	if (error) {
		if (ret)
			netmap_adapter_put(ret);
		if (*ifp) {
			if_rele(*ifp);
			*ifp = NULL;
		}
	}
	if (nmd_ref)
		netmap_mem_put(nmd);

	return error;
}

/* undo netmap_get_na() */
void
netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp)
{
	if (ifp != NULL)
		if_rele(ifp);
	if (na != NULL)
		netmap_adapter_put(na);
}

#define NM_FAIL_ON(t) do {						\
	if (unlikely(t)) {						\
		nm_prlim(5, "%s: fail '" #t "' "			\
			"h %d c %d t %d "				\
			"rh %d rc %d rt %d "				\
			"hc %d ht %d",					\
			kring->name,					\
			head, cur, ring->tail,				\
			kring->rhead, kring->rcur, kring->rtail,	\
			kring->nr_hwcur, kring->nr_hwtail);		\
		return kring->nkr_num_slots;				\
	}								\
} while (0)

/*
 * validate parameters on entry for *_txsync()
 * Returns ring->cur if ok, or something >= kring->nkr_num_slots
 * in case of error.
 *
 * rhead, rcur and rtail=hwtail are stored from previous round.
 * hwcur is the next packet to send to the ring.
 *
 * We want
 *    hwcur <= *rhead <= head <= cur <= tail = *rtail <= hwtail
 *
 * hwcur, rhead, rtail and hwtail are reliable
 */
u_int
nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
{
	u_int head = ring->head; /* read only once */
	u_int cur = ring->cur; /* read only once */
	u_int n = kring->nkr_num_slots;

	nm_prdis(5, "%s kcur %d ktail %d head %d cur %d tail %d",
		kring->name,
		kring->nr_hwcur, kring->nr_hwtail,
		ring->head, ring->cur, ring->tail);
#if 1 /* kernel sanity checks; but we can trust the kring. */
	NM_FAIL_ON(kring->nr_hwcur >= n || kring->rhead >= n ||
	    kring->rtail >= n ||  kring->nr_hwtail >= n);
#endif /* kernel sanity checks */
	/*
	 * user sanity checks. We only use head,
	 * A, B, ... are possible positions for head:
	 *
	 *  0    A  rhead   B  rtail   C  n-1
	 *  0    D  rtail   E  rhead   F  n-1
	 *
	 * B, F, D are valid. A, C, E are wrong
	 */
	if (kring->rtail >= kring->rhead) {
		/* want rhead <= head <= rtail */
		NM_FAIL_ON(head < kring->rhead || head > kring->rtail);
		/* and also head <= cur <= rtail */
		NM_FAIL_ON(cur < head || cur > kring->rtail);
	} else { /* here rtail < rhead */
		/* we need head outside rtail .. rhead */
		NM_FAIL_ON(head > kring->rtail && head < kring->rhead);

		/* two cases now: head <= rtail or head >= rhead  */
		if (head <= kring->rtail) {
			/* want head <= cur <= rtail */
			NM_FAIL_ON(cur < head || cur > kring->rtail);
		} else { /* head >= rhead */
			/* cur must be outside rtail..head */
			NM_FAIL_ON(cur > kring->rtail && cur < head);
		}
	}
	if (ring->tail != kring->rtail) {
		nm_prlim(5, "%s tail overwritten was %d need %d", kring->name,
			ring->tail, kring->rtail);
		ring->tail = kring->rtail;
	}
	kring->rhead = head;
	kring->rcur = cur;
	return head;
}

/*
 * validate parameters on entry for *_rxsync()
 * Returns ring->head if ok, kring->nkr_num_slots on error.
 *
 * For a valid configuration,
 * hwcur <= head <= cur <= tail <= hwtail
 *
 * We only consider head and cur.
 * hwcur and hwtail are reliable.
 */
u_int
nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
{
	uint32_t const n = kring->nkr_num_slots;
	uint32_t head, cur;

	nm_prdis(5,"%s kc %d kt %d h %d c %d t %d",
		kring->name,
		kring->nr_hwcur, kring->nr_hwtail,
		ring->head, ring->cur, ring->tail);
	/*
	 * Before storing the new values, we should check they do not
	 * move backwards. However:
	 * - head is not an issue because the previous value is hwcur;
	 * - cur could in principle go back, however it does not matter
	 *   because we are processing a brand new rxsync()
	 */
	cur = kring->rcur = ring->cur;	/* read only once */
	head = kring->rhead = ring->head;	/* read only once */
#if 1 /* kernel sanity checks */
	NM_FAIL_ON(kring->nr_hwcur >= n || kring->nr_hwtail >= n);
#endif /* kernel sanity checks */
	/* user sanity checks */
	if (kring->nr_hwtail >= kring->nr_hwcur) {
		/* want hwcur <= rhead <= hwtail */
		NM_FAIL_ON(head < kring->nr_hwcur || head > kring->nr_hwtail);
		/* and also rhead <= rcur <= hwtail */
		NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
	} else {
		/* we need rhead outside hwtail..hwcur */
		NM_FAIL_ON(head < kring->nr_hwcur && head > kring->nr_hwtail);
		/* two cases now: head <= hwtail or head >= hwcur  */
		if (head <= kring->nr_hwtail) {
			/* want head <= cur <= hwtail */
			NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
		} else {
			/* cur must be outside hwtail..head */
			NM_FAIL_ON(cur < head && cur > kring->nr_hwtail);
		}
	}
	if (ring->tail != kring->rtail) {
		nm_prlim(5, "%s tail overwritten was %d need %d",
			kring->name,
			ring->tail, kring->rtail);
		ring->tail = kring->rtail;
	}
	return head;
}

/*
 * Error routine called when txsync/rxsync detects an error.
 * Can't do much more than resetting head = cur = hwcur, tail = hwtail
 * Return 1 on reinit.
 *
 * This routine is only called by the upper half of the kernel.
 * It only reads hwcur (which is changed only by the upper half, too)
 * and hwtail (which may be changed by the lower half, but only on
 * a tx ring and only to increase it, so any error will be recovered
 * on the next call). For the above, we don't strictly need to call
 * it under lock.
 */
int
netmap_ring_reinit(struct netmap_kring *kring)
{
	struct netmap_ring *ring = kring->ring;
	u_int i, lim = kring->nkr_num_slots - 1;
	int errors = 0;

	// XXX KASSERT nm_kr_tryget
	nm_prlim(10, "called for %s", kring->name);
	// XXX probably wrong to trust userspace
	kring->rhead = ring->head;
	kring->rcur  = ring->cur;
	kring->rtail = ring->tail;

	if (ring->cur > lim)
		errors++;
	if (ring->head > lim)
		errors++;
	if (ring->tail > lim)
		errors++;
	for (i = 0; i <= lim; i++) {
		u_int idx = ring->slot[i].buf_idx;
		u_int len = ring->slot[i].len;
		if (idx < 2 || idx >= kring->na->na_lut.objtotal) {
			nm_prlim(5, "bad index at slot %d idx %d len %d ", i, idx, len);
			ring->slot[i].buf_idx = 0;
			ring->slot[i].len = 0;
		} else if (len > NETMAP_BUF_SIZE(kring->na)) {
			ring->slot[i].len = 0;
			nm_prlim(5, "bad len at slot %d idx %d len %d", i, idx, len);
		}
	}
	if (errors) {
		nm_prlim(10, "total %d errors", errors);
		nm_prlim(10, "%s reinit, cur %d -> %d tail %d -> %d",
			kring->name,
			ring->cur, kring->nr_hwcur,
			ring->tail, kring->nr_hwtail);
		ring->head = kring->rhead = kring->nr_hwcur;
		ring->cur  = kring->rcur  = kring->nr_hwcur;
		ring->tail = kring->rtail = kring->nr_hwtail;
	}
	return (errors ? 1 : 0);
}

/* interpret the ringid and flags fields of an nmreq, by translating them
 * into a pair of intervals of ring indices:
 *
 * [priv->np_txqfirst, priv->np_txqlast) and
 * [priv->np_rxqfirst, priv->np_rxqlast)
 *
 */
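
/*
 * For example (illustrative values), on a NIC with 4 tx and 4 rx hw
 * rings plus one host ring pair, the modes below translate to:
 *
 *	NR_REG_ALL_NIC           -> tx [0,4)  rx [0,4)
 *	NR_REG_ONE_NIC, ringid 2 -> tx [2,3)  rx [2,3)
 *	NR_REG_SW                -> tx [4,5)  rx [4,5)  (host rings only)
 *	NR_REG_NIC_SW            -> tx [0,5)  rx [0,5)
 */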
int
netmap_interp_ringid(struct netmap_priv_d *priv, uint32_t nr_mode,
			uint16_t nr_ringid, uint64_t nr_flags)
{
	struct netmap_adapter *na = priv->np_na;
	int excluded_direction[] = { NR_TX_RINGS_ONLY, NR_RX_RINGS_ONLY };
	enum txrx t;
	u_int j;

	for_rx_tx(t) {
		if (nr_flags & excluded_direction[t]) {
			priv->np_qfirst[t] = priv->np_qlast[t] = 0;
			continue;
		}
		switch (nr_mode) {
		case NR_REG_ALL_NIC:
		case NR_REG_NULL:
			priv->np_qfirst[t] = 0;
			priv->np_qlast[t] = nma_get_nrings(na, t);
			nm_prdis("ALL/PIPE: %s %d %d", nm_txrx2str(t),
				priv->np_qfirst[t], priv->np_qlast[t]);
			break;
		case NR_REG_SW:
		case NR_REG_NIC_SW:
			if (!(na->na_flags & NAF_HOST_RINGS)) {
				nm_prerr("host rings not supported");
				return EINVAL;
			}
			priv->np_qfirst[t] = (nr_mode == NR_REG_SW ?
				nma_get_nrings(na, t) : 0);
			priv->np_qlast[t] = netmap_all_rings(na, t);
			nm_prdis("%s: %s %d %d", nr_mode == NR_REG_SW ? "SW" : "NIC+SW",
				nm_txrx2str(t),
				priv->np_qfirst[t], priv->np_qlast[t]);
			break;
		case NR_REG_ONE_NIC:
			if (nr_ringid >= na->num_tx_rings &&
					nr_ringid >= na->num_rx_rings) {
				nm_prerr("invalid ring id %d", nr_ringid);
				return EINVAL;
			}
			/* if not enough rings, use the first one */
			j = nr_ringid;
			if (j >= nma_get_nrings(na, t))
				j = 0;
			priv->np_qfirst[t] = j;
			priv->np_qlast[t] = j + 1;
			nm_prdis("ONE_NIC: %s %d %d", nm_txrx2str(t),
				priv->np_qfirst[t], priv->np_qlast[t]);
			break;
		case NR_REG_ONE_SW:
			if (!(na->na_flags & NAF_HOST_RINGS)) {
				nm_prerr("host rings not supported");
				return EINVAL;
			}
			if (nr_ringid >= na->num_host_tx_rings &&
					nr_ringid >= na->num_host_rx_rings) {
				nm_prerr("invalid ring id %d", nr_ringid);
				return EINVAL;
			}
			/* if not enough rings, use the first one */
			j = nr_ringid;
			if (j >= nma_get_host_nrings(na, t))
				j = 0;
			priv->np_qfirst[t] = nma_get_nrings(na, t) + j;
			priv->np_qlast[t] = nma_get_nrings(na, t) + j + 1;
			nm_prdis("ONE_SW: %s %d %d", nm_txrx2str(t),
				priv->np_qfirst[t], priv->np_qlast[t]);
			break;
		default:
			nm_prerr("invalid regif type %d", nr_mode);
			return EINVAL;
		}
	}
	priv->np_flags = nr_flags;

	/* Allow transparent forwarding mode in the host --> nic
	 * direction only if all the TX hw rings have been opened. */
	if (priv->np_qfirst[NR_TX] == 0 &&
			priv->np_qlast[NR_TX] >= na->num_tx_rings) {
		priv->np_sync_flags |= NAF_CAN_FORWARD_DOWN;
	}

	if (netmap_verbose) {
		nm_prinf("%s: tx [%d,%d) rx [%d,%d) id %d",
			na->name,
			priv->np_qfirst[NR_TX],
			priv->np_qlast[NR_TX],
			priv->np_qfirst[NR_RX],
			priv->np_qlast[NR_RX],
			nr_ringid);
	}
	return 0;
}

/*
 * Set the ring ID. For devices with a single queue, a request
 * for all rings is the same as a single ring.
 */
static int
netmap_set_ringid(struct netmap_priv_d *priv, uint32_t nr_mode,
		uint16_t nr_ringid, uint64_t nr_flags)
{
	struct netmap_adapter *na = priv->np_na;
	int error;
	enum txrx t;

	error = netmap_interp_ringid(priv, nr_mode, nr_ringid, nr_flags);
	if (error) {
		return error;
	}

	priv->np_txpoll = (nr_flags & NR_NO_TX_POLL) ? 0 : 1;

	/* optimization: count the users registered for more than
	 * one ring, which are the ones sleeping on the global queue.
	 * The default netmap_notify() callback will then
	 * avoid signaling the global queue if nobody is using it
	 */
	for_rx_tx(t) {
		if (nm_si_user(priv, t))
			na->si_users[t]++;
	}
	return 0;
}
static void
netmap_unset_ringid(struct netmap_priv_d *priv)
{
	struct netmap_adapter *na = priv->np_na;
	enum txrx t;

	for_rx_tx(t) {
		if (nm_si_user(priv, t))
			na->si_users[t]--;
		priv->np_qfirst[t] = priv->np_qlast[t] = 0;
	}
	priv->np_flags = 0;
	priv->np_txpoll = 0;
	priv->np_kloop_state = 0;
}
1985 /* Set the nr_pending_mode for the requested rings.
1986 * If requested, also try to get exclusive access to the rings, provided
1987 * the rings we want to bind are not exclusively owned by a previous bind.
1990 netmap_krings_get(struct netmap_priv_d *priv)
1992 struct netmap_adapter *na = priv->np_na;
1994 struct netmap_kring *kring;
1995 int excl = (priv->np_flags & NR_EXCLUSIVE);
1998 if (netmap_debug & NM_DEBUG_ON)
1999 nm_prinf("%s: grabbing tx [%d, %d) rx [%d, %d)",
2001 priv->np_qfirst[NR_TX],
2002 priv->np_qlast[NR_TX],
2003 priv->np_qfirst[NR_RX],
2004 priv->np_qlast[NR_RX]);
2006 /* first round: check that none of the requested rings
2007 * is already exclusively owned, and that we do not
2008 * request exclusive ownership of rings that are already in use
2011 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
2012 kring = NMR(na, t)[i];
2013 if ((kring->nr_kflags & NKR_EXCLUSIVE) ||
2014 (kring->users && excl))
2016 nm_prdis("ring %s busy", kring->name);
2022 /* second round: increment usage count (possibly marking them
2023 * as exclusive) and set the nr_pending_mode
2026 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
2027 kring = NMR(na, t)[i];
2030 kring->nr_kflags |= NKR_EXCLUSIVE;
2031 kring->nr_pending_mode = NKR_NETMAP_ON;
2039 /* Undo netmap_krings_get(). This is done by clearing the exclusive mode
2040 * if it was requested at regif time, and by unsetting nr_pending_mode
2041 * if we are the last user of the involved rings. */
2043 netmap_krings_put(struct netmap_priv_d *priv)
2045 struct netmap_adapter *na = priv->np_na;
2047 struct netmap_kring *kring;
2048 int excl = (priv->np_flags & NR_EXCLUSIVE);
2051 nm_prdis("%s: releasing tx [%d, %d) rx [%d, %d)",
2053 priv->np_qfirst[NR_TX],
2054 priv->np_qlast[NR_TX],
2055 priv->np_qfirst[NR_RX],
2056 priv->np_qlast[NR_RX]);
2059 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
2060 kring = NMR(na, t)[i];
2062 kring->nr_kflags &= ~NKR_EXCLUSIVE;
2064 if (kring->users == 0)
2065 kring->nr_pending_mode = NKR_NETMAP_OFF;
2071 nm_priv_rx_enabled(struct netmap_priv_d *priv)
2073 return (priv->np_qfirst[NR_RX] != priv->np_qlast[NR_RX]);
2076 /* Validate the CSB entries for both directions (atok and ktoa).
2077 * To be called under NMG_LOCK(). */
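/*
* Layout note (derived from the code below, not from a spec): the CSB
* is an array of tot_rings nm_csb_atok entries followed by tot_rings
* nm_csb_ktoa entries, with the TX rings first in each array. E.g.
* when 2 TX and 2 RX rings are bound, atok[0..1] describe the TX
* rings and atok[2..3] the RX rings, and similarly for ktoa.
*/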
2079 netmap_csb_validate(struct netmap_priv_d *priv, struct nmreq_opt_csb *csbo)
2081 struct nm_csb_atok *csb_atok_base =
2082 (struct nm_csb_atok *)(uintptr_t)csbo->csb_atok;
2083 struct nm_csb_ktoa *csb_ktoa_base =
2084 (struct nm_csb_ktoa *)(uintptr_t)csbo->csb_ktoa;
2086 int num_rings[NR_TXRX], tot_rings;
2087 size_t entry_size[2];
2091 if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) {
2092 nm_prerr("Cannot update CSB while kloop is running");
2098 num_rings[t] = priv->np_qlast[t] - priv->np_qfirst[t];
2099 tot_rings += num_rings[t];
2104 if (!(priv->np_flags & NR_EXCLUSIVE)) {
2105 nm_prerr("CSB mode requires NR_EXCLUSIVE");
2109 entry_size[0] = sizeof(*csb_atok_base);
2110 entry_size[1] = sizeof(*csb_ktoa_base);
2111 csb_start[0] = (void *)csb_atok_base;
2112 csb_start[1] = (void *)csb_ktoa_base;
2114 for (i = 0; i < 2; i++) {
2115 /* On Linux we could use access_ok() to simplify
2116 * the validation. However, the advantage of
2117 * this approach is that it works also on FreeBSD. */
2119 size_t csb_size = tot_rings * entry_size[i];
2123 if ((uintptr_t)csb_start[i] & (entry_size[i]-1)) {
2124 nm_prerr("Unaligned CSB address");
2128 tmp = nm_os_malloc(csb_size);
2132 /* Application --> kernel direction. */
2133 err = copyin(csb_start[i], tmp, csb_size);
2135 /* Kernel --> application direction. */
2136 memset(tmp, 0, csb_size);
2137 err = copyout(tmp, csb_start[i], csb_size);
2141 nm_prerr("Invalid CSB address");
2146 priv->np_csb_atok_base = csb_atok_base;
2147 priv->np_csb_ktoa_base = csb_ktoa_base;
2149 /* Initialize the CSB. */
2151 for (i = 0; i < num_rings[t]; i++) {
2152 struct netmap_kring *kring =
2153 NMR(priv->np_na, t)[i + priv->np_qfirst[t]];
2154 struct nm_csb_atok *csb_atok = csb_atok_base + i;
2155 struct nm_csb_ktoa *csb_ktoa = csb_ktoa_base + i;
2158 csb_atok += num_rings[NR_TX];
2159 csb_ktoa += num_rings[NR_TX];
2162 CSB_WRITE(csb_atok, head, kring->rhead);
2163 CSB_WRITE(csb_atok, cur, kring->rcur);
2164 CSB_WRITE(csb_atok, appl_need_kick, 1);
2165 CSB_WRITE(csb_atok, sync_flags, 1);
2166 CSB_WRITE(csb_ktoa, hwcur, kring->nr_hwcur);
2167 CSB_WRITE(csb_ktoa, hwtail, kring->nr_hwtail);
2168 CSB_WRITE(csb_ktoa, kern_need_kick, 1);
2170 nm_prinf("csb_init for kring %s: head %u, cur %u, "
2171 "hwcur %u, hwtail %u", kring->name,
2172 kring->rhead, kring->rcur, kring->nr_hwcur,
2180 /* Ensure that the netmap adapter can support the given MTU.
2181 * @return EINVAL if the na cannot be set to mtu, 0 otherwise.
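*
* Example (numbers illustrative only): with 2048-byte netmap buffers,
* an MTU of 1500 fits in a single slot, while an MTU of 9000 is
* accepted only if the adapter supports NAF_MOREFRAG, and packets may
* then span multiple slots chained with NS_MOREFRAG.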
2184 netmap_buf_size_validate(const struct netmap_adapter *na, unsigned mtu) {
2185 unsigned nbs = NETMAP_BUF_SIZE(na);
2187 if (mtu <= na->rx_buf_maxsize) {
2188 /* The MTU fits a single NIC slot. We only
2189 * need to check that netmap buffers are
2190 * large enough to hold an MTU. NS_MOREFRAG
2191 * cannot be used in this case. */
2193 nm_prerr("error: netmap buf size (%u) "
2194 "< device MTU (%u)", nbs, mtu);
2198 /* More NIC slots may be needed to receive
2199 * or transmit a single packet. Check that
2200 * the adapter supports NS_MOREFRAG and that
2201 * netmap buffers are large enough to hold
2202 * the maximum per-slot size. */
2203 if (!(na->na_flags & NAF_MOREFRAG)) {
2204 nm_prerr("error: large MTU (%d) needed "
2205 "but %s does not support "
2209 } else if (nbs < na->rx_buf_maxsize) {
2210 nm_prerr("error: using NS_MOREFRAG on "
2211 "%s requires netmap buf size "
2212 ">= %u", na->ifp->if_xname,
2213 na->rx_buf_maxsize);
2216 nm_prinf("info: netmap application on "
2217 "%s needs to support "
2219 "(MTU=%u,netmap_buf_size=%u)",
2220 na->ifp->if_xname, mtu, nbs);
2228 * possibly move the interface to netmap-mode.
2229 * On success it returns a pointer to the netmap_if, otherwise NULL.
2230 * This must be called with NMG_LOCK held.
2232 * The following na callbacks are called in the process:
2234 * na->nm_config() [by netmap_update_config]
2235 * (get current number and size of rings)
2237 * We have a generic one for linux (netmap_linux_config).
2238 * The bwrap has to override this, since it has to forward
2239 * the request to the wrapped adapter (netmap_bwrap_config).
2242 * na->nm_krings_create()
2243 * (create and init the krings array)
2245 * One of the following:
2247 * * netmap_hw_krings_create, (hw ports)
2248 * creates the standard layout for the krings
2249 * and adds the mbq (used for the host rings).
2251 * * netmap_vp_krings_create (VALE ports)
2252 * add leases and scratchpads
2254 * * netmap_pipe_krings_create (pipes)
2255 * create the krings and rings of both ends and
2258 * * netmap_monitor_krings_create (monitors)
2259 * avoid allocating the mbq
2261 * * netmap_bwrap_krings_create (bwraps)
2262 * create both the bwrap krings array,
2263 * the krings array of the wrapped adapter, and
2264 * (if needed) the fake array for the host adapter
2266 * na->nm_register(, 1)
2267 * (put the adapter in netmap mode)
2269 * This may be one of the following:
2271 * * netmap_hw_reg (hw ports)
2272 * checks that the ifp is still there, then calls
2273 * the hardware specific callback;
2275 * * netmap_vp_reg (VALE ports)
2276 * If the port is connected to a bridge,
2277 * set the NAF_NETMAP_ON flag under the
2278 * bridge write lock.
2280 * * netmap_pipe_reg (pipes)
2281 * inform the other pipe end that it is no
2282 * longer responsible for the lifetime of this
2285 * * netmap_monitor_reg (monitors)
2286 * intercept the sync callbacks of the monitored
2289 * * netmap_bwrap_reg (bwraps)
2290 * cross-link the bwrap and hwna rings,
2291 * forward the request to the hwna, override
2292 * the hwna notify callback (to get the frames
2293 * coming from outside go through the bridge).
2298 netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
2299 uint32_t nr_mode, uint16_t nr_ringid, uint64_t nr_flags)
2301 struct netmap_if *nifp = NULL;
2305 priv->np_na = na; /* store the reference */
2306 error = netmap_mem_finalize(na->nm_mem, na);
2310 if (na->active_fds == 0) {
2312 /* cache the allocator info in the na */
2313 error = netmap_mem_get_lut(na->nm_mem, &na->na_lut);
2316 nm_prdis("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal,
2317 na->na_lut.objsize);
2319 /* ring configuration may have changed, fetch from the card */
2320 netmap_update_config(na);
2323 /* compute the range of tx and rx rings to monitor */
2324 error = netmap_set_ringid(priv, nr_mode, nr_ringid, nr_flags);
2328 if (na->active_fds == 0) {
2330 * If this is the first registration of the adapter,
2331 * perform sanity checks and create the in-kernel view
2332 * of the netmap rings (the netmap krings).
2334 if (na->ifp && nm_priv_rx_enabled(priv)) {
2335 /* This netmap adapter is attached to an ifnet. */
2336 unsigned mtu = nm_os_ifnet_mtu(na->ifp);
2338 nm_prdis("%s: mtu %d rx_buf_maxsize %d netmap_buf_size %d",
2339 na->name, mtu, na->rx_buf_maxsize, NETMAP_BUF_SIZE(na));
2341 if (na->rx_buf_maxsize == 0) {
2342 nm_prerr("%s: error: rx_buf_maxsize == 0", na->name);
2347 error = netmap_buf_size_validate(na, mtu);
2353 * Depending on the adapter, this may also create
2354 * the netmap rings themselves
2356 error = na->nm_krings_create(na);
2362 /* now the krings must exist and we can check whether some
2363 * previous bind has exclusive ownership on them, and set
2366 error = netmap_krings_get(priv);
2368 goto err_del_krings;
2370 /* create all needed missing netmap rings */
2371 error = netmap_mem_rings_create(na);
2375 /* in all cases, create a new netmap_if */
2376 nifp = netmap_mem_if_new(na, priv);
2382 if (nm_kring_pending(priv)) {
2383 /* Some kring is switching mode, tell the adapter to
2385 error = na->nm_register(na, 1);
2390 /* Commit the reference. */
2394 * advertise that the interface is ready by setting np_nifp.
2395 * The barrier is needed because readers (poll, *SYNC and mmap)
2396 * check for priv->np_nifp != NULL without locking
2398 mb(); /* make sure previous writes are visible to all CPUs */
2399 priv->np_nifp = nifp;
2404 netmap_mem_if_delete(na, nifp);
2406 netmap_krings_put(priv);
2407 netmap_mem_rings_delete(na);
2409 if (na->active_fds == 0)
2410 na->nm_krings_delete(na);
2412 if (na->active_fds == 0)
2413 memset(&na->na_lut, 0, sizeof(na->na_lut));
2415 netmap_mem_drop(na);
2423 * update kring and ring at the end of rxsync/txsync.
2426 nm_sync_finalize(struct netmap_kring *kring)
2429 * Update ring tail to what the kernel knows
2430 * After txsync: head/rhead/hwcur might be behind cur/rcur
2433 kring->ring->tail = kring->rtail = kring->nr_hwtail;
2435 nm_prdis(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
2436 kring->name, kring->nr_hwcur, kring->nr_hwtail,
2437 kring->rhead, kring->rcur, kring->rtail);
2440 /* set ring timestamp */
2442 ring_timestamp_set(struct netmap_ring *ring)
2444 if (netmap_no_timestamp == 0 || ring->flags & NR_TIMESTAMP) {
2445 microtime(&ring->ts);
2449 static int nmreq_copyin(struct nmreq_header *, int);
2450 static int nmreq_copyout(struct nmreq_header *, int);
2451 static int nmreq_checkoptions(struct nmreq_header *);
2454 * ioctl(2) support for the "netmap" device.
2456 * The following commands are accepted:
2457 * - NIOCCTRL device control API
2458 * - NIOCTXSYNC sync TX rings
2459 * - NIOCRXSYNC sync RX rings
2460 * - SIOCGIFADDR just for convenience
2461 * - NIOCGINFO deprecated (legacy API)
2462 * - NIOCREGIF deprecated (legacy API)
2464 * Return 0 on success, errno otherwise.
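*
* A minimal NIOCCTRL registration from userspace would look like the
* sketch below (error handling omitted; fd is an open /dev/netmap
* descriptor, "em0" an arbitrary port name):
*
*	struct nmreq_header hdr;
*	struct nmreq_register req;
*
*	memset(&hdr, 0, sizeof(hdr));
*	memset(&req, 0, sizeof(req));
*	hdr.nr_version = NETMAP_API;
*	hdr.nr_reqtype = NETMAP_REQ_REGISTER;
*	hdr.nr_body = (uintptr_t)&req;
*	strlcpy(hdr.nr_name, "em0", sizeof(hdr.nr_name));
*	req.nr_mode = NR_REG_ALL_NIC;
*	ioctl(fd, NIOCCTRL, &hdr);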
2467 netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
2468 struct thread *td, int nr_body_is_user)
2470 struct mbq q; /* packets from RX hw queues to host stack */
2471 struct netmap_adapter *na = NULL;
2472 struct netmap_mem_d *nmd = NULL;
2473 struct ifnet *ifp = NULL;
2475 u_int i, qfirst, qlast;
2476 struct netmap_kring **krings;
2482 struct nmreq_header *hdr = (struct nmreq_header *)data;
2484 if (hdr->nr_version < NETMAP_MIN_API ||
2485 hdr->nr_version > NETMAP_MAX_API) {
2486 nm_prerr("API mismatch: got %d need %d",
2487 hdr->nr_version, NETMAP_API);
2491 /* Make a kernel-space copy of the user-space nr_body.
2492 * For convenience, the nr_body pointer and the pointers
2493 * in the options list will be replaced with their
2494 * kernel-space counterparts. The original pointers are
2495 * saved internally and later restored by nmreq_copyout
2497 error = nmreq_copyin(hdr, nr_body_is_user);
2502 /* Sanitize hdr->nr_name. */
2503 hdr->nr_name[sizeof(hdr->nr_name) - 1] = '\0';
2505 switch (hdr->nr_reqtype) {
2506 case NETMAP_REQ_REGISTER: {
2507 struct nmreq_register *req =
2508 (struct nmreq_register *)(uintptr_t)hdr->nr_body;
2509 struct netmap_if *nifp;
2511 /* Protect access to priv from concurrent requests. */
2514 struct nmreq_option *opt;
2517 if (priv->np_nifp != NULL) { /* thread already registered */
2523 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_EXTMEM);
2525 struct nmreq_opt_extmem *e =
2526 (struct nmreq_opt_extmem *)opt;
2528 nmd = netmap_mem_ext_create(e->nro_usrptr,
2529 &e->nro_info, &error);
2530 opt->nro_status = error;
2534 #endif /* WITH_EXTMEM */
2536 if (nmd == NULL && req->nr_mem_id) {
2537 /* find the allocator and get a reference */
2538 nmd = netmap_mem_find(req->nr_mem_id);
2540 if (netmap_verbose) {
2541 nm_prerr("%s: failed to find mem_id %u",
2542 hdr->nr_name, req->nr_mem_id);
2548 /* find the interface and a reference */
2549 error = netmap_get_na(hdr, &na, &ifp, nmd,
2550 1 /* create */); /* keep reference */
2553 if (NETMAP_OWNED_BY_KERN(na)) {
2558 if (na->virt_hdr_len && !(req->nr_flags & NR_ACCEPT_VNET_HDR)) {
2559 nm_prerr("virt_hdr_len=%d, but application does "
2560 "not accept it", na->virt_hdr_len);
2565 error = netmap_do_regif(priv, na, req->nr_mode,
2566 req->nr_ringid, req->nr_flags);
2567 if (error) { /* reg. failed, release priv and ref */
2571 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
2573 struct nmreq_opt_csb *csbo =
2574 (struct nmreq_opt_csb *)opt;
2575 error = netmap_csb_validate(priv, csbo);
2576 opt->nro_status = error;
2578 netmap_do_unregif(priv);
2583 nifp = priv->np_nifp;
2585 /* return the offset of the netmap_if object */
2586 req->nr_rx_rings = na->num_rx_rings;
2587 req->nr_tx_rings = na->num_tx_rings;
2588 req->nr_rx_slots = na->num_rx_desc;
2589 req->nr_tx_slots = na->num_tx_desc;
2590 req->nr_host_tx_rings = na->num_host_tx_rings;
2591 req->nr_host_rx_rings = na->num_host_rx_rings;
2592 error = netmap_mem_get_info(na->nm_mem, &req->nr_memsize, &memflags,
2595 netmap_do_unregif(priv);
2598 if (memflags & NETMAP_MEM_PRIVATE) {
2599 *(uint32_t *)(uintptr_t)&nifp->ni_flags |= NI_PRIV_MEM;
2602 priv->np_si[t] = nm_si_user(priv, t) ?
2603 &na->si[t] : &NMR(na, t)[priv->np_qfirst[t]]->si;
2606 if (req->nr_extra_bufs) {
2608 nm_prinf("requested %d extra buffers",
2609 req->nr_extra_bufs);
2610 req->nr_extra_bufs = netmap_extra_alloc(na,
2611 &nifp->ni_bufs_head, req->nr_extra_bufs);
2613 nm_prinf("got %d extra buffers", req->nr_extra_bufs);
2615 req->nr_offset = netmap_mem_if_offset(na->nm_mem, nifp);
2617 error = nmreq_checkoptions(hdr);
2619 netmap_do_unregif(priv);
2623 /* store ifp reference so that priv destructor may release it */
2627 netmap_unget_na(na, ifp);
2629 /* release the reference from netmap_mem_find() or
2630 * netmap_mem_ext_create()
2633 netmap_mem_put(nmd);
2638 case NETMAP_REQ_PORT_INFO_GET: {
2639 struct nmreq_port_info_get *req =
2640 (struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body;
2646 if (hdr->nr_name[0] != '\0') {
2647 /* Build a nmreq_register out of the nmreq_port_info_get,
2648 * so that we can call netmap_get_na(). */
2649 struct nmreq_register regreq;
2650 bzero(&regreq, sizeof(regreq));
2651 regreq.nr_mode = NR_REG_ALL_NIC;
2652 regreq.nr_tx_slots = req->nr_tx_slots;
2653 regreq.nr_rx_slots = req->nr_rx_slots;
2654 regreq.nr_tx_rings = req->nr_tx_rings;
2655 regreq.nr_rx_rings = req->nr_rx_rings;
2656 regreq.nr_host_tx_rings = req->nr_host_tx_rings;
2657 regreq.nr_host_rx_rings = req->nr_host_rx_rings;
2658 regreq.nr_mem_id = req->nr_mem_id;
2660 /* get a refcount */
2661 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2662 hdr->nr_body = (uintptr_t)&regreq;
2663 error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
2664 hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET; /* reset type */
2665 hdr->nr_body = (uintptr_t)req; /* reset nr_body */
2671 nmd = na->nm_mem; /* get memory allocator */
2673 nmd = netmap_mem_find(req->nr_mem_id ? req->nr_mem_id : 1);
2676 nm_prerr("%s: failed to find mem_id %u",
2678 req->nr_mem_id ? req->nr_mem_id : 1);
2684 error = netmap_mem_get_info(nmd, &req->nr_memsize, &memflags,
2688 if (na == NULL) /* only memory info */
2690 netmap_update_config(na);
2691 req->nr_rx_rings = na->num_rx_rings;
2692 req->nr_tx_rings = na->num_tx_rings;
2693 req->nr_rx_slots = na->num_rx_desc;
2694 req->nr_tx_slots = na->num_tx_desc;
2695 req->nr_host_tx_rings = na->num_host_tx_rings;
2696 req->nr_host_rx_rings = na->num_host_rx_rings;
2698 netmap_unget_na(na, ifp);
2703 case NETMAP_REQ_VALE_ATTACH: {
2704 error = netmap_vale_attach(hdr, NULL /* userspace request */);
2708 case NETMAP_REQ_VALE_DETACH: {
2709 error = netmap_vale_detach(hdr, NULL /* userspace request */);
2713 case NETMAP_REQ_VALE_LIST: {
2714 error = netmap_vale_list(hdr);
2718 case NETMAP_REQ_PORT_HDR_SET: {
2719 struct nmreq_port_hdr *req =
2720 (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2721 /* Build a nmreq_register out of the nmreq_port_hdr,
2722 * so that we can call netmap_get_bdg_na(). */
2723 struct nmreq_register regreq;
2724 bzero(&regreq, sizeof(regreq));
2725 regreq.nr_mode = NR_REG_ALL_NIC;
2727 /* For now we only support virtio-net headers, and only for
2728 * VALE ports, but this may change in the future. Valid lengths
2729 * for the virtio-net header are 0 (no header), 10 and 12. */
2730 if (req->nr_hdr_len != 0 &&
2731 req->nr_hdr_len != sizeof(struct nm_vnet_hdr) &&
2732 req->nr_hdr_len != 12) {
2734 nm_prerr("invalid hdr_len %u", req->nr_hdr_len);
2739 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2740 hdr->nr_body = (uintptr_t)&regreq;
2741 error = netmap_get_vale_na(hdr, &na, NULL, 0);
2742 hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_SET;
2743 hdr->nr_body = (uintptr_t)req;
2745 struct netmap_vp_adapter *vpna =
2746 (struct netmap_vp_adapter *)na;
2747 na->virt_hdr_len = req->nr_hdr_len;
2748 if (na->virt_hdr_len) {
2749 vpna->mfs = NETMAP_BUF_SIZE(na);
2752 nm_prinf("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na);
2753 netmap_adapter_put(na);
2761 case NETMAP_REQ_PORT_HDR_GET: {
2762 /* Get vnet-header length for this netmap port */
2763 struct nmreq_port_hdr *req =
2764 (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2765 /* Build a nmreq_register out of the nmreq_port_hdr,
2766 * so that we can call netmap_get_bdg_na(). */
2767 struct nmreq_register regreq;
2770 bzero(&regreq, sizeof(regreq));
2771 regreq.nr_mode = NR_REG_ALL_NIC;
2773 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2774 hdr->nr_body = (uintptr_t)&regreq;
2775 error = netmap_get_na(hdr, &na, &ifp, NULL, 0);
2776 hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_GET;
2777 hdr->nr_body = (uintptr_t)req;
2779 req->nr_hdr_len = na->virt_hdr_len;
2781 netmap_unget_na(na, ifp);
2786 case NETMAP_REQ_VALE_NEWIF: {
2787 error = nm_vi_create(hdr);
2791 case NETMAP_REQ_VALE_DELIF: {
2792 error = nm_vi_destroy(hdr->nr_name);
2796 case NETMAP_REQ_VALE_POLLING_ENABLE:
2797 case NETMAP_REQ_VALE_POLLING_DISABLE: {
2798 error = nm_bdg_polling(hdr);
2801 #endif /* WITH_VALE */
2802 case NETMAP_REQ_POOLS_INFO_GET: {
2803 /* Get information from the memory allocator used for
2805 struct nmreq_pools_info *req =
2806 (struct nmreq_pools_info *)(uintptr_t)hdr->nr_body;
2809 /* Build a nmreq_register out of the nmreq_pools_info,
2810 * so that we can call netmap_get_na(). */
2811 struct nmreq_register regreq;
2812 bzero(&regreq, sizeof(regreq));
2813 regreq.nr_mem_id = req->nr_mem_id;
2814 regreq.nr_mode = NR_REG_ALL_NIC;
2816 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2817 hdr->nr_body = (uintptr_t)&regreq;
2818 error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
2819 hdr->nr_reqtype = NETMAP_REQ_POOLS_INFO_GET; /* reset type */
2820 hdr->nr_body = (uintptr_t)req; /* reset nr_body */
2826 nmd = na->nm_mem; /* grab the memory allocator */
2832 /* Finalize the memory allocator, get the pools
2833 * information and release the allocator. */
2834 error = netmap_mem_finalize(nmd, na);
2838 error = netmap_mem_pools_info_get(req, nmd);
2839 netmap_mem_drop(na);
2841 netmap_unget_na(na, ifp);
2846 case NETMAP_REQ_CSB_ENABLE: {
2847 struct nmreq_option *opt;
2849 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
2853 struct nmreq_opt_csb *csbo =
2854 (struct nmreq_opt_csb *)opt;
2856 error = netmap_csb_validate(priv, csbo);
2858 opt->nro_status = error;
2863 case NETMAP_REQ_SYNC_KLOOP_START: {
2864 error = netmap_sync_kloop(priv, hdr);
2868 case NETMAP_REQ_SYNC_KLOOP_STOP: {
2869 error = netmap_sync_kloop_stop(priv);
2878 /* Write back request body to userspace and reset the
2879 * user-space pointer. */
2880 error = nmreq_copyout(hdr, error);
2886 if (unlikely(priv->np_nifp == NULL)) {
2890 mb(); /* make sure following reads are not from cache */
2892 if (unlikely(priv->np_csb_atok_base)) {
2893 nm_prerr("Invalid sync in CSB mode");
2898 na = priv->np_na; /* we have a reference */
2901 t = (cmd == NIOCTXSYNC ? NR_TX : NR_RX);
2902 krings = NMR(na, t);
2903 qfirst = priv->np_qfirst[t];
2904 qlast = priv->np_qlast[t];
2905 sync_flags = priv->np_sync_flags;
2907 for (i = qfirst; i < qlast; i++) {
2908 struct netmap_kring *kring = krings[i];
2909 struct netmap_ring *ring = kring->ring;
2911 if (unlikely(nm_kr_tryget(kring, 1, &error))) {
2912 error = (error ? EIO : 0);
2916 if (cmd == NIOCTXSYNC) {
2917 if (netmap_debug & NM_DEBUG_TXSYNC)
2918 nm_prinf("pre txsync ring %d cur %d hwcur %d",
2921 if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
2922 netmap_ring_reinit(kring);
2923 } else if (kring->nm_sync(kring, sync_flags | NAF_FORCE_RECLAIM) == 0) {
2924 nm_sync_finalize(kring);
2926 if (netmap_debug & NM_DEBUG_TXSYNC)
2927 nm_prinf("post txsync ring %d cur %d hwcur %d",
2931 if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
2932 netmap_ring_reinit(kring);
2934 if (nm_may_forward_up(kring)) {
2935 /* transparent forwarding, see netmap_poll() */
2936 netmap_grab_packets(kring, &q, netmap_fwd);
2938 if (kring->nm_sync(kring, sync_flags | NAF_FORCE_READ) == 0) {
2939 nm_sync_finalize(kring);
2941 ring_timestamp_set(ring);
2947 netmap_send_up(na->ifp, &q);
2954 return netmap_ioctl_legacy(priv, cmd, data, td);
2963 nmreq_size_by_type(uint16_t nr_reqtype)
2965 switch (nr_reqtype) {
2966 case NETMAP_REQ_REGISTER:
2967 return sizeof(struct nmreq_register);
2968 case NETMAP_REQ_PORT_INFO_GET:
2969 return sizeof(struct nmreq_port_info_get);
2970 case NETMAP_REQ_VALE_ATTACH:
2971 return sizeof(struct nmreq_vale_attach);
2972 case NETMAP_REQ_VALE_DETACH:
2973 return sizeof(struct nmreq_vale_detach);
2974 case NETMAP_REQ_VALE_LIST:
2975 return sizeof(struct nmreq_vale_list);
2976 case NETMAP_REQ_PORT_HDR_SET:
2977 case NETMAP_REQ_PORT_HDR_GET:
2978 return sizeof(struct nmreq_port_hdr);
2979 case NETMAP_REQ_VALE_NEWIF:
2980 return sizeof(struct nmreq_vale_newif);
2981 case NETMAP_REQ_VALE_DELIF:
2982 case NETMAP_REQ_SYNC_KLOOP_STOP:
2983 case NETMAP_REQ_CSB_ENABLE:
2985 case NETMAP_REQ_VALE_POLLING_ENABLE:
2986 case NETMAP_REQ_VALE_POLLING_DISABLE:
2987 return sizeof(struct nmreq_vale_polling);
2988 case NETMAP_REQ_POOLS_INFO_GET:
2989 return sizeof(struct nmreq_pools_info);
2990 case NETMAP_REQ_SYNC_KLOOP_START:
2991 return sizeof(struct nmreq_sync_kloop_start);
2997 nmreq_opt_size_by_type(uint32_t nro_reqtype, uint64_t nro_size)
2999 size_t rv = sizeof(struct nmreq_option);
3000 #ifdef NETMAP_REQ_OPT_DEBUG
3001 if (nro_reqtype & NETMAP_REQ_OPT_DEBUG)
3002 return (nro_reqtype & ~NETMAP_REQ_OPT_DEBUG);
3003 #endif /* NETMAP_REQ_OPT_DEBUG */
3004 switch (nro_reqtype) {
3006 case NETMAP_REQ_OPT_EXTMEM:
3007 rv = sizeof(struct nmreq_opt_extmem);
3009 #endif /* WITH_EXTMEM */
3010 case NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS:
3014 case NETMAP_REQ_OPT_CSB:
3015 rv = sizeof(struct nmreq_opt_csb);
3017 case NETMAP_REQ_OPT_SYNC_KLOOP_MODE:
3018 rv = sizeof(struct nmreq_opt_sync_kloop_mode);
3021 /* subtract the common header */
3022 return rv - sizeof(struct nmreq_option);
3026 * nmreq_copyin: create an in-kernel version of the request.
3028 * We build the following data structure:
3030 * hdr -> +-------+ buf
3031 * | | +---------------+
3032 * +-------+ |usr body ptr |
3033 * |options|-. +---------------+
3034 * +-------+ | |usr options ptr|
3035 * |body |--------->+---------------+
3037 * | | copy of body |
3039 * | +---------------+
3041 * | +---------------+
3043 * | | +---------------+ |
3045 * | | | +---------------+ \ option table
3046 * | | | | ... | / indexed by option
3047 * | | | +---------------+ | type
3049 * | | | +---------------+/
3050 * | | | |usr next ptr 1 |
3051 * `-|----->+---------------+
3052 * | | | copy of opt 1 |
3054 * | | .-| nro_next |
3055 * | | | +---------------+
3056 * | | | |usr next ptr 2 |
3057 * | `-`>+---------------+
3058 * | | copy of opt 2 |
3061 * | | +---------------+
3065 * `----->+---------------+
3066 * | |usr next ptr n |
3067 * `>+---------------+
3073 * The options and body fields of the hdr structure are overwritten
3074 * with in-kernel valid pointers inside the buf. The original user
3075 * pointers are saved in the buf and restored on copyout.
3076 * The list of options is copied and the pointers adjusted. The
3077 * original pointers are saved right before the option they belong to.
3079 * The option table has an entry for every available option. Entries
3080 * for options that have not been passed contain NULL.
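*
* This makes option lookup O(1): e.g. the NETMAP_REQ_REGISTER path
* above retrieves the (single) CSB option with
*
*	opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);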
3085 nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user)
3087 size_t rqsz, optsz, bufsz;
3089 char *ker = NULL, *p;
3090 struct nmreq_option **next, *src, **opt_tab;
3091 struct nmreq_option buf;
3094 if (hdr->nr_reserved) {
3096 nm_prerr("nr_reserved must be zero");
3100 if (!nr_body_is_user)
3103 hdr->nr_reserved = nr_body_is_user;
3105 /* compute the total size of the buffer */
3106 rqsz = nmreq_size_by_type(hdr->nr_reqtype);
3107 if (rqsz > NETMAP_REQ_MAXSIZE) {
3111 if ((rqsz && hdr->nr_body == (uintptr_t)NULL) ||
3112 (!rqsz && hdr->nr_body != (uintptr_t)NULL)) {
3113 /* Request body expected, but not found; or
3114 * request body found but unexpected. */
3116 nm_prerr("nr_body expected but not found, or vice versa");
3121 bufsz = 2 * sizeof(void *) + rqsz +
3122 NETMAP_REQ_OPT_MAX * sizeof(opt_tab);
3123 /* compute the size of the buf below the option table.
3124 * It must contain a copy of every received option structure.
3125 * For every option we also need to store a copy of the user
3129 for (src = (struct nmreq_option *)(uintptr_t)hdr->nr_options; src;
3130 src = (struct nmreq_option *)(uintptr_t)buf.nro_next)
3132 error = copyin(src, &buf, sizeof(*src));
3135 optsz += sizeof(*src);
3136 optsz += nmreq_opt_size_by_type(buf.nro_reqtype, buf.nro_size);
3137 if (rqsz + optsz > NETMAP_REQ_MAXSIZE) {
3141 bufsz += sizeof(void *);
3145 ker = nm_os_malloc(bufsz);
3150 p = ker; /* write pointer into the buffer */
3152 /* make a copy of the user pointers */
3153 ptrs = (uint64_t*)p;
3154 *ptrs++ = hdr->nr_body;
3155 *ptrs++ = hdr->nr_options;
3159 error = copyin((void *)(uintptr_t)hdr->nr_body, p, rqsz);
3162 /* overwrite the user pointer with the in-kernel one */
3163 hdr->nr_body = (uintptr_t)p;
3165 /* start of the options table */
3166 opt_tab = (struct nmreq_option **)p;
3167 p += sizeof(opt_tab) * NETMAP_REQ_OPT_MAX;
3169 /* copy the options */
3170 next = (struct nmreq_option **)&hdr->nr_options;
3173 struct nmreq_option *opt;
3175 /* copy the option header */
3176 ptrs = (uint64_t *)p;
3177 opt = (struct nmreq_option *)(ptrs + 1);
3178 error = copyin(src, opt, sizeof(*src));
3181 /* make a copy of the user next pointer */
3182 *ptrs = opt->nro_next;
3183 /* overwrite the user pointer with the in-kernel one */
3186 /* initialize the option as not supported.
3187 * Recognized options will update this field.
3189 opt->nro_status = EOPNOTSUPP;
3191 /* check for invalid types */
3192 if (opt->nro_reqtype < 1) {
3194 nm_prinf("invalid option type: %u", opt->nro_reqtype);
3195 opt->nro_status = EINVAL;
3200 if (opt->nro_reqtype >= NETMAP_REQ_OPT_MAX) {
3201 /* opt->nro_status is already EOPNOTSUPP */
3206 /* if the type is valid, index the option in the table
3207 * unless it is a duplicate.
3209 if (opt_tab[opt->nro_reqtype] != NULL) {
3211 nm_prinf("duplicate option: %u", opt->nro_reqtype);
3212 opt->nro_status = EINVAL;
3213 opt_tab[opt->nro_reqtype]->nro_status = EINVAL;
3217 opt_tab[opt->nro_reqtype] = opt;
3219 p = (char *)(opt + 1);
3221 /* copy the option body */
3222 optsz = nmreq_opt_size_by_type(opt->nro_reqtype,
3225 /* the option body follows the option header */
3226 error = copyin(src + 1, p, optsz);
3233 /* move to next option */
3234 next = (struct nmreq_option **)&opt->nro_next;
3238 nmreq_copyout(hdr, error);
3242 ptrs = (uint64_t *)ker;
3243 hdr->nr_body = *ptrs++;
3244 hdr->nr_options = *ptrs++;
3245 hdr->nr_reserved = 0;
3252 nmreq_copyout(struct nmreq_header *hdr, int rerror)
3254 struct nmreq_option *src, *dst;
3255 void *ker = (void *)(uintptr_t)hdr->nr_body, *bufstart;
3260 if (!hdr->nr_reserved)
3263 /* restore the user pointers in the header */
3264 ptrs = (uint64_t *)ker - 2;
3266 hdr->nr_body = *ptrs++;
3267 src = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3268 hdr->nr_options = *ptrs;
3272 bodysz = nmreq_size_by_type(hdr->nr_reqtype);
3273 error = copyout(ker, (void *)(uintptr_t)hdr->nr_body, bodysz);
3280 /* copy the options */
3281 dst = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3286 /* restore the user pointer */
3287 next = src->nro_next;
3288 ptrs = (uint64_t *)src - 1;
3289 src->nro_next = *ptrs;
3291 /* always copy the option header */
3292 error = copyout(src, dst, sizeof(*src));
3298 /* copy the option body only if there was no error */
3299 if (!rerror && !src->nro_status) {
3300 optsz = nmreq_opt_size_by_type(src->nro_reqtype,
3303 error = copyout(src + 1, dst + 1, optsz);
3310 src = (struct nmreq_option *)(uintptr_t)next;
3311 dst = (struct nmreq_option *)(uintptr_t)*ptrs;
3316 hdr->nr_reserved = 0;
3317 nm_os_free(bufstart);
3321 struct nmreq_option *
3322 nmreq_getoption(struct nmreq_header *hdr, uint16_t reqtype)
3324 struct nmreq_option **opt_tab;
3326 if (!hdr->nr_options)
3329 opt_tab = (struct nmreq_option **)((uintptr_t)hdr->nr_options) -
3330 (NETMAP_REQ_OPT_MAX + 1);
3331 return opt_tab[reqtype];
3335 nmreq_checkoptions(struct nmreq_header *hdr)
3337 struct nmreq_option *opt;
3338 /* return error if there is still any option
3339 * marked as not supported
3342 for (opt = (struct nmreq_option *)(uintptr_t)hdr->nr_options; opt;
3343 opt = (struct nmreq_option *)(uintptr_t)opt->nro_next)
3344 if (opt->nro_status == EOPNOTSUPP)
3351 * select(2) and poll(2) handlers for the "netmap" device.
3353 * Can be called for one or more queues.
3354 * Return the event mask corresponding to ready events.
3355 * If there are no ready events (and 'sr' is not NULL), do a
3356 * selrecord on either individual selinfo or on the global one.
3357 * Device-dependent parts (locking and sync of tx/rx rings)
3358 * are done through callbacks.
3360 * On Linux, the arguments are really pwait, the poll table, and 'td' is struct file *.
3361 * The first one is remapped to pwait as selrecord() uses the name as an argument.
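*
* From userspace, a typical wait for RX traffic is simply (sketch):
*
*	struct pollfd pfd = { .fd = fd, .events = POLLIN };
*	poll(&pfd, 1, -1);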
3365 netmap_poll(struct netmap_priv_d *priv, int events, NM_SELRECORD_T *sr)
3367 struct netmap_adapter *na;
3368 struct netmap_kring *kring;
3369 struct netmap_ring *ring;
3370 u_int i, want[NR_TXRX], revents = 0;
3371 NM_SELINFO_T *si[NR_TXRX];
3372 #define want_tx want[NR_TX]
3373 #define want_rx want[NR_RX]
3374 struct mbq q; /* packets from RX hw queues to host stack */
3377 * In order to avoid nested locks, we need to "double check"
3378 * txsync and rxsync if we decide to do a selrecord().
3379 * retry_tx (and retry_rx, later) prevent looping forever.
3381 int retry_tx = 1, retry_rx = 1;
3383 /* Transparent mode: send_down is 1 if we have found some
3384 * packets to forward (host RX ring --> NIC) during the rx
3385 * scan and we have not sent them down to the NIC yet.
3386 * Transparent mode requires binding all rings to a single file descriptor.
3390 int sync_flags = priv->np_sync_flags;
3394 if (unlikely(priv->np_nifp == NULL)) {
3397 mb(); /* make sure following reads are not from cache */
3401 if (unlikely(!nm_netmap_on(na)))
3404 if (unlikely(priv->np_csb_atok_base)) {
3405 nm_prerr("Invalid poll in CSB mode");
3409 if (netmap_debug & NM_DEBUG_ON)
3410 nm_prinf("device %s events 0x%x", na->name, events);
3411 want_tx = events & (POLLOUT | POLLWRNORM);
3412 want_rx = events & (POLLIN | POLLRDNORM);
3415 * If the card has more than one queue AND the file descriptor is
3416 * bound to all of them, we sleep on the "global" selinfo, otherwise
3417 * we sleep on individual selinfo (FreeBSD only allows two selinfo's
3418 * per file descriptor).
3419 * The interrupt routine in the driver wakes one or the other
3420 * (or both) depending on which clients are active.
3422 * rxsync() is only called if we run out of buffers on a POLLIN.
3423 * txsync() is called if we run out of buffers on POLLOUT, or
3424 * there are pending packets to send. The latter can be disabled
3425 * passing NETMAP_NO_TX_POLL in the NIOCREG call.
3427 si[NR_RX] = priv->np_si[NR_RX];
3428 si[NR_TX] = priv->np_si[NR_TX];
3432 * We start with a lock free round which is cheap if we have
3433 * slots available. If this fails, then lock and call the sync
3434 * routines. We can't do this on Linux, as the contract says
3435 * that we must call nm_os_selrecord() unconditionally.
3438 const enum txrx t = NR_TX;
3439 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3440 kring = NMR(na, t)[i];
3441 if (kring->ring->cur != kring->ring->tail) {
3442 /* Some unseen TX space is available, so
3443 * we don't need to run txsync. */
3451 const enum txrx t = NR_RX;
3452 int rxsync_needed = 0;
3454 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3455 kring = NMR(na, t)[i];
3456 if (kring->ring->cur == kring->ring->tail
3457 || kring->rhead != kring->ring->head) {
3458 /* There are no unseen packets on this ring,
3459 * or there are some buffers to be returned
3460 * to the netmap port. We therefore go ahead
3461 * and run rxsync. */
3466 if (!rxsync_needed) {
3474 /* The selrecord must be unconditional on Linux. */
3475 nm_os_selrecord(sr, si[NR_RX]);
3476 nm_os_selrecord(sr, si[NR_TX]);
3480 * If we want to push packets out (priv->np_txpoll) or
3481 * want_tx is still set, we must issue txsync calls
3482 * (on all rings, so that the tx rings do not stall).
3483 * Fortunately, normal tx mode has np_txpoll set.
3485 if (priv->np_txpoll || want_tx) {
3487 * The first round checks if anyone is ready, if not
3488 * do a selrecord and another round to handle races.
3489 * want_tx goes to 0 if any space is found, and is
3490 * used to skip rings with no pending transmissions.
3493 for (i = priv->np_qfirst[NR_TX]; i < priv->np_qlast[NR_TX]; i++) {
3496 kring = na->tx_rings[i];
3500 * Don't try to txsync this TX ring if we already found some
3501 * space in some of the TX rings (want_tx == 0) and there are no
3502 * TX slots in this ring that need to be flushed to the NIC
3505 if (!send_down && !want_tx && ring->head == kring->nr_hwcur)
3508 if (nm_kr_tryget(kring, 1, &revents))
3511 if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3512 netmap_ring_reinit(kring);
3515 if (kring->nm_sync(kring, sync_flags))
3518 nm_sync_finalize(kring);
3522 * If we found new slots, notify potential
3523 * listeners on the same ring.
3524 * Since we just did a txsync, look at the copies
3525 * of cur,tail in the kring.
3527 found = kring->rcur != kring->rtail;
3529 if (found) { /* notify other listeners */
3533 kring->nm_notify(kring, 0);
3537 /* if there were any packets to forward, we must have handled them by now */
3539 if (want_tx && retry_tx && sr) {
3541 nm_os_selrecord(sr, si[NR_TX]);
3549 * If want_rx is still set, scan the receive rings.
3550 * Do it on all rings because otherwise we starve.
3553 /* two rounds here for race avoidance */
3555 for (i = priv->np_qfirst[NR_RX]; i < priv->np_qlast[NR_RX]; i++) {
3558 kring = na->rx_rings[i];
3561 if (unlikely(nm_kr_tryget(kring, 1, &revents)))
3564 if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3565 netmap_ring_reinit(kring);
3568 /* now we can use kring->rcur, rtail */
3571 * transparent mode support: collect packets from
3572 * hw rxring(s) that have been released by the user
3574 if (nm_may_forward_up(kring)) {
3575 netmap_grab_packets(kring, &q, netmap_fwd);
3578 /* Clear the NR_FORWARD flag anyway, it may be set by
3579 * the nm_sync() below, and only for the host RX ring (see
3580 * netmap_rxsync_from_host()). */
3581 kring->nr_kflags &= ~NR_FORWARD;
3582 if (kring->nm_sync(kring, sync_flags))
3585 nm_sync_finalize(kring);
3586 send_down |= (kring->nr_kflags & NR_FORWARD);
3587 ring_timestamp_set(ring);
3588 found = kring->rcur != kring->rtail;
3594 kring->nm_notify(kring, 0);
3600 if (retry_rx && sr) {
3601 nm_os_selrecord(sr, si[NR_RX]);
3604 if (send_down || retry_rx) {
3607 goto flush_tx; /* and retry_rx */
3614 * Transparent mode: released bufs (i.e. between kring->nr_hwcur and
3615 * ring->head) marked with NS_FORWARD on hw rx rings are passed up
3616 * to the host stack.
3620 netmap_send_up(na->ifp, &q);
3629 nma_intr_enable(struct netmap_adapter *na, int onoff)
3631 bool changed = false;
3636 for (i = 0; i < nma_get_nrings(na, t); i++) {
3637 struct netmap_kring *kring = NMR(na, t)[i];
3638 int on = !(kring->nr_kflags & NKR_NOINTR);
3640 if (!!onoff != !!on) {
3644 kring->nr_kflags &= ~NKR_NOINTR;
3646 kring->nr_kflags |= NKR_NOINTR;
3652 return 0; /* nothing to do */
3656 nm_prerr("Cannot %s interrupts for %s", onoff ? "enable" : "disable",
3661 na->nm_intr(na, onoff);
3667 /*-------------------- driver support routines -------------------*/
3669 /* default notify callback */
3671 netmap_notify(struct netmap_kring *kring, int flags)
3673 struct netmap_adapter *na = kring->notify_na;
3674 enum txrx t = kring->tx;
3676 nm_os_selwakeup(&kring->si);
3677 /* optimization: avoid a wake up on the global
3678 * queue if nobody has registered for more than one ring. */
3681 if (na->si_users[t] > 0)
3682 nm_os_selwakeup(&na->si[t]);
3684 return NM_IRQ_COMPLETED;
3687 /* called by all routines that create netmap_adapters.
3688 * provide some defaults and get a reference to the memory allocator. */
3692 netmap_attach_common(struct netmap_adapter *na)
3694 if (!na->rx_buf_maxsize) {
3695 /* Set a conservative default (larger is safer). */
3696 na->rx_buf_maxsize = PAGE_SIZE;
3700 if (na->na_flags & NAF_HOST_RINGS && na->ifp) {
3701 na->if_input = na->ifp->if_input; /* for netmap_send_up */
3703 na->pdev = na; /* make sure netmap_mem_map() is called */
3704 #endif /* __FreeBSD__ */
3705 if (na->na_flags & NAF_HOST_RINGS) {
3706 if (na->num_host_rx_rings == 0)
3707 na->num_host_rx_rings = 1;
3708 if (na->num_host_tx_rings == 0)
3709 na->num_host_tx_rings = 1;
3711 if (na->nm_krings_create == NULL) {
3712 /* we assume that we have been called by a driver,
3713 * since other port types all provide their own
3716 na->nm_krings_create = netmap_hw_krings_create;
3717 na->nm_krings_delete = netmap_hw_krings_delete;
3719 if (na->nm_notify == NULL)
3720 na->nm_notify = netmap_notify;
3723 if (na->nm_mem == NULL) {
3724 /* use the global allocator */
3725 na->nm_mem = netmap_mem_get(&nm_mem);
3728 if (na->nm_bdg_attach == NULL)
3729 /* no special nm_bdg_attach callback. On VALE
3730 * attach, we need to interpose a bwrap
3732 na->nm_bdg_attach = netmap_default_bdg_attach;
3738 /* Wrapper for the register callback provided by netmap-enabled hardware drivers.
3740 * nm_iszombie(na) means that the driver module has been
3741 * unloaded, so we cannot call into it.
3742 * nm_os_ifnet_lock() must guarantee mutual exclusion with
3746 netmap_hw_reg(struct netmap_adapter *na, int onoff)
3748 struct netmap_hw_adapter *hwna =
3749 (struct netmap_hw_adapter*)na;
3754 if (nm_iszombie(na)) {
3757 } else if (na != NULL) {
3758 na->na_flags &= ~NAF_NETMAP_ON;
3763 error = hwna->nm_hw_register(na, onoff);
3766 nm_os_ifnet_unlock();
3772 netmap_hw_dtor(struct netmap_adapter *na)
3774 if (na->ifp == NULL)
3777 NM_DETACH_NA(na->ifp);
3782 * Allocate a netmap_adapter object, and initialize it from the
3783 * 'arg' passed by the driver on attach.
3784 * We allocate a block of memory of 'size' bytes, which has room
3785 * for struct netmap_adapter plus additional room private to the caller.
3787 * Return 0 on success, ENOMEM otherwise.
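*
* A native driver typically fills a netmap_adapter on the stack in
* its attach routine and calls netmap_attach(); a sketch (the foo_*
* callbacks and the 'sc' softc fields are hypothetical):
*
*	struct netmap_adapter na;
*
*	bzero(&na, sizeof(na));
*	na.ifp = sc->ifp;
*	na.num_tx_desc = sc->num_tx_desc;
*	na.num_rx_desc = sc->num_rx_desc;
*	na.num_tx_rings = na.num_rx_rings = sc->num_queues;
*	na.nm_register = foo_netmap_reg;
*	na.nm_txsync = foo_netmap_txsync;
*	na.nm_rxsync = foo_netmap_rxsync;
*	netmap_attach(&na);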
3790 netmap_attach_ext(struct netmap_adapter *arg, size_t size, int override_reg)
3792 struct netmap_hw_adapter *hwna = NULL;
3793 struct ifnet *ifp = NULL;
3795 if (size < sizeof(struct netmap_hw_adapter)) {
3796 if (netmap_debug & NM_DEBUG_ON)
3797 nm_prerr("Invalid netmap adapter size %d", (int)size);
3801 if (arg == NULL || arg->ifp == NULL) {
3802 if (netmap_debug & NM_DEBUG_ON)
3803 nm_prerr("either arg or arg->ifp is NULL");
3807 if (arg->num_tx_rings == 0 || arg->num_rx_rings == 0) {
3808 if (netmap_debug & NM_DEBUG_ON)
3809 nm_prerr("%s: invalid rings tx %d rx %d",
3810 arg->name, arg->num_tx_rings, arg->num_rx_rings);
3815 if (NM_NA_CLASH(ifp)) {
3816 /* If NA(ifp) is not null but there is no valid netmap
3817 * adapter it means that someone else is using the same
3818 * pointer (e.g. ax25_ptr on linux). This happens for
3819 * instance when also PF_RING is in use. */
3820 nm_prerr("Error: netmap adapter hook is busy");
3824 hwna = nm_os_malloc(size);
3828 hwna->up.na_flags |= NAF_HOST_RINGS | NAF_NATIVE;
3829 strlcpy(hwna->up.name, ifp->if_xname, sizeof(hwna->up.name));
3831 hwna->nm_hw_register = hwna->up.nm_register;
3832 hwna->up.nm_register = netmap_hw_reg;
3834 if (netmap_attach_common(&hwna->up)) {
3838 netmap_adapter_get(&hwna->up);
3840 NM_ATTACH_NA(ifp, &hwna->up);
3842 nm_os_onattach(ifp);
3844 if (arg->nm_dtor == NULL) {
3845 hwna->up.nm_dtor = netmap_hw_dtor;
3848 if_printf(ifp, "netmap queues/slots: TX %d/%d, RX %d/%d\n",
3849 hwna->up.num_tx_rings, hwna->up.num_tx_desc,
3850 hwna->up.num_rx_rings, hwna->up.num_rx_desc);
3854 nm_prerr("fail, arg %p ifp %p na %p", arg, ifp, hwna);
3855 return (hwna ? EINVAL : ENOMEM);
3860 netmap_attach(struct netmap_adapter *arg)
3862 return netmap_attach_ext(arg, sizeof(struct netmap_hw_adapter),
3863 1 /* override nm_reg */);
3868 NM_DBG(netmap_adapter_get)(struct netmap_adapter *na)
3874 refcount_acquire(&na->na_refcount);
3878 /* returns 1 iff the netmap_adapter is destroyed */
3880 NM_DBG(netmap_adapter_put)(struct netmap_adapter *na)
3885 if (!refcount_release(&na->na_refcount))
3891 if (na->tx_rings) { /* XXX should not happen */
3892 if (netmap_debug & NM_DEBUG_ON)
3893 nm_prerr("freeing leftover tx_rings");
3894 na->nm_krings_delete(na);
3896 netmap_pipe_dealloc(na);
3898 netmap_mem_put(na->nm_mem);
3899 bzero(na, sizeof(*na));
3905 /* nm_krings_create callback for all hardware native adapters */
3907 netmap_hw_krings_create(struct netmap_adapter *na)
3909 int ret = netmap_krings_create(na, 0);
3911 /* initialize the mbq for the sw rx ring */
3912 u_int lim = netmap_real_rings(na, NR_RX), i;
3913 for (i = na->num_rx_rings; i < lim; i++) {
3914 mbq_safe_init(&NMR(na, NR_RX)[i]->rx_queue);
3916 nm_prdis("initialized sw rx queue %d", na->num_rx_rings);
3924 * Called on module unload by the netmap-enabled drivers
3927 netmap_detach(struct ifnet *ifp)
3929 struct netmap_adapter *na = NA(ifp);
3935 netmap_set_all_rings(na, NM_KR_LOCKED);
3937 * if the netmap adapter is not native, somebody
3938 * changed it, so we cannot release it here.
3939 * The NAF_ZOMBIE flag will notify the new owner that
3940 * the driver is gone.
3942 if (!(na->na_flags & NAF_NATIVE) || !netmap_adapter_put(na)) {
3943 na->na_flags |= NAF_ZOMBIE;
3945 /* give active users a chance to notice that NAF_ZOMBIE has been
3946 * turned on, so that they can stop and return an error to userspace.
3947 * Note that this becomes a NOP if there are no active users and,
3948 * therefore, the put() above has deleted the na, since now NA(ifp) is
3951 netmap_enable_all_rings(ifp);
3957 * Intercept packets from the network stack and pass them
3958 * to netmap as incoming packets on the 'software' ring.
3960 * We only store packets in a bounded mbq and then copy them
3961 * in the relevant rxsync routine.
3963 * We rely on the OS to make sure that the ifp and na do not go
3964 * away (typically the caller checks for IFF_DRV_RUNNING or the like).
3965 * In nm_register() or whenever there is a reinitialization,
3966 * we make sure to make the mode change visible here.
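*
* Note (inferred from nm_set_native_flags() below, not a separate
* contract): on FreeBSD this function is typically installed as the
* interface's if_transmit hook while the port is in netmap mode, so
* host-stack output is diverted here.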
3969 netmap_transmit(struct ifnet *ifp, struct mbuf *m)
3971 struct netmap_adapter *na = NA(ifp);
3972 struct netmap_kring *kring, *tx_kring;
3973 u_int len = MBUF_LEN(m);
3974 u_int error = ENOBUFS;
3981 if (i >= na->num_host_rx_rings) {
3982 i = i % na->num_host_rx_rings;
3984 kring = NMR(na, NR_RX)[nma_get_nrings(na, NR_RX) + i];
3986 // XXX [Linux] we do not need this lock
3987 // if we follow the down/configure/up protocol -gl
3988 // mtx_lock(&na->core_lock);
3990 if (!nm_netmap_on(na)) {
3991 nm_prerr("%s not in netmap mode anymore", na->name);
3997 if (txr >= na->num_tx_rings) {
3998 txr %= na->num_tx_rings;
4000 tx_kring = NMR(na, NR_TX)[txr];
4002 if (tx_kring->nr_mode == NKR_NETMAP_OFF) {
4003 return MBUF_TRANSMIT(na, ifp, m);
4006 q = &kring->rx_queue;
4008 // XXX reconsider long packets if we handle fragments
4009 if (len > NETMAP_BUF_SIZE(na)) { /* too long for us */
4010 nm_prerr("%s from_host, drop packet size %d > %d", na->name,
4011 len, NETMAP_BUF_SIZE(na));
4015 if (!netmap_generic_hwcsum) {
4016 if (nm_os_mbuf_has_csum_offld(m)) {
4017 nm_prlim(1, "%s drop mbuf that needs checksum offload", na->name);
4022 if (nm_os_mbuf_has_seg_offld(m)) {
4023 nm_prlim(1, "%s drop mbuf that needs generic segmentation offload", na->name);
4028 ETHER_BPF_MTAP(ifp, m);
4029 #endif /* __FreeBSD__ */
4031 /* protect against netmap_rxsync_from_host(), netmap_sw_to_nic()
4032 * and maybe other instances of netmap_transmit (the latter
4033 * not possible on Linux).
4034 * We enqueue the mbuf only if we are sure there is going to be
4035 * enough room in the host RX ring, otherwise we drop it.
4039 busy = kring->nr_hwtail - kring->nr_hwcur;
4041 busy += kring->nkr_num_slots;
4042 if (busy + mbq_len(q) >= kring->nkr_num_slots - 1) {
4043 nm_prlim(2, "%s full hwcur %d hwtail %d qlen %d", na->name,
4044 kring->nr_hwcur, kring->nr_hwtail, mbq_len(q));
4047 nm_prdis(2, "%s %d bufs in queue", na->name, mbq_len(q));
4048 /* notify outside the lock */
4057 /* unconditionally wake up listeners */
4058 kring->nm_notify(kring, 0);
4059 /* this is normally netmap_notify(), but for NICs
4060 * connected to a bridge it is netmap_bwrap_intr_notify(),
4061 * which possibly forwards the frames through the switch
4069 * netmap_reset() is called by the driver routines when reinitializing
4070 * a ring. The driver is in charge of locking to protect the kring.
4071 * If native netmap mode is not set just return NULL.
4072 * If native netmap mode is set, in particular, we have to set nr_mode to NKR_NETMAP_ON.
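*
* A driver reinit path would use it roughly as follows (a sketch;
* foo_refill(), txr and ring_nr are hypothetical):
*
*	slot = netmap_reset(na, NR_TX, ring_nr, 0);
*	if (slot != NULL)
*		foo_refill(txr, slot);	// reload hw ring from netmap buffers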
4075 struct netmap_slot *
4076 netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n,
4079 struct netmap_kring *kring;
4082 if (!nm_native_on(na)) {
4083 nm_prdis("interface not in native netmap mode");
4084 return NULL; /* nothing to reinitialize */
4087 /* XXX note- in the new scheme, we are not guaranteed to be
4088 * under lock (e.g. when called on a device reset).
4089 * In this case, we should set a flag and not trust the
4090 * values too much. In practice: TODO
4091 * - set a RESET flag somewhere in the kring
4092 * - do the processing in a conservative way
4093 * - let the *sync() fixup at the end.
4096 if (n >= na->num_tx_rings)
4099 kring = na->tx_rings[n];
4101 if (kring->nr_pending_mode == NKR_NETMAP_OFF) {
4102 kring->nr_mode = NKR_NETMAP_OFF;
4106 // XXX check whether we should use hwcur or rcur
4107 new_hwofs = kring->nr_hwcur - new_cur;
4109 if (n >= na->num_rx_rings)
4111 kring = na->rx_rings[n];
4113 if (kring->nr_pending_mode == NKR_NETMAP_OFF) {
4114 kring->nr_mode = NKR_NETMAP_OFF;
4118 new_hwofs = kring->nr_hwtail - new_cur;
4120 lim = kring->nkr_num_slots - 1;
4121 if (new_hwofs > lim)
4122 new_hwofs -= lim + 1;
4124 /* Always set the new offset value and realign the ring. */
4125 if (netmap_debug & NM_DEBUG_ON)
4126 nm_prinf("%s %s%d hwofs %d -> %d, hwtail %d -> %d",
4128 tx == NR_TX ? "TX" : "RX", n,
4129 kring->nkr_hwofs, new_hwofs,
4131 tx == NR_TX ? lim : kring->nr_hwtail);
4132 kring->nkr_hwofs = new_hwofs;
4134 kring->nr_hwtail = kring->nr_hwcur + lim;
4135 if (kring->nr_hwtail > lim)
4136 kring->nr_hwtail -= lim + 1;
4140 * Wakeup on the individual and global selwait
4141 * We do the wakeup here, but the ring is not yet reconfigured.
4142 * However, we are under lock so there are no races.
4144 kring->nr_mode = NKR_NETMAP_ON;
4145 kring->nm_notify(kring, 0);
4146 return kring->ring->slot;
4151 * Dispatch rx/tx interrupts to the netmap rings.
4153 * "work_done" is non-null on the RX path, NULL for the TX path.
4154 * We rely on the OS to make sure that there is only one active
4155 * instance per queue, and that there is appropriate locking.
4157 * The 'notify' routine depends on what the ring is attached to.
4158 * - for a netmap file descriptor, do a selwakeup on the individual
4159 * waitqueue, plus one on the global one if needed
4160 * (see netmap_notify)
4161 * - for a nic connected to a switch, call the proper forwarding routine
4162 * (see netmap_bwrap_intr_notify)
4165 netmap_common_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
4167 struct netmap_kring *kring;
4168 enum txrx t = (work_done ? NR_RX : NR_TX);
4170 q &= NETMAP_RING_MASK;
4172 if (netmap_debug & (NM_DEBUG_RXINTR|NM_DEBUG_TXINTR)) {
4173 nm_prlim(5, "received %s queue %d", work_done ? "RX" : "TX" , q);
4176 if (q >= nma_get_nrings(na, t))
4177 return NM_IRQ_PASS; // not a physical queue
4179 kring = NMR(na, t)[q];
4181 if (kring->nr_mode == NKR_NETMAP_OFF) {
4186 kring->nr_kflags |= NKR_PENDINTR; // XXX atomic ?
4187 *work_done = 1; /* do not fire napi again */
4190 return kring->nm_notify(kring, 0);
4195 * Default functions to handle rx/tx interrupts from a physical device.
4196 * "work_done" is non-null on the RX path, NULL for the TX path.
4198 * If the card is not in netmap mode, simply return NM_IRQ_PASS,
4199 * so that the caller proceeds with regular processing.
4200 * Otherwise call netmap_common_irq().
4202 * If the card is connected to a netmap file descriptor,
4203 * do a selwakeup on the individual queue, plus one on the global one
4204 * if needed (multiqueue card _and_ there are multiqueue listeners),
4205 * and return NM_IRQ_COMPLETED.
4207 * Finally, if called on rx from an interface connected to a switch,
4208 * calls the proper forwarding routine.
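*
* Typical driver usage at the top of an RX interrupt handler, as a
* sketch (rxr->me standing for the queue index; names hypothetical):
*
*	if (netmap_rx_irq(ifp, rxr->me, &work_done) != NM_IRQ_PASS)
*		return;	// handled by netmap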
4211 netmap_rx_irq(struct ifnet *ifp, u_int q, u_int *work_done)
4213 struct netmap_adapter *na = NA(ifp);
4216 * XXX emulated netmap mode sets NAF_SKIP_INTR so
4217 * we still use the regular driver even though the previous
4218 * check fails. It is unclear whether we should use
4219 * nm_native_on() here.
4221 if (!nm_netmap_on(na))
4224 if (na->na_flags & NAF_SKIP_INTR) {
4225 nm_prdis("use regular interrupt");
4229 return netmap_common_irq(na, q, work_done);
4232 /* set/clear native flags and if_transmit/netdev_ops */
4234 nm_set_native_flags(struct netmap_adapter *na)
4236 struct ifnet *ifp = na->ifp;
4238 /* We do the setup for intercepting packets only if we are the
4239 * first user of this adapter. */
4240 if (na->active_fds > 0) {
4244 na->na_flags |= NAF_NETMAP_ON;
4246 nm_update_hostrings_mode(na);
4250 nm_clear_native_flags(struct netmap_adapter *na)
4252 struct ifnet *ifp = na->ifp;
4254 /* We undo the setup for intercepting packets only if we are the
4255 * last user of this adapter. */
4256 if (na->active_fds > 0) {
4260 nm_update_hostrings_mode(na);
4263 na->na_flags &= ~NAF_NETMAP_ON;
4267 netmap_krings_mode_commit(struct netmap_adapter *na, int onoff)
4274 for (i = 0; i < netmap_real_rings(na, t); i++) {
4275 struct netmap_kring *kring = NMR(na, t)[i];
4277 if (onoff && nm_kring_pending_on(kring))
4278 kring->nr_mode = NKR_NETMAP_ON;
4279 else if (!onoff && nm_kring_pending_off(kring))
4280 kring->nr_mode = NKR_NETMAP_OFF;
4286 * Module loader and unloader
4288 * netmap_init() creates the /dev/netmap device and initializes
4289 * all global variables. Returns 0 on success, errno on failure
4290 * (in practice there is no chance of failure)
4292 * netmap_fini() destroys everything.
4295 static struct cdev *netmap_dev; /* /dev/netmap character device. */
4296 extern struct cdevsw netmap_cdevsw;
4303 destroy_dev(netmap_dev);
4304 /* we assume that there are no longer netmap users */
4306 netmap_uninit_bridges();
4309 nm_prinf("netmap: unloaded module.");
4320 error = netmap_mem_init();
4324 * MAKEDEV_ETERNAL_KLD avoids an expensive check on syscalls
4325 * when the module is compiled in.
4326 * XXX could use make_dev_credv() to get error number
4328 netmap_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD,
4329 &netmap_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600,
4334 error = netmap_init_bridges();
4339 nm_os_vi_init_index();
4342 error = nm_os_ifnet_init();
4346 nm_prinf("netmap: loaded module");
4350 return (EINVAL); /* may be incorrect */