/*
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2011-2014 Matteo Landi
 * Copyright (C) 2011-2016 Luigi Rizzo
 * Copyright (C) 2011-2016 Giuseppe Lettieri
 * Copyright (C) 2011-2016 Vincenzo Maffione
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module supports memory mapped access to network devices,
 * see netmap(4).
 *
 * The module uses a large memory pool allocated by the kernel
 * and accessible as mmapped memory by multiple userspace threads/processes.
 * The memory pool contains packet buffers and "netmap rings",
 * i.e. user-accessible copies of the interface's queues.
 *
 * Access to the network card works like this:
 * 1. a process/thread issues one or more open() on /dev/netmap, to create
 *    select()able file descriptors on which events are reported.
 * 2. on each descriptor, the process issues an ioctl() to identify
 *    the interface that should report events to the file descriptor.
 * 3. on each descriptor, the process issues an mmap() request to
 *    map the shared memory region within the process' address space.
 *    The list of interesting queues is indicated by a location in
 *    the shared memory region.
 * 4. using the functions in the netmap(4) userspace API, a process
 *    can look up the occupation state of a queue, access memory buffers,
 *    and retrieve received packets or enqueue packets to transmit.
 * 5. using some ioctl()s the process can synchronize the userspace view
 *    of the queue with the actual status in the kernel. This includes both
 *    receiving the notification of new packets, and transmitting new
 *    packets on the output interface.
 * 6. select() or poll() can be used to wait for events on individual
 *    transmit or receive queues (or all queues for a given interface).
 *    A minimal sketch of these steps is shown below.
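 *
 * For illustration only, a minimal userspace sketch of steps 1-6 above,
 * using the nmreq API from <net/netmap_user.h>; error handling and cleanup
 * are omitted and the interface name "em0" is just a placeholder:
 *
 *	struct nmreq nmr = { .nr_version = NETMAP_API };
 *	struct pollfd pfd;
 *	int fd = open("/dev/netmap", O_RDWR);		// step 1
 *	strncpy(nmr.nr_name, "em0", sizeof(nmr.nr_name) - 1);
 *	ioctl(fd, NIOCREGIF, &nmr);			// step 2
 *	void *mem = mmap(NULL, nmr.nr_memsize,		// step 3
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct netmap_if *nifp = NETMAP_IF(mem, nmr.nr_offset);
 *	struct netmap_ring *txr = NETMAP_TXRING(nifp, 0);
 *	while (!nm_ring_empty(txr)) {			// step 4
 *		// fill txr->slot[txr->cur], then advance:
 *		txr->head = txr->cur = nm_ring_next(txr, txr->cur);
 *	}
 *	ioctl(fd, NIOCTXSYNC, NULL);			// step 5
 *	pfd.fd = fd; pfd.events = POLLOUT;
 *	poll(&pfd, 1, -1);				// step 6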
 *

		SYNCHRONIZATION (USER)

The netmap rings and data structures may be shared among multiple
user threads or even independent processes.
Any synchronization among those threads/processes is delegated
to the threads themselves. Only one thread at a time can be in
a system call on the same netmap ring. The OS does not enforce
this and only guarantees against system crashes in case of
invalid usage.

		LOCKING (INTERNAL)

Within the kernel, access to the netmap rings is protected as follows:

- a spinlock on each ring, to handle producer/consumer races on
  RX rings attached to the host stack (against multiple host
  threads writing from the host stack to the same ring),
  and on 'destination' rings attached to a VALE switch
  (i.e. RX rings in VALE ports, and TX rings in NIC/host ports),
  protecting multiple active senders for the same destination
- an atomic variable to guarantee that there is at most one
  instance of *_*xsync() on the ring at any time.
  For rings connected to user file
  descriptors, an atomic_test_and_set() protects this, and the
  lock on the ring is not actually used.
  For NIC RX rings connected to a VALE switch, an atomic_test_and_set()
  is also used to prevent multiple executions (the driver might indeed
  already guarantee this).
  For NIC TX rings connected to a VALE switch, the lock arbitrates
  access to the queue (both when allocating buffers and when pushing
  them out).
- *xsync() should be protected against initializations of the card.
  On FreeBSD most devices have the reset routine protected by
  a RING lock (ixgbe, igb, em) or core lock (re); lem is missing
  the RING protection on rx_reset(), this should be added.

  On linux there is an external lock on the tx path, which probably
  also arbitrates access to the reset routine. XXX to be revised

- a per-interface core_lock protecting access from the host stack
  while interfaces may be detached from netmap mode.
  XXX there should be no need for this lock if we detach the interfaces
  only while they are down.

--- VALE SWITCH ---

NMG_LOCK() serializes all modifications to switches and ports.
A switch cannot be deleted until all ports are gone.

For each switch, an SX lock (RWlock on linux) protects
deletion of ports. When configuring or deleting a new port, the
lock is acquired in exclusive mode (after holding NMG_LOCK).
When forwarding, the lock is acquired in shared mode (without NMG_LOCK).
The lock is held throughout the entire forwarding cycle,
during which the thread may incur a page fault.
Hence it is important that sleepable shared locks are used.

On the rx ring, the per-port lock is grabbed initially to reserve
a number of slots in the ring, then the lock is released,
packets are copied from source to destination, and then
the lock is acquired again and the receive ring is updated.
(A similar thing is done on the tx ring for NIC and host stack
ports attached to the switch; an illustrative sketch of this
two-phase pattern follows.)

 */
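
/*
 * Illustrative sketch (not the actual netmap code) of the two-phase
 * locking pattern described above for filling a destination rx ring:
 * reserve slots under the lock, copy without holding it, then publish
 * under the lock again. next_after() and copy_packets() are hypothetical
 * helpers, shown only to make the structure explicit.
 *
 *	lock(&dst->q_lock);
 *	first = dst->nkr_hwlease;		// reserve [first, first+n)
 *	dst->nkr_hwlease = next_after(first, n);
 *	unlock(&dst->q_lock);
 *
 *	copy_packets(src, dst, first, n);	// page faults allowed here
 *
 *	lock(&dst->q_lock);
 *	// advance hwtail past all completed leases, publish new slots
 *	unlock(&dst->q_lock);
 */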

/* --- internals ----
 *
 * Roadmap to the code that implements the above.
 *
 * > 1. a process/thread issues one or more open() on /dev/netmap, to create
 * >    select()able file descriptors on which events are reported.
 *
 *  	Internally, we allocate a netmap_priv_d structure, that will be
 *  	initialized on ioctl(NIOCREGIF). There is one netmap_priv_d
 *  	structure for each open().
 *
 *  	FreeBSD: see netmap_open() (netmap_freebsd.c)
 *  	linux:   see linux_netmap_open() (netmap_linux.c)
 *
 * > 2. on each descriptor, the process issues an ioctl() to identify
 * >    the interface that should report events to the file descriptor.
 *
 * 	Implemented by netmap_ioctl(), NIOCREGIF case, with nmr->nr_cmd==0.
 * 	Most important things happen in netmap_get_na() and
 * 	netmap_do_regif(), called from there. Additional details can be
 * 	found in the comments above those functions.
 *
 * 	In all cases, this action creates/takes-a-reference-to a
 * 	netmap_*_adapter describing the port, and allocates a netmap_if
 * 	and all necessary netmap rings, filling them with netmap buffers.
 *
 * 	In this phase, the sync callbacks for each ring are set (these are used
 * 	in steps 5 and 6 below). The callbacks depend on the type of adapter.
 * 	The adapter creation/initialization code puts them in the
 * 	netmap_adapter (fields na->nm_txsync and na->nm_rxsync). Then, they
 * 	are copied from there to the netmap_kring's during netmap_do_regif(), by
 * 	the nm_krings_create() callback. All the nm_krings_create callbacks
 * 	actually call netmap_krings_create() to perform this and the other
 * 	common stuff. netmap_krings_create() also takes care of the host rings,
 * 	if needed, by setting their sync callbacks appropriately.
 *
 * 	Additional actions depend on the kind of netmap_adapter that has been
 * 	registered:
 *
 * 	- netmap_hw_adapter:  	     [netmap.c]
 * 	     This is a system netdev/ifp with native netmap support.
 * 	     The ifp is detached from the host stack by redirecting:
 * 	       - transmissions (from the network stack) to netmap_transmit()
 * 	       - receive notifications to the nm_notify() callback for
 * 	         this adapter. The callback is normally netmap_notify(), unless
 * 	         the ifp is attached to a bridge using bwrap, in which case it
 * 	         is netmap_bwrap_intr_notify().
 *
 * 	- netmap_generic_adapter:      [netmap_generic.c]
 * 	      A system netdev/ifp without native netmap support.
 *
 * 	(the decision about native/non native support is taken in
 * 	 netmap_get_hw_na(), called by netmap_get_na())
 *
 * 	- netmap_vp_adapter 		[netmap_vale.c]
 * 	      Returned by netmap_get_bdg_na().
 * 	      This is a persistent or ephemeral VALE port. Ephemeral ports
 * 	      are created on the fly if they don't already exist, and are
 * 	      always attached to a bridge.
 * 	      Persistent VALE ports must be created separately, and are
 * 	      then attached like normal NICs. The NIOCREGIF we are examining
 * 	      will find them only if they had previously been created and
 * 	      attached (see VALE_CTL below).
 *
 * 	- netmap_pipe_adapter 	      [netmap_pipe.c]
 * 	      Returned by netmap_get_pipe_na().
 * 	      Both pipe ends are created, if they didn't already exist.
 *
 * 	- netmap_monitor_adapter      [netmap_monitor.c]
 * 	      Returned by netmap_get_monitor_na().
 * 	      If successful, the nm_sync callbacks of the monitored adapter
 * 	      will be intercepted by the returned monitor.
 *
 * 	- netmap_bwrap_adapter	      [netmap_vale.c]
 * 	      Cannot be obtained in this way, see VALE_CTL below
 *
 * 	linux: we first go through linux_netmap_ioctl() to
 * 	       adapt the FreeBSD interface to the linux one.
 *
 * > 3. on each descriptor, the process issues an mmap() request to
 * >    map the shared memory region within the process' address space.
 * >    The list of interesting queues is indicated by a location in
 * >    the shared memory region.
 *
 * 	FreeBSD: netmap_mmap_single (netmap_freebsd.c).
 * 	linux:   linux_netmap_mmap (netmap_linux.c).
 *
 * > 4. using the functions in the netmap(4) userspace API, a process
 * >    can look up the occupation state of a queue, access memory buffers,
 * >    and retrieve received packets or enqueue packets to transmit.
 *
 * 	these actions do not involve the kernel.
 *
 * > 5. using some ioctl()s the process can synchronize the userspace view
 * >    of the queue with the actual status in the kernel. This includes both
 * >    receiving the notification of new packets, and transmitting new
 * >    packets on the output interface.
 *
 * 	These are implemented in netmap_ioctl(), NIOCTXSYNC and NIOCRXSYNC
 * 	cases. They invoke the nm_sync callbacks on the netmap_kring
 * 	structures, as initialized in step 2 and maybe later modified
 * 	by a monitor. Monitors, however, will always call the original
 * 	callback before doing anything else.
 *
 * > 6. select() or poll() can be used to wait for events on individual
 * >    transmit or receive queues (or all queues for a given interface).
 *
 * 	Implemented in netmap_poll(). This will call the same nm_sync()
 * 	callbacks as in step 5 above.
 *
 * 	linux: we first go through linux_netmap_poll() to adapt
 * 	       the FreeBSD interface to the linux one.
 *
 *  ---- VALE_CTL -----
 *
 *  VALE switches are controlled by issuing a NIOCREGIF with a non-null
 *  nr_cmd in the nmreq structure. These subcommands are handled by
 *  netmap_bdg_ctl() in netmap_vale.c. Persistent VALE ports are created
 *  and destroyed by issuing the NETMAP_BDG_NEWIF and NETMAP_BDG_DELIF
 *  subcommands, respectively.
 *
 *  Any network interface known to the system (including a persistent VALE
 *  port) can be attached to a VALE switch by issuing the
 *  NETMAP_REQ_VALE_ATTACH command (see the sketch below). After the
 *  attachment, persistent VALE ports look exactly like ephemeral VALE ports
 *  (as created in step 2 above). The attachment of other interfaces,
 *  instead, requires the creation of a netmap_bwrap_adapter. Moreover, the
 *  attached interface must be put in netmap mode. This may require the
 *  creation of a netmap_generic_adapter if we have no native support for
 *  the interface, or if generic adapters have been forced by sysctl.
 *
 *  Both persistent VALE ports and bwraps are handled by netmap_get_bdg_na(),
 *  called by nm_bdg_ctl_attach(), and discriminated by the nm_bdg_attach()
 *  callback.  In the case of the bwrap, the callback creates the
 *  netmap_bwrap_adapter.  The initialization of the bwrap is then
 *  completed by calling netmap_do_regif() on it, in the nm_bdg_ctl()
 *  callback (netmap_bwrap_bdg_ctl in netmap_vale.c).
 *  A generic adapter for the wrapped ifp will be created if needed, when
 *  netmap_get_bdg_na() calls netmap_get_hw_na().
 *
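 *  For illustration, a hedged userspace sketch of such an attach request
 *  with the nmreq_header API (error handling omitted; "vale0:em0" is a
 *  placeholder switch:port name, fd is open on /dev/netmap):
 *
 *	struct nmreq_vale_attach reg;
 *	struct nmreq_header hdr;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	memset(&reg, 0, sizeof(reg));
 *	hdr.nr_version = NETMAP_API;
 *	hdr.nr_reqtype = NETMAP_REQ_VALE_ATTACH;
 *	strncpy(hdr.nr_name, "vale0:em0", sizeof(hdr.nr_name) - 1);
 *	hdr.nr_body = (uintptr_t)&reg;
 *	reg.reg.nr_mode = NR_REG_ALL_NIC;
 *	ioctl(fd, NIOCCTRL, &hdr);
 *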
 *  ---- DATAPATHS -----
 *
 *              -= SYSTEM DEVICE WITH NATIVE SUPPORT =-
 *
 *    na == NA(ifp) == netmap_hw_adapter created in DEVICE_netmap_attach()
 *
 *    - tx from netmap userspace:
 *	 concurrently:
 *           1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == DEVICE_netmap_txsync()
 *           2) device interrupt handler
 *                na->nm_notify()  == netmap_notify()
 *    - rx from netmap userspace:
 *       concurrently:
 *           1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == DEVICE_netmap_rxsync()
 *           2) device interrupt handler
 *                na->nm_notify()  == netmap_notify()
 *    - rx from host stack
 *       concurrently:
 *           1) host stack
 *                netmap_transmit()
 *                  na->nm_notify  == netmap_notify()
 *           2) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == netmap_rxsync_from_host
 *                  netmap_rxsync_from_host(na, NULL, NULL)
 *    - tx to host stack
 *           ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *             kring->nm_sync() == netmap_txsync_to_host
 *               netmap_txsync_to_host(na)
 *                 nm_os_send_up()
 *                   FreeBSD: na->if_input() == ether_input()
 *                   linux: netif_rx() with NM_MAGIC_PRIORITY_RX
 *
 *              -= SYSTEM DEVICE WITH GENERIC SUPPORT =-
 *
 *    na == NA(ifp) == generic_netmap_adapter created in generic_netmap_attach()
 *
 *    - tx from netmap userspace:
 *       concurrently:
 *           1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *               kring->nm_sync() == generic_netmap_txsync()
 *                   nm_os_generic_xmit_frame()
 *                       linux:   dev_queue_xmit() with NM_MAGIC_PRIORITY_TX
 *                           ifp->ndo_start_xmit == generic_ndo_start_xmit()
 *                               gna->save_start_xmit == orig. dev. start_xmit
 *                       FreeBSD: na->if_transmit() == orig. dev if_transmit
 *           2) generic_mbuf_destructor()
 *                   na->nm_notify() == netmap_notify()
 *    - rx from netmap userspace:
 *           1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                   kring->nm_sync() == generic_netmap_rxsync()
 *           2) device driver
 *               generic_rx_handler()
 *                   mbq_safe_enqueue()
 *                   na->nm_notify() == netmap_notify()
 *    - rx from host stack
 *        FreeBSD: same as native
 *        Linux: same as native except:
 *           1) host stack
 *               dev_queue_xmit() without NM_MAGIC_PRIORITY_TX
 *                   ifp->ndo_start_xmit == generic_ndo_start_xmit()
 *                       netmap_transmit()
 *                           na->nm_notify() == netmap_notify()
 *    - tx to host stack (same as native):
 *
 *
 *                           -= VALE =-
 *
 *   INCOMING:
 *
 *      -- VALE ports:
 *            ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == netmap_vp_txsync()
 *
 *      -- system device with native support:
 *         from cable:
 *             interrupt
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
 *                    kring->nm_sync() == DEVICE_netmap_rxsync()
 *                    netmap_vp_txsync()
 *                       kring->nm_sync() == DEVICE_netmap_rxsync()
 *         from host stack:
 *             netmap_transmit()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
 *                    kring->nm_sync() == netmap_rxsync_from_host()
 *                    netmap_vp_txsync()
 *
 *      -- system device with generic support:
 *         from device driver:
 *             generic_rx_handler()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
 *                    kring->nm_sync() == generic_netmap_rxsync()
 *                    netmap_vp_txsync()
 *                       kring->nm_sync() == generic_netmap_rxsync()
 *         from host stack:
 *             netmap_transmit()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
 *                    kring->nm_sync() == netmap_rxsync_from_host()
 *                    netmap_vp_txsync()
 *
 *      (all cases) --> nm_bdg_flush()
 *                         dest_na->nm_notify() == (see below)
 *
 *   OUTGOING (from nm_bdg_flush):
 *
 *      -- VALE ports:
 *         concurrently:
 *             1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == netmap_vp_rxsync()
 *             2) from nm_bdg_flush()
 *                na->nm_notify() == netmap_notify()
 *
 *      -- system device with native support:
 *          to cable:
 *             na->nm_notify() == netmap_bwrap_notify()
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == DEVICE_netmap_txsync()
 *                 netmap_vp_rxsync()
 *          to host stack:
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == netmap_txsync_to_host
 *                 netmap_vp_rxsync_locked()
 *
 *      -- system device with generic adapter:
 *          to device:
 *             na->nm_notify() == netmap_bwrap_notify()
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == generic_netmap_txsync()
 *                 netmap_vp_rxsync()
 *          to host stack:
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == netmap_txsync_to_host
 *                 netmap_vp_rxsync_locked()
 *
 */

/*
 * OS-specific code that is used only within this file.
 * Other OS-specific code that must be accessed by drivers
 * is present in netmap_kern.h
 */
#if defined(__FreeBSD__)
#include <sys/cdefs.h> /* prerequisite */
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/conf.h>	/* cdevsw struct, UID, GID */
#include <sys/filio.h>	/* FIONBIO */
#include <sys/sockio.h>
#include <sys/socketvar.h>	/* struct socket */
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/rwlock.h>
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/jail.h>
#include <net/vnet.h>
#include <net/if_var.h>
#include <net/bpf.h>		/* BIOCIMMEDIATE */
#include <machine/bus.h>	/* bus_dmamap_* */
#include <sys/endian.h>
#include <sys/refcount.h>
#include <net/ethernet.h>	/* ETHER_BPF_MTAP */

#elif defined(linux)

#include "bsd_glue.h"

#elif defined(__APPLE__)

#warning OSX support is only partial
#include "osx_glue.h"

#elif defined (_WIN32)

#include "win_glue.h"

#else

#error	Unsupported platform

#endif /* unsupported */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>
/* user-controlled variables */
int netmap_verbose;
#ifdef CONFIG_NETMAP_DEBUG
int netmap_debug;
#endif /* CONFIG_NETMAP_DEBUG */

static int netmap_no_timestamp; /* don't timestamp on rxsync */
int netmap_no_pendintr = 1;
int netmap_txsync_retry = 2;
static int netmap_fwd = 0;	/* force transparent forwarding */
/*
 * netmap_admode selects the netmap mode to use.
 * Invalid values are reset to NETMAP_ADMODE_BEST
 */
enum {	NETMAP_ADMODE_BEST = 0,	/* use native, fallback to generic */
	NETMAP_ADMODE_NATIVE,	/* either native or none */
	NETMAP_ADMODE_GENERIC,	/* force generic */
	NETMAP_ADMODE_LAST };
static int netmap_admode = NETMAP_ADMODE_BEST;

/* netmap_generic_mit controls mitigation of RX notifications for
 * the generic netmap adapter. The value is a time interval in
 * nanoseconds. */
int netmap_generic_mit = 100*1000;
/* We use by default netmap-aware qdiscs with generic netmap adapters,
 * even if there can be a little performance hit with hardware NICs.
 * However, using the qdisc is the safer approach, for two reasons:
 * 1) it prevents non-fifo qdiscs from breaking the TX notification
 *    scheme, which is based on mbuf destructors when txqdisc is
 *    used.
 * 2) it makes it possible to transmit over software devices that
 *    change skb->dev, like bridge, veth, ...
 *
 * Anyway users looking for the best performance should
 * use native adapters.
 */
#ifdef linux
int netmap_generic_txqdisc = 1;
#endif
/* Default number of slots and queues for generic adapters. */
int netmap_generic_ringsize = 1024;
int netmap_generic_rings = 1;

/* Non-zero to enable checksum offloading in NIC drivers */
int netmap_generic_hwcsum = 0;

/* Non-zero if ptnet devices are allowed to use virtio-net headers. */
int ptnet_vnet_hdr = 1;
/*
 * SYSCTL calls are grouped between SYSBEGIN and SYSEND to be emulated
 * in some other operating systems
 */
SYSBEGIN(main_init);

SYSCTL_DECL(_dev_netmap);
SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args");
SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
		CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
#ifdef CONFIG_NETMAP_DEBUG
SYSCTL_INT(_dev_netmap, OID_AUTO, debug,
		CTLFLAG_RW, &netmap_debug, 0, "Debug messages");
#endif /* CONFIG_NETMAP_DEBUG */
SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
		CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp");
SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, CTLFLAG_RW, &netmap_no_pendintr,
		0, "Always look for new received packets.");
SYSCTL_INT(_dev_netmap, OID_AUTO, txsync_retry, CTLFLAG_RW,
		&netmap_txsync_retry, 0, "Number of txsync loops in bridge's flush.");

SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0,
		"Force NR_FORWARD mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, admode, CTLFLAG_RW, &netmap_admode, 0,
		"Adapter mode. 0 selects the best option available, "
		"1 forces native adapter, 2 forces emulated adapter");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_hwcsum, CTLFLAG_RW, &netmap_generic_hwcsum,
		0, "Hardware checksums. 0 to disable checksum generation by the NIC (default), "
		"1 to enable checksum generation by the NIC");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_mit, CTLFLAG_RW, &netmap_generic_mit,
		0, "RX notification interval in nanoseconds");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_ringsize, CTLFLAG_RW,
		&netmap_generic_ringsize, 0,
		"Number of per-ring slots for emulated netmap mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_rings, CTLFLAG_RW,
		&netmap_generic_rings, 0,
		"Number of TX/RX queues for emulated netmap adapters");
#ifdef linux
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_txqdisc, CTLFLAG_RW,
		&netmap_generic_txqdisc, 0, "Use qdisc for generic adapters");
#endif
SYSCTL_INT(_dev_netmap, OID_AUTO, ptnet_vnet_hdr, CTLFLAG_RW, &ptnet_vnet_hdr,
		0, "Allow ptnet devices to use virtio-net headers");

SYSEND;
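
/*
 * For reference, a hedged userspace sketch of tweaking one of the knobs
 * above with sysctlbyname(3) on FreeBSD (error handling omitted):
 *
 *	int mode = NETMAP_ADMODE_GENERIC;	// force emulated adapters
 *	sysctlbyname("dev.netmap.admode", NULL, NULL, &mode, sizeof(mode));
 */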
NMG_LOCK_T	netmap_global_lock;
/*
 * mark the ring as stopped, and run through the locks
 * to make sure other users get to see it.
 * stopped must be either NM_KR_STOPPED (for unbounded stop)
 * or NM_KR_LOCKED (brief stop for mutual exclusion purposes)
 */
static void
netmap_disable_ring(struct netmap_kring *kr, int stopped)
{
	nm_kr_stop(kr, stopped);
	// XXX check if nm_kr_stop is sufficient
	mtx_lock(&kr->q_lock);
	mtx_unlock(&kr->q_lock);
	nm_kr_put(kr);
}
/* stop or enable a single ring */
void
netmap_set_ring(struct netmap_adapter *na, u_int ring_id, enum txrx t, int stopped)
{
	if (stopped)
		netmap_disable_ring(NMR(na, t)[ring_id], stopped);
	else
		NMR(na, t)[ring_id]->nkr_stopped = 0;
}
/* stop or enable all the rings of na */
void
netmap_set_all_rings(struct netmap_adapter *na, int stopped)
{
	int i;
	enum txrx t;

	if (!nm_netmap_on(na))
		return;

	for_rx_tx(t) {
		for (i = 0; i < netmap_real_rings(na, t); i++) {
			netmap_set_ring(na, i, t, stopped);
		}
	}
}
/*
 * Convenience function used in drivers.  Waits for current txsync()s/rxsync()s
 * to finish and prevents any new one from starting.  Call this before turning
 * netmap mode off, or before removing the hardware rings (e.g., on module
 * unload).
 */
void
netmap_disable_all_rings(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		netmap_set_all_rings(NA(ifp), NM_KR_STOPPED);
	}
}
/*
 * Convenience function used in drivers.  Re-enables rxsync and txsync on the
 * adapter's rings.  In linux drivers, this should be placed near each
 * napi_enable().
 */
void
netmap_enable_all_rings(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		netmap_set_all_rings(NA(ifp), 0 /* enabled */);
	}
}
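
/*
 * Illustrative use of the two helpers above in a hypothetical driver
 * reinit path (a sketch, not taken from a real driver; DEVICE_reset()
 * is a placeholder for the hardware-specific reconfiguration):
 *
 *	netmap_disable_all_rings(ifp);	// quiesce concurrent *_sync()s
 *	DEVICE_reset(sc);		// reconfigure the hardware rings
 *	netmap_enable_all_rings(ifp);	// resume netmap operation
 */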
void
netmap_make_zombie(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		struct netmap_adapter *na = NA(ifp);
		netmap_set_all_rings(na, NM_KR_LOCKED);
		na->na_flags |= NAF_ZOMBIE;
		netmap_set_all_rings(na, 0);
	}
}

void
netmap_undo_zombie(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		struct netmap_adapter *na = NA(ifp);
		if (na->na_flags & NAF_ZOMBIE) {
			netmap_set_all_rings(na, NM_KR_LOCKED);
			na->na_flags &= ~NAF_ZOMBIE;
			netmap_set_all_rings(na, 0);
		}
	}
}
/*
 * generic bound_checking function
 */
u_int
nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg)
{
	u_int oldv = *v;
	const char *op = NULL;

	if (dflt < lo)
		dflt = lo;
	if (dflt > hi)
		dflt = hi;
	if (oldv < lo) {
		*v = dflt;
		op = "Bump";
	} else if (oldv > hi) {
		*v = dflt;
		op = "Clamp";
	}
	if (op && msg)
		nm_prinf("%s %s to %d (was %d)", op, msg, *v, oldv);
	return *v;
}
/*
 * packet-dump function, user-supplied or static buffer.
 * The destination buffer must be at least 30+4*len
 */
const char *
nm_dump_buf(char *p, int len, int lim, char *dst)
{
	static char _dst[8192];
	int i, j, i0;
	static char hex[] ="0123456789abcdef";
	char *o;	/* output position */

#define P_HI(x)	hex[((x) & 0xf0)>>4]
#define P_LO(x)	hex[((x) & 0xf)]
#define P_C(x)	((x) >= 0x20 && (x) <= 0x7e ? (x) : '.')
	if (!dst)
		dst = _dst;
	if (lim <= 0 || lim > len)
		lim = len;
	o = dst;
	sprintf(o, "buf 0x%p len %d lim %d\n", p, len, lim);
	o += strlen(o);
	/* hexdump routine */
	for (i = 0; i < lim; ) {
		sprintf(o, "%5d: ", i);
		o += strlen(o);
		memset(o, ' ', 48);
		i0 = i;
		for (j=0; j < 16 && i < lim; i++, j++) {
			o[j*3] = P_HI(p[i]);
			o[j*3+1] = P_LO(p[i]);
		}
		i = i0;
		for (j=0; j < 16 && i < lim; i++, j++)
			o[j + 48] = P_C(p[i]);
		o[j+48] = '\n';
		o += j+49;
	}
	*o = '\0';
#undef P_HI
#undef P_LO
#undef P_C
	return dst;
}
/*
 * Fetch configuration from the device, to cope with dynamic
 * reconfigurations after loading the module.
 */
/* call with NMG_LOCK held */
int
netmap_update_config(struct netmap_adapter *na)
{
	struct nm_config_info info;

	bzero(&info, sizeof(info));
	if (na->nm_config == NULL ||
	    na->nm_config(na, &info)) {
		/* take whatever we had at init time */
		info.num_tx_rings = na->num_tx_rings;
		info.num_tx_descs = na->num_tx_desc;
		info.num_rx_rings = na->num_rx_rings;
		info.num_rx_descs = na->num_rx_desc;
		info.rx_buf_maxsize = na->rx_buf_maxsize;
	}

	if (na->num_tx_rings == info.num_tx_rings &&
	    na->num_tx_desc == info.num_tx_descs &&
	    na->num_rx_rings == info.num_rx_rings &&
	    na->num_rx_desc == info.num_rx_descs &&
	    na->rx_buf_maxsize == info.rx_buf_maxsize)
		return 0; /* nothing changed */
	if (na->active_fds == 0) {
		na->num_tx_rings = info.num_tx_rings;
		na->num_tx_desc = info.num_tx_descs;
		na->num_rx_rings = info.num_rx_rings;
		na->num_rx_desc = info.num_rx_descs;
		na->rx_buf_maxsize = info.rx_buf_maxsize;
		if (netmap_verbose)
			nm_prinf("configuration changed for %s: txring %d x %d, "
				"rxring %d x %d, rxbufsz %d",
				na->name, na->num_tx_rings, na->num_tx_desc,
				na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize);
		return 0;
	}
	nm_prerr("WARNING: configuration changed for %s while active: "
		"txring %d x %d, rxring %d x %d, rxbufsz %d",
		na->name, info.num_tx_rings, info.num_tx_descs,
		info.num_rx_rings, info.num_rx_descs,
		info.rx_buf_maxsize);
	return 1;
}
/* nm_sync callbacks for the host rings */
static int netmap_txsync_to_host(struct netmap_kring *kring, int flags);
static int netmap_rxsync_from_host(struct netmap_kring *kring, int flags);

/* create the krings array and initialize the fields common to all adapters.
 * The array layout is this:
 *
 *                    +----------+
 * na->tx_rings ----->|          | \
 *                    |          |  } na->num_tx_rings
 *                    |          | /
 *                    +----------+
 *                    |          |    host tx kring
 * na->rx_rings ----> +----------+
 *                    |          | \
 *                    |          |  } na->num_rx_rings
 *                    |          | /
 *                    +----------+
 *                    |          |    host rx kring
 *                    +----------+
 * na->tailroom ----->|          | \
 *                    |          |  } tailroom bytes
 *                    |          | /
 *                    +----------+
 *
 * Note: for compatibility, host krings are created even when not needed.
 * The tailroom space is currently used by vale ports for allocating leases.
 */
/* call with NMG_LOCK held */
int
netmap_krings_create(struct netmap_adapter *na, u_int tailroom)
{
	u_int i, len, ndesc;
	struct netmap_kring *kring;
	u_int n[NR_TXRX];
	enum txrx t;
	int err = 0;

	if (na->tx_rings != NULL) {
		if (netmap_debug & NM_DEBUG_ON)
			nm_prerr("warning: krings were already created");
		return 0;
	}

	/* account for the (possibly fake) host rings */
	n[NR_TX] = netmap_all_rings(na, NR_TX);
	n[NR_RX] = netmap_all_rings(na, NR_RX);

	len = (n[NR_TX] + n[NR_RX]) *
		(sizeof(struct netmap_kring) + sizeof(struct netmap_kring *))
		+ tailroom;

	na->tx_rings = nm_os_malloc((size_t)len);
	if (na->tx_rings == NULL) {
		nm_prerr("Cannot allocate krings");
		return ENOMEM;
	}
	na->rx_rings = na->tx_rings + n[NR_TX];
	na->tailroom = na->rx_rings + n[NR_RX];

	/* link the krings in the krings array */
	kring = (struct netmap_kring *)((char *)na->tailroom + tailroom);
	for (i = 0; i < n[NR_TX] + n[NR_RX]; i++) {
		na->tx_rings[i] = kring;
		kring++;
	}

	/*
	 * All fields in krings are 0 except the ones initialized below,
	 * but better be explicit on important kring fields.
	 */
	for_rx_tx(t) {
		ndesc = nma_get_ndesc(na, t);
		for (i = 0; i < n[t]; i++) {
			kring = NMR(na, t)[i];
			bzero(kring, sizeof(*kring));
			kring->notify_na = na;
			kring->ring_id = i;
			kring->tx = t;
			kring->nkr_num_slots = ndesc;
			kring->nr_mode = NKR_NETMAP_OFF;
			kring->nr_pending_mode = NKR_NETMAP_OFF;
			if (i < nma_get_nrings(na, t)) {
				kring->nm_sync = (t == NR_TX ? na->nm_txsync : na->nm_rxsync);
			} else {
				if (!(na->na_flags & NAF_HOST_RINGS))
					kring->nr_kflags |= NKR_FAKERING;
				kring->nm_sync = (t == NR_TX ?
						netmap_txsync_to_host:
						netmap_rxsync_from_host);
			}
			kring->nm_notify = na->nm_notify;
			kring->rhead = kring->rcur = kring->nr_hwcur = 0;
			/*
			 * IMPORTANT: Always keep one slot empty.
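			 * (With one slot always unused, head == tail can
			 * only mean an empty ring, so a single pair of
			 * pointers suffices to tell empty from full.)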
			 */
			kring->rtail = kring->nr_hwtail = (t == NR_TX ? ndesc - 1 : 0);
			snprintf(kring->name, sizeof(kring->name) - 1, "%s %s%d", na->name,
					nm_txrx2str(t), i);
			nm_prdis("ktx %s h %d c %d t %d",
				kring->name, kring->rhead, kring->rcur, kring->rtail);
			err = nm_os_selinfo_init(&kring->si, kring->name);
			if (err) {
				netmap_krings_delete(na);
				return err;
			}
			mtx_init(&kring->q_lock, (t == NR_TX ? "nm_txq_lock" : "nm_rxq_lock"), NULL, MTX_DEF);
			kring->na = na;	/* setting this field marks the mutex as initialized */
		}
		err = nm_os_selinfo_init(&na->si[t], na->name);
		if (err) {
			netmap_krings_delete(na);
			return err;
		}
	}

	return 0;
}
/* undo the actions performed by netmap_krings_create */
/* call with NMG_LOCK held */
void
netmap_krings_delete(struct netmap_adapter *na)
{
	struct netmap_kring **kring = na->tx_rings;
	enum txrx t;

	if (na->tx_rings == NULL) {
		if (netmap_debug & NM_DEBUG_ON)
			nm_prerr("warning: krings were already deleted");
		return;
	}

	for_rx_tx(t)
		nm_os_selinfo_uninit(&na->si[t]);

	/* we rely on the krings layout described above */
	for ( ; kring != na->tailroom; kring++) {
		if ((*kring)->na != NULL)
			mtx_destroy(&(*kring)->q_lock);
		nm_os_selinfo_uninit(&(*kring)->si);
	}
	nm_os_free(na->tx_rings);
	na->tx_rings = na->rx_rings = na->tailroom = NULL;
}
/*
 * Destructor for NIC ports. They also have an mbuf queue
 * on the rings connected to the host so we need to purge
 * them first.
 */
/* call with NMG_LOCK held */
void
netmap_hw_krings_delete(struct netmap_adapter *na)
{
	u_int lim = netmap_real_rings(na, NR_RX), i;

	for (i = nma_get_nrings(na, NR_RX); i < lim; i++) {
		struct mbq *q = &NMR(na, NR_RX)[i]->rx_queue;
		nm_prdis("destroy sw mbq with len %d", mbq_len(q));
		mbq_purge(q);
		mbq_safe_fini(q);
	}
	netmap_krings_delete(na);
}
static void
netmap_mem_drop(struct netmap_adapter *na)
{
	int last = netmap_mem_deref(na->nm_mem, na);
	/* if the native allocator had been overridden on regif,
	 * restore it now and drop the temporary one
	 */
	if (last && na->nm_mem_prev) {
		netmap_mem_put(na->nm_mem);
		na->nm_mem = na->nm_mem_prev;
		na->nm_mem_prev = NULL;
	}
}
/*
 * Undo everything that was done in netmap_do_regif(). In particular,
 * call nm_register(ifp,0) to stop netmap mode on the interface and
 * revert to normal operation.
 */
/* call with NMG_LOCK held */
static void netmap_unset_ringid(struct netmap_priv_d *);
static void netmap_krings_put(struct netmap_priv_d *);
void
netmap_do_unregif(struct netmap_priv_d *priv)
{
	struct netmap_adapter *na = priv->np_na;

	NMG_LOCK_ASSERT();
	na->active_fds--;
	/* unset nr_pending_mode and possibly release exclusive mode */
	netmap_krings_put(priv);

#ifdef	WITH_MONITOR
	/* XXX check whether we have to do something with monitor
	 * when rings change nr_mode. */
	if (na->active_fds <= 0) {
		/* walk through all the rings and tell any monitor
		 * that the port is going to exit netmap mode
		 */
		netmap_monitor_stop(na);
	}
#endif

	if (na->active_fds <= 0 || nm_kring_pending(priv)) {
		na->nm_register(na, 0);
	}

	/* delete rings and buffers that are no longer needed */
	netmap_mem_rings_delete(na);

	if (na->active_fds <= 0) {	/* last instance */
		/*
		 * (TO CHECK) We enter here
		 * when the last reference to this file descriptor goes
		 * away. This means we cannot have any pending poll()
		 * or interrupt routine operating on the structure.
		 * XXX The file may be closed in a thread while
		 * another thread is using it.
		 * Linux keeps the file opened until the last reference
		 * by any outstanding ioctl/poll or mmap is gone.
		 * FreeBSD does not track mmap()s (but we do) and
		 * wakes up any sleeping poll(). Need to check what
		 * happens if the close() occurs while a concurrent
		 * syscall is running.
		 */
		if (netmap_debug & NM_DEBUG_ON)
			nm_prinf("deleting last instance for %s", na->name);

		if (nm_netmap_on(na)) {
			nm_prerr("BUG: netmap on while going to delete the krings");
		}

		na->nm_krings_delete(na);

		/* restore the default number of host tx and rx rings */
		if (na->na_flags & NAF_HOST_RINGS) {
			na->num_host_tx_rings = 1;
			na->num_host_rx_rings = 1;
		} else {
			na->num_host_tx_rings = 0;
			na->num_host_rx_rings = 0;
		}
	}

	/* possibly decrement counter of tx_si/rx_si users */
	netmap_unset_ringid(priv);
	/* delete the nifp */
	netmap_mem_if_delete(na, priv->np_nifp);
	/* drop the allocator */
	netmap_mem_drop(na);
	/* mark the priv as unregistered */
	priv->np_na = NULL;
	priv->np_nifp = NULL;
}
struct netmap_priv_d*
netmap_priv_new(void)
{
	struct netmap_priv_d *priv;

	priv = nm_os_malloc(sizeof(struct netmap_priv_d));
	if (priv == NULL)
		return NULL;
	priv->np_refs = 1;
	nm_os_get_module();
	return priv;
}
/*
 * Destructor of the netmap_priv_d, called when the fd is closed
 * Action: undo all the things done by NIOCREGIF,
 * On FreeBSD we need to track whether there are active mmap()s,
 * and we use np_active_mmaps for that. On linux, the field is always 0.
 * Return: 1 if we can free priv, 0 otherwise.
 */
/* call with NMG_LOCK held */
void
netmap_priv_delete(struct netmap_priv_d *priv)
{
	struct netmap_adapter *na = priv->np_na;

	/* number of active references to this fd */
	if (--priv->np_refs > 0) {
		return;
	}
	nm_os_put_module();
	if (na) {
		netmap_do_unregif(priv);
	}
	netmap_unget_na(na, priv->np_ifp);
	bzero(priv, sizeof(*priv));	/* for safety */
	nm_os_free(priv);
}

/* call with NMG_LOCK *not* held */
void
netmap_dtor(void *data)
{
	struct netmap_priv_d *priv = data;

	NMG_LOCK();
	netmap_priv_delete(priv);
	NMG_UNLOCK();
}
/*
 * Handlers for synchronization of the rings from/to the host stack.
 * These are associated to a network interface and are just another
 * ring pair managed by userspace.
 *
 * Netmap also supports transparent forwarding (NS_FORWARD and NR_FORWARD
 * flags):
 *
 * - Before releasing buffers on hw RX rings, the application can mark
 *   them with the NS_FORWARD flag. During the next RXSYNC or poll(), they
 *   will be forwarded to the host stack, similarly to what happened if
 *   the application moved them to the host TX ring.
 *
 * - Before releasing buffers on the host RX ring, the application can
 *   mark them with the NS_FORWARD flag. During the next RXSYNC or poll(),
 *   they will be forwarded to the hw TX rings, saving the application
 *   from doing the same task in user-space.
 *
 * Transparent forwarding can be enabled per-ring, by setting the NR_FORWARD
 * flag, or globally with the netmap_fwd sysctl; a short usage sketch
 * follows.
 *
 * The transfer NIC --> host is relatively easy, just encapsulate
 * into mbufs and we are done. The host --> NIC side is slightly
 * harder because there might not be room in the tx ring so it
 * might take a while before releasing the buffer.
 */
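
/*
 * Illustrative userspace sketch of the transparent forwarding knobs
 * described above (not taken verbatim from netmap(4); assumes an rx
 * ring obtained as in the examples earlier in this file):
 *
 *	rxring->flags |= NR_FORWARD;		// per-ring opt-in, once
 *	...
 *	i = rxring->cur;
 *	rxring->slot[i].flags |= NS_FORWARD;	// hand this one to the host stack
 *	rxring->head = rxring->cur = nm_ring_next(rxring, i);
 *	ioctl(fd, NIOCRXSYNC, NULL);		// forwarding happens here
 */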
/*
 * Pass a whole queue of mbufs to the host stack as coming from 'dst'
 * We do not need to lock because the queue is private.
 * After this call the queue is empty.
 */
static void
netmap_send_up(struct ifnet *dst, struct mbq *q)
{
	struct mbuf *m;
	struct mbuf *head = NULL, *prev = NULL;

	/* Send packets up, outside the lock; head/prev machinery
	 * is only useful for Windows. */
	while ((m = mbq_dequeue(q)) != NULL) {
		if (netmap_debug & NM_DEBUG_HOST)
			nm_prinf("sending up pkt %p size %d", m, MBUF_LEN(m));
		prev = nm_os_send_up(dst, m, prev);
		if (head == NULL)
			head = prev;
	}
	if (head)
		nm_os_send_up(dst, NULL, head);
	mbq_fini(q);
}
/*
 * Scan the buffers from hwcur to ring->head, and put a copy of those
 * marked NS_FORWARD (or all of them if forced) into a queue of mbufs.
 * Drop remaining packets in the unlikely event
 * of an mbuf shortage.
 */
static void
netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force)
{
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	u_int n;
	struct netmap_adapter *na = kring->na;

	for (n = kring->nr_hwcur; n != head; n = nm_next(n, lim)) {
		struct mbuf *m;
		struct netmap_slot *slot = &kring->ring->slot[n];

		if ((slot->flags & NS_FORWARD) == 0 && !force)
			continue;
		if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE(na)) {
			nm_prlim(5, "bad pkt at %d len %d", n, slot->len);
			continue;
		}
		slot->flags &= ~NS_FORWARD; // XXX needed ?
		/* XXX TODO: adapt to the case of a multisegment packet */
		m = m_devget(NMB(na, slot), slot->len, 0, na->ifp, NULL);

		if (m == NULL)
			break;
		mbq_enqueue(q, m);
	}
}
static inline int
_nm_may_forward(struct netmap_kring *kring)
{
	return	((netmap_fwd || kring->ring->flags & NR_FORWARD) &&
		 kring->na->na_flags & NAF_HOST_RINGS &&
		 kring->tx == NR_RX);
}

static inline int
nm_may_forward_up(struct netmap_kring *kring)
{
	return	_nm_may_forward(kring) &&
		 kring->ring_id != kring->na->num_rx_rings;
}

static inline int
nm_may_forward_down(struct netmap_kring *kring, int sync_flags)
{
	return	_nm_may_forward(kring) &&
		 (sync_flags & NAF_CAN_FORWARD_DOWN) &&
		 kring->ring_id == kring->na->num_rx_rings;
}
/*
 * Send to the NIC rings packets marked NS_FORWARD between
 * kring->nr_hwcur and kring->rhead.
 * Called under kring->rx_queue.lock on the sw rx ring.
 *
 * It can only be called if the user opened all the TX hw rings,
 * see NAF_CAN_FORWARD_DOWN flag.
 * We can touch the TX netmap rings (slots, head and cur) since
 * we are in poll/ioctl system call context, and the application
 * is not supposed to touch the ring (using a different thread)
 * during the execution of the system call.
 */
static u_int
netmap_sw_to_nic(struct netmap_adapter *na)
{
	struct netmap_kring *kring = na->rx_rings[na->num_rx_rings];
	struct netmap_slot *rxslot = kring->ring->slot;
	u_int i, rxcur = kring->nr_hwcur;
	u_int const head = kring->rhead;
	u_int const src_lim = kring->nkr_num_slots - 1;
	u_int sent = 0;

	/* scan rings to find space, then fill as much as possible */
	for (i = 0; i < na->num_tx_rings; i++) {
		struct netmap_kring *kdst = na->tx_rings[i];
		struct netmap_ring *rdst = kdst->ring;
		u_int const dst_lim = kdst->nkr_num_slots - 1;

		/* XXX do we trust ring or kring->rcur,rtail ? */
		for (; rxcur != head && !nm_ring_empty(rdst);
		     rxcur = nm_next(rxcur, src_lim) ) {
			struct netmap_slot *src, *dst, tmp;
			u_int dst_head = rdst->head;

			src = &rxslot[rxcur];
			if ((src->flags & NS_FORWARD) == 0 && !netmap_fwd)
				continue;

			sent++;

			dst = &rdst->slot[dst_head];

			tmp = *src;

			src->buf_idx = dst->buf_idx;
			src->flags = NS_BUF_CHANGED;

			dst->buf_idx = tmp.buf_idx;
			dst->len = tmp.len;
			dst->flags = NS_BUF_CHANGED;

			rdst->head = rdst->cur = nm_next(dst_head, dst_lim);
		}
		/* if (sent) XXX txsync ? it would be just an optimization */
	}
	return sent;
}
/*
 * netmap_txsync_to_host() passes packets up. We are called from a
 * system call in user process context, and the only contention
 * can be among multiple user threads erroneously calling
 * this routine concurrently.
 */
static int
netmap_txsync_to_host(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	struct mbq q;

	/* Take packets from hwcur to head and pass them up.
	 * Force hwcur = head since netmap_grab_packets() stops at head
	 */
	mbq_init(&q);
	netmap_grab_packets(kring, &q, 1 /* force */);
	nm_prdis("have %d pkts in queue", mbq_len(&q));
	kring->nr_hwcur = head;
	kring->nr_hwtail = head + lim;
	if (kring->nr_hwtail > lim)
		kring->nr_hwtail -= lim + 1;

	netmap_send_up(na->ifp, &q);
	return 0;
}
/*
 * rxsync backend for packets coming from the host stack.
 * They have been put in kring->rx_queue by netmap_transmit().
 * We protect access to the kring using kring->rx_queue.lock
 *
 * also moves to the nic hw rings any packet the user has marked
 * for transparent-mode forwarding, then sets the NR_FORWARD
 * flag in the kring to let the caller push them out
 */
static int
netmap_rxsync_from_host(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i, n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int ret = 0;
	struct mbq *q = &kring->rx_queue, fq;

	mbq_init(&fq); /* fq holds packets to be freed */

	mbq_lock(q);

	/* First part: import newly received packets */
	n = mbq_len(q);
	if (n) { /* grab packets from the queue */
		struct mbuf *m;
		uint32_t stop_i;

		nm_i = kring->nr_hwtail;
		stop_i = nm_prev(kring->nr_hwcur, lim);
		while ( nm_i != stop_i && (m = mbq_dequeue(q)) != NULL ) {
			int len = MBUF_LEN(m);
			struct netmap_slot *slot = &ring->slot[nm_i];

			m_copydata(m, 0, len, NMB(na, slot));
			nm_prdis("nm %d len %d", nm_i, len);
			if (netmap_debug & NM_DEBUG_HOST)
				nm_prinf("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL));

			slot->len = len;
			slot->flags = 0;
			nm_i = nm_next(nm_i, lim);
			mbq_enqueue(&fq, m);
		}
		kring->nr_hwtail = nm_i;
	}

	/*
	 * Second part: skip past packets that userspace has released.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) { /* something was released */
		if (nm_may_forward_down(kring, flags)) {
			ret = netmap_sw_to_nic(na);
			if (ret > 0) {
				kring->nr_kflags |= NR_FORWARD;
				ret = 0;
			}
		}
		kring->nr_hwcur = head;
	}

	mbq_unlock(q);

	mbq_purge(&fq);
	mbq_fini(&fq);

	return ret;
}
/* Get a netmap adapter for the port.
 *
 * If it is possible to satisfy the request, return 0
 * with *na containing the netmap adapter found.
 * Otherwise return an error code, with *na containing NULL.
 *
 * When the port is attached to a bridge, we always return
 * EBUSY.
 * Otherwise, if the port is already bound to a file descriptor,
 * then we unconditionally return the existing adapter into *na.
 * In all the other cases, we return (into *na) either native,
 * generic or NULL, according to the following table:
 *
 *					native-support
 * active_fds   dev.netmap.admode         YES     NO
 * -------------------------------------------------------
 *    >0              *                 NA(ifp) NA(ifp)
 *
 *     0        NETMAP_ADMODE_BEST      NATIVE  GENERIC
 *     0        NETMAP_ADMODE_NATIVE    NATIVE   NULL
 *     0        NETMAP_ADMODE_GENERIC   GENERIC GENERIC
 *
 */
static void netmap_hw_dtor(struct netmap_adapter *); /* needed by NM_IS_NATIVE() */
int
netmap_get_hw_na(struct ifnet *ifp, struct netmap_mem_d *nmd, struct netmap_adapter **na)
{
	/* generic support */
	int i = netmap_admode;	/* Take a snapshot. */
	struct netmap_adapter *prev_na;
	int error = 0;

	*na = NULL; /* default */

	/* reset in case of invalid value */
	if (i < NETMAP_ADMODE_BEST || i >= NETMAP_ADMODE_LAST)
		i = netmap_admode = NETMAP_ADMODE_BEST;

	if (NM_NA_VALID(ifp)) {
		prev_na = NA(ifp);
		/* If an adapter already exists, return it if
		 * there are active file descriptors or if
		 * netmap is not forced to use generic
		 * adapters.
		 */
		if (NETMAP_OWNED_BY_ANY(prev_na)
			|| i != NETMAP_ADMODE_GENERIC
			|| prev_na->na_flags & NAF_FORCE_NATIVE
#ifdef WITH_PIPES
			/* ugly, but we cannot allow an adapter switch
			 * if some pipe is referring to this one
			 */
			|| prev_na->na_next_pipe > 0
#endif
		) {
			*na = prev_na;
			goto assign_mem;
		}
	}

	/* If there isn't native support and netmap is not allowed
	 * to use generic adapters, we cannot satisfy the request.
	 */
	if (!NM_IS_NATIVE(ifp) && i == NETMAP_ADMODE_NATIVE)
		return EOPNOTSUPP;

	/* Otherwise, create a generic adapter and return it,
	 * saving the previously used netmap adapter, if any.
	 *
	 * Note that here 'prev_na', if not NULL, MUST be a
	 * native adapter, and CANNOT be a generic one. This is
	 * true because generic adapters are created on demand, and
	 * destroyed when not used anymore. Therefore, if the adapter
	 * currently attached to an interface 'ifp' is generic, it
	 * must be that
	 * (NA(ifp)->active_fds > 0 || NETMAP_OWNED_BY_KERN(NA(ifp))).
	 * Consequently, if NA(ifp) is generic, we will enter one of
	 * the branches above. This ensures that we never override
	 * a generic adapter with another generic adapter.
	 */
	error = generic_netmap_attach(ifp);
	if (error)
		return error;

	*na = NA(ifp);

assign_mem:
	if (nmd != NULL && !((*na)->na_flags & NAF_MEM_OWNER) &&
	    (*na)->active_fds == 0 && ((*na)->nm_mem != nmd)) {
		(*na)->nm_mem_prev = (*na)->nm_mem;
		(*na)->nm_mem = netmap_mem_get(nmd);
	}

	return 0;
}
/*
 * MUST BE CALLED UNDER NMG_LOCK()
 *
 * Get a refcounted reference to a netmap adapter attached
 * to the interface specified by req.
 * This is always called in the execution of an ioctl().
 *
 * Return ENXIO if the interface specified by the request does
 * not exist, ENOTSUP if netmap is not supported by the interface,
 * EBUSY if the interface is already attached to a bridge,
 * EINVAL if parameters are invalid, ENOMEM if needed resources
 * could not be allocated.
 * If successful, hold a reference to the netmap adapter.
 *
 * If the interface specified by req is a system one, also keep
 * a reference to it and return a valid *ifp.
 */
int
netmap_get_na(struct nmreq_header *hdr,
	      struct netmap_adapter **na, struct ifnet **ifp,
	      struct netmap_mem_d *nmd, int create)
{
	struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
	int error = 0;
	struct netmap_adapter *ret = NULL;
	int nmd_ref = 0;

	*na = NULL;     /* default return value */
	*ifp = NULL;

	if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) {
		return EINVAL;
	}

	if (req->nr_mode == NR_REG_PIPE_MASTER ||
			req->nr_mode == NR_REG_PIPE_SLAVE) {
		/* Do not accept deprecated pipe modes. */
		nm_prerr("Deprecated pipe nr_mode, use xx{yy or xx}yy syntax");
		return EINVAL;
	}

	NMG_LOCK_ASSERT();

	/* if the request contain a memid, try to find the
	 * corresponding memory region
	 */
	if (nmd == NULL && req->nr_mem_id) {
		nmd = netmap_mem_find(req->nr_mem_id);
		if (nmd == NULL)
			return EINVAL;
		/* keep the reference */
		nmd_ref = 1;
	}

	/* We cascade through all possible types of netmap adapter.
	 * All netmap_get_*_na() functions return an error and an na,
	 * with the following combinations:
	 *
	 * error    na
	 *   0	   NULL		type doesn't match
	 *  !0	   NULL		type matches, but na creation/lookup failed
	 *   0	  !NULL		type matches and na created/found
	 *  !0	  !NULL		impossible
	 */
	error = netmap_get_null_na(hdr, na, nmd, create);
	if (error || *na != NULL)
		goto out;

	/* try to see if this is a monitor port */
	error = netmap_get_monitor_na(hdr, na, nmd, create);
	if (error || *na != NULL)
		goto out;

	/* try to see if this is a pipe port */
	error = netmap_get_pipe_na(hdr, na, nmd, create);
	if (error || *na != NULL)
		goto out;

	/* try to see if this is a bridge port */
	error = netmap_get_vale_na(hdr, na, nmd, create);
	if (error)
		goto out;

	if (*na != NULL) /* valid match in netmap_get_bdg_na() */
		goto out;

	/*
	 * This must be a hardware na, lookup the name in the system.
	 * Note that by hardware we actually mean "it shows up in ifconfig".
	 * This may still be a tap, a veth/epair, or even a
	 * persistent VALE port.
	 */
	*ifp = ifunit_ref(hdr->nr_name);
	if (*ifp == NULL) {
		error = ENXIO;
		goto out;
	}

	error = netmap_get_hw_na(*ifp, nmd, &ret);
	if (error)
		goto out;

	*na = ret;
	netmap_adapter_get(ret);

	/*
	 * if the adapter supports the host rings and it is not already open,
	 * try to set the number of host rings as requested by the user
	 */
	if (((*na)->na_flags & NAF_HOST_RINGS) && (*na)->active_fds == 0) {
		if (req->nr_host_tx_rings)
			(*na)->num_host_tx_rings = req->nr_host_tx_rings;
		if (req->nr_host_rx_rings)
			(*na)->num_host_rx_rings = req->nr_host_rx_rings;
	}
	nm_prdis("%s: host tx %d rx %u", (*na)->name, (*na)->num_host_tx_rings,
			(*na)->num_host_rx_rings);

out:
	if (error) {
		if (ret)
			netmap_adapter_put(ret);
		if (*ifp) {
			if_rele(*ifp);
			*ifp = NULL;
		}
	}
	if (nmd_ref)
		netmap_mem_put(nmd);

	return error;
}
/* undo netmap_get_na() */
void
netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp)
{
	if (ifp)
		if_rele(ifp);
	if (na)
		netmap_adapter_put(na);
}

#define NM_FAIL_ON(t) do {						\
	if (unlikely(t)) {						\
		nm_prlim(5, "%s: fail '" #t "' "			\
			"h %d c %d t %d "				\
			"rh %d rc %d rt %d "				\
			"hc %d ht %d",					\
			kring->name,					\
			head, cur, ring->tail,				\
			kring->rhead, kring->rcur, kring->rtail,	\
			kring->nr_hwcur, kring->nr_hwtail);		\
		return kring->nkr_num_slots;				\
	}								\
} while (0)
/*
 * validate parameters on entry for *_txsync()
 * Returns ring->cur if ok, or something >= kring->nkr_num_slots
 * in case of error.
 *
 * rhead, rcur and rtail=hwtail are stored from previous round.
 * hwcur is the next packet to send to the ring.
 *
 * We want
 *    hwcur <= *rhead <= head <= cur <= tail = *rtail <= hwtail
 *
 * hwcur, rhead, rtail and hwtail are reliable
 */
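/*
 * For example (illustrative numbers, n = 256 slots): after a round with
 * hwcur = rhead = 10 and rtail = hwtail = 200, userspace may legally
 * advance to head = 50 and cur = 120, while head = 5 (behind rhead) or
 * cur = 210 (beyond rtail) would fail the checks below.
 */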
u_int
nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
{
	u_int head = ring->head; /* read only once */
	u_int cur = ring->cur; /* read only once */
	u_int n = kring->nkr_num_slots;

	nm_prdis(5, "%s kcur %d ktail %d head %d cur %d tail %d",
		kring->name,
		kring->nr_hwcur, kring->nr_hwtail,
		ring->head, ring->cur, ring->tail);
#if 1 /* kernel sanity checks; but we can trust the kring. */
	NM_FAIL_ON(kring->nr_hwcur >= n || kring->rhead >= n ||
	    kring->rtail >= n ||  kring->nr_hwtail >= n);
#endif /* kernel sanity checks */
	/*
	 * user sanity checks. We only use head,
	 * A, B, ... are possible positions for head:
	 *
	 *  0    A  rhead   B  rtail   C  n-1
	 *  0    D  rtail   E  rhead   F  n-1
	 *
	 * B, F, D are valid. A, C, E are wrong
	 */
	if (kring->rtail >= kring->rhead) {
		/* want rhead <= head <= rtail */
		NM_FAIL_ON(head < kring->rhead || head > kring->rtail);
		/* and also head <= cur <= rtail */
		NM_FAIL_ON(cur < head || cur > kring->rtail);
	} else { /* here rtail < rhead */
		/* we need head outside rtail .. rhead */
		NM_FAIL_ON(head > kring->rtail && head < kring->rhead);

		/* two cases now: head <= rtail or head >= rhead  */
		if (head <= kring->rtail) {
			/* want head <= cur <= rtail */
			NM_FAIL_ON(cur < head || cur > kring->rtail);
		} else { /* head >= rhead */
			/* cur must be outside rtail..head */
			NM_FAIL_ON(cur > kring->rtail && cur < head);
		}
	}
	if (ring->tail != kring->rtail) {
		nm_prlim(5, "%s tail overwritten was %d need %d", kring->name,
			ring->tail, kring->rtail);
		ring->tail = kring->rtail;
	}
	kring->rhead = head;
	kring->rcur = cur;
	return head;
}
/*
 * validate parameters on entry for *_rxsync()
 * Returns ring->head if ok, kring->nkr_num_slots on error.
 *
 * For a valid configuration,
 * hwcur <= head <= cur <= tail <= hwtail
 *
 * We only consider head and cur.
 * hwcur and hwtail are reliable.
 */
u_int
nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
{
	uint32_t const n = kring->nkr_num_slots;
	uint32_t head, cur;

	nm_prdis(5,"%s kc %d kt %d h %d c %d t %d",
		kring->name,
		kring->nr_hwcur, kring->nr_hwtail,
		ring->head, ring->cur, ring->tail);
	/*
	 * Before storing the new values, we should check they do not
	 * move backwards. However:
	 * - head is not an issue because the previous value is hwcur;
	 * - cur could in principle go back, however it does not matter
	 *   because we are processing a brand new rxsync()
	 */
	cur = kring->rcur = ring->cur;	/* read only once */
	head = kring->rhead = ring->head;	/* read only once */
#if 1 /* kernel sanity checks */
	NM_FAIL_ON(kring->nr_hwcur >= n || kring->nr_hwtail >= n);
#endif /* kernel sanity checks */
	/* user sanity checks */
	if (kring->nr_hwtail >= kring->nr_hwcur) {
		/* want hwcur <= rhead <= hwtail */
		NM_FAIL_ON(head < kring->nr_hwcur || head > kring->nr_hwtail);
		/* and also rhead <= rcur <= hwtail */
		NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
	} else {
		/* we need rhead outside hwtail..hwcur */
		NM_FAIL_ON(head < kring->nr_hwcur && head > kring->nr_hwtail);
		/* two cases now: head <= hwtail or head >= hwcur  */
		if (head <= kring->nr_hwtail) {
			/* want head <= cur <= hwtail */
			NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
		} else {
			/* cur must be outside hwtail..head */
			NM_FAIL_ON(cur < head && cur > kring->nr_hwtail);
		}
	}
	if (ring->tail != kring->rtail) {
		nm_prlim(5, "%s tail overwritten was %d need %d",
			kring->name,
			ring->tail, kring->rtail);
		ring->tail = kring->rtail;
	}
	return head;
}
/*
 * Error routine called when txsync/rxsync detects an error.
 * Can't do much more than resetting head = cur = hwcur, tail = hwtail
 * Return 1 on reinit.
 *
 * This routine is only called by the upper half of the kernel.
 * It only reads hwcur (which is changed only by the upper half, too)
 * and hwtail (which may be changed by the lower half, but only on
 * a tx ring and only to increase it, so any error will be recovered
 * on the next call). For the above, we don't strictly need to call
 * it under lock.
 */
int
netmap_ring_reinit(struct netmap_kring *kring)
{
	struct netmap_ring *ring = kring->ring;
	u_int i, lim = kring->nkr_num_slots - 1;
	int errors = 0;

	// XXX KASSERT nm_kr_tryget
	nm_prlim(10, "called for %s", kring->name);
	// XXX probably wrong to trust userspace
	kring->rhead = ring->head;
	kring->rcur  = ring->cur;
	kring->rtail = ring->tail;

	if (ring->cur > lim)
		errors++;
	if (ring->head > lim)
		errors++;
	if (ring->tail > lim)
		errors++;
	for (i = 0; i <= lim; i++) {
		u_int idx = ring->slot[i].buf_idx;
		u_int len = ring->slot[i].len;
		if (idx < 2 || idx >= kring->na->na_lut.objtotal) {
			nm_prlim(5, "bad index at slot %d idx %d len %d ", i, idx, len);
			ring->slot[i].buf_idx = 0;
			ring->slot[i].len = 0;
		} else if (len > NETMAP_BUF_SIZE(kring->na)) {
			ring->slot[i].len = 0;
			nm_prlim(5, "bad len at slot %d idx %d len %d", i, idx, len);
		}
	}
	if (errors) {
		nm_prlim(10, "total %d errors", errors);
		nm_prlim(10, "%s reinit, cur %d -> %d tail %d -> %d",
			kring->name,
			ring->cur, kring->nr_hwcur,
			ring->tail, kring->nr_hwtail);
		ring->head = kring->rhead = kring->nr_hwcur;
		ring->cur  = kring->rcur  = kring->nr_hwcur;
		ring->tail = kring->rtail = kring->nr_hwtail;
	}
	return (errors ? 1 : 0);
}
/* interpret the ringid and flags fields of an nmreq, by translating them
 * into a pair of intervals of ring indices:
 *
 * [priv->np_txqfirst, priv->np_txqlast) and
 * [priv->np_rxqfirst, priv->np_rxqlast)
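 *
 * For example (illustrative numbers): NR_REG_ONE_NIC with nr_ringid == 2
 * selects tx [2,3) and rx [2,3), while NR_REG_ALL_NIC selects
 * [0, nma_get_nrings(na, t)) in each direction t.
 */
int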
netmap_interp_ringid(struct netmap_priv_d *priv, uint32_t nr_mode,
			uint16_t nr_ringid, uint64_t nr_flags)
{
	struct netmap_adapter *na = priv->np_na;
	int excluded_direction[] = { NR_TX_RINGS_ONLY, NR_RX_RINGS_ONLY };
	enum txrx t;
	u_int j;

	for_rx_tx(t) {
		if (nr_flags & excluded_direction[t]) {
			priv->np_qfirst[t] = priv->np_qlast[t] = 0;
			continue;
		}
		switch (nr_mode) {
		case NR_REG_ALL_NIC:
		case NR_REG_NULL:
			priv->np_qfirst[t] = 0;
			priv->np_qlast[t] = nma_get_nrings(na, t);
			nm_prdis("ALL/PIPE: %s %d %d", nm_txrx2str(t),
				priv->np_qfirst[t], priv->np_qlast[t]);
			break;
		case NR_REG_SW:
		case NR_REG_NIC_SW:
			if (!(na->na_flags & NAF_HOST_RINGS)) {
				nm_prerr("host rings not supported");
				return EINVAL;
			}
			priv->np_qfirst[t] = (nr_mode == NR_REG_SW ?
				nma_get_nrings(na, t) : 0);
			priv->np_qlast[t] = netmap_all_rings(na, t);
			nm_prdis("%s: %s %d %d", nr_mode == NR_REG_SW ? "SW" : "NIC+SW",
				nm_txrx2str(t),
				priv->np_qfirst[t], priv->np_qlast[t]);
			break;
		case NR_REG_ONE_NIC:
			if (nr_ringid >= na->num_tx_rings &&
					nr_ringid >= na->num_rx_rings) {
				nm_prerr("invalid ring id %d", nr_ringid);
				return EINVAL;
			}
			/* if not enough rings, use the first one */
			j = nr_ringid;
			if (j >= nma_get_nrings(na, t))
				j = 0;
			priv->np_qfirst[t] = j;
			priv->np_qlast[t] = j + 1;
			nm_prdis("ONE_NIC: %s %d %d", nm_txrx2str(t),
				priv->np_qfirst[t], priv->np_qlast[t]);
			break;
		case NR_REG_ONE_SW:
			if (!(na->na_flags & NAF_HOST_RINGS)) {
				nm_prerr("host rings not supported");
				return EINVAL;
			}
			if (nr_ringid >= na->num_host_tx_rings &&
					nr_ringid >= na->num_host_rx_rings) {
				nm_prerr("invalid ring id %d", nr_ringid);
				return EINVAL;
			}
			/* if not enough rings, use the first one */
			j = nr_ringid;
			if (j >= nma_get_host_nrings(na, t))
				j = 0;
			priv->np_qfirst[t] = nma_get_nrings(na, t) + j;
			priv->np_qlast[t] = nma_get_nrings(na, t) + j + 1;
			nm_prdis("ONE_SW: %s %d %d", nm_txrx2str(t),
				priv->np_qfirst[t], priv->np_qlast[t]);
			break;
		default:
			nm_prerr("invalid regif type %d", nr_mode);
			return EINVAL;
		}
	}
	priv->np_flags = nr_flags;

	/* Allow transparent forwarding mode in the host --> nic
	 * direction only if all the TX hw rings have been opened. */
	if (priv->np_qfirst[NR_TX] == 0 &&
			priv->np_qlast[NR_TX] >= na->num_tx_rings) {
		priv->np_sync_flags |= NAF_CAN_FORWARD_DOWN;
	}

	if (netmap_verbose) {
		nm_prinf("%s: tx [%d,%d) rx [%d,%d) id %d",
			na->name,
			priv->np_qfirst[NR_TX],
			priv->np_qlast[NR_TX],
			priv->np_qfirst[NR_RX],
			priv->np_qlast[NR_RX],
			nr_ringid);
	}
	return 0;
}
/*
 * Set the ring ID. For devices with a single queue, a request
 * for all rings is the same as a single ring.
 */
static int
netmap_set_ringid(struct netmap_priv_d *priv, uint32_t nr_mode,
		uint16_t nr_ringid, uint64_t nr_flags)
{
	struct netmap_adapter *na = priv->np_na;
	int error;
	enum txrx t;

	error = netmap_interp_ringid(priv, nr_mode, nr_ringid, nr_flags);
	if (error) {
		return error;
	}

	priv->np_txpoll = (nr_flags & NR_NO_TX_POLL) ? 0 : 1;

	/* optimization: count the users registered for more than
	 * one ring, which are the ones sleeping on the global queue.
	 * The default netmap_notify() callback will then
	 * avoid signaling the global queue if nobody is using it
	 */
	for_rx_tx(t) {
		if (nm_si_user(priv, t))
			na->si_users[t]++;
	}
	return 0;
}

static void
netmap_unset_ringid(struct netmap_priv_d *priv)
{
	struct netmap_adapter *na = priv->np_na;
	enum txrx t;

	for_rx_tx(t) {
		if (nm_si_user(priv, t))
			na->si_users[t]--;
		priv->np_qfirst[t] = priv->np_qlast[t] = 0;
	}
	priv->np_flags = 0;
	priv->np_txpoll = 0;
	priv->np_kloop_state = 0;
}
1975 /* Set the nr_pending_mode for the requested rings.
1976 * If requested, also try to get exclusive access to the rings, provided
1977 * the rings we want to bind are not exclusively owned by a previous bind.
1980 netmap_krings_get(struct netmap_priv_d *priv)
1982 struct netmap_adapter *na = priv->np_na;
1984 struct netmap_kring *kring;
1985 int excl = (priv->np_flags & NR_EXCLUSIVE);
1988 if (netmap_debug & NM_DEBUG_ON)
1989 nm_prinf("%s: grabbing tx [%d, %d) rx [%d, %d)",
1991 priv->np_qfirst[NR_TX],
1992 priv->np_qlast[NR_TX],
1993 priv->np_qfirst[NR_RX],
1994 priv->np_qlast[NR_RX]);
1996 /* first round: check that none of the requested rings
1997 * is already exclusively owned, and that we are not asking
1998 * for exclusive ownership of rings that are already in use
2001 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
2002 kring = NMR(na, t)[i];
2003 if ((kring->nr_kflags & NKR_EXCLUSIVE) ||
2004 (kring->users && excl))
2005 {
2006 nm_prdis("ring %s busy", kring->name);
2007 return EBUSY;
2008 }
2012 /* second round: increment usage count (possibly marking them
2013 * as exclusive) and set the nr_pending_mode
2016 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
2017 kring = NMR(na, t)[i];
2018 kring->users++;
2019 if (excl)
2020 kring->nr_kflags |= NKR_EXCLUSIVE;
2021 kring->nr_pending_mode = NKR_NETMAP_ON;
2029 /* Undo netmap_krings_get(). This is done by clearing the exclusive mode
2030 * if it was asked for at regif time, and by unsetting the nr_pending_mode
2031 * if we are the last users of the involved rings. */
2033 netmap_krings_put(struct netmap_priv_d *priv)
2035 struct netmap_adapter *na = priv->np_na;
2037 struct netmap_kring *kring;
2038 int excl = (priv->np_flags & NR_EXCLUSIVE);
2041 nm_prdis("%s: releasing tx [%d, %d) rx [%d, %d)",
2043 priv->np_qfirst[NR_TX],
2044 priv->np_qlast[NR_TX],
2045 priv->np_qfirst[NR_RX],
2046 priv->np_qlast[NR_RX]);
2049 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
2050 kring = NMR(na, t)[i];
2051 if (excl)
2052 kring->nr_kflags &= ~NKR_EXCLUSIVE;
2053 kring->users--;
2054 if (kring->users == 0)
2055 kring->nr_pending_mode = NKR_NETMAP_OFF;
2061 nm_priv_rx_enabled(struct netmap_priv_d *priv)
2063 return (priv->np_qfirst[NR_RX] != priv->np_qlast[NR_RX]);
2066 /* Validate the CSB entries for both directions (atok and ktoa).
2067 * To be called under NMG_LOCK(). */
2069 netmap_csb_validate(struct netmap_priv_d *priv, struct nmreq_opt_csb *csbo)
2071 struct nm_csb_atok *csb_atok_base =
2072 (struct nm_csb_atok *)(uintptr_t)csbo->csb_atok;
2073 struct nm_csb_ktoa *csb_ktoa_base =
2074 (struct nm_csb_ktoa *)(uintptr_t)csbo->csb_ktoa;
2076 int num_rings[NR_TXRX], tot_rings;
2077 size_t entry_size[2];
2081 if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) {
2082 nm_prerr("Cannot update CSB while kloop is running");
2088 num_rings[t] = priv->np_qlast[t] - priv->np_qfirst[t];
2089 tot_rings += num_rings[t];
2094 if (!(priv->np_flags & NR_EXCLUSIVE)) {
2095 nm_prerr("CSB mode requires NR_EXCLUSIVE");
2099 entry_size[0] = sizeof(*csb_atok_base);
2100 entry_size[1] = sizeof(*csb_ktoa_base);
2101 csb_start[0] = (void *)csb_atok_base;
2102 csb_start[1] = (void *)csb_ktoa_base;
2104 for (i = 0; i < 2; i++) {
2105 /* On Linux we could use access_ok() to simplify
2106 * the validation. However, the advantage of
2107 * this approach is that it works also on FreeBSD. */
2109 size_t csb_size = tot_rings * entry_size[i];
2113 if ((uintptr_t)csb_start[i] & (entry_size[i]-1)) {
2114 nm_prerr("Unaligned CSB address");
2118 tmp = nm_os_malloc(csb_size);
2122 /* Application --> kernel direction. */
2123 err = copyin(csb_start[i], tmp, csb_size);
2125 /* Kernel --> application direction. */
2126 memset(tmp, 0, csb_size);
2127 err = copyout(tmp, csb_start[i], csb_size);
2131 nm_prerr("Invalid CSB address");
2136 priv->np_csb_atok_base = csb_atok_base;
2137 priv->np_csb_ktoa_base = csb_ktoa_base;
2139 /* Initialize the CSB. */
2141 for (i = 0; i < num_rings[t]; i++) {
2142 struct netmap_kring *kring =
2143 NMR(priv->np_na, t)[i + priv->np_qfirst[t]];
2144 struct nm_csb_atok *csb_atok = csb_atok_base + i;
2145 struct nm_csb_ktoa *csb_ktoa = csb_ktoa_base + i;
2147 if (t == NR_RX) {
2148 csb_atok += num_rings[NR_TX];
2149 csb_ktoa += num_rings[NR_TX];
2152 CSB_WRITE(csb_atok, head, kring->rhead);
2153 CSB_WRITE(csb_atok, cur, kring->rcur);
2154 CSB_WRITE(csb_atok, appl_need_kick, 1);
2155 CSB_WRITE(csb_atok, sync_flags, 1);
2156 CSB_WRITE(csb_ktoa, hwcur, kring->nr_hwcur);
2157 CSB_WRITE(csb_ktoa, hwtail, kring->nr_hwtail);
2158 CSB_WRITE(csb_ktoa, kern_need_kick, 1);
2160 nm_prinf("csb_init for kring %s: head %u, cur %u, "
2161 "hwcur %u, hwtail %u", kring->name,
2162 kring->rhead, kring->rcur, kring->nr_hwcur,
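/*
 * Userspace counterpart (sketch only; error handling omitted and
 * NRINGS is a placeholder for the number of bound rings): the
 * application allocates one atok/ktoa entry per ring, TX entries
 * first, and chains a nmreq_opt_csb onto the register request:
 *
 *	static struct nm_csb_atok atok[NRINGS] __aligned(64);
 *	static struct nm_csb_ktoa ktoa[NRINGS] __aligned(64);
 *	struct nmreq_opt_csb csbo;
 *
 *	memset(&csbo, 0, sizeof(csbo));
 *	csbo.nro_opt.nro_reqtype = NETMAP_REQ_OPT_CSB;
 *	csbo.csb_atok = (uintptr_t)atok;
 *	csbo.csb_ktoa = (uintptr_t)ktoa;
 *	hdr.nr_options = (uintptr_t)&csbo;
 *	ioctl(fd, NIOCCTRL, &hdr);
 *
 * Cache-line alignment satisfies the entry-size alignment check
 * performed above.
 */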
2170 /* Ensure that the netmap adapter can support the given MTU.
2171 * @return EINVAL if the na cannot be set to mtu, 0 otherwise.
2174 netmap_buf_size_validate(const struct netmap_adapter *na, unsigned mtu) {
2175 unsigned nbs = NETMAP_BUF_SIZE(na);
2177 if (mtu <= na->rx_buf_maxsize) {
2178 /* The MTU fits a single NIC slot. We only
2179 * need to check that netmap buffers are
2180 * large enough to hold an MTU. NS_MOREFRAG
2181 * cannot be used in this case. */
2183 nm_prerr("error: netmap buf size (%u) "
2184 "< device MTU (%u)", nbs, mtu);
2188 /* More NIC slots may be needed to receive
2189 * or transmit a single packet. Check that
2190 * the adapter supports NS_MOREFRAG and that
2191 * netmap buffers are large enough to hold
2192 * the maximum per-slot size. */
2193 if (!(na->na_flags & NAF_MOREFRAG)) {
2194 nm_prerr("error: large MTU (%d) needed "
2195 "but %s does not support "
2199 } else if (nbs < na->rx_buf_maxsize) {
2200 nm_prerr("error: using NS_MOREFRAG on "
2201 "%s requires netmap buf size "
2202 ">= %u", na->ifp->if_xname,
2203 na->rx_buf_maxsize);
2206 nm_prinf("info: netmap application on "
2207 "%s needs to support "
2209 "(MTU=%u,netmap_buf_size=%u)",
2210 na->ifp->if_xname, mtu, nbs);
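/*
 * Worked example (illustrative): with the default 2048-byte netmap
 * buffers, a 1500-byte MTU fits in a single slot and the check above
 * passes. A 9000-byte MTU instead requires the adapter to support
 * NAF_MOREFRAG and the netmap buffers to be at least rx_buf_maxsize
 * bytes, so that any packet fits in a chain of NS_MOREFRAG slots.
 */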
2218 * possibly move the interface to netmap-mode.
2219 * On success it returns a pointer to the netmap_if, otherwise NULL.
2220 * This must be called with NMG_LOCK held.
2222 * The following na callbacks are called in the process:
2224 * na->nm_config() [by netmap_update_config]
2225 * (get current number and size of rings)
2227 * We have a generic one for linux (netmap_linux_config).
2228 * The bwrap has to override this, since it has to forward
2229 * the request to the wrapped adapter (netmap_bwrap_config).
2232 * na->nm_krings_create()
2233 * (create and init the krings array)
2235 * One of the following:
2237 * * netmap_hw_krings_create, (hw ports)
2238 * creates the standard layout for the krings
2239 * and adds the mbq (used for the host rings).
2241 * * netmap_vp_krings_create (VALE ports)
2242 * add leases and scratchpads
2244 * * netmap_pipe_krings_create (pipes)
2245 * create the krings and rings of both ends and cross-link them
2248 * * netmap_monitor_krings_create (monitors)
2249 * avoid allocating the mbq
2251 * * netmap_bwrap_krings_create (bwraps)
2252 * create both the bwrap krings array,
2253 * the krings array of the wrapped adapter, and
2254 * (if needed) the fake array for the host adapter
2256 * na->nm_register(, 1)
2257 * (put the adapter in netmap mode)
2259 * This may be one of the following:
2261 * * netmap_hw_reg (hw ports)
2262 * checks that the ifp is still there, then calls
2263 * the hardware specific callback;
2265 * * netmap_vp_reg (VALE ports)
2266 * If the port is connected to a bridge,
2267 * set the NAF_NETMAP_ON flag under the
2268 * bridge write lock.
2270 * * netmap_pipe_reg (pipes)
2271 * inform the other pipe end that it is no
2272 * longer responsible for the lifetime of this pipe end
2275 * * netmap_monitor_reg (monitors)
2276 * intercept the sync callbacks of the monitored rings
2279 * * netmap_bwrap_reg (bwraps)
2280 * cross-link the bwrap and hwna rings,
2281 * forward the request to the hwna, override
2282 * the hwna notify callback (to get the frames
2283 * coming from outside go through the bridge).
2288 netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
2289 uint32_t nr_mode, uint16_t nr_ringid, uint64_t nr_flags)
2291 struct netmap_if *nifp = NULL;
2295 priv->np_na = na; /* store the reference */
2296 error = netmap_mem_finalize(na->nm_mem, na);
2300 if (na->active_fds == 0) {
2302 /* cache the allocator info in the na */
2303 error = netmap_mem_get_lut(na->nm_mem, &na->na_lut);
2306 nm_prdis("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal,
2307 na->na_lut.objsize);
2309 /* ring configuration may have changed, fetch from the card */
2310 netmap_update_config(na);
2313 /* compute the range of tx and rx rings to monitor */
2314 error = netmap_set_ringid(priv, nr_mode, nr_ringid, nr_flags);
2318 if (na->active_fds == 0) {
2320 * If this is the first registration of the adapter,
2321 * perform sanity checks and create the in-kernel view
2322 * of the netmap rings (the netmap krings).
2324 if (na->ifp && nm_priv_rx_enabled(priv)) {
2325 /* This netmap adapter is attached to an ifnet. */
2326 unsigned mtu = nm_os_ifnet_mtu(na->ifp);
2328 nm_prdis("%s: mtu %d rx_buf_maxsize %d netmap_buf_size %d",
2329 na->name, mtu, na->rx_buf_maxsize, NETMAP_BUF_SIZE(na));
2331 if (na->rx_buf_maxsize == 0) {
2332 nm_prerr("%s: error: rx_buf_maxsize == 0", na->name);
2337 error = netmap_buf_size_validate(na, mtu);
2343 * Depending on the adapter, this may also create
2344 * the netmap rings themselves
2346 error = na->nm_krings_create(na);
2352 /* now the krings must exist and we can check whether some
2353 * previous bind has exclusive ownership on them, and set
2356 error = netmap_krings_get(priv);
2358 goto err_del_krings;
2360 /* create all needed missing netmap rings */
2361 error = netmap_mem_rings_create(na);
2365 /* in all cases, create a new netmap if */
2366 nifp = netmap_mem_if_new(na, priv);
2372 if (nm_kring_pending(priv)) {
2373 /* Some kring is switching mode, tell the adapter to react on this. */
2375 error = na->nm_register(na, 1);
2380 /* Commit the reference. */
2384 * advertise that the interface is ready by setting np_nifp.
2385 * The barrier is needed because readers (poll, *SYNC and mmap)
2386 * check for priv->np_nifp != NULL without locking
2388 mb(); /* make sure previous writes are visible to all CPUs */
2389 priv->np_nifp = nifp;
2394 netmap_mem_if_delete(na, nifp);
2396 netmap_krings_put(priv);
2397 netmap_mem_rings_delete(na);
2399 if (na->active_fds == 0)
2400 na->nm_krings_delete(na);
2402 if (na->active_fds == 0)
2403 memset(&na->na_lut, 0, sizeof(na->na_lut));
2405 netmap_mem_drop(na);
2413 * update kring and ring at the end of rxsync/txsync.
2416 nm_sync_finalize(struct netmap_kring *kring)
2419 * Update ring tail to what the kernel knows
2420 * After txsync: head/rhead/hwcur might be behind cur/rcur
2423 kring->ring->tail = kring->rtail = kring->nr_hwtail;
2425 nm_prdis(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
2426 kring->name, kring->nr_hwcur, kring->nr_hwtail,
2427 kring->rhead, kring->rcur, kring->rtail);
2430 /* set ring timestamp */
2432 ring_timestamp_set(struct netmap_ring *ring)
2434 if (netmap_no_timestamp == 0 || ring->flags & NR_TIMESTAMP) {
2435 microtime(&ring->ts);
2439 static int nmreq_copyin(struct nmreq_header *, int);
2440 static int nmreq_copyout(struct nmreq_header *, int);
2441 static int nmreq_checkoptions(struct nmreq_header *);
2444 * ioctl(2) support for the "netmap" device.
2446 * The following is the list of accepted commands:
2447 * - NIOCCTRL device control API
2448 * - NIOCTXSYNC sync TX rings
2449 * - NIOCRXSYNC sync RX rings
2450 * - SIOCGIFADDR just for convenience
2451 * - NIOCGINFO deprecated (legacy API)
2452 * - NIOCREGIF deprecated (legacy API)
2454 * Return 0 on success, errno otherwise.
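/*
 * Minimal userspace sketch of a NIOCCTRL register request
 * (illustrative only; error checking omitted, "em0" is an arbitrary
 * interface name):
 *
 *	struct nmreq_register reg;
 *	struct nmreq_header hdr;
 *	int fd = open("/dev/netmap", O_RDWR);
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	memset(&reg, 0, sizeof(reg));
 *	hdr.nr_version = NETMAP_API;
 *	hdr.nr_reqtype = NETMAP_REQ_REGISTER;
 *	strlcpy(hdr.nr_name, "em0", sizeof(hdr.nr_name));
 *	hdr.nr_body = (uintptr_t)&reg;
 *	reg.nr_mode = NR_REG_ALL_NIC;
 *	ioctl(fd, NIOCCTRL, &hdr);
 *
 * The caller would then mmap() reg.nr_memsize bytes on fd to access
 * the rings and buffers.
 */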
2457 netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
2458 struct thread *td, int nr_body_is_user)
2460 struct mbq q; /* packets from RX hw queues to host stack */
2461 struct netmap_adapter *na = NULL;
2462 struct netmap_mem_d *nmd = NULL;
2463 struct ifnet *ifp = NULL;
2465 u_int i, qfirst, qlast;
2466 struct netmap_kring **krings;
2472 struct nmreq_header *hdr = (struct nmreq_header *)data;
2474 if (hdr->nr_version < NETMAP_MIN_API ||
2475 hdr->nr_version > NETMAP_MAX_API) {
2476 nm_prerr("API mismatch: got %d need %d",
2477 hdr->nr_version, NETMAP_API);
2481 /* Make a kernel-space copy of the user-space nr_body.
2482 * For convenience, the nr_body pointer and the pointers
2483 * in the options list will be replaced with their
2484 * kernel-space counterparts. The original pointers are
2485 * saved internally and later restored by nmreq_copyout
2487 error = nmreq_copyin(hdr, nr_body_is_user);
2492 /* Sanitize hdr->nr_name. */
2493 hdr->nr_name[sizeof(hdr->nr_name) - 1] = '\0';
2495 switch (hdr->nr_reqtype) {
2496 case NETMAP_REQ_REGISTER: {
2497 struct nmreq_register *req =
2498 (struct nmreq_register *)(uintptr_t)hdr->nr_body;
2499 struct netmap_if *nifp;
2501 /* Protect access to priv from concurrent requests. */
2504 struct nmreq_option *opt;
2507 if (priv->np_nifp != NULL) { /* thread already registered */
2513 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_EXTMEM);
2515 struct nmreq_opt_extmem *e =
2516 (struct nmreq_opt_extmem *)opt;
2518 nmd = netmap_mem_ext_create(e->nro_usrptr,
2519 &e->nro_info, &error);
2520 opt->nro_status = error;
2524 #endif /* WITH_EXTMEM */
2526 if (nmd == NULL && req->nr_mem_id) {
2527 /* find the allocator and get a reference */
2528 nmd = netmap_mem_find(req->nr_mem_id);
2530 if (netmap_verbose) {
2531 nm_prerr("%s: failed to find mem_id %u",
2532 hdr->nr_name, req->nr_mem_id);
2538 /* find the interface and get a reference */
2539 error = netmap_get_na(hdr, &na, &ifp, nmd,
2540 1 /* create */); /* keep reference */
2543 if (NETMAP_OWNED_BY_KERN(na)) {
2548 if (na->virt_hdr_len && !(req->nr_flags & NR_ACCEPT_VNET_HDR)) {
2549 nm_prerr("virt_hdr_len=%d, but application does "
2550 "not accept it", na->virt_hdr_len);
2555 error = netmap_do_regif(priv, na, req->nr_mode,
2556 req->nr_ringid, req->nr_flags);
2557 if (error) { /* reg. failed, release priv and ref */
2561 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
2563 struct nmreq_opt_csb *csbo =
2564 (struct nmreq_opt_csb *)opt;
2565 error = netmap_csb_validate(priv, csbo);
2566 opt->nro_status = error;
2568 netmap_do_unregif(priv);
2573 nifp = priv->np_nifp;
2575 /* return the offset of the netmap_if object */
2576 req->nr_rx_rings = na->num_rx_rings;
2577 req->nr_tx_rings = na->num_tx_rings;
2578 req->nr_rx_slots = na->num_rx_desc;
2579 req->nr_tx_slots = na->num_tx_desc;
2580 req->nr_host_tx_rings = na->num_host_tx_rings;
2581 req->nr_host_rx_rings = na->num_host_rx_rings;
2582 error = netmap_mem_get_info(na->nm_mem, &req->nr_memsize, &memflags,
2585 netmap_do_unregif(priv);
2588 if (memflags & NETMAP_MEM_PRIVATE) {
2589 *(uint32_t *)(uintptr_t)&nifp->ni_flags |= NI_PRIV_MEM;
2592 priv->np_si[t] = nm_si_user(priv, t) ?
2593 &na->si[t] : &NMR(na, t)[priv->np_qfirst[t]]->si;
2596 if (req->nr_extra_bufs) {
2598 nm_prinf("requested %d extra buffers",
2599 req->nr_extra_bufs);
2600 req->nr_extra_bufs = netmap_extra_alloc(na,
2601 &nifp->ni_bufs_head, req->nr_extra_bufs);
2603 nm_prinf("got %d extra buffers", req->nr_extra_bufs);
2605 req->nr_offset = netmap_mem_if_offset(na->nm_mem, nifp);
2607 error = nmreq_checkoptions(hdr);
2609 netmap_do_unregif(priv);
2613 /* store ifp reference so that priv destructor may release it */
2617 netmap_unget_na(na, ifp);
2619 /* release the reference from netmap_mem_find() or
2620 * netmap_mem_ext_create()
2623 netmap_mem_put(nmd);
2628 case NETMAP_REQ_PORT_INFO_GET: {
2629 struct nmreq_port_info_get *req =
2630 (struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body;
2636 if (hdr->nr_name[0] != '\0') {
2637 /* Build a nmreq_register out of the nmreq_port_info_get,
2638 * so that we can call netmap_get_na(). */
2639 struct nmreq_register regreq;
2640 bzero(&regreq, sizeof(regreq));
2641 regreq.nr_mode = NR_REG_ALL_NIC;
2642 regreq.nr_tx_slots = req->nr_tx_slots;
2643 regreq.nr_rx_slots = req->nr_rx_slots;
2644 regreq.nr_tx_rings = req->nr_tx_rings;
2645 regreq.nr_rx_rings = req->nr_rx_rings;
2646 regreq.nr_host_tx_rings = req->nr_host_tx_rings;
2647 regreq.nr_host_rx_rings = req->nr_host_rx_rings;
2648 regreq.nr_mem_id = req->nr_mem_id;
2650 /* get a refcount */
2651 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2652 hdr->nr_body = (uintptr_t)&regreq;
2653 error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
2654 hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET; /* reset type */
2655 hdr->nr_body = (uintptr_t)req; /* reset nr_body */
2661 nmd = na->nm_mem; /* get memory allocator */
2663 nmd = netmap_mem_find(req->nr_mem_id ? req->nr_mem_id : 1);
2666 nm_prerr("%s: failed to find mem_id %u",
2668 req->nr_mem_id ? req->nr_mem_id : 1);
2674 error = netmap_mem_get_info(nmd, &req->nr_memsize, &memflags,
2678 if (na == NULL) /* only memory info */
2680 netmap_update_config(na);
2681 req->nr_rx_rings = na->num_rx_rings;
2682 req->nr_tx_rings = na->num_tx_rings;
2683 req->nr_rx_slots = na->num_rx_desc;
2684 req->nr_tx_slots = na->num_tx_desc;
2685 req->nr_host_tx_rings = na->num_host_tx_rings;
2686 req->nr_host_rx_rings = na->num_host_rx_rings;
2688 netmap_unget_na(na, ifp);
2693 case NETMAP_REQ_VALE_ATTACH: {
2694 error = netmap_vale_attach(hdr, NULL /* userspace request */);
2698 case NETMAP_REQ_VALE_DETACH: {
2699 error = netmap_vale_detach(hdr, NULL /* userspace request */);
2703 case NETMAP_REQ_VALE_LIST: {
2704 error = netmap_vale_list(hdr);
2708 case NETMAP_REQ_PORT_HDR_SET: {
2709 struct nmreq_port_hdr *req =
2710 (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2711 /* Build a nmreq_register out of the nmreq_port_hdr,
2712 * so that we can call netmap_get_bdg_na(). */
2713 struct nmreq_register regreq;
2714 bzero(&regreq, sizeof(regreq));
2715 regreq.nr_mode = NR_REG_ALL_NIC;
2717 /* For now we only support virtio-net headers, and only for
2718 * VALE ports, but this may change in future. Valid lengths
2719 * for the virtio-net header are 0 (no header), 10 and 12. */
2720 if (req->nr_hdr_len != 0 &&
2721 req->nr_hdr_len != sizeof(struct nm_vnet_hdr) &&
2722 req->nr_hdr_len != 12) {
2724 nm_prerr("invalid hdr_len %u", req->nr_hdr_len);
2729 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2730 hdr->nr_body = (uintptr_t)&regreq;
2731 error = netmap_get_vale_na(hdr, &na, NULL, 0);
2732 hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_SET;
2733 hdr->nr_body = (uintptr_t)req;
2735 struct netmap_vp_adapter *vpna =
2736 (struct netmap_vp_adapter *)na;
2737 na->virt_hdr_len = req->nr_hdr_len;
2738 if (na->virt_hdr_len) {
2739 vpna->mfs = NETMAP_BUF_SIZE(na);
2742 nm_prinf("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na);
2743 netmap_adapter_put(na);
2751 case NETMAP_REQ_PORT_HDR_GET: {
2752 /* Get vnet-header length for this netmap port */
2753 struct nmreq_port_hdr *req =
2754 (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2755 /* Build a nmreq_register out of the nmreq_port_hdr,
2756 * so that we can call netmap_get_bdg_na(). */
2757 struct nmreq_register regreq;
2760 bzero(&regreq, sizeof(regreq));
2761 regreq.nr_mode = NR_REG_ALL_NIC;
2763 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2764 hdr->nr_body = (uintptr_t)&regreq;
2765 error = netmap_get_na(hdr, &na, &ifp, NULL, 0);
2766 hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_GET;
2767 hdr->nr_body = (uintptr_t)req;
2769 req->nr_hdr_len = na->virt_hdr_len;
2771 netmap_unget_na(na, ifp);
2776 case NETMAP_REQ_VALE_NEWIF: {
2777 error = nm_vi_create(hdr);
2781 case NETMAP_REQ_VALE_DELIF: {
2782 error = nm_vi_destroy(hdr->nr_name);
2786 case NETMAP_REQ_VALE_POLLING_ENABLE:
2787 case NETMAP_REQ_VALE_POLLING_DISABLE: {
2788 error = nm_bdg_polling(hdr);
2791 #endif /* WITH_VALE */
2792 case NETMAP_REQ_POOLS_INFO_GET: {
2793 /* Get information from the memory allocator used for
2795 struct nmreq_pools_info *req =
2796 (struct nmreq_pools_info *)(uintptr_t)hdr->nr_body;
2799 /* Build a nmreq_register out of the nmreq_pools_info,
2800 * so that we can call netmap_get_na(). */
2801 struct nmreq_register regreq;
2802 bzero(&regreq, sizeof(regreq));
2803 regreq.nr_mem_id = req->nr_mem_id;
2804 regreq.nr_mode = NR_REG_ALL_NIC;
2806 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2807 hdr->nr_body = (uintptr_t)&regreq;
2808 error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
2809 hdr->nr_reqtype = NETMAP_REQ_POOLS_INFO_GET; /* reset type */
2810 hdr->nr_body = (uintptr_t)req; /* reset nr_body */
2816 nmd = na->nm_mem; /* grab the memory allocator */
2822 /* Finalize the memory allocator, get the pools
2823 * information and release the allocator. */
2824 error = netmap_mem_finalize(nmd, na);
2828 error = netmap_mem_pools_info_get(req, nmd);
2829 netmap_mem_drop(na);
2831 netmap_unget_na(na, ifp);
2836 case NETMAP_REQ_CSB_ENABLE: {
2837 struct nmreq_option *opt;
2839 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
2843 struct nmreq_opt_csb *csbo =
2844 (struct nmreq_opt_csb *)opt;
2846 error = netmap_csb_validate(priv, csbo);
2848 opt->nro_status = error;
2853 case NETMAP_REQ_SYNC_KLOOP_START: {
2854 error = netmap_sync_kloop(priv, hdr);
2858 case NETMAP_REQ_SYNC_KLOOP_STOP: {
2859 error = netmap_sync_kloop_stop(priv);
2868 /* Write back request body to userspace and reset the
2869 * user-space pointer. */
2870 error = nmreq_copyout(hdr, error);
2876 if (unlikely(priv->np_nifp == NULL)) {
2880 mb(); /* make sure following reads are not from cache */
2882 if (unlikely(priv->np_csb_atok_base)) {
2883 nm_prerr("Invalid sync in CSB mode");
2888 na = priv->np_na; /* we have a reference */
2891 t = (cmd == NIOCTXSYNC ? NR_TX : NR_RX);
2892 krings = NMR(na, t);
2893 qfirst = priv->np_qfirst[t];
2894 qlast = priv->np_qlast[t];
2895 sync_flags = priv->np_sync_flags;
2897 for (i = qfirst; i < qlast; i++) {
2898 struct netmap_kring *kring = krings[i];
2899 struct netmap_ring *ring = kring->ring;
2901 if (unlikely(nm_kr_tryget(kring, 1, &error))) {
2902 error = (error ? EIO : 0);
2906 if (cmd == NIOCTXSYNC) {
2907 if (netmap_debug & NM_DEBUG_TXSYNC)
2908 nm_prinf("pre txsync ring %d cur %d hwcur %d",
2911 if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
2912 netmap_ring_reinit(kring);
2913 } else if (kring->nm_sync(kring, sync_flags | NAF_FORCE_RECLAIM) == 0) {
2914 nm_sync_finalize(kring);
2916 if (netmap_debug & NM_DEBUG_TXSYNC)
2917 nm_prinf("post txsync ring %d cur %d hwcur %d",
2921 if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
2922 netmap_ring_reinit(kring);
2924 if (nm_may_forward_up(kring)) {
2925 /* transparent forwarding, see netmap_poll() */
2926 netmap_grab_packets(kring, &q, netmap_fwd);
2928 if (kring->nm_sync(kring, sync_flags | NAF_FORCE_READ) == 0) {
2929 nm_sync_finalize(kring);
2931 ring_timestamp_set(ring);
2937 netmap_send_up(na->ifp, &q);
2944 return netmap_ioctl_legacy(priv, cmd, data, td);
2953 nmreq_size_by_type(uint16_t nr_reqtype)
2955 switch (nr_reqtype) {
2956 case NETMAP_REQ_REGISTER:
2957 return sizeof(struct nmreq_register);
2958 case NETMAP_REQ_PORT_INFO_GET:
2959 return sizeof(struct nmreq_port_info_get);
2960 case NETMAP_REQ_VALE_ATTACH:
2961 return sizeof(struct nmreq_vale_attach);
2962 case NETMAP_REQ_VALE_DETACH:
2963 return sizeof(struct nmreq_vale_detach);
2964 case NETMAP_REQ_VALE_LIST:
2965 return sizeof(struct nmreq_vale_list);
2966 case NETMAP_REQ_PORT_HDR_SET:
2967 case NETMAP_REQ_PORT_HDR_GET:
2968 return sizeof(struct nmreq_port_hdr);
2969 case NETMAP_REQ_VALE_NEWIF:
2970 return sizeof(struct nmreq_vale_newif);
2971 case NETMAP_REQ_VALE_DELIF:
2972 case NETMAP_REQ_SYNC_KLOOP_STOP:
2973 case NETMAP_REQ_CSB_ENABLE:
2975 case NETMAP_REQ_VALE_POLLING_ENABLE:
2976 case NETMAP_REQ_VALE_POLLING_DISABLE:
2977 return sizeof(struct nmreq_vale_polling);
2978 case NETMAP_REQ_POOLS_INFO_GET:
2979 return sizeof(struct nmreq_pools_info);
2980 case NETMAP_REQ_SYNC_KLOOP_START:
2981 return sizeof(struct nmreq_sync_kloop_start);
2987 nmreq_opt_size_by_type(uint32_t nro_reqtype, uint64_t nro_size)
2989 size_t rv = sizeof(struct nmreq_option);
2990 #ifdef NETMAP_REQ_OPT_DEBUG
2991 if (nro_reqtype & NETMAP_REQ_OPT_DEBUG)
2992 return (nro_reqtype & ~NETMAP_REQ_OPT_DEBUG);
2993 #endif /* NETMAP_REQ_OPT_DEBUG */
2994 switch (nro_reqtype) {
2996 case NETMAP_REQ_OPT_EXTMEM:
2997 rv = sizeof(struct nmreq_opt_extmem);
2999 #endif /* WITH_EXTMEM */
3000 case NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS:
3004 case NETMAP_REQ_OPT_CSB:
3005 rv = sizeof(struct nmreq_opt_csb);
3007 case NETMAP_REQ_OPT_SYNC_KLOOP_MODE:
3008 rv = sizeof(struct nmreq_opt_sync_kloop_mode);
3011 /* subtract the common header */
3012 return rv - sizeof(struct nmreq_option);
3016 * nmreq_copyin: create an in-kernel version of the request.
3018 * We build the following data structure:
3020 * hdr -> +-------+               buf
3021 *        |       |    +----------------+
3022 *        +-------+    | usr body ptr   |
3023 *        |options|--. +----------------+
3024 *        +-------+  | | usr options ptr|
3025 *        |body   |--+>+----------------+
3026 *        +-------+  | |                |
3027 *                   | |  copy of body  |
3028 *                   | |                |
3029 *                   | +----------------+
3030 *                   | |  option table  | (one entry per option
3031 *                   | |      ...       |  type; used entries point
3032 *                   | |                |  to the option copies below)
3033 *                   | +----------------+
3034 *                   | | usr next ptr 1 |
3035 *                   `>+----------------+
3036 *                     | copy of opt 1  |
3037 *                     | nro_next ------+-.
3038 *                     +----------------+ |
3039 *                     | usr next ptr 2 | |
3040 *                     +----------------+<'
3041 *                     | copy of opt 2  |
3042 *                     |      ...       |
3043 *                     +----------------+
3063 * The options and body fields of the hdr structure are overwritten
3064 * with in-kernel valid pointers inside the buf. The original user
3065 * pointers are saved in the buf and restored on copyout.
3066 * The list of options is copied and the pointers adjusted. The
3067 * original pointers are saved right before the option they belong to.
3069 * The option table has an entry for every available option. Entries
3070 * for options that have not been passed contain NULL.
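/*
 * For reference, a userspace caller would chain options onto the
 * request like this before the copyin below runs (sketch; field
 * names as in net/netmap.h, error handling omitted):
 *
 *	struct nmreq_opt_csb csbo = { 0 };
 *	struct nmreq_opt_extmem extmem = { 0 };
 *
 *	csbo.nro_opt.nro_reqtype = NETMAP_REQ_OPT_CSB;
 *	extmem.nro_opt.nro_reqtype = NETMAP_REQ_OPT_EXTMEM;
 *	extmem.nro_opt.nro_next = (uintptr_t)&csbo;	// second option
 *	hdr.nr_options = (uintptr_t)&extmem;		// head of the list
 */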
3075 nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user)
3077 size_t rqsz, optsz, bufsz;
3079 char *ker = NULL, *p;
3080 struct nmreq_option **next, *src, **opt_tab;
3081 struct nmreq_option buf;
3084 if (hdr->nr_reserved) {
3086 nm_prerr("nr_reserved must be zero");
3090 if (!nr_body_is_user)
3091 return 0;
3093 hdr->nr_reserved = nr_body_is_user;
3095 /* compute the total size of the buffer */
3096 rqsz = nmreq_size_by_type(hdr->nr_reqtype);
3097 if (rqsz > NETMAP_REQ_MAXSIZE) {
3101 if ((rqsz && hdr->nr_body == (uintptr_t)NULL) ||
3102 (!rqsz && hdr->nr_body != (uintptr_t)NULL)) {
3103 /* Request body expected, but not found; or
3104 * request body found but unexpected. */
3106 nm_prerr("nr_body expected but not found, or vice versa");
3111 bufsz = 2 * sizeof(void *) + rqsz +
3112 NETMAP_REQ_OPT_MAX * sizeof(opt_tab);
3113 /* compute the size of the buf below the option table.
3114 * It must contain a copy of every received option structure.
3115 * For every option we also need to store a copy of the user pointer. */
3119 for (src = (struct nmreq_option *)(uintptr_t)hdr->nr_options; src;
3120 src = (struct nmreq_option *)(uintptr_t)buf.nro_next)
3122 error = copyin(src, &buf, sizeof(*src));
3125 optsz += sizeof(*src);
3126 optsz += nmreq_opt_size_by_type(buf.nro_reqtype, buf.nro_size);
3127 if (rqsz + optsz > NETMAP_REQ_MAXSIZE) {
3131 bufsz += sizeof(void *);
3135 ker = nm_os_malloc(bufsz);
3140 p = ker; /* write pointer into the buffer */
3142 /* make a copy of the user pointers */
3143 ptrs = (uint64_t*)p;
3144 *ptrs++ = hdr->nr_body;
3145 *ptrs++ = hdr->nr_options;
3149 error = copyin((void *)(uintptr_t)hdr->nr_body, p, rqsz);
3152 /* overwrite the user pointer with the in-kernel one */
3153 hdr->nr_body = (uintptr_t)p;
3155 /* start of the options table */
3156 opt_tab = (struct nmreq_option **)p;
3157 p += sizeof(opt_tab) * NETMAP_REQ_OPT_MAX;
3159 /* copy the options */
3160 next = (struct nmreq_option **)&hdr->nr_options;
3163 struct nmreq_option *opt;
3165 /* copy the option header */
3166 ptrs = (uint64_t *)p;
3167 opt = (struct nmreq_option *)(ptrs + 1);
3168 error = copyin(src, opt, sizeof(*src));
3171 /* make a copy of the user next pointer */
3172 *ptrs = opt->nro_next;
3173 /* overwrite the user pointer with the in-kernel one */
3176 /* initialize the option as not supported.
3177 * Recognized options will update this field.
3179 opt->nro_status = EOPNOTSUPP;
3181 /* check for invalid types */
3182 if (opt->nro_reqtype < 1) {
3184 nm_prinf("invalid option type: %u", opt->nro_reqtype);
3185 opt->nro_status = EINVAL;
3190 if (opt->nro_reqtype >= NETMAP_REQ_OPT_MAX) {
3191 /* opt->nro_status is already EOPNOTSUPP */
3196 /* if the type is valid, index the option in the table
3197 * unless it is a duplicate.
3199 if (opt_tab[opt->nro_reqtype] != NULL) {
3201 nm_prinf("duplicate option: %u", opt->nro_reqtype);
3202 opt->nro_status = EINVAL;
3203 opt_tab[opt->nro_reqtype]->nro_status = EINVAL;
3207 opt_tab[opt->nro_reqtype] = opt;
3209 p = (char *)(opt + 1);
3211 /* copy the option body */
3212 optsz = nmreq_opt_size_by_type(opt->nro_reqtype,
3215 /* the option body follows the option header */
3216 error = copyin(src + 1, p, optsz);
3223 /* move to next option */
3224 next = (struct nmreq_option **)&opt->nro_next;
3228 nmreq_copyout(hdr, error);
3232 ptrs = (uint64_t *)ker;
3233 hdr->nr_body = *ptrs++;
3234 hdr->nr_options = *ptrs++;
3235 hdr->nr_reserved = 0;
3242 nmreq_copyout(struct nmreq_header *hdr, int rerror)
3244 struct nmreq_option *src, *dst;
3245 void *ker = (void *)(uintptr_t)hdr->nr_body, *bufstart;
3250 if (!hdr->nr_reserved)
3253 /* restore the user pointers in the header */
3254 ptrs = (uint64_t *)ker - 2;
3256 hdr->nr_body = *ptrs++;
3257 src = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3258 hdr->nr_options = *ptrs;
3262 bodysz = nmreq_size_by_type(hdr->nr_reqtype);
3263 error = copyout(ker, (void *)(uintptr_t)hdr->nr_body, bodysz);
3270 /* copy the options */
3271 dst = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3276 /* restore the user pointer */
3277 next = src->nro_next;
3278 ptrs = (uint64_t *)src - 1;
3279 src->nro_next = *ptrs;
3281 /* always copy the option header */
3282 error = copyout(src, dst, sizeof(*src));
3288 /* copy the option body only if there was no error */
3289 if (!rerror && !src->nro_status) {
3290 optsz = nmreq_opt_size_by_type(src->nro_reqtype,
3293 error = copyout(src + 1, dst + 1, optsz);
3300 src = (struct nmreq_option *)(uintptr_t)next;
3301 dst = (struct nmreq_option *)(uintptr_t)*ptrs;
3306 hdr->nr_reserved = 0;
3307 nm_os_free(bufstart);
3311 struct nmreq_option *
3312 nmreq_getoption(struct nmreq_header *hdr, uint16_t reqtype)
3314 struct nmreq_option **opt_tab;
3316 if (!hdr->nr_options)
3319 opt_tab = (struct nmreq_option **)(hdr->nr_options) - (NETMAP_REQ_OPT_MAX + 1);
3320 return opt_tab[reqtype];
3324 nmreq_checkoptions(struct nmreq_header *hdr)
3326 struct nmreq_option *opt;
3327 /* return error if there is still any option
3328 * marked as not supported
3331 for (opt = (struct nmreq_option *)(uintptr_t)hdr->nr_options; opt;
3332 opt = (struct nmreq_option *)(uintptr_t)opt->nro_next)
3333 if (opt->nro_status == EOPNOTSUPP)
3340 * select(2) and poll(2) handlers for the "netmap" device.
3342 * Can be called for one or more queues.
3343 * Return the event mask corresponding to ready events.
3344 * If there are no ready events (and 'sr' is not NULL), do a
3345 * selrecord on either individual selinfo or on the global one.
3346 * Device-dependent parts (locking and sync of tx/rx rings)
3347 * are done through callbacks.
3349 * On Linux, arguments are really pwait, the poll table, and 'td' is a struct file *
3350 * The first one is remapped to pwait as selrecord() uses the name as an argument.
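/*
 * Typical userspace usage (sketch; fd is a registered and mmap()ed
 * netmap descriptor, nifp the corresponding netmap_if, and
 * first_rx_ring/last_rx_ring the bound RX ring range, inclusive):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *	for (i = first_rx_ring; i <= last_rx_ring; i++) {
 *		struct netmap_ring *ring = NETMAP_RXRING(nifp, i);
 *		while (ring->head != ring->tail) {
 *			... process the slot at ring->head ...
 *			ring->head = ring->cur =
 *				nm_ring_next(ring, ring->head);
 *		}
 *	}
 */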
3354 netmap_poll(struct netmap_priv_d *priv, int events, NM_SELRECORD_T *sr)
3356 struct netmap_adapter *na;
3357 struct netmap_kring *kring;
3358 struct netmap_ring *ring;
3359 u_int i, want[NR_TXRX], revents = 0;
3360 NM_SELINFO_T *si[NR_TXRX];
3361 #define want_tx want[NR_TX]
3362 #define want_rx want[NR_RX]
3363 struct mbq q; /* packets from RX hw queues to host stack */
3366 * In order to avoid nested locks, we need to "double check"
3367 * txsync and rxsync if we decide to do a selrecord().
3368 * retry_tx (and retry_rx, later) prevent looping forever.
3370 int retry_tx = 1, retry_rx = 1;
3372 /* Transparent mode: send_down is 1 if we have found some
3373 * packets to forward (host RX ring --> NIC) during the rx
3374 * scan and we have not sent them down to the NIC yet.
3375 * Transparent mode requires binding all rings to a single file descriptor. */
3379 int sync_flags = priv->np_sync_flags;
3383 if (unlikely(priv->np_nifp == NULL)) {
3386 mb(); /* make sure following reads are not from cache */
3390 if (unlikely(!nm_netmap_on(na)))
3393 if (unlikely(priv->np_csb_atok_base)) {
3394 nm_prerr("Invalid poll in CSB mode");
3398 if (netmap_debug & NM_DEBUG_ON)
3399 nm_prinf("device %s events 0x%x", na->name, events);
3400 want_tx = events & (POLLOUT | POLLWRNORM);
3401 want_rx = events & (POLLIN | POLLRDNORM);
3404 * If the card has more than one queue AND the file descriptor is
3405 * bound to all of them, we sleep on the "global" selinfo, otherwise
3406 * we sleep on individual selinfo (FreeBSD only allows two selinfo's
3407 * per file descriptor).
3408 * The interrupt routine in the driver wakes one or the other
3409 * (or both) depending on which clients are active.
3411 * rxsync() is only called if we run out of buffers on a POLLIN.
3412 * txsync() is called if we run out of buffers on POLLOUT, or
3413 * there are pending packets to send. The latter can be disabled
3414 * passing NETMAP_NO_TX_POLL in the NIOCREG call.
3416 si[NR_RX] = priv->np_si[NR_RX];
3417 si[NR_TX] = priv->np_si[NR_TX];
3421 * We start with a lock free round which is cheap if we have
3422 * slots available. If this fails, then lock and call the sync
3423 * routines. We can't do this on Linux, as the contract says
3424 * that we must call nm_os_selrecord() unconditionally.
3427 const enum txrx t = NR_TX;
3428 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3429 kring = NMR(na, t)[i];
3430 if (kring->ring->cur != kring->ring->tail) {
3431 /* Some unseen TX space is available, so
3432 * we don't need to run txsync. */
3440 const enum txrx t = NR_RX;
3441 int rxsync_needed = 0;
3443 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3444 kring = NMR(na, t)[i];
3445 if (kring->ring->cur == kring->ring->tail
3446 || kring->rhead != kring->ring->head) {
3447 /* There are no unseen packets on this ring,
3448 * or there are some buffers to be returned
3449 * to the netmap port. We therefore go ahead
3450 * and run rxsync. */
3455 if (!rxsync_needed) {
3463 /* The selrecord must be unconditional on Linux. */
3464 nm_os_selrecord(sr, si[NR_RX]);
3465 nm_os_selrecord(sr, si[NR_TX]);
3469 * If we want to push packets out (priv->np_txpoll) or
3470 * want_tx is still set, we must issue txsync calls
3471 * (on all rings, to avoid that the tx rings stall).
3472 * Fortunately, normal tx mode has np_txpoll set.
3474 if (priv->np_txpoll || want_tx) {
3476 * The first round checks if anyone is ready, if not
3477 * do a selrecord and another round to handle races.
3478 * want_tx goes to 0 if any space is found, and is
3479 * used to skip rings with no pending transmissions.
3482 for (i = priv->np_qfirst[NR_TX]; i < priv->np_qlast[NR_TX]; i++) {
3485 kring = na->tx_rings[i];
3489 * Don't try to txsync this TX ring if we already found some
3490 * space in some of the TX rings (want_tx == 0) and there are no
3491 * TX slots in this ring that need to be flushed to the NIC (head == hwcur).
3494 if (!send_down && !want_tx && ring->head == kring->nr_hwcur)
3497 if (nm_kr_tryget(kring, 1, &revents))
3500 if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3501 netmap_ring_reinit(kring);
3504 if (kring->nm_sync(kring, sync_flags))
3507 nm_sync_finalize(kring);
3511 * If we found new slots, notify potential
3512 * listeners on the same ring.
3513 * Since we just did a txsync, look at the copies
3514 * of cur,tail in the kring.
3516 found = kring->rcur != kring->rtail;
3518 if (found) { /* notify other listeners */
3522 kring->nm_notify(kring, 0);
3526 /* if there were any packets to forward, we must have handled them by now */
3528 if (want_tx && retry_tx && sr) {
3530 nm_os_selrecord(sr, si[NR_TX]);
3538 * If want_rx is still set scan receive rings.
3539 * Do it on all rings because otherwise we starve.
3542 /* two rounds here for race avoidance */
3544 for (i = priv->np_qfirst[NR_RX]; i < priv->np_qlast[NR_RX]; i++) {
3547 kring = na->rx_rings[i];
3550 if (unlikely(nm_kr_tryget(kring, 1, &revents)))
3553 if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3554 netmap_ring_reinit(kring);
3557 /* now we can use kring->rcur, rtail */
3560 * transparent mode support: collect packets from
3561 * hw rxring(s) that have been released by the user
3563 if (nm_may_forward_up(kring)) {
3564 netmap_grab_packets(kring, &q, netmap_fwd);
3567 /* Clear the NR_FORWARD flag anyway: it may be set by
3568 * the nm_sync() below only for the host RX ring (see
3569 * netmap_rxsync_from_host()). */
3570 kring->nr_kflags &= ~NR_FORWARD;
3571 if (kring->nm_sync(kring, sync_flags))
3574 nm_sync_finalize(kring);
3575 send_down |= (kring->nr_kflags & NR_FORWARD);
3576 ring_timestamp_set(ring);
3577 found = kring->rcur != kring->rtail;
3583 kring->nm_notify(kring, 0);
3589 if (retry_rx && sr) {
3590 nm_os_selrecord(sr, si[NR_RX]);
3593 if (send_down || retry_rx) {
3596 goto flush_tx; /* and retry_rx */
3603 * Transparent mode: released bufs (i.e. between kring->nr_hwcur and
3604 * ring->head) marked with NS_FORWARD on hw rx rings are passed up
3605 * to the host stack.
3609 netmap_send_up(na->ifp, &q);
3618 nma_intr_enable(struct netmap_adapter *na, int onoff)
3620 bool changed = false;
3625 for (i = 0; i < nma_get_nrings(na, t); i++) {
3626 struct netmap_kring *kring = NMR(na, t)[i];
3627 int on = !(kring->nr_kflags & NKR_NOINTR);
3629 if (!!onoff != !!on) {
3630 changed = true;
3631 }
3632 if (onoff) {
3633 kring->nr_kflags &= ~NKR_NOINTR;
3634 } else {
3635 kring->nr_kflags |= NKR_NOINTR;
3636 }
3641 return 0; /* nothing to do */
3645 nm_prerr("Cannot %s interrupts for %s", onoff ? "enable" : "disable",
3650 na->nm_intr(na, onoff);
3656 /*-------------------- driver support routines -------------------*/
3658 /* default notify callback */
3660 netmap_notify(struct netmap_kring *kring, int flags)
3662 struct netmap_adapter *na = kring->notify_na;
3663 enum txrx t = kring->tx;
3665 nm_os_selwakeup(&kring->si);
3666 /* optimization: avoid a wake up on the global
3667 * queue if nobody has registered for more than one ring */
3670 if (na->si_users[t] > 0)
3671 nm_os_selwakeup(&na->si[t]);
3673 return NM_IRQ_COMPLETED;
3676 /* called by all routines that create netmap_adapters.
3677 * provide some defaults and get a reference to the memory allocator */
3681 netmap_attach_common(struct netmap_adapter *na)
3683 if (!na->rx_buf_maxsize) {
3684 /* Set a conservative default (larger is safer). */
3685 na->rx_buf_maxsize = PAGE_SIZE;
3689 if (na->na_flags & NAF_HOST_RINGS && na->ifp) {
3690 na->if_input = na->ifp->if_input; /* for netmap_send_up */
3692 na->pdev = na; /* make sure netmap_mem_map() is called */
3693 #endif /* __FreeBSD__ */
3694 if (na->na_flags & NAF_HOST_RINGS) {
3695 if (na->num_host_rx_rings == 0)
3696 na->num_host_rx_rings = 1;
3697 if (na->num_host_tx_rings == 0)
3698 na->num_host_tx_rings = 1;
3700 if (na->nm_krings_create == NULL) {
3701 /* we assume that we have been called by a driver,
3702 * since other port types all provide their own nm_krings_create */
3705 na->nm_krings_create = netmap_hw_krings_create;
3706 na->nm_krings_delete = netmap_hw_krings_delete;
3708 if (na->nm_notify == NULL)
3709 na->nm_notify = netmap_notify;
3712 if (na->nm_mem == NULL) {
3713 /* use the global allocator */
3714 na->nm_mem = netmap_mem_get(&nm_mem);
3717 if (na->nm_bdg_attach == NULL)
3718 /* no special nm_bdg_attach callback. On VALE
3719 * attach, we need to interpose a bwrap
3721 na->nm_bdg_attach = netmap_default_bdg_attach;
3727 /* Wrapper for the register callback provided by netmap-enabled hardware drivers.
3729 * nm_iszombie(na) means that the driver module has been
3730 * unloaded, so we cannot call into it.
3731 * nm_os_ifnet_lock() must guarantee mutual exclusion with module unloading. */
3735 netmap_hw_reg(struct netmap_adapter *na, int onoff)
3737 struct netmap_hw_adapter *hwna =
3738 (struct netmap_hw_adapter*)na;
3743 if (nm_iszombie(na)) {
3744 if (onoff) {
3745 error = ENXIO;
3746 } else if (na != NULL) {
3747 na->na_flags &= ~NAF_NETMAP_ON;
3752 error = hwna->nm_hw_register(na, onoff);
3755 nm_os_ifnet_unlock();
3761 netmap_hw_dtor(struct netmap_adapter *na)
3763 if (na->ifp == NULL)
3766 NM_DETACH_NA(na->ifp);
3771 * Allocate a netmap_adapter object, and initialize it from the
3772 * 'arg' passed by the driver on attach.
3773 * We allocate a block of memory of 'size' bytes, which has room
3774 * for struct netmap_adapter plus additional room private to the caller.
3776 * Return 0 on success, ENOMEM otherwise.
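/*
 * Typical driver-side usage (sketch; "foo" is a hypothetical driver,
 * "sc" its softc, and the descriptor counts are arbitrary):
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = sc->ifp;
 *	na.num_tx_desc = 1024;
 *	na.num_rx_desc = 1024;
 *	na.num_tx_rings = na.num_rx_rings = sc->num_queues;
 *	na.nm_txsync = foo_netmap_txsync;
 *	na.nm_rxsync = foo_netmap_rxsync;
 *	na.nm_register = foo_netmap_reg;
 *	netmap_attach(&na);
 */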
3779 netmap_attach_ext(struct netmap_adapter *arg, size_t size, int override_reg)
3781 struct netmap_hw_adapter *hwna = NULL;
3782 struct ifnet *ifp = NULL;
3784 if (size < sizeof(struct netmap_hw_adapter)) {
3785 if (netmap_debug & NM_DEBUG_ON)
3786 nm_prerr("Invalid netmap adapter size %d", (int)size);
3790 if (arg == NULL || arg->ifp == NULL) {
3791 if (netmap_debug & NM_DEBUG_ON)
3792 nm_prerr("either arg or arg->ifp is NULL");
3796 if (arg->num_tx_rings == 0 || arg->num_rx_rings == 0) {
3797 if (netmap_debug & NM_DEBUG_ON)
3798 nm_prerr("%s: invalid rings tx %d rx %d",
3799 arg->name, arg->num_tx_rings, arg->num_rx_rings);
3804 if (NM_NA_CLASH(ifp)) {
3805 /* If NA(ifp) is not null but there is no valid netmap
3806 * adapter it means that someone else is using the same
3807 * pointer (e.g. ax25_ptr on linux). This happens for
3808 * instance when also PF_RING is in use. */
3809 nm_prerr("Error: netmap adapter hook is busy");
3813 hwna = nm_os_malloc(size);
3817 hwna->up.na_flags |= NAF_HOST_RINGS | NAF_NATIVE;
3818 strlcpy(hwna->up.name, ifp->if_xname, sizeof(hwna->up.name));
3820 hwna->nm_hw_register = hwna->up.nm_register;
3821 hwna->up.nm_register = netmap_hw_reg;
3823 if (netmap_attach_common(&hwna->up)) {
3827 netmap_adapter_get(&hwna->up);
3829 NM_ATTACH_NA(ifp, &hwna->up);
3831 nm_os_onattach(ifp);
3833 if (arg->nm_dtor == NULL) {
3834 hwna->up.nm_dtor = netmap_hw_dtor;
3837 if_printf(ifp, "netmap queues/slots: TX %d/%d, RX %d/%d\n",
3838 hwna->up.num_tx_rings, hwna->up.num_tx_desc,
3839 hwna->up.num_rx_rings, hwna->up.num_rx_desc);
3843 nm_prerr("fail, arg %p ifp %p na %p", arg, ifp, hwna);
3844 return (hwna ? EINVAL : ENOMEM);
3849 netmap_attach(struct netmap_adapter *arg)
3851 return netmap_attach_ext(arg, sizeof(struct netmap_hw_adapter),
3852 1 /* override nm_reg */);
3857 NM_DBG(netmap_adapter_get)(struct netmap_adapter *na)
3863 refcount_acquire(&na->na_refcount);
3867 /* returns 1 iff the netmap_adapter is destroyed */
3869 NM_DBG(netmap_adapter_put)(struct netmap_adapter *na)
3874 if (!refcount_release(&na->na_refcount))
3875 return 0;
3880 if (na->tx_rings) { /* XXX should not happen */
3881 if (netmap_debug & NM_DEBUG_ON)
3882 nm_prerr("freeing leftover tx_rings");
3883 na->nm_krings_delete(na);
3885 netmap_pipe_dealloc(na);
3887 netmap_mem_put(na->nm_mem);
3888 bzero(na, sizeof(*na));
3894 /* nm_krings_create callback for all hardware native adapters */
3896 netmap_hw_krings_create(struct netmap_adapter *na)
3898 int ret = netmap_krings_create(na, 0);
3900 /* initialize the mbq for the sw rx ring */
3901 u_int lim = netmap_real_rings(na, NR_RX), i;
3902 for (i = na->num_rx_rings; i < lim; i++) {
3903 mbq_safe_init(&NMR(na, NR_RX)[i]->rx_queue);
3905 nm_prdis("initialized sw rx queue %d", na->num_rx_rings);
3913 * Called on module unload by the netmap-enabled drivers
3916 netmap_detach(struct ifnet *ifp)
3918 struct netmap_adapter *na = NA(ifp);
3924 netmap_set_all_rings(na, NM_KR_LOCKED);
3926 * if the netmap adapter is not native, somebody
3927 * changed it, so we cannot release it here.
3928 * The NAF_ZOMBIE flag will notify the new owner that
3929 * the driver is gone.
3931 if (!(na->na_flags & NAF_NATIVE) || !netmap_adapter_put(na)) {
3932 na->na_flags |= NAF_ZOMBIE;
3934 /* give active users a chance to notice that NAF_ZOMBIE has been
3935 * turned on, so that they can stop and return an error to userspace.
3936 * Note that this becomes a NOP if there are no active users and,
3937 * therefore, the put() above has deleted the na, since now NA(ifp) is NULL. */
3940 netmap_enable_all_rings(ifp);
3946 * Intercept packets from the network stack and pass them
3947 * to netmap as incoming packets on the 'software' ring.
3949 * We only store packets in a bounded mbq and then copy them
3950 * in the relevant rxsync routine.
3952 * We rely on the OS to make sure that the ifp and na do not go
3953 * away (typically the caller checks for IFF_DRV_RUNNING or the like).
3954 * In nm_register() or whenever there is a reinitialization,
3955 * we make sure to make the mode change visible here.
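/*
 * Illustrative userspace counterpart (sketch): a process that binds
 * the host rings of this interface sees the mbufs queued below as
 * regular netmap slots on its RX ring after the next rxsync:
 *
 *	reg.nr_mode = NR_REG_SW;	// host rings only
 *	... register, mmap and poll as usual ...
 */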
3958 netmap_transmit(struct ifnet *ifp, struct mbuf *m)
3960 struct netmap_adapter *na = NA(ifp);
3961 struct netmap_kring *kring, *tx_kring;
3962 u_int len = MBUF_LEN(m);
3963 u_int error = ENOBUFS;
3970 if (i >= na->num_host_rx_rings) {
3971 i = i % na->num_host_rx_rings;
3973 kring = NMR(na, NR_RX)[nma_get_nrings(na, NR_RX) + i];
3975 // XXX [Linux] we do not need this lock
3976 // if we follow the down/configure/up protocol -gl
3977 // mtx_lock(&na->core_lock);
3979 if (!nm_netmap_on(na)) {
3980 nm_prerr("%s not in netmap mode anymore", na->name);
3986 if (txr >= na->num_tx_rings) {
3987 txr %= na->num_tx_rings;
3989 tx_kring = NMR(na, NR_TX)[txr];
3991 if (tx_kring->nr_mode == NKR_NETMAP_OFF) {
3992 return MBUF_TRANSMIT(na, ifp, m);
3995 q = &kring->rx_queue;
3997 // XXX reconsider long packets if we handle fragments
3998 if (len > NETMAP_BUF_SIZE(na)) { /* too long for us */
3999 nm_prerr("%s from_host, drop packet size %d > %d", na->name,
4000 len, NETMAP_BUF_SIZE(na));
4004 if (!netmap_generic_hwcsum) {
4005 if (nm_os_mbuf_has_csum_offld(m)) {
4006 nm_prlim(1, "%s drop mbuf that needs checksum offload", na->name);
4011 if (nm_os_mbuf_has_seg_offld(m)) {
4012 nm_prlim(1, "%s drop mbuf that needs generic segmentation offload", na->name);
4017 ETHER_BPF_MTAP(ifp, m);
4018 #endif /* __FreeBSD__ */
4020 /* protect against netmap_rxsync_from_host(), netmap_sw_to_nic()
4021 * and maybe other instances of netmap_transmit (the latter
4022 * not possible on Linux).
4023 * We enqueue the mbuf only if we are sure there is going to be
4024 * enough room in the host RX ring, otherwise we drop it.
4028 busy = kring->nr_hwtail - kring->nr_hwcur;
4029 if (busy < 0)
4030 busy += kring->nkr_num_slots;
4031 if (busy + mbq_len(q) >= kring->nkr_num_slots - 1) {
4032 nm_prlim(2, "%s full hwcur %d hwtail %d qlen %d", na->name,
4033 kring->nr_hwcur, kring->nr_hwtail, mbq_len(q));
4036 nm_prdis(2, "%s %d bufs in queue", na->name, mbq_len(q));
4037 /* notify outside the lock */
4046 /* unconditionally wake up listeners */
4047 kring->nm_notify(kring, 0);
4048 /* this is normally netmap_notify(), but for nics
4049 * connected to a bridge it is netmap_bwrap_intr_notify(),
4050 * that possibly forwards the frames through the switch
4058 * netmap_reset() is called by the driver routines when reinitializing
4059 * a ring. The driver is in charge of locking to protect the kring.
4060 * If native netmap mode is not set just return NULL.
4061 * If native netmap mode is set, in particular, we have to set nr_mode to NKR_NETMAP_ON. */
4064 struct netmap_slot *
4065 netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n,
4068 struct netmap_kring *kring;
4071 if (!nm_native_on(na)) {
4072 nm_prdis("interface not in native netmap mode");
4073 return NULL; /* nothing to reinitialize */
4076 /* XXX note- in the new scheme, we are not guaranteed to be
4077 * under lock (e.g. when called on a device reset).
4078 * In this case, we should set a flag and not trust the
4079 * values too much. In practice: TODO
4080 * - set a RESET flag somewhere in the kring
4081 * - do the processing in a conservative way
4082 * - let the *sync() fixup at the end.
4085 if (n >= na->num_tx_rings)
4086 return NULL;
4088 kring = na->tx_rings[n];
4090 if (kring->nr_pending_mode == NKR_NETMAP_OFF) {
4091 kring->nr_mode = NKR_NETMAP_OFF;
4095 // XXX check whether we should use hwcur or rcur
4096 new_hwofs = kring->nr_hwcur - new_cur;
4098 if (n >= na->num_rx_rings)
4099 return NULL;
4100 kring = na->rx_rings[n];
4102 if (kring->nr_pending_mode == NKR_NETMAP_OFF) {
4103 kring->nr_mode = NKR_NETMAP_OFF;
4107 new_hwofs = kring->nr_hwtail - new_cur;
4109 lim = kring->nkr_num_slots - 1;
4110 if (new_hwofs > lim)
4111 new_hwofs -= lim + 1;
4113 /* Always set the new offset value and realign the ring. */
4114 if (netmap_debug & NM_DEBUG_ON)
4115 nm_prinf("%s %s%d hwofs %d -> %d, hwtail %d -> %d",
4117 tx == NR_TX ? "TX" : "RX", n,
4118 kring->nkr_hwofs, new_hwofs,
4120 tx == NR_TX ? lim : kring->nr_hwtail);
4121 kring->nkr_hwofs = new_hwofs;
4123 kring->nr_hwtail = kring->nr_hwcur + lim;
4124 if (kring->nr_hwtail > lim)
4125 kring->nr_hwtail -= lim + 1;
4129 * Wakeup on the individual and global selwait
4130 * We do the wakeup here, but the ring is not yet reconfigured.
4131 * However, we are under lock so there are no races.
4133 kring->nr_mode = NKR_NETMAP_ON;
4134 kring->nm_notify(kring, 0);
4135 return kring->ring->slot;
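/*
 * Typical call site (sketch; "foo" is a hypothetical driver): when
 * (re)initializing a TX ring, the driver asks netmap whether the ring
 * is in netmap mode, and where the netmap slots are:
 *
 *	struct netmap_slot *slot = netmap_reset(na, NR_TX, ring_nr, 0);
 *
 *	if (slot != NULL) {
 *		... program the NIC descriptors from the slots ...
 *	}
 */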
4140 * Dispatch rx/tx interrupts to the netmap rings.
4142 * "work_done" is non-null on the RX path, NULL for the TX path.
4143 * We rely on the OS to make sure that there is only one active
4144 * instance per queue, and that there is appropriate locking.
4146 * The 'notify' routine depends on what the ring is attached to.
4147 * - for a netmap file descriptor, do a selwakeup on the individual
4148 * waitqueue, plus one on the global one if needed
4149 * (see netmap_notify)
4150 * - for a nic connected to a switch, call the proper forwarding routine
4151 * (see netmap_bwrap_intr_notify)
4154 netmap_common_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
4156 struct netmap_kring *kring;
4157 enum txrx t = (work_done ? NR_RX : NR_TX);
4159 q &= NETMAP_RING_MASK;
4161 if (netmap_debug & (NM_DEBUG_RXINTR|NM_DEBUG_TXINTR)) {
4162 nm_prlim(5, "received %s queue %d", work_done ? "RX" : "TX" , q);
4165 if (q >= nma_get_nrings(na, t))
4166 return NM_IRQ_PASS; // not a physical queue
4168 kring = NMR(na, t)[q];
4170 if (kring->nr_mode == NKR_NETMAP_OFF) {
4175 kring->nr_kflags |= NKR_PENDINTR; // XXX atomic ?
4176 *work_done = 1; /* do not fire napi again */
4179 return kring->nm_notify(kring, 0);
4184 * Default functions to handle rx/tx interrupts from a physical device.
4185 * "work_done" is non-null on the RX path, NULL for the TX path.
4187 * If the card is not in netmap mode, simply return NM_IRQ_PASS,
4188 * so that the caller proceeds with regular processing.
4189 * Otherwise call netmap_common_irq().
4191 * If the card is connected to a netmap file descriptor,
4192 * do a selwakeup on the individual queue, plus one on the global one
4193 * if needed (multiqueue card _and_ there are multiqueue listeners),
4194 * and return NM_IRQ_COMPLETED.
4196 * Finally, if called on rx from an interface connected to a switch,
4197 * calls the proper forwarding routine.
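/*
 * Typical driver interrupt path (sketch; "foo" is a hypothetical
 * driver): call netmap_rx_irq() first and skip the regular rx
 * processing when netmap has consumed the interrupt:
 *
 *	static void
 *	foo_rxeof(struct foo_rxring *rxr)
 *	{
 *		u_int work_done;
 *
 *		if (netmap_rx_irq(rxr->ifp, rxr->me, &work_done) !=
 *		    NM_IRQ_PASS)
 *			return;
 *		... regular mbuf-based rx processing ...
 *	}
 */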
4200 netmap_rx_irq(struct ifnet *ifp, u_int q, u_int *work_done)
4202 struct netmap_adapter *na = NA(ifp);
4205 * XXX emulated netmap mode sets NAF_SKIP_INTR so
4206 * we still use the regular driver even though the previous
4207 * check fails. It is unclear whether we should use
4208 * nm_native_on() here.
4210 if (!nm_netmap_on(na))
4211 return NM_IRQ_PASS;
4213 if (na->na_flags & NAF_SKIP_INTR) {
4214 nm_prdis("use regular interrupt");
4218 return netmap_common_irq(na, q, work_done);
4221 /* set/clear native flags and if_transmit/netdev_ops */
4223 nm_set_native_flags(struct netmap_adapter *na)
4225 struct ifnet *ifp = na->ifp;
4227 /* We do the setup for intercepting packets only if we are the
4228 * first user of this adapter. */
4229 if (na->active_fds > 0) {
4230 return;
4231 }
4233 na->na_flags |= NAF_NETMAP_ON;
4235 nm_update_hostrings_mode(na);
4239 nm_clear_native_flags(struct netmap_adapter *na)
4241 struct ifnet *ifp = na->ifp;
4243 /* We undo the setup for intercepting packets only if we are the
4244 * last user of this adapter. */
4245 if (na->active_fds > 0) {
4246 return;
4247 }
4249 nm_update_hostrings_mode(na);
4252 na->na_flags &= ~NAF_NETMAP_ON;
4256 netmap_krings_mode_commit(struct netmap_adapter *na, int onoff)
4263 for (i = 0; i < netmap_real_rings(na, t); i++) {
4264 struct netmap_kring *kring = NMR(na, t)[i];
4266 if (onoff && nm_kring_pending_on(kring))
4267 kring->nr_mode = NKR_NETMAP_ON;
4268 else if (!onoff && nm_kring_pending_off(kring))
4269 kring->nr_mode = NKR_NETMAP_OFF;
4275 * Module loader and unloader
4277 * netmap_init() creates the /dev/netmap device and initializes
4278 * all global variables. Returns 0 on success, errno on failure
4279 * (though in practice this should not happen).
4281 * netmap_fini() destroys everything.
4284 static struct cdev *netmap_dev; /* /dev/netmap character device. */
4285 extern struct cdevsw netmap_cdevsw;
4292 destroy_dev(netmap_dev);
4293 /* we assume that there are no remaining netmap users */
4295 netmap_uninit_bridges();
4298 nm_prinf("netmap: unloaded module.");
4309 error = netmap_mem_init();
4313 * MAKEDEV_ETERNAL_KLD avoids an expensive check on syscalls
4314 * when the module is compiled in.
4315 * XXX could use make_dev_credv() to get error number
4317 netmap_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD,
4318 &netmap_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600,
4323 error = netmap_init_bridges();
4328 nm_os_vi_init_index();
4331 error = nm_os_ifnet_init();
4335 nm_prinf("netmap: loaded module");
4339 return (EINVAL); /* may be incorrect */