2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2011-2014 Matteo Landi
5 * Copyright (C) 2011-2016 Luigi Rizzo
6 * Copyright (C) 2011-2016 Giuseppe Lettieri
7 * Copyright (C) 2011-2016 Vincenzo Maffione
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
36 * This module supports memory mapped access to network devices,
37 * see netmap(4).
39 * The module uses a large memory pool allocated by the kernel
40 * and accessible as mmapped memory by multiple userspace threads/processes.
41 * The memory pool contains packet buffers and "netmap rings",
42 * i.e. user-accessible copies of the interface's queues.
44 * Access to the network card works like this:
45 * 1. a process/thread issues one or more open() on /dev/netmap, to create
46 * a select()able file descriptor on which events are reported.
47 * 2. on each descriptor, the process issues an ioctl() to identify
48 * the interface that should report events to the file descriptor.
49 * 3. on each descriptor, the process issues an mmap() request to
50 * map the shared memory region within the process' address space.
51 * The list of interesting queues is indicated by a location in
52 * the shared memory region.
53 * 4. using the functions in the netmap(4) userspace API, a process
54 * can look up the occupation state of a queue, access memory buffers,
55 * and retrieve received packets or enqueue packets to transmit.
56 * 5. using some ioctl()s the process can synchronize the userspace view
57 * of the queue with the actual status in the kernel. This includes both
58 * receiving the notification of new packets, and transmitting new
59 * packets on the output interface.
60 * 6. select() or poll() can be used to wait for events on individual
61 * transmit or receive queues (or all queues for a given interface).
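 *
 * For illustration, a minimal transmit path through steps 1-5 could look
 * like the sketch below (error handling omitted; this uses the legacy
 * struct nmreq/NIOCREGIF interface and the helpers from
 * <net/netmap_user.h>; "em0" and build_packet() are placeholders):
 *
 *	struct nmreq req = { .nr_version = NETMAP_API };
 *	int fd = open("/dev/netmap", O_RDWR);		// step 1
 *	strlcpy(req.nr_name, "em0", sizeof(req.nr_name));
 *	ioctl(fd, NIOCREGIF, &req);			// step 2
 *	char *mem = mmap(0, req.nr_memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);				// step 3
 *	struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);
 *	struct netmap_ring *txr = NETMAP_TXRING(nifp, 0);
 *	while (!nm_ring_empty(txr)) {			// step 4
 *		struct netmap_slot *slot = &txr->slot[txr->head];
 *		slot->len = build_packet(NETMAP_BUF(txr, slot->buf_idx));
 *		txr->head = txr->cur = nm_ring_next(txr, txr->head);
 *	}
 *	ioctl(fd, NIOCTXSYNC, NULL);			// step 5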
64 SYNCHRONIZATION (USER)
66 The netmap rings and data structures may be shared among multiple
67 user threads or even independent processes.
68 Any synchronization among those threads/processes is delegated
69 to the threads themselves. Only one thread at a time can be in
70 a system call on the same netmap ring. The OS does not enforce
71 this and only guarantees against system crashes in case of
72 invalid usage.
76 Within the kernel, access to the netmap rings is protected as follows:
78 - a spinlock on each ring, to handle producer/consumer races on
79 RX rings attached to the host stack (against multiple host
80 threads writing from the host stack to the same ring),
81 and on 'destination' rings attached to a VALE switch
82 (i.e. RX rings in VALE ports, and TX rings in NIC/host ports),
83 protecting against multiple active senders for the same destination
85 - an atomic variable to guarantee that there is at most one
86 instance of *_*xsync() on the ring at any time.
87 For rings connected to user file
88 descriptors, an atomic_test_and_set() protects this, and the
89 lock on the ring is not actually used (see the sketch after this list).
90 For NIC RX rings connected to a VALE switch, an atomic_test_and_set()
91 is also used to prevent multiple executions (the driver might indeed
92 already guarantee this).
93 For NIC TX rings connected to a VALE switch, the lock arbitrates
94 access to the queue (both when allocating buffers and when pushing
95 them out).
97 - *xsync() should be protected against initializations of the card.
98 On FreeBSD most devices have the reset routine protected by
99 a RING lock (ixgbe, igb, em) or core lock (re). lem is missing
100 the RING protection on rx_reset(); this should be added.
102 On linux there is an external lock on the tx path, which probably
103 also arbitrates access to the reset routine. XXX to be revised
105 - a per-interface core_lock protecting access from the host stack
106 while interfaces may be detached from netmap mode.
107 XXX there should be no need for this lock if we detach the interfaces
108 only while they are down.
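
The per-kring exclusion described above is essentially the pattern
sketched here (a simplified view of nm_kr_tryget()/nm_kr_put() from
netmap_kern.h; names follow that header, but the real code also
handles stopped/locked rings):

	if (NM_ATOMIC_TEST_AND_SET(&kring->nr_busy))
		return EBUSY;		// another *_*xsync() is running
	// ... run the txsync/rxsync body ...
	NM_ATOMIC_CLEAR(&kring->nr_busy);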
111 --- VALE SWITCH ---

113 NMG_LOCK() serializes all modifications to switches and ports.
114 A switch cannot be deleted until all ports are gone.
116 For each switch, an SX lock (RWlock on linux) protects
117 deletion of ports. When adding or deleting a port, the
118 lock is acquired in exclusive mode (after holding NMG_LOCK).
119 When forwarding, the lock is acquired in shared mode (without NMG_LOCK).
120 The lock is held throughout the entire forwarding cycle,
121 during which the thread may incur a page fault.
122 Hence it is important that sleepable shared locks are used.
124 On the rx ring, the per-port lock is grabbed initially to reserve
125 a number of slots in the ring, then the lock is released,
126 packets are copied from source to destination, and then
127 the lock is acquired again and the receive ring is updated.
128 (A similar thing is done on the tx ring for NIC and host stack
129 ports attached to the switch)
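
In pseudo-C, one forwarding cycle on a destination ring is therefore
roughly as follows (illustrative names, not the actual identifiers
used by nm_bdg_flush()):

	lock(dst);
	first = reserve_slots(dst, n);		// reserve
	unlock(dst);
	copy_packets(src, dst, first, n);	// may incur a page fault
	lock(dst);
	publish_slots(dst, n);			// update the receive ring
	unlock(dst);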
134 /* --- internals ----
136 * Roadmap to the code that implements the above.
138 * > 1. a process/thread issues one or more open() on /dev/netmap, to create
139 * > a select()able file descriptor on which events are reported.
141 * Internally, we allocate a netmap_priv_d structure, which will be
142 * initialized on ioctl(NIOCREGIF). There is one netmap_priv_d
143 * structure for each open().
146 * FreeBSD: see netmap_open() (netmap_freebsd.c)
147 * linux: see linux_netmap_open() (netmap_linux.c)
149 * > 2. on each descriptor, the process issues an ioctl() to identify
150 * > the interface that should report events to the file descriptor.
152 * Implemented by netmap_ioctl(), NIOCREGIF case, with nmr->nr_cmd==0.
153 * Most important things happen in netmap_get_na() and
154 * netmap_do_regif(), called from there. Additional details can be
155 * found in the comments above those functions.
157 * In all cases, this action creates/takes-a-reference-to a
158 * netmap_*_adapter describing the port, and allocates a netmap_if
159 * and all necessary netmap rings, filling them with netmap buffers.
161 * In this phase, the sync callbacks for each ring are set (these are used
162 * in steps 5 and 6 below). The callbacks depend on the type of adapter.
163 * The adapter creation/initialization code puts them in the
164 * netmap_adapter (fields na->nm_txsync and na->nm_rxsync). Then, they
165 * are copied from there to the netmap_kring's during netmap_do_regif(), by
166 * the nm_krings_create() callback. All the nm_krings_create callbacks
167 * actually call netmap_krings_create() to perform this and the other
168 * common stuff. netmap_krings_create() also takes care of the host rings,
169 * if needed, by setting their sync callbacks appropriately.
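 *
 * For a native driver, this registration typically looks like the
 * sketch below (modeled on the FreeBSD ixgbe/em netmap glue; the
 * DEVICE_* names and the 'adapter' fields are placeholders for
 * whatever the driver actually uses):
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = adapter->ifp;
 *	na.num_tx_desc = adapter->num_tx_desc;
 *	na.num_rx_desc = adapter->num_rx_desc;
 *	na.num_tx_rings = na.num_rx_rings = adapter->num_queues;
 *	na.nm_txsync = DEVICE_netmap_txsync;	// used in steps 5-6
 *	na.nm_rxsync = DEVICE_netmap_rxsync;
 *	na.nm_register = DEVICE_netmap_reg;
 *	netmap_attach(&na);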
171 * Additional actions depend on the kind of netmap_adapter that has been
174 * - netmap_hw_adapter: [netmap.c]
175 * This is a system netdev/ifp with native netmap support.
176 * The ifp is detached from the host stack by redirecting:
177 * - transmissions (from the network stack) to netmap_transmit()
178 * - receive notifications to the nm_notify() callback for
179 * this adapter. The callback is normally netmap_notify(), unless
180 * the ifp is attached to a bridge using bwrap, in which case it
181 * is netmap_bwrap_intr_notify().
183 * - netmap_generic_adapter: [netmap_generic.c]
184 * A system netdev/ifp without native netmap support.
186 * (the decision about native/non-native support is taken in
187 * netmap_get_hw_na(), called by netmap_get_na())
189 * - netmap_vp_adapter [netmap_vale.c]
190 * Returned by netmap_get_bdg_na().
191 * This is a persistent or ephemeral VALE port. Ephemeral ports
192 * are created on the fly if they don't already exist, and are
193 * always attached to a bridge.
194 * Persistent VALE ports must be created separately, and then
195 * attached like normal NICs. The NIOCREGIF we are examining
196 * will find them only if they had previously been created and
197 * attached (see VALE_CTL below).
199 * - netmap_pipe_adapter [netmap_pipe.c]
200 * Returned by netmap_get_pipe_na().
201 * Both pipe ends are created, if they didn't already exist.
203 * - netmap_monitor_adapter [netmap_monitor.c]
204 * Returned by netmap_get_monitor_na().
205 * If successful, the nm_sync callbacks of the monitored adapter
206 * will be intercepted by the returned monitor.
208 * - netmap_bwrap_adapter [netmap_vale.c]
209 * Cannot be obtained in this way, see VALE_CTL below
213 * linux: we first go through linux_netmap_ioctl() to
214 * adapt the FreeBSD interface to the linux one.
217 * > 3. on each descriptor, the process issues an mmap() request to
218 * > map the shared memory region within the process' address space.
219 * > The list of interesting queues is indicated by a location in
220 * > the shared memory region.
223 * FreeBSD: netmap_mmap_single (netmap_freebsd.c).
224 * linux: linux_netmap_mmap (netmap_linux.c).
226 * > 4. using the functions in the netmap(4) userspace API, a process
227 * > can look up the occupation state of a queue, access memory buffers,
228 * > and retrieve received packets or enqueue packets to transmit.
230 * These actions do not involve the kernel.
232 * > 5. using some ioctl()s the process can synchronize the userspace view
233 * > of the queue with the actual status in the kernel. This includes both
234 * > receiving the notification of new packets, and transmitting new
235 * > packets on the output interface.
237 * These are implemented in netmap_ioctl(), NIOCTXSYNC and NIOCRXSYNC
238 * cases. They invoke the nm_sync callbacks on the netmap_kring
239 * structures, as initialized in step 2 and maybe later modified
240 * by a monitor. Monitors, however, will always call the original
241 * callback before doing anything else.
244 * > 6. select() or poll() can be used to wait for events on individual
245 * > transmit or receive queues (or all queues for a given interface).
247 * Implemented in netmap_poll(). This will call the same nm_sync()
248 * callbacks as in step 5 above.
251 * linux: we first go through linux_netmap_poll() to adapt
252 * the FreeBSD interface to the linux one.
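 *
 * A typical receive loop combining steps 4-6 (a sketch, reusing the
 * fd and nifp from the example after step 6 above; consume_packet()
 * is a placeholder):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct netmap_ring *rxr = NETMAP_RXRING(nifp, 0);
 *
 *	for (;;) {
 *		poll(&pfd, 1, -1);			// step 6
 *		while (!nm_ring_empty(rxr)) {		// step 4
 *			struct netmap_slot *slot = &rxr->slot[rxr->head];
 *			consume_packet(NETMAP_BUF(rxr, slot->buf_idx),
 *			    slot->len);
 *			rxr->head = rxr->cur = nm_ring_next(rxr, rxr->head);
 *		}
 *	}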
255 * ---- VALE_CTL -----
257 * VALE switches are controlled by issuing a NIOCREGIF with a non-null
258 * nr_cmd in the nmreq structure. These subcommands are handled by
259 * netmap_bdg_ctl() in netmap_vale.c. Persistent VALE ports are created
260 * and destroyed by issuing the NETMAP_BDG_NEWIF and NETMAP_BDG_DELIF
261 * subcommands, respectively.
263 * Any network interface known to the system (including a persistent VALE
264 * port) can be attached to a VALE switch by issuing the
265 * NETMAP_REQ_VALE_ATTACH command. After the attachment, persistent VALE ports
266 * look exactly like ephemeral VALE ports (as created in step 2 above). The
267 * attachment of other interfaces, instead, requires the creation of a
268 * netmap_bwrap_adapter. Moreover, the attached interface must be put in
269 * netmap mode. This may require the creation of a netmap_generic_adapter if
270 * we have no native support for the interface, or if generic adapters have
271 * been forced by sysctl.
273 * Both persistent VALE ports and bwraps are handled by netmap_get_bdg_na(),
274 * called by nm_bdg_ctl_attach(), and discriminated by the nm_bdg_attach()
275 * callback. In the case of the bwrap, the callback creates the
276 * netmap_bwrap_adapter. The initialization of the bwrap is then
277 * completed by calling netmap_do_regif() on it, in the nm_bdg_ctl()
278 * callback (netmap_bwrap_bdg_ctl in netmap_vale.c).
279 * A generic adapter for the wrapped ifp will be created if needed, when
280 * netmap_get_bdg_na() calls netmap_get_hw_na().
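 *
 * A hedged sketch of the attach request from userspace, using the
 * nmreq_header interface ("vale0:em0" is a placeholder name; option
 * handling is omitted):
 *
 *	struct nmreq_header hdr;
 *	struct nmreq_vale_attach vreq;
 *
 *	bzero(&hdr, sizeof(hdr));
 *	bzero(&vreq, sizeof(vreq));
 *	hdr.nr_version = NETMAP_API;
 *	hdr.nr_reqtype = NETMAP_REQ_VALE_ATTACH;
 *	strlcpy(hdr.nr_name, "vale0:em0", sizeof(hdr.nr_name));
 *	vreq.reg.nr_mode = NR_REG_ALL_NIC;
 *	hdr.nr_body = (uintptr_t)&vreq;
 *	ioctl(fd, NIOCCTRL, &hdr);		// attach em0 to vale0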
283 * ---- DATAPATHS -----
285 * -= SYSTEM DEVICE WITH NATIVE SUPPORT =-
287 * na == NA(ifp) == netmap_hw_adapter created in DEVICE_netmap_attach()
289 * - tx from netmap userspace:
291 * 1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
292 * kring->nm_sync() == DEVICE_netmap_txsync()
293 * 2) device interrupt handler
294 * na->nm_notify() == netmap_notify()
295 * - rx from netmap userspace:
297 * 1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
298 * kring->nm_sync() == DEVICE_netmap_rxsync()
299 * 2) device interrupt handler
300 * na->nm_notify() == netmap_notify()
301 * - rx from host stack
302 * concurrently with netmap_transmit()
303 * 1) host stack
304 * netmap_transmit()
305 * na->nm_notify() == netmap_notify()
306 * 2) ioctl(NIOCRXSYNC)/netmap_poll() in process context
307 * kring->nm_sync() == netmap_rxsync_from_host
308 * netmap_rxsync_from_host(na, NULL, NULL)
309 * - tx to host stack
310 * ioctl(NIOCTXSYNC)/netmap_poll() in process context
311 * kring->nm_sync() == netmap_txsync_to_host
312 * netmap_txsync_to_host(na)
314 * FreeBSD: na->if_input() == ether_input()
315 * linux: netif_rx() with NM_MAGIC_PRIORITY_RX
318 * -= SYSTEM DEVICE WITH GENERIC SUPPORT =-
320 * na == NA(ifp) == generic_netmap_adapter created in generic_netmap_attach()
322 * - tx from netmap userspace:
324 * 1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
325 * kring->nm_sync() == generic_netmap_txsync()
326 * nm_os_generic_xmit_frame()
327 * linux: dev_queue_xmit() with NM_MAGIC_PRIORITY_TX
328 * ifp->ndo_start_xmit == generic_ndo_start_xmit()
329 * gna->save_start_xmit == orig. dev. start_xmit
330 * FreeBSD: na->if_transmit() == orig. dev if_transmit
331 * 2) generic_mbuf_destructor()
332 * na->nm_notify() == netmap_notify()
333 * - rx from netmap userspace:
334 * 1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
335 * kring->nm_sync() == generic_netmap_rxsync()
338 * generic_rx_handler()
340 * na->nm_notify() == netmap_notify()
341 * - rx from host stack
342 * FreeBSD: same as native
343 * Linux: same as native except:
345 * dev_queue_xmit() without NM_MAGIC_PRIORITY_TX
346 * ifp->ndo_start_xmit == generic_ndo_start_xmit()
348 * na->nm_notify() == netmap_notify()
349 * - tx to host stack (same as native):
351 * -= VALE =-
353 * INCOMING:
355 * -- vale ports:
357 * ioctl(NIOCTXSYNC)/netmap_poll() in process context
358 * kring->nm_sync() == netmap_vp_txsync()
360 * - system device with native support:
363 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
364 * kring->nm_sync() == DEVICE_netmap_rxsync()
366 * kring->nm_sync() == DEVICE_netmap_rxsync()
369 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
370 * kring->nm_sync() == netmap_rxsync_from_host()
373 * - system device with generic support:
374 * from device driver:
375 * generic_rx_handler()
376 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
377 * kring->nm_sync() == generic_netmap_rxsync()
379 * kring->nm_sync() == generic_netmap_rxsync()
382 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
383 * kring->nm_sync() == netmap_rxsync_from_host()
386 * (all cases) --> nm_bdg_flush()
387 * dest_na->nm_notify() == (see below)
389 * OUTGOING:
391 * -- vale ports:
393 * 1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
394 * kring->nm_sync() == netmap_vp_rxsync()
395 * 2) from nm_bdg_flush()
396 * na->nm_notify() == netmap_notify()
398 * - system device with native support:
400 * na->nm_notify() == netmap_bwrap_notify()
402 * kring->nm_sync() == DEVICE_netmap_txsync()
406 * kring->nm_sync() == netmap_txsync_to_host
407 * netmap_vp_rxsync_locked()
409 * - system device with generic adapter:
411 * na->nm_notify() == netmap_bwrap_notify()
413 * kring->nm_sync() == generic_netmap_txsync()
417 * kring->nm_sync() == netmap_txsync_to_host
423 * OS-specific code that is used only within this file.
424 * Other OS-specific code that must be accessed by drivers
425 * is present in netmap_kern.h
428 #if defined(__FreeBSD__)
429 #include <sys/cdefs.h> /* prerequisite */
430 #include <sys/types.h>
431 #include <sys/errno.h>
432 #include <sys/param.h> /* defines used in kernel.h */
433 #include <sys/kernel.h> /* types used in module initialization */
434 #include <sys/conf.h> /* cdevsw struct, UID, GID */
435 #include <sys/filio.h> /* FIONBIO */
436 #include <sys/sockio.h>
437 #include <sys/socketvar.h> /* struct socket */
438 #include <sys/malloc.h>
439 #include <sys/poll.h>
440 #include <sys/proc.h>
441 #include <sys/rwlock.h>
442 #include <sys/socket.h> /* sockaddrs */
443 #include <sys/selinfo.h>
444 #include <sys/sysctl.h>
445 #include <sys/jail.h>
446 #include <sys/epoch.h>
447 #include <net/vnet.h>
448 #include <net/if.h>
449 #include <net/if_var.h>
450 #include <net/bpf.h> /* BIOCIMMEDIATE */
451 #include <machine/bus.h> /* bus_dmamap_* */
452 #include <sys/endian.h>
453 #include <sys/refcount.h>
454 #include <net/ethernet.h> /* ETHER_BPF_MTAP */
456 #elif defined(linux)
459 #include "bsd_glue.h"
461 #elif defined(__APPLE__)
463 #warning OSX support is only partial
464 #include "osx_glue.h"
466 #elif defined (_WIN32)
468 #include "win_glue.h"
470 #else
472 #error Unsupported platform
474 #endif /* unsupported */
479 #include <net/netmap.h>
480 #include <dev/netmap/netmap_kern.h>
481 #include <dev/netmap/netmap_mem2.h>
484 /* user-controlled variables */
485 int netmap_verbose;
486 #ifdef CONFIG_NETMAP_DEBUG
487 int netmap_debug;
488 #endif /* CONFIG_NETMAP_DEBUG */
490 static int netmap_no_timestamp; /* don't timestamp on rxsync */
491 int netmap_no_pendintr = 1;
492 int netmap_txsync_retry = 2;
493 static int netmap_fwd = 0; /* force transparent forwarding */
496 * netmap_admode selects the netmap mode to use.
497 * Invalid values are reset to NETMAP_ADMODE_BEST
499 enum { NETMAP_ADMODE_BEST = 0, /* use native, fallback to generic */
500 NETMAP_ADMODE_NATIVE, /* either native or none */
501 NETMAP_ADMODE_GENERIC, /* force generic */
502 NETMAP_ADMODE_LAST };
503 static int netmap_admode = NETMAP_ADMODE_BEST;
505 /* netmap_generic_mit controls mitigation of RX notifications for
506 * the generic netmap adapter. The value is a time interval in
507 * nanoseconds. */
508 int netmap_generic_mit = 100*1000;
510 /* We use by default netmap-aware qdiscs with generic netmap adapters,
511 * even if there can be a little performance hit with hardware NICs.
512 * However, using the qdisc is the safer approach, for two reasons:
513 * 1) it prevents non-fifo qdiscs from breaking the TX notification
514 * scheme, which is based on mbuf destructors when txqdisc is
515 * used.
516 * 2) it makes it possible to transmit over software devices that
517 * change skb->dev, like bridge, veth, ...
519 * In any case, users looking for the best performance should
520 * use native adapters.
523 int netmap_generic_txqdisc = 1;
526 /* Default number of slots and queues for generic adapters. */
527 int netmap_generic_ringsize = 1024;
528 int netmap_generic_rings = 1;
530 /* Non-zero to enable checksum offloading in NIC drivers */
531 int netmap_generic_hwcsum = 0;
533 /* Non-zero if ptnet devices are allowed to use virtio-net headers. */
534 int ptnet_vnet_hdr = 1;
537 * SYSCTL calls are grouped between SYSBEGIN and SYSEND to be emulated
538 * in some other operating systems
542 SYSCTL_DECL(_dev_netmap);
543 SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
544 "Netmap args");
545 SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
546 CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
547 #ifdef CONFIG_NETMAP_DEBUG
548 SYSCTL_INT(_dev_netmap, OID_AUTO, debug,
549 CTLFLAG_RW, &netmap_debug, 0, "Debug messages");
550 #endif /* CONFIG_NETMAP_DEBUG */
551 SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
552 CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp");
553 SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, CTLFLAG_RW, &netmap_no_pendintr,
554 0, "Always look for new received packets.");
555 SYSCTL_INT(_dev_netmap, OID_AUTO, txsync_retry, CTLFLAG_RW,
556 &netmap_txsync_retry, 0, "Number of txsync loops in bridge's flush.");
558 SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0,
559 "Force NR_FORWARD mode");
560 SYSCTL_INT(_dev_netmap, OID_AUTO, admode, CTLFLAG_RW, &netmap_admode, 0,
561 "Adapter mode. 0 selects the best option available,"
562 "1 forces native adapter, 2 forces emulated adapter");
563 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_hwcsum, CTLFLAG_RW, &netmap_generic_hwcsum,
564 0, "Hardware checksums. 0 to disable checksum generation by the NIC (default),"
565 "1 to enable checksum generation by the NIC");
566 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_mit, CTLFLAG_RW, &netmap_generic_mit,
567 0, "RX notification interval in nanoseconds");
568 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_ringsize, CTLFLAG_RW,
569 &netmap_generic_ringsize, 0,
570 "Number of per-ring slots for emulated netmap mode");
571 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_rings, CTLFLAG_RW,
572 &netmap_generic_rings, 0,
573 "Number of TX/RX queues for emulated netmap adapters");
575 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_txqdisc, CTLFLAG_RW,
576 &netmap_generic_txqdisc, 0, "Use qdisc for generic adapters");
578 SYSCTL_INT(_dev_netmap, OID_AUTO, ptnet_vnet_hdr, CTLFLAG_RW, &ptnet_vnet_hdr,
579 0, "Allow ptnet devices to use virtio-net headers");
583 NMG_LOCK_T netmap_global_lock;
586 * mark the ring as stopped, and run through the locks
587 * to make sure other users get to see it.
588 * stopped must be either NM_KR_STOPPED (for unbounded stop)
589 * or NM_KR_LOCKED (brief stop for mutual exclusion purposes)
592 netmap_disable_ring(struct netmap_kring *kr, int stopped)
594 nm_kr_stop(kr, stopped);
595 // XXX check if nm_kr_stop is sufficient
596 mtx_lock(&kr->q_lock);
597 mtx_unlock(&kr->q_lock);
601 /* stop or enable a single ring */
603 netmap_set_ring(struct netmap_adapter *na, u_int ring_id, enum txrx t, int stopped)
605 if (stopped)
606 netmap_disable_ring(NMR(na, t)[ring_id], stopped);
607 else
608 NMR(na, t)[ring_id]->nkr_stopped = 0;
612 /* stop or enable all the rings of na */
614 netmap_set_all_rings(struct netmap_adapter *na, int stopped)
619 if (!nm_netmap_on(na))
620 return;
622 if (netmap_verbose) {
623 nm_prinf("%s: %sable all rings", na->name,
624 (stopped ? "dis" : "en"));
626 for_rx_tx(t) {
627 for (i = 0; i < netmap_real_rings(na, t); i++) {
628 netmap_set_ring(na, i, t, stopped);
634 * Convenience function used in drivers. Waits for current txsync()s/rxsync()s
635 * to finish and prevents any new one from starting. Call this before turning
636 * netmap mode off, or before removing the hardware rings (e.g., on module
637 * unload).
640 netmap_disable_all_rings(struct ifnet *ifp)
642 if (NM_NA_VALID(ifp)) {
643 netmap_set_all_rings(NA(ifp), NM_KR_LOCKED);
648 * Convenience function used in drivers. Re-enables rxsync and txsync on the
649 * adapter's rings. In linux drivers, this should be placed near each
650 * napi_enable().
653 netmap_enable_all_rings(struct ifnet *ifp)
655 if (NM_NA_VALID(ifp)) {
656 netmap_set_all_rings(NA(ifp), 0 /* enabled */);
661 netmap_make_zombie(struct ifnet *ifp)
663 if (NM_NA_VALID(ifp)) {
664 struct netmap_adapter *na = NA(ifp);
665 netmap_set_all_rings(na, NM_KR_LOCKED);
666 na->na_flags |= NAF_ZOMBIE;
667 netmap_set_all_rings(na, 0);
672 netmap_undo_zombie(struct ifnet *ifp)
674 if (NM_NA_VALID(ifp)) {
675 struct netmap_adapter *na = NA(ifp);
676 if (na->na_flags & NAF_ZOMBIE) {
677 netmap_set_all_rings(na, NM_KR_LOCKED);
678 na->na_flags &= ~NAF_ZOMBIE;
679 netmap_set_all_rings(na, 0);
685 * generic bound-checking function
688 nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg)
690 u_int oldv = *v;
691 const char *op = NULL;
700 } else if (oldv > hi) {
701 *v = hi;
702 op = "Clamp";
703 }
704 if (op && msg)
705 nm_prinf("%s %s to %d (was %d)", op, msg, *v, oldv);
706 return *v;
711 * packet-dump function, user-supplied or static buffer.
712 * The destination buffer must be at least 30+4*len
715 nm_dump_buf(char *p, int len, int lim, char *dst)
717 static char _dst[8192];
719 static char hex[] = "0123456789abcdef";
720 char *o; /* output position */
722 #define P_HI(x) hex[((x) & 0xf0)>>4]
723 #define P_LO(x) hex[((x) & 0xf)]
724 #define P_C(x) ((x) >= 0x20 && (x) <= 0x7e ? (x) : '.')
725 if (!dst)
726 dst = _dst;
727 if (lim <= 0 || lim > len)
728 lim = len;
729 o = dst;
730 sprintf(o, "buf %p len %d lim %d\n", p, len, lim);
731 o += strlen(o);
732 /* hexdump routine */
733 for (i = 0; i < lim; ) {
734 sprintf(o, "%5d: ", i);
738 for (j=0; j < 16 && i < lim; i++, j++) {
739 o[j*3] = P_HI(p[i]);
740 o[j*3+1] = P_LO(p[i]);
743 for (j=0; j < 16 && i < lim; i++, j++)
744 o[j + 48] = P_C(p[i]);
757 * Fetch configuration from the device, to cope with dynamic
758 * reconfigurations after loading the module.
760 /* call with NMG_LOCK held */
762 netmap_update_config(struct netmap_adapter *na)
764 struct nm_config_info info;
766 bzero(&info, sizeof(info));
767 if (na->nm_config == NULL ||
768 na->nm_config(na, &info)) {
769 /* take whatever we had at init time */
770 info.num_tx_rings = na->num_tx_rings;
771 info.num_tx_descs = na->num_tx_desc;
772 info.num_rx_rings = na->num_rx_rings;
773 info.num_rx_descs = na->num_rx_desc;
774 info.rx_buf_maxsize = na->rx_buf_maxsize;
777 if (na->num_tx_rings == info.num_tx_rings &&
778 na->num_tx_desc == info.num_tx_descs &&
779 na->num_rx_rings == info.num_rx_rings &&
780 na->num_rx_desc == info.num_rx_descs &&
781 na->rx_buf_maxsize == info.rx_buf_maxsize)
782 return 0; /* nothing changed */
783 if (na->active_fds == 0) {
784 na->num_tx_rings = info.num_tx_rings;
785 na->num_tx_desc = info.num_tx_descs;
786 na->num_rx_rings = info.num_rx_rings;
787 na->num_rx_desc = info.num_rx_descs;
788 na->rx_buf_maxsize = info.rx_buf_maxsize;
790 nm_prinf("configuration changed for %s: txring %d x %d, "
791 "rxring %d x %d, rxbufsz %d",
792 na->name, na->num_tx_rings, na->num_tx_desc,
793 na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize);
796 nm_prerr("WARNING: configuration changed for %s while active: "
797 "txring %d x %d, rxring %d x %d, rxbufsz %d",
798 na->name, info.num_tx_rings, info.num_tx_descs,
799 info.num_rx_rings, info.num_rx_descs,
800 info.rx_buf_maxsize);
804 /* nm_sync callbacks for the host rings */
805 static int netmap_txsync_to_host(struct netmap_kring *kring, int flags);
806 static int netmap_rxsync_from_host(struct netmap_kring *kring, int flags);
808 /* create the krings array and initialize the fields common to all adapters.
809 * The array layout is this:
810 *                    +----------+
812 * na->tx_rings ----->|          | \
813 *                    |          |  } na->num_tx_rings
814 *                    |          | /
815 *                    +----------+
816 *                    |          |    host tx kring
817 * na->rx_rings ----> +----------+
818 *                    |          | \
819 *                    |          |  } na->num_rx_rings
820 *                    |          | /
821 *                    +----------+
822 *                    |          |    host rx kring
823 *                    +----------+
824 * na->tailroom ----->|          | \
825 *                    |          |  } tailroom bytes
826 *                    |          | /
827 *                    +----------+
829 * Note: for compatibility, host krings are created even when not needed.
830 * The tailroom space is currently used by vale ports for allocating leases.
832 /* call with NMG_LOCK held */
834 netmap_krings_create(struct netmap_adapter *na, u_int tailroom)
836 u_int i, len, ndesc;
837 struct netmap_kring *kring;
838 u_int n[NR_TXRX];
839 enum txrx t;
840 int err = 0;
842 if (na->tx_rings != NULL) {
843 if (netmap_debug & NM_DEBUG_ON)
844 nm_prerr("warning: krings were already created");
845 return 0;
846 }
848 /* account for the (possibly fake) host rings */
849 n[NR_TX] = netmap_all_rings(na, NR_TX);
850 n[NR_RX] = netmap_all_rings(na, NR_RX);
852 len = (n[NR_TX] + n[NR_RX]) *
853 (sizeof(struct netmap_kring) + sizeof(struct netmap_kring *))
854 + tailroom;
856 na->tx_rings = nm_os_malloc((size_t)len);
857 if (na->tx_rings == NULL) {
858 nm_prerr("Cannot allocate krings");
859 return ENOMEM;
860 }
861 na->rx_rings = na->tx_rings + n[NR_TX];
862 na->tailroom = na->rx_rings + n[NR_RX];
864 /* link the krings in the krings array */
865 kring = (struct netmap_kring *)((char *)na->tailroom + tailroom);
866 for (i = 0; i < n[NR_TX] + n[NR_RX]; i++) {
867 na->tx_rings[i] = kring;
868 kring++;
869 }
872 * All fields in krings are 0 except the ones initialized below,
873 * but better be explicit on important kring fields.
875 for_rx_tx(t) {
876 ndesc = nma_get_ndesc(na, t);
877 for (i = 0; i < n[t]; i++) {
878 kring = NMR(na, t)[i];
879 bzero(kring, sizeof(*kring));
880 kring->notify_na = na;
881 kring->ring_id = i;
882 kring->tx = t;
883 kring->nkr_num_slots = ndesc;
884 kring->nr_mode = NKR_NETMAP_OFF;
885 kring->nr_pending_mode = NKR_NETMAP_OFF;
886 if (i < nma_get_nrings(na, t)) {
887 kring->nm_sync = (t == NR_TX ? na->nm_txsync : na->nm_rxsync);
888 } else {
889 if (!(na->na_flags & NAF_HOST_RINGS))
890 kring->nr_kflags |= NKR_FAKERING;
891 kring->nm_sync = (t == NR_TX ?
892 netmap_txsync_to_host:
893 netmap_rxsync_from_host);
895 kring->nm_notify = na->nm_notify;
896 kring->rhead = kring->rcur = kring->nr_hwcur = 0;
898 * IMPORTANT: Always keep one slot empty.
900 kring->rtail = kring->nr_hwtail = (t == NR_TX ? ndesc - 1 : 0);
901 snprintf(kring->name, sizeof(kring->name) - 1, "%s %s%d", na->name,
902 nm_txrx2str(t), i);
903 nm_prdis("ktx %s h %d c %d t %d",
904 kring->name, kring->rhead, kring->rcur, kring->rtail);
905 err = nm_os_selinfo_init(&kring->si, kring->name);
906 if (err) {
907 netmap_krings_delete(na);
908 return err;
909 }
910 mtx_init(&kring->q_lock, (t == NR_TX ? "nm_txq_lock" : "nm_rxq_lock"), NULL, MTX_DEF);
911 kring->na = na; /* setting this field marks the mutex as initialized */
913 err = nm_os_selinfo_init(&na->si[t], na->name);
914 if (err) {
915 netmap_krings_delete(na);
916 return err;
917 }
924 /* undo the actions performed by netmap_krings_create */
925 /* call with NMG_LOCK held */
927 netmap_krings_delete(struct netmap_adapter *na)
929 struct netmap_kring **kring = na->tx_rings;
932 if (na->tx_rings == NULL) {
933 if (netmap_debug & NM_DEBUG_ON)
934 nm_prerr("warning: krings were already deleted");
935 return;
936 }
938 for_rx_tx(t)
939 nm_os_selinfo_uninit(&na->si[t]);
941 /* we rely on the krings layout described above */
942 for ( ; kring != na->tailroom; kring++) {
943 if ((*kring)->na != NULL)
944 mtx_destroy(&(*kring)->q_lock);
945 nm_os_selinfo_uninit(&(*kring)->si);
947 nm_os_free(na->tx_rings);
948 na->tx_rings = na->rx_rings = na->tailroom = NULL;
953 * Destructor for NIC ports. They also have an mbuf queue
954 * on the rings connected to the host so we need to purge
955 * them first.
957 /* call with NMG_LOCK held */
959 netmap_hw_krings_delete(struct netmap_adapter *na)
961 u_int lim = netmap_real_rings(na, NR_RX), i;
963 for (i = nma_get_nrings(na, NR_RX); i < lim; i++) {
964 struct mbq *q = &NMR(na, NR_RX)[i]->rx_queue;
965 nm_prdis("destroy sw mbq with len %d", mbq_len(q));
966 mbq_purge(q);
967 mbq_safe_fini(q);
969 netmap_krings_delete(na);
973 netmap_mem_drop(struct netmap_adapter *na)
975 int last = netmap_mem_deref(na->nm_mem, na);
976 /* if the native allocator had been overridden on regif,
977 * restore it now and drop the temporary one
979 if (last && na->nm_mem_prev) {
980 netmap_mem_put(na->nm_mem);
981 na->nm_mem = na->nm_mem_prev;
982 na->nm_mem_prev = NULL;
987 * Undo everything that was done in netmap_do_regif(). In particular,
988 * call nm_register(ifp,0) to stop netmap mode on the interface and
989 * revert to normal operation.
991 /* call with NMG_LOCK held */
992 static void netmap_unset_ringid(struct netmap_priv_d *);
993 static void netmap_krings_put(struct netmap_priv_d *);
995 netmap_do_unregif(struct netmap_priv_d *priv)
997 struct netmap_adapter *na = priv->np_na;
999 NMG_LOCK_ASSERT();
1000 na->active_fds--;
1001 /* unset nr_pending_mode and possibly release exclusive mode */
1002 netmap_krings_put(priv);
1005 /* XXX check whether we have to do something with monitor
1006 * when rings change nr_mode. */
1007 if (na->active_fds <= 0) {
1008 /* walk through all the rings and tell any monitor
1009 * that the port is going to exit netmap mode
1011 netmap_monitor_stop(na);
1015 if (na->active_fds <= 0 || nm_kring_pending(priv)) {
1016 na->nm_register(na, 0);
1019 /* delete rings and buffers that are no longer needed */
1020 netmap_mem_rings_delete(na);
1022 if (na->active_fds <= 0) { /* last instance */
1024 * (TO CHECK) We enter here
1025 * when the last reference to this file descriptor goes
1026 * away. This means we cannot have any pending poll()
1027 * or interrupt routine operating on the structure.
1028 * XXX The file may be closed in a thread while
1029 * another thread is using it.
1030 * Linux keeps the file opened until the last reference
1031 * by any outstanding ioctl/poll or mmap is gone.
1032 * FreeBSD does not track mmap()s (but we do) and
1033 * wakes up any sleeping poll(). Need to check what
1034 * happens if the close() occurs while a concurrent
1035 * syscall is running.
1037 if (netmap_debug & NM_DEBUG_ON)
1038 nm_prinf("deleting last instance for %s", na->name);
1040 if (nm_netmap_on(na)) {
1041 nm_prerr("BUG: netmap on while going to delete the krings");
1044 na->nm_krings_delete(na);
1046 /* restore the default number of host tx and rx rings */
1047 if (na->na_flags & NAF_HOST_RINGS) {
1048 na->num_host_tx_rings = 1;
1049 na->num_host_rx_rings = 1;
1050 } else {
1051 na->num_host_tx_rings = 0;
1052 na->num_host_rx_rings = 0;
1056 /* possibly decrement counter of tx_si/rx_si users */
1057 netmap_unset_ringid(priv);
1058 /* delete the nifp */
1059 netmap_mem_if_delete(na, priv->np_nifp);
1060 /* drop the allocator */
1061 netmap_mem_drop(na);
1062 /* mark the priv as unregistered */
1064 priv->np_nifp = NULL;
1067 struct netmap_priv_d*
1068 netmap_priv_new(void)
1070 struct netmap_priv_d *priv;
1072 priv = nm_os_malloc(sizeof(struct netmap_priv_d));
1073 if (priv == NULL)
1074 return NULL;
1075 priv->np_refs = 1;
1076 nm_os_get_module();
1077 return priv;
1081 * Destructor of the netmap_priv_d, called when the fd is closed.
1082 * Action: undo all the things done by NIOCREGIF.
1083 * On FreeBSD we need to track whether there are active mmap()s,
1084 * and we use np_active_mmaps for that. On linux, the field is always 0.
1085 * Return: 1 if we can free priv, 0 otherwise.
1088 /* call with NMG_LOCK held */
1090 netmap_priv_delete(struct netmap_priv_d *priv)
1092 struct netmap_adapter *na = priv->np_na;
1094 /* number of active references to this fd */
1095 if (--priv->np_refs > 0) {
1096 return 0;
1097 }
1098 nm_os_put_module();
1099 if (na) {
1100 netmap_do_unregif(priv);
1101 }
1102 netmap_unget_na(na, priv->np_ifp);
1103 bzero(priv, sizeof(*priv)); /* for safety */
1104 nm_os_free(priv);
1105 return 1;
1108 /* call with NMG_LOCK *not* held */
1110 netmap_dtor(void *data)
1112 struct netmap_priv_d *priv = data;
1115 netmap_priv_delete(priv);
1121 * Handlers for synchronization of the rings from/to the host stack.
1122 * These are associated with a network interface and are just another
1123 * ring pair managed by userspace.
1125 * Netmap also supports transparent forwarding (NS_FORWARD and NR_FORWARD
1126 * flags), as follows:
1128 * - Before releasing buffers on hw RX rings, the application can mark
1129 * them with the NS_FORWARD flag. During the next RXSYNC or poll(), they
1130 * will be forwarded to the host stack, similarly to what would happen
1131 * if the application moved them to the host TX ring.
1133 * - Before releasing buffers on the host RX ring, the application can
1134 * mark them with the NS_FORWARD flag. During the next RXSYNC or poll(),
1135 * they will be forwarded to the hw TX rings, saving the application
1136 * from doing the same task in user-space.
1138 * Transparent forwarding can be enabled per-ring, by setting the NR_FORWARD
1139 * flag, or globally with the netmap_fwd sysctl.
1141 * The transfer NIC --> host is relatively easy, just encapsulate
1142 * into mbufs and we are done. The host --> NIC side is slightly
1143 * harder because there might not be room in the tx ring so it
1144 * might take a while before releasing the buffer.
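 *
 * From the application, the NIC --> host direction only requires
 * flagging slots before advancing head (a sketch; rxr is a hw RX
 * netmap ring and packet_is_for_host() is a placeholder policy):
 *
 *	while (!nm_ring_empty(rxr)) {
 *		struct netmap_slot *slot = &rxr->slot[rxr->head];
 *
 *		if (packet_is_for_host(slot))
 *			slot->flags |= NS_FORWARD;
 *		rxr->head = rxr->cur = nm_ring_next(rxr, rxr->head);
 *	}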
1149 * Pass a whole queue of mbufs to the host stack as coming from 'dst'
1150 * We do not need to lock because the queue is private.
1151 * After this call the queue is empty.
1154 netmap_send_up(struct ifnet *dst, struct mbq *q)
1157 struct mbuf *head = NULL, *prev = NULL;
1158 #ifdef __FreeBSD__
1159 struct epoch_tracker et;
1161 NET_EPOCH_ENTER(et);
1162 #endif /* __FreeBSD__ */
1163 /* Send packets up, outside the lock; head/prev machinery
1164 * is only useful for Windows. */
1165 while ((m = mbq_dequeue(q)) != NULL) {
1166 if (netmap_debug & NM_DEBUG_HOST)
1167 nm_prinf("sending up pkt %p size %d", m, MBUF_LEN(m));
1168 prev = nm_os_send_up(dst, m, prev);
1169 if (head == NULL)
1170 head = prev;
1171 }
1172 if (head)
1173 nm_os_send_up(dst, NULL, head);
1174 #ifdef __FreeBSD__
1175 NET_EPOCH_EXIT(et);
1176 #endif /* __FreeBSD__ */
1182 * Scan the buffers from hwcur to ring->head, and put a copy of those
1183 * marked NS_FORWARD (or all of them if forced) into a queue of mbufs.
1184 * Drop remaining packets in the unlikely event
1185 * of an mbuf shortage.
1188 netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force)
1190 u_int const lim = kring->nkr_num_slots - 1;
1191 u_int const head = kring->rhead;
1193 struct netmap_adapter *na = kring->na;
1194 u_int n;
1195 for (n = kring->nr_hwcur; n != head; n = nm_next(n, lim)) {
1196 struct mbuf *m;
1197 struct netmap_slot *slot = &kring->ring->slot[n];
1199 if ((slot->flags & NS_FORWARD) == 0 && !force)
1200 continue;
1201 if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE(na)) {
1202 nm_prlim(5, "bad pkt at %d len %d", n, slot->len);
1203 continue;
1204 }
1205 slot->flags &= ~NS_FORWARD; // XXX needed ?
1206 /* XXX TODO: adapt to the case of a multisegment packet */
1207 m = m_devget(NMB(na, slot), slot->len, 0, na->ifp, NULL);
1208 if (m == NULL)
1209 break;
1210 mbq_enqueue(q, m);
1216 _nm_may_forward(struct netmap_kring *kring)
1218 return ((netmap_fwd || kring->ring->flags & NR_FORWARD) &&
1219 kring->na->na_flags & NAF_HOST_RINGS &&
1220 kring->tx == NR_RX);
1224 nm_may_forward_up(struct netmap_kring *kring)
1226 return _nm_may_forward(kring) &&
1227 kring->ring_id != kring->na->num_rx_rings;
1231 nm_may_forward_down(struct netmap_kring *kring, int sync_flags)
1233 return _nm_may_forward(kring) &&
1234 (sync_flags & NAF_CAN_FORWARD_DOWN) &&
1235 kring->ring_id == kring->na->num_rx_rings;
1239 * Send to the NIC rings packets marked NS_FORWARD between
1240 * kring->nr_hwcur and kring->rhead.
1241 * Called under kring->rx_queue.lock on the sw rx ring.
1243 * It can only be called if the user opened all the TX hw rings,
1244 * see NAF_CAN_FORWARD_DOWN flag.
1245 * We can touch the TX netmap rings (slots, head and cur) since
1246 * we are in poll/ioctl system call context, and the application
1247 * is not supposed to touch the ring (using a different thread)
1248 * during the execution of the system call.
1251 netmap_sw_to_nic(struct netmap_adapter *na)
1253 struct netmap_kring *kring = na->rx_rings[na->num_rx_rings];
1254 struct netmap_slot *rxslot = kring->ring->slot;
1255 u_int i, rxcur = kring->nr_hwcur;
1256 u_int const head = kring->rhead;
1257 u_int const src_lim = kring->nkr_num_slots - 1;
1260 /* scan rings to find space, then fill as much as possible */
1261 for (i = 0; i < na->num_tx_rings; i++) {
1262 struct netmap_kring *kdst = na->tx_rings[i];
1263 struct netmap_ring *rdst = kdst->ring;
1264 u_int const dst_lim = kdst->nkr_num_slots - 1;
1266 /* XXX do we trust ring or kring->rcur,rtail ? */
1267 for (; rxcur != head && !nm_ring_empty(rdst);
1268 rxcur = nm_next(rxcur, src_lim) ) {
1269 struct netmap_slot *src, *dst, tmp;
1270 u_int dst_head = rdst->head;
1272 src = &rxslot[rxcur];
1273 if ((src->flags & NS_FORWARD) == 0 && !netmap_fwd)
1274 continue;
1278 dst = &rdst->slot[dst_head];
1280 tmp = *src;
1282 src->buf_idx = dst->buf_idx;
1283 src->flags = NS_BUF_CHANGED;
1285 dst->buf_idx = tmp.buf_idx;
1286 dst->len = tmp.len;
1287 dst->flags = NS_BUF_CHANGED;
1289 rdst->head = rdst->cur = nm_next(dst_head, dst_lim);
1291 /* if (sent) XXX txsync ? it would be just an optimization */
1298 * netmap_txsync_to_host() passes packets up. We are called from a
1299 * system call in user process context, and the only contention
1300 * can be among multiple user threads erroneously calling
1301 * this routine concurrently.
1304 netmap_txsync_to_host(struct netmap_kring *kring, int flags)
1306 struct netmap_adapter *na = kring->na;
1307 u_int const lim = kring->nkr_num_slots - 1;
1308 u_int const head = kring->rhead;
1309 struct mbq q;
1311 /* Take packets from hwcur to head and pass them up.
1312 * Force hwcur = head since netmap_grab_packets() stops at head
1314 mbq_init(&q);
1315 netmap_grab_packets(kring, &q, 1 /* force */);
1316 nm_prdis("have %d pkts in queue", mbq_len(&q));
1317 kring->nr_hwcur = head;
1318 kring->nr_hwtail = head + lim;
1319 if (kring->nr_hwtail > lim)
1320 kring->nr_hwtail -= lim + 1;
1322 netmap_send_up(na->ifp, &q);
1323 return 0;
1328 * rxsync backend for packets coming from the host stack.
1329 * They have been put in kring->rx_queue by netmap_transmit().
1330 * We protect access to the kring using kring->rx_queue.lock
1332 * This routine also moves to the nic hw rings any packet the user
1333 * has marked for transparent-mode forwarding, then sets the NR_FORWARD
1334 * flag in the kring to let the caller push them out
1337 netmap_rxsync_from_host(struct netmap_kring *kring, int flags)
1339 struct netmap_adapter *na = kring->na;
1340 struct netmap_ring *ring = kring->ring;
1341 u_int nm_i; /* index into the netmap ring */
1342 u_int const lim = kring->nkr_num_slots - 1;
1343 u_int const head = kring->rhead;
1344 int ret = 0;
1345 struct mbq *q = &kring->rx_queue, fq;
1347 mbq_init(&fq); /* fq holds packets to be freed */
1349 mbq_lock(q);
1351 /* First part: import newly received packets */
1352 n = mbq_len(q);
1353 if (n) { /* grab packets from the queue */
1354 struct mbuf *m;
1355 uint32_t stop_i;
1357 nm_i = kring->nr_hwtail;
1358 stop_i = nm_prev(kring->nr_hwcur, lim);
1359 while ( nm_i != stop_i && (m = mbq_dequeue(q)) != NULL ) {
1360 int len = MBUF_LEN(m);
1361 struct netmap_slot *slot = &ring->slot[nm_i];
1363 m_copydata(m, 0, len, NMB(na, slot));
1364 nm_prdis("nm %d len %d", nm_i, len);
1365 if (netmap_debug & NM_DEBUG_HOST)
1366 nm_prinf("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL));
1368 slot->len = len;
1369 slot->flags = 0;
1370 nm_i = nm_next(nm_i, lim);
1371 mbq_enqueue(&fq, m);
1373 kring->nr_hwtail = nm_i;
1377 * Second part: skip past packets that userspace has released.
1379 nm_i = kring->nr_hwcur;
1380 if (nm_i != head) { /* something was released */
1381 if (nm_may_forward_down(kring, flags)) {
1382 ret = netmap_sw_to_nic(na);
1383 if (ret > 0) {
1384 kring->nr_kflags |= NR_FORWARD;
1385 ret = 0;
1386 }
1387 }
1388 kring->nr_hwcur = head;
1389 }
1391 mbq_unlock(q);
1393 mbq_purge(&fq);
1394 mbq_fini(&fq);
1396 return ret;
1400 /* Get a netmap adapter for the port.
1402 * If it is possible to satisfy the request, return 0
1403 * with *na containing the netmap adapter found.
1404 * Otherwise return an error code, with *na containing NULL.
1406 * When the port is attached to a bridge, we always return
1407 * EBUSY.
1408 * Otherwise, if the port is already bound to a file descriptor,
1409 * then we unconditionally return the existing adapter into *na.
1410 * In all the other cases, we return (into *na) either native,
1411 * generic or NULL, according to the following table:
1413 * native support
1414 * active_fds dev.netmap.admode YES NO
1415 * -------------------------------------------------------
1416 * >0 * NA(ifp) NA(ifp)
1418 * 0 NETMAP_ADMODE_BEST NATIVE GENERIC
1419 * 0 NETMAP_ADMODE_NATIVE NATIVE NULL
1420 * 0 NETMAP_ADMODE_GENERIC GENERIC GENERIC
1423 static void netmap_hw_dtor(struct netmap_adapter *); /* needed by NM_IS_NATIVE() */
1425 netmap_get_hw_na(struct ifnet *ifp, struct netmap_mem_d *nmd, struct netmap_adapter **na)
1427 /* generic support */
1428 int i = netmap_admode; /* Take a snapshot. */
1429 struct netmap_adapter *prev_na;
1430 int error = 0;
1432 *na = NULL; /* default */
1434 /* reset in case of invalid value */
1435 if (i < NETMAP_ADMODE_BEST || i >= NETMAP_ADMODE_LAST)
1436 i = netmap_admode = NETMAP_ADMODE_BEST;
1438 if (NM_NA_VALID(ifp)) {
1439 prev_na = NA(ifp);
1440 /* If an adapter already exists, return it if
1441 * there are active file descriptors or if
1442 * netmap is not forced to use generic
1445 if (NETMAP_OWNED_BY_ANY(prev_na)
1446 || i != NETMAP_ADMODE_GENERIC
1447 || prev_na->na_flags & NAF_FORCE_NATIVE
1449 /* ugly, but we cannot allow an adapter switch
1450 * if some pipe is referring to this one
1452 || prev_na->na_next_pipe > 0) {
1453 *na = prev_na;
1454 goto assign_mem;
1455 }
1456 }
1460 /* If there isn't native support and netmap is not allowed
1461 * to use generic adapters, we cannot satisfy the request.
1463 if (!NM_IS_NATIVE(ifp) && i == NETMAP_ADMODE_NATIVE)
1464 return EOPNOTSUPP;
1466 /* Otherwise, create a generic adapter and return it,
1467 * saving the previously used netmap adapter, if any.
1469 * Note that here 'prev_na', if not NULL, MUST be a
1470 * native adapter, and CANNOT be a generic one. This is
1471 * true because generic adapters are created on demand, and
1472 * destroyed when not used anymore. Therefore, if the adapter
1473 * currently attached to an interface 'ifp' is generic, it
1474 * must be that
1475 * (NA(ifp)->active_fds > 0 || NETMAP_OWNED_BY_KERN(NA(ifp))).
1476 * Consequently, if NA(ifp) is generic, we will enter one of
1477 * the branches above. This ensures that we never override
1478 * a generic adapter with another generic adapter.
1480 error = generic_netmap_attach(ifp);
1481 if (error)
1482 return error;
1483 *na = NA(ifp);
1485 assign_mem:
1487 if (nmd != NULL && !((*na)->na_flags & NAF_MEM_OWNER) &&
1488 (*na)->active_fds == 0 && ((*na)->nm_mem != nmd)) {
1489 (*na)->nm_mem_prev = (*na)->nm_mem;
1490 (*na)->nm_mem = netmap_mem_get(nmd);
1497 * MUST BE CALLED UNDER NMG_LOCK()
1499 * Get a refcounted reference to a netmap adapter attached
1500 * to the interface specified by req.
1501 * This is always called in the execution of an ioctl().
1503 * Return ENXIO if the interface specified by the request does
1504 * not exist, ENOTSUP if netmap is not supported by the interface,
1505 * EBUSY if the interface is already attached to a bridge,
1506 * EINVAL if parameters are invalid, ENOMEM if needed resources
1507 * could not be allocated.
1508 * If successful, hold a reference to the netmap adapter.
1510 * If the interface specified by req is a system one, also keep
1511 * a reference to it and return a valid *ifp.
1514 netmap_get_na(struct nmreq_header *hdr,
1515 struct netmap_adapter **na, struct ifnet **ifp,
1516 struct netmap_mem_d *nmd, int create)
1518 struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
1520 struct netmap_adapter *ret = NULL;
1523 *na = NULL; /* default return value */
1526 if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) {
1530 if (req->nr_mode == NR_REG_PIPE_MASTER ||
1531 req->nr_mode == NR_REG_PIPE_SLAVE) {
1532 /* Do not accept deprecated pipe modes. */
1533 nm_prerr("Deprecated pipe nr_mode, use xx{yy or xx}yy syntax");
1539 /* if the request contain a memid, try to find the
1540 * corresponding memory region
1542 if (nmd == NULL && req->nr_mem_id) {
1543 nmd = netmap_mem_find(req->nr_mem_id);
1546 /* keep the reference */
1550 /* We cascade through all possible types of netmap adapter.
1551 * All netmap_get_*_na() functions return an error and an na,
1552 * with the following combinations:
1553 * error na
1555 * 0 NULL type doesn't match
1556 * !0 NULL type matches, but na creation/lookup failed
1557 * 0 !NULL type matches and na created/found
1558 * !0 !NULL impossible
1560 error = netmap_get_null_na(hdr, na, nmd, create);
1561 if (error || *na != NULL)
1564 /* try to see if this is a monitor port */
1565 error = netmap_get_monitor_na(hdr, na, nmd, create);
1566 if (error || *na != NULL)
1569 /* try to see if this is a pipe port */
1570 error = netmap_get_pipe_na(hdr, na, nmd, create);
1571 if (error || *na != NULL)
1574 /* try to see if this is a bridge port */
1575 error = netmap_get_vale_na(hdr, na, nmd, create);
1579 if (*na != NULL) /* valid match in netmap_get_bdg_na() */
1583 * This must be a hardware na, lookup the name in the system.
1584 * Note that by hardware we actually mean "it shows up in ifconfig".
1585 * This may still be a tap, a veth/epair, or even a
1586 * persistent VALE port.
1588 *ifp = ifunit_ref(hdr->nr_name);
1589 if (*ifp == NULL)
1590 return ENXIO;
1594 error = netmap_get_hw_na(*ifp, nmd, &ret);
1599 netmap_adapter_get(ret);
1602 * if the adapter supports the host rings and it is not already open,
1603 * try to set the number of host rings as requested by the user
1605 if (((*na)->na_flags & NAF_HOST_RINGS) && (*na)->active_fds == 0) {
1606 if (req->nr_host_tx_rings)
1607 (*na)->num_host_tx_rings = req->nr_host_tx_rings;
1608 if (req->nr_host_rx_rings)
1609 (*na)->num_host_rx_rings = req->nr_host_rx_rings;
1611 nm_prdis("%s: host tx %d rx %u", (*na)->name, (*na)->num_host_tx_rings,
1612 (*na)->num_host_rx_rings);
1617 netmap_adapter_put(ret);
1624 netmap_mem_put(nmd);
1629 /* undo netmap_get_na() */
1631 netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp)
1633 if (ifp != NULL)
1634 if_rele(ifp);
1635 if (na != NULL)
1636 netmap_adapter_put(na);
1640 #define NM_FAIL_ON(t) do { \
1641 if (unlikely(t)) { \
1642 nm_prlim(5, "%s: fail '" #t "' " \
1644 "rh %d rc %d rt %d " \
1647 head, cur, ring->tail, \
1648 kring->rhead, kring->rcur, kring->rtail, \
1649 kring->nr_hwcur, kring->nr_hwtail); \
1650 return kring->nkr_num_slots; \
1655 * validate parameters on entry for *_txsync()
1656 * Returns ring->head if ok, or something >= kring->nkr_num_slots
1657 * in case of error.
1659 * rhead, rcur and rtail=hwtail are stored from previous round.
1660 * hwcur is the next packet to send to the ring.
1663 * hwcur <= *rhead <= head <= cur <= tail = *rtail <= hwtail
1665 * hwcur, rhead, rtail and hwtail are reliable
1668 nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
1670 u_int head = ring->head; /* read only once */
1671 u_int cur = ring->cur; /* read only once */
1672 u_int n = kring->nkr_num_slots;
1674 nm_prdis(5, "%s kcur %d ktail %d head %d cur %d tail %d",
1676 kring->nr_hwcur, kring->nr_hwtail,
1677 ring->head, ring->cur, ring->tail);
1678 #if 1 /* kernel sanity checks; but we can trust the kring. */
1679 NM_FAIL_ON(kring->nr_hwcur >= n || kring->rhead >= n ||
1680 kring->rtail >= n || kring->nr_hwtail >= n);
1681 #endif /* kernel sanity checks */
1683 * user sanity checks. We only use head,
1684 * A, B, ... are possible positions for head:
1686 * 0 A rhead B rtail C n-1
1687 * 0 D rtail E rhead F n-1
1689 * B, F, D are valid. A, C, E are wrong
1691 if (kring->rtail >= kring->rhead) {
1692 /* want rhead <= head <= rtail */
1693 NM_FAIL_ON(head < kring->rhead || head > kring->rtail);
1694 /* and also head <= cur <= rtail */
1695 NM_FAIL_ON(cur < head || cur > kring->rtail);
1696 } else { /* here rtail < rhead */
1697 /* we need head outside rtail .. rhead */
1698 NM_FAIL_ON(head > kring->rtail && head < kring->rhead);
1700 /* two cases now: head <= rtail or head >= rhead */
1701 if (head <= kring->rtail) {
1702 /* want head <= cur <= rtail */
1703 NM_FAIL_ON(cur < head || cur > kring->rtail);
1704 } else { /* head >= rhead */
1705 /* cur must be outside rtail..head */
1706 NM_FAIL_ON(cur > kring->rtail && cur < head);
1709 if (ring->tail != kring->rtail) {
1710 nm_prlim(5, "%s tail overwritten was %d need %d", kring->name,
1711 ring->tail, kring->rtail);
1712 ring->tail = kring->rtail;
1714 kring->rhead = head;
1721 * validate parameters on entry for *_rxsync()
1722 * Returns ring->head if ok, kring->nkr_num_slots on error.
1724 * For a valid configuration,
1725 * hwcur <= head <= cur <= tail <= hwtail
1727 * We only consider head and cur.
1728 * hwcur and hwtail are reliable.
1732 nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
1734 uint32_t const n = kring->nkr_num_slots;
1735 uint32_t head, cur;
1737 nm_prdis(5,"%s kc %d kt %d h %d c %d t %d",
1739 kring->nr_hwcur, kring->nr_hwtail,
1740 ring->head, ring->cur, ring->tail);
1742 * Before storing the new values, we should check they do not
1743 * move backwards. However:
1744 * - head is not an issue because the previous value is hwcur;
1745 * - cur could in principle go back, however it does not matter
1746 * because we are processing a brand new rxsync()
1748 cur = kring->rcur = ring->cur; /* read only once */
1749 head = kring->rhead = ring->head; /* read only once */
1750 #if 1 /* kernel sanity checks */
1751 NM_FAIL_ON(kring->nr_hwcur >= n || kring->nr_hwtail >= n);
1752 #endif /* kernel sanity checks */
1753 /* user sanity checks */
1754 if (kring->nr_hwtail >= kring->nr_hwcur) {
1755 /* want hwcur <= rhead <= hwtail */
1756 NM_FAIL_ON(head < kring->nr_hwcur || head > kring->nr_hwtail);
1757 /* and also rhead <= rcur <= hwtail */
1758 NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
1759 } else {
1760 /* we need rhead outside hwtail..hwcur */
1761 NM_FAIL_ON(head < kring->nr_hwcur && head > kring->nr_hwtail);
1762 /* two cases now: head <= hwtail or head >= hwcur */
1763 if (head <= kring->nr_hwtail) {
1764 /* want head <= cur <= hwtail */
1765 NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
1766 } else {
1767 /* cur must be outside hwtail..head */
1768 NM_FAIL_ON(cur < head && cur > kring->nr_hwtail);
1771 if (ring->tail != kring->rtail) {
1772 nm_prlim(5, "%s tail overwritten was %d need %d",
1774 ring->tail, kring->rtail);
1775 ring->tail = kring->rtail;
1782 * Error routine called when txsync/rxsync detects an error.
1783 * Can't do much more than resetting head = cur = hwcur, tail = hwtail
1784 * Return 1 on reinit.
1786 * This routine is only called by the upper half of the kernel.
1787 * It only reads hwcur (which is changed only by the upper half, too)
1788 * and hwtail (which may be changed by the lower half, but only on
1789 * a tx ring and only to increase it, so any error will be recovered
1790 * on the next call). For the above, we don't strictly need to call
1791 * it under lock.
1794 netmap_ring_reinit(struct netmap_kring *kring)
1796 struct netmap_ring *ring = kring->ring;
1797 u_int i, lim = kring->nkr_num_slots - 1;
1798 int errors = 0;
1800 // XXX KASSERT nm_kr_tryget
1801 nm_prlim(10, "called for %s", kring->name);
1802 // XXX probably wrong to trust userspace
1803 kring->rhead = ring->head;
1804 kring->rcur = ring->cur;
1805 kring->rtail = ring->tail;
1807 if (ring->cur > lim)
1808 errors++;
1809 if (ring->head > lim)
1810 errors++;
1811 if (ring->tail > lim)
1812 errors++;
1813 for (i = 0; i <= lim; i++) {
1814 u_int idx = ring->slot[i].buf_idx;
1815 u_int len = ring->slot[i].len;
1816 if (idx < 2 || idx >= kring->na->na_lut.objtotal) {
1817 nm_prlim(5, "bad index at slot %d idx %d len %d ", i, idx, len);
1818 ring->slot[i].buf_idx = 0;
1819 ring->slot[i].len = 0;
1820 } else if (len > NETMAP_BUF_SIZE(kring->na)) {
1821 ring->slot[i].len = 0;
1822 nm_prlim(5, "bad len at slot %d idx %d len %d", i, idx, len);
1826 nm_prlim(10, "total %d errors", errors);
1827 nm_prlim(10, "%s reinit, cur %d -> %d tail %d -> %d",
1829 ring->cur, kring->nr_hwcur,
1830 ring->tail, kring->nr_hwtail);
1831 ring->head = kring->rhead = kring->nr_hwcur;
1832 ring->cur = kring->rcur = kring->nr_hwcur;
1833 ring->tail = kring->rtail = kring->nr_hwtail;
1835 return (errors ? 1 : 0);
1838 /* interpret the ringid and flags fields of an nmreq, by translating them
1839 * into a pair of intervals of ring indices:
1841 * [priv->np_txqfirst, priv->np_txqlast) and
1842 * [priv->np_rxqfirst, priv->np_rxqlast)
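 *
 * For example (assuming an adapter with 4 hw tx/rx ring pairs plus
 * host rings), NR_REG_ALL_NIC yields tx [0,4) and rx [0,4),
 * NR_REG_ONE_NIC with nr_ringid == 2 yields tx [2,3) and rx [2,3),
 * and NR_REG_SW selects only the host rings, i.e. tx [4,5) and
 * rx [4,5).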
1846 netmap_interp_ringid(struct netmap_priv_d *priv, struct nmreq_header *hdr)
1848 struct netmap_adapter *na = priv->np_na;
1849 struct nmreq_register *reg = (struct nmreq_register *)hdr->nr_body;
1850 int excluded_direction[] = { NR_TX_RINGS_ONLY, NR_RX_RINGS_ONLY };
1851 enum txrx t;
1852 u_int j;
1853 u_int nr_flags = reg->nr_flags, nr_mode = reg->nr_mode,
1854 nr_ringid = reg->nr_ringid;
1856 for_rx_tx(t) {
1857 if (nr_flags & excluded_direction[t]) {
1858 priv->np_qfirst[t] = priv->np_qlast[t] = 0;
1859 continue;
1860 }
1861 switch (nr_mode) {
1862 case NR_REG_ALL_NIC:
1864 priv->np_qfirst[t] = 0;
1865 priv->np_qlast[t] = nma_get_nrings(na, t);
1866 nm_prdis("ALL/PIPE: %s %d %d", nm_txrx2str(t),
1867 priv->np_qfirst[t], priv->np_qlast[t]);
1868 break;
1869 case NR_REG_SW:
1870 case NR_REG_NIC_SW:
1871 if (!(na->na_flags & NAF_HOST_RINGS)) {
1872 nm_prerr("host rings not supported");
1873 return EINVAL;
1874 }
1875 priv->np_qfirst[t] = (nr_mode == NR_REG_SW ?
1876 nma_get_nrings(na, t) : 0);
1877 priv->np_qlast[t] = netmap_all_rings(na, t);
1878 nm_prdis("%s: %s %d %d", nr_mode == NR_REG_SW ? "SW" : "NIC+SW",
1880 priv->np_qfirst[t], priv->np_qlast[t]);
1881 break;
1882 case NR_REG_ONE_NIC:
1883 if (nr_ringid >= na->num_tx_rings &&
1884 nr_ringid >= na->num_rx_rings) {
1885 nm_prerr("invalid ring id %d", nr_ringid);
1886 return EINVAL;
1887 }
1888 /* if not enough rings, use the first one */
1889 j = nr_ringid;
1890 if (j >= nma_get_nrings(na, t))
1891 j = 0;
1892 priv->np_qfirst[t] = j;
1893 priv->np_qlast[t] = j + 1;
1894 nm_prdis("ONE_NIC: %s %d %d", nm_txrx2str(t),
1895 priv->np_qfirst[t], priv->np_qlast[t]);
1896 break;
1897 case NR_REG_ONE_SW:
1898 if (!(na->na_flags & NAF_HOST_RINGS)) {
1899 nm_prerr("host rings not supported");
1900 return EINVAL;
1901 }
1902 if (nr_ringid >= na->num_host_tx_rings &&
1903 nr_ringid >= na->num_host_rx_rings) {
1904 nm_prerr("invalid ring id %d", nr_ringid);
1905 return EINVAL;
1906 }
1907 /* if not enough rings, use the first one */
1908 j = nr_ringid;
1909 if (j >= nma_get_host_nrings(na, t))
1910 j = 0;
1911 priv->np_qfirst[t] = nma_get_nrings(na, t) + j;
1912 priv->np_qlast[t] = nma_get_nrings(na, t) + j + 1;
1913 nm_prdis("ONE_SW: %s %d %d", nm_txrx2str(t),
1914 priv->np_qfirst[t], priv->np_qlast[t]);
1915 break;
1916 default:
1917 nm_prerr("invalid regif type %d", nr_mode);
1918 return EINVAL;
1919 }
1920 }
1921 priv->np_flags = nr_flags;
1923 /* Allow transparent forwarding mode in the host --> nic
1924 * direction only if all the TX hw rings have been opened. */
1925 if (priv->np_qfirst[NR_TX] == 0 &&
1926 priv->np_qlast[NR_TX] >= na->num_tx_rings) {
1927 priv->np_sync_flags |= NAF_CAN_FORWARD_DOWN;
1930 if (netmap_verbose) {
1931 nm_prinf("%s: tx [%d,%d) rx [%d,%d) id %d",
1933 priv->np_qfirst[NR_TX],
1934 priv->np_qlast[NR_TX],
1935 priv->np_qfirst[NR_RX],
1936 priv->np_qlast[NR_RX],
1944 * Set the ring ID. For devices with a single queue, a request
1945 * for all rings is the same as a single ring.
1948 netmap_set_ringid(struct netmap_priv_d *priv, struct nmreq_header *hdr)
1950 struct netmap_adapter *na = priv->np_na;
1951 struct nmreq_register *reg = (struct nmreq_register *)hdr->nr_body;
1955 error = netmap_interp_ringid(priv, hdr);
1956 if (error)
1957 return error;
1960 priv->np_txpoll = (reg->nr_flags & NR_NO_TX_POLL) ? 0 : 1;
1962 /* optimization: count the users registered for more than
1963 * one ring, which are the ones sleeping on the global queue.
1964 * The default netmap_notify() callback will then
1965 * avoid signaling the global queue if nobody is using it
1967 for_rx_tx(t) {
1968 if (nm_si_user(priv, t))
1969 na->si_users[t]++;
1970 }
1975 netmap_unset_ringid(struct netmap_priv_d *priv)
1977 struct netmap_adapter *na = priv->np_na;
1981 if (nm_si_user(priv, t))
1983 priv->np_qfirst[t] = priv->np_qlast[t] = 0;
1986 priv->np_txpoll = 0;
1987 priv->np_kloop_state = 0;
1990 #define within_sel(p_, t_, i_) \
1991 ((i_) < (p_)->np_qlast[(t_)])
1992 #define nonempty_sel(p_, t_) \
1993 (within_sel((p_), (t_), (p_)->np_qfirst[(t_)]))
1994 #define foreach_selected_ring(p_, t_, i_, kring_) \
1995 for ((t_) = nonempty_sel((p_), NR_RX) ? NR_RX : NR_TX, \
1996 (i_) = (p_)->np_qfirst[(t_)]; \
1997 ((t_) == NR_RX || \
1998 ((t_) == NR_TX && within_sel((p_), (t_), (i_)))) && \
1999 ((kring_) = NMR((p_)->np_na, (t_))[(i_)]); \
2000 (i_) = within_sel((p_), (t_), (i_) + 1) ? (i_) + 1 : \
2001 (++(t_) < NR_TXRX ? (p_)->np_qfirst[(t_)] : (i_)))
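/*
 * Usage sketch for the macro above: visit every ring bound to a file
 * descriptor (the RX interval is scanned first, then the TX interval),
 * e.g. to count the selected krings.
 */
#if 0	/* example only */
	enum txrx t;
	u_int i, n = 0;
	struct netmap_kring *kring;

	foreach_selected_ring(priv, t, i, kring)
		n++;
#endif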
2004 /* Set the nr_pending_mode for the requested rings.
2005 * If requested, also try to get exclusive access to the rings, provided
2006 * the rings we want to bind are not exclusively owned by a previous bind.
2009 netmap_krings_get(struct netmap_priv_d *priv)
2011 struct netmap_adapter *na = priv->np_na;
2013 struct netmap_kring *kring;
2014 int excl = (priv->np_flags & NR_EXCLUSIVE);
2017 if (netmap_debug & NM_DEBUG_ON)
2018 nm_prinf("%s: grabbing tx [%d, %d) rx [%d, %d)",
2020 priv->np_qfirst[NR_TX],
2021 priv->np_qlast[NR_TX],
2022 priv->np_qfirst[NR_RX],
2023 priv->np_qlast[NR_RX]);
2025 /* first round: check that none of the requested rings
2026 * is already exclusively owned, and that we are not
2027 * requesting exclusive ownership of rings already in use
2028 */
2029 foreach_selected_ring(priv, t, i, kring) {
2030 if ((kring->nr_kflags & NKR_EXCLUSIVE) ||
2031 (kring->users && excl))
2033 nm_prdis("ring %s busy", kring->name);
2038 /* second round: increment usage count (possibly marking them
2039 * as exclusive) and set the nr_pending_mode
2041 foreach_selected_ring(priv, t, i, kring) {
2044 kring->nr_kflags |= NKR_EXCLUSIVE;
2045 kring->nr_pending_mode = NKR_NETMAP_ON;
2052 /* Undo netmap_krings_get(). This is done by clearing the exclusive mode
2053 * if it was requested at regif time, and by unsetting nr_pending_mode
2054 * if we are the last users of the involved rings. */
2056 netmap_krings_put(struct netmap_priv_d *priv)
2059 struct netmap_kring *kring;
2060 int excl = (priv->np_flags & NR_EXCLUSIVE);
2063 nm_prdis("%s: releasing tx [%d, %d) rx [%d, %d)",
2065 priv->np_qfirst[NR_TX],
2066 priv->np_qlast[NR_TX],
2067 priv->np_qfirst[NR_RX],
2068 priv->np_qlast[NR_RX]);
2070 foreach_selected_ring(priv, t, i, kring) {
2072 kring->nr_kflags &= ~NKR_EXCLUSIVE;
2074 if (kring->users == 0)
2075 kring->nr_pending_mode = NKR_NETMAP_OFF;
2080 nm_priv_rx_enabled(struct netmap_priv_d *priv)
2082 return (priv->np_qfirst[NR_RX] != priv->np_qlast[NR_RX]);
2085 /* Validate the CSB entries for both directions (atok and ktoa).
2086 * To be called under NMG_LOCK(). */
2088 netmap_csb_validate(struct netmap_priv_d *priv, struct nmreq_opt_csb *csbo)
2090 struct nm_csb_atok *csb_atok_base =
2091 (struct nm_csb_atok *)(uintptr_t)csbo->csb_atok;
2092 struct nm_csb_ktoa *csb_ktoa_base =
2093 (struct nm_csb_ktoa *)(uintptr_t)csbo->csb_ktoa;
2095 int num_rings[NR_TXRX], tot_rings;
2096 size_t entry_size[2];
2100 if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) {
2101 nm_prerr("Cannot update CSB while kloop is running");
2107 num_rings[t] = priv->np_qlast[t] - priv->np_qfirst[t];
2108 tot_rings += num_rings[t];
2113 if (!(priv->np_flags & NR_EXCLUSIVE)) {
2114 nm_prerr("CSB mode requires NR_EXCLUSIVE");
2118 entry_size[0] = sizeof(*csb_atok_base);
2119 entry_size[1] = sizeof(*csb_ktoa_base);
2120 csb_start[0] = (void *)csb_atok_base;
2121 csb_start[1] = (void *)csb_ktoa_base;
2123 for (i = 0; i < 2; i++) {
2124 /* On Linux we could use access_ok() to simplify
2125 * the validation. However, the advantage of
2126 * this approach is that it also works on
2127 * FreeBSD. */
2128 size_t csb_size = tot_rings * entry_size[i];
2132 if ((uintptr_t)csb_start[i] & (entry_size[i]-1)) {
2133 nm_prerr("Unaligned CSB address");
2137 tmp = nm_os_malloc(csb_size);
2141 /* Application --> kernel direction. */
2142 err = copyin(csb_start[i], tmp, csb_size);
2144 /* Kernel --> application direction. */
2145 memset(tmp, 0, csb_size);
2146 err = copyout(tmp, csb_start[i], csb_size);
2150 nm_prerr("Invalid CSB address");
2155 priv->np_csb_atok_base = csb_atok_base;
2156 priv->np_csb_ktoa_base = csb_ktoa_base;
2158 /* Initialize the CSB. */
2160 for (i = 0; i < num_rings[t]; i++) {
2161 struct netmap_kring *kring =
2162 NMR(priv->np_na, t)[i + priv->np_qfirst[t]];
2163 struct nm_csb_atok *csb_atok = csb_atok_base + i;
2164 struct nm_csb_ktoa *csb_ktoa = csb_ktoa_base + i;
2167 csb_atok += num_rings[NR_TX];
2168 csb_ktoa += num_rings[NR_TX];
2171 CSB_WRITE(csb_atok, head, kring->rhead);
2172 CSB_WRITE(csb_atok, cur, kring->rcur);
2173 CSB_WRITE(csb_atok, appl_need_kick, 1);
2174 CSB_WRITE(csb_atok, sync_flags, 1);
2175 CSB_WRITE(csb_ktoa, hwcur, kring->nr_hwcur);
2176 CSB_WRITE(csb_ktoa, hwtail, kring->nr_hwtail);
2177 CSB_WRITE(csb_ktoa, kern_need_kick, 1);
2179 nm_prinf("csb_init for kring %s: head %u, cur %u, "
2180 "hwcur %u, hwtail %u", kring->name,
2181 kring->rhead, kring->rcur, kring->nr_hwcur,
2182 kring->nr_hwtail);
2186 return (0);
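/*
 * Userspace sketch (assuming the option layout from net/netmap.h, and
 * 'hdr' and 'tot_rings' from an earlier NETMAP_REQ_REGISTER request
 * made with NR_EXCLUSIVE): the two CSB arrays must be entry-aligned
 * and provide one entry per bound TX ring followed by one per bound
 * RX ring.
 */
#if 0	/* example only */
	struct nmreq_opt_csb csbo;

	memset(&csbo, 0, sizeof(csbo));
	csbo.nro_opt.nro_reqtype = NETMAP_REQ_OPT_CSB;
	csbo.csb_atok = (uintptr_t)aligned_alloc(sizeof(struct nm_csb_atok),
	    tot_rings * sizeof(struct nm_csb_atok));
	csbo.csb_ktoa = (uintptr_t)aligned_alloc(sizeof(struct nm_csb_ktoa),
	    tot_rings * sizeof(struct nm_csb_ktoa));
	/* chain the option into the request header before NIOCCTRL */
	csbo.nro_opt.nro_next = hdr.nr_options;
	hdr.nr_options = (uintptr_t)&csbo.nro_opt;
#endif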
2189 /* Ensure that the netmap adapter can support the given MTU.
2190 * @return EINVAL if the na cannot be set to mtu, 0 otherwise.
2193 netmap_buf_size_validate(const struct netmap_adapter *na, unsigned mtu) {
2194 unsigned nbs = NETMAP_BUF_SIZE(na);
2196 if (mtu <= na->rx_buf_maxsize) {
2197 /* The MTU fits a single NIC slot. We only
2198 * need to check that netmap buffers are
2199 * large enough to hold an MTU. NS_MOREFRAG
2200 * cannot be used in this case. */
2202 nm_prerr("error: netmap buf size (%u) "
2203 "< device MTU (%u)", nbs, mtu);
2207 /* More NIC slots may be needed to receive
2208 * or transmit a single packet. Check that
2209 * the adapter supports NS_MOREFRAG and that
2210 * netmap buffers are large enough to hold
2211 * the maximum per-slot size. */
2212 if (!(na->na_flags & NAF_MOREFRAG)) {
2213 nm_prerr("error: large MTU (%d) needed "
2214 "but %s does not support "
2215 "NS_MOREFRAG", mtu,
2216 na->ifp->if_xname);
2217 return (EINVAL);
2218 } else if (nbs < na->rx_buf_maxsize) {
2219 nm_prerr("error: using NS_MOREFRAG on "
2220 "%s requires netmap buf size "
2221 ">= %u", na->ifp->if_xname,
2222 na->rx_buf_maxsize);
2225 nm_prinf("info: netmap application on "
2226 "%s needs to support "
2227 "NS_MOREFRAG "
2228 "(MTU=%u,netmap_buf_size=%u)",
2229 na->ifp->if_xname, mtu, nbs);
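/*
 * Worked example: with 2048-byte netmap buffers, a 1500-byte MTU fits
 * in a single slot and is always accepted; a 9000-byte jumbo MTU is
 * accepted only if the adapter advertises NAF_MOREFRAG and the buffers
 * can hold rx_buf_maxsize, and the application must then be prepared
 * to handle NS_MOREFRAG slot chains.
 */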
2237 * possibly move the interface to netmap-mode.
2238 * On success it returns a pointer to the netmap_if, otherwise NULL.
2239 * This must be called with NMG_LOCK held.
2241 * The following na callbacks are called in the process:
2243 * na->nm_config() [by netmap_update_config]
2244 * (get current number and size of rings)
2246 * We have a generic one for linux (netmap_linux_config).
2247 * The bwrap has to override this, since it has to forward
2248 * the request to the wrapped adapter (netmap_bwrap_config).
2251 * na->nm_krings_create()
2252 * (create and init the krings array)
2254 * One of the following:
2256 * * netmap_hw_krings_create, (hw ports)
2257 * creates the standard layout for the krings
2258 * and adds the mbq (used for the host rings).
2260 * * netmap_vp_krings_create (VALE ports)
2261 * add leases and scratchpads
2263 * * netmap_pipe_krings_create (pipes)
2264 * create the krings and rings of both ends and
2265 * cross-link them
2267 * * netmap_monitor_krings_create (monitors)
2268 * avoid allocating the mbq
2270 * * netmap_bwrap_krings_create (bwraps)
2271 * create both the bwrap krings array,
2272 * the krings array of the wrapped adapter, and
2273 * (if needed) the fake array for the host adapter
2275 * na->nm_register(, 1)
2276 * (put the adapter in netmap mode)
2278 * This may be one of the following:
2280 * * netmap_hw_reg (hw ports)
2281 * checks that the ifp is still there, then calls
2282 * the hardware specific callback;
2284 * * netmap_vp_reg (VALE ports)
2285 * If the port is connected to a bridge,
2286 * set the NAF_NETMAP_ON flag under the
2287 * bridge write lock.
2289 * * netmap_pipe_reg (pipes)
2290 * inform the other pipe end that it is no
2291 * longer responsible for the lifetime of this
2292 * pipe end
2294 * * netmap_monitor_reg (monitors)
2295 * intercept the sync callbacks of the monitored
2296 * rings
2298 * * netmap_bwrap_reg (bwraps)
2299 * cross-link the bwrap and hwna rings,
2300 * forward the request to the hwna, override
2301 * the hwna notify callback (so that frames
2302 * coming from outside go through the bridge).
2307 netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
2308 struct nmreq_header *hdr)
2310 struct netmap_if *nifp = NULL;
2314 priv->np_na = na; /* store the reference */
2315 error = netmap_mem_finalize(na->nm_mem, na);
2319 if (na->active_fds == 0) {
2321 /* cache the allocator info in the na */
2322 error = netmap_mem_get_lut(na->nm_mem, &na->na_lut);
2325 nm_prdis("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal,
2326 na->na_lut.objsize);
2328 /* ring configuration may have changed, fetch from the card */
2329 netmap_update_config(na);
2332 /* compute the range of tx and rx rings to monitor */
2333 error = netmap_set_ringid(priv, hdr);
2337 if (na->active_fds == 0) {
2339 * If this is the first registration of the adapter,
2340 * perform sanity checks and create the in-kernel view
2341 * of the netmap rings (the netmap krings).
2343 if (na->ifp && nm_priv_rx_enabled(priv)) {
2344 /* This netmap adapter is attached to an ifnet. */
2345 unsigned mtu = nm_os_ifnet_mtu(na->ifp);
2347 nm_prdis("%s: mtu %d rx_buf_maxsize %d netmap_buf_size %d",
2348 na->name, mtu, na->rx_buf_maxsize, NETMAP_BUF_SIZE(na));
2350 if (na->rx_buf_maxsize == 0) {
2351 nm_prerr("%s: error: rx_buf_maxsize == 0", na->name);
2356 error = netmap_buf_size_validate(na, mtu);
2362 * Depending on the adapter, this may also create
2363 * the netmap rings themselves
2365 error = na->nm_krings_create(na);
2371 /* now the krings must exist and we can check whether some
2372 * previous bind has exclusive ownership on them, and set
2373 * nr_pending_mode
2374 */
2375 error = netmap_krings_get(priv);
2377 goto err_del_krings;
2379 /* create any missing netmap rings */
2380 error = netmap_mem_rings_create(na);
2384 /* in all cases, create a new netmap if */
2385 nifp = netmap_mem_if_new(na, priv);
2391 if (nm_kring_pending(priv)) {
2392 /* Some kring is switching mode, tell the adapter to
2393 * react on this. */
2394 error = na->nm_register(na, 1);
2399 /* Commit the reference. */
2400 na->active_fds++;
2403 * advertise that the interface is ready by setting np_nifp.
2404 * The barrier is needed because readers (poll, *SYNC and mmap)
2405 * check for priv->np_nifp != NULL without locking
2407 mb(); /* make sure previous writes are visible to all CPUs */
2408 priv->np_nifp = nifp;
2413 netmap_mem_if_delete(na, nifp);
2415 netmap_krings_put(priv);
2416 netmap_mem_rings_delete(na);
2418 if (na->active_fds == 0)
2419 na->nm_krings_delete(na);
2421 if (na->active_fds == 0)
2422 memset(&na->na_lut, 0, sizeof(na->na_lut));
2424 netmap_mem_drop(na);
2432 * update kring and ring at the end of rxsync/txsync.
2435 nm_sync_finalize(struct netmap_kring *kring)
2438 * Update ring tail to what the kernel knows.
2439 * After txsync: head/rhead/hwcur might be behind cur/rcur
2442 kring->ring->tail = kring->rtail = kring->nr_hwtail;
2444 nm_prdis(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
2445 kring->name, kring->nr_hwcur, kring->nr_hwtail,
2446 kring->rhead, kring->rcur, kring->rtail);
2449 /* set ring timestamp */
2451 ring_timestamp_set(struct netmap_ring *ring)
2453 if (netmap_no_timestamp == 0 || ring->flags & NR_TIMESTAMP) {
2454 microtime(&ring->ts);
2458 static int nmreq_copyin(struct nmreq_header *, int);
2459 static int nmreq_copyout(struct nmreq_header *, int);
2460 static int nmreq_checkoptions(struct nmreq_header *);
2463 * ioctl(2) support for the "netmap" device.
2465 * The following is the list of accepted commands:
2466 * - NIOCCTRL device control API
2467 * - NIOCTXSYNC sync TX rings
2468 * - NIOCRXSYNC sync RX rings
2469 * - SIOCGIFADDR just for convenience
2470 * - NIOCGINFO deprecated (legacy API)
2471 * - NIOCREGIF deprecated (legacy API)
2473 * Return 0 on success, errno otherwise.
2476 netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
2477 struct thread *td, int nr_body_is_user)
2479 struct mbq q; /* packets from RX hw queues to host stack */
2480 struct netmap_adapter *na = NULL;
2481 struct netmap_mem_d *nmd = NULL;
2482 struct ifnet *ifp = NULL;
2484 u_int i, qfirst, qlast;
2485 struct netmap_kring **krings;
2491 struct nmreq_header *hdr = (struct nmreq_header *)data;
2493 if (hdr->nr_version < NETMAP_MIN_API ||
2494 hdr->nr_version > NETMAP_MAX_API) {
2495 nm_prerr("API mismatch: got %d need %d",
2496 hdr->nr_version, NETMAP_API);
2500 /* Make a kernel-space copy of the user-space nr_body.
2501 * For convenience, the nr_body pointer and the pointers
2502 * in the options list will be replaced with their
2503 * kernel-space counterparts. The original pointers are
2504 * saved internally and later restored by nmreq_copyout
2506 error = nmreq_copyin(hdr, nr_body_is_user);
2511 /* Sanitize hdr->nr_name. */
2512 hdr->nr_name[sizeof(hdr->nr_name) - 1] = '\0';
2514 switch (hdr->nr_reqtype) {
2515 case NETMAP_REQ_REGISTER: {
2516 struct nmreq_register *req =
2517 (struct nmreq_register *)(uintptr_t)hdr->nr_body;
2518 struct netmap_if *nifp;
2520 /* Protect access to priv from concurrent requests. */
2523 struct nmreq_option *opt;
2526 if (priv->np_nifp != NULL) { /* thread already registered */
2532 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_EXTMEM);
2534 struct nmreq_opt_extmem *e =
2535 (struct nmreq_opt_extmem *)opt;
2537 nmd = netmap_mem_ext_create(e->nro_usrptr,
2538 &e->nro_info, &error);
2539 opt->nro_status = error;
2543 #endif /* WITH_EXTMEM */
2545 if (nmd == NULL && req->nr_mem_id) {
2546 /* find the allocator and get a reference */
2547 nmd = netmap_mem_find(req->nr_mem_id);
2549 if (netmap_verbose) {
2550 nm_prerr("%s: failed to find mem_id %u",
2551 hdr->nr_name, req->nr_mem_id);
2557 /* find the interface and a reference */
2558 error = netmap_get_na(hdr, &na, &ifp, nmd,
2559 1 /* create */); /* keep reference */
2562 if (NETMAP_OWNED_BY_KERN(na)) {
2567 if (na->virt_hdr_len && !(req->nr_flags & NR_ACCEPT_VNET_HDR)) {
2568 nm_prerr("virt_hdr_len=%d, but application does "
2569 "not accept it", na->virt_hdr_len);
2574 error = netmap_do_regif(priv, na, hdr);
2575 if (error) { /* reg. failed, release priv and ref */
2579 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
2581 struct nmreq_opt_csb *csbo =
2582 (struct nmreq_opt_csb *)opt;
2583 error = netmap_csb_validate(priv, csbo);
2584 opt->nro_status = error;
2586 netmap_do_unregif(priv);
2591 nifp = priv->np_nifp;
2593 /* return the offset of the netmap_if object */
2594 req->nr_rx_rings = na->num_rx_rings;
2595 req->nr_tx_rings = na->num_tx_rings;
2596 req->nr_rx_slots = na->num_rx_desc;
2597 req->nr_tx_slots = na->num_tx_desc;
2598 req->nr_host_tx_rings = na->num_host_tx_rings;
2599 req->nr_host_rx_rings = na->num_host_rx_rings;
2600 error = netmap_mem_get_info(na->nm_mem, &req->nr_memsize, &memflags,
2603 netmap_do_unregif(priv);
2606 if (memflags & NETMAP_MEM_PRIVATE) {
2607 *(uint32_t *)(uintptr_t)&nifp->ni_flags |= NI_PRIV_MEM;
2610 priv->np_si[t] = nm_si_user(priv, t) ?
2611 &na->si[t] : &NMR(na, t)[priv->np_qfirst[t]]->si;
2614 if (req->nr_extra_bufs) {
2616 nm_prinf("requested %d extra buffers",
2617 req->nr_extra_bufs);
2618 req->nr_extra_bufs = netmap_extra_alloc(na,
2619 &nifp->ni_bufs_head, req->nr_extra_bufs);
2621 nm_prinf("got %d extra buffers", req->nr_extra_bufs);
2623 req->nr_offset = netmap_mem_if_offset(na->nm_mem, nifp);
2625 error = nmreq_checkoptions(hdr);
2627 netmap_do_unregif(priv);
2631 /* store ifp reference so that priv destructor may release it */
2635 netmap_unget_na(na, ifp);
2637 /* release the reference from netmap_mem_find() or
2638 * netmap_mem_ext_create()
2641 netmap_mem_put(nmd);
2646 case NETMAP_REQ_PORT_INFO_GET: {
2647 struct nmreq_port_info_get *req =
2648 (struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body;
2654 if (hdr->nr_name[0] != '\0') {
2655 /* Build a nmreq_register out of the nmreq_port_info_get,
2656 * so that we can call netmap_get_na(). */
2657 struct nmreq_register regreq;
2658 bzero(&regreq, sizeof(regreq));
2659 regreq.nr_mode = NR_REG_ALL_NIC;
2660 regreq.nr_tx_slots = req->nr_tx_slots;
2661 regreq.nr_rx_slots = req->nr_rx_slots;
2662 regreq.nr_tx_rings = req->nr_tx_rings;
2663 regreq.nr_rx_rings = req->nr_rx_rings;
2664 regreq.nr_host_tx_rings = req->nr_host_tx_rings;
2665 regreq.nr_host_rx_rings = req->nr_host_rx_rings;
2666 regreq.nr_mem_id = req->nr_mem_id;
2668 /* get a refcount */
2669 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2670 hdr->nr_body = (uintptr_t)&regreq;
2671 error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
2672 hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET; /* reset type */
2673 hdr->nr_body = (uintptr_t)req; /* reset nr_body */
2679 nmd = na->nm_mem; /* get memory allocator */
2681 nmd = netmap_mem_find(req->nr_mem_id ? req->nr_mem_id : 1);
2684 nm_prerr("%s: failed to find mem_id %u",
2686 req->nr_mem_id ? req->nr_mem_id : 1);
2692 error = netmap_mem_get_info(nmd, &req->nr_memsize, &memflags,
2696 if (na == NULL) /* only memory info */
2698 netmap_update_config(na);
2699 req->nr_rx_rings = na->num_rx_rings;
2700 req->nr_tx_rings = na->num_tx_rings;
2701 req->nr_rx_slots = na->num_rx_desc;
2702 req->nr_tx_slots = na->num_tx_desc;
2703 req->nr_host_tx_rings = na->num_host_tx_rings;
2704 req->nr_host_rx_rings = na->num_host_rx_rings;
2706 netmap_unget_na(na, ifp);
2711 case NETMAP_REQ_VALE_ATTACH: {
2712 error = netmap_vale_attach(hdr, NULL /* userspace request */);
2716 case NETMAP_REQ_VALE_DETACH: {
2717 error = netmap_vale_detach(hdr, NULL /* userspace request */);
2721 case NETMAP_REQ_VALE_LIST: {
2722 error = netmap_vale_list(hdr);
2726 case NETMAP_REQ_PORT_HDR_SET: {
2727 struct nmreq_port_hdr *req =
2728 (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2729 /* Build a nmreq_register out of the nmreq_port_hdr,
2730 * so that we can call netmap_get_vale_na(). */
2731 struct nmreq_register regreq;
2732 bzero(&regreq, sizeof(regreq));
2733 regreq.nr_mode = NR_REG_ALL_NIC;
2735 /* For now we only support virtio-net headers, and only for
2736 * VALE ports, but this may change in the future. Valid lengths
2737 * for the virtio-net header are 0 (no header), 10 and 12. */
2738 if (req->nr_hdr_len != 0 &&
2739 req->nr_hdr_len != sizeof(struct nm_vnet_hdr) &&
2740 req->nr_hdr_len != 12) {
2742 nm_prerr("invalid hdr_len %u", req->nr_hdr_len);
2747 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2748 hdr->nr_body = (uintptr_t)&regreq;
2749 error = netmap_get_vale_na(hdr, &na, NULL, 0);
2750 hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_SET;
2751 hdr->nr_body = (uintptr_t)req;
2753 struct netmap_vp_adapter *vpna =
2754 (struct netmap_vp_adapter *)na;
2755 na->virt_hdr_len = req->nr_hdr_len;
2756 if (na->virt_hdr_len) {
2757 vpna->mfs = NETMAP_BUF_SIZE(na);
2760 nm_prinf("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na);
2761 netmap_adapter_put(na);
2769 case NETMAP_REQ_PORT_HDR_GET: {
2770 /* Get vnet-header length for this netmap port */
2771 struct nmreq_port_hdr *req =
2772 (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2773 /* Build a nmreq_register out of the nmreq_port_hdr,
2774 * so that we can call netmap_get_na(). */
2775 struct nmreq_register regreq;
2778 bzero(&regreq, sizeof(regreq));
2779 regreq.nr_mode = NR_REG_ALL_NIC;
2781 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2782 hdr->nr_body = (uintptr_t)&regreq;
2783 error = netmap_get_na(hdr, &na, &ifp, NULL, 0);
2784 hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_GET;
2785 hdr->nr_body = (uintptr_t)req;
2787 req->nr_hdr_len = na->virt_hdr_len;
2789 netmap_unget_na(na, ifp);
2794 case NETMAP_REQ_VALE_NEWIF: {
2795 error = nm_vi_create(hdr);
2799 case NETMAP_REQ_VALE_DELIF: {
2800 error = nm_vi_destroy(hdr->nr_name);
2804 case NETMAP_REQ_VALE_POLLING_ENABLE:
2805 case NETMAP_REQ_VALE_POLLING_DISABLE: {
2806 error = nm_bdg_polling(hdr);
2809 #endif /* WITH_VALE */
2810 case NETMAP_REQ_POOLS_INFO_GET: {
2811 /* Get information from the memory allocator used for
2812 * the rings of this netmap port. */
2813 struct nmreq_pools_info *req =
2814 (struct nmreq_pools_info *)(uintptr_t)hdr->nr_body;
2817 /* Build a nmreq_register out of the nmreq_pools_info,
2818 * so that we can call netmap_get_na(). */
2819 struct nmreq_register regreq;
2820 bzero(&regreq, sizeof(regreq));
2821 regreq.nr_mem_id = req->nr_mem_id;
2822 regreq.nr_mode = NR_REG_ALL_NIC;
2824 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2825 hdr->nr_body = (uintptr_t)&regreq;
2826 error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
2827 hdr->nr_reqtype = NETMAP_REQ_POOLS_INFO_GET; /* reset type */
2828 hdr->nr_body = (uintptr_t)req; /* reset nr_body */
2834 nmd = na->nm_mem; /* grab the memory allocator */
2840 /* Finalize the memory allocator, get the pools
2841 * information and release the allocator. */
2842 error = netmap_mem_finalize(nmd, na);
2846 error = netmap_mem_pools_info_get(req, nmd);
2847 netmap_mem_drop(na);
2849 netmap_unget_na(na, ifp);
2854 case NETMAP_REQ_CSB_ENABLE: {
2855 struct nmreq_option *opt;
2857 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
2861 struct nmreq_opt_csb *csbo =
2862 (struct nmreq_opt_csb *)opt;
2864 error = netmap_csb_validate(priv, csbo);
2866 opt->nro_status = error;
2871 case NETMAP_REQ_SYNC_KLOOP_START: {
2872 error = netmap_sync_kloop(priv, hdr);
2876 case NETMAP_REQ_SYNC_KLOOP_STOP: {
2877 error = netmap_sync_kloop_stop(priv);
2886 /* Write back request body to userspace and reset the
2887 * user-space pointer. */
2888 error = nmreq_copyout(hdr, error);
2894 if (unlikely(priv->np_nifp == NULL)) {
2898 mb(); /* make sure following reads are not from cache */
2900 if (unlikely(priv->np_csb_atok_base)) {
2901 nm_prerr("Invalid sync in CSB mode");
2906 na = priv->np_na; /* we have a reference */
2909 t = (cmd == NIOCTXSYNC ? NR_TX : NR_RX);
2910 krings = NMR(na, t);
2911 qfirst = priv->np_qfirst[t];
2912 qlast = priv->np_qlast[t];
2913 sync_flags = priv->np_sync_flags;
2915 for (i = qfirst; i < qlast; i++) {
2916 struct netmap_kring *kring = krings[i];
2917 struct netmap_ring *ring = kring->ring;
2919 if (unlikely(nm_kr_tryget(kring, 1, &error))) {
2920 error = (error ? EIO : 0);
2924 if (cmd == NIOCTXSYNC) {
2925 if (netmap_debug & NM_DEBUG_TXSYNC)
2926 nm_prinf("pre txsync ring %d cur %d hwcur %d",
2929 if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
2930 netmap_ring_reinit(kring);
2931 } else if (kring->nm_sync(kring, sync_flags | NAF_FORCE_RECLAIM) == 0) {
2932 nm_sync_finalize(kring);
2934 if (netmap_debug & NM_DEBUG_TXSYNC)
2935 nm_prinf("post txsync ring %d cur %d hwcur %d",
2939 if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
2940 netmap_ring_reinit(kring);
2942 if (nm_may_forward_up(kring)) {
2943 /* transparent forwarding, see netmap_poll() */
2944 netmap_grab_packets(kring, &q, netmap_fwd);
2946 if (kring->nm_sync(kring, sync_flags | NAF_FORCE_READ) == 0) {
2947 nm_sync_finalize(kring);
2949 ring_timestamp_set(ring);
2955 netmap_send_up(na->ifp, &q);
2962 return netmap_ioctl_legacy(priv, cmd, data, td);
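/*
 * Userspace sketch of the NIOCTXSYNC path (assuming the nm_ring_*
 * helpers from net/netmap_user.h and an already mmap()ed port;
 * build_frame() is a hypothetical helper that writes a packet into
 * the buffer and returns its length): fill the free slots, publish
 * them by advancing head/cur, then sync.
 */
#if 0	/* example only */
	struct netmap_ring *ring = NETMAP_TXRING(nifp, 0);

	while (!nm_ring_empty(ring)) {
		struct netmap_slot *slot = &ring->slot[ring->cur];
		char *buf = NETMAP_BUF(ring, slot->buf_idx);

		slot->len = build_frame(buf);
		ring->head = ring->cur = nm_ring_next(ring, ring->cur);
	}
	ioctl(fd, NIOCTXSYNC, NULL);
#endif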
2971 nmreq_size_by_type(uint16_t nr_reqtype)
2973 switch (nr_reqtype) {
2974 case NETMAP_REQ_REGISTER:
2975 return sizeof(struct nmreq_register);
2976 case NETMAP_REQ_PORT_INFO_GET:
2977 return sizeof(struct nmreq_port_info_get);
2978 case NETMAP_REQ_VALE_ATTACH:
2979 return sizeof(struct nmreq_vale_attach);
2980 case NETMAP_REQ_VALE_DETACH:
2981 return sizeof(struct nmreq_vale_detach);
2982 case NETMAP_REQ_VALE_LIST:
2983 return sizeof(struct nmreq_vale_list);
2984 case NETMAP_REQ_PORT_HDR_SET:
2985 case NETMAP_REQ_PORT_HDR_GET:
2986 return sizeof(struct nmreq_port_hdr);
2987 case NETMAP_REQ_VALE_NEWIF:
2988 return sizeof(struct nmreq_vale_newif);
2989 case NETMAP_REQ_VALE_DELIF:
2990 case NETMAP_REQ_SYNC_KLOOP_STOP:
2991 case NETMAP_REQ_CSB_ENABLE:
2992 return 0;
2993 case NETMAP_REQ_VALE_POLLING_ENABLE:
2994 case NETMAP_REQ_VALE_POLLING_DISABLE:
2995 return sizeof(struct nmreq_vale_polling);
2996 case NETMAP_REQ_POOLS_INFO_GET:
2997 return sizeof(struct nmreq_pools_info);
2998 case NETMAP_REQ_SYNC_KLOOP_START:
2999 return sizeof(struct nmreq_sync_kloop_start);
3005 nmreq_opt_size_by_type(uint32_t nro_reqtype, uint64_t nro_size)
3007 size_t rv = sizeof(struct nmreq_option);
3008 #ifdef NETMAP_REQ_OPT_DEBUG
3009 if (nro_reqtype & NETMAP_REQ_OPT_DEBUG)
3010 return (nro_reqtype & ~NETMAP_REQ_OPT_DEBUG);
3011 #endif /* NETMAP_REQ_OPT_DEBUG */
3012 switch (nro_reqtype) {
3013 #ifdef WITH_EXTMEM
3014 case NETMAP_REQ_OPT_EXTMEM:
3015 rv = sizeof(struct nmreq_opt_extmem);
3016 break;
3017 #endif /* WITH_EXTMEM */
3018 case NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS:
3019 if (nro_size >= rv)
3020 rv = nro_size;
3021 break;
3022 case NETMAP_REQ_OPT_CSB:
3023 rv = sizeof(struct nmreq_opt_csb);
3024 break;
3025 case NETMAP_REQ_OPT_SYNC_KLOOP_MODE:
3026 rv = sizeof(struct nmreq_opt_sync_kloop_mode);
3027 break;
3028 }
3029 /* subtract the common header */
3030 return rv - sizeof(struct nmreq_option);
3034 * nmreq_copyin: create an in-kernel version of the request.
3036 * We build the following data structure:
3038 * hdr -> +--------+              buf
3039 *        |        |       +----------------+
3040 *        +--------+       | usr body ptr   |
3041 *        |options |--.    +----------------+
3042 *        +--------+  |    | usr options ptr|
3043 *        |body    |--|--->+----------------+
3044 *        +--------+  |    |  copy of body  |
3045 *                    |    +----------------+
3046 *                    |    |  option table  | \ one entry per option
3047 *                    |    | (ptr or NULL)  | / type, indexed by type
3048 *                    |    +----------------+
3049 *                    |    | usr next ptr 1 |
3050 *                    `--->+----------------+
3051 *                         | copy of opt 1  |
3052 *                         |   nro_next ----|--.
3053 *                         +----------------+  |
3054 *                         | usr next ptr 2 |  |
3055 *                         +----------------+<-'
3056 *                         | copy of opt 2  |
3057 *                         |      ...       |
3058 *                         +----------------+
3081 * The options and body fields of the hdr structure are overwritten
3082 * with in-kernel valid pointers inside the buf. The original user
3083 * pointers are saved in the buf and restored on copyout.
3084 * The list of options is copied and the pointers adjusted. The
3085 * original pointers are saved just before the option they belong to.
3087 * The option table has an entry for every available option. Entries
3088 * for options that have not been passed contain NULL.
3093 nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user)
3095 size_t rqsz, optsz, bufsz;
3097 char *ker = NULL, *p;
3098 struct nmreq_option **next, *src, **opt_tab;
3099 struct nmreq_option buf;
3102 if (hdr->nr_reserved) {
3104 nm_prerr("nr_reserved must be zero");
3108 if (!nr_body_is_user)
3111 hdr->nr_reserved = nr_body_is_user;
3113 /* compute the total size of the buffer */
3114 rqsz = nmreq_size_by_type(hdr->nr_reqtype);
3115 if (rqsz > NETMAP_REQ_MAXSIZE) {
3119 if ((rqsz && hdr->nr_body == (uintptr_t)NULL) ||
3120 (!rqsz && hdr->nr_body != (uintptr_t)NULL)) {
3121 /* Request body expected, but not found; or
3122 * request body found but unexpected. */
3124 nm_prerr("nr_body expected but not found, or vice versa");
3129 bufsz = 2 * sizeof(void *) + rqsz +
3130 NETMAP_REQ_OPT_MAX * sizeof(opt_tab);
3131 /* compute the size of the buf below the option table.
3132 * It must contain a copy of every received option structure.
3133 * For every option we also need to store a copy of the user
3134 * list pointer.
3135 */
3136 optsz = 0;
3137 for (src = (struct nmreq_option *)(uintptr_t)hdr->nr_options; src;
3138 src = (struct nmreq_option *)(uintptr_t)buf.nro_next)
3140 error = copyin(src, &buf, sizeof(*src));
3143 optsz += sizeof(*src);
3144 optsz += nmreq_opt_size_by_type(buf.nro_reqtype, buf.nro_size);
3145 if (rqsz + optsz > NETMAP_REQ_MAXSIZE) {
3149 bufsz += sizeof(void *);
3153 ker = nm_os_malloc(bufsz);
3158 p = ker; /* write pointer into the buffer */
3160 /* make a copy of the user pointers */
3161 ptrs = (uint64_t*)p;
3162 *ptrs++ = hdr->nr_body;
3163 *ptrs++ = hdr->nr_options;
3167 error = copyin((void *)(uintptr_t)hdr->nr_body, p, rqsz);
3170 /* overwrite the user pointer with the in-kernel one */
3171 hdr->nr_body = (uintptr_t)p;
3173 /* start of the options table */
3174 opt_tab = (struct nmreq_option **)p;
3175 p += sizeof(opt_tab) * NETMAP_REQ_OPT_MAX;
3177 /* copy the options */
3178 next = (struct nmreq_option **)&hdr->nr_options;
3181 struct nmreq_option *opt;
3183 /* copy the option header */
3184 ptrs = (uint64_t *)p;
3185 opt = (struct nmreq_option *)(ptrs + 1);
3186 error = copyin(src, opt, sizeof(*src));
3189 /* make a copy of the user next pointer */
3190 *ptrs = opt->nro_next;
3191 /* overwrite the user pointer with the in-kernel one */
3192 *next = opt;
3194 /* initialize the option as not supported.
3195 * Recognized options will update this field.
3197 opt->nro_status = EOPNOTSUPP;
3199 /* check for invalid types */
3200 if (opt->nro_reqtype < 1) {
3202 nm_prinf("invalid option type: %u", opt->nro_reqtype);
3203 opt->nro_status = EINVAL;
3208 if (opt->nro_reqtype >= NETMAP_REQ_OPT_MAX) {
3209 /* opt->nro_status is already EOPNOTSUPP */
3214 /* if the type is valid, index the option in the table
3215 * unless it is a duplicate.
3217 if (opt_tab[opt->nro_reqtype] != NULL) {
3219 nm_prinf("duplicate option: %u", opt->nro_reqtype);
3220 opt->nro_status = EINVAL;
3221 opt_tab[opt->nro_reqtype]->nro_status = EINVAL;
3225 opt_tab[opt->nro_reqtype] = opt;
3227 p = (char *)(opt + 1);
3229 /* copy the option body */
3230 optsz = nmreq_opt_size_by_type(opt->nro_reqtype,
3233 /* the option body follows the option header */
3234 error = copyin(src + 1, p, optsz);
3241 /* move to next option */
3242 next = (struct nmreq_option **)&opt->nro_next;
3246 nmreq_copyout(hdr, error);
3250 ptrs = (uint64_t *)ker;
3251 hdr->nr_body = *ptrs++;
3252 hdr->nr_options = *ptrs++;
3253 hdr->nr_reserved = 0;
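/*
 * Userspace view of what nmreq_copyin() consumes (a sketch, assuming
 * the extmem option layout from net/netmap.h; 'my_region' is a
 * hypothetical mmap()ed area): options are chained through nro_next
 * and hang off hdr.nr_options; the kernel flattens header, body and
 * option list into the single buffer described above.
 */
#if 0	/* example only */
	struct nmreq_opt_extmem e;

	memset(&e, 0, sizeof(e));
	e.nro_opt.nro_reqtype = NETMAP_REQ_OPT_EXTMEM;
	e.nro_usrptr = (uintptr_t)my_region;
	e.nro_opt.nro_next = 0;		/* end of the option list */
	hdr.nr_options = (uintptr_t)&e.nro_opt;
#endif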
3260 nmreq_copyout(struct nmreq_header *hdr, int rerror)
3262 struct nmreq_option *src, *dst;
3263 void *ker = (void *)(uintptr_t)hdr->nr_body, *bufstart;
3268 if (!hdr->nr_reserved)
3271 /* restore the user pointers in the header */
3272 ptrs = (uint64_t *)ker - 2;
3274 hdr->nr_body = *ptrs++;
3275 src = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3276 hdr->nr_options = *ptrs;
3280 bodysz = nmreq_size_by_type(hdr->nr_reqtype);
3281 error = copyout(ker, (void *)(uintptr_t)hdr->nr_body, bodysz);
3288 /* copy the options */
3289 dst = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3294 /* restore the user pointer */
3295 next = src->nro_next;
3296 ptrs = (uint64_t *)src - 1;
3297 src->nro_next = *ptrs;
3299 /* always copy the option header */
3300 error = copyout(src, dst, sizeof(*src));
3306 /* copy the option body only if there was no error */
3307 if (!rerror && !src->nro_status) {
3308 optsz = nmreq_opt_size_by_type(src->nro_reqtype,
3311 error = copyout(src + 1, dst + 1, optsz);
3318 src = (struct nmreq_option *)(uintptr_t)next;
3319 dst = (struct nmreq_option *)(uintptr_t)*ptrs;
3324 hdr->nr_reserved = 0;
3325 nm_os_free(bufstart);
3329 struct nmreq_option *
3330 nmreq_getoption(struct nmreq_header *hdr, uint16_t reqtype)
3332 struct nmreq_option **opt_tab;
3334 if (!hdr->nr_options)
3337 opt_tab = (struct nmreq_option **)((uintptr_t)hdr->nr_options) -
3338 (NETMAP_REQ_OPT_MAX + 1);
3339 return opt_tab[reqtype];
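/*
 * Kernel-side usage sketch: after nmreq_copyin() has built the option
 * table, a request handler looks an option up by type; a NULL return
 * means the option was not passed.
 */
#if 0	/* example only */
	struct nmreq_option *opt;

	opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
	if (opt != NULL)
		opt->nro_status = 0;	/* mark the option as handled */
#endif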
3343 nmreq_checkoptions(struct nmreq_header *hdr)
3345 struct nmreq_option *opt;
3346 /* return error if there is still any option
3347 * marked as not supported
3350 for (opt = (struct nmreq_option *)(uintptr_t)hdr->nr_options; opt;
3351 opt = (struct nmreq_option *)(uintptr_t)opt->nro_next)
3352 if (opt->nro_status == EOPNOTSUPP)
3359 * select(2) and poll(2) handlers for the "netmap" device.
3361 * Can be called for one or more queues.
3362 * Return the event mask corresponding to ready events.
3363 * If there are no ready events (and 'sr' is not NULL), do a
3364 * selrecord on either individual selinfo or on the global one.
3365 * Device-dependent parts (locking and sync of tx/rx rings)
3366 * are done through callbacks.
3368 * On Linux, the arguments are really pwait, the poll table, and 'td' is struct file *.
3369 * The first one is remapped to pwait as selrecord() uses the name as a
3370 * hidden argument.
3371 */
3373 netmap_poll(struct netmap_priv_d *priv, int events, NM_SELRECORD_T *sr)
3375 struct netmap_adapter *na;
3376 struct netmap_kring *kring;
3377 struct netmap_ring *ring;
3378 u_int i, want[NR_TXRX], revents = 0;
3379 NM_SELINFO_T *si[NR_TXRX];
3380 #define want_tx want[NR_TX]
3381 #define want_rx want[NR_RX]
3382 struct mbq q; /* packets from RX hw queues to host stack */
3385 * In order to avoid nested locks, we need to "double check"
3386 * txsync and rxsync if we decide to do a selrecord().
3387 * retry_tx (and retry_rx, later) prevent looping forever.
3389 int retry_tx = 1, retry_rx = 1;
3391 /* Transparent mode: send_down is 1 if we have found some
3392 * packets to forward (host RX ring --> NIC) during the rx
3393 * scan and we have not sent them down to the NIC yet.
3394 * Transparent mode requires binding all rings to a single
3395 * file descriptor.
3396 */
3397 int send_down = 0;
3398 int sync_flags = priv->np_sync_flags;
3402 if (unlikely(priv->np_nifp == NULL)) {
3405 mb(); /* make sure following reads are not from cache */
3409 if (unlikely(!nm_netmap_on(na)))
3412 if (unlikely(priv->np_csb_atok_base)) {
3413 nm_prerr("Invalid poll in CSB mode");
3417 if (netmap_debug & NM_DEBUG_ON)
3418 nm_prinf("device %s events 0x%x", na->name, events);
3419 want_tx = events & (POLLOUT | POLLWRNORM);
3420 want_rx = events & (POLLIN | POLLRDNORM);
3423 * If the card has more than one queue AND the file descriptor is
3424 * bound to all of them, we sleep on the "global" selinfo, otherwise
3425 * we sleep on individual selinfo (FreeBSD only allows two selinfo's
3426 * per file descriptor).
3427 * The interrupt routine in the driver wakes one or the other
3428 * (or both) depending on which clients are active.
3430 * rxsync() is only called if we run out of buffers on a POLLIN.
3431 * txsync() is called if we run out of buffers on POLLOUT, or
3432 * there are pending packets to send. The latter can be disabled
3433 * passing NETMAP_NO_TX_POLL in the NIOCREG call.
3435 si[NR_RX] = priv->np_si[NR_RX];
3436 si[NR_TX] = priv->np_si[NR_TX];
3440 * We start with a lock free round which is cheap if we have
3441 * slots available. If this fails, then lock and call the sync
3442 * routines. We can't do this on Linux, as the contract says
3443 * that we must call nm_os_selrecord() unconditionally.
3446 const enum txrx t = NR_TX;
3447 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3448 kring = NMR(na, t)[i];
3449 if (kring->ring->cur != kring->ring->tail) {
3450 /* Some unseen TX space is available, so
3451 * we don't need to run txsync. */
3459 const enum txrx t = NR_RX;
3460 int rxsync_needed = 0;
3462 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3463 kring = NMR(na, t)[i];
3464 if (kring->ring->cur == kring->ring->tail
3465 || kring->rhead != kring->ring->head) {
3466 /* There are no unseen packets on this ring,
3467 * or there are some buffers to be returned
3468 * to the netmap port. We therefore go ahead
3469 * and run rxsync. */
3474 if (!rxsync_needed) {
3482 /* The selrecord must be unconditional on linux. */
3483 nm_os_selrecord(sr, si[NR_RX]);
3484 nm_os_selrecord(sr, si[NR_TX]);
3488 * If we want to push packets out (priv->np_txpoll) or
3489 * want_tx is still set, we must issue txsync calls
3490 * (on all rings, to avoid that the tx rings stall).
3491 * Fortunately, normal tx mode has np_txpoll set.
3493 if (priv->np_txpoll || want_tx) {
3495 * The first round checks if anyone is ready, if not
3496 * do a selrecord and another round to handle races.
3497 * want_tx goes to 0 if any space is found, and is
3498 * used to skip rings with no pending transmissions.
3501 for (i = priv->np_qfirst[NR_TX]; i < priv->np_qlast[NR_TX]; i++) {
3504 kring = na->tx_rings[i];
3508 * Don't try to txsync this TX ring if we already found some
3509 * space in some of the TX rings (want_tx == 0) and there are no
3510 * TX slots in this ring that need to be flushed to the NIC
3513 if (!send_down && !want_tx && ring->head == kring->nr_hwcur)
3516 if (nm_kr_tryget(kring, 1, &revents))
3519 if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3520 netmap_ring_reinit(kring);
3523 if (kring->nm_sync(kring, sync_flags))
3526 nm_sync_finalize(kring);
3530 * If we found new slots, notify potential
3531 * listeners on the same ring.
3532 * Since we just did a txsync, look at the copies
3533 * of cur,tail in the kring.
3535 found = kring->rcur != kring->rtail;
3537 if (found) { /* notify other listeners */
3541 kring->nm_notify(kring, 0);
3545 /* if there were any packets to forward, we must have handled them by now */
3547 if (want_tx && retry_tx && sr) {
3549 nm_os_selrecord(sr, si[NR_TX]);
3557 * If want_rx is still set scan receive rings.
3558 * Do it on all rings because otherwise we starve.
3561 /* two rounds here for race avoidance */
3563 for (i = priv->np_qfirst[NR_RX]; i < priv->np_qlast[NR_RX]; i++) {
3566 kring = na->rx_rings[i];
3569 if (unlikely(nm_kr_tryget(kring, 1, &revents)))
3572 if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3573 netmap_ring_reinit(kring);
3576 /* now we can use kring->rcur, rtail */
3579 * transparent mode support: collect packets from
3580 * hw rxring(s) that have been released by the user
3582 if (nm_may_forward_up(kring)) {
3583 netmap_grab_packets(kring, &q, netmap_fwd);
3586 /* Clear the NR_FORWARD flag anyway, it may be set by
3587 * the nm_sync() below, but only for the host RX ring (see
3588 * netmap_rxsync_from_host()). */
3589 kring->nr_kflags &= ~NR_FORWARD;
3590 if (kring->nm_sync(kring, sync_flags))
3593 nm_sync_finalize(kring);
3594 send_down |= (kring->nr_kflags & NR_FORWARD);
3595 ring_timestamp_set(ring);
3596 found = kring->rcur != kring->rtail;
3602 kring->nm_notify(kring, 0);
3608 if (retry_rx && sr) {
3609 nm_os_selrecord(sr, si[NR_RX]);
3612 if (send_down || retry_rx) {
3615 goto flush_tx; /* and retry_rx */
3622 * Transparent mode: released bufs (i.e. between kring->nr_hwcur and
3623 * ring->head) marked with NS_FORWARD on hw rx rings are passed up
3624 * to the host stack.
3628 netmap_send_up(na->ifp, &q);
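/*
 * Userspace sketch: waiting for RX traffic on a bound descriptor with
 * poll(2); POLLIN corresponds to the rx ring scan above, POLLOUT to
 * the tx scan.
 */
#if 0	/* example only */
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (poll(&pfd, 1, 1000 /* ms */) > 0 && (pfd.revents & POLLIN)) {
		/* at least one bound rx ring has new packets */
	}
#endif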
3637 nma_intr_enable(struct netmap_adapter *na, int onoff)
3639 bool changed = false;
3644 for (i = 0; i < nma_get_nrings(na, t); i++) {
3645 struct netmap_kring *kring = NMR(na, t)[i];
3646 int on = !(kring->nr_kflags & NKR_NOINTR);
3648 if (!!onoff != !!on) {
3652 kring->nr_kflags &= ~NKR_NOINTR;
3654 kring->nr_kflags |= NKR_NOINTR;
3660 return 0; /* nothing to do */
3664 nm_prerr("Cannot %s interrupts for %s", onoff ? "enable" : "disable",
3669 na->nm_intr(na, onoff);
3675 /*-------------------- driver support routines -------------------*/
3677 /* default notify callback */
3679 netmap_notify(struct netmap_kring *kring, int flags)
3681 struct netmap_adapter *na = kring->notify_na;
3682 enum txrx t = kring->tx;
3684 nm_os_selwakeup(&kring->si);
3685 /* optimization: avoid a wake up on the global
3686 * queue if nobody has registered for more
3687 * than one ring
3688 */
3689 if (na->si_users[t] > 0)
3690 nm_os_selwakeup(&na->si[t]);
3692 return NM_IRQ_COMPLETED;
3695 /* called by all routines that create netmap_adapters.
3696 * provide some defaults and get a reference to the
3697 * memory allocator.
3698 */
3700 netmap_attach_common(struct netmap_adapter *na)
3702 if (!na->rx_buf_maxsize) {
3703 /* Set a conservative default (larger is safer). */
3704 na->rx_buf_maxsize = PAGE_SIZE;
3708 if (na->na_flags & NAF_HOST_RINGS && na->ifp) {
3709 na->if_input = na->ifp->if_input; /* for netmap_send_up */
3711 na->pdev = na; /* make sure netmap_mem_map() is called */
3712 #endif /* __FreeBSD__ */
3713 if (na->na_flags & NAF_HOST_RINGS) {
3714 if (na->num_host_rx_rings == 0)
3715 na->num_host_rx_rings = 1;
3716 if (na->num_host_tx_rings == 0)
3717 na->num_host_tx_rings = 1;
3719 if (na->nm_krings_create == NULL) {
3720 /* we assume that we have been called by a driver,
3721 * since other port types all provide their own
3722 * krings_create callbacks
3723 */
3724 na->nm_krings_create = netmap_hw_krings_create;
3725 na->nm_krings_delete = netmap_hw_krings_delete;
3727 if (na->nm_notify == NULL)
3728 na->nm_notify = netmap_notify;
3731 if (na->nm_mem == NULL) {
3732 /* use the global allocator */
3733 na->nm_mem = netmap_mem_get(&nm_mem);
3736 if (na->nm_bdg_attach == NULL)
3737 /* no special nm_bdg_attach callback. On VALE
3738 * attach, we need to interpose a bwrap
3740 na->nm_bdg_attach = netmap_default_bdg_attach;
3746 /* Wrapper for the register callback provided by netmap-enabled
3747 * hardware drivers.
3748 * nm_iszombie(na) means that the driver module has been
3749 * unloaded, so we cannot call into it.
3750 * nm_os_ifnet_lock() must guarantee mutual exclusion with
3751 * module unloading.
3752 */
3754 netmap_hw_reg(struct netmap_adapter *na, int onoff)
3756 struct netmap_hw_adapter *hwna =
3757 (struct netmap_hw_adapter*)na;
3762 if (nm_iszombie(na)) {
3765 } else if (na != NULL) {
3766 na->na_flags &= ~NAF_NETMAP_ON;
3771 error = hwna->nm_hw_register(na, onoff);
3774 nm_os_ifnet_unlock();
3780 netmap_hw_dtor(struct netmap_adapter *na)
3782 if (na->ifp == NULL)
3785 NM_DETACH_NA(na->ifp);
3790 * Allocate a netmap_adapter object, and initialize it from the
3791 * 'arg' passed by the driver on attach.
3792 * We allocate a block of memory of 'size' bytes, which has room
3793 * for struct netmap_adapter plus additional room private to
3794 * the caller.
3795 * Return 0 on success, ENOMEM otherwise.
3798 netmap_attach_ext(struct netmap_adapter *arg, size_t size, int override_reg)
3800 struct netmap_hw_adapter *hwna = NULL;
3801 struct ifnet *ifp = NULL;
3803 if (size < sizeof(struct netmap_hw_adapter)) {
3804 if (netmap_debug & NM_DEBUG_ON)
3805 nm_prerr("Invalid netmap adapter size %d", (int)size);
3809 if (arg == NULL || arg->ifp == NULL) {
3810 if (netmap_debug & NM_DEBUG_ON)
3811 nm_prerr("either arg or arg->ifp is NULL");
3815 if (arg->num_tx_rings == 0 || arg->num_rx_rings == 0) {
3816 if (netmap_debug & NM_DEBUG_ON)
3817 nm_prerr("%s: invalid rings tx %d rx %d",
3818 arg->name, arg->num_tx_rings, arg->num_rx_rings);
3823 if (NM_NA_CLASH(ifp)) {
3824 /* If NA(ifp) is not null but there is no valid netmap
3825 * adapter it means that someone else is using the same
3826 * pointer (e.g. ax25_ptr on linux). This happens for
3827 * instance when also PF_RING is in use. */
3828 nm_prerr("Error: netmap adapter hook is busy");
3832 hwna = nm_os_malloc(size);
3836 hwna->up.na_flags |= NAF_HOST_RINGS | NAF_NATIVE;
3837 strlcpy(hwna->up.name, ifp->if_xname, sizeof(hwna->up.name));
3839 hwna->nm_hw_register = hwna->up.nm_register;
3840 hwna->up.nm_register = netmap_hw_reg;
3842 if (netmap_attach_common(&hwna->up)) {
3846 netmap_adapter_get(&hwna->up);
3848 NM_ATTACH_NA(ifp, &hwna->up);
3850 nm_os_onattach(ifp);
3852 if (arg->nm_dtor == NULL) {
3853 hwna->up.nm_dtor = netmap_hw_dtor;
3856 if_printf(ifp, "netmap queues/slots: TX %d/%d, RX %d/%d\n",
3857 hwna->up.num_tx_rings, hwna->up.num_tx_desc,
3858 hwna->up.num_rx_rings, hwna->up.num_rx_desc);
3862 nm_prerr("fail, arg %p ifp %p na %p", arg, ifp, hwna);
3863 return (hwna ? EINVAL : ENOMEM);
3868 netmap_attach(struct netmap_adapter *arg)
3870 return netmap_attach_ext(arg, sizeof(struct netmap_hw_adapter),
3871 1 /* override nm_reg */);
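/*
 * Driver-side sketch (a hypothetical "foo" driver; the FOO_* constants
 * and foo_netmap_* callbacks are assumptions of this sketch): fill the
 * fields that netmap_attach() expects and register the adapter once,
 * at attach time.
 */
#if 0	/* example only */
	struct netmap_adapter na;

	bzero(&na, sizeof(na));
	na.ifp = sc->ifp;
	na.num_tx_desc = FOO_TX_DESC;
	na.num_rx_desc = FOO_RX_DESC;
	na.num_tx_rings = na.num_rx_rings = sc->num_queues;
	na.nm_txsync = foo_netmap_txsync;
	na.nm_rxsync = foo_netmap_rxsync;
	na.nm_register = foo_netmap_reg;
	netmap_attach(&na);
#endif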
3876 NM_DBG(netmap_adapter_get)(struct netmap_adapter *na)
3882 refcount_acquire(&na->na_refcount);
3886 /* returns 1 iff the netmap_adapter is destroyed */
3888 NM_DBG(netmap_adapter_put)(struct netmap_adapter *na)
3893 if (!refcount_release(&na->na_refcount))
3899 if (na->tx_rings) { /* XXX should not happen */
3900 if (netmap_debug & NM_DEBUG_ON)
3901 nm_prerr("freeing leftover tx_rings");
3902 na->nm_krings_delete(na);
3904 netmap_pipe_dealloc(na);
3906 netmap_mem_put(na->nm_mem);
3907 bzero(na, sizeof(*na));
3913 /* nm_krings_create callback for all hardware native adapters */
3915 netmap_hw_krings_create(struct netmap_adapter *na)
3917 int ret = netmap_krings_create(na, 0);
3919 /* initialize the mbq for the sw rx ring */
3920 u_int lim = netmap_real_rings(na, NR_RX), i;
3921 for (i = na->num_rx_rings; i < lim; i++) {
3922 mbq_safe_init(&NMR(na, NR_RX)[i]->rx_queue);
3924 nm_prdis("initialized sw rx queue %d", na->num_rx_rings);
3932 * Called on module unload by the netmap-enabled drivers
3935 netmap_detach(struct ifnet *ifp)
3937 struct netmap_adapter *na = NA(ifp);
3943 netmap_set_all_rings(na, NM_KR_LOCKED);
3945 * if the netmap adapter is not native, somebody
3946 * changed it, so we cannot release it here.
3947 * The NAF_ZOMBIE flag will notify the new owner that
3948 * the driver is gone.
3950 if (!(na->na_flags & NAF_NATIVE) || !netmap_adapter_put(na)) {
3951 na->na_flags |= NAF_ZOMBIE;
3953 /* give active users a chance to notice that NAF_ZOMBIE has been
3954 * turned on, so that they can stop and return an error to userspace.
3955 * Note that this becomes a NOP if there are no active users and,
3956 * therefore, the put() above has deleted the na, since now NA(ifp) is
3957 * NULL.
3958 */
3959 netmap_enable_all_rings(ifp);
3965 * Intercept packets from the network stack and pass them
3966 * to netmap as incoming packets on the 'software' ring.
3968 * We only store packets in a bounded mbq and then copy them
3969 * in the relevant rxsync routine.
3971 * We rely on the OS to make sure that the ifp and na do not go
3972 * away (typically the caller checks for IFF_DRV_RUNNING or the like).
3973 * In nm_register() or whenever there is a reinitialization,
3974 * we make sure to make the mode change visible here.
3977 netmap_transmit(struct ifnet *ifp, struct mbuf *m)
3979 struct netmap_adapter *na = NA(ifp);
3980 struct netmap_kring *kring, *tx_kring;
3981 u_int len = MBUF_LEN(m);
3982 u_int error = ENOBUFS;
3989 if (i >= na->num_host_rx_rings) {
3990 i = i % na->num_host_rx_rings;
3992 kring = NMR(na, NR_RX)[nma_get_nrings(na, NR_RX) + i];
3994 // XXX [Linux] we do not need this lock
3995 // if we follow the down/configure/up protocol -gl
3996 // mtx_lock(&na->core_lock);
3998 if (!nm_netmap_on(na)) {
3999 nm_prerr("%s not in netmap mode anymore", na->name);
4005 if (txr >= na->num_tx_rings) {
4006 txr %= na->num_tx_rings;
4008 tx_kring = NMR(na, NR_TX)[txr];
4010 if (tx_kring->nr_mode == NKR_NETMAP_OFF) {
4011 return MBUF_TRANSMIT(na, ifp, m);
4014 q = &kring->rx_queue;
4016 // XXX reconsider long packets if we handle fragments
4017 if (len > NETMAP_BUF_SIZE(na)) { /* too long for us */
4018 nm_prerr("%s from_host, drop packet size %d > %d", na->name,
4019 len, NETMAP_BUF_SIZE(na));
4023 if (!netmap_generic_hwcsum) {
4024 if (nm_os_mbuf_has_csum_offld(m)) {
4025 nm_prlim(1, "%s drop mbuf that needs checksum offload", na->name);
4030 if (nm_os_mbuf_has_seg_offld(m)) {
4031 nm_prlim(1, "%s drop mbuf that needs generic segmentation offload", na->name);
4036 ETHER_BPF_MTAP(ifp, m);
4037 #endif /* __FreeBSD__ */
4039 /* protect against netmap_rxsync_from_host(), netmap_sw_to_nic()
4040 * and maybe other instances of netmap_transmit (the latter
4041 * not possible on Linux).
4042 * We enqueue the mbuf only if we are sure there is going to be
4043 * enough room in the host RX ring, otherwise we drop it.
4047 busy = kring->nr_hwtail - kring->nr_hwcur;
4049 busy += kring->nkr_num_slots;
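	/* Worked example: with 1024 slots, hwcur = 1000 and
	 * hwtail = 10, busy = 10 - 1000 + 1024 = 34 slots are
	 * still owned by the kernel, so the check below accepts
	 * new mbufs only while 34 + mbq_len(q) < 1023. */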
4050 if (busy + mbq_len(q) >= kring->nkr_num_slots - 1) {
4051 nm_prlim(2, "%s full hwcur %d hwtail %d qlen %d", na->name,
4052 kring->nr_hwcur, kring->nr_hwtail, mbq_len(q));
4055 nm_prdis(2, "%s %d bufs in queue", na->name, mbq_len(q));
4056 /* notify outside the lock */
4065 /* unconditionally wake up listeners */
4066 kring->nm_notify(kring, 0);
4067 /* this is normally netmap_notify(), but for nics
4068 * connected to a bridge it is netmap_bwrap_intr_notify(),
4069 * that possibly forwards the frames through the switch
4077 * Reset function to be called by the driver routines when reinitializing
4078 * a hardware ring. The driver is in charge of locking to protect the kring
4079 * while this operation is being performed. This is normally achieved by
4080 * calling netmap_disable_all_rings() before triggering a reset.
4081 * If the kring is not in netmap mode, return NULL to inform the caller
4082 * that this is the case.
4083 * If the kring is in netmap mode, set hwofs so that the netmap indices
4084 * seen by userspace (head/cur/tail) do not change, although the internal
4085 * NIC indices have been reset to 0.
4086 * In any case, adjust kring->nr_mode.
4088 struct netmap_slot *
4089 netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n,
4092 struct netmap_kring *kring;
4093 u_int new_hwtail, new_hwofs;
4095 if (!nm_native_on(na)) {
4096 nm_prdis("interface not in native netmap mode");
4097 return NULL; /* nothing to reinitialize */
4101 if (n >= na->num_tx_rings)
4103 kring = na->tx_rings[n];
4105 * Set hwofs to rhead, so that slots[rhead] is mapped to
4106 * the NIC internal slot 0, and thus the netmap buffer
4107 * at rhead is the next to be transmitted. Transmissions
4108 * that were pending before the reset are considered as
4109 * sent, so that we can have hwcur = rhead. All the slots
4110 * are now owned by the user, so we can also reinit hwtail.
4112 new_hwofs = kring->rhead;
4113 new_hwtail = nm_prev(kring->rhead, kring->nkr_num_slots - 1);
4115 if (n >= na->num_rx_rings)
4117 kring = na->rx_rings[n];
4119 * Set hwofs to hwtail, so that slots[hwtail] is mapped to
4120 * the NIC internal slot 0, and thus the netmap buffer
4121 * at hwtail is the next to be given to the NIC.
4122 * Unread slots (the ones in [rhead, hwtail)) are owned by
4123 * the user, and thus the caller cannot give them
4124 * to the NIC right now.
4126 new_hwofs = kring->nr_hwtail;
4127 new_hwtail = kring->nr_hwtail;
4129 if (kring->nr_pending_mode == NKR_NETMAP_OFF) {
4130 kring->nr_mode = NKR_NETMAP_OFF;
4133 if (netmap_verbose) {
4134 nm_prinf("%s, hc %u->%u, ht %u->%u, ho %u->%u", kring->name,
4135 kring->nr_hwcur, kring->rhead,
4136 kring->nr_hwtail, new_hwtail,
4137 kring->nkr_hwofs, new_hwofs);
4139 kring->nr_hwcur = kring->rhead;
4140 kring->nr_hwtail = new_hwtail;
4141 kring->nkr_hwofs = new_hwofs;
4144 * Wakeup on the individual and global selwait
4145 * We do the wakeup here, but the ring is not yet reconfigured.
4146 * However, we are under lock so there are no races.
4148 kring->nr_mode = NKR_NETMAP_ON;
4149 kring->nm_notify(kring, 0);
4150 return kring->ring->slot;
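/*
 * Driver-side sketch: a ring reinitialization path calls netmap_reset()
 * and skips normal mbuf allocation when the ring is in netmap mode
 * ('ring_nr' is the hardware ring index, an assumption of this sketch).
 */
#if 0	/* example only */
	struct netmap_slot *slot = netmap_reset(na, NR_RX, ring_nr, 0);

	if (slot != NULL) {
		/* netmap mode: buffers come from the netmap pool,
		 * so do not attach mbufs to the rx descriptors */
	}
#endif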
4155 * Dispatch rx/tx interrupts to the netmap rings.
4157 * "work_done" is non-null on the RX path, NULL for the TX path.
4158 * We rely on the OS to make sure that there is only one active
4159 * instance per queue, and that there is appropriate locking.
4161 * The 'notify' routine depends on what the ring is attached to.
4162 * - for a netmap file descriptor, do a selwakeup on the individual
4163 * waitqueue, plus one on the global one if needed
4164 * (see netmap_notify)
4165 * - for a nic connected to a switch, call the proper forwarding routine
4166 * (see netmap_bwrap_intr_notify)
4169 netmap_common_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
4171 struct netmap_kring *kring;
4172 enum txrx t = (work_done ? NR_RX : NR_TX);
4174 q &= NETMAP_RING_MASK;
4176 if (netmap_debug & (NM_DEBUG_RXINTR|NM_DEBUG_TXINTR)) {
4177 nm_prlim(5, "received %s queue %d", work_done ? "RX" : "TX" , q);
4180 if (q >= nma_get_nrings(na, t))
4181 return NM_IRQ_PASS; // not a physical queue
4183 kring = NMR(na, t)[q];
4185 if (kring->nr_mode == NKR_NETMAP_OFF) {
4190 kring->nr_kflags |= NKR_PENDINTR; // XXX atomic ?
4191 *work_done = 1; /* do not fire napi again */
4194 return kring->nm_notify(kring, 0);
4199 * Default functions to handle rx/tx interrupts from a physical device.
4200 * "work_done" is non-null on the RX path, NULL for the TX path.
4202 * If the card is not in netmap mode, simply return NM_IRQ_PASS,
4203 * so that the caller proceeds with regular processing.
4204 * Otherwise call netmap_common_irq().
4206 * If the card is connected to a netmap file descriptor,
4207 * do a selwakeup on the individual queue, plus one on the global one
4208 * if needed (multiqueue card _and_ there are multiqueue listeners),
4209 * and return NR_IRQ_COMPLETED.
4211 * Finally, if called on rx from an interface connected to a switch,
4212 * calls the proper forwarding routine.
4215 netmap_rx_irq(struct ifnet *ifp, u_int q, u_int *work_done)
4217 struct netmap_adapter *na = NA(ifp);
4220 * XXX emulated netmap mode sets NAF_SKIP_INTR so
4221 * we still use the regular driver even though the previous
4222 * check fails. It is unclear whether we should use
4223 * nm_native_on() here.
4225 if (!nm_netmap_on(na))
4228 if (na->na_flags & NAF_SKIP_INTR) {
4229 nm_prdis("use regular interrupt");
4233 return netmap_common_irq(na, q, work_done);
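/*
 * Driver-side sketch: an rx interrupt handler gives netmap the first
 * chance to service the queue ('que->id' is a hypothetical queue index)
 * and falls through to the regular path only when netmap is not active.
 */
#if 0	/* example only */
	u_int work_done = 0;

	if (netmap_rx_irq(ifp, que->id, &work_done) != NM_IRQ_PASS)
		return;		/* handled by netmap */
#endif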
4236 /* set/clear native flags and if_transmit/netdev_ops */
4238 nm_set_native_flags(struct netmap_adapter *na)
4240 struct ifnet *ifp = na->ifp;
4242 /* We do the setup for intercepting packets only if we are the
4243 * first user of this adapter. */
4244 if (na->active_fds > 0) {
4248 na->na_flags |= NAF_NETMAP_ON;
4250 nm_update_hostrings_mode(na);
4254 nm_clear_native_flags(struct netmap_adapter *na)
4256 struct ifnet *ifp = na->ifp;
4258 /* We undo the setup for intercepting packets only if we are the
4259 * last user of this adapter. */
4260 if (na->active_fds > 0) {
4264 nm_update_hostrings_mode(na);
4267 na->na_flags &= ~NAF_NETMAP_ON;
4271 netmap_krings_mode_commit(struct netmap_adapter *na, int onoff)
4278 for (i = 0; i < netmap_real_rings(na, t); i++) {
4279 struct netmap_kring *kring = NMR(na, t)[i];
4281 if (onoff && nm_kring_pending_on(kring))
4282 kring->nr_mode = NKR_NETMAP_ON;
4283 else if (!onoff && nm_kring_pending_off(kring))
4284 kring->nr_mode = NKR_NETMAP_OFF;
4290 * Module loader and unloader
4292 * netmap_init() creates the /dev/netmap device and initializes
4293 * all global variables. Returns 0 on success, errno on failure
4294 * (though in practice initialization cannot fail).
4296 * netmap_fini() destroys everything.
4299 static struct cdev *netmap_dev; /* /dev/netmap character device. */
4300 extern struct cdevsw netmap_cdevsw;
4307 destroy_dev(netmap_dev);
4308 /* we assume that there are no longer netmap users */
4310 netmap_uninit_bridges();
4313 nm_prinf("netmap: unloaded module.");
4324 error = netmap_mem_init();
4328 * MAKEDEV_ETERNAL_KLD avoids an expensive check on syscalls
4329 * when the module is compiled in.
4330 * XXX could use make_dev_credv() to get error number
4332 netmap_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD,
4333 &netmap_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600,
4338 error = netmap_init_bridges();
4343 nm_os_vi_init_index();
4346 error = nm_os_ifnet_init();
4350 nm_prinf("netmap: loaded module");
4354 return (EINVAL); /* may be incorrect */