2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2011-2014 Matteo Landi
5 * Copyright (C) 2011-2016 Luigi Rizzo
6 * Copyright (C) 2011-2016 Giuseppe Lettieri
7 * Copyright (C) 2011-2016 Vincenzo Maffione
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * This module supports memory mapped access to network devices,
39 * The module uses a large memory pool allocated by the kernel
40 * and accessible as mmapped memory by multiple userspace threads/processes.
41 * The memory pool contains packet buffers and "netmap rings",
42 * i.e. user-accessible copies of the interface's queues.
44 * Access to the network card works like this:
45 * 1. a process/thread issues one or more open() on /dev/netmap, to create
46 *    a select()able file descriptor on which events are reported.
47 * 2. on each descriptor, the process issues an ioctl() to identify
48 * the interface that should report events to the file descriptor.
49 * 3. on each descriptor, the process issues an mmap() request to
50 * map the shared memory region within the process' address space.
51 * The list of interesting queues is indicated by a location in
52 * the shared memory region.
53 * 4. using the functions in the netmap(4) userspace API, a process
54 * can look up the occupation state of a queue, access memory buffers,
55 * and retrieve received packets or enqueue packets to transmit.
56 * 5. using some ioctl()s the process can synchronize the userspace view
57 * of the queue with the actual status in the kernel. This includes both
58 * receiving the notification of new packets, and transmitting new
59 * packets on the output interface.
60 * 6. select() or poll() can be used to wait for events on individual
61 * transmit or receive queues (or all queues for a given interface).
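 *
 * A minimal usage sketch of steps 1-5 (illustrative only: error handling
 * is omitted and the legacy struct nmreq binding is assumed; recent
 * versions use struct nmreq_header instead):
 *
 *	int fd = open("/dev/netmap", O_RDWR);			// step 1
 *	struct nmreq req;
 *	bzero(&req, sizeof(req));
 *	req.nr_version = NETMAP_API;
 *	strlcpy(req.nr_name, "em0", sizeof(req.nr_name));
 *	ioctl(fd, NIOCREGIF, &req);				// step 2
 *	void *mem = mmap(NULL, req.nr_memsize,			// step 3
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);
 *	struct netmap_ring *txr = NETMAP_TXRING(nifp, 0);	// step 4
 *	ioctl(fd, NIOCTXSYNC, NULL);				// step 5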
64 SYNCHRONIZATION (USER)
66 The netmap rings and data structures may be shared among multiple
67 user threads or even independent processes.
68 Any synchronization among those threads/processes is delegated
69 to the threads themselves. Only one thread at a time can be in
70 a system call on the same netmap ring. The OS does not enforce
71 this and only guarantees against system crashes in case of
72 invalid usage.
74 LOCKING (INTERNAL)

76 Within the kernel, access to the netmap rings is protected as follows:
78 - a spinlock on each ring, to handle producer/consumer races on
79 RX rings attached to the host stack (against multiple host
80 threads writing from the host stack to the same ring),
81 and on 'destination' rings attached to a VALE switch
82 (i.e. RX rings in VALE ports, and TX rings in NIC/host ports)
83 protecting multiple active senders for the same destination.
85 - an atomic variable to guarantee that there is at most one
86 instance of *_*xsync() on the ring at any time.
87 For rings connected to user file
88 descriptors, an atomic_test_and_set() protects this, and the
89 lock on the ring is not actually used (see the sketch after this list).
90 For NIC RX rings connected to a VALE switch, an atomic_test_and_set()
91 is also used to prevent multiple executions (the driver might indeed
92 already guarantee this).
93 For NIC TX rings connected to a VALE switch, the lock arbitrates
94 access to the queue (both when allocating buffers and when pushing
95 them out).
97 - *xsync() should be protected against initializations of the card.
98 On FreeBSD most devices have the reset routine protected by
99 a RING lock (ixgbe, igb, em) or core lock (re). lem is missing
100 the RING protection on rx_reset(), this should be added.
102 On linux there is an external lock on the tx path, which probably
103 also arbitrates access to the reset routine. XXX to be revised
105 - a per-interface core_lock protecting access from the host stack
106 while interfaces may be detached from netmap mode.
107 XXX there should be no need for this lock if we detach the interfaces
108 only while they are down.
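A sketch of the atomic-variable guard mentioned in the list above
(simplified; the real helpers are nm_kr_tryget()/nm_kr_put() in
netmap_kern.h, and the field/macro names below follow them):

	if (NM_ATOMIC_TEST_AND_SET(&kring->nr_busy))
		return EBUSY;		// some *xsync() is already running
	// ... run kring->nm_sync() ...
	NM_ATOMIC_CLEAR(&kring->nr_busy);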
113 NMG_LOCK() serializes all modifications to switches and ports.
114 A switch cannot be deleted until all ports are gone.
116 For each switch, an SX lock (RWlock on linux) protects
117 deletion of ports. When configuring or deleting a new port, the
118 lock is acquired in exclusive mode (after holding NMG_LOCK).
119 When forwarding, the lock is acquired in shared mode (without NMG_LOCK).
120 The lock is held throughout the entire forwarding cycle,
121 during which the thread may incur a page fault.
122 Hence it is important that sleepable shared locks are used.
124 On the rx ring, the per-port lock is grabbed initially to reserve
125 a number of slots in the ring, then the lock is released,
126 packets are copied from source to destination, and then
127 the lock is acquired again and the receive ring is updated.
128 (A similar thing is done on the tx ring for NIC and host stack
129 ports attached to the switch)
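In pseudo-C, the two-phase scheme just described (illustrative only,
not the actual VALE forwarding code):

	lock(ring); base = reserve(ring, n); unlock(ring);
	// copy the n packets into slots base .. base+n-1;
	// this may sleep on a page fault, hence no lock is held here
	lock(ring); publish(ring, base, n); unlock(ring);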
134 /* --- internals ----
136 * Roadmap to the code that implements the above.
138 * > 1. a process/thread issues one or more open() on /dev/netmap, to create
139 * >    a select()able file descriptor on which events are reported.
141 * Internally, we allocate a netmap_priv_d structure, that will be
142 * initialized on ioctl(NIOCREGIF). There is one netmap_priv_d
143 * structure for each open().
146 * FreeBSD: see netmap_open() (netmap_freebsd.c)
147 * linux: see linux_netmap_open() (netmap_linux.c)
149 * > 2. on each descriptor, the process issues an ioctl() to identify
150 * > the interface that should report events to the file descriptor.
152 * Implemented by netmap_ioctl(), NIOCREGIF case, with nmr->nr_cmd==0.
153 * Most important things happen in netmap_get_na() and
154 * netmap_do_regif(), called from there. Additional details can be
155 * found in the comments above those functions.
157 * In all cases, this action creates/takes-a-reference-to a
158 * netmap_*_adapter describing the port, and allocates a netmap_if
159 * and all necessary netmap rings, filling them with netmap buffers.
161 * In this phase, the sync callbacks for each ring are set (these are used
162 * in steps 5 and 6 below). The callbacks depend on the type of adapter.
163 * The adapter creation/initialization code puts them in the
164 * netmap_adapter (fields na->nm_txsync and na->nm_rxsync). Then, they
165 * are copied from there to the netmap_kring's during netmap_do_regif(), by
166 * the nm_krings_create() callback. All the nm_krings_create callbacks
167 * actually call netmap_krings_create() to perform this and the other
168 * common stuff. netmap_krings_create() also takes care of the host rings,
169 * if needed, by setting their sync callbacks appropriately.
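 *
 * A condensed sketch of this plumbing (simplified from
 * netmap_krings_create() below; not extra driver code):
 *
 *	na->nm_txsync = DEVICE_netmap_txsync;	// set at attach time
 *	na->nm_rxsync = DEVICE_netmap_rxsync;
 *	// later, for each kring, during NIOCREGIF:
 *	kring->nm_sync = (t == NR_TX ? na->nm_txsync : na->nm_rxsync);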
171 * Additional actions depend on the kind of netmap_adapter that has been
174 * - netmap_hw_adapter: [netmap.c]
175 * This is a system netdev/ifp with native netmap support.
176 * The ifp is detached from the host stack by redirecting:
177 * - transmissions (from the network stack) to netmap_transmit()
178 * - receive notifications to the nm_notify() callback for
179 * this adapter. The callback is normally netmap_notify(), unless
180 * the ifp is attached to a bridge using bwrap, in which case it
181 * is netmap_bwrap_intr_notify().
183 * - netmap_generic_adapter: [netmap_generic.c]
184 * A system netdev/ifp without native netmap support.
186 * (the decision about native/non native support is taken in
187 * netmap_get_hw_na(), called by netmap_get_na())
189 * - netmap_vp_adapter [netmap_vale.c]
190 * Returned by netmap_get_bdg_na().
191 * This is a persistent or ephemeral VALE port. Ephemeral ports
192 * are created on the fly if they don't already exist, and are
193 * always attached to a bridge.
194 * Persistent VALE ports must be created separately, and
195 * then attached like normal NICs. The NIOCREGIF we are examining
196 * will find them only if they had previously been created and
197 * attached (see VALE_CTL below).
199 * - netmap_pipe_adapter [netmap_pipe.c]
200 * Returned by netmap_get_pipe_na().
201 * Both pipe ends are created, if they didn't already exist.
203 * - netmap_monitor_adapter [netmap_monitor.c]
204 * Returned by netmap_get_monitor_na().
205 * If successful, the nm_sync callbacks of the monitored adapter
206 * will be intercepted by the returned monitor.
208 * - netmap_bwrap_adapter [netmap_vale.c]
209 * Cannot be obtained in this way, see VALE_CTL below
213 * linux: we first go through linux_netmap_ioctl() to
214 * adapt the FreeBSD interface to the linux one.
217 * > 3. on each descriptor, the process issues an mmap() request to
218 * > map the shared memory region within the process' address space.
219 * > The list of interesting queues is indicated by a location in
220 * > the shared memory region.
223 * FreeBSD: netmap_mmap_single (netmap_freebsd.c).
224 * linux: linux_netmap_mmap (netmap_linux.c).
226 * > 4. using the functions in the netmap(4) userspace API, a process
227 * > can look up the occupation state of a queue, access memory buffers,
228 * > and retrieve received packets or enqueue packets to transmit.
230 * these actions do not involve the kernel.
232 * > 5. using some ioctl()s the process can synchronize the userspace view
233 * > of the queue with the actual status in the kernel. This includes both
234 * > receiving the notification of new packets, and transmitting new
235 * > packets on the output interface.
237 * These are implemented in netmap_ioctl(), NIOCTXSYNC and NIOCRXSYNC
238 * cases. They invoke the nm_sync callbacks on the netmap_kring
239 * structures, as initialized in step 2 and maybe later modified
240 * by a monitor. Monitors, however, will always call the original
241 * callback before doing anything else.
244 * > 6. select() or poll() can be used to wait for events on individual
245 * > transmit or receive queues (or all queues for a given interface).
247 * Implemented in netmap_poll(). This will call the same nm_sync()
248 * callbacks as in step 5 above.
251 * linux: we first go through linux_netmap_poll() to adapt
252 * the FreeBSD interface to the linux one.
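 *
 * A hedged userspace view of this step (error handling omitted):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *	poll(&pfd, 1, 2000);	// runs the same nm_sync() callbacks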
255 * ---- VALE_CTL -----
257 * VALE switches are controlled by issuing a NIOCREGIF with a non-null
258 * nr_cmd in the nmreq structure. These subcommands are handled by
259 * netmap_bdg_ctl() in netmap_vale.c. Persistent VALE ports are created
260 * and destroyed by issuing the NETMAP_BDG_NEWIF and NETMAP_BDG_DELIF
261 * subcommands, respectively.
263 * Any network interface known to the system (including a persistent VALE
264 * port) can be attached to a VALE switch by issuing the
265 * NETMAP_REQ_VALE_ATTACH command. After the attachment, persistent VALE ports
266 * look exactly like ephemeral VALE ports (as created in step 2 above). The
267 * attachment of other interfaces, instead, requires the creation of a
268 * netmap_bwrap_adapter. Moreover, the attached interface must be put in
269 * netmap mode. This may require the creation of a netmap_generic_adapter if
270 * we have no native support for the interface, or if generic adapters have
271 * been forced by sysctl.
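 *
 * A sketch of such an attach request with the nmreq_header API
 * (illustrative; error handling omitted, port names are examples):
 *
 *	struct nmreq_header hdr;
 *	struct nmreq_vale_attach req;
 *	bzero(&hdr, sizeof(hdr)); bzero(&req, sizeof(req));
 *	hdr.nr_version = NETMAP_API;
 *	hdr.nr_reqtype = NETMAP_REQ_VALE_ATTACH;
 *	strlcpy(hdr.nr_name, "vale0:em1", sizeof(hdr.nr_name));
 *	hdr.nr_body = (uintptr_t)&req;
 *	req.reg.nr_mode = NR_REG_ALL_NIC;
 *	ioctl(fd, NIOCCTRL, &hdr);	// em1 becomes a port of vale0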
273 * Both persistent VALE ports and bwraps are handled by netmap_get_bdg_na(),
274 * called by nm_bdg_ctl_attach(), and discriminated by the nm_bdg_attach()
275 * callback. In the case of the bwrap, the callback creates the
276 * netmap_bwrap_adapter. The initialization of the bwrap is then
277 * completed by calling netmap_do_regif() on it, in the nm_bdg_ctl()
278 * callback (netmap_bwrap_bdg_ctl in netmap_vale.c).
279 * A generic adapter for the wrapped ifp will be created if needed, when
280 * netmap_get_bdg_na() calls netmap_get_hw_na().
283 * ---- DATAPATHS -----
285 * -= SYSTEM DEVICE WITH NATIVE SUPPORT =-
287 * na == NA(ifp) == netmap_hw_adapter created in DEVICE_netmap_attach()
289 * - tx from netmap userspace:
291 * 1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
292 * kring->nm_sync() == DEVICE_netmap_txsync()
293 * 2) device interrupt handler
294 * na->nm_notify() == netmap_notify()
295 * - rx from netmap userspace:
297 * 1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
298 * kring->nm_sync() == DEVICE_netmap_rxsync()
299 * 2) device interrupt handler
300 * na->nm_notify() == netmap_notify()
301 * - rx from host stack
302 *     concurrently:
303 *         1) host stack
304 *                netmap_transmit()
305 *                    na->nm_notify == netmap_notify()
306 * 2) ioctl(NIOCRXSYNC)/netmap_poll() in process context
307 * kring->nm_sync() == netmap_rxsync_from_host
308 * netmap_rxsync_from_host(na, NULL, NULL)
309 * - tx to host stack
310 *        ioctl(NIOCTXSYNC)/netmap_poll() in process context
311 *            kring->nm_sync() == netmap_txsync_to_host
312 *                netmap_txsync_to_host(na)
313 *                    nm_os_send_up()
314 *                        FreeBSD: na->if_input() == ether_input()
315 * linux: netif_rx() with NM_MAGIC_PRIORITY_RX
318 * -= SYSTEM DEVICE WITH GENERIC SUPPORT =-
320 * na == NA(ifp) == generic_netmap_adapter created in generic_netmap_attach()
322 * - tx from netmap userspace:
324 * 1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
325 * kring->nm_sync() == generic_netmap_txsync()
326 * nm_os_generic_xmit_frame()
327 * linux: dev_queue_xmit() with NM_MAGIC_PRIORITY_TX
328 * ifp->ndo_start_xmit == generic_ndo_start_xmit()
329 * gna->save_start_xmit == orig. dev. start_xmit
330 * FreeBSD: na->if_transmit() == orig. dev if_transmit
331 * 2) generic_mbuf_destructor()
332 * na->nm_notify() == netmap_notify()
333 * - rx from netmap userspace:
334 * 1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
335 *            kring->nm_sync() == generic_netmap_rxsync()
336 *                    mbq_safe_dequeue()
337 *     2) device driver
338 *        generic_rx_handler()
339 *            mbq_safe_enqueue()
340 *            na->nm_notify() == netmap_notify()
341 * - rx from host stack
342 * FreeBSD: same as native
343 * Linux: same as native except:
344 *         1) host stack
345 *            dev_queue_xmit() without NM_MAGIC_PRIORITY_TX
346 *                ifp->ndo_start_xmit == generic_ndo_start_xmit()
347 *                    netmap_transmit()
348 *                        na->nm_notify() == netmap_notify()
349 * - tx to host stack (same as native):
352 *                           -= VALE =-
354 *   INCOMING:
356 *      - VALE ports:
357 *          ioctl(NIOCTXSYNC)/netmap_poll() in process context
358 *              kring->nm_sync() == netmap_vp_txsync()
360 *      - system device with native support:
361 *         from cable:
362 *             interrupt
363 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
364 *                    kring->nm_sync() == DEVICE_netmap_rxsync()
365 *                        netmap_vp_txsync()
366 *                            kring->nm_sync() == DEVICE_netmap_rxsync()
367 *         from host stack:
368 *             netmap_transmit()
369 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
370 *                    kring->nm_sync() == netmap_rxsync_from_host()
371 *                        netmap_vp_txsync()
373 * - system device with generic support:
374 * from device driver:
375 * generic_rx_handler()
376 * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
377 *                    kring->nm_sync() == generic_netmap_rxsync()
378 *                        netmap_vp_txsync()
379 *                            kring->nm_sync() == generic_netmap_rxsync()
380 *         from host stack:
381 *             netmap_transmit()
382 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
383 *                    kring->nm_sync() == netmap_rxsync_from_host()
384 *                        netmap_vp_txsync()
386 * (all cases) --> nm_bdg_flush()
387 *             dest_na->nm_notify() == (see below)
389 *   OUTGOING:
391 *      - VALE ports:
392 *         concurrently:
393 * 1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
394 * kring->nm_sync() == netmap_vp_rxsync()
395 * 2) from nm_bdg_flush()
396 * na->nm_notify() == netmap_notify()
398 *      - system device with native support:
399 *          to cable:
400 *             na->nm_notify() == netmap_bwrap_notify()
401 *                 netmap_vp_rxsync()
402 *                     kring->nm_sync() == DEVICE_netmap_txsync()
403 *                         netmap_vp_rxsync_locked()
404 *          to host stack:
405 *             netmap_vp_rxsync()
406 *                 kring->nm_sync() == netmap_txsync_to_host
407 *                     netmap_vp_rxsync_locked()
409 *      - system device with generic adapter:
410 *          to device driver:
411 *             na->nm_notify() == netmap_bwrap_notify()
412 *                 netmap_vp_rxsync()
413 *                     kring->nm_sync() == generic_netmap_txsync()
414 *                         netmap_vp_rxsync_locked()
415 *          to host stack:
416 *             netmap_vp_rxsync()
417 *                 kring->nm_sync() == netmap_txsync_to_host
418 *                     netmap_vp_rxsync_locked()
423 * OS-specific code that is used only within this file.
424 * Other OS-specific code that must be accessed by drivers
425 * is present in netmap_kern.h
428 #if defined(__FreeBSD__)
429 #include <sys/cdefs.h> /* prerequisite */
430 #include <sys/types.h>
431 #include <sys/errno.h>
432 #include <sys/param.h> /* defines used in kernel.h */
433 #include <sys/kernel.h> /* types used in module initialization */
434 #include <sys/conf.h> /* cdevsw struct, UID, GID */
435 #include <sys/filio.h> /* FIONBIO */
436 #include <sys/sockio.h>
437 #include <sys/socketvar.h> /* struct socket */
438 #include <sys/malloc.h>
439 #include <sys/poll.h>
440 #include <sys/proc.h>
441 #include <sys/rwlock.h>
442 #include <sys/socket.h> /* sockaddrs */
443 #include <sys/selinfo.h>
444 #include <sys/sysctl.h>
445 #include <sys/jail.h>
446 #include <sys/epoch.h>
447 #include <net/vnet.h>
448 #include <net/if.h>
449 #include <net/if_var.h>
450 #include <net/bpf.h> /* BIOCIMMEDIATE */
451 #include <machine/bus.h> /* bus_dmamap_* */
452 #include <sys/endian.h>
453 #include <sys/refcount.h>
454 #include <net/ethernet.h> /* ETHER_BPF_MTAP */
457 #elif defined(linux)

459 #include "bsd_glue.h"
461 #elif defined(__APPLE__)
463 #warning OSX support is only partial
464 #include "osx_glue.h"
466 #elif defined (_WIN32)
468 #include "win_glue.h"
470 #else

472 #error Unsupported platform
474 #endif /* unsupported */
479 #include <net/netmap.h>
480 #include <dev/netmap/netmap_kern.h>
481 #include <dev/netmap/netmap_mem2.h>
484 /* user-controlled variables */
485 int netmap_verbose;
486 #ifdef CONFIG_NETMAP_DEBUG
487 int netmap_debug;
488 #endif /* CONFIG_NETMAP_DEBUG */
490 static int netmap_no_timestamp; /* don't timestamp on rxsync */
491 int netmap_no_pendintr = 1;
492 int netmap_txsync_retry = 2;
493 static int netmap_fwd = 0; /* force transparent forwarding */
496 * netmap_admode selects the netmap mode to use.
497 * Invalid values are reset to NETMAP_ADMODE_BEST
499 enum { NETMAP_ADMODE_BEST = 0, /* use native, fallback to generic */
500 NETMAP_ADMODE_NATIVE, /* either native or none */
501 NETMAP_ADMODE_GENERIC, /* force generic */
502 NETMAP_ADMODE_LAST };
503 static int netmap_admode = NETMAP_ADMODE_BEST;
505 /* netmap_generic_mit controls mitigation of RX notifications for
506 * the generic netmap adapter. The value is a time interval in
507 * nanoseconds. */
508 int netmap_generic_mit = 100*1000;
510 /* We use by default netmap-aware qdiscs with generic netmap adapters,
511 * even if there can be a little performance hit with hardware NICs.
512 * However, using the qdisc is the safer approach, for two reasons:
513 * 1) it prevents non-fifo qdiscs from breaking the TX notification
514 *    scheme, which is based on mbuf destructors when txqdisc is
515 *    not used.
516 * 2) it makes it possible to transmit over software devices that
517 * change skb->dev, like bridge, veth, ...
519 * Anyway, users looking for the best performance should
520 * use native adapters.
523 int netmap_generic_txqdisc = 1;
526 /* Default number of slots and queues for generic adapters. */
527 int netmap_generic_ringsize = 1024;
528 int netmap_generic_rings = 1;
530 /* Non-zero to enable checksum offloading in NIC drivers */
531 int netmap_generic_hwcsum = 0;
533 /* Non-zero if ptnet devices are allowed to use virtio-net headers. */
534 int ptnet_vnet_hdr = 1;
537 * SYSCTL calls are grouped between SYSBEGIN and SYSEND to be emulated
538 * in some other operating systems
542 SYSCTL_DECL(_dev_netmap);
543 SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
544     "Netmap args");
545 SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
546 CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
547 #ifdef CONFIG_NETMAP_DEBUG
548 SYSCTL_INT(_dev_netmap, OID_AUTO, debug,
549 CTLFLAG_RW, &netmap_debug, 0, "Debug messages");
550 #endif /* CONFIG_NETMAP_DEBUG */
551 SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
552 CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp");
553 SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, CTLFLAG_RW, &netmap_no_pendintr,
554 0, "Always look for new received packets.");
555 SYSCTL_INT(_dev_netmap, OID_AUTO, txsync_retry, CTLFLAG_RW,
556 &netmap_txsync_retry, 0, "Number of txsync loops in bridge's flush.");
558 SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0,
559 "Force NR_FORWARD mode");
560 SYSCTL_INT(_dev_netmap, OID_AUTO, admode, CTLFLAG_RW, &netmap_admode, 0,
561 "Adapter mode. 0 selects the best option available,"
562 "1 forces native adapter, 2 forces emulated adapter");
563 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_hwcsum, CTLFLAG_RW, &netmap_generic_hwcsum,
564 0, "Hardware checksums. 0 to disable checksum generation by the NIC (default),"
565 "1 to enable checksum generation by the NIC");
566 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_mit, CTLFLAG_RW, &netmap_generic_mit,
567 0, "RX notification interval in nanoseconds");
568 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_ringsize, CTLFLAG_RW,
569 &netmap_generic_ringsize, 0,
570 "Number of per-ring slots for emulated netmap mode");
571 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_rings, CTLFLAG_RW,
572 &netmap_generic_rings, 0,
573 "Number of TX/RX queues for emulated netmap adapters");
575 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_txqdisc, CTLFLAG_RW,
576 &netmap_generic_txqdisc, 0, "Use qdisc for generic adapters");
578 SYSCTL_INT(_dev_netmap, OID_AUTO, ptnet_vnet_hdr, CTLFLAG_RW, &ptnet_vnet_hdr,
579 0, "Allow ptnet devices to use virtio-net headers");
583 NMG_LOCK_T netmap_global_lock;
586 * mark the ring as stopped, and run through the locks
587 * to make sure other users get to see it.
588 * stopped must be either NM_KR_STOPPED (for unbounded stop)
589 * or NM_KR_LOCKED (brief stop for mutual exclusion purposes)
592 netmap_disable_ring(struct netmap_kring *kr, int stopped)
594 nm_kr_stop(kr, stopped);
595 // XXX check if nm_kr_stop is sufficient
596 mtx_lock(&kr->q_lock);
597 mtx_unlock(&kr->q_lock);
601 /* stop or enable a single ring */
602 static void
603 netmap_set_ring(struct netmap_adapter *na, u_int ring_id, enum txrx t, int stopped)
604 {
605 	if (stopped)
606 		netmap_disable_ring(NMR(na, t)[ring_id], stopped);
607 	else
608 		NMR(na, t)[ring_id]->nkr_stopped = 0;
609 }
612 /* stop or enable all the rings of na */
614 netmap_set_all_rings(struct netmap_adapter *na, int stopped)
619 	if (!nm_netmap_on(na))
620 		return;
622 if (netmap_verbose) {
623 nm_prinf("%s: %sable all rings", na->name,
624 (stopped ? "dis" : "en"));
625 	}
626 	for_rx_tx(t) {
627 		for (i = 0; i < netmap_real_rings(na, t); i++) {
628 			netmap_set_ring(na, i, t, stopped);
629 		}
630 	}
631 }
634 * Convenience function used in drivers. Waits for current txsync()s/rxsync()s
635 * to finish and prevents any new one from starting. Call this before turning
636 * netmap mode off, or before removing the hardware rings (e.g., on module
637 * unload).
640 netmap_disable_all_rings(struct ifnet *ifp)
642 if (NM_NA_VALID(ifp)) {
643 netmap_set_all_rings(NA(ifp), NM_KR_LOCKED);
648 * Convenience function used in drivers. Re-enables rxsync and txsync on the
649 * adapter's rings. In linux drivers, this should be placed near each
650 * napi_enable().
653 netmap_enable_all_rings(struct ifnet *ifp)
655 if (NM_NA_VALID(ifp)) {
656 netmap_set_all_rings(NA(ifp), 0 /* enabled */);
661 netmap_make_zombie(struct ifnet *ifp)
663 if (NM_NA_VALID(ifp)) {
664 struct netmap_adapter *na = NA(ifp);
665 netmap_set_all_rings(na, NM_KR_LOCKED);
666 na->na_flags |= NAF_ZOMBIE;
667 netmap_set_all_rings(na, 0);
672 netmap_undo_zombie(struct ifnet *ifp)
674 if (NM_NA_VALID(ifp)) {
675 struct netmap_adapter *na = NA(ifp);
676 if (na->na_flags & NAF_ZOMBIE) {
677 netmap_set_all_rings(na, NM_KR_LOCKED);
678 na->na_flags &= ~NAF_ZOMBIE;
679 netmap_set_all_rings(na, 0);
685 * generic bound-checking function
686 */
687 u_int
688 nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg)
689 {
690 	u_int oldv = *v;
691 	const char *op = NULL;

693 	if (dflt < lo)
694 		dflt = lo;
695 	if (dflt > hi)
696 		dflt = hi;
697 	if (oldv < lo) {
698 		*v = dflt;
699 		op = "Bump";
700 	} else if (oldv > hi) {
701 		*v = dflt;
702 		op = "Clamp";
703 	}
704 	if (op && msg)
705 		nm_prinf("%s %s to %d (was %d)", op, msg, *v, oldv);
706 	return *v;
707 }
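/*
 * Typical use of nm_bound_var() (hypothetical bounds, shown only as
 * an example): clamp a user-settable parameter before trusting it,
 *
 *	nm_bound_var(&netmap_generic_ringsize, 1024, 64, 16384,
 *	    "generic_ringsize");
 */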
711 * packet-dump function, user-supplied or static buffer.
712 * The destination buffer must be at least 30+4*len
713 */
714 const char *
715 nm_dump_buf(char *p, int len, int lim, char *dst)
716 {
717 	static char _dst[8192];
718 	int i, j, i0;
719 	static char hex[] ="0123456789abcdef";
720 char *o; /* output position */
722 #define P_HI(x) hex[((x) & 0xf0)>>4]
723 #define P_LO(x) hex[((x) & 0xf)]
724 #define P_C(x) ((x) >= 0x20 && (x) <= 0x7e ? (x) : '.')
725 	if (!dst)
726 		dst = _dst;
727 	if (lim <= 0 || lim > len)
728 		lim = len;
729 	o = dst;
730 	sprintf(o, "buf 0x%p len %d lim %d\n", p, len, lim);
731 	o += strlen(o);
732 /* hexdump routine */
733 for (i = 0; i < lim; ) {
734 		sprintf(o, "%5d: ", i);
735 		o += strlen(o);
736 		memset(o, ' ', 48);
737 		i0 = i;
738 		for (j=0; j < 16 && i < lim; i++, j++) {
739 			o[j*3] = P_HI(p[i]);
740 			o[j*3+1] = P_LO(p[i]);
741 		}
742 		i = i0;
743 		for (j=0; j < 16 && i < lim; i++, j++)
744 			o[j + 48] = P_C(p[i]);
745 		o[j+48] = '\n';
746 		o += j+49;
747 	}
748 	*o = '\0';
749 	return dst;
750 }
757 * Fetch configuration from the device, to cope with dynamic
758 * reconfigurations after loading the module.
760 /* call with NMG_LOCK held */
762 netmap_update_config(struct netmap_adapter *na)
764 struct nm_config_info info;
766 bzero(&info, sizeof(info));
767 if (na->nm_config == NULL ||
768 na->nm_config(na, &info)) {
769 /* take whatever we had at init time */
770 info.num_tx_rings = na->num_tx_rings;
771 info.num_tx_descs = na->num_tx_desc;
772 info.num_rx_rings = na->num_rx_rings;
773 info.num_rx_descs = na->num_rx_desc;
774 info.rx_buf_maxsize = na->rx_buf_maxsize;
776 	}
777 	if (na->num_tx_rings == info.num_tx_rings &&
778 na->num_tx_desc == info.num_tx_descs &&
779 na->num_rx_rings == info.num_rx_rings &&
780 na->num_rx_desc == info.num_rx_descs &&
781 na->rx_buf_maxsize == info.rx_buf_maxsize)
782 return 0; /* nothing changed */
783 if (na->active_fds == 0) {
784 na->num_tx_rings = info.num_tx_rings;
785 na->num_tx_desc = info.num_tx_descs;
786 na->num_rx_rings = info.num_rx_rings;
787 na->num_rx_desc = info.num_rx_descs;
788 		na->rx_buf_maxsize = info.rx_buf_maxsize;
789 		if (netmap_verbose)
790 			nm_prinf("configuration changed for %s: txring %d x %d, "
791 "rxring %d x %d, rxbufsz %d",
792 na->name, na->num_tx_rings, na->num_tx_desc,
793 			    na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize);
794 		return 0;
795 	}
796 nm_prerr("WARNING: configuration changed for %s while active: "
797 "txring %d x %d, rxring %d x %d, rxbufsz %d",
798 na->name, info.num_tx_rings, info.num_tx_descs,
799 info.num_rx_rings, info.num_rx_descs,
800 	    info.rx_buf_maxsize);
801 	return 1;
802 }
804 /* nm_sync callbacks for the host rings */
805 static int netmap_txsync_to_host(struct netmap_kring *kring, int flags);
806 static int netmap_rxsync_from_host(struct netmap_kring *kring, int flags);
808 static int
809 netmap_default_bufcfg(struct netmap_kring *kring, uint64_t target)
810 {
811 	kring->hwbuf_len = target;
812 	kring->buf_align = 0; /* no alignment */
813 	return 0;
814 }
816 /* create the krings array and initialize the fields common to all adapters.
817 * The array layout is this:
819 *                    +----------+
820 * na->tx_rings ----->|          | \
821 *                    |          |  } na->num_tx_rings
822 *                    |          | /
823 *                    +----------+
824 *                    |          |    host tx kring
825 * na->rx_rings ----> +----------+
826 *                    |          | \
827 *                    |          |  } na->num_rx_rings
828 *                    |          | /
829 *                    +----------+
830 *                    |          |    host rx kring
831 *                    +----------+
832 * na->tailroom ----->|          | \
833 *                    |          |  } tailroom bytes
834 *                    |          | /
835 *                    +----------+
837 * Note: for compatibility, host krings are created even when not needed.
838 * The tailroom space is currently used by vale ports for allocating leases.
840 /* call with NMG_LOCK held */
841 int
842 netmap_krings_create(struct netmap_adapter *na, u_int tailroom)
843 {
844 	u_int i, len, ndesc;
845 	struct netmap_kring *kring;
846 	u_int n[NR_TXRX];
847 	enum txrx t;
848 	int err = 0;
850 if (na->tx_rings != NULL) {
851 if (netmap_debug & NM_DEBUG_ON)
852 nm_prerr("warning: krings were already created");
856 /* account for the (possibly fake) host rings */
857 n[NR_TX] = netmap_all_rings(na, NR_TX);
858 n[NR_RX] = netmap_all_rings(na, NR_RX);
860 len = (n[NR_TX] + n[NR_RX]) *
861 		(sizeof(struct netmap_kring) + sizeof(struct netmap_kring *))
862 		+ tailroom;
864 na->tx_rings = nm_os_malloc((size_t)len);
865 if (na->tx_rings == NULL) {
866 nm_prerr("Cannot allocate krings");
869 na->rx_rings = na->tx_rings + n[NR_TX];
870 na->tailroom = na->rx_rings + n[NR_RX];
872 /* link the krings in the krings array */
873 kring = (struct netmap_kring *)((char *)na->tailroom + tailroom);
874 for (i = 0; i < n[NR_TX] + n[NR_RX]; i++) {
875 		na->tx_rings[i] = kring;
876 		kring++;
877 	}
880 	 * All fields in krings are 0 except the ones initialized below,
881 	 * but better be explicit on important kring fields.
882 	 */
883 	for_rx_tx(t) {
884 		ndesc = nma_get_ndesc(na, t);
886 kring = NMR(na, t)[i];
887 bzero(kring, sizeof(*kring));
888 			kring->notify_na = na;
889 			kring->ring_id = i;
890 			kring->tx = t;
891 			kring->nkr_num_slots = ndesc;
892 kring->nr_mode = NKR_NETMAP_OFF;
893 kring->nr_pending_mode = NKR_NETMAP_OFF;
894 if (i < nma_get_nrings(na, t)) {
895 kring->nm_sync = (t == NR_TX ? na->nm_txsync : na->nm_rxsync);
896 kring->nm_bufcfg = na->nm_bufcfg;
897 if (kring->nm_bufcfg == NULL)
898 				kring->nm_bufcfg = netmap_default_bufcfg;
899 		} else {
900 			if (!(na->na_flags & NAF_HOST_RINGS))
901 kring->nr_kflags |= NKR_FAKERING;
902 kring->nm_sync = (t == NR_TX ?
903 netmap_txsync_to_host:
904 netmap_rxsync_from_host);
905 kring->nm_bufcfg = netmap_default_bufcfg;
907 kring->nm_notify = na->nm_notify;
908 kring->rhead = kring->rcur = kring->nr_hwcur = 0;
910 * IMPORTANT: Always keep one slot empty.
912 kring->rtail = kring->nr_hwtail = (t == NR_TX ? ndesc - 1 : 0);
913 		snprintf(kring->name, sizeof(kring->name) - 1, "%s %s%d", na->name,
914 		    nm_txrx2str(t), i);
915 nm_prdis("ktx %s h %d c %d t %d",
916 kring->name, kring->rhead, kring->rcur, kring->rtail);
917 		err = nm_os_selinfo_init(&kring->si, kring->name);
918 		if (err) {
919 			netmap_krings_delete(na);
920 			return err;
921 		}
922 mtx_init(&kring->q_lock, (t == NR_TX ? "nm_txq_lock" : "nm_rxq_lock"), NULL, MTX_DEF);
923 kring->na = na; /* setting this field marks the mutex as initialized */
925 		err = nm_os_selinfo_init(&na->si[t], na->name);
926 		if (err) {
927 			netmap_krings_delete(na);
928 			return err;
929 		}
930 	}

932 	return 0;
933 }
936 /* undo the actions performed by netmap_krings_create */
937 /* call with NMG_LOCK held */
939 netmap_krings_delete(struct netmap_adapter *na)
941 	struct netmap_kring **kring = na->tx_rings;
942 	enum txrx t;
944 if (na->tx_rings == NULL) {
945 if (netmap_debug & NM_DEBUG_ON)
946 nm_prerr("warning: krings were already deleted");
950 	for_rx_tx(t)
951 		nm_os_selinfo_uninit(&na->si[t]);
953 /* we rely on the krings layout described above */
954 for ( ; kring != na->tailroom; kring++) {
955 if ((*kring)->na != NULL)
956 mtx_destroy(&(*kring)->q_lock);
957 nm_os_selinfo_uninit(&(*kring)->si);
959 nm_os_free(na->tx_rings);
960 na->tx_rings = na->rx_rings = na->tailroom = NULL;
965 * Destructor for NIC ports. They also have an mbuf queue
966 * on the rings connected to the host so we need to purge
967 * them first.
968 */
969 /* call with NMG_LOCK held */
971 netmap_hw_krings_delete(struct netmap_adapter *na)
973 u_int lim = netmap_real_rings(na, NR_RX), i;
975 for (i = nma_get_nrings(na, NR_RX); i < lim; i++) {
976 struct mbq *q = &NMR(na, NR_RX)[i]->rx_queue;
977 nm_prdis("destroy sw mbq with len %d", mbq_len(q));
981 netmap_krings_delete(na);
985 netmap_mem_restore(struct netmap_adapter *na)
987 if (na->nm_mem_prev) {
988 netmap_mem_put(na->nm_mem);
989 na->nm_mem = na->nm_mem_prev;
990 na->nm_mem_prev = NULL;
995 netmap_mem_drop(struct netmap_adapter *na)
997 /* if the native allocator had been overridden on regif,
998 * restore it now and drop the temporary one
1000 if (netmap_mem_deref(na->nm_mem, na)) {
1001 netmap_mem_restore(na);
1006 netmap_update_hostrings_mode(struct netmap_adapter *na)
1009 struct netmap_kring *kring;
1013 for (i = nma_get_nrings(na, t);
1014 i < netmap_real_rings(na, t); i++) {
1015 kring = NMR(na, t)[i];
1016 kring->nr_mode = kring->nr_pending_mode;
1022 * Undo everything that was done in netmap_do_regif(). In particular,
1023 * call nm_register(ifp,0) to stop netmap mode on the interface and
1024 * revert to normal operation.
1026 /* call with NMG_LOCK held */
1027 static void netmap_unset_ringid(struct netmap_priv_d *);
1028 static void netmap_krings_put(struct netmap_priv_d *);
1030 netmap_do_unregif(struct netmap_priv_d *priv)
1032 struct netmap_adapter *na = priv->np_na;
1036 /* unset nr_pending_mode and possibly release exclusive mode */
1037 netmap_krings_put(priv);
1040 /* XXX check whether we have to do something with monitor
1041 * when rings change nr_mode. */
1042 if (na->active_fds <= 0) {
1043 /* walk through all the rings and tell any monitor
1044 * that the port is going to exit netmap mode
1046 netmap_monitor_stop(na);
1050 if (na->active_fds <= 0 || nm_kring_pending(priv)) {
1051 netmap_set_all_rings(na, NM_KR_LOCKED);
1052 na->nm_register(na, 0);
1053 netmap_set_all_rings(na, 0);
1056 /* delete rings and buffers that are no longer needed */
1057 netmap_mem_rings_delete(na);
1059 if (na->active_fds <= 0) { /* last instance */
1061 * (TO CHECK) We enter here
1062 * when the last reference to this file descriptor goes
1063 * away. This means we cannot have any pending poll()
1064 * or interrupt routine operating on the structure.
1065 * XXX The file may be closed in a thread while
1066 * another thread is using it.
1067 * Linux keeps the file opened until the last reference
1068 * by any outstanding ioctl/poll or mmap is gone.
1069 * FreeBSD does not track mmap()s (but we do) and
1070 * wakes up any sleeping poll(). Need to check what
1071 * happens if the close() occurs while a concurrent
1072 * syscall is running.
1074 if (netmap_debug & NM_DEBUG_ON)
1075 nm_prinf("deleting last instance for %s", na->name);
1077 if (nm_netmap_on(na)) {
1078 nm_prerr("BUG: netmap on while going to delete the krings");
1081 na->nm_krings_delete(na);
1083 /* restore the default number of host tx and rx rings */
1084 if (na->na_flags & NAF_HOST_RINGS) {
1085 na->num_host_tx_rings = 1;
1086 			na->num_host_rx_rings = 1;
1087 		} else {
1088 			na->num_host_tx_rings = 0;
1089 			na->num_host_rx_rings = 0;
1090 		}
1091 	}
1093 /* possibly decrement counter of tx_si/rx_si users */
1094 netmap_unset_ringid(priv);
1095 /* delete the nifp */
1096 netmap_mem_if_delete(na, priv->np_nifp);
1097 /* drop the allocator */
1098 netmap_mem_drop(na);
1099 /* mark the priv as unregistered */
1101 priv->np_nifp = NULL;
1104 struct netmap_priv_d*
1105 netmap_priv_new(void)
1107 struct netmap_priv_d *priv;
1109 	priv = nm_os_malloc(sizeof(struct netmap_priv_d));
1110 	if (priv == NULL)
1111 		return NULL;
1112 	priv->np_refs = 1;
1113 	nm_os_get_module();
1114 	return priv;
1115 }
1118 * Destructor of the netmap_priv_d, called when the fd is closed
1119 * Action: undo all the things done by NIOCREGIF,
1120 * On FreeBSD we need to track whether there are active mmap()s,
1121 * and we use np_active_mmaps for that. On linux, the field is always 0.
1122 * Return: 1 if we can free priv, 0 otherwise.
1125 /* call with NMG_LOCK held */
1127 netmap_priv_delete(struct netmap_priv_d *priv)
1129 struct netmap_adapter *na = priv->np_na;
1131 /* number of active references to this fd */
1132 	if (--priv->np_refs > 0) {
1133 		return 0;
1134 	}
1135 	nm_os_put_module();
1136 	if (na) {
1137 		netmap_do_unregif(priv);
1138 	}
1139 netmap_unget_na(na, priv->np_ifp);
1140 	bzero(priv, sizeof(*priv));	/* for safety */
1141 	nm_os_free(priv);
1142 	return 1;
1143 }
1145 /* call with NMG_LOCK *not* held */
1147 netmap_dtor(void *data)
1149 struct netmap_priv_d *priv = data;
1151 	NMG_LOCK();
1152 	netmap_priv_delete(priv);
1153 	NMG_UNLOCK();
1158 * Handlers for synchronization of the rings from/to the host stack.
1159 * These are associated to a network interface and are just another
1160 * ring pair managed by userspace.
1162 * Netmap also supports transparent forwarding (NS_FORWARD and NR_FORWARD
1165 * - Before releasing buffers on hw RX rings, the application can mark
1166 * them with the NS_FORWARD flag. During the next RXSYNC or poll(), they
1167 * will be forwarded to the host stack, similarly to what happened if
1168 * the application moved them to the host TX ring.
1170 * - Before releasing buffers on the host RX ring, the application can
1171 * mark them with the NS_FORWARD flag. During the next RXSYNC or poll(),
1172 * they will be forwarded to the hw TX rings, saving the application
1173 * from doing the same task in user-space.
1175 * Transparent forwarding can be enabled per-ring, by setting the NR_FORWARD
1176 * flag, or globally with the netmap_fwd sysctl.
1178 * The transfer NIC --> host is relatively easy, just encapsulate
1179 * into mbufs and we are done. The host --> NIC side is slightly
1180 * harder because there might not be room in the tx ring so it
1181 * might take a while before releasing the buffer.
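 *
 * A hedged userspace sketch of the NS_FORWARD path (ring index i is
 * illustrative):
 *
 *	struct netmap_slot *slot = &rxring->slot[i];
 *	slot->flags |= NS_FORWARD;	// hand this buffer to the other side
 *	rxring->head = rxring->cur = nm_ring_next(rxring, i);
 *	ioctl(fd, NIOCRXSYNC, NULL);	// forwarding happens during the sync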
1186 * Pass a whole queue of mbufs to the host stack as coming from 'dst'
1187 * We do not need to lock because the queue is private.
1188 * After this call the queue is empty.
1191 netmap_send_up(struct ifnet *dst, struct mbq *q)
1194 struct mbuf *head = NULL, *prev = NULL;
1195 #ifdef __FreeBSD__
1196 	struct epoch_tracker et;
1198 NET_EPOCH_ENTER(et);
1199 #endif /* __FreeBSD__ */
1200 /* Send packets up, outside the lock; head/prev machinery
1201 * is only useful for Windows. */
1202 while ((m = mbq_dequeue(q)) != NULL) {
1203 if (netmap_debug & NM_DEBUG_HOST)
1204 nm_prinf("sending up pkt %p size %d", m, MBUF_LEN(m));
1205 		prev = nm_os_send_up(dst, m, prev);
1206 		if (head == NULL)
1207 			head = prev;
1208 	}
1209 	if (head)
1210 		nm_os_send_up(dst, NULL, head);
1211 #ifdef __FreeBSD__
1212 	NET_EPOCH_EXIT(et);
1213 #endif /* __FreeBSD__ */
1214 	mbq_fini(q);
1215 }
1219 * Scan the buffers from hwcur to ring->head, and put a copy of those
1220 * marked NS_FORWARD (or all of them if forced) into a queue of mbufs.
1221 * Drop remaining packets in the unlikely event
1222 * of an mbuf shortage.
1225 netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force)
1227 u_int const lim = kring->nkr_num_slots - 1;
1228 u_int const head = kring->rhead;
1230 struct netmap_adapter *na = kring->na;
1232 for (n = kring->nr_hwcur; n != head; n = nm_next(n, lim)) {
1234 struct netmap_slot *slot = &kring->ring->slot[n];
1236 		if ((slot->flags & NS_FORWARD) == 0 && !force)
1237 			continue;
1238 		if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE(na)) {
1239 			nm_prlim(5, "bad pkt at %d len %d", n, slot->len);
1240 			continue;
1241 		}
1242 		slot->flags &= ~NS_FORWARD; // XXX needed ?
1243 /* XXX TODO: adapt to the case of a multisegment packet */
1244 		m = m_devget(NMB(na, slot), slot->len, 0, na->ifp, NULL);
1245 		if (m == NULL)
1246 			break;
1247 		mbq_enqueue(q, m);
1248 	}
1249 }
1253 _nm_may_forward(struct netmap_kring *kring)
1255 return ((netmap_fwd || kring->ring->flags & NR_FORWARD) &&
1256 kring->na->na_flags & NAF_HOST_RINGS &&
1257 kring->tx == NR_RX);
1261 nm_may_forward_up(struct netmap_kring *kring)
1263 return _nm_may_forward(kring) &&
1264 kring->ring_id != kring->na->num_rx_rings;
1268 nm_may_forward_down(struct netmap_kring *kring, int sync_flags)
1270 return _nm_may_forward(kring) &&
1271 (sync_flags & NAF_CAN_FORWARD_DOWN) &&
1272 kring->ring_id == kring->na->num_rx_rings;
1276 * Send to the NIC rings packets marked NS_FORWARD between
1277 * kring->nr_hwcur and kring->rhead.
1278 * Called under kring->rx_queue.lock on the sw rx ring.
1280 * It can only be called if the user opened all the TX hw rings,
1281 * see NAF_CAN_FORWARD_DOWN flag.
1282 * We can touch the TX netmap rings (slots, head and cur) since
1283 * we are in poll/ioctl system call context, and the application
1284 * is not supposed to touch the ring (using a different thread)
1285 * during the execution of the system call.
1288 netmap_sw_to_nic(struct netmap_adapter *na)
1290 struct netmap_kring *kring = na->rx_rings[na->num_rx_rings];
1291 struct netmap_slot *rxslot = kring->ring->slot;
1292 u_int i, rxcur = kring->nr_hwcur;
1293 u_int const head = kring->rhead;
1294 	u_int const src_lim = kring->nkr_num_slots - 1;
1295 	u_int sent = 0;
1297 /* scan rings to find space, then fill as much as possible */
1298 for (i = 0; i < na->num_tx_rings; i++) {
1299 struct netmap_kring *kdst = na->tx_rings[i];
1300 struct netmap_ring *rdst = kdst->ring;
1301 u_int const dst_lim = kdst->nkr_num_slots - 1;
1303 /* XXX do we trust ring or kring->rcur,rtail ? */
1304 for (; rxcur != head && !nm_ring_empty(rdst);
1305 rxcur = nm_next(rxcur, src_lim) ) {
1306 struct netmap_slot *src, *dst, tmp;
1307 u_int dst_head = rdst->head;
1309 src = &rxslot[rxcur];
1310 			if ((src->flags & NS_FORWARD) == 0 && !netmap_fwd)
1311 				continue;

1313 			sent++;

1315 			dst = &rdst->slot[dst_head];

1317 			tmp = *src;

1319 			src->buf_idx = dst->buf_idx;
1320 			src->flags = NS_BUF_CHANGED;

1322 			dst->buf_idx = tmp.buf_idx;
1323 			dst->len = tmp.len;
1324 			dst->flags = NS_BUF_CHANGED;
1326 			rdst->head = rdst->cur = nm_next(dst_head, dst_lim);
1327 		}
1328 		/* if (sent) XXX txsync ? it would be just an optimization */
1329 	}
1330 	return sent;
1331 }
1335 * netmap_txsync_to_host() passes packets up. We are called from a
1336 * system call in user process context, and the only contention
1337 * can be among multiple user threads erroneously calling
1338 * this routine concurrently.
1341 netmap_txsync_to_host(struct netmap_kring *kring, int flags)
1343 struct netmap_adapter *na = kring->na;
1344 u_int const lim = kring->nkr_num_slots - 1;
1345 	u_int const head = kring->rhead;
1346 	struct mbq q;

1348 	/* Take packets from hwcur to head and pass them up.
1349 	 * Force hwcur = head since netmap_grab_packets() stops at head
1350 	 */
1351 	mbq_init(&q);
1352 	netmap_grab_packets(kring, &q, 1 /* force */);
1353 nm_prdis("have %d pkts in queue", mbq_len(&q));
1354 kring->nr_hwcur = head;
1355 kring->nr_hwtail = head + lim;
1356 if (kring->nr_hwtail > lim)
1357 kring->nr_hwtail -= lim + 1;
1359 	netmap_send_up(na->ifp, &q);
1360 	return 0;
1361 }
1365 * rxsync backend for packets coming from the host stack.
1366 * They have been put in kring->rx_queue by netmap_transmit().
1367 * We protect access to the kring using kring->rx_queue.lock
1369 * also moves to the nic hw rings any packet the user has marked
1370 * for transparent-mode forwarding, then sets the NR_FORWARD
1371 * flag in the kring to let the caller push them out
1374 netmap_rxsync_from_host(struct netmap_kring *kring, int flags)
1376 struct netmap_adapter *na = kring->na;
1377 	struct netmap_ring *ring = kring->ring;
1378 	u_int nm_i, n;
1379 	u_int const lim = kring->nkr_num_slots - 1;
1380 	u_int const head = kring->rhead;
1381 	int ret = 0;
1382 	struct mbq *q = &kring->rx_queue, fq;
1384 mbq_init(&fq); /* fq holds packets to be freed */
1386 	mbq_lock(q);

1388 	/* First part: import newly received packets */
1389 	n = mbq_len(q);
1390 	if (n) { /* grab packets from the queue */
1391 		struct mbuf *m;
1392 		uint32_t stop_i;
1394 nm_i = kring->nr_hwtail;
1395 stop_i = nm_prev(kring->nr_hwcur, lim);
1396 while ( nm_i != stop_i && (m = mbq_dequeue(q)) != NULL ) {
1397 int len = MBUF_LEN(m);
1398 struct netmap_slot *slot = &ring->slot[nm_i];
1400 m_copydata(m, 0, len, NMB(na, slot));
1401 nm_prdis("nm %d len %d", nm_i, len);
1402 if (netmap_debug & NM_DEBUG_HOST)
1403 nm_prinf("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL));
1407 nm_i = nm_next(nm_i, lim);
1408 			mbq_enqueue(&fq, m);
1409 		}
1410 		kring->nr_hwtail = nm_i;
1411 	}
1414 * Second part: skip past packets that userspace has released.
1416 nm_i = kring->nr_hwcur;
1417 if (nm_i != head) { /* something was released */
1418 if (nm_may_forward_down(kring, flags)) {
1419 			ret = netmap_sw_to_nic(na);
1420 			if (ret > 0) {
1421 				kring->nr_kflags |= NR_FORWARD;
1422 				ret = 0;
1423 			}
1424 		}
1425 		kring->nr_hwcur = head;
1426 	}

1428 	mbq_unlock(q);

1430 	mbq_purge(&fq);
1431 	mbq_fini(&fq);
1432 	return ret;
1433 }
1437 /* Get a netmap adapter for the port.
1439 * If it is possible to satisfy the request, return 0
1440 * with *na containing the netmap adapter found.
1441 * Otherwise return an error code, with *na containing NULL.
1443 * When the port is attached to a bridge, we always return
1444 * EBUSY.
1445 * Otherwise, if the port is already bound to a file descriptor,
1446 * then we unconditionally return the existing adapter into *na.
1447 * In all the other cases, we return (into *na) either native,
1448 * generic or NULL, according to the following table:
1450 *                                        native support
1451 * active_fds   dev.netmap.admode         YES     NO
1452 * -------------------------------------------------------
1453 * >0 * NA(ifp) NA(ifp)
1455 * 0 NETMAP_ADMODE_BEST NATIVE GENERIC
1456 * 0 NETMAP_ADMODE_NATIVE NATIVE NULL
1457 * 0 NETMAP_ADMODE_GENERIC GENERIC GENERIC
1460 static void netmap_hw_dtor(struct netmap_adapter *); /* needed by NM_IS_NATIVE() */
1461 int
1462 netmap_get_hw_na(struct ifnet *ifp, struct netmap_mem_d *nmd, struct netmap_adapter **na)
1463 {
1464 /* generic support */
1465 int i = netmap_admode; /* Take a snapshot. */
1466 struct netmap_adapter *prev_na;
1469 *na = NULL; /* default */
1471 /* reset in case of invalid value */
1472 if (i < NETMAP_ADMODE_BEST || i >= NETMAP_ADMODE_LAST)
1473 i = netmap_admode = NETMAP_ADMODE_BEST;
1475 	if (NM_NA_VALID(ifp)) {
1476 		prev_na = NA(ifp);
1477 		/* If an adapter already exists, return it if
1478 * there are active file descriptors or if
1479 * netmap is not forced to use generic
1482 if (NETMAP_OWNED_BY_ANY(prev_na)
1483 || i != NETMAP_ADMODE_GENERIC
1484 || prev_na->na_flags & NAF_FORCE_NATIVE
1486 /* ugly, but we cannot allow an adapter switch
1487 * if some pipe is referring to this one
1489 		    || prev_na->na_next_pipe > 0
1490 		) {
1491 			*na = prev_na;
1492 			goto assign_mem;
1493 		}
1494 	}
1497 /* If there isn't native support and netmap is not allowed
1498 * to use generic adapters, we cannot satisfy the request.
1500 	if (!NM_IS_NATIVE(ifp) && i == NETMAP_ADMODE_NATIVE)
1501 		return EOPNOTSUPP;
1503 /* Otherwise, create a generic adapter and return it,
1504 * saving the previously used netmap adapter, if any.
1506 * Note that here 'prev_na', if not NULL, MUST be a
1507 * native adapter, and CANNOT be a generic one. This is
1508 * true because generic adapters are created on demand, and
1509 * destroyed when not used anymore. Therefore, if the adapter
1510 * currently attached to an interface 'ifp' is generic, it
1511 * must be that
1512 * (NA(ifp)->active_fds > 0 || NETMAP_OWNED_BY_KERN(NA(ifp))).
1513 * Consequently, if NA(ifp) is generic, we will enter one of
1514 * the branches above. This ensures that we never override
1515 * a generic adapter with another generic adapter.
1517 	error = generic_netmap_attach(ifp);
1518 	if (error)
1519 		return error;

1521 	*na = NA(ifp);

1523 assign_mem:
1524 	if (nmd != NULL && !((*na)->na_flags & NAF_MEM_OWNER) &&
1525 	    (*na)->active_fds == 0 && ((*na)->nm_mem != nmd)) {
1526 		(*na)->nm_mem_prev = (*na)->nm_mem;
1527 		(*na)->nm_mem = netmap_mem_get(nmd);
1528 	}

1530 	return 0;
1531 }
1534 * MUST BE CALLED UNDER NMG_LOCK()
1536 * Get a refcounted reference to a netmap adapter attached
1537 * to the interface specified by req.
1538 * This is always called in the execution of an ioctl().
1540 * Return ENXIO if the interface specified by the request does
1541 * not exist, ENOTSUP if netmap is not supported by the interface,
1542 * EBUSY if the interface is already attached to a bridge,
1543 * EINVAL if parameters are invalid, ENOMEM if needed resources
1544 * could not be allocated.
1545 * If successful, hold a reference to the netmap adapter.
1547 * If the interface specified by req is a system one, also keep
1548 * a reference to it and return a valid *ifp.
1551 netmap_get_na(struct nmreq_header *hdr,
1552 struct netmap_adapter **na, struct ifnet **ifp,
1553 struct netmap_mem_d *nmd, int create)
1555 struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
1557 struct netmap_adapter *ret = NULL;
1560 	*na = NULL;	/* default return value */
1561 	*ifp = NULL;

1563 	if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) {
1564 		return EINVAL;
1565 	}
1567 if (req->nr_mode == NR_REG_PIPE_MASTER ||
1568 req->nr_mode == NR_REG_PIPE_SLAVE) {
1569 /* Do not accept deprecated pipe modes. */
1570 nm_prerr("Deprecated pipe nr_mode, use xx{yy or xx}yy syntax");
1576 /* if the request contain a memid, try to find the
1577 * corresponding memory region
1579 if (nmd == NULL && req->nr_mem_id) {
1580 nmd = netmap_mem_find(req->nr_mem_id);
1583 		/* keep the reference */
1584 	}
1587 /* We cascade through all possible types of netmap adapter.
1588 * All netmap_get_*_na() functions return an error and an na,
1589 * with the following combinations:
1592 * 0 NULL type doesn't match
1593 * !0 NULL type matches, but na creation/lookup failed
1594 * 0 !NULL type matches and na created/found
1595 * !0 !NULL impossible
1597 error = netmap_get_null_na(hdr, na, nmd, create);
1598 	if (error || *na != NULL)
1599 		goto out;
1601 /* try to see if this is a monitor port */
1602 error = netmap_get_monitor_na(hdr, na, nmd, create);
1603 	if (error || *na != NULL)
1604 		goto out;
1606 /* try to see if this is a pipe port */
1607 error = netmap_get_pipe_na(hdr, na, nmd, create);
1608 	if (error || *na != NULL)
1609 		goto out;
1611 /* try to see if this is a vale port */
1612 	error = netmap_get_vale_na(hdr, na, nmd, create);
1613 	if (error)
1614 		goto out;

1616 	if (*na != NULL) /* valid match in netmap_get_bdg_na() */
1617 		goto out;
1620 * This must be a hardware na, lookup the name in the system.
1621 * Note that by hardware we actually mean "it shows up in ifconfig".
1622 * This may still be a tap, a veth/epair, or even a
1623 * persistent VALE port.
1625 	*ifp = ifunit_ref(hdr->nr_name);
1626 	if (*ifp == NULL) {
1627 		error = ENXIO;
1628 		goto out;
1629 	}

1631 	error = netmap_get_hw_na(*ifp, nmd, &ret);
1632 	if (error)
1633 		goto out;

1635 	*na = ret;
1636 	netmap_adapter_get(ret);
1639 * if the adapter supports the host rings and it is not already open,
1640 * try to set the number of host rings as requested by the user
1642 if (((*na)->na_flags & NAF_HOST_RINGS) && (*na)->active_fds == 0) {
1643 if (req->nr_host_tx_rings)
1644 (*na)->num_host_tx_rings = req->nr_host_tx_rings;
1645 if (req->nr_host_rx_rings)
1646 (*na)->num_host_rx_rings = req->nr_host_rx_rings;
1648 nm_prdis("%s: host tx %d rx %u", (*na)->name, (*na)->num_host_tx_rings,
1649 (*na)->num_host_rx_rings);
1654 netmap_adapter_put(ret);
1661 netmap_mem_put(nmd);
1666 /* undo netmap_get_na() */
1668 netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp)
1669 {
1670 	if (ifp != NULL)
1671 		if_rele(ifp);
1672 	if (na != NULL)
1673 		netmap_adapter_put(na);
1674 }
1677 #define NM_FAIL_ON(t) do { \
1678 if (unlikely(t)) { \
1679 		nm_prlim(5, "%s: fail '" #t "' " \
1680 			"h %d c %d t %d " \
1681 			"rh %d rc %d rt %d " \
1682 			"hc %d ht %d", \
1683 			kring->name, \
1684 			head, cur, ring->tail, \
1685 			kring->rhead, kring->rcur, kring->rtail, \
1686 			kring->nr_hwcur, kring->nr_hwtail); \
1687 		return kring->nkr_num_slots; \
1688 	} \
1689 } while (0)
1692 * validate parameters on entry for *_txsync()
1693 * Returns ring->cur if ok, or something >= kring->nkr_num_slots
1696 * rhead, rcur and rtail=hwtail are stored from previous round.
1697 * hwcur is the next packet to send to the ring.
1700 * hwcur <= *rhead <= head <= cur <= tail = *rtail <= hwtail
1702 * hwcur, rhead, rtail and hwtail are reliable
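 *
 * Worked example (illustrative numbers): with nkr_num_slots = 256,
 * hwcur = rhead = 10 and rtail = hwtail = 200, userspace may move
 * head and cur anywhere in [10..200], e.g. head = cur = 150;
 * head = 220 violates the invariant and fails the checks below,
 * which leads to a ring reinit.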
1705 nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
1707 u_int head = ring->head; /* read only once */
1708 u_int cur = ring->cur; /* read only once */
1709 u_int n = kring->nkr_num_slots;
1711 	nm_prdis(5, "%s kcur %d ktail %d head %d cur %d tail %d",
1712 		kring->name,
1713 kring->nr_hwcur, kring->nr_hwtail,
1714 ring->head, ring->cur, ring->tail);
1715 #if 1 /* kernel sanity checks; but we can trust the kring. */
1716 NM_FAIL_ON(kring->nr_hwcur >= n || kring->rhead >= n ||
1717 kring->rtail >= n || kring->nr_hwtail >= n);
1718 #endif /* kernel sanity checks */
1720 * user sanity checks. We only use head,
1721 * A, B, ... are possible positions for head:
1723 * 0 A rhead B rtail C n-1
1724 * 0 D rtail E rhead F n-1
1726 * B, F, D are valid. A, C, E are wrong
1728 if (kring->rtail >= kring->rhead) {
1729 /* want rhead <= head <= rtail */
1730 NM_FAIL_ON(head < kring->rhead || head > kring->rtail);
1731 /* and also head <= cur <= rtail */
1732 NM_FAIL_ON(cur < head || cur > kring->rtail);
1733 } else { /* here rtail < rhead */
1734 /* we need head outside rtail .. rhead */
1735 NM_FAIL_ON(head > kring->rtail && head < kring->rhead);
1737 /* two cases now: head <= rtail or head >= rhead */
1738 if (head <= kring->rtail) {
1739 /* want head <= cur <= rtail */
1740 NM_FAIL_ON(cur < head || cur > kring->rtail);
1741 } else { /* head >= rhead */
1742 /* cur must be outside rtail..head */
1743 NM_FAIL_ON(cur > kring->rtail && cur < head);
1746 if (ring->tail != kring->rtail) {
1747 nm_prlim(5, "%s tail overwritten was %d need %d", kring->name,
1748 ring->tail, kring->rtail);
1749 		ring->tail = kring->rtail;
1750 	}
1751 	kring->rhead = head;
1752 	kring->rcur = cur;
1753 	return head;
1754 }
1758 * validate parameters on entry for *_rxsync()
1759 * Returns ring->head if ok, kring->nkr_num_slots on error.
1761 * For a valid configuration,
1762 * hwcur <= head <= cur <= tail <= hwtail
1764 * We only consider head and cur.
1765 * hwcur and hwtail are reliable.
1769 nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
1771 	uint32_t const n = kring->nkr_num_slots;
1772 	uint32_t head, cur;
1774 	nm_prdis(5,"%s kc %d kt %d h %d c %d t %d",
1775 		kring->name,
1776 kring->nr_hwcur, kring->nr_hwtail,
1777 ring->head, ring->cur, ring->tail);
1779 * Before storing the new values, we should check they do not
1780 * move backwards. However:
1781 * - head is not an issue because the previous value is hwcur;
1782 * - cur could in principle go back, however it does not matter
1783 * because we are processing a brand new rxsync()
1785 cur = kring->rcur = ring->cur; /* read only once */
1786 head = kring->rhead = ring->head; /* read only once */
1787 #if 1 /* kernel sanity checks */
1788 NM_FAIL_ON(kring->nr_hwcur >= n || kring->nr_hwtail >= n);
1789 #endif /* kernel sanity checks */
1790 /* user sanity checks */
1791 if (kring->nr_hwtail >= kring->nr_hwcur) {
1792 /* want hwcur <= rhead <= hwtail */
1793 NM_FAIL_ON(head < kring->nr_hwcur || head > kring->nr_hwtail);
1794 /* and also rhead <= rcur <= hwtail */
1795 NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
1797 /* we need rhead outside hwtail..hwcur */
1798 NM_FAIL_ON(head < kring->nr_hwcur && head > kring->nr_hwtail);
1799 /* two cases now: head <= hwtail or head >= hwcur */
1800 if (head <= kring->nr_hwtail) {
1801 /* want head <= cur <= hwtail */
1802 NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
1804 /* cur must be outside hwtail..head */
1805 NM_FAIL_ON(cur < head && cur > kring->nr_hwtail);
1808 if (ring->tail != kring->rtail) {
1809 		nm_prlim(5, "%s tail overwritten was %d need %d",
1810 			kring->name,
1811 			ring->tail, kring->rtail);
1812 		ring->tail = kring->rtail;
1813 	}
1814 	return head;
1815 }
1819 * Error routine called when txsync/rxsync detects an error.
1820 * Can't do much more than resetting head = cur = hwcur, tail = hwtail
1821 * Return 1 on reinit.
1823 * This routine is only called by the upper half of the kernel.
1824 * It only reads hwcur (which is changed only by the upper half, too)
1825 * and hwtail (which may be changed by the lower half, but only on
1826 * a tx ring and only to increase it, so any error will be recovered
1827 * on the next call). For the above, we don't strictly need to call
1828 * it under lock.
1829 */
1830 int
1831 netmap_ring_reinit(struct netmap_kring *kring)
1833 struct netmap_ring *ring = kring->ring;
1834 	u_int i, lim = kring->nkr_num_slots - 1;
1835 	int errors = 0;
1837 // XXX KASSERT nm_kr_tryget
1838 nm_prlim(10, "called for %s", kring->name);
1839 // XXX probably wrong to trust userspace
1840 kring->rhead = ring->head;
1841 kring->rcur = ring->cur;
1842 kring->rtail = ring->tail;
1844 	if (ring->cur > lim)
1845 		errors++;
1846 	if (ring->head > lim)
1847 		errors++;
1848 	if (ring->tail > lim)
1849 		errors++;
1850 for (i = 0; i <= lim; i++) {
1851 u_int idx = ring->slot[i].buf_idx;
1852 u_int len = ring->slot[i].len;
1853 if (idx < 2 || idx >= kring->na->na_lut.objtotal) {
1854 nm_prlim(5, "bad index at slot %d idx %d len %d ", i, idx, len);
1855 ring->slot[i].buf_idx = 0;
1856 ring->slot[i].len = 0;
1857 } else if (len > NETMAP_BUF_SIZE(kring->na)) {
1858 ring->slot[i].len = 0;
1859 			nm_prlim(5, "bad len at slot %d idx %d len %d", i, idx, len);
1860 		}
1861 	}
1862 	if (errors) {
1863 		nm_prlim(10, "total %d errors", errors);
1864 		nm_prlim(10, "%s reinit, cur %d -> %d tail %d -> %d",
1865 			kring->name,
1866 			ring->cur, kring->nr_hwcur,
1867 			ring->tail, kring->nr_hwtail);
1867 ring->tail, kring->nr_hwtail);
1868 ring->head = kring->rhead = kring->nr_hwcur;
1869 ring->cur = kring->rcur = kring->nr_hwcur;
1870 ring->tail = kring->rtail = kring->nr_hwtail;
1871 	}
1872 	return (errors ? 1 : 0);
1873 }
1875 /* interpret the ringid and flags fields of an nmreq, by translating them
1876 * into a pair of intervals of ring indices:
1878 * [priv->np_txqfirst, priv->np_txqlast) and
1879 * [priv->np_rxqfirst, priv->np_rxqlast)
1883 netmap_interp_ringid(struct netmap_priv_d *priv, struct nmreq_header *hdr)
1885 struct netmap_adapter *na = priv->np_na;
1886 struct nmreq_register *reg = (struct nmreq_register *)hdr->nr_body;
1887 int excluded_direction[] = { NR_TX_RINGS_ONLY, NR_RX_RINGS_ONLY };
1890 u_int nr_flags = reg->nr_flags, nr_mode = reg->nr_mode,
1891 nr_ringid = reg->nr_ringid;
1893 	for_rx_tx(t) {
1894 		if (nr_flags & excluded_direction[t]) {
1895 			priv->np_qfirst[t] = priv->np_qlast[t] = 0;
1896 			continue;
1897 		}
1898 		switch (nr_mode) {
1899 		case NR_REG_ALL_NIC:
1901 priv->np_qfirst[t] = 0;
1902 priv->np_qlast[t] = nma_get_nrings(na, t);
1903 nm_prdis("ALL/PIPE: %s %d %d", nm_txrx2str(t),
1904 			    priv->np_qfirst[t], priv->np_qlast[t]);
1905 			break;
1906 		case NR_REG_SW:
1907 		case NR_REG_NIC_SW:
1908 			if (!(na->na_flags & NAF_HOST_RINGS)) {
1909 				nm_prerr("host rings not supported");
1910 				return EINVAL;
1911 			}
1912 priv->np_qfirst[t] = (nr_mode == NR_REG_SW ?
1913 nma_get_nrings(na, t) : 0);
1914 priv->np_qlast[t] = netmap_all_rings(na, t);
1915 nm_prdis("%s: %s %d %d", nr_mode == NR_REG_SW ? "SW" : "NIC+SW",
1917 			    priv->np_qfirst[t], priv->np_qlast[t]);
1918 			break;
1919 		case NR_REG_ONE_NIC:
1920 if (nr_ringid >= na->num_tx_rings &&
1921 nr_ringid >= na->num_rx_rings) {
1922 nm_prerr("invalid ring id %d", nr_ringid);
1925 /* if not enough rings, use the first one */
1927 if (j >= nma_get_nrings(na, t))
1929 priv->np_qfirst[t] = j;
1930 priv->np_qlast[t] = j + 1;
1931 nm_prdis("ONE_NIC: %s %d %d", nm_txrx2str(t),
1932 			    priv->np_qfirst[t], priv->np_qlast[t]);
1933 			break;
1934 		case NR_REG_ONE_SW:
1935 			if (!(na->na_flags & NAF_HOST_RINGS)) {
1936 				nm_prerr("host rings not supported");
1937 				return EINVAL;
1938 			}
1939 if (nr_ringid >= na->num_host_tx_rings &&
1940 nr_ringid >= na->num_host_rx_rings) {
1941 nm_prerr("invalid ring id %d", nr_ringid);
1944 /* if not enough rings, use the first one */
1946 if (j >= nma_get_host_nrings(na, t))
1948 priv->np_qfirst[t] = nma_get_nrings(na, t) + j;
1949 priv->np_qlast[t] = nma_get_nrings(na, t) + j + 1;
1950 nm_prdis("ONE_SW: %s %d %d", nm_txrx2str(t),
1951 			    priv->np_qfirst[t], priv->np_qlast[t]);
1952 			break;
1953 		default:
1954 			nm_prerr("invalid regif type %d", nr_mode);
1955 			return EINVAL;
1956 		}
1957 	}
1958 	priv->np_flags = nr_flags;
1960 /* Allow transparent forwarding mode in the host --> nic
1961 * direction only if all the TX hw rings have been opened. */
1962 if (priv->np_qfirst[NR_TX] == 0 &&
1963 priv->np_qlast[NR_TX] >= na->num_tx_rings) {
1964 		priv->np_sync_flags |= NAF_CAN_FORWARD_DOWN;
1965 	}
1967 if (netmap_verbose) {
1968 nm_prinf("%s: tx [%d,%d) rx [%d,%d) id %d",
1970 priv->np_qfirst[NR_TX],
1971 priv->np_qlast[NR_TX],
1972 priv->np_qfirst[NR_RX],
1973 priv->np_qlast[NR_RX],
1981 * Set the ring ID. For devices with a single queue, a request
1982 * for all rings is the same as a single ring.
1985 netmap_set_ringid(struct netmap_priv_d *priv, struct nmreq_header *hdr)
1987 struct netmap_adapter *na = priv->np_na;
1988 struct nmreq_register *reg = (struct nmreq_register *)hdr->nr_body;
1992 error = netmap_interp_ringid(priv, hdr);
1997 priv->np_txpoll = (reg->nr_flags & NR_NO_TX_POLL) ? 0 : 1;
1999 /* optimization: count the users registered for more than
2000 * one ring, which are the ones sleeping on the global queue.
2001 * The default netmap_notify() callback will then
2002 * avoid signaling the global queue if nobody is using it
2005 if (nm_si_user(priv, t))
2012 netmap_unset_ringid(struct netmap_priv_d *priv)
2014 struct netmap_adapter *na = priv->np_na;
2018 if (nm_si_user(priv, t))
2020 priv->np_qfirst[t] = priv->np_qlast[t] = 0;
2023 priv->np_txpoll = 0;
2024 priv->np_kloop_state = 0;
2027 #define within_sel(p_, t_, i_) \
2028 ((i_) < (p_)->np_qlast[(t_)])
2029 #define nonempty_sel(p_, t_) \
2030 (within_sel((p_), (t_), (p_)->np_qfirst[(t_)]))
2031 #define foreach_selected_ring(p_, t_, i_, kring_) \
2032 for ((t_) = nonempty_sel((p_), NR_RX) ? NR_RX : NR_TX, \
2033 (i_) = (p_)->np_qfirst[(t_)]; \
2035 ((t_) == NR_TX && within_sel((p_), (t_), (i_)))) && \
2036 ((kring_) = NMR((p_)->np_na, (t_))[(i_)]); \
2037 (i_) = within_sel((p_), (t_), (i_) + 1) ? (i_) + 1 : \
2038 (++(t_) < NR_TXRX ? (p_)->np_qfirst[(t_)] : (i_)))
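/*
 * Usage sketch for the iterator above (netmap_krings_get() below uses
 * exactly this pattern):
 *
 *	enum txrx t;
 *	u_int i;
 *	struct netmap_kring *kring;
 *
 *	foreach_selected_ring(priv, t, i, kring) {
 *		... operate on each selected tx and rx kring ...
 *	}
 */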
2041 /* Set the nr_pending_mode for the requested rings.
2042 * If requested, also try to get exclusive access to the rings, provided
2043 * the rings we want to bind are not exclusively owned by a previous bind.
2046 netmap_krings_get(struct netmap_priv_d *priv)
2048 struct netmap_adapter *na = priv->np_na;
2050 struct netmap_kring *kring;
2051 int excl = (priv->np_flags & NR_EXCLUSIVE);
2054 if (netmap_debug & NM_DEBUG_ON)
2055 nm_prinf("%s: grabbing tx [%d, %d) rx [%d, %d)",
2057 priv->np_qfirst[NR_TX],
2058 priv->np_qlast[NR_TX],
2059 priv->np_qfirst[NR_RX],
2060 priv->np_qlast[NR_RX]);
2062 /* first round: check that none of the requested rings
2063 * is already exclusively owned, and that we are not
2064 * requesting exclusive ownership of rings that are already in use
2066 foreach_selected_ring(priv, t, i, kring) {
2067 if ((kring->nr_kflags & NKR_EXCLUSIVE) ||
2068 (kring->users && excl))
2070 nm_prdis("ring %s busy", kring->name);
2075 /* second round: increment usage count (possibly marking them
2076 * as exclusive) and set the nr_pending_mode
2078 foreach_selected_ring(priv, t, i, kring) {
2081 kring->nr_kflags |= NKR_EXCLUSIVE;
2082 kring->nr_pending_mode = NKR_NETMAP_ON;
2089 /* Undo netmap_krings_get(). This is done by clearing the exclusive mode
2090 * if it was asked on regif, and by unsetting nr_pending_mode if we
2091 * are the last users of the involved rings. */
2093 netmap_krings_put(struct netmap_priv_d *priv)
2096 struct netmap_kring *kring;
2097 int excl = (priv->np_flags & NR_EXCLUSIVE);
2100 nm_prdis("%s: releasing tx [%d, %d) rx [%d, %d)",
2102 priv->np_qfirst[NR_TX],
2103 priv->np_qlast[NR_TX],
2104 priv->np_qfirst[NR_RX],
2105 priv->np_qlast[NR_RX]);
2107 foreach_selected_ring(priv, t, i, kring) {
2109 kring->nr_kflags &= ~NKR_EXCLUSIVE;
2111 if (kring->users == 0)
2112 kring->nr_pending_mode = NKR_NETMAP_OFF;
2117 nm_priv_rx_enabled(struct netmap_priv_d *priv)
2119 return (priv->np_qfirst[NR_RX] != priv->np_qlast[NR_RX]);
2122 /* Validate the CSB entries for both directions (atok and ktoa).
2123 * To be called under NMG_LOCK(). */
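/* Layout sketch (as implied by the initialization loop below): each
 * array holds one entry per bound ring, TX rings first, then RX:
 *
 *	csb_atok: [ TX0 | TX1 | ... | RX0 | RX1 | ... ]
 *	csb_ktoa: [ TX0 | TX1 | ... | RX0 | RX1 | ... ]
 */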
2125 netmap_csb_validate(struct netmap_priv_d *priv, struct nmreq_opt_csb *csbo)
2127 struct nm_csb_atok *csb_atok_base =
2128 (struct nm_csb_atok *)(uintptr_t)csbo->csb_atok;
2129 struct nm_csb_ktoa *csb_ktoa_base =
2130 (struct nm_csb_ktoa *)(uintptr_t)csbo->csb_ktoa;
2132 int num_rings[NR_TXRX], tot_rings;
2133 size_t entry_size[2];
2137 if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) {
2138 nm_prerr("Cannot update CSB while kloop is running");
2144 num_rings[t] = priv->np_qlast[t] - priv->np_qfirst[t];
2145 tot_rings += num_rings[t];
2150 if (!(priv->np_flags & NR_EXCLUSIVE)) {
2151 nm_prerr("CSB mode requires NR_EXCLUSIVE");
2155 entry_size[0] = sizeof(*csb_atok_base);
2156 entry_size[1] = sizeof(*csb_ktoa_base);
2157 csb_start[0] = (void *)csb_atok_base;
2158 csb_start[1] = (void *)csb_ktoa_base;
2160 for (i = 0; i < 2; i++) {
2161 /* On Linux we could use access_ok() to simplify
2162 * the validation. However, the advantage of
2163 * this approach is that it works also on FreeBSD. */
2165 size_t csb_size = tot_rings * entry_size[i];
2169 if ((uintptr_t)csb_start[i] & (entry_size[i]-1)) {
2170 nm_prerr("Unaligned CSB address");
2174 tmp = nm_os_malloc(csb_size);
2178 /* Application --> kernel direction. */
2179 err = copyin(csb_start[i], tmp, csb_size);
2181 /* Kernel --> application direction. */
2182 memset(tmp, 0, csb_size);
2183 err = copyout(tmp, csb_start[i], csb_size);
2187 nm_prerr("Invalid CSB address");
2192 priv->np_csb_atok_base = csb_atok_base;
2193 priv->np_csb_ktoa_base = csb_ktoa_base;
2195 /* Initialize the CSB. */
2197 for (i = 0; i < num_rings[t]; i++) {
2198 struct netmap_kring *kring =
2199 NMR(priv->np_na, t)[i + priv->np_qfirst[t]];
2200 struct nm_csb_atok *csb_atok = csb_atok_base + i;
2201 struct nm_csb_ktoa *csb_ktoa = csb_ktoa_base + i;
2204 csb_atok += num_rings[NR_TX];
2205 csb_ktoa += num_rings[NR_TX];
2208 CSB_WRITE(csb_atok, head, kring->rhead);
2209 CSB_WRITE(csb_atok, cur, kring->rcur);
2210 CSB_WRITE(csb_atok, appl_need_kick, 1);
2211 CSB_WRITE(csb_atok, sync_flags, 1);
2212 CSB_WRITE(csb_ktoa, hwcur, kring->nr_hwcur);
2213 CSB_WRITE(csb_ktoa, hwtail, kring->nr_hwtail);
2214 CSB_WRITE(csb_ktoa, kern_need_kick, 1);
2216 nm_prinf("csb_init for kring %s: head %u, cur %u, "
2217 "hwcur %u, hwtail %u", kring->name,
2218 kring->rhead, kring->rcur, kring->nr_hwcur,
2226 /* Ensure that the netmap adapter can support the given MTU.
2227 * @return EINVAL if the na cannot be set to mtu, 0 otherwise.
2230 netmap_buf_size_validate(const struct netmap_adapter *na, unsigned mtu) {
2231 unsigned nbs = NETMAP_BUF_SIZE(na);
2233 if (mtu <= na->rx_buf_maxsize) {
2234 /* The MTU fits a single NIC slot. We only
2235 * need to check that netmap buffers are
2236 * large enough to hold an MTU. NS_MOREFRAG
2237 * cannot be used in this case. */
2239 nm_prerr("error: netmap buf size (%u) "
2240 "< device MTU (%u)", nbs, mtu);
2244 /* More NIC slots may be needed to receive
2245 * or transmit a single packet. Check that
2246 * the adapter supports NS_MOREFRAG and that
2247 * netmap buffers are large enough to hold
2248 * the maximum per-slot size. */
2249 if (!(na->na_flags & NAF_MOREFRAG)) {
2250 nm_prerr("error: large MTU (%d) needed "
2251 "but %s does not support "
2255 } else if (nbs < na->rx_buf_maxsize) {
2256 nm_prerr("error: using NS_MOREFRAG on "
2257 "%s requires netmap buf size "
2258 ">= %u", na->ifp->if_xname,
2259 na->rx_buf_maxsize);
2262 nm_prinf("info: netmap application on "
2263 "%s needs to support "
2265 "(MTU=%u,netmap_buf_size=%u)",
2266 na->ifp->if_xname, mtu, nbs);
2272 /* Handle the offset option, if present in the hdr.
2273 * Returns 0 on success, or an error.
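 *
 * Worked example (illustrative): nro_offset_bits == 12 gives
 * mask = (1ULL << 12) - 1 = 0xfff, so every offset must fit in the
 * low 12 bits and nro_max_offset cannot exceed 4095.
 */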
2276 netmap_offsets_init(struct netmap_priv_d *priv, struct nmreq_header *hdr)
2278 struct nmreq_opt_offsets *opt;
2279 struct netmap_adapter *na = priv->np_na;
2280 struct netmap_kring *kring;
2281 uint64_t mask = 0, bits = 0, maxbits = sizeof(uint64_t) * 8,
2282 max_offset = 0, initial_offset = 0, min_gap = 0;
2287 opt = (struct nmreq_opt_offsets *)
2288 nmreq_getoption(hdr, NETMAP_REQ_OPT_OFFSETS);
2292 if (!(na->na_flags & NAF_OFFSETS)) {
2294 nm_prerr("%s does not support offsets",
2300 /* check sanity of the opt values */
2301 max_offset = opt->nro_max_offset;
2302 min_gap = opt->nro_min_gap;
2303 initial_offset = opt->nro_initial_offset;
2304 bits = opt->nro_offset_bits;
2306 if (bits > maxbits) {
2308 nm_prerr("bits: %llu too large (max %llu)",
2309 (unsigned long long)bits,
2310 (unsigned long long)maxbits);
2314 /* we take bits == 0 as a request to use the entire field */
2315 if (bits == 0 || bits == maxbits) {
2316 /* shifting a type by sizeof(type) is undefined */
2318 mask = 0xffffffffffffffff;
2320 mask = (1ULL << bits) - 1;
2322 if (max_offset > NETMAP_BUF_SIZE(na)) {
2324 nm_prerr("max offset %llu > buf size %u",
2325 (unsigned long long)max_offset, NETMAP_BUF_SIZE(na));
2329 if ((max_offset & mask) != max_offset) {
2331 nm_prerr("max offset %llu to large for %llu bits",
2332 (unsigned long long)max_offset,
2333 (unsigned long long)bits);
2337 if (initial_offset > max_offset) {
2339 nm_prerr("initial offset %llu > max offset %llu",
2340 (unsigned long long)initial_offset,
2341 (unsigned long long)max_offset);
2346 /* initialize the kring and ring fields. */
2347 foreach_selected_ring(priv, t, i, kring) {
2349 struct netmap_ring *ring = kring->ring;
2352 /* if the ring is already in use we check that the
2353 * new request is compatible with the existing one
2355 if (kring->offset_mask) {
2356 if ((kring->offset_mask & mask) != mask ||
2357 kring->offset_max < max_offset) {
2359 nm_prinf("%s: cannot increase"
2360 "offset mask and/or max"
2361 "(current: mask=%llx,max=%llu",
2363 (unsigned long long)kring->offset_mask,
2364 (unsigned long long)kring->offset_max);
2368 mask = kring->offset_mask;
2369 max_offset = kring->offset_max;
2371 kring->offset_mask = mask;
2372 *(uint64_t *)(uintptr_t)&ring->offset_mask = mask;
2373 kring->offset_max = max_offset;
2374 kring->offset_gap = min_gap;
2377 /* if there is an initial offset, put it into all the slots.
2380 * Note: we cannot change the offsets if the
2381 * ring is already in use.
2383 if (!initial_offset || kring->users > 1)
2386 for (j = 0; j < kring->nkr_num_slots; j++) {
2387 struct netmap_slot *slot = ring->slot + j;
2389 nm_write_offset(kring, slot, initial_offset);
2394 opt->nro_opt.nro_status = error;
2396 opt->nro_max_offset = max_offset;
2403 /* set the hardware buffer length in each one of the newly opened rings
2404 * (hwbuf_len field in the kring struct). The purpose is to select
2405 * the maximum supported input buffer length that will not cause writes
2406 * outside of the available space, even when offsets are in use.
2409 netmap_compute_buf_len(struct netmap_priv_d *priv)
2413 struct netmap_kring *kring;
2416 struct netmap_adapter *na = priv->np_na;
2419 foreach_selected_ring(priv, t, i, kring) {
2420 /* rings that are already active have their hwbuf_len
2421 * already set and we cannot change it.
2423 if (kring->users > 1)
2426 /* For netmap buffers which are not shared among several ring
2427 * slots (the normal case), the available space is the buf size
2428 * minus the max offset declared by the user at open time. If
2429 * the user plans to have several slots pointing to different
2430 * offsets into the same large buffer, she must also declare a
2431 * "minimum gap" between two such consecutive offsets. In this
2432 * case the user-declared 'offset_gap' is taken as the
2433 * available space and offset_max is ignored.
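 *
 * Worked example (illustrative): with a 2048-byte netmap buffer and
 * offset_max == 512, the target is 2048 - 512 = 1536 bytes; if the
 * user also declared offset_gap == 1024, the target drops to 1024.
 */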
2436 /* start with the normal case (unshared buffers) */
2437 target = NETMAP_BUF_SIZE(kring->na) -
2439 /* if offset_gap is zero, the user does not intend to use
2440 * shared buffers. In this case the minimum gap between
2441 * two consecutive offsets into the same buffer can be
2442 * assumed to be equal to the buffer size. In this way
2443 * offset_gap always contains the available space ignoring
2444 * offset_max. This may be used by drivers of NICs that
2445 * are guaranteed to never write more than MTU bytes, even
2446 * if the input buffer is larger: if the MTU is less
2447 * than the target they can set hwbuf_len to offset_gap.
2449 if (!kring->offset_gap)
2451 NETMAP_BUF_SIZE(kring->na);
2453 if (kring->offset_gap < target)
2454 target = kring->offset_gap;
2455 error = kring->nm_bufcfg(kring, target);
2459 *(uint64_t *)(uintptr_t)&kring->ring->buf_align = kring->buf_align;
2461 if (mtu && t == NR_RX && kring->hwbuf_len < mtu) {
2462 if (!(na->na_flags & NAF_MOREFRAG)) {
2463 nm_prerr("error: large MTU (%d) needed "
2464 "but %s does not support "
2470 nm_prinf("info: netmap application on "
2471 "%s needs to support "
2473 "(MTU=%u,buf_size=%llu)",
2475 (unsigned long long)kring->hwbuf_len);
2484 * possibly move the interface to netmap-mode.
2485 * On success it returns a pointer to the netmap_if, otherwise NULL.
2486 * This must be called with NMG_LOCK held.
2488 * The following na callbacks are called in the process:
2490 * na->nm_config() [by netmap_update_config]
2491 * (get current number and size of rings)
2493 * We have a generic one for linux (netmap_linux_config).
2494 * The bwrap has to override this, since it has to forward
2495 * the request to the wrapped adapter (netmap_bwrap_config).
2498 * na->nm_krings_create()
2499 * (create and init the krings array)
2501 * One of the following:
2503 * * netmap_hw_krings_create, (hw ports)
2504 * creates the standard layout for the krings
2505 * and adds the mbq (used for the host rings).
2507 * * netmap_vp_krings_create (VALE ports)
2508 * add leases and scratchpads
2510 * * netmap_pipe_krings_create (pipes)
2511 * create the krings and rings of both ends and cross-link them.
2514 * * netmap_monitor_krings_create (monitors)
2515 * avoid allocating the mbq
2517 * * netmap_bwrap_krings_create (bwraps)
2518 * create both the bwrap krings array,
2519 * the krings array of the wrapped adapter, and
2520 * (if needed) the fake array for the host adapter
2522 * na->nm_register(, 1)
2523 * (put the adapter in netmap mode)
2525 * This may be one of the following:
2527 * * netmap_hw_reg (hw ports)
2528 * checks that the ifp is still there, then calls
2529 * the hardware specific callback;
2531 * * netmap_vp_reg (VALE ports)
2532 * If the port is connected to a bridge,
2533 * set the NAF_NETMAP_ON flag under the
2534 * bridge write lock.
2536 * * netmap_pipe_reg (pipes)
2537 * inform the other pipe end that it is no
2538 * longer responsible for the lifetime of this pipe end.
2541 * * netmap_monitor_reg (monitors)
2542 * intercept the sync callbacks of the monitored
2545 * * netmap_bwrap_reg (bwraps)
2546 * cross-link the bwrap and hwna rings,
2547 * forward the request to the hwna, override
2548 * the hwna notify callback (to get the frames
2549 * coming from outside go through the bridge).
2554 netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
2555 struct nmreq_header *hdr)
2557 struct netmap_if *nifp = NULL;
2561 priv->np_na = na; /* store the reference */
2562 error = netmap_mem_finalize(na->nm_mem, na);
2566 if (na->active_fds == 0) {
2568 /* cache the allocator info in the na */
2569 error = netmap_mem_get_lut(na->nm_mem, &na->na_lut);
2572 nm_prdis("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal,
2573 na->na_lut.objsize);
2575 /* ring configuration may have changed, fetch from the card */
2576 netmap_update_config(na);
2579 /* compute the range of tx and rx rings to monitor */
2580 error = netmap_set_ringid(priv, hdr);
2584 if (na->active_fds == 0) {
2586 * If this is the first registration of the adapter,
2587 * perform sanity checks and create the in-kernel view
2588 * of the netmap rings (the netmap krings).
2590 if (na->ifp && nm_priv_rx_enabled(priv)) {
2591 /* This netmap adapter is attached to an ifnet. */
2592 unsigned mtu = nm_os_ifnet_mtu(na->ifp);
2594 nm_prdis("%s: mtu %d rx_buf_maxsize %d netmap_buf_size %d",
2595 na->name, mtu, na->rx_buf_maxsize, NETMAP_BUF_SIZE(na));
2597 if (na->rx_buf_maxsize == 0) {
2598 nm_prerr("%s: error: rx_buf_maxsize == 0", na->name);
2603 error = netmap_buf_size_validate(na, mtu);
2609 * Depending on the adapter, this may also create
2610 * the netmap rings themselves
2612 error = na->nm_krings_create(na);
2618 /* now the krings must exist and we can check whether some
2619 * previous bind has exclusive ownership on them, and set
2622 error = netmap_krings_get(priv);
2624 goto err_del_krings;
2626 /* create all needed missing netmap rings */
2627 error = netmap_mem_rings_create(na);
2631 /* initialize offsets if requested */
2632 error = netmap_offsets_init(priv, hdr);
2636 /* compute and validate the buf lengths */
2637 error = netmap_compute_buf_len(priv);
2641 /* in all cases, create a new netmap if */
2642 nifp = netmap_mem_if_new(na, priv);
2648 if (nm_kring_pending(priv)) {
2649 /* Some kring is switching mode, tell the adapter to
2651 netmap_set_all_rings(na, NM_KR_LOCKED);
2652 error = na->nm_register(na, 1);
2653 netmap_set_all_rings(na, 0);
2658 /* Commit the reference. */
2662 * advertise that the interface is ready by setting np_nifp.
2663 * The barrier is needed because readers (poll, *SYNC and mmap)
2664 * check for priv->np_nifp != NULL without locking
2666 mb(); /* make sure previous writes are visible to all CPUs */
2667 priv->np_nifp = nifp;
2672 netmap_mem_if_delete(na, nifp);
2674 netmap_krings_put(priv);
2675 netmap_mem_rings_delete(na);
2677 if (na->active_fds == 0)
2678 na->nm_krings_delete(na);
2680 if (na->active_fds == 0)
2681 memset(&na->na_lut, 0, sizeof(na->na_lut));
2683 netmap_mem_drop(na);
2691 * update kring and ring at the end of rxsync/txsync.
2694 nm_sync_finalize(struct netmap_kring *kring)
2697 * Update ring tail to what the kernel knows
2698 * After txsync: head/rhead/hwcur might be behind cur/rcur
2701 kring->ring->tail = kring->rtail = kring->nr_hwtail;
2703 nm_prdis(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
2704 kring->name, kring->nr_hwcur, kring->nr_hwtail,
2705 kring->rhead, kring->rcur, kring->rtail);
2708 /* set ring timestamp */
2710 ring_timestamp_set(struct netmap_ring *ring)
2712 if (netmap_no_timestamp == 0 || ring->flags & NR_TIMESTAMP) {
2713 microtime(&ring->ts);
2717 static int nmreq_copyin(struct nmreq_header *, int);
2718 static int nmreq_copyout(struct nmreq_header *, int);
2719 static int nmreq_checkoptions(struct nmreq_header *);
2722 * ioctl(2) support for the "netmap" device.
2724 * Following a list of accepted commands:
2725 * - NIOCCTRL device control API
2726 * - NIOCTXSYNC sync TX rings
2727 * - NIOCRXSYNC sync RX rings
2728 * - SIOCGIFADDR just for convenience
2729 * - NIOCGINFO deprecated (legacy API)
2730 * - NIOCREGIF deprecated (legacy API)
2732 * Return 0 on success, errno otherwise.
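 *
 * Userspace sketch (illustrative only, error handling abbreviated):
 * issuing a NETMAP_REQ_REGISTER through NIOCCTRL to bind all the
 * hw rings of "em0":
 *
 *	struct nmreq_header hdr;
 *	struct nmreq_register reg;
 *	int fd = open("/dev/netmap", O_RDWR);
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	memset(&reg, 0, sizeof(reg));
 *	hdr.nr_version = NETMAP_API;
 *	hdr.nr_reqtype = NETMAP_REQ_REGISTER;
 *	strlcpy(hdr.nr_name, "em0", sizeof(hdr.nr_name));
 *	hdr.nr_body = (uintptr_t)&reg;
 *	reg.nr_mode = NR_REG_ALL_NIC;
 *	if (ioctl(fd, NIOCCTRL, &hdr) < 0)
 *		err(1, "NIOCCTRL");
 */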
2735 netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
2736 struct thread *td, int nr_body_is_user)
2738 struct mbq q; /* packets from RX hw queues to host stack */
2739 struct netmap_adapter *na = NULL;
2740 struct netmap_mem_d *nmd = NULL;
2741 struct ifnet *ifp = NULL;
2743 u_int i, qfirst, qlast;
2744 struct netmap_kring **krings;
2750 struct nmreq_header *hdr = (struct nmreq_header *)data;
2752 if (hdr->nr_version < NETMAP_MIN_API ||
2753 hdr->nr_version > NETMAP_MAX_API) {
2754 nm_prerr("API mismatch: got %d need %d",
2755 hdr->nr_version, NETMAP_API);
2759 /* Make a kernel-space copy of the user-space nr_body.
2760 * For convenience, the nr_body pointer and the pointers
2761 * in the options list will be replaced with their
2762 * kernel-space counterparts. The original pointers are
2763 * saved internally and later restored by nmreq_copyout
2765 error = nmreq_copyin(hdr, nr_body_is_user);
2770 /* Sanitize hdr->nr_name. */
2771 hdr->nr_name[sizeof(hdr->nr_name) - 1] = '\0';
2773 switch (hdr->nr_reqtype) {
2774 case NETMAP_REQ_REGISTER: {
2775 struct nmreq_register *req =
2776 (struct nmreq_register *)(uintptr_t)hdr->nr_body;
2777 struct netmap_if *nifp;
2779 /* Protect access to priv from concurrent requests. */
2782 struct nmreq_option *opt;
2785 if (priv->np_nifp != NULL) { /* thread already registered */
2791 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_EXTMEM);
2793 struct nmreq_opt_extmem *e =
2794 (struct nmreq_opt_extmem *)opt;
2796 nmd = netmap_mem_ext_create(e->nro_usrptr,
2797 &e->nro_info, &error);
2798 opt->nro_status = error;
2802 #endif /* WITH_EXTMEM */
2804 if (nmd == NULL && req->nr_mem_id) {
2805 /* find the allocator and get a reference */
2806 nmd = netmap_mem_find(req->nr_mem_id);
2808 if (netmap_verbose) {
2809 nm_prerr("%s: failed to find mem_id %u",
2810 hdr->nr_name, req->nr_mem_id);
2816 /* find the interface and a reference */
2817 error = netmap_get_na(hdr, &na, &ifp, nmd,
2818 1 /* create */); /* keep reference */
2821 if (NETMAP_OWNED_BY_KERN(na)) {
2826 if (na->virt_hdr_len && !(req->nr_flags & NR_ACCEPT_VNET_HDR)) {
2827 nm_prerr("virt_hdr_len=%d, but application does "
2828 "not accept it", na->virt_hdr_len);
2833 error = netmap_do_regif(priv, na, hdr);
2834 if (error) { /* reg. failed, release priv and ref */
2838 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
2840 struct nmreq_opt_csb *csbo =
2841 (struct nmreq_opt_csb *)opt;
2842 error = netmap_csb_validate(priv, csbo);
2843 opt->nro_status = error;
2845 netmap_do_unregif(priv);
2850 nifp = priv->np_nifp;
2852 /* return the offset of the netmap_if object */
2853 req->nr_rx_rings = na->num_rx_rings;
2854 req->nr_tx_rings = na->num_tx_rings;
2855 req->nr_rx_slots = na->num_rx_desc;
2856 req->nr_tx_slots = na->num_tx_desc;
2857 req->nr_host_tx_rings = na->num_host_tx_rings;
2858 req->nr_host_rx_rings = na->num_host_rx_rings;
2859 error = netmap_mem_get_info(na->nm_mem, &req->nr_memsize, &memflags,
2862 netmap_do_unregif(priv);
2865 if (memflags & NETMAP_MEM_PRIVATE) {
2866 *(uint32_t *)(uintptr_t)&nifp->ni_flags |= NI_PRIV_MEM;
2869 priv->np_si[t] = nm_si_user(priv, t) ?
2870 &na->si[t] : &NMR(na, t)[priv->np_qfirst[t]]->si;
2873 if (req->nr_extra_bufs) {
2875 nm_prinf("requested %d extra buffers",
2876 req->nr_extra_bufs);
2877 req->nr_extra_bufs = netmap_extra_alloc(na,
2878 &nifp->ni_bufs_head, req->nr_extra_bufs);
2880 nm_prinf("got %d extra buffers", req->nr_extra_bufs);
2882 nifp->ni_bufs_head = 0;
2884 req->nr_offset = netmap_mem_if_offset(na->nm_mem, nifp);
2886 error = nmreq_checkoptions(hdr);
2888 netmap_do_unregif(priv);
2892 /* store ifp reference so that priv destructor may release it */
2896 netmap_unget_na(na, ifp);
2898 /* release the reference from netmap_mem_find() or
2899 * netmap_mem_ext_create()
2902 netmap_mem_put(nmd);
2907 case NETMAP_REQ_PORT_INFO_GET: {
2908 struct nmreq_port_info_get *req =
2909 (struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body;
2916 if (hdr->nr_name[0] != '\0') {
2917 /* Build a nmreq_register out of the nmreq_port_info_get,
2918 * so that we can call netmap_get_na(). */
2919 struct nmreq_register regreq;
2920 bzero(&regreq, sizeof(regreq));
2921 regreq.nr_mode = NR_REG_ALL_NIC;
2922 regreq.nr_tx_slots = req->nr_tx_slots;
2923 regreq.nr_rx_slots = req->nr_rx_slots;
2924 regreq.nr_tx_rings = req->nr_tx_rings;
2925 regreq.nr_rx_rings = req->nr_rx_rings;
2926 regreq.nr_host_tx_rings = req->nr_host_tx_rings;
2927 regreq.nr_host_rx_rings = req->nr_host_rx_rings;
2928 regreq.nr_mem_id = req->nr_mem_id;
2930 /* get a refcount */
2931 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2932 hdr->nr_body = (uintptr_t)&regreq;
2933 error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
2934 hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET; /* reset type */
2935 hdr->nr_body = (uintptr_t)req; /* reset nr_body */
2941 nmd = na->nm_mem; /* get memory allocator */
2943 nmd = netmap_mem_find(req->nr_mem_id ? req->nr_mem_id : 1);
2946 nm_prerr("%s: failed to find mem_id %u",
2948 req->nr_mem_id ? req->nr_mem_id : 1);
2955 error = netmap_mem_get_info(nmd, &req->nr_memsize, &memflags,
2959 if (na == NULL) /* only memory info */
2961 netmap_update_config(na);
2962 req->nr_rx_rings = na->num_rx_rings;
2963 req->nr_tx_rings = na->num_tx_rings;
2964 req->nr_rx_slots = na->num_rx_desc;
2965 req->nr_tx_slots = na->num_tx_desc;
2966 req->nr_host_tx_rings = na->num_host_tx_rings;
2967 req->nr_host_rx_rings = na->num_host_rx_rings;
2969 netmap_unget_na(na, ifp);
2971 netmap_mem_put(nmd);
2976 case NETMAP_REQ_VALE_ATTACH: {
2977 error = netmap_bdg_attach(hdr, NULL /* userspace request */);
2981 case NETMAP_REQ_VALE_DETACH: {
2982 error = netmap_bdg_detach(hdr, NULL /* userspace request */);
2986 case NETMAP_REQ_PORT_HDR_SET: {
2987 struct nmreq_port_hdr *req =
2988 (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2989 /* Build a nmreq_register out of the nmreq_port_hdr,
2990 * so that we can call netmap_get_bdg_na(). */
2991 struct nmreq_register regreq;
2992 bzero(&regreq, sizeof(regreq));
2993 regreq.nr_mode = NR_REG_ALL_NIC;
2995 /* For now we only support virtio-net headers, and only for
2996 * VALE ports, but this may change in the future. Valid lengths
2997 * for the virtio-net header are 0 (no header), 10 and 12. */
2998 if (req->nr_hdr_len != 0 &&
2999 req->nr_hdr_len != sizeof(struct nm_vnet_hdr) &&
3000 req->nr_hdr_len != 12) {
3002 nm_prerr("invalid hdr_len %u", req->nr_hdr_len);
3007 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
3008 hdr->nr_body = (uintptr_t)&regreq;
3009 error = netmap_get_vale_na(hdr, &na, NULL, 0);
3010 hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_SET;
3011 hdr->nr_body = (uintptr_t)req;
3013 struct netmap_vp_adapter *vpna =
3014 (struct netmap_vp_adapter *)na;
3015 na->virt_hdr_len = req->nr_hdr_len;
3016 if (na->virt_hdr_len) {
3017 vpna->mfs = NETMAP_BUF_SIZE(na);
3020 nm_prinf("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na);
3021 netmap_adapter_put(na);
3029 case NETMAP_REQ_PORT_HDR_GET: {
3030 /* Get vnet-header length for this netmap port */
3031 struct nmreq_port_hdr *req =
3032 (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
3033 /* Build a nmreq_register out of the nmreq_port_hdr,
3034 * so that we can call netmap_get_bdg_na(). */
3035 struct nmreq_register regreq;
3038 bzero(&regreq, sizeof(regreq));
3039 regreq.nr_mode = NR_REG_ALL_NIC;
3041 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
3042 hdr->nr_body = (uintptr_t)&regreq;
3043 error = netmap_get_na(hdr, &na, &ifp, NULL, 0);
3044 hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_GET;
3045 hdr->nr_body = (uintptr_t)req;
3047 req->nr_hdr_len = na->virt_hdr_len;
3049 netmap_unget_na(na, ifp);
3054 case NETMAP_REQ_VALE_LIST: {
3055 error = netmap_vale_list(hdr);
3059 case NETMAP_REQ_VALE_NEWIF: {
3060 error = nm_vi_create(hdr);
3064 case NETMAP_REQ_VALE_DELIF: {
3065 error = nm_vi_destroy(hdr->nr_name);
3068 #endif /* WITH_VALE */
3070 case NETMAP_REQ_VALE_POLLING_ENABLE:
3071 case NETMAP_REQ_VALE_POLLING_DISABLE: {
3072 error = nm_bdg_polling(hdr);
3075 case NETMAP_REQ_POOLS_INFO_GET: {
3076 /* Get information from the memory allocator used for
3078 struct nmreq_pools_info *req =
3079 (struct nmreq_pools_info *)(uintptr_t)hdr->nr_body;
3082 /* Build a nmreq_register out of the nmreq_pools_info,
3083 * so that we can call netmap_get_na(). */
3084 struct nmreq_register regreq;
3085 bzero(&regreq, sizeof(regreq));
3086 regreq.nr_mem_id = req->nr_mem_id;
3087 regreq.nr_mode = NR_REG_ALL_NIC;
3089 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
3090 hdr->nr_body = (uintptr_t)&regreq;
3091 error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
3092 hdr->nr_reqtype = NETMAP_REQ_POOLS_INFO_GET; /* reset type */
3093 hdr->nr_body = (uintptr_t)req; /* reset nr_body */
3099 nmd = na->nm_mem; /* grab the memory allocator */
3105 /* Finalize the memory allocator, get the pools
3106 * information and release the allocator. */
3107 error = netmap_mem_finalize(nmd, na);
3111 error = netmap_mem_pools_info_get(req, nmd);
3112 netmap_mem_drop(na);
3114 netmap_unget_na(na, ifp);
3119 case NETMAP_REQ_CSB_ENABLE: {
3120 struct nmreq_option *opt;
3122 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
3126 struct nmreq_opt_csb *csbo =
3127 (struct nmreq_opt_csb *)opt;
3129 error = netmap_csb_validate(priv, csbo);
3131 opt->nro_status = error;
3136 case NETMAP_REQ_SYNC_KLOOP_START: {
3137 error = netmap_sync_kloop(priv, hdr);
3141 case NETMAP_REQ_SYNC_KLOOP_STOP: {
3142 error = netmap_sync_kloop_stop(priv);
3151 /* Write back request body to userspace and reset the
3152 * user-space pointer. */
3153 error = nmreq_copyout(hdr, error);
3159 if (unlikely(priv->np_nifp == NULL)) {
3163 mb(); /* make sure following reads are not from cache */
3165 if (unlikely(priv->np_csb_atok_base)) {
3166 nm_prerr("Invalid sync in CSB mode");
3171 na = priv->np_na; /* we have a reference */
3174 t = (cmd == NIOCTXSYNC ? NR_TX : NR_RX);
3175 krings = NMR(na, t);
3176 qfirst = priv->np_qfirst[t];
3177 qlast = priv->np_qlast[t];
3178 sync_flags = priv->np_sync_flags;
3180 for (i = qfirst; i < qlast; i++) {
3181 struct netmap_kring *kring = krings[i];
3182 struct netmap_ring *ring = kring->ring;
3184 if (unlikely(nm_kr_tryget(kring, 1, &error))) {
3185 error = (error ? EIO : 0);
3189 if (cmd == NIOCTXSYNC) {
3190 if (netmap_debug & NM_DEBUG_TXSYNC)
3191 nm_prinf("pre txsync ring %d cur %d hwcur %d",
3194 if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3195 netmap_ring_reinit(kring);
3196 } else if (kring->nm_sync(kring, sync_flags | NAF_FORCE_RECLAIM) == 0) {
3197 nm_sync_finalize(kring);
3199 if (netmap_debug & NM_DEBUG_TXSYNC)
3200 nm_prinf("post txsync ring %d cur %d hwcur %d",
3204 if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3205 netmap_ring_reinit(kring);
3207 if (nm_may_forward_up(kring)) {
3208 /* transparent forwarding, see netmap_poll() */
3209 netmap_grab_packets(kring, &q, netmap_fwd);
3211 if (kring->nm_sync(kring, sync_flags | NAF_FORCE_READ) == 0) {
3212 nm_sync_finalize(kring);
3214 ring_timestamp_set(ring);
3220 netmap_send_up(na->ifp, &q);
3227 return netmap_ioctl_legacy(priv, cmd, data, td);
3236 nmreq_size_by_type(uint16_t nr_reqtype)
3238 switch (nr_reqtype) {
3239 case NETMAP_REQ_REGISTER:
3240 return sizeof(struct nmreq_register);
3241 case NETMAP_REQ_PORT_INFO_GET:
3242 return sizeof(struct nmreq_port_info_get);
3243 case NETMAP_REQ_VALE_ATTACH:
3244 return sizeof(struct nmreq_vale_attach);
3245 case NETMAP_REQ_VALE_DETACH:
3246 return sizeof(struct nmreq_vale_detach);
3247 case NETMAP_REQ_VALE_LIST:
3248 return sizeof(struct nmreq_vale_list);
3249 case NETMAP_REQ_PORT_HDR_SET:
3250 case NETMAP_REQ_PORT_HDR_GET:
3251 return sizeof(struct nmreq_port_hdr);
3252 case NETMAP_REQ_VALE_NEWIF:
3253 return sizeof(struct nmreq_vale_newif);
3254 case NETMAP_REQ_VALE_DELIF:
3255 case NETMAP_REQ_SYNC_KLOOP_STOP:
3256 case NETMAP_REQ_CSB_ENABLE:
3258 case NETMAP_REQ_VALE_POLLING_ENABLE:
3259 case NETMAP_REQ_VALE_POLLING_DISABLE:
3260 return sizeof(struct nmreq_vale_polling);
3261 case NETMAP_REQ_POOLS_INFO_GET:
3262 return sizeof(struct nmreq_pools_info);
3263 case NETMAP_REQ_SYNC_KLOOP_START:
3264 return sizeof(struct nmreq_sync_kloop_start);
3270 nmreq_opt_size_by_type(uint32_t nro_reqtype, uint64_t nro_size)
3272 size_t rv = sizeof(struct nmreq_option);
3273 #ifdef NETMAP_REQ_OPT_DEBUG
3274 if (nro_reqtype & NETMAP_REQ_OPT_DEBUG)
3275 return (nro_reqtype & ~NETMAP_REQ_OPT_DEBUG);
3276 #endif /* NETMAP_REQ_OPT_DEBUG */
3277 switch (nro_reqtype) {
3279 case NETMAP_REQ_OPT_EXTMEM:
3280 rv = sizeof(struct nmreq_opt_extmem);
3282 #endif /* WITH_EXTMEM */
3283 case NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS:
3287 case NETMAP_REQ_OPT_CSB:
3288 rv = sizeof(struct nmreq_opt_csb);
3290 case NETMAP_REQ_OPT_SYNC_KLOOP_MODE:
3291 rv = sizeof(struct nmreq_opt_sync_kloop_mode);
3293 case NETMAP_REQ_OPT_OFFSETS:
3294 rv = sizeof(struct nmreq_opt_offsets);
3297 /* subtract the common header */
3298 return rv - sizeof(struct nmreq_option);
3302 * nmreq_copyin: create an in-kernel version of the request.
3304 * We build the following data structure:
3306 * hdr -> +-------+ buf
3307 * | | +---------------+
3308 * +-------+ |usr body ptr |
3309 * |options|-. +---------------+
3310 * +-------+ | |usr options ptr|
3311 * |body |--------->+---------------+
3313 * | | copy of body |
3315 * | +---------------+
3317 * | +---------------+
3319 * | | +---------------+ |
3321 * | | | +---------------+ \ option table
3322 * | | | | ... | / indexed by option
3323 * | | | +---------------+ | type
3325 * | | | +---------------+/
3326 * | | | |usr next ptr 1 |
3327 * `-|----->+---------------+
3328 * | | | copy of opt 1 |
3330 * | | .-| nro_next |
3331 * | | | +---------------+
3332 * | | | |usr next ptr 2 |
3333 * | `-`>+---------------+
3334 * | | copy of opt 2 |
3337 * | | +---------------+
3341 * `----->+---------------+
3342 * | |usr next ptr n |
3343 * `>+---------------+
3349 * The options and body fields of the hdr structure are overwritten
3350 * with in-kernel valid pointers inside the buf. The original user
3351 * pointers are saved in the buf and restored on copyout.
3352 * The list of options is copied and the pointers adjusted. The
3353 * original pointers are saved just before the option they belonged to.
3355 * The option table has an entry for every available option. Entries
3356 * for options that have not been passed contain NULL.
3361 nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user)
3363 size_t rqsz, optsz, bufsz;
3365 char *ker = NULL, *p;
3366 struct nmreq_option **next, *src, **opt_tab;
3367 struct nmreq_option buf;
3370 if (hdr->nr_reserved) {
3372 nm_prerr("nr_reserved must be zero");
3376 if (!nr_body_is_user)
3379 hdr->nr_reserved = nr_body_is_user;
3381 /* compute the total size of the buffer */
3382 rqsz = nmreq_size_by_type(hdr->nr_reqtype);
3383 if (rqsz > NETMAP_REQ_MAXSIZE) {
3387 if ((rqsz && hdr->nr_body == (uintptr_t)NULL) ||
3388 (!rqsz && hdr->nr_body != (uintptr_t)NULL)) {
3389 /* Request body expected, but not found; or
3390 * request body found but unexpected. */
3392 nm_prerr("nr_body expected but not found, or vice versa");
3397 bufsz = 2 * sizeof(void *) + rqsz +
3398 NETMAP_REQ_OPT_MAX * sizeof(opt_tab);
3399 /* compute the size of the buf below the option table.
3400 * It must contain a copy of every received option structure.
3401 * For every option we also need to store a copy of the user pointer. */
3405 for (src = (struct nmreq_option *)(uintptr_t)hdr->nr_options; src;
3406 src = (struct nmreq_option *)(uintptr_t)buf.nro_next)
3408 error = copyin(src, &buf, sizeof(*src));
3411 optsz += sizeof(*src);
3412 optsz += nmreq_opt_size_by_type(buf.nro_reqtype, buf.nro_size);
3413 if (rqsz + optsz > NETMAP_REQ_MAXSIZE) {
3417 bufsz += sizeof(void *);
3421 ker = nm_os_malloc(bufsz);
3426 p = ker; /* write pointer into the buffer */
3428 /* make a copy of the user pointers */
3429 ptrs = (uint64_t*)p;
3430 *ptrs++ = hdr->nr_body;
3431 *ptrs++ = hdr->nr_options;
3435 error = copyin((void *)(uintptr_t)hdr->nr_body, p, rqsz);
3438 /* overwrite the user pointer with the in-kernel one */
3439 hdr->nr_body = (uintptr_t)p;
3441 /* start of the options table */
3442 opt_tab = (struct nmreq_option **)p;
3443 p += sizeof(opt_tab) * NETMAP_REQ_OPT_MAX;
3445 /* copy the options */
3446 next = (struct nmreq_option **)&hdr->nr_options;
3449 struct nmreq_option *opt;
3451 /* copy the option header */
3452 ptrs = (uint64_t *)p;
3453 opt = (struct nmreq_option *)(ptrs + 1);
3454 error = copyin(src, opt, sizeof(*src));
3457 /* make a copy of the user next pointer */
3458 *ptrs = opt->nro_next;
3459 /* overwrite the user pointer with the in-kernel one */
3462 /* initialize the option as not supported.
3463 * Recognized options will update this field.
3465 opt->nro_status = EOPNOTSUPP;
3467 /* check for invalid types */
3468 if (opt->nro_reqtype < 1) {
3470 nm_prinf("invalid option type: %u", opt->nro_reqtype);
3471 opt->nro_status = EINVAL;
3476 if (opt->nro_reqtype >= NETMAP_REQ_OPT_MAX) {
3477 /* opt->nro_status is already EOPNOTSUPP */
3482 /* if the type is valid, index the option in the table
3483 * unless it is a duplicate.
3485 if (opt_tab[opt->nro_reqtype] != NULL) {
3487 nm_prinf("duplicate option: %u", opt->nro_reqtype);
3488 opt->nro_status = EINVAL;
3489 opt_tab[opt->nro_reqtype]->nro_status = EINVAL;
3493 opt_tab[opt->nro_reqtype] = opt;
3495 p = (char *)(opt + 1);
3497 /* copy the option body */
3498 optsz = nmreq_opt_size_by_type(opt->nro_reqtype,
3501 /* the option body follows the option header */
3502 error = copyin(src + 1, p, optsz);
3509 /* move to next option */
3510 next = (struct nmreq_option **)&opt->nro_next;
3514 nmreq_copyout(hdr, error);
3518 ptrs = (uint64_t *)ker;
3519 hdr->nr_body = *ptrs++;
3520 hdr->nr_options = *ptrs++;
3521 hdr->nr_reserved = 0;
3528 nmreq_copyout(struct nmreq_header *hdr, int rerror)
3530 struct nmreq_option *src, *dst;
3531 void *ker = (void *)(uintptr_t)hdr->nr_body, *bufstart;
3536 if (!hdr->nr_reserved)
3539 /* restore the user pointers in the header */
3540 ptrs = (uint64_t *)ker - 2;
3542 hdr->nr_body = *ptrs++;
3543 src = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3544 hdr->nr_options = *ptrs;
3548 bodysz = nmreq_size_by_type(hdr->nr_reqtype);
3549 error = copyout(ker, (void *)(uintptr_t)hdr->nr_body, bodysz);
3556 /* copy the options */
3557 dst = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3562 /* restore the user pointer */
3563 next = src->nro_next;
3564 ptrs = (uint64_t *)src - 1;
3565 src->nro_next = *ptrs;
3567 /* always copy the option header */
3568 error = copyout(src, dst, sizeof(*src));
3574 /* copy the option body only if there was no error */
3575 if (!rerror && !src->nro_status) {
3576 optsz = nmreq_opt_size_by_type(src->nro_reqtype,
3579 error = copyout(src + 1, dst + 1, optsz);
3586 src = (struct nmreq_option *)(uintptr_t)next;
3587 dst = (struct nmreq_option *)(uintptr_t)*ptrs;
3592 hdr->nr_reserved = 0;
3593 nm_os_free(bufstart);
3597 struct nmreq_option *
3598 nmreq_getoption(struct nmreq_header *hdr, uint16_t reqtype)
3600 struct nmreq_option **opt_tab;
3602 if (!hdr->nr_options)
3605 opt_tab = (struct nmreq_option **)((uintptr_t)hdr->nr_options) -
3606 (NETMAP_REQ_OPT_MAX + 1);
3607 return opt_tab[reqtype];
3611 nmreq_checkoptions(struct nmreq_header *hdr)
3613 struct nmreq_option *opt;
3614 /* return error if there is still any option
3615 * marked as not supported
3618 for (opt = (struct nmreq_option *)(uintptr_t)hdr->nr_options; opt;
3619 opt = (struct nmreq_option *)(uintptr_t)opt->nro_next)
3620 if (opt->nro_status == EOPNOTSUPP)
3627 * select(2) and poll(2) handlers for the "netmap" device.
3629 * Can be called for one or more queues.
3630 * Return the event mask corresponding to ready events.
3631 * If there are no ready events (and 'sr' is not NULL), do a
3632 * selrecord on either individual selinfo or on the global one.
3633 * Device-dependent parts (locking and sync of tx/rx rings)
3634 * are done through callbacks.
3636 * On linux, arguments are really pwait, the poll table, and 'td' is struct file *
3637 * The first one is remapped to pwait as selrecord() uses the name as an argument.
3641 netmap_poll(struct netmap_priv_d *priv, int events, NM_SELRECORD_T *sr)
3643 struct netmap_adapter *na;
3644 struct netmap_kring *kring;
3645 struct netmap_ring *ring;
3646 u_int i, want[NR_TXRX], revents = 0;
3647 NM_SELINFO_T *si[NR_TXRX];
3648 #define want_tx want[NR_TX]
3649 #define want_rx want[NR_RX]
3650 struct mbq q; /* packets from RX hw queues to host stack */
3653 * In order to avoid nested locks, we need to "double check"
3654 * txsync and rxsync if we decide to do a selrecord().
3655 * retry_tx (and retry_rx, later) prevent looping forever.
3657 int retry_tx = 1, retry_rx = 1;
3659 /* Transparent mode: send_down is 1 if we have found some
3660 * packets to forward (host RX ring --> NIC) during the rx
3661 * scan and we have not sent them down to the NIC yet.
3662 * Transparent mode requires binding all rings to a single file descriptor.
3666 int sync_flags = priv->np_sync_flags;
3670 if (unlikely(priv->np_nifp == NULL)) {
3673 mb(); /* make sure following reads are not from cache */
3677 if (unlikely(!nm_netmap_on(na)))
3680 if (unlikely(priv->np_csb_atok_base)) {
3681 nm_prerr("Invalid poll in CSB mode");
3685 if (netmap_debug & NM_DEBUG_ON)
3686 nm_prinf("device %s events 0x%x", na->name, events);
3687 want_tx = events & (POLLOUT | POLLWRNORM);
3688 want_rx = events & (POLLIN | POLLRDNORM);
3691 * If the card has more than one queue AND the file descriptor is
3692 * bound to all of them, we sleep on the "global" selinfo, otherwise
3693 * we sleep on individual selinfo (FreeBSD only allows two selinfo's
3694 * per file descriptor).
3695 * The interrupt routine in the driver wakes one or the other
3696 * (or both) depending on which clients are active.
3698 * rxsync() is only called if we run out of buffers on a POLLIN.
3699 * txsync() is called if we run out of buffers on POLLOUT, or
3700 * there are pending packets to send. The latter can be disabled
3701 * passing NETMAP_NO_TX_POLL in the NIOCREG call.
3703 si[NR_RX] = priv->np_si[NR_RX];
3704 si[NR_TX] = priv->np_si[NR_TX];
3708 * We start with a lock free round which is cheap if we have
3709 * slots available. If this fails, then lock and call the sync
3710 * routines. We can't do this on Linux, as the contract says
3711 * that we must call nm_os_selrecord() unconditionally.
3714 const enum txrx t = NR_TX;
3715 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3716 kring = NMR(na, t)[i];
3717 if (kring->ring->cur != kring->ring->tail) {
3718 /* Some unseen TX space is available, so
3719 * we don't need to run txsync. */
3727 const enum txrx t = NR_RX;
3728 int rxsync_needed = 0;
3730 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3731 kring = NMR(na, t)[i];
3732 if (kring->ring->cur == kring->ring->tail
3733 || kring->rhead != kring->ring->head) {
3734 /* There are no unseen packets on this ring,
3735 * or there are some buffers to be returned
3736 * to the netmap port. We therefore go ahead
3737 * and run rxsync. */
3742 if (!rxsync_needed) {
3750 /* The selrecord must be unconditional on linux. */
3751 nm_os_selrecord(sr, si[NR_RX]);
3752 nm_os_selrecord(sr, si[NR_TX]);
3756 * If we want to push packets out (priv->np_txpoll) or
3757 * want_tx is still set, we must issue txsync calls
3758 * (on all rings, to avoid that the tx rings stall).
3759 * Fortunately, normal tx mode has np_txpoll set.
3761 if (priv->np_txpoll || want_tx) {
3763 * The first round checks if anyone is ready, if not
3764 * do a selrecord and another round to handle races.
3765 * want_tx goes to 0 if any space is found, and is
3766 * used to skip rings with no pending transmissions.
3769 for (i = priv->np_qfirst[NR_TX]; i < priv->np_qlast[NR_TX]; i++) {
3772 kring = na->tx_rings[i];
3776 * Don't try to txsync this TX ring if we already found some
3777 * space in some of the TX rings (want_tx == 0) and there are no
3778 * TX slots in this ring that need to be flushed to the NIC
3781 if (!send_down && !want_tx && ring->head == kring->nr_hwcur)
3784 if (nm_kr_tryget(kring, 1, &revents))
3787 if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3788 netmap_ring_reinit(kring);
3791 if (kring->nm_sync(kring, sync_flags))
3794 nm_sync_finalize(kring);
3798 * If we found new slots, notify potential
3799 * listeners on the same ring.
3800 * Since we just did a txsync, look at the copies
3801 * of cur,tail in the kring.
3803 found = kring->rcur != kring->rtail;
3805 if (found) { /* notify other listeners */
3809 kring->nm_notify(kring, 0);
3813 /* if there were any packets to forward we must have handled them by now */
3815 if (want_tx && retry_tx && sr) {
3817 nm_os_selrecord(sr, si[NR_TX]);
3825 * If want_rx is still set scan receive rings.
3826 * Do it on all rings because otherwise we starve.
3829 /* two rounds here for race avoidance */
3831 for (i = priv->np_qfirst[NR_RX]; i < priv->np_qlast[NR_RX]; i++) {
3834 kring = na->rx_rings[i];
3837 if (unlikely(nm_kr_tryget(kring, 1, &revents)))
3840 if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3841 netmap_ring_reinit(kring);
3844 /* now we can use kring->rcur, rtail */
3847 * transparent mode support: collect packets from
3848 * hw rxring(s) that have been released by the user
3850 if (nm_may_forward_up(kring)) {
3851 netmap_grab_packets(kring, &q, netmap_fwd);
3854 /* Clear the NR_FORWARD flag anyway, it may be set by
3855 * the nm_sync() below, and only for the host RX ring (see
3856 * netmap_rxsync_from_host()). */
3857 kring->nr_kflags &= ~NR_FORWARD;
3858 if (kring->nm_sync(kring, sync_flags))
3861 nm_sync_finalize(kring);
3862 send_down |= (kring->nr_kflags & NR_FORWARD);
3863 ring_timestamp_set(ring);
3864 found = kring->rcur != kring->rtail;
3870 kring->nm_notify(kring, 0);
3876 if (retry_rx && sr) {
3877 nm_os_selrecord(sr, si[NR_RX]);
3880 if (send_down || retry_rx) {
3883 goto flush_tx; /* and retry_rx */
3890 * Transparent mode: released bufs (i.e. between kring->nr_hwcur and
3891 * ring->head) marked with NS_FORWARD on hw rx rings are passed up
3892 * to the host stack.
3896 netmap_send_up(na->ifp, &q);
3905 nma_intr_enable(struct netmap_adapter *na, int onoff)
3907 bool changed = false;
3912 for (i = 0; i < nma_get_nrings(na, t); i++) {
3913 struct netmap_kring *kring = NMR(na, t)[i];
3914 int on = !(kring->nr_kflags & NKR_NOINTR);
3916 if (!!onoff != !!on) {
3920 kring->nr_kflags &= ~NKR_NOINTR;
3922 kring->nr_kflags |= NKR_NOINTR;
3928 return 0; /* nothing to do */
3932 nm_prerr("Cannot %s interrupts for %s", onoff ? "enable" : "disable",
3937 na->nm_intr(na, onoff);
3943 /*-------------------- driver support routines -------------------*/
3945 /* default notify callback */
3947 netmap_notify(struct netmap_kring *kring, int flags)
3949 struct netmap_adapter *na = kring->notify_na;
3950 enum txrx t = kring->tx;
3952 nm_os_selwakeup(&kring->si);
3953 /* optimization: avoid a wake up on the global
3954 * queue if nobody has registered for more
3957 if (na->si_users[t] > 0)
3958 nm_os_selwakeup(&na->si[t]);
3960 return NM_IRQ_COMPLETED;
3963 /* called by all routines that create netmap_adapters.
3964 * provide some defaults and get a reference to the memory allocator. */
3968 netmap_attach_common(struct netmap_adapter *na)
3970 if (!na->rx_buf_maxsize) {
3971 /* Set a conservative default (larger is safer). */
3972 na->rx_buf_maxsize = PAGE_SIZE;
3976 if (na->na_flags & NAF_HOST_RINGS && na->ifp) {
3977 na->if_input = na->ifp->if_input; /* for netmap_send_up */
3979 na->pdev = na; /* make sure netmap_mem_map() is called */
3980 #endif /* __FreeBSD__ */
3981 if (na->na_flags & NAF_HOST_RINGS) {
3982 if (na->num_host_rx_rings == 0)
3983 na->num_host_rx_rings = 1;
3984 if (na->num_host_tx_rings == 0)
3985 na->num_host_tx_rings = 1;
3987 if (na->nm_krings_create == NULL) {
3988 /* we assume that we have been called by a driver,
3989 * since other port types all provide their own nm_krings_create. */
3992 na->nm_krings_create = netmap_hw_krings_create;
3993 na->nm_krings_delete = netmap_hw_krings_delete;
3995 if (na->nm_notify == NULL)
3996 na->nm_notify = netmap_notify;
3999 if (na->nm_mem == NULL) {
4000 /* use iommu or global allocator */
4001 na->nm_mem = netmap_mem_get_iommu(na);
4003 if (na->nm_bdg_attach == NULL)
4004 /* no special nm_bdg_attach callback. On VALE
4005 * attach, we need to interpose a bwrap. */
4007 na->nm_bdg_attach = netmap_default_bdg_attach;
4012 /* Wrapper for the register callback provided by netmap-enabled
4014 * nm_iszombie(na) means that the driver module has been
4015 * unloaded, so we cannot call into it.
4016 * nm_os_ifnet_lock() must guarantee mutual exclusion with module unloading. */
4020 netmap_hw_reg(struct netmap_adapter *na, int onoff)
4022 struct netmap_hw_adapter *hwna =
4023 (struct netmap_hw_adapter*)na;
4028 if (nm_iszombie(na)) {
4031 } else if (na != NULL) {
4032 na->na_flags &= ~NAF_NETMAP_ON;
4037 error = hwna->nm_hw_register(na, onoff);
4040 nm_os_ifnet_unlock();
4046 netmap_hw_dtor(struct netmap_adapter *na)
4048 if (na->ifp == NULL)
4051 NM_DETACH_NA(na->ifp);
4056 * Allocate a netmap_adapter object, and initialize it from the
4057 * 'arg' passed by the driver on attach.
4058 * We allocate a block of memory of 'size' bytes, which has room
4059 * for struct netmap_adapter plus additional room private to
4061 * Return 0 on success, ENOMEM or EINVAL otherwise.
4064 netmap_attach_ext(struct netmap_adapter *arg, size_t size, int override_reg)
4066 struct netmap_hw_adapter *hwna = NULL;
4067 struct ifnet *ifp = NULL;
4069 if (size < sizeof(struct netmap_hw_adapter)) {
4070 if (netmap_debug & NM_DEBUG_ON)
4071 nm_prerr("Invalid netmap adapter size %d", (int)size);
4075 if (arg == NULL || arg->ifp == NULL) {
4076 if (netmap_debug & NM_DEBUG_ON)
4077 nm_prerr("either arg or arg->ifp is NULL");
4081 if (arg->num_tx_rings == 0 || arg->num_rx_rings == 0) {
4082 if (netmap_debug & NM_DEBUG_ON)
4083 nm_prerr("%s: invalid rings tx %d rx %d",
4084 arg->name, arg->num_tx_rings, arg->num_rx_rings);
4089 if (NM_NA_CLASH(ifp)) {
4090 /* If NA(ifp) is not null but there is no valid netmap
4091 * adapter it means that someone else is using the same
4092 * pointer (e.g. ax25_ptr on linux). This happens for
4093 * instance when also PF_RING is in use. */
4094 nm_prerr("Error: netmap adapter hook is busy");
4098 hwna = nm_os_malloc(size);
4102 hwna->up.na_flags |= NAF_HOST_RINGS | NAF_NATIVE;
4103 strlcpy(hwna->up.name, ifp->if_xname, sizeof(hwna->up.name));
4105 hwna->nm_hw_register = hwna->up.nm_register;
4106 hwna->up.nm_register = netmap_hw_reg;
4108 if (netmap_attach_common(&hwna->up)) {
4112 netmap_adapter_get(&hwna->up);
4114 NM_ATTACH_NA(ifp, &hwna->up);
4116 nm_os_onattach(ifp);
4118 if (arg->nm_dtor == NULL) {
4119 hwna->up.nm_dtor = netmap_hw_dtor;
4122 if_printf(ifp, "netmap queues/slots: TX %d/%d, RX %d/%d\n",
4123 hwna->up.num_tx_rings, hwna->up.num_tx_desc,
4124 hwna->up.num_rx_rings, hwna->up.num_rx_desc);
4128 nm_prerr("fail, arg %p ifp %p na %p", arg, ifp, hwna);
4129 return (hwna ? EINVAL : ENOMEM);
4134 netmap_attach(struct netmap_adapter *arg)
4136 return netmap_attach_ext(arg, sizeof(struct netmap_hw_adapter),
4137 1 /* override nm_reg */);
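/* Driver-side sketch (illustrative; "sc", its fields and the foo_*
 * callbacks are made-up names, not part of this file):
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = sc->ifp;
 *	na.num_tx_desc = sc->num_tx_desc;
 *	na.num_rx_desc = sc->num_rx_desc;
 *	na.num_tx_rings = na.num_rx_rings = sc->num_queues;
 *	na.nm_txsync = foo_netmap_txsync;
 *	na.nm_rxsync = foo_netmap_rxsync;
 *	na.nm_register = foo_netmap_reg;
 *	netmap_attach(&na);
 */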
4142 NM_DBG(netmap_adapter_get)(struct netmap_adapter *na)
4148 refcount_acquire(&na->na_refcount);
4152 /* returns 1 iff the netmap_adapter is destroyed */
4154 NM_DBG(netmap_adapter_put)(struct netmap_adapter *na)
4159 if (!refcount_release(&na->na_refcount))
4165 if (na->tx_rings) { /* XXX should not happen */
4166 if (netmap_debug & NM_DEBUG_ON)
4167 nm_prerr("freeing leftover tx_rings");
4168 na->nm_krings_delete(na);
4170 netmap_pipe_dealloc(na);
4172 netmap_mem_put(na->nm_mem);
4173 bzero(na, sizeof(*na));
4179 /* nm_krings_create callback for all hardware native adapters */
4181 netmap_hw_krings_create(struct netmap_adapter *na)
4183 int ret = netmap_krings_create(na, 0);
4185 /* initialize the mbq for the sw rx ring */
4186 u_int lim = netmap_real_rings(na, NR_RX), i;
4187 for (i = na->num_rx_rings; i < lim; i++) {
4188 mbq_safe_init(&NMR(na, NR_RX)[i]->rx_queue);
4190 nm_prdis("initialized sw rx queue %d", na->num_rx_rings);
4198 * Called on module unload by the netmap-enabled drivers
4201 netmap_detach(struct ifnet *ifp)
4203 struct netmap_adapter *na;
4207 if (!NM_NA_VALID(ifp)) {
4213 netmap_set_all_rings(na, NM_KR_LOCKED);
4215 * if the netmap adapter is not native, somebody
4216 * changed it, so we can not release it here.
4217 * The NAF_ZOMBIE flag will notify the new owner that
4218 * the driver is gone.
4220 if (!(na->na_flags & NAF_NATIVE) || !netmap_adapter_put(na)) {
4221 na->na_flags |= NAF_ZOMBIE;
4223 /* give active users a chance to notice that NAF_ZOMBIE has been
4224 * turned on, so that they can stop and return an error to userspace.
4225 * Note that this becomes a NOP if there are no active users and,
4226 * therefore, the put() above has deleted the na, since now NA(ifp) is
4229 netmap_enable_all_rings(ifp);
4235 * Intercept packets from the network stack and pass them
4236 * to netmap as incoming packets on the 'software' ring.
4238 * We only store packets in a bounded mbq and then copy them
4239 * in the relevant rxsync routine.
4241 * We rely on the OS to make sure that the ifp and na do not go
4242 * away (typically the caller checks for IFF_DRV_RUNNING or the like).
4243 * In nm_register() or whenever there is a reinitialization,
4244 * we make sure to make the mode change visible here.
4247 netmap_transmit(struct ifnet *ifp, struct mbuf *m)
4249 struct netmap_adapter *na = NA(ifp);
4250 struct netmap_kring *kring, *tx_kring;
4251 u_int len = MBUF_LEN(m);
4252 u_int error = ENOBUFS;
4259 if (i >= na->num_host_rx_rings) {
4260 i = i % na->num_host_rx_rings;
4262 kring = NMR(na, NR_RX)[nma_get_nrings(na, NR_RX) + i];
4264 // XXX [Linux] we do not need this lock
4265 // if we follow the down/configure/up protocol -gl
4266 // mtx_lock(&na->core_lock);
4268 if (!nm_netmap_on(na)) {
4269 nm_prerr("%s not in netmap mode anymore", na->name);
4275 if (txr >= na->num_tx_rings) {
4276 txr %= na->num_tx_rings;
4278 tx_kring = NMR(na, NR_TX)[txr];
4280 if (tx_kring->nr_mode == NKR_NETMAP_OFF) {
4281 return MBUF_TRANSMIT(na, ifp, m);
4284 q = &kring->rx_queue;
4286 // XXX reconsider long packets if we handle fragments
4287 if (len > NETMAP_BUF_SIZE(na)) { /* too long for us */
4288 nm_prerr("%s from_host, drop packet size %d > %d", na->name,
4289 len, NETMAP_BUF_SIZE(na));
4293 if (!netmap_generic_hwcsum) {
4294 if (nm_os_mbuf_has_csum_offld(m)) {
4295 nm_prlim(1, "%s drop mbuf that needs checksum offload", na->name);
4300 if (nm_os_mbuf_has_seg_offld(m)) {
4301 nm_prlim(1, "%s drop mbuf that needs generic segmentation offload", na->name);
4306 ETHER_BPF_MTAP(ifp, m);
4307 #endif /* __FreeBSD__ */
4309 /* protect against netmap_rxsync_from_host(), netmap_sw_to_nic()
4310 * and maybe other instances of netmap_transmit (the latter
4311 * not possible on Linux).
4312 * We enqueue the mbuf only if we are sure there is going to be
4313 * enough room in the host RX ring, otherwise we drop it.
4317 busy = kring->nr_hwtail - kring->nr_hwcur;
4319 busy += kring->nkr_num_slots;
4320 if (busy + mbq_len(q) >= kring->nkr_num_slots - 1) {
4321 nm_prlim(2, "%s full hwcur %d hwtail %d qlen %d", na->name,
4322 kring->nr_hwcur, kring->nr_hwtail, mbq_len(q));
4325 nm_prdis(2, "%s %d bufs in queue", na->name, mbq_len(q));
4326 /* notify outside the lock */
4335 /* unconditionally wake up listeners */
4336 kring->nm_notify(kring, 0);
4337 /* this is normally netmap_notify(), but for nics
4338 * connected to a bridge it is netmap_bwrap_intr_notify(),
4339 * that possibly forwards the frames through the switch
4347 * Reset function to be called by the driver routines when reinitializing
4348 * a hardware ring. The driver is in charge of locking to protect the kring
4349 * while this operation is being performed. This is normally achieved by
4350 * calling netmap_disable_all_rings() before triggering a reset.
4351 * If the kring is not in netmap mode, return NULL to inform the caller
4352 * that this is the case.
4353 * If the kring is in netmap mode, set hwofs so that the netmap indices
4354 * seen by userspace (head/cur/tail) do not change, although the internal
4355 * NIC indices have been reset to 0.
4356 * In any case, adjust kring->nr_mode.
4358 struct netmap_slot *
4359 netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n,
4362 struct netmap_kring *kring;
4363 u_int new_hwtail, new_hwofs;
4365 if (!nm_native_on(na)) {
4366 nm_prdis("interface not in native netmap mode");
4367 return NULL; /* nothing to reinitialize */
4371 if (n >= na->num_tx_rings)
4373 kring = na->tx_rings[n];
4375 * Set hwofs to rhead, so that slots[rhead] is mapped to
4376 * the NIC internal slot 0, and thus the netmap buffer
4377 * at rhead is the next to be transmitted. Transmissions
4378 * that were pending before the reset are considered as
4379 * sent, so that we can have hwcur = rhead. All the slots
4380 * are now owned by the user, so we can also reinit hwtail.
4382 new_hwofs = kring->rhead;
4383 new_hwtail = nm_prev(kring->rhead, kring->nkr_num_slots - 1);
4385 if (n >= na->num_rx_rings)
4387 kring = na->rx_rings[n];
4389 * Set hwofs to hwtail, so that slots[hwtail] is mapped to
4390 * the NIC internal slot 0, and thus the netmap buffer
4391 * at hwtail is the next to be given to the NIC.
4392 * Unread slots (the ones in [rhead, hwtail)) are owned by
4393 * the user, and thus the caller cannot give them
4394 * to the NIC right now.
4396 new_hwofs = kring->nr_hwtail;
4397 new_hwtail = kring->nr_hwtail;
4399 if (kring->nr_pending_mode == NKR_NETMAP_OFF) {
4400 kring->nr_mode = NKR_NETMAP_OFF;
4403 if (netmap_verbose) {
4404 nm_prinf("%s, hc %u->%u, ht %u->%u, ho %u->%u", kring->name,
4405 kring->nr_hwcur, kring->rhead,
4406 kring->nr_hwtail, new_hwtail,
4407 kring->nkr_hwofs, new_hwofs);
4409 kring->nr_hwcur = kring->rhead;
4410 kring->nr_hwtail = new_hwtail;
4411 kring->nkr_hwofs = new_hwofs;
4414 * Wakeup on the individual and global selwait
4415 * We do the wakeup here, but the ring is not yet reconfigured.
4416 * However, we are under lock so there are no races.
4418 kring->nr_mode = NKR_NETMAP_ON;
4419 kring->nm_notify(kring, 0);
4420 return kring->ring->slot;
/*
 * Dispatch rx/tx interrupts to the netmap rings.
 *
 * "work_done" is non-null on the RX path, NULL for the TX path.
 * We rely on the OS to make sure that there is only one active
 * instance per queue, and that there is appropriate locking.
 *
 * The 'notify' routine depends on what the ring is attached to.
 * - for a netmap file descriptor, do a selwakeup on the individual
 *   waitqueue, plus one on the global one if needed
 *   (see netmap_notify)
 * - for a nic connected to a switch, call the proper forwarding routine
 *   (see netmap_bwrap_intr_notify)
 */
int
netmap_common_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
{
	struct netmap_kring *kring;
	enum txrx t = (work_done ? NR_RX : NR_TX);

	q &= NETMAP_RING_MASK;

	if (netmap_debug & (NM_DEBUG_RXINTR|NM_DEBUG_TXINTR)) {
		nm_prlim(5, "received %s queue %d", work_done ? "RX" : "TX", q);
	}

	if (q >= nma_get_nrings(na, t))
		return NM_IRQ_PASS; /* not a physical queue */

	kring = NMR(na, t)[q];

	if (kring->nr_mode == NKR_NETMAP_OFF) {
		return NM_IRQ_PASS;
	}

	if (t == NR_RX) {
		kring->nr_kflags |= NKR_PENDINTR;	/* XXX atomic ? */
		*work_done = 1; /* do not fire napi again */
	}

	return kring->nm_notify(kring, 0);
}
/*
 * Default functions to handle rx/tx interrupts from a physical device.
 * "work_done" is non-null on the RX path, NULL for the TX path.
 *
 * If the card is not in netmap mode, simply return NM_IRQ_PASS,
 * so that the caller proceeds with regular processing.
 * Otherwise call netmap_common_irq().
 *
 * If the card is connected to a netmap file descriptor,
 * do a selwakeup on the individual queue, plus one on the global one
 * if needed (multiqueue card _and_ there are multiqueue listeners),
 * and return NM_IRQ_COMPLETED.
 *
 * Finally, if called on rx from an interface connected to a switch,
 * calls the proper forwarding routine.
 */
int
netmap_rx_irq(struct ifnet *ifp, u_int q, u_int *work_done)
{
	struct netmap_adapter *na = NA(ifp);

	/*
	 * XXX emulated netmap mode sets NAF_SKIP_INTR so
	 * we still use the regular driver even though the previous
	 * check fails. It is unclear whether we should use
	 * nm_native_on() here.
	 */
	if (!nm_netmap_on(na))
		return NM_IRQ_PASS;

	if (na->na_flags & NAF_SKIP_INTR) {
		nm_prdis("use regular interrupt");
		return NM_IRQ_PASS;
	}

	return netmap_common_irq(na, q, work_done);
}
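
/*
 * Illustrative sketch (not part of the original sources): a hypothetical
 * driver RX interrupt handler offering the event to netmap first.
 * netmap_rx_irq() and NM_IRQ_PASS are real; foo_rxintr(), foo_rxeof()
 * and struct foo_softc are invented. For TX completions, netmap_kern.h
 * provides netmap_tx_irq(), which is the same call with a NULL work_done.
 */
static void
foo_rxintr(struct foo_softc *sc, u_int ring_nr)
{
	u_int work_done;

	/* in netmap mode userspace drains the ring: just notify and stop */
	if (netmap_rx_irq(sc->ifp, ring_nr, &work_done) != NM_IRQ_PASS)
		return;

	/* otherwise proceed with the regular datapath */
	foo_rxeof(sc, ring_nr);
}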
/* set/clear native flags and if_transmit/netdev_ops */
void
nm_set_native_flags(struct netmap_adapter *na)
{
	struct ifnet *ifp = na->ifp;

	/* We do the setup for intercepting packets only if we are the
	 * first user of this adapter. */
	if (na->active_fds > 0) {
		return;
	}

	na->na_flags |= NAF_NETMAP_ON;
	nm_os_onenter(ifp);
	netmap_update_hostrings_mode(na);
}
void
nm_clear_native_flags(struct netmap_adapter *na)
{
	struct ifnet *ifp = na->ifp;

	/* We undo the setup for intercepting packets only if we are the
	 * last user of this adapter. */
	if (na->active_fds > 0) {
		return;
	}

	netmap_update_hostrings_mode(na);
	nm_os_onexit(ifp);
	na->na_flags &= ~NAF_NETMAP_ON;
}
void
netmap_krings_mode_commit(struct netmap_adapter *na, int onoff)
{
	enum txrx t;

	for_rx_tx(t) {
		int i;

		for (i = 0; i < netmap_real_rings(na, t); i++) {
			struct netmap_kring *kring = NMR(na, t)[i];

			if (onoff && nm_kring_pending_on(kring))
				kring->nr_mode = NKR_NETMAP_ON;
			else if (!onoff && nm_kring_pending_off(kring))
				kring->nr_mode = NKR_NETMAP_OFF;
		}
	}
}
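
/*
 * Illustrative sketch (not part of the original sources): a hypothetical
 * nm_register callback tying together the helpers above. foo_stop() and
 * foo_init() are invented stand-ins for the driver's own stop/restart
 * routines; the netmap calls are real.
 */
static int
foo_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct foo_softc *sc = na->ifp->if_softc;

	foo_stop(sc);	/* quiesce the hardware */
	if (onoff)
		nm_set_native_flags(na);	/* first user: intercept if_transmit */
	else
		nm_clear_native_flags(na);	/* last user: restore the stack */
	netmap_krings_mode_commit(na, onoff);	/* apply pending ring modes */
	foo_init(sc);	/* restart; the rings go through netmap_reset() */

	return (0);
}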
/*
 * Module loader and unloader
 *
 * netmap_init() creates the /dev/netmap device and initializes
 * all global variables. Returns 0 on success, errno on failure
 * (in practice, failure is not expected).
 *
 * netmap_fini() destroys everything.
 */

static struct cdev *netmap_dev; /* /dev/netmap character device. */
extern struct cdevsw netmap_cdevsw;
void
netmap_fini(void)
{
	if (netmap_dev)
		destroy_dev(netmap_dev);
	/* we assume that there are no more netmap users */
	nm_os_ifnet_fini();
	netmap_uninit_bridges();
	netmap_mem_fini();
	NMG_LOCK_DESTROY();
	nm_prinf("netmap: unloaded module.");
}
int
netmap_init(void)
{
	int error;

	NMG_LOCK_INIT();

	error = netmap_mem_init();
	if (error != 0)
		goto fail;

	/*
	 * MAKEDEV_ETERNAL_KLD avoids an expensive check on syscalls
	 * when the module is compiled in.
	 * XXX could use make_dev_credv() to get error number
	 */
	netmap_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD,
		&netmap_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600,
		"netmap");
	if (!netmap_dev)
		goto fail;

	error = netmap_init_bridges();
	if (error)
		goto fail;

#ifdef __FreeBSD__
	nm_os_vi_init_index();
#endif

	error = nm_os_ifnet_init();
	if (error)
		goto fail;

#if !defined(__FreeBSD__) || defined(KLD_MODULE)
	nm_prinf("netmap: loaded module");
#endif
	return (0);
fail:
	nm_prinf("netmap: unable to load module");
	netmap_fini();
	return (EINVAL); /* may be incorrect */
}
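
/*
 * Illustrative sketch (not part of the original sources): on FreeBSD,
 * netmap_init()/netmap_fini() are typically driven by a module event
 * handler along these lines; the actual glue lives in the OS-specific
 * netmap sources, so the handler name here is invented.
 */
#ifdef __FreeBSD__
static int
netmap_loader_example(module_t mod, int event, void *arg)
{
	int error = 0;

	switch (event) {
	case MOD_LOAD:
		error = netmap_init();
		break;
	case MOD_UNLOAD:
		netmap_fini();
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}
#endif /* __FreeBSD__ */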