 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2011-2014 Matteo Landi
 * Copyright (C) 2011-2016 Luigi Rizzo
 * Copyright (C) 2011-2016 Giuseppe Lettieri
 * Copyright (C) 2011-2016 Vincenzo Maffione
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 * This module supports memory mapped access to network devices,
 * see netmap(4).
 *
 * The module uses a large memory pool allocated by the kernel
 * and accessible as mmapped memory by multiple userspace threads/processes.
 * The memory pool contains packet buffers and "netmap rings",
 * i.e. user-accessible copies of the interface's queues.
 * Access to the network card works like this:
 * 1. a process/thread issues one or more open() on /dev/netmap, to create
 *    select()able file descriptors on which events are reported.
 * 2. on each descriptor, the process issues an ioctl() to identify
 *    the interface that should report events to the file descriptor.
 * 3. on each descriptor, the process issues an mmap() request to
 *    map the shared memory region within the process' address space.
 *    The list of interesting queues is indicated by a location in
 *    the shared memory region.
 * 4. using the functions in the netmap(4) userspace API, a process
 *    can look up the occupation state of a queue, access memory buffers,
 *    and retrieve received packets or enqueue packets to transmit.
 * 5. using some ioctl()s the process can synchronize the userspace view
 *    of the queue with the actual status in the kernel. This includes both
 *    receiving the notification of new packets, and transmitting new
 *    packets on the output interface.
 * 6. select() or poll() can be used to wait for events on individual
 *    transmit or receive queues (or all queues for a given interface).
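 *
 * As a minimal sketch of the above (a hedged example using the legacy
 * struct nmreq API from net/netmap_user.h; "em0" is just an example
 * interface name, and all error checking is omitted):
 *
 *	struct nmreq nmr = { .nr_version = NETMAP_API };
 *	struct netmap_if *nifp;
 *	struct netmap_ring *txring;
 *	void *mem;
 *	int fd;
 *
 *	fd = open("/dev/netmap", O_RDWR);		(step 1)
 *	strlcpy(nmr.nr_name, "em0", sizeof(nmr.nr_name));
 *	ioctl(fd, NIOCREGIF, &nmr);			(step 2)
 *	mem = mmap(NULL, nmr.nr_memsize,
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);	(step 3)
 *	nifp = NETMAP_IF(mem, nmr.nr_offset);
 *	txring = NETMAP_TXRING(nifp, 0);		(step 4)
 *	... fill slots between txring->head and txring->tail ...
 *	ioctl(fd, NIOCTXSYNC, NULL);			(step 5)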
 SYNCHRONIZATION (USER)

 The netmap rings and data structures may be shared among multiple
 user threads or even independent processes.
 Any synchronization among those threads/processes is delegated
 to the threads themselves. Only one thread at a time can be in
 a system call on the same netmap ring. The OS does not enforce
 this and only guarantees against system crashes in case of
 invalid usage.
 Within the kernel, access to the netmap rings is protected as follows:

 - a spinlock on each ring, to handle producer/consumer races on
   RX rings attached to the host stack (against multiple host
   threads writing from the host stack to the same ring),
   and on 'destination' rings attached to a VALE switch
   (i.e. RX rings in VALE ports, and TX rings in NIC/host ports),
   protecting multiple active senders for the same destination;

 - an atomic variable to guarantee that there is at most one
   instance of *_*xsync() on the ring at any time.
   For rings connected to user file
   descriptors, an atomic_test_and_set() protects this, and the
   lock on the ring is not actually used.
   For NIC RX rings connected to a VALE switch, an atomic_test_and_set()
   is also used to prevent multiple executions (the driver might indeed
   already guarantee this).
   For NIC TX rings connected to a VALE switch, the lock arbitrates
   access to the queue (both when allocating buffers and when pushing
   them out);
 - *xsync() should be protected against initializations of the card.
   On FreeBSD most devices have the reset routine protected by
   a RING lock (ixgbe, igb, em) or core lock (re). lem is missing
   the RING protection on rx_reset(), this should be added.

   On linux there is an external lock on the tx path, which probably
   also arbitrates access to the reset routine. XXX to be revised

 - a per-interface core_lock protecting access from the host stack
   while interfaces may be detached from netmap mode.
   XXX there should be no need for this lock if we detach the interfaces
   only while they are down.
 NMG_LOCK() serializes all modifications to switches and ports.
 A switch cannot be deleted until all ports are gone.

 For each switch, an SX lock (RWlock on linux) protects
 deletion of ports. When configuring or deleting a new port, the
 lock is acquired in exclusive mode (after holding NMG_LOCK).
 When forwarding, the lock is acquired in shared mode (without NMG_LOCK).
 The lock is held throughout the entire forwarding cycle,
 during which the thread may incur a page fault.
 Hence it is important that sleepable shared locks are used.

 On the rx ring, the per-port lock is grabbed initially to reserve
 a number of slots in the ring, then the lock is released,
 packets are copied from source to destination, and then
 the lock is acquired again and the receive ring is updated.
 (A similar thing is done on the tx ring for NIC and host stack
 ports attached to the switch.)
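
 As a sketch, that rx-side cycle is (pseudocode only, not the actual
 function names used in this file):

	lock(port);   reserve slots in the receive ring;   unlock(port);
	copy packets from the source into the reserved slots;
	lock(port);   advance the receive ring pointers;   unlock(port);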
/* --- internals ----
 *
 * Roadmap to the code that implements the above.
 *
 * > 1. a process/thread issues one or more open() on /dev/netmap, to create
 * >    select()able file descriptors on which events are reported.
 *
 * Internally, we allocate a netmap_priv_d structure, that will be
 * initialized on ioctl(NIOCREGIF). There is one netmap_priv_d
 * structure for each open().
 *      FreeBSD: see netmap_open() (netmap_freebsd.c)
 *      linux:   see linux_netmap_open() (netmap_linux.c)
 *
 * > 2. on each descriptor, the process issues an ioctl() to identify
 * >    the interface that should report events to the file descriptor.
 *
 * Implemented by netmap_ioctl(), NIOCREGIF case, with nmr->nr_cmd==0.
 * Most important things happen in netmap_get_na() and
 * netmap_do_regif(), called from there. Additional details can be
 * found in the comments above those functions.
 *
 * In all cases, this action creates/takes-a-reference-to a
 * netmap_*_adapter describing the port, and allocates a netmap_if
 * and all necessary netmap rings, filling them with netmap buffers.
 *
 * In this phase, the sync callbacks for each ring are set (these are used
 * in steps 5 and 6 below). The callbacks depend on the type of adapter.
 * The adapter creation/initialization code puts them in the
 * netmap_adapter (fields na->nm_txsync and na->nm_rxsync). Then, they
 * are copied from there to the netmap_kring's during netmap_do_regif(), by
 * the nm_krings_create() callback. All the nm_krings_create callbacks
 * actually call netmap_krings_create() to perform this and the other
 * common stuff. netmap_krings_create() also takes care of the host rings,
 * if needed, by setting their sync callbacks appropriately.
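 *
 * As an illustrative sketch (the netmap_adapter fields are real, but the
 * foo_* driver callbacks and counters are hypothetical), a driver with
 * native support publishes its sync callbacks at attach time roughly as:
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = ifp;
 *	na.num_tx_desc = ntxd;
 *	na.num_rx_desc = nrxd;
 *	na.num_tx_rings = na.num_rx_rings = nqueues;
 *	na.nm_txsync = foo_netmap_txsync;
 *	na.nm_rxsync = foo_netmap_rxsync;
 *	na.nm_register = foo_netmap_reg;
 *	netmap_attach(&na);	(copies the fields into NA(ifp))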
 * Additional actions depend on the kind of netmap_adapter that has been
 * registered:
 *
 * - netmap_hw_adapter:         [netmap.c]
 *      This is a system netdev/ifp with native netmap support.
 *      The ifp is detached from the host stack by redirecting:
 *      - transmissions (from the network stack) to netmap_transmit()
 *      - receive notifications to the nm_notify() callback for
 *        this adapter. The callback is normally netmap_notify(), unless
 *        the ifp is attached to a bridge using bwrap, in which case it
 *        is netmap_bwrap_intr_notify().
 *
 * - netmap_generic_adapter:    [netmap_generic.c]
 *      A system netdev/ifp without native netmap support.
 *      (the decision about native/non-native support is taken in
 *       netmap_get_hw_na(), called by netmap_get_na())
 *
 * - netmap_vp_adapter          [netmap_vale.c]
 *      Returned by netmap_get_bdg_na().
 *      This is a persistent or ephemeral VALE port. Ephemeral ports
 *      are created on the fly if they don't already exist, and are
 *      always attached to a bridge.
 *      Persistent VALE ports must be created separately, and
 *      then attached like normal NICs. The NIOCREGIF we are examining
 *      will find them only if they had previously been created and
 *      attached (see VALE_CTL below).
 *
 * - netmap_pipe_adapter        [netmap_pipe.c]
 *      Returned by netmap_get_pipe_na().
 *      Both pipe ends are created, if they didn't already exist.
 *
 * - netmap_monitor_adapter     [netmap_monitor.c]
 *      Returned by netmap_get_monitor_na().
 *      If successful, the nm_sync callbacks of the monitored adapter
 *      will be intercepted by the returned monitor.
 *
 * - netmap_bwrap_adapter       [netmap_vale.c]
 *      Cannot be obtained in this way, see VALE_CTL below
 *
 *      linux: we first go through linux_netmap_ioctl() to
 *             adapt the FreeBSD interface to the linux one.
 * > 3. on each descriptor, the process issues an mmap() request to
 * >    map the shared memory region within the process' address space.
 * >    The list of interesting queues is indicated by a location in
 * >    the shared memory region.
 *
 *      FreeBSD: netmap_mmap_single (netmap_freebsd.c).
 *      linux:   linux_netmap_mmap (netmap_linux.c).
 *
 * > 4. using the functions in the netmap(4) userspace API, a process
 * >    can look up the occupation state of a queue, access memory buffers,
 * >    and retrieve received packets or enqueue packets to transmit.
 *
 * These actions do not involve the kernel.
 * > 5. using some ioctl()s the process can synchronize the userspace view
 * >    of the queue with the actual status in the kernel. This includes both
 * >    receiving the notification of new packets, and transmitting new
 * >    packets on the output interface.
 *
 * These are implemented in netmap_ioctl(), NIOCTXSYNC and NIOCRXSYNC
 * cases. They invoke the nm_sync callbacks on the netmap_kring
 * structures, as initialized in step 2 and maybe later modified
 * by a monitor. Monitors, however, will always call the original
 * callback before doing anything else.
 *
 * > 6. select() or poll() can be used to wait for events on individual
 * >    transmit or receive queues (or all queues for a given interface).
 *
 * Implemented in netmap_poll(). This will call the same nm_sync()
 * callbacks as in step 5 above.
 *
 *      linux: we first go through linux_netmap_poll() to adapt
 *             the FreeBSD interface to the linux one.
 * ---- VALE_CTL -----
 *
 * VALE switches are controlled by issuing a NIOCREGIF with a non-null
 * nr_cmd in the nmreq structure. These subcommands are handled by
 * netmap_bdg_ctl() in netmap_vale.c. Persistent VALE ports are created
 * and destroyed by issuing the NETMAP_BDG_NEWIF and NETMAP_BDG_DELIF
 * subcommands, respectively.
 *
 * Any network interface known to the system (including a persistent VALE
 * port) can be attached to a VALE switch by issuing the
 * NETMAP_REQ_VALE_ATTACH command. After the attachment, persistent VALE ports
 * look exactly like ephemeral VALE ports (as created in step 2 above). The
 * attachment of other interfaces, instead, requires the creation of a
 * netmap_bwrap_adapter. Moreover, the attached interface must be put in
 * netmap mode. This may require the creation of a netmap_generic_adapter if
 * we have no native support for the interface, or if generic adapters have
 * been forced by sysctl.
 *
 * Both persistent VALE ports and bwraps are handled by netmap_get_bdg_na(),
 * called by nm_bdg_ctl_attach(), and discriminated by the nm_bdg_attach()
 * callback. In the case of the bwrap, the callback creates the
 * netmap_bwrap_adapter. The initialization of the bwrap is then
 * completed by calling netmap_do_regif() on it, in the nm_bdg_ctl()
 * callback (netmap_bwrap_bdg_ctl in netmap_vale.c).
 * A generic adapter for the wrapped ifp will be created if needed, when
 * netmap_get_bdg_na() calls netmap_get_hw_na().
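 *
 * As a hedged sketch of this control path (legacy nmreq interface;
 * "vale0", "valeport0" and "em0" are example names, error checking
 * omitted):
 *
 *	struct nmreq nmr = { .nr_version = NETMAP_API };
 *
 *	strlcpy(nmr.nr_name, "valeport0", sizeof(nmr.nr_name));
 *	nmr.nr_cmd = NETMAP_BDG_NEWIF;	   create a persistent VALE port
 *	ioctl(fd, NIOCREGIF, &nmr);
 *
 *	strlcpy(nmr.nr_name, "vale0:em0", sizeof(nmr.nr_name));
 *	nmr.nr_cmd = NETMAP_BDG_ATTACH;	   wrap em0 and attach it
 *	ioctl(fd, NIOCREGIF, &nmr);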
 * ---- DATAPATHS -----
 *
 *              -= SYSTEM DEVICE WITH NATIVE SUPPORT =-
 *
 *    na == NA(ifp) == netmap_hw_adapter created in DEVICE_netmap_attach()
 *
 *    - tx from netmap userspace:
 *       concurrently:
 *           1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == DEVICE_netmap_txsync()
 *           2) device interrupt handler
 *                na->nm_notify()  == netmap_notify()
 *    - rx from netmap userspace:
 *       concurrently:
 *           1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == DEVICE_netmap_rxsync()
 *           2) device interrupt handler
 *                na->nm_notify()  == netmap_notify()
 *    - rx from host stack
 *       concurrently:
 *           1) host stack
 *                netmap_transmit()
 *                  na->nm_notify  == netmap_notify()
 *           2) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == netmap_rxsync_from_host
 *                  netmap_rxsync_from_host(na, NULL, NULL)
 *    - tx to host stack
 *           ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == netmap_txsync_to_host
 *                  netmap_txsync_to_host(na)
 *                    nm_os_send_up()
 *                      FreeBSD: na->if_input() == ether_input()
 *                      linux: netif_rx() with NM_MAGIC_PRIORITY_RX
 *
 *              -= SYSTEM DEVICE WITH GENERIC SUPPORT =-
 *
 *    na == NA(ifp) == generic_netmap_adapter created in generic_netmap_attach()
 *
 *    - tx from netmap userspace:
 *       concurrently:
 *           1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *               kring->nm_sync() == generic_netmap_txsync()
 *                   nm_os_generic_xmit_frame()
 *                       linux:   dev_queue_xmit() with NM_MAGIC_PRIORITY_TX
 *                           ifp->ndo_start_xmit == generic_ndo_start_xmit()
 *                               gna->save_start_xmit == orig. dev. start_xmit
 *                       FreeBSD: na->if_transmit() == orig. dev if_transmit
 *           2) generic_mbuf_destructor()
 *                   na->nm_notify() == netmap_notify()
 *    - rx from netmap userspace:
 *           1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *               kring->nm_sync() == generic_netmap_rxsync()
 *                   mbq_safe_dequeue()
 *           2) device driver
 *               generic_rx_handler()
 *                   mbq_safe_enqueue()
 *                   na->nm_notify() == netmap_notify()
 *    - rx from host stack
 *        FreeBSD: same as native
 *        Linux: same as native except:
 *           1) host stack
 *               dev_queue_xmit() without NM_MAGIC_PRIORITY_TX
 *                   ifp->ndo_start_xmit == generic_ndo_start_xmit()
 *                       netmap_transmit()
 *                           na->nm_notify() == netmap_notify()
 *    - tx to host stack (same as native):
 *
 *
 *              -= VALE =-
 *
 *   INCOMING:
 *
 *      - VALE ports:
 *          ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *              kring->nm_sync() == netmap_vp_txsync()
 *
 *      - system device with native support:
 *         from cable:
 *             interrupt
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
 *                    kring->nm_sync() == DEVICE_netmap_rxsync()
 *                    netmap_vp_txsync()
 *                    kring->nm_sync() == DEVICE_netmap_rxsync()
 *         from host stack:
 *             netmap_transmit()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
 *                    kring->nm_sync() == netmap_rxsync_from_host()
 *                    netmap_vp_txsync()
 *
 *      - system device with generic support:
 *         from device driver:
 *            generic_rx_handler()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
 *                    kring->nm_sync() == generic_netmap_rxsync()
 *                    netmap_vp_txsync()
 *                    kring->nm_sync() == generic_netmap_rxsync()
 *         from host stack:
 *            netmap_transmit()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
 *                    kring->nm_sync() == netmap_rxsync_from_host()
 *                    netmap_vp_txsync()
 *
 *      (all cases) --> nm_bdg_flush()
 *                         dest_na->nm_notify() == (see below)
 *
 *   OUTGOING:
 *
 *      - VALE ports:
 *         concurrently:
 *             1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                    kring->nm_sync() == netmap_vp_rxsync()
 *             2) from nm_bdg_flush()
 *                    na->nm_notify() == netmap_notify()
 *
 *      - system device with native support:
 *          to cable:
 *             na->nm_notify() == netmap_bwrap_notify()
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == DEVICE_netmap_txsync()
 *                 netmap_vp_rxsync()
 *          to host stack:
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == netmap_txsync_to_host
 *                 netmap_vp_rxsync_locked()
 *
 *      - system device with generic adapter:
 *          to device driver:
 *             na->nm_notify() == netmap_bwrap_notify()
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == generic_netmap_txsync()
 *                 netmap_vp_rxsync()
 *          to host stack:
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == netmap_txsync_to_host
 *                 netmap_vp_rxsync_locked()
 * OS-specific code that is used only within this file.
 * Other OS-specific code that must be accessed by drivers
 * is present in netmap_kern.h
 */
#if defined(__FreeBSD__)
#include <sys/cdefs.h>		/* prerequisite */
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/param.h>		/* defines used in kernel.h */
#include <sys/kernel.h>		/* types used in module initialization */
#include <sys/conf.h>		/* cdevsw struct, UID, GID */
#include <sys/filio.h>		/* FIONBIO */
#include <sys/sockio.h>
#include <sys/socketvar.h>	/* struct socket */
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>		/* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/jail.h>
#include <sys/epoch.h>
#include <net/vnet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/bpf.h>		/* BIOCIMMEDIATE */
#include <machine/bus.h>	/* bus_dmamap_* */
#include <sys/endian.h>
#include <sys/refcount.h>
#include <net/ethernet.h>	/* ETHER_BPF_MTAP */

#elif defined(linux)

#include "bsd_glue.h"

#elif defined(__APPLE__)

#warning OSX support is only partial
#include "osx_glue.h"

#elif defined (_WIN32)

#include "win_glue.h"

#else

#error	Unsupported platform

#endif /* unsupported */

/*
 * common headers
 */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>
/* user-controlled variables */
int netmap_verbose;
#ifdef CONFIG_NETMAP_DEBUG
int netmap_debug;
#endif /* CONFIG_NETMAP_DEBUG */

static int netmap_no_timestamp;	/* don't timestamp on rxsync */
int netmap_no_pendintr = 1;
int netmap_txsync_retry = 2;
static int netmap_fwd = 0;	/* force transparent forwarding */

/*
 * netmap_admode selects the netmap mode to use.
 * Invalid values are reset to NETMAP_ADMODE_BEST
 */
enum {	NETMAP_ADMODE_BEST = 0,	/* use native, fallback to generic */
	NETMAP_ADMODE_NATIVE,	/* either native or none */
	NETMAP_ADMODE_GENERIC,	/* force generic */
	NETMAP_ADMODE_LAST };
static int netmap_admode = NETMAP_ADMODE_BEST;

/* netmap_generic_mit controls mitigation of RX notifications for
 * the generic netmap adapter. The value is a time interval in
 * nanoseconds. */
int netmap_generic_mit = 100*1000;

/* We use by default netmap-aware qdiscs with generic netmap adapters,
 * even if there can be a little performance hit with hardware NICs.
 * However, using the qdisc is the safer approach, for two reasons:
 * 1) it prevents non-fifo qdiscs from breaking the TX notification
 *    scheme, which is based on mbuf destructors when txqdisc is
 *    used;
 * 2) it makes it possible to transmit over software devices that
 *    change skb->dev, like bridge, veth, ...
 *
 * Anyway, users looking for the best performance should
 * use native adapters.
 */
#ifdef linux
int netmap_generic_txqdisc = 1;
#endif

/* Default number of slots and queues for generic adapters. */
int netmap_generic_ringsize = 1024;
int netmap_generic_rings = 1;

/* Non-zero to enable checksum offloading in NIC drivers */
int netmap_generic_hwcsum = 0;

/* Non-zero if ptnet devices are allowed to use virtio-net headers. */
int ptnet_vnet_hdr = 1;
/*
 * SYSCTL calls are grouped between SYSBEGIN and SYSEND to be emulated
 * in some other operating systems
 */
SYSBEGIN(main_init);

SYSCTL_DECL(_dev_netmap);
SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Netmap args");
SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
    CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
#ifdef CONFIG_NETMAP_DEBUG
SYSCTL_INT(_dev_netmap, OID_AUTO, debug,
    CTLFLAG_RW, &netmap_debug, 0, "Debug messages");
#endif /* CONFIG_NETMAP_DEBUG */
SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
    CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp");
SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, CTLFLAG_RW, &netmap_no_pendintr,
    0, "Always look for new received packets.");
SYSCTL_INT(_dev_netmap, OID_AUTO, txsync_retry, CTLFLAG_RW,
    &netmap_txsync_retry, 0, "Number of txsync loops in bridge's flush.");

SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0,
    "Force NR_FORWARD mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, admode, CTLFLAG_RW, &netmap_admode, 0,
    "Adapter mode. 0 selects the best option available, "
    "1 forces native adapter, 2 forces emulated adapter");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_hwcsum, CTLFLAG_RW, &netmap_generic_hwcsum,
    0, "Hardware checksums. 0 to disable checksum generation by the NIC (default), "
    "1 to enable checksum generation by the NIC");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_mit, CTLFLAG_RW, &netmap_generic_mit,
    0, "RX notification interval in nanoseconds");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_ringsize, CTLFLAG_RW,
    &netmap_generic_ringsize, 0,
    "Number of per-ring slots for emulated netmap mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_rings, CTLFLAG_RW,
    &netmap_generic_rings, 0,
    "Number of TX/RX queues for emulated netmap adapters");
#ifdef linux
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_txqdisc, CTLFLAG_RW,
    &netmap_generic_txqdisc, 0, "Use qdisc for generic adapters");
#endif
SYSCTL_INT(_dev_netmap, OID_AUTO, ptnet_vnet_hdr, CTLFLAG_RW, &ptnet_vnet_hdr,
    0, "Allow ptnet devices to use virtio-net headers");

SYSEND;
NMG_LOCK_T	netmap_global_lock;

/*
 * mark the ring as stopped, and run through the locks
 * to make sure other users get to see it.
 * stopped must be either NM_KR_STOPPED (for unbounded stop)
 * or NM_KR_LOCKED (brief stop for mutual exclusion purposes)
 */
static void
netmap_disable_ring(struct netmap_kring *kr, int stopped)
{
	nm_kr_stop(kr, stopped);
	// XXX check if nm_kr_stop is sufficient
	mtx_lock(&kr->q_lock);
	mtx_unlock(&kr->q_lock);
	nm_kr_put(kr);
}
/* stop or enable a single ring */
void
netmap_set_ring(struct netmap_adapter *na, u_int ring_id, enum txrx t, int stopped)
{
	if (stopped)
		netmap_disable_ring(NMR(na, t)[ring_id], stopped);
	else
		NMR(na, t)[ring_id]->nkr_stopped = 0;
}


/* stop or enable all the rings of na */
void
netmap_set_all_rings(struct netmap_adapter *na, int stopped)
{
	int i;
	enum txrx t;

	if (!nm_netmap_on(na))
		return;

	if (netmap_verbose) {
		nm_prinf("%s: %sable all rings", na->name,
		    (stopped ? "dis" : "en"));
	}
	for_rx_tx(t) {
		for (i = 0; i < netmap_real_rings(na, t); i++) {
			netmap_set_ring(na, i, t, stopped);
		}
	}
}
/*
 * Convenience function used in drivers. Waits for current txsync()s/rxsync()s
 * to finish and prevents any new one from starting. Call this before turning
 * netmap mode off, or before removing the hardware rings (e.g., on module
 * unload).
 */
void
netmap_disable_all_rings(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		netmap_set_all_rings(NA(ifp), NM_KR_STOPPED);
	}
}

/*
 * Convenience function used in drivers. Re-enables rxsync and txsync on the
 * adapter's rings. In linux drivers, this should be placed near each
 * napi_enable().
 */
void
netmap_enable_all_rings(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		netmap_set_all_rings(NA(ifp), 0 /* enabled */);
	}
}

void
netmap_make_zombie(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		struct netmap_adapter *na = NA(ifp);
		netmap_set_all_rings(na, NM_KR_LOCKED);
		na->na_flags |= NAF_ZOMBIE;
		netmap_set_all_rings(na, 0);
	}
}

void
netmap_undo_zombie(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		struct netmap_adapter *na = NA(ifp);
		if (na->na_flags & NAF_ZOMBIE) {
			netmap_set_all_rings(na, NM_KR_LOCKED);
			na->na_flags &= ~NAF_ZOMBIE;
			netmap_set_all_rings(na, 0);
		}
	}
}
/*
 * generic bound_checking function
 */
u_int
nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg)
{
	u_int oldv = *v;
	const char *op = NULL;

	if (dflt < lo)
		dflt = lo;
	if (dflt > hi)
		dflt = hi;
	if (oldv < lo) {
		*v = dflt;
		op = "Bump";
	} else if (oldv > hi) {
		*v = dflt;
		op = "Clamp";
	}
	if (op && msg)
		nm_prinf("%s %s to %d (was %d)", op, msg, *v, oldv);
	return *v;
}
/*
 * packet-dump function, user-supplied or static buffer.
 * The destination buffer must be at least 30+4*len
 */
const char *
nm_dump_buf(char *p, int len, int lim, char *dst)
{
	static char _dst[8192];
	int i, j, i0;
	static char hex[] ="0123456789abcdef";
	char *o;	/* output position */

#define P_HI(x)	hex[((x) & 0xf0)>>4]
#define P_LO(x)	hex[((x) & 0xf)]
#define P_C(x)	((x) >= 0x20 && (x) <= 0x7e ? (x) : '.')
	if (!dst)
		dst = _dst;
	if (lim <= 0 || lim > len)
		lim = len;
	o = dst;
	sprintf(o, "buf 0x%p len %d lim %d\n", p, len, lim);
	o += strlen(o);
	/* hexdump routine */
	for (i = 0; i < lim; ) {
		sprintf(o, "%5d: ", i);
		o += strlen(o);
		memset(o, ' ', 48);
		i0 = i;
		for (j=0; j < 16 && i < lim; i++, j++) {
			o[j*3] = P_HI(p[i]);
			o[j*3+1] = P_LO(p[i]);
		}
		i = i0;
		for (j=0; j < 16 && i < lim; i++, j++)
			o[j + 48] = P_C(p[i]);
		o[j+48] = '\n';
		o += j+49;
	}
	*o = '\0';
#undef P_HI
#undef P_LO
#undef P_C
	return dst;
}
/*
 * Fetch configuration from the device, to cope with dynamic
 * reconfigurations after loading the module.
 */
/* call with NMG_LOCK held */
int
netmap_update_config(struct netmap_adapter *na)
{
	struct nm_config_info info;

	bzero(&info, sizeof(info));
	if (na->nm_config == NULL ||
	    na->nm_config(na, &info)) {
		/* take whatever we had at init time */
		info.num_tx_rings = na->num_tx_rings;
		info.num_tx_descs = na->num_tx_desc;
		info.num_rx_rings = na->num_rx_rings;
		info.num_rx_descs = na->num_rx_desc;
		info.rx_buf_maxsize = na->rx_buf_maxsize;
	}

	if (na->num_tx_rings == info.num_tx_rings &&
	    na->num_tx_desc == info.num_tx_descs &&
	    na->num_rx_rings == info.num_rx_rings &&
	    na->num_rx_desc == info.num_rx_descs &&
	    na->rx_buf_maxsize == info.rx_buf_maxsize)
		return 0; /* nothing changed */
	if (na->active_fds == 0) {
		na->num_tx_rings = info.num_tx_rings;
		na->num_tx_desc = info.num_tx_descs;
		na->num_rx_rings = info.num_rx_rings;
		na->num_rx_desc = info.num_rx_descs;
		na->rx_buf_maxsize = info.rx_buf_maxsize;
		nm_prinf("configuration changed for %s: txring %d x %d, "
			"rxring %d x %d, rxbufsz %d",
			na->name, na->num_tx_rings, na->num_tx_desc,
			na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize);
		return 0;
	}
	nm_prerr("WARNING: configuration changed for %s while active: "
		"txring %d x %d, rxring %d x %d, rxbufsz %d",
		na->name, info.num_tx_rings, info.num_tx_descs,
		info.num_rx_rings, info.num_rx_descs,
		info.rx_buf_maxsize);
	return 1;
}
/* nm_sync callbacks for the host rings */
static int netmap_txsync_to_host(struct netmap_kring *kring, int flags);
static int netmap_rxsync_from_host(struct netmap_kring *kring, int flags);

/* create the krings array and initialize the fields common to all adapters.
 * The array layout is this:
 *
 *                    +----------+
 * na->tx_rings ----->|          | \
 *                    |          |  } na->num_tx_ring
 *                    |          | /
 *                    +----------+
 *                    |          |    host tx kring
 * na->rx_rings ----> +----------+
 *                    |          | \
 *                    |          |  } na->num_rx_rings
 *                    |          | /
 *                    +----------+
 *                    |          |    host rx kring
 *                    +----------+
 * na->tailroom ----->|          | \
 *                    |          |  } tailroom bytes
 *                    |          | /
 *                    +----------+
 *
 * Note: for compatibility, host krings are created even when not needed.
 * The tailroom space is currently used by vale ports for allocating leases.
 */
/* call with NMG_LOCK held */
int
netmap_krings_create(struct netmap_adapter *na, u_int tailroom)
{
	u_int i, len, ndesc;
	struct netmap_kring *kring;
	u_int n[NR_TXRX];
	enum txrx t;
	int err = 0;

	if (na->tx_rings != NULL) {
		if (netmap_debug & NM_DEBUG_ON)
			nm_prerr("warning: krings were already created");
		return 0;
	}

	/* account for the (possibly fake) host rings */
	n[NR_TX] = netmap_all_rings(na, NR_TX);
	n[NR_RX] = netmap_all_rings(na, NR_RX);

	len = (n[NR_TX] + n[NR_RX]) *
		(sizeof(struct netmap_kring) + sizeof(struct netmap_kring *))
		+ tailroom;

	na->tx_rings = nm_os_malloc((size_t)len);
	if (na->tx_rings == NULL) {
		nm_prerr("Cannot allocate krings");
		return ENOMEM;
	}
	na->rx_rings = na->tx_rings + n[NR_TX];
	na->tailroom = na->rx_rings + n[NR_RX];

	/* link the krings in the krings array */
	kring = (struct netmap_kring *)((char *)na->tailroom + tailroom);
	for (i = 0; i < n[NR_TX] + n[NR_RX]; i++) {
		na->tx_rings[i] = kring;
		kring++;
	}

	/*
	 * All fields in krings are 0 except the one initialized below.
	 * but better be explicit on important kring fields.
	 */
	for_rx_tx(t) {
		ndesc = nma_get_ndesc(na, t);
		for (i = 0; i < n[t]; i++) {
			kring = NMR(na, t)[i];
			bzero(kring, sizeof(*kring));
			kring->notify_na = na;
			kring->ring_id = i;
			kring->tx = t;
			kring->nkr_num_slots = ndesc;
			kring->nr_mode = NKR_NETMAP_OFF;
			kring->nr_pending_mode = NKR_NETMAP_OFF;
			if (i < nma_get_nrings(na, t)) {
				kring->nm_sync = (t == NR_TX ? na->nm_txsync : na->nm_rxsync);
			} else {
				if (!(na->na_flags & NAF_HOST_RINGS))
					kring->nr_kflags |= NKR_FAKERING;
				kring->nm_sync = (t == NR_TX ?
						netmap_txsync_to_host:
						netmap_rxsync_from_host);
			}
			kring->nm_notify = na->nm_notify;
			kring->rhead = kring->rcur = kring->nr_hwcur = 0;
			/*
			 * IMPORTANT: Always keep one slot empty.
			 */
			kring->rtail = kring->nr_hwtail = (t == NR_TX ? ndesc - 1 : 0);
			snprintf(kring->name, sizeof(kring->name) - 1, "%s %s%d", na->name,
					nm_txrx2str(t), i);
			nm_prdis("ktx %s h %d c %d t %d",
				kring->name, kring->rhead, kring->rcur, kring->rtail);
			err = nm_os_selinfo_init(&kring->si, kring->name);
			if (err) {
				netmap_krings_delete(na);
				return err;
			}
			mtx_init(&kring->q_lock, (t == NR_TX ? "nm_txq_lock" : "nm_rxq_lock"), NULL, MTX_DEF);
			kring->na = na;	/* setting this field marks the mutex as initialized */
		}
		err = nm_os_selinfo_init(&na->si[t], na->name);
		if (err) {
			netmap_krings_delete(na);
			return err;
		}
	}

	return 0;
}
/* undo the actions performed by netmap_krings_create */
/* call with NMG_LOCK held */
void
netmap_krings_delete(struct netmap_adapter *na)
{
	struct netmap_kring **kring = na->tx_rings;
	enum txrx t;

	if (na->tx_rings == NULL) {
		if (netmap_debug & NM_DEBUG_ON)
			nm_prerr("warning: krings were already deleted");
		return;
	}

	for_rx_tx(t)
		nm_os_selinfo_uninit(&na->si[t]);

	/* we rely on the krings layout described above */
	for ( ; kring != na->tailroom; kring++) {
		if ((*kring)->na != NULL)
			mtx_destroy(&(*kring)->q_lock);
		nm_os_selinfo_uninit(&(*kring)->si);
	}
	nm_os_free(na->tx_rings);
	na->tx_rings = na->rx_rings = na->tailroom = NULL;
}
/*
 * Destructor for NIC ports. They also have an mbuf queue
 * on the rings connected to the host stack so we need to purge
 * them first.
 */
/* call with NMG_LOCK held */
void
netmap_hw_krings_delete(struct netmap_adapter *na)
{
	u_int lim = netmap_real_rings(na, NR_RX), i;

	for (i = nma_get_nrings(na, NR_RX); i < lim; i++) {
		struct mbq *q = &NMR(na, NR_RX)[i]->rx_queue;
		nm_prdis("destroy sw mbq with len %d", mbq_len(q));
		mbq_purge(q);
		mbq_safe_fini(q);
	}
	netmap_krings_delete(na);
}
void
netmap_mem_drop(struct netmap_adapter *na)
{
	int last = netmap_mem_deref(na->nm_mem, na);
	/* if the native allocator had been overridden on regif,
	 * restore it now and drop the temporary one
	 */
	if (last && na->nm_mem_prev) {
		netmap_mem_put(na->nm_mem);
		na->nm_mem = na->nm_mem_prev;
		na->nm_mem_prev = NULL;
	}
}
/*
 * Undo everything that was done in netmap_do_regif(). In particular,
 * call nm_register(ifp,0) to stop netmap mode on the interface and
 * revert to normal operation.
 */
/* call with NMG_LOCK held */
static void netmap_unset_ringid(struct netmap_priv_d *);
static void netmap_krings_put(struct netmap_priv_d *);
void
netmap_do_unregif(struct netmap_priv_d *priv)
{
	struct netmap_adapter *na = priv->np_na;

	NMG_LOCK_ASSERT();
	na->active_fds--;
	/* unset nr_pending_mode and possibly release exclusive mode */
	netmap_krings_put(priv);

	/* XXX check whether we have to do something with monitor
	 * when rings change nr_mode. */
	if (na->active_fds <= 0) {
		/* walk through all the rings and tell any monitor
		 * that the port is going to exit netmap mode
		 */
		netmap_monitor_stop(na);
	}

	if (na->active_fds <= 0 || nm_kring_pending(priv)) {
		na->nm_register(na, 0);
	}

	/* delete rings and buffers that are no longer needed */
	netmap_mem_rings_delete(na);

	if (na->active_fds <= 0) {	/* last instance */
		/*
		 * (TO CHECK) We enter here
		 * when the last reference to this file descriptor goes
		 * away. This means we cannot have any pending poll()
		 * or interrupt routine operating on the structure.
		 * XXX The file may be closed in a thread while
		 * another thread is using it.
		 * Linux keeps the file opened until the last reference
		 * by any outstanding ioctl/poll or mmap is gone.
		 * FreeBSD does not track mmap()s (but we do) and
		 * wakes up any sleeping poll(). Need to check what
		 * happens if the close() occurs while a concurrent
		 * syscall is running.
		 */
		if (netmap_debug & NM_DEBUG_ON)
			nm_prinf("deleting last instance for %s", na->name);

		if (nm_netmap_on(na)) {
			nm_prerr("BUG: netmap on while going to delete the krings");
		}

		na->nm_krings_delete(na);

		/* restore the default number of host tx and rx rings */
		if (na->na_flags & NAF_HOST_RINGS) {
			na->num_host_tx_rings = 1;
			na->num_host_rx_rings = 1;
		} else {
			na->num_host_tx_rings = 0;
			na->num_host_rx_rings = 0;
		}
	}

	/* possibly decrement counter of tx_si/rx_si users */
	netmap_unset_ringid(priv);
	/* delete the nifp */
	netmap_mem_if_delete(na, priv->np_nifp);
	/* drop the allocator */
	netmap_mem_drop(na);
	/* mark the priv as unregistered */
	priv->np_na = NULL;
	priv->np_nifp = NULL;
}
struct netmap_priv_d*
netmap_priv_new(void)
{
	struct netmap_priv_d *priv;

	priv = nm_os_malloc(sizeof(struct netmap_priv_d));
	if (priv == NULL)
		return NULL;
	priv->np_refs = 1;
	nm_os_get_module();
	return priv;
}

/*
 * Destructor of the netmap_priv_d, called when the fd is closed
 * Action: undo all the things done by NIOCREGIF,
 * On FreeBSD we need to track whether there are active mmap()s,
 * and we use np_active_mmaps for that. On linux, the field is always 0.
 * Return: 1 if we can free priv, 0 otherwise.
 */
/* call with NMG_LOCK held */
void
netmap_priv_delete(struct netmap_priv_d *priv)
{
	struct netmap_adapter *na = priv->np_na;

	/* number of active references to this fd */
	if (--priv->np_refs > 0) {
		return;
	}
	nm_os_put_module();
	if (na) {
		netmap_do_unregif(priv);
	}
	netmap_unget_na(na, priv->np_ifp);
	bzero(priv, sizeof(*priv));	/* for safety */
	nm_os_free(priv);
}

/* call with NMG_LOCK *not* held */
void
netmap_dtor(void *data)
{
	struct netmap_priv_d *priv = data;

	NMG_LOCK();
	netmap_priv_delete(priv);
	NMG_UNLOCK();
}
/*
 * Handlers for synchronization of the rings from/to the host stack.
 * These are associated to a network interface and are just another
 * ring pair managed by userspace.
 *
 * Netmap also supports transparent forwarding (NS_FORWARD and NR_FORWARD
 * flags):
 *
 * - Before releasing buffers on hw RX rings, the application can mark
 *   them with the NS_FORWARD flag. During the next RXSYNC or poll(), they
 *   will be forwarded to the host stack, similarly to what happened if
 *   the application moved them to the host TX ring.
 *
 * - Before releasing buffers on the host RX ring, the application can
 *   mark them with the NS_FORWARD flag. During the next RXSYNC or poll(),
 *   they will be forwarded to the hw TX rings, saving the application
 *   from doing the same task in user-space.
 *
 * Transparent forwarding can be enabled per-ring, by setting the NR_FORWARD
 * flag, or globally with the netmap_fwd sysctl.
 *
 * The transfer NIC --> host is relatively easy, just encapsulate
 * into mbufs and we are done. The host --> NIC side is slightly
 * harder because there might not be room in the tx ring so it
 * might take a while before releasing the buffer.
 */
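
/*
 * A sketch of the userspace side of transparent forwarding (hedged
 * example; "i" is a slot index the application has consumed on an RX
 * ring that was opened with NR_FORWARD set in ring->flags):
 *
 *	ring->slot[i].flags |= NS_FORWARD;
 *	ring->head = ring->cur = nm_ring_next(ring, i);
 *	ioctl(fd, NIOCRXSYNC, NULL);	forwards the slot, then recycles it
 */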
/*
 * Pass a whole queue of mbufs to the host stack as coming from 'dst'
 * We do not need to lock because the queue is private.
 * After this call the queue is empty.
 */
static void
netmap_send_up(struct ifnet *dst, struct mbq *q)
{
	struct mbuf *m;
	struct mbuf *head = NULL, *prev = NULL;
#ifdef __FreeBSD__
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);
#endif /* __FreeBSD__ */
	/* Send packets up, outside the lock; head/prev machinery
	 * is only useful for Windows. */
	while ((m = mbq_dequeue(q)) != NULL) {
		if (netmap_debug & NM_DEBUG_HOST)
			nm_prinf("sending up pkt %p size %d", m, MBUF_LEN(m));
		prev = nm_os_send_up(dst, m, prev);
		if (head == NULL)
			head = prev;
	}
	if (head)
		nm_os_send_up(dst, NULL, head);
#ifdef __FreeBSD__
	NET_EPOCH_EXIT(et);
#endif /* __FreeBSD__ */
	mbq_fini(q);
}
/*
 * Scan the buffers from hwcur to ring->head, and put a copy of those
 * marked NS_FORWARD (or all of them if forced) into a queue of mbufs.
 * Drop remaining packets in the unlikely event
 * of an mbuf shortage.
 */
static void
netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force)
{
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	u_int n;
	struct netmap_adapter *na = kring->na;

	for (n = kring->nr_hwcur; n != head; n = nm_next(n, lim)) {
		struct mbuf *m;
		struct netmap_slot *slot = &kring->ring->slot[n];

		if ((slot->flags & NS_FORWARD) == 0 && !force)
			continue;
		if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE(na)) {
			nm_prlim(5, "bad pkt at %d len %d", n, slot->len);
			continue;
		}
		slot->flags &= ~NS_FORWARD; // XXX needed ?
		/* XXX TODO: adapt to the case of a multisegment packet */
		m = m_devget(NMB(na, slot), slot->len, 0, na->ifp, NULL);

		if (m == NULL)
			break;
		mbq_enqueue(q, m);
	}
}
static inline int
_nm_may_forward(struct netmap_kring *kring)
{
	return	((netmap_fwd || kring->ring->flags & NR_FORWARD) &&
		 kring->na->na_flags & NAF_HOST_RINGS &&
		 kring->tx == NR_RX);
}

static inline int
nm_may_forward_up(struct netmap_kring *kring)
{
	return	_nm_may_forward(kring) &&
		 kring->ring_id != kring->na->num_rx_rings;
}

static inline int
nm_may_forward_down(struct netmap_kring *kring, int sync_flags)
{
	return	_nm_may_forward(kring) &&
		 (sync_flags & NAF_CAN_FORWARD_DOWN) &&
		 kring->ring_id == kring->na->num_rx_rings;
}
/*
 * Send to the NIC rings packets marked NS_FORWARD between
 * kring->nr_hwcur and kring->rhead.
 * Called under kring->rx_queue.lock on the sw rx ring.
 *
 * It can only be called if the user opened all the TX hw rings,
 * see NAF_CAN_FORWARD_DOWN flag.
 * We can touch the TX netmap rings (slots, head and cur) since
 * we are in poll/ioctl system call context, and the application
 * is not supposed to touch the ring (using a different thread)
 * during the execution of the system call.
 */
static u_int
netmap_sw_to_nic(struct netmap_adapter *na)
{
	struct netmap_kring *kring = na->rx_rings[na->num_rx_rings];
	struct netmap_slot *rxslot = kring->ring->slot;
	u_int i, rxcur = kring->nr_hwcur;
	u_int const head = kring->rhead;
	u_int const src_lim = kring->nkr_num_slots - 1;
	u_int sent = 0;

	/* scan rings to find space, then fill as much as possible */
	for (i = 0; i < na->num_tx_rings; i++) {
		struct netmap_kring *kdst = na->tx_rings[i];
		struct netmap_ring *rdst = kdst->ring;
		u_int const dst_lim = kdst->nkr_num_slots - 1;

		/* XXX do we trust ring or kring->rcur,rtail ? */
		for (; rxcur != head && !nm_ring_empty(rdst);
		     rxcur = nm_next(rxcur, src_lim) ) {
			struct netmap_slot *src, *dst, tmp;
			u_int dst_head = rdst->head;

			src = &rxslot[rxcur];
			if ((src->flags & NS_FORWARD) == 0 && !netmap_fwd)
				continue;

			sent++;

			dst = &rdst->slot[dst_head];

			tmp = *src;

			src->buf_idx = dst->buf_idx;
			src->flags = NS_BUF_CHANGED;

			dst->buf_idx = tmp.buf_idx;
			dst->len = tmp.len;
			dst->flags = NS_BUF_CHANGED;

			rdst->head = rdst->cur = nm_next(dst_head, dst_lim);
		}
		/* if (sent) XXX txsync ? it would be just an optimization */
	}
	return sent;
}
/*
 * netmap_txsync_to_host() passes packets up. We are called from a
 * system call in user process context, and the only contention
 * can be among multiple user threads erroneously calling
 * this routine concurrently.
 */
static int
netmap_txsync_to_host(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	struct mbq q;

	/* Take packets from hwcur to head and pass them up.
	 * Force hwcur = head since netmap_grab_packets() stops at head
	 */
	mbq_init(&q);
	netmap_grab_packets(kring, &q, 1 /* force */);
	nm_prdis("have %d pkts in queue", mbq_len(&q));
	kring->nr_hwcur = head;
	kring->nr_hwtail = head + lim;
	if (kring->nr_hwtail > lim)
		kring->nr_hwtail -= lim + 1;

	netmap_send_up(na->ifp, &q);
	return 0;
}
/*
 * rxsync backend for packets coming from the host stack.
 * They have been put in kring->rx_queue by netmap_transmit().
 * We protect access to the kring using kring->rx_queue.lock
 *
 * also moves to the nic hw rings any packet the user has marked
 * for transparent-mode forwarding, then sets the NR_FORWARD
 * flag in the kring to let the caller push them out
 */
static int
netmap_rxsync_from_host(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i, n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int ret = 0;
	struct mbq *q = &kring->rx_queue, fq;

	mbq_init(&fq); /* fq holds packets to be freed */

	mbq_lock(q);

	/* First part: import newly received packets */
	n = mbq_len(q);
	if (n) { /* grab packets from the queue */
		struct mbuf *m;
		uint32_t stop_i;

		nm_i = kring->nr_hwtail;
		stop_i = nm_prev(kring->nr_hwcur, lim);
		while ( nm_i != stop_i && (m = mbq_dequeue(q)) != NULL ) {
			int len = MBUF_LEN(m);
			struct netmap_slot *slot = &ring->slot[nm_i];

			m_copydata(m, 0, len, NMB(na, slot));
			nm_prdis("nm %d len %d", nm_i, len);
			if (netmap_debug & NM_DEBUG_HOST)
				nm_prinf("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL));

			slot->len = len;
			slot->flags = 0;
			nm_i = nm_next(nm_i, lim);
			mbq_enqueue(&fq, m);
		}
		kring->nr_hwtail = nm_i;
	}

	/*
	 * Second part: skip past packets that userspace has released.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) { /* something was released */
		if (nm_may_forward_down(kring, flags)) {
			ret = netmap_sw_to_nic(na);
			if (ret > 0) {
				kring->nr_kflags |= NR_FORWARD;
				ret = 0;
			}
		}
		kring->nr_hwcur = head;
	}

	mbq_unlock(q);

	mbq_purge(&fq);
	mbq_fini(&fq);

	return ret;
}
/* Get a netmap adapter for the port.
 *
 * If it is possible to satisfy the request, return 0
 * with *na containing the netmap adapter found.
 * Otherwise return an error code, with *na containing NULL.
 *
 * When the port is attached to a bridge, we always return
 * EBUSY.
 * Otherwise, if the port is already bound to a file descriptor,
 * then we unconditionally return the existing adapter into *na.
 * In all the other cases, we return (into *na) either native,
 * generic or NULL, according to the following table:
 *
 *					native adapter support
 * active_fds   dev.netmap.admode         YES             NO
 * -------------------------------------------------------
 *    >0              *                 NA(ifp)         NA(ifp)
 *
 *     0        NETMAP_ADMODE_BEST      NATIVE          GENERIC
 *     0        NETMAP_ADMODE_NATIVE    NATIVE          NULL
 *     0        NETMAP_ADMODE_GENERIC   GENERIC         GENERIC
 *
 */
static void netmap_hw_dtor(struct netmap_adapter *);	/* needed by NM_IS_NATIVE() */
static int
netmap_get_hw_na(struct ifnet *ifp, struct netmap_mem_d *nmd, struct netmap_adapter **na)
{
	/* generic support */
	int i = netmap_admode;	/* Take a snapshot. */
	struct netmap_adapter *prev_na;
	int error = 0;

	*na = NULL; /* default */

	/* reset in case of invalid value */
	if (i < NETMAP_ADMODE_BEST || i >= NETMAP_ADMODE_LAST)
		i = netmap_admode = NETMAP_ADMODE_BEST;

	if (NM_NA_VALID(ifp)) {
		prev_na = NA(ifp);
		/* If an adapter already exists, return it if
		 * there are active file descriptors or if
		 * netmap is not forced to use generic
		 * adapters.
		 */
		if (NETMAP_OWNED_BY_ANY(prev_na)
			|| i != NETMAP_ADMODE_GENERIC
			|| prev_na->na_flags & NAF_FORCE_NATIVE
			/* ugly, but we cannot allow an adapter switch
			 * if some pipe is referring to this one
			 */
			|| prev_na->na_next_pipe > 0
		) {
			*na = prev_na;
			goto assign_mem;
		}
	}

	/* If there isn't native support and netmap is not allowed
	 * to use generic adapters, we cannot satisfy the request.
	 */
	if (!NM_IS_NATIVE(ifp) && i == NETMAP_ADMODE_NATIVE)
		return EOPNOTSUPP;

	/* Otherwise, create a generic adapter and return it,
	 * saving the previously used netmap adapter, if any.
	 *
	 * Note that here 'prev_na', if not NULL, MUST be a
	 * native adapter, and CANNOT be a generic one. This is
	 * true because generic adapters are created on demand, and
	 * destroyed when not used anymore. Therefore, if the adapter
	 * currently attached to an interface 'ifp' is generic, it
	 * must be that
	 * (NA(ifp)->active_fds > 0 || NETMAP_OWNED_BY_KERN(NA(ifp))).
	 * Consequently, if NA(ifp) is generic, we will enter one of
	 * the branches above. This ensures that we never override
	 * a generic adapter with another generic adapter.
	 */
	error = generic_netmap_attach(ifp);
	if (error)
		return error;

	*na = NA(ifp);

assign_mem:
	if (nmd != NULL && !((*na)->na_flags & NAF_MEM_OWNER) &&
	    (*na)->active_fds == 0 && ((*na)->nm_mem != nmd)) {
		(*na)->nm_mem_prev = (*na)->nm_mem;
		(*na)->nm_mem = netmap_mem_get(nmd);
	}

	return 0;
}
/*
 * MUST BE CALLED UNDER NMG_LOCK()
 *
 * Get a refcounted reference to a netmap adapter attached
 * to the interface specified by req.
 * This is always called in the execution of an ioctl().
 *
 * Return ENXIO if the interface specified by the request does
 * not exist, ENOTSUP if netmap is not supported by the interface,
 * EBUSY if the interface is already attached to a bridge,
 * EINVAL if parameters are invalid, ENOMEM if needed resources
 * could not be allocated.
 * If successful, hold a reference to the netmap adapter.
 *
 * If the interface specified by req is a system one, also keep
 * a reference to it and return a valid *ifp.
 */
int
netmap_get_na(struct nmreq_header *hdr,
	      struct netmap_adapter **na, struct ifnet **ifp,
	      struct netmap_mem_d *nmd, int create)
{
	struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
	int error = 0;
	struct netmap_adapter *ret = NULL;
	int nmd_ref = 0;

	*na = NULL; /* default return value */
	*ifp = NULL;

	if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) {
		return EINVAL;
	}

	if (req->nr_mode == NR_REG_PIPE_MASTER ||
			req->nr_mode == NR_REG_PIPE_SLAVE) {
		/* Do not accept deprecated pipe modes. */
		nm_prerr("Deprecated pipe nr_mode, use xx{yy or xx}yy syntax");
		return EINVAL;
	}

	NMG_LOCK_ASSERT();

	/* if the request contains a memid, try to find the
	 * corresponding memory region
	 */
	if (nmd == NULL && req->nr_mem_id) {
		nmd = netmap_mem_find(req->nr_mem_id);
		if (nmd == NULL)
			return EINVAL;
		/* keep the reference */
		nmd_ref = 1;
	}

	/* We cascade through all possible types of netmap adapter.
	 * All netmap_get_*_na() functions return an error and an na,
	 * with the following combinations:
	 *
	 * error    na
	 *   0     NULL	type doesn't match
	 *  !0     NULL	type matches, but na creation/lookup failed
	 *   0    !NULL	type matches and na created/found
	 *  !0    !NULL	impossible
	 */
	error = netmap_get_null_na(hdr, na, nmd, create);
	if (error || *na != NULL)
		goto out;

	/* try to see if this is a monitor port */
	error = netmap_get_monitor_na(hdr, na, nmd, create);
	if (error || *na != NULL)
		goto out;

	/* try to see if this is a pipe port */
	error = netmap_get_pipe_na(hdr, na, nmd, create);
	if (error || *na != NULL)
		goto out;

	/* try to see if this is a bridge port */
	error = netmap_get_vale_na(hdr, na, nmd, create);
	if (error)
		goto out;

	if (*na != NULL) /* valid match in netmap_get_bdg_na() */
		goto out;

	/*
	 * This must be a hardware na, lookup the name in the system.
	 * Note that by hardware we actually mean "it shows up in ifconfig".
	 * This may still be a tap, a veth/epair, or even a
	 * persistent VALE port.
	 */
	*ifp = ifunit_ref(hdr->nr_name);
	if (*ifp == NULL) {
		error = ENXIO;
		goto out;
	}

	error = netmap_get_hw_na(*ifp, nmd, &ret);
	if (error)
		goto out;

	*na = ret;
	netmap_adapter_get(ret);

	/*
	 * if the adapter supports the host rings and it is not already open,
	 * try to set the number of host rings as requested by the user
	 */
	if (((*na)->na_flags & NAF_HOST_RINGS) && (*na)->active_fds == 0) {
		if (req->nr_host_tx_rings)
			(*na)->num_host_tx_rings = req->nr_host_tx_rings;
		if (req->nr_host_rx_rings)
			(*na)->num_host_rx_rings = req->nr_host_rx_rings;
	}
	nm_prdis("%s: host tx %d rx %u", (*na)->name, (*na)->num_host_tx_rings,
			(*na)->num_host_rx_rings);

out:
	if (error) {
		if (ret)
			netmap_adapter_put(ret);
		if (*ifp) {
			if_rele(*ifp);
			*ifp = NULL;
		}
	}
	if (nmd_ref)
		netmap_mem_put(nmd);

	return error;
}
/* undo netmap_get_na() */
void
netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp)
{
	if (ifp)
		if_rele(ifp);
	if (na)
		netmap_adapter_put(na);
}


#define NM_FAIL_ON(t) do {						\
	if (unlikely(t)) {						\
		nm_prlim(5, "%s: fail '" #t "' "			\
			"h %d c %d t %d "				\
			"rh %d rc %d rt %d "				\
			"hc %d ht %d",					\
			kring->name,					\
			head, cur, ring->tail,				\
			kring->rhead, kring->rcur, kring->rtail,	\
			kring->nr_hwcur, kring->nr_hwtail);		\
		return kring->nkr_num_slots;				\
	}								\
} while (0)
/*
 * validate parameters on entry for *_txsync()
 * Returns ring->cur if ok, or something >= kring->nkr_num_slots
 * in case of error.
 *
 * rhead, rcur and rtail=hwtail are stored from previous round.
 * hwcur is the next packet to send to the ring.
 *
 * We want
 *    hwcur <= *rhead <= head <= cur <= tail = *rtail <= hwtail
 *
 * hwcur, rhead, rtail and hwtail are reliable
 */
u_int
nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
{
	u_int head = ring->head; /* read only once */
	u_int cur = ring->cur; /* read only once */
	u_int n = kring->nkr_num_slots;

	nm_prdis(5, "%s kcur %d ktail %d head %d cur %d tail %d",
		kring->name,
		kring->nr_hwcur, kring->nr_hwtail,
		ring->head, ring->cur, ring->tail);
#if 1 /* kernel sanity checks; but we can trust the kring. */
	NM_FAIL_ON(kring->nr_hwcur >= n || kring->rhead >= n ||
	    kring->rtail >= n ||  kring->nr_hwtail >= n);
#endif /* kernel sanity checks */
	/*
	 * user sanity checks. We only use head,
	 * A, B, ... are possible positions for head:
	 *
	 *  0    A  rhead   B  rtail   C  n-1
	 *  0    D  rtail   E  rhead   F  n-1
	 *
	 * B, F, D are valid. A, C, E are wrong
	 */
	if (kring->rtail >= kring->rhead) {
		/* want rhead <= head <= rtail */
		NM_FAIL_ON(head < kring->rhead || head > kring->rtail);
		/* and also head <= cur <= rtail */
		NM_FAIL_ON(cur < head || cur > kring->rtail);
	} else { /* here rtail < rhead */
		/* we need head outside rtail .. rhead */
		NM_FAIL_ON(head > kring->rtail && head < kring->rhead);

		/* two cases now: head <= rtail or head >= rhead  */
		if (head <= kring->rtail) {
			/* want head <= cur <= rtail */
			NM_FAIL_ON(cur < head || cur > kring->rtail);
		} else { /* head >= rhead */
			/* cur must be outside rtail..head */
			NM_FAIL_ON(cur > kring->rtail && cur < head);
		}
	}
	if (ring->tail != kring->rtail) {
		nm_prlim(5, "%s tail overwritten was %d need %d", kring->name,
			ring->tail, kring->rtail);
		ring->tail = kring->rtail;
	}
	kring->rhead = head;
	kring->rcur = cur;
	return head;
}
/*
 * validate parameters on entry for *_rxsync()
 * Returns ring->head if ok, kring->nkr_num_slots on error.
 *
 * For a valid configuration,
 * hwcur <= head <= cur <= tail <= hwtail
 *
 * We only consider head and cur.
 * hwcur and hwtail are reliable.
 */
u_int
nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
{
	uint32_t const n = kring->nkr_num_slots;
	uint32_t head, cur;

	nm_prdis(5,"%s kc %d kt %d h %d c %d t %d",
		kring->name,
		kring->nr_hwcur, kring->nr_hwtail,
		ring->head, ring->cur, ring->tail);
	/*
	 * Before storing the new values, we should check they do not
	 * move backwards. However:
	 * - head is not an issue because the previous value is hwcur;
	 * - cur could in principle go back, however it does not matter
	 *   because we are processing a brand new rxsync()
	 */
	cur = kring->rcur = ring->cur;	/* read only once */
	head = kring->rhead = ring->head;	/* read only once */
#if 1 /* kernel sanity checks */
	NM_FAIL_ON(kring->nr_hwcur >= n || kring->nr_hwtail >= n);
#endif /* kernel sanity checks */
	/* user sanity checks */
	if (kring->nr_hwtail >= kring->nr_hwcur) {
		/* want hwcur <= rhead <= hwtail */
		NM_FAIL_ON(head < kring->nr_hwcur || head > kring->nr_hwtail);
		/* and also rhead <= rcur <= hwtail */
		NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
	} else {
		/* we need rhead outside hwtail..hwcur */
		NM_FAIL_ON(head < kring->nr_hwcur && head > kring->nr_hwtail);
		/* two cases now: head <= hwtail or head >= hwcur  */
		if (head <= kring->nr_hwtail) {
			/* want head <= cur <= hwtail */
			NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
		} else {
			/* cur must be outside hwtail..head */
			NM_FAIL_ON(cur < head && cur > kring->nr_hwtail);
		}
	}
	if (ring->tail != kring->rtail) {
		nm_prlim(5, "%s tail overwritten was %d need %d",
			kring->name,
			ring->tail, kring->rtail);
		ring->tail = kring->rtail;
	}
	return head;
}
/*
 * Error routine called when txsync/rxsync detects an error.
 * Can't do much more than resetting head = cur = hwcur, tail = hwtail
 * Return 1 on reinit.
 *
 * This routine is only called by the upper half of the kernel.
 * It only reads hwcur (which is changed only by the upper half, too)
 * and hwtail (which may be changed by the lower half, but only on
 * a tx ring and only to increase it, so any error will be recovered
 * on the next call). For the above, we don't strictly need to call
 * it under lock.
 */
int
netmap_ring_reinit(struct netmap_kring *kring)
{
	struct netmap_ring *ring = kring->ring;
	u_int i, lim = kring->nkr_num_slots - 1;
	int errors = 0;

	// XXX KASSERT nm_kr_tryget
	nm_prlim(10, "called for %s", kring->name);
	// XXX probably wrong to trust userspace
	kring->rhead = ring->head;
	kring->rcur  = ring->cur;
	kring->rtail = ring->tail;

	if (ring->cur > lim)
		errors++;
	if (ring->head > lim)
		errors++;
	if (ring->tail > lim)
		errors++;
	for (i = 0; i <= lim; i++) {
		u_int idx = ring->slot[i].buf_idx;
		u_int len = ring->slot[i].len;
		if (idx < 2 || idx >= kring->na->na_lut.objtotal) {
			nm_prlim(5, "bad index at slot %d idx %d len %d ", i, idx, len);
			ring->slot[i].buf_idx = 0;
			ring->slot[i].len = 0;
		} else if (len > NETMAP_BUF_SIZE(kring->na)) {
			ring->slot[i].len = 0;
			nm_prlim(5, "bad len at slot %d idx %d len %d", i, idx, len);
		}
	}
	if (errors) {
		nm_prlim(10, "total %d errors", errors);
		nm_prlim(10, "%s reinit, cur %d -> %d tail %d -> %d",
			kring->name,
			ring->cur, kring->nr_hwcur,
			ring->tail, kring->nr_hwtail);
		ring->head = kring->rhead = kring->nr_hwcur;
		ring->cur  = kring->rcur  = kring->nr_hwcur;
		ring->tail = kring->rtail = kring->nr_hwtail;
	}
	return (errors ? 1 : 0);
}
/* interpret the ringid and flags fields of an nmreq, by translating them
 * into a pair of intervals of ring indices:
 *
 * [priv->np_txqfirst, priv->np_txqlast) and
 * [priv->np_rxqfirst, priv->np_rxqlast)
 *
 */
int
netmap_interp_ringid(struct netmap_priv_d *priv, uint32_t nr_mode,
			uint16_t nr_ringid, uint64_t nr_flags)
{
	struct netmap_adapter *na = priv->np_na;
	int excluded_direction[] = { NR_TX_RINGS_ONLY, NR_RX_RINGS_ONLY };
	enum txrx t;
	u_int j;

	for_rx_tx(t) {
		if (nr_flags & excluded_direction[t]) {
			priv->np_qfirst[t] = priv->np_qlast[t] = 0;
			continue;
		}
		switch (nr_mode) {
		case NR_REG_ALL_NIC:
		case NR_REG_NULL:
			priv->np_qfirst[t] = 0;
			priv->np_qlast[t] = nma_get_nrings(na, t);
			nm_prdis("ALL/PIPE: %s %d %d", nm_txrx2str(t),
				priv->np_qfirst[t], priv->np_qlast[t]);
			break;
		case NR_REG_SW:
		case NR_REG_NIC_SW:
			if (!(na->na_flags & NAF_HOST_RINGS)) {
				nm_prerr("host rings not supported");
				return EINVAL;
			}
			priv->np_qfirst[t] = (nr_mode == NR_REG_SW ?
				nma_get_nrings(na, t) : 0);
			priv->np_qlast[t] = netmap_all_rings(na, t);
			nm_prdis("%s: %s %d %d", nr_mode == NR_REG_SW ? "SW" : "NIC+SW",
				nm_txrx2str(t),
				priv->np_qfirst[t], priv->np_qlast[t]);
			break;
		case NR_REG_ONE_NIC:
			if (nr_ringid >= na->num_tx_rings &&
					nr_ringid >= na->num_rx_rings) {
				nm_prerr("invalid ring id %d", nr_ringid);
				return EINVAL;
			}
			/* if not enough rings, use the first one */
			j = nr_ringid;
			if (j >= nma_get_nrings(na, t))
				j = 0;
			priv->np_qfirst[t] = j;
			priv->np_qlast[t] = j + 1;
			nm_prdis("ONE_NIC: %s %d %d", nm_txrx2str(t),
				priv->np_qfirst[t], priv->np_qlast[t]);
			break;
		case NR_REG_ONE_SW:
			if (!(na->na_flags & NAF_HOST_RINGS)) {
				nm_prerr("host rings not supported");
				return EINVAL;
			}
			if (nr_ringid >= na->num_host_tx_rings &&
					nr_ringid >= na->num_host_rx_rings) {
				nm_prerr("invalid ring id %d", nr_ringid);
				return EINVAL;
			}
			/* if not enough rings, use the first one */
			j = nr_ringid;
			if (j >= nma_get_host_nrings(na, t))
				j = 0;
			priv->np_qfirst[t] = nma_get_nrings(na, t) + j;
			priv->np_qlast[t] = nma_get_nrings(na, t) + j + 1;
			nm_prdis("ONE_SW: %s %d %d", nm_txrx2str(t),
				priv->np_qfirst[t], priv->np_qlast[t]);
			break;
		default:
			nm_prerr("invalid regif type %d", nr_mode);
			return EINVAL;
		}
	}
	priv->np_flags = nr_flags;

	/* Allow transparent forwarding mode in the host --> nic
	 * direction only if all the TX hw rings have been opened. */
	if (priv->np_qfirst[NR_TX] == 0 &&
			priv->np_qlast[NR_TX] >= na->num_tx_rings) {
		priv->np_sync_flags |= NAF_CAN_FORWARD_DOWN;
	}

	if (netmap_verbose) {
		nm_prinf("%s: tx [%d,%d) rx [%d,%d) id %d",
			na->name,
			priv->np_qfirst[NR_TX],
			priv->np_qlast[NR_TX],
			priv->np_qfirst[NR_RX],
			priv->np_qlast[NR_RX],
			nr_ringid);
	}
	return 0;
}
/*
 * Set the ring ID. For devices with a single queue, a request
 * for all rings is the same as a single ring.
 */
static int
netmap_set_ringid(struct netmap_priv_d *priv, uint32_t nr_mode,
		uint16_t nr_ringid, uint64_t nr_flags)
{
	struct netmap_adapter *na = priv->np_na;
	int error;
	enum txrx t;

	error = netmap_interp_ringid(priv, nr_mode, nr_ringid, nr_flags);
	if (error) {
		return error;
	}

	priv->np_txpoll = (nr_flags & NR_NO_TX_POLL) ? 0 : 1;

	/* optimization: count the users registered for more than
	 * one ring, which are the ones sleeping on the global queue.
	 * The default netmap_notify() callback will then
	 * avoid signaling the global queue if nobody is using it
	 */
	for_rx_tx(t) {
		if (nm_si_user(priv, t))
			na->si_users[t]++;
	}
	return 0;
}

static void
netmap_unset_ringid(struct netmap_priv_d *priv)
{
	struct netmap_adapter *na = priv->np_na;
	enum txrx t;

	for_rx_tx(t) {
		if (nm_si_user(priv, t))
			na->si_users[t]--;
		priv->np_qfirst[t] = priv->np_qlast[t] = 0;
	}
	priv->np_txpoll = 0;
	priv->np_kloop_state = 0;
}
1989 /* Set the nr_pending_mode for the requested rings.
1990 * If requested, also try to get exclusive access to the rings, provided
1991 * the rings we want to bind are not exclusively owned by a previous bind.
1994 netmap_krings_get(struct netmap_priv_d *priv)
1996 struct netmap_adapter *na = priv->np_na;
1998 struct netmap_kring *kring;
1999 int excl = (priv->np_flags & NR_EXCLUSIVE);
2002 if (netmap_debug & NM_DEBUG_ON)
2003 nm_prinf("%s: grabbing tx [%d, %d) rx [%d, %d)",
2005 priv->np_qfirst[NR_TX],
2006 priv->np_qlast[NR_TX],
2007 priv->np_qfirst[NR_RX],
2008 priv->np_qlast[NR_RX]);
2010 /* first round: check that all the requested rings
2011 * are not already exclusively owned, and that we are not
2012 * asking for exclusive ownership when they are already in use
2015 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
2016 kring = NMR(na, t)[i];
2017 if ((kring->nr_kflags & NKR_EXCLUSIVE) ||
2018 (kring->users && excl))
2020 nm_prdis("ring %s busy", kring->name);
2026 /* second round: increment usage count (possibly marking them
2027 * as exclusive) and set the nr_pending_mode
2030 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
2031 kring = NMR(na, t)[i];
2034 kring->nr_kflags |= NKR_EXCLUSIVE;
2035 kring->nr_pending_mode = NKR_NETMAP_ON;
2043 /* Undo netmap_krings_get(). This is done by clearing the exclusive mode
2044 * if it was requested at regif time, and by unsetting the nr_pending_mode
2045 * if we are the last users of the involved rings. */
2047 netmap_krings_put(struct netmap_priv_d *priv)
2049 struct netmap_adapter *na = priv->np_na;
2051 struct netmap_kring *kring;
2052 int excl = (priv->np_flags & NR_EXCLUSIVE);
2055 nm_prdis("%s: releasing tx [%d, %d) rx [%d, %d)",
2057 priv->np_qfirst[NR_TX],
2058 priv->np_qlast[NR_TX],
2059 priv->np_qfirst[NR_RX],
2060 priv->np_qlast[NR_RX]);
2063 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
2064 kring = NMR(na, t)[i];
2066 kring->nr_kflags &= ~NKR_EXCLUSIVE;
2068 if (kring->users == 0)
2069 kring->nr_pending_mode = NKR_NETMAP_OFF;
2075 nm_priv_rx_enabled(struct netmap_priv_d *priv)
2077 return (priv->np_qfirst[NR_RX] != priv->np_qlast[NR_RX]);
2080 /* Validate the CSB entries for both directions (atok and ktoa).
2081 * To be called under NMG_LOCK(). */
2083 netmap_csb_validate(struct netmap_priv_d *priv, struct nmreq_opt_csb *csbo)
2085 struct nm_csb_atok *csb_atok_base =
2086 (struct nm_csb_atok *)(uintptr_t)csbo->csb_atok;
2087 struct nm_csb_ktoa *csb_ktoa_base =
2088 (struct nm_csb_ktoa *)(uintptr_t)csbo->csb_ktoa;
2090 int num_rings[NR_TXRX], tot_rings;
2091 size_t entry_size[2];
2095 if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) {
2096 nm_prerr("Cannot update CSB while kloop is running");
2102 num_rings[t] = priv->np_qlast[t] - priv->np_qfirst[t];
2103 tot_rings += num_rings[t];
2108 if (!(priv->np_flags & NR_EXCLUSIVE)) {
2109 nm_prerr("CSB mode requires NR_EXCLUSIVE");
2113 entry_size[0] = sizeof(*csb_atok_base);
2114 entry_size[1] = sizeof(*csb_ktoa_base);
2115 csb_start[0] = (void *)csb_atok_base;
2116 csb_start[1] = (void *)csb_ktoa_base;
2118 for (i = 0; i < 2; i++) {
2119 /* On Linux we could use access_ok() to simplify
2120 * the validation. However, the advantage of
2121 * this approach is that it works also on
2122 * FreeBSD. */
2123 size_t csb_size = tot_rings * entry_size[i];
2127 if ((uintptr_t)csb_start[i] & (entry_size[i]-1)) {
2128 nm_prerr("Unaligned CSB address");
2132 tmp = nm_os_malloc(csb_size);
2136 /* Application --> kernel direction. */
2137 err = copyin(csb_start[i], tmp, csb_size);
2139 /* Kernel --> application direction. */
2140 memset(tmp, 0, csb_size);
2141 err = copyout(tmp, csb_start[i], csb_size);
2145 nm_prerr("Invalid CSB address");
2150 priv->np_csb_atok_base = csb_atok_base;
2151 priv->np_csb_ktoa_base = csb_ktoa_base;
2153 /* Initialize the CSB. */
2155 for (i = 0; i < num_rings[t]; i++) {
2156 struct netmap_kring *kring =
2157 NMR(priv->np_na, t)[i + priv->np_qfirst[t]];
2158 struct nm_csb_atok *csb_atok = csb_atok_base + i;
2159 struct nm_csb_ktoa *csb_ktoa = csb_ktoa_base + i;
2162 csb_atok += num_rings[NR_TX];
2163 csb_ktoa += num_rings[NR_TX];
2166 CSB_WRITE(csb_atok, head, kring->rhead);
2167 CSB_WRITE(csb_atok, cur, kring->rcur);
2168 CSB_WRITE(csb_atok, appl_need_kick, 1);
2169 CSB_WRITE(csb_atok, sync_flags, 1);
2170 CSB_WRITE(csb_ktoa, hwcur, kring->nr_hwcur);
2171 CSB_WRITE(csb_ktoa, hwtail, kring->nr_hwtail);
2172 CSB_WRITE(csb_ktoa, kern_need_kick, 1);
2174 nm_prinf("csb_init for kring %s: head %u, cur %u, "
2175 "hwcur %u, hwtail %u", kring->name,
2176 kring->rhead, kring->rcur, kring->nr_hwcur,
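/*
 * For reference, the userspace counterpart of the layout validated
 * above (a sketch, not a normative example; nro_opt is the embedded
 * option header of struct nmreq_opt_csb): the application provides one
 * atok/ktoa entry per bound ring, TX entries first and RX entries
 * next, and chains the option to its register request:
 *
 *	struct nmreq_opt_csb opt;
 *
 *	memset(&opt, 0, sizeof(opt));
 *	opt.nro_opt.nro_reqtype = NETMAP_REQ_OPT_CSB;
 *	opt.csb_atok = (uintptr_t)atok_array;	// tot_rings entries
 *	opt.csb_ktoa = (uintptr_t)ktoa_array;	// tot_rings entries
 *	opt.nro_opt.nro_next = hdr.nr_options;
 *	hdr.nr_options = (uintptr_t)&opt;
 *
 * Both arrays must be aligned to their entry size, as checked above.
 */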
2184 /* Ensure that the netmap adapter can support the given MTU.
2185 * @return EINVAL if the na cannot be set to mtu, 0 otherwise.
2188 netmap_buf_size_validate(const struct netmap_adapter *na, unsigned mtu) {
2189 unsigned nbs = NETMAP_BUF_SIZE(na);
2191 if (mtu <= na->rx_buf_maxsize) {
2192 /* The MTU fits a single NIC slot. We only
2193 * need to check that netmap buffers are
2194 * large enough to hold an MTU. NS_MOREFRAG
2195 * cannot be used in this case. */
2197 nm_prerr("error: netmap buf size (%u) "
2198 "< device MTU (%u)", nbs, mtu);
2202 /* More NIC slots may be needed to receive
2203 * or transmit a single packet. Check that
2204 * the adapter supports NS_MOREFRAG and that
2205 * netmap buffers are large enough to hold
2206 * the maximum per-slot size. */
2207 if (!(na->na_flags & NAF_MOREFRAG)) {
2208 nm_prerr("error: large MTU (%d) needed "
2209 "but %s does not support "
2213 } else if (nbs < na->rx_buf_maxsize) {
2214 nm_prerr("error: using NS_MOREFRAG on "
2215 "%s requires netmap buf size "
2216 ">= %u", na->ifp->if_xname,
2217 na->rx_buf_maxsize);
2218 return EINVAL;
2219 } else {
2220 nm_prinf("info: netmap application on "
2221 "%s needs to support "
2222 "NS_MOREFRAG "
2223 "(MTU=%u,netmap_buf_size=%u)",
2224 na->ifp->if_xname, mtu, nbs);
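/*
 * Worked example: with a 2048-byte netmap buffer size, an MTU of 1500
 * takes the first branch and needs no NS_MOREFRAG, while an MTU of
 * 9000 is accepted only if the adapter sets NAF_MOREFRAG and
 * rx_buf_maxsize <= nbs; a 9000-byte frame then spans multiple
 * NS_MOREFRAG-chained slots of at most rx_buf_maxsize bytes each.
 */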
2232 * possibly move the interface to netmap-mode.
2233 * If success it returns a pointer to netmap_if, otherwise NULL.
2234 * This must be called with NMG_LOCK held.
2236 * The following na callbacks are called in the process:
2238 * na->nm_config() [by netmap_update_config]
2239 * (get current number and size of rings)
2241 * We have a generic one for linux (netmap_linux_config).
2242 * The bwrap has to override this, since it has to forward
2243 * the request to the wrapped adapter (netmap_bwrap_config).
2246 * na->nm_krings_create()
2247 * (create and init the krings array)
2249 * One of the following:
2251 * * netmap_hw_krings_create, (hw ports)
2252 * creates the standard layout for the krings
2253 * and adds the mbq (used for the host rings).
2255 * * netmap_vp_krings_create (VALE ports)
2256 * add leases and scratchpads
2258 * * netmap_pipe_krings_create (pipes)
2259 * create the krings and rings of both ends and
2262 * * netmap_monitor_krings_create (monitors)
2263 * avoid allocating the mbq
2265 * * netmap_bwrap_krings_create (bwraps)
2266 * create both the bwrap krings array,
2267 * the krings array of the wrapped adapter, and
2268 * (if needed) the fake array for the host adapter
2270 * na->nm_register(, 1)
2271 * (put the adapter in netmap mode)
2273 * This may be one of the following:
2275 * * netmap_hw_reg (hw ports)
2276 * checks that the ifp is still there, then calls
2277 * the hardware specific callback;
2279 * * netmap_vp_reg (VALE ports)
2280 * If the port is connected to a bridge,
2281 * set the NAF_NETMAP_ON flag under the
2282 * bridge write lock.
2284 * * netmap_pipe_reg (pipes)
2285 * inform the other pipe end that it is no
2286 * longer responsible for the lifetime of this
2289 * * netmap_monitor_reg (monitors)
2290 * intercept the sync callbacks of the monitored
2293 * * netmap_bwrap_reg (bwraps)
2294 * cross-link the bwrap and hwna rings,
2295 * forward the request to the hwna, override
2296 * the hwna notify callback (to get the frames
2297 * coming from outside go through the bridge).
2302 netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
2303 uint32_t nr_mode, uint16_t nr_ringid, uint64_t nr_flags)
2305 struct netmap_if *nifp = NULL;
2309 priv->np_na = na; /* store the reference */
2310 error = netmap_mem_finalize(na->nm_mem, na);
2314 if (na->active_fds == 0) {
2316 /* cache the allocator info in the na */
2317 error = netmap_mem_get_lut(na->nm_mem, &na->na_lut);
2320 nm_prdis("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal,
2321 na->na_lut.objsize);
2323 /* ring configuration may have changed, fetch from the card */
2324 netmap_update_config(na);
2327 /* compute the range of tx and rx rings to monitor */
2328 error = netmap_set_ringid(priv, nr_mode, nr_ringid, nr_flags);
2332 if (na->active_fds == 0) {
2334 * If this is the first registration of the adapter,
2335 * perform sanity checks and create the in-kernel view
2336 * of the netmap rings (the netmap krings).
2338 if (na->ifp && nm_priv_rx_enabled(priv)) {
2339 /* This netmap adapter is attached to an ifnet. */
2340 unsigned mtu = nm_os_ifnet_mtu(na->ifp);
2342 nm_prdis("%s: mtu %d rx_buf_maxsize %d netmap_buf_size %d",
2343 na->name, mtu, na->rx_buf_maxsize, NETMAP_BUF_SIZE(na));
2345 if (na->rx_buf_maxsize == 0) {
2346 nm_prerr("%s: error: rx_buf_maxsize == 0", na->name);
2351 error = netmap_buf_size_validate(na, mtu);
2357 * Depending on the adapter, this may also create
2358 * the netmap rings themselves
2360 error = na->nm_krings_create(na);
2366 /* now the krings must exist and we can check whether some
2367 * previous bind has exclusive ownership on them, and set
2370 error = netmap_krings_get(priv);
2372 goto err_del_krings;
2374 /* create all needed missing netmap rings */
2375 error = netmap_mem_rings_create(na);
2379 /* in all cases, create a new netmap if */
2380 nifp = netmap_mem_if_new(na, priv);
2386 if (nm_kring_pending(priv)) {
2387 /* Some kring is switching mode, tell the adapter to
2388 * react on this. */
2389 error = na->nm_register(na, 1);
2394 /* Commit the reference. */
2398 * advertise that the interface is ready by setting np_nifp.
2399 * The barrier is needed because readers (poll, *SYNC and mmap)
2400 * check for priv->np_nifp != NULL without locking
2402 mb(); /* make sure previous writes are visible to all CPUs */
2403 priv->np_nifp = nifp;
2408 netmap_mem_if_delete(na, nifp);
2410 netmap_krings_put(priv);
2411 netmap_mem_rings_delete(na);
2413 if (na->active_fds == 0)
2414 na->nm_krings_delete(na);
2416 if (na->active_fds == 0)
2417 memset(&na->na_lut, 0, sizeof(na->na_lut));
2419 netmap_mem_drop(na);
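/*
 * For reference, the control path that lands here starts in userspace
 * with a NIOCCTRL ioctl carrying a NETMAP_REQ_REGISTER body; a minimal
 * sketch ("em0" and the missing error handling are placeholders):
 *
 *	struct nmreq_header hdr;
 *	struct nmreq_register reg;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	memset(&reg, 0, sizeof(reg));
 *	hdr.nr_version = NETMAP_API;
 *	hdr.nr_reqtype = NETMAP_REQ_REGISTER;
 *	strlcpy(hdr.nr_name, "em0", sizeof(hdr.nr_name));
 *	hdr.nr_body = (uintptr_t)&reg;
 *	reg.nr_mode = NR_REG_ALL_NIC;
 *	fd = open("/dev/netmap", O_RDWR);
 *	ioctl(fd, NIOCCTRL, &hdr);
 *	mem = mmap(NULL, reg.nr_memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 */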
2427 * update kring and ring at the end of rxsync/txsync.
2430 nm_sync_finalize(struct netmap_kring *kring)
2433 * Update ring tail to what the kernel knows
2434 * After txsync: head/rhead/hwcur might be behind cur/rcur
2435 * if no carrier.
2436 */
2437 kring->ring->tail = kring->rtail = kring->nr_hwtail;
2439 nm_prdis(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
2440 kring->name, kring->nr_hwcur, kring->nr_hwtail,
2441 kring->rhead, kring->rcur, kring->rtail);
2444 /* set ring timestamp */
2446 ring_timestamp_set(struct netmap_ring *ring)
2448 if (netmap_no_timestamp == 0 || ring->flags & NR_TIMESTAMP) {
2449 microtime(&ring->ts);
2453 static int nmreq_copyin(struct nmreq_header *, int);
2454 static int nmreq_copyout(struct nmreq_header *, int);
2455 static int nmreq_checkoptions(struct nmreq_header *);
2458 * ioctl(2) support for the "netmap" device.
2460 * Following a list of accepted commands:
2461 * - NIOCCTRL device control API
2462 * - NIOCTXSYNC sync TX rings
2463 * - NIOCRXSYNC sync RX rings
2464 * - SIOCGIFADDR just for convenience
2465 * - NIOCGINFO deprecated (legacy API)
2466 * - NIOCREGIF deprecated (legacy API)
2468 * Return 0 on success, errno otherwise.
2471 netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
2472 struct thread *td, int nr_body_is_user)
2474 struct mbq q; /* packets from RX hw queues to host stack */
2475 struct netmap_adapter *na = NULL;
2476 struct netmap_mem_d *nmd = NULL;
2477 struct ifnet *ifp = NULL;
2479 u_int i, qfirst, qlast;
2480 struct netmap_kring **krings;
2486 struct nmreq_header *hdr = (struct nmreq_header *)data;
2488 if (hdr->nr_version < NETMAP_MIN_API ||
2489 hdr->nr_version > NETMAP_MAX_API) {
2490 nm_prerr("API mismatch: got %d need %d",
2491 hdr->nr_version, NETMAP_API);
2495 /* Make a kernel-space copy of the user-space nr_body.
2496 * For convenience, the nr_body pointer and the pointers
2497 * in the options list will be replaced with their
2498 * kernel-space counterparts. The original pointers are
2499 * saved internally and later restored by nmreq_copyout
2501 error = nmreq_copyin(hdr, nr_body_is_user);
2506 /* Sanitize hdr->nr_name. */
2507 hdr->nr_name[sizeof(hdr->nr_name) - 1] = '\0';
2509 switch (hdr->nr_reqtype) {
2510 case NETMAP_REQ_REGISTER: {
2511 struct nmreq_register *req =
2512 (struct nmreq_register *)(uintptr_t)hdr->nr_body;
2513 struct netmap_if *nifp;
2515 /* Protect access to priv from concurrent requests. */
2518 struct nmreq_option *opt;
2521 if (priv->np_nifp != NULL) { /* thread already registered */
2527 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_EXTMEM);
2529 struct nmreq_opt_extmem *e =
2530 (struct nmreq_opt_extmem *)opt;
2532 nmd = netmap_mem_ext_create(e->nro_usrptr,
2533 &e->nro_info, &error);
2534 opt->nro_status = error;
2538 #endif /* WITH_EXTMEM */
2540 if (nmd == NULL && req->nr_mem_id) {
2541 /* find the allocator and get a reference */
2542 nmd = netmap_mem_find(req->nr_mem_id);
2544 if (netmap_verbose) {
2545 nm_prerr("%s: failed to find mem_id %u",
2546 hdr->nr_name, req->nr_mem_id);
2552 /* find the interface and a reference */
2553 error = netmap_get_na(hdr, &na, &ifp, nmd,
2554 1 /* create */); /* keep reference */
2557 if (NETMAP_OWNED_BY_KERN(na)) {
2562 if (na->virt_hdr_len && !(req->nr_flags & NR_ACCEPT_VNET_HDR)) {
2563 nm_prerr("virt_hdr_len=%d, but application does "
2564 "not accept it", na->virt_hdr_len);
2569 error = netmap_do_regif(priv, na, req->nr_mode,
2570 req->nr_ringid, req->nr_flags);
2571 if (error) { /* reg. failed, release priv and ref */
2575 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
2577 struct nmreq_opt_csb *csbo =
2578 (struct nmreq_opt_csb *)opt;
2579 error = netmap_csb_validate(priv, csbo);
2580 opt->nro_status = error;
2582 netmap_do_unregif(priv);
2587 nifp = priv->np_nifp;
2589 /* return the offset of the netmap_if object */
2590 req->nr_rx_rings = na->num_rx_rings;
2591 req->nr_tx_rings = na->num_tx_rings;
2592 req->nr_rx_slots = na->num_rx_desc;
2593 req->nr_tx_slots = na->num_tx_desc;
2594 req->nr_host_tx_rings = na->num_host_tx_rings;
2595 req->nr_host_rx_rings = na->num_host_rx_rings;
2596 error = netmap_mem_get_info(na->nm_mem, &req->nr_memsize, &memflags,
2599 netmap_do_unregif(priv);
2602 if (memflags & NETMAP_MEM_PRIVATE) {
2603 *(uint32_t *)(uintptr_t)&nifp->ni_flags |= NI_PRIV_MEM;
2606 priv->np_si[t] = nm_si_user(priv, t) ?
2607 &na->si[t] : &NMR(na, t)[priv->np_qfirst[t]]->si;
2610 if (req->nr_extra_bufs) {
2612 nm_prinf("requested %d extra buffers",
2613 req->nr_extra_bufs);
2614 req->nr_extra_bufs = netmap_extra_alloc(na,
2615 &nifp->ni_bufs_head, req->nr_extra_bufs);
2617 nm_prinf("got %d extra buffers", req->nr_extra_bufs);
2619 req->nr_offset = netmap_mem_if_offset(na->nm_mem, nifp);
2621 error = nmreq_checkoptions(hdr);
2623 netmap_do_unregif(priv);
2627 /* store ifp reference so that priv destructor may release it */
2631 netmap_unget_na(na, ifp);
2633 /* release the reference from netmap_mem_find() or
2634 * netmap_mem_ext_create()
2637 netmap_mem_put(nmd);
2642 case NETMAP_REQ_PORT_INFO_GET: {
2643 struct nmreq_port_info_get *req =
2644 (struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body;
2650 if (hdr->nr_name[0] != '\0') {
2651 /* Build a nmreq_register out of the nmreq_port_info_get,
2652 * so that we can call netmap_get_na(). */
2653 struct nmreq_register regreq;
2654 bzero(®req, sizeof(regreq));
2655 regreq.nr_mode = NR_REG_ALL_NIC;
2656 regreq.nr_tx_slots = req->nr_tx_slots;
2657 regreq.nr_rx_slots = req->nr_rx_slots;
2658 regreq.nr_tx_rings = req->nr_tx_rings;
2659 regreq.nr_rx_rings = req->nr_rx_rings;
2660 regreq.nr_host_tx_rings = req->nr_host_tx_rings;
2661 regreq.nr_host_rx_rings = req->nr_host_rx_rings;
2662 regreq.nr_mem_id = req->nr_mem_id;
2664 /* get a refcount */
2665 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2666 hdr->nr_body = (uintptr_t)®req;
2667 error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
2668 hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET; /* reset type */
2669 hdr->nr_body = (uintptr_t)req; /* reset nr_body */
2675 nmd = na->nm_mem; /* get memory allocator */
2677 nmd = netmap_mem_find(req->nr_mem_id ? req->nr_mem_id : 1);
2680 nm_prerr("%s: failed to find mem_id %u",
2682 req->nr_mem_id ? req->nr_mem_id : 1);
2688 error = netmap_mem_get_info(nmd, &req->nr_memsize, &memflags,
2692 if (na == NULL) /* only memory info */
2694 netmap_update_config(na);
2695 req->nr_rx_rings = na->num_rx_rings;
2696 req->nr_tx_rings = na->num_tx_rings;
2697 req->nr_rx_slots = na->num_rx_desc;
2698 req->nr_tx_slots = na->num_tx_desc;
2699 req->nr_host_tx_rings = na->num_host_tx_rings;
2700 req->nr_host_rx_rings = na->num_host_rx_rings;
2702 netmap_unget_na(na, ifp);
2707 case NETMAP_REQ_VALE_ATTACH: {
2708 error = netmap_vale_attach(hdr, NULL /* userspace request */);
2712 case NETMAP_REQ_VALE_DETACH: {
2713 error = netmap_vale_detach(hdr, NULL /* userspace request */);
2717 case NETMAP_REQ_VALE_LIST: {
2718 error = netmap_vale_list(hdr);
2722 case NETMAP_REQ_PORT_HDR_SET: {
2723 struct nmreq_port_hdr *req =
2724 (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2725 /* Build a nmreq_register out of the nmreq_port_hdr,
2726 * so that we can call netmap_get_bdg_na(). */
2727 struct nmreq_register regreq;
2728 bzero(®req, sizeof(regreq));
2729 regreq.nr_mode = NR_REG_ALL_NIC;
2731 /* For now we only support virtio-net headers, and only for
2732 * VALE ports, but this may change in future. Valid lengths
2733 * for the virtio-net header are 0 (no header), 10 and 12. */
2734 if (req->nr_hdr_len != 0 &&
2735 req->nr_hdr_len != sizeof(struct nm_vnet_hdr) &&
2736 req->nr_hdr_len != 12) {
2738 nm_prerr("invalid hdr_len %u", req->nr_hdr_len);
2743 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2744 hdr->nr_body = (uintptr_t)®req;
2745 error = netmap_get_vale_na(hdr, &na, NULL, 0);
2746 hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_SET;
2747 hdr->nr_body = (uintptr_t)req;
2749 struct netmap_vp_adapter *vpna =
2750 (struct netmap_vp_adapter *)na;
2751 na->virt_hdr_len = req->nr_hdr_len;
2752 if (na->virt_hdr_len) {
2753 vpna->mfs = NETMAP_BUF_SIZE(na);
2756 nm_prinf("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na);
2757 netmap_adapter_put(na);
2765 case NETMAP_REQ_PORT_HDR_GET: {
2766 /* Get vnet-header length for this netmap port */
2767 struct nmreq_port_hdr *req =
2768 (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2769 /* Build a nmreq_register out of the nmreq_port_hdr,
2770 * so that we can call netmap_get_bdg_na(). */
2771 struct nmreq_register regreq;
2774 bzero(®req, sizeof(regreq));
2775 regreq.nr_mode = NR_REG_ALL_NIC;
2777 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2778 hdr->nr_body = (uintptr_t)®req;
2779 error = netmap_get_na(hdr, &na, &ifp, NULL, 0);
2780 hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_GET;
2781 hdr->nr_body = (uintptr_t)req;
2783 req->nr_hdr_len = na->virt_hdr_len;
2785 netmap_unget_na(na, ifp);
2790 case NETMAP_REQ_VALE_NEWIF: {
2791 error = nm_vi_create(hdr);
2795 case NETMAP_REQ_VALE_DELIF: {
2796 error = nm_vi_destroy(hdr->nr_name);
2800 case NETMAP_REQ_VALE_POLLING_ENABLE:
2801 case NETMAP_REQ_VALE_POLLING_DISABLE: {
2802 error = nm_bdg_polling(hdr);
2805 #endif /* WITH_VALE */
2806 case NETMAP_REQ_POOLS_INFO_GET: {
2807 /* Get information from the memory allocator used for
2808 * the netmap port named in the request. */
2809 struct nmreq_pools_info *req =
2810 (struct nmreq_pools_info *)(uintptr_t)hdr->nr_body;
2813 /* Build a nmreq_register out of the nmreq_pools_info,
2814 * so that we can call netmap_get_na(). */
2815 struct nmreq_register regreq;
2816 bzero(®req, sizeof(regreq));
2817 regreq.nr_mem_id = req->nr_mem_id;
2818 regreq.nr_mode = NR_REG_ALL_NIC;
2820 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2821 hdr->nr_body = (uintptr_t)®req;
2822 error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
2823 hdr->nr_reqtype = NETMAP_REQ_POOLS_INFO_GET; /* reset type */
2824 hdr->nr_body = (uintptr_t)req; /* reset nr_body */
2830 nmd = na->nm_mem; /* grab the memory allocator */
2836 /* Finalize the memory allocator, get the pools
2837 * information and release the allocator. */
2838 error = netmap_mem_finalize(nmd, na);
2842 error = netmap_mem_pools_info_get(req, nmd);
2843 netmap_mem_drop(na);
2845 netmap_unget_na(na, ifp);
2850 case NETMAP_REQ_CSB_ENABLE: {
2851 struct nmreq_option *opt;
2853 opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
2857 struct nmreq_opt_csb *csbo =
2858 (struct nmreq_opt_csb *)opt;
2860 error = netmap_csb_validate(priv, csbo);
2862 opt->nro_status = error;
2867 case NETMAP_REQ_SYNC_KLOOP_START: {
2868 error = netmap_sync_kloop(priv, hdr);
2872 case NETMAP_REQ_SYNC_KLOOP_STOP: {
2873 error = netmap_sync_kloop_stop(priv);
2882 /* Write back request body to userspace and reset the
2883 * user-space pointer. */
2884 error = nmreq_copyout(hdr, error);
2890 if (unlikely(priv->np_nifp == NULL)) {
2894 mb(); /* make sure following reads are not from cache */
2896 if (unlikely(priv->np_csb_atok_base)) {
2897 nm_prerr("Invalid sync in CSB mode");
2902 na = priv->np_na; /* we have a reference */
2905 t = (cmd == NIOCTXSYNC ? NR_TX : NR_RX);
2906 krings = NMR(na, t);
2907 qfirst = priv->np_qfirst[t];
2908 qlast = priv->np_qlast[t];
2909 sync_flags = priv->np_sync_flags;
2911 for (i = qfirst; i < qlast; i++) {
2912 struct netmap_kring *kring = krings[i];
2913 struct netmap_ring *ring = kring->ring;
2915 if (unlikely(nm_kr_tryget(kring, 1, &error))) {
2916 error = (error ? EIO : 0);
2920 if (cmd == NIOCTXSYNC) {
2921 if (netmap_debug & NM_DEBUG_TXSYNC)
2922 nm_prinf("pre txsync ring %d cur %d hwcur %d",
2925 if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
2926 netmap_ring_reinit(kring);
2927 } else if (kring->nm_sync(kring, sync_flags | NAF_FORCE_RECLAIM) == 0) {
2928 nm_sync_finalize(kring);
2930 if (netmap_debug & NM_DEBUG_TXSYNC)
2931 nm_prinf("post txsync ring %d cur %d hwcur %d",
2935 if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
2936 netmap_ring_reinit(kring);
2938 if (nm_may_forward_up(kring)) {
2939 /* transparent forwarding, see netmap_poll() */
2940 netmap_grab_packets(kring, &q, netmap_fwd);
2942 if (kring->nm_sync(kring, sync_flags | NAF_FORCE_READ) == 0) {
2943 nm_sync_finalize(kring);
2945 ring_timestamp_set(ring);
2951 netmap_send_up(na->ifp, &q);
2958 return netmap_ioctl_legacy(priv, cmd, data, td);
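/*
 * Typical userspace use of the NIOCTXSYNC path above, using the
 * netmap_user.h helpers (illustrative sketch; 'frame' and 'len' are
 * placeholders for the application's data):
 *
 *	struct netmap_ring *ring = NETMAP_TXRING(nifp, 0);
 *
 *	while (!nm_ring_empty(ring)) {
 *		struct netmap_slot *slot = &ring->slot[ring->cur];
 *
 *		memcpy(NETMAP_BUF(ring, slot->buf_idx), frame, len);
 *		slot->len = len;
 *		ring->head = ring->cur = nm_ring_next(ring, ring->cur);
 *	}
 *	ioctl(fd, NIOCTXSYNC, NULL);
 */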
2967 nmreq_size_by_type(uint16_t nr_reqtype)
2969 switch (nr_reqtype) {
2970 case NETMAP_REQ_REGISTER:
2971 return sizeof(struct nmreq_register);
2972 case NETMAP_REQ_PORT_INFO_GET:
2973 return sizeof(struct nmreq_port_info_get);
2974 case NETMAP_REQ_VALE_ATTACH:
2975 return sizeof(struct nmreq_vale_attach);
2976 case NETMAP_REQ_VALE_DETACH:
2977 return sizeof(struct nmreq_vale_detach);
2978 case NETMAP_REQ_VALE_LIST:
2979 return sizeof(struct nmreq_vale_list);
2980 case NETMAP_REQ_PORT_HDR_SET:
2981 case NETMAP_REQ_PORT_HDR_GET:
2982 return sizeof(struct nmreq_port_hdr);
2983 case NETMAP_REQ_VALE_NEWIF:
2984 return sizeof(struct nmreq_vale_newif);
2985 case NETMAP_REQ_VALE_DELIF:
2986 case NETMAP_REQ_SYNC_KLOOP_STOP:
2987 case NETMAP_REQ_CSB_ENABLE:
2989 case NETMAP_REQ_VALE_POLLING_ENABLE:
2990 case NETMAP_REQ_VALE_POLLING_DISABLE:
2991 return sizeof(struct nmreq_vale_polling);
2992 case NETMAP_REQ_POOLS_INFO_GET:
2993 return sizeof(struct nmreq_pools_info);
2994 case NETMAP_REQ_SYNC_KLOOP_START:
2995 return sizeof(struct nmreq_sync_kloop_start);
3001 nmreq_opt_size_by_type(uint32_t nro_reqtype, uint64_t nro_size)
3003 size_t rv = sizeof(struct nmreq_option);
3004 #ifdef NETMAP_REQ_OPT_DEBUG
3005 if (nro_reqtype & NETMAP_REQ_OPT_DEBUG)
3006 return (nro_reqtype & ~NETMAP_REQ_OPT_DEBUG);
3007 #endif /* NETMAP_REQ_OPT_DEBUG */
3008 switch (nro_reqtype) {
3010 case NETMAP_REQ_OPT_EXTMEM:
3011 rv = sizeof(struct nmreq_opt_extmem);
3013 #endif /* WITH_EXTMEM */
3014 case NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS:
3018 case NETMAP_REQ_OPT_CSB:
3019 rv = sizeof(struct nmreq_opt_csb);
3021 case NETMAP_REQ_OPT_SYNC_KLOOP_MODE:
3022 rv = sizeof(struct nmreq_opt_sync_kloop_mode);
3025 /* subtract the common header */
3026 return rv - sizeof(struct nmreq_option);
3030 * nmreq_copyin: create an in-kernel version of the request.
3032 * We build the following data structure:
 *
 * hdr -> +-------+                buf
 *        |       |          +---------------+
 *        +-------+          |usr body ptr   |
 *        |options|-.        +---------------+
 *        +-------+ |        |usr options ptr|
 *        |body   |--------->+---------------+
 *        +-------+ |        |               |
 *                  |        | copy of body  |
 *                  |        |               |
 *                  |        +---------------+
 *                  |        |     NULL      |
 *                  |        +---------------+
 *                  |    .---|               |\
 *                  |    |   +---------------+ |
 *                  | .------|               | |
 *                  | |  |   +---------------+  \ option table
 *                  | |  |   |      ...      |  / indexed by option
 *                  | |  |   +---------------+ |  type
 *                  | |  |   |               | |
 *                  | |  |   +---------------+/
 *                  | |  |   |usr next ptr 1 |
 *                  `-|--|-->+---------------+
 *                    |  |   | copy of opt 1 |
 *                    |  |   |               |
 *                    |  | .-| nro_next      |
 *                    |  | | +---------------+
 *                    |  | | |usr next ptr 2 |
 *                    |  `-`>+---------------+
 *                    |      | copy of opt 2 |
 *                    |      |               |
 *                    |    .-| nro_next      |
 *                    |    | +---------------+
 *                    |    | |               |
 *                    ~    ~ ~      ...      ~
 *                    |    | |               |
 *                    `----->+---------------+
 *                         | |usr next ptr n |
 *                         `>+---------------+
 *                           | copy of opt n |
 *                           |               |
 *                           | nro_next(NULL)|
 *                           +---------------+
3077 * The options and body fields of the hdr structure are overwritten
3078 * with in-kernel valid pointers inside the buf. The original user
3079 * pointers are saved in the buf and restored on copyout.
3080 * The list of options is copied and the pointers adjusted. The
3081 * original pointers are saved before the option they belonged to.
3083 * The option table has an entry for every available option. Entries
3084 * for options that have not been passed contain NULL.
3089 nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user)
3091 size_t rqsz, optsz, bufsz;
3093 char *ker = NULL, *p;
3094 struct nmreq_option **next, *src, **opt_tab;
3095 struct nmreq_option buf;
3098 if (hdr->nr_reserved) {
3100 nm_prerr("nr_reserved must be zero");
3104 if (!nr_body_is_user)
3107 hdr->nr_reserved = nr_body_is_user;
3109 /* compute the total size of the buffer */
3110 rqsz = nmreq_size_by_type(hdr->nr_reqtype);
3111 if (rqsz > NETMAP_REQ_MAXSIZE) {
3115 if ((rqsz && hdr->nr_body == (uintptr_t)NULL) ||
3116 (!rqsz && hdr->nr_body != (uintptr_t)NULL)) {
3117 /* Request body expected, but not found; or
3118 * request body found but unexpected. */
3120 nm_prerr("nr_body expected but not found, or vice versa");
3125 bufsz = 2 * sizeof(void *) + rqsz +
3126 NETMAP_REQ_OPT_MAX * sizeof(opt_tab);
3127 /* compute the size of the buf below the option table.
3128 * It must contain a copy of every received option structure.
3129 * For every option we also need to store a copy of the user
3130 * pointer.
3131 */
3133 for (src = (struct nmreq_option *)(uintptr_t)hdr->nr_options; src;
3134 src = (struct nmreq_option *)(uintptr_t)buf.nro_next)
3136 error = copyin(src, &buf, sizeof(*src));
3139 optsz += sizeof(*src);
3140 optsz += nmreq_opt_size_by_type(buf.nro_reqtype, buf.nro_size);
3141 if (rqsz + optsz > NETMAP_REQ_MAXSIZE) {
3145 bufsz += sizeof(void *);
3149 ker = nm_os_malloc(bufsz);
3154 p = ker; /* write pointer into the buffer */
3156 /* make a copy of the user pointers */
3157 ptrs = (uint64_t*)p;
3158 *ptrs++ = hdr->nr_body;
3159 *ptrs++ = hdr->nr_options;
3163 error = copyin((void *)(uintptr_t)hdr->nr_body, p, rqsz);
3166 /* overwrite the user pointer with the in-kernel one */
3167 hdr->nr_body = (uintptr_t)p;
3169 /* start of the options table */
3170 opt_tab = (struct nmreq_option **)p;
3171 p += sizeof(opt_tab) * NETMAP_REQ_OPT_MAX;
3173 /* copy the options */
3174 next = (struct nmreq_option **)&hdr->nr_options;
3177 struct nmreq_option *opt;
3179 /* copy the option header */
3180 ptrs = (uint64_t *)p;
3181 opt = (struct nmreq_option *)(ptrs + 1);
3182 error = copyin(src, opt, sizeof(*src));
3185 /* make a copy of the user next pointer */
3186 *ptrs = opt->nro_next;
3187 /* overwrite the user pointer with the in-kernel one */
3190 /* initialize the option as not supported.
3191 * Recognized options will update this field.
3193 opt->nro_status = EOPNOTSUPP;
3195 /* check for invalid types */
3196 if (opt->nro_reqtype < 1) {
3198 nm_prinf("invalid option type: %u", opt->nro_reqtype);
3199 opt->nro_status = EINVAL;
3204 if (opt->nro_reqtype >= NETMAP_REQ_OPT_MAX) {
3205 /* opt->nro_status is already EOPNOTSUPP */
3210 /* if the type is valid, index the option in the table
3211 * unless it is a duplicate.
3213 if (opt_tab[opt->nro_reqtype] != NULL) {
3215 nm_prinf("duplicate option: %u", opt->nro_reqtype);
3216 opt->nro_status = EINVAL;
3217 opt_tab[opt->nro_reqtype]->nro_status = EINVAL;
3221 opt_tab[opt->nro_reqtype] = opt;
3223 p = (char *)(opt + 1);
3225 /* copy the option body */
3226 optsz = nmreq_opt_size_by_type(opt->nro_reqtype,
3229 /* the option body follows the option header */
3230 error = copyin(src + 1, p, optsz);
3237 /* move to next option */
3238 next = (struct nmreq_option **)&opt->nro_next;
3242 nmreq_copyout(hdr, error);
3246 ptrs = (uint64_t *)ker;
3247 hdr->nr_body = *ptrs++;
3248 hdr->nr_options = *ptrs++;
3249 hdr->nr_reserved = 0;
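/*
 * Size accounting example for the buffer built above (sketch): a
 * NETMAP_REQ_REGISTER carrying one CSB option needs
 *
 *	2 * sizeof(void *)			saved user body/options ptrs
 *	+ sizeof(struct nmreq_register)		copy of the body
 *	+ NETMAP_REQ_OPT_MAX * sizeof(void *)	option table
 *	+ sizeof(void *)			saved user next ptr
 *	+ sizeof(struct nmreq_opt_csb)		copy of the option
 *
 * with rqsz + optsz capped at NETMAP_REQ_MAXSIZE.
 */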
3256 nmreq_copyout(struct nmreq_header *hdr, int rerror)
3258 struct nmreq_option *src, *dst;
3259 void *ker = (void *)(uintptr_t)hdr->nr_body, *bufstart;
3264 if (!hdr->nr_reserved)
3267 /* restore the user pointers in the header */
3268 ptrs = (uint64_t *)ker - 2;
3270 hdr->nr_body = *ptrs++;
3271 src = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3272 hdr->nr_options = *ptrs;
3276 bodysz = nmreq_size_by_type(hdr->nr_reqtype);
3277 error = copyout(ker, (void *)(uintptr_t)hdr->nr_body, bodysz);
3284 /* copy the options */
3285 dst = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3290 /* restore the user pointer */
3291 next = src->nro_next;
3292 ptrs = (uint64_t *)src - 1;
3293 src->nro_next = *ptrs;
3295 /* always copy the option header */
3296 error = copyout(src, dst, sizeof(*src));
3302 /* copy the option body only if there was no error */
3303 if (!rerror && !src->nro_status) {
3304 optsz = nmreq_opt_size_by_type(src->nro_reqtype,
3307 error = copyout(src + 1, dst + 1, optsz);
3314 src = (struct nmreq_option *)(uintptr_t)next;
3315 dst = (struct nmreq_option *)(uintptr_t)*ptrs;
3320 hdr->nr_reserved = 0;
3321 nm_os_free(bufstart);
3325 struct nmreq_option *
3326 nmreq_getoption(struct nmreq_header *hdr, uint16_t reqtype)
3328 struct nmreq_option **opt_tab;
3330 if (!hdr->nr_options)
3333 opt_tab = (struct nmreq_option **)((uintptr_t)hdr->nr_options) -
3334 (NETMAP_REQ_OPT_MAX + 1);
3335 return opt_tab[reqtype];
3339 nmreq_checkoptions(struct nmreq_header *hdr)
3341 struct nmreq_option *opt;
3342 /* return error if there is still any option
3343 * marked as not supported
3346 for (opt = (struct nmreq_option *)(uintptr_t)hdr->nr_options; opt;
3347 opt = (struct nmreq_option *)(uintptr_t)opt->nro_next)
3348 if (opt->nro_status == EOPNOTSUPP)
3355 * select(2) and poll(2) handlers for the "netmap" device.
3357 * Can be called for one or more queues.
3358 * Return the event mask corresponding to ready events.
3359 * If there are no ready events (and 'sr' is not NULL), do a
3360 * selrecord on either individual selinfo or on the global one.
3361 * Device-dependent parts (locking and sync of tx/rx rings)
3362 * are done through callbacks.
3364 * On Linux, arguments are really pwait, the poll table, and 'td' is struct file *.
3365 * The first one is remapped to pwait as selrecord() uses the name as an
3366 * argument.
3369 netmap_poll(struct netmap_priv_d *priv, int events, NM_SELRECORD_T *sr)
3371 struct netmap_adapter *na;
3372 struct netmap_kring *kring;
3373 struct netmap_ring *ring;
3374 u_int i, want[NR_TXRX], revents = 0;
3375 NM_SELINFO_T *si[NR_TXRX];
3376 #define want_tx want[NR_TX]
3377 #define want_rx want[NR_RX]
3378 struct mbq q; /* packets from RX hw queues to host stack */
3381 * In order to avoid nested locks, we need to "double check"
3382 * txsync and rxsync if we decide to do a selrecord().
3383 * retry_tx (and retry_rx, later) prevent looping forever.
3385 int retry_tx = 1, retry_rx = 1;
3387 /* Transparent mode: send_down is 1 if we have found some
3388 * packets to forward (host RX ring --> NIC) during the rx
3389 * scan and we have not sent them down to the NIC yet.
3390 * Transparent mode requires to bind all rings to a single
3391 * file descriptor.
3392 */
3394 int sync_flags = priv->np_sync_flags;
3398 if (unlikely(priv->np_nifp == NULL)) {
3401 mb(); /* make sure following reads are not from cache */
3405 if (unlikely(!nm_netmap_on(na)))
3408 if (unlikely(priv->np_csb_atok_base)) {
3409 nm_prerr("Invalid poll in CSB mode");
3413 if (netmap_debug & NM_DEBUG_ON)
3414 nm_prinf("device %s events 0x%x", na->name, events);
3415 want_tx = events & (POLLOUT | POLLWRNORM);
3416 want_rx = events & (POLLIN | POLLRDNORM);
3419 * If the card has more than one queue AND the file descriptor is
3420 * bound to all of them, we sleep on the "global" selinfo, otherwise
3421 * we sleep on individual selinfo (FreeBSD only allows two selinfo's
3422 * per file descriptor).
3423 * The interrupt routine in the driver wakes one or the other
3424 * (or both) depending on which clients are active.
3426 * rxsync() is only called if we run out of buffers on a POLLIN.
3427 * txsync() is called if we run out of buffers on POLLOUT, or
3428 * there are pending packets to send. The latter can be disabled
3429 * passing NETMAP_NO_TX_POLL in the NIOCREG call.
3431 si[NR_RX] = priv->np_si[NR_RX];
3432 si[NR_TX] = priv->np_si[NR_TX];
3436 * We start with a lock free round which is cheap if we have
3437 * slots available. If this fails, then lock and call the sync
3438 * routines. We can't do this on Linux, as the contract says
3439 * that we must call nm_os_selrecord() unconditionally.
3442 const enum txrx t = NR_TX;
3443 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3444 kring = NMR(na, t)[i];
3445 if (kring->ring->cur != kring->ring->tail) {
3446 /* Some unseen TX space is available, so
3447 * we don't need to run txsync. */
3455 const enum txrx t = NR_RX;
3456 int rxsync_needed = 0;
3458 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3459 kring = NMR(na, t)[i];
3460 if (kring->ring->cur == kring->ring->tail
3461 || kring->rhead != kring->ring->head) {
3462 /* There are no unseen packets on this ring,
3463 * or there are some buffers to be returned
3464 * to the netmap port. We therefore go ahead
3465 * and run rxsync. */
3470 if (!rxsync_needed) {
3478 /* The selrecord must be unconditional on linux. */
3479 nm_os_selrecord(sr, si[NR_RX]);
3480 nm_os_selrecord(sr, si[NR_TX]);
3484 * If we want to push packets out (priv->np_txpoll) or
3485 * want_tx is still set, we must issue txsync calls
3486 * (on all rings, to avoid that the tx rings stall).
3487 * Fortunately, normal tx mode has np_txpoll set.
3489 if (priv->np_txpoll || want_tx) {
3491 * The first round checks if anyone is ready, if not
3492 * do a selrecord and another round to handle races.
3493 * want_tx goes to 0 if any space is found, and is
3494 * used to skip rings with no pending transmissions.
3497 for (i = priv->np_qfirst[NR_TX]; i < priv->np_qlast[NR_TX]; i++) {
3500 kring = na->tx_rings[i];
3504 * Don't try to txsync this TX ring if we already found some
3505 * space in some of the TX rings (want_tx == 0) and there are no
3506 * TX slots in this ring that need to be flushed to the NIC
3507 * (head == hwcur).
3508 */
3509 if (!send_down && !want_tx && ring->head == kring->nr_hwcur)
3512 if (nm_kr_tryget(kring, 1, &revents))
3515 if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3516 netmap_ring_reinit(kring);
3519 if (kring->nm_sync(kring, sync_flags))
3522 nm_sync_finalize(kring);
3526 * If we found new slots, notify potential
3527 * listeners on the same ring.
3528 * Since we just did a txsync, look at the copies
3529 * of cur,tail in the kring.
3531 found = kring->rcur != kring->rtail;
3533 if (found) { /* notify other listeners */
3537 kring->nm_notify(kring, 0);
3541 /* if there were any packets to forward, we must have handled them by now */
3543 if (want_tx && retry_tx && sr) {
3545 nm_os_selrecord(sr, si[NR_TX]);
3553 * If want_rx is still set scan receive rings.
3554 * Do it on all rings because otherwise we starve.
3557 /* two rounds here for race avoidance */
3559 for (i = priv->np_qfirst[NR_RX]; i < priv->np_qlast[NR_RX]; i++) {
3562 kring = na->rx_rings[i];
3565 if (unlikely(nm_kr_tryget(kring, 1, &revents)))
3568 if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3569 netmap_ring_reinit(kring);
3572 /* now we can use kring->rcur, rtail */
3575 * transparent mode support: collect packets from
3576 * hw rxring(s) that have been released by the user
3578 if (nm_may_forward_up(kring)) {
3579 netmap_grab_packets(kring, &q, netmap_fwd);
3582 /* Clear the NR_FORWARD flag anyway, it may be set by
3583 * the nm_sync() below, only for the host RX ring (see
3584 * netmap_rxsync_from_host()). */
3585 kring->nr_kflags &= ~NR_FORWARD;
3586 if (kring->nm_sync(kring, sync_flags))
3589 nm_sync_finalize(kring);
3590 send_down |= (kring->nr_kflags & NR_FORWARD);
3591 ring_timestamp_set(ring);
3592 found = kring->rcur != kring->rtail;
3598 kring->nm_notify(kring, 0);
3604 if (retry_rx && sr) {
3605 nm_os_selrecord(sr, si[NR_RX]);
3608 if (send_down || retry_rx) {
3611 goto flush_tx; /* and retry_rx */
3618 * Transparent mode: released bufs (i.e. between kring->nr_hwcur and
3619 * ring->head) marked with NS_FORWARD on hw rx rings are passed up
3620 * to the host stack.
3624 netmap_send_up(na->ifp, &q);
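/*
 * For reference, the matching userspace pattern (illustrative sketch):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	for (;;) {
 *		poll(&pfd, 1, -1);
 *		for each ring bound to fd:
 *			consume slots, then advance head and cur;
 *	}
 *
 * POLLOUT works symmetrically on the TX rings, with txsync run as
 * described above.
 */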
3633 nma_intr_enable(struct netmap_adapter *na, int onoff)
3635 bool changed = false;
3640 for (i = 0; i < nma_get_nrings(na, t); i++) {
3641 struct netmap_kring *kring = NMR(na, t)[i];
3642 int on = !(kring->nr_kflags & NKR_NOINTR);
3644 if (!!onoff != !!on) {
3648 kring->nr_kflags &= ~NKR_NOINTR;
3650 kring->nr_kflags |= NKR_NOINTR;
3656 return 0; /* nothing to do */
3660 nm_prerr("Cannot %s interrupts for %s", onoff ? "enable" : "disable",
3665 na->nm_intr(na, onoff);
3671 /*-------------------- driver support routines -------------------*/
3673 /* default notify callback */
3675 netmap_notify(struct netmap_kring *kring, int flags)
3677 struct netmap_adapter *na = kring->notify_na;
3678 enum txrx t = kring->tx;
3680 nm_os_selwakeup(&kring->si);
3681 /* optimization: avoid a wake up on the global
3682 * queue if nobody has registered for more
3683 * than one ring
3684 */
3685 if (na->si_users[t] > 0)
3686 nm_os_selwakeup(&na->si[t]);
3688 return NM_IRQ_COMPLETED;
3691 /* called by all routines that create netmap_adapters.
3692 * provide some defaults and get a reference to the
3693 * memory allocator
3694 */
3696 netmap_attach_common(struct netmap_adapter *na)
3698 if (!na->rx_buf_maxsize) {
3699 /* Set a conservative default (larger is safer). */
3700 na->rx_buf_maxsize = PAGE_SIZE;
3704 if (na->na_flags & NAF_HOST_RINGS && na->ifp) {
3705 na->if_input = na->ifp->if_input; /* for netmap_send_up */
3707 na->pdev = na; /* make sure netmap_mem_map() is called */
3708 #endif /* __FreeBSD__ */
3709 if (na->na_flags & NAF_HOST_RINGS) {
3710 if (na->num_host_rx_rings == 0)
3711 na->num_host_rx_rings = 1;
3712 if (na->num_host_tx_rings == 0)
3713 na->num_host_tx_rings = 1;
3715 if (na->nm_krings_create == NULL) {
3716 /* we assume that we have been called by a driver,
3717 * since other port types all provide their own
3718 * nm_krings_create callbacks
3719 */
3720 na->nm_krings_create = netmap_hw_krings_create;
3721 na->nm_krings_delete = netmap_hw_krings_delete;
3723 if (na->nm_notify == NULL)
3724 na->nm_notify = netmap_notify;
3727 if (na->nm_mem == NULL) {
3728 /* use the global allocator */
3729 na->nm_mem = netmap_mem_get(&nm_mem);
3732 if (na->nm_bdg_attach == NULL)
3733 /* no special nm_bdg_attach callback. On VALE
3734 * attach, we need to interpose a bwrap
3736 na->nm_bdg_attach = netmap_default_bdg_attach;
3742 /* Wrapper for the register callback provided by netmap-enabled
3743 * hardware drivers.
3744 * nm_iszombie(na) means that the driver module has been
3745 * unloaded, so we cannot call into it.
3746 * nm_os_ifnet_lock() must guarantee mutual exclusion with
3750 netmap_hw_reg(struct netmap_adapter *na, int onoff)
3752 struct netmap_hw_adapter *hwna =
3753 (struct netmap_hw_adapter*)na;
3758 if (nm_iszombie(na)) {
3761 } else if (na != NULL) {
3762 na->na_flags &= ~NAF_NETMAP_ON;
3767 error = hwna->nm_hw_register(na, onoff);
3770 nm_os_ifnet_unlock();
3776 netmap_hw_dtor(struct netmap_adapter *na)
3778 if (na->ifp == NULL)
3781 NM_DETACH_NA(na->ifp);
3786 * Allocate a netmap_adapter object, and initialize it from the
3787 * 'arg' passed by the driver on attach.
3788 * We allocate a block of memory of 'size' bytes, which has room
3789 * for struct netmap_adapter plus additional room private to
3790 * the caller.
3791 * Return 0 on success, ENOMEM otherwise.
3794 netmap_attach_ext(struct netmap_adapter *arg, size_t size, int override_reg)
3796 struct netmap_hw_adapter *hwna = NULL;
3797 struct ifnet *ifp = NULL;
3799 if (size < sizeof(struct netmap_hw_adapter)) {
3800 if (netmap_debug & NM_DEBUG_ON)
3801 nm_prerr("Invalid netmap adapter size %d", (int)size);
3805 if (arg == NULL || arg->ifp == NULL) {
3806 if (netmap_debug & NM_DEBUG_ON)
3807 nm_prerr("either arg or arg->ifp is NULL");
3811 if (arg->num_tx_rings == 0 || arg->num_rx_rings == 0) {
3812 if (netmap_debug & NM_DEBUG_ON)
3813 nm_prerr("%s: invalid rings tx %d rx %d",
3814 arg->name, arg->num_tx_rings, arg->num_rx_rings);
3819 if (NM_NA_CLASH(ifp)) {
3820 /* If NA(ifp) is not null but there is no valid netmap
3821 * adapter it means that someone else is using the same
3822 * pointer (e.g. ax25_ptr on linux). This happens for
3823 * instance when also PF_RING is in use. */
3824 nm_prerr("Error: netmap adapter hook is busy");
3828 hwna = nm_os_malloc(size);
3832 hwna->up.na_flags |= NAF_HOST_RINGS | NAF_NATIVE;
3833 strlcpy(hwna->up.name, ifp->if_xname, sizeof(hwna->up.name));
3835 hwna->nm_hw_register = hwna->up.nm_register;
3836 hwna->up.nm_register = netmap_hw_reg;
3838 if (netmap_attach_common(&hwna->up)) {
3842 netmap_adapter_get(&hwna->up);
3844 NM_ATTACH_NA(ifp, &hwna->up);
3846 nm_os_onattach(ifp);
3848 if (arg->nm_dtor == NULL) {
3849 hwna->up.nm_dtor = netmap_hw_dtor;
3852 if_printf(ifp, "netmap queues/slots: TX %d/%d, RX %d/%d\n",
3853 hwna->up.num_tx_rings, hwna->up.num_tx_desc,
3854 hwna->up.num_rx_rings, hwna->up.num_rx_desc);
3858 nm_prerr("fail, arg %p ifp %p na %p", arg, ifp, hwna);
3859 return (hwna ? EINVAL : ENOMEM);
3864 netmap_attach(struct netmap_adapter *arg)
3866 return netmap_attach_ext(arg, sizeof(struct netmap_hw_adapter),
3867 1 /* override nm_reg */);
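/*
 * Typical use from a driver attach routine (a sketch: the foo_*
 * callbacks and the 'adapter' softc are hypothetical driver names):
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = adapter->ifp;
 *	na.num_tx_desc = adapter->num_tx_desc;
 *	na.num_rx_desc = adapter->num_rx_desc;
 *	na.num_tx_rings = na.num_rx_rings = adapter->num_queues;
 *	na.nm_register = foo_netmap_reg;
 *	na.nm_txsync = foo_netmap_txsync;
 *	na.nm_rxsync = foo_netmap_rxsync;
 *	netmap_attach(&na);
 */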
3872 NM_DBG(netmap_adapter_get)(struct netmap_adapter *na)
3878 refcount_acquire(&na->na_refcount);
3882 /* returns 1 iff the netmap_adapter is destroyed */
3884 NM_DBG(netmap_adapter_put)(struct netmap_adapter *na)
3889 if (!refcount_release(&na->na_refcount))
3895 if (na->tx_rings) { /* XXX should not happen */
3896 if (netmap_debug & NM_DEBUG_ON)
3897 nm_prerr("freeing leftover tx_rings");
3898 na->nm_krings_delete(na);
3900 netmap_pipe_dealloc(na);
3902 netmap_mem_put(na->nm_mem);
3903 bzero(na, sizeof(*na));
3909 /* nm_krings_create callback for all hardware native adapters */
3911 netmap_hw_krings_create(struct netmap_adapter *na)
3913 int ret = netmap_krings_create(na, 0);
3915 /* initialize the mbq for the sw rx ring */
3916 u_int lim = netmap_real_rings(na, NR_RX), i;
3917 for (i = na->num_rx_rings; i < lim; i++) {
3918 mbq_safe_init(&NMR(na, NR_RX)[i]->rx_queue);
3920 nm_prdis("initialized sw rx queue %d", na->num_rx_rings);
3928 * Called on module unload by the netmap-enabled drivers
3931 netmap_detach(struct ifnet *ifp)
3933 struct netmap_adapter *na = NA(ifp);
3939 netmap_set_all_rings(na, NM_KR_LOCKED);
3941 * if the netmap adapter is not native, somebody
3942 * changed it, so we can not release it here.
3943 * The NAF_ZOMBIE flag will notify the new owner that
3944 * the driver is gone.
3946 if (!(na->na_flags & NAF_NATIVE) || !netmap_adapter_put(na)) {
3947 na->na_flags |= NAF_ZOMBIE;
3949 /* give active users a chance to notice that NAF_ZOMBIE has been
3950 * turned on, so that they can stop and return an error to userspace.
3951 * Note that this becomes a NOP if there are no active users and,
3952 * therefore, the put() above has deleted the na, since now NA(ifp) is
3953 * NULL.
3954 */
3955 netmap_enable_all_rings(ifp);
3961 * Intercept packets from the network stack and pass them
3962 * to netmap as incoming packets on the 'software' ring.
3964 * We only store packets in a bounded mbq and then copy them
3965 * in the relevant rxsync routine.
3967 * We rely on the OS to make sure that the ifp and na do not go
3968 * away (typically the caller checks for IFF_DRV_RUNNING or the like).
3969 * In nm_register() or whenever there is a reinitialization,
3970 * we make sure to make the mode change visible here.
3973 netmap_transmit(struct ifnet *ifp, struct mbuf *m)
3975 struct netmap_adapter *na = NA(ifp);
3976 struct netmap_kring *kring, *tx_kring;
3977 u_int len = MBUF_LEN(m);
3978 u_int error = ENOBUFS;
3985 if (i >= na->num_host_rx_rings) {
3986 i = i % na->num_host_rx_rings;
3988 kring = NMR(na, NR_RX)[nma_get_nrings(na, NR_RX) + i];
3990 // XXX [Linux] we do not need this lock
3991 // if we follow the down/configure/up protocol -gl
3992 // mtx_lock(&na->core_lock);
3994 if (!nm_netmap_on(na)) {
3995 nm_prerr("%s not in netmap mode anymore", na->name);
4001 if (txr >= na->num_tx_rings) {
4002 txr %= na->num_tx_rings;
4004 tx_kring = NMR(na, NR_TX)[txr];
4006 if (tx_kring->nr_mode == NKR_NETMAP_OFF) {
4007 return MBUF_TRANSMIT(na, ifp, m);
4010 q = &kring->rx_queue;
4012 // XXX reconsider long packets if we handle fragments
4013 if (len > NETMAP_BUF_SIZE(na)) { /* too long for us */
4014 nm_prerr("%s from_host, drop packet size %d > %d", na->name,
4015 len, NETMAP_BUF_SIZE(na));
4019 if (!netmap_generic_hwcsum) {
4020 if (nm_os_mbuf_has_csum_offld(m)) {
4021 nm_prlim(1, "%s drop mbuf that needs checksum offload", na->name);
4026 if (nm_os_mbuf_has_seg_offld(m)) {
4027 nm_prlim(1, "%s drop mbuf that needs generic segmentation offload", na->name);
4032 ETHER_BPF_MTAP(ifp, m);
4033 #endif /* __FreeBSD__ */
4035 /* protect against netmap_rxsync_from_host(), netmap_sw_to_nic()
4036 * and maybe other instances of netmap_transmit (the latter
4037 * not possible on Linux).
4038 * We enqueue the mbuf only if we are sure there is going to be
4039 * enough room in the host RX ring, otherwise we drop it.
4043 busy = kring->nr_hwtail - kring->nr_hwcur;
4045 busy += kring->nkr_num_slots;
4046 if (busy + mbq_len(q) >= kring->nkr_num_slots - 1) {
4047 nm_prlim(2, "%s full hwcur %d hwtail %d qlen %d", na->name,
4048 kring->nr_hwcur, kring->nr_hwtail, mbq_len(q));
4051 nm_prdis(2, "%s %d bufs in queue", na->name, mbq_len(q));
4052 /* notify outside the lock */
4061 /* unconditionally wake up listeners */
4062 kring->nm_notify(kring, 0);
4063 /* this is normally netmap_notify(), but for nics
4064 * connected to a bridge it is netmap_bwrap_intr_notify(),
4065 * that possibly forwards the frames through the switch
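/*
 * Occupancy arithmetic used above, by example: with nkr_num_slots =
 * 1024, nr_hwcur = 1000 and nr_hwtail = 10, busy = 10 - 1000 + 1024 =
 * 34 slots, so the mbuf is queued only while busy + mbq_len(q) stays
 * below 1023, guaranteeing room in the host RX ring when rxsync
 * drains the queue.
 */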
4073 * Reset function to be called by the driver routines when reinitializing
4074 * a hardware ring. The driver is in charge of locking to protect the kring
4075 * while this operation is being performed.
4076 * This is normally done by calling netmap_disable_all_rings() before
4077 * triggering a reset.
4078 * If the kring is not in netmap mode, return NULL to inform the caller
4079 * that this is the case.
4080 * If the kring is in netmap mode, reset the kring indices to 0.
4081 * In any case, adjust kring->nr_mode.
4083 struct netmap_slot *
4084 netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n,
4087 struct netmap_kring *kring;
4089 if (!nm_native_on(na)) {
4090 nm_prdis("interface not in native netmap mode");
4091 return NULL; /* nothing to reinitialize */
4095 if (n >= na->num_tx_rings)
4098 kring = na->tx_rings[n];
4101 if (n >= na->num_rx_rings)
4103 kring = na->rx_rings[n];
4105 if (kring->nr_pending_mode == NKR_NETMAP_OFF) {
4106 kring->nr_mode = NKR_NETMAP_OFF;
4109 if (netmap_verbose) {
4110 nm_prinf("%s, was: hc %u h %u c %u ht %u", kring->name,
4111 kring->nr_hwcur, kring->ring->head,
4112 kring->ring->cur, kring->nr_hwtail);
4114 /* For the time being, nkr_hwofs is not used. */
4115 kring->rhead = kring->rcur = kring->nr_hwcur = kring->nkr_hwofs = 0;
4116 kring->nr_hwtail = (tx == NR_TX) ? (kring->nkr_num_slots - 1) : 0;
4119 * Wakeup on the individual and global selwait
4120 * We do the wakeup here, but the ring is not yet reconfigured.
4121 * However, we are under lock so there are no races.
4123 kring->nr_mode = NKR_NETMAP_ON;
4124 kring->nm_notify(kring, 0);
4125 return kring->ring->slot;
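/*
 * Typical use in a driver ring (re)initialization path (sketch; the
 * descriptor-programming step is driver specific):
 *
 *	struct netmap_slot *slot = netmap_reset(na, NR_RX, ring_nr, 0);
 *
 *	if (slot != NULL) {	// ring is (or stays) in netmap mode
 *		for each hw descriptor i:
 *			addr = PNMB(na, slot + i, &paddr);
 *			program descriptor i with paddr;
 *	}
 */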
4130 * Dispatch rx/tx interrupts to the netmap rings.
4132 * "work_done" is non-null on the RX path, NULL for the TX path.
4133 * We rely on the OS to make sure that there is only one active
4134 * instance per queue, and that there is appropriate locking.
4136 * The 'notify' routine depends on what the ring is attached to.
4137 * - for a netmap file descriptor, do a selwakeup on the individual
4138 * waitqueue, plus one on the global one if needed
4139 * (see netmap_notify)
4140 * - for a nic connected to a switch, call the proper forwarding routine
4141 * (see netmap_bwrap_intr_notify)
4144 netmap_common_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
4146 struct netmap_kring *kring;
4147 enum txrx t = (work_done ? NR_RX : NR_TX);
4149 q &= NETMAP_RING_MASK;
4151 if (netmap_debug & (NM_DEBUG_RXINTR|NM_DEBUG_TXINTR)) {
4152 nm_prlim(5, "received %s queue %d", work_done ? "RX" : "TX" , q);
4155 if (q >= nma_get_nrings(na, t))
4156 return NM_IRQ_PASS; // not a physical queue
4158 kring = NMR(na, t)[q];
4160 if (kring->nr_mode == NKR_NETMAP_OFF) {
4165 kring->nr_kflags |= NKR_PENDINTR; // XXX atomic ?
4166 *work_done = 1; /* do not fire napi again */
4169 return kring->nm_notify(kring, 0);
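/*
 * Typical driver-side hook (sketch; 'rxr->me' stands for the driver's
 * own queue index): at the top of the RX interrupt/cleanup routine,
 *
 *	if (netmap_rx_irq(ifp, rxr->me, &work_done) != NM_IRQ_PASS)
 *		return;
 *
 * and, symmetrically, netmap_tx_irq(ifp, txr->me) on the TX path.
 */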
4174 * Default functions to handle rx/tx interrupts from a physical device.
4175 * "work_done" is non-null on the RX path, NULL for the TX path.
4177 * If the card is not in netmap mode, simply return NM_IRQ_PASS,
4178 * so that the caller proceeds with regular processing.
4179 * Otherwise call netmap_common_irq().
4181 * If the card is connected to a netmap file descriptor,
4182 * do a selwakeup on the individual queue, plus one on the global one
4183 * if needed (multiqueue card _and_ there are multiqueue listeners),
4184 * and return NR_IRQ_COMPLETED.
4186 * Finally, if called on rx from an interface connected to a switch,
4187 * calls the proper forwarding routine.
4190 netmap_rx_irq(struct ifnet *ifp, u_int q, u_int *work_done)
4192 struct netmap_adapter *na = NA(ifp);
4195 * XXX emulated netmap mode sets NAF_SKIP_INTR so
4196 * we still use the regular driver even though the previous
4197 * check fails. It is unclear whether we should use
4198 * nm_native_on() here.
4200 if (!nm_netmap_on(na))
4203 if (na->na_flags & NAF_SKIP_INTR) {
4204 nm_prdis("use regular interrupt");
4208 return netmap_common_irq(na, q, work_done);
4211 /* set/clear native flags and if_transmit/netdev_ops */
4213 nm_set_native_flags(struct netmap_adapter *na)
4215 struct ifnet *ifp = na->ifp;
4217 /* We do the setup for intercepting packets only if we are the
4218 * first user of this adapter. */
4219 if (na->active_fds > 0) {
4223 na->na_flags |= NAF_NETMAP_ON;
4225 nm_update_hostrings_mode(na);
4229 nm_clear_native_flags(struct netmap_adapter *na)
4231 struct ifnet *ifp = na->ifp;
4233 /* We undo the setup for intercepting packets only if we are the
4234 * last user of this adapter. */
4235 if (na->active_fds > 0) {
4239 nm_update_hostrings_mode(na);
4242 na->na_flags &= ~NAF_NETMAP_ON;
4246 netmap_krings_mode_commit(struct netmap_adapter *na, int onoff)
4253 for (i = 0; i < netmap_real_rings(na, t); i++) {
4254 struct netmap_kring *kring = NMR(na, t)[i];
4256 if (onoff && nm_kring_pending_on(kring))
4257 kring->nr_mode = NKR_NETMAP_ON;
4258 else if (!onoff && nm_kring_pending_off(kring))
4259 kring->nr_mode = NKR_NETMAP_OFF;
4265 * Module loader and unloader
4267 * netmap_init() creates the /dev/netmap device and initializes
4268 * all global variables. Returns 0 on success, errno on failure
4269 * (but there is no chance)
4271 * netmap_fini() destroys everything.
4274 static struct cdev *netmap_dev; /* /dev/netmap character device. */
4275 extern struct cdevsw netmap_cdevsw;
4282 destroy_dev(netmap_dev);
4283 /* we assume that there are no longer netmap users */
4285 netmap_uninit_bridges();
4288 nm_prinf("netmap: unloaded module.");
4299 error = netmap_mem_init();
4303 * MAKEDEV_ETERNAL_KLD avoids an expensive check on syscalls
4304 * when the module is compiled in.
4305 * XXX could use make_dev_credv() to get error number
4307 netmap_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD,
4308 &netmap_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600,
4313 error = netmap_init_bridges();
4318 nm_os_vi_init_index();
4321 error = nm_os_ifnet_init();
4325 nm_prinf("netmap: loaded module");
4329 return (EINVAL); /* may be incorrect */