2 * Copyright (C) 2013-2016 Universita` di Pisa
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * This module implements the VALE switch for netmap
33 NMG_LOCK() serializes all modifications to switches and ports.
34 A switch cannot be deleted until all ports are gone.
36 For each switch, an SX lock (RWlock on linux) protects
37 * deletion of ports. When creating or deleting a port, the
38 * lock is acquired in exclusive mode (after holding NMG_LOCK).
39 When forwarding, the lock is acquired in shared mode (without NMG_LOCK).
40 The lock is held throughout the entire forwarding cycle,
41 * during which the thread may incur a page fault.
42 Hence it is important that sleepable shared locks are used.
44 On the rx ring, the per-port lock is grabbed initially to reserve
45 * a number of slots in the ring, then the lock is released,
46 packets are copied from source to destination, and then
47 the lock is acquired again and the receive ring is updated.
48 (A similar thing is done on the tx ring for NIC and host stack
49 ports attached to the switch)
54 * OS-specific code that is used only within this file.
55 * Other OS-specific code that must be accessed by drivers
56 * is present in netmap_kern.h
59 #if defined(__FreeBSD__)
60 #include <sys/cdefs.h> /* prerequisite */
61 __FBSDID("$FreeBSD$");
63 #include <sys/types.h>
64 #include <sys/errno.h>
65 #include <sys/param.h> /* defines used in kernel.h */
66 #include <sys/kernel.h> /* types used in module initialization */
67 #include <sys/conf.h> /* cdevsw struct, UID, GID */
68 #include <sys/sockio.h>
69 #include <sys/socketvar.h> /* struct socket */
70 #include <sys/malloc.h>
72 #include <sys/rwlock.h>
73 #include <sys/socket.h> /* sockaddrs */
74 #include <sys/selinfo.h>
75 #include <sys/sysctl.h>
77 #include <net/if_var.h>
78 #include <net/bpf.h> /* BIOCIMMEDIATE */
79 #include <machine/bus.h> /* bus_dmamap_* */
80 #include <sys/endian.h>
81 #include <sys/refcount.h>
84 #define BDG_RWLOCK_T struct rwlock // struct rwlock
86 #define BDG_RWINIT(b) \
87 rw_init_flags(&(b)->bdg_lock, "bdg lock", RW_NOWITNESS)
88 #define BDG_WLOCK(b) rw_wlock(&(b)->bdg_lock)
89 #define BDG_WUNLOCK(b) rw_wunlock(&(b)->bdg_lock)
90 #define BDG_RLOCK(b) rw_rlock(&(b)->bdg_lock)
91 #define BDG_RTRYLOCK(b) rw_try_rlock(&(b)->bdg_lock)
92 #define BDG_RUNLOCK(b) rw_runlock(&(b)->bdg_lock)
93 #define BDG_RWDESTROY(b) rw_destroy(&(b)->bdg_lock)
100 #elif defined(__APPLE__)
102 #warning OSX support is only partial
103 #include "osx_glue.h"
105 #elif defined(_WIN32)
106 #include "win_glue.h"
110 #error Unsupported platform
112 #endif /* unsupported */
118 #include <net/netmap.h>
119 #include <dev/netmap/netmap_kern.h>
120 #include <dev/netmap/netmap_mem2.h>
125 * system parameters (most of them in netmap_kern.h)
126 * NM_BDG_NAME prefix for switch port names, default "vale"
127 * NM_BDG_MAXPORTS number of ports
128 * NM_BRIDGES max number of switches in the system.
129 * XXX should become a sysctl or tunable
131 * Switch ports are named valeX:Y where X is the switch name and Y
132 * is the port. If Y matches a physical interface name, the port is
133 * connected to a physical device.
135 * Unlike physical interfaces, switch ports use their own memory region
136 * for rings and buffers.
137 * The virtual interfaces use per-queue lock instead of core lock.
138 * In the tx loop, we aggregate traffic in batches to make all operations
139 * faster. The batch size is bridge_batch.
141 #define NM_BDG_MAXRINGS 16 /* XXX unclear how many. */
142 #define NM_BDG_MAXSLOTS 4096 /* XXX same as above */
143 #define NM_BRIDGE_RINGSIZE 1024 /* in the device */
144 #define NM_BDG_HASH 1024 /* forwarding table entries */
145 #define NM_BDG_BATCH 1024 /* entries in the forwarding buffer */
146 #define NM_MULTISEG 64 /* max size of a chain of bufs */
147 /* actual size of the tables */
148 #define NM_BDG_BATCH_MAX (NM_BDG_BATCH + NM_MULTISEG)
149 /* NM_FT_NULL terminates a list of slots in the ft */
150 #define NM_FT_NULL NM_BDG_BATCH_MAX
154 * bridge_batch is set via sysctl to the max batch size to be
155 * used in the bridge. The actual value may be larger as the
156 * last packet in the block may overflow the size.
158 static int bridge_batch = NM_BDG_BATCH; /* bridge batch size */
160 SYSCTL_DECL(_dev_netmap);
161 SYSCTL_INT(_dev_netmap, OID_AUTO, bridge_batch, CTLFLAG_RW, &bridge_batch, 0 , "");
164 static int netmap_vp_create(struct nmreq *, struct ifnet *, struct netmap_vp_adapter **);
165 static int netmap_vp_reg(struct netmap_adapter *na, int onoff);
166 static int netmap_bwrap_reg(struct netmap_adapter *, int onoff);
169 * For each output interface, nm_bdg_q is used to construct a list.
170 * bq_len is the number of output buffers (we can have coalescing
176 uint32_t bq_len; /* number of buffers */
179 /* XXX revise this */
181 uint64_t mac; /* the top 2 bytes are the epoch */
186 * nm_bridge is a descriptor for a VALE switch.
187 * Interfaces for a bridge are all in bdg_ports[].
188 * The array has fixed size, an empty entry does not terminate
189 * the search, but lookups only occur on attach/detach so we
190 * don't mind if they are slow.
192 * The bridge is non blocking on the transmit ports: excess
193 * packets are dropped if there is no room on the output port.
195 * bdg_lock protects accesses to the bdg_ports array.
196 * This is a rw lock (or equivalent).
199 /* XXX what is the proper alignment/layout ? */
200 BDG_RWLOCK_T bdg_lock; /* protects bdg_ports */
202 uint32_t bdg_active_ports; /* 0 means free */
203 char bdg_basename[IFNAMSIZ];
205 /* Indexes of active ports (up to active_ports)
206 * and all other remaining ports.
208 uint8_t bdg_port_index[NM_BDG_MAXPORTS];
210 struct netmap_vp_adapter *bdg_ports[NM_BDG_MAXPORTS];
214 * The function to decide the destination port.
215 * It returns either the index of the destination port,
216 * NM_BDG_BROADCAST to broadcast this packet, or NM_BDG_NOPORT not to
217 * forward this packet. ring_nr is the source ring index, and the
218 * function may overwrite this value to forward this packet to a
219 * different ring index.
220 * This function must be set by netmap_bdg_ctl().
222 struct netmap_bdg_ops bdg_ops;
224 /* the forwarding table, MAC+ports.
225 * XXX should be changed to an argument to be passed to
226 * the lookup function, and allocated on attach
228 struct nm_hash_ent ht[NM_BDG_HASH];
232 #endif /* CONFIG_NET_NS */
236 netmap_bdg_name(struct netmap_vp_adapter *vp)
238 struct nm_bridge *b = vp->na_bdg;
241 return b->bdg_basename;
245 #ifndef CONFIG_NET_NS
247 * XXX in principle nm_bridges could be created dynamically
248 * Right now we have a static array and deletions are protected
249 * by an exclusive lock.
251 static struct nm_bridge *nm_bridges;
252 #endif /* !CONFIG_NET_NS */
256 * this is a slightly optimized copy routine which rounds
257 * to multiple of 64 bytes and is often faster than dealing
258 * with other odd sizes. We assume there is enough room
259 * in the source and destination buffers.
261 * XXX only for multiples of 64 bytes, non overlapped.
264 pkt_copy(void *_src, void *_dst, int l)
266 uint64_t *src = _src;
267 uint64_t *dst = _dst;
268 if (unlikely(l >= 1024)) {
272 for (; likely(l > 0); l-=64) {
286 nm_is_id_char(const char c)
288 return (c >= 'a' && c <= 'z') ||
289 (c >= 'A' && c <= 'Z') ||
290 (c >= '0' && c <= '9') ||
294 /* Validate the name of a VALE bridge port and return the
295 * position of the ":" character. */
297 nm_vale_name_validate(const char *name)
302 if (!name || strlen(name) < strlen(NM_BDG_NAME)) {
306 for (i = 0; name[i]; i++) {
307 if (name[i] == ':') {
308 if (colon_pos != -1) {
312 } else if (!nm_is_id_char(name[i])) {
325 * locate a bridge among the existing ones.
326 * MUST BE CALLED WITH NMG_LOCK()
328 * a ':' in the name terminates the bridge name. Otherwise, just NM_NAME.
329 * We assume that this is called with a name of at least NM_NAME chars.
331 static struct nm_bridge *
332 nm_find_bridge(const char *name, int create)
335 struct nm_bridge *b = NULL, *bridges;
340 netmap_bns_getbridges(&bridges, &num_bridges);
342 namelen = nm_vale_name_validate(name);
344 D("invalid bridge name %s", name ? name : NULL);
348 /* lookup the name, remember empty slot if there is one */
349 for (i = 0; i < num_bridges; i++) {
350 struct nm_bridge *x = bridges + i;
352 if (x->bdg_active_ports == 0) {
353 if (create && b == NULL)
354 b = x; /* record empty slot */
355 } else if (x->bdg_namelen != namelen) {
357 } else if (strncmp(name, x->bdg_basename, namelen) == 0) {
358 ND("found '%.*s' at %d", namelen, name, i);
363 if (i == num_bridges && b) { /* name not found, can create entry */
364 /* initialize the bridge */
365 strncpy(b->bdg_basename, name, namelen);
366 ND("create new bridge %s with ports %d", b->bdg_basename,
367 b->bdg_active_ports);
368 b->bdg_namelen = namelen;
369 b->bdg_active_ports = 0;
370 for (i = 0; i < NM_BDG_MAXPORTS; i++)
371 b->bdg_port_index[i] = i;
372 /* set the default function */
373 b->bdg_ops.lookup = netmap_bdg_learning;
374 /* reset the MAC address table */
375 bzero(b->ht, sizeof(struct nm_hash_ent) * NM_BDG_HASH);
383 * Free the forwarding tables for rings attached to switch ports.
386 nm_free_bdgfwd(struct netmap_adapter *na)
389 struct netmap_kring *kring;
392 nrings = na->num_tx_rings;
393 kring = na->tx_rings;
394 for (i = 0; i < nrings; i++) {
395 if (kring[i].nkr_ft) {
396 free(kring[i].nkr_ft, M_DEVBUF);
397 kring[i].nkr_ft = NULL; /* protect from freeing twice */
404 * Allocate the forwarding tables for the rings attached to the bridge ports.
407 nm_alloc_bdgfwd(struct netmap_adapter *na)
409 int nrings, l, i, num_dstq;
410 struct netmap_kring *kring;
413 /* all port:rings + broadcast */
414 num_dstq = NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1;
415 l = sizeof(struct nm_bdg_fwd) * NM_BDG_BATCH_MAX;
416 l += sizeof(struct nm_bdg_q) * num_dstq;
417 l += sizeof(uint16_t) * NM_BDG_BATCH_MAX;
419 nrings = netmap_real_rings(na, NR_TX);
420 kring = na->tx_rings;
421 for (i = 0; i < nrings; i++) {
422 struct nm_bdg_fwd *ft;
423 struct nm_bdg_q *dstq;
426 ft = malloc(l, M_DEVBUF, M_NOWAIT | M_ZERO);
431 dstq = (struct nm_bdg_q *)(ft + NM_BDG_BATCH_MAX);
432 for (j = 0; j < num_dstq; j++) {
433 dstq[j].bq_head = dstq[j].bq_tail = NM_FT_NULL;
436 kring[i].nkr_ft = ft;
442 /* remove from bridge b the ports in slots hw and sw
443 * (sw can be -1 if not needed)
446 netmap_bdg_detach_common(struct nm_bridge *b, int hw, int sw)
448 int s_hw = hw, s_sw = sw;
449 int i, lim =b->bdg_active_ports;
450 uint8_t tmp[NM_BDG_MAXPORTS];
454 make a copy of bdg_port_index;
455 lookup NA(ifp)->bdg_port and SWNA(ifp)->bdg_port
456 in the array of bdg_port_index, replacing them with
457 entries from the bottom of the array;
458 decrement bdg_active_ports;
459 acquire BDG_WLOCK() and copy back the array.
463 D("detach %d and %d (lim %d)", hw, sw, lim);
464 /* make a copy of the list of active ports, update it,
465 * and then copy back within BDG_WLOCK().
467 memcpy(tmp, b->bdg_port_index, sizeof(tmp));
468 for (i = 0; (hw >= 0 || sw >= 0) && i < lim; ) {
469 if (hw >= 0 && tmp[i] == hw) {
470 ND("detach hw %d at %d", hw, i);
471 lim--; /* point to last active port */
472 tmp[i] = tmp[lim]; /* swap with i */
473 tmp[lim] = hw; /* now this is inactive */
475 } else if (sw >= 0 && tmp[i] == sw) {
476 ND("detach sw %d at %d", sw, i);
485 if (hw >= 0 || sw >= 0) {
486 D("XXX delete failed hw %d sw %d, should panic...", hw, sw);
491 b->bdg_ops.dtor(b->bdg_ports[s_hw]);
492 b->bdg_ports[s_hw] = NULL;
494 b->bdg_ports[s_sw] = NULL;
496 memcpy(b->bdg_port_index, tmp, sizeof(tmp));
497 b->bdg_active_ports = lim;
500 ND("now %d active ports", lim);
502 ND("marking bridge %s as free", b->bdg_basename);
503 bzero(&b->bdg_ops, sizeof(b->bdg_ops));
508 /* nm_bdg_ctl callback for VALE ports */
510 netmap_vp_bdg_ctl(struct netmap_adapter *na, struct nmreq *nmr, int attach)
512 struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter *)na;
513 struct nm_bridge *b = vpna->na_bdg;
515 (void)nmr; // XXX merge ?
517 return 0; /* nothing to do */
519 netmap_set_all_rings(na, 0 /* disable */);
520 netmap_bdg_detach_common(b, vpna->bdg_port, -1);
522 netmap_set_all_rings(na, 1 /* enable */);
524 /* drop the reference that was taken for the attach */
525 netmap_adapter_put(na);
529 /* nm_dtor callback for ephemeral VALE ports */
531 netmap_vp_dtor(struct netmap_adapter *na)
533 struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter*)na;
534 struct nm_bridge *b = vpna->na_bdg;
536 ND("%s has %d references", na->name, na->na_refcount);
539 netmap_bdg_detach_common(b, vpna->bdg_port, -1);
543 /* remove a persistent VALE port from the system */
545 nm_vi_destroy(const char *name)
550 ifp = ifunit_ref(name);
554 /* make sure this is actually a VALE port */
555 if (!NM_NA_VALID(ifp) || NA(ifp)->nm_register != netmap_vp_reg) {
560 if (NA(ifp)->na_refcount > 1) {
566 D("destroying a persistent vale interface %s", ifp->if_xname);
567 /* Linux requires all the references are released
572 nm_os_vi_detach(ifp);
582 * Create a virtual interface registered to the system.
583 * The interface will be attached to a bridge later.
586 nm_vi_create(struct nmreq *nmr)
589 struct netmap_vp_adapter *vpna;
592 /* don't include VALE prefix */
593 if (!strncmp(nmr->nr_name, NM_BDG_NAME, strlen(NM_BDG_NAME)))
595 ifp = ifunit_ref(nmr->nr_name);
596 if (ifp) { /* already exist, cannot create new one */
600 error = nm_os_vi_persist(nmr->nr_name, &ifp);
605 /* netmap_vp_create creates a struct netmap_vp_adapter */
606 error = netmap_vp_create(nmr, ifp, &vpna);
608 D("error %d", error);
609 nm_os_vi_detach(ifp);
612 /* persist-specific routines */
613 vpna->up.nm_bdg_ctl = netmap_vp_bdg_ctl;
614 netmap_adapter_get(&vpna->up);
615 NM_ATTACH_NA(ifp, &vpna->up);
617 D("created %s", ifp->if_xname);
621 /* Try to get a reference to a netmap adapter attached to a VALE switch.
622 * If the adapter is found (or is created), this function returns 0, a
623 * non NULL pointer is returned into *na, and the caller holds a
624 * reference to the adapter.
625 * If an adapter is not found, then no reference is grabbed and the
626 * function returns an error code, or 0 if there is just a VALE prefix
627 * mismatch. Therefore the caller holds a reference when
628 * (*na != NULL && return == 0).
631 netmap_get_bdg_na(struct nmreq *nmr, struct netmap_adapter **na, int create)
633 char *nr_name = nmr->nr_name;
637 struct netmap_vp_adapter *vpna, *hostna = NULL;
639 int i, j, cand = -1, cand2 = -1;
642 *na = NULL; /* default return value */
644 /* first try to see if this is a bridge port. */
646 if (strncmp(nr_name, NM_BDG_NAME, sizeof(NM_BDG_NAME) - 1)) {
647 return 0; /* no error, but no VALE prefix */
650 b = nm_find_bridge(nr_name, create);
652 D("no bridges available for '%s'", nr_name);
653 return (create ? ENOMEM : ENXIO);
655 if (strlen(nr_name) < b->bdg_namelen) /* impossible */
658 /* Now we are sure that name starts with the bridge's name,
659 * lookup the port in the bridge. We need to scan the entire
660 * list. It is not important to hold a WLOCK on the bridge
661 * during the search because NMG_LOCK already guarantees
662 * that there are no other possible writers.
665 /* lookup in the local list of ports */
666 for (j = 0; j < b->bdg_active_ports; j++) {
667 i = b->bdg_port_index[j];
668 vpna = b->bdg_ports[i];
669 // KASSERT(na != NULL);
670 ND("checking %s", vpna->up.name);
671 if (!strcmp(vpna->up.name, nr_name)) {
672 netmap_adapter_get(&vpna->up);
673 ND("found existing if %s refs %d", nr_name)
678 /* not found, should we create it? */
681 /* yes we should, see if we have space to attach entries */
682 needed = 2; /* in some cases we only need 1 */
683 if (b->bdg_active_ports + needed >= NM_BDG_MAXPORTS) {
684 D("bridge full %d, cannot create new port", b->bdg_active_ports);
687 /* record the next two ports available, but do not allocate yet */
688 cand = b->bdg_port_index[b->bdg_active_ports];
689 cand2 = b->bdg_port_index[b->bdg_active_ports + 1];
690 ND("+++ bridge %s port %s used %d avail %d %d",
691 b->bdg_basename, ifname, b->bdg_active_ports, cand, cand2);
694 * try see if there is a matching NIC with this name
695 * (after the bridge's name)
697 ifname = nr_name + b->bdg_namelen + 1;
698 ifp = ifunit_ref(ifname);
700 /* Create an ephemeral virtual port
701 * This block contains all the ephemeral-specific logics
704 /* nr_cmd must be 0 for a virtual port */
708 /* bdg_netmap_attach creates a struct netmap_adapter */
709 error = netmap_vp_create(nmr, NULL, &vpna);
711 D("error %d", error);
715 /* shortcut - we can skip get_hw_na(),
716 * ownership check and nm_bdg_attach()
719 struct netmap_adapter *hw;
721 error = netmap_get_hw_na(ifp, &hw);
722 if (error || hw == NULL)
725 /* host adapter might not be created */
726 error = hw->nm_bdg_attach(nr_name, hw);
730 hostna = hw->na_hostvp;
731 if (nmr->nr_arg1 != NETMAP_BDG_HOST)
736 vpna->bdg_port = cand;
737 ND("NIC %p to bridge port %d", vpna, cand);
738 /* bind the port to the bridge (virtual ports are not active) */
739 b->bdg_ports[cand] = vpna;
741 b->bdg_active_ports++;
742 if (hostna != NULL) {
743 /* also bind the host stack to the bridge */
744 b->bdg_ports[cand2] = hostna;
745 hostna->bdg_port = cand2;
747 b->bdg_active_ports++;
748 ND("host %p to bridge port %d", hostna, cand2);
750 ND("if %s refs %d", ifname, vpna->up.na_refcount);
753 netmap_adapter_get(*na);
763 /* Process NETMAP_BDG_ATTACH */
765 nm_bdg_ctl_attach(struct nmreq *nmr)
767 struct netmap_adapter *na;
772 error = netmap_get_bdg_na(nmr, &na, 1 /* create if not exists */);
773 if (error) /* no device */
776 if (na == NULL) { /* VALE prefix missing */
781 if (NETMAP_OWNED_BY_ANY(na)) {
786 if (na->nm_bdg_ctl) {
787 /* nop for VALE ports. The bwrap needs to put the hwna
788 * in netmap mode (see netmap_bwrap_bdg_ctl)
790 error = na->nm_bdg_ctl(na, nmr, 1);
793 ND("registered %s to netmap-mode", na->name);
799 netmap_adapter_put(na);
806 nm_is_bwrap(struct netmap_adapter *na)
808 return na->nm_register == netmap_bwrap_reg;
811 /* process NETMAP_BDG_DETACH */
813 nm_bdg_ctl_detach(struct nmreq *nmr)
815 struct netmap_adapter *na;
819 error = netmap_get_bdg_na(nmr, &na, 0 /* don't create */);
820 if (error) { /* no device, or another bridge or user owns the device */
824 if (na == NULL) { /* VALE prefix missing */
827 } else if (nm_is_bwrap(na) &&
828 ((struct netmap_bwrap_adapter *)na)->na_polling_state) {
829 /* Don't detach a NIC with polling */
831 netmap_adapter_put(na);
834 if (na->nm_bdg_ctl) {
835 /* remove the port from bridge. The bwrap
836 * also needs to put the hwna in normal mode
838 error = na->nm_bdg_ctl(na, nmr, 0);
841 netmap_adapter_put(na);
848 struct nm_bdg_polling_state;
851 struct nm_kthread *nmk;
854 struct nm_bdg_polling_state *bps;
857 struct nm_bdg_polling_state {
860 struct netmap_bwrap_adapter *bna;
866 struct nm_bdg_kthread *kthreads;
870 netmap_bwrap_polling(void *data)
872 struct nm_bdg_kthread *nbk = data;
873 struct netmap_bwrap_adapter *bna;
874 u_int qfirst, qlast, i;
875 struct netmap_kring *kring0, *kring;
879 qfirst = nbk->qfirst;
882 kring0 = NMR(bna->hwna, NR_RX);
884 for (i = qfirst; i < qlast; i++) {
886 kring->nm_notify(kring, 0);
891 nm_bdg_create_kthreads(struct nm_bdg_polling_state *bps)
893 struct nm_kthread_cfg kcfg;
896 bps->kthreads = malloc(sizeof(struct nm_bdg_kthread) * bps->ncpus,
897 M_DEVBUF, M_NOWAIT | M_ZERO);
898 if (bps->kthreads == NULL)
901 bzero(&kcfg, sizeof(kcfg));
902 kcfg.worker_fn = netmap_bwrap_polling;
903 for (i = 0; i < bps->ncpus; i++) {
904 struct nm_bdg_kthread *t = bps->kthreads + i;
905 int all = (bps->ncpus == 1 && bps->reg == NR_REG_ALL_NIC);
906 int affinity = bps->cpu_from + i;
909 t->qfirst = all ? bps->qfirst /* must be 0 */: affinity;
910 t->qlast = all ? bps->qlast : t->qfirst + 1;
911 D("kthread %d a:%u qf:%u ql:%u", i, affinity, t->qfirst,
915 kcfg.worker_private = t;
916 t->nmk = nm_os_kthread_create(&kcfg, 0, NULL);
917 if (t->nmk == NULL) {
920 nm_os_kthread_set_affinity(t->nmk, affinity);
925 for (j = 0; j < i; j++) {
926 struct nm_bdg_kthread *t = bps->kthreads + i;
927 nm_os_kthread_delete(t->nmk);
929 free(bps->kthreads, M_DEVBUF);
933 /* a version of ptnetmap_start_kthreads() */
935 nm_bdg_polling_start_kthreads(struct nm_bdg_polling_state *bps)
940 D("polling is not configured");
943 bps->stopped = false;
945 for (i = 0; i < bps->ncpus; i++) {
946 struct nm_bdg_kthread *t = bps->kthreads + i;
947 error = nm_os_kthread_start(t->nmk);
949 D("error in nm_kthread_start()");
956 for (j = 0; j < i; j++) {
957 struct nm_bdg_kthread *t = bps->kthreads + i;
958 nm_os_kthread_stop(t->nmk);
965 nm_bdg_polling_stop_delete_kthreads(struct nm_bdg_polling_state *bps)
972 for (i = 0; i < bps->ncpus; i++) {
973 struct nm_bdg_kthread *t = bps->kthreads + i;
974 nm_os_kthread_stop(t->nmk);
975 nm_os_kthread_delete(t->nmk);
981 get_polling_cfg(struct nmreq *nmr, struct netmap_adapter *na,
982 struct nm_bdg_polling_state *bps)
984 int req_cpus, avail_cpus, core_from;
985 u_int reg, i, qfirst, qlast;
987 avail_cpus = nm_os_ncpus();
988 req_cpus = nmr->nr_arg1;
991 D("req_cpus must be > 0");
993 } else if (req_cpus >= avail_cpus) {
994 D("for safety, we need at least one core left in the system");
997 reg = nmr->nr_flags & NR_REG_MASK;
998 i = nmr->nr_ringid & NETMAP_RING_MASK;
1000 * ONE_NIC: dedicate one core to one ring. If multiple cores
1001 * are specified, consecutive rings are also polled.
1002 * For example, if ringid=2 and 2 cores are given,
1003 * ring 2 and 3 are polled by core 2 and 3, respectively.
1004 * ALL_NIC: poll all the rings using a core specified by ringid.
1005 * the number of cores must be 1.
1007 if (reg == NR_REG_ONE_NIC) {
1008 if (i + req_cpus > nma_get_nrings(na, NR_RX)) {
1009 D("only %d rings exist (ring %u-%u is given)",
1010 nma_get_nrings(na, NR_RX), i, i+req_cpus);
1014 qlast = qfirst + req_cpus;
1016 } else if (reg == NR_REG_ALL_NIC) {
1017 if (req_cpus != 1) {
1018 D("ncpus must be 1 not %d for REG_ALL_NIC", req_cpus);
1022 qlast = nma_get_nrings(na, NR_RX);
1025 D("reg must be ALL_NIC or ONE_NIC");
1030 bps->qfirst = qfirst;
1032 bps->cpu_from = core_from;
1033 bps->ncpus = req_cpus;
1034 D("%s qfirst %u qlast %u cpu_from %u ncpus %u",
1035 reg == NR_REG_ALL_NIC ? "REG_ALL_NIC" : "REG_ONE_NIC",
1036 qfirst, qlast, core_from, req_cpus);
1041 nm_bdg_ctl_polling_start(struct nmreq *nmr, struct netmap_adapter *na)
1043 struct nm_bdg_polling_state *bps;
1044 struct netmap_bwrap_adapter *bna;
1047 bna = (struct netmap_bwrap_adapter *)na;
1048 if (bna->na_polling_state) {
1049 D("ERROR adapter already in polling mode");
1053 bps = malloc(sizeof(*bps), M_DEVBUF, M_NOWAIT | M_ZERO);
1056 bps->configured = false;
1057 bps->stopped = true;
1059 if (get_polling_cfg(nmr, na, bps)) {
1060 free(bps, M_DEVBUF);
1064 if (nm_bdg_create_kthreads(bps)) {
1065 free(bps, M_DEVBUF);
1069 bps->configured = true;
1070 bna->na_polling_state = bps;
1073 /* disable interrupt if possible */
1074 if (bna->hwna->nm_intr)
1075 bna->hwna->nm_intr(bna->hwna, 0);
1076 /* start kthread now */
1077 error = nm_bdg_polling_start_kthreads(bps);
1079 D("ERROR nm_bdg_polling_start_kthread()");
1080 free(bps->kthreads, M_DEVBUF);
1081 free(bps, M_DEVBUF);
1082 bna->na_polling_state = NULL;
1083 if (bna->hwna->nm_intr)
1084 bna->hwna->nm_intr(bna->hwna, 1);
1090 nm_bdg_ctl_polling_stop(struct nmreq *nmr, struct netmap_adapter *na)
1092 struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter *)na;
1093 struct nm_bdg_polling_state *bps;
1095 if (!bna->na_polling_state) {
1096 D("ERROR adapter is not in polling mode");
1099 bps = bna->na_polling_state;
1100 nm_bdg_polling_stop_delete_kthreads(bna->na_polling_state);
1101 bps->configured = false;
1102 free(bps, M_DEVBUF);
1103 bna->na_polling_state = NULL;
1104 /* reenable interrupt */
1105 if (bna->hwna->nm_intr)
1106 bna->hwna->nm_intr(bna->hwna, 1);
1110 /* Called by either user's context (netmap_ioctl())
1111 * or external kernel modules (e.g., Openvswitch).
1112 * Operation is indicated in nmr->nr_cmd.
1113 * NETMAP_BDG_OPS that sets configure/lookup/dtor functions to the bridge
1114 * requires bdg_ops argument; the other commands ignore this argument.
1116 * Called without NMG_LOCK.
1119 netmap_bdg_ctl(struct nmreq *nmr, struct netmap_bdg_ops *bdg_ops)
1121 struct nm_bridge *b, *bridges;
1122 struct netmap_adapter *na;
1123 struct netmap_vp_adapter *vpna;
1124 char *name = nmr->nr_name;
1125 int cmd = nmr->nr_cmd, namelen = strlen(name);
1126 int error = 0, i, j;
1129 netmap_bns_getbridges(&bridges, &num_bridges);
1132 case NETMAP_BDG_NEWIF:
1133 error = nm_vi_create(nmr);
1136 case NETMAP_BDG_DELIF:
1137 error = nm_vi_destroy(nmr->nr_name);
1140 case NETMAP_BDG_ATTACH:
1141 error = nm_bdg_ctl_attach(nmr);
1144 case NETMAP_BDG_DETACH:
1145 error = nm_bdg_ctl_detach(nmr);
1148 case NETMAP_BDG_LIST:
1149 /* this is used to enumerate bridges and ports */
1150 if (namelen) { /* look up indexes of bridge and port */
1151 if (strncmp(name, NM_BDG_NAME, strlen(NM_BDG_NAME))) {
1156 b = nm_find_bridge(name, 0 /* don't create */);
1164 nmr->nr_arg1 = b - bridges; /* bridge index */
1165 nmr->nr_arg2 = NM_BDG_NOPORT;
1166 for (j = 0; j < b->bdg_active_ports; j++) {
1167 i = b->bdg_port_index[j];
1168 vpna = b->bdg_ports[i];
1170 D("---AAAAAAAAARGH-------");
1173 /* the former and the latter identify a
1174 * virtual port and a NIC, respectively
1176 if (!strcmp(vpna->up.name, name)) {
1177 nmr->nr_arg2 = i; /* port index */
1183 /* return the first non-empty entry starting from
1184 * bridge nr_arg1 and port nr_arg2.
1186 * Users can detect the end of the same bridge by
1187 * seeing the new and old value of nr_arg1, and can
1188 * detect the end of all the bridge by error != 0
1194 for (error = ENOENT; i < NM_BRIDGES; i++) {
1196 if (j >= b->bdg_active_ports) {
1197 j = 0; /* following bridges scan from 0 */
1202 j = b->bdg_port_index[j];
1203 vpna = b->bdg_ports[j];
1204 strncpy(name, vpna->up.name, (size_t)IFNAMSIZ);
1212 case NETMAP_BDG_REGOPS: /* XXX this should not be available from userspace */
1213 /* register callbacks to the given bridge.
1214 * nmr->nr_name may be just bridge's name (including ':'
1215 * if it is not just NM_NAME).
1222 b = nm_find_bridge(name, 0 /* don't create */);
1226 b->bdg_ops = *bdg_ops;
1231 case NETMAP_BDG_VNET_HDR:
1232 /* Valid lengths for the virtio-net header are 0 (no header),
1234 if (nmr->nr_arg1 != 0 &&
1235 nmr->nr_arg1 != sizeof(struct nm_vnet_hdr) &&
1236 nmr->nr_arg1 != 12) {
1241 error = netmap_get_bdg_na(nmr, &na, 0);
1243 vpna = (struct netmap_vp_adapter *)na;
1244 na->virt_hdr_len = nmr->nr_arg1;
1245 if (na->virt_hdr_len) {
1246 vpna->mfs = NETMAP_BUF_SIZE(na);
1248 D("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na);
1249 netmap_adapter_put(na);
1256 case NETMAP_BDG_POLLING_ON:
1257 case NETMAP_BDG_POLLING_OFF:
1259 error = netmap_get_bdg_na(nmr, &na, 0);
1261 if (!nm_is_bwrap(na)) {
1263 } else if (cmd == NETMAP_BDG_POLLING_ON) {
1264 error = nm_bdg_ctl_polling_start(nmr, na);
1266 netmap_adapter_get(na);
1268 error = nm_bdg_ctl_polling_stop(nmr, na);
1270 netmap_adapter_put(na);
1272 netmap_adapter_put(na);
1278 D("invalid cmd (nmr->nr_cmd) (0x%x)", cmd);
1286 netmap_bdg_config(struct nmreq *nmr)
1288 struct nm_bridge *b;
1292 b = nm_find_bridge(nmr->nr_name, 0);
1298 /* Don't call config() with NMG_LOCK() held */
1300 if (b->bdg_ops.config != NULL)
1301 error = b->bdg_ops.config((struct nm_ifreq *)nmr);
1307 /* nm_krings_create callback for VALE ports.
1308 * Calls the standard netmap_krings_create, then adds leases on rx
1309 * rings and bdgfwd on tx rings.
1312 netmap_vp_krings_create(struct netmap_adapter *na)
1317 u_int nrx = netmap_real_rings(na, NR_RX);
1320 * Leases are attached to RX rings on vale ports
1322 tailroom = sizeof(uint32_t) * na->num_rx_desc * nrx;
1324 error = netmap_krings_create(na, tailroom);
1328 leases = na->tailroom;
1330 for (i = 0; i < nrx; i++) { /* Receive rings */
1331 na->rx_rings[i].nkr_leases = leases;
1332 leases += na->num_rx_desc;
1335 error = nm_alloc_bdgfwd(na);
1337 netmap_krings_delete(na);
1345 /* nm_krings_delete callback for VALE ports. */
1347 netmap_vp_krings_delete(struct netmap_adapter *na)
1350 netmap_krings_delete(na);
1355 nm_bdg_flush(struct nm_bdg_fwd *ft, u_int n,
1356 struct netmap_vp_adapter *na, u_int ring_nr);
1360 * main dispatch routine for the bridge.
1361 * Grab packets from a kring, move them into the ft structure
1362 * associated to the tx (input) port. Max one instance per port,
1363 * filtered on input (ioctl, poll or XXX).
1364 * Returns the next position in the ring.
1367 nm_bdg_preflush(struct netmap_kring *kring, u_int end)
1369 struct netmap_vp_adapter *na =
1370 (struct netmap_vp_adapter*)kring->na;
1371 struct netmap_ring *ring = kring->ring;
1372 struct nm_bdg_fwd *ft;
1373 u_int ring_nr = kring->ring_id;
1374 u_int j = kring->nr_hwcur, lim = kring->nkr_num_slots - 1;
1375 u_int ft_i = 0; /* start from 0 */
1376 u_int frags = 1; /* how many frags ? */
1377 struct nm_bridge *b = na->na_bdg;
1379 /* To protect against modifications to the bridge we acquire a
1380 * shared lock, waiting if we can sleep (if the source port is
1381 * attached to a user process) or with a trylock otherwise (NICs).
1383 ND("wait rlock for %d packets", ((j > end ? lim+1 : 0) + end) - j);
1384 if (na->up.na_flags & NAF_BDG_MAYSLEEP)
1386 else if (!BDG_RTRYLOCK(b))
1388 ND(5, "rlock acquired for %d packets", ((j > end ? lim+1 : 0) + end) - j);
1391 for (; likely(j != end); j = nm_next(j, lim)) {
1392 struct netmap_slot *slot = &ring->slot[j];
1395 ft[ft_i].ft_len = slot->len;
1396 ft[ft_i].ft_flags = slot->flags;
1398 ND("flags is 0x%x", slot->flags);
1399 /* we do not use the buf changed flag, but we still need to reset it */
1400 slot->flags &= ~NS_BUF_CHANGED;
1402 /* this slot goes into a list so initialize the link field */
1403 ft[ft_i].ft_next = NM_FT_NULL;
1404 buf = ft[ft_i].ft_buf = (slot->flags & NS_INDIRECT) ?
1405 (void *)(uintptr_t)slot->ptr : NMB(&na->up, slot);
1406 if (unlikely(buf == NULL)) {
1407 RD(5, "NULL %s buffer pointer from %s slot %d len %d",
1408 (slot->flags & NS_INDIRECT) ? "INDIRECT" : "DIRECT",
1409 kring->name, j, ft[ft_i].ft_len);
1410 buf = ft[ft_i].ft_buf = NETMAP_BUF_BASE(&na->up);
1411 ft[ft_i].ft_len = 0;
1412 ft[ft_i].ft_flags = 0;
1414 __builtin_prefetch(buf);
1416 if (slot->flags & NS_MOREFRAG) {
1420 if (unlikely(netmap_verbose && frags > 1))
1421 RD(5, "%d frags at %d", frags, ft_i - frags);
1422 ft[ft_i - frags].ft_frags = frags;
1424 if (unlikely((int)ft_i >= bridge_batch))
1425 ft_i = nm_bdg_flush(ft, ft_i, na, ring_nr);
1428 /* Here ft_i > 0, ft[ft_i-1].flags has NS_MOREFRAG, and we
1429 * have to fix frags count. */
1431 ft[ft_i - 1].ft_flags &= ~NS_MOREFRAG;
1432 ft[ft_i - frags].ft_frags = frags;
1433 D("Truncate incomplete fragment at %d (%d frags)", ft_i, frags);
1436 ft_i = nm_bdg_flush(ft, ft_i, na, ring_nr);
1442 /* ----- FreeBSD if_bridge hash function ------- */
1445 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
1446 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
1448 * http://www.burtleburtle.net/bob/hash/spooky.html
/* Bob Jenkins' reversible 3-word mixing step over 32-bit a, b, c
 * (from the lookup2/"Algorithm Alley" hash referenced above).
 * Multi-statement macro; closed by the do/while(0) idiom below. */
1450 #define mix(a, b, c) \
1452 a -= b; a -= c; a ^= (c >> 13); \
1453 b -= c; b -= a; b ^= (a << 8); \
1454 c -= a; c -= b; c ^= (b >> 13); \
1455 a -= b; a -= c; a ^= (c >> 12); \
1456 b -= c; b -= a; b ^= (a << 16); \
1457 c -= a; c -= b; c ^= (b >> 5); \
1458 a -= b; a -= c; a ^= (c >> 3); \
1459 b -= c; b -= a; b ^= (a << 10); \
1460 c -= a; c -= b; c ^= (b >> 15); \
1461 } while (/*CONSTCOND*/0)
/* Hash a 6-byte MAC address into a forwarding-table bucket.
 * Returns the Jenkins-mixed value masked to NM_BDG_HASH-1 buckets. */
1464 static __inline uint32_t
1465 nm_bridge_rthash(const uint8_t *addr)
1467 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; // hash key (golden ratio constant)
1477 #define BRIDGE_RTHASH_MASK (NM_BDG_HASH-1)
1478 return (c & BRIDGE_RTHASH_MASK);
1484 /* nm_register callback for VALE ports */
1486 netmap_vp_reg(struct netmap_adapter *na, int onoff)
1488 struct netmap_vp_adapter *vpna =
1489 (struct netmap_vp_adapter*)na;
1493 /* persistent ports may be put in netmap mode
1494 * before being attached to a bridge
/* take the bridge write lock so no forwarding runs while ring modes flip */
1497 BDG_WLOCK(vpna->na_bdg);
/* --- onoff path: turn pending-on krings into NETMAP_ON --- */
1500 for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
1501 struct netmap_kring *kring = &NMR(na, t)[i];
1503 if (nm_kring_pending_on(kring))
1504 kring->nr_mode = NKR_NETMAP_ON;
/* only the first registration flips the adapter-wide flag */
1507 if (na->active_fds == 0)
1508 na->na_flags |= NAF_NETMAP_ON;
1509 /* XXX on FreeBSD, persistent VALE ports should also
1510 * toggle IFCAP_NETMAP in na->ifp (2014-03-16)
/* --- off path: mirror of the above, clear flag on last close --- */
1513 if (na->active_fds == 0)
1514 na->na_flags &= ~NAF_NETMAP_ON;
1516 for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
1517 struct netmap_kring *kring = &NMR(na, t)[i];
1519 if (nm_kring_pending_off(kring))
1520 kring->nr_mode = NKR_NETMAP_OFF;
1525 BDG_WUNLOCK(vpna->na_bdg);
1531 * Lookup function for a learning bridge.
1532 * Update the hash table with the source address,
1533 * and then returns the destination port index, and the
1534 * ring in *dst_ring (at the moment, always use ring 0)
/* Learning-bridge lookup: records the source MAC of ft in the hash
 * table and returns the destination port index (NM_BDG_BROADCAST for
 * multicast/unknown, NM_BDG_NOPORT to drop). */
1537 netmap_bdg_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
1538 struct netmap_vp_adapter *na)
1540 uint8_t *buf = ft->ft_buf;
1541 u_int buf_len = ft->ft_len;
1542 struct nm_hash_ent *ht = na->na_bdg->ht;
1544 u_int dst, mysrc = na->bdg_port;
1545 uint64_t smac, dmac;
1548 /* safety check, unfortunately we have many cases */
1549 if (buf_len >= 14 + na->up.virt_hdr_len) {
1550 /* virthdr + mac_hdr in the same slot */
1551 buf += na->up.virt_hdr_len;
1552 buf_len -= na->up.virt_hdr_len;
1553 } else if (buf_len == na->up.virt_hdr_len && ft->ft_flags & NS_MOREFRAG) {
1554 /* only header in first fragment */
1557 buf_len = ft->ft_len;
/* anything shorter than an Ethernet header cannot be classified */
1559 RD(5, "invalid buf format, length %d", buf_len);
1560 return NM_BDG_NOPORT;
/* indirect (userspace) buffers must be copied in before parsing */
1563 if (ft->ft_flags & NS_INDIRECT) {
1564 if (copyin(buf, indbuf, sizeof(indbuf))) {
1565 return NM_BDG_NOPORT;
/* extract dst/src MAC as little-endian 48-bit values.
 * NOTE(review): smac is read at buf+4 and presumably shifted right by
 * 16 in a line not visible here — confirm against the full source. */
1570 dmac = le64toh(*(uint64_t *)(buf)) & 0xffffffffffff;
1571 smac = le64toh(*(uint64_t *)(buf + 4));
1575 * The hash is somewhat expensive, there might be some
1576 * worthwhile optimizations here.
/* learn the source only for unicast frames, and only if it changed
 * since the last packet from this port (last_smac is a 1-entry cache) */
1578 if (((buf[6] & 1) == 0) && (na->last_smac != smac)) { /* valid src */
1580 sh = nm_bridge_rthash(s); // XXX hash of source
1581 /* update source port forwarding entry */
1582 na->last_smac = ht[sh].mac = smac; /* XXX expire ? */
1583 ht[sh].ports = mysrc;
1585 D("src %02x:%02x:%02x:%02x:%02x:%02x on port %d",
1586 s[0], s[1], s[2], s[3], s[4], s[5], mysrc);
/* default: flood; narrowed below when the dst MAC is known */
1588 dst = NM_BDG_BROADCAST;
1589 if ((buf[0] & 1) == 0) { /* unicast */
1590 dh = nm_bridge_rthash(buf); // XXX hash of dst
1591 if (ht[dh].mac == dmac) { /* found dst */
1594 /* XXX otherwise return NM_BDG_UNKNOWN ? */
1601 * Available space in the ring. Only used in VALE code
1602 * and only with is_rx = 1
1604 static inline uint32_t
1605 nm_kr_space(struct netmap_kring *k, int is_rx)
/* slots between hwcur and hwlease are "busy" (leased or not yet
 * released by the consumer); wrap if the subtraction went negative */
1610 int busy = k->nkr_hwlease - k->nr_hwcur;
1612 busy += k->nkr_num_slots;
1613 space = k->nkr_num_slots - 1 - busy;
1615 /* XXX never used in this branch */
1616 space = k->nr_hwtail - k->nkr_hwlease;
1618 space += k->nkr_num_slots;
/* sanity check: all indexes must stay below the ring size */
1622 if (k->nkr_hwlease >= k->nkr_num_slots ||
1623 k->nr_hwcur >= k->nkr_num_slots ||
1624 k->nr_tail >= k->nkr_num_slots ||
1626 busy >= k->nkr_num_slots) {
1627 D("invalid kring, cur %d tail %d lease %d lease_idx %d lim %d", k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease,
1628 k->nkr_lease_idx, k->nkr_num_slots);
1637 /* make a lease on the kring for N positions. return the
1639 * XXX only used in VALE code and with is_rx = 1
1641 static inline uint32_t
1642 nm_kr_lease(struct netmap_kring *k, u_int n, int is_rx)
1644 uint32_t lim = k->nkr_num_slots - 1;
1645 uint32_t lease_idx = k->nkr_lease_idx;
/* claim a completion-report slot: NR_NOSLOT marks it "in progress" */
1647 k->nkr_leases[lease_idx] = NR_NOSLOT;
1648 k->nkr_lease_idx = nm_next(lease_idx, lim);
/* caller must have checked nm_kr_space() first; this is a debug trap */
1650 if (n > nm_kr_space(k, is_rx)) {
1651 D("invalid request for %d slots", n);
1654 /* XXX verify that there are n slots */
/* advance the lease pointer by n, with manual wraparound */
1655 k->nkr_hwlease += n;
1656 if (k->nkr_hwlease > lim)
1657 k->nkr_hwlease -= lim + 1;
/* sanity check, same spirit as in nm_kr_space() */
1659 if (k->nkr_hwlease >= k->nkr_num_slots ||
1660 k->nr_hwcur >= k->nkr_num_slots ||
1661 k->nr_hwtail >= k->nkr_num_slots ||
1662 k->nkr_lease_idx >= k->nkr_num_slots) {
1663 D("invalid kring %s, cur %d tail %d lease %d lease_idx %d lim %d",
1665 k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease,
1666 k->nkr_lease_idx, k->nkr_num_slots);
1673 * This flush routine supports only unicast and broadcast but a large
1674 * number of ports, and lets us replace the learn and dispatch functions.
/* Forward the n packets in ft[] coming from port na/ring_nr to their
 * destinations. Two passes: (1) classify each packet into a per-port
 * per-ring queue via the bridge lookup op; (2) for each destination,
 * lease slots on its rx kring, copy the packets, then publish the
 * lease and notify. Returns the updated ft index (callers restart the
 * batch). */
1677 nm_bdg_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
1680 struct nm_bdg_q *dst_ents, *brddst;
1681 uint16_t num_dsts = 0, *dsts;
1682 struct nm_bridge *b = na->na_bdg;
1683 u_int i, me = na->bdg_port;
1686 * The work area (pointed by ft) is followed by an array of
1687 * pointers to queues , dst_ents; there are NM_BDG_MAXRINGS
1688 * queues per port plus one for the broadcast traffic.
1689 * Then we have an array of destination indexes.
1691 dst_ents = (struct nm_bdg_q *)(ft + NM_BDG_BATCH_MAX);
1692 dsts = (uint16_t *)(dst_ents + NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1);
1694 /* first pass: find a destination for each packet in the batch */
1695 for (i = 0; likely(i < n); i += ft[i].ft_frags) {
1696 uint8_t dst_ring = ring_nr; /* default, same ring as origin */
1697 uint16_t dst_port, d_i;
1700 ND("slot %d frags %d", i, ft[i].ft_frags);
1701 /* Drop the packet if the virtio-net header is not into the first
1702 fragment nor at the very beginning of the second. */
1703 if (unlikely(na->up.virt_hdr_len > ft[i].ft_len))
/* ask the bridge (e.g. netmap_bdg_learning) where this goes */
1705 dst_port = b->bdg_ops.lookup(&ft[i], &dst_ring, na);
1706 if (netmap_verbose > 255)
1707 RD(5, "slot %d port %d -> %d", i, me, dst_port);
1708 if (dst_port == NM_BDG_NOPORT)
1709 continue; /* this packet is identified to be dropped */
1710 else if (unlikely(dst_port > NM_BDG_MAXPORTS))
1712 else if (dst_port == NM_BDG_BROADCAST)
1713 dst_ring = 0; /* broadcasts always go to ring 0 */
1714 else if (unlikely(dst_port == me ||
1715 !b->bdg_ports[dst_port]))
1718 /* get a position in the scratch pad */
1719 d_i = dst_port * NM_BDG_MAXRINGS + dst_ring;
1722 /* append the first fragment to the list */
1723 if (d->bq_head == NM_FT_NULL) { /* new destination */
1724 d->bq_head = d->bq_tail = i;
1725 /* remember this position to be scanned later */
1726 if (dst_port != NM_BDG_BROADCAST)
1727 dsts[num_dsts++] = d_i;
1729 ft[d->bq_tail].ft_next = i;
1732 d->bq_len += ft[i].ft_frags;
1736 * Broadcast traffic goes to ring 0 on all destinations.
1737 * So we need to add these rings to the list of ports to scan.
1738 * XXX at the moment we scan all NM_BDG_MAXPORTS ports, which is
1739 * expensive. We should keep a compact list of active destinations
1740 * so we could shorten this loop.
1742 brddst = dst_ents + NM_BDG_BROADCAST * NM_BDG_MAXRINGS;
1743 if (brddst->bq_head != NM_FT_NULL) {
1745 for (j = 0; likely(j < b->bdg_active_ports); j++) {
1747 i = b->bdg_port_index[j];
1748 if (unlikely(i == me))
1750 d_i = i * NM_BDG_MAXRINGS;
1751 if (dst_ents[d_i].bq_head == NM_FT_NULL)
1752 dsts[num_dsts++] = d_i;
1756 ND(5, "pass 1 done %d pkts %d dsts", n, num_dsts);
1757 /* second pass: scan destinations */
1758 for (i = 0; i < num_dsts; i++) {
1759 struct netmap_vp_adapter *dst_na;
1760 struct netmap_kring *kring;
1761 struct netmap_ring *ring;
1762 u_int dst_nr, lim, j, d_i, next, brd_next;
1763 u_int needed, howmany;
1764 int retry = netmap_txsync_retry;
1766 uint32_t my_start = 0, lease_idx = 0;
1768 int virt_hdr_mismatch = 0;
1771 ND("second pass %d port %d", i, d_i);
1773 // XXX fix the division
1774 dst_na = b->bdg_ports[d_i/NM_BDG_MAXRINGS];
1775 /* protect from the lookup function returning an inactive
1778 if (unlikely(dst_na == NULL))
1780 if (dst_na->up.na_flags & NAF_SW_ONLY)
1783 * The interface may be in !netmap mode in two cases:
1784 * - when na is attached but not activated yet;
1785 * - when na is being deactivated but is still attached.
1787 if (unlikely(!nm_netmap_on(&dst_na->up))) {
1788 ND("not in netmap mode!");
1792 /* there is at least one either unicast or broadcast packet */
1793 brd_next = brddst->bq_head;
1795 /* we need to reserve this many slots. If fewer are
1796 * available, some packets will be dropped.
1797 * Packets may have multiple fragments, so
1798 * there is a chance that we may not use all of the slots
1799 * we have claimed, so we will need to handle the leftover
1800 * ones when we regain the lock.
1802 needed = d->bq_len + brddst->bq_len;
1804 if (unlikely(dst_na->up.virt_hdr_len != na->up.virt_hdr_len)) {
1805 RD(3, "virt_hdr_mismatch, src %d dst %d", na->up.virt_hdr_len,
1806 dst_na->up.virt_hdr_len);
1807 /* There is a virtio-net header/offloadings mismatch between
1808 * source and destination. The slower mismatch datapath will
1809 * be used to cope with all the mismatches.
1811 virt_hdr_mismatch = 1;
1812 if (dst_na->mfs < na->mfs) {
1813 /* We may need to do segmentation offloadings, and so
1814 * we may need a number of destination slots greater
1815 * than the number of input slots ('needed').
1816 * We look for the smallest integer 'x' which satisfies:
1817 * needed * na->mfs + x * H <= x * na->mfs
1818 * where 'H' is the length of the longest header that may
1819 * be replicated in the segmentation process (e.g. for
1820 * TCPv4 we must account for ethernet header, IP header
1821 * and TCPv4 header).
1823 needed = (needed * na->mfs) /
1824 (dst_na->mfs - WORST_CASE_GSO_HEADER) + 1;
1825 ND(3, "srcmtu=%u, dstmtu=%u, x=%u", na->mfs, dst_na->mfs, needed);
1829 ND(5, "pass 2 dst %d is %x %s",
1830 i, d_i, is_vp ? "virtual" : "nic/host");
/* map the abstract destination ring onto one of the port's rx rings */
1831 dst_nr = d_i & (NM_BDG_MAXRINGS-1);
1832 nrings = dst_na->up.num_rx_rings;
1833 if (dst_nr >= nrings)
1834 dst_nr = dst_nr % nrings;
1835 kring = &dst_na->up.rx_rings[dst_nr];
1837 lim = kring->nkr_num_slots - 1;
1841 if (dst_na->retry && retry) {
1842 /* try to get some free slot from the previous run */
1843 kring->nm_notify(kring, 0);
1844 /* actually useful only for bwraps, since there
1845 * the notify will trigger a txsync on the hwna. VALE ports
1846 * have dst_na->retry == 0
1849 /* reserve the buffers in the queue and an entry
1850 * to report completion, and drop lock.
1851 * XXX this might become a helper function.
1853 mtx_lock(&kring->q_lock);
1854 if (kring->nkr_stopped) {
1855 mtx_unlock(&kring->q_lock);
1858 my_start = j = kring->nkr_hwlease;
1859 howmany = nm_kr_space(kring, 1);
1860 if (needed < howmany)
1862 lease_idx = nm_kr_lease(kring, howmany, 1);
1863 mtx_unlock(&kring->q_lock);
1865 /* only retry if we need more than available slots */
1866 if (retry && needed <= howmany)
1869 /* copy to the destination queue */
1870 while (howmany > 0) {
1871 struct netmap_slot *slot;
1872 struct nm_bdg_fwd *ft_p, *ft_end;
1875 /* find the queue from which we pick next packet.
1876 * NM_FT_NULL is always higher than valid indexes
1877 * so we never dereference it if the other list
1878 * has packets (and if both are empty we never
1881 if (next < brd_next) {
1883 next = ft_p->ft_next;
1884 } else { /* insert broadcast */
1885 ft_p = ft + brd_next;
1886 brd_next = ft_p->ft_next;
/* fragments of one packet must move as a unit */
1888 cnt = ft_p->ft_frags; // cnt > 0
1889 if (unlikely(cnt > howmany))
1890 break; /* no more space */
1891 if (netmap_verbose && cnt > 1)
1892 RD(5, "rx %d frags to %d", cnt, j);
1893 ft_end = ft_p + cnt;
1894 if (unlikely(virt_hdr_mismatch)) {
1895 bdg_mismatch_datapath(na, dst_na, ft_p, ring, &j, lim, &howmany);
1899 char *dst, *src = ft_p->ft_buf;
1900 size_t copy_len = ft_p->ft_len, dst_len = copy_len;
1902 slot = &ring->slot[j];
1903 dst = NMB(&dst_na->up, slot);
1905 ND("send [%d] %d(%d) bytes at %s:%d",
1906 i, (int)copy_len, (int)dst_len,
1907 NM_IFPNAME(dst_ifp), j);
1908 /* round to a multiple of 64 */
1909 copy_len = (copy_len + 63) & ~63;
1911 if (unlikely(copy_len > NETMAP_BUF_SIZE(&dst_na->up) ||
1912 copy_len > NETMAP_BUF_SIZE(&na->up))) {
1913 RD(5, "invalid len %d, down to 64", (int)copy_len);
1914 copy_len = dst_len = 64; // XXX
1916 if (ft_p->ft_flags & NS_INDIRECT) {
1917 if (copyin(src, dst, copy_len)) {
1918 // invalid user pointer, pretend len is 0
1922 //memcpy(dst, src, copy_len);
1923 pkt_copy(src, dst, (int)copy_len);
/* encode frag count in the high byte of the slot flags */
1925 slot->len = dst_len;
1926 slot->flags = (cnt << 8)| NS_MOREFRAG;
1927 j = nm_next(j, lim);
1930 } while (ft_p != ft_end);
1931 slot->flags = (cnt << 8); /* clear flag on last entry */
1934 if (next == NM_FT_NULL && brd_next == NM_FT_NULL)
1938 /* current position */
1939 uint32_t *p = kring->nkr_leases; /* shorthand */
1940 uint32_t update_pos;
1941 int still_locked = 1;
1943 mtx_lock(&kring->q_lock);
1944 if (unlikely(howmany > 0)) {
1945 /* not used all bufs. If i am the last one
1946 * i can recover the slots, otherwise must
1947 * fill them with 0 to mark empty packets.
1949 ND("leftover %d bufs", howmany);
1950 if (nm_next(lease_idx, lim) == kring->nkr_lease_idx) {
1951 /* yes i am the last one */
1952 ND("roll back nkr_hwlease to %d", j);
1953 kring->nkr_hwlease = j;
1955 while (howmany-- > 0) {
1956 ring->slot[j].len = 0;
1957 ring->slot[j].flags = 0;
1958 j = nm_next(j, lim);
1962 p[lease_idx] = j; /* report I am done */
1964 update_pos = kring->nr_hwtail;
1966 if (my_start == update_pos) {
1967 /* all slots before my_start have been reported,
1968 * so scan subsequent leases to see if other ranges
1969 * have been completed, and do a selwakeup or txsync.
1971 while (lease_idx != kring->nkr_lease_idx &&
1972 p[lease_idx] != NR_NOSLOT) {
1974 p[lease_idx] = NR_NOSLOT;
1975 lease_idx = nm_next(lease_idx, lim);
1977 /* j is the new 'write' position. j != my_start
1978 * means there are new buffers to report
1980 if (likely(j != my_start)) {
1981 kring->nr_hwtail = j;
1983 mtx_unlock(&kring->q_lock);
1984 kring->nm_notify(kring, 0);
1985 /* this is netmap_notify for VALE ports and
1986 * netmap_bwrap_notify for bwrap. The latter will
1987 * trigger a txsync on the underlying hwna
1989 if (dst_na->retry && retry--) {
1990 /* XXX this is going to call nm_notify again.
1991 * Only useful for bwrap in virtual machines
1998 mtx_unlock(&kring->q_lock);
2001 d->bq_head = d->bq_tail = NM_FT_NULL; /* cleanup */
2004 brddst->bq_head = brddst->bq_tail = NM_FT_NULL; /* cleanup */
2009 /* nm_txsync callback for VALE ports */
2011 netmap_vp_txsync(struct netmap_kring *kring, int flags)
2013 struct netmap_vp_adapter *na =
2014 (struct netmap_vp_adapter *)kring->na;
2016 u_int const lim = kring->nkr_num_slots - 1;
2017 u_int const head = kring->rhead;
2019 if (bridge_batch <= 0) { /* testing only */
2020 done = head; // used all
/* clamp a runtime-tunable batch size to the compile-time maximum */
2027 if (bridge_batch > NM_BDG_BATCH)
2028 bridge_batch = NM_BDG_BATCH;
/* push the new slots through the bridge; returns how far we got */
2030 done = nm_bdg_preflush(kring, head);
2033 D("early break at %d/ %d, tail %d", done, head, kring->nr_hwtail);
2035 * packets between 'done' and 'cur' are left unsent.
/* publish consumed slots; hwtail trails hwcur by one (ring invariant) */
2037 kring->nr_hwcur = done;
2038 kring->nr_hwtail = nm_prev(done, lim);
2040 D("%s ring %d flags %d", na->up.name, kring->ring_id, flags);
2045 /* rxsync code used by VALE ports nm_rxsync callback and also
2046 * internally by the bwrap
2049 netmap_vp_rxsync_locked(struct netmap_kring *kring, int flags)
2051 struct netmap_adapter *na = kring->na;
2052 struct netmap_ring *ring = kring->ring;
2053 u_int nm_i, lim = kring->nkr_num_slots - 1;
2054 u_int head = kring->rhead;
2058 D("ouch dangerous reset!!!");
2059 n = netmap_ring_reinit(kring);
2063 /* First part, import newly received packets. */
2064 /* actually nothing to do here, they are already in the kring */
2066 /* Second part, skip past packets that userspace has released. */
2067 nm_i = kring->nr_hwcur;
2069 /* consistency check, but nothing really important here */
2070 for (n = 0; likely(nm_i != head); n++) {
2071 struct netmap_slot *slot = &ring->slot[nm_i];
2072 void *addr = NMB(na, slot);
2074 if (addr == NETMAP_BUF_BASE(kring->na)) { /* bad buf */
2075 D("bad buffer index %d, ignore ?",
2078 slot->flags &= ~NS_BUF_CHANGED;
2079 nm_i = nm_next(nm_i, lim);
/* all released slots are now available to the bridge writers */
2081 kring->nr_hwcur = head;
2090 * nm_rxsync callback for VALE ports
2091 * user process reading from a VALE switch.
2092 * Already protected against concurrent calls from userspace,
2093 * but we must acquire the queue's lock to protect against
2094 * writers on the same queue.
2097 netmap_vp_rxsync(struct netmap_kring *kring, int flags)
2101 mtx_lock(&kring->q_lock);
2102 n = netmap_vp_rxsync_locked(kring, flags);
2103 mtx_unlock(&kring->q_lock);
2108 /* nm_bdg_attach callback for VALE ports
2109 * The na_vp port is this same netmap_adapter. There is no host port.
2112 netmap_vp_bdg_attach(const char *name, struct netmap_adapter *na)
2114 struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter *)na;
/* NOTE(review): strncpy does not guarantee NUL-termination if name is
 * as long as na->name — presumably callers bound it; confirm. */
2119 strncpy(na->name, name, sizeof(na->name));
2120 na->na_hostvp = NULL;
2124 /* create a netmap_vp_adapter that describes a VALE port.
2125 * Only persistent VALE ports have a non-null ifp.
2128 netmap_vp_create(struct nmreq *nmr, struct ifnet *ifp, struct netmap_vp_adapter **ret)
2130 struct netmap_vp_adapter *vpna;
2131 struct netmap_adapter *na;
2135 vpna = malloc(sizeof(*vpna), M_DEVBUF, M_NOWAIT | M_ZERO);
2142 strncpy(na->name, nmr->nr_name, sizeof(na->name));
2144 /* bound checking */
/* clamp user-supplied ring/slot counts to sane ranges, and write the
 * clamped values back into the request so userspace sees the result */
2145 na->num_tx_rings = nmr->nr_tx_rings;
2146 nm_bound_var(&na->num_tx_rings, 1, 1, NM_BDG_MAXRINGS, NULL);
2147 nmr->nr_tx_rings = na->num_tx_rings; // write back
2148 na->num_rx_rings = nmr->nr_rx_rings;
2149 nm_bound_var(&na->num_rx_rings, 1, 1, NM_BDG_MAXRINGS, NULL);
2150 nmr->nr_rx_rings = na->num_rx_rings; // write back
2151 nm_bound_var(&nmr->nr_tx_slots, NM_BRIDGE_RINGSIZE,
2152 1, NM_BDG_MAXSLOTS, NULL);
2153 na->num_tx_desc = nmr->nr_tx_slots;
2154 nm_bound_var(&nmr->nr_rx_slots, NM_BRIDGE_RINGSIZE,
2155 1, NM_BDG_MAXSLOTS, NULL);
2156 /* validate number of pipes. We want at least 1,
2157 * but probably can do with some more.
2158 * So let's use 2 as default (when 0 is supplied)
2160 npipes = nmr->nr_arg1;
2161 nm_bound_var(&npipes, 2, 1, NM_MAXPIPES, NULL);
2162 nmr->nr_arg1 = npipes; /* write back */
2163 /* validate extra bufs */
2164 nm_bound_var(&nmr->nr_arg3, 0, 0,
2165 128*NM_BDG_MAXSLOTS, NULL);
2166 na->num_rx_desc = nmr->nr_rx_slots;
/* invalidate the source-MAC cache used by netmap_bdg_learning() */
2168 vpna->last_smac = ~0llu;
2169 /*if (vpna->mfs > netmap_buf_size) TODO netmap_buf_size is zero??
2170 vpna->mfs = netmap_buf_size; */
2172 D("max frame size %u", vpna->mfs);
2174 na->na_flags |= NAF_BDG_MAYSLEEP;
2175 /* persistent VALE ports look like hw devices
2176 * with a native netmap adapter
2179 na->na_flags |= NAF_NATIVE;
/* install the VALE port callbacks defined above */
2180 na->nm_txsync = netmap_vp_txsync;
2181 na->nm_rxsync = netmap_vp_rxsync;
2182 na->nm_register = netmap_vp_reg;
2183 na->nm_krings_create = netmap_vp_krings_create;
2184 na->nm_krings_delete = netmap_vp_krings_delete;
2185 na->nm_dtor = netmap_vp_dtor;
/* each VALE port gets its own private memory allocator */
2186 na->nm_mem = netmap_mem_private_new(na->name,
2187 na->num_tx_rings, na->num_tx_desc,
2188 na->num_rx_rings, na->num_rx_desc,
2189 nmr->nr_arg3, npipes, &error);
2190 if (na->nm_mem == NULL)
2192 na->nm_bdg_attach = netmap_vp_bdg_attach;
2193 /* other nmd fields are set in the common routine */
2194 error = netmap_attach_common(na);
/* error path: release the allocator and the adapter we created */
2201 if (na->nm_mem != NULL)
2202 netmap_mem_delete(na->nm_mem);
2203 free(vpna, M_DEVBUF);
2207 /* Bridge wrapper code (bwrap).
2208 * This is used to connect a non-VALE-port netmap_adapter (hwna) to a
2210 * The main task is to swap the meaning of tx and rx rings to match the
2211 * expectations of the VALE switch code (see nm_bdg_flush).
2213 * The bwrap works by interposing a netmap_bwrap_adapter between the
2214 * rest of the system and the hwna. The netmap_bwrap_adapter looks like
2215 * a netmap_vp_adapter to the rest of the system, but, internally, it
2216 * translates all callbacks to what the hwna expects.
2218 * Note that we have to intercept callbacks coming from two sides:
2220 * - callbacks coming from the netmap module are intercepted by
2221 * passing around the netmap_bwrap_adapter instead of the hwna
2223 * - callbacks coming from outside of the netmap module only know
2224 * about the hwna. This, however, only happens in interrupt
2225 * handlers, where only the hwna->nm_notify callback is called.
2226 * What the bwrap does is to overwrite the hwna->nm_notify callback
2227 * with its own netmap_bwrap_intr_notify.
2228 * XXX This assumes that the hwna->nm_notify callback was the
2229 * standard netmap_notify(), as it is the case for nic adapters.
2230 * Any additional action performed by hwna->nm_notify will not be
2231 * performed by netmap_bwrap_intr_notify.
2233 * Additionally, the bwrap can optionally attach the host rings pair
2234 * of the wrapped adapter to a different port of the switch.
/* Destructor for a bwrap adapter: detach from the bridge(s), sever all
 * links back from the wrapped hwna, and drop our reference on it. */
2239 netmap_bwrap_dtor(struct netmap_adapter *na)
2241 struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter*)na;
2242 struct netmap_adapter *hwna = bna->hwna;
2243 struct nm_bridge *b = bna->up.na_bdg,
2244 *bh = bna->host.na_bdg;
/* detach both the main port and (if attached) the host-rings port */
2247 netmap_bdg_detach_common(b, bna->up.bdg_port,
2248 (bh ? bna->host.bdg_port : -1));
2253 bna->host.up.ifp = NULL;
/* clear the weak back-pointers installed by netmap_bwrap_attach */
2254 hwna->na_private = NULL;
2255 hwna->na_vp = hwna->na_hostvp = NULL;
2256 hwna->na_flags &= ~NAF_BUSY;
2257 netmap_adapter_put(hwna);
2263 * Intr callback for NICs connected to a bridge.
2264 * Simply ignore tx interrupts (maybe we could try to recover space ?)
2265 * and pass received packets from nic to the bridge.
2267 * XXX TODO check locking: this is called from the interrupt
2268 * handler so we should make sure that the interface is not
2269 * disconnected while passing down an interrupt.
2271 * Note, no user process can access this NIC or the host stack.
2272 * The only part of the ring that is significant are the slots,
2273 * and head/cur/tail are set from the kring as needed
2274 * (part as a receive ring, part as a transmit ring).
2276 * callback that overwrites the hwna notify callback.
2277 * Packets come from the outside or from the host stack and are put on an
2279 * The bridge wrapper then sends the packets through the bridge.
/* Interrupt-path notify installed on the hwna rx rings: drain newly
 * received packets from the NIC ring into the bridge via the paired
 * bwrap tx kring, then release the buffers back to the NIC. */
2282 netmap_bwrap_intr_notify(struct netmap_kring *kring, int flags)
2284 struct netmap_adapter *na = kring->na;
2285 struct netmap_bwrap_adapter *bna = na->na_private;
2286 struct netmap_kring *bkring;
2287 struct netmap_vp_adapter *vpna = &bna->up;
2288 u_int ring_nr = kring->ring_id;
2289 int ret = NM_IRQ_COMPLETED;
2293 D("%s %s 0x%x", na->name, kring->name, flags);
/* the bwrap tx ring paired with this hwna rx ring */
2295 bkring = &vpna->up.tx_rings[ring_nr];
2297 /* make sure the ring is not disabled */
2298 if (nm_kr_tryget(kring, 0 /* can't sleep */, NULL)) {
2303 D("%s head %d cur %d tail %d", na->name,
2304 kring->rhead, kring->rcur, kring->rtail);
2306 /* simulate a user wakeup on the rx ring
2307 * fetch packets that have arrived.
2309 error = kring->nm_sync(kring, 0);
2312 if (kring->nr_hwcur == kring->nr_hwtail) {
2314 D("how strange, interrupt with no packets on %s",
2319 /* new packets are kring->rcur to kring->nr_hwtail, and the bkring
2320 * had hwcur == bkring->rhead. So advance bkring->rhead to kring->nr_hwtail
2321 * to push all packets out.
2323 bkring->rhead = bkring->rcur = kring->nr_hwtail;
2325 netmap_vp_txsync(bkring, flags);
2327 /* mark all buffers as released on this ring */
2328 kring->rhead = kring->rcur = kring->rtail = kring->nr_hwtail;
2329 /* another call to actually release the buffers */
2330 error = kring->nm_sync(kring, 0);
2332 /* The second rxsync may have further advanced hwtail. If this happens,
2333 * return NM_IRQ_RESCHED, otherwise just return NM_IRQ_COMPLETED. */
2334 if (kring->rcur != kring->nr_hwtail) {
2335 ret = NM_IRQ_RESCHED;
2340 return error ? error : ret;
2344 /* nm_register callback for bwrap */
2346 netmap_bwrap_reg(struct netmap_adapter *na, int onoff)
2348 struct netmap_bwrap_adapter *bna =
2349 (struct netmap_bwrap_adapter *)na;
2350 struct netmap_adapter *hwna = bna->hwna;
2351 struct netmap_vp_adapter *hostna = &bna->host;
2355 ND("%s %s", na->name, onoff ? "on" : "off");
2358 /* netmap_do_regif has been called on the bwrap na.
2359 * We need to pass the information about the
2360 * memory allocator down to the hwna before
2361 * putting it in netmap mode
2363 hwna->na_lut = na->na_lut;
2365 if (hostna->na_bdg) {
2366 /* if the host rings have been attached to switch,
2367 * we need to copy the memory allocator information
2368 * in the hostna also
2370 hostna->up.na_lut = na->na_lut;
2373 /* cross-link the netmap rings
2374 * The original number of rings comes from hwna,
2375 * rx rings on one side equals tx rings on the other.
2378 enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */
2379 for (i = 0; i < nma_get_nrings(hwna, r) + 1; i++) {
2380 NMR(hwna, r)[i].ring = NMR(na, t)[i].ring;
2384 if (na->na_flags & NAF_HOST_RINGS) {
2385 struct netmap_adapter *hna = &hostna->up;
2386 /* the hostna rings are the host rings of the bwrap.
2387 * The corresponding krings must point back to the
2390 hna->tx_rings = &na->tx_rings[na->num_tx_rings];
2391 hna->tx_rings[0].na = hna;
2392 hna->rx_rings = &na->rx_rings[na->num_rx_rings];
2393 hna->rx_rings[0].na = hna;
2397 /* pass down the pending ring state information */
2399 for (i = 0; i < nma_get_nrings(na, t) + 1; i++)
2400 NMR(hwna, t)[i].nr_pending_mode =
2401 NMR(na, t)[i].nr_pending_mode;
2404 /* forward the request to the hwna */
2405 error = hwna->nm_register(hwna, onoff);
2409 /* copy up the current ring state information */
2411 for (i = 0; i < nma_get_nrings(na, t) + 1; i++)
2412 NMR(na, t)[i].nr_mode =
2413 NMR(hwna, t)[i].nr_mode;
2416 /* impersonate a netmap_vp_adapter */
2417 netmap_vp_reg(na, onoff);
2419 netmap_vp_reg(&hostna->up, onoff);
2423 /* intercept the hwna nm_notify callback on the hw rings */
2424 for (i = 0; i < hwna->num_rx_rings; i++) {
2425 hwna->rx_rings[i].save_notify = hwna->rx_rings[i].nm_notify;
2426 hwna->rx_rings[i].nm_notify = netmap_bwrap_intr_notify;
2428 i = hwna->num_rx_rings; /* for safety */
2429 /* save the host ring notify unconditionally */
2430 hwna->rx_rings[i].save_notify = hwna->rx_rings[i].nm_notify;
2431 if (hostna->na_bdg) {
2432 /* also intercept the host ring notify */
2433 hwna->rx_rings[i].nm_notify = netmap_bwrap_intr_notify;
2435 if (na->active_fds == 0)
2436 na->na_flags |= NAF_NETMAP_ON;
/* --- off path --- */
2440 if (na->active_fds == 0)
2441 na->na_flags &= ~NAF_NETMAP_ON;
2443 /* reset all notify callbacks (including host ring) */
2444 for (i = 0; i <= hwna->num_rx_rings; i++) {
2445 hwna->rx_rings[i].nm_notify = hwna->rx_rings[i].save_notify;
2446 hwna->rx_rings[i].save_notify = NULL;
/* drop the borrowed lookup table; it belonged to the bwrap */
2448 hwna->na_lut.lut = NULL;
2449 hwna->na_lut.objtotal = 0;
2450 hwna->na_lut.objsize = 0;
2456 /* nm_config callback for bwrap */
2458 netmap_bwrap_config(struct netmap_adapter *na, u_int *txr, u_int *txd,
2459 u_int *rxr, u_int *rxd)
2461 struct netmap_bwrap_adapter *bna =
2462 (struct netmap_bwrap_adapter *)na;
2463 struct netmap_adapter *hwna = bna->hwna;
2465 /* forward the request */
2466 netmap_update_config(hwna);
2467 /* swap the results */
2468 *txr = hwna->num_rx_rings;
2469 *txd = hwna->num_rx_desc;
2470 *rxr = hwna->num_tx_rings;
2471 *rxd = hwna->num_rx_desc;
2477 /* nm_krings_create callback for bwrap */
2479 netmap_bwrap_krings_create(struct netmap_adapter *na)
2481 struct netmap_bwrap_adapter *bna =
2482 (struct netmap_bwrap_adapter *)na;
2483 struct netmap_adapter *hwna = bna->hwna;
2489 /* impersonate a netmap_vp_adapter */
2490 error = netmap_vp_krings_create(na);
2494 /* also create the hwna krings */
2495 error = hwna->nm_krings_create(hwna);
/* undo the vp krings on failure (label target below) */
2497 goto err_del_vp_rings;
2500 /* get each ring slot number from the corresponding hwna ring */
2502 enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */
2503 for (i = 0; i < nma_get_nrings(hwna, r) + 1; i++) {
2504 NMR(na, t)[i].nkr_num_slots = NMR(hwna, r)[i].nkr_num_slots;
2511 netmap_vp_krings_delete(na);
/* nm_krings_delete callback for bwrap: tear down the hwna krings
 * first, then the vp krings created in netmap_bwrap_krings_create. */
2518 netmap_bwrap_krings_delete(struct netmap_adapter *na)
2520 struct netmap_bwrap_adapter *bna =
2521 (struct netmap_bwrap_adapter *)na;
2522 struct netmap_adapter *hwna = bna->hwna;
2526 hwna->nm_krings_delete(hwna);
2527 netmap_vp_krings_delete(na);
2531 /* notify method for the bridge-->hwna direction */
2533 netmap_bwrap_notify(struct netmap_kring *kring, int flags)
2535 struct netmap_adapter *na = kring->na;
2536 struct netmap_bwrap_adapter *bna = na->na_private;
2537 struct netmap_adapter *hwna = bna->hwna;
2538 u_int ring_n = kring->ring_id;
2539 u_int lim = kring->nkr_num_slots - 1;
2540 struct netmap_kring *hw_kring;
2543 ND("%s: na %s hwna %s",
2544 (kring ? kring->name : "NULL!"),
2545 (na ? na->name : "NULL!"),
2546 (hwna ? hwna->name : "NULL!"));
/* the hwna tx ring paired with this bwrap rx ring */
2547 hw_kring = &hwna->tx_rings[ring_n];
2549 if (nm_kr_tryget(hw_kring, 0, NULL)) {
2553 /* first step: simulate a user wakeup on the rx ring */
2554 netmap_vp_rxsync(kring, flags);
/* NOTE(review): 'hw_ring->rtail' below looks like a typo for
 * 'hw_kring->rtail' (cf. the matching PST trace further down); it is
 * harmless while ND() compiles its arguments away — confirm. */
2555 ND("%s[%d] PRE rx(c%3d t%3d l%3d) ring(h%3d c%3d t%3d) tx(c%3d ht%3d t%3d)",
2557 kring->nr_hwcur, kring->nr_hwtail, kring->nkr_hwlease,
2558 ring->head, ring->cur, ring->tail,
2559 hw_kring->nr_hwcur, hw_kring->nr_hwtail, hw_ring->rtail);
2560 /* second step: the new packets are sent on the tx ring
2561 * (which is actually the same ring)
2563 hw_kring->rhead = hw_kring->rcur = kring->nr_hwtail;
2564 error = hw_kring->nm_sync(hw_kring, flags);
2568 /* third step: now we are back on the rx ring */
2569 /* claim ownership on all hw owned bufs */
2570 kring->rhead = kring->rcur = nm_next(hw_kring->nr_hwtail, lim); /* skip past reserved slot */
2572 /* fourth step: the user goes to sleep again, causing another rxsync */
2573 netmap_vp_rxsync(kring, flags);
2574 ND("%s[%d] PST rx(c%3d t%3d l%3d) ring(h%3d c%3d t%3d) tx(c%3d ht%3d t%3d)",
2576 kring->nr_hwcur, kring->nr_hwtail, kring->nkr_hwlease,
2577 ring->head, ring->cur, ring->tail,
2578 hw_kring->nr_hwcur, hw_kring->nr_hwtail, hw_kring->rtail);
2580 nm_kr_put(hw_kring);
2582 return error ? error : NM_IRQ_COMPLETED;
2586 /* nm_bdg_ctl callback for the bwrap.
2587 * Called on bridge-attach and detach, as an effect of vale-ctl -[ahd].
2588 * On attach, it needs to provide a fake netmap_priv_d structure and
2589 * perform a netmap_do_regif() on the bwrap. This will put both the
2590 * bwrap and the hwna in netmap mode, with the netmap rings shared
2591 * and cross linked. Moreover, it will start intercepting interrupts
/* Attach/detach the bwrap to/from a bridge on behalf of vale-ctl: on
 * attach, register the adapter through a kernel-owned priv and mark it
 * busy; on detach, delete that priv and clear the busy flag. */
2595 netmap_bwrap_bdg_ctl(struct netmap_adapter *na, struct nmreq *nmr, int attach)
2597 struct netmap_priv_d *npriv;
2598 struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter*)na;
/* refuse if some user process already owns the adapter */
2602 if (NETMAP_OWNED_BY_ANY(na)) {
/* refuse a second attach: na_kpriv is the "already attached" marker */
2605 if (bna->na_kpriv) {
2609 npriv = netmap_priv_new();
2612 npriv->np_ifp = na->ifp; /* let the priv destructor release the ref */
2613 error = netmap_do_regif(npriv, na, 0, NR_REG_NIC_SW);
2615 netmap_priv_delete(npriv);
2618 bna->na_kpriv = npriv;
2619 na->na_flags |= NAF_BUSY;
/* --- detach path --- */
2621 if (na->active_fds == 0) /* not registered */
2623 netmap_priv_delete(bna->na_kpriv);
2624 bna->na_kpriv = NULL;
2625 na->na_flags &= ~NAF_BUSY;
2631 /* attach a bridge wrapper to the 'real' device */
2633 netmap_bwrap_attach(const char *nr_name, struct netmap_adapter *hwna)
2635 struct netmap_bwrap_adapter *bna;
2636 struct netmap_adapter *na = NULL;
2637 struct netmap_adapter *hostna = NULL;
2641 /* make sure the NIC is not already in use */
2642 if (NETMAP_OWNED_BY_ANY(hwna)) {
2643 D("NIC %s busy, cannot attach to bridge", hwna->name);
/* one zeroed allocation holds the bwrap and (below) its host adapter */
2647 bna = malloc(sizeof(*bna), M_DEVBUF, M_NOWAIT | M_ZERO);
2653 /* make bwrap ifp point to the real ifp */
2654 na->ifp = hwna->ifp;
2655 na->na_private = bna;
/* NOTE(review): strncpy may leave na->name unterminated if nr_name
 * fills the buffer — confirm callers bound nr_name to sizeof(na->name)-1 */
2656 strncpy(na->name, nr_name, sizeof(na->name));
2657 /* fill the ring data for the bwrap adapter with rx/tx meanings
2658 * swapped. The real cross-linking will be done during register,
2659 * when all the krings will have been created.
2662 enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */
2663 nma_set_nrings(na, t, nma_get_nrings(hwna, r));
2664 nma_set_ndesc(na, t, nma_get_ndesc(hwna, r));
/* install the bwrap-specific adapter callbacks */
2666 na->nm_dtor = netmap_bwrap_dtor;
2667 na->nm_register = netmap_bwrap_reg;
2668 // na->nm_txsync = netmap_bwrap_txsync;
2669 // na->nm_rxsync = netmap_bwrap_rxsync;
2670 na->nm_config = netmap_bwrap_config;
2671 na->nm_krings_create = netmap_bwrap_krings_create;
2672 na->nm_krings_delete = netmap_bwrap_krings_delete;
2673 na->nm_notify = netmap_bwrap_notify;
2674 na->nm_bdg_ctl = netmap_bwrap_bdg_ctl;
/* share device, memory allocator and virtio-net header size with hwna */
2675 na->pdev = hwna->pdev;
2676 na->nm_mem = hwna->nm_mem;
2677 na->virt_hdr_len = hwna->virt_hdr_len;
2678 bna->up.retry = 1; /* XXX maybe this should depend on the hwna */
/* take a reference to hwna; dropped via netmap_adapter_put on error */
2681 netmap_adapter_get(hwna);
2682 hwna->na_private = bna; /* weak reference */
2683 hwna->na_vp = &bna->up;
/* optionally mirror the hardware host (sw) rings into a host adapter */
2685 if (hwna->na_flags & NAF_HOST_RINGS) {
2686 if (hwna->na_flags & NAF_SW_ONLY)
2687 na->na_flags |= NAF_SW_ONLY;
2688 na->na_flags |= NAF_HOST_RINGS;
2689 hostna = &bna->host.up;
2690 snprintf(hostna->name, sizeof(hostna->name), "%s^", nr_name);
2691 hostna->ifp = hwna->ifp;
/* host side: single ring per direction, again with tx/rx swapped */
2693 enum txrx r = nm_txrx_swap(t);
2694 nma_set_nrings(hostna, t, 1);
2695 nma_set_ndesc(hostna, t, nma_get_ndesc(hwna, r));
2697 // hostna->nm_txsync = netmap_bwrap_host_txsync;
2698 // hostna->nm_rxsync = netmap_bwrap_host_rxsync;
2699 hostna->nm_notify = netmap_bwrap_notify;
2700 hostna->nm_mem = na->nm_mem;
2701 hostna->na_private = bna;
2702 hostna->na_vp = &bna->up;
2703 na->na_hostvp = hwna->na_hostvp =
2704 hostna->na_hostvp = &bna->host;
2705 hostna->na_flags = NAF_BUSY; /* prevent NIOCREGIF */
/* NOTE(review): 'ifp' is not declared in the visible scope; ND() is a
 * debug macro and is presumably compiled out — verify it still builds
 * when debugging is enabled */
2708 ND("%s<->%s txr %d txd %d rxr %d rxd %d",
2709 na->name, ifp->if_xname,
2710 na->num_tx_rings, na->num_tx_desc,
2711 na->num_rx_rings, na->num_rx_desc);
/* register the bwrap with the core; on success mark the NIC busy */
2713 error = netmap_attach_common(na);
2717 hwna->na_flags |= NAF_BUSY;
/* error path: undo the weak links and drop the hwna reference */
2721 hwna->na_vp = hwna->na_hostvp = NULL;
2722 netmap_adapter_put(hwna);
2723 free(bna, M_DEVBUF);
/* Allocate an array of n bridge descriptors and initialize each one.
 * Returns the array, or NULL on allocation failure (per-entry init is
 * in the loop body, not visible here). */
2729 netmap_init_bridges2(u_int n)
2732 struct nm_bridge *b;
2734 b = malloc(sizeof(struct nm_bridge) * n, M_DEVBUF,
2738 for (i = 0; i < n; i++)
/* Tear down an array of n bridges created by netmap_init_bridges2():
 * destroy the per-bridge RW lock of each entry. */
2744 netmap_uninit_bridges2(struct nm_bridge *b, u_int n)
2751 for (i = 0; i < n; i++)
2752 BDG_RWDESTROY(&b[i]);
/* Module-init hook for the VALE bridges.
 * With CONFIG_NET_NS (Linux) the bridge arrays are per network
 * namespace, handled by netmap_bns_register(); otherwise allocate
 * the single global nm_bridges array of NM_BRIDGES entries. */
2757 netmap_init_bridges(void)
2759 #ifdef CONFIG_NET_NS
2760 return netmap_bns_register();
2762 nm_bridges = netmap_init_bridges2(NM_BRIDGES);
2763 if (nm_bridges == NULL)
/* Module-exit counterpart of netmap_init_bridges(): unregister the
 * per-namespace state (CONFIG_NET_NS) or free the global array. */
2770 netmap_uninit_bridges(void)
2772 #ifdef CONFIG_NET_NS
2773 netmap_bns_unregister();
2775 netmap_uninit_bridges2(nm_bridges, NM_BRIDGES);
2778 #endif /* WITH_VALE */