2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2013-2016 Universita` di Pisa
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #if defined(__FreeBSD__)
31 #include <sys/cdefs.h> /* prerequisite */
32 __FBSDID("$FreeBSD$");
34 #include <sys/types.h>
35 #include <sys/errno.h>
36 #include <sys/param.h> /* defines used in kernel.h */
37 #include <sys/kernel.h> /* types used in module initialization */
38 #include <sys/conf.h> /* cdevsw struct, UID, GID */
39 #include <sys/sockio.h>
40 #include <sys/socketvar.h> /* struct socket */
41 #include <sys/malloc.h>
43 #include <sys/rwlock.h>
44 #include <sys/socket.h> /* sockaddrs */
45 #include <sys/selinfo.h>
46 #include <sys/sysctl.h>
48 #include <net/if_var.h>
49 #include <net/bpf.h> /* BIOCIMMEDIATE */
50 #include <machine/bus.h> /* bus_dmamap_* */
51 #include <sys/endian.h>
52 #include <sys/refcount.h>
60 #elif defined(__APPLE__)
62 #warning OSX support is only partial
70 #error Unsupported platform
72 #endif /* unsupported */
78 #include <net/netmap.h>
79 #include <dev/netmap/netmap_kern.h>
80 #include <dev/netmap/netmap_mem2.h>
81 #include <dev/netmap/netmap_bdg.h>
86 * system parameters (most of them in netmap_kern.h)
87 * NM_BDG_NAME prefix for switch port names, default "vale"
88 * NM_BDG_MAXPORTS number of ports
89 * NM_BRIDGES max number of switches in the system.
90 * XXX should become a sysctl or tunable
92 * Switch ports are named valeX:Y where X is the switch name and Y
93 * is the port. If Y matches a physical interface name, the port is
94 * connected to a physical device.
96 * Unlike physical interfaces, switch ports use their own memory region
97 * for rings and buffers.
98 * The virtual interfaces use a per-queue lock instead of the core lock.
99 * In the tx loop, we aggregate traffic in batches to make all operations
100 * faster. The batch size is bridge_batch.
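 *
 * For example, "vale0:p1" names port "p1" on switch "vale0", while
 * "vale0:em0" would connect the physical interface em0 (if it exists)
 * to the same switch, because the port name matches an interface name.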
102 #define NM_BDG_MAXRINGS 16 /* XXX unclear how many (must be a pow of 2). */
103 #define NM_BDG_MAXSLOTS 4096 /* XXX same as above */
104 #define NM_BRIDGE_RINGSIZE 1024 /* in the device */
105 #define NM_BDG_BATCH 1024 /* entries in the forwarding buffer */
106 /* actual size of the tables */
107 #define NM_BDG_BATCH_MAX (NM_BDG_BATCH + NETMAP_MAX_FRAGS)
108 /* NM_FT_NULL terminates a list of slots in the ft */
109 #define NM_FT_NULL NM_BDG_BATCH_MAX
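
/* ft[] entries are chained into per-destination lists through ft_next,
 * using slot indexes rather than pointers; NM_FT_NULL is larger than any
 * valid index and therefore acts as the list terminator (see
 * nm_vale_preflush() and nm_vale_flush() below).
 */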
113 * bridge_batch is set via sysctl to the max batch size to be
114 * used in the bridge. The actual value may be larger as the
115 * last packet in the block may overflow the size.
117 static int bridge_batch = NM_BDG_BATCH; /* bridge batch size */
119 SYSCTL_DECL(_dev_netmap);
120 SYSCTL_INT(_dev_netmap, OID_AUTO, bridge_batch, CTLFLAG_RW, &bridge_batch, 0,
121 "Max batch size to be used in the bridge");
124 static int netmap_vale_vp_create(struct nmreq_header *hdr, struct ifnet *,
125 struct netmap_mem_d *nmd, struct netmap_vp_adapter **);
126 static int netmap_vale_vp_bdg_attach(const char *, struct netmap_adapter *,
128 static int netmap_vale_bwrap_attach(const char *, struct netmap_adapter *);
131 * For each output interface, nm_vale_q is used to construct a list.
132 * bq_len is the number of output buffers (we can have coalescing
138 uint32_t bq_len; /* number of buffers */
141 /* Holds the default callbacks */
142 struct netmap_bdg_ops vale_bdg_ops = {
143 .lookup = netmap_vale_learning,
146 .vp_create = netmap_vale_vp_create,
147 .bwrap_attach = netmap_vale_bwrap_attach,
152 * this is a slightly optimized copy routine which rounds the length
153 * up to a multiple of 64 bytes and is often faster than dealing
154 * with other odd sizes. We assume there is enough room
155 * in the source and destination buffers.
157 * XXX only for multiples of NM_BUF_ALIGN bytes, non-overlapping buffers.
161 pkt_copy(void *_src, void *_dst, int l)
163 uint64_t *src = _src;
164 uint64_t *dst = _dst;
165 if (unlikely(l >= 1024)) {
169 for (; likely(l > 0); l -= NM_BUF_ALIGN) {
170 /* XXX unrolled body: NM_BUF_ALIGN/sizeof(uint64_t) copy statements per iteration */
184 * Free the forwarding tables for rings attached to switch ports.
187 nm_free_bdgfwd(struct netmap_adapter *na)
190 struct netmap_kring **kring;
193 nrings = na->num_tx_rings;
194 kring = na->tx_rings;
195 for (i = 0; i < nrings; i++) {
196 if (kring[i]->nkr_ft) {
197 nm_os_free(kring[i]->nkr_ft);
198 kring[i]->nkr_ft = NULL; /* protect from freeing twice */
205 * Allocate the forwarding tables for the rings attached to the bridge ports.
208 nm_alloc_bdgfwd(struct netmap_adapter *na)
210 int nrings, l, i, num_dstq;
211 struct netmap_kring **kring;
214 /* all port:rings + broadcast */
215 num_dstq = NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1;
216 l = sizeof(struct nm_bdg_fwd) * NM_BDG_BATCH_MAX;
217 l += sizeof(struct nm_vale_q) * num_dstq;
218 l += sizeof(uint16_t) * NM_BDG_BATCH_MAX;
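/* Resulting layout of each per-ring scratch area (a sketch):
 *
 *	ft[NM_BDG_BATCH_MAX] | dstq[num_dstq] | dsts[NM_BDG_BATCH_MAX]
 *
 * nm_vale_flush() recovers dstq and dsts from ft with the same pointer
 * arithmetic used below.
 */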
220 nrings = netmap_real_rings(na, NR_TX);
221 kring = na->tx_rings;
222 for (i = 0; i < nrings; i++) {
223 struct nm_bdg_fwd *ft;
224 struct nm_vale_q *dstq;
227 ft = nm_os_malloc(l);
232 dstq = (struct nm_vale_q *)(ft + NM_BDG_BATCH_MAX);
233 for (j = 0; j < num_dstq; j++) {
234 dstq[j].bq_head = dstq[j].bq_tail = NM_FT_NULL;
237 kring[i]->nkr_ft = ft;
242 /* Allows external modules to create bridges in exclusive mode,
243 * returns an authentication token that the external module will need
244 * to provide during nm_bdg_ctl_{attach, detach}(), netmap_bdg_regops(),
245 * and nm_bdg_update_private_data() operations.
246 * Successfully executed if ret != NULL and *return_status == 0.
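 *
 * Example caller (a sketch, from a hypothetical external module):
 *
 *	int status;
 *	void *token = netmap_vale_create("vale0:", &status);
 *	if (token != NULL && status == 0) {
 *		... use the token with nm_bdg_ctl_{attach,detach}(),
 *		    netmap_bdg_regops(), nm_bdg_update_private_data() ...
 *		netmap_vale_destroy("vale0:", token);
 *	}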
249 netmap_vale_create(const char *bdg_name, int *return_status)
251 struct nm_bridge *b = NULL;
255 b = nm_find_bridge(bdg_name, 0 /* don't create */, NULL);
257 *return_status = EEXIST;
258 goto unlock_bdg_create;
261 b = nm_find_bridge(bdg_name, 1 /* create */, &vale_bdg_ops);
263 *return_status = ENOMEM;
264 goto unlock_bdg_create;
267 b->bdg_flags |= NM_BDG_ACTIVE | NM_BDG_EXCLUSIVE;
268 ret = nm_bdg_get_auth_token(b);
276 /* Allows external modules to destroy a bridge created through
277 * netmap_vale_create(); the bridge must be empty.
280 netmap_vale_destroy(const char *bdg_name, void *auth_token)
282 struct nm_bridge *b = NULL;
286 b = nm_find_bridge(bdg_name, 0 /* don't create */, NULL);
289 goto unlock_bdg_free;
292 if (!nm_bdg_valid_auth_token(b, auth_token)) {
294 goto unlock_bdg_free;
296 if (!(b->bdg_flags & NM_BDG_EXCLUSIVE)) {
298 goto unlock_bdg_free;
301 b->bdg_flags &= ~(NM_BDG_EXCLUSIVE | NM_BDG_ACTIVE);
302 ret = netmap_bdg_free(b);
304 b->bdg_flags |= NM_BDG_EXCLUSIVE | NM_BDG_ACTIVE;
312 /* Process NETMAP_REQ_VALE_LIST. */
314 netmap_vale_list(struct nmreq_header *hdr)
316 struct nmreq_vale_list *req =
317 (struct nmreq_vale_list *)(uintptr_t)hdr->nr_body;
318 int namelen = strlen(hdr->nr_name);
319 struct nm_bridge *b, *bridges;
320 struct netmap_vp_adapter *vpna;
324 netmap_bns_getbridges(&bridges, &num_bridges);
326 /* this is used to enumerate bridges and ports */
327 if (namelen) { /* look up indexes of bridge and port */
328 if (strncmp(hdr->nr_name, NM_BDG_NAME,
329 strlen(NM_BDG_NAME))) {
333 b = nm_find_bridge(hdr->nr_name, 0 /* don't create */, NULL);
339 req->nr_bridge_idx = b - bridges; /* bridge index */
340 req->nr_port_idx = NM_BDG_NOPORT;
341 for (j = 0; j < b->bdg_active_ports; j++) {
342 i = b->bdg_port_index[j];
343 vpna = b->bdg_ports[i];
345 nm_prerr("This should not happen");
348 /* the former and the latter identify a
349 * virtual port and a NIC, respectively
351 if (!strcmp(vpna->up.name, hdr->nr_name)) {
352 req->nr_port_idx = i; /* port index */
358 /* return the first non-empty entry starting from
359 * bridge nr_bridge_idx and port nr_port_idx.
361 * Users can detect the end of the same bridge by
362 * comparing the old and new value of nr_bridge_idx, and can
363 * detect the end of all the bridges by error != 0
365 i = req->nr_bridge_idx;
366 j = req->nr_port_idx;
369 for (error = ENOENT; i < NM_BRIDGES; i++) {
371 for ( ; j < NM_BDG_MAXPORTS; j++) {
372 if (b->bdg_ports[j] == NULL)
374 vpna = b->bdg_ports[j];
375 /* write back the VALE switch name */
376 strlcpy(hdr->nr_name, vpna->up.name,
377 sizeof(hdr->nr_name));
381 j = 0; /* following bridges scan from 0 */
384 req->nr_bridge_idx = i;
385 req->nr_port_idx = j;
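
/* Userspace enumeration sketch (assumptions: fd is an open /dev/netmap
 * descriptor and NIOCCTRL/NETMAP_API/nmreq_* come from <net/netmap.h>;
 * error handling omitted):
 *
 *	struct nmreq_header hdr;
 *	struct nmreq_vale_list req;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	memset(&req, 0, sizeof(req));
 *	hdr.nr_version = NETMAP_API;
 *	hdr.nr_reqtype = NETMAP_REQ_VALE_LIST;
 *	hdr.nr_body = (uintptr_t)&req;
 *	while (ioctl(fd, NIOCCTRL, &hdr) == 0) {
 *		printf("%s (bridge %u, port %u)\n", hdr.nr_name,
 *		    (unsigned)req.nr_bridge_idx, (unsigned)req.nr_port_idx);
 *		hdr.nr_name[0] = '\0';	// empty name: keep enumerating
 *		req.nr_port_idx++;	// ask for the next port
 *	}
 */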
393 /* nm_dtor callback for ephemeral VALE ports */
395 netmap_vale_vp_dtor(struct netmap_adapter *na)
397 struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter*)na;
398 struct nm_bridge *b = vpna->na_bdg;
400 nm_prdis("%s has %d references", na->name, na->na_refcount);
403 netmap_bdg_detach_common(b, vpna->bdg_port, -1);
406 if (na->ifp != NULL && !nm_iszombie(na)) {
407 NM_DETACH_NA(na->ifp);
408 if (vpna->autodelete) {
409 nm_prdis("releasing %s", na->ifp->if_xname);
411 nm_os_vi_detach(na->ifp);
419 /* nm_krings_create callback for VALE ports.
420 * Calls the standard netmap_krings_create, then adds leases on rx
421 * rings and bdgfwd on tx rings.
424 netmap_vale_vp_krings_create(struct netmap_adapter *na)
429 u_int nrx = netmap_real_rings(na, NR_RX);
432 * Leases are attached to RX rings on vale ports
434 tailroom = sizeof(uint32_t) * na->num_rx_desc * nrx;
436 error = netmap_krings_create(na, tailroom);
440 leases = na->tailroom;
442 for (i = 0; i < nrx; i++) { /* Receive rings */
443 na->rx_rings[i]->nkr_leases = leases;
444 leases += na->num_rx_desc;
447 error = nm_alloc_bdgfwd(na);
449 netmap_krings_delete(na);
457 /* nm_krings_delete callback for VALE ports. */
459 netmap_vale_vp_krings_delete(struct netmap_adapter *na)
462 netmap_krings_delete(na);
467 nm_vale_flush(struct nm_bdg_fwd *ft, u_int n,
468 struct netmap_vp_adapter *na, u_int ring_nr);
472 * main dispatch routine for the bridge.
473 * Grab packets from a kring, move them into the ft structure
474 * associated to the tx (input) port. Max one instance per port,
475 * filtered on input (ioctl, poll or XXX).
476 * Returns the next position in the ring.
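 * (Collection phase: slots are referenced into the per-ring ft[] array
 * and handed to nm_vale_flush() in batches of at most bridge_batch
 * entries.)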
479 nm_vale_preflush(struct netmap_kring *kring, u_int end)
481 struct netmap_vp_adapter *na =
482 (struct netmap_vp_adapter*)kring->na;
483 struct netmap_ring *ring = kring->ring;
484 struct nm_bdg_fwd *ft;
485 u_int ring_nr = kring->ring_id;
486 u_int j = kring->nr_hwcur, lim = kring->nkr_num_slots - 1;
487 u_int ft_i = 0; /* start from 0 */
488 u_int frags = 1; /* how many frags ? */
489 struct nm_bridge *b = na->na_bdg;
491 /* To protect against modifications to the bridge we acquire a
492 * shared lock, waiting if we can sleep (if the source port is
493 * attached to a user process) or with a trylock otherwise (NICs).
495 nm_prdis("wait rlock for %d packets", ((j > end ? lim+1 : 0) + end) - j);
496 if (na->up.na_flags & NAF_BDG_MAYSLEEP)
498 else if (!BDG_RTRYLOCK(b))
500 nm_prdis(5, "rlock acquired for %d packets", ((j > end ? lim+1 : 0) + end) - j);
503 for (; likely(j != end); j = nm_next(j, lim)) {
504 struct netmap_slot *slot = &ring->slot[j];
507 ft[ft_i].ft_len = slot->len;
508 ft[ft_i].ft_flags = slot->flags;
509 ft[ft_i].ft_offset = 0;
511 nm_prdis("flags is 0x%x", slot->flags);
512 /* we do not use the buf changed flag, but we still need to reset it */
513 slot->flags &= ~NS_BUF_CHANGED;
515 /* this slot goes into a list so initialize the link field */
516 ft[ft_i].ft_next = NM_FT_NULL;
517 buf = ft[ft_i].ft_buf = (slot->flags & NS_INDIRECT) ?
518 (void *)(uintptr_t)slot->ptr : NMB_O(kring, slot);
519 if (unlikely(buf == NULL ||
520 slot->len > NETMAP_BUF_SIZE(&na->up) - nm_get_offset(kring, slot))) {
521 nm_prlim(5, "NULL %s buffer pointer from %s slot %d len %d",
522 (slot->flags & NS_INDIRECT) ? "INDIRECT" : "DIRECT",
523 kring->name, j, ft[ft_i].ft_len);
524 buf = ft[ft_i].ft_buf = NETMAP_BUF_BASE(&na->up);
526 ft[ft_i].ft_flags = 0;
528 __builtin_prefetch(buf);
530 if (slot->flags & NS_MOREFRAG) {
534 if (unlikely(netmap_verbose && frags > 1))
535 nm_prlim(5, "%d frags at %d", frags, ft_i - frags);
536 ft[ft_i - frags].ft_frags = frags;
538 if (unlikely((int)ft_i >= bridge_batch))
539 ft_i = nm_vale_flush(ft, ft_i, na, ring_nr);
542 /* Here ft_i > 0, ft[ft_i-1].flags has NS_MOREFRAG, and we
543 * have to fix frags count. */
545 ft[ft_i - 1].ft_flags &= ~NS_MOREFRAG;
546 ft[ft_i - frags].ft_frags = frags;
547 nm_prlim(5, "Truncate incomplete fragment at %d (%d frags)", ft_i, frags);
550 ft_i = nm_vale_flush(ft, ft_i, na, ring_nr);
556 /* ----- FreeBSD if_bridge hash function ------- */
559 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
560 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
562 * http://www.burtleburtle.net/bob/hash/spooky.html
564 #define mix(a, b, c) \
566 a -= b; a -= c; a ^= (c >> 13); \
567 b -= c; b -= a; b ^= (a << 8); \
568 c -= a; c -= b; c ^= (b >> 13); \
569 a -= b; a -= c; a ^= (c >> 12); \
570 b -= c; b -= a; b ^= (a << 16); \
571 c -= a; c -= b; c ^= (b >> 5); \
572 a -= b; a -= c; a ^= (c >> 3); \
573 b -= c; b -= a; b ^= (a << 10); \
574 c -= a; c -= b; c ^= (b >> 15); \
575 } while (/*CONSTCOND*/0)
578 static __inline uint32_t
579 nm_vale_rthash(const uint8_t *addr)
581 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; // hash key
591 #define BRIDGE_RTHASH_MASK (NM_BDG_HASH-1)
592 return (c & BRIDGE_RTHASH_MASK);
599 * Lookup function for a learning bridge.
600 * Update the hash table with the source address,
601 * then return the destination port index and the
602 * destination ring in *dst_ring (at the moment, always ring 0).
605 netmap_vale_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
606 struct netmap_vp_adapter *na, void *private_data)
608 uint8_t *buf = ((uint8_t *)ft->ft_buf) + ft->ft_offset;
609 u_int buf_len = ft->ft_len - ft->ft_offset;
610 struct nm_hash_ent *ht = private_data;
612 u_int dst, mysrc = na->bdg_port;
617 return NM_BDG_NOPORT;
620 if (ft->ft_flags & NS_INDIRECT) {
621 if (copyin(buf, indbuf, sizeof(indbuf))) {
622 return NM_BDG_NOPORT;
627 dmac = le64toh(*(uint64_t *)(buf)) & 0xffffffffffff;
628 smac = le64toh(*(uint64_t *)(buf + 4));
632 * The hash is somewhat expensive; there might be some
633 * worthwhile optimizations here.
635 if (((buf[6] & 1) == 0) && (na->last_smac != smac)) { /* valid src */
637 sh = nm_vale_rthash(s); /* hash of source */
638 /* update source port forwarding entry */
639 na->last_smac = ht[sh].mac = smac; /* XXX expire ? */
640 ht[sh].ports = mysrc;
641 if (netmap_debug & NM_DEBUG_VALE)
642 nm_prinf("src %02x:%02x:%02x:%02x:%02x:%02x on port %d",
643 s[0], s[1], s[2], s[3], s[4], s[5], mysrc);
645 dst = NM_BDG_BROADCAST;
646 if ((buf[0] & 1) == 0) { /* unicast */
647 dh = nm_vale_rthash(buf); /* hash of dst */
648 if (ht[dh].mac == dmac) { /* found dst */
657 * Available space in the ring. Only used in VALE code
658 * and only with is_rx = 1
660 static inline uint32_t
661 nm_kr_space(struct netmap_kring *k, int is_rx)
666 int busy = k->nkr_hwlease - k->nr_hwcur;
668 busy += k->nkr_num_slots;
669 space = k->nkr_num_slots - 1 - busy;
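/* is_rx: the slots between nr_hwcur and nkr_hwlease are busy (leased to
 * senders or not yet reclaimed); one slot is always left empty, hence
 * the -1. */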
671 /* XXX never used in this branch */
672 space = k->nr_hwtail - k->nkr_hwlease;
674 space += k->nkr_num_slots;
678 if (k->nkr_hwlease >= k->nkr_num_slots ||
679 k->nr_hwcur >= k->nkr_num_slots ||
680 k->nr_tail >= k->nkr_num_slots ||
682 busy >= k->nkr_num_slots) {
683 nm_prerr("invalid kring, cur %d tail %d lease %d lease_idx %d lim %d",
684 k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease,
685 k->nkr_lease_idx, k->nkr_num_slots);
694 /* make a lease on the kring for N positions. return the lease index.
696 * XXX only used in VALE code and with is_rx = 1
698 static inline uint32_t
699 nm_kr_lease(struct netmap_kring *k, u_int n, int is_rx)
701 uint32_t lim = k->nkr_num_slots - 1;
702 uint32_t lease_idx = k->nkr_lease_idx;
704 k->nkr_leases[lease_idx] = NR_NOSLOT;
705 k->nkr_lease_idx = nm_next(lease_idx, lim);
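/* NR_NOSLOT marks a lease whose sender has not completed yet; on
 * completion the sender stores its final position there, and the last
 * completer advances nr_hwtail past all finished leases (see the
 * reporting code at the end of nm_vale_flush()). */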
707 #ifdef CONFIG_NETMAP_DEBUG
708 if (n > nm_kr_space(k, is_rx)) {
709 nm_prerr("invalid request for %d slots", n);
712 #endif /* CONFIG NETMAP_DEBUG */
713 /* XXX verify that there are n slots */
715 if (k->nkr_hwlease > lim)
716 k->nkr_hwlease -= lim + 1;
718 #ifdef CONFIG_NETMAP_DEBUG
719 if (k->nkr_hwlease >= k->nkr_num_slots ||
720 k->nr_hwcur >= k->nkr_num_slots ||
721 k->nr_hwtail >= k->nkr_num_slots ||
722 k->nkr_lease_idx >= k->nkr_num_slots) {
723 nm_prerr("invalid kring %s, cur %d tail %d lease %d lease_idx %d lim %d",
725 k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease,
726 k->nkr_lease_idx, k->nkr_num_slots);
728 #endif /* CONFIG_NETMAP_DEBUG */
734 * This flush routine supports only unicast and broadcast but a large
735 * number of ports, and lets us replace the learn and dispatch functions.
738 nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
741 struct nm_vale_q *dst_ents, *brddst;
742 uint16_t num_dsts = 0, *dsts;
743 struct nm_bridge *b = na->na_bdg;
744 u_int i, me = na->bdg_port;
747 * The work area (pointed to by ft) is followed by an array of
748 * queues, dst_ents; there are NM_BDG_MAXRINGS
749 * queues per port plus one for the broadcast traffic.
750 * Then we have an array of destination indexes.
752 dst_ents = (struct nm_vale_q *)(ft + NM_BDG_BATCH_MAX);
753 dsts = (uint16_t *)(dst_ents + NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1);
755 /* first pass: find a destination for each packet in the batch */
756 for (i = 0; likely(i < n); i += ft[i].ft_frags) {
757 uint8_t dst_ring = ring_nr; /* default, same ring as origin */
758 uint16_t dst_port, d_i;
760 struct nm_bdg_fwd *start_ft = NULL;
762 nm_prdis("slot %d frags %d", i, ft[i].ft_frags);
764 if (na->up.virt_hdr_len < ft[i].ft_len) {
765 ft[i].ft_offset = na->up.virt_hdr_len;
767 } else if (na->up.virt_hdr_len == ft[i].ft_len && ft[i].ft_flags & NS_MOREFRAG) {
768 ft[i].ft_offset = ft[i].ft_len;
771 /* Drop the packet if the virtio-net header is neither contained in the
772 * first fragment nor at the very beginning of the second.
776 dst_port = b->bdg_ops.lookup(start_ft, &dst_ring, na, b->private_data);
777 if (netmap_verbose > 255)
778 nm_prlim(5, "slot %d port %d -> %d", i, me, dst_port);
779 if (dst_port >= NM_BDG_NOPORT)
780 continue; /* this packet is to be dropped */
781 else if (dst_port == NM_BDG_BROADCAST)
782 dst_ring = 0; /* broadcasts always go to ring 0 */
783 else if (unlikely(dst_port == me ||
784 !b->bdg_ports[dst_port]))
787 /* get a position in the scratch pad */
788 d_i = dst_port * NM_BDG_MAXRINGS + dst_ring;
791 /* append the first fragment to the list */
792 if (d->bq_head == NM_FT_NULL) { /* new destination */
793 d->bq_head = d->bq_tail = i;
794 /* remember this position to be scanned later */
795 if (dst_port != NM_BDG_BROADCAST)
796 dsts[num_dsts++] = d_i;
798 ft[d->bq_tail].ft_next = i;
801 d->bq_len += ft[i].ft_frags;
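/* note: bq_len counts slots (i.e. individual fragments), not packets;
 * nm_vale_flush() uses it below to size the lease on the destination
 * ring. */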
805 * Broadcast traffic goes to ring 0 on all destinations.
806 * So we need to add these rings to the list of ports to scan.
808 brddst = dst_ents + NM_BDG_BROADCAST * NM_BDG_MAXRINGS;
809 if (brddst->bq_head != NM_FT_NULL) {
811 for (j = 0; likely(j < b->bdg_active_ports); j++) {
813 i = b->bdg_port_index[j];
814 if (unlikely(i == me))
816 d_i = i * NM_BDG_MAXRINGS;
817 if (dst_ents[d_i].bq_head == NM_FT_NULL)
818 dsts[num_dsts++] = d_i;
822 nm_prdis(5, "pass 1 done %d pkts %d dsts", n, num_dsts);
823 /* second pass: scan destinations */
824 for (i = 0; i < num_dsts; i++) {
825 struct netmap_vp_adapter *dst_na;
826 struct netmap_kring *kring;
827 struct netmap_ring *ring;
828 u_int dst_nr, lim, j, d_i, next, brd_next;
829 u_int needed, howmany;
830 int retry = netmap_txsync_retry;
832 uint32_t my_start = 0, lease_idx = 0;
834 int virt_hdr_mismatch = 0;
837 nm_prdis("second pass %d port %d", i, d_i);
839 // XXX fix the division
840 dst_na = b->bdg_ports[d_i/NM_BDG_MAXRINGS];
841 /* protect from the lookup function returning an inactive
844 if (unlikely(dst_na == NULL))
846 if (dst_na->up.na_flags & NAF_SW_ONLY)
849 * The interface may be in !netmap mode in two cases:
850 * - when na is attached but not activated yet;
851 * - when na is being deactivated but is still attached.
853 if (unlikely(!nm_netmap_on(&dst_na->up))) {
854 nm_prdis("not in netmap mode!");
858 /* there is at least one unicast or broadcast packet */
859 brd_next = brddst->bq_head;
861 /* we need to reserve this many slots. If fewer are
862 * available, some packets will be dropped.
863 * Packets may have multiple fragments, so
864 * there is a chance that we may not use all of the slots
865 * we have claimed, so we will need to handle the leftover
866 * ones when we regain the lock.
868 needed = d->bq_len + brddst->bq_len;
870 if (unlikely(dst_na->up.virt_hdr_len != na->up.virt_hdr_len)) {
871 if (netmap_verbose) {
872 nm_prlim(3, "virt_hdr_mismatch, src %d dst %d", na->up.virt_hdr_len,
873 dst_na->up.virt_hdr_len);
875 /* There is a virtio-net header/offloadings mismatch between
876 * source and destination. The slower mismatch datapath will
877 * be used to cope with all the mismatches.
879 virt_hdr_mismatch = 1;
880 if (dst_na->mfs < na->mfs) {
881 /* We may need to do segmentation offloadings, and so
882 * we may need a number of destination slots greater
883 * than the number of input slots ('needed').
884 * We look for the smallest integer 'x' which satisfies:
885 * needed * na->mfs + x * H <= x * dst_na->mfs
886 * where 'H' is the length of the longest header that may
887 * be replicated in the segmentation process (e.g. for
888 * TCPv4 we must account for ethernet header, IP header
891 KASSERT(dst_na->mfs > 0, ("vpna->mfs is 0"));
892 needed = (needed * na->mfs) /
893 (dst_na->mfs - WORST_CASE_GSO_HEADER) + 1;
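/* Rearranging the inequality above gives
 *	x >= needed * na->mfs / (dst_na->mfs - H),
 * which is what the expression above computes, with H equal to
 * WORST_CASE_GSO_HEADER and integer division plus one acting as a
 * (slightly conservative) ceiling. */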
894 nm_prdis(3, "srcmtu=%u, dstmtu=%u, x=%u", na->mfs, dst_na->mfs, needed);
898 nm_prdis(5, "pass 2 dst %d is %x %s",
899 i, d_i, nm_is_bwrap(&dst_na->up) ? "nic/host" : "virtual");
900 dst_nr = d_i & (NM_BDG_MAXRINGS-1);
901 nrings = dst_na->up.num_rx_rings;
902 if (dst_nr >= nrings)
903 dst_nr = dst_nr % nrings;
904 kring = dst_na->up.rx_rings[dst_nr];
906 /* the destination ring may not have been opened for RX */
907 if (unlikely(ring == NULL || kring->nr_mode != NKR_NETMAP_ON))
909 lim = kring->nkr_num_slots - 1;
913 if (dst_na->retry && retry) {
914 /* try to get some free slot from the previous run */
915 kring->nm_notify(kring, NAF_FORCE_RECLAIM);
916 /* actually useful only for bwraps, since there
917 * the notify will trigger a txsync on the hwna. VALE ports
918 * have dst_na->retry == 0
921 /* reserve the buffers in the queue and an entry
922 * to report completion, and drop lock.
923 * XXX this might become a helper function.
925 mtx_lock(&kring->q_lock);
926 if (kring->nkr_stopped) {
927 mtx_unlock(&kring->q_lock);
930 my_start = j = kring->nkr_hwlease;
931 howmany = nm_kr_space(kring, 1);
932 if (needed < howmany)
934 lease_idx = nm_kr_lease(kring, howmany, 1);
935 mtx_unlock(&kring->q_lock);
937 /* only retry if we need more than available slots */
938 if (retry && needed <= howmany)
941 /* copy to the destination queue */
942 while (howmany > 0) {
943 struct netmap_slot *slot;
944 struct nm_bdg_fwd *ft_p, *ft_end;
947 /* find the queue from which we pick the next packet.
948 * NM_FT_NULL is always higher than valid indexes
949 * so we never dereference it if the other list
950 * has packets (and if both are empty we never
953 if (next < brd_next) {
955 next = ft_p->ft_next;
956 } else { /* insert broadcast */
957 ft_p = ft + brd_next;
958 brd_next = ft_p->ft_next;
960 cnt = ft_p->ft_frags; // cnt > 0
961 if (unlikely(cnt > howmany))
962 break; /* no more space */
963 if (netmap_verbose && cnt > 1)
964 nm_prlim(5, "rx %d frags to %d", cnt, j);
966 if (unlikely(virt_hdr_mismatch)) {
967 bdg_mismatch_datapath(na, dst_na, ft_p, ring, &j, lim, &howmany);
971 char *dst, *src = ft_p->ft_buf;
972 size_t copy_len = ft_p->ft_len, dst_len = copy_len;
974 uint64_t dstoff, dstoff_cb;
976 const uintptr_t mask = NM_BUF_ALIGN - 1;
978 slot = &ring->slot[j];
979 dst = NMB(&dst_na->up, slot);
980 dstoff = nm_get_offset(kring, slot);
981 dstoff_cb = dstoff & ~mask;
982 src_cb = ((uintptr_t)src) & ~mask;
983 src_co = ((uintptr_t)src) & mask;
984 dst_co = ((uintptr_t)(dst + dstoff)) & mask;
985 if (dst_co < src_co) {
986 dstoff_cb += NM_BUF_ALIGN;
988 dstoff = dstoff_cb + src_co;
991 nm_prdis("send [%d] %d(%d) bytes at %s:%d",
992 i, (int)copy_len, (int)dst_len,
993 NM_IFPNAME(dst_ifp), j);
995 if (unlikely(dstoff > NETMAP_BUF_SIZE(&dst_na->up) ||
996 dst_len > NETMAP_BUF_SIZE(&dst_na->up) - dstoff)) {
997 nm_prlim(5, "dropping packet/fragment of len %zu, dest offset %llu",
998 dst_len, (unsigned long long)dstoff);
999 copy_len = dst_len = 0;
1000 dstoff = nm_get_offset(kring, slot);
1003 if (ft_p->ft_flags & NS_INDIRECT) {
1004 if (copyin(src, dst, copy_len)) {
1005 // invalid user pointer, pretend len is 0
1009 //memcpy(dst, src, copy_len);
1010 pkt_copy((char *)src_cb, dst + dstoff_cb, (int)copy_len);
1012 slot->len = dst_len;
1013 slot->flags = (cnt << 8) | NS_MOREFRAG;
1014 nm_write_offset(kring, slot, dstoff);
1015 j = nm_next(j, lim);
1018 } while (ft_p != ft_end);
1019 slot->flags = (cnt << 8); /* clear flag on last entry */
1022 if (next == NM_FT_NULL && brd_next == NM_FT_NULL)
1026 /* current position */
1027 uint32_t *p = kring->nkr_leases; /* shorthand */
1028 uint32_t update_pos;
1029 int still_locked = 1;
1031 mtx_lock(&kring->q_lock);
1032 if (unlikely(howmany > 0)) {
1033 /* we have not used all the buffers. If I am the last one,
1034 * I can recover the slots; otherwise I must
1035 * fill them with 0 to mark empty packets.
1037 nm_prdis("leftover %d bufs", howmany);
1038 if (nm_next(lease_idx, lim) == kring->nkr_lease_idx) {
1039 /* yes i am the last one */
1040 nm_prdis("roll back nkr_hwlease to %d", j);
1041 kring->nkr_hwlease = j;
1043 while (howmany-- > 0) {
1044 ring->slot[j].len = 0;
1045 ring->slot[j].flags = 0;
1046 j = nm_next(j, lim);
1050 p[lease_idx] = j; /* report I am done */
1052 update_pos = kring->nr_hwtail;
1054 if (my_start == update_pos) {
1055 /* all slots before my_start have been reported,
1056 * so scan subsequent leases to see if other ranges
1057 * have been completed, and do a selwakeup or txsync.
1059 while (lease_idx != kring->nkr_lease_idx &&
1060 p[lease_idx] != NR_NOSLOT) {
1062 p[lease_idx] = NR_NOSLOT;
1063 lease_idx = nm_next(lease_idx, lim);
1065 /* j is the new 'write' position. j != my_start
1066 * means there are new buffers to report
1068 if (likely(j != my_start)) {
1069 kring->nr_hwtail = j;
1071 mtx_unlock(&kring->q_lock);
1072 kring->nm_notify(kring, 0);
1073 /* this is netmap_notify for VALE ports and
1074 * netmap_bwrap_notify for bwrap. The latter will
1075 * trigger a txsync on the underlying hwna
1077 if (dst_na->retry && retry--) {
1078 /* XXX this is going to call nm_notify again.
1079 * Only useful for bwrap in virtual machines
1086 mtx_unlock(&kring->q_lock);
1089 d->bq_head = d->bq_tail = NM_FT_NULL; /* cleanup */
1092 brddst->bq_head = brddst->bq_tail = NM_FT_NULL; /* cleanup */
1097 /* nm_txsync callback for VALE ports */
1099 netmap_vale_vp_txsync(struct netmap_kring *kring, int flags)
1101 struct netmap_vp_adapter *na =
1102 (struct netmap_vp_adapter *)kring->na;
1104 u_int const lim = kring->nkr_num_slots - 1;
1105 u_int const head = kring->rhead;
1107 if (bridge_batch <= 0) { /* testing only */
1108 done = head; // used all
1115 if (bridge_batch > NM_BDG_BATCH)
1116 bridge_batch = NM_BDG_BATCH;
1118 done = nm_vale_preflush(kring, head);
1121 nm_prerr("early break at %d/ %d, tail %d", done, head, kring->nr_hwtail);
1123 * packets between 'done' and 'cur' are left unsent.
1125 kring->nr_hwcur = done;
1126 kring->nr_hwtail = nm_prev(done, lim);
1127 if (netmap_debug & NM_DEBUG_TXSYNC)
1128 nm_prinf("%s ring %d flags %d", na->up.name, kring->ring_id, flags);
1133 /* create a netmap_vp_adapter that describes a VALE port.
1134 * Only persistent VALE ports have a non-null ifp.
1137 netmap_vale_vp_create(struct nmreq_header *hdr, struct ifnet *ifp,
1138 struct netmap_mem_d *nmd, struct netmap_vp_adapter **ret)
1140 struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
1141 struct netmap_vp_adapter *vpna;
1142 struct netmap_adapter *na;
1145 u_int extrabufs = 0;
1147 if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) {
1151 vpna = nm_os_malloc(sizeof(*vpna));
1158 strlcpy(na->name, hdr->nr_name, sizeof(na->name));
1160 /* bounds checking */
1161 na->num_tx_rings = req->nr_tx_rings;
1162 nm_bound_var(&na->num_tx_rings, 1, 1, NM_BDG_MAXRINGS, NULL);
1163 req->nr_tx_rings = na->num_tx_rings; /* write back */
1164 na->num_rx_rings = req->nr_rx_rings;
1165 nm_bound_var(&na->num_rx_rings, 1, 1, NM_BDG_MAXRINGS, NULL);
1166 req->nr_rx_rings = na->num_rx_rings; /* write back */
1167 nm_bound_var(&req->nr_tx_slots, NM_BRIDGE_RINGSIZE,
1168 1, NM_BDG_MAXSLOTS, NULL);
1169 na->num_tx_desc = req->nr_tx_slots;
1170 nm_bound_var(&req->nr_rx_slots, NM_BRIDGE_RINGSIZE,
1171 1, NM_BDG_MAXSLOTS, NULL);
1172 /* validate number of pipes. We want at least 1,
1173 * but probably can do with some more.
1174 * So let's use 2 as default (when 0 is supplied)
1176 nm_bound_var(&npipes, 2, 1, NM_MAXPIPES, NULL);
1177 /* validate extra bufs */
1178 extrabufs = req->nr_extra_bufs;
1179 nm_bound_var(&extrabufs, 0, 0,
1180 128*NM_BDG_MAXSLOTS, NULL);
1181 req->nr_extra_bufs = extrabufs; /* write back */
1182 na->num_rx_desc = req->nr_rx_slots;
1183 /* Set the mfs to a default value, as it is needed on the VALE
1184 * mismatch datapath. XXX We should set it according to the MTU
1185 * known to the kernel. */
1186 vpna->mfs = NM_BDG_MFS_DEFAULT;
1187 vpna->last_smac = ~0llu;
1188 /*if (vpna->mfs > netmap_buf_size) TODO netmap_buf_size is zero??
1189 vpna->mfs = netmap_buf_size; */
1191 nm_prinf("max frame size %u", vpna->mfs);
1193 na->na_flags |= (NAF_BDG_MAYSLEEP | NAF_OFFSETS);
1194 /* persistent VALE ports look like hw devices
1195 * with a native netmap adapter
1198 na->na_flags |= NAF_NATIVE;
1199 na->nm_txsync = netmap_vale_vp_txsync;
1200 na->nm_rxsync = netmap_vp_rxsync; /* use the one provided by bdg */
1201 na->nm_register = netmap_vp_reg; /* use the one provided by bdg */
1202 na->nm_krings_create = netmap_vale_vp_krings_create;
1203 na->nm_krings_delete = netmap_vale_vp_krings_delete;
1204 na->nm_dtor = netmap_vale_vp_dtor;
1205 nm_prdis("nr_mem_id %d", req->nr_mem_id);
1207 netmap_mem_get(nmd):
1208 netmap_mem_private_new(
1209 na->num_tx_rings, na->num_tx_desc,
1210 na->num_rx_rings, na->num_rx_desc,
1211 req->nr_extra_bufs, npipes, &error);
1212 if (na->nm_mem == NULL)
1214 na->nm_bdg_attach = netmap_vale_vp_bdg_attach;
1215 /* other nmd fields are set in the common routine */
1216 error = netmap_attach_common(na);
1223 if (na->nm_mem != NULL)
1224 netmap_mem_put(na->nm_mem);
1229 /* nm_bdg_attach callback for VALE ports
1230 * The na_vp port is this same netmap_adapter. There is no host port.
1233 netmap_vale_vp_bdg_attach(const char *name, struct netmap_adapter *na,
1234 struct nm_bridge *b)
1236 struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter *)na;
1238 if ((b->bdg_flags & NM_BDG_NEED_BWRAP) || vpna->na_bdg) {
1239 return NM_NEED_BWRAP;
1242 strlcpy(na->name, name, sizeof(na->name));
1243 na->na_hostvp = NULL;
1248 netmap_vale_bwrap_krings_create(struct netmap_adapter *na)
1252 /* impersonate a netmap_vp_adapter */
1253 error = netmap_vale_vp_krings_create(na);
1256 error = netmap_bwrap_krings_create_common(na);
1258 netmap_vale_vp_krings_delete(na);
1264 netmap_vale_bwrap_krings_delete(struct netmap_adapter *na)
1266 netmap_bwrap_krings_delete_common(na);
1267 netmap_vale_vp_krings_delete(na);
1271 netmap_vale_bwrap_attach(const char *nr_name, struct netmap_adapter *hwna)
1273 struct netmap_bwrap_adapter *bna;
1274 struct netmap_adapter *na = NULL;
1275 struct netmap_adapter *hostna = NULL;
1278 bna = nm_os_malloc(sizeof(*bna));
1283 strlcpy(na->name, nr_name, sizeof(na->name));
1284 na->nm_register = netmap_bwrap_reg;
1285 na->nm_txsync = netmap_vale_vp_txsync;
1286 // na->nm_rxsync = netmap_bwrap_rxsync;
1287 na->nm_krings_create = netmap_vale_bwrap_krings_create;
1288 na->nm_krings_delete = netmap_vale_bwrap_krings_delete;
1289 na->nm_notify = netmap_bwrap_notify;
1290 bna->nm_intr_notify = netmap_bwrap_intr_notify;
1291 bna->up.retry = 1; /* XXX maybe this should depend on the hwna */
1292 /* Set the mfs, needed on the VALE mismatch datapath. */
1293 bna->up.mfs = NM_BDG_MFS_DEFAULT;
1295 if (hwna->na_flags & NAF_HOST_RINGS) {
1296 hostna = &bna->host.up;
1297 hostna->nm_notify = netmap_bwrap_notify;
1298 bna->host.mfs = NM_BDG_MFS_DEFAULT;
1301 error = netmap_bwrap_attach_common(na, hwna);
1309 netmap_get_vale_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1310 struct netmap_mem_d *nmd, int create)
1312 return netmap_get_bdg_na(hdr, na, nmd, create, &vale_bdg_ops);
1316 /* creates a persistent VALE port */
1318 nm_vi_create(struct nmreq_header *hdr)
1320 struct nmreq_vale_newif *req =
1321 (struct nmreq_vale_newif *)(uintptr_t)hdr->nr_body;
1323 /* Build a nmreq_register out of the nmreq_vale_newif,
1324 * so that we can call netmap_vi_create(). */
1325 struct nmreq_register regreq;
1326 bzero(&regreq, sizeof(regreq));
1327 regreq.nr_tx_slots = req->nr_tx_slots;
1328 regreq.nr_rx_slots = req->nr_rx_slots;
1329 regreq.nr_tx_rings = req->nr_tx_rings;
1330 regreq.nr_rx_rings = req->nr_rx_rings;
1331 regreq.nr_mem_id = req->nr_mem_id;
1332 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
1333 hdr->nr_body = (uintptr_t)&regreq;
1334 error = netmap_vi_create(hdr, 0 /* no autodelete */);
1335 hdr->nr_reqtype = NETMAP_REQ_VALE_NEWIF;
1336 hdr->nr_body = (uintptr_t)req;
1337 /* Write back to the original struct. */
1338 req->nr_tx_slots = regreq.nr_tx_slots;
1339 req->nr_rx_slots = regreq.nr_rx_slots;
1340 req->nr_tx_rings = regreq.nr_tx_rings;
1341 req->nr_rx_rings = regreq.nr_rx_rings;
1342 req->nr_mem_id = regreq.nr_mem_id;
1346 /* remove a persistent VALE port from the system */
1348 nm_vi_destroy(const char *name)
1351 struct netmap_vp_adapter *vpna;
1354 ifp = ifunit_ref(name);
1358 /* make sure this is actually a VALE port */
1359 if (!NM_NA_VALID(ifp) || NA(ifp)->nm_register != netmap_vp_reg) {
1364 vpna = (struct netmap_vp_adapter *)NA(ifp);
1366 /* we can only destroy ports that were created via NETMAP_BDG_NEWIF */
1367 if (vpna->autodelete) {
1372 /* also make sure that nobody is using the interface */
1373 if (NETMAP_OWNED_BY_ANY(&vpna->up) ||
1374 vpna->up.na_refcount > 1 /* any ref besides the one in nm_vi_create()? */) {
1382 nm_prinf("destroying a persistent vale interface %s", ifp->if_xname);
1383 /* Linux requires that all references are released
1388 nm_os_vi_detach(ifp);
1398 nm_update_info(struct nmreq_register *req, struct netmap_adapter *na)
1400 req->nr_rx_rings = na->num_rx_rings;
1401 req->nr_tx_rings = na->num_tx_rings;
1402 req->nr_rx_slots = na->num_rx_desc;
1403 req->nr_tx_slots = na->num_tx_desc;
1404 return netmap_mem_get_info(na->nm_mem, &req->nr_memsize, NULL,
1410 * Create a virtual interface registered to the system.
1411 * The interface will be attached to a bridge later.
1414 netmap_vi_create(struct nmreq_header *hdr, int autodelete)
1416 struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
1418 struct netmap_vp_adapter *vpna;
1419 struct netmap_mem_d *nmd = NULL;
1422 if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) {
1426 /* don't include VALE prefix */
1427 if (!strncmp(hdr->nr_name, NM_BDG_NAME, strlen(NM_BDG_NAME)))
1429 if (strlen(hdr->nr_name) >= IFNAMSIZ) {
1432 ifp = ifunit_ref(hdr->nr_name);
1433 if (ifp) { /* already exists, cannot create a new one */
1436 if (NM_NA_VALID(ifp)) {
1437 int update_err = nm_update_info(req, NA(ifp));
1445 error = nm_os_vi_persist(hdr->nr_name, &ifp);
1450 if (req->nr_mem_id) {
1451 nmd = netmap_mem_find(req->nr_mem_id);
1457 /* netmap_vale_vp_create creates a struct netmap_vp_adapter */
1458 error = netmap_vale_vp_create(hdr, ifp, nmd, &vpna);
1460 if (netmap_debug & NM_DEBUG_VALE)
1461 nm_prerr("error %d", error);
1464 /* persist-specific routines */
1465 vpna->up.nm_bdg_ctl = netmap_vp_bdg_ctl;
1467 netmap_adapter_get(&vpna->up);
1469 vpna->autodelete = 1;
1471 NM_ATTACH_NA(ifp, &vpna->up);
1472 /* return the updated info */
1473 error = nm_update_info(req, &vpna->up);
1477 nm_prdis("returning nr_mem_id %d", req->nr_mem_id);
1479 netmap_mem_put(nmd);
1481 nm_prdis("created %s", ifp->if_xname);
1488 netmap_mem_put(nmd);
1490 nm_os_vi_detach(ifp);
1495 #endif /* WITH_VALE */