2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2013-2016 Universita` di Pisa
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #if defined(__FreeBSD__)
31 #include <sys/cdefs.h> /* prerequisite */
32 __FBSDID("$FreeBSD$");
34 #include <sys/types.h>
35 #include <sys/errno.h>
36 #include <sys/param.h> /* defines used in kernel.h */
37 #include <sys/kernel.h> /* types used in module initialization */
38 #include <sys/conf.h> /* cdevsw struct, UID, GID */
39 #include <sys/sockio.h>
40 #include <sys/socketvar.h> /* struct socket */
41 #include <sys/malloc.h>
43 #include <sys/rwlock.h>
44 #include <sys/socket.h> /* sockaddrs */
45 #include <sys/selinfo.h>
46 #include <sys/sysctl.h>
48 #include <net/if_var.h>
49 #include <net/bpf.h> /* BIOCIMMEDIATE */
50 #include <machine/bus.h> /* bus_dmamap_* */
51 #include <sys/endian.h>
52 #include <sys/refcount.h>
60 #elif defined(__APPLE__)
62 #warning OSX support is only partial
70 #error Unsupported platform
72 #endif /* unsupported */
78 #include <net/netmap.h>
79 #include <dev/netmap/netmap_kern.h>
80 #include <dev/netmap/netmap_mem2.h>
81 #include <dev/netmap/netmap_bdg.h>
86 * system parameters (most of them in netmap_kern.h)
87 * NM_BDG_NAME prefix for switch port names, default "vale"
88 * NM_BDG_MAXPORTS max number of ports per switch
89 * NM_BRIDGES max number of switches in the system.
90 * XXX should become a sysctl or tunable
92 * Switch ports are named valeX:Y where X is the switch name and Y
93 * is the port. If Y matches a physical interface name, the port is
94 * connected to a physical device.
96 * Unlike physical interfaces, switch ports use their own memory region
97 * for rings and buffers.
98 * The virtual interfaces use a per-queue lock instead of the core lock.
99 * In the tx loop, we aggregate traffic in batches to make all operations
100 * faster. The batch size is bridge_batch.
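/*
 * Editorial example (not part of the driver): a user process can attach to a
 * switch port simply by opening it by name with the helpers in
 * <net/netmap_user.h>; the port, and the switch itself, are created on the
 * fly.  "vale0" and "p1" below are made-up names following the valeX:Y
 * convention described above; error handling is omitted.  This is a sketch,
 * assuming the classic nm_open()/nm_close() wrappers.
 */
#if 0	/* illustrative userspace code, never built with the kernel module */
#define NETMAP_WITH_LIBS
#include <net/netmap_user.h>

static int
open_vale_port_example(void)	/* hypothetical helper name */
{
	struct nm_desc *d = nm_open("vale0:p1", NULL, 0, NULL);

	if (d == NULL)
		return (-1);
	/* ... poll(2) on NETMAP_FD(d) and move packets through the rings ... */
	nm_close(d);
	return (0);
}
#endif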
102 #define NM_BDG_MAXRINGS 16 /* XXX unclear how many. */
103 #define NM_BDG_MAXSLOTS 4096 /* XXX same as above */
104 #define NM_BRIDGE_RINGSIZE 1024 /* in the device */
105 #define NM_BDG_BATCH 1024 /* entries in the forwarding buffer */
106 /* actual size of the tables */
107 #define NM_BDG_BATCH_MAX (NM_BDG_BATCH + NETMAP_MAX_FRAGS)
108 /* NM_FT_NULL terminates a list of slots in the ft */
109 #define NM_FT_NULL NM_BDG_BATCH_MAX
113 * bridge_batch is set via sysctl to the max batch size to be
114 * used in the bridge. The actual value may be larger as the
115 * last packet in the batch may overflow the limit.
117 static int bridge_batch = NM_BDG_BATCH; /* bridge batch size */
119 SYSCTL_DECL(_dev_netmap);
120 SYSCTL_INT(_dev_netmap, OID_AUTO, bridge_batch, CTLFLAG_RW, &bridge_batch, 0,
121 "Max batch size to be used in the bridge");
124 static int netmap_vale_vp_create(struct nmreq_header *hdr, struct ifnet *,
125 struct netmap_mem_d *nmd, struct netmap_vp_adapter **);
126 static int netmap_vale_vp_bdg_attach(const char *, struct netmap_adapter *,
128 static int netmap_vale_bwrap_attach(const char *, struct netmap_adapter *);
131 * For each output interface, nm_vale_q is used to construct a list.
132 * bq_len is the number of output buffers (we can have coalescing
138 uint32_t bq_len; /* number of buffers */
141 /* Holds the default callbacks */
142 struct netmap_bdg_ops vale_bdg_ops = {
143 .lookup = netmap_vale_learning,
146 .vp_create = netmap_vale_vp_create,
147 .bwrap_attach = netmap_vale_bwrap_attach,
152 * this is a slightly optimized copy routine which rounds the length
153 * up to a multiple of 64 bytes and is often faster than dealing
154 * with odd sizes. We assume there is enough room
155 * in the source and destination buffers.
157 * XXX only for lengths that are multiples of 64 bytes, non-overlapping buffers.
160 pkt_copy(void *_src, void *_dst, int l)
162 uint64_t *src = _src;
163 uint64_t *dst = _dst;
164 if (unlikely(l >= 1024)) {
168 for (; likely(l > 0); l -= 64) {
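/*
 * Editorial sketch: the loop body and the large-packet branch are elided
 * above.  Under the stated assumptions (length rounded up to a multiple of
 * 64 bytes, non-overlapping buffers) the routine falls back to memcpy() for
 * packets of 1024 bytes or more and otherwise copies eight 64-bit words
 * (one cache line) per iteration:
 */
#if 0	/* illustrative only */
static inline void
pkt_copy_sketch(void *_src, void *_dst, int l)
{
	uint64_t *src = _src;
	uint64_t *dst = _dst;

	if (unlikely(l >= 1024)) {
		memcpy(dst, src, l);
		return;
	}
	for (; likely(l > 0); l -= 64) {
		*dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++;
		*dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++;
	}
}
#endif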
182 * Free the forwarding tables for rings attached to switch ports.
185 nm_free_bdgfwd(struct netmap_adapter *na)
188 struct netmap_kring **kring;
191 nrings = na->num_tx_rings;
192 kring = na->tx_rings;
193 for (i = 0; i < nrings; i++) {
194 if (kring[i]->nkr_ft) {
195 nm_os_free(kring[i]->nkr_ft);
196 kring[i]->nkr_ft = NULL; /* protect from freeing twice */
203 * Allocate the forwarding tables for the rings attached to the bridge ports.
206 nm_alloc_bdgfwd(struct netmap_adapter *na)
208 int nrings, l, i, num_dstq;
209 struct netmap_kring **kring;
212 /* all port:rings + broadcast */
213 num_dstq = NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1;
214 l = sizeof(struct nm_bdg_fwd) * NM_BDG_BATCH_MAX;
215 l += sizeof(struct nm_vale_q) * num_dstq;
216 l += sizeof(uint16_t) * NM_BDG_BATCH_MAX;
218 nrings = netmap_real_rings(na, NR_TX);
219 kring = na->tx_rings;
220 for (i = 0; i < nrings; i++) {
221 struct nm_bdg_fwd *ft;
222 struct nm_vale_q *dstq;
225 ft = nm_os_malloc(l);
230 dstq = (struct nm_vale_q *)(ft + NM_BDG_BATCH_MAX);
231 for (j = 0; j < num_dstq; j++) {
232 dstq[j].bq_head = dstq[j].bq_tail = NM_FT_NULL;
235 kring[i]->nkr_ft = ft;
240 /* Allows external modules to create bridges in exclusive mode,
241 * returns an authentication token that the external module will need
242 * to provide during nm_bdg_ctl_{attach, detach}(), netmap_bdg_regops(),
243 * and nm_bdg_update_private_data() operations.
244 * The call succeeds if the returned token is non-NULL and *return_status is 0.
247 netmap_vale_create(const char *bdg_name, int *return_status)
249 struct nm_bridge *b = NULL;
253 b = nm_find_bridge(bdg_name, 0 /* don't create */, NULL);
255 *return_status = EEXIST;
256 goto unlock_bdg_create;
259 b = nm_find_bridge(bdg_name, 1 /* create */, &vale_bdg_ops);
261 *return_status = ENOMEM;
262 goto unlock_bdg_create;
265 b->bdg_flags |= NM_BDG_ACTIVE | NM_BDG_EXCLUSIVE;
266 ret = nm_bdg_get_auth_token(b);
274 /* Allows external modules to destroy a bridge created through
275 * netmap_vale_create(); the bridge must be empty.
278 netmap_vale_destroy(const char *bdg_name, void *auth_token)
280 struct nm_bridge *b = NULL;
284 b = nm_find_bridge(bdg_name, 0 /* don't create */, NULL);
287 goto unlock_bdg_free;
290 if (!nm_bdg_valid_auth_token(b, auth_token)) {
292 goto unlock_bdg_free;
294 if (!(b->bdg_flags & NM_BDG_EXCLUSIVE)) {
296 goto unlock_bdg_free;
299 b->bdg_flags &= ~(NM_BDG_EXCLUSIVE | NM_BDG_ACTIVE);
300 ret = netmap_bdg_free(b);
302 b->bdg_flags |= NM_BDG_EXCLUSIVE | NM_BDG_ACTIVE;
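/*
 * Editorial sketch of the intended calling sequence for an external module
 * using the two functions above.  The module/function/variable names and the
 * bridge name "vale1:" are made up; the token is the one that must later be
 * passed to nm_bdg_ctl_{attach,detach}(), netmap_bdg_regops() and
 * nm_bdg_update_private_data().
 */
#if 0	/* illustrative only */
static void *my_auth_token;	/* hypothetical module-local storage */

static int
my_module_init(void)
{
	int status;

	my_auth_token = netmap_vale_create("vale1:", &status);
	if (my_auth_token == NULL)
		return (status);	/* EEXIST, ENOMEM, ... */
	/* ... attach ports, passing my_auth_token to the bdg_ctl calls ... */
	return (0);
}

static void
my_module_fini(void)
{
	/* the bridge must be empty at this point */
	netmap_vale_destroy("vale1:", my_auth_token);
}
#endif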
310 /* Process NETMAP_REQ_VALE_LIST. */
312 netmap_vale_list(struct nmreq_header *hdr)
314 struct nmreq_vale_list *req =
315 (struct nmreq_vale_list *)(uintptr_t)hdr->nr_body;
316 int namelen = strlen(hdr->nr_name);
317 struct nm_bridge *b, *bridges;
318 struct netmap_vp_adapter *vpna;
322 netmap_bns_getbridges(&bridges, &num_bridges);
324 /* this is used to enumerate bridges and ports */
325 if (namelen) { /* look up indexes of bridge and port */
326 if (strncmp(hdr->nr_name, NM_BDG_NAME,
327 strlen(NM_BDG_NAME))) {
331 b = nm_find_bridge(hdr->nr_name, 0 /* don't create */, NULL);
337 req->nr_bridge_idx = b - bridges; /* bridge index */
338 req->nr_port_idx = NM_BDG_NOPORT;
339 for (j = 0; j < b->bdg_active_ports; j++) {
340 i = b->bdg_port_index[j];
341 vpna = b->bdg_ports[i];
343 nm_prerr("This should not happen");
346 /* the former and the latter identify a
347 * virtual port and a NIC, respectively
349 if (!strcmp(vpna->up.name, hdr->nr_name)) {
350 req->nr_port_idx = i; /* port index */
356 /* return the first non-empty entry starting from
357 * bridge nr_bridge_idx and port nr_port_idx.
359 * Users can detect the end of the current bridge by
360 * comparing the old and new value of nr_bridge_idx, and can
361 * detect the end of all the bridges by error != 0
363 i = req->nr_bridge_idx;
364 j = req->nr_port_idx;
367 for (error = ENOENT; i < NM_BRIDGES; i++) {
369 for ( ; j < NM_BDG_MAXPORTS; j++) {
370 if (b->bdg_ports[j] == NULL)
372 vpna = b->bdg_ports[j];
373 /* write back the VALE switch name */
374 strlcpy(hdr->nr_name, vpna->up.name,
375 sizeof(hdr->nr_name));
379 j = 0; /* following bridges scan from 0 */
382 req->nr_bridge_idx = i;
383 req->nr_port_idx = j;
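/*
 * Editorial sketch: userspace drives the request above through the NIOCCTRL
 * ioctl on /dev/netmap.  With an empty hdr.nr_name, each call returns the
 * first non-empty port starting from (nr_bridge_idx, nr_port_idx), so
 * bumping nr_port_idx walks every port of every switch until the ioctl
 * fails.  Function name is made up, error handling is minimal.
 */
#if 0	/* illustrative userspace code */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <net/netmap.h>

static void
list_vale_ports(void)
{
	struct nmreq_header hdr;
	struct nmreq_vale_list req;
	int fd = open("/dev/netmap", O_RDWR);

	memset(&req, 0, sizeof(req));
	for (;;) {
		memset(&hdr, 0, sizeof(hdr));	/* empty nr_name: enumerate */
		hdr.nr_version = NETMAP_API;
		hdr.nr_reqtype = NETMAP_REQ_VALE_LIST;
		hdr.nr_body = (uintptr_t)&req;
		if (ioctl(fd, NIOCCTRL, &hdr) < 0)
			break;	/* error != 0: no more bridges */
		printf("bridge %d port %d: %s\n", req.nr_bridge_idx,
		    req.nr_port_idx, hdr.nr_name);
		req.nr_port_idx++;	/* resume from the next port */
	}
	close(fd);
}
#endif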
390 /* Process NETMAP_REQ_VALE_ATTACH.
393 netmap_vale_attach(struct nmreq_header *hdr, void *auth_token)
395 struct nmreq_vale_attach *req =
396 (struct nmreq_vale_attach *)(uintptr_t)hdr->nr_body;
397 struct netmap_vp_adapter * vpna;
398 struct netmap_adapter *na = NULL;
399 struct netmap_mem_d *nmd = NULL;
400 struct nm_bridge *b = NULL;
404 /* permission check for modified bridges */
405 b = nm_find_bridge(hdr->nr_name, 0 /* don't create */, NULL);
406 if (b && !nm_bdg_valid_auth_token(b, auth_token)) {
411 if (req->reg.nr_mem_id) {
412 nmd = netmap_mem_find(req->reg.nr_mem_id);
419 /* check for existing one */
420 error = netmap_get_vale_na(hdr, &na, nmd, 0);
425 error = netmap_get_vale_na(hdr, &na,
426 nmd, 1 /* create if not exists */);
427 if (error) { /* no device */
431 if (na == NULL) { /* VALE prefix missing */
436 if (NETMAP_OWNED_BY_ANY(na)) {
441 if (na->nm_bdg_ctl) {
442 /* nop for VALE ports. The bwrap needs to put the hwna
443 * in netmap mode (see netmap_bwrap_bdg_ctl)
445 error = na->nm_bdg_ctl(hdr, na);
448 ND("registered %s to netmap-mode", na->name);
450 vpna = (struct netmap_vp_adapter *)na;
451 req->port_index = vpna->bdg_port;
460 netmap_adapter_put(na);
469 /* Process NETMAP_REQ_VALE_DETACH.
472 netmap_vale_detach(struct nmreq_header *hdr, void *auth_token)
474 struct nmreq_vale_detach *nmreq_det = (void *)(uintptr_t)hdr->nr_body;
475 struct netmap_vp_adapter *vpna;
476 struct netmap_adapter *na;
477 struct nm_bridge *b = NULL;
481 /* permission check for modified bridges */
482 b = nm_find_bridge(hdr->nr_name, 0 /* don't create */, NULL);
483 if (b && !nm_bdg_valid_auth_token(b, auth_token)) {
488 error = netmap_get_vale_na(hdr, &na, NULL, 0 /* don't create */);
489 if (error) { /* no device, or another bridge or user owns the device */
493 if (na == NULL) { /* VALE prefix missing */
496 } else if (nm_is_bwrap(na) &&
497 ((struct netmap_bwrap_adapter *)na)->na_polling_state) {
498 /* Don't detach a NIC with polling */
503 vpna = (struct netmap_vp_adapter *)na;
504 if (na->na_vp != vpna) {
505 /* trying to detach the first attachment of a persistent VALE port that is attached to two bridges */
511 nmreq_det->port_index = vpna->bdg_port;
513 if (na->nm_bdg_ctl) {
514 /* remove the port from bridge. The bwrap
515 * also needs to put the hwna in normal mode
517 error = na->nm_bdg_ctl(hdr, na);
521 netmap_adapter_put(na);
529 /* nm_dtor callback for ephemeral VALE ports */
531 netmap_vale_vp_dtor(struct netmap_adapter *na)
533 struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter*)na;
534 struct nm_bridge *b = vpna->na_bdg;
536 ND("%s has %d references", na->name, na->na_refcount);
539 netmap_bdg_detach_common(b, vpna->bdg_port, -1);
542 if (na->ifp != NULL && !nm_iszombie(na)) {
543 NM_DETACH_NA(na->ifp);
544 if (vpna->autodelete) {
545 ND("releasing %s", na->ifp->if_xname);
547 nm_os_vi_detach(na->ifp);
555 /* nm_krings_create callback for VALE ports.
556 * Calls the standard netmap_krings_create, then adds leases on rx
557 * rings and bdgfwd on tx rings.
560 netmap_vale_vp_krings_create(struct netmap_adapter *na)
565 u_int nrx = netmap_real_rings(na, NR_RX);
568 * Leases are attached to RX rings on vale ports
570 tailroom = sizeof(uint32_t) * na->num_rx_desc * nrx;
572 error = netmap_krings_create(na, tailroom);
576 leases = na->tailroom;
578 for (i = 0; i < nrx; i++) { /* Receive rings */
579 na->rx_rings[i]->nkr_leases = leases;
580 leases += na->num_rx_desc;
583 error = nm_alloc_bdgfwd(na);
585 netmap_krings_delete(na);
593 /* nm_krings_delete callback for VALE ports. */
595 netmap_vale_vp_krings_delete(struct netmap_adapter *na)
598 netmap_krings_delete(na);
603 nm_vale_flush(struct nm_bdg_fwd *ft, u_int n,
604 struct netmap_vp_adapter *na, u_int ring_nr);
608 * main dispatch routine for the bridge.
609 * Grabs packets from a kring and moves them into the ft structure
610 * associated with the tx (input) port. Max one instance per port,
611 * filtered on input (ioctl, poll or XXX).
612 * Returns the next position in the ring.
615 nm_vale_preflush(struct netmap_kring *kring, u_int end)
617 struct netmap_vp_adapter *na =
618 (struct netmap_vp_adapter*)kring->na;
619 struct netmap_ring *ring = kring->ring;
620 struct nm_bdg_fwd *ft;
621 u_int ring_nr = kring->ring_id;
622 u_int j = kring->nr_hwcur, lim = kring->nkr_num_slots - 1;
623 u_int ft_i = 0; /* start from 0 */
624 u_int frags = 1; /* how many frags ? */
625 struct nm_bridge *b = na->na_bdg;
627 /* To protect against modifications to the bridge we acquire a
628 * shared lock, waiting if we can sleep (if the source port is
629 * attached to a user process) or with a trylock otherwise (NICs).
631 ND("wait rlock for %d packets", ((j > end ? lim+1 : 0) + end) - j);
632 if (na->up.na_flags & NAF_BDG_MAYSLEEP)
634 else if (!BDG_RTRYLOCK(b))
636 ND(5, "rlock acquired for %d packets", ((j > end ? lim+1 : 0) + end) - j);
639 for (; likely(j != end); j = nm_next(j, lim)) {
640 struct netmap_slot *slot = &ring->slot[j];
643 ft[ft_i].ft_len = slot->len;
644 ft[ft_i].ft_flags = slot->flags;
645 ft[ft_i].ft_offset = 0;
647 ND("flags is 0x%x", slot->flags);
648 /* we do not use the buf changed flag, but we still need to reset it */
649 slot->flags &= ~NS_BUF_CHANGED;
651 /* this slot goes into a list so initialize the link field */
652 ft[ft_i].ft_next = NM_FT_NULL;
653 buf = ft[ft_i].ft_buf = (slot->flags & NS_INDIRECT) ?
654 (void *)(uintptr_t)slot->ptr : NMB(&na->up, slot);
655 if (unlikely(buf == NULL)) {
656 nm_prlim(5, "NULL %s buffer pointer from %s slot %d len %d",
657 (slot->flags & NS_INDIRECT) ? "INDIRECT" : "DIRECT",
658 kring->name, j, ft[ft_i].ft_len);
659 buf = ft[ft_i].ft_buf = NETMAP_BUF_BASE(&na->up);
661 ft[ft_i].ft_flags = 0;
663 __builtin_prefetch(buf);
665 if (slot->flags & NS_MOREFRAG) {
669 if (unlikely(netmap_verbose && frags > 1))
670 RD(5, "%d frags at %d", frags, ft_i - frags);
671 ft[ft_i - frags].ft_frags = frags;
673 if (unlikely((int)ft_i >= bridge_batch))
674 ft_i = nm_vale_flush(ft, ft_i, na, ring_nr);
677 /* Here ft_i > 0, ft[ft_i-1].flags has NS_MOREFRAG, and we
678 * have to fix frags count. */
680 ft[ft_i - 1].ft_flags &= ~NS_MOREFRAG;
681 ft[ft_i - frags].ft_frags = frags;
682 nm_prlim(5, "Truncate incomplete fragment at %d (%d frags)", ft_i, frags);
685 ft_i = nm_vale_flush(ft, ft_i, na, ring_nr);
691 /* ----- FreeBSD if_bridge hash function ------- */
694 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
695 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
697 * http://www.burtleburtle.net/bob/hash/spooky.html
699 #define mix(a, b, c) \
701 a -= b; a -= c; a ^= (c >> 13); \
702 b -= c; b -= a; b ^= (a << 8); \
703 c -= a; c -= b; c ^= (b >> 13); \
704 a -= b; a -= c; a ^= (c >> 12); \
705 b -= c; b -= a; b ^= (a << 16); \
706 c -= a; c -= b; c ^= (b >> 5); \
707 a -= b; a -= c; a ^= (c >> 3); \
708 b -= c; b -= a; b ^= (a << 10); \
709 c -= a; c -= b; c ^= (b >> 15); \
710 } while (/*CONSTCOND*/0)
713 static __inline uint32_t
714 nm_vale_rthash(const uint8_t *addr)
716 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; // hash key
726 #define BRIDGE_RTHASH_MASK (NM_BDG_HASH-1)
727 return (c & BRIDGE_RTHASH_MASK);
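/*
 * Editorial sketch: the elided body above folds the six bytes of the MAC
 * address into the 32-bit state before mixing, as in the if_bridge code this
 * is adapted from.  Reconstruction shown only to make the fragment readable:
 */
#if 0	/* illustrative only */
static uint32_t
nm_vale_rthash_sketch(const uint8_t *addr)
{
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0;	/* hash key */

	b += addr[5] << 8;
	b += addr[4];
	a += addr[3] << 24;
	a += addr[2] << 16;
	a += addr[1] << 8;
	a += addr[0];

	mix(a, b, c);
	return (c & (NM_BDG_HASH - 1));
}
#endif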
734 * Lookup function for a learning bridge.
735 * Updates the hash table with the source address,
736 * and then returns the destination port index, and the
737 * ring in *dst_ring (at the moment, always ring 0).
740 netmap_vale_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
741 struct netmap_vp_adapter *na, void *private_data)
743 uint8_t *buf = ((uint8_t *)ft->ft_buf) + ft->ft_offset;
744 u_int buf_len = ft->ft_len - ft->ft_offset;
745 struct nm_hash_ent *ht = private_data;
747 u_int dst, mysrc = na->bdg_port;
752 return NM_BDG_NOPORT;
755 if (ft->ft_flags & NS_INDIRECT) {
756 if (copyin(buf, indbuf, sizeof(indbuf))) {
757 return NM_BDG_NOPORT;
762 dmac = le64toh(*(uint64_t *)(buf)) & 0xffffffffffff;
763 smac = le64toh(*(uint64_t *)(buf + 4));
767 * The hash is somewhat expensive, there might be some
768 * worthwhile optimizations here.
770 if (((buf[6] & 1) == 0) && (na->last_smac != smac)) { /* valid src */
772 sh = nm_vale_rthash(s); /* hash of source */
773 /* update source port forwarding entry */
774 na->last_smac = ht[sh].mac = smac; /* XXX expire ? */
775 ht[sh].ports = mysrc;
776 if (netmap_debug & NM_DEBUG_VALE)
777 nm_prinf("src %02x:%02x:%02x:%02x:%02x:%02x on port %d",
778 s[0], s[1], s[2], s[3], s[4], s[5], mysrc);
780 dst = NM_BDG_BROADCAST;
781 if ((buf[0] & 1) == 0) { /* unicast */
782 dh = nm_vale_rthash(buf); /* hash of dst */
783 if (ht[dh].mac == dmac) { /* found dst */
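/*
 * Editorial note: the destination MAC occupies bytes 0..5 of the frame and
 * the source MAC bytes 6..11, so the two 64-bit little-endian loads above
 * extract them as follows (the right shift on smac is elided above):
 *
 *	dmac = le64toh(*(uint64_t *)(buf)) & 0xffffffffffff;   bytes 0..5
 *	smac = le64toh(*(uint64_t *)(buf + 4)) >> 16;          bytes 6..11
 *
 * buf[0] & 1 tests the multicast bit of the destination address,
 * buf[6] & 1 the (invalid) multicast bit of the source address.
 */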
792 * Available space in the ring. Only used in VALE code
793 * and only with is_rx = 1
795 static inline uint32_t
796 nm_kr_space(struct netmap_kring *k, int is_rx)
801 int busy = k->nkr_hwlease - k->nr_hwcur;
803 busy += k->nkr_num_slots;
804 space = k->nkr_num_slots - 1 - busy;
806 /* XXX never used in this branch */
807 space = k->nr_hwtail - k->nkr_hwlease;
809 space += k->nkr_num_slots;
813 if (k->nkr_hwlease >= k->nkr_num_slots ||
814 k->nr_hwcur >= k->nkr_num_slots ||
815 k->nr_tail >= k->nkr_num_slots ||
817 busy >= k->nkr_num_slots) {
818 D("invalid kring, cur %d tail %d lease %d lease_idx %d lim %d", k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease,
819 k->nkr_lease_idx, k->nkr_num_slots);
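/*
 * Editorial example: with nkr_num_slots = 1024, nr_hwcur = 100 and
 * nkr_hwlease = 120, busy = 20 and space = 1024 - 1 - 20 = 1003 slots can
 * still be leased; if the lease pointer had wrapped (say nkr_hwlease = 10),
 * busy = 10 - 100 + 1024 = 934 and space = 89.
 */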
828 /* make a lease on the kring for N positions. return the lease index.
830 * XXX only used in VALE code and with is_rx = 1
832 static inline uint32_t
833 nm_kr_lease(struct netmap_kring *k, u_int n, int is_rx)
835 uint32_t lim = k->nkr_num_slots - 1;
836 uint32_t lease_idx = k->nkr_lease_idx;
838 k->nkr_leases[lease_idx] = NR_NOSLOT;
839 k->nkr_lease_idx = nm_next(lease_idx, lim);
841 #ifdef CONFIG_NETMAP_DEBUG
842 if (n > nm_kr_space(k, is_rx)) {
843 nm_prerr("invalid request for %d slots", n);
846 #endif /* CONFIG_NETMAP_DEBUG */
847 /* XXX verify that there are n slots */
849 if (k->nkr_hwlease > lim)
850 k->nkr_hwlease -= lim + 1;
852 #ifdef CONFIG_NETMAP_DEBUG
853 if (k->nkr_hwlease >= k->nkr_num_slots ||
854 k->nr_hwcur >= k->nkr_num_slots ||
855 k->nr_hwtail >= k->nkr_num_slots ||
856 k->nkr_lease_idx >= k->nkr_num_slots) {
857 nm_prerr("invalid kring %s, cur %d tail %d lease %d lease_idx %d lim %d",
859 k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease,
860 k->nkr_lease_idx, k->nkr_num_slots);
862 #endif /* CONFIG_NETMAP_DEBUG */
868 * This flush routine supports only unicast and broadcast but a large
869 * number of ports, and lets us replace the learn and dispatch functions.
872 nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
875 struct nm_vale_q *dst_ents, *brddst;
876 uint16_t num_dsts = 0, *dsts;
877 struct nm_bridge *b = na->na_bdg;
878 u_int i, me = na->bdg_port;
881 * The work area (pointed to by ft) is followed by an array of
882 * queues, dst_ents; there are NM_BDG_MAXRINGS
883 * queues per port plus one for the broadcast traffic.
884 * Then we have an array of destination indexes.
886 dst_ents = (struct nm_vale_q *)(ft + NM_BDG_BATCH_MAX);
887 dsts = (uint16_t *)(dst_ents + NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1);
889 /* first pass: find a destination for each packet in the batch */
890 for (i = 0; likely(i < n); i += ft[i].ft_frags) {
891 uint8_t dst_ring = ring_nr; /* default, same ring as origin */
892 uint16_t dst_port, d_i;
894 struct nm_bdg_fwd *start_ft = NULL;
896 ND("slot %d frags %d", i, ft[i].ft_frags);
898 if (na->up.virt_hdr_len < ft[i].ft_len) {
899 ft[i].ft_offset = na->up.virt_hdr_len;
901 } else if (na->up.virt_hdr_len == ft[i].ft_len && ft[i].ft_flags & NS_MOREFRAG) {
902 ft[i].ft_offset = ft[i].ft_len;
905 /* Drop the packet if the virtio-net header is neither contained in the first
906 * fragment nor at the very beginning of the second.
910 dst_port = b->bdg_ops.lookup(start_ft, &dst_ring, na, b->private_data);
911 if (netmap_verbose > 255)
912 RD(5, "slot %d port %d -> %d", i, me, dst_port);
913 if (dst_port >= NM_BDG_NOPORT)
914 continue; /* this packet is to be dropped */
915 else if (dst_port == NM_BDG_BROADCAST)
916 dst_ring = 0; /* broadcasts always go to ring 0 */
917 else if (unlikely(dst_port == me ||
918 !b->bdg_ports[dst_port]))
921 /* get a position in the scratch pad */
922 d_i = dst_port * NM_BDG_MAXRINGS + dst_ring;
925 /* append the first fragment to the list */
926 if (d->bq_head == NM_FT_NULL) { /* new destination */
927 d->bq_head = d->bq_tail = i;
928 /* remember this position to be scanned later */
929 if (dst_port != NM_BDG_BROADCAST)
930 dsts[num_dsts++] = d_i;
932 ft[d->bq_tail].ft_next = i;
935 d->bq_len += ft[i].ft_frags;
939 * Broadcast traffic goes to ring 0 on all destinations.
940 * So we need to add these rings to the list of ports to scan.
941 * XXX at the moment we scan all NM_BDG_MAXPORTS ports, which is
942 * expensive. We should keep a compact list of active destinations
943 * so we could shorten this loop.
945 brddst = dst_ents + NM_BDG_BROADCAST * NM_BDG_MAXRINGS;
946 if (brddst->bq_head != NM_FT_NULL) {
948 for (j = 0; likely(j < b->bdg_active_ports); j++) {
950 i = b->bdg_port_index[j];
951 if (unlikely(i == me))
953 d_i = i * NM_BDG_MAXRINGS;
954 if (dst_ents[d_i].bq_head == NM_FT_NULL)
955 dsts[num_dsts++] = d_i;
959 ND(5, "pass 1 done %d pkts %d dsts", n, num_dsts);
960 /* second pass: scan destinations */
961 for (i = 0; i < num_dsts; i++) {
962 struct netmap_vp_adapter *dst_na;
963 struct netmap_kring *kring;
964 struct netmap_ring *ring;
965 u_int dst_nr, lim, j, d_i, next, brd_next;
966 u_int needed, howmany;
967 int retry = netmap_txsync_retry;
969 uint32_t my_start = 0, lease_idx = 0;
971 int virt_hdr_mismatch = 0;
974 ND("second pass %d port %d", i, d_i);
976 // XXX fix the division
977 dst_na = b->bdg_ports[d_i/NM_BDG_MAXRINGS];
978 /* protect from the lookup function returning an inactive
981 if (unlikely(dst_na == NULL))
983 if (dst_na->up.na_flags & NAF_SW_ONLY)
986 * The interface may be in !netmap mode in two cases:
987 * - when na is attached but not activated yet;
988 * - when na is being deactivated but is still attached.
990 if (unlikely(!nm_netmap_on(&dst_na->up))) {
991 ND("not in netmap mode!");
995 /* there is at least one unicast or broadcast packet */
996 brd_next = brddst->bq_head;
998 /* we need to reserve this many slots. If fewer are
999 * available, some packets will be dropped.
1000 * Packets may have multiple fragments, so
1001 * there is a chance that we may not use all of the slots
1002 * we have claimed, and we will need to handle the leftover
1003 * ones when we regain the lock.
1005 needed = d->bq_len + brddst->bq_len;
1007 if (unlikely(dst_na->up.virt_hdr_len != na->up.virt_hdr_len)) {
1008 if (netmap_verbose) {
1009 RD(3, "virt_hdr_mismatch, src %d dst %d", na->up.virt_hdr_len,
1010 dst_na->up.virt_hdr_len);
1012 /* There is a virtio-net header/offloading mismatch between
1013 * source and destination. The slower mismatch datapath will
1014 * be used to cope with all the mismatches.
1016 virt_hdr_mismatch = 1;
1017 if (dst_na->mfs < na->mfs) {
1018 /* We may need to do segmentation offload, and so
1019 * we may need a number of destination slots greater
1020 * than the number of input slots ('needed').
1021 * We look for the smallest integer 'x' which satisfies:
1022 * needed * na->mfs + x * H <= x * dst_na->mfs
1023 * where 'H' is the length of the longest header that may
1024 * be replicated in the segmentation process (e.g. for
1025 * TCPv4 we must account for the ethernet header, the IP header
1026 * and the TCPv4 header).
1028 KASSERT(dst_na->mfs > 0, ("vpna->mfs is 0"));
1029 needed = (needed * na->mfs) /
1030 (dst_na->mfs - WORST_CASE_GSO_HEADER) + 1;
1031 ND(3, "srcmtu=%u, dstmtu=%u, x=%u", na->mfs, dst_na->mfs, needed);
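/*
 * Editorial note: solving the inequality above for x gives
 * x >= needed * na->mfs / (dst_na->mfs - H); the computation above does
 * exactly that with H = WORST_CASE_GSO_HEADER, the trailing "+ 1" standing
 * in for the ceiling.
 */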
1035 ND(5, "pass 2 dst %d is %x %s",
1036 i, d_i, is_vp ? "virtual" : "nic/host");
1037 dst_nr = d_i & (NM_BDG_MAXRINGS-1);
1038 nrings = dst_na->up.num_rx_rings;
1039 if (dst_nr >= nrings)
1040 dst_nr = dst_nr % nrings;
1041 kring = dst_na->up.rx_rings[dst_nr];
1043 /* the destination ring may not have been opened for RX */
1044 if (unlikely(ring == NULL || kring->nr_mode != NKR_NETMAP_ON))
1046 lim = kring->nkr_num_slots - 1;
1050 if (dst_na->retry && retry) {
1051 /* try to get some free slot from the previous run */
1052 kring->nm_notify(kring, NAF_FORCE_RECLAIM);
1053 /* actually useful only for bwraps, since there
1054 * the notify will trigger a txsync on the hwna. VALE ports
1055 * have dst_na->retry == 0
1058 /* reserve the buffers in the queue and an entry
1059 * to report completion, and drop the lock.
1060 * XXX this might become a helper function.
1062 mtx_lock(&kring->q_lock);
1063 if (kring->nkr_stopped) {
1064 mtx_unlock(&kring->q_lock);
1067 my_start = j = kring->nkr_hwlease;
1068 howmany = nm_kr_space(kring, 1);
1069 if (needed < howmany)
1071 lease_idx = nm_kr_lease(kring, howmany, 1);
1072 mtx_unlock(&kring->q_lock);
1074 /* only retry if we need more than available slots */
1075 if (retry && needed <= howmany)
1078 /* copy to the destination queue */
1079 while (howmany > 0) {
1080 struct netmap_slot *slot;
1081 struct nm_bdg_fwd *ft_p, *ft_end;
1084 /* find the queue from which we pick the next packet.
1085 * NM_FT_NULL is always higher than valid indexes
1086 * so we never dereference it if the other list
1087 * has packets (and if both are empty we never get here).
1090 if (next < brd_next) {
1092 next = ft_p->ft_next;
1093 } else { /* insert broadcast */
1094 ft_p = ft + brd_next;
1095 brd_next = ft_p->ft_next;
1097 cnt = ft_p->ft_frags; // cnt > 0
1098 if (unlikely(cnt > howmany))
1099 break; /* no more space */
1100 if (netmap_verbose && cnt > 1)
1101 RD(5, "rx %d frags to %d", cnt, j);
1102 ft_end = ft_p + cnt;
1103 if (unlikely(virt_hdr_mismatch)) {
1104 bdg_mismatch_datapath(na, dst_na, ft_p, ring, &j, lim, &howmany);
1108 char *dst, *src = ft_p->ft_buf;
1109 size_t copy_len = ft_p->ft_len, dst_len = copy_len;
1111 slot = &ring->slot[j];
1112 dst = NMB(&dst_na->up, slot);
1114 ND("send [%d] %d(%d) bytes at %s:%d",
1115 i, (int)copy_len, (int)dst_len,
1116 NM_IFPNAME(dst_ifp), j);
1117 /* round to a multiple of 64 */
1118 copy_len = (copy_len + 63) & ~63;
1120 if (unlikely(copy_len > NETMAP_BUF_SIZE(&dst_na->up) ||
1121 copy_len > NETMAP_BUF_SIZE(&na->up))) {
1122 RD(5, "invalid len %d, down to 64", (int)copy_len);
1123 copy_len = dst_len = 64; // XXX
1125 if (ft_p->ft_flags & NS_INDIRECT) {
1126 if (copyin(src, dst, copy_len)) {
1127 // invalid user pointer, pretend len is 0
1131 //memcpy(dst, src, copy_len);
1132 pkt_copy(src, dst, (int)copy_len);
1134 slot->len = dst_len;
1135 slot->flags = (cnt << 8)| NS_MOREFRAG;
1136 j = nm_next(j, lim);
1139 } while (ft_p != ft_end);
1140 slot->flags = (cnt << 8); /* clear flag on last entry */
1143 if (next == NM_FT_NULL && brd_next == NM_FT_NULL)
1147 /* current position */
1148 uint32_t *p = kring->nkr_leases; /* shorthand */
1149 uint32_t update_pos;
1150 int still_locked = 1;
1152 mtx_lock(&kring->q_lock);
1153 if (unlikely(howmany > 0)) {
1154 /* we have not used all the buffers. If I am the last one
1155 * I can recover the slots, otherwise I must
1156 * fill them with 0 to mark empty packets.
1158 ND("leftover %d bufs", howmany);
1159 if (nm_next(lease_idx, lim) == kring->nkr_lease_idx) {
1160 /* yes i am the last one */
1161 ND("roll back nkr_hwlease to %d", j);
1162 kring->nkr_hwlease = j;
1164 while (howmany-- > 0) {
1165 ring->slot[j].len = 0;
1166 ring->slot[j].flags = 0;
1167 j = nm_next(j, lim);
1171 p[lease_idx] = j; /* report I am done */
1173 update_pos = kring->nr_hwtail;
1175 if (my_start == update_pos) {
1176 /* all slots before my_start have been reported,
1177 * so scan subsequent leases to see if other ranges
1178 * have been completed, and do a selwakeup or txsync.
1180 while (lease_idx != kring->nkr_lease_idx &&
1181 p[lease_idx] != NR_NOSLOT) {
1183 p[lease_idx] = NR_NOSLOT;
1184 lease_idx = nm_next(lease_idx, lim);
1186 /* j is the new 'write' position. j != my_start
1187 * means there are new buffers to report
1189 if (likely(j != my_start)) {
1190 kring->nr_hwtail = j;
1192 mtx_unlock(&kring->q_lock);
1193 kring->nm_notify(kring, 0);
1194 /* this is netmap_notify for VALE ports and
1195 * netmap_bwrap_notify for bwrap. The latter will
1196 * trigger a txsync on the underlying hwna
1198 if (dst_na->retry && retry--) {
1199 /* XXX this is going to call nm_notify again.
1200 * Only useful for bwrap in virtual machines
1207 mtx_unlock(&kring->q_lock);
1210 d->bq_head = d->bq_tail = NM_FT_NULL; /* cleanup */
1213 brddst->bq_head = brddst->bq_tail = NM_FT_NULL; /* cleanup */
1218 /* nm_txsync callback for VALE ports */
1220 netmap_vale_vp_txsync(struct netmap_kring *kring, int flags)
1222 struct netmap_vp_adapter *na =
1223 (struct netmap_vp_adapter *)kring->na;
1225 u_int const lim = kring->nkr_num_slots - 1;
1226 u_int const head = kring->rhead;
1228 if (bridge_batch <= 0) { /* testing only */
1229 done = head; // used all
1236 if (bridge_batch > NM_BDG_BATCH)
1237 bridge_batch = NM_BDG_BATCH;
1239 done = nm_vale_preflush(kring, head);
1242 nm_prerr("early break at %d/ %d, tail %d", done, head, kring->nr_hwtail);
1244 * packets between 'done' and 'cur' are left unsent.
1246 kring->nr_hwcur = done;
1247 kring->nr_hwtail = nm_prev(done, lim);
1248 if (netmap_debug & NM_DEBUG_TXSYNC)
1249 nm_prinf("%s ring %d flags %d", na->up.name, kring->ring_id, flags);
1254 /* create a netmap_vp_adapter that describes a VALE port.
1255 * Only persistent VALE ports have a non-null ifp.
1258 netmap_vale_vp_create(struct nmreq_header *hdr, struct ifnet *ifp,
1259 struct netmap_mem_d *nmd, struct netmap_vp_adapter **ret)
1261 struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
1262 struct netmap_vp_adapter *vpna;
1263 struct netmap_adapter *na;
1266 u_int extrabufs = 0;
1268 if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) {
1272 vpna = nm_os_malloc(sizeof(*vpna));
1279 strlcpy(na->name, hdr->nr_name, sizeof(na->name));
1281 /* bounds checking */
1282 na->num_tx_rings = req->nr_tx_rings;
1283 nm_bound_var(&na->num_tx_rings, 1, 1, NM_BDG_MAXRINGS, NULL);
1284 req->nr_tx_rings = na->num_tx_rings; /* write back */
1285 na->num_rx_rings = req->nr_rx_rings;
1286 nm_bound_var(&na->num_rx_rings, 1, 1, NM_BDG_MAXRINGS, NULL);
1287 req->nr_rx_rings = na->num_rx_rings; /* write back */
1288 nm_bound_var(&req->nr_tx_slots, NM_BRIDGE_RINGSIZE,
1289 1, NM_BDG_MAXSLOTS, NULL);
1290 na->num_tx_desc = req->nr_tx_slots;
1291 nm_bound_var(&req->nr_rx_slots, NM_BRIDGE_RINGSIZE,
1292 1, NM_BDG_MAXSLOTS, NULL);
1293 /* validate number of pipes. We want at least 1,
1294 * but probably can do with some more.
1295 * So let's use 2 as default (when 0 is supplied)
1297 nm_bound_var(&npipes, 2, 1, NM_MAXPIPES, NULL);
1298 /* validate extra bufs */
1299 extrabufs = req->nr_extra_bufs;
1300 nm_bound_var(&extrabufs, 0, 0,
1301 128*NM_BDG_MAXSLOTS, NULL);
1302 req->nr_extra_bufs = extrabufs; /* write back */
1303 na->num_rx_desc = req->nr_rx_slots;
1304 /* Set the mfs to a default value, as it is needed on the VALE
1305 * mismatch datapath. XXX We should set it according to the MTU
1306 * known to the kernel. */
1307 vpna->mfs = NM_BDG_MFS_DEFAULT;
1308 vpna->last_smac = ~0llu;
1309 /*if (vpna->mfs > netmap_buf_size) TODO netmap_buf_size is zero??
1310 vpna->mfs = netmap_buf_size; */
1312 nm_prinf("max frame size %u", vpna->mfs);
1314 na->na_flags |= NAF_BDG_MAYSLEEP;
1315 /* persistent VALE ports look like hw devices
1316 * with a native netmap adapter
1319 na->na_flags |= NAF_NATIVE;
1320 na->nm_txsync = netmap_vale_vp_txsync;
1321 na->nm_rxsync = netmap_vp_rxsync; /* use the one provided by bdg */
1322 na->nm_register = netmap_vp_reg; /* use the one provided by bdg */
1323 na->nm_krings_create = netmap_vale_vp_krings_create;
1324 na->nm_krings_delete = netmap_vale_vp_krings_delete;
1325 na->nm_dtor = netmap_vale_vp_dtor;
1326 ND("nr_mem_id %d", req->nr_mem_id);
1328 netmap_mem_get(nmd):
1329 netmap_mem_private_new(
1330 na->num_tx_rings, na->num_tx_desc,
1331 na->num_rx_rings, na->num_rx_desc,
1332 req->nr_extra_bufs, npipes, &error);
1333 if (na->nm_mem == NULL)
1335 na->nm_bdg_attach = netmap_vale_vp_bdg_attach;
1336 /* other nmd fields are set in the common routine */
1337 error = netmap_attach_common(na);
1344 if (na->nm_mem != NULL)
1345 netmap_mem_put(na->nm_mem);
1350 /* nm_bdg_attach callback for VALE ports
1351 * The na_vp port is this same netmap_adapter. There is no host port.
1354 netmap_vale_vp_bdg_attach(const char *name, struct netmap_adapter *na,
1355 struct nm_bridge *b)
1357 struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter *)na;
1359 if ((b->bdg_flags & NM_BDG_NEED_BWRAP) || vpna->na_bdg) {
1360 return NM_NEED_BWRAP;
1363 strlcpy(na->name, name, sizeof(na->name));
1364 na->na_hostvp = NULL;
1369 netmap_vale_bwrap_krings_create(struct netmap_adapter *na)
1373 /* impersonate a netmap_vp_adapter */
1374 error = netmap_vale_vp_krings_create(na);
1377 error = netmap_bwrap_krings_create_common(na);
1379 netmap_vale_vp_krings_delete(na);
1385 netmap_vale_bwrap_krings_delete(struct netmap_adapter *na)
1387 netmap_bwrap_krings_delete_common(na);
1388 netmap_vale_vp_krings_delete(na);
1392 netmap_vale_bwrap_attach(const char *nr_name, struct netmap_adapter *hwna)
1394 struct netmap_bwrap_adapter *bna;
1395 struct netmap_adapter *na = NULL;
1396 struct netmap_adapter *hostna = NULL;
1399 bna = nm_os_malloc(sizeof(*bna));
1404 strlcpy(na->name, nr_name, sizeof(na->name));
1405 na->nm_register = netmap_bwrap_reg;
1406 na->nm_txsync = netmap_vale_vp_txsync;
1407 // na->nm_rxsync = netmap_bwrap_rxsync;
1408 na->nm_krings_create = netmap_vale_bwrap_krings_create;
1409 na->nm_krings_delete = netmap_vale_bwrap_krings_delete;
1410 na->nm_notify = netmap_bwrap_notify;
1411 bna->up.retry = 1; /* XXX maybe this should depend on the hwna */
1412 /* Set the mfs, needed on the VALE mismatch datapath. */
1413 bna->up.mfs = NM_BDG_MFS_DEFAULT;
1415 if (hwna->na_flags & NAF_HOST_RINGS) {
1416 hostna = &bna->host.up;
1417 hostna->nm_notify = netmap_bwrap_notify;
1418 bna->host.mfs = NM_BDG_MFS_DEFAULT;
1421 error = netmap_bwrap_attach_common(na, hwna);
1429 netmap_get_vale_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1430 struct netmap_mem_d *nmd, int create)
1432 return netmap_get_bdg_na(hdr, na, nmd, create, &vale_bdg_ops);
1436 /* creates a persistent VALE port */
1438 nm_vi_create(struct nmreq_header *hdr)
1440 struct nmreq_vale_newif *req =
1441 (struct nmreq_vale_newif *)(uintptr_t)hdr->nr_body;
1443 /* Build a nmreq_register out of the nmreq_vale_newif,
1444 * so that we can call netmap_vi_create() below. */
1445 struct nmreq_register regreq;
1446 bzero(&regreq, sizeof(regreq));
1447 regreq.nr_tx_slots = req->nr_tx_slots;
1448 regreq.nr_rx_slots = req->nr_rx_slots;
1449 regreq.nr_tx_rings = req->nr_tx_rings;
1450 regreq.nr_rx_rings = req->nr_rx_rings;
1451 regreq.nr_mem_id = req->nr_mem_id;
1452 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
1453 hdr->nr_body = (uintptr_t)&regreq;
1454 error = netmap_vi_create(hdr, 0 /* no autodelete */);
1455 hdr->nr_reqtype = NETMAP_REQ_VALE_NEWIF;
1456 hdr->nr_body = (uintptr_t)req;
1457 /* Write back to the original struct. */
1458 req->nr_tx_slots = regreq.nr_tx_slots;
1459 req->nr_rx_slots = regreq.nr_rx_slots;
1460 req->nr_tx_rings = regreq.nr_tx_rings;
1461 req->nr_rx_rings = regreq.nr_rx_rings;
1462 req->nr_mem_id = regreq.nr_mem_id;
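/*
 * Editorial sketch: userspace requests a persistent port with
 * NETMAP_REQ_VALE_NEWIF through NIOCCTRL (the vale-ctl tool shipped with
 * netmap wraps this).  The name must not carry the "vale" prefix; the
 * function name below is made up and error handling is minimal.  Zeroed
 * slot/ring counts select the defaults, as handled above.
 */
#if 0	/* illustrative userspace code */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <net/netmap.h>

static int
create_persistent_port(const char *name)	/* e.g. "myport0" */
{
	struct nmreq_header hdr;
	struct nmreq_vale_newif req;
	int fd = open("/dev/netmap", O_RDWR);
	int ret;

	memset(&hdr, 0, sizeof(hdr));
	memset(&req, 0, sizeof(req));
	hdr.nr_version = NETMAP_API;
	hdr.nr_reqtype = NETMAP_REQ_VALE_NEWIF;
	strlcpy(hdr.nr_name, name, sizeof(hdr.nr_name));
	hdr.nr_body = (uintptr_t)&req;
	ret = ioctl(fd, NIOCCTRL, &hdr);
	close(fd);
	return (ret);
}
#endif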
1466 /* remove a persistent VALE port from the system */
1468 nm_vi_destroy(const char *name)
1471 struct netmap_vp_adapter *vpna;
1474 ifp = ifunit_ref(name);
1478 /* make sure this is actually a VALE port */
1479 if (!NM_NA_VALID(ifp) || NA(ifp)->nm_register != netmap_vp_reg) {
1484 vpna = (struct netmap_vp_adapter *)NA(ifp);
1486 /* we can only destroy ports that were created via NETMAP_REQ_VALE_NEWIF */
1487 if (vpna->autodelete) {
1492 /* also make sure that nobody is using the interface */
1493 if (NETMAP_OWNED_BY_ANY(&vpna->up) ||
1494 vpna->up.na_refcount > 1 /* any ref besides the one in nm_vi_create()? */) {
1502 nm_prinf("destroying a persistent vale interface %s", ifp->if_xname);
1503 /* Linux requires all the references are released
1508 nm_os_vi_detach(ifp);
1518 nm_update_info(struct nmreq_register *req, struct netmap_adapter *na)
1520 req->nr_rx_rings = na->num_rx_rings;
1521 req->nr_tx_rings = na->num_tx_rings;
1522 req->nr_rx_slots = na->num_rx_desc;
1523 req->nr_tx_slots = na->num_tx_desc;
1524 return netmap_mem_get_info(na->nm_mem, &req->nr_memsize, NULL,
1530 * Create a virtual interface registered with the system.
1531 * The interface will be attached to a bridge later.
1534 netmap_vi_create(struct nmreq_header *hdr, int autodelete)
1536 struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
1538 struct netmap_vp_adapter *vpna;
1539 struct netmap_mem_d *nmd = NULL;
1542 if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) {
1546 /* don't include VALE prefix */
1547 if (!strncmp(hdr->nr_name, NM_BDG_NAME, strlen(NM_BDG_NAME)))
1549 if (strlen(hdr->nr_name) >= IFNAMSIZ) {
1552 ifp = ifunit_ref(hdr->nr_name);
1553 if (ifp) { /* already exists, cannot create a new one */
1556 if (NM_NA_VALID(ifp)) {
1557 int update_err = nm_update_info(req, NA(ifp));
1565 error = nm_os_vi_persist(hdr->nr_name, &ifp);
1570 if (req->nr_mem_id) {
1571 nmd = netmap_mem_find(req->nr_mem_id);
1577 /* netmap_vale_vp_create creates a struct netmap_vp_adapter */
1578 error = netmap_vale_vp_create(hdr, ifp, nmd, &vpna);
1580 if (netmap_debug & NM_DEBUG_VALE)
1581 nm_prerr("error %d", error);
1584 /* persist-specific routines */
1585 vpna->up.nm_bdg_ctl = netmap_vp_bdg_ctl;
1587 netmap_adapter_get(&vpna->up);
1589 vpna->autodelete = 1;
1591 NM_ATTACH_NA(ifp, &vpna->up);
1592 /* return the updated info */
1593 error = nm_update_info(req, &vpna->up);
1597 ND("returning nr_mem_id %d", req->nr_mem_id);
1599 netmap_mem_put(nmd);
1601 ND("created %s", ifp->if_xname);
1608 netmap_mem_put(nmd);
1610 nm_os_vi_detach(ifp);
1615 #endif /* WITH_VALE */