2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2014-2018 Giuseppe Lettieri
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #if defined(__FreeBSD__)
32 #include <sys/cdefs.h> /* prerequisite */
34 #include <sys/types.h>
35 #include <sys/errno.h>
36 #include <sys/param.h> /* defines used in kernel.h */
37 #include <sys/kernel.h> /* types used in module initialization */
38 #include <sys/malloc.h>
41 #include <sys/rwlock.h>
42 #include <sys/selinfo.h>
43 #include <sys/sysctl.h>
44 #include <sys/socket.h> /* sockaddrs */
46 #include <net/if_var.h>
47 #include <machine/bus.h> /* bus_dmamap_* */
48 #include <sys/refcount.h>
55 #elif defined(__APPLE__)
57 #warning OSX support is only partial
65 #error Unsupported platform
67 #endif /* unsupported */
73 #include <net/netmap.h>
74 #include <dev/netmap/netmap_kern.h>
75 #include <dev/netmap/netmap_mem2.h>
/* upper bounds for the slot/ring counts a pipe endpoint may request */
#define NM_PIPE_MAXSLOTS 4096
#define NM_PIPE_MAXRINGS 256
static int netmap_default_pipes = 0; /* ignored, kept for compatibility */
/* legacy sysctl knob; the value is read/written but no longer acted upon */
SYSCTL_DECL(_dev_netmap);
SYSCTL_INT(_dev_netmap, OID_AUTO, default_pipes, CTLFLAG_RW,
    &netmap_default_pipes, 0, "For compatibility only");
/* allocate (or grow) the pipe array in the parent adapter */
nm_pipe_alloc(struct netmap_adapter *na, u_int npipes)
	struct netmap_pipe_adapter **npa;
	if (npipes <= na->na_max_pipes)
		/* we already have more entries than requested */
	/* refuse to shrink below the number of live endpoints,
	 * or to grow beyond the global limit */
	if (npipes < na->na_next_pipe || npipes > NM_MAXPIPES)
	old_len = sizeof(struct netmap_pipe_adapter *)*na->na_max_pipes;
	len = sizeof(struct netmap_pipe_adapter *) * npipes;
	npa = nm_os_realloc(na->na_pipes, len, old_len);
	na->na_max_pipes = npipes;
/* deallocate the parent array in the parent adapter */
netmap_pipe_dealloc(struct netmap_adapter *na)
	if (na->na_next_pipe > 0) {
		/* should not happen: warn about endpoints still registered */
		D("freeing not empty pipe array for %s (%d dangling pipes)!", na->name,
	nm_os_free(na->na_pipes);
	/* reset the bookkeeping so a later nm_pipe_alloc() starts clean */
	na->na_max_pipes = 0;
	na->na_next_pipe = 0;
/* find a pipe endpoint with the given id among the parent's pipes */
static struct netmap_pipe_adapter *
netmap_pipe_find(struct netmap_adapter *parent, const char *pipe_id)
	struct netmap_pipe_adapter *na;
	/* linear scan over the registered endpoints */
	for (i = 0; i < parent->na_next_pipe; i++) {
		const char *na_pipe_id;
		na = parent->na_pipes[i];
		/* locate the last role bracket in the endpoint name:
		 * '{' marks a master, '}' marks a slave */
		na_pipe_id = strrchr(na->up.name,
			na->role == NM_PIPE_ROLE_MASTER ? '{' : '}');
		KASSERT(na_pipe_id != NULL, ("Invalid pipe name"));
		if (!strcmp(na_pipe_id, pipe_id)) {
/* add a new pipe endpoint to the parent array */
netmap_pipe_add(struct netmap_adapter *parent, struct netmap_pipe_adapter *na)
	if (parent->na_next_pipe >= parent->na_max_pipes) {
		/* grow geometrically, starting from 2 entries */
		u_int npipes = parent->na_max_pipes ? 2*parent->na_max_pipes : 2;
		int error = nm_pipe_alloc(parent, npipes);
	/* record the endpoint and remember its slot for O(1) removal */
	parent->na_pipes[parent->na_next_pipe] = na;
	na->parent_slot = parent->na_next_pipe;
	parent->na_next_pipe++;
/* remove the given pipe endpoint from the parent array */
netmap_pipe_remove(struct netmap_adapter *parent, struct netmap_pipe_adapter *na)
	n = --parent->na_next_pipe;
	/* keep the array compact: move the last entry into the vacated
	 * slot and update its cached index */
	if (n != na->parent_slot) {
		struct netmap_pipe_adapter **p =
			&parent->na_pipes[na->parent_slot];
		*p = parent->na_pipes[n];
		(*p)->parent_slot = na->parent_slot;
	parent->na_pipes[n] = NULL;
/* txsync for a pipe endpoint: move newly filled slots from our tx ring
 * straight to the peer rx ring; only complete packets are published. */
netmap_pipe_txsync(struct netmap_kring *txkring, int flags)
	struct netmap_kring *rxkring = txkring->pipe; /* peer rx kring */
	u_int k, lim = txkring->nkr_num_slots - 1, nk;
	int m; /* slots to transfer */
	int complete; /* did we see a complete packet ? */
	struct netmap_ring *txring = txkring->ring, *rxring = rxkring->ring;
	ND("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
	ND(20, "TX before: hwcur %d hwtail %d cur %d head %d tail %d",
		txkring->nr_hwcur, txkring->nr_hwtail,
		txkring->rcur, txkring->rhead, txkring->rtail);
	/* update the hwtail from the value the peer has published */
	txkring->nr_hwtail = txkring->pipe_tail;
	m = txkring->rhead - txkring->nr_hwcur; /* new slots */
		m += txkring->nkr_num_slots;
		/* nothing to send */
	/* nk tracks the slot after the last complete packet seen so far;
	 * it starts at lim + 1 meaning "no complete packet yet" */
	for (k = txkring->nr_hwcur, nk = lim + 1, complete = 0; m;
			m--, k = nm_next(k, lim), nk = (complete ? k : nk)) {
		struct netmap_slot *rs = &rxring->slot[k];
		struct netmap_slot *ts = &txring->slot[k];
		if (ts->flags & NS_BUF_CHANGED) {
			ts->flags &= ~NS_BUF_CHANGED;
		/* a packet ends on a slot without NS_MOREFRAG */
		complete = !(ts->flags & NS_MOREFRAG);
	txkring->nr_hwcur = k;
	ND(20, "TX after : hwcur %d hwtail %d cur %d head %d tail %d k %d",
		txkring->nr_hwcur, txkring->nr_hwtail,
		txkring->rcur, txkring->rhead, txkring->rtail, k);
	/* nk <= lim iff at least one complete packet was transferred */
	if (likely(nk <= lim)) {
		mb(); /* make sure the slots are updated before publishing them */
		rxkring->pipe_tail = nk; /* only publish complete packets */
		rxkring->nm_notify(rxkring, 0);
/* rxsync for a pipe endpoint: return the slots the user has consumed
 * to the peer tx ring, then notify the peer. */
netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags)
	struct netmap_kring *txkring = rxkring->pipe; /* peer tx kring */
	u_int k, lim = rxkring->nkr_num_slots - 1;
	int m; /* slots to release */
	struct netmap_ring *txring = txkring->ring, *rxring = rxkring->ring;
	ND("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
	ND(20, "RX before: hwcur %d hwtail %d cur %d head %d tail %d",
		rxkring->nr_hwcur, rxkring->nr_hwtail,
		rxkring->rcur, rxkring->rhead, rxkring->rtail);
	/* update the hwtail from the value the peer has published */
	rxkring->nr_hwtail = rxkring->pipe_tail;
	m = rxkring->rhead - rxkring->nr_hwcur; /* released slots */
		m += rxkring->nkr_num_slots;
		/* nothing to release */
	/* walk the released slots, propagating buffer changes back */
	for (k = rxkring->nr_hwcur; m; m--, k = nm_next(k, lim)) {
		struct netmap_slot *rs = &rxring->slot[k];
		struct netmap_slot *ts = &txring->slot[k];
		if (rs->flags & NS_BUF_CHANGED) {
			/* copy the slot and report the buffer change */
			rs->flags &= ~NS_BUF_CHANGED;
	mb(); /* make sure the slots are updated before publishing them */
	txkring->pipe_tail = nm_prev(k, lim);
	rxkring->nr_hwcur = k;
	ND(20, "RX after : hwcur %d hwtail %d cur %d head %d tail %d k %d",
		rxkring->nr_hwcur, rxkring->nr_hwtail,
		rxkring->rcur, rxkring->rhead, rxkring->rtail, k);
	/* notify the peer tx ring */
	txkring->nm_notify(txkring, 0);
/* Pipe endpoints are created and destroyed together, so that endpoints do not
287 * have to check for the existence of their peer at each ?xsync.
289 * To play well with the existing netmap infrastructure (refcounts etc.), we
290 * adopt the following strategy:
292 * 1) The first endpoint that is created also creates the other endpoint and
293 * grabs a reference to it.
295 * state A) user1 --> endpoint1 --> endpoint2
297 * 2) If, starting from state A, endpoint2 is then registered, endpoint1 gives
298 * its reference to the user:
300 * state B) user1 --> endpoint1 endpoint2 <--- user2
302 * 3) Assume that, starting from state B endpoint2 is closed. In the unregister
303 * callback endpoint2 notes that endpoint1 is still active and adds a reference
304 * from endpoint1 to itself. When user2 then releases her own reference,
305 * endpoint2 is not destroyed and we are back to state A. A symmetrical state
306 * would be reached if endpoint1 were released instead.
308 * 4) If, starting from state A, endpoint1 is closed, the destructor notes that
309 * it owns a reference to endpoint2 and releases it.
311 * Something similar goes on for the creation and destruction of the krings.
315 /* netmap_pipe_krings_create.
317 * There are two cases:
323 * and we are e1. We have to create both sets
330 * and we are e2. e1 is certainly registered and our
331 * krings already exist. Nothing to do.
/* see the comment above: create the krings for both endpoints if we
 * are the first one to register (case 1), otherwise nothing to do. */
netmap_pipe_krings_create(struct netmap_adapter *na)
	struct netmap_pipe_adapter *pna =
		(struct netmap_pipe_adapter *)na;
	struct netmap_adapter *ona = &pna->peer->up; /* the other endpoint */
		ND("%p: case 1, create both ends", na);
		error = netmap_krings_create(na, 0);
		/* create the krings of the other end */
		error = netmap_krings_create(ona, 0);
		/* cross link the krings and initialize the pipe_tails */
			enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */
			for (i = 0; i < nma_get_nrings(na, t); i++) {
				/* our ring of type t pairs with the peer ring
				 * of the opposite type */
				struct netmap_kring *k1 = NMR(na, t)[i],
					*k2 = NMR(ona, r)[i];
				/* mark all peer-adapter rings as fake */
				k2->nr_kflags |= NKR_FAKERING;
				/* seed both pipe_tails from the current hwtails */
				k1->pipe_tail = k1->nr_hwtail;
				k2->pipe_tail = k2->nr_hwtail;
	/* error path: undo our own kring allocation */
	netmap_krings_delete(na);
383 * There are two cases on registration (onoff==1)
389 * and we are e1. Create the needed rings of the
394 * usr1 --> e1 --> e2 <-- usr2
396 * and we are e2. Drop the ref e1 is holding.
398 * There are two additional cases on unregister (onoff==0)
404 * and we are e1. Nothing special to do, e2 will
405 * be cleaned up by the destructor of e1.
409 * usr1 --> e1 e2 <-- usr2
411 * and we are either e1 or e2. Add a ref from the
/* register/unregister callback; see the case analysis in the comment
 * above for how references move between the endpoints and the users. */
netmap_pipe_reg(struct netmap_adapter *na, int onoff)
	struct netmap_pipe_adapter *pna =
		(struct netmap_pipe_adapter *)na;
	struct netmap_adapter *ona = &pna->peer->up; /* the other endpoint */
	ND("%p: onoff %d", na, onoff);
		for (i = 0; i < nma_get_nrings(na, t); i++) {
			struct netmap_kring *kring = NMR(na, t)[i];
			if (nm_kring_pending_on(kring)) {
				/* mark the peer ring as needed */
				kring->pipe->nr_kflags |= NKR_NEEDRING;
		/* create all missing needed rings on the other end.
		 * Either our end, or the other, has been marked as
		 * fake, so the allocation will not be done twice.
		 */
		error = netmap_mem_rings_create(ona);
		/* In case of no error we put our rings in netmap mode */
		for (i = 0; i < nma_get_nrings(na, t); i++) {
			struct netmap_kring *kring = NMR(na, t)[i];
			if (nm_kring_pending_on(kring)) {
				struct netmap_kring *sring, *dring;
				kring->nr_mode = NKR_NETMAP_ON;
				if ((kring->nr_kflags & NKR_FAKERING) &&
				    (kring->pipe->nr_kflags & NKR_FAKERING)) {
					/* this is a re-open of a pipe
					 * end-point kept alive by the other end.
					 * We need to leave everything as it is
					 */
				/* copy the buffers from the non-fake ring */
				if (kring->nr_kflags & NKR_FAKERING) {
					memcpy(dring->ring->slot,
						sizeof(struct netmap_slot) *
						sring->nkr_num_slots);
				/* mark both rings as fake and needed,
				 * so that buffers will not be
				 * deleted by the standard machinery
				 * (we will delete them by ourselves in
				 * netmap_pipe_krings_delete)
				 */
					(NKR_FAKERING | NKR_NEEDRING);
					(NKR_FAKERING | NKR_NEEDRING);
				kring->nr_mode = NKR_NETMAP_ON;
		/* first open: flag the whole adapter as active */
		if (na->active_fds == 0)
			na->na_flags |= NAF_NETMAP_ON;
		/* last close: clear the adapter-wide flag */
		if (na->active_fds == 0)
			na->na_flags &= ~NAF_NETMAP_ON;
		for (i = 0; i < nma_get_nrings(na, t); i++) {
			struct netmap_kring *kring = NMR(na, t)[i];
			if (nm_kring_pending_off(kring)) {
				kring->nr_mode = NKR_NETMAP_OFF;
	if (na->active_fds) {
		ND("active_fds %d", na->active_fds);
		ND("%p: case 1.a or 2.a, nothing to do", na);
		/* we were keeping the peer alive on behalf of the user:
		 * hand over that reference */
		ND("%p: case 1.b, drop peer", na);
		pna->peer->peer_ref = 0;
		netmap_adapter_put(na);
		/* keep ourselves alive through the peer instead */
		ND("%p: case 2.b, grab peer", na);
		netmap_adapter_get(na);
		pna->peer->peer_ref = 1;
524 /* netmap_pipe_krings_delete.
526 * There are two cases:
532 * and we are e1 (e2 is not registered, so krings_delete cannot be
537 * usr1 --> e1 e2 <-- usr2
539 * and we are either e1 or e2.
541 * In the former case we have to also delete the krings of e2;
542 * in the latter case we do nothing.
/* see the comment above: delete the krings of both endpoints when we
 * hold the peer reference (case 1), otherwise do nothing (case 2). */
netmap_pipe_krings_delete(struct netmap_adapter *na)
	struct netmap_pipe_adapter *pna =
		(struct netmap_pipe_adapter *)na;
	struct netmap_adapter *sna, *ona; /* na of the other end */
	if (!pna->peer_ref) {
		ND("%p: case 2, kept alive by peer", na);
	ona = &pna->peer->up;
	ND("%p: case 1, deleting everything", na);
	/* To avoid double-frees we zero-out all the buffers in the kernel part
	 * of each ring. The reason is this: If the user is behaving correctly,
	 * all buffers are found in exactly one slot in the userspace part of
	 * some ring. If the user is not behaving correctly, we cannot release
	 * buffers cleanly anyway. In the latter case, the allocator will
	 * return to a clean state only when all its users will close.
	 */
		for (i = 0; i < nma_get_nrings(sna, t); i++) {
			struct netmap_kring *kring = NMR(sna, t)[i];
			struct netmap_ring *ring = kring->ring;
			uint32_t j, lim = kring->nkr_num_slots - 1;
			ND("%s ring %p hwtail %u hwcur %u",
				kring->name, ring, kring->nr_hwtail, kring->nr_hwcur);
			/* on rx rings the slot at pipe_tail is also kernel-owned */
			if (kring->tx == NR_RX)
				ring->slot[kring->pipe_tail].buf_idx = 0;
			/* clear the buf_idx of every kernel-owned slot */
			for (j = nm_next(kring->pipe_tail, lim);
					j != kring->nr_hwcur;
				ND("%s[%d] %u", kring->name, j, ring->slot[j].buf_idx);
				ring->slot[j].buf_idx = 0;
			kring->nr_kflags &= ~(NKR_FAKERING | NKR_NEEDRING);
	if (sna != ona && ona->tx_rings) {
	netmap_mem_rings_delete(na);
	netmap_krings_delete(na); /* also zeroes tx_rings etc. */
	if (ona->tx_rings == NULL) {
		/* already deleted, we must be on a
		 * cleanup-after-error path */
	netmap_mem_rings_delete(ona);
	netmap_krings_delete(ona);
/* destructor: release the references this endpoint holds on its peer,
 * its parent adapter and the parent ifp. */
netmap_pipe_dtor(struct netmap_adapter *na)
	struct netmap_pipe_adapter *pna =
		(struct netmap_pipe_adapter *)na;
	ND("%p %p", na, pna->parent_ifp);
	ND("%p: clean up peer", na);
	/* release our reference to the peer endpoint */
	netmap_adapter_put(&pna->peer->up);
	/* only the master is registered in the parent's pipe array
	 * (see netmap_pipe_add() usage in netmap_get_pipe_na()) */
	if (pna->role == NM_PIPE_ROLE_MASTER)
		netmap_pipe_remove(pna->parent, pna);
	if_rele(pna->parent_ifp);
	netmap_adapter_put(pna->parent);
/* lookup (and possibly create) the pipe endpoint named in hdr->nr_name.
 * On success *na points at the requested endpoint with a reference held. */
netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
		struct netmap_mem_d *nmd, int create)
	struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
	struct netmap_adapter *pna; /* parent adapter */
	struct netmap_pipe_adapter *mna, *sna, *reqna;
	struct ifnet *ifp = NULL;
	const char *pipe_id = NULL;
	int error, retries = 0;
	/* Try to parse the pipe syntax 'xx{yy' or 'xx}yy'. */
	cbra = strrchr(hdr->nr_name, '{');
		role = NM_PIPE_ROLE_MASTER;
		cbra = strrchr(hdr->nr_name, '}');
			role = NM_PIPE_ROLE_SLAVE;
	if (*pipe_id == '\0' || cbra == hdr->nr_name) {
		/* Bracket is the last character, so pipe name is missing;
		 * or bracket is the first character, so base port name
	if (req->nr_mode != NR_REG_ALL_NIC && req->nr_mode != NR_REG_ONE_NIC) {
		/* We only accept modes involving hardware rings. */
	/* first, try to find the parent adapter */
		char nr_name_orig[NETMAP_REQ_IFNAMSIZ];
		/* Temporarily remove the pipe suffix. */
		strlcpy(nr_name_orig, hdr->nr_name, sizeof(nr_name_orig));
		error = netmap_get_na(hdr, &pna, &ifp, nmd, create);
		/* Restore the pipe suffix. */
		strlcpy(hdr->nr_name, nr_name_orig, sizeof(hdr->nr_name));
		/* only retry once, and only when the port did not exist */
		if (error != ENXIO || retries++) {
			ND("parent lookup failed: %d", error);
		ND("try to create a persistent vale port");
		/* create a persistent vale port and try again */
		create_error = netmap_vi_create(hdr, 1 /* autodelete */);
		strlcpy(hdr->nr_name, nr_name_orig, sizeof(hdr->nr_name));
		if (create_error && create_error != EEXIST) {
			if (create_error != EOPNOTSUPP) {
				D("failed to create a persistent vale port: %d", create_error);
	if (NETMAP_OWNED_BY_KERN(pna)) {
	/* next, lookup the pipe id in the parent list */
	mna = netmap_pipe_find(pna, pipe_id);
		/* the find returns the master; the slave is its peer */
		if (mna->role == role) {
			ND("found %s directly at %d", pipe_id, mna->parent_slot);
			ND("found %s indirectly at %d", pipe_id, mna->parent_slot);
		/* the pipe we have found already holds a ref to the parent,
		 * so we need to drop the one we got from netmap_get_na()
		netmap_unget_na(pna, ifp);
	ND("pipe %s not found, create %d", pipe_id, create);
	/* we create both master and slave.
	 * The endpoint we were asked for holds a reference to
	mna = nm_os_malloc(sizeof(*mna));
	snprintf(mna->up.name, sizeof(mna->up.name), "%s{%s", pna->name, pipe_id);
	mna->role = NM_PIPE_ROLE_MASTER;
	mna->parent_ifp = ifp;
	/* install the pipe callbacks on the master endpoint */
	mna->up.nm_txsync = netmap_pipe_txsync;
	mna->up.nm_rxsync = netmap_pipe_rxsync;
	mna->up.nm_register = netmap_pipe_reg;
	mna->up.nm_dtor = netmap_pipe_dtor;
	mna->up.nm_krings_create = netmap_pipe_krings_create;
	mna->up.nm_krings_delete = netmap_pipe_krings_delete;
	mna->up.nm_mem = netmap_mem_get(pna->nm_mem);
	mna->up.na_flags |= NAF_MEM_OWNER;
	mna->up.na_lut = pna->na_lut;
	/* clamp the requested ring/slot counts into the supported ranges */
	mna->up.num_tx_rings = req->nr_tx_rings;
	nm_bound_var(&mna->up.num_tx_rings, 1,
		1, NM_PIPE_MAXRINGS, NULL);
	mna->up.num_rx_rings = req->nr_rx_rings;
	nm_bound_var(&mna->up.num_rx_rings, 1,
		1, NM_PIPE_MAXRINGS, NULL);
	mna->up.num_tx_desc = req->nr_tx_slots;
	nm_bound_var(&mna->up.num_tx_desc, pna->num_tx_desc,
		1, NM_PIPE_MAXSLOTS, NULL);
	mna->up.num_rx_desc = req->nr_rx_slots;
	nm_bound_var(&mna->up.num_rx_desc, pna->num_rx_desc,
		1, NM_PIPE_MAXSLOTS, NULL);
	error = netmap_attach_common(&mna->up);
	/* register the master with the parent */
	error = netmap_pipe_add(pna, mna);
	/* create the slave (same object type, so sizeof(*mna) == sizeof(*sna)) */
	sna = nm_os_malloc(sizeof(*mna));
	/* most fields are the same, copy from master and then fix */
	sna->up.nm_mem = netmap_mem_get(mna->up.nm_mem);
	/* swap the number of tx/rx rings and slots */
	sna->up.num_tx_rings = mna->up.num_rx_rings;
	sna->up.num_tx_desc = mna->up.num_rx_desc;
	sna->up.num_rx_rings = mna->up.num_tx_rings;
	sna->up.num_rx_desc = mna->up.num_tx_desc;
	snprintf(sna->up.name, sizeof(sna->up.name), "%s}%s", pna->name, pipe_id);
	sna->role = NM_PIPE_ROLE_SLAVE;
	error = netmap_attach_common(&sna->up);
	/* join the two endpoints */
	/* we already have a reference to the parent, but we
	 * need another one for the other endpoint we created
	netmap_adapter_get(pna);
	/* likewise for the ifp, if any */
	/* the endpoint we were NOT asked for keeps a ref to its peer
	 * (state A in the lifecycle comment above) */
	if (role == NM_PIPE_ROLE_MASTER) {
		netmap_adapter_get(&sna->up);
		netmap_adapter_get(&mna->up);
	ND("created master %p and slave %p", mna, sna);
	ND("pipe %s %s at %p", pipe_id,
		(reqna->role == NM_PIPE_ROLE_MASTER ? "master" : "slave"), reqna);
	netmap_adapter_get(*na);
	/* keep the reference to the parent.
	 * It will be released by the req destructor
	/* error paths: unwind in reverse order of construction */
	netmap_pipe_remove(pna, mna);
	netmap_unget_na(pna, ifp);
843 #endif /* WITH_PIPES */