2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2019 Vincenzo Maffione <vmaffione@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
19 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
20 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
21 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
22 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
23 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
24 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
25 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 * This file implements multiple network backends (tap, netmap, ...),
32 * to be used by network frontends such as virtio-net and e1000.
33 * The API to access the backend (e.g. send/receive packets, negotiate
34 * features) is exported by net_backends.h.
37 #include <sys/types.h> /* u_short etc */
38 #ifndef WITHOUT_CAPSICUM
39 #include <sys/capsicum.h>
41 #include <sys/cdefs.h>
42 #include <sys/ioctl.h>
47 #include <net/netmap.h>
48 #include <net/netmap_virt.h>
49 #define NETMAP_WITH_LIBS
50 #include <net/netmap_user.h>
52 #ifndef WITHOUT_CAPSICUM
53 #include <capsicum_helpers.h>
66 #include <pthread_np.h>
73 #include "net_backends.h"
75 #include <sys/linker_set.h>
78 * Each network backend registers a set of function pointers that are
79 * used to implement the net backends API.
80 * This might need to be exposed if we implement backends in separate files.
83 const char *prefix; /* prefix matching this backend */
86 * Routines used to initialize and cleanup the resources needed
87 * by a backend. The cleanup function is used internally,
88 * and should not be called by the frontend.
90 int (*init)(struct net_backend *be, const char *devname,
91 net_be_rxeof_t cb, void *param);
92 void (*cleanup)(struct net_backend *be);
95 * Called to serve a guest transmit request. The scatter-gather
96 * vector provided by the caller has 'iovcnt' elements and contains
99 ssize_t (*send)(struct net_backend *be, struct iovec *iov, int iovcnt);
102 * Called to receive a packet from the backend. When the function
103 * returns a positive value 'len', the scatter-gather vector
104 * provided by the caller contains a packet with such length.
105 * The function returns 0 if the backend doesn't have a new packet to
108 ssize_t (*recv)(struct net_backend *be, struct iovec *iov, int iovcnt);
111 * Ask the backend for the virtio-net features it is able to
112 * support. Possible features are TSO, UFO and checksum offloading
113 * in both rx and tx direction and for both IPv4 and IPv6.
115 uint64_t (*get_cap)(struct net_backend *be);
118 * Tell the backend to enable/disable the specified virtio-net
119 * features (capabilities).
121 int (*set_cap)(struct net_backend *be, uint64_t features,
122 unsigned int vnet_hdr_len);
124 struct pci_vtnet_softc *sc;
128 * Length of the virtio-net header used by the backend and the
129 * frontend, respectively. A zero value means that the header
132 unsigned int be_vnet_hdr_len;
133 unsigned int fe_vnet_hdr_len;
135 /* Size of backend-specific private data. */
138 /* Room for backend-specific data. */
/*
 * Linker set collecting every registered backend descriptor; netbe_init()
 * iterates it with SET_FOREACH to match the user-supplied prefix.
 */
142 SET_DECLARE(net_backend_set, struct net_backend);
/* Length (bytes) of the virtio-net rx header exchanged with the guest. */
144 #define VNET_HDR_LEN sizeof(struct virtio_net_rxhdr)
/* Simple warning printf; 'params' must be a parenthesized argument list. */
146 #define WPRINTF(params) printf params
/*
 * Release the tap backend's resources: deregister the read event from
 * the mevent loop.  (Excerpt note: surrounding lines — e.g. the NULL
 * check on priv->mevp and closing be->fd — are elided from this view.)
 */
157 tap_cleanup(struct net_backend *be)
159 	struct tap_priv *priv = (struct tap_priv *)be->opaque;
162 	mevent_delete(priv->mevp);
/*
 * Open /dev/<devname>, switch the descriptor to non-blocking mode,
 * optionally sandbox it with Capsicum rights, and register the rx
 * callback with the mevent loop.  Returns via elided paths (the error
 * returns and cleanup gotos are not visible in this excerpt).
 *
 * @devname tap device name without the /dev/ prefix (e.g. "tap0").
 * @cb/@param rx-ready callback and its argument; cb must be non-NULL.
 */
171 tap_init(struct net_backend *be, const char *devname,
172 net_be_rxeof_t cb, void *param)
174 	struct tap_priv *priv = (struct tap_priv *)be->opaque;
178 #ifndef WITHOUT_CAPSICUM
/* The event loop is the only rx notification mechanism: refuse NULL cb. */
183 		WPRINTF(("TAP backend requires non-NULL callback\n"));
187 	strcpy(tbuf, "/dev/");
188 	strlcat(tbuf, devname, sizeof(tbuf));
190 	fd = open(tbuf, O_RDWR);
192 		WPRINTF(("open of tap device %s failed\n", tbuf));
197 	 * Set non-blocking and register for read
198 	 * notifications with the event loop
/* FIONBIO with opt!=0 puts the fd in non-blocking mode (opt set off-view). */
200 	if (ioctl(fd, FIONBIO, &opt) < 0) {
201 		WPRINTF(("tap device O_NONBLOCK failed\n"));
205 #ifndef WITHOUT_CAPSICUM
/* Limit the fd to event/read/write so Capsicum can sandbox the process. */
206 	cap_rights_init(&rights, CAP_EVENT, CAP_READ, CAP_WRITE);
207 	if (caph_rights_limit(fd, &rights) == -1)
208 		errx(EX_OSERR, "Unable to apply rights for sandbox");
211 	priv->mevp = mevent_add(fd, EVF_READ, cb, param);
212 	if (priv->mevp == NULL) {
213 		WPRINTF(("Could not register event\n"));
227 * Called to send a buffer chain out to the tap device
/*
 * Transmit path: hand the guest's scatter-gather buffer straight to the
 * kernel with writev(); returns writev()'s result (bytes written or -1).
 */
230 tap_send(struct net_backend *be, struct iovec *iov, int iovcnt)
232 	return (writev(be->fd, iov, iovcnt));
/*
 * Receive path: read one packet from the tap fd into the caller's iovec.
 * The fd is non-blocking, so EWOULDBLOCK means "no packet available"
 * (the elided branch presumably converts that to a 0 return — confirm).
 */
236 tap_recv(struct net_backend *be, struct iovec *iov, int iovcnt)
240 	/* Should never be called without a valid tap fd */
241 	assert(be->fd != -1);
243 	ret = readv(be->fd, iov, iovcnt);
245 	if (ret < 0 && errno == EWOULDBLOCK) {
/* The tap backend advertises no virtio-net offload capabilities. */
253 tap_get_cap(struct net_backend *be)
256 	return (0); /* no capabilities for now */
/*
 * Since tap advertises no capabilities, only the all-zero request
 * (no features, no vnet header) is accepted; anything else fails (-1).
 */
260 tap_set_cap(struct net_backend *be, uint64_t features,
261 unsigned vnet_hdr_len)
264 	return ((features || vnet_hdr_len) ? -1 : 0);
/*
 * Backend descriptors for tap and vmnet.  The .prefix, .init, .send and
 * .recv initializers are elided from this excerpt; both templates share
 * the tap_* implementation and differ (per the comment below) only in
 * the prefix they match.
 */
267 static struct net_backend tap_backend = {
269 	.priv_size = sizeof(struct tap_priv),
271 	.cleanup = tap_cleanup,
274 	.get_cap = tap_get_cap,
275 	.set_cap = tap_set_cap,
278 /* A clone of the tap backend, with a different prefix. */
279 static struct net_backend vmnet_backend = {
281 	.priv_size = sizeof(struct tap_priv),
283 	.cleanup = tap_cleanup,
286 	.get_cap = tap_get_cap,
287 	.set_cap = tap_set_cap,
/* Register both descriptors in the net_backend_set linker set. */
290 DATA_SET(net_backend_set, tap_backend);
291 DATA_SET(net_backend_set, vmnet_backend);
297 /* The virtio-net features supported by netmap. */
298 #define NETMAP_FEATURES (VIRTIO_NET_F_CSUM | VIRTIO_NET_F_HOST_TSO4 | \
299 VIRTIO_NET_F_HOST_TSO6 | VIRTIO_NET_F_HOST_UFO | \
300 VIRTIO_NET_F_GUEST_CSUM | VIRTIO_NET_F_GUEST_TSO4 | \
301 VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_UFO)
/*
 * NOTE(review): the fields below belong to `struct netmap_priv`; the
 * struct opener and its other members (nmd, memid, mevp, cb_param —
 * referenced by netmap_init below) are elided from this excerpt.
 */
304 	char ifname[IFNAMSIZ];
307 	struct netmap_ring *rx;
308 	struct netmap_ring *tx;
/*
 * Zero a struct nmreq and fill in the interface name and the netmap API
 * version, ready to be passed to a NIOC* ioctl.
 */
315 nmreq_init(struct nmreq *req, char *ifname)
318 	memset(req, 0, sizeof(*req));
319 	strlcpy(req->nr_name, ifname, sizeof(req->nr_name));
320 	req->nr_version = NETMAP_API;
/*
 * Ask the kernel netmap port to use a virtio-net header of the given
 * length, via the NETMAP_BDG_VNET_HDR sub-command of NIOCREGIF.
 * On success, record the new length in be->be_vnet_hdr_len.  The error
 * return path is elided from this excerpt.
 */
324 netmap_set_vnet_hdr_len(struct net_backend *be, int vnet_hdr_len)
328 	struct netmap_priv *priv = (struct netmap_priv *)be->opaque;
330 	nmreq_init(&req, priv->ifname);
331 	req.nr_cmd = NETMAP_BDG_VNET_HDR;
332 	req.nr_arg1 = vnet_hdr_len;
333 	err = ioctl(be->fd, NIOCREGIF, &req);
335 		WPRINTF(("Unable to set vnet header length %d\n",
340 	be->be_vnet_hdr_len = vnet_hdr_len;
/*
 * Probe whether the netmap port supports a given vnet header length:
 * trivially true if already in use; otherwise try to set it and then
 * restore the previous length.  (The return statements are elided;
 * presumably returns nonzero on support — confirm against full source.)
 */
346 netmap_has_vnet_hdr_len(struct net_backend *be, unsigned vnet_hdr_len)
348 	int prev_hdr_len = be->be_vnet_hdr_len;
/* Already configured at this length: supported by definition. */
351 	if (vnet_hdr_len == prev_hdr_len) {
355 	ret = netmap_set_vnet_hdr_len(be, vnet_hdr_len);
/* Probe only: put the previous header length back. */
360 	netmap_set_vnet_hdr_len(be, prev_hdr_len);
/*
 * Advertise the netmap offload feature set only if the port can handle
 * the full virtio-net header; otherwise advertise nothing.
 */
366 netmap_get_cap(struct net_backend *be)
369 	return (netmap_has_vnet_hdr_len(be, VNET_HDR_LEN) ?
370 	    NETMAP_FEATURES : 0);
/*
 * Enabling features for netmap reduces to configuring the vnet header
 * length on the port; the feature bits themselves need no extra action.
 */
374 netmap_set_cap(struct net_backend *be, uint64_t features,
375 unsigned vnet_hdr_len)
378 	return (netmap_set_vnet_hdr_len(be, vnet_hdr_len));
/*
 * Open the netmap port named by @devname with nm_open() (NETMAP_NO_TX_POLL:
 * tx is flushed explicitly via NIOCTXSYNC in netmap_send), cache the first
 * tx/rx rings, and register the rx callback with the mevent loop.
 * Error-path returns are elided from this excerpt.
 */
382 netmap_init(struct net_backend *be, const char *devname,
383 net_be_rxeof_t cb, void *param)
385 	struct netmap_priv *priv = (struct netmap_priv *)be->opaque;
387 	strlcpy(priv->ifname, devname, sizeof(priv->ifname));
388 	priv->ifname[sizeof(priv->ifname) - 1] = '\0';
390 	priv->nmd = nm_open(priv->ifname, NULL, NETMAP_NO_TX_POLL, NULL);
391 	if (priv->nmd == NULL) {
392 		WPRINTF(("Unable to nm_open(): interface '%s', errno (%s)\n",
393 		    devname, strerror(errno)));
/* Remember the netmap memory region id and the first ring pair. */
398 	priv->memid = priv->nmd->req.nr_arg2;
399 	priv->tx = NETMAP_TXRING(priv->nmd->nifp, 0);
400 	priv->rx = NETMAP_RXRING(priv->nmd->nifp, 0);
402 	priv->cb_param = param;
403 	be->fd = priv->nmd->fd;
405 	priv->mevp = mevent_add(be->fd, EVF_READ, cb, param);
406 	if (priv->mevp == NULL) {
407 		WPRINTF(("Could not register event\n"));
/*
 * Tear down the netmap backend: deregister the rx event.  (Excerpt note:
 * the NULL guard and the nm_close() call are elided from this view.)
 */
415 netmap_cleanup(struct net_backend *be)
417 	struct netmap_priv *priv = (struct netmap_priv *)be->opaque;
420 	mevent_delete(priv->mevp);
/*
 * Transmit a guest packet on the netmap tx ring.  Each iovec fragment is
 * copied into one or more netmap slots (NS_MOREFRAG chains multi-slot
 * packets); if the ring fills up mid-packet the packet is dropped.
 * Finally the ring head/cur are advanced and NIOCTXSYNC flushes the ring
 * (the fd was opened with NETMAP_NO_TX_POLL, so this explicit sync is
 * the only tx kick).  Several declarations and the drop/return paths are
 * elided from this excerpt.
 */
429 netmap_send(struct net_backend *be, struct iovec *iov,
432 	struct netmap_priv *priv = (struct netmap_priv *)be->opaque;
433 	struct netmap_ring *ring;
/* head == tail means the tx ring has no free slot at all: drop. */
443 	if (head == ring->tail) {
444 		WPRINTF(("No space, drop %zu bytes\n", count_iov(iov, iovcnt)));
447 	nm_buf = NETMAP_BUF(ring, ring->slot[head].buf_idx);
448 	nm_buf_size = ring->nr_buf_size;
451 	for (j = 0; j < iovcnt; j++) {
452 		int iov_frag_size = iov[j].iov_len;
453 		void *iov_frag_buf = iov[j].iov_base;
455 		totlen += iov_frag_size;
458 		 * Split each iovec fragment over more netmap slots, if
/* Copy as much of the fragment as fits in the current slot buffer. */
464 			copylen = iov_frag_size < nm_buf_size ? iov_frag_size : nm_buf_size;
465 			memcpy(nm_buf, iov_frag_buf, copylen);
467 			iov_frag_buf += copylen;
468 			iov_frag_size -= copylen;
470 			nm_buf_size -= copylen;
471 			nm_buf_len += copylen;
473 			if (iov_frag_size == 0) {
/* Slot is full but the packet continues: chain with NS_MOREFRAG. */
477 			ring->slot[head].len = nm_buf_len;
478 			ring->slot[head].flags = NS_MOREFRAG;
479 			head = nm_ring_next(ring, head);
480 			if (head == ring->tail) {
482 				 * We ran out of netmap slots while
483 				 * splitting the iovec fragments.
485 				WPRINTF(("No space, drop %zu bytes\n",
486 				    count_iov(iov, iovcnt)));
489 			nm_buf = NETMAP_BUF(ring, ring->slot[head].buf_idx);
490 			nm_buf_size = ring->nr_buf_size;
495 	/* Complete the last slot, which must not have NS_MOREFRAG set. */
496 	ring->slot[head].len = nm_buf_len;
497 	ring->slot[head].flags = 0;
498 	head = nm_ring_next(ring, head);
500 	/* Now update ring->head and ring->cur. */
501 	ring->head = ring->cur = head;
503 	ioctl(be->fd, NIOCTXSYNC, NULL);
/*
 * Receive one packet from the netmap rx ring into the caller's iovec.
 * A packet may span several slots (chained via NS_MOREFRAG); each slot's
 * payload is copied into the iovec, advancing through iovec entries as
 * they fill.  If the iovec runs out of room the remainder is dropped.
 * The consumed slots are released by advancing ring head/cur.  Several
 * declarations, the iovec-advance statements and the return are elided
 * from this excerpt.
 */
509 netmap_recv(struct net_backend *be, struct iovec *iov, int iovcnt)
511 	struct netmap_priv *priv = (struct netmap_priv *)be->opaque;
512 	struct netmap_slot *slot = NULL;
513 	struct netmap_ring *ring;
523 	iov_frag_buf = iov->iov_base;
524 	iov_frag_size = iov->iov_len;
/* Empty ring: no packet available. */
530 		if (head == ring->tail) {
534 		slot = ring->slot + head;
535 		nm_buf = NETMAP_BUF(ring, slot->buf_idx);
536 		nm_buf_len = slot->len;
/* Copy min(slot remainder, iovec-entry remainder). */
539 			int copylen = nm_buf_len < iov_frag_size ?
540 			    nm_buf_len : iov_frag_size;
542 			memcpy(iov_frag_buf, nm_buf, copylen);
544 			nm_buf_len -= copylen;
545 			iov_frag_buf += copylen;
546 			iov_frag_size -= copylen;
549 			if (nm_buf_len == 0) {
556 				/* No space to receive. */
557 				WPRINTF(("Short iov, drop %zd bytes\n",
/* Move on to the next iovec entry. */
561 			iov_frag_buf = iov->iov_base;
562 			iov_frag_size = iov->iov_len;
565 		head = nm_ring_next(ring, head);
/* Keep consuming slots while the packet is chained with NS_MOREFRAG. */
567 	} while (slot->flags & NS_MOREFRAG);
569 	/* Release slots to netmap. */
570 	ring->head = ring->cur = head;
/*
 * Backend descriptors for netmap and VALE ports.  The .prefix, .init,
 * .send and .recv initializers are elided from this excerpt; both share
 * the netmap_* implementation and differ (per the comment below) only
 * in the prefix they match.
 */
575 static struct net_backend netmap_backend = {
577 	.priv_size = sizeof(struct netmap_priv),
579 	.cleanup = netmap_cleanup,
582 	.get_cap = netmap_get_cap,
583 	.set_cap = netmap_set_cap,
586 /* A clone of the netmap backend, with a different prefix. */
587 static struct net_backend vale_backend = {
589 	.priv_size = sizeof(struct netmap_priv),
591 	.cleanup = netmap_cleanup,
594 	.get_cap = netmap_get_cap,
595 	.set_cap = netmap_set_cap,
/* Register both descriptors in the net_backend_set linker set. */
598 DATA_SET(net_backend_set, netmap_backend);
599 DATA_SET(net_backend_set, vale_backend);
602 * Initialize a backend and attach to the frontend.
603 * This is called during frontend initialization.
604 * @pbe is a pointer to the backend to be initialized
605 * @devname is the backend-name as supplied on the command line,
606 * e.g. -s 2:0,frontend-name,backend-name[,other-args]
607 * @cb is the receive callback supplied by the frontend,
608 * and it is invoked in the event loop when a receive
609 * event is generated in the hypervisor,
610 * @param is a pointer to the frontend, and normally used as
611 * the argument for the callback.
614 netbe_init(struct net_backend **ret, const char *devname, net_be_rxeof_t cb,
617 	struct net_backend **pbe, *nbe, *tbe = NULL;
621 	 * Find the network backend that matches the user-provided
622 	 * device name. net_backend_set is built using a linker set.
624 	SET_FOREACH(pbe, net_backend_set) {
625 		if (strncmp(devname, (*pbe)->prefix,
626 		    strlen((*pbe)->prefix)) == 0) {
/* Sanity-check that the matched template is fully populated. */
628 			assert(tbe->init != NULL);
629 			assert(tbe->cleanup != NULL);
630 			assert(tbe->send != NULL);
631 			assert(tbe->recv != NULL);
632 			assert(tbe->get_cap != NULL);
633 			assert(tbe->set_cap != NULL);
/*
 * Allocate descriptor plus backend-private area in one block; the
 * opaque area is the flexible tail of struct net_backend.
 * NOTE(review): calloc result is used unchecked here — the NULL check,
 * if any, is elided from this excerpt.
 */
641 	nbe = calloc(1, sizeof(*nbe) + tbe->priv_size);
642 	*nbe = *tbe;	/* copy the template */
/* No virtio-net header on either side until set_cap negotiates one. */
645 	nbe->be_vnet_hdr_len = 0;
646 	nbe->fe_vnet_hdr_len = 0;
648 	/* Initialize the backend. */
649 	err = nbe->init(nbe, devname, cb, param);
/*
 * Destroy a backend instance created by netbe_init().  The body is
 * elided from this excerpt; presumably it invokes be->cleanup and frees
 * the descriptor — confirm against the full source.
 */
661 netbe_cleanup(struct net_backend *be)
/* Frontend-facing wrapper: query the backend's virtio-net feature bits. */
671 netbe_get_cap(struct net_backend *be)
675 	return (be->get_cap(be));
/*
 * Frontend-facing wrapper to enable features and set the virtio-net
 * header length.  Validates the header length (0, 10 or 12 bytes — 12
 * with merged rx buffers, 10 without), records the frontend's choice,
 * then delegates to the backend.  The backend must either use no header
 * or agree with the frontend's length.  The invalid-length return is
 * elided from this excerpt.
 */
679 netbe_set_cap(struct net_backend *be, uint64_t features,
680 unsigned vnet_hdr_len)
686 	/* There are only three valid lengths, i.e., 0, 10 and 12. */
687 	if (vnet_hdr_len && vnet_hdr_len != VNET_HDR_LEN
688 	    && vnet_hdr_len != (VNET_HDR_LEN - sizeof(uint16_t)))
691 	be->fe_vnet_hdr_len = vnet_hdr_len;
693 	ret = be->set_cap(be, features, vnet_hdr_len);
694 	assert(be->be_vnet_hdr_len == 0 ||
695 	    be->be_vnet_hdr_len == be->fe_vnet_hdr_len);
/*
 * Strip the first @tlen bytes from the scatter-gather list, assuming
 * (and asserting) they all live in the first segment.  If the first
 * segment is exactly consumed it is dropped from the list (that branch's
 * body is elided here); otherwise its base/len are adjusted in place.
 * Returns the (possibly advanced) iovec pointer; *iovcnt is updated.
 */
700 static __inline struct iovec *
701 iov_trim(struct iovec *iov, int *iovcnt, unsigned int tlen)
705 	/* XXX short-cut: assume first segment is >= tlen */
706 	assert(iov[0].iov_len >= tlen);
708 	iov[0].iov_len -= tlen;
709 	if (iov[0].iov_len == 0) {
714 		iov[0].iov_base = (void *)((uintptr_t)iov[0].iov_base + tlen);
/*
 * Frontend-facing transmit: if the frontend prepends a virtio-net
 * header the backend does not expect, strip it (it must be all zeroes
 * since the backend advertised no offloads), then hand the packet to
 * the backend's send method.
 */
722 netbe_send(struct net_backend *be, struct iovec *iov, int iovcnt)
726 	if (be->be_vnet_hdr_len != be->fe_vnet_hdr_len) {
728 		 * The frontend uses a virtio-net header, but the backend
729 		 * does not. We ignore it (as it must be all zeroes) and
/* Only the "frontend has header, backend does not" mismatch is legal. */
732 		assert(be->be_vnet_hdr_len == 0);
733 		iov = iov_trim(iov, &iovcnt, be->fe_vnet_hdr_len);
736 	return (be->send(be, iov, iovcnt));
740 * Try to read a packet from the backend, without blocking.
741 * If no packets are available, return 0. In case of success, return
742 * the length of the packet just read. Return -1 in case of errors.
/*
 * If the frontend expects a virtio-net header that the backend does not
 * produce, carve it out of the first iovec segment, receive the packet
 * into the remainder, and fill in a zeroed header (setting the buffer
 * count when merged rx buffers are in use — that assignment and the
 * header-zeroing memset are elided from this excerpt).
 */
745 netbe_recv(struct net_backend *be, struct iovec *iov, int iovcnt)
747 	/* Length of prepended virtio-net header. */
748 	unsigned int hlen = be->fe_vnet_hdr_len;
753 	if (hlen && hlen != be->be_vnet_hdr_len) {
755 		 * The frontend uses a virtio-net header, but the backend
756 		 * does not. We need to prepend a zeroed header.
758 		struct virtio_net_rxhdr *vh;
/* Only the "frontend has header, backend does not" mismatch is legal. */
760 		assert(be->be_vnet_hdr_len == 0);
763 		 * Get a pointer to the rx header, and use the
764 		 * data immediately following it for the packet buffer.
766 		vh = iov[0].iov_base;
767 		iov = iov_trim(iov, &iovcnt, hlen);
770 		 * The only valid field in the rx packet header is the
771 		 * number of buffers if merged rx bufs were negotiated.
/* Full 12-byte header implies VIRTIO_NET_F_MRG_RXBUF was negotiated. */
774 		if (hlen == VNET_HDR_LEN) {
779 	ret = be->recv(be, iov, iovcnt);
788 * Read a packet from the backend and discard it.
789 * Returns the size of the discarded packet or zero if no packet was available.
790 * A negative error code is returned in case of read error.
793 netbe_rx_discard(struct net_backend *be)
796 	 * MP note: the dummybuf is only used to discard frames,
797 	 * so there is no need for it to be per-vtnet or locked.
798 	 * We only make it large enough for TSO-sized segment.
/* 64 KiB TSO frame plus slack for the virtio-net header. */
800 	static uint8_t dummybuf[65536 + 64];
803 	iov.iov_base = dummybuf;
804 	iov.iov_len = sizeof(dummybuf);
/* Drain one packet through the normal receive path into the scratch buffer. */
806 	return netbe_recv(be, &iov, 1);