2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2019 Vincenzo Maffione <vmaffione@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
19 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
20 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
21 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
22 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
23 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
24 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
25 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 * This file implements multiple network backends (tap, netmap, ...),
32 * to be used by network frontends such as virtio-net and e1000.
33 * The API to access the backend (e.g. send/receive packets, negotiate
34 * features) is exported by net_backends.h.
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
40 #include <sys/types.h> /* u_short etc */
41 #ifndef WITHOUT_CAPSICUM
42 #include <sys/capsicum.h>
44 #include <sys/ioctl.h>
49 #include <net/netmap.h>
50 #include <net/netmap_virt.h>
51 #define NETMAP_WITH_LIBS
52 #include <net/netmap_user.h>
54 #ifndef WITHOUT_CAPSICUM
55 #include <capsicum_helpers.h>
68 #include <pthread_np.h>
73 #include <sys/param.h>
74 #include <sys/sysctl.h>
81 #include "net_backends.h"
83 #include <sys/linker_set.h>
86 * Each network backend registers a set of function pointers that are
87 * used to implement the net backends API.
88 * This might need to be exposed if we implement backends in separate files.
91 const char *prefix; /* prefix matching this backend */
94 * Routines used to initialize and cleanup the resources needed
95 * by a backend. The cleanup function is used internally,
96 * and should not be called by the frontend.
98 int (*init)(struct net_backend *be, const char *devname,
99 const char *opts, net_be_rxeof_t cb, void *param);
100 void (*cleanup)(struct net_backend *be);
103 * Called to serve a guest transmit request. The scatter-gather
104 * vector provided by the caller has 'iovcnt' elements and contains
105 * the packet to send.
107 ssize_t (*send)(struct net_backend *be, const struct iovec *iov,
111 * Get the length of the next packet that can be received from
112 * the backend. If no packets are currently available, this
113 * function returns 0.
115 ssize_t (*peek_recvlen)(struct net_backend *be);
118 * Called to receive a packet from the backend. When the function
119 * returns a positive value 'len', the scatter-gather vector
120 * provided by the caller contains a packet with such length.
121 * The function returns 0 if the backend doesn't have a new packet to
124 ssize_t (*recv)(struct net_backend *be, const struct iovec *iov,
128 * Ask the backend to enable or disable receive operation in the
129 * backend. On return from a disable operation, it is guaranteed
130 * that the receive callback won't be called until receive is
131 * enabled again. Note however that it is up to the caller to make
132 * sure that netbe_recv() is not currently being executed by another
135 void (*recv_enable)(struct net_backend *be);
136 void (*recv_disable)(struct net_backend *be);
139 * Ask the backend for the virtio-net features it is able to
140 * support. Possible features are TSO, UFO and checksum offloading
141 * in both rx and tx direction and for both IPv4 and IPv6.
143 uint64_t (*get_cap)(struct net_backend *be);
146 * Tell the backend to enable/disable the specified virtio-net
147 * features (capabilities).
149 int (*set_cap)(struct net_backend *be, uint64_t features,
150 unsigned int vnet_hdr_len);
152 struct pci_vtnet_softc *sc;
156 * Length of the virtio-net header used by the backend and the
157 * frontend, respectively. A zero value means that the header
160 unsigned int be_vnet_hdr_len;
161 unsigned int fe_vnet_hdr_len;
163 /* Size of backend-specific private data. */
166 /* Room for backend-specific data. */
170 SET_DECLARE(net_backend_set, struct net_backend);
172 #define VNET_HDR_LEN sizeof(struct virtio_net_rxhdr)
174 #define WPRINTF(params) PRINTLN params
183 * A bounce buffer that allows us to implement the peek_recvlen
184 * callback. In the future we may get the same information from
192 tap_cleanup(struct net_backend *be)
194 struct tap_priv *priv = (struct tap_priv *)be->opaque;
197 mevent_delete(priv->mevp);
206 tap_init(struct net_backend *be, const char *devname,
207 const char *opts, net_be_rxeof_t cb, void *param)
209 struct tap_priv *priv = (struct tap_priv *)be->opaque;
212 #ifndef WITHOUT_CAPSICUM
217 WPRINTF(("TAP backend requires non-NULL callback"));
221 strcpy(tbuf, "/dev/");
222 strlcat(tbuf, devname, sizeof(tbuf));
224 be->fd = open(tbuf, O_RDWR);
226 WPRINTF(("open of tap device %s failed", tbuf));
231 * Set non-blocking and register for read
232 * notifications with the event loop
234 if (ioctl(be->fd, FIONBIO, &opt) < 0) {
235 WPRINTF(("tap device O_NONBLOCK failed"));
239 #ifndef WITHOUT_CAPSICUM
240 cap_rights_init(&rights, CAP_EVENT, CAP_READ, CAP_WRITE);
241 if (caph_rights_limit(be->fd, &rights) == -1)
242 errx(EX_OSERR, "Unable to apply rights for sandbox");
245 memset(priv->bbuf, 0, sizeof(priv->bbuf));
248 priv->mevp = mevent_add_disabled(be->fd, EVF_READ, cb, param);
249 if (priv->mevp == NULL) {
250 WPRINTF(("Could not register event"));
262 * Called to send a buffer chain out to the tap device
265 tap_send(struct net_backend *be, const struct iovec *iov, int iovcnt)
267 return (writev(be->fd, iov, iovcnt));
271 tap_peek_recvlen(struct net_backend *be)
273 struct tap_priv *priv = (struct tap_priv *)be->opaque;
276 if (priv->bbuflen > 0) {
278 * We already have a packet in the bounce buffer.
279 * Just return its length.
281 return priv->bbuflen;
285 * Read the next packet (if any) into the bounce buffer, so
286 * that we get to know its length and we can return that
289 ret = read(be->fd, priv->bbuf, sizeof(priv->bbuf));
290 if (ret < 0 && errno == EWOULDBLOCK) {
301 tap_recv(struct net_backend *be, const struct iovec *iov, int iovcnt)
303 struct tap_priv *priv = (struct tap_priv *)be->opaque;
306 if (priv->bbuflen > 0) {
308 * A packet is available in the bounce buffer, so
309 * we read it from there.
311 ret = buf_to_iov(priv->bbuf, priv->bbuflen,
314 /* Mark the bounce buffer as empty. */
320 ret = readv(be->fd, iov, iovcnt);
321 if (ret < 0 && errno == EWOULDBLOCK) {
329 tap_recv_enable(struct net_backend *be)
331 struct tap_priv *priv = (struct tap_priv *)be->opaque;
333 mevent_enable(priv->mevp);
337 tap_recv_disable(struct net_backend *be)
339 struct tap_priv *priv = (struct tap_priv *)be->opaque;
341 mevent_disable(priv->mevp);
345 tap_get_cap(struct net_backend *be)
348 return (0); /* no capabilities for now */
352 tap_set_cap(struct net_backend *be, uint64_t features,
353 unsigned vnet_hdr_len)
356 return ((features || vnet_hdr_len) ? -1 : 0);
359 static struct net_backend tap_backend = {
361 .priv_size = sizeof(struct tap_priv),
363 .cleanup = tap_cleanup,
365 .peek_recvlen = tap_peek_recvlen,
367 .recv_enable = tap_recv_enable,
368 .recv_disable = tap_recv_disable,
369 .get_cap = tap_get_cap,
370 .set_cap = tap_set_cap,
373 /* A clone of the tap backend, with a different prefix. */
374 static struct net_backend vmnet_backend = {
376 .priv_size = sizeof(struct tap_priv),
378 .cleanup = tap_cleanup,
380 .peek_recvlen = tap_peek_recvlen,
382 .recv_enable = tap_recv_enable,
383 .recv_disable = tap_recv_disable,
384 .get_cap = tap_get_cap,
385 .set_cap = tap_set_cap,
388 DATA_SET(net_backend_set, tap_backend);
389 DATA_SET(net_backend_set, vmnet_backend);
397 #define NG_SBUF_MAX_SIZE (4 * 1024 * 1024)
400 ng_init(struct net_backend *be, const char *devname,
401 const char *opts, net_be_rxeof_t cb, void *param)
403 struct tap_priv *p = (struct tap_priv *)be->opaque;
404 struct ngm_connect ngc;
405 char *ngopts, *tofree;
406 char nodename[NG_NODESIZ];
411 int peerhook_provided;
413 unsigned long maxsbsz;
415 #ifndef WITHOUT_CAPSICUM
420 WPRINTF(("Netgraph backend requires non-NULL callback"));
426 memset(&ngc, 0, sizeof(ngc));
428 strncpy(ngc.ourhook, "vmlink", NG_HOOKSIZ - 1);
430 tofree = ngopts = strdup(opts);
432 if (ngopts == NULL) {
433 WPRINTF(("strdup error"));
439 peerhook_provided = 0;
441 (void)strsep(&ngopts, ",");
443 while (ngopts != NULL) {
444 char *value = ngopts;
447 key = strsep(&value, "=");
451 (void) strsep(&ngopts, ",");
453 if (strcmp(key, "socket") == 0) {
454 strncpy(nodename, value, NG_NODESIZ - 1);
456 } else if (strcmp(key, "path") == 0) {
457 strncpy(ngc.path, value, NG_PATHSIZ - 1);
459 } else if (strcmp(key, "hook") == 0) {
460 strncpy(ngc.ourhook, value, NG_HOOKSIZ - 1);
461 } else if (strcmp(key, "peerhook") == 0) {
462 strncpy(ngc.peerhook, value, NG_HOOKSIZ - 1);
463 peerhook_provided = 1;
469 if (!path_provided) {
470 WPRINTF(("path must be provided"));
474 if (!peerhook_provided) {
475 WPRINTF(("peer hook must be provided"));
479 if (NgMkSockNode(socket_provided ? nodename : NULL,
480 &ctrl_sock, &be->fd) < 0) {
481 WPRINTF(("can't get Netgraph sockets"));
485 if (NgSendMsg(ctrl_sock, ".",
487 NGM_CONNECT, &ngc, sizeof(ngc)) < 0) {
488 WPRINTF(("can't connect to node"));
495 flags = fcntl(be->fd, F_GETFL);
498 WPRINTF(("can't get socket flags"));
502 if (fcntl(be->fd, F_SETFL, flags | O_NONBLOCK) < 0) {
503 WPRINTF(("can't set O_NONBLOCK flag"));
508 * The default ng_socket(4) buffer's size is too low.
509 * Calculate the minimum value between NG_SBUF_MAX_SIZE
510 * and kern.ipc.maxsockbuf.
512 msbsz = sizeof(maxsbsz);
513 if (sysctlbyname("kern.ipc.maxsockbuf", &maxsbsz, &msbsz,
515 WPRINTF(("can't get 'kern.ipc.maxsockbuf' value"));
520 * We can't set the socket buffer size to kern.ipc.maxsockbuf value,
521 * as it takes into account the mbuf(9) overhead.
523 maxsbsz = maxsbsz * MCLBYTES / (MSIZE + MCLBYTES);
525 sbsz = MIN(NG_SBUF_MAX_SIZE, maxsbsz);
527 if (setsockopt(be->fd, SOL_SOCKET, SO_SNDBUF, &sbsz,
529 WPRINTF(("can't set TX buffer size"));
533 if (setsockopt(be->fd, SOL_SOCKET, SO_RCVBUF, &sbsz,
535 WPRINTF(("can't set RX buffer size"));
539 #ifndef WITHOUT_CAPSICUM
540 cap_rights_init(&rights, CAP_EVENT, CAP_READ, CAP_WRITE);
541 if (caph_rights_limit(be->fd, &rights) == -1)
542 errx(EX_OSERR, "Unable to apply rights for sandbox");
545 memset(p->bbuf, 0, sizeof(p->bbuf));
548 p->mevp = mevent_add_disabled(be->fd, EVF_READ, cb, param);
549 if (p->mevp == NULL) {
550 WPRINTF(("Could not register event"));
561 static struct net_backend ng_backend = {
562 .prefix = "netgraph",
563 .priv_size = sizeof(struct tap_priv),
565 .cleanup = tap_cleanup,
567 .peek_recvlen = tap_peek_recvlen,
569 .recv_enable = tap_recv_enable,
570 .recv_disable = tap_recv_disable,
571 .get_cap = tap_get_cap,
572 .set_cap = tap_set_cap,
575 DATA_SET(net_backend_set, ng_backend);
577 #endif /* NETGRAPH */
583 /* The virtio-net features supported by netmap. */
584 #define NETMAP_FEATURES (VIRTIO_NET_F_CSUM | VIRTIO_NET_F_HOST_TSO4 | \
585 VIRTIO_NET_F_HOST_TSO6 | VIRTIO_NET_F_HOST_UFO | \
586 VIRTIO_NET_F_GUEST_CSUM | VIRTIO_NET_F_GUEST_TSO4 | \
587 VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_UFO)
590 char ifname[IFNAMSIZ];
593 struct netmap_ring *rx;
594 struct netmap_ring *tx;
601 nmreq_init(struct nmreq *req, char *ifname)
604 memset(req, 0, sizeof(*req));
605 strlcpy(req->nr_name, ifname, sizeof(req->nr_name));
606 req->nr_version = NETMAP_API;
610 netmap_set_vnet_hdr_len(struct net_backend *be, int vnet_hdr_len)
614 struct netmap_priv *priv = (struct netmap_priv *)be->opaque;
616 nmreq_init(&req, priv->ifname);
617 req.nr_cmd = NETMAP_BDG_VNET_HDR;
618 req.nr_arg1 = vnet_hdr_len;
619 err = ioctl(be->fd, NIOCREGIF, &req);
621 WPRINTF(("Unable to set vnet header length %d",
626 be->be_vnet_hdr_len = vnet_hdr_len;
632 netmap_has_vnet_hdr_len(struct net_backend *be, unsigned vnet_hdr_len)
634 int prev_hdr_len = be->be_vnet_hdr_len;
637 if (vnet_hdr_len == prev_hdr_len) {
641 ret = netmap_set_vnet_hdr_len(be, vnet_hdr_len);
646 netmap_set_vnet_hdr_len(be, prev_hdr_len);
652 netmap_get_cap(struct net_backend *be)
655 return (netmap_has_vnet_hdr_len(be, VNET_HDR_LEN) ?
656 NETMAP_FEATURES : 0);
660 netmap_set_cap(struct net_backend *be, uint64_t features,
661 unsigned vnet_hdr_len)
664 return (netmap_set_vnet_hdr_len(be, vnet_hdr_len));
668 netmap_init(struct net_backend *be, const char *devname,
669 const char *opts, net_be_rxeof_t cb, void *param)
671 struct netmap_priv *priv = (struct netmap_priv *)be->opaque;
673 strlcpy(priv->ifname, devname, sizeof(priv->ifname));
674 priv->ifname[sizeof(priv->ifname) - 1] = '\0';
676 priv->nmd = nm_open(priv->ifname, NULL, NETMAP_NO_TX_POLL, NULL);
677 if (priv->nmd == NULL) {
678 WPRINTF(("Unable to nm_open(): interface '%s', errno (%s)",
679 devname, strerror(errno)));
684 priv->memid = priv->nmd->req.nr_arg2;
685 priv->tx = NETMAP_TXRING(priv->nmd->nifp, 0);
686 priv->rx = NETMAP_RXRING(priv->nmd->nifp, 0);
688 priv->cb_param = param;
689 be->fd = priv->nmd->fd;
691 priv->mevp = mevent_add_disabled(be->fd, EVF_READ, cb, param);
692 if (priv->mevp == NULL) {
693 WPRINTF(("Could not register event"));
701 netmap_cleanup(struct net_backend *be)
703 struct netmap_priv *priv = (struct netmap_priv *)be->opaque;
706 mevent_delete(priv->mevp);
715 netmap_send(struct net_backend *be, const struct iovec *iov,
718 struct netmap_priv *priv = (struct netmap_priv *)be->opaque;
719 struct netmap_ring *ring;
729 if (head == ring->tail) {
730 WPRINTF(("No space, drop %zu bytes", count_iov(iov, iovcnt)));
733 nm_buf = NETMAP_BUF(ring, ring->slot[head].buf_idx);
734 nm_buf_size = ring->nr_buf_size;
737 for (j = 0; j < iovcnt; j++) {
738 int iov_frag_size = iov[j].iov_len;
739 void *iov_frag_buf = iov[j].iov_base;
741 totlen += iov_frag_size;
744 * Split each iovec fragment over more netmap slots, if
750 copylen = iov_frag_size < nm_buf_size ? iov_frag_size : nm_buf_size;
751 memcpy(nm_buf, iov_frag_buf, copylen);
753 iov_frag_buf += copylen;
754 iov_frag_size -= copylen;
756 nm_buf_size -= copylen;
757 nm_buf_len += copylen;
759 if (iov_frag_size == 0) {
763 ring->slot[head].len = nm_buf_len;
764 ring->slot[head].flags = NS_MOREFRAG;
765 head = nm_ring_next(ring, head);
766 if (head == ring->tail) {
768 * We ran out of netmap slots while
769 * splitting the iovec fragments.
771 WPRINTF(("No space, drop %zu bytes",
772 count_iov(iov, iovcnt)));
775 nm_buf = NETMAP_BUF(ring, ring->slot[head].buf_idx);
776 nm_buf_size = ring->nr_buf_size;
781 /* Complete the last slot, which must not have NS_MOREFRAG set. */
782 ring->slot[head].len = nm_buf_len;
783 ring->slot[head].flags = 0;
784 head = nm_ring_next(ring, head);
786 /* Now update ring->head and ring->cur. */
787 ring->head = ring->cur = head;
789 ioctl(be->fd, NIOCTXSYNC, NULL);
795 netmap_peek_recvlen(struct net_backend *be)
797 struct netmap_priv *priv = (struct netmap_priv *)be->opaque;
798 struct netmap_ring *ring = priv->rx;
799 uint32_t head = ring->head;
802 while (head != ring->tail) {
803 struct netmap_slot *slot = ring->slot + head;
806 if ((slot->flags & NS_MOREFRAG) == 0)
808 head = nm_ring_next(ring, head);
815 netmap_recv(struct net_backend *be, const struct iovec *iov, int iovcnt)
817 struct netmap_priv *priv = (struct netmap_priv *)be->opaque;
818 struct netmap_slot *slot = NULL;
819 struct netmap_ring *ring;
829 iov_frag_buf = iov->iov_base;
830 iov_frag_size = iov->iov_len;
836 if (head == ring->tail) {
840 slot = ring->slot + head;
841 nm_buf = NETMAP_BUF(ring, slot->buf_idx);
842 nm_buf_len = slot->len;
845 int copylen = nm_buf_len < iov_frag_size ?
846 nm_buf_len : iov_frag_size;
848 memcpy(iov_frag_buf, nm_buf, copylen);
850 nm_buf_len -= copylen;
851 iov_frag_buf += copylen;
852 iov_frag_size -= copylen;
855 if (nm_buf_len == 0) {
862 /* No space to receive. */
863 WPRINTF(("Short iov, drop %zd bytes",
867 iov_frag_buf = iov->iov_base;
868 iov_frag_size = iov->iov_len;
871 head = nm_ring_next(ring, head);
873 } while (slot->flags & NS_MOREFRAG);
875 /* Release slots to netmap. */
876 ring->head = ring->cur = head;
882 netmap_recv_enable(struct net_backend *be)
884 struct netmap_priv *priv = (struct netmap_priv *)be->opaque;
886 mevent_enable(priv->mevp);
890 netmap_recv_disable(struct net_backend *be)
892 struct netmap_priv *priv = (struct netmap_priv *)be->opaque;
894 mevent_disable(priv->mevp);
897 static struct net_backend netmap_backend = {
899 .priv_size = sizeof(struct netmap_priv),
901 .cleanup = netmap_cleanup,
903 .peek_recvlen = netmap_peek_recvlen,
905 .recv_enable = netmap_recv_enable,
906 .recv_disable = netmap_recv_disable,
907 .get_cap = netmap_get_cap,
908 .set_cap = netmap_set_cap,
911 /* A clone of the netmap backend, with a different prefix. */
912 static struct net_backend vale_backend = {
914 .priv_size = sizeof(struct netmap_priv),
916 .cleanup = netmap_cleanup,
918 .peek_recvlen = netmap_peek_recvlen,
920 .recv_enable = netmap_recv_enable,
921 .recv_disable = netmap_recv_disable,
922 .get_cap = netmap_get_cap,
923 .set_cap = netmap_set_cap,
926 DATA_SET(net_backend_set, netmap_backend);
927 DATA_SET(net_backend_set, vale_backend);
930 * Initialize a backend and attach to the frontend.
931 * This is called during frontend initialization.
932 * @pbe is a pointer to the backend to be initialized
933 * @devname is the backend-name as supplied on the command line,
934 * e.g. -s 2:0,frontend-name,backend-name[,other-args]
935 * @cb is the receive callback supplied by the frontend,
936 * and it is invoked in the event loop when a receive
937 * event is generated in the hypervisor,
938 * @param is a pointer to the frontend, and normally used as
939 * the argument for the callback.
942 netbe_init(struct net_backend **ret, const char *opts, net_be_rxeof_t cb,
945 struct net_backend **pbe, *nbe, *tbe = NULL;
950 devname = options = strdup(opts);
952 if (devname == NULL) {
956 devname = strsep(&options, ",");
959 * Find the network backend that matches the user-provided
960 * device name. net_backend_set is built using a linker set.
962 SET_FOREACH(pbe, net_backend_set) {
963 if (strncmp(devname, (*pbe)->prefix,
964 strlen((*pbe)->prefix)) == 0) {
966 assert(tbe->init != NULL);
967 assert(tbe->cleanup != NULL);
968 assert(tbe->send != NULL);
969 assert(tbe->recv != NULL);
970 assert(tbe->get_cap != NULL);
971 assert(tbe->set_cap != NULL);
982 nbe = calloc(1, sizeof(*nbe) + tbe->priv_size);
983 *nbe = *tbe; /* copy the template */
986 nbe->be_vnet_hdr_len = 0;
987 nbe->fe_vnet_hdr_len = 0;
989 /* Initialize the backend. */
990 err = nbe->init(nbe, devname, options, cb, param);
1004 netbe_cleanup(struct net_backend *be)
1014 netbe_get_cap(struct net_backend *be)
1018 return (be->get_cap(be));
1022 netbe_set_cap(struct net_backend *be, uint64_t features,
1023 unsigned vnet_hdr_len)
1029 /* There are only three valid lengths, i.e., 0, 10 and 12. */
1030 if (vnet_hdr_len && vnet_hdr_len != VNET_HDR_LEN
1031 && vnet_hdr_len != (VNET_HDR_LEN - sizeof(uint16_t)))
1034 be->fe_vnet_hdr_len = vnet_hdr_len;
1036 ret = be->set_cap(be, features, vnet_hdr_len);
1037 assert(be->be_vnet_hdr_len == 0 ||
1038 be->be_vnet_hdr_len == be->fe_vnet_hdr_len);
1044 netbe_send(struct net_backend *be, const struct iovec *iov, int iovcnt)
1047 return (be->send(be, iov, iovcnt));
1051 netbe_peek_recvlen(struct net_backend *be)
1054 return (be->peek_recvlen(be));
1058 * Try to read a packet from the backend, without blocking.
1059 * If no packets are available, return 0. In case of success, return
1060 * the length of the packet just read. Return -1 in case of errors.
1063 netbe_recv(struct net_backend *be, const struct iovec *iov, int iovcnt)
1066 return (be->recv(be, iov, iovcnt));
1070 * Read a packet from the backend and discard it.
1071 * Returns the size of the discarded packet or zero if no packet was available.
1072 * A negative error code is returned in case of read error.
1075 netbe_rx_discard(struct net_backend *be)
1078 * MP note: the dummybuf is only used to discard frames,
1079 * so there is no need for it to be per-vtnet or locked.
1080 * We only make it large enough for TSO-sized segment.
1082 static uint8_t dummybuf[65536 + 64];
1085 iov.iov_base = dummybuf;
1086 iov.iov_len = sizeof(dummybuf);
1088 return netbe_recv(be, &iov, 1);
1092 netbe_rx_disable(struct net_backend *be)
1095 return be->recv_disable(be);
1099 netbe_rx_enable(struct net_backend *be)
1102 return be->recv_enable(be);
1106 netbe_get_vnet_hdr_len(struct net_backend *be)
1109 return (be->be_vnet_hdr_len);