/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Vincenzo Maffione <vmaffione@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/ioctl.h>

#include <net/if.h>
#include <net/netmap.h>
#include <net/netmap_virt.h>
#define NETMAP_WITH_LIBS
#include <net/netmap_user.h>

#include <assert.h>
#include <errno.h>
#include <string.h>

#include "debug.h"
#include "iov.h"
#include "mevent.h"
#include "net_backends.h"
#include "net_backends_priv.h"

/* The virtio-net features supported by netmap. */
#define NETMAP_FEATURES (VIRTIO_NET_F_CSUM | VIRTIO_NET_F_HOST_TSO4 | \
		VIRTIO_NET_F_HOST_TSO6 | VIRTIO_NET_F_HOST_UFO | \
		VIRTIO_NET_F_GUEST_CSUM | VIRTIO_NET_F_GUEST_TSO4 | \
		VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_UFO)
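
/* Per-instance state for a backend attached to a netmap or VALE port. */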
struct netmap_priv {
	char ifname[IFNAMSIZ];
	struct nm_desc *nmd;
	uint16_t memid;
	struct netmap_ring *rx;
	struct netmap_ring *tx;
	struct mevent *mevp;
	net_be_rxeof_t cb;
	void *cb_param;
};

static void
nmreq_init(struct nmreq *req, char *ifname)
{

	memset(req, 0, sizeof(*req));
	strlcpy(req->nr_name, ifname, sizeof(req->nr_name));
	req->nr_version = NETMAP_API;
}

static int
netmap_set_vnet_hdr_len(struct net_backend *be, int vnet_hdr_len)
{
	int err;
	struct nmreq req;
	struct netmap_priv *priv = NET_BE_PRIV(be);

	nmreq_init(&req, priv->ifname);
	req.nr_cmd = NETMAP_BDG_VNET_HDR;
	req.nr_arg1 = vnet_hdr_len;
	err = ioctl(be->fd, NIOCREGIF, &req);
	if (err) {
		EPRINTLN("Unable to set vnet header length %d", vnet_hdr_len);
		return (err);
	}

	be->be_vnet_hdr_len = vnet_hdr_len;

	return (0);
}
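
/*
 * Check whether the netmap port accepts the given virtio-net header length,
 * restoring the previous length before returning.
 */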
static int
netmap_has_vnet_hdr_len(struct net_backend *be, unsigned vnet_hdr_len)
{
	unsigned prev_hdr_len = be->be_vnet_hdr_len;
	int ret;

	if (vnet_hdr_len == prev_hdr_len) {
		return (1);
	}

	ret = netmap_set_vnet_hdr_len(be, vnet_hdr_len);
	if (ret) {
		return (0);
	}

	netmap_set_vnet_hdr_len(be, prev_hdr_len);

	return (1);
}
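
/*
 * Offer the virtio-net features only if the port accepts the virtio-net
 * header; otherwise advertise no capabilities.
 */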
static uint64_t
netmap_get_cap(struct net_backend *be)
{

	return (netmap_has_vnet_hdr_len(be, VNET_HDR_LEN) ?
	    NETMAP_FEATURES : 0);
}

static int
netmap_set_cap(struct net_backend *be, uint64_t features __unused,
    unsigned vnet_hdr_len)
{

	return (netmap_set_vnet_hdr_len(be, vnet_hdr_len));
}

static int
netmap_init(struct net_backend *be, const char *devname,
    nvlist_t *nvl __unused, net_be_rxeof_t cb, void *param)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);

	strlcpy(priv->ifname, devname, sizeof(priv->ifname));
	priv->ifname[sizeof(priv->ifname) - 1] = '\0';

	priv->nmd = nm_open(priv->ifname, NULL, NETMAP_NO_TX_POLL, NULL);
	if (priv->nmd == NULL) {
		EPRINTLN("Unable to nm_open(): interface '%s', errno (%s)",
		    devname, strerror(errno));
		return (-1);
	}

	priv->memid = priv->nmd->req.nr_arg2;
	priv->tx = NETMAP_TXRING(priv->nmd->nifp, 0);
	priv->rx = NETMAP_RXRING(priv->nmd->nifp, 0);
	priv->cb = cb;
	priv->cb_param = param;
	be->fd = priv->nmd->fd;

	priv->mevp = mevent_add_disabled(be->fd, EVF_READ, cb, param);
	if (priv->mevp == NULL) {
		EPRINTLN("Could not register event");
		return (-1);
	}

	return (0);
}
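
/* Release the event handler and the netmap port acquired by netmap_init(). */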
static void
netmap_cleanup(struct net_backend *be)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);

	if (priv->mevp) {
		mevent_delete(priv->mevp);
	}
	if (priv->nmd) {
		nm_close(priv->nmd);
	}
	be->fd = -1;
}
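
/*
 * Copy the frame described by the iovec into consecutive TX slots, chaining
 * them with NS_MOREFRAG, and then kick the ring with NIOCTXSYNC.
 */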
static ssize_t
netmap_send(struct net_backend *be, const struct iovec *iov,
	    int iovcnt)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);
	struct netmap_ring *ring;
	ssize_t totlen = 0;
	uint32_t head;
	uint8_t *nm_buf;
	int nm_buf_size, nm_buf_len, j;

	ring = priv->tx;
	head = ring->head;
	if (head == ring->tail) {
		EPRINTLN("No space, drop %zu bytes", count_iov(iov, iovcnt));
		goto txsync;
	}
	nm_buf = NETMAP_BUF(ring, ring->slot[head].buf_idx);
	nm_buf_size = ring->nr_buf_size;
	nm_buf_len = 0;

	for (j = 0; j < iovcnt; j++) {
		uint8_t *iov_frag_buf = iov[j].iov_base;
		int iov_frag_size = iov[j].iov_len;

		totlen += iov_frag_size;

		/*
		 * Split each iovec fragment over more netmap slots, if
		 * necessary.
		 */
		for (;;) {
			int copylen;

			copylen = iov_frag_size < nm_buf_size ? iov_frag_size : nm_buf_size;
			memcpy(nm_buf, iov_frag_buf, copylen);

			iov_frag_buf += copylen;
			iov_frag_size -= copylen;
			nm_buf += copylen;
			nm_buf_size -= copylen;
			nm_buf_len += copylen;

			if (iov_frag_size == 0) {
				break;
			}

			ring->slot[head].len = nm_buf_len;
			ring->slot[head].flags = NS_MOREFRAG;
			head = nm_ring_next(ring, head);
			if (head == ring->tail) {
				/*
				 * We ran out of netmap slots while
				 * splitting the iovec fragments.
				 */
				EPRINTLN("No space, drop %zu bytes",
				    count_iov(iov, iovcnt));
				goto txsync;
			}
			nm_buf = NETMAP_BUF(ring, ring->slot[head].buf_idx);
			nm_buf_size = ring->nr_buf_size;
			nm_buf_len = 0;
		}
	}

	/* Complete the last slot, which must not have NS_MOREFRAG set. */
	ring->slot[head].len = nm_buf_len;
	ring->slot[head].flags = 0;
	head = nm_ring_next(ring, head);

	/* Now update ring->head and ring->cur. */
	ring->head = ring->cur = head;
txsync:
	ioctl(be->fd, NIOCTXSYNC, NULL);

	return (totlen);
}
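
/*
 * Return the length of the next packet in the RX ring, following
 * NS_MOREFRAG chains, without consuming any slot.
 */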
static ssize_t
netmap_peek_recvlen(struct net_backend *be)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);
	struct netmap_ring *ring = priv->rx;
	uint32_t head = ring->head;
	ssize_t totlen = 0;

	while (head != ring->tail) {
		struct netmap_slot *slot = ring->slot + head;

		totlen += slot->len;
		if ((slot->flags & NS_MOREFRAG) == 0)
			break;
		head = nm_ring_next(ring, head);
	}

	return (totlen);
}
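
/*
 * Copy the next packet from the RX ring into the iovec, one slot at a time,
 * and release the consumed slots back to netmap.
 */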
static ssize_t
netmap_recv(struct net_backend *be, const struct iovec *iov, int iovcnt)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);
	struct netmap_slot *slot = NULL;
	struct netmap_ring *ring;
	uint8_t *iov_frag_buf;
	int iov_frag_size;
	ssize_t totlen = 0;
	uint32_t head;

	assert(iovcnt);

	ring = priv->rx;
	head = ring->head;
	iov_frag_buf = iov->iov_base;
	iov_frag_size = iov->iov_len;

	do {
		uint8_t *nm_buf;
		int nm_buf_len;

		if (head == ring->tail) {
			return (0);
		}

		slot = ring->slot + head;
		nm_buf = NETMAP_BUF(ring, slot->buf_idx);
		nm_buf_len = slot->len;

		for (;;) {
			int copylen = nm_buf_len < iov_frag_size ?
			    nm_buf_len : iov_frag_size;

			memcpy(iov_frag_buf, nm_buf, copylen);
			nm_buf += copylen;
			nm_buf_len -= copylen;
			iov_frag_buf += copylen;
			iov_frag_size -= copylen;
			totlen += copylen;

			if (nm_buf_len == 0) {
				break;
			}

			iov++;
			iovcnt--;
			if (iovcnt == 0) {
				/* No space to receive. */
				EPRINTLN("Short iov, drop %zd bytes",
				    totlen);
				return (-ENOSPC);
			}
			iov_frag_buf = iov->iov_base;
			iov_frag_size = iov->iov_len;
		}

		head = nm_ring_next(ring, head);
	} while (slot->flags & NS_MOREFRAG);

	/* Release slots to netmap. */
	ring->head = ring->cur = head;

	return (totlen);
}

static void
netmap_recv_enable(struct net_backend *be)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);

	mevent_enable(priv->mevp);
}

static void
netmap_recv_disable(struct net_backend *be)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);

	mevent_disable(priv->mevp);
}

static struct net_backend netmap_backend = {
	.prefix = "netmap",
	.priv_size = sizeof(struct netmap_priv),
	.init = netmap_init,
	.cleanup = netmap_cleanup,
	.send = netmap_send,
	.peek_recvlen = netmap_peek_recvlen,
	.recv = netmap_recv,
	.recv_enable = netmap_recv_enable,
	.recv_disable = netmap_recv_disable,
	.get_cap = netmap_get_cap,
	.set_cap = netmap_set_cap,
};

/* A clone of the netmap backend, with a different prefix. */
static struct net_backend vale_backend = {
	.prefix = "vale",
	.priv_size = sizeof(struct netmap_priv),
	.init = netmap_init,
	.cleanup = netmap_cleanup,
	.send = netmap_send,
	.peek_recvlen = netmap_peek_recvlen,
	.recv = netmap_recv,
	.recv_enable = netmap_recv_enable,
	.recv_disable = netmap_recv_disable,
	.get_cap = netmap_get_cap,
	.set_cap = netmap_set_cap,
};

DATA_SET(net_backend_set, netmap_backend);
DATA_SET(net_backend_set, vale_backend);