2 * Copyright (C) 2011-2014 Universita` di Pisa. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * Functions and macros to manipulate netmap structures and packets
31 * in userspace. See netmap(4) for more information.
33 * The address of the struct netmap_if, say nifp, is computed from the
34 * value returned from ioctl(.., NIOCREG, ...) and the mmap region:
35 * ioctl(fd, NIOCREG, &req);
36 * mem = mmap(0, ... );
37 * nifp = NETMAP_IF(mem, req.nr_nifp);
38 * (so simple, we could just do it manually)
41 * struct netmap_ring *NETMAP_TXRING(nifp, index)
42 * struct netmap_ring *NETMAP_RXRING(nifp, index)
43 * we can access ring->nr_cur, ring->nr_avail, ring->nr_flags
45 * ring->slot[i] gives us the i-th slot (we can access
46 * directly len, flags, buf_idx)
48 * char *buf = NETMAP_BUF(ring, x) returns a pointer to
49 * the buffer numbered x
51 * All ring indexes (head, cur, tail) should always move forward.
52 * To compute the next index in a circular ring you can use
53 * i = nm_ring_next(ring, i);
 * To ease porting apps from pcap to netmap we supply a few functions
56 * that can be called to open, close, read and write on netmap in a way
57 * similar to libpcap. Note that the read/write function depend on
58 * an ioctl()/select()/poll() being issued to refill rings or push
61 * In order to use these, include #define NETMAP_WITH_LIBS
62 * in the source file that invokes these functions.
65 #ifndef _NET_NETMAP_USER_H_
66 #define _NET_NETMAP_USER_H_
69 #include <sys/socket.h> /* apple needs sockaddr */
70 #include <net/if.h> /* IFNAMSIZ */
73 #define likely(x) __builtin_expect(!!(x), 1)
74 #define unlikely(x) __builtin_expect(!!(x), 0)
75 #endif /* likely and unlikely */
77 #include <net/netmap.h>
/* helper macro: add a byte offset to ptr and cast to the requested type */
#define _NETMAP_OFFSET(type, ptr, offset) \
	((type)(void *)((char *)(ptr) + (offset)))

/* the netmap_if descriptor lives nr_offset bytes into the mmap region */
#define NETMAP_IF(_base, _ofs)	_NETMAP_OFFSET(struct netmap_if *, _base, _ofs)

/* tx ring #index; per-ring byte offsets are stored in nifp->ring_ofs[] */
#define NETMAP_TXRING(nifp, index) _NETMAP_OFFSET(struct netmap_ring *, \
	nifp, (nifp)->ring_ofs[index] )

/* rx ring #index; rx offsets follow the ni_tx_rings+1 tx entries */
#define NETMAP_RXRING(nifp, index) _NETMAP_OFFSET(struct netmap_ring *, \
	nifp, (nifp)->ring_ofs[index + (nifp)->ni_tx_rings + 1] )

/* address of buffer #index (all buffers are nr_buf_size bytes) */
#define NETMAP_BUF(ring, index)				\
	((char *)(ring) + (ring)->buf_ofs + ((index)*(ring)->nr_buf_size))

/* inverse of NETMAP_BUF: map a buffer address back to its index */
#define NETMAP_BUF_IDX(ring, buf)			\
	( ((char *)(buf) - ((char *)(ring) + (ring)->buf_ofs) ) / \
		(ring)->nr_buf_size )

/*
 * true when the ring has no slots available for the application
 * (restored: this macro is required by nm_inject/nm_dispatch/nm_nextpkt
 * below and was lost in a dropped line — TODO confirm against upstream)
 */
#ifndef nm_ring_empty
#define nm_ring_empty(r)	((r)->cur == (r)->tail)
#endif
99 static inline uint32_t
100 nm_ring_next(struct netmap_ring *r, uint32_t i)
102 return ( unlikely(i + 1 == r->num_slots) ? 0 : i + 1);
107 * Return 1 if we have pending transmissions in the tx ring.
108 * When everything is complete ring->head = ring->tail + 1 (modulo ring size)
111 nm_tx_pending(struct netmap_ring *r)
113 return nm_ring_next(r, r->tail) != r->head;
117 static inline uint32_t
118 nm_ring_space(struct netmap_ring *ring)
120 int ret = ring->tail - ring->cur;
122 ret += ring->num_slots;
127 #ifdef NETMAP_WITH_LIBS
129 * Support for simple I/O libraries.
130 * Include other system headers required for compiling this.
133 #ifndef HAVE_NETMAP_WITH_LIBS
134 #define HAVE_NETMAP_WITH_LIBS
137 #include <sys/time.h>
138 #include <sys/mman.h>
139 #include <string.h> /* memset */
140 #include <sys/ioctl.h>
141 #include <sys/errno.h> /* EINVAL */
142 #include <fcntl.h> /* O_RDWR */
143 #include <unistd.h> /* close() */
#ifndef ND /* debug macros */
/* ND() discards its arguments; D() prints a timestamped debug line */
#define ND(_fmt, ...) do {} while(0)
#define D(_fmt, ...)						\
	do {							\
		struct timeval _t0;				\
		gettimeofday(&_t0, NULL);			\
		fprintf(stderr, "%03d.%06d %s [%d] " _fmt "\n",	\
		    (int)(_t0.tv_sec % 1000), (int)_t0.tv_usec,	\
		    __FUNCTION__, __LINE__, ##__VA_ARGS__);	\
	} while (0)

/* Rate limited version of "D", lps indicates how many per second */
#define RD(lps, format, ...)					\
	do {							\
		static int __t0, __cnt;				\
		struct timeval __xxts;				\
		gettimeofday(&__xxts, NULL);			\
		if (__t0 != __xxts.tv_sec) {			\
			__t0 = __xxts.tv_sec;			\
			__cnt = 0;				\
		}						\
		if (__cnt++ < lps) {				\
			D(format, ##__VA_ARGS__);		\
		}						\
	} while (0)
#endif /* debug macros */
struct nm_pkthdr { /* same as pcap_pkthdr */
	struct timeval	ts;	/* time stamp */
	uint32_t	caplen;	/* length of portion present */
	uint32_t	len;	/* length of this packet (off wire) */
};
181 struct nm_stat { /* same as pcap_stat */
190 #define NM_ERRBUF_SIZE 512
193 struct nm_desc *self; /* point to self if netmap. */
197 int done_mmap; /* set if mem is the result of mmap */
198 struct netmap_if * const nifp;
199 uint16_t first_tx_ring, last_tx_ring, cur_tx_ring;
200 uint16_t first_rx_ring, last_rx_ring, cur_rx_ring;
201 struct nmreq req; /* also contains the nr_name = ifname */
202 struct nm_pkthdr hdr;
205 * The memory contains netmap_if, rings and then buffers.
206 * Given a pointer (e.g. to nm_inject) we can compare with
207 * mem/buf_start/buf_end to tell if it is a buffer or
208 * some other descriptor in our region.
209 * We also store a pointer to some ring as it helps in the
210 * translation from buffer indexes to addresses.
212 struct netmap_ring * const some_ring;
213 void * const buf_start;
214 void * const buf_end;
215 /* parameters from pcap_open_live */
221 /* save flags so we can restore them on close */
227 char msg[NM_ERRBUF_SIZE];
/*
 * when the descriptor is open correctly, d->self == d
 * Eventually we should also use some magic number.
 */
#define P2NMD(p)		((struct nm_desc *)(p))
#define IS_NETMAP_DESC(d)	((d) && P2NMD(d)->self == P2NMD(d))
#define NETMAP_FD(d)		(P2NMD(d)->fd)
/*
 * this is a slightly optimized copy routine which rounds
 * to multiple of 64 bytes and is often faster than dealing
 * with other odd sizes. We assume there is enough room
 * in the source and destination buffers.
 *
 * XXX only for multiples of 64 bytes, non overlapped.
 */
static inline void
nm_pkt_copy(const void *_src, void *_dst, int l)
{
	const uint64_t *src = (const uint64_t *)_src;
	uint64_t *dst = (uint64_t *)_dst;

	if (unlikely(l >= 1024)) {
		/* large packets: plain memcpy is as fast or faster */
		memcpy(dst, src, l);
		return;
	}
	/* copy 64 bytes per iteration, 8 words at a time */
	for (; likely(l > 0); l-=64) {
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
	}
}
271 * The callback, invoked on each received packet. Same as libpcap
273 typedef void (*nm_cb_t)(u_char *, const struct nm_pkthdr *, const u_char *d);
276 *--- the pcap-like API ---
278 * nm_open() opens a file descriptor, binds to a port and maps memory.
280 * ifname (netmap:foo or vale:foo) is the port name
 * a suffix can indicate the following:
282 * ^ bind the host (sw) ring pair
283 * * bind host and NIC ring pairs (transparent)
284 * -NN bind individual NIC ring pair
285 * {NN bind master side of pipe NN
286 * }NN bind slave side of pipe NN
288 * req provides the initial values of nmreq before parsing ifname.
289 * Remember that the ifname parsing will override the ring
290 * number in nm_ringid, and part of nm_flags;
291 * flags special functions, normally 0
292 * indicates which fields of *arg are significant
293 * arg special functions, normally NULL
294 * if passed a netmap_desc with mem != NULL,
295 * use that memory instead of mmap.
298 static struct nm_desc *nm_open(const char *ifname, const struct nmreq *req,
299 uint64_t flags, const struct nm_desc *arg);
302 * nm_open can import some fields from the parent descriptor.
303 * These flags control which ones.
304 * Also in flags you can specify NETMAP_NO_TX_POLL and NETMAP_DO_RX_POLL,
305 * which set the initial value for these flags.
306 * Note that the 16 low bits of the flags are reserved for data
307 * that may go into the nmreq.
enum {
	NM_OPEN_NO_MMAP =	0x040000, /* reuse mmap from parent */
	NM_OPEN_IFNAME =	0x080000, /* nr_name, nr_ringid, nr_flags */
	NM_OPEN_ARG1 =		0x100000,
	NM_OPEN_ARG2 =		0x200000,
	NM_OPEN_ARG3 =		0x400000,
	NM_OPEN_RING_CFG =	0x800000, /* tx|rx rings|slots */
};
320 * nm_close() closes and restores the port to its previous state
323 static int nm_close(struct nm_desc *);
326 * nm_inject() is the same as pcap_inject()
327 * nm_dispatch() is the same as pcap_dispatch()
328 * nm_nextpkt() is the same as pcap_next()
331 static int nm_inject(struct nm_desc *, const void *, size_t);
332 static int nm_dispatch(struct nm_desc *, int, nm_cb_t, u_char *);
333 static u_char *nm_nextpkt(struct nm_desc *, struct nm_pkthdr *);
337 * Try to open, return descriptor if successful, NULL otherwise.
338 * An invalid netmap name will return errno = 0;
339 * You can pass a pointer to a pre-filled nm_desc to add special
340 * parameters. Flags is used as follows
341 * NM_OPEN_NO_MMAP use the memory from arg, only
342 * if the nr_arg2 (memory block) matches.
343 * NM_OPEN_ARG1 use req.nr_arg1 from arg
344 * NM_OPEN_ARG2 use req.nr_arg2 from arg
345 * NM_OPEN_RING_CFG user ring config from arg
347 static struct nm_desc *
348 nm_open(const char *ifname, const struct nmreq *req,
349 uint64_t new_flags, const struct nm_desc *arg)
351 struct nm_desc *d = NULL;
352 const struct nm_desc *parent = arg;
354 uint32_t nr_ringid = 0, nr_flags;
355 const char *port = NULL;
356 const char *errmsg = NULL;
358 if (strncmp(ifname, "netmap:", 7) && strncmp(ifname, "vale", 4)) {
359 errno = 0; /* name not recognised, not an error */
362 if (ifname[0] == 'n')
364 /* scan for a separator */
365 for (port = ifname; *port && !index("-*^{}", *port); port++)
367 namelen = port - ifname;
368 if (namelen >= sizeof(d->req.nr_name)) {
369 errmsg = "name too long";
373 default: /* '\0', no suffix */
374 nr_flags = NR_REG_ALL_NIC;
376 case '-': /* one NIC */
377 nr_flags = NR_REG_ONE_NIC;
378 nr_ringid = atoi(port + 1);
380 case '*': /* NIC and SW, ignore port */
381 nr_flags = NR_REG_NIC_SW;
383 errmsg = "invalid port for nic+sw";
387 case '^': /* only sw ring */
388 nr_flags = NR_REG_SW;
390 errmsg = "invalid port for sw ring";
395 nr_flags = NR_REG_PIPE_MASTER;
396 nr_ringid = atoi(port + 1);
399 nr_flags = NR_REG_PIPE_SLAVE;
400 nr_ringid = atoi(port + 1);
404 if (nr_ringid >= NETMAP_RING_MASK) {
405 errmsg = "invalid ringid";
409 d = (struct nm_desc *)calloc(1, sizeof(*d));
411 errmsg = "nm_desc alloc failure";
415 d->self = d; /* set this early so nm_close() works */
416 d->fd = open("/dev/netmap", O_RDWR);
418 errmsg = "cannot open /dev/netmap";
424 d->req.nr_version = NETMAP_API;
425 d->req.nr_ringid &= ~NETMAP_RING_MASK;
427 /* these fields are overridden by ifname and flags processing */
428 d->req.nr_ringid |= nr_ringid;
429 d->req.nr_flags = nr_flags;
430 memcpy(d->req.nr_name, ifname, namelen);
431 d->req.nr_name[namelen] = '\0';
432 /* optionally import info from parent */
433 if (IS_NETMAP_DESC(parent) && new_flags) {
434 if (new_flags & NM_OPEN_ARG1)
435 D("overriding ARG1 %d", parent->req.nr_arg1);
436 d->req.nr_arg1 = new_flags & NM_OPEN_ARG1 ?
437 parent->req.nr_arg1 : 4;
438 if (new_flags & NM_OPEN_ARG2)
439 D("overriding ARG2 %d", parent->req.nr_arg2);
440 d->req.nr_arg2 = new_flags & NM_OPEN_ARG2 ?
441 parent->req.nr_arg2 : 0;
442 if (new_flags & NM_OPEN_ARG3)
443 D("overriding ARG3 %d", parent->req.nr_arg3);
444 d->req.nr_arg3 = new_flags & NM_OPEN_ARG3 ?
445 parent->req.nr_arg3 : 0;
446 if (new_flags & NM_OPEN_RING_CFG) {
447 D("overriding RING_CFG");
448 d->req.nr_tx_slots = parent->req.nr_tx_slots;
449 d->req.nr_rx_slots = parent->req.nr_rx_slots;
450 d->req.nr_tx_rings = parent->req.nr_tx_rings;
451 d->req.nr_rx_rings = parent->req.nr_rx_rings;
453 if (new_flags & NM_OPEN_IFNAME) {
454 D("overriding ifname %s ringid 0x%x flags 0x%x",
455 parent->req.nr_name, parent->req.nr_ringid,
456 parent->req.nr_flags);
457 memcpy(d->req.nr_name, parent->req.nr_name,
458 sizeof(d->req.nr_name));
459 d->req.nr_ringid = parent->req.nr_ringid;
460 d->req.nr_flags = parent->req.nr_flags;
463 /* add the *XPOLL flags */
464 d->req.nr_ringid |= new_flags & (NETMAP_NO_TX_POLL | NETMAP_DO_RX_POLL);
466 if (ioctl(d->fd, NIOCREGIF, &d->req)) {
467 errmsg = "NIOCREGIF failed";
471 if (IS_NETMAP_DESC(parent) && parent->mem &&
472 parent->req.nr_arg2 == d->req.nr_arg2) {
473 /* do not mmap, inherit from parent */
474 d->memsize = parent->memsize;
475 d->mem = parent->mem;
477 /* XXX TODO: check if memsize is too large (or there is overflow) */
478 d->memsize = d->req.nr_memsize;
479 d->mem = mmap(0, d->memsize, PROT_WRITE | PROT_READ, MAP_SHARED,
481 if (d->mem == MAP_FAILED) {
482 errmsg = "mmap failed";
488 struct netmap_if *nifp = NETMAP_IF(d->mem, d->req.nr_offset);
489 struct netmap_ring *r = NETMAP_RXRING(nifp, );
491 *(struct netmap_if **)(uintptr_t)&(d->nifp) = nifp;
492 *(struct netmap_ring **)(uintptr_t)&d->some_ring = r;
493 *(void **)(uintptr_t)&d->buf_start = NETMAP_BUF(r, 0);
494 *(void **)(uintptr_t)&d->buf_end =
495 (char *)d->mem + d->memsize;
498 if (d->req.nr_flags == NR_REG_SW) { /* host stack */
499 d->first_tx_ring = d->last_tx_ring = d->req.nr_tx_rings;
500 d->first_rx_ring = d->last_rx_ring = d->req.nr_rx_rings;
501 } else if (d->req.nr_flags == NR_REG_ALL_NIC) { /* only nic */
502 d->first_tx_ring = 0;
503 d->first_rx_ring = 0;
504 d->last_tx_ring = d->req.nr_tx_rings - 1;
505 d->last_rx_ring = d->req.nr_rx_rings - 1;
506 } else if (d->req.nr_flags == NR_REG_NIC_SW) {
507 d->first_tx_ring = 0;
508 d->first_rx_ring = 0;
509 d->last_tx_ring = d->req.nr_tx_rings;
510 d->last_rx_ring = d->req.nr_rx_rings;
511 } else if (d->req.nr_flags == NR_REG_ONE_NIC) {
512 /* XXX check validity */
513 d->first_tx_ring = d->last_tx_ring =
514 d->first_rx_ring = d->last_rx_ring = d->req.nr_ringid & NETMAP_RING_MASK;
516 d->first_tx_ring = d->last_tx_ring = 0;
517 d->first_rx_ring = d->last_rx_ring = 0;
520 #ifdef DEBUG_NETMAP_USER
521 { /* debugging code */
524 D("%s tx %d .. %d %d rx %d .. %d %d", ifname,
525 d->first_tx_ring, d->last_tx_ring, d->req.nr_tx_rings,
526 d->first_rx_ring, d->last_rx_ring, d->req.nr_rx_rings);
527 for (i = 0; i <= d->req.nr_tx_rings; i++) {
528 struct netmap_ring *r = NETMAP_TXRING(d->nifp, i);
529 D("TX%d %p h %d c %d t %d", i, r, r->head, r->cur, r->tail);
531 for (i = 0; i <= d->req.nr_rx_rings; i++) {
532 struct netmap_ring *r = NETMAP_RXRING(d->nifp, i);
533 D("RX%d %p h %d c %d t %d", i, r, r->head, r->cur, r->tail);
536 #endif /* debugging */
538 d->cur_tx_ring = d->first_tx_ring;
539 d->cur_rx_ring = d->first_rx_ring;
545 D("%s %s", errmsg, ifname);
552 nm_close(struct nm_desc *d)
555 * ugly trick to avoid unused warnings
557 static void *__xxzt[] __attribute__ ((unused)) =
558 { (void *)nm_open, (void *)nm_inject,
559 (void *)nm_dispatch, (void *)nm_nextpkt } ;
561 if (d == NULL || d->self != d)
563 if (d->done_mmap && d->mem)
564 munmap(d->mem, d->memsize);
567 bzero(d, sizeof(*d));
574 * Same prototype as pcap_inject(), only need to cast.
577 nm_inject(struct nm_desc *d, const void *buf, size_t size)
579 u_int c, n = d->last_tx_ring - d->first_tx_ring + 1;
581 for (c = 0; c < n ; c++) {
582 /* compute current ring to use */
583 struct netmap_ring *ring;
585 uint32_t ri = d->cur_tx_ring + c;
587 if (ri > d->last_tx_ring)
588 ri = d->first_tx_ring;
589 ring = NETMAP_TXRING(d->nifp, ri);
590 if (nm_ring_empty(ring)) {
594 idx = ring->slot[i].buf_idx;
595 ring->slot[i].len = size;
596 nm_pkt_copy(buf, NETMAP_BUF(ring, idx), size);
598 ring->head = ring->cur = nm_ring_next(ring, i);
606 * Same prototype as pcap_dispatch(), only need to cast.
609 nm_dispatch(struct nm_desc *d, int cnt, nm_cb_t cb, u_char *arg)
611 int n = d->last_rx_ring - d->first_rx_ring + 1;
612 int c, got = 0, ri = d->cur_rx_ring;
616 /* cnt == -1 means infinite, but rings have a finite amount
617 * of buffers and the int is large enough that we never wrap,
618 * so we can omit checking for -1
620 for (c=0; c < n && cnt != got; c++) {
621 /* compute current ring to use */
622 struct netmap_ring *ring;
624 ri = d->cur_rx_ring + c;
625 if (ri > d->last_rx_ring)
626 ri = d->first_rx_ring;
627 ring = NETMAP_RXRING(d->nifp, ri);
628 for ( ; !nm_ring_empty(ring) && cnt != got; got++) {
630 u_int idx = ring->slot[i].buf_idx;
631 u_char *buf = (u_char *)NETMAP_BUF(ring, idx);
633 // __builtin_prefetch(buf);
634 d->hdr.len = d->hdr.caplen = ring->slot[i].len;
635 d->hdr.ts = ring->ts;
636 cb(arg, &d->hdr, buf);
637 ring->head = ring->cur = nm_ring_next(ring, i);
645 nm_nextpkt(struct nm_desc *d, struct nm_pkthdr *hdr)
647 int ri = d->cur_rx_ring;
650 /* compute current ring to use */
651 struct netmap_ring *ring = NETMAP_RXRING(d->nifp, ri);
652 if (!nm_ring_empty(ring)) {
654 u_int idx = ring->slot[i].buf_idx;
655 u_char *buf = (u_char *)NETMAP_BUF(ring, idx);
657 // __builtin_prefetch(buf);
659 hdr->len = hdr->caplen = ring->slot[i].len;
660 ring->cur = nm_ring_next(ring, i);
661 /* we could postpone advancing head if we want
662 * to hold the buffer. This can be supported in
665 ring->head = ring->cur;
670 if (ri > d->last_rx_ring)
671 ri = d->first_rx_ring;
672 } while (ri != d->cur_rx_ring);
673 return NULL; /* nothing found */
676 #endif /* !HAVE_NETMAP_WITH_LIBS */
678 #endif /* NETMAP_WITH_LIBS */
680 #endif /* _NET_NETMAP_USER_H_ */