2 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
3 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * $Id: pkt-gen.c 12346 2013-06-12 17:36:25Z luigi $
31 * Example program to show how to build a multithreaded packet
32 * source/sink using the netmap device.
34 * In this example we create a programmable number of threads
35 * to take care of all the queues of the interface used to
36 * send or receive traffic.
40 // #define TRASH_VHOST_HDR
42 #define _GNU_SOURCE /* for CPU_SET() */
44 #define NETMAP_WITH_LIBS
45 #include <net/netmap_user.h>
48 #include <ctype.h> // isprint()
49 #include <unistd.h> // sysconf()
51 #include <arpa/inet.h> /* ntohs */
52 #include <sys/sysctl.h> /* sysctl */
53 #include <ifaddrs.h> /* getifaddrs */
54 #include <net/ethernet.h>
55 #include <netinet/in.h>
56 #include <netinet/ip.h>
57 #include <netinet/udp.h>
62 #include <pcap/pcap.h>
67 #define cpuset_t cpu_set_t
69 #define ifr_flagshigh ifr_flags /* only the low 16 bits here */
70 #define IFF_PPROMISC IFF_PROMISC /* IFF_PPROMISC does not exist */
71 #include <linux/ethtool.h>
72 #include <linux/sockios.h>
74 #define CLOCK_REALTIME_PRECISE CLOCK_REALTIME
75 #include <netinet/ether.h> /* ether_aton */
76 #include <linux/if_packet.h> /* sockaddr_ll */
80 #include <sys/endian.h> /* le64toh */
81 #include <machine/param.h>
83 #include <pthread_np.h> /* pthread w/ affinity */
84 #include <sys/cpuset.h> /* cpu_set */
85 #include <net/if_dl.h> /* LLADDR */
86 #endif /* __FreeBSD__ */
90 #define cpuset_t uint64_t // XXX
91 static inline void CPU_ZERO(cpuset_t *p)
96 static inline void CPU_SET(uint32_t i, cpuset_t *p)
101 #define pthread_setaffinity_np(a, b, c) ((void)a, 0)
103 #define ifr_flagshigh ifr_flags // XXX
104 #define IFF_PPROMISC IFF_PROMISC
105 #include <net/if_dl.h> /* LLADDR */
106 #define clock_gettime(a,b) \
107 do {struct timespec t0 = {0,0}; *(b) = t0; } while (0)
108 #endif /* __APPLE__ */
110 const char *default_payload="netmap pkt-gen DIRECT payload\n"
111 "http://info.iet.unipi.it/~luigi/netmap/ ";
113 const char *indirect_payload="netmap pkt-gen indirect payload\n"
114 "http://info.iet.unipi.it/~luigi/netmap/ ";
118 #define SKIP_PAYLOAD 1 /* do not check payload. XXX unused */
121 #define VIRT_HDR_1 10 /* length of a base vnet-hdr */
122 #define VIRT_HDR_2 12 /* length of the extended vnet-hdr */
123 #define VIRT_HDR_MAX VIRT_HDR_2
125 uint8_t fields[VIRT_HDR_MAX];
128 #define MAX_BODYSIZE 16384
131 struct virt_header vh;
132 struct ether_header eh;
135 uint8_t body[MAX_BODYSIZE]; // XXX hardwired
136 } __attribute__((__packed__));
140 uint32_t start, end; /* same as struct in_addr */
141 uint16_t port0, port1;
146 struct ether_addr start, end;
149 /* ifname can be netmap:foo-xxxx */
150 #define MAX_IFNAMELEN 64 /* our buffer for ifname */
151 //#define MAX_PKTSIZE 1536
152 #define MAX_PKTSIZE MAX_BODYSIZE /* XXX: + IP_HDR + ETH_HDR */
154 /* compact timestamp to fit into 60 byte packet. (enough to obtain RTT) */
161 * global arguments for all threads
165 struct ip_range src_ip;
166 struct ip_range dst_ip;
167 struct mac_range dst_mac;
168 struct mac_range src_mac;
172 int npackets; /* total packets to send */
173 int frags; /* fragments per packet */
176 int options; /* testing */
177 #define OPT_PREFETCH 1
181 #define OPT_TS 16 /* add a timestamp */
182 #define OPT_INDIRECT 32 /* use indirect buffers, tx only */
183 #define OPT_DUMP 64 /* dump rx/tx traffic */
184 #define OPT_MONITOR_TX 128
185 #define OPT_MONITOR_RX 256
192 struct timespec tx_period;
197 int report_interval; /* milliseconds between prints */
198 void *(*td_body)(void *);
200 char ifname[MAX_IFNAMELEN];
203 int virt_header; /* send also the virt_header */
204 int extra_bufs; /* goes in nr_arg3 */
206 enum dev_type { DEV_NONE, DEV_NETMAP, DEV_PCAP, DEV_TAP };
210 * Arguments for a new thread. The same structure is used by
211 * the source and the sink
220 volatile uint64_t count;
221 struct timespec tic, toc;
231 * extract the extremes from a range of ipv4 addresses.
232 * addr_lo[-addr_hi][:port_lo[-port_hi]]
/*
 * NOTE(review): source view is truncated — interior lines are elided here
 * and below; verify any change against the full upstream pkt-gen.c.
 * Parses r->name in place, filling r->start/r->end (host order) and
 * r->port0/r->port1. Missing ports default to 0; start/end and
 * port0/port1 are swapped if given in descending order.
 */
235 extract_ip_range(struct ip_range *r)
241 D("extract IP range from %s", r->name);
242 r->port0 = r->port1 = 0;
243 r->start = r->end = 0;
245 /* the first - splits start/end of range */
246 ap = index(r->name, '-'); /* do we have ports ? */
250 /* grab the initial values (mandatory) */
251 pp = index(r->name, ':');
254 r->port0 = r->port1 = strtol(pp, NULL, 0);
256 inet_aton(r->name, &a);
257 r->start = r->end = ntohl(a.s_addr);
263 r->port1 = strtol(pp, NULL, 0);
267 r->end = ntohl(a.s_addr);
/* normalize: ensure port0 <= port1 and start <= end */
270 if (r->port0 > r->port1) {
271 uint16_t tmp = r->port0;
275 if (r->start > r->end) {
276 uint32_t tmp = r->start;
/* print the range; buf1 holds the textual end address because
 * inet_ntoa() returns a static buffer that would be overwritten. */
282 char buf1[16]; // one ip address
284 a.s_addr = htonl(r->end);
285 strncpy(buf1, inet_ntoa(a), sizeof(buf1));
286 a.s_addr = htonl(r->start);
288 D("range is %s:%d to %s:%d",
289 inet_ntoa(a), r->port0, buf1, r->port1);
/*
 * Parse a MAC address (or range) from r->name into r->start/r->end.
 * NOTE(review): the lines referencing `targ`/`eh` below look like an
 * elided `#if 0` block from upstream (they use identifiers not in this
 * function's scope) — confirm against the full file before touching.
 */
294 extract_mac_range(struct mac_range *r)
297 D("extract MAC range from %s", r->name);
298 bcopy(ether_aton(r->name), &r->start, 6);
299 bcopy(ether_aton(r->name), &r->end, 6);
301 bcopy(targ->src_mac, eh->ether_shost, 6);
302 p = index(targ->g->src_mac, '-');
304 targ->src_mac_range = atoi(p+1);
306 bcopy(ether_aton(targ->g->dst_mac), targ->dst_mac, 6);
307 bcopy(targ->dst_mac, eh->ether_dhost, 6);
308 p = index(targ->g->dst_mac, '-');
310 targ->dst_mac_range = atoi(p+1);
313 D("%s starts at %s", r->name, ether_ntoa(&r->start));
316 static struct targ *targs;
317 static int global_nthreads;
319 /* control-C handler */
/*
 * Asks every worker thread to stop (cancel flag set in the elided loop
 * body), then restores the default SIGINT disposition so a second ^C
 * kills the process outright.
 */
325 (void)sig; /* UNUSED */
326 D("received control-C on thread %p", pthread_self());
327 for (i = 0; i < global_nthreads; i++) {
330 signal(SIGINT, SIG_DFL);
333 /* sysctl wrapper to return the number of active CPUs */
338 #if defined (__FreeBSD__)
339 int mib[2] = { CTL_HW, HW_NCPU };
/* NOTE(review): len should arguably be sizeof(ncpus), not sizeof(mib) —
 * it only works because both happen to be 8 bytes here; confirm and fix
 * in the full source. */
340 size_t len = sizeof(mib);
341 sysctl(mib, 2, &ncpus, &len, NULL, 0);
/* non-FreeBSD path: POSIX sysconf gives the online CPU count */
343 ncpus = sysconf(_SC_NPROCESSORS_ONLN);
351 #define sockaddr_dl sockaddr_ll
352 #define sdl_family sll_family
353 #define AF_LINK AF_PACKET
354 #define LLADDR(s) s->sll_addr;
355 #include <linux/if_tun.h>
356 #define TAP_CLONEDEV "/dev/net/tun"
357 #endif /* __linux__ */
360 #include <net/if_tun.h>
361 #define TAP_CLONEDEV "/dev/tap"
362 #endif /* __FreeBSD */
365 // #warning TAP not supported on apple ?
366 #include <net/if_utun.h>
367 #define TAP_CLONEDEV "/dev/tap"
368 #endif /* __APPLE__ */
372 * parse the vale configuration in conf and put it in nmr.
373 * Return the flag set if necessary.
374 * The configuration may consist of 0 to 4 numbers separated
375 * by commas: #tx-slots,#rx-slots,#tx-rings,#rx-rings.
376 * Missing numbers or zeroes stand for default values.
377 * As an additional convenience, if exactly one number
378 * is specified, then this is assigned to both #tx-slots and #rx-slots.
379 * If there is no 4th number, then the 3rd is assigned to both #tx-rings
383 parse_nmr_config(const char* conf, struct nmreq *nmr)
/* zero everything first: zero means "use driver defaults" */
388 nmr->nr_tx_rings = nmr->nr_rx_rings = 0;
389 nmr->nr_tx_slots = nmr->nr_rx_slots = 0;
390 if (conf == NULL || ! *conf)
/* walk the comma-separated tokens; i selects which field gets the value.
 * NOTE(review): strtok() modifies its argument and keeps static state —
 * fine in this single-threaded parse, but not reentrant. */
393 for (i = 0, tok = strtok(w, ","); tok; i++, tok = strtok(NULL, ",")) {
397 nmr->nr_tx_slots = nmr->nr_rx_slots = v;
400 nmr->nr_rx_slots = v;
403 nmr->nr_tx_rings = nmr->nr_rx_rings = v;
406 nmr->nr_rx_rings = v;
409 D("ignored config: %s", tok);
413 D("txr %d txd %d rxr %d rxd %d",
414 nmr->nr_tx_rings, nmr->nr_tx_slots,
415 nmr->nr_rx_rings, nmr->nr_rx_slots);
/* tell nm_open() whether any non-default ring geometry was requested */
417 return (nmr->nr_tx_rings || nmr->nr_tx_slots ||
418 nmr->nr_rx_rings || nmr->nr_rx_slots) ?
419 NM_OPEN_RING_CFG : 0;
424 * locate the src mac address for our interface, put it
425 * into the user-supplied buffer. return 0 if ok, -1 on error.
428 source_hwaddr(const char *ifname, char *buf)
430 struct ifaddrs *ifaphead, *ifap;
431 int l = sizeof(ifap->ifa_name);
433 if (getifaddrs(&ifaphead) != 0) {
434 D("getifaddrs %s failed", ifname);
/* scan all addresses, keep only the AF_LINK entry matching ifname */
438 for (ifap = ifaphead; ifap; ifap = ifap->ifa_next) {
439 struct sockaddr_dl *sdl =
440 (struct sockaddr_dl *)ifap->ifa_addr;
443 if (!sdl || sdl->sdl_family != AF_LINK)
445 if (strncmp(ifap->ifa_name, ifname, l) != 0)
/* format the 6-byte link-layer address as colon-separated hex.
 * NOTE(review): caller must supply a buffer of at least 18 bytes. */
447 mac = (uint8_t *)LLADDR(sdl);
448 sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x",
449 mac[0], mac[1], mac[2],
450 mac[3], mac[4], mac[5]);
452 D("source hwaddr %s", buf);
455 freeifaddrs(ifaphead);
460 /* set the thread affinity. */
462 setaffinity(pthread_t me, int i)
/* Pin thread `me` to CPU `i`; cpumask is zeroed in an elided line above. */
471 CPU_SET(i, &cpumask);
473 if (pthread_setaffinity_np(me, sizeof(cpuset_t), &cpumask) != 0) {
474 D("Unable to set affinity: %s", strerror(errno));
480 /* Compute the checksum of the given ip header. */
/*
 * One's-complement sum over `len` bytes of `data`, folded into the
 * running 32-bit accumulator `sum` (caller folds/complements via
 * wrapsum()). Pairs of bytes are read in network order.
 */
482 checksum(const void *data, uint16_t len, uint32_t sum)
484 const uint8_t *addr = data;
487 /* Checksum all the pairs of bytes first... */
488 for (i = 0; i < (len & ~1U); i += 2) {
489 sum += (u_int16_t)ntohs(*((u_int16_t *)(addr + i)));
494 * If there's a single byte left over, checksum it, too.
495 * Network byte order is big-endian, so the remaining byte is
/* Fold the 32-bit accumulator from checksum() into a final 16-bit
 * one's-complement checksum (body elided in this view). */
507 wrapsum(u_int32_t sum)
513 /* Check the payload of the packet for errors (use it for debug).
514 * Look for consecutive ascii representations of the size of the packet.
517 dump_payload(char *p, int len, struct netmap_ring *ring, int cur)
522 /* get the length in ASCII of the length of the packet. */
524 printf("ring %p cur %5d [buf %6d flags 0x%04x len %5d]\n",
525 ring, cur, ring->slot[cur].buf_idx,
526 ring->slot[cur].flags, len);
527 /* hexdump routine */
528 for (i = 0; i < len; ) {
/* NOTE(review): memset arguments are swapped — this sets 0 bytes to
 * value sizeof(buf) instead of filling buf with spaces. Should be
 * memset(buf, ' ', sizeof(buf)). Fix in the full source. */
529 memset(buf, sizeof(buf), ' ');
530 sprintf(buf, "%5d: ", i);
/* hex column: 16 bytes per row, 3 chars per byte starting at offset 7 */
532 for (j=0; j < 16 && i < len; i++, j++)
533 sprintf(buf+7+j*3, "%02x ", (uint8_t)(p[i]));
/* ASCII column at offset 55 (7 + 48); non-printables shown as '.' */
535 for (j=0; j < 16 && i < len; i++, j++)
536 sprintf(buf+7+j + 48, "%c",
537 isprint(p[i]) ? p[i] : '.');
543 * Fill a packet with some payload.
544 * We create a UDP packet so the payload starts at
545 * 14+20+8 = 42 bytes.
548 #define uh_sport source
549 #define uh_dport dest
555 * increment the addresses in the packet,
556 * starting from the least significant field.
557 * DST_IP DST_PORT SRC_IP SRC_PORT
/*
 * Each field increments until it hits the top of its configured range,
 * then wraps to the range start and carries into the next field
 * (odometer style). Checksums are NOT updated here — senders using
 * OPT_COPY rely on the NIC/receiver not validating them.
 */
560 update_addresses(struct pkt *pkt, struct glob_arg *g)
564 struct ip *ip = &pkt->ip;
565 struct udphdr *udp = &pkt->udp;
/* source port: plain increment until port1, then wrap */
568 p = ntohs(udp->uh_sport);
569 if (p < g->src_ip.port1) { /* just inc, no wrap */
570 udp->uh_sport = htons(p + 1);
573 udp->uh_sport = htons(g->src_ip.port0);
/* source IP carries when the port wrapped */
575 a = ntohl(ip->ip_src.s_addr);
576 if (a < g->src_ip.end) { /* just inc, no wrap */
577 ip->ip_src.s_addr = htonl(a + 1);
580 ip->ip_src.s_addr = htonl(g->src_ip.start);
582 udp->uh_sport = htons(g->src_ip.port0);
/* destination port / IP: same odometer pattern */
583 p = ntohs(udp->uh_dport);
584 if (p < g->dst_ip.port1) { /* just inc, no wrap */
585 udp->uh_dport = htons(p + 1);
588 udp->uh_dport = htons(g->dst_ip.port0);
590 a = ntohl(ip->ip_dst.s_addr);
591 if (a < g->dst_ip.end) { /* just inc, no wrap */
592 ip->ip_dst.s_addr = htonl(a + 1);
595 ip->ip_dst.s_addr = htonl(g->dst_ip.start);
601 * initialize one packet and prepare for the next one.
602 * The copy could be done better instead of repeating it each time.
605 initialize_packet(struct targ *targ)
607 struct pkt *pkt = &targ->pkt;
608 struct ether_header *eh;
/* payload length = total size minus ethernet and IP headers
 * (UDP header is accounted for via uh_ulen below) */
611 uint16_t paylen = targ->g->pkt_size - sizeof(*eh) - sizeof(struct ip);
612 const char *payload = targ->g->options & OPT_INDIRECT ?
613 indirect_payload : default_payload;
614 int i, l0 = strlen(payload);
616 /* create a nice NUL-terminated string */
617 for (i = 0; i < paylen; i += l0) {
619 l0 = paylen - i; // last round
620 bcopy(payload, pkt->body + i, l0);
622 pkt->body[i-1] = '\0';
625 /* prepare the headers */
626 ip->ip_v = IPVERSION;
629 ip->ip_tos = IPTOS_LOWDELAY;
/* NOTE(review): htons would be the conventional spelling here; ntohs
 * is numerically identical for 16-bit swaps, so behavior is the same. */
630 ip->ip_len = ntohs(targ->g->pkt_size - sizeof(*eh));
632 ip->ip_off = htons(IP_DF); /* Don't fragment */
633 ip->ip_ttl = IPDEFTTL;
634 ip->ip_p = IPPROTO_UDP;
635 ip->ip_dst.s_addr = htonl(targ->g->dst_ip.start);
636 ip->ip_src.s_addr = htonl(targ->g->src_ip.start);
637 ip->ip_sum = wrapsum(checksum(ip, sizeof(*ip), 0));
641 udp->uh_sport = htons(targ->g->src_ip.port0);
642 udp->uh_dport = htons(targ->g->dst_ip.port0);
643 udp->uh_ulen = htons(paylen);
644 /* Magic: taken from sbin/dhclient/packet.c */
/* UDP checksum over header + payload + IPv4 pseudo-header */
645 udp->uh_sum = wrapsum(checksum(udp, sizeof(*udp),
647 paylen - sizeof(*udp),
648 checksum(&ip->ip_src, 2 * sizeof(ip->ip_src),
649 IPPROTO_UDP + (u_int32_t)ntohs(udp->uh_ulen)
/* ethernet header: addresses from the configured ranges */
655 bcopy(&targ->g->src_mac.start, eh->ether_shost, 6);
656 bcopy(&targ->g->dst_mac.start, eh->ether_dhost, 6);
657 eh->ether_type = htons(ETHERTYPE_IP);
/* the optional virtio-net header is zeroed (valid "no offload" header) */
659 bzero(&pkt->vh, sizeof(pkt->vh));
660 #ifdef TRASH_VHOST_HDR
661 /* set bogus content */
662 pkt->vh.fields[0] = 0xff;
663 pkt->vh.fields[1] = 0xff;
664 pkt->vh.fields[2] = 0xff;
665 pkt->vh.fields[3] = 0xff;
666 pkt->vh.fields[4] = 0xff;
667 pkt->vh.fields[5] = 0xff;
668 #endif /* TRASH_VHOST_HDR */
669 // dump_payload((void *)pkt, targ->g->pkt_size, NULL, 0);
/*
 * Tell the kernel the virtio-net header length this thread will
 * prepend to each frame, via a NETMAP_BDG_VNET_HDR request on the
 * already-open netmap file descriptor. Errors are logged, not fatal.
 */
673 set_vnet_hdr_len(struct targ *t)
675 int err, l = t->g->virt_header;
681 memset(&req, 0, sizeof(req));
682 bcopy(t->nmd->req.nr_name, req.nr_name, sizeof(req.nr_name));
683 req.nr_version = NETMAP_API;
684 req.nr_cmd = NETMAP_BDG_VNET_HDR;
686 err = ioctl(t->fd, NIOCREGIF, &req);
688 D("Unable to set vnet header length %d", l);
694 * create and enqueue a batch of packets on a ring.
695 * On the last one set NS_REPORT to tell the driver to generate
696 * an interrupt when done.
/*
 * Returns the number of slots consumed (elided in this view). `frame`
 * points at the prototype packet; depending on `options` the slot either
 * copies it (OPT_COPY/OPT_MEMCPY), references it zero-copy
 * (OPT_INDIRECT), or is left untouched. `nfrags` > 1 spreads each
 * logical packet over consecutive slots chained with NS_MOREFRAG.
 */
699 send_packets(struct netmap_ring *ring, struct pkt *pkt, void *frame,
700 int size, struct glob_arg *g, u_int count, int options,
703 u_int n, sent, cur = ring->cur;
706 n = nm_ring_space(ring);
709 if (count < nfrags) {
710 D("truncating packet, no room for frags %d %d",
/* warm-up pass: touch the buffers first so the copy loop below
 * doesn't stall on cache misses */
714 if (options & (OPT_COPY | OPT_PREFETCH) ) {
715 for (sent = 0; sent < count; sent++) {
716 struct netmap_slot *slot = &ring->slot[cur];
717 char *p = NETMAP_BUF(ring, slot->buf_idx);
719 __builtin_prefetch(p);
720 cur = nm_ring_next(ring, cur);
/* main fill loop; fcnt counts down fragments per packet */
725 for (fcnt = nfrags, sent = 0; sent < count; sent++) {
726 struct netmap_slot *slot = &ring->slot[cur];
727 char *p = NETMAP_BUF(ring, slot->buf_idx);
730 if (options & OPT_INDIRECT) {
731 slot->flags |= NS_INDIRECT;
732 slot->ptr = (uint64_t)frame;
733 } else if (options & OPT_COPY) {
734 nm_pkt_copy(frame, p, size);
736 update_addresses(pkt, g);
737 } else if (options & OPT_MEMCPY) {
738 memcpy(p, frame, size);
740 update_addresses(pkt, g);
741 } else if (options & OPT_PREFETCH) {
742 __builtin_prefetch(p);
744 if (options & OPT_DUMP)
745 dump_payload(p, size, ring, cur);
748 slot->flags |= NS_MOREFRAG;
/* last slot of the batch: clear MOREFRAG, ask for a completion irq */
751 if (sent == count - 1) {
752 slot->flags &= ~NS_MOREFRAG;
753 slot->flags |= NS_REPORT;
755 cur = nm_ring_next(ring, cur);
/* publish the new slots to the kernel */
757 ring->head = ring->cur = cur;
763 * Send a packet, and wait for a response.
764 * The payload (after UDP header, ofs 42) has a 4-byte sequence
765 * followed by a struct timeval (or bintime?)
767 #define PAY_OFS 42 /* where in the pkt... */
/*
 * Thread body for `-f ping`: transmit one timestamped packet at a time
 * on TX ring 0, then poll for echoes and report per-packet RTT plus a
 * once-per-second min/average summary. Restricted to a single thread.
 */
770 pinger_body(void *data)
772 struct targ *targ = (struct targ *) data;
773 struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
774 struct netmap_if *nifp = targ->nmd->nifp;
775 int i, rx = 0, n = targ->g->npackets;
779 struct timespec ts, now, last_print;
780 uint32_t count = 0, min = 1000000000, av = 0;
/* skip the unused leading part of the vnet header, if any */
783 frame += sizeof(targ->pkt.vh) - targ->g->virt_header;
784 size = targ->g->pkt_size + targ->g->virt_header;
787 if (targ->g->nthreads > 1) {
788 D("can only ping with 1 thread");
792 clock_gettime(CLOCK_REALTIME_PRECISE, &last_print);
794 while (n == 0 || (int)sent < n) {
795 struct netmap_ring *ring = NETMAP_TXRING(nifp, 0);
796 struct netmap_slot *slot;
798 for (i = 0; i < 1; i++) { /* XXX why the loop for 1 pkt ? */
799 slot = &ring->slot[ring->cur];
801 p = NETMAP_BUF(ring, slot->buf_idx);
803 if (nm_ring_empty(ring)) {
804 D("-- ouch, cannot send");
/* stamp the packet: sequence number at offset 42, timestamp at 46 */
807 nm_pkt_copy(frame, p, size);
808 clock_gettime(CLOCK_REALTIME_PRECISE, &ts);
809 bcopy(&sent, p+42, sizeof(sent));
810 tp = (struct tstamp *)(p+46);
811 tp->sec = (uint32_t)ts.tv_sec;
812 tp->nsec = (uint32_t)ts.tv_nsec;
814 ring->head = ring->cur = nm_ring_next(ring, ring->cur);
817 /* should use a parameter to decide how often to send */
818 if (poll(&pfd, 1, 3000) <= 0) {
819 D("poll error/timeout on queue %d: %s", targ->me,
823 /* see what we got back */
/* NOTE(review): iterates first_tx_ring..last_tx_ring but reads RX
 * rings — upstream relies on the tx/rx ring ranges coinciding here;
 * confirm against the full file. */
824 for (i = targ->nmd->first_tx_ring;
825 i <= targ->nmd->last_tx_ring; i++) {
826 ring = NETMAP_RXRING(nifp, i);
827 while (!nm_ring_empty(ring)) {
830 slot = &ring->slot[ring->cur];
831 p = NETMAP_BUF(ring, slot->buf_idx);
/* recover the embedded timestamp and compute the RTT */
833 clock_gettime(CLOCK_REALTIME_PRECISE, &now);
834 bcopy(p+42, &seq, sizeof(seq));
835 tp = (struct tstamp *)(p+46);
836 ts.tv_sec = (time_t)tp->sec;
837 ts.tv_nsec = (long)tp->nsec;
838 ts.tv_sec = now.tv_sec - ts.tv_sec;
839 ts.tv_nsec = now.tv_nsec - ts.tv_nsec;
840 if (ts.tv_nsec < 0) {
841 ts.tv_nsec += 1000000000;
844 if (1) D("seq %d/%d delta %d.%09d", seq, sent,
845 (int)ts.tv_sec, (int)ts.tv_nsec);
846 if (ts.tv_nsec < (int)min)
850 ring->head = ring->cur = nm_ring_next(ring, ring->cur);
854 //D("tx %d rx %d", sent, rx);
/* once-per-second summary of count / min / average latency */
856 ts.tv_sec = now.tv_sec - last_print.tv_sec;
857 ts.tv_nsec = now.tv_nsec - last_print.tv_nsec;
858 if (ts.tv_nsec < 0) {
859 ts.tv_nsec += 1000000000;
862 if (ts.tv_sec >= 1) {
863 D("count %d min %d av %d",
864 count, min, av/count);
876 * reply to ping requests
/*
 * Thread body for `-f pong`: drain every RX ring, copy each received
 * frame into TX ring 0 (MAC swap is still a TODO — see the XXX below),
 * then push the batch out with NIOCTXSYNC. Single-threaded only.
 */
879 ponger_body(void *data)
881 struct targ *targ = (struct targ *) data;
882 struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
883 struct netmap_if *nifp = targ->nmd->nifp;
884 struct netmap_ring *txring, *rxring;
885 int i, rx = 0, sent = 0, n = targ->g->npackets;
887 if (targ->g->nthreads > 1) {
888 D("can only reply ping with 1 thread");
891 D("understood ponger %d but don't know how to do it", n);
892 while (n == 0 || sent < n) {
893 uint32_t txcur, txavail;
/* pull fresh packets in, then (elided) poll for readability */
896 ioctl(pfd.fd, NIOCRXSYNC, NULL);
898 if (poll(&pfd, 1, 1000) <= 0) {
899 D("poll error/timeout on queue %d: %s", targ->me,
904 txring = NETMAP_TXRING(nifp, 0);
906 txavail = nm_ring_space(txring);
907 /* see what we got back */
908 for (i = targ->nmd->first_rx_ring; i <= targ->nmd->last_rx_ring; i++) {
909 rxring = NETMAP_RXRING(nifp, i);
910 while (!nm_ring_empty(rxring)) {
911 uint16_t *spkt, *dpkt;
912 uint32_t cur = rxring->cur;
913 struct netmap_slot *slot = &rxring->slot[cur];
915 src = NETMAP_BUF(rxring, slot->buf_idx);
916 //D("got pkt %p of size %d", src, slot->len);
/* consume the RX slot before (elided) checking tx space */
917 rxring->head = rxring->cur = nm_ring_next(rxring, cur);
921 dst = NETMAP_BUF(txring,
922 txring->slot[txcur].buf_idx);
/* copy the whole frame; header rewriting happens via the
 * 16-bit spkt/dpkt views (elided lines) */
924 dpkt = (uint16_t *)dst;
925 spkt = (uint16_t *)src;
926 nm_pkt_copy(src, dst, slot->len);
933 txring->slot[txcur].len = slot->len;
934 /* XXX swap src dst mac */
935 txcur = nm_ring_next(txring, txcur);
940 txring->head = txring->cur = txcur;
943 ioctl(pfd.fd, NIOCTXSYNC, NULL);
945 //D("tx %d rx %d", sent, rx);
/* Return nonzero iff *a >= *b (lexicographic sec, then nsec compare). */
951 timespec_ge(const struct timespec *a, const struct timespec *b)
954 if (a->tv_sec > b->tv_sec)
956 if (a->tv_sec < b->tv_sec)
958 if (a->tv_nsec >= b->tv_nsec)
/* Convert a struct timeval (usec) to struct timespec (nsec). */
963 static __inline struct timespec
964 timeval2spec(const struct timeval *a)
966 struct timespec ts = {
968 .tv_nsec = a->tv_usec * 1000
/* Convert a struct timespec (nsec) to struct timeval (usec, truncating). */
973 static __inline struct timeval
974 timespec2val(const struct timespec *a)
976 struct timeval tv = {
978 .tv_usec = a->tv_nsec / 1000
/* a + b with carry from nsec into sec (inputs assumed normalized). */
984 static __inline struct timespec
985 timespec_add(struct timespec a, struct timespec b)
987 struct timespec ret = { a.tv_sec + b.tv_sec, a.tv_nsec + b.tv_nsec };
988 if (ret.tv_nsec >= 1000000000) {
990 ret.tv_nsec -= 1000000000;
/* a - b with borrow from sec into nsec (inputs assumed normalized). */
995 static __inline struct timespec
996 timespec_sub(struct timespec a, struct timespec b)
998 struct timespec ret = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };
999 if (ret.tv_nsec < 0) {
1001 ret.tv_nsec += 1000000000;
1008 * wait until ts, either busy or sleeping if more than 1ms.
1009 * Return wakeup time.
1011 static struct timespec
1012 wait_time(struct timespec ts)
1015 struct timespec w, cur;
1016 clock_gettime(CLOCK_REALTIME_PRECISE, &cur);
1017 w = timespec_sub(ts, cur);
/* deadline already passed → return immediately; >1ms away → sleep;
 * otherwise busy-wait (elided) for precision */
1020 else if (w.tv_sec > 0 || w.tv_nsec > 1000000)
/*
 * Main transmit thread body (`-f tx`). Supports three back-ends:
 * TAP file descriptor (write), libpcap (pcap_inject) and netmap
 * (send_packets on each TX ring). Optionally rate-limits in bursts
 * using nexttime/wait_time, and drains pending TX before exiting.
 */
1026 sender_body(void *data)
1028 struct targ *targ = (struct targ *) data;
1029 struct pollfd pfd = { .fd = targ->fd, .events = POLLOUT };
1030 struct netmap_if *nifp;
1031 struct netmap_ring *txring;
/* per-thread share of the total packet budget (0 = unlimited) */
1032 int i, n = targ->g->npackets / targ->g->nthreads;
1034 int options = targ->g->options | OPT_COPY;
1035 struct timespec nexttime = { 0, 0}; // XXX silence compiler
1036 int rate_limit = targ->g->tx_rate;
1037 struct pkt *pkt = &targ->pkt;
1042 frame += sizeof(pkt->vh) - targ->g->virt_header;
1043 size = targ->g->pkt_size + targ->g->virt_header;
1045 D("start, fd %d main_fd %d", targ->fd, targ->g->main_fd);
1046 if (setaffinity(targ->thread, targ->affinity))
/* when rate-limiting, align the start time to a whole second ~2s out */
1050 clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic);
1052 targ->tic = timespec_add(targ->tic, (struct timespec){2,0});
1053 targ->tic.tv_nsec = 0;
1054 wait_time(targ->tic);
1055 nexttime = targ->tic;
1057 if (targ->g->dev_type == DEV_TAP) {
1058 D("writing to file desc %d", targ->g->main_fd);
1060 for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) {
1061 if (write(targ->g->main_fd, frame, size) != -1)
1063 update_addresses(pkt, targ->g);
1070 } else if (targ->g->dev_type == DEV_PCAP) {
1071 pcap_t *p = targ->g->p;
1073 for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) {
1074 if (pcap_inject(p, frame, size) != -1)
1076 update_addresses(pkt, targ->g);
1082 #endif /* NO_PCAP */
1085 int frags = targ->g->frags;
1087 nifp = targ->nmd->nifp;
1088 while (!targ->cancel && (n == 0 || sent < n)) {
/* rate limit: replenish the burst budget and wait for the slot */
1090 if (rate_limit && tosend <= 0) {
1091 tosend = targ->g->burst;
1092 nexttime = timespec_add(nexttime, targ->g->tx_period);
1093 wait_time(nexttime);
1097 * wait for available room in the send queue(s)
1099 if (poll(&pfd, 1, 2000) <= 0) {
1102 D("poll error/timeout on queue %d: %s", targ->me,
1106 if (pfd.revents & POLLERR) {
1111 * scan our queues and send on those with room
/* after a warm-up of 100k copied packets, drop OPT_COPY unless the
 * user explicitly asked for it — zero-copy from then on */
1113 if (options & OPT_COPY && sent > 100000 && !(targ->g->options & OPT_COPY) ) {
1115 options &= ~OPT_COPY;
1117 for (i = targ->nmd->first_tx_ring; i <= targ->nmd->last_tx_ring; i++) {
1118 int m, limit = rate_limit ? tosend : targ->g->burst;
1119 if (n > 0 && n - sent < limit)
1121 txring = NETMAP_TXRING(nifp, i);
1122 if (nm_ring_empty(txring))
/* round the limit up to a whole number of fragments */
1125 limit = ((limit + frags - 1) / frags) * frags;
1127 m = send_packets(txring, pkt, frame, size, targ->g,
1128 limit, options, frags);
1129 ND("limit %d tail %d frags %d m %d",
1130 limit, txring->tail, frags, m);
1140 /* flush any remaining packets */
1141 D("flush tail %d head %d on thread %p",
1142 txring->tail, txring->head,
1144 ioctl(pfd.fd, NIOCTXSYNC, NULL);
1146 /* final part: wait all the TX queues to be empty. */
1147 for (i = targ->nmd->first_tx_ring; i <= targ->nmd->last_tx_ring; i++) {
1148 txring = NETMAP_TXRING(nifp, i);
1149 while (nm_tx_pending(txring)) {
1150 RD(5, "pending tx tail %d head %d on ring %d",
1151 txring->tail, txring->head, i);
1152 ioctl(pfd.fd, NIOCTXSYNC, NULL);
1153 usleep(1); /* wait 1 tick */
1156 } /* end DEV_NETMAP */
1158 clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
1159 targ->completed = 1;
1163 /* reset the ``used`` flag. */
/* pcap_dispatch() callback: just bump the per-thread packet counter
 * passed via `user`; the packet contents are ignored. */
1172 receive_pcap(u_char *user, const struct pcap_pkthdr * h,
1173 const u_char * bytes)
1175 int *count = (int *)user;
1176 (void)h; /* UNUSED */
1177 (void)bytes; /* UNUSED */
1180 #endif /* !NO_PCAP */
/*
 * Drain up to `limit` packets from one RX ring, optionally hexdumping
 * each (dump != 0). Returns the number consumed (elided in this view).
 */
1183 receive_packets(struct netmap_ring *ring, u_int limit, int dump)
1188 n = nm_ring_space(ring);
1191 for (rx = 0; rx < limit; rx++) {
1192 struct netmap_slot *slot = &ring->slot[cur];
1193 char *p = NETMAP_BUF(ring, slot->buf_idx);
1196 dump_payload(p, slot->len, ring, cur);
1198 cur = nm_ring_next(ring, cur);
/* return consumed slots to the kernel */
1200 ring->head = ring->cur = cur;
/*
 * Main receive thread body (`-f rx`). Waits (unbounded) for the first
 * packet, then counts traffic from the TAP fd, libpcap, or the netmap
 * RX rings until cancelled or 1s of silence (unless g->forever).
 */
1206 receiver_body(void *data)
1208 struct targ *targ = (struct targ *) data;
1209 struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
1210 struct netmap_if *nifp;
1211 struct netmap_ring *rxring;
1213 uint64_t received = 0;
1215 if (setaffinity(targ->thread, targ->affinity))
1218 D("reading from %s fd %d main_fd %d",
1219 targ->g->ifname, targ->fd, targ->g->main_fd);
1220 /* unbounded wait for the first packet. */
1221 for (;!targ->cancel;) {
1222 i = poll(&pfd, 1, 1000);
1223 if (i > 0 && !(pfd.revents & POLLERR))
1225 RD(1, "waiting for initial packets, poll returns %d %d",
1228 /* main loop, exit after 1s silence */
1229 clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic);
1230 if (targ->g->dev_type == DEV_TAP) {
1231 while (!targ->cancel) {
1232 char buf[MAX_BODYSIZE];
1233 /* XXX should we poll ? */
1234 if (read(targ->g->main_fd, buf, sizeof(buf)) > 0)
1238 } else if (targ->g->dev_type == DEV_PCAP) {
1239 while (!targ->cancel) {
1240 /* XXX should we poll ? */
1241 pcap_dispatch(targ->g->p, targ->g->burst, receive_pcap,
1242 (u_char *)&targ->count);
1244 #endif /* !NO_PCAP */
1246 int dump = targ->g->options & OPT_DUMP;
1248 nifp = targ->nmd->nifp;
1249 while (!targ->cancel) {
1250 /* Once we started to receive packets, wait at most 1 seconds
1252 if (poll(&pfd, 1, 1 * 1000) <= 0 && !targ->g->forever) {
1253 clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
1254 targ->toc.tv_sec -= 1; /* Subtract timeout time. */
1258 if (pfd.revents & POLLERR) {
/* drain every RX ring assigned to this thread */
1263 for (i = targ->nmd->first_rx_ring; i <= targ->nmd->last_rx_ring; i++) {
1266 rxring = NETMAP_RXRING(nifp, i);
1267 if (nm_ring_empty(rxring))
1270 m = receive_packets(rxring, targ->g->burst, dump);
1273 targ->count = received;
1277 clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
1280 targ->completed = 1;
1281 targ->count = received;
1284 /* reset the ``used`` flag. */
1290 /* very crude code to print a number in normalized form.
1291 * Caller has to make sure that the buffer is large enough.
1294 norm(char *buf, double val)
1296 char *units[] = { "", "K", "M", "G", "T" };
/* divide by 1000 (elided) per iteration, picking the matching suffix */
1299 for (i = 0; val >=1000 && i < sizeof(units)/sizeof(char *) - 1; i++)
1301 sprintf(buf, "%.2f %s", val, units[i]);
/* Print the transmit-side summary: packet count, pps, payload
 * bandwidth and on-the-wire ("raw") bandwidth over `delta` seconds. */
1306 tx_output(uint64_t sent, int size, double delta)
1308 double bw, raw_bw, pps;
1309 char b1[40], b2[80], b3[80];
1311 printf("Sent %llu packets, %d bytes each, in %.2f seconds.\n",
1312 (unsigned long long)sent, size, delta);
1315 if (size < 60) /* correct for min packet size */
1318 bw = (8.0 * size * sent) / delta;
1319 /* raw packets have 4 bytes crc + 20 bytes framing */
1320 raw_bw = (8.0 * (size + 24) * sent) / delta;
1322 printf("Speed: %spps Bandwidth: %sbps (raw %sbps)\n",
1323 norm(b1, pps), norm(b2, bw), norm(b3, raw_bw) );
/* Print the receive-side summary: packet count and packets/second. */
1328 rx_output(uint64_t received, double delta)
1333 printf("Received %llu packets, in %.2f seconds.\n",
1334 (unsigned long long) received, delta);
1338 pps = received / delta;
1339 printf("Speed: %spps\n", norm(b1, pps));
/* Command-line help text (printed by the elided usage() function). */
1345 const char *cmd = "pkt-gen";
1349 "\t-i interface interface name\n"
1350 "\t-f function tx rx ping pong\n"
1351 "\t-n count number of iterations (can be 0)\n"
1352 "\t-t pkts_to_send also forces tx mode\n"
1353 "\t-r pkts_to_receive also forces rx mode\n"
1354 "\t-l pkt_size in bytes excluding CRC\n"
1355 "\t-d dst_ip[:port[-dst_ip:port]] single or range\n"
1356 "\t-s src_ip[:port[-src_ip:port]] single or range\n"
1359 "\t-a cpu_id use setaffinity\n"
1360 "\t-b burst size testing, mostly\n"
1361 "\t-c cores cores to use\n"
1362 "\t-p threads processes/threads to use\n"
1363 "\t-T report_ms milliseconds between reports\n"
1364 "\t-P use libpcap instead of netmap\n"
1365 "\t-w wait_for_link_time in seconds\n"
1366 "\t-R rate in packets per second\n"
1367 "\t-X dump payload\n"
1368 "\t-H len add empty virtio-net-header with size 'len'\n"
/*
 * Allocate the global targs[] array and spawn g->nthreads workers.
 * For DEV_NETMAP each thread gets its own nm_open() descriptor (one
 * ring per thread when nthreads > 1); other device types share
 * g->main_fd. Each thread is pinned per g->affinity and runs g->td_body.
 */
1376 start_threads(struct glob_arg *g)
1380 targs = calloc(g->nthreads, sizeof(*targs));
1382 * Now create the desired number of threads, each one
1383 * using a single descriptor.
1385 for (i = 0; i < g->nthreads; i++) {
1386 struct targ *t = &targs[i];
1388 bzero(t, sizeof(*t));
1389 t->fd = -1; /* default, with pcap */
1392 if (g->dev_type == DEV_NETMAP) {
1393 struct nm_desc nmd = *g->nmd; /* copy, we overwrite ringid */
1394 uint64_t nmd_flags = 0;
/* multi-thread mode requires the parent to be bound to all rings,
 * then each child binds exactly one hw ring (ring i) */
1397 if (g->nthreads > 1) {
1398 if (nmd.req.nr_flags != NR_REG_ALL_NIC) {
1399 D("invalid nthreads mode %d", nmd.req.nr_flags);
1402 nmd.req.nr_flags = NR_REG_ONE_NIC;
1403 nmd.req.nr_ringid = i;
1405 /* Only touch one of the rings (rx is already ok) */
1406 if (g->td_body == receiver_body)
1407 nmd_flags |= NETMAP_NO_TX_POLL;
1409 /* register interface. Override ifname and ringid etc. */
1410 if (g->options & OPT_MONITOR_TX)
1411 nmd.req.nr_flags |= NR_MONITOR_TX;
1412 if (g->options & OPT_MONITOR_RX)
1413 nmd.req.nr_flags |= NR_MONITOR_RX;
1415 t->nmd = nm_open(t->g->ifname, NULL, nmd_flags |
1416 NM_OPEN_IFNAME | NM_OPEN_NO_MMAP, &nmd);
1417 if (t->nmd == NULL) {
1418 D("Unable to open %s: %s",
1419 t->g->ifname, strerror(errno));
1423 set_vnet_hdr_len(t);
1426 targs[i].fd = g->main_fd;
/* pin to the requested CPU, or round-robin across g->cpus */
1430 if (g->affinity >= 0) {
1431 if (g->affinity < g->cpus)
1432 t->affinity = g->affinity;
1434 t->affinity = i % g->cpus;
1438 /* default, init packets */
1439 initialize_packet(t);
/* NOTE(review): pthread_create() returns an errno value, not -1;
 * comparing == -1 misses real failures. Confirm in the full source. */
1441 if (pthread_create(&t->thread, NULL, g->td_body, t) == -1) {
1442 D("Unable to create thread %d: %s", i, strerror(errno));
/*
 * Supervisor loop: every report_interval ms, sum per-thread counters
 * and print the instantaneous pps; when all threads finish, join them,
 * derive the overall [tic, toc] window from the earliest/latest thread
 * timestamps and print the tx or rx summary.
 */
1449 main_thread(struct glob_arg *g)
1456 struct timeval tic, toc;
1458 gettimeofday(&toc, NULL);
1460 struct timeval now, delta;
1461 uint64_t pps, usec, my_count, npkts;
/* sleep for the report interval using select() as a portable timer */
1464 delta.tv_sec = g->report_interval/1000;
1465 delta.tv_usec = (g->report_interval%1000)*1000;
1466 select(0, NULL, NULL, NULL, &delta);
1467 gettimeofday(&now, NULL);
1468 timersub(&now, &toc, &toc);
1470 for (i = 0; i < g->nthreads; i++) {
1471 my_count += targs[i].count;
1472 if (targs[i].used == 0)
1475 usec = toc.tv_sec* 1000000 + toc.tv_usec;
/* pps rounded to nearest: add half the divisor before dividing */
1478 npkts = my_count - prev;
1479 pps = (npkts*1000000 + usec/2) / usec;
1480 D("%llu pps (%llu pkts in %llu usec)",
1481 (unsigned long long)pps,
1482 (unsigned long long)npkts,
1483 (unsigned long long)usec);
1486 if (done == g->nthreads)
1492 for (i = 0; i < g->nthreads; i++) {
1493 struct timespec t_tic, t_toc;
1495 * Join active threads, unregister interfaces and close
1499 pthread_join(targs[i].thread, NULL);
1502 if (targs[i].completed == 0)
1503 D("ouch, thread %d exited with error", i);
1506 * Collect threads output and extract information about
1507 * how long it took to send all the packets.
1509 count += targs[i].count;
/* widen [tic, toc] to cover every thread's active window */
1510 t_tic = timeval2spec(&tic);
1511 t_toc = timeval2spec(&toc);
1512 if (!timerisset(&tic) || timespec_ge(&targs[i].tic, &t_tic))
1513 tic = timespec2val(&targs[i].tic);
1514 if (!timerisset(&toc) || timespec_ge(&targs[i].toc, &t_toc))
1515 toc = timespec2val(&targs[i].toc);
1519 timersub(&toc, &tic, &toc);
1520 delta_t = toc.tv_sec + 1e-6* toc.tv_usec;
1521 if (g->td_body == sender_body)
1522 tx_output(count, g->pkt_size, delta_t);
1524 rx_output(count, delta_t);
1526 if (g->dev_type == DEV_NETMAP) {
1527 munmap(g->nmd->mem, g->nmd->req.nr_memsize);
/* Map the -f argument ("tx", "rx", "ping", "pong") to a thread body. */
1538 static struct sf func[] = {
1539 { "tx", sender_body },
1540 { "rx", receiver_body },
1541 { "ping", pinger_body },
1542 { "pong", ponger_body },
/*
 * Open (or create) the tap device whose name is passed in 'dev' and
 * return its file descriptor (negative on failure).  The kernel
 * writes the actual interface name back into 'dev', so the caller
 * MUST provide enough space (see the argument comment block below).
 *
 * NOTE(review): several lines of this function (returns, closing
 * braces) are elided in this excerpt.
 */
1547 tap_alloc(char *dev)
1551 char *clonedev = TAP_CLONEDEV;
1555 /* Arguments taken by the function:
1557 * char *dev: the name of an interface (or '\0'). MUST have enough
1558 * space to hold the interface name if '\0' is passed
1559 * int flags: interface flags (eg, IFF_TUN etc.)
/* a name longer than plain "tap" selects a specific /dev node */
1563 if (dev[3]) { /* tapSomething */
1564 static char buf[128];
1565 snprintf(buf, sizeof(buf), "/dev/%s", dev);
1569 /* open the device */
1570 if( (fd = open(clonedev, O_RDWR)) < 0 ) {
1573 D("%s open successful", clonedev);
1575 /* preparation of the struct ifr, of type "struct ifreq" */
1576 memset(&ifr, 0, sizeof(ifr));
/* IFF_TAP: layer-2 frames; IFF_NO_PI: no extra packet-info header */
1579 ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
1582 /* if a device name was specified, put it in the structure; otherwise,
1583 * the kernel will try to allocate the "next" device of the
1585 strncpy(ifr.ifr_name, dev, IFNAMSIZ);
1588 /* try to create the device */
/* NOTE(review): "to to" typo in the log message below */
1589 if( (err = ioctl(fd, TUNSETIFF, (void *) &ifr)) < 0 ) {
1590 D("failed to to a TUNSETIFF: %s", strerror(errno));
1595 /* if the operation was successful, write back the name of the
1596 * interface to the variable "dev", so the caller can know
1597 * it. Note that the caller MUST reserve space in *dev (see calling
1599 strcpy(dev, ifr.ifr_name);
1600 D("new name is %s", dev);
1603 /* this is the special file descriptor that the caller will use to talk
1604 * with the virtual interface */
1609 main(int arc, char **argv)
1617 int devqueues = 1; /* how many device queues */
1619 bzero(&g, sizeof(g));
1622 g.td_body = receiver_body;
1623 g.report_interval = 1000; /* report interval */
1625 /* ip addresses can also be a range x.x.x.x-x.x.x.y */
1626 g.src_ip.name = "10.0.0.1";
1627 g.dst_ip.name = "10.1.0.1";
1628 g.dst_mac.name = "ff:ff:ff:ff:ff:ff";
1629 g.src_mac.name = NULL;
1631 g.burst = 512; // default
1640 while ( (ch = getopt(arc, argv,
1641 "a:f:F:n:i:Il:d:s:D:S:b:c:o:p:T:w:WvR:XC:H:e:m:")) != -1) {
1646 D("bad option %c %s", ch, optarg);
1651 g.npackets = atoi(optarg);
1656 if (i < 1 || i > 63) {
1657 D("invalid frags %d [1..63], ignore", i);
1664 for (fn = func; fn->key; fn++) {
1665 if (!strcmp(fn->key, optarg))
1671 D("unrecognised function %s", optarg);
1674 case 'o': /* data generation options */
1675 g.options = atoi(optarg);
1678 case 'a': /* force affinity */
1679 g.affinity = atoi(optarg);
1682 case 'i': /* interface */
1683 /* a prefix of tap: netmap: or pcap: forces the mode.
1684 * otherwise we guess
1686 D("interface is %s", optarg);
1687 if (strlen(optarg) > MAX_IFNAMELEN - 8) {
1688 D("ifname too long %s", optarg);
1691 strcpy(g.ifname, optarg);
1692 if (!strcmp(optarg, "null")) {
1693 g.dev_type = DEV_NETMAP;
1695 } else if (!strncmp(optarg, "tap:", 4)) {
1696 g.dev_type = DEV_TAP;
1697 strcpy(g.ifname, optarg + 4);
1698 } else if (!strncmp(optarg, "pcap:", 5)) {
1699 g.dev_type = DEV_PCAP;
1700 strcpy(g.ifname, optarg + 5);
1701 } else if (!strncmp(optarg, "netmap:", 7) ||
1702 !strncmp(optarg, "vale", 4)) {
1703 g.dev_type = DEV_NETMAP;
1704 } else if (!strncmp(optarg, "tap", 3)) {
1705 g.dev_type = DEV_TAP;
1706 } else { /* prepend netmap: */
1707 g.dev_type = DEV_NETMAP;
1708 sprintf(g.ifname, "netmap:%s", optarg);
1713 g.options |= OPT_INDIRECT; /* XXX use indirect buffer */
1716 case 'l': /* pkt_size */
1717 g.pkt_size = atoi(optarg);
1721 g.dst_ip.name = optarg;
1725 g.src_ip.name = optarg;
1728 case 'T': /* report interval */
1729 g.report_interval = atoi(optarg);
1733 wait_link = atoi(optarg);
1736 case 'W': /* XXX changed default */
1737 g.forever = 0; /* do not exit rx even with no traffic */
1740 case 'b': /* burst */
1741 g.burst = atoi(optarg);
1744 g.cpus = atoi(optarg);
1747 g.nthreads = atoi(optarg);
1750 case 'D': /* destination mac */
1751 g.dst_mac.name = optarg;
1754 case 'S': /* source mac */
1755 g.src_mac.name = optarg;
1761 g.tx_rate = atoi(optarg);
1764 g.options |= OPT_DUMP;
1767 g.nmr_config = strdup(optarg);
1770 g.virt_header = atoi(optarg);
1772 case 'e': /* extra bufs */
1773 g.extra_bufs = atoi(optarg);
1776 if (strcmp(optarg, "tx") == 0) {
1777 g.options |= OPT_MONITOR_TX;
1778 } else if (strcmp(optarg, "rx") == 0) {
1779 g.options |= OPT_MONITOR_RX;
1781 D("unrecognized monitor mode %s", optarg);
1787 if (g.ifname == NULL) {
1788 D("missing ifname");
1793 if (g.cpus < 0 || g.cpus > i) {
1794 D("%d cpus is too high, have only %d cpus", g.cpus, i);
1800 if (g.pkt_size < 16 || g.pkt_size > MAX_PKTSIZE) {
1801 D("bad pktsize %d [16..%d]\n", g.pkt_size, MAX_PKTSIZE);
1805 if (g.src_mac.name == NULL) {
1806 static char mybuf[20] = "00:00:00:00:00:00";
1807 /* retrieve source mac address. */
1808 if (source_hwaddr(g.ifname, mybuf) == -1) {
1809 D("Unable to retrieve source mac");
1810 // continue, fail later
1812 g.src_mac.name = mybuf;
1814 /* extract address ranges */
1815 extract_ip_range(&g.src_ip);
1816 extract_ip_range(&g.dst_ip);
1817 extract_mac_range(&g.src_mac);
1818 extract_mac_range(&g.dst_mac);
1820 if (g.src_ip.start != g.src_ip.end ||
1821 g.src_ip.port0 != g.src_ip.port1 ||
1822 g.dst_ip.start != g.dst_ip.end ||
1823 g.dst_ip.port0 != g.dst_ip.port1)
1824 g.options |= OPT_COPY;
1826 if (g.virt_header != 0 && g.virt_header != VIRT_HDR_1
1827 && g.virt_header != VIRT_HDR_2) {
1828 D("bad virtio-net-header length");
1832 if (g.dev_type == DEV_TAP) {
1833 D("want to use tap %s", g.ifname);
1834 g.main_fd = tap_alloc(g.ifname);
1835 if (g.main_fd < 0) {
1836 D("cannot open tap %s", g.ifname);
1840 } else if (g.dev_type == DEV_PCAP) {
1841 char pcap_errbuf[PCAP_ERRBUF_SIZE];
1843 pcap_errbuf[0] = '\0'; // init the buffer
1844 g.p = pcap_open_live(g.ifname, 256 /* XXX */, 1, 100, pcap_errbuf);
1846 D("cannot open pcap on %s", g.ifname);
1849 g.main_fd = pcap_fileno(g.p);
1850 D("using pcap on %s fileno %d", g.ifname, g.main_fd);
1851 #endif /* !NO_PCAP */
1852 } else if (g.dummy_send) { /* but DEV_NETMAP */
1853 D("using a dummy send routine");
1855 struct nmreq base_nmd;
1857 bzero(&base_nmd, sizeof(base_nmd));
1859 parse_nmr_config(g.nmr_config, &base_nmd);
1861 base_nmd.nr_arg3 = g.extra_bufs;
1865 * Open the netmap device using nm_open().
1867 * protocol stack and may cause a reset of the card,
1868 * which in turn may take some time for the PHY to
1869 * reconfigure. We do the open here to have time to reset.
1871 g.nmd = nm_open(g.ifname, &base_nmd, 0, NULL);
1872 if (g.nmd == NULL) {
1873 D("Unable to open %s: %s", g.ifname, strerror(errno));
1876 g.main_fd = g.nmd->fd;
1877 D("mapped %dKB at %p", g.nmd->req.nr_memsize>>10, g.nmd->mem);
1879 /* get num of queues in tx or rx */
1880 if (g.td_body == sender_body)
1881 devqueues = g.nmd->req.nr_tx_rings;
1883 devqueues = g.nmd->req.nr_rx_rings;
1885 /* validate provided nthreads. */
1886 if (g.nthreads < 1 || g.nthreads > devqueues) {
1887 D("bad nthreads %d, have %d queues", g.nthreads, devqueues);
1888 // continue, fail later
1892 struct netmap_if *nifp = g.nmd->nifp;
1893 struct nmreq *req = &g.nmd->req;
1895 D("nifp at offset %d, %d tx %d rx region %d",
1896 req->nr_offset, req->nr_tx_rings, req->nr_rx_rings,
1898 for (i = 0; i <= req->nr_tx_rings; i++) {
1899 struct netmap_ring *ring = NETMAP_TXRING(nifp, i);
1900 D(" TX%d at 0x%lx slots %d", i,
1901 (char *)ring - (char *)nifp, ring->num_slots);
1903 for (i = 0; i <= req->nr_rx_rings; i++) {
1904 struct netmap_ring *ring = NETMAP_RXRING(nifp, i);
1905 D(" RX%d at 0x%lx slots %d", i,
1906 (char *)ring - (char *)nifp, ring->num_slots);
1910 /* Print some debug information. */
1912 "%s %s: %d queues, %d threads and %d cpus.\n",
1913 (g.td_body == sender_body) ? "Sending on" : "Receiving from",
1918 if (g.td_body == sender_body) {
1919 fprintf(stdout, "%s -> %s (%s -> %s)\n",
1920 g.src_ip.name, g.dst_ip.name,
1921 g.src_mac.name, g.dst_mac.name);
1925 /* Exit if something went wrong. */
1926 if (g.main_fd < 0) {
1934 D("--- SPECIAL OPTIONS:%s%s%s%s%s\n",
1935 g.options & OPT_PREFETCH ? " prefetch" : "",
1936 g.options & OPT_ACCESS ? " access" : "",
1937 g.options & OPT_MEMCPY ? " memcpy" : "",
1938 g.options & OPT_INDIRECT ? " indirect" : "",
1939 g.options & OPT_COPY ? " copy" : "");
1942 g.tx_period.tv_sec = g.tx_period.tv_nsec = 0;
1943 if (g.tx_rate > 0) {
1944 /* try to have at least something every second,
1945 * reducing the burst size to some 0.01s worth of data
1946 * (but no less than one full set of fragments)
1949 int lim = (g.tx_rate)/300;
1952 if (g.burst < g.frags)
1954 x = ((uint64_t)1000000000 * (uint64_t)g.burst) / (uint64_t) g.tx_rate;
1955 g.tx_period.tv_nsec = x;
1956 g.tx_period.tv_sec = g.tx_period.tv_nsec / 1000000000;
1957 g.tx_period.tv_nsec = g.tx_period.tv_nsec % 1000000000;
1959 if (g.td_body == sender_body)
1960 D("Sending %d packets every %ld.%09ld s",
1961 g.burst, g.tx_period.tv_sec, g.tx_period.tv_nsec);
1962 /* Wait for PHY reset. */
1963 D("Wait %d secs for phy reset", wait_link);
1967 /* Install ^C handler. */
1968 global_nthreads = g.nthreads;
1969 signal(SIGINT, sigint_h);