2 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
3 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * $Id: pkt-gen.c 12346 2013-06-12 17:36:25Z luigi $
31 * Example program to show how to build a multithreaded packet
32 * source/sink using the netmap device.
34 * In this example we create a programmable number of threads
35 * to take care of all the queues of the interface used to
36 * send or receive traffic.
40 // #define TRASH_VHOST_HDR
42 #define _GNU_SOURCE /* for CPU_SET() */
44 #define NETMAP_WITH_LIBS
45 #include <net/netmap_user.h>
48 #include <ctype.h> // isprint()
49 #include <unistd.h> // sysconf()
51 #include <arpa/inet.h> /* ntohs */
52 #include <sys/sysctl.h> /* sysctl */
53 #include <ifaddrs.h> /* getifaddrs */
54 #include <net/ethernet.h>
55 #include <netinet/in.h>
56 #include <netinet/ip.h>
57 #include <netinet/udp.h>
62 #include <pcap/pcap.h>
67 #define cpuset_t cpu_set_t
69 #define ifr_flagshigh ifr_flags /* only the low 16 bits here */
70 #define IFF_PPROMISC IFF_PROMISC /* IFF_PPROMISC does not exist */
71 #include <linux/ethtool.h>
72 #include <linux/sockios.h>
74 #define CLOCK_REALTIME_PRECISE CLOCK_REALTIME
75 #include <netinet/ether.h> /* ether_aton */
76 #include <linux/if_packet.h> /* sockaddr_ll */
80 #include <sys/endian.h> /* le64toh */
81 #include <machine/param.h>
83 #include <pthread_np.h> /* pthread w/ affinity */
84 #include <sys/cpuset.h> /* cpu_set */
85 #include <net/if_dl.h> /* LLADDR */
86 #endif /* __FreeBSD__ */
90 #define cpuset_t uint64_t // XXX
91 static inline void CPU_ZERO(cpuset_t *p)
96 static inline void CPU_SET(uint32_t i, cpuset_t *p)
101 #define pthread_setaffinity_np(a, b, c) ((void)a, 0)
103 #define ifr_flagshigh ifr_flags // XXX
104 #define IFF_PPROMISC IFF_PROMISC
105 #include <net/if_dl.h> /* LLADDR */
106 #define clock_gettime(a,b) \
107 do {struct timespec t0 = {0,0}; *(b) = t0; } while (0)
108 #endif /* __APPLE__ */
110 const char *default_payload="netmap pkt-gen DIRECT payload\n"
111 "http://info.iet.unipi.it/~luigi/netmap/ ";
113 const char *indirect_payload="netmap pkt-gen indirect payload\n"
114 "http://info.iet.unipi.it/~luigi/netmap/ ";
118 #define SKIP_PAYLOAD 1 /* do not check payload. XXX unused */
121 #define VIRT_HDR_1 10 /* length of a base vnet-hdr */
122 #define VIRT_HDR_2 12 /* length of the extenede vnet-hdr */
123 #define VIRT_HDR_MAX VIRT_HDR_2
125 uint8_t fields[VIRT_HDR_MAX];
128 #define MAX_BODYSIZE 16384
131 struct virt_header vh;
132 struct ether_header eh;
135 uint8_t body[MAX_BODYSIZE]; // XXX hardwired
136 } __attribute__((__packed__));
140 uint32_t start, end; /* same as struct in_addr */
141 uint16_t port0, port1;
146 struct ether_addr start, end;
149 /* ifname can be netmap:foo-xxxx */
150 #define MAX_IFNAMELEN 64 /* our buffer for ifname */
151 //#define MAX_PKTSIZE 1536
152 #define MAX_PKTSIZE MAX_BODYSIZE /* XXX: + IP_HDR + ETH_HDR */
154 /* compact timestamp to fit into 60 byte packet. (enough to obtain RTT) */
161 * global arguments for all threads
165 struct ip_range src_ip;
166 struct ip_range dst_ip;
167 struct mac_range dst_mac;
168 struct mac_range src_mac;
172 int npackets; /* total packets to send */
173 int frags; /* fragments per packet */
176 int options; /* testing */
177 #define OPT_PREFETCH 1
181 #define OPT_TS 16 /* add a timestamp */
182 #define OPT_INDIRECT 32 /* use indirect buffers, tx only */
183 #define OPT_DUMP 64 /* dump rx/tx traffic */
184 #define OPT_MONITOR_TX 128
185 #define OPT_MONITOR_RX 256
186 #define OPT_RANDOM_SRC 512
187 #define OPT_RANDOM_DST 1024
194 struct timespec tx_period;
199 int report_interval; /* milliseconds between prints */
200 void *(*td_body)(void *);
202 char ifname[MAX_IFNAMELEN];
205 int virt_header; /* send also the virt_header */
206 int extra_bufs; /* goes in nr_arg3 */
207 char *packet_file; /* -P option */
209 enum dev_type { DEV_NONE, DEV_NETMAP, DEV_PCAP, DEV_TAP };
213 * Arguments for a new thread. The same structure is used by
214 * the source and the sink
223 volatile uint64_t count;
224 struct timespec tic, toc;
235 * extract the extremes from a range of ipv4 addresses.
236 * addr_lo[-addr_hi][:port_lo[-port_hi]]
/*
 * Parses r->name in place and fills r->start/r->end (host order) and
 * r->port0/r->port1, swapping each pair if given in reverse order.
 * NOTE(review): extraction has elided interior lines of this function;
 * code below is kept byte-identical, comments only added.
 */
239 extract_ip_range(struct ip_range *r)
245 D("extract IP range from %s", r->name);
246 r->port0 = r->port1 = 0;
247 r->start = r->end = 0;
249 /* the first - splits start/end of range */
250 ap = index(r->name, '-'); /* do we have ports ? */
254 /* grab the initial values (mandatory) */
255 pp = index(r->name, ':');
/* NOTE(review): index() is legacy BSD; strchr() is the portable spelling. */
258 r->port0 = r->port1 = strtol(pp, NULL, 0);
260 inet_aton(r->name, &a);
261 r->start = r->end = ntohl(a.s_addr);
267 r->port1 = strtol(pp, NULL, 0);
271 r->end = ntohl(a.s_addr);
/* normalize so port0 <= port1 and start <= end */
274 if (r->port0 > r->port1) {
275 uint16_t tmp = r->port0;
279 if (r->start > r->end) {
280 uint32_t tmp = r->start;
/* buf1 holds the second address because inet_ntoa() reuses a static buffer */
286 char buf1[16]; // one ip address
288 a.s_addr = htonl(r->end);
289 strncpy(buf1, inet_ntoa(a), sizeof(buf1));
290 a.s_addr = htonl(r->start);
292 D("range is %s:%d to %s:%d",
293 inet_ntoa(a), r->port0, buf1, r->port1);
/*
 * Parse a MAC address (or range) from r->name into r->start / r->end.
 * Currently start and end are both set to the same parsed address.
 * NOTE(review): extraction has elided interior lines; the targ-based
 * lines below (305-314) look like disabled/older code pulled in from an
 * #if 0 region upstream — confirm before relying on them.
 */
298 extract_mac_range(struct mac_range *r)
301 D("extract MAC range from %s", r->name);
302 bcopy(ether_aton(r->name), &r->start, 6);
303 bcopy(ether_aton(r->name), &r->end, 6);
305 bcopy(targ->src_mac, eh->ether_shost, 6);
306 p = index(targ->g->src_mac, '-');
308 targ->src_mac_range = atoi(p+1);
310 bcopy(ether_aton(targ->g->dst_mac), targ->dst_mac, 6);
311 bcopy(targ->dst_mac, eh->ether_dhost, 6);
312 p = index(targ->g->dst_mac, '-');
314 targ->dst_mac_range = atoi(p+1);
317 D("%s starts at %s", r->name, ether_ntoa(&r->start));
320 static struct targ *targs;
321 static int global_nthreads;
323 /* control-C handler */
/*
 * SIGINT handler: asks every worker thread to stop (loop over
 * global_nthreads), then restores the default handler so a second
 * control-C kills the process outright.
 * NOTE(review): interior lines elided by extraction.
 */
329 (void)sig; /* UNUSED */
330 D("received control-C on thread %p", pthread_self());
331 for (i = 0; i < global_nthreads; i++) {
334 signal(SIGINT, SIG_DFL);
337 /* sysctl wrapper to return the number of active CPUs */
/*
 * On FreeBSD uses sysctl(HW_NCPU); elsewhere falls back to
 * sysconf(_SC_NPROCESSORS_ONLN).
 * NOTE(review): 'len' is initialized to sizeof(mib) rather than
 * sizeof(ncpus); it happens to be large enough but looks accidental.
 */
342 #if defined (__FreeBSD__)
343 int mib[2] = { CTL_HW, HW_NCPU };
344 size_t len = sizeof(mib);
345 sysctl(mib, 2, &ncpus, &len, NULL, 0);
347 ncpus = sysconf(_SC_NPROCESSORS_ONLN);
355 #define sockaddr_dl sockaddr_ll
356 #define sdl_family sll_family
357 #define AF_LINK AF_PACKET
358 #define LLADDR(s) s->sll_addr;
359 #include <linux/if_tun.h>
360 #define TAP_CLONEDEV "/dev/net/tun"
361 #endif /* __linux__ */
364 #include <net/if_tun.h>
365 #define TAP_CLONEDEV "/dev/tap"
366 #endif /* __FreeBSD */
369 // #warning TAP not supported on apple ?
370 #include <net/if_utun.h>
371 #define TAP_CLONEDEV "/dev/tap"
372 #endif /* __APPLE__ */
376 * parse the vale configuration in conf and put it in nmr.
377 * Return the flag set if necessary.
378 * The configuration may consist of 0 to 4 numbers separated
379 * by commas: #tx-slots,#rx-slots,#tx-rings,#rx-rings.
380 * Missing numbers or zeroes stand for default values.
381 * As an additional convenience, if exactly one number
382 * is specified, then this is assigned to both #tx-slots and #rx-slots.
383 * If there is no 4th number, then the 3rd is assigned to both #tx-rings
/* NOTE(review): interior lines elided by extraction; code kept byte-identical. */
387 parse_nmr_config(const char* conf, struct nmreq *nmr)
392 nmr->nr_tx_rings = nmr->nr_rx_rings = 0;
393 nmr->nr_tx_slots = nmr->nr_rx_slots = 0;
394 if (conf == NULL || ! *conf)
/* walk the comma-separated fields; i selects which nmreq field gets v */
397 for (i = 0, tok = strtok(w, ","); tok; i++, tok = strtok(NULL, ",")) {
401 nmr->nr_tx_slots = nmr->nr_rx_slots = v;
404 nmr->nr_rx_slots = v;
407 nmr->nr_tx_rings = nmr->nr_rx_rings = v;
410 nmr->nr_rx_rings = v;
413 D("ignored config: %s", tok);
417 D("txr %d txd %d rxr %d rxd %d",
418 nmr->nr_tx_rings, nmr->nr_tx_slots,
419 nmr->nr_rx_rings, nmr->nr_rx_slots);
/* return NM_OPEN_RING_CFG only if at least one field was overridden */
421 return (nmr->nr_tx_rings || nmr->nr_tx_slots ||
422 nmr->nr_rx_rings || nmr->nr_rx_slots) ?
423 NM_OPEN_RING_CFG : 0;
428 * locate the src mac address for our interface, put it
429 * into the user-supplied buffer. return 0 if ok, -1 on error.
/* NOTE(review): interior lines elided by extraction; code kept byte-identical. */
432 source_hwaddr(const char *ifname, char *buf)
434 struct ifaddrs *ifaphead, *ifap;
/*
 * NOTE(review): on Linux ifa_name is a char *, so sizeof() here is the
 * pointer size (8), not the name length — strncmp below then compares
 * only the first 8 characters. Confirm intent; strcmp would be safer.
 */
435 int l = sizeof(ifap->ifa_name);
437 if (getifaddrs(&ifaphead) != 0) {
438 D("getifaddrs %s failed", ifname);
/* scan the list for an AF_LINK entry matching ifname */
442 for (ifap = ifaphead; ifap; ifap = ifap->ifa_next) {
443 struct sockaddr_dl *sdl =
444 (struct sockaddr_dl *)ifap->ifa_addr;
447 if (!sdl || sdl->sdl_family != AF_LINK)
449 if (strncmp(ifap->ifa_name, ifname, l) != 0)
451 mac = (uint8_t *)LLADDR(sdl);
452 sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x",
453 mac[0], mac[1], mac[2],
454 mac[3], mac[4], mac[5]);
456 D("source hwaddr %s", buf);
459 freeifaddrs(ifaphead);
464 /* set the thread affinity. */
/*
 * Pin thread 'me' to CPU 'i' via pthread_setaffinity_np (a no-op stub
 * on platforms where the macro above replaces it).
 * NOTE(review): interior lines elided by extraction.
 */
466 setaffinity(pthread_t me, int i)
473 /* Set thread affinity affinity.*/
475 CPU_SET(i, &cpumask);
477 if (pthread_setaffinity_np(me, sizeof(cpuset_t), &cpumask) != 0) {
478 D("Unable to set affinity: %s", strerror(errno));
/*
 * Compute the Internet checksum (RFC 1071) over 'len' bytes of 'data',
 * folding into the running accumulator 'sum' (one's-complement sum,
 * kept folded to 16 bits). The result is in host byte order; callers
 * finalize it with wrapsum() before storing it in a header.
 */
static uint16_t
checksum(const void *data, uint16_t len, uint32_t sum)
{
	const uint8_t *addr = data;
	uint32_t i;

	/* Checksum all the pairs of bytes first... */
	for (i = 0; i < (len & ~1U); i += 2) {
		sum += (uint16_t)ntohs(*((const uint16_t *)(addr + i)));
		if (sum > 0xFFFF)
			sum -= 0xFFFF;	/* one's-complement end-around carry */
	}
	/*
	 * If there's a single byte left over, checksum it, too.
	 * Network byte order is big-endian, so the remaining byte is
	 * the high byte.
	 */
	if (i < len) {
		sum += addr[i] << 8;
		if (sum > 0xFFFF)
			sum -= 0xFFFF;
	}
	return (sum);
}
511 wrapsum(u_int32_t sum)
517 /* Check the payload of the packet for errors (use it for debug).
518 * Look for consecutive ascii representations of the size of the packet.
521 dump_payload(char *p, int len, struct netmap_ring *ring, int cur)
526 /* get the length in ASCII of the length of the packet. */
528 printf("ring %p cur %5d [buf %6d flags 0x%04x len %5d]\n",
529 ring, cur, ring->slot[cur].buf_idx,
530 ring->slot[cur].flags, len);
531 /* hexdump routine */
532 for (i = 0; i < len; ) {
533 memset(buf, sizeof(buf), ' ');
534 sprintf(buf, "%5d: ", i);
536 for (j=0; j < 16 && i < len; i++, j++)
537 sprintf(buf+7+j*3, "%02x ", (uint8_t)(p[i]));
539 for (j=0; j < 16 && i < len; i++, j++)
540 sprintf(buf+7+j + 48, "%c",
541 isprint(p[i]) ? p[i] : '.');
547 * Fill a packet with some payload.
548 * We create a UDP packet so the payload starts at
549 * 14+20+8 = 42 bytes.
552 #define uh_sport source
553 #define uh_dport dest
559 * increment the addressed in the packet,
560 * starting from the least significant field.
561 * DST_IP DST_PORT SRC_IP SRC_PORT
/*
 * Advances the source and destination (port, then address) of 'pkt'
 * through the configured ranges, or randomizes them when the
 * OPT_RANDOM_SRC / OPT_RANDOM_DST options are set.
 * NOTE(review): interior lines (wrap/else branches) elided by
 * extraction; code kept byte-identical, comments only added.
 */
564 update_addresses(struct pkt *pkt, struct glob_arg *g)
568 struct ip *ip = &pkt->ip;
569 struct udphdr *udp = &pkt->udp;
572 /* XXX for now it doesn't handle non-random src, random dst */
573 if (g->options & OPT_RANDOM_SRC) {
574 udp->uh_sport = random();
575 ip->ip_src.s_addr = random();
/* sequential source: bump port first, then address on port wrap */
577 p = ntohs(udp->uh_sport);
578 if (p < g->src_ip.port1) { /* just inc, no wrap */
579 udp->uh_sport = htons(p + 1);
582 udp->uh_sport = htons(g->src_ip.port0);
584 a = ntohl(ip->ip_src.s_addr);
585 if (a < g->src_ip.end) { /* just inc, no wrap */
586 ip->ip_src.s_addr = htonl(a + 1);
589 ip->ip_src.s_addr = htonl(g->src_ip.start);
591 udp->uh_sport = htons(g->src_ip.port0);
594 if (g->options & OPT_RANDOM_DST) {
595 udp->uh_dport = random();
596 ip->ip_dst.s_addr = random();
/* sequential destination: same port-then-address progression */
598 p = ntohs(udp->uh_dport);
599 if (p < g->dst_ip.port1) { /* just inc, no wrap */
600 udp->uh_dport = htons(p + 1);
603 udp->uh_dport = htons(g->dst_ip.port0);
605 a = ntohl(ip->ip_dst.s_addr);
606 if (a < g->dst_ip.end) { /* just inc, no wrap */
607 ip->ip_dst.s_addr = htonl(a + 1);
611 ip->ip_dst.s_addr = htonl(g->dst_ip.start);
617 * initialize one packet and prepare for the next one.
618 * The copy could be done better instead of repeating it each time.
/*
 * Builds the template Ethernet/IP/UDP frame in targ->pkt: fills the
 * payload, computes IP and UDP checksums, and zeroes the virtio-net
 * header. If -P gave a pcap file, the first packet from that file is
 * used verbatim instead.
 * NOTE(review): interior lines elided by extraction; code kept
 * byte-identical, comments only added.
 */
621 initialize_packet(struct targ *targ)
623 struct pkt *pkt = &targ->pkt;
624 struct ether_header *eh;
627 uint16_t paylen = targ->g->pkt_size - sizeof(*eh) - sizeof(struct ip);
628 const char *payload = targ->g->options & OPT_INDIRECT ?
629 indirect_payload : default_payload;
630 int i, l0 = strlen(payload);
632 char errbuf[PCAP_ERRBUF_SIZE];
634 struct pcap_pkthdr *header;
635 const unsigned char *packet;
637 /* Read a packet from a PCAP file if asked. */
638 if (targ->g->packet_file != NULL) {
639 if ((file = pcap_open_offline(targ->g->packet_file,
641 D("failed to open pcap file %s",
642 targ->g->packet_file);
643 if (pcap_next_ex(file, &header, &packet) < 0)
644 D("failed to read packet from %s",
645 targ->g->packet_file);
646 if ((targ->frame = malloc(header->caplen)) == NULL)
648 bcopy(packet, (unsigned char *)targ->frame, header->caplen);
649 targ->g->pkt_size = header->caplen;
654 /* create a nice NUL-terminated string */
655 for (i = 0; i < paylen; i += l0) {
657 l0 = paylen - i; // last round
658 bcopy(payload, pkt->body + i, l0);
660 pkt->body[i-1] = '\0';
663 /* prepare the headers */
664 ip->ip_v = IPVERSION;
667 ip->ip_tos = IPTOS_LOWDELAY;
/* NOTE(review): ntohs() used where htons() is meant — same byte swap on
 * all supported platforms, but htons is the semantically correct call. */
668 ip->ip_len = ntohs(targ->g->pkt_size - sizeof(*eh));
670 ip->ip_off = htons(IP_DF); /* Don't fragment */
671 ip->ip_ttl = IPDEFTTL;
672 ip->ip_p = IPPROTO_UDP;
673 ip->ip_dst.s_addr = htonl(targ->g->dst_ip.start);
674 ip->ip_src.s_addr = htonl(targ->g->src_ip.start);
675 ip->ip_sum = wrapsum(checksum(ip, sizeof(*ip), 0));
679 udp->uh_sport = htons(targ->g->src_ip.port0);
680 udp->uh_dport = htons(targ->g->dst_ip.port0);
681 udp->uh_ulen = htons(paylen);
682 /* Magic: taken from sbin/dhclient/packet.c */
683 udp->uh_sum = wrapsum(checksum(udp, sizeof(*udp),
685 paylen - sizeof(*udp),
686 checksum(&ip->ip_src, 2 * sizeof(ip->ip_src),
687 IPPROTO_UDP + (u_int32_t)ntohs(udp->uh_ulen)
/* Ethernet header: MACs from the configured ranges, IPv4 ethertype */
693 bcopy(&targ->g->src_mac.start, eh->ether_shost, 6);
694 bcopy(&targ->g->dst_mac.start, eh->ether_dhost, 6);
695 eh->ether_type = htons(ETHERTYPE_IP);
697 bzero(&pkt->vh, sizeof(pkt->vh));
698 #ifdef TRASH_VHOST_HDR
699 /* set bogus content */
700 pkt->vh.fields[0] = 0xff;
701 pkt->vh.fields[1] = 0xff;
702 pkt->vh.fields[2] = 0xff;
703 pkt->vh.fields[3] = 0xff;
704 pkt->vh.fields[4] = 0xff;
705 pkt->vh.fields[5] = 0xff;
706 #endif /* TRASH_VHOST_HDR */
707 // dump_payload((void *)pkt, targ->g->pkt_size, NULL, 0);
/*
 * Tell the kernel (via NETMAP_BDG_VNET_HDR on NIOCREGIF) the virtio-net
 * header length to use on this port, taken from -H.
 * NOTE(review): interior lines elided by extraction.
 */
711 set_vnet_hdr_len(struct targ *t)
713 int err, l = t->g->virt_header;
719 memset(&req, 0, sizeof(req));
720 bcopy(t->nmd->req.nr_name, req.nr_name, sizeof(req.nr_name));
721 req.nr_version = NETMAP_API;
722 req.nr_cmd = NETMAP_BDG_VNET_HDR;
724 err = ioctl(t->fd, NIOCREGIF, &req);
726 D("Unable to set vnet header length %d", l);
732 * create and enqueue a batch of packets on a ring.
733 * On the last one set NS_REPORT to tell the driver to generate
734 * an interrupt when done.
/*
 * Fills up to 'count' slots of 'ring' from the template 'frame'
 * ('size' bytes), honoring OPT_INDIRECT / OPT_COPY / OPT_MEMCPY /
 * OPT_PREFETCH and multi-fragment packets ('nfrags' slots each).
 * NOTE(review): interior lines elided by extraction; code kept
 * byte-identical, comments only added.
 */
737 send_packets(struct netmap_ring *ring, struct pkt *pkt, void *frame,
738 int size, struct glob_arg *g, u_int count, int options,
741 u_int n, sent, cur = ring->cur;
744 n = nm_ring_space(ring);
747 if (count < nfrags) {
748 D("truncating packet, no room for frags %d %d",
/* warm the cache ahead of the copy loop when prefetching or copying */
752 if (options & (OPT_COPY | OPT_PREFETCH) ) {
753 for (sent = 0; sent < count; sent++) {
754 struct netmap_slot *slot = &ring->slot[cur];
755 char *p = NETMAP_BUF(ring, slot->buf_idx);
757 __builtin_prefetch(p);
758 cur = nm_ring_next(ring, cur);
/* main fill loop: one slot per iteration, fcnt counts down fragments */
763 for (fcnt = nfrags, sent = 0; sent < count; sent++) {
764 struct netmap_slot *slot = &ring->slot[cur];
765 char *p = NETMAP_BUF(ring, slot->buf_idx);
768 if (options & OPT_INDIRECT) {
769 slot->flags |= NS_INDIRECT;
770 slot->ptr = (uint64_t)frame;
771 } else if (options & OPT_COPY) {
772 nm_pkt_copy(frame, p, size);
774 update_addresses(pkt, g);
775 } else if (options & OPT_MEMCPY) {
776 memcpy(p, frame, size);
778 update_addresses(pkt, g);
779 } else if (options & OPT_PREFETCH) {
780 __builtin_prefetch(p);
782 if (options & OPT_DUMP)
783 dump_payload(p, size, ring, cur);
785 slot->flags |= NS_MOREFRAG;
/* last slot of the batch: close the fragment chain, request an irq */
789 if (sent == count - 1) {
790 slot->flags &= ~NS_MOREFRAG;
791 slot->flags |= NS_REPORT;
793 cur = nm_ring_next(ring, cur);
795 ring->head = ring->cur = cur;
801 * Send a packet, and wait for a response.
802 * The payload (after UDP header, ofs 42) has a 4-byte sequence
803 * followed by a struct timeval (or bintime?)
805 #define PAY_OFS 42 /* where in the pkt... */
/*
 * Thread body for "ping" mode: stamps each outgoing packet with a
 * sequence number (offset 42) and a compact timestamp (offset 46),
 * then reads replies back and reports per-second min/avg RTT.
 * Single-threaded only. NOTE(review): interior lines elided by
 * extraction; code kept byte-identical, comments only added.
 */
808 pinger_body(void *data)
810 struct targ *targ = (struct targ *) data;
811 struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
812 struct netmap_if *nifp = targ->nmd->nifp;
813 int i, rx = 0, n = targ->g->npackets;
817 struct timespec ts, now, last_print;
818 uint32_t count = 0, min = 1000000000, av = 0;
/* skip the unused leading part of the virtio header, if shorter than max */
821 frame += sizeof(targ->pkt.vh) - targ->g->virt_header;
822 size = targ->g->pkt_size + targ->g->virt_header;
824 if (targ->g->nthreads > 1) {
825 D("can only ping with 1 thread");
829 clock_gettime(CLOCK_REALTIME_PRECISE, &last_print);
831 while (n == 0 || (int)sent < n) {
832 struct netmap_ring *ring = NETMAP_TXRING(nifp, 0);
833 struct netmap_slot *slot;
835 for (i = 0; i < 1; i++) { /* XXX why the loop for 1 pkt ? */
836 slot = &ring->slot[ring->cur];
838 p = NETMAP_BUF(ring, slot->buf_idx);
840 if (nm_ring_empty(ring)) {
841 D("-- ouch, cannot send");
/* stamp seq (ofs 42) and send timestamp (ofs 46) into the frame */
844 nm_pkt_copy(frame, p, size);
845 clock_gettime(CLOCK_REALTIME_PRECISE, &ts);
846 bcopy(&sent, p+42, sizeof(sent));
847 tp = (struct tstamp *)(p+46);
848 tp->sec = (uint32_t)ts.tv_sec;
849 tp->nsec = (uint32_t)ts.tv_nsec;
851 ring->head = ring->cur = nm_ring_next(ring, ring->cur);
854 /* should use a parameter to decide how often to send */
855 if (poll(&pfd, 1, 3000) <= 0) {
856 D("poll error/timeout on queue %d: %s", targ->me,
860 /* see what we got back */
/* NOTE(review): RX rings are walked with first_tx_ring/last_tx_ring
 * bounds — looks like it should be first_rx_ring/last_rx_ring; confirm. */
861 for (i = targ->nmd->first_tx_ring;
862 i <= targ->nmd->last_tx_ring; i++) {
863 ring = NETMAP_RXRING(nifp, i);
864 while (!nm_ring_empty(ring)) {
867 slot = &ring->slot[ring->cur];
868 p = NETMAP_BUF(ring, slot->buf_idx);
/* recover seq + timestamp from the echoed packet, compute the RTT */
870 clock_gettime(CLOCK_REALTIME_PRECISE, &now);
871 bcopy(p+42, &seq, sizeof(seq));
872 tp = (struct tstamp *)(p+46);
873 ts.tv_sec = (time_t)tp->sec;
874 ts.tv_nsec = (long)tp->nsec;
875 ts.tv_sec = now.tv_sec - ts.tv_sec;
876 ts.tv_nsec = now.tv_nsec - ts.tv_nsec;
877 if (ts.tv_nsec < 0) {
878 ts.tv_nsec += 1000000000;
881 if (1) D("seq %d/%d delta %d.%09d", seq, sent,
882 (int)ts.tv_sec, (int)ts.tv_nsec);
883 if (ts.tv_nsec < (int)min)
887 ring->head = ring->cur = nm_ring_next(ring, ring->cur);
891 //D("tx %d rx %d", sent, rx);
/* print the running count/min/avg once per second */
893 ts.tv_sec = now.tv_sec - last_print.tv_sec;
894 ts.tv_nsec = now.tv_nsec - last_print.tv_nsec;
895 if (ts.tv_nsec < 0) {
896 ts.tv_nsec += 1000000000;
899 if (ts.tv_sec >= 1) {
900 D("count %d min %d av %d",
901 count, min, av/count);
913 * reply to ping requests
/*
 * Thread body for "pong" mode: drains the RX rings and bounces each
 * received packet back out on TX ring 0 (MAC swap is still a TODO).
 * Single-threaded only. NOTE(review): interior lines elided by
 * extraction; code kept byte-identical, comments only added.
 */
916 ponger_body(void *data)
918 struct targ *targ = (struct targ *) data;
919 struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
920 struct netmap_if *nifp = targ->nmd->nifp;
921 struct netmap_ring *txring, *rxring;
922 int i, rx = 0, sent = 0, n = targ->g->npackets;
924 if (targ->g->nthreads > 1) {
925 D("can only reply ping with 1 thread");
928 D("understood ponger %d but don't know how to do it", n);
929 while (n == 0 || sent < n) {
930 uint32_t txcur, txavail;
933 ioctl(pfd.fd, NIOCRXSYNC, NULL);
935 if (poll(&pfd, 1, 1000) <= 0) {
936 D("poll error/timeout on queue %d: %s", targ->me,
/* snapshot TX state once per poll round */
941 txring = NETMAP_TXRING(nifp, 0);
943 txavail = nm_ring_space(txring);
944 /* see what we got back */
945 for (i = targ->nmd->first_rx_ring; i <= targ->nmd->last_rx_ring; i++) {
946 rxring = NETMAP_RXRING(nifp, i);
947 while (!nm_ring_empty(rxring)) {
948 uint16_t *spkt, *dpkt;
949 uint32_t cur = rxring->cur;
950 struct netmap_slot *slot = &rxring->slot[cur];
952 src = NETMAP_BUF(rxring, slot->buf_idx);
953 //D("got pkt %p of size %d", src, slot->len);
954 rxring->head = rxring->cur = nm_ring_next(rxring, cur);
/* copy the request into the next free TX slot */
958 dst = NETMAP_BUF(txring,
959 txring->slot[txcur].buf_idx);
961 dpkt = (uint16_t *)dst;
962 spkt = (uint16_t *)src;
963 nm_pkt_copy(src, dst, slot->len);
970 txring->slot[txcur].len = slot->len;
971 /* XXX swap src dst mac */
972 txcur = nm_ring_next(txring, txcur);
977 txring->head = txring->cur = txcur;
980 ioctl(pfd.fd, NIOCTXSYNC, NULL);
982 //D("tx %d rx %d", sent, rx);
/*
 * Compare two timespecs: return 1 when *a >= *b, 0 otherwise.
 * Seconds are compared first, nanoseconds break ties.
 */
static int
timespec_ge(const struct timespec *a, const struct timespec *b)
{
	if (a->tv_sec > b->tv_sec)
		return (1);
	if (a->tv_sec < b->tv_sec)
		return (0);
	if (a->tv_nsec >= b->tv_nsec)
		return (1);
	return (0);
}
/* Convert a struct timeval to the equivalent struct timespec. */
static __inline struct timespec
timeval2spec(const struct timeval *a)
{
	struct timespec ts = {
		.tv_sec = a->tv_sec,
		.tv_nsec = a->tv_usec * 1000	/* microseconds -> nanoseconds */
	};
	return ts;
}
/* Convert a struct timespec to the equivalent struct timeval (truncating). */
static __inline struct timeval
timespec2val(const struct timespec *a)
{
	struct timeval tv = {
		.tv_sec = a->tv_sec,
		.tv_usec = a->tv_nsec / 1000	/* nanoseconds -> microseconds */
	};
	return tv;
}
/* Sum two timespecs, normalizing the nanosecond carry into seconds. */
static __inline struct timespec
timespec_add(struct timespec a, struct timespec b)
{
	struct timespec ret = { a.tv_sec + b.tv_sec, a.tv_nsec + b.tv_nsec };
	if (ret.tv_nsec >= 1000000000) {
		ret.tv_sec++;
		ret.tv_nsec -= 1000000000;
	}
	return ret;
}
/* Subtract b from a, normalizing a negative nanosecond field by borrowing. */
static __inline struct timespec
timespec_sub(struct timespec a, struct timespec b)
{
	struct timespec ret = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };
	if (ret.tv_nsec < 0) {
		ret.tv_sec--;
		ret.tv_nsec += 1000000000;
	}
	return ret;
}
1045 * wait until ts, either busy or sleeping if more than 1ms.
1046 * Return wakeup time.
/* NOTE(review): interior lines (the sleep and busy-wait branches) were
 * elided by extraction; code kept byte-identical, comments only added. */
1048 static struct timespec
1049 wait_time(struct timespec ts)
1052 struct timespec w, cur;
1053 clock_gettime(CLOCK_REALTIME_PRECISE, &cur);
1054 w = timespec_sub(ts, cur);	/* remaining time until the target */
1057 else if (w.tv_sec > 0 || w.tv_nsec > 1000000)
/*
 * Thread body for "tx" mode. Sends this thread's share of packets
 * (npackets / nthreads) via TAP write(), pcap_inject() or netmap
 * rings, optionally rate-limited in bursts paced by tx_period.
 * NOTE(review): interior lines elided by extraction; code kept
 * byte-identical, comments only added.
 */
1063 sender_body(void *data)
1065 struct targ *targ = (struct targ *) data;
1066 struct pollfd pfd = { .fd = targ->fd, .events = POLLOUT };
1067 struct netmap_if *nifp;
1068 struct netmap_ring *txring;
1069 int i, n = targ->g->npackets / targ->g->nthreads;
1071 int options = targ->g->options | OPT_COPY;
1072 struct timespec nexttime = { 0, 0}; // XXX silence compiler
1073 int rate_limit = targ->g->tx_rate;
1074 struct pkt *pkt = &targ->pkt;
/* choose the frame template: built-in pkt, or the one loaded from -P */
1078 if (targ->frame == NULL) {
1080 frame += sizeof(pkt->vh) - targ->g->virt_header;
1081 size = targ->g->pkt_size + targ->g->virt_header;
1083 frame = targ->frame;
1084 size = targ->g->pkt_size;
1087 D("start, fd %d main_fd %d", targ->fd, targ->g->main_fd);
1088 if (setaffinity(targ->thread, targ->affinity))
/* align the start time to a whole second ~2s in the future */
1092 clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic);
1094 targ->tic = timespec_add(targ->tic, (struct timespec){2,0});
1095 targ->tic.tv_nsec = 0;
1096 wait_time(targ->tic);
1097 nexttime = targ->tic;
1099 if (targ->g->dev_type == DEV_TAP) {
1100 D("writing to file desc %d", targ->g->main_fd);
1102 for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) {
1103 if (write(targ->g->main_fd, frame, size) != -1)
1105 update_addresses(pkt, targ->g);
1112 } else if (targ->g->dev_type == DEV_PCAP) {
1113 pcap_t *p = targ->g->p;
1115 for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) {
1116 if (pcap_inject(p, frame, size) != -1)
1118 update_addresses(pkt, targ->g);
1124 #endif /* NO_PCAP */
1127 int frags = targ->g->frags;
1129 nifp = targ->nmd->nifp;
1130 while (!targ->cancel && (n == 0 || sent < n)) {
/* rate limiting: release one burst per tx_period tick */
1132 if (rate_limit && tosend <= 0) {
1133 tosend = targ->g->burst;
1134 nexttime = timespec_add(nexttime, targ->g->tx_period);
1135 wait_time(nexttime);
1139 * wait for available room in the send queue(s)
1141 if (poll(&pfd, 1, 2000) <= 0) {
1144 D("poll error/timeout on queue %d: %s", targ->me,
1148 if (pfd.revents & POLLERR) {
1153 * scan our queues and send on those with room
/* after warm-up, drop the per-packet copy unless the user asked for it */
1155 if (options & OPT_COPY && sent > 100000 && !(targ->g->options & OPT_COPY) ) {
1157 options &= ~OPT_COPY;
1159 for (i = targ->nmd->first_tx_ring; i <= targ->nmd->last_tx_ring; i++) {
1160 int m, limit = rate_limit ? tosend : targ->g->burst;
1161 if (n > 0 && n - sent < limit)
1163 txring = NETMAP_TXRING(nifp, i);
1164 if (nm_ring_empty(txring))
/* round the batch up to a whole number of fragments */
1167 limit = ((limit + frags - 1) / frags) * frags;
1169 m = send_packets(txring, pkt, frame, size, targ->g,
1170 limit, options, frags);
1171 ND("limit %d tail %d frags %d m %d",
1172 limit, txring->tail, frags, m);
1182 /* flush any remaining packets */
1183 D("flush tail %d head %d on thread %p",
1184 txring->tail, txring->head,
1186 ioctl(pfd.fd, NIOCTXSYNC, NULL);
1188 /* final part: wait all the TX queues to be empty. */
1189 for (i = targ->nmd->first_tx_ring; i <= targ->nmd->last_tx_ring; i++) {
1190 txring = NETMAP_TXRING(nifp, i);
1191 while (nm_tx_pending(txring)) {
1192 RD(5, "pending tx tail %d head %d on ring %d",
1193 txring->tail, txring->head, i);
1194 ioctl(pfd.fd, NIOCTXSYNC, NULL);
1195 usleep(1); /* wait 1 tick */
1198 } /* end DEV_NETMAP */
1200 clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
1201 targ->completed = 1;
1205 /* reset the ``used`` flag. */
/*
 * pcap_dispatch() callback for rx mode: 'user' points at the thread's
 * packet counter; the packet contents are ignored.
 * NOTE(review): the counter-increment line was elided by extraction.
 */
1214 receive_pcap(u_char *user, const struct pcap_pkthdr * h,
1215 const u_char * bytes)
1217 int *count = (int *)user;
1218 (void)h; /* UNUSED */
1219 (void)bytes; /* UNUSED */
1222 #endif /* !NO_PCAP */
/*
 * Drain up to 'limit' packets from 'ring', optionally hexdumping each
 * one, and advance head/cur. Returns the number consumed (elided).
 * NOTE(review): interior lines elided by extraction.
 */
1225 receive_packets(struct netmap_ring *ring, u_int limit, int dump)
1230 n = nm_ring_space(ring);	/* never consume more than is available */
1233 for (rx = 0; rx < limit; rx++) {
1234 struct netmap_slot *slot = &ring->slot[cur];
1235 char *p = NETMAP_BUF(ring, slot->buf_idx);
1238 dump_payload(p, slot->len, ring, cur);
1240 cur = nm_ring_next(ring, cur);
1242 ring->head = ring->cur = cur;
/*
 * Thread body for "rx" mode: waits for the first packet, then counts
 * traffic from TAP read(), pcap_dispatch() or the netmap RX rings,
 * stopping after 1s of silence unless -w/forever is set.
 * NOTE(review): interior lines elided by extraction; code kept
 * byte-identical, comments only added.
 */
1248 receiver_body(void *data)
1250 struct targ *targ = (struct targ *) data;
1251 struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
1252 struct netmap_if *nifp;
1253 struct netmap_ring *rxring;
1255 uint64_t received = 0;
1257 if (setaffinity(targ->thread, targ->affinity))
1260 D("reading from %s fd %d main_fd %d",
1261 targ->g->ifname, targ->fd, targ->g->main_fd);
1262 /* unbounded wait for the first packet. */
1263 for (;!targ->cancel;) {
1264 i = poll(&pfd, 1, 1000);
1265 if (i > 0 && !(pfd.revents & POLLERR))
1267 RD(1, "waiting for initial packets, poll returns %d %d",
1270 /* main loop, exit after 1s silence */
1271 clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic);
1272 if (targ->g->dev_type == DEV_TAP) {
1273 while (!targ->cancel) {
1274 char buf[MAX_BODYSIZE];
1275 /* XXX should we poll ? */
1276 if (read(targ->g->main_fd, buf, sizeof(buf)) > 0)
1280 } else if (targ->g->dev_type == DEV_PCAP) {
1281 while (!targ->cancel) {
1282 /* XXX should we poll ? */
1283 pcap_dispatch(targ->g->p, targ->g->burst, receive_pcap,
1284 (u_char *)&targ->count);
1286 #endif /* !NO_PCAP */
1288 int dump = targ->g->options & OPT_DUMP;
1290 nifp = targ->nmd->nifp;
1291 while (!targ->cancel) {
1292 /* Once we started to receive packets, wait at most 1 seconds
1294 if (poll(&pfd, 1, 1 * 1000) <= 0 && !targ->g->forever) {
1295 clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
1296 targ->toc.tv_sec -= 1; /* Subtract timeout time. */
1300 if (pfd.revents & POLLERR) {
/* drain every RX ring owned by this thread */
1305 for (i = targ->nmd->first_rx_ring; i <= targ->nmd->last_rx_ring; i++) {
1308 rxring = NETMAP_RXRING(nifp, i);
1309 if (nm_ring_empty(rxring))
1312 m = receive_packets(rxring, targ->g->burst, dump);
1315 targ->count = received;
1319 clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
1322 targ->completed = 1;
1323 targ->count = received;
1326 /* reset the ``used`` flag. */
1332 /* very crude code to print a number in normalized form.
1333 * Caller has to make sure that the buffer is large enough.
1336 norm(char *buf, double val)
1338 char *units[] = { "", "K", "M", "G", "T" };
1341 for (i = 0; val >=1000 && i < sizeof(units)/sizeof(char *) - 1; i++)
1343 sprintf(buf, "%.2f %s", val, units[i]);
/*
 * Print the tx summary: packet count, per-packet size, elapsed time,
 * then pps and bandwidth (nominal and raw, including framing overhead).
 * NOTE(review): interior lines (pps computation, min-size clamp body)
 * elided by extraction.
 */
1348 tx_output(uint64_t sent, int size, double delta)
1350 double bw, raw_bw, pps;
1351 char b1[40], b2[80], b3[80];
1353 printf("Sent %llu packets, %d bytes each, in %.2f seconds.\n",
1354 (unsigned long long)sent, size, delta);
1357 if (size < 60) /* correct for min packet size */
1360 bw = (8.0 * size * sent) / delta;
1361 /* raw packets have 4 bytes crc + 20 bytes framing */
1362 raw_bw = (8.0 * (size + 24) * sent) / delta;
1364 printf("Speed: %spps Bandwidth: %sbps (raw %sbps)\n",
1365 norm(b1, pps), norm(b2, bw), norm(b3, raw_bw) );
/*
 * Print the rx summary: packet count, elapsed time and packets/second.
 * NOTE(review): interior lines (declarations, delta guard) elided by
 * extraction.
 */
1370 rx_output(uint64_t received, double delta)
1375 printf("Received %llu packets, in %.2f seconds.\n",
1376 (unsigned long long) received, delta);
1380 pps = received / delta;
1381 printf("Speed: %spps\n", norm(b1, pps));
/*
 * Print the command-line help and exit (exit call elided by extraction).
 * NOTE(review): '-P' appears twice below with different meanings
 * ("use libpcap" and "load packet from pcap file") — the option table
 * in main() should be checked; the strings are left untouched here.
 */
1387 const char *cmd = "pkt-gen";
1391 "\t-i interface interface name\n"
1392 "\t-f function tx rx ping pong\n"
1393 "\t-n count number of iterations (can be 0)\n"
1394 "\t-t pkts_to_send also forces tx mode\n"
1395 "\t-r pkts_to_receive also forces rx mode\n"
1396 "\t-l pkt_size in bytes excluding CRC\n"
1397 "\t-d dst_ip[:port[-dst_ip:port]] single or range\n"
1398 "\t-s src_ip[:port[-src_ip:port]] single or range\n"
1401 "\t-a cpu_id use setaffinity\n"
1402 "\t-b burst size testing, mostly\n"
1403 "\t-c cores cores to use\n"
1404 "\t-p threads processes/threads to use\n"
1405 "\t-T report_ms milliseconds between reports\n"
1406 "\t-P use libpcap instead of netmap\n"
1407 "\t-w wait_for_link_time in seconds\n"
1408 "\t-R rate in packets per second\n"
1409 "\t-X dump payload\n"
1410 "\t-H len add empty virtio-net-header with size 'len'\n"
1411 "\t-P file load packet from pcap file\n"
1412 "\t-z use random IPv4 src address/port\n"
1413 "\t-Z use random IPv4 dst address/port\n"
/*
 * Allocate the targs array and spawn one worker per thread: for netmap
 * devices each worker gets its own nm_open() descriptor (one ring per
 * thread when nthreads > 1), then packets are initialized and the
 * thread body (g->td_body) is started with the chosen CPU affinity.
 * NOTE(review): interior lines elided by extraction; code kept
 * byte-identical, comments only added.
 */
1421 start_threads(struct glob_arg *g)
/* NOTE(review): calloc() result is not checked before use. */
1425 targs = calloc(g->nthreads, sizeof(*targs));
1427 * Now create the desired number of threads, each one
1428 * using a single descriptor.
1430 for (i = 0; i < g->nthreads; i++) {
1431 struct targ *t = &targs[i];
1433 bzero(t, sizeof(*t));
1434 t->fd = -1; /* default, with pcap */
1437 if (g->dev_type == DEV_NETMAP) {
1438 struct nm_desc nmd = *g->nmd; /* copy, we overwrite ringid */
1439 uint64_t nmd_flags = 0;
1442 if (g->nthreads > 1) {
1443 if (nmd.req.nr_flags != NR_REG_ALL_NIC) {
1444 D("invalid nthreads mode %d", nmd.req.nr_flags);
1447 nmd.req.nr_flags = NR_REG_ONE_NIC;
1448 nmd.req.nr_ringid = i;
1450 /* Only touch one of the rings (rx is already ok) */
1451 if (g->td_body == receiver_body)
1452 nmd_flags |= NETMAP_NO_TX_POLL;
1454 /* register interface. Override ifname and ringid etc. */
1455 if (g->options & OPT_MONITOR_TX)
1456 nmd.req.nr_flags |= NR_MONITOR_TX;
1457 if (g->options & OPT_MONITOR_RX)
1458 nmd.req.nr_flags |= NR_MONITOR_RX;
1460 t->nmd = nm_open(t->g->ifname, NULL, nmd_flags |
1461 NM_OPEN_IFNAME | NM_OPEN_NO_MMAP, &nmd);
1462 if (t->nmd == NULL) {
1463 D("Unable to open %s: %s",
1464 t->g->ifname, strerror(errno));
1468 set_vnet_hdr_len(t);
1471 targs[i].fd = g->main_fd;
1475 if (g->affinity >= 0) {
1476 if (g->affinity < g->cpus)
1477 t->affinity = g->affinity;
1479 t->affinity = i % g->cpus;
1483 /* default, init packets */
1484 initialize_packet(t);
/* NOTE(review): pthread_create() returns 0 or an error NUMBER, never -1,
 * so this failure check can never fire — compare against 0 instead. */
1486 if (pthread_create(&t->thread, NULL, g->td_body, t) == -1) {
1487 D("Unable to create thread %d: %s", i, strerror(errno));
/*
 * Supervisor loop: every report_interval ms sums the per-thread
 * counters and prints the instantaneous pps; when all threads finish
 * it joins them, derives the overall [tic, toc] window and prints the
 * final tx/rx summary, then tears down the netmap mapping.
 * NOTE(review): interior lines elided by extraction; code kept
 * byte-identical, comments only added.
 */
1494 main_thread(struct glob_arg *g)
1501 struct timeval tic, toc;
1503 gettimeofday(&toc, NULL);
1505 struct timeval now, delta;
1506 uint64_t pps, usec, my_count, npkts;
/* sleep for one reporting interval using select() as a portable timer */
1509 delta.tv_sec = g->report_interval/1000;
1510 delta.tv_usec = (g->report_interval%1000)*1000;
1511 select(0, NULL, NULL, NULL, &delta);
1512 gettimeofday(&now, NULL);
1513 timersub(&now, &toc, &toc);
1515 for (i = 0; i < g->nthreads; i++) {
1516 my_count += targs[i].count;
1517 if (targs[i].used == 0)
1520 usec = toc.tv_sec* 1000000 + toc.tv_usec;
1523 npkts = my_count - prev;
1524 pps = (npkts*1000000 + usec/2) / usec;
1525 D("%llu pps (%llu pkts in %llu usec)",
1526 (unsigned long long)pps,
1527 (unsigned long long)npkts,
1528 (unsigned long long)usec);
1531 if (done == g->nthreads)
1537 for (i = 0; i < g->nthreads; i++) {
1538 struct timespec t_tic, t_toc;
1540 * Join active threads, unregister interfaces and close
1544 pthread_join(targs[i].thread, NULL);
1547 if (targs[i].completed == 0)
1548 D("ouch, thread %d exited with error", i);
1551 * Collect threads output and extract information about
1552 * how long it took to send all the packets.
1554 count += targs[i].count;
1555 t_tic = timeval2spec(&tic);
1556 t_toc = timeval2spec(&toc);
/* NOTE(review): intent is earliest tic / latest toc across threads;
 * confirm the comparison direction on the tic update below. */
1557 if (!timerisset(&tic) || timespec_ge(&targs[i].tic, &t_tic))
1558 tic = timespec2val(&targs[i].tic);
1559 if (!timerisset(&toc) || timespec_ge(&targs[i].toc, &t_toc))
1560 toc = timespec2val(&targs[i].toc);
1564 timersub(&toc, &tic, &toc);
1565 delta_t = toc.tv_sec + 1e-6* toc.tv_usec;
1566 if (g->td_body == sender_body)
1567 tx_output(count, g->pkt_size, delta_t);
1569 rx_output(count, delta_t);
1571 if (g->dev_type == DEV_NETMAP) {
1572 munmap(g->nmd->mem, g->nmd->req.nr_memsize);
/* Lookup table mapping the -f keyword to the worker thread body.
 * NOTE(review): the -f parser iterates `for (fn = func; fn->key; ...)`,
 * so a NULL-key sentinel entry is expected to terminate this array;
 * the sentinel and the closing brace are missing from this view. */
1583 static struct sf func[] = {
1584 { "tx", sender_body },
1585 { "rx", receiver_body },
1586 { "ping", pinger_body },
1587 { "pong", ponger_body },
/* Open/create a tap interface named *dev and return its file
 * descriptor. Goes through the clone device (TAP_CLONEDEV) plus
 * TUNSETIFF; the kernel writes the final interface name back into
 * *dev, so the caller must pass a writable buffer large enough for
 * an interface name.
 * NOTE(review): extracted text has original line numbers fused in and
 * some lines (return statements, closing braces) missing from view.
 */
1592 tap_alloc(char *dev)
1596 char *clonedev = TAP_CLONEDEV;
1600 /* Arguments taken by the function:
1602 * char *dev: the name of an interface (or '\0'). MUST have enough
1603 * space to hold the interface name if '\0' is passed
1604 * int flags: interface flags (eg, IFF_TUN etc.)
/* "tapSomething" name: open the per-device node instead of the cloner.
 * NOTE(review): dev[3] assumes dev is at least 4 bytes — confirm
 * callers always pass a "tap"-prefixed name. */
1608 if (dev[3]) { /* tapSomething */
1609 static char buf[128];
1610 snprintf(buf, sizeof(buf), "/dev/%s", dev);
1614 /* open the device */
1615 if( (fd = open(clonedev, O_RDWR)) < 0 ) {
1618 D("%s open successful", clonedev);
1620 /* preparation of the struct ifr, of type "struct ifreq" */
1621 memset(&ifr, 0, sizeof(ifr));
/* IFF_NO_PI: no extra packet-info header prepended on reads/writes. */
1624 ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
1627 /* if a device name was specified, put it in the structure; otherwise,
1628 * the kernel will try to allocate the "next" device of the
/* NOTE(review): strncpy leaves ifr_name unterminated when dev is
 * IFNAMSIZ bytes or longer; ifr was memset to 0 just above, so only
 * an over-long dev is a problem — confirm callers bound its length. */
1630 strncpy(ifr.ifr_name, dev, IFNAMSIZ);
1633 /* try to create the device */
/* NOTE(review): "to to" typo in the runtime message below ("to do a
 * TUNSETIFF") — left untouched here, fix belongs in a code change. */
1634 if( (err = ioctl(fd, TUNSETIFF, (void *) &ifr)) < 0 ) {
1635 D("failed to to a TUNSETIFF: %s", strerror(errno));
1640 /* if the operation was successful, write back the name of the
1641 * interface to the variable "dev", so the caller can know
1642 * it. Note that the caller MUST reserve space in *dev (see calling
1644 strcpy(dev, ifr.ifr_name);
1645 D("new name is %s", dev);
1648 /* this is the special file descriptor that the caller will use to talk
1649 * with the virtual interface */
/* Program entry point: parse command-line options, open the selected
 * backend (tap / pcap / netmap), validate the configuration, convert
 * the -R rate into an inter-batch period, and install the ^C handler
 * before spawning worker threads.
 * NOTE(review): extracted text has original line numbers fused in;
 * many lines (break statements, braces, usage() calls, #ifdefs) are
 * missing from this view and the function continues past its end.
 * Comments added only; code text left byte-identical.
 */
1654 main(int arc, char **argv)
1662 int devqueues = 1; /* how many device queues */
/* Defaults: receive mode, 1 s reports, broadcast destination mac. */
1664 bzero(&g, sizeof(g));
1667 g.td_body = receiver_body;
1668 g.report_interval = 1000; /* report interval */
1670 /* ip addresses can also be a range x.x.x.x-x.x.x.y */
1671 g.src_ip.name = "10.0.0.1";
1672 g.dst_ip.name = "10.1.0.1";
1673 g.dst_mac.name = "ff:ff:ff:ff:ff:ff";
1674 g.src_mac.name = NULL;
1676 g.burst = 512; // default
/* Option parsing. NOTE(review): numeric options use atoi() with no
 * error detection; strtol() would reject malformed input. */
1685 while ( (ch = getopt(arc, argv,
1686 "a:f:F:n:i:Il:d:s:D:S:b:c:o:p:T:w:WvR:XC:H:e:m:P:zZ")) != -1) {
1691 D("bad option %c %s", ch, optarg);
1696 g.npackets = atoi(optarg);
/* -F fragments per packet: netmap ring limit is 64 slots per packet. */
1701 if (i < 1 || i > 63) {
1702 D("invalid frags %d [1..63], ignore", i);
/* -f: look the keyword up in the func[] table (NULL-key terminated). */
1709 for (fn = func; fn->key; fn++) {
1710 if (!strcmp(fn->key, optarg))
1716 D("unrecognised function %s", optarg);
1719 case 'o': /* data generation options */
1720 g.options = atoi(optarg);
1723 case 'a': /* force affinity */
1724 g.affinity = atoi(optarg);
1727 case 'i': /* interface */
1728 /* a prefix of tap: netmap: or pcap: forces the mode.
1729 * otherwise we guess
/* The -8 reserves room for the longest prepended prefix ("netmap:"). */
1731 D("interface is %s", optarg);
1732 if (strlen(optarg) > MAX_IFNAMELEN - 8) {
1733 D("ifname too long %s", optarg);
1736 strcpy(g.ifname, optarg);
1737 if (!strcmp(optarg, "null")) {
1738 g.dev_type = DEV_NETMAP;
1740 } else if (!strncmp(optarg, "tap:", 4)) {
1741 g.dev_type = DEV_TAP;
1742 strcpy(g.ifname, optarg + 4);
1743 } else if (!strncmp(optarg, "pcap:", 5)) {
1744 g.dev_type = DEV_PCAP;
1745 strcpy(g.ifname, optarg + 5);
1746 } else if (!strncmp(optarg, "netmap:", 7) ||
1747 !strncmp(optarg, "vale", 4)) {
1748 g.dev_type = DEV_NETMAP;
1749 } else if (!strncmp(optarg, "tap", 3)) {
1750 g.dev_type = DEV_TAP;
1751 } else { /* prepend netmap: */
1752 g.dev_type = DEV_NETMAP;
1753 sprintf(g.ifname, "netmap:%s", optarg);
1758 g.options |= OPT_INDIRECT; /* XXX use indirect buffer */
1761 case 'l': /* pkt_size */
1762 g.pkt_size = atoi(optarg);
1766 g.dst_ip.name = optarg;
1770 g.src_ip.name = optarg;
1773 case 'T': /* report interval */
1774 g.report_interval = atoi(optarg);
1778 wait_link = atoi(optarg);
/* NOTE(review): the comment on the assignment below contradicts
 * forever = 0 (0 looks like "do exit"); the XXX says the default
 * changed — confirm which polarity is intended. */
1781 case 'W': /* XXX changed default */
1782 g.forever = 0; /* do not exit rx even with no traffic */
1785 case 'b': /* burst */
1786 g.burst = atoi(optarg);
1789 g.cpus = atoi(optarg);
1792 g.nthreads = atoi(optarg);
1795 case 'D': /* destination mac */
1796 g.dst_mac.name = optarg;
1799 case 'S': /* source mac */
1800 g.src_mac.name = optarg;
1806 g.tx_rate = atoi(optarg);
1809 g.options |= OPT_DUMP;
1812 g.nmr_config = strdup(optarg);
1815 g.virt_header = atoi(optarg);
1817 case 'e': /* extra bufs */
1818 g.extra_bufs = atoi(optarg);
/* -m: monitor mode, tx or rx side. */
1821 if (strcmp(optarg, "tx") == 0) {
1822 g.options |= OPT_MONITOR_TX;
1823 } else if (strcmp(optarg, "rx") == 0) {
1824 g.options |= OPT_MONITOR_RX;
1826 D("unrecognized monitor mode %s", optarg);
1830 g.packet_file = strdup(optarg);
1833 g.options |= OPT_RANDOM_SRC;
1836 g.options |= OPT_RANDOM_DST;
/* --- end of getopt loop: sanity checks on the configuration --- */
1841 if (strlen(g.ifname) <=0 ) {
1842 D("missing ifname");
1847 if (g.cpus < 0 || g.cpus > i) {
1848 D("%d cpus is too high, have only %d cpus", g.cpus, i);
1854 if (g.pkt_size < 16 || g.pkt_size > MAX_PKTSIZE) {
1855 D("bad pktsize %d [16..%d]\n", g.pkt_size, MAX_PKTSIZE);
/* If no -S was given, read the source mac from the interface. */
1859 if (g.src_mac.name == NULL) {
1860 static char mybuf[20] = "00:00:00:00:00:00";
1861 /* retrieve source mac address. */
1862 if (source_hwaddr(g.ifname, mybuf) == -1) {
1863 D("Unable to retrieve source mac");
1864 // continue, fail later
1866 g.src_mac.name = mybuf;
1868 /* extract address ranges */
1869 extract_ip_range(&g.src_ip);
1870 extract_ip_range(&g.dst_ip);
1871 extract_mac_range(&g.src_mac);
1872 extract_mac_range(&g.dst_mac);
/* Ranged addresses force per-packet copies (headers change). */
1874 if (g.src_ip.start != g.src_ip.end ||
1875 g.src_ip.port0 != g.src_ip.port1 ||
1876 g.dst_ip.start != g.dst_ip.end ||
1877 g.dst_ip.port0 != g.dst_ip.port1)
1878 g.options |= OPT_COPY;
1880 if (g.virt_header != 0 && g.virt_header != VIRT_HDR_1
1881 && g.virt_header != VIRT_HDR_2) {
1882 D("bad virtio-net-header length");
/* --- open the selected backend device --- */
1886 if (g.dev_type == DEV_TAP) {
1887 D("want to use tap %s", g.ifname);
1888 g.main_fd = tap_alloc(g.ifname);
1889 if (g.main_fd < 0) {
1890 D("cannot open tap %s", g.ifname);
1894 } else if (g.dev_type == DEV_PCAP) {
1895 char pcap_errbuf[PCAP_ERRBUF_SIZE];
1897 pcap_errbuf[0] = '\0'; // init the buffer
/* snaplen 256 (headers only), promiscuous, 100 ms read timeout. */
1898 g.p = pcap_open_live(g.ifname, 256 /* XXX */, 1, 100, pcap_errbuf);
1900 D("cannot open pcap on %s", g.ifname);
1903 g.main_fd = pcap_fileno(g.p);
1904 D("using pcap on %s fileno %d", g.ifname, g.main_fd);
1905 #endif /* !NO_PCAP */
1906 } else if (g.dummy_send) { /* but DEV_NETMAP */
1907 D("using a dummy send routine");
1909 struct nmreq base_nmd;
1911 bzero(&base_nmd, sizeof(base_nmd));
/* Apply -C ring/slot overrides and request -e extra buffers. */
1913 parse_nmr_config(g.nmr_config, &base_nmd);
1915 base_nmd.nr_arg3 = g.extra_bufs;
1919 * Open the netmap device using nm_open().
1921 * protocol stack and may cause a reset of the card,
1922 * which in turn may take some time for the PHY to
1923 * reconfigure. We do the open here to have time to reset.
1925 g.nmd = nm_open(g.ifname, &base_nmd, 0, NULL);
1926 if (g.nmd == NULL) {
1927 D("Unable to open %s: %s", g.ifname, strerror(errno));
1930 g.main_fd = g.nmd->fd;
1931 D("mapped %dKB at %p", g.nmd->req.nr_memsize>>10, g.nmd->mem);
1933 /* get num of queues in tx or rx */
1934 if (g.td_body == sender_body)
1935 devqueues = g.nmd->req.nr_tx_rings;
1937 devqueues = g.nmd->req.nr_rx_rings;
1939 /* validate provided nthreads. */
1940 if (g.nthreads < 1 || g.nthreads > devqueues) {
1941 D("bad nthreads %d, have %d queues", g.nthreads, devqueues);
1942 // continue, fail later
/* Verbose dump of the mapped rings; <= includes the host ring. */
1946 struct netmap_if *nifp = g.nmd->nifp;
1947 struct nmreq *req = &g.nmd->req;
1949 D("nifp at offset %d, %d tx %d rx region %d",
1950 req->nr_offset, req->nr_tx_rings, req->nr_rx_rings,
1952 for (i = 0; i <= req->nr_tx_rings; i++) {
1953 struct netmap_ring *ring = NETMAP_TXRING(nifp, i);
1954 D(" TX%d at 0x%lx slots %d", i,
1955 (char *)ring - (char *)nifp, ring->num_slots);
1957 for (i = 0; i <= req->nr_rx_rings; i++) {
1958 struct netmap_ring *ring = NETMAP_RXRING(nifp, i);
1959 D(" RX%d at 0x%lx slots %d", i,
1960 (char *)ring - (char *)nifp, ring->num_slots);
1964 /* Print some debug information. */
1966 "%s %s: %d queues, %d threads and %d cpus.\n",
1967 (g.td_body == sender_body) ? "Sending on" : "Receiving from",
1972 if (g.td_body == sender_body) {
1973 fprintf(stdout, "%s -> %s (%s -> %s)\n",
1974 g.src_ip.name, g.dst_ip.name,
1975 g.src_mac.name, g.dst_mac.name);
1979 /* Exit if something went wrong. */
1980 if (g.main_fd < 0) {
1988 D("--- SPECIAL OPTIONS:%s%s%s%s%s\n",
1989 g.options & OPT_PREFETCH ? " prefetch" : "",
1990 g.options & OPT_ACCESS ? " access" : "",
1991 g.options & OPT_MEMCPY ? " memcpy" : "",
1992 g.options & OPT_INDIRECT ? " indirect" : "",
1993 g.options & OPT_COPY ? " copy" : "");
/* Convert -R rate (pps) into a per-batch transmit period. */
1996 g.tx_period.tv_sec = g.tx_period.tv_nsec = 0;
1997 if (g.tx_rate > 0) {
1998 /* try to have at least something every second,
1999 * reducing the burst size to some 0.01s worth of data
2000 * (but no less than one full set of fragments)
2003 int lim = (g.tx_rate)/300;
2006 if (g.burst < g.frags)
/* period (ns) = 1e9 * burst / rate, split into sec + nsec below. */
2008 x = ((uint64_t)1000000000 * (uint64_t)g.burst) / (uint64_t) g.tx_rate;
2009 g.tx_period.tv_nsec = x;
2010 g.tx_period.tv_sec = g.tx_period.tv_nsec / 1000000000;
2011 g.tx_period.tv_nsec = g.tx_period.tv_nsec % 1000000000;
2013 if (g.td_body == sender_body)
2014 D("Sending %d packets every %ld.%09ld s",
2015 g.burst, g.tx_period.tv_sec, g.tx_period.tv_nsec);
2016 /* Wait for PHY reset. */
2017 D("Wait %d secs for phy reset", wait_link);
2021 /* Install ^C handler. */
2022 global_nthreads = g.nthreads;
2023 signal(SIGINT, sigint_h);