/*-
 * Copyright (c) 2004-2005 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2001-2003 Roman V. Palagin <romanp@unshadow.net>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $SourceForge: netflow.c,v 1.41 2004/09/05 11:41:10 glebius Exp $
 */

static const char rcs_id[] =
    "@(#) $FreeBSD$";

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/socket.h>

#include <machine/atomic.h>

#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <netgraph/ng_message.h>
#include <netgraph/netgraph.h>

#include <netgraph/netflow/netflow.h>
#include <netgraph/netflow/ng_netflow.h>

#define NBUCKETS        (65536)         /* must be power of 2 */
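
/*
 * Since NBUCKETS is a power of 2, a 32-bit hash folds into a bucket
 * index with a single AND instead of a modulo:
 *
 *      slot = h & (NBUCKETS - 1);      (equivalent to h % 65536)
 *
 * The hash macros below rely on this property.
 */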

/* This hash is for TCP or UDP packets */
#define FULL_HASH(addr1,addr2,port1,port2)\
        (((addr1 >> 16) ^               \
          (addr2 & 0x00FF) ^            \
          ((port1 ^ port2) << 8) )&     \
         (NBUCKETS - 1))

/* This hash is for all other IP packets */
#define ADDR_HASH(addr1,addr2)\
        (((addr1 >> 16) ^               \
          (addr2 & 0x00FF) )&           \
         (NBUCKETS - 1))

/* Macros to shorten logical constructions */
/* XXX: priv must exist in namespace */
#define INACTIVE(fle)   (time_uptime - fle->f.last > priv->info.nfinfo_inact_t)
#define AGED(fle)       (time_uptime - fle->f.first > priv->info.nfinfo_act_t)
#define ISFREE(fle)     (fle->f.packets == 0)
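
/*
 * For instance, with an inactive timeout of 15 seconds and an active
 * timeout of 1800 seconds (typical NetFlow values; the real ones live
 * in priv->info), a flow last hit 20 seconds ago is INACTIVE, and a
 * flow whose first packet arrived half an hour ago is AGED even if it
 * is still passing traffic.
 */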

/*
 * 4 is a magic number: statistically, 4-packet flows outnumber
 * 5-, 6-, 7-...packet flows by an order of magnitude. Most UDP/ICMP
 * scans are 1 packet (~90% of the flow cache). TCP scans take 2 packets
 * for a reachable host and 4 packets otherwise.
 */
#define SMALL(fle)      (fle->f.packets <= 4)
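
/*
 * For example, a single-packet UDP probe creates a flow with
 * f.packets == 1, so SMALL() is true and the entry becomes eligible
 * for eviction as soon as INACTIVE() holds, keeping scan traffic from
 * crowding out long-lived flows.
 */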

/*
 * Cisco uses milliseconds for uptime. Bad idea, since it overflows
 * every 48+ days. But we will do the same to keep compatibility. This
 * macro performs the overflowable multiplication by 1000.
 */
#define MILLIUPTIME(t)  (((t) << 9) +   /* 512 */       \
                         ((t) << 8) +   /* 256 */       \
                         ((t) << 7) +   /* 128 */       \
                         ((t) << 6) +   /* 64 */        \
                         ((t) << 5) +   /* 32 */        \
                         ((t) << 3))    /* 8 */
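
/*
 * Check: 512 + 256 + 128 + 64 + 32 + 8 = 1000, so MILLIUPTIME(t)
 * equals 1000 * t modulo 2^32. At 1000 ms per second the 32-bit
 * result wraps every 2^32 ms, i.e. roughly every 49.7 days.
 */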

MALLOC_DECLARE(M_NETFLOW_HASH);
MALLOC_DEFINE(M_NETFLOW_HASH, "netflow_hash", "NetFlow hash");

static int export_add(item_p, struct flow_entry *);
static int export_send(priv_p, item_p, int flags);

/* Generate hash for a given flow record. */
static __inline uint32_t
ip_hash(struct flow_rec *r)
{
        switch (r->r_ip_p) {
        case IPPROTO_TCP:
        case IPPROTO_UDP:
                return FULL_HASH(r->r_src.s_addr, r->r_dst.s_addr,
                    r->r_sport, r->r_dport);
        default:
                return ADDR_HASH(r->r_src.s_addr, r->r_dst.s_addr);
        }
}

/* This is a callback from uma(9), called on alloc. */
static int
uma_ctor_flow(void *mem, int size, void *arg, int how)
{
        priv_p priv = (priv_p )arg;

        if (atomic_load_acq_32(&priv->info.nfinfo_used) >= CACHESIZE)
                return (ENOMEM);

        atomic_add_32(&priv->info.nfinfo_used, 1);

        return (0);
}

/* This is a callback from uma(9), called on free. */
static void
uma_dtor_flow(void *mem, int size, void *arg)
{
        priv_p priv = (priv_p )arg;

        atomic_subtract_32(&priv->info.nfinfo_used, 1);
}

/*
 * Detach export datagram from priv, if there is any.
 * If there is none, allocate a new one.
 */
static item_p
get_export_dgram(priv_p priv)
{
        item_p item = NULL;

        mtx_lock(&priv->export_mtx);
        if (priv->export_item != NULL) {
                item = priv->export_item;
                priv->export_item = NULL;
        }
        mtx_unlock(&priv->export_mtx);

        if (item == NULL) {
                struct netflow_v5_export_dgram *dgram;
                struct mbuf *m;

                m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
                if (m == NULL)
                        return (NULL);
                item = ng_package_data(m, NG_NOFLAGS);
                if (item == NULL)
                        return (NULL);
                dgram = mtod(m, struct netflow_v5_export_dgram *);
                dgram->header.count = 0;
                dgram->header.version = htons(NETFLOW_V5);
        }

        return (item);
}

/*
 * Re-attach an incomplete datagram back to priv.
 * If there is already another one there, send the incomplete one.
 */
static void
return_export_dgram(priv_p priv, item_p item, int flags)
{
        /*
         * It may happen on SMP that some thread has already
         * put its item there; in this case we bail out and
         * send what we have to the collector.
         */
        mtx_lock(&priv->export_mtx);
        if (priv->export_item == NULL) {
                priv->export_item = item;
                mtx_unlock(&priv->export_mtx);
        } else {
                mtx_unlock(&priv->export_mtx);
                export_send(priv, item, flags);
        }
}

/*
 * The flow is over. Call export_add() and free the entry. If the
 * datagram is full, call export_send().
 */
static __inline void
expire_flow(priv_p priv, item_p *item, struct flow_entry *fle, int flags)
{
        if (*item == NULL)
                *item = get_export_dgram(priv);
        if (*item == NULL) {
                atomic_add_32(&priv->info.nfinfo_export_failed, 1);
                uma_zfree_arg(priv->zone, fle, priv);
                return;
        }
        if (export_add(*item, fle) > 0) {
                export_send(priv, *item, flags);
                *item = NULL;
        }
        uma_zfree_arg(priv->zone, fle, priv);
}
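
/*
 * Note: expire_flow() batches expired records into a shared, partially
 * filled export datagram; once export_add() reports the datagram full,
 * it is pushed to the export hook via export_send().
 */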

/* Get a snapshot of node statistics. */
void
ng_netflow_copyinfo(priv_p priv, struct ng_netflow_info *i)
{
        memcpy((void *)i, (void *)&priv->info, sizeof(priv->info));
}

/* Calculate the number of bits set in a netmask. */
#define g21     0x55555555ul    /* = 0101_0101_0101_0101_0101_0101_0101_0101 */
#define g22     0x33333333ul    /* = 0011_0011_0011_0011_0011_0011_0011_0011 */
#define g23     0x0f0f0f0ful    /* = 0000_1111_0000_1111_0000_1111_0000_1111 */
static __inline u_char
bit_count(uint32_t v)
{
        v = (v & g21) + ((v >> 1) & g21);
        v = (v & g22) + ((v >> 2) & g22);
        v = (v + (v >> 4)) & g23;
        return (v + (v >> 8) + (v >> 16) + (v >> 24)) & 0x3f;
}
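
/*
 * Worked example: for a /24 netmask v = 0xffffff00. The first line
 * sums adjacent bits, the second adjacent 2-bit fields, the third
 * adjacent nibbles, and the return statement folds the four per-byte
 * counts together: 8 + 8 + 8 + 0 = 24.
 */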

/*
 * Insert a record into the defined slot.
 *
 * First we get a free flow entry, then fill in all
 * the fields we can.
 *
 * TODO: consider dropping the hash mutex while filling in the datagram,
 * as was done in the previous version. Need to test & profile
 * first.
 */
static __inline int
hash_insert(priv_p priv, struct flow_hash_entry *hsh, struct flow_rec *r,
        int plen, uint8_t tcp_flags)
{
        struct flow_entry *fle;
        struct route ro;
        struct sockaddr_in *sin;

        mtx_assert(&hsh->mtx, MA_OWNED);

        fle = uma_zalloc_arg(priv->zone, priv, M_NOWAIT);
        if (fle == NULL) {
                atomic_add_32(&priv->info.nfinfo_alloc_failed, 1);
                return (ENOMEM);
        }

        /*
         * Now fle is totally ours. It is detached from all lists;
         * we can safely edit it.
         */
        bcopy(r, &fle->f.r, sizeof(struct flow_rec));
        fle->f.bytes = plen;
        fle->f.packets = 1;
        fle->f.tcp_flags = tcp_flags;

        fle->f.first = fle->f.last = time_uptime;

        /*
         * First we do a route table lookup on the destination address,
         * so we can fill in out_ifx, dst_mask, nexthop, and dst_as in
         * future releases.
         */
        bzero((caddr_t)&ro, sizeof(ro));
        sin = (struct sockaddr_in *)&ro.ro_dst;
        sin->sin_len = sizeof(*sin);
        sin->sin_family = AF_INET;
        sin->sin_addr = fle->f.r.r_dst;
        rtalloc_ign(&ro, RTF_CLONING);
        if (ro.ro_rt != NULL) {
                struct rtentry *rt = ro.ro_rt;

                fle->f.fle_o_ifx = rt->rt_ifp->if_index;

                if (rt->rt_flags & RTF_GATEWAY &&
                    rt->rt_gateway->sa_family == AF_INET)
                        fle->f.next_hop =
                            ((struct sockaddr_in *)(rt->rt_gateway))->sin_addr;

                if (rt_mask(rt))
                        fle->f.dst_mask =
                            bit_count(((struct sockaddr_in *)rt_mask(rt))->sin_addr.s_addr);
                else if (rt->rt_flags & RTF_HOST)
                        /* Give up. We can't determine mask :( */
                        fle->f.dst_mask = 32;

                RTFREE(ro.ro_rt);
        }

        /* Do route lookup on source address, to fill in src_mask. */
        bzero((caddr_t)&ro, sizeof(ro));
        sin = (struct sockaddr_in *)&ro.ro_dst;
        sin->sin_len = sizeof(*sin);
        sin->sin_family = AF_INET;
        sin->sin_addr = fle->f.r.r_src;
        rtalloc_ign(&ro, RTF_CLONING);
        if (ro.ro_rt != NULL) {
                struct rtentry *rt = ro.ro_rt;

                if (rt_mask(rt))
                        fle->f.src_mask =
                            bit_count(((struct sockaddr_in *)rt_mask(rt))->sin_addr.s_addr);
                else if (rt->rt_flags & RTF_HOST)
                        /* Give up. We can't determine mask :( */
                        fle->f.src_mask = 32;

                RTFREE(ro.ro_rt);
        }

        /* Push the new flow to the end of the hash chain. */
        TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);

        return (0);
}

/*
 * Non-static functions called from ng_netflow.c.
 */

/* Allocate memory and set up flow cache */
int
ng_netflow_cache_init(priv_p priv)
{
        struct flow_hash_entry *hsh;
        int i;

        /* Initialize cache UMA zone. */
        priv->zone = uma_zcreate("NetFlow cache", sizeof(struct flow_entry),
            uma_ctor_flow, uma_dtor_flow, NULL, NULL, UMA_ALIGN_CACHE, 0);
        uma_zone_set_max(priv->zone, CACHESIZE);

        /* Allocate hash. */
        MALLOC(priv->hash, struct flow_hash_entry *,
            NBUCKETS * sizeof(struct flow_hash_entry),
            M_NETFLOW_HASH, M_WAITOK | M_ZERO);

        if (priv->hash == NULL) {
                uma_zdestroy(priv->zone);
                return (ENOMEM);
        }

        /* Initialize hash. */
        for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++) {
                mtx_init(&hsh->mtx, "hash mutex", NULL, MTX_DEF);
                TAILQ_INIT(&hsh->head);
        }

        mtx_init(&priv->export_mtx, "export dgram lock", NULL, MTX_DEF);

        return (0);
}
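
/*
 * Note: the cache size is bounded twice: uma_zone_set_max() caps the
 * zone at CACHESIZE items, and uma_ctor_flow() independently refuses
 * allocations once nfinfo_used reaches CACHESIZE.
 */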

/* Free all flow cache memory. Called from the node close method. */
void
ng_netflow_cache_flush(priv_p priv)
{
        struct flow_entry *fle, *fle1;
        struct flow_hash_entry *hsh;
        item_p item = NULL;
        int i;

        /*
         * We are going to free probably billable data.
         * Expire everything before freeing it.
         * No locking is required since the callout is already drained.
         */
        for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++)
                TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
                        TAILQ_REMOVE(&hsh->head, fle, fle_hash);
                        expire_flow(priv, &item, fle, NG_QUEUE);
                }

        if (item != NULL)
                export_send(priv, item, NG_QUEUE);

        uma_zdestroy(priv->zone);

        /* Destroy hash mutexes. */
        for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++)
                mtx_destroy(&hsh->mtx);

        /* Free hash memory. */
        FREE(priv->hash, M_NETFLOW_HASH);

        mtx_destroy(&priv->export_mtx);
}

/* Insert a packet into the flow cache. */
int
ng_netflow_flow_add(priv_p priv, struct ip *ip, iface_p iface,
        struct ifnet *ifp)
{
        register struct flow_entry *fle, *fle1;
        struct flow_hash_entry *hsh;
        struct flow_rec r;
        item_p item = NULL;
        int hlen, plen;
        int error = 0;
        uint8_t tcp_flags = 0;

        /* Try to fill flow_rec r */
        bzero(&r, sizeof(r));

        if (ip->ip_v != IPVERSION)
                return (EINVAL);

        /* verify min header length */
        hlen = ip->ip_hl << 2;

        if (hlen < sizeof(struct ip))
                return (EINVAL);

        r.r_src = ip->ip_src;
        r.r_dst = ip->ip_dst;

        /* save packet length */
        plen = ntohs(ip->ip_len);

        r.r_ip_p = ip->ip_p;
        r.r_tos = ip->ip_tos;

        /* Configured in_ifx overrides mbuf's */
        if (iface->info.ifinfo_index == 0) {
                if (ifp != NULL)
                        r.r_i_ifx = ifp->if_index;
        } else
                r.r_i_ifx = iface->info.ifinfo_index;

        /*
         * XXX NOTE: only the first fragment of a fragmented TCP, UDP or
         * ICMP packet will be recorded with proper s_port and d_port.
         * Following fragments will be recorded simply as IP packets with
         * ip_proto = ip->ip_p and s_port, d_port set to zero.
         * I know, it looks like a bug. But I don't want to re-implement
         * IP packet assembling here. Anyway, the (in)famous trafd works
         * this way - and nobody has complained yet :)
         */
        if ((ip->ip_off & htons(IP_OFFMASK)) == 0)
                switch (r.r_ip_p) {
                case IPPROTO_TCP:
                {
                        register struct tcphdr *tcp;

                        tcp = (struct tcphdr *)((caddr_t )ip + hlen);
                        r.r_sport = tcp->th_sport;
                        r.r_dport = tcp->th_dport;
                        tcp_flags = tcp->th_flags;
                        break;
                }
                case IPPROTO_UDP:
                        r.r_ports = *(uint32_t *)((caddr_t )ip + hlen);
                        break;
                }
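
        /*
         * For example, for a fragmented UDP datagram only the first
         * fragment (fragment offset 0) carries the UDP header and thus
         * fills in r_sport/r_dport; later fragments keep the ports at
         * zero and are hashed via ADDR_HASH() as plain IP packets.
         */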

        /* Update node statistics. XXX: race... */
        priv->info.nfinfo_packets++;
        priv->info.nfinfo_bytes += plen;

        /* Find hash slot. */
        hsh = &priv->hash[ip_hash(&r)];

        mtx_lock(&hsh->mtx);

        /*
         * Go through hash and find our entry. If we encounter an
         * entry that should be expired, purge it. We do a reverse
         * search, since most active entries are first, and most
         * searches are done on most active entries.
         */
        TAILQ_FOREACH_REVERSE_SAFE(fle, &hsh->head, fhead, fle_hash, fle1) {
                if (bcmp(&r, &fle->f.r, sizeof(struct flow_rec)) == 0)
                        break;
                if ((INACTIVE(fle) && SMALL(fle)) || AGED(fle)) {
                        TAILQ_REMOVE(&hsh->head, fle, fle_hash);
                        expire_flow(priv, &item, fle, NG_QUEUE);
                        atomic_add_32(&priv->info.nfinfo_act_exp, 1);
                }
        }

        if (fle) {                      /* An existing entry. */

                fle->f.bytes += plen;
                fle->f.packets++;
                fle->f.tcp_flags |= tcp_flags;
                fle->f.last = time_uptime;

                /*
                 * We have the following reasons to expire the flow
                 * actively:
                 * - it hit active timeout
                 * - a TCP connection closed
                 * - it is going to overflow counter
                 */
                if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle) ||
                    (fle->f.bytes >= (UINT_MAX - IF_MAXMTU))) {
                        TAILQ_REMOVE(&hsh->head, fle, fle_hash);
                        expire_flow(priv, &item, fle, NG_QUEUE);
                        atomic_add_32(&priv->info.nfinfo_act_exp, 1);
                } else {
                        /*
                         * It is the newest; move it to the tail,
                         * if it isn't there already. The next search
                         * will find it faster.
                         */
                        if (fle != TAILQ_LAST(&hsh->head, fhead)) {
                                TAILQ_REMOVE(&hsh->head, fle, fle_hash);
                                TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
                        }
                }
        } else                          /* A new flow entry. */
                error = hash_insert(priv, hsh, &r, plen, tcp_flags);

        mtx_unlock(&hsh->mtx);

        if (item != NULL)
                return_export_dgram(priv, item, NG_QUEUE);

        return (error);
}

/*
 * Return records from cache to userland.
 *
 * TODO: matching a particular IP should be done in kernel, here.
 */
int
ng_netflow_flow_show(priv_p priv, uint32_t last, struct ng_mesg *resp)
{
        struct flow_hash_entry *hsh;
        struct flow_entry *fle;
        struct ngnf_flows *data;
        int i;

        data = (struct ngnf_flows *)resp->data;
        data->last = 0;
        data->nentries = 0;

        /* Check if this is a first run */
        if (last == 0) {
                hsh = priv->hash;
                i = 0;
        } else {
                if (last > NBUCKETS-1)
                        return (EINVAL);
                hsh = priv->hash + last;
                i = last;
        }

        /*
         * We will transfer no more than NREC_AT_ONCE records; more
         * data will come in the next message.
         * We send the current hash index to userland, and userland
         * should return it back to us. Then we will restart with the
         * next entry.
         *
         * The resulting cache snapshot is inaccurate for the
         * following reasons:
         * - we skip locked hash entries
         * - we bail out if someone else wants our entry
         * - we skip the rest of an entry when we hit NREC_AT_ONCE
         */
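
        /*
         * The intended exchange (sketch): userland starts with
         * last == 0; while a reply carries a non-zero last value,
         * userland repeats the request with that value to resume the
         * scan at the bucket where the previous reply stopped.
         */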
        for (; i < NBUCKETS; hsh++, i++) {
                if (mtx_trylock(&hsh->mtx) == 0)
                        continue;

                TAILQ_FOREACH(fle, &hsh->head, fle_hash) {
                        if (hsh->mtx.mtx_lock & MTX_CONTESTED)
                                break;

                        bcopy(&fle->f, &(data->entries[data->nentries]),
                            sizeof(fle->f));
                        data->nentries++;
                        if (data->nentries == NREC_AT_ONCE) {
                                mtx_unlock(&hsh->mtx);
                                if (++i < NBUCKETS)
                                        data->last = i;
                                return (0);
                        }
                }
                mtx_unlock(&hsh->mtx);
        }

        return (0);
}

/* We have a full datagram in privdata. Send it to the export hook. */
static int
export_send(priv_p priv, item_p item, int flags)
{
        struct mbuf *m = NGI_M(item);
        struct netflow_v5_export_dgram *dgram = mtod(m,
            struct netflow_v5_export_dgram *);
        struct netflow_v5_header *header = &dgram->header;
        struct timespec ts;
        int error = 0;

        /* Fill mbuf header. */
        m->m_len = m->m_pkthdr.len = sizeof(struct netflow_v5_record) *
            header->count + sizeof(struct netflow_v5_header);
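
        /*
         * With the standard NetFlow v5 on-the-wire sizes (24-byte
         * header, 48-byte records), a full datagram of 30 records
         * occupies 24 + 30 * 48 = 1464 octets and still fits a
         * 1500-byte Ethernet MTU together with the IP and UDP headers.
         */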

        /* Fill export header. */
        header->sys_uptime = htonl(MILLIUPTIME(time_uptime));
        getnanotime(&ts);
        header->unix_secs = htonl(ts.tv_sec);
        header->unix_nsecs = htonl(ts.tv_nsec);
        header->engine_type = 0;
        header->engine_id = 0;
        header->flow_seq = htonl(atomic_fetchadd_32(&priv->flow_seq,
            header->count));
        header->count = htons(header->count);

        if (priv->export != NULL)
                /* Should also NET_LOCK_GIANT(). */
                NG_FWD_ITEM_HOOK_FLAGS(error, item, priv->export, flags);
        else
                NG_FREE_ITEM(item);

        return (error);
}

/* Add export record to dgram. */
static int
export_add(item_p item, struct flow_entry *fle)
{
        struct netflow_v5_export_dgram *dgram = mtod(NGI_M(item),
            struct netflow_v5_export_dgram *);
        struct netflow_v5_header *header = &dgram->header;
        struct netflow_v5_record *rec;

        if (header->count == 0) {       /* first record */
                rec = &dgram->r[0];
                header->count = 1;
        } else {                        /* continue filling datagram */
                rec = &dgram->r[header->count];
                header->count++;
        }

        KASSERT(header->count <= NETFLOW_V5_MAX_RECORDS,
            ("ng_netflow: export too big"));

        /* Fill in export record. */
        rec->src_addr = fle->f.r.r_src.s_addr;
        rec->dst_addr = fle->f.r.r_dst.s_addr;
        rec->next_hop = fle->f.next_hop.s_addr;
        rec->i_ifx = htons(fle->f.fle_i_ifx);
        rec->o_ifx = htons(fle->f.fle_o_ifx);
        rec->packets = htonl(fle->f.packets);
        rec->octets = htonl(fle->f.bytes);
        rec->first = htonl(MILLIUPTIME(fle->f.first));
        rec->last = htonl(MILLIUPTIME(fle->f.last));
        rec->s_port = fle->f.r.r_sport;
        rec->d_port = fle->f.r.r_dport;
        rec->flags = fle->f.tcp_flags;
        rec->prot = fle->f.r.r_ip_p;
        rec->tos = fle->f.r.r_tos;
        rec->dst_mask = fle->f.dst_mask;
        rec->src_mask = fle->f.src_mask;

        /* Unsupported fields. */
        rec->src_as = rec->dst_as = 0;

        if (header->count == NETFLOW_V5_MAX_RECORDS)
                return (1);     /* end of datagram */
        else
                return (0);
}

/* Periodic flow expiry run. */
void
ng_netflow_expire(void *arg)
{
        struct flow_entry *fle, *fle1;
        struct flow_hash_entry *hsh;
        priv_p priv = (priv_p )arg;
        item_p item = NULL;
        uint32_t used;
        int i;

        /*
         * Go through the whole cache.
         */
        for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++) {
                /*
                 * Skip entries that are already being worked on.
                 */
                if (mtx_trylock(&hsh->mtx) == 0)
                        continue;

                used = atomic_load_acq_32(&priv->info.nfinfo_used);
                TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
                        /*
                         * Interrupt thread wants this entry!
                         * Quick! Quick! Bail out!
                         */
                        if (hsh->mtx.mtx_lock & MTX_CONTESTED)
                                break;

                        /*
                         * Don't expire aggressively while the predicted
                         * hash collision ratio is small (used <= 2 *
                         * NBUCKETS means an average chain length of at
                         * most two).
                         */
                        if (used <= (NBUCKETS*2) && !INACTIVE(fle))
                                break;

                        if ((INACTIVE(fle) && (SMALL(fle) ||
                            (used > (NBUCKETS*2)))) || AGED(fle)) {
                                TAILQ_REMOVE(&hsh->head, fle, fle_hash);
                                expire_flow(priv, &item, fle, NG_NOFLAGS);
                                atomic_add_32(&priv->info.nfinfo_inact_exp, 1);
                        }
                }
                mtx_unlock(&hsh->mtx);
        }

        if (item != NULL)
                return_export_dgram(priv, item, NG_NOFLAGS);

        /* Schedule next expire. */
        callout_reset(&priv->exp_callout, (1*hz), &ng_netflow_expire,
            (void *)priv);
}