/*-
 * Copyright (c) 2004-2005 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2001-2003 Roman V. Palagin <romanp@unshadow.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $SourceForge: netflow.c,v 1.41 2004/09/05 11:41:10 glebius Exp $
 */

static const char rcs_id[] =
    "@(#) $FreeBSD$";
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/socket.h>

#include <machine/atomic.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <netgraph/ng_message.h>
#include <netgraph/netgraph.h>

#include <netgraph/netflow/netflow.h>
#include <netgraph/netflow/ng_netflow.h>
#define NBUCKETS (65536)	/* must be power of 2 */

/* This hash is for TCP or UDP packets. */
#define FULL_HASH(addr1, addr2, port1, port2)	\
	(((addr1 ^ (addr1 >> 16) ^		\
	htons(addr2 ^ (addr2 >> 16))) ^		\
	port1 ^ htons(port2)) &			\
	(NBUCKETS - 1))

/* This hash is for all other IP packets. */
#define ADDR_HASH(addr1, addr2)			\
	((addr1 ^ (addr1 >> 16) ^		\
	htons(addr2 ^ (addr2 >> 16))) &		\
	(NBUCKETS - 1))
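/*
 * Both hashes fold the upper 16 bits of each 32-bit address into the
 * lower 16 and mask the result with (NBUCKETS - 1); since NBUCKETS is
 * a power of 2, this always yields a valid bucket index.
 */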
/* Macros to shorten logical constructions */
/* XXX: priv must exist in namespace */
#define INACTIVE(fle)	(time_uptime - fle->f.last > priv->info.nfinfo_inact_t)
#define AGED(fle)	(time_uptime - fle->f.first > priv->info.nfinfo_act_t)
#define ISFREE(fle)	(fle->f.packets == 0)

/*
 * 4 is a magic number: statistically, the number of 4-packet flows is
 * an order of magnitude larger than that of 5-, 6-, 7-...packet flows.
 * Most UDP/ICMP scans are 1-packet flows (~90% of the flow cache).
 * TCP scans take 2 packets for a reachable host and 4 packets otherwise.
 */
#define SMALL(fle)	(fle->f.packets <= 4)
/*
 * Cisco uses milliseconds for uptime. Bad idea, since it overflows
 * every 48+ days. But we do the same to keep compatibility. This macro
 * performs the (overflowable) multiplication by 1000.
 */
#define MILLIUPTIME(t)	(((t) << 9) +	/* 512 */	\
			 ((t) << 8) +	/* 256 */	\
			 ((t) << 7) +	/* 128 */	\
			 ((t) << 6) +	/* 64 */	\
			 ((t) << 5) +	/* 32 */	\
			 ((t) << 3))	/* 8 */
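/* 512 + 256 + 128 + 64 + 32 + 8 == 1000, so this is t * 1000 (mod 2^32). */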
MALLOC_DECLARE(M_NETFLOW_HASH);
MALLOC_DEFINE(M_NETFLOW_HASH, "netflow_hash", "NetFlow hash");

static int export_add(item_p, struct flow_entry *);
static int export_send(priv_p, item_p, int flags);
/* Generate hash for a given flow record. */
static __inline uint32_t
ip_hash(struct flow_rec *r)
{
	switch (r->r_ip_p) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		return FULL_HASH(r->r_src.s_addr, r->r_dst.s_addr,
		    r->r_sport, r->r_dport);
	default:
		return ADDR_HASH(r->r_src.s_addr, r->r_dst.s_addr);
	}
}
/* This is the callback from uma(9), called on alloc. */
static int
uma_ctor_flow(void *mem, int size, void *arg, int how)
{
	priv_p priv = (priv_p )arg;

	if (atomic_load_acq_32(&priv->info.nfinfo_used) >= CACHESIZE)
		return (ENOMEM);

	atomic_add_32(&priv->info.nfinfo_used, 1);

	return (0);
}
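/*
 * Note: a nonzero return from a uma(9) ctor makes uma_zalloc_arg() fail,
 * so a full cache shows up as an allocation failure in hash_insert().
 */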
/* This is the callback from uma(9), called on free. */
static void
uma_dtor_flow(void *mem, int size, void *arg)
{
	priv_p priv = (priv_p )arg;

	atomic_subtract_32(&priv->info.nfinfo_used, 1);
}
/*
 * Detach export datagram from priv, if there is any.
 * If there is none, allocate a new one.
 */
static item_p
get_export_dgram(priv_p priv)
{
	item_p	item = NULL;

	mtx_lock(&priv->export_mtx);
	if (priv->export_item != NULL) {
		item = priv->export_item;
		priv->export_item = NULL;
	}
	mtx_unlock(&priv->export_mtx);

	if (item == NULL) {
		struct netflow_v5_export_dgram *dgram;
		struct mbuf *m;

		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return (NULL);
		item = ng_package_data(m, NG_NOFLAGS);
		if (item == NULL)
			return (NULL);
		dgram = mtod(m, struct netflow_v5_export_dgram *);
		dgram->header.count = 0;
		dgram->header.version = htons(NETFLOW_V5);
		dgram->header.pad = 0;
	}

	return (item);
}
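/*
 * Only count, version and pad are initialized here; the remaining
 * header fields are filled in by export_send() just before the
 * datagram leaves the node.
 */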
/*
 * Re-attach incomplete datagram back to priv.
 * If there is already another one, send the incomplete one.
 */
static void
return_export_dgram(priv_p priv, item_p item, int flags)
{
	/*
	 * It may happen on SMP that some other thread has already
	 * put its item there; in this case we bail out and
	 * send what we have to the collector.
	 */
	mtx_lock(&priv->export_mtx);
	if (priv->export_item == NULL) {
		priv->export_item = item;
		mtx_unlock(&priv->export_mtx);
	} else {
		mtx_unlock(&priv->export_mtx);
		export_send(priv, item, flags);
	}
}
/*
 * The flow is over. Call export_add() and free it. If the datagram is
 * full, call export_send().
 */
static __inline void
expire_flow(priv_p priv, item_p *item, struct flow_entry *fle, int flags)
{
	if (*item == NULL)
		*item = get_export_dgram(priv);
	if (*item == NULL) {
		atomic_add_32(&priv->info.nfinfo_export_failed, 1);
		uma_zfree_arg(priv->zone, fle, priv);
		return;
	}
	if (export_add(*item, fle) > 0) {
		export_send(priv, *item, flags);
		*item = NULL;
	}
	uma_zfree_arg(priv->zone, fle, priv);
}
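/*
 * export_add() returns 1 once the datagram holds NETFLOW_V5_MAX_RECORDS
 * records; that is what triggers the immediate export_send() above.
 */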
/* Get a snapshot of node statistics. */
void
ng_netflow_copyinfo(priv_p priv, struct ng_netflow_info *i)
{
	memcpy((void *)i, (void *)&priv->info, sizeof(priv->info));
}
/*
 * Insert a record into defined slot.
 *
 * First get a free flow entry, then fill in all
 * possible fields in it.
 *
 * TODO: consider dropping hash mutex while filling in datagram,
 * as this was done in previous version. Need to test & profile
 * to be sure.
 */
static int
hash_insert(priv_p priv, struct flow_hash_entry *hsh, struct flow_rec *r,
	int plen, uint8_t tcp_flags)
{
	struct flow_entry *fle;
	struct sockaddr_in sin;
	struct rtentry *rt;

	mtx_assert(&hsh->mtx, MA_OWNED);

	fle = uma_zalloc_arg(priv->zone, priv, M_NOWAIT);
	if (fle == NULL) {
		atomic_add_32(&priv->info.nfinfo_alloc_failed, 1);
		return (ENOMEM);
	}

	/*
	 * Now fle is totally ours. It is detached from all lists,
	 * we can safely edit it.
	 */
	bcopy(r, &fle->f.r, sizeof(struct flow_rec));
	fle->f.bytes = plen;
	fle->f.packets = 1;
	fle->f.tcp_flags = tcp_flags;

	fle->f.first = fle->f.last = time_uptime;

	/*
	 * First we do route table lookup on destination address. So we can
	 * fill in out_ifx, dst_mask, nexthop, and dst_as in future releases.
	 */
	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr = fle->f.r.r_dst;
	/* XXX MRT 0 as a default.. need the m here to get fib */
	rt = rtalloc1_fib((struct sockaddr *)&sin, 0, 0, 0);
	if (rt != NULL) {
		fle->f.fle_o_ifx = rt->rt_ifp->if_index;

		if (rt->rt_flags & RTF_GATEWAY &&
		    rt->rt_gateway->sa_family == AF_INET)
			fle->f.next_hop =
			    ((struct sockaddr_in *)(rt->rt_gateway))->sin_addr;

		if (rt_mask(rt))
			fle->f.dst_mask = bitcount32(((struct sockaddr_in *)
			    rt_mask(rt))->sin_addr.s_addr);
		else if (rt->rt_flags & RTF_HOST)
			/* Give up. We can't determine mask :( */
			fle->f.dst_mask = 32;

		RTFREE_LOCKED(rt);
	}

	/* Do route lookup on source address, to fill in src_mask. */
	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr = fle->f.r.r_src;
	/* XXX MRT 0 as a default, revisit. Need the mbuf for fib. */
	rt = rtalloc1_fib((struct sockaddr *)&sin, 0, 0, 0);
	if (rt != NULL) {
		if (rt_mask(rt))
			fle->f.src_mask = bitcount32(((struct sockaddr_in *)
			    rt_mask(rt))->sin_addr.s_addr);
		else if (rt->rt_flags & RTF_HOST)
			/* Give up. We can't determine mask :( */
			fle->f.src_mask = 32;

		RTFREE_LOCKED(rt);
	}

	/* Push the new flow to the end of the hash chain. */
	TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);

	return (0);
}
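/*
 * Note: rtalloc1_fib() returns a locked, referenced rtentry, which is
 * why the lookups above release it with RTFREE_LOCKED().
 */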
/*
 * Non-static functions called from ng_netflow.c.
 */

/* Allocate memory and set up flow cache. */
int
ng_netflow_cache_init(priv_p priv)
{
	struct flow_hash_entry *hsh;
	int i;

	/* Initialize cache UMA zone. */
	priv->zone = uma_zcreate("NetFlow cache", sizeof(struct flow_entry),
	    uma_ctor_flow, uma_dtor_flow, NULL, NULL, UMA_ALIGN_CACHE, 0);
	uma_zone_set_max(priv->zone, CACHESIZE);

	/* Allocate hash. */
	priv->hash = malloc(NBUCKETS * sizeof(struct flow_hash_entry),
	    M_NETFLOW_HASH, M_WAITOK | M_ZERO);

	if (priv->hash == NULL) {
		uma_zdestroy(priv->zone);
		return (ENOMEM);
	}

	/* Initialize hash. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++) {
		mtx_init(&hsh->mtx, "hash mutex", NULL, MTX_DEF);
		TAILQ_INIT(&hsh->head);
	}

	mtx_init(&priv->export_mtx, "export dgram lock", NULL, MTX_DEF);

	return (0);
}
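/*
 * Note: with M_WAITOK, malloc(9) cannot return NULL, so the NULL check
 * in ng_netflow_cache_init() above is purely defensive.
 */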
/* Free all flow cache memory. Called from node close method. */
void
ng_netflow_cache_flush(priv_p priv)
{
	struct flow_entry *fle, *fle1;
	struct flow_hash_entry *hsh;
	item_p item = NULL;
	int i;

	/*
	 * We are about to free probably billable data.
	 * Expire everything before freeing it.
	 * No locking is required since the callout is already drained.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++)
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle, NG_QUEUE);
		}

	if (item != NULL)
		export_send(priv, item, NG_QUEUE);

	uma_zdestroy(priv->zone);

	/* Destroy hash mutexes. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++)
		mtx_destroy(&hsh->mtx);

	/* Free hash memory. */
	free(priv->hash, M_NETFLOW_HASH);

	mtx_destroy(&priv->export_mtx);
}
/* Insert a packet into the flow cache. */
int
ng_netflow_flow_add(priv_p priv, struct ip *ip, unsigned int src_if_index)
{
	register struct flow_entry *fle, *fle1;
	struct flow_hash_entry *hsh;
	struct flow_rec r;
	item_p item = NULL;
	int hlen, plen;
	int error = 0;
	uint8_t tcp_flags = 0;

	/* Try to fill in flow_rec r */
	bzero(&r, sizeof(r));

	if (ip->ip_v != IPVERSION)
		return (EINVAL);

	/* verify min header length */
	hlen = ip->ip_hl << 2;

	if (hlen < sizeof(struct ip))
		return (EINVAL);

	r.r_src = ip->ip_src;
	r.r_dst = ip->ip_dst;

	/* save packet length */
	plen = ntohs(ip->ip_len);

	r.r_ip_p = ip->ip_p;
	r.r_tos = ip->ip_tos;

	r.r_i_ifx = src_if_index;

	/*
	 * XXX NOTE: only the first fragment of a fragmented TCP, UDP or
	 * ICMP packet will be recorded with proper s_port and d_port.
	 * Subsequent fragments will be recorded simply as IP packets with
	 * ip_proto = ip->ip_p and s_port, d_port set to zero.
	 * I know it looks like a bug, but I don't want to re-implement
	 * IP packet reassembly here. Anyway, the (in)famous trafd works
	 * this way - and nobody has complained yet :)
	 */
	if ((ip->ip_off & htons(IP_OFFMASK)) == 0)
		switch (r.r_ip_p) {
		case IPPROTO_TCP:
		{
			register struct tcphdr *tcp;

			tcp = (struct tcphdr *)((caddr_t )ip + hlen);
			r.r_sport = tcp->th_sport;
			r.r_dport = tcp->th_dport;
			tcp_flags = tcp->th_flags;
			break;
		}
		case IPPROTO_UDP:
			r.r_ports = *(uint32_t *)((caddr_t )ip + hlen);
			break;
		}
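	/*
	 * The single 32-bit copy above works because r_ports overlays the
	 * r_sport/r_dport pair in struct flow_rec, and the first four bytes
	 * of a UDP header are exactly the source and destination ports.
	 */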
	/* Update node statistics. XXX: race... */
	priv->info.nfinfo_packets ++;
	priv->info.nfinfo_bytes += plen;

	/* Find hash slot. */
	hsh = &priv->hash[ip_hash(&r)];

	mtx_lock(&hsh->mtx);

	/*
	 * Go through the hash and find our entry. If we encounter an
	 * entry that should be expired, purge it. We do a reverse
	 * search, since the most active entries are at the tail (where
	 * the search starts) and most lookups target active entries.
	 */
	TAILQ_FOREACH_REVERSE_SAFE(fle, &hsh->head, fhead, fle_hash, fle1) {
		if (bcmp(&r, &fle->f.r, sizeof(struct flow_rec)) == 0)
			break;
		if ((INACTIVE(fle) && SMALL(fle)) || AGED(fle)) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		}
	}

	if (fle) {			/* An existing entry. */

		fle->f.bytes += plen;
		fle->f.packets ++;
		fle->f.tcp_flags |= tcp_flags;
		fle->f.last = time_uptime;

		/*
		 * We have the following reasons to expire a flow actively:
		 * - it hit the active timeout
		 * - a TCP connection closed
		 * - its byte counter is about to overflow
		 */
		if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle) ||
		    (fle->f.bytes >= (UINT_MAX - IF_MAXMTU))) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		} else {
			/*
			 * It is the newest; move it to the tail,
			 * if it isn't there already. The next search
			 * will find it quicker.
			 */
			if (fle != TAILQ_LAST(&hsh->head, fhead)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
			}
		}
	} else				/* A new flow entry. */
		error = hash_insert(priv, hsh, &r, plen, tcp_flags);

	mtx_unlock(&hsh->mtx);

	if (item != NULL)
		return_export_dgram(priv, item, NG_QUEUE);

	return (error);
}
/*
 * Return records from cache to userland.
 *
 * TODO: matching of a particular IP should be done in kernel, here.
 */
int
ng_netflow_flow_show(priv_p priv, uint32_t last, struct ng_mesg *resp)
{
	struct flow_hash_entry *hsh;
	struct flow_entry *fle;
	struct ngnf_flows *data;
	int i;

	data = (struct ngnf_flows *)resp->data;
	data->last = 0;
	data->nentries = 0;

	/* Check if this is the first run */
	if (last == 0) {
		hsh = priv->hash;
		i = 0;
	} else {
		if (last > NBUCKETS-1)
			return (EINVAL);
		hsh = priv->hash + last;
		i = last;
	}

	/*
	 * We will transfer not more than NREC_AT_ONCE records. More data
	 * will come in the next message.
	 * We send the current hash index to userland, and userland should
	 * return it back to us. Then we restart from the next entry.
	 *
	 * The resulting cache snapshot is inaccurate for the
	 * following reasons:
	 * - we skip locked hash entries
	 * - we bail out, if someone wants our entry
	 * - we skip the rest of an entry, when we hit NREC_AT_ONCE
	 */
	for (; i < NBUCKETS; hsh++, i++) {
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		TAILQ_FOREACH(fle, &hsh->head, fle_hash) {
			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
				break;

			bcopy(&fle->f, &(data->entries[data->nentries]),
			    sizeof(fle->f));
			data->nentries++;
			if (data->nentries == NREC_AT_ONCE) {
				mtx_unlock(&hsh->mtx);
				if (++i < NBUCKETS)
					data->last = i;
				return (0);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

	return (0);
}
/* We have a full datagram in privdata. Send it to the export hook. */
static int
export_send(priv_p priv, item_p item, int flags)
{
	struct mbuf *m = NGI_M(item);
	struct netflow_v5_export_dgram *dgram = mtod(m,
					struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct timespec ts;
	int error = 0;

	/* Fill mbuf header. */
	m->m_len = m->m_pkthdr.len = sizeof(struct netflow_v5_record) *
	    header->count + sizeof(struct netflow_v5_header);

	/* Fill export header. */
	header->sys_uptime = htonl(MILLIUPTIME(time_uptime));
	getnanotime(&ts);
	header->unix_secs = htonl(ts.tv_sec);
	header->unix_nsecs = htonl(ts.tv_nsec);
	header->engine_type = 0;
	header->engine_id = 0;
	header->pad = 0;
	header->flow_seq = htonl(atomic_fetchadd_32(&priv->flow_seq,
	    header->count));
	header->count = htons(header->count);
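	/*
	 * Note: count is byte-swapped last, after its host-order value has
	 * been used for the length and flow_seq computations above.
	 */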
	if (priv->export != NULL)
		NG_FWD_ITEM_HOOK_FLAGS(error, item, priv->export, flags);
	else
		NG_FREE_ITEM(item);

	return (error);
}
/* Add an export record to the dgram. */
static int
export_add(item_p item, struct flow_entry *fle)
{
	struct netflow_v5_export_dgram *dgram = mtod(NGI_M(item),
					struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct netflow_v5_record *rec;

	rec = &dgram->r[header->count];
	header->count ++;

	KASSERT(header->count <= NETFLOW_V5_MAX_RECORDS,
	    ("ng_netflow: export too big"));

	/* Fill in export record. */
	rec->src_addr = fle->f.r.r_src.s_addr;
	rec->dst_addr = fle->f.r.r_dst.s_addr;
	rec->next_hop = fle->f.next_hop.s_addr;
	rec->i_ifx    = htons(fle->f.fle_i_ifx);
	rec->o_ifx    = htons(fle->f.fle_o_ifx);
	rec->packets  = htonl(fle->f.packets);
	rec->octets   = htonl(fle->f.bytes);
	rec->first    = htonl(MILLIUPTIME(fle->f.first));
	rec->last     = htonl(MILLIUPTIME(fle->f.last));
	rec->s_port   = fle->f.r.r_sport;
	rec->d_port   = fle->f.r.r_dport;
	rec->flags    = fle->f.tcp_flags;
	rec->prot     = fle->f.r.r_ip_p;
	rec->tos      = fle->f.r.r_tos;
	rec->dst_mask = fle->f.dst_mask;
	rec->src_mask = fle->f.src_mask;
	rec->pad1     = 0;
	rec->pad2     = 0;

	/* Not supported fields. */
	rec->src_as = rec->dst_as = 0;

	if (header->count == NETFLOW_V5_MAX_RECORDS)
		return (1); /* end of datagram */
	else
		return (0);
}
/* Periodic flow expiry run. */
void
ng_netflow_expire(void *arg)
{
	struct flow_entry *fle, *fle1;
	struct flow_hash_entry *hsh;
	priv_p priv = (priv_p )arg;
	item_p item = NULL;
	uint32_t used;
	int i;

	/*
	 * Go through the whole cache.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++) {
		/*
		 * Skip entries that are already being worked on.
		 */
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		used = atomic_load_acq_32(&priv->info.nfinfo_used);
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			/*
			 * Interrupt thread wants this entry!
			 * Quick! Quick! Bail out!
			 */
			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
				break;

			/*
			 * Don't expire aggressively while the hash
			 * collision ratio is predicted to be small.
			 */
			if (used <= (NBUCKETS*2) && !INACTIVE(fle))
				break;
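			/*
			 * used > NBUCKETS*2 means the cache holds more
			 * than two flows per bucket on average, so expiry
			 * becomes more aggressive below.
			 */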
			if ((INACTIVE(fle) && (SMALL(fle) ||
			    (used > (NBUCKETS*2)))) || AGED(fle)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				expire_flow(priv, &item, fle, NG_NOFLAGS);
				used--;
				atomic_add_32(&priv->info.nfinfo_inact_exp, 1);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

	if (item != NULL)
		return_export_dgram(priv, item, NG_NOFLAGS);

	/* Schedule the next expiry. */
	callout_reset(&priv->exp_callout, (1*hz), &ng_netflow_expire,
	    (void *)priv);
}