1 /**************************************************************************
3 Copyright (c) 2008-2010, BitGravity Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the BitGravity Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
28 ***************************************************************************/
30 #include "opt_route.h"
31 #include "opt_mpath.h"
34 #include "opt_inet6.h"
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
39 #include <sys/param.h>
40 #include <sys/types.h>
41 #include <sys/bitstring.h>
42 #include <sys/condvar.h>
43 #include <sys/callout.h>
45 #include <sys/kernel.h>
46 #include <sys/kthread.h>
47 #include <sys/limits.h>
48 #include <sys/malloc.h>
52 #include <sys/sched.h>
54 #include <sys/socket.h>
55 #include <sys/syslog.h>
56 #include <sys/sysctl.h>
59 #include <net/if_llatbl.h>
60 #include <net/if_var.h>
61 #include <net/route.h>
62 #include <net/flowtable.h>
65 #include <netinet/in.h>
66 #include <netinet/in_systm.h>
67 #include <netinet/in_var.h>
68 #include <netinet/if_ether.h>
69 #include <netinet/ip.h>
71 #include <netinet/ip6.h>
73 #include <netinet/tcp.h>
74 #include <netinet/udp.h>
75 #include <netinet/sctp.h>
/*
 * Flow-identifying tuples and the cached flow entry.
 * NOTE(review): heavily elided chunk — the enclosing "struct ipv4_tuple {",
 * "struct ipv6_tuple {", "struct flentry {", union and struct_v4/v6 headers
 * are not visible here.
 */
/* IPv4 tuple: 3 x 32-bit words are hashed (ports word + src + dst). */
80 uint16_t ip_sport; /* source port */
81 uint16_t ip_dport; /* destination port */
82 in_addr_t ip_saddr; /* source address */
83 in_addr_t ip_daddr; /* destination address */
87 struct ipv4_tuple ipf_ipt;
/* IPv6 tuple: 9 x 32-bit words are hashed (ports word + 2 x 128-bit addr). */
92 uint16_t ip_sport; /* source port */
93 uint16_t ip_dport; /* destination port */
94 struct in6_addr ip_saddr; /* source address */
95 struct in6_addr ip_daddr; /* destination address */
99 struct ipv6_tuple ipf_ipt;
/* Common flow entry; rt/lle are volatile as they are read unlocked. */
104 volatile uint32_t f_fhash; /* hash flowing forward */
105 uint16_t f_flags; /* flow flags */
107 uint8_t f_proto; /* protocol */
108 uint32_t f_fibnum; /* fib index */
109 uint32_t f_uptime; /* uptime at last access */
110 struct flentry *f_next; /* pointer to collision entry */
111 volatile struct rtentry *f_rt; /* rtentry for flow */
112 volatile struct llentry *f_lle; /* llentry for flow */
/* Per-family entries: common header followed by the family's key words. */
116 struct flentry fl_entry;
117 union ipv4_flow fl_flow;
121 struct flentry fl_entry;
122 union ipv6_flow fl_flow;
/* Accessors through the embedded common entry. */
125 #define fl_fhash fl_entry.fl_fhash
126 #define fl_flags fl_entry.fl_flags
127 #define fl_proto fl_entry.fl_proto
128 #define fl_uptime fl_entry.fl_uptime
129 #define fl_rt fl_entry.fl_rt
130 #define fl_lle fl_entry.fl_lle
/* Default idle timeouts (seconds) before a flow becomes stale. */
132 #define SECS_PER_HOUR 3600
133 #define SECS_PER_DAY (24*SECS_PER_HOUR)
137 #define FIN_WAIT_IDLE 600
138 #define TCP_IDLE SECS_PER_DAY
/* Indirection for lock strategy (global vs per-cpu) and route allocation. */
141 typedef void fl_lock_t(struct flowtable *, uint32_t);
142 typedef void fl_rtalloc_t(struct route *, uint32_t, u_int);
/* union flentryp: hash buckets, either one global array or one per cpu. */
145 struct flentry **global;
146 struct flentry **pcpu[MAXCPU];
/* struct flowtable fields (header elided in this chunk). */
150 counter_u64_t *ft_stat;
155 uint32_t ft_max_depth;
157 fl_lock_t *ft_unlock;
158 fl_rtalloc_t *ft_rtalloc;
160 * XXX need to pad out
162 struct mtx *ft_locks;
163 union flentryp ft_table;
164 bitstr_t *ft_masks[MAXCPU];
165 bitstr_t *ft_tmpmask;
/* Per-table copies of the idle timeouts, cache-line aligned. */
167 uint32_t ft_udp_idle __aligned(CACHE_LINE_SIZE);
168 uint32_t ft_fin_wait_idle;
169 uint32_t ft_syn_idle;
170 uint32_t ft_tcp_idle;
172 } __aligned(CACHE_LINE_SIZE);
/* Bump a per-cpu counter in ft_stat, indexed by field offset. */
174 #define FLOWSTAT_ADD(ft, name, v) \
175 counter_u64_add((ft)->ft_stat[offsetof(struct flowtable_stat, name) / sizeof(uint64_t)], (v))
176 #define FLOWSTAT_INC(ft, name) FLOWSTAT_ADD(ft, name, 1)
/* Cleaner kthread state: two condvars hand-shake flush requests. */
178 static struct proc *flowcleanerproc;
179 static uint32_t flow_hashjitter;
181 static struct cv flowclean_f_cv;
182 static struct cv flowclean_c_cv;
183 static struct mtx flowclean_lock;
184 static uint32_t flowclean_cycles;
185 static uint32_t flowclean_freq;
/* Original TODO list, preserved verbatim. */
189 * - add sysctls to resize && flush flow tables
190 * - Add per flowtable sysctls for statistics and configuring timeouts
191 * - add saturation counter to rtentry to support per-packet load-balancing
192 * add flag to indicate round-robin flow, add list lookup from head
194 * - add sysctl / device node / syscall to support exporting and importing
195 * of flows with flag to indicate that a flow was imported so should
196 * not be considered for auto-cleaning
197 * - support explicit connection state (currently only ad-hoc for DSR)
198 * - idetach() cleanup for options VIMAGE builds.
/* One flowtable per vnet per address family; UMA zones are global. */
201 static VNET_DEFINE(struct flowtable, ip4_ft);
202 #define V_ip4_ft VNET(ip4_ft)
203 static uma_zone_t flow_ipv4_zone;
206 static VNET_DEFINE(struct flowtable, ip6_ft);
207 #define V_ip6_ft VNET(ip6_ft)
208 static uma_zone_t flow_ipv6_zone;
/* Per-vnet tunables backing the sysctls below. */
211 static VNET_DEFINE(int, flowtable_enable) = 1;
212 static VNET_DEFINE(int, flowtable_syn_expire) = SYN_IDLE;
213 static VNET_DEFINE(int, flowtable_udp_expire) = UDP_IDLE;
214 static VNET_DEFINE(int, flowtable_fin_wait_expire) = FIN_WAIT_IDLE;
215 static VNET_DEFINE(int, flowtable_tcp_expire) = TCP_IDLE;
217 #define V_flowtable_enable VNET(flowtable_enable)
218 #define V_flowtable_syn_expire VNET(flowtable_syn_expire)
219 #define V_flowtable_udp_expire VNET(flowtable_udp_expire)
220 #define V_flowtable_fin_wait_expire VNET(flowtable_fin_wait_expire)
221 #define V_flowtable_tcp_expire VNET(flowtable_tcp_expire)
/* net.flowtable sysctl tree; expire knobs are read at table-alloc time only
 * (see XXX below) — changing them at runtime does not retime existing tables. */
223 static SYSCTL_NODE(_net, OID_AUTO, flowtable, CTLFLAG_RD, NULL,
225 SYSCTL_VNET_INT(_net_flowtable, OID_AUTO, enable, CTLFLAG_RW,
226 &VNET_NAME(flowtable_enable), 0, "enable flowtable caching.");
229 * XXX This does not end up updating timeouts at runtime
230 * and only reflects the value for the last table added :-/
232 SYSCTL_VNET_INT(_net_flowtable, OID_AUTO, syn_expire, CTLFLAG_RW,
233 &VNET_NAME(flowtable_syn_expire), 0,
234 "seconds after which to remove syn allocated flow.");
235 SYSCTL_VNET_INT(_net_flowtable, OID_AUTO, udp_expire, CTLFLAG_RW,
236 &VNET_NAME(flowtable_udp_expire), 0,
237 "seconds after which to remove flow allocated to UDP.");
238 SYSCTL_VNET_INT(_net_flowtable, OID_AUTO, fin_wait_expire, CTLFLAG_RW,
239 &VNET_NAME(flowtable_fin_wait_expire), 0,
240 "seconds after which to remove a flow in FIN_WAIT.");
241 SYSCTL_VNET_INT(_net_flowtable, OID_AUTO, tcp_expire, CTLFLAG_RW,
242 &VNET_NAME(flowtable_tcp_expire), 0,
243 "seconds after which to remove flow allocated to a TCP connection.");
/* Adapter matching fl_rtalloc_t when multipath is not compiled in;
 * the hash argument is unused by the plain allocator. */
247 rtalloc_ign_wrapper(struct route *ro, uint32_t hash, u_int fibnum)
250 rtalloc_ign_fib(ro, 0, fibnum);
/* Global-table locking: hash selects one of ft_lock_count (power of 2)
 * mutexes, so distinct buckets can be locked concurrently. */
255 flowtable_global_lock(struct flowtable *table, uint32_t hash)
257 int lock_index = (hash)&(table->ft_lock_count - 1);
259 mtx_lock(&table->ft_locks[lock_index]);
263 flowtable_global_unlock(struct flowtable *table, uint32_t hash)
265 int lock_index = (hash)&(table->ft_lock_count - 1);
267 mtx_unlock(&table->ft_locks[lock_index]);
/* Per-cpu variants — bodies elided in this chunk; presumably critical-section
 * based since each cpu owns its bucket array. TODO confirm against full file. */
271 flowtable_pcpu_lock(struct flowtable *table, uint32_t hash)
278 flowtable_pcpu_unlock(struct flowtable *table, uint32_t hash)
/* Bucket addressing and lock dispatch through the table's function pointers. */
284 #define FL_ENTRY_INDEX(table, hash)((hash) % (table)->ft_size)
285 #define FL_ENTRY(table, hash) *flowtable_entry((table), (hash))
286 #define FL_ENTRY_LOCK(table, hash) (table)->ft_lock((table), (hash))
287 #define FL_ENTRY_UNLOCK(table, hash) (table)->ft_unlock((table), (hash))
/* Internal f_flags bits (beyond the TH_* bits reused from TCP). */
289 #define FL_STALE (1<<8)
290 #define FL_OVERWRITE (1<<10)
292 static struct flentry *flowtable_lookup_common(struct flowtable *,
293 struct sockaddr_storage *, struct sockaddr_storage *, struct mbuf *, int);
/* Map an IP protocol number to FL_TCP/FL_UDP/FL_SCTP flag bits
 * (body elided in this chunk). */
296 proto_to_flags(uint8_t proto)
/* Inverse mapping: recover the IPPROTO_* value from the flag bits. */
319 flags_to_proto(int flags)
321 int proto, protoflags;
323 protoflags = flags & (FL_TCP|FL_SCTP|FL_UDP);
324 switch (protoflags) {
329 proto = IPPROTO_SCTP;
/* Debug-only pretty printer for an IPv4 tuple. */
342 #ifdef FLOWTABLE_DEBUG
344 ipv4_flow_print_tuple(int flags, int proto, struct sockaddr_in *ssin,
345 struct sockaddr_in *dsin)
347 char saddr[4*sizeof "123"], daddr[4*sizeof "123"];
349 if (flags & FL_HASH_ALL) {
350 inet_ntoa_r(ssin->sin_addr, saddr);
351 inet_ntoa_r(dsin->sin_addr, daddr);
352 printf("proto=%d %s:%d->%s:%d\n",
353 proto, saddr, ntohs(ssin->sin_port), daddr,
354 ntohs(dsin->sin_port));
/* Non-FL_HASH_ALL tables key on destination only. */
356 inet_ntoa_r(*(struct in_addr *) &dsin->sin_addr, daddr);
357 printf("proto=%d %s\n", proto, daddr);
/*
 * Extract the IPv4 src/dst sockaddrs (and, for FL_HASH_ALL tables, the
 * transport ports) from a contiguous packet header in *m.  Marks the flow
 * FL_STALE when a TCP RST/FIN is seen so it is torn down promptly.
 * NOTE(review): switch(proto) case labels and the return paths are elided
 * in this chunk.
 */
364 ipv4_mbuf_demarshal(struct mbuf *m, struct sockaddr_in *ssin,
365 struct sockaddr_in *dsin, uint16_t *flags)
373 uint16_t sport, dport;
375 proto = sport = dport = 0;
376 ip = mtod(m, struct ip *);
377 dsin->sin_family = AF_INET;
378 dsin->sin_len = sizeof(*dsin);
379 dsin->sin_addr = ip->ip_dst;
380 ssin->sin_family = AF_INET;
381 ssin->sin_len = sizeof(*ssin);
382 ssin->sin_addr = ip->ip_src;
/* Destination-only tables do not need ports; done. */
385 if ((*flags & FL_HASH_ALL) == 0)
388 iphlen = ip->ip_hl << 2; /* XXX options? */
/* Ports stay in network byte order; they only feed the hash key. */
392 th = (struct tcphdr *)((caddr_t)ip + iphlen);
393 sport = th->th_sport;
394 dport = th->th_dport;
395 if ((*flags & FL_HASH_ALL) &&
396 (th->th_flags & (TH_RST|TH_FIN)))
400 uh = (struct udphdr *)((caddr_t)ip + iphlen);
401 sport = uh->uh_sport;
402 dport = uh->uh_dport;
405 sh = (struct sctphdr *)((caddr_t)ip + iphlen);
406 sport = sh->src_port;
407 dport = sh->dest_port;
411 /* no port - hence not a protocol we care about */
417 *flags |= proto_to_flags(proto);
418 ssin->sin_port = sport;
419 dsin->sin_port = dport;
/*
 * Build the 3-word IPv4 hash key (key[0] = packed ports, key[1] = src,
 * key[2] = dst) and return its Jenkins hash, seeded with the boot-time
 * jitter plus the protocol number so families/protocols hash differently.
 */
424 ipv4_flow_lookup_hash(
425 struct sockaddr_in *ssin, struct sockaddr_in *dsin,
426 uint32_t *key, uint16_t flags)
428 uint16_t sport, dport;
432 proto = flags_to_proto(flags);
433 sport = dport = key[2] = key[1] = key[0] = 0;
/* Source address/port only participate for FL_HASH_ALL tables. */
434 if ((ssin != NULL) && (flags & FL_HASH_ALL)) {
435 key[1] = ssin->sin_addr.s_addr;
436 sport = ssin->sin_port;
439 key[2] = dsin->sin_addr.s_addr;
440 dport = dsin->sin_port;
442 if (flags & FL_HASH_ALL) {
443 ((uint16_t *)key)[0] = sport;
444 ((uint16_t *)key)[1] = dport;
446 offset = flow_hashjitter + proto;
448 return (jenkins_hash32(key, 3, offset));
/*
 * IPv4 front end: demarshal the packet into sockaddrs on the stack and
 * defer to flowtable_lookup_common() against the per-vnet v4 table.
 */
451 static struct flentry *
452 flowtable_lookup_ipv4(struct mbuf *m)
454 struct sockaddr_storage ssa, dsa;
456 struct sockaddr_in *dsin, *ssin;
458 dsin = (struct sockaddr_in *)&dsa;
459 ssin = (struct sockaddr_in *)&ssa;
460 bzero(dsin, sizeof(*dsin));
461 bzero(ssin, sizeof(*ssin));
462 flags = V_ip4_ft.ft_flags;
463 if (ipv4_mbuf_demarshal(m, ssin, dsin, &flags) != 0)
466 return (flowtable_lookup_common(&V_ip4_ft, &ssa, &dsa, m, flags));
/*
 * Populate a caller's struct route from a cached IPv4 flow.  key[2] is the
 * destination address word of the v4 key (see ipv4_flow_lookup_hash).
 * RT_NORTREF tells the caller not to drop a reference it does not hold.
 */
470 flow_to_route(struct flentry *fle, struct route *ro)
472 uint32_t *hashkey = NULL;
473 struct sockaddr_in *sin;
475 sin = (struct sockaddr_in *)&ro->ro_dst;
476 sin->sin_family = AF_INET;
477 sin->sin_len = sizeof(*sin);
478 hashkey = ((struct flentry_v4 *)fle)->fl_flow.ipf_key;
479 sin->sin_addr.s_addr = hashkey[2];
480 ro->ro_rt = __DEVOLATILE(struct rtentry *, fle->f_rt);
481 ro->ro_lle = __DEVOLATILE(struct llentry *, fle->f_lle);
482 ro->ro_flags |= RT_NORTREF;
488 * PULLUP_TO(len, p, T) makes sure that len + sizeof(T) is contiguous,
489 * then it sets p to point at the offset "len" in the mbuf. WARNING: the
490 * pointer might become stale after other pullups (but we never use it
/* NOTE(review): relies on an enclosing function providing `m` and a
 * `receive_failed` label; only usable inside ipv6_mbuf_demarshal below. */
493 #define PULLUP_TO(_len, p, T) \
495 int x = (_len) + sizeof(T); \
496 if ((m)->m_len < x) { \
497 goto receive_failed; \
499 p = (mtod(m, char *) + (_len)); \
/* Header-cast shorthands for the demarshal switch. */
502 #define TCP(p) ((struct tcphdr *)(p))
503 #define SCTP(p) ((struct sctphdr *)(p))
504 #define UDP(p) ((struct udphdr *)(p))
/*
 * IPv6 analogue of ipv4_mbuf_demarshal(): walk the extension-header chain
 * until a transport header (TCP/UDP/SCTP) is found, harvesting addresses
 * and ports.  Each extension case advances hlen and reloads proto from the
 * next-header field.  Case labels and some bodies are elided in this chunk.
 */
507 ipv6_mbuf_demarshal(struct mbuf *m, struct sockaddr_in6 *ssin6,
508 struct sockaddr_in6 *dsin6, uint16_t *flags)
513 uint16_t src_port, dst_port;
517 offset = hlen = src_port = dst_port = 0;
519 ip6 = mtod(m, struct ip6_hdr *);
520 hlen = sizeof(struct ip6_hdr);
521 proto = ip6->ip6_nxt;
/* Destination-only tables skip the extension-header walk entirely. */
523 if ((*flags & FL_HASH_ALL) == 0)
526 while (ulp == NULL) {
529 case IPPROTO_OSPFIGP:
537 PULLUP_TO(hlen, ulp, struct tcphdr);
538 dst_port = TCP(ulp)->th_dport;
539 src_port = TCP(ulp)->th_sport;
/* RST/FIN marks the flow stale, same as the IPv4 path. */
540 if ((*flags & FL_HASH_ALL) &&
541 (TCP(ulp)->th_flags & (TH_RST|TH_FIN)))
545 PULLUP_TO(hlen, ulp, struct sctphdr);
546 src_port = SCTP(ulp)->src_port;
547 dst_port = SCTP(ulp)->dest_port;
550 PULLUP_TO(hlen, ulp, struct udphdr);
551 dst_port = UDP(ulp)->uh_dport;
552 src_port = UDP(ulp)->uh_sport;
/* Extension headers: length fields are in 8-byte units (hbh/rthdr),
 * except AH which counts 4-byte units with a 2-unit bias. */
554 case IPPROTO_HOPOPTS: /* RFC 2460 */
555 PULLUP_TO(hlen, ulp, struct ip6_hbh);
556 hlen += (((struct ip6_hbh *)ulp)->ip6h_len + 1) << 3;
557 proto = ((struct ip6_hbh *)ulp)->ip6h_nxt;
560 case IPPROTO_ROUTING: /* RFC 2460 */
561 PULLUP_TO(hlen, ulp, struct ip6_rthdr);
562 hlen += (((struct ip6_rthdr *)ulp)->ip6r_len + 1) << 3;
563 proto = ((struct ip6_rthdr *)ulp)->ip6r_nxt;
566 case IPPROTO_FRAGMENT: /* RFC 2460 */
567 PULLUP_TO(hlen, ulp, struct ip6_frag);
568 hlen += sizeof (struct ip6_frag);
569 proto = ((struct ip6_frag *)ulp)->ip6f_nxt;
570 offset = ((struct ip6_frag *)ulp)->ip6f_offlg &
574 case IPPROTO_DSTOPTS: /* RFC 2460 */
575 PULLUP_TO(hlen, ulp, struct ip6_hbh);
576 hlen += (((struct ip6_hbh *)ulp)->ip6h_len + 1) << 3;
577 proto = ((struct ip6_hbh *)ulp)->ip6h_nxt;
580 case IPPROTO_AH: /* RFC 2402 */
581 PULLUP_TO(hlen, ulp, struct ip6_ext);
582 hlen += (((struct ip6_ext *)ulp)->ip6e_len + 2) << 2;
583 proto = ((struct ip6_ext *)ulp)->ip6e_nxt;
587 PULLUP_TO(hlen, ulp, struct ip6_ext);
/* Fill in the caller's sockaddrs; ports remain network byte order. */
598 dsin6->sin6_family = AF_INET6;
599 dsin6->sin6_len = sizeof(*dsin6);
600 dsin6->sin6_port = dst_port;
601 memcpy(&dsin6->sin6_addr, &ip6->ip6_dst, sizeof(struct in6_addr));
603 ssin6->sin6_family = AF_INET6;
604 ssin6->sin6_len = sizeof(*ssin6);
605 ssin6->sin6_port = src_port;
606 memcpy(&ssin6->sin6_addr, &ip6->ip6_src, sizeof(struct in6_addr));
607 *flags |= proto_to_flags(proto);
/* zero_key: clear all 9 key words (expansion elided in this chunk). */
612 #define zero_key(key) \
/*
 * Build the 9-word IPv6 key (key[0] = packed ports, key[1..4] = dst addr,
 * key[5..8] = src addr) and return its Jenkins hash, seeded as in the
 * IPv4 variant.
 */
626 ipv6_flow_lookup_hash(
627 struct sockaddr_in6 *ssin6, struct sockaddr_in6 *dsin6,
628 uint32_t *key, uint16_t flags)
630 uint16_t sport, dport;
634 proto = flags_to_proto(flags);
638 memcpy(&key[1], &dsin6->sin6_addr, sizeof(struct in6_addr));
639 dport = dsin6->sin6_port;
/* Source only participates for FL_HASH_ALL tables. */
641 if ((ssin6 != NULL) && (flags & FL_HASH_ALL)) {
642 memcpy(&key[5], &ssin6->sin6_addr, sizeof(struct in6_addr));
643 sport = ssin6->sin6_port;
645 if (flags & FL_HASH_ALL) {
646 ((uint16_t *)key)[0] = sport;
647 ((uint16_t *)key)[1] = dport;
649 offset = flow_hashjitter + proto;
651 return (jenkins_hash32(key, 9, offset));
/*
 * IPv6 front end: demarshal the packet and defer to
 * flowtable_lookup_common() against the per-vnet v6 table.
 */
654 static struct flentry *
655 flowtable_lookup_ipv6(struct mbuf *m)
657 struct sockaddr_storage ssa, dsa;
658 struct sockaddr_in6 *dsin6, *ssin6;
661 dsin6 = (struct sockaddr_in6 *)&dsa;
662 ssin6 = (struct sockaddr_in6 *)&ssa;
663 bzero(dsin6, sizeof(*dsin6));
664 bzero(ssin6, sizeof(*ssin6));
665 flags = V_ip6_ft.ft_flags;
667 if (ipv6_mbuf_demarshal(m, ssin6, dsin6, &flags) != 0)
670 return (flowtable_lookup_common(&V_ip6_ft, &ssa, &dsa, m, flags));
/*
 * Populate a caller's struct route_in6 from a cached IPv6 flow.
 * key[5..8] hold the source address words; the destination copied here is
 * taken from &hashkey[5] — NOTE(review): v6 key layout puts dst at [1..4]
 * and src at [5..8] (see ipv6_flow_lookup_hash); verify intent against the
 * full file before relying on this.
 */
674 flow_to_route_in6(struct flentry *fle, struct route_in6 *ro)
676 uint32_t *hashkey = NULL;
677 struct sockaddr_in6 *sin6;
679 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
681 sin6->sin6_family = AF_INET6;
682 sin6->sin6_len = sizeof(*sin6);
683 hashkey = ((struct flentry_v6 *)fle)->fl_flow.ipf_key;
684 memcpy(&sin6->sin6_addr, &hashkey[5], sizeof (struct in6_addr));
685 ro->ro_rt = __DEVOLATILE(struct rtentry *, fle->f_rt);
686 ro->ro_lle = __DEVOLATILE(struct llentry *, fle->f_lle);
687 ro->ro_flags |= RT_NORTREF;
/* Return the occupancy bitmask for the current cpu (per-cpu tables) or the
 * single shared mask (global tables). */
692 flowtable_mask(struct flowtable *ft)
696 if (ft->ft_flags & FL_PCPU)
697 mask = ft->ft_masks[curcpu];
699 mask = ft->ft_masks[0];
/* Return the address of the bucket head for `hash`, selecting the current
 * cpu's array for per-cpu tables. */
704 static struct flentry **
705 flowtable_entry(struct flowtable *ft, uint32_t hash)
707 struct flentry **fle;
708 int index = (hash % ft->ft_size);
710 if (ft->ft_flags & FL_PCPU) {
/* XXX &array[0] is never NULL — these KASSERTs cannot fire as written. */
711 KASSERT(&ft->ft_table.pcpu[curcpu][0] != NULL, ("pcpu not set"));
712 fle = &ft->ft_table.pcpu[curcpu][index];
714 KASSERT(&ft->ft_table.global[0] != NULL, ("global not set"));
715 fle = &ft->ft_table.global[index];
/*
 * Decide whether a cached flow may be reclaimed: never-hashed entries,
 * entries whose route is down/detached, explicitly FL_STALE entries, and
 * entries idle longer than the timeout for their TCP state (SYN-only,
 * established SYN|ACK, FIN seen) or the UDP timeout for portless flows.
 */
722 flow_stale(struct flowtable *ft, struct flentry *fle)
726 if ((fle->f_fhash == 0)
727 || ((fle->f_rt->rt_flags & RTF_HOST) &&
728 ((fle->f_rt->rt_flags & (RTF_UP))
730 || (fle->f_rt->rt_ifp == NULL)
731 || !RT_LINK_IS_UP(fle->f_rt->rt_ifp))
734 idle_time = time_uptime - fle->f_uptime;
/* f_flags reuses TH_* bits recorded from observed TCP segments. */
736 if ((fle->f_flags & FL_STALE) ||
737 ((fle->f_flags & (TH_SYN|TH_ACK|TH_FIN)) == 0
738 && (idle_time > ft->ft_udp_idle)) ||
739 ((fle->f_flags & TH_FIN)
740 && (idle_time > ft->ft_fin_wait_idle)) ||
741 ((fle->f_flags & (TH_SYN|TH_ACK)) == TH_SYN
742 && (idle_time > ft->ft_syn_idle)) ||
743 ((fle->f_flags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)
744 && (idle_time > ft->ft_tcp_idle)) ||
745 ((fle->f_rt->rt_flags & RTF_UP) == 0 ||
746 (fle->f_rt->rt_ifp == NULL)))
/*
 * Copy the computed hash key into the entry's family-specific key storage.
 * BUGFIX: the casts were inverted — the FL_IPV6 branch cast to
 * struct flentry_v4 (3-word key) and vice versa, so IPv6 keys were written
 * through the wrong (too small) layout.  flow_to_route()/flow_to_route_in6()
 * establish that flentry_v4 holds the 3-word v4 key and flentry_v6 the
 * 9-word v6 key; select accordingly (nwords assignments elided in chunk).
 */
753 flowtable_set_hashkey(struct flentry *fle, uint32_t *key)
758 if (fle->f_flags & FL_IPV6) {
760 hashkey = ((struct flentry_v6 *)fle)->fl_flow.ipf_key;
763 hashkey = ((struct flentry_v4 *)fle)->fl_flow.ipf_key;
766 for (i = 0; i < nwords; i++)
/*
 * Hysteresis check on zone occupancy: table leaves "full" below 7/8 of max
 * and enters "full" above 31/32.  On the transition it retunes the cleaner
 * frequency and (for transmit caches) the idle timeouts, and kicks the
 * cleaner thread.  ft_full load/stores are elided in this chunk.
 */
771 flow_full(struct flowtable *ft)
777 count = uma_zone_get_cur(ft->ft_zone);
778 max = uma_zone_get_max(ft->ft_zone);
780 if (full && (count < (max - (max >> 3))))
782 else if (!full && (count > (max - (max >> 5))))
785 if (full && !ft->ft_full) {
/* Under pressure: clean every 4s and age transmit-cache entries in 5s. */
786 flowclean_freq = 4*hz;
787 if ((ft->ft_flags & FL_HASH_ALL) == 0)
788 ft->ft_udp_idle = ft->ft_fin_wait_idle =
789 ft->ft_syn_idle = ft->ft_tcp_idle = 5;
790 cv_broadcast(&flowclean_c_cv);
791 } else if (!full && ft->ft_full) {
/* Pressure relieved: restore the relaxed 20s/30s defaults. */
792 flowclean_freq = 20*hz;
793 if ((ft->ft_flags & FL_HASH_ALL) == 0)
794 ft->ft_udp_idle = ft->ft_fin_wait_idle =
795 ft->ft_syn_idle = ft->ft_tcp_idle = 30;
798 return (ft->ft_full);
/*
 * Allocate and link a new flow entry for (hash, key) under the bucket lock.
 * Empty bucket: install directly and set the occupancy bit.  Collision:
 * walk the chain; if a live entry with the same hash already exists we lost
 * a race (or collided) and free the new entry, otherwise append at the tail.
 * Finally stamp the key, proto, route/llentry, fib and access time.
 * Several lines (depth accounting, skip label, return) are elided here.
 */
802 flowtable_insert(struct flowtable *ft, uint32_t hash, uint32_t *key,
803 uint32_t fibnum, struct route *ro, uint16_t flags)
805 struct flentry *fle, *fletail, *newfle, **flep;
810 newfle = uma_zalloc(ft->ft_zone, M_NOWAIT | M_ZERO);
814 newfle->f_flags |= (flags & FL_IPV6);
815 proto = flags_to_proto(flags);
817 FL_ENTRY_LOCK(ft, hash);
818 mask = flowtable_mask(ft);
819 flep = flowtable_entry(ft, hash);
820 fletail = fle = *flep;
823 bit_set(mask, FL_ENTRY_INDEX(ft, hash));
824 *flep = fle = newfle;
829 FLOWSTAT_INC(ft, ft_collisions);
831 * find end of list and make sure that we were not
832 * preempted by another thread handling this flow
834 while (fle != NULL) {
835 if (fle->f_fhash == hash && !flow_stale(ft, fle)) {
837 * there was either a hash collision
838 * or we lost a race to insert
840 FL_ENTRY_UNLOCK(ft, hash);
841 uma_zfree(ft->ft_zone, newfle);
843 if (flags & FL_OVERWRITE)
848 * re-visit this double condition XXX
850 if (fletail->f_next != NULL)
851 fletail = fle->f_next;
/* Track the longest chain seen for diagnostics. */
857 if (depth > ft->ft_max_depth)
858 ft->ft_max_depth = depth;
859 fletail->f_next = newfle;
862 flowtable_set_hashkey(fle, key);
864 fle->f_proto = proto;
865 fle->f_rt = ro->ro_rt;
866 fle->f_lle = ro->ro_lle;
868 fle->f_fibnum = fibnum;
869 fle->f_uptime = time_uptime;
870 FL_ENTRY_UNLOCK(ft, hash);
/*
 * Compare an entry's stored key words against a candidate key.
 * BUGFIX: the casts were inverted — the FL_IPV6 branch cast to
 * struct flentry_v4 and vice versa, mirroring the defect fixed in
 * flowtable_set_hashkey(): an IPv6 entry would be compared through the
 * 3-word v4 layout (and thus at the wrong offset/length).  flentry_v6
 * holds the 9-word v6 key, flentry_v4 the 3-word v4 key
 * (nwords assignments elided in this chunk).
 */
875 flowtable_key_equal(struct flentry *fle, uint32_t *key)
880 if (fle->f_flags & FL_IPV6) {
882 hashkey = ((struct flentry_v6 *)fle)->fl_flow.ipf_key;
885 hashkey = ((struct flentry_v4 *)fle)->fl_flow.ipf_key;
888 for (i = 0; i < nwords; i++)
889 if (hashkey[i] != key[i])
/* Public dispatch by address family; panics on anything but INET/INET6
 * (case labels elided in this chunk). */
896 flowtable_lookup(sa_family_t sa, struct mbuf *m)
902 return (flowtable_lookup_ipv4(m));
906 return (flowtable_lookup_ipv6(m));
909 panic("%s: sa %d", __func__, sa);
/*
 * Core lookup: hash the harvested addresses, probe the table, and on a
 * valid hit (same hash/key/proto/fib, route up, llentry valid) refresh the
 * access time and return the entry.  On a miss, allocate a route and
 * llentry for the destination and insert a new flow.  Loopback, same-host
 * and disabled-table cases bypass the cache.  The packet is tagged with
 * the flow hash (M_FLOWID) on the way out.  Many lines (labels, returns,
 * refcount drops) are elided in this chunk.
 */
913 static struct flentry *
914 flowtable_lookup_common(struct flowtable *ft, struct sockaddr_storage *ssa,
915 struct sockaddr_storage *dsa, struct mbuf *m, int flags)
917 struct route_in6 sro6;
918 struct route sro, *ro;
922 struct sockaddr_storage *l3addr;
924 uint32_t key[9], hash, fibnum;
927 if (V_flowtable_enable == 0)
930 sro.ro_rt = sro6.ro_rt = NULL;
931 sro.ro_lle = sro6.ro_lle = NULL;
932 flags |= ft->ft_flags;
933 proto = flags_to_proto(flags);
934 fibnum = M_GETFIB(m);
936 switch (ssa->ss_family) {
939 struct sockaddr_in *ssin, *dsin;
941 KASSERT(dsa->ss_family == AF_INET,
942 ("%s: dsa family %d\n", __func__, dsa->ss_family));
945 memcpy(&ro->ro_dst, dsa, sizeof(struct sockaddr_in));
947 * The harvested source and destination addresses
948 * may contain port information if the packet is
949 * from a transport protocol (e.g. TCP/UDP). The
950 * port field must be cleared before performing
953 ((struct sockaddr_in *)&ro->ro_dst)->sin_port = 0;
954 dsin = (struct sockaddr_in *)dsa;
955 ssin = (struct sockaddr_in *)ssa;
/* Same-host and 127/8 traffic is never cached. */
956 if ((dsin->sin_addr.s_addr == ssin->sin_addr.s_addr) ||
957 (ntohl(dsin->sin_addr.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
958 (ntohl(ssin->sin_addr.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
961 hash = ipv4_flow_lookup_hash(ssin, dsin, key, flags);
967 struct sockaddr_in6 *ssin6, *dsin6;
969 KASSERT(dsa->ss_family == AF_INET6,
970 ("%s: dsa family %d\n", __func__, dsa->ss_family));
972 ro = (struct route *)&sro6;
973 memcpy(&sro6.ro_dst, dsa,
974 sizeof(struct sockaddr_in6));
975 ((struct sockaddr_in6 *)&ro->ro_dst)->sin6_port = 0;
976 dsin6 = (struct sockaddr_in6 *)dsa;
977 ssin6 = (struct sockaddr_in6 *)ssa;
980 hash = ipv6_flow_lookup_hash(ssin6, dsin6, key, flags);
985 panic("%s: ssa family %d", __func__, ssa->ss_family);
989 * Ports are zero and this isn't a transmit cache
990 * - thus not a protocol for which we need to keep
992 * FL_HASH_ALL => key[0] != 0 for TCP || UDP || SCTP
994 if (key[0] == 0 && (ft->ft_flags & FL_HASH_ALL))
997 FLOWSTAT_INC(ft, ft_lookups);
998 FL_ENTRY_LOCK(ft, hash);
999 if ((fle = FL_ENTRY(ft, hash)) == NULL) {
1000 FL_ENTRY_UNLOCK(ft, hash);
/* rt/lle are stored volatile; strip qualification for local reads. */
1004 rt = __DEVOLATILE(struct rtentry *, fle->f_rt);
1005 lle = __DEVOLATILE(struct llentry *, fle->f_lle);
1008 && fle->f_fhash == hash
1009 && flowtable_key_equal(fle, key)
1010 && (proto == fle->f_proto)
1011 && (fibnum == fle->f_fibnum)
1012 && (rt->rt_flags & RTF_UP)
1013 && (rt->rt_ifp != NULL)
1014 && (lle->la_flags & LLE_VALID)) {
1015 FLOWSTAT_INC(ft, ft_hits);
1016 fle->f_uptime = time_uptime;
1017 fle->f_flags |= flags;
1018 FL_ENTRY_UNLOCK(ft, hash);
1020 } else if (fle->f_next != NULL) {
1024 FL_ENTRY_UNLOCK(ft, hash);
/* Miss: honor FL_NOAUTO and back off when the zone is saturated. */
1026 if (flags & FL_NOAUTO || flow_full(ft))
1029 FLOWSTAT_INC(ft, ft_misses);
1031 * This bit of code ends up locking the
1032 * same route 3 times (just like ip_output + ether_output)
1034 * - in rt_check when called by arpresolve
1035 * - dropping the refcount for the rtentry
1037 * This could be consolidated to one if we wrote a variant
1038 * of arpresolve with an rt_check variant that expected to
1039 * receive the route locked
1042 ft->ft_rtalloc(ro, hash, fibnum);
1043 if (ro->ro_rt == NULL)
1049 if (ifp->if_flags & (IFF_POINTOPOINT | IFF_LOOPBACK)) {
1054 switch (ssa->ss_family) {
/* Resolve the L2 entry against the gateway for indirect routes. */
1057 if (rt->rt_flags & RTF_GATEWAY)
1058 l3addr = (struct sockaddr_storage *)rt->rt_gateway;
1060 l3addr = (struct sockaddr_storage *)&ro->ro_dst;
1061 lle = llentry_alloc(ifp, LLTABLE(ifp), l3addr);
1066 struct sockaddr_in6 *dsin6;
1068 dsin6 = (struct sockaddr_in6 *)dsa;
1069 if (in6_localaddr(&dsin6->sin6_addr)) {
1074 if (rt->rt_flags & RTF_GATEWAY)
1075 l3addr = (struct sockaddr_storage *)rt->rt_gateway;
1077 l3addr = (struct sockaddr_storage *)&ro->ro_dst;
1078 lle = llentry_alloc(ifp, LLTABLE6(ifp), l3addr);
1090 if (flowtable_insert(ft, hash, key, fibnum, ro, flags) != 0) {
/* Success path: stamp the mbuf with the flow id for downstream use. */
1097 if (fle != NULL && (m->m_flags & M_FLOWID) == 0) {
1098 m->m_flags |= M_FLOWID;
1099 m->m_pkthdr.flowid = fle->f_fhash;
1105 * used by the bit_alloc macro
/* bitstring(3)'s bit_alloc expects calloc(); map it onto kernel malloc. */
1107 #define calloc(count, size) malloc((count)*(size), M_DEVBUF, M_WAITOK|M_ZERO)
/*
 * Allocate a flowtable's buckets, occupancy bitmasks and (for global
 * tables) the lock array, wire up the lock/rtalloc strategies, and seed
 * the per-table idle timeouts from the vnet tunables.
 * BUGFIX: ft_tcp_idle was initialized from V_flowtable_fin_wait_expire,
 * silently expiring established TCP flows after the (much shorter)
 * FIN_WAIT timeout; it must come from V_flowtable_tcp_expire, matching
 * the tcp_expire sysctl above.  (Several lines elided in this chunk.)
 */
1110 flowtable_alloc(struct flowtable *ft)
1114 ft->ft_rtalloc = rtalloc_mpath_fib;
1116 ft->ft_rtalloc = rtalloc_ign_wrapper;
1118 if (ft->ft_flags & FL_PCPU) {
1119 ft->ft_lock = flowtable_pcpu_lock;
1120 ft->ft_unlock = flowtable_pcpu_unlock;
/* One bucket array + mask per cpu; no locks needed beyond pinning. */
1122 for (int i = 0; i <= mp_maxid; i++) {
1123 ft->ft_table.pcpu[i] =
1124 malloc(ft->ft_size * sizeof(struct flentry *),
1125 M_RTABLE, M_WAITOK | M_ZERO);
1126 ft->ft_masks[i] = bit_alloc(ft->ft_size);
/* Global table: scale the (power-of-2) lock count with the cpu count. */
1129 ft->ft_lock_count = 2*(powerof2(mp_maxid + 1) ? (mp_maxid + 1):
1130 (fls(mp_maxid + 1) << 1));
1132 ft->ft_lock = flowtable_global_lock;
1133 ft->ft_unlock = flowtable_global_unlock;
1134 ft->ft_table.global =
1135 malloc(ft->ft_size * sizeof(struct flentry *),
1136 M_RTABLE, M_WAITOK | M_ZERO);
1137 ft->ft_locks = malloc(ft->ft_lock_count*sizeof(struct mtx),
1138 M_RTABLE, M_WAITOK | M_ZERO);
1139 for (int i = 0; i < ft->ft_lock_count; i++)
1140 mtx_init(&ft->ft_locks[i], "flow", NULL,
1141 MTX_DEF | MTX_DUPOK);
1143 ft->ft_masks[0] = bit_alloc(ft->ft_size);
1145 ft->ft_tmpmask = bit_alloc(ft->ft_size);
1148 * In the local transmit case the table truly is
1149 * just a cache - so everything is eligible for
1150 * replacement after 5s of non-use
1152 if (ft->ft_flags & FL_HASH_ALL) {
1153 ft->ft_udp_idle = V_flowtable_udp_expire;
1154 ft->ft_syn_idle = V_flowtable_syn_expire;
1155 ft->ft_fin_wait_idle = V_flowtable_fin_wait_expire;
1156 ft->ft_tcp_idle = V_flowtable_tcp_expire;
1158 ft->ft_udp_idle = ft->ft_fin_wait_idle =
1159 ft->ft_syn_idle = ft->ft_tcp_idle = 30;
1165 * The rest of the code is devoted to garbage collection of expired entries.
1166 * It is a new additon made necessary by the switch to dynamically allocating
/* Release a flow entry: drop its route/llentry references (RTFREE/LLE_FREE
 * calls elided in this chunk) and return it to the zone. */
1171 fle_free(struct flentry *fle, struct flowtable *ft)
1174 struct llentry *lle;
1176 rt = __DEVOLATILE(struct rtentry *, fle->f_rt);
1177 lle = __DEVOLATILE(struct llentry *, fle->f_lle);
1182 uma_zfree(ft->ft_zone, fle);
/*
 * Sweep the table (for the current cpu's array, if per-cpu) and unlink
 * every stale entry — or, when rt != NULL, every entry referencing that
 * rtentry.  Unlinked entries are collected on a private list under the
 * bucket lock and freed afterwards without holding any lock.  A snapshot
 * of the occupancy mask drives the scan via bit_ffs().  Several lines
 * (locals, loop advancement) are elided in this chunk.
 */
1186 flowtable_free_stale(struct flowtable *ft, struct rtentry *rt)
1188 int curbit = 0, tmpsize;
1189 struct flentry *fle, **flehead, *fleprev;
1190 struct flentry *flefreehead, *flefreetail, *fletmp;
1191 bitstr_t *mask, *tmpmask;
1193 flefreehead = flefreetail = NULL;
1194 mask = flowtable_mask(ft);
1195 tmpmask = ft->ft_tmpmask;
1196 tmpsize = ft->ft_size;
1197 memcpy(tmpmask, mask, ft->ft_size/8);
1199 * XXX Note to self, bit_ffs operates at the byte level
1200 * and thus adds gratuitous overhead
1202 bit_ffs(tmpmask, ft->ft_size, &curbit);
1203 while (curbit != -1) {
1204 if (curbit >= ft->ft_size || curbit < -1) {
1206 "warning: bad curbit value %d \n",
1211 FL_ENTRY_LOCK(ft, curbit);
1212 flehead = flowtable_entry(ft, curbit);
1213 fle = fleprev = *flehead;
1215 FLOWSTAT_INC(ft, ft_free_checks);
1217 if (fle == NULL && curbit > 0) {
1219 "warning bit=%d set, but no fle found\n",
1223 while (fle != NULL) {
/* When flushing for a specific route, skip entries on other routes. */
1225 if (__DEVOLATILE(struct rtentry *, fle->f_rt) != rt) {
1230 } else if (!flow_stale(ft, fle)) {
1236 * delete head of the list
1238 if (fleprev == *flehead) {
1240 if (fle == fleprev) {
1241 fleprev = *flehead = fle->f_next;
1243 fleprev = *flehead = fle;
1247 * don't advance fleprev
1250 fleprev->f_next = fle->f_next;
1251 fle = fleprev->f_next;
/* Accumulate the unlinked entry on the deferred-free list. */
1254 if (flefreehead == NULL)
1255 flefreehead = flefreetail = fletmp;
1257 flefreetail->f_next = fletmp;
1258 flefreetail = fletmp;
1260 fletmp->f_next = NULL;
1262 if (*flehead == NULL)
1263 bit_clear(mask, curbit);
1264 FL_ENTRY_UNLOCK(ft, curbit);
1265 bit_clear(tmpmask, curbit);
/* Advance the scan window past the processed byte. */
1266 tmpmask += (curbit / 8);
1267 tmpsize -= (curbit / 8) * 8;
1268 bit_ffs(tmpmask, tmpsize, &curbit);
/* Free outside the bucket locks. */
1270 while ((fle = flefreehead) != NULL) {
1271 flefreehead = fle->f_next;
1272 FLOWSTAT_INC(ft, ft_frees);
/*
 * Drop every cached flow referencing rt.  For per-cpu tables the sweeping
 * thread must be bound to each cpu in turn so flowtable_mask()/
 * flowtable_entry() address that cpu's arrays (family switch elided).
 */
1278 flowtable_route_flush(sa_family_t sa, struct rtentry *rt)
1280 struct flowtable *ft;
1295 panic("%s: sa %d", __func__, sa);
1298 if (ft->ft_flags & FL_PCPU) {
1300 if (smp_started == 1) {
1301 thread_lock(curthread);
1302 sched_bind(curthread, i);
1303 thread_unlock(curthread);
1306 flowtable_free_stale(ft, rt);
1308 if (smp_started == 1) {
1309 thread_lock(curthread);
1310 sched_unbind(curthread);
1311 thread_unlock(curthread);
1315 flowtable_free_stale(ft, rt);
/* Periodic cleaner body: same per-cpu binding dance, but reaping only
 * stale entries (rt == NULL). */
1320 flowtable_clean_vnet(struct flowtable *ft)
1323 if (ft->ft_flags & FL_PCPU) {
1327 if (smp_started == 1) {
1328 thread_lock(curthread);
1329 sched_bind(curthread, i);
1330 thread_unlock(curthread);
1333 flowtable_free_stale(ft, NULL);
1335 if (smp_started == 1) {
1336 thread_lock(curthread);
1337 sched_unbind(curthread);
1338 thread_unlock(curthread);
1342 flowtable_free_stale(ft, NULL);
/*
 * Cleaner kthread main loop: periodically (flowclean_freq ticks, retuned
 * by flow_full()) walk every vnet's v4/v6 table reaping stale flows, then
 * signal waiters on flowclean_f_cv and sleep on flowclean_c_cv.
 */
1346 flowtable_cleaner(void)
1348 VNET_ITERATOR_DECL(vnet_iter);
1352 log(LOG_INFO, "flowtable cleaner started\n");
1356 VNET_FOREACH(vnet_iter) {
1357 CURVNET_SET(vnet_iter);
1359 flowtable_clean_vnet(&V_ip4_ft);
1362 flowtable_clean_vnet(&V_ip6_ft);
1366 VNET_LIST_RUNLOCK();
1369 * The 10 second interval between cleaning checks
1372 mtx_lock(&flowclean_lock);
1374 sched_prio(td, PPAUSE);
1377 cv_broadcast(&flowclean_f_cv);
1378 cv_timedwait(&flowclean_c_cv, &flowclean_lock, flowclean_freq);
1379 mtx_unlock(&flowclean_lock);
/*
 * Synchronous flush (ifnet departure handler): kick the cleaner and wait
 * until flowclean_cycles advances, proving a full pass completed.
 */
1384 flowtable_flush(void *unused __unused)
1388 mtx_lock(&flowclean_lock);
1389 start = flowclean_cycles;
1390 while (start == flowclean_cycles) {
1391 cv_broadcast(&flowclean_c_cv);
1392 cv_wait(&flowclean_f_cv, &flowclean_lock);
1394 mtx_unlock(&flowclean_lock);
/* Kernel process descriptor for the cleaner (fields elided). */
1397 static struct kproc_desc flow_kp = {
1402 SYSINIT(flowcleaner, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, kproc_start, &flow_kp);
/*
 * Resolve a table's bucket count: take the tunable if set (forcing a
 * power of 2), otherwise round (1024 + maxusers*64) up to a power of 2.
 */
1405 flowtable_get_size(char *name)
1409 if (TUNABLE_INT_FETCH(name, &size)) {
1412 if (!powerof2(size)) {
1413 printf("%s must be power of 2\n", name);
1418 * round up to the next power of 2
1420 size = 1 << fls((1024 + maxusers * 64) - 1);
/*
 * One-time global init: seed the hash jitter, create the per-family UMA
 * zones (capped by maxusers and cpu count), set up the cleaner sync
 * primitives, and flush flows when an interface departs.
 */
1427 flowtable_init(const void *unused __unused)
1430 flow_hashjitter = arc4random();
1433 flow_ipv4_zone = uma_zcreate("ip4flow", sizeof(struct flentry_v4),
1434 NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, UMA_ZONE_MAXBUCKET);
1435 uma_zone_set_max(flow_ipv4_zone, 1024 + maxusers * 64 * mp_ncpus);
1438 flow_ipv6_zone = uma_zcreate("ip6flow", sizeof(struct flentry_v6),
1439 NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, UMA_ZONE_MAXBUCKET);
1440 uma_zone_set_max(flow_ipv6_zone, 1024 + maxusers * 64 * mp_ncpus);
1443 cv_init(&flowclean_c_cv, "c_flowcleanwait");
1444 cv_init(&flowclean_f_cv, "f_flowcleanwait");
1445 mtx_init(&flowclean_lock, "flowclean lock", NULL, MTX_DEF);
1446 EVENTHANDLER_REGISTER(ifnet_departure_event, flowtable_flush, NULL,
1447 EVENTHANDLER_PRI_ANY);
1448 flowclean_freq = 20*hz;
1450 SYSINIT(flowtable_init, SI_SUB_PROTO_BEGIN, SI_ORDER_FIRST,
1451 flowtable_init, NULL);
/* Per-family sysctl subtrees, zone caps and per-vnet stats, plus the
 * per-vnet table constructors. */
1454 static SYSCTL_NODE(_net_flowtable, OID_AUTO, ip4, CTLFLAG_RD, NULL,
1455 "Flowtable for IPv4");
1456 SYSCTL_UMA_MAX(_net_flowtable_ip4, OID_AUTO, maxflows, CTLFLAG_RW,
1457 &flow_ipv4_zone, "Maximum number of IPv4 flows allowed");
1459 static VNET_PCPUSTAT_DEFINE(struct flowtable_stat, ip4_ftstat);
1460 VNET_PCPUSTAT_SYSINIT(ip4_ftstat);
1461 VNET_PCPUSTAT_SYSUNINIT(ip4_ftstat);
1462 SYSCTL_VNET_PCPUSTAT(_net_flowtable_ip4, OID_AUTO, stat, struct flowtable_stat,
1463 ip4_ftstat, "Flowtable statistics for IPv4 "
1464 "(struct flowtable_stat, net/flowtable.h)");
/* Build this vnet's IPv4 table (per-cpu bucket arrays). */
1467 flowtable_init_vnet_v4(const void *unused __unused)
1470 V_ip4_ft.ft_zone = flow_ipv4_zone;
1471 V_ip4_ft.ft_size = flowtable_get_size("net.flowtable.ip4.size");
1472 V_ip4_ft.ft_flags = FL_PCPU;
1473 V_ip4_ft.ft_stat = VNET(ip4_ftstat);
1474 flowtable_alloc(&V_ip4_ft);
1476 VNET_SYSINIT(ft_vnet_v4, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
1477 flowtable_init_vnet_v4, NULL);
1481 static SYSCTL_NODE(_net_flowtable, OID_AUTO, ip6, CTLFLAG_RD, NULL,
1482 "Flowtable for IPv6");
1483 SYSCTL_UMA_MAX(_net_flowtable_ip6, OID_AUTO, maxflows, CTLFLAG_RW,
1484 &flow_ipv6_zone, "Maximum number of IPv6 flows allowed");
1486 static VNET_PCPUSTAT_DEFINE(struct flowtable_stat, ip6_ftstat);
1487 VNET_PCPUSTAT_SYSINIT(ip6_ftstat);
1488 VNET_PCPUSTAT_SYSUNINIT(ip6_ftstat);
1489 SYSCTL_VNET_PCPUSTAT(_net_flowtable_ip6, OID_AUTO, stat, struct flowtable_stat,
1490 ip6_ftstat, "Flowtable statistics for IPv6 "
1491 "(struct flowtable_stat, net/flowtable.h)");
/* Build this vnet's IPv6 table (per-cpu bucket arrays). */
1494 flowtable_init_vnet_v6(const void *unused __unused)
1497 V_ip6_ft.ft_zone = flow_ipv6_zone;
1498 V_ip6_ft.ft_size = flowtable_get_size("net.flowtable.ip6.size");
1499 V_ip6_ft.ft_flags = FL_PCPU;
1500 V_ip6_ft.ft_stat = VNET(ip6_ftstat);
1501 flowtable_alloc(&V_ip6_ft);
1503 VNET_SYSINIT(flowtable_init_vnet_v6, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
1504 flowtable_init_vnet_v6, NULL);
/*
 * DDB helper: return a pointer to the entry's stored key words.
 * BUGFIX: the casts were inverted — the FL_IPV6 branch cast to
 * struct flentry_v4 and vice versa, the same defect fixed in
 * flowtable_set_hashkey()/flowtable_key_equal(); flow_show() would have
 * printed IPv6 keys through the 3-word v4 layout.
 */
1509 flowtable_get_hashkey(struct flentry *fle)
1513 if (fle->f_flags & FL_IPV6)
1514 hashkey = ((struct flentry_v6 *)fle)->fl_flow.ipf_key;
1516 hashkey = ((struct flentry_v4 *)fle)->fl_flow.ipf_key;
/* DDB variants of flowtable_mask()/flowtable_entry() taking an explicit
 * cpu id instead of curcpu (and omitting the KASSERTs). */
1522 flowtable_mask_pcpu(struct flowtable *ft, int cpuid)
1526 if (ft->ft_flags & FL_PCPU)
1527 mask = ft->ft_masks[cpuid];
1529 mask = ft->ft_masks[0];
1534 static struct flentry **
1535 flowtable_entry_pcpu(struct flowtable *ft, uint32_t hash, int cpuid)
1537 struct flentry **fle;
1538 int index = (hash % ft->ft_size);
1540 if (ft->ft_flags & FL_PCPU) {
1541 fle = &ft->ft_table.pcpu[cpuid][index];
1543 fle = &ft->ft_table.global[index];
/*
 * DDB: print one flow entry — addresses/ports (IPv4 only; IPv6 address
 * printing is skipped, raw key words are dumped instead), flag bits,
 * route/interface state, hash, idle time and fib.  Several lines (rt/ifp
 * loads, v6 skip label) are elided in this chunk.
 */
1550 flow_show(struct flowtable *ft, struct flentry *fle)
1553 int rt_valid, ifp_valid;
1554 uint16_t sport, dport;
1556 char saddr[4*sizeof "123"], daddr[4*sizeof "123"];
1557 volatile struct rtentry *rt;
1558 struct ifnet *ifp = NULL;
1560 idle_time = (int)(time_uptime - fle->f_uptime);
1562 rt_valid = rt != NULL;
1565 ifp_valid = ifp != NULL;
1566 hashkey = flowtable_get_hashkey(fle);
1567 if (fle->f_flags & FL_IPV6)
/* v4 key layout: [0]=ports, [1]=src, [2]=dst (see ipv4_flow_lookup_hash). */
1570 inet_ntoa_r(*(struct in_addr *) &hashkey[2], daddr);
1571 if (ft->ft_flags & FL_HASH_ALL) {
1572 inet_ntoa_r(*(struct in_addr *) &hashkey[1], saddr);
1573 sport = ntohs(((uint16_t *)hashkey)[0]);
1574 dport = ntohs(((uint16_t *)hashkey)[1]);
1575 db_printf("%s:%d->%s:%d",
1576 saddr, sport, daddr,
1579 db_printf("%s ", daddr);
1582 if (fle->f_flags & FL_STALE)
1583 db_printf(" FL_STALE ");
1584 if (fle->f_flags & FL_TCP)
1585 db_printf(" FL_TCP ");
1586 if (fle->f_flags & FL_UDP)
1587 db_printf(" FL_UDP ");
1589 if (rt->rt_flags & RTF_UP)
1590 db_printf(" RTF_UP ");
1593 if (ifp->if_flags & IFF_LOOPBACK)
1594 db_printf(" IFF_LOOPBACK ");
1595 if (ifp->if_flags & IFF_UP)
1596 db_printf(" IFF_UP ");
1597 if (ifp->if_flags & IFF_POINTOPOINT)
1598 db_printf(" IFF_POINTOPOINT ");
1600 if (fle->f_flags & FL_IPV6)
1601 db_printf("\n\tkey=%08x:%08x:%08x%08x:%08x:%08x%08x:%08x:%08x",
1602 hashkey[0], hashkey[1], hashkey[2],
1603 hashkey[3], hashkey[4], hashkey[5],
1604 hashkey[6], hashkey[7], hashkey[8]);
1606 db_printf("\n\tkey=%08x:%08x:%08x ",
1607 hashkey[0], hashkey[1], hashkey[2]);
1608 db_printf("hash=%08x idle_time=%03d"
1609 "\n\tfibnum=%02d rt=%p",
1610 fle->f_fhash, idle_time, fle->f_fibnum, fle->f_rt);
/*
 * DDB: walk one cpu's occupancy mask (same bit_ffs scan as
 * flowtable_free_stale, but without advancing the window) and print every
 * chained entry via flow_show().  Lines elided in this chunk.
 */
1615 flowtable_show(struct flowtable *ft, int cpuid)
1618 struct flentry *fle, **flehead;
1619 bitstr_t *mask, *tmpmask;
1622 db_printf("cpu: %d\n", cpuid);
1623 mask = flowtable_mask_pcpu(ft, cpuid);
1624 tmpmask = ft->ft_tmpmask;
1625 memcpy(tmpmask, mask, ft->ft_size/8);
1627 * XXX Note to self, bit_ffs operates at the byte level
1628 * and thus adds gratuitous overhead
1630 bit_ffs(tmpmask, ft->ft_size, &curbit);
1631 while (curbit != -1) {
1632 if (curbit >= ft->ft_size || curbit < -1) {
1633 db_printf("warning: bad curbit value %d \n",
1638 flehead = flowtable_entry_pcpu(ft, curbit, cpuid);
1641 while (fle != NULL) {
1646 bit_clear(tmpmask, curbit);
1647 bit_ffs(tmpmask, ft->ft_size, &curbit);
/* Dump a table: iterate cpus for per-cpu tables, else a single pass. */
1652 flowtable_show_vnet(struct flowtable *ft)
1655 if (ft->ft_flags & FL_PCPU) {
1659 flowtable_show(ft, i);
1662 flowtable_show(ft, -1);
1665 DB_SHOW_COMMAND(flowtables, db_show_flowtables)
1667 VNET_ITERATOR_DECL(vnet_iter);
1669 VNET_FOREACH(vnet_iter) {
1670 CURVNET_SET(vnet_iter);
1672 db_printf("vnet %p\n", vnet_iter);
1676 flowtable_show_vnet(&V_ip4_ft);
1680 flowtable_show_vnet(&V_ip6_ft);