2 * Copyright (c) 2002 Cedric Berger
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above
12 * copyright notice, this list of conditions and the following
13 * disclaimer in the documentation and/or other materials provided
14 * with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
19 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
20 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
22 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
24 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
26 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
29 * $OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
36 #include "opt_inet6.h"
38 #include <sys/param.h>
39 #include <sys/kernel.h>
41 #include <sys/malloc.h>
42 #include <sys/mutex.h>
43 #include <sys/refcount.h>
44 #include <sys/rwlock.h>
45 #include <sys/socket.h>
50 #include <net/pfvar.h>
52 #define ACCEPT_FLAGS(flags, oklist) \
54 if ((flags & ~(oklist)) & \
59 #define FILLIN_SIN(sin, addr) \
61 (sin).sin_len = sizeof(sin); \
62 (sin).sin_family = AF_INET; \
63 (sin).sin_addr = (addr); \
66 #define FILLIN_SIN6(sin6, addr) \
68 (sin6).sin6_len = sizeof(sin6); \
69 (sin6).sin6_family = AF_INET6; \
70 (sin6).sin6_addr = (addr); \
73 #define SWAP(type, a1, a2) \
80 #define SUNION2PF(su, af) (((af)==AF_INET) ? \
81 (struct pf_addr *)&(su)->sin.sin_addr : \
82 (struct pf_addr *)&(su)->sin6.sin6_addr)
84 #define AF_BITS(af) (((af)==AF_INET)?32:128)
85 #define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
86 #define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
87 #define KENTRY_RNF_ROOT(ke) \
88 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
90 #define NO_ADDRESSES (-1)
91 #define ENQUEUE_UNMARKED_ONLY (1)
92 #define INVERT_NEG_FLAG (1)
/*
 * Per-operation scratch members of the tree-walk context used by
 * pfr_walktree().  Exactly one member is meaningful per walk, selected
 * by the walk opcode.  NOTE(review): the enclosing struct/union
 * declaration is not visible in this extract.
 */
105 struct pfr_addr *pfrw1_addr;
106 struct pfr_astats *pfrw1_astats;
107 struct pfr_kentryworkq *pfrw1_workq;
108 struct pfr_kentry *pfrw1_kentry;
109 struct pfi_dynaddr *pfrw1_dyn;
/* Shorthand accessors for the union members above. */
113 #define pfrw_addr pfrw_1.pfrw1_addr
114 #define pfrw_astats pfrw_1.pfrw1_astats
115 #define pfrw_workq pfrw_1.pfrw1_workq
116 #define pfrw_kentry pfrw_1.pfrw1_kentry
117 #define pfrw_dyn pfrw_1.pfrw1_dyn
118 #define pfrw_cnt pfrw_free
/* Record an error code in 'rv' and jump to the common cleanup label. */
120 #define senderr(e) do { rv = (e); goto _bad; } while (0)
/* Malloc type and per-VNET UMA zones backing table entries/counters. */
122 static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
123 static VNET_DEFINE(uma_zone_t, pfr_kentry_z);
124 #define V_pfr_kentry_z VNET(pfr_kentry_z)
125 static VNET_DEFINE(uma_zone_t, pfr_kcounters_z);
126 #define V_pfr_kcounters_z VNET(pfr_kcounters_z)
/* All-ones address; used as a full-length mask (initializer below). */
128 static struct pf_addr pfr_ffaddr = {
129 .addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
/* Forward declarations for the static helpers implemented below. */
132 static void pfr_copyout_addr(struct pfr_addr *,
133 struct pfr_kentry *ke);
134 static int pfr_validate_addr(struct pfr_addr *);
135 static void pfr_enqueue_addrs(struct pfr_ktable *,
136 struct pfr_kentryworkq *, int *, int);
137 static void pfr_mark_addrs(struct pfr_ktable *);
138 static struct pfr_kentry
139 *pfr_lookup_addr(struct pfr_ktable *,
140 struct pfr_addr *, int);
141 static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *);
142 static void pfr_destroy_kentries(struct pfr_kentryworkq *);
143 static void pfr_destroy_kentry(struct pfr_kentry *);
144 static void pfr_insert_kentries(struct pfr_ktable *,
145 struct pfr_kentryworkq *, long);
146 static void pfr_remove_kentries(struct pfr_ktable *,
147 struct pfr_kentryworkq *);
148 static void pfr_clstats_kentries(struct pfr_kentryworkq *, long,
150 static void pfr_reset_feedback(struct pfr_addr *, int);
151 static void pfr_prepare_network(union sockaddr_union *, int, int);
152 static int pfr_route_kentry(struct pfr_ktable *,
153 struct pfr_kentry *);
154 static int pfr_unroute_kentry(struct pfr_ktable *,
155 struct pfr_kentry *);
156 static int pfr_walktree(struct radix_node *, void *);
157 static int pfr_validate_table(struct pfr_table *, int, int);
158 static int pfr_fix_anchor(char *);
159 static void pfr_commit_ktable(struct pfr_ktable *, long);
160 static void pfr_insert_ktables(struct pfr_ktableworkq *);
161 static void pfr_insert_ktable(struct pfr_ktable *);
162 static void pfr_setflags_ktables(struct pfr_ktableworkq *);
163 static void pfr_setflags_ktable(struct pfr_ktable *, int);
164 static void pfr_clstats_ktables(struct pfr_ktableworkq *, long,
166 static void pfr_clstats_ktable(struct pfr_ktable *, long, int);
167 static struct pfr_ktable
168 *pfr_create_ktable(struct pfr_table *, long, int);
169 static void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
170 static void pfr_destroy_ktable(struct pfr_ktable *, int);
171 static int pfr_ktable_compare(struct pfr_ktable *,
172 struct pfr_ktable *);
173 static struct pfr_ktable
174 *pfr_lookup_table(struct pfr_table *);
175 static void pfr_clean_node_mask(struct pfr_ktable *,
176 struct pfr_kentryworkq *);
177 static int pfr_table_count(struct pfr_table *, int);
178 static int pfr_skip_table(struct pfr_table *,
179 struct pfr_ktable *, int);
180 static struct pfr_kentry
181 *pfr_kentry_byidx(struct pfr_ktable *, int, int);
/* Red-black tree of all kernel tables, keyed by pfr_ktable_compare(). */
183 static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
184 static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
/* Global table tree root and an all-zero table used as a template. */
186 struct pfr_ktablehead pfr_ktables;
187 struct pfr_table pfr_nulltable;
/*
 * Module init/teardown bodies: create the per-VNET UMA zones for table
 * entries and counters and hook the entry zone into the pf limit table;
 * teardown destroys both zones.  NOTE(review): the function signatures
 * are missing from this extract (numbering skips); code untouched.
 */
194 V_pfr_kentry_z = uma_zcreate("pf table entries",
195 sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
197 V_pfr_kcounters_z = uma_zcreate("pf table counters",
198 sizeof(struct pfr_kcounters), NULL, NULL, NULL, NULL,
200 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
201 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
208 uma_zdestroy(V_pfr_kentry_z);
209 uma_zdestroy(V_pfr_kcounters_z);
/*
 * Remove every address from table 'tbl'.  Validates flags and table,
 * refuses constant tables, collects all entries and (unless
 * PFR_FLAG_DUMMY, a dry run) deletes them; 'ndel' receives the count.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
213 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
215 struct pfr_ktable *kt;
216 struct pfr_kentryworkq workq;
220 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
221 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
223 kt = pfr_lookup_table(tbl);
224 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
226 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
228 pfr_enqueue_addrs(kt, &workq, ndel, 0);
230 if (!(flags & PFR_FLAG_DUMMY)) {
231 pfr_remove_kentries(kt, &workq);
232 KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__));
/*
 * Add 'size' addresses from 'addr' to table 'tbl'.  Uses a scratch
 * table (tmpkt) to detect duplicates within the request itself; with
 * PFR_FLAG_FEEDBACK each pfr_addr gets a per-entry result code
 * (ADDED/DUPLICATE/CONFLICT/NONE).  'nadd' reports additions unless
 * PFR_FLAG_DUMMY.  NOTE(review): extract omits lines; code untouched.
 */
238 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
239 int *nadd, int flags)
241 struct pfr_ktable *kt, *tmpkt;
242 struct pfr_kentryworkq workq;
243 struct pfr_kentry *p, *q;
246 long tzero = time_second;
250 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
251 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
253 kt = pfr_lookup_table(tbl);
254 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
256 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
/* Scratch table used only for intra-request duplicate detection. */
258 tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
262 for (i = 0, ad = addr; i < size; i++, ad++) {
263 if (pfr_validate_addr(ad))
265 p = pfr_lookup_addr(kt, ad, 1);
266 q = pfr_lookup_addr(tmpkt, ad, 1);
267 if (flags & PFR_FLAG_FEEDBACK) {
269 ad->pfra_fback = PFR_FB_DUPLICATE;
271 ad->pfra_fback = PFR_FB_ADDED;
/* Same address present but with opposite negation bit. */
272 else if (p->pfrke_not != ad->pfra_not)
273 ad->pfra_fback = PFR_FB_CONFLICT;
275 ad->pfra_fback = PFR_FB_NONE;
277 if (p == NULL && q == NULL) {
278 p = pfr_create_kentry(ad);
281 if (pfr_route_kentry(tmpkt, p)) {
282 pfr_destroy_kentry(p);
283 ad->pfra_fback = PFR_FB_NONE;
285 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
290 pfr_clean_node_mask(tmpkt, &workq);
291 if (!(flags & PFR_FLAG_DUMMY))
292 pfr_insert_kentries(kt, &workq, tzero);
294 pfr_destroy_kentries(&workq);
297 pfr_destroy_ktable(tmpkt, 0);
/* Error path: undo scratch routing and reset feedback codes. */
300 pfr_clean_node_mask(tmpkt, &workq);
301 pfr_destroy_kentries(&workq);
302 if (flags & PFR_FLAG_FEEDBACK)
303 pfr_reset_feedback(addr, size);
304 pfr_destroy_ktable(tmpkt, 0);
/*
 * Delete 'size' addresses from table 'tbl'; 'ndel' reports the count.
 * Chooses between a full-table mark-and-sweep (large requests) and
 * per-address lookups (small requests) based on size vs. N/log2(N).
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
309 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
310 int *ndel, int flags)
312 struct pfr_ktable *kt;
313 struct pfr_kentryworkq workq;
314 struct pfr_kentry *p;
316 int i, rv, xdel = 0, log = 1;
320 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
321 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
323 kt = pfr_lookup_table(tbl);
324 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
326 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
329 * there are two algorithms to choose from here.
331 * n: number of addresses to delete
332 * N: number of addresses in the table
334 * one is O(N) and is better for large 'n'
335 * one is O(n*LOG(N)) and is better for small 'n'
337 * following code try to decide which one is best.
/* Compute log ~= floor(log2(table size)). */
339 for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
341 if (size > kt->pfrkt_cnt/log) {
342 /* full table scan */
345 /* iterate over addresses to delete */
346 for (i = 0, ad = addr; i < size; i++, ad++) {
347 if (pfr_validate_addr(ad))
349 p = pfr_lookup_addr(kt, ad, 1);
355 for (i = 0, ad = addr; i < size; i++, ad++) {
356 if (pfr_validate_addr(ad))
358 p = pfr_lookup_addr(kt, ad, 1);
359 if (flags & PFR_FLAG_FEEDBACK) {
361 ad->pfra_fback = PFR_FB_NONE;
362 else if (p->pfrke_not != ad->pfra_not)
363 ad->pfra_fback = PFR_FB_CONFLICT;
/* Already marked: same address appeared earlier in this request. */
364 else if (p->pfrke_mark)
365 ad->pfra_fback = PFR_FB_DUPLICATE;
367 ad->pfra_fback = PFR_FB_DELETED;
369 if (p != NULL && p->pfrke_not == ad->pfra_not &&
372 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
376 if (!(flags & PFR_FLAG_DUMMY))
377 pfr_remove_kentries(kt, &workq);
382 if (flags & PFR_FLAG_FEEDBACK)
383 pfr_reset_feedback(addr, size);
/*
 * Replace the table's contents with exactly the given address list:
 * computes add/delete/change work queues by marking existing entries,
 * then applies them (unless PFR_FLAG_DUMMY).  With PFR_FLAG_FEEDBACK,
 * per-address result codes are written back, and deleted entries are
 * appended after the input array ('size2' bounds that extra space).
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
388 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
389 int *size2, int *nadd, int *ndel, int *nchange, int flags,
390 u_int32_t ignore_pfrt_flags)
392 struct pfr_ktable *kt, *tmpkt;
393 struct pfr_kentryworkq addq, delq, changeq;
394 struct pfr_kentry *p, *q;
396 int i, rv, xadd = 0, xdel = 0, xchange = 0;
397 long tzero = time_second;
401 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
402 if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
405 kt = pfr_lookup_table(tbl);
406 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
408 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
410 tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
416 SLIST_INIT(&changeq);
417 for (i = 0; i < size; i++) {
419 * XXXGL: undertand pf_if usage of this function
420 * and make ad a moving pointer
422 bcopy(addr + i, &ad, sizeof(ad));
423 if (pfr_validate_addr(&ad))
425 ad.pfra_fback = PFR_FB_NONE;
426 p = pfr_lookup_addr(kt, &ad, 1);
429 ad.pfra_fback = PFR_FB_DUPLICATE;
/* Entry exists; flip its negation if the request differs. */
433 if (p->pfrke_not != ad.pfra_not) {
434 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
435 ad.pfra_fback = PFR_FB_CHANGED;
439 q = pfr_lookup_addr(tmpkt, &ad, 1);
441 ad.pfra_fback = PFR_FB_DUPLICATE;
444 p = pfr_create_kentry(&ad);
447 if (pfr_route_kentry(tmpkt, p)) {
448 pfr_destroy_kentry(p);
449 ad.pfra_fback = PFR_FB_NONE;
451 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
452 ad.pfra_fback = PFR_FB_ADDED;
457 if (flags & PFR_FLAG_FEEDBACK)
458 bcopy(&ad, addr + i, sizeof(ad));
/* Anything still unmarked was not in the new list: delete it. */
460 pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
461 if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
462 if (*size2 < size+xdel) {
467 SLIST_FOREACH(p, &delq, pfrke_workq) {
468 pfr_copyout_addr(&ad, p);
469 ad.pfra_fback = PFR_FB_DELETED;
470 bcopy(&ad, addr + size + i, sizeof(ad));
474 pfr_clean_node_mask(tmpkt, &addq);
475 if (!(flags & PFR_FLAG_DUMMY)) {
476 pfr_insert_kentries(kt, &addq, tzero);
477 pfr_remove_kentries(kt, &delq);
478 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
480 pfr_destroy_kentries(&addq);
487 if ((flags & PFR_FLAG_FEEDBACK) && size2)
489 pfr_destroy_ktable(tmpkt, 0);
/* Error path: unwind scratch state and feedback codes. */
492 pfr_clean_node_mask(tmpkt, &addq);
493 pfr_destroy_kentries(&addq);
494 if (flags & PFR_FLAG_FEEDBACK)
495 pfr_reset_feedback(addr, size);
496 pfr_destroy_ktable(tmpkt, 0);
/*
 * Test which of the given host addresses match table 'tbl'; 'nmatch'
 * counts positive (non-negated) matches.  With PFR_FLAG_REPLACE each
 * input address is overwritten by the entry that matched it.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
501 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
502 int *nmatch, int flags)
504 struct pfr_ktable *kt;
505 struct pfr_kentry *p;
511 ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
512 if (pfr_validate_table(tbl, 0, 0))
514 kt = pfr_lookup_table(tbl);
515 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
518 for (i = 0, ad = addr; i < size; i++, ad++) {
519 if (pfr_validate_addr(ad))
/* Only host addresses may be tested, not networks. */
521 if (ADDR_NETWORK(ad))
523 p = pfr_lookup_addr(kt, ad, 0);
524 if (flags & PFR_FLAG_REPLACE)
525 pfr_copyout_addr(ad, p);
526 ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
527 (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
528 if (p != NULL && !p->pfrke_not)
/*
 * Copy all addresses of table 'tbl' out to 'addr'.  If the caller's
 * buffer ('*size') is too small, only the required size is returned.
 * Walks both the IPv4 and IPv6 radix trees with PFRW_GET_ADDRS.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
537 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
540 struct pfr_ktable *kt;
541 struct pfr_walktree w;
546 ACCEPT_FLAGS(flags, 0);
547 if (pfr_validate_table(tbl, 0, 0))
549 kt = pfr_lookup_table(tbl);
550 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
552 if (kt->pfrkt_cnt > *size) {
553 *size = kt->pfrkt_cnt;
557 bzero(&w, sizeof(w));
558 w.pfrw_op = PFRW_GET_ADDRS;
560 w.pfrw_free = kt->pfrkt_cnt;
561 rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
563 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
/* All slots must have been consumed exactly. */
568 KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
571 *size = kt->pfrkt_cnt;
/*
 * Copy per-address statistics of table 'tbl' out to 'addr', optionally
 * clearing them afterwards (PFR_FLAG_CLSTATS).  Like pfr_get_addrs()
 * but walks with PFRW_GET_ASTATS.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
576 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
579 struct pfr_ktable *kt;
580 struct pfr_walktree w;
581 struct pfr_kentryworkq workq;
583 long tzero = time_second;
587 /* XXX PFR_FLAG_CLSTATS disabled */
588 ACCEPT_FLAGS(flags, 0);
589 if (pfr_validate_table(tbl, 0, 0))
591 kt = pfr_lookup_table(tbl);
592 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
594 if (kt->pfrkt_cnt > *size) {
595 *size = kt->pfrkt_cnt;
599 bzero(&w, sizeof(w));
600 w.pfrw_op = PFRW_GET_ASTATS;
601 w.pfrw_astats = addr;
602 w.pfrw_free = kt->pfrkt_cnt;
603 rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
605 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
607 if (!rv && (flags & PFR_FLAG_CLSTATS)) {
608 pfr_enqueue_addrs(kt, &workq, NULL, 0);
609 pfr_clstats_kentries(&workq, tzero, 0);
615 printf("pfr_get_astats: corruption detected (%d).\n",
619 *size = kt->pfrkt_cnt;
/*
 * Clear statistics for the listed addresses in table 'tbl'; 'nzero'
 * reports how many entries were cleared.  With PFR_FLAG_FEEDBACK each
 * address gets CLEARED or NONE.  PFR_FLAG_DUMMY skips the actual clear.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
624 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
625 int *nzero, int flags)
627 struct pfr_ktable *kt;
628 struct pfr_kentryworkq workq;
629 struct pfr_kentry *p;
631 int i, rv, xzero = 0;
635 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
636 if (pfr_validate_table(tbl, 0, 0))
638 kt = pfr_lookup_table(tbl);
639 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
642 for (i = 0, ad = addr; i < size; i++, ad++) {
643 if (pfr_validate_addr(ad))
645 p = pfr_lookup_addr(kt, ad, 1);
646 if (flags & PFR_FLAG_FEEDBACK) {
647 ad->pfra_fback = (p != NULL) ?
648 PFR_FB_CLEARED : PFR_FB_NONE;
651 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
656 if (!(flags & PFR_FLAG_DUMMY))
657 pfr_clstats_kentries(&workq, 0, 0);
662 if (flags & PFR_FLAG_FEEDBACK)
663 pfr_reset_feedback(addr, size);
/*
 * Sanity-check a userland-supplied pfr_addr: family-specific prefix
 * length bounds, no host bits set beyond the prefix, no garbage past
 * the address, and a 0/1 negation flag.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
668 pfr_validate_addr(struct pfr_addr *ad)
672 switch (ad->pfra_af) {
675 if (ad->pfra_net > 32)
681 if (ad->pfra_net > 128)
/* Reject addresses with bits set below the netmask boundary. */
688 if (ad->pfra_net < 128 &&
689 (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
691 for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
692 if (((caddr_t)ad)[i])
694 if (ad->pfra_not && ad->pfra_not != 1)
/*
 * Walk both radix trees of 'kt' and push entries onto 'workq'
 * (PFRW_ENQUEUE), or only unmarked ones when 'sweep' is set
 * (PFRW_SWEEP).  'naddr', if non-NULL, receives the entry count.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
702 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
703 int *naddr, int sweep)
705 struct pfr_walktree w;
708 bzero(&w, sizeof(w));
709 w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
710 w.pfrw_workq = workq;
711 if (kt->pfrkt_ip4 != NULL)
712 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree,
714 printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
715 if (kt->pfrkt_ip6 != NULL)
716 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
718 printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
/*
 * Clear the per-entry mark flag on every entry of both radix trees
 * (PFRW_MARK walk); used before mark-and-sweep style updates.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
724 pfr_mark_addrs(struct pfr_ktable *kt)
726 struct pfr_walktree w;
728 bzero(&w, sizeof(w));
729 w.pfrw_op = PFRW_MARK;
730 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
731 printf("pfr_mark_addrs: IPv4 walktree failed.\n");
732 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
733 printf("pfr_mark_addrs: IPv6 walktree failed.\n");
/*
 * Find the kernel entry for 'ad' in table 'kt'.  Networks use an exact
 * rn_lookup() with a prepared mask; hosts use rn_match().  RNF_ROOT
 * markers are skipped, and with 'exact' a host query will not return a
 * covering network entry.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
737 static struct pfr_kentry *
738 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
740 union sockaddr_union sa, mask;
741 struct radix_node_head *head = NULL;
742 struct pfr_kentry *ke;
746 bzero(&sa, sizeof(sa));
747 if (ad->pfra_af == AF_INET) {
748 FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
749 head = kt->pfrkt_ip4;
750 } else if ( ad->pfra_af == AF_INET6 ) {
751 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
752 head = kt->pfrkt_ip6;
754 if (ADDR_NETWORK(ad)) {
755 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
756 ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
757 if (ke && KENTRY_RNF_ROOT(ke))
760 ke = (struct pfr_kentry *)rn_match(&sa, head);
761 if (ke && KENTRY_RNF_ROOT(ke))
763 if (exact && ke && KENTRY_NETWORK(ke))
/*
 * Allocate (M_NOWAIT, zeroed) a kernel entry from the UMA zone and
 * populate its sockaddr, family, prefix length and negation flag from
 * the userland pfr_addr.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
769 static struct pfr_kentry *
770 pfr_create_kentry(struct pfr_addr *ad)
772 struct pfr_kentry *ke;
774 ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
778 if (ad->pfra_af == AF_INET)
779 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
780 else if (ad->pfra_af == AF_INET6)
781 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
782 ke->pfrke_af = ad->pfra_af;
783 ke->pfrke_net = ad->pfra_net;
784 ke->pfrke_not = ad->pfra_not;
/*
 * Free every entry on 'workq'.  The next pointer is fetched before
 * pfr_destroy_kentry() frees the current node.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
789 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
791 struct pfr_kentry *p, *q;
793 for (p = SLIST_FIRST(workq); p != NULL; p = q) {
794 q = SLIST_NEXT(p, pfrke_workq);
795 pfr_destroy_kentry(p);
/* Release one entry: its counters (if allocated) then the entry itself. */
800 pfr_destroy_kentry(struct pfr_kentry *ke)
802 if (ke->pfrke_counters)
803 uma_zfree(V_pfr_kcounters_z, ke->pfrke_counters);
804 uma_zfree(V_pfr_kentry_z, ke);
/*
 * Route every entry on 'workq' into table 'kt', stamping each with the
 * creation time 'tzero'; routing failures are logged.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
808 pfr_insert_kentries(struct pfr_ktable *kt,
809 struct pfr_kentryworkq *workq, long tzero)
811 struct pfr_kentry *p;
814 SLIST_FOREACH(p, workq, pfrke_workq) {
815 rv = pfr_route_kentry(kt, p);
817 printf("pfr_insert_kentries: cannot route entry "
821 p->pfrke_tzero = tzero;
/*
 * Insert a single address into 'kt' unless already present: look it up,
 * create and route a new entry, and stamp its creation time.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
828 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
830 struct pfr_kentry *p;
833 p = pfr_lookup_addr(kt, ad, 1);
836 p = pfr_create_kentry(ad);
840 rv = pfr_route_kentry(kt, p);
844 p->pfrke_tzero = tzero;
/*
 * Unroute every entry on 'workq' from table 'kt', then free them all.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
851 pfr_remove_kentries(struct pfr_ktable *kt,
852 struct pfr_kentryworkq *workq)
854 struct pfr_kentry *p;
857 SLIST_FOREACH(p, workq, pfrke_workq) {
858 pfr_unroute_kentry(kt, p);
862 pfr_destroy_kentries(workq)
/*
 * Detach every queued entry from the (scratch) table's radix trees
 * without freeing the entries themselves.
 */
866 pfr_clean_node_mask(struct pfr_ktable *kt,
867 struct pfr_kentryworkq *workq)
869 struct pfr_kentry *p;
871 SLIST_FOREACH(p, workq, pfrke_workq)
872 pfr_unroute_kentry(kt, p);
/*
 * Reset statistics on every queued entry: free its counters, restart
 * its time stamp at 'tzero', and — when 'negchange' (INVERT_NEG_FLAG)
 * is set — flip the entry's negation bit.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
876 pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
878 struct pfr_kentry *p;
880 SLIST_FOREACH(p, workq, pfrke_workq) {
882 p->pfrke_not = !p->pfrke_not;
883 if (p->pfrke_counters) {
884 uma_zfree(V_pfr_kcounters_z, p->pfrke_counters);
885 p->pfrke_counters = NULL;
887 p->pfrke_tzero = tzero;
/* Reset all per-address feedback codes in 'addr' back to PFR_FB_NONE. */
892 pfr_reset_feedback(struct pfr_addr *addr, int size)
897 for (i = 0, ad = addr; i < size; i++, ad++)
898 ad->pfra_fback = PFR_FB_NONE;
/*
 * Build a netmask sockaddr for prefix length 'net' in family 'af':
 * a single 32-bit mask for IPv4, or four 32-bit words for IPv6 with
 * full-ones words before the boundary and zeros after.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
902 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
906 bzero(sa, sizeof(*sa));
908 sa->sin.sin_len = sizeof(sa->sin);
909 sa->sin.sin_family = AF_INET;
910 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
911 } else if (af == AF_INET6) {
912 sa->sin6.sin6_len = sizeof(sa->sin6);
913 sa->sin6.sin6_family = AF_INET6;
914 for (i = 0; i < 4; i++) {
916 sa->sin6.sin6_addr.s6_addr32[i] =
917 net ? htonl(-1 << (32-net)) : 0;
920 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
/*
 * Insert entry 'ke' into the appropriate radix tree of 'kt' via
 * rn_addroute(): with a prepared netmask for network entries, or with
 * a NULL mask for host entries.  Returns 0 on success, -1 on failure.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
927 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
929 union sockaddr_union mask;
930 struct radix_node *rn;
931 struct radix_node_head *head = NULL;
935 bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
936 if (ke->pfrke_af == AF_INET)
937 head = kt->pfrkt_ip4;
938 else if (ke->pfrke_af == AF_INET6)
939 head = kt->pfrkt_ip6;
941 if (KENTRY_NETWORK(ke)) {
942 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
943 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
945 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
947 return (rn == NULL ? -1 : 0);
/*
 * Remove entry 'ke' from the appropriate radix tree of 'kt' via
 * rn_delete(), mirroring pfr_route_kentry()'s mask handling; logs if
 * the delete fails.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
951 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
953 union sockaddr_union mask;
954 struct radix_node *rn;
955 struct radix_node_head *head = NULL;
957 if (ke->pfrke_af == AF_INET)
958 head = kt->pfrkt_ip4;
959 else if (ke->pfrke_af == AF_INET6)
960 head = kt->pfrkt_ip6;
962 if (KENTRY_NETWORK(ke)) {
963 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
964 rn = rn_delete(&ke->pfrke_sa, &mask, head);
966 rn = rn_delete(&ke->pfrke_sa, NULL, head);
969 printf("pfr_unroute_kentry: delete failed.\n");
/*
 * Convert a kernel entry back into the userland pfr_addr format:
 * zero the target, then copy family, prefix, negation flag and the
 * family-appropriate address.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
976 pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
978 bzero(ad, sizeof(*ad));
981 ad->pfra_af = ke->pfrke_af;
982 ad->pfra_net = ke->pfrke_net;
983 ad->pfra_not = ke->pfrke_not;
984 if (ad->pfra_af == AF_INET)
985 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
986 else if (ad->pfra_af == AF_INET6)
987 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
/*
 * Radix-tree walk callback; dispatches on the walk opcode stored in
 * the context: enqueue entries, copy addresses or stats out, find an
 * entry by index, or refresh a dynamic-address (pfi_dynaddr) snapshot.
 * NOTE(review): extract omits lines (several case labels are missing);
 * code kept byte-identical.
 */
991 pfr_walktree(struct radix_node *rn, void *arg)
993 struct pfr_kentry *ke = (struct pfr_kentry *)rn;
994 struct pfr_walktree *w = arg;
996 switch (w->pfrw_op) {
1005 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1008 case PFRW_GET_ADDRS:
1009 if (w->pfrw_free-- > 0) {
1010 pfr_copyout_addr(w->pfrw_addr, ke);
1014 case PFRW_GET_ASTATS:
1015 if (w->pfrw_free-- > 0) {
1016 struct pfr_astats as;
1018 pfr_copyout_addr(&as.pfras_a, ke);
/* No counter block means stats were never recorded for this entry. */
1020 if (ke->pfrke_counters) {
1021 bcopy(ke->pfrke_counters->pfrkc_packets,
1022 as.pfras_packets, sizeof(as.pfras_packets));
1023 bcopy(ke->pfrke_counters->pfrkc_bytes,
1024 as.pfras_bytes, sizeof(as.pfras_bytes));
1026 bzero(as.pfras_packets, sizeof(as.pfras_packets));
1027 bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
1028 as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
1030 as.pfras_tzero = ke->pfrke_tzero;
1032 bcopy(&as, w->pfrw_astats, sizeof(as));
1038 break; /* negative entries are ignored */
1039 if (!w->pfrw_cnt--) {
1040 w->pfrw_kentry = ke;
1041 return (1); /* finish search */
1044 case PFRW_DYNADDR_UPDATE:
1046 union sockaddr_union pfr_mask;
1048 if (ke->pfrke_af == AF_INET) {
/* Only the first v4 entry populates the dynaddr snapshot. */
1049 if (w->pfrw_dyn->pfid_acnt4++ > 0)
1051 pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1052 w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa,
1054 w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask,
1056 } else if (ke->pfrke_af == AF_INET6){
1057 if (w->pfrw_dyn->pfid_acnt6++ > 0)
1059 pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1060 w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa,
1062 w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask,
/*
 * Deactivate all tables matched by 'filter' (skipping the reserved
 * anchor); 'ndel' reports the count.  Flag changes are applied via
 * pfr_setflags_ktables() unless PFR_FLAG_DUMMY.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
1072 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1074 struct pfr_ktableworkq workq;
1075 struct pfr_ktable *p;
1078 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
1079 if (pfr_fix_anchor(filter->pfrt_anchor))
1081 if (pfr_table_count(filter, flags) < 0)
1085 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1086 if (pfr_skip_table(filter, p, flags))
1088 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1090 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1092 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1093 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1096 if (!(flags & PFR_FLAG_DUMMY))
1097 pfr_setflags_ktables(&workq);
/*
 * Create 'size' tables (or re-activate existing inactive ones).
 * Anchored tables also get a root table created/found.  New tables are
 * queued on 'addq', flag changes on 'changeq'; both are applied unless
 * PFR_FLAG_DUMMY.  'nadd' reports the number added.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
1104 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1106 struct pfr_ktableworkq addq, changeq;
1107 struct pfr_ktable *p, *q, *r, key;
1108 int i, rv, xadd = 0;
1109 long tzero = time_second;
1111 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1113 SLIST_INIT(&changeq);
1114 for (i = 0; i < size; i++) {
1115 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1116 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1117 flags & PFR_FLAG_USERIOCTL))
1119 key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1120 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1122 p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
/* Skip duplicates already queued in this same request. */
1125 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1126 if (!pfr_ktable_compare(p, q))
1129 SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1131 if (!key.pfrkt_anchor[0])
1134 /* find or create root table */
1135 bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
1136 r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1141 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1142 if (!pfr_ktable_compare(&key, q)) {
1147 key.pfrkt_flags = 0;
1148 r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1151 SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
/* Table exists but is inactive: re-activate with requested flags. */
1153 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1154 SLIST_FOREACH(q, &changeq, pfrkt_workq)
1155 if (!pfr_ktable_compare(&key, q))
1157 p->pfrkt_nflags = (p->pfrkt_flags &
1158 ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1159 SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1165 if (!(flags & PFR_FLAG_DUMMY)) {
1166 pfr_insert_ktables(&addq);
1167 pfr_setflags_ktables(&changeq);
1169 pfr_destroy_ktables(&addq, 0);
/* Error path: discard any tables created for this request. */
1174 pfr_destroy_ktables(&addq, 0);
/*
 * Deactivate the 'size' named tables; duplicates within the request
 * are skipped.  Changes applied unless PFR_FLAG_DUMMY; 'ndel' reports
 * the count.  NOTE(review): extract omits lines; code untouched.
 */
1179 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1181 struct pfr_ktableworkq workq;
1182 struct pfr_ktable *p, *q, key;
1185 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1187 for (i = 0; i < size; i++) {
1188 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1189 if (pfr_validate_table(&key.pfrkt_t, 0,
1190 flags & PFR_FLAG_USERIOCTL))
1192 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1193 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1194 SLIST_FOREACH(q, &workq, pfrkt_workq)
1195 if (!pfr_ktable_compare(p, q))
1197 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1198 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1205 if (!(flags & PFR_FLAG_DUMMY))
1206 pfr_setflags_ktables(&workq);
/*
 * Copy the definitions of all tables matching 'filter' out to 'tbl'.
 * If the buffer is too small, only the required size is returned.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
1213 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1216 struct pfr_ktable *p;
1221 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1222 if (pfr_fix_anchor(filter->pfrt_anchor))
1224 n = nn = pfr_table_count(filter, flags);
1231 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1232 if (pfr_skip_table(filter, p, flags))
1236 bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
1239 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
/*
 * Copy per-table statistics for tables matching 'filter' out to 'tbl';
 * optionally clear them afterwards (PFR_FLAG_CLSTATS, with
 * PFR_FLAG_ADDRSTOO also clearing per-address stats).
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
1246 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1249 struct pfr_ktable *p;
1250 struct pfr_ktableworkq workq;
1252 long tzero = time_second;
1254 /* XXX PFR_FLAG_CLSTATS disabled */
1255 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1256 if (pfr_fix_anchor(filter->pfrt_anchor))
1258 n = nn = pfr_table_count(filter, flags);
1266 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1267 if (pfr_skip_table(filter, p, flags))
1271 bcopy(&p->pfrkt_ts, tbl++, sizeof(*tbl));
1272 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1274 if (flags & PFR_FLAG_CLSTATS)
1275 pfr_clstats_ktables(&workq, tzero,
1276 flags & PFR_FLAG_ADDRSTOO);
1278 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
/*
 * Clear statistics on the 'size' named tables (and their addresses if
 * PFR_FLAG_ADDRSTOO); 'nzero' reports the count.  PFR_FLAG_DUMMY skips
 * the actual clearing.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
1285 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1287 struct pfr_ktableworkq workq;
1288 struct pfr_ktable *p, key;
1290 long tzero = time_second;
1292 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1294 for (i = 0; i < size; i++) {
1295 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1296 if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1298 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1300 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1304 if (!(flags & PFR_FLAG_DUMMY))
1305 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
/*
 * Set/clear user flags on the 'size' named tables.  Only USRMASK bits
 * may be set or cleared, and not both at once.  Clearing PERSIST on an
 * unreferenced table effectively deletes it.  'nchange'/'ndel' report
 * the outcome; PFR_FLAG_DUMMY skips applying.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
1312 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1313 int *nchange, int *ndel, int flags)
1315 struct pfr_ktableworkq workq;
1316 struct pfr_ktable *p, *q, key;
1317 int i, xchange = 0, xdel = 0;
1319 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1320 if ((setflag & ~PFR_TFLAG_USRMASK) ||
1321 (clrflag & ~PFR_TFLAG_USRMASK) ||
1322 (setflag & clrflag))
1325 for (i = 0; i < size; i++) {
1326 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1327 if (pfr_validate_table(&key.pfrkt_t, 0,
1328 flags & PFR_FLAG_USERIOCTL))
1330 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1331 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1332 p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
/* No net change: nothing to queue for this table. */
1334 if (p->pfrkt_nflags == p->pfrkt_flags)
1336 SLIST_FOREACH(q, &workq, pfrkt_workq)
1337 if (!pfr_ktable_compare(p, q))
1339 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1340 if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
1341 (clrflag & PFR_TFLAG_PERSIST) &&
1342 !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1350 if (!(flags & PFR_FLAG_DUMMY))
1351 pfr_setflags_ktables(&workq);
1352 if (nchange != NULL)
/*
 * Begin an inactive-table transaction for anchor 'trs': clear any
 * previous INACTIVE tables in the ruleset, bump and return the ticket.
 * 'ndel' reports discarded tables; PFR_FLAG_DUMMY skips applying.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
1360 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1362 struct pfr_ktableworkq workq;
1363 struct pfr_ktable *p;
1364 struct pf_ruleset *rs;
1367 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1368 rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
1372 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1373 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1374 pfr_skip_table(trs, p, 0))
1376 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1377 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1380 if (!(flags & PFR_FLAG_DUMMY)) {
1381 pfr_setflags_ktables(&workq);
1383 *ticket = ++rs->tticket;
1386 pf_remove_if_empty_ruleset(rs);
/*
 * Define an inactive (shadow) table within an open transaction
 * identified by 'ticket': find or create the ktable (and an anchored
 * root table if needed), build a shadow table holding the supplied
 * addresses, and attach it as kt->pfrkt_shadow for a later commit.
 * 'nadd'/'naddr' report tables/addresses defined.
 * NOTE(review): extract omits lines; code kept byte-identical.
 */
1393 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1394 int *nadd, int *naddr, u_int32_t ticket, int flags)
1396 struct pfr_ktableworkq tableq;
1397 struct pfr_kentryworkq addrq;
1398 struct pfr_ktable *kt, *rt, *shadow, key;
1399 struct pfr_kentry *p;
1400 struct pfr_addr *ad;
1401 struct pf_ruleset *rs;
1402 int i, rv, xadd = 0, xaddr = 0;
1406 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1407 if (size && !(flags & PFR_FLAG_ADDRSTOO))
1409 if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1410 flags & PFR_FLAG_USERIOCTL))
/* The ticket must match the transaction opened by pfr_ina_begin(). */
1412 rs = pf_find_ruleset(tbl->pfrt_anchor);
1413 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1415 tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1416 SLIST_INIT(&tableq);
1417 kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
1419 kt = pfr_create_ktable(tbl, 0, 1);
1422 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1424 if (!tbl->pfrt_anchor[0])
1427 /* find or create root table */
1428 bzero(&key, sizeof(key));
1429 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1430 rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1432 kt->pfrkt_root = rt;
1435 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1437 pfr_destroy_ktables(&tableq, 0);
1440 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1441 kt->pfrkt_root = rt;
1442 } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
/* Build the shadow table that will replace kt's contents on commit. */
1445 shadow = pfr_create_ktable(tbl, 0, 0);
1446 if (shadow == NULL) {
1447 pfr_destroy_ktables(&tableq, 0);
1451 for (i = 0, ad = addr; i < size; i++, ad++) {
1452 if (pfr_validate_addr(ad))
1454 if (pfr_lookup_addr(shadow, ad, 1) != NULL)
1456 p = pfr_create_kentry(ad);
1459 if (pfr_route_kentry(shadow, p)) {
1460 pfr_destroy_kentry(p);
1463 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1466 if (!(flags & PFR_FLAG_DUMMY)) {
1467 if (kt->pfrkt_shadow != NULL)
1468 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1469 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1470 pfr_insert_ktables(&tableq);
1471 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1472 xaddr : NO_ADDRESSES;
1473 kt->pfrkt_shadow = shadow;
1475 pfr_clean_node_mask(shadow, &addrq);
1476 pfr_destroy_ktable(shadow, 0);
1477 pfr_destroy_ktables(&tableq, 0);
1478 pfr_destroy_kentries(&addrq);
/* Error path: tear down everything created for this definition. */
1486 pfr_destroy_ktable(shadow, 0);
1487 pfr_destroy_ktables(&tableq, 0);
1488 pfr_destroy_kentries(&addrq);
/*
 * Abort an in-progress table transaction for the anchor named in 'trs'.
 * Collects every table still flagged INACTIVE in that anchor and clears
 * the flag via pfr_setflags_ktables(), which also discards shadow state.
 * The caller's ticket must match the open transaction on the ruleset.
 * NOTE(review): this excerpt elides lines (declarations/returns/braces);
 * comments describe only the visible code.
 */
1493 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1495 struct pfr_ktableworkq workq;
1496 struct pfr_ktable *p;
1497 struct pf_ruleset *rs;
/* Only the dry-run flag is legal for rollback. */
1502 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1503 rs = pf_find_ruleset(trs->pfrt_anchor);
/* Transaction must be open and the ticket must match. */
1504 if (rs == NULL || !rs->topen || ticket != rs->tticket)
/* Queue every INACTIVE table in this anchor for flag removal. */
1507 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1508 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1509 pfr_skip_table(trs, p, 0))
/* nflags = current flags minus INACTIVE; applied below. */
1511 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1512 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
/* Unless this is a dry run, apply the new flags and drop the ruleset
 * if it became empty. */
1515 if (!(flags & PFR_FLAG_DUMMY)) {
1516 pfr_setflags_ktables(&workq);
1518 pf_remove_if_empty_ruleset(rs);
/*
 * Commit a table transaction for the anchor named in 'trs': every table
 * flagged INACTIVE in that anchor has its shadow contents merged into
 * the live table by pfr_commit_ktable().  Counts additions (tables that
 * were not previously ACTIVE) and changes via xadd/xchange; reported
 * through *nadd / *nchange on success.
 * NOTE(review): this excerpt elides lines; comments cover visible code.
 */
1526 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1527 int *nchange, int flags)
1529 struct pfr_ktable *p, *q;
1530 struct pfr_ktableworkq workq;
1531 struct pf_ruleset *rs;
1532 int xadd = 0, xchange = 0;
/* Single timestamp so all committed entries share one tzero. */
1533 long tzero = time_second;
1537 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1538 rs = pf_find_ruleset(trs->pfrt_anchor);
/* Transaction must be open with a matching ticket. */
1539 if (rs == NULL || !rs->topen || ticket != rs->tticket)
/* Collect the INACTIVE tables belonging to this anchor. */
1543 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1544 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1545 pfr_skip_table(trs, p, 0))
1547 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
/* Already-ACTIVE tables count as changes, not additions. */
1548 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1554 if (!(flags & PFR_FLAG_DUMMY)) {
/* Manual walk because pfr_commit_ktable() may unlink the node;
 * 'q' is saved before the element can be touched. */
1555 for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
1556 q = SLIST_NEXT(p, pfrkt_workq);
1557 pfr_commit_ktable(p, tzero);
1560 pf_remove_if_empty_ruleset(rs);
1564 if (nchange != NULL)
/*
 * Merge a table's shadow (transaction) contents into the live table.
 * Three cases:
 *   1. shadow has NO_ADDRESSES: only (re)activate / clear stats;
 *   2. live table is ACTIVE: compute add/change/delete sets entry by
 *      entry so existing entries and their counters survive;
 *   3. live table inactive: cheap wholesale swap of the radix heads.
 * Finally the shadow is destroyed and the merged flags applied.
 * NOTE(review): this excerpt elides lines; comments cover visible code.
 */
1571 pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
1573 struct pfr_ktable *shadow = kt->pfrkt_shadow;
/* Case 1: transaction defined the table without addresses. */
1578 if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1579 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1580 pfr_clstats_ktable(kt, tzero, 1);
/* Case 2: live table may already hold entries - do a diff merge. */
1581 } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1582 /* kt might contain addresses */
1583 struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
1584 struct pfr_kentry *p, *q, *next;
/* Pull all shadow entries off their radix tree into addrq. */
1587 pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1590 SLIST_INIT(&changeq);
1592 SLIST_INIT(&garbageq);
1593 pfr_clean_node_mask(shadow, &addrq);
1594 for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
1595 next = SLIST_NEXT(p, pfrke_workq); /* XXX */
1596 pfr_copyout_addr(&ad, p);
/* Exact-match lookup in the live table (and mark it). */
1597 q = pfr_lookup_addr(kt, &ad, 1);
/* Entry exists: only the negate bit may have changed. */
1599 if (q->pfrke_not != p->pfrke_not)
1600 SLIST_INSERT_HEAD(&changeq, q,
/* Shadow copy of an existing entry is now garbage. */
1603 SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
/* New entry: stamp it and queue for insertion. */
1605 p->pfrke_tzero = tzero;
1606 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
/* Live entries never marked above are deletions. */
1609 pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1610 pfr_insert_kentries(kt, &addq, tzero);
1611 pfr_remove_kentries(kt, &delq);
/* Flip the negate bit and reset stats on changed entries. */
1612 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
1613 pfr_destroy_kentries(&garbageq);
/* Case 3: live table is empty - swap trees wholesale. */
1615 /* kt cannot contain addresses */
1616 SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1618 SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1620 SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1621 pfr_clstats_ktable(kt, tzero, 1);
/* New flags: shadow's user flags + live set flags + ACTIVE,
 * with INACTIVE stripped since the transaction is done. */
1623 nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1624 (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1625 & ~PFR_TFLAG_INACTIVE;
1626 pfr_destroy_ktable(shadow, 0);
1627 kt->pfrkt_shadow = NULL;
1628 pfr_setflags_ktable(kt, nflags);
/*
 * Validate a user-supplied pfr_table: non-empty NUL-terminated name with
 * clean zero padding, optionally reject the reserved anchor, canonical
 * anchor path, and no flags outside 'allowedflags'.
 * NOTE(review): return statements are elided in this excerpt.
 */
1632 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
/* Name must not be empty. */
1636 if (!tbl->pfrt_name[0])
/* Userland may not touch tables in the reserved anchor. */
1638 if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
/* Last byte must be NUL, i.e. the name fits the buffer. */
1640 if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
/* Everything after the terminator must be zeroed (no hidden bytes
 * that would break memcmp/RB comparisons). */
1642 for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1643 if (tbl->pfrt_name[i])
1645 if (pfr_fix_anchor(tbl->pfrt_anchor))
/* Reject any flag bits the caller is not allowed to set. */
1647 if (tbl->pfrt_flags & ~allowedflags)
1653 * Rewrite anchors referenced by tables to remove slashes
1654 * and check for validity.
1657 pfr_fix_anchor(char *anchor)
1659 size_t siz = MAXPATHLEN;
/* Absolute path: strip the leading '/' run and shift the string
 * left in place, zero-filling the vacated tail. */
1662 if (anchor[0] == '/') {
1668 while (*++path == '/')
1670 bcopy(path, anchor, siz - off);
1671 memset(anchor + siz - off, 0, off);
/* Must be NUL-terminated within MAXPATHLEN... */
1673 if (anchor[siz - 1])
/* ...and padded with zeros after the terminator. */
1675 for (i = strlen(anchor); i < siz; i++)
/*
 * Return the number of tables visible under 'filter': all rulesets,
 * a specific anchor (-1 if the anchor does not exist), or the main
 * ruleset when no anchor is given.
 */
1682 pfr_table_count(struct pfr_table *filter, int flags)
1684 struct pf_ruleset *rs;
1688 if (flags & PFR_FLAG_ALLRSETS)
1689 return (pfr_ktable_cnt);
1690 if (filter->pfrt_anchor[0]) {
1691 rs = pf_find_ruleset(filter->pfrt_anchor);
/* -1 signals "no such anchor" to the caller. */
1692 return ((rs != NULL) ? rs->tables : -1);
1694 return (pf_main_ruleset.tables);
/*
 * Predicate: nonzero when table 'kt' should be skipped for 'filter'.
 * ALLRSETS matches everything; otherwise the anchors must be equal.
 */
1698 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1700 if (flags & PFR_FLAG_ALLRSETS)
1702 if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
/* Insert every table on the work queue; see pfr_insert_ktable(). */
1708 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1710 struct pfr_ktable *p;
1712 SLIST_FOREACH(p, workq, pfrkt_workq)
1713 pfr_insert_ktable(p);
/*
 * Link a ktable into the global RB tree and, if it has a root table in
 * the main ruleset, take an anchor reference on that root - setting
 * REFDANCHOR on the 0 -> 1 refcount transition.
 */
1717 pfr_insert_ktable(struct pfr_ktable *kt)
1722 RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1724 if (kt->pfrkt_root != NULL)
/* Post-increment: true only when this is the first reference. */
1725 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1726 pfr_setflags_ktable(kt->pfrkt_root,
1727 kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
/*
 * Apply each queued table's pending pfrkt_nflags.  Manual iteration
 * with a saved 'q' because pfr_setflags_ktable() can destroy 'p'.
 */
1731 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1733 struct pfr_ktable *p, *q;
1735 for (p = SLIST_FIRST(workq); p; p = q) {
1736 q = SLIST_NEXT(p, pfrkt_workq);
1737 pfr_setflags_ktable(p, p->pfrkt_nflags);
/*
 * Transition a ktable to flag set 'newf', enforcing flag invariants:
 * a table that is neither referenced nor persistent cannot stay ACTIVE,
 * and an inactive table keeps no user flags.  If no "setmask" flags
 * remain the table is unlinked and destroyed (dropping the root-table
 * anchor reference on the way).  Deactivation flushes addresses;
 * clearing INACTIVE discards any shadow table.
 */
1742 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1744 struct pfr_kentryworkq addrq;
/* No rule reference and not persistent => cannot be ACTIVE. */
1748 if (!(newf & PFR_TFLAG_REFERENCED) &&
1749 !(newf & PFR_TFLAG_PERSIST))
1750 newf &= ~PFR_TFLAG_ACTIVE;
/* Inactive tables carry no user-settable flags. */
1751 if (!(newf & PFR_TFLAG_ACTIVE))
1752 newf &= ~PFR_TFLAG_USRMASK;
/* Nothing keeps this table alive: unlink and destroy it. */
1753 if (!(newf & PFR_TFLAG_SETMASK)) {
1754 RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
1755 if (kt->pfrkt_root != NULL)
/* Drop anchor ref; clear REFDANCHOR on last reference. */
1756 if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1757 pfr_setflags_ktable(kt->pfrkt_root,
1758 kt->pfrkt_root->pfrkt_flags &
1759 ~PFR_TFLAG_REFDANCHOR)
1760 pfr_destroy_ktable(kt, 1);
/* Going inactive with entries present: flush them. */
1764 if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1765 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1766 pfr_remove_kentries(kt, &addrq);
/* No longer mid-transaction: discard the shadow table. */
1768 if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1769 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1770 kt->pfrkt_shadow = NULL;
1772 kt->pfrkt_flags = newf;
/* Clear statistics on every queued table; see pfr_clstats_ktable(). */
1776 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1778 struct pfr_ktable *p;
1780 SLIST_FOREACH(p, workq, pfrkt_workq)
1781 pfr_clstats_ktable(p, tzero, recurse);
/*
 * Zero a table's packet/byte/match counters and reset its timestamp to
 * 'tzero'.  With 'recurse' set, also clear per-entry statistics.
 */
1785 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1787 struct pfr_kentryworkq addrq;
/* recurse: gather all entries and clear their counters too. */
1790 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1791 pfr_clstats_kentries(&addrq, tzero, 0);
1793 bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
1794 bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
1795 kt->pfrkt_match = kt->pfrkt_nomatch = 0;
1796 kt->pfrkt_tzero = tzero;
/*
 * Allocate and initialize a kernel table from the user template 'tbl'.
 * Optionally attaches it to its ruleset (creating the ruleset if
 * needed) and always initializes both the IPv4 and IPv6 radix heads,
 * keyed on the full address width in bits.  Returns NULL on failure
 * (failure paths elided in this excerpt); partial state is released
 * via pfr_destroy_ktable().
 */
1799 static struct pfr_ktable *
1800 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
1802 struct pfr_ktable *kt;
1803 struct pf_ruleset *rs;
/* M_NOWAIT: may be called in contexts where sleeping is not safe. */
1807 kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
1812 if (attachruleset) {
1813 rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
1815 pfr_destroy_ktable(kt, 0);
/* Radix key offsets: bit position of the address within the
 * sockaddr, so lookups compare only address bytes. */
1822 if (!rn_inithead((void **)&kt->pfrkt_ip4,
1823 offsetof(struct sockaddr_in, sin_addr) * 8) ||
1824 !rn_inithead((void **)&kt->pfrkt_ip6,
1825 offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
1826 pfr_destroy_ktable(kt, 0);
1829 kt->pfrkt_tzero = tzero;
/*
 * Destroy every table on the work queue.  Saved-next iteration because
 * pfr_destroy_ktable() frees the current element.
 */
1835 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1837 struct pfr_ktable *p, *q;
1839 for (p = SLIST_FIRST(workq); p; p = q) {
1840 q = SLIST_NEXT(p, pfrkt_workq);
1841 pfr_destroy_ktable(p, flushaddr);
/*
 * Free a ktable and everything it owns: optionally its address entries
 * ('flushaddr'), both radix heads, any shadow table (recursively), and
 * its ruleset reference (removing the ruleset if it became empty).
 */
1846 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
1848 struct pfr_kentryworkq addrq;
/* flushaddr: unhook and free all entries before the trees go. */
1851 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1852 pfr_clean_node_mask(kt, &addrq);
1853 pfr_destroy_kentries(&addrq);
1855 if (kt->pfrkt_ip4 != NULL) {
1856 RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip4);
/* Heads were allocated by rn_inithead() from M_RTABLE. */
1857 free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
1859 if (kt->pfrkt_ip6 != NULL) {
1860 RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip6);
1861 free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
1863 if (kt->pfrkt_shadow != NULL)
1864 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
1865 if (kt->pfrkt_rs != NULL) {
1866 kt->pfrkt_rs->tables--;
1867 pf_remove_if_empty_ruleset(kt->pfrkt_rs);
1869 free(kt, M_PFTABLE);
/* RB-tree comparator: order by table name, then by anchor path. */
1873 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
1877 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
1879 return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
/*
 * Find the ktable matching a user pfr_table.  The cast is safe only
 * because struct pfr_ktable begins with the pfr_table key fields the
 * comparator reads (see comment below).
 */
1882 static struct pfr_ktable *
1883 pfr_lookup_table(struct pfr_table *tbl)
1885 /* struct pfr_ktable start like a struct pfr_table */
1886 return (RB_FIND(pfr_ktablehead, &pfr_ktables,
1887 (struct pfr_ktable *)tbl));
/*
 * Test whether address 'a' (family 'af') matches table 'kt'.
 * Falls back to the root table when this one is not ACTIVE.  Performs a
 * longest-prefix radix lookup; a hit on a non-negated entry is a match.
 * Updates the table's match/nomatch counters as a side effect.
 */
1891 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
1893 struct pfr_kentry *ke = NULL;
/* Anchor tables defer to their root-table counterpart. */
1898 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1899 kt = kt->pfrkt_root;
1900 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1907 struct sockaddr_in sin;
/* Build a sockaddr_in key for the v4 radix lookup. */
1909 bzero(&sin, sizeof(sin));
1910 sin.sin_len = sizeof(sin);
1911 sin.sin_family = AF_INET;
1912 sin.sin_addr.s_addr = a->addr32[0];
1913 ke = (struct pfr_kentry *)rn_match(&sin, kt->pfrkt_ip4);
/* Ignore the radix tree's internal root nodes. */
1914 if (ke && KENTRY_RNF_ROOT(ke))
1922 struct sockaddr_in6 sin6;
1924 bzero(&sin6, sizeof(sin6));
1925 sin6.sin6_len = sizeof(sin6);
1926 sin6.sin6_family = AF_INET6;
1927 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
1928 ke = (struct pfr_kentry *)rn_match(&sin6, kt->pfrkt_ip6);
1929 if (ke && KENTRY_RNF_ROOT(ke))
/* A negated ("!") entry means the address does NOT match. */
1935 match = (ke && !ke->pfrke_not);
1939 kt->pfrkt_nomatch++;
/*
 * Account a packet of 'len' bytes against table 'kt' for address 'a':
 * bumps the table's per-direction/per-op packet and byte counters, and
 * per-entry counters when the table has PFR_TFLAG_COUNTERS.  'notrule'
 * tells us whether the rule that fired was a negated table match; a
 * disagreement with the lookup result is recorded as PFR_OP_XPASS.
 */
1944 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
1945 u_int64_t len, int dir_out, int op_pass, int notrule)
1947 struct pfr_kentry *ke = NULL;
/* Same root-table fallback as pfr_match_addr(). */
1949 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1950 kt = kt->pfrkt_root;
1951 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1958 struct sockaddr_in sin;
1960 bzero(&sin, sizeof(sin));
1961 sin.sin_len = sizeof(sin);
1962 sin.sin_family = AF_INET;
1963 sin.sin_addr.s_addr = a->addr32[0];
1964 ke = (struct pfr_kentry *)rn_match(&sin, kt->pfrkt_ip4);
1965 if (ke && KENTRY_RNF_ROOT(ke))
1973 struct sockaddr_in6 sin6;
1975 bzero(&sin6, sizeof(sin6));
1976 sin6.sin6_len = sizeof(sin6);
1977 sin6.sin6_family = AF_INET6;
1978 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
1979 ke = (struct pfr_kentry *)rn_match(&sin6, kt->pfrkt_ip6);
1980 if (ke && KENTRY_RNF_ROOT(ke))
1986 panic("%s: unknown address family %u", __func__, af);
/* Lookup result disagrees with the rule's notion of a match:
 * count under XPASS instead.  Only expected for PASS ops. */
1988 if ((ke == NULL || ke->pfrke_not) != notrule) {
1989 if (op_pass != PFR_OP_PASS)
1990 printf("pfr_update_stats: assertion failed.\n");
1991 op_pass = PFR_OP_XPASS;
1993 kt->pfrkt_packets[dir_out][op_pass]++;
1994 kt->pfrkt_bytes[dir_out][op_pass] += len;
/* Per-entry counters are allocated lazily; XPASS hits are not
 * attributed to an entry. */
1995 if (ke != NULL && op_pass != PFR_OP_XPASS &&
1996 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
1997 if (ke->pfrke_counters == NULL)
1998 ke->pfrke_counters = uma_zalloc(V_pfr_kcounters_z,
/* Allocation may fail (nowait zone); silently skip then. */
2000 if (ke->pfrke_counters != NULL) {
2001 ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
2002 ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
/*
 * Find-or-create the table 'name' for use by a rule in ruleset 'rs',
 * creating the root-table placeholder in the main ruleset for anchored
 * tables, and take a rule reference - setting REFERENCED on the 0 -> 1
 * refcount transition.  Returns the ktable (NULL paths elided here).
 */
2008 pfr_attach_table(struct pf_ruleset *rs, char *name)
2010 struct pfr_ktable *kt, *rt;
2011 struct pfr_table tbl;
2012 struct pf_anchor *ac = rs->anchor;
2016 bzero(&tbl, sizeof(tbl));
2017 strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
/* Anchored rulesets qualify the table by the anchor path. */
2019 strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2020 kt = pfr_lookup_table(&tbl);
2022 kt = pfr_create_ktable(&tbl, time_second, 1);
/* For anchored tables, ensure a root table (same name, empty
 * anchor) exists in the main ruleset and link to it. */
2026 bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2027 rt = pfr_lookup_table(&tbl);
2029 rt = pfr_create_ktable(&tbl, 0, 1);
2031 pfr_destroy_ktable(kt, 0);
2034 pfr_insert_ktable(rt);
2036 kt->pfrkt_root = rt;
2038 pfr_insert_ktable(kt);
/* First rule reference marks the table REFERENCED. */
2040 if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2041 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
/*
 * Drop a rule reference on 'kt'; when the last one goes away, clear
 * REFERENCED (which may cascade into destroying the table, see
 * pfr_setflags_ktable()).
 */
2046 pfr_detach_table(struct pfr_ktable *kt)
2050 KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
2051 __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));
2053 if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2054 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
/*
 * Round-robin address selection from a table for address pools
 * (route-to / nat with a table source).  '*pidx' is the persistent
 * iteration index and 'counter' the last address handed out; together
 * they let successive calls walk through each CIDR block address by
 * address, skipping nested (more specific) blocks.
 * NOTE(review): this excerpt elides lines (loop labels, returns);
 * comments describe only the visible code.
 */
2058 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2061 struct pf_addr *addr, *cur, *mask;
2062 union sockaddr_union uaddr, umask;
2063 struct pfr_kentry *ke, *ke2 = NULL;
2064 int idx = -1, use_counter = 0;
/* Prepare a sockaddr template for later radix lookups. */
2068 uaddr.sin.sin_len = sizeof(struct sockaddr_in);
2069 uaddr.sin.sin_family = AF_INET;
2072 uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
2073 uaddr.sin6.sin6_family = AF_INET6;
/* 'addr' aliases the address portion of uaddr for family 'af'. */
2076 addr = SUNION2PF(&uaddr, af);
2078 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2079 kt = kt->pfrkt_root;
2080 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
/* Resume from the previous call's position if we have one. */
2085 if (counter != NULL && idx >= 0)
2091 ke = pfr_kentry_byidx(kt, idx, af);
2093 kt->pfrkt_nomatch++;
2096 pfr_prepare_network(&umask, af, ke->pfrke_net);
2097 cur = SUNION2PF(&ke->pfrke_sa, af);
2098 mask = SUNION2PF(&umask, af);
2101 /* is supplied address within block? */
2102 if (!PF_MATCHA(0, cur, mask, counter, af)) {
2103 /* no, go to next block in table */
/* Saved counter is inside this block: continue from it. */
2108 PF_ACPY(addr, counter, af);
2110 /* use first address of block */
2111 PF_ACPY(addr, cur, af);
2114 if (!KENTRY_NETWORK(ke)) {
2115 /* this is a single IP address - no possible nested block */
2116 PF_ACPY(counter, addr, af);
2122 /* we don't want to use a nested block */
/* Exact radix lookup of the candidate address to detect a more
 * specific (nested) entry covering it. */
2125 ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2129 ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2133 /* no need to check KENTRY_RNF_ROOT() here */
2135 /* lookup return the same block - perfect */
2136 PF_ACPY(counter, addr, af);
2142 /* we need to increase the counter past the nested block */
/* NOTE(review): AF_INET is passed here regardless of 'af'; for
 * IPv6 this looks wrong (should presumably be 'af') - confirm
 * against upstream pf_table.c history before relying on it. */
2143 pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net);
/* Jump to the last address of the nested block, then retest. */
2144 PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af);
2146 if (!PF_MATCHA(0, cur, mask, addr, af)) {
2147 /* ok, we reached the end of our main block */
2148 /* go to next block in table */
/*
 * Return the idx-th entry of table 'kt' for family 'af' by walking the
 * matching radix tree with a PFRW_POOL_GET walker (the walker counts
 * entries and stops at the requested index).
 */
2156 static struct pfr_kentry *
2157 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2159 struct pfr_walktree w;
2161 bzero(&w, sizeof(w));
2162 w.pfrw_op = PFRW_POOL_GET;
2168 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2169 return (w.pfrw_kentry);
2173 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2174 return (w.pfrw_kentry);
2182 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2184 struct pfr_walktree w;
2186 bzero(&w, sizeof(w));
2187 w.pfrw_op = PFRW_DYNADDR_UPDATE;
2190 dyn->pfid_acnt4 = 0;
2191 dyn->pfid_acnt6 = 0;
2192 if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2193 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2194 if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2195 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);