/*-
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <vm/uma.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#define	ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define	SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define	SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define	NO_ADDRESSES		(-1)
#define	ENQUEUE_UNMARKED_ONLY	(1)
#define	INVERT_NEG_FLAG		(1)
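/*
 * Walker context handed to the radix-tree walks below: pfrw_op selects
 * what pfr_walktree() does per node, and the union carries the
 * per-operation cursor (output buffer, work queue, pool index or
 * dynaddr state).
 */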
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
};
#define	pfrw_addr	pfrw_1.pfrw1_addr
#define	pfrw_astats	pfrw_1.pfrw1_astats
#define	pfrw_workq	pfrw_1.pfrw1_workq
#define	pfrw_kentry	pfrw_1.pfrw1_kentry
#define	pfrw_dyn	pfrw_1.pfrw1_dyn
#define	pfrw_cnt	pfrw_free
#define	senderr(e)	do { rv = (e); goto _bad; } while (0)

static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
static VNET_DEFINE(uma_zone_t, pfr_kentry_z);
#define	V_pfr_kentry_z		VNET(pfr_kentry_z)
static VNET_DEFINE(uma_zone_t, pfr_kcounters_z);
#define	V_pfr_kcounters_z	VNET(pfr_kcounters_z)

static struct pf_addr	 pfr_ffaddr = {
	.addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
};
static void		 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
static int		 pfr_validate_addr(struct pfr_addr *);
static void		 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
static void		 pfr_mark_addrs(struct pfr_ktable *);
static struct pfr_kentry
			*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *);
static void		 pfr_destroy_kentries(struct pfr_kentryworkq *);
static void		 pfr_destroy_kentry(struct pfr_kentry *);
static void		 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
static void		 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
static void		 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
static void		 pfr_reset_feedback(struct pfr_addr *, int);
static void		 pfr_prepare_network(union sockaddr_union *, int, int);
static int		 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
static int		 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
static int		 pfr_walktree(struct radix_node *, void *);
static int		 pfr_validate_table(struct pfr_table *, int, int);
static int		 pfr_fix_anchor(char *);
static void		 pfr_commit_ktable(struct pfr_ktable *, long);
static void		 pfr_insert_ktables(struct pfr_ktableworkq *);
static void		 pfr_insert_ktable(struct pfr_ktable *);
static void		 pfr_setflags_ktables(struct pfr_ktableworkq *);
static void		 pfr_setflags_ktable(struct pfr_ktable *, int);
static void		 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
static void		 pfr_clstats_ktable(struct pfr_ktable *, long, int);
static struct pfr_ktable
			*pfr_create_ktable(struct pfr_table *, long, int);
static void		 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
static void		 pfr_destroy_ktable(struct pfr_ktable *, int);
static int		 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
static struct pfr_ktable
			*pfr_lookup_table(struct pfr_table *);
static void		 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
static int		 pfr_table_count(struct pfr_table *, int);
static int		 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
static struct pfr_kentry
			*pfr_kentry_byidx(struct pfr_ktable *, int, int);

static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

static VNET_DEFINE(struct pfr_ktablehead, pfr_ktables);
#define	V_pfr_ktables	VNET(pfr_ktables)
static VNET_DEFINE(struct pfr_table, pfr_nulltable);
#define	V_pfr_nulltable	VNET(pfr_nulltable)
static VNET_DEFINE(int, pfr_ktable_cnt);
#define	V_pfr_ktable_cnt	VNET(pfr_ktable_cnt)
void
pfr_initialize(void)
{

	V_pfr_kentry_z = uma_zcreate("pf table entries",
	    sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    0);
	V_pfr_kcounters_z = uma_zcreate("pf table counters",
	    sizeof(struct pfr_kcounters), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
}

void
pfr_cleanup(void)
{

	uma_zdestroy(V_pfr_kentry_z);
	uma_zdestroy(V_pfr_kcounters_z);
}
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__));
	}
	return (0);
}
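/*
 * Add entries to a table.  A throw-away table (tmpkt) is used purely to
 * catch duplicates within the caller's own list: every accepted entry
 * is routed into tmpkt first, so a repeated address is seen by the
 * second pfr_lookup_addr() and flagged PFR_FB_DUPLICATE.
 */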
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		*ad;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		q = pfr_lookup_addr(tmpkt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad->pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else
				ad->pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad->pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_insert_kentries(kt, &workq, tzero);
	else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, rv, xdel = 0, log = 1;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here, with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n',
	 * one is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0, ad = addr; i < size; i++, ad++) {
			if (pfr_validate_addr(ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad->pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else
				ad->pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad->pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_remove_kentries(kt, &workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}
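/*
 * Make the table contents exactly match the given list: unlisted
 * entries are deleted, missing ones added, and entries whose negation
 * flag differs are queued on changeq and inverted.  With
 * PFR_FLAG_FEEDBACK the deleted addresses are reported in the spare
 * room after the first 'size' slots of the caller's buffer.
 */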
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		/*
		 * XXXGL: understand pf_if usage of this function
		 * and make ad a moving pointer
		 */
		bcopy(addr + i, &ad, sizeof(ad));
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			bcopy(&ad, addr + i, sizeof(ad));
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			bcopy(&ad, addr + size + i, sizeof(ad));
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
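/*
 * Test which of the given (host) addresses match the table; the
 * per-address feedback distinguishes a match, a match on a negated
 * entry and no match at all.
 */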
int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, xmatch = 0;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			return (EINVAL);
		if (ADDR_NETWORK(ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(ad, p);
		ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}
int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w);
	if (rv)
		return (rv);

	KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
	    w.pfrw_free));

	*size = kt->pfrkt_cnt;
	return (0);
}
int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv;
	long			 tzero = time_second;

	PF_RULES_RASSERT();

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, rv, xzero = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad->pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_clstats_kentries(&workq, 0, 0);
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}
static int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}
static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}
static void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}
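/*
 * Look up a single address in a table.  Network entries are found via
 * an exact rn_lookup() on address plus prepared mask; host addresses
 * use longest-prefix rn_match(), and with 'exact' set a covering
 * network entry does not count as a hit.
 */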
static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_head	*head = NULL;
	struct pfr_kentry	*ke;

	PF_RULES_ASSERT();

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = &kt->pfrkt_ip4->rh;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = &kt->pfrkt_ip6->rh;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}
static struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
	struct pfr_kentry	*ke;

	ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
	if (ke == NULL)
		return (NULL);

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	return (ke);
}
static void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

static void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_counters)
		uma_zfree(V_pfr_kcounters_z, ke->pfrke_counters);
	uma_zfree(V_pfr_kentry_z, ke);
}
static void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad);
	if (p == NULL)
		return (ENOMEM);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}
static void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

static void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}
static void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		if (p->pfrke_counters) {
			uma_zfree(V_pfr_kcounters_z, p->pfrke_counters);
			p->pfrke_counters = NULL;
		}
		p->pfrke_tzero = tzero;
	}
}

static void
pfr_reset_feedback(struct pfr_addr *addr, int size)
{
	struct pfr_addr	*ad;
	int		 i;

	for (i = 0, ad = addr; i < size; i++, ad++)
		ad->pfra_fback = PFR_FB_NONE;
}
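/*
 * Build the netmask sockaddr corresponding to an address family and
 * prefix length, suitable as the mask argument of the radix-tree
 * routines.
 */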
static void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}
static int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_head	*head = NULL;

	PF_RULES_WASSERT();

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = &kt->pfrkt_ip4->rh;
	else if (ke->pfrke_af == AF_INET6)
		head = &kt->pfrkt_ip6->rh;

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);

	return (rn == NULL ? -1 : 0);
}

static int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_head	*head = NULL;

	if (ke->pfrke_af == AF_INET)
		head = &kt->pfrkt_ip4->rh;
	else if (ke->pfrke_af == AF_INET6)
		head = &kt->pfrkt_ip6->rh;

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}
static void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}
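/*
 * Per-node callback for all radix-tree walks; dispatches on the
 * pfrw_op that the caller set up (mark/sweep, enqueue, copy out
 * addresses or statistics, pool address selection, dynaddr update).
 */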
static int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			pfr_copyout_addr(w->pfrw_addr, ke);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			if (ke->pfrke_counters) {
				bcopy(ke->pfrke_counters->pfrkc_packets,
				    as.pfras_packets, sizeof(as.pfras_packets));
				bcopy(ke->pfrke_counters->pfrkc_bytes,
				    as.pfras_bytes, sizeof(as.pfras_bytes));
			} else {
				bzero(as.pfras_packets, sizeof(as.pfras_packets));
				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
			}
			as.pfras_tzero = ke->pfrke_tzero;

			bcopy(&as, w->pfrw_astats, sizeof(as));
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
	    {
		union sockaddr_union	pfr_mask;

		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa,
			    AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask,
			    AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa,
			    AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask,
			    AF_INET6);
		}
		break;
	    }
	}
	return (0);
}
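/*
 * The table-level operations below mirror the address operations
 * above: tables live in the per-vnet red-black tree V_pfr_ktables and
 * are collected on work queues first, then committed in a second pass
 * unless PFR_FLAG_DUMMY is set.
 */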
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}
int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
	}

	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

	*size = nn;
	return (0);
}
int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		bcopy(&p->pfrkt_ts, tbl++, sizeof(*tbl));
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);

	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

	*size = nn;
	return (0);
}
int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}
int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
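/*
 * Define the inactive (shadow) contents of a table for the transaction
 * identified by 'ticket'.  The addresses are loaded into a detached
 * shadow ktable that is hung off the live table as pfrkt_shadow and
 * only takes effect in pfr_ina_commit().
 */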
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}
int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 xadd = 0, xchange = 0;
	long			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}
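/*
 * Turn a shadow table into the live one.  Three cases: the shadow
 * carried no address list (clear stats only), the table is already
 * active (merge entry by entry so unchanged entries keep their
 * statistics), or it is not (simply swap the radix heads and counter).
 */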
static void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	PF_RULES_WASSERT();

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}
static int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}
/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
static int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}
static int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	PF_RULES_ASSERT();

	if (flags & PFR_FLAG_ALLRSETS)
		return (V_pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}
static int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}
static void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

static void
pfr_insert_ktable(struct pfr_ktable *kt)
{

	PF_RULES_WASSERT();

	RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
	V_pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}
static void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

static void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	 addrq;

	PF_RULES_WASSERT();

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		V_pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}
static void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

static void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	kt->pfrkt_tzero = tzero;
}
static struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	PF_RULES_WASSERT();

	kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}
static void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

static void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		rn_detachhead((void **)&kt->pfrkt_ip4);
	if (kt->pfrkt_ip6 != NULL)
		rn_detachhead((void **)&kt->pfrkt_ip6);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	free(kt, M_PFTABLE);
}
static int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

static struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
	    (struct pfr_ktable *)tbl));
}
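/*
 * Data-path lookup, called for every packet tested against a table.
 * Falls back to the root table of an anchor-attached table and updates
 * the match/nomatch statistics.
 */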
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	PF_RULES_RASSERT();

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
	    {
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
	    {
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET6 */
	default:
		panic("%s: unknown address family %u", __func__, af);
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		if (ke->pfrke_counters == NULL)
			ke->pfrke_counters = uma_zalloc(V_pfr_kcounters_z,
			    M_NOWAIT | M_ZERO);
		if (ke->pfrke_counters != NULL) {
			ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
			ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
		}
	}
}
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	PF_RULES_WASSERT();

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{

	PF_RULES_WASSERT();
	KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
	    __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));

	if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}
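/*
 * Pick an address from the table for round-robin address pools:
 * '*pidx' is the index of the current block, 'counter' the last
 * address handed out; nested blocks are stepped over by advancing the
 * counter past them.
 */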
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    sa_family_t af)
{
	struct pf_addr		*addr, *cur, *mask;
	union sockaddr_union	 uaddr, umask;
	struct pfr_kentry	*ke, *ke2 = NULL;
	int			 idx = -1, use_counter = 0;

	switch (af) {
	case AF_INET:
		uaddr.sin.sin_len = sizeof(struct sockaddr_in);
		uaddr.sin.sin_family = AF_INET;
		break;
	case AF_INET6:
		uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
		uaddr.sin6.sin6_family = AF_INET6;
		break;
	}
	addr = SUNION2PF(&uaddr, af);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL) {
		kt->pfrkt_nomatch++;
		return (1);
	}
	pfr_prepare_network(&umask, af, ke->pfrke_net);
	cur = SUNION2PF(&ke->pfrke_sa, af);
	mask = SUNION2PF(&umask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, cur, mask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, cur, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		kt->pfrkt_match++;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		switch (af) {
		case AF_INET:
			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
			    &kt->pfrkt_ip4->rh);
			break;
		case AF_INET6:
			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
			    &kt->pfrkt_ip6->rh);
			break;
		}
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			kt->pfrkt_match++;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, cur, mask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}
static struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
}