2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2002 Cedric Berger
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * - Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * - Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
31 * $OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
38 #include "opt_inet6.h"
40 #include <sys/param.h>
41 #include <sys/kernel.h>
43 #include <sys/malloc.h>
45 #include <sys/mutex.h>
46 #include <sys/refcount.h>
47 #include <sys/socket.h>
52 #include <net/pfvar.h>
/*
 * NOTE(review): this listing embeds original line numbers and elides
 * intermediate lines; several macro bodies below are visibly incomplete.
 */
/* Debug printf gated on pf's configured debug level. */
54 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
/* Validate an ioctl flag word: reject bits outside 'oklist' (tail elided). */
56 #define ACCEPT_FLAGS(flags, oklist) \
58 if ((flags & ~(oklist)) & \
63 #define FILLIN_SIN(sin, addr) \
65 (sin).sin_len = sizeof(sin); \
66 (sin).sin_family = AF_INET; \
67 (sin).sin_addr = (addr); \
70 #define FILLIN_SIN6(sin6, addr) \
72 (sin6).sin6_len = sizeof(sin6); \
73 (sin6).sin6_family = AF_INET6; \
74 (sin6).sin6_addr = (addr); \
77 #define SWAP(type, a1, a2) \
84 #define SUNION2PF(su, af) (((af)==AF_INET) ? \
85 (struct pf_addr *)&(su)->sin.sin_addr : \
86 (struct pf_addr *)&(su)->sin6.sin6_addr)
/* Address-family helpers: prefix width, and "is a network, not a host". */
88 #define AF_BITS(af) (((af)==AF_BITS is 32 for AF_INET, 128 otherwise */
88 #define AF_BITS(af) (((af)==AF_INET)?32:128)
89 #define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
90 #define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
91 #define KENTRY_RNF_ROOT(ke) \
92 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
/* Sentinel/option values used by the enqueue and walktree helpers below. */
94 #define NO_ADDRESSES (-1)
95 #define ENQUEUE_UNMARKED_ONLY (1)
96 #define INVERT_NEG_FLAG (1)
/*
 * Per-operation payload of the tree-walk argument (enclosing struct/union
 * declaration elided in this listing): exactly one member is meaningful
 * for a given pfrw_op.
 */
109 struct pfr_addr *pfrw1_addr;
110 struct pfr_astats *pfrw1_astats;
111 struct pfr_kentryworkq *pfrw1_workq;
112 struct pfr_kentry *pfrw1_kentry;
113 struct pfi_dynaddr *pfrw1_dyn;
/* Shorthand accessors for the union members above. */
118 #define pfrw_addr pfrw_1.pfrw1_addr
119 #define pfrw_astats pfrw_1.pfrw1_astats
120 #define pfrw_workq pfrw_1.pfrw1_workq
121 #define pfrw_kentry pfrw_1.pfrw1_kentry
122 #define pfrw_dyn pfrw_1.pfrw1_dyn
/* pfrw_free doubles as a running count for some walk operations. */
123 #define pfrw_cnt pfrw_free
/* Record the error code in 'rv' and jump to the local error exit. */
125 #define senderr(e) do { rv = (e); goto _bad; } while (0)
/* malloc(9) type for table bookkeeping allocations. */
127 static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
/* Per-VNET UMA zone backing struct pfr_kentry allocations. */
128 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_z);
129 #define V_pfr_kentry_z VNET(pfr_kentry_z)
/* All-ones address constant (initializer; closing brace elided here). */
131 static struct pf_addr pfr_ffaddr = {
132 .addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
/* Forward declarations for the static helpers implemented below. */
135 static void pfr_copyout_astats(struct pfr_astats *,
136 const struct pfr_kentry *,
137 const struct pfr_walktree *);
138 static void pfr_copyout_addr(struct pfr_addr *,
139 const struct pfr_kentry *ke);
140 static int pfr_validate_addr(struct pfr_addr *);
141 static void pfr_enqueue_addrs(struct pfr_ktable *,
142 struct pfr_kentryworkq *, int *, int);
143 static void pfr_mark_addrs(struct pfr_ktable *);
144 static struct pfr_kentry
145 *pfr_lookup_addr(struct pfr_ktable *,
146 struct pfr_addr *, int);
147 static bool pfr_create_kentry_counter(struct pfr_kcounters *,
149 static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *);
150 static void pfr_destroy_kentries(struct pfr_kentryworkq *);
151 static void pfr_destroy_kentry_counter(struct pfr_kcounters *,
153 static void pfr_destroy_kentry(struct pfr_kentry *);
154 static void pfr_insert_kentries(struct pfr_ktable *,
155 struct pfr_kentryworkq *, long);
156 static void pfr_remove_kentries(struct pfr_ktable *,
157 struct pfr_kentryworkq *);
158 static void pfr_clstats_kentries(struct pfr_kentryworkq *, long,
160 static void pfr_reset_feedback(struct pfr_addr *, int);
161 static void pfr_prepare_network(union sockaddr_union *, int, int);
162 static int pfr_route_kentry(struct pfr_ktable *,
163 struct pfr_kentry *);
164 static int pfr_unroute_kentry(struct pfr_ktable *,
165 struct pfr_kentry *);
166 static int pfr_walktree(struct radix_node *, void *);
167 static int pfr_validate_table(struct pfr_table *, int, int);
168 static int pfr_fix_anchor(char *);
169 static void pfr_commit_ktable(struct pfr_ktable *, long);
170 static void pfr_insert_ktables(struct pfr_ktableworkq *);
171 static void pfr_insert_ktable(struct pfr_ktable *);
172 static void pfr_setflags_ktables(struct pfr_ktableworkq *);
173 static void pfr_setflags_ktable(struct pfr_ktable *, int);
174 static void pfr_clstats_ktables(struct pfr_ktableworkq *, long,
176 static void pfr_clstats_ktable(struct pfr_ktable *, long, int);
177 static struct pfr_ktable
178 *pfr_create_ktable(struct pfr_table *, long, int);
179 static void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
180 static void pfr_destroy_ktable(struct pfr_ktable *, int);
181 static int pfr_ktable_compare(struct pfr_ktable *,
182 struct pfr_ktable *);
183 static struct pfr_ktable
184 *pfr_lookup_table(struct pfr_table *);
185 static void pfr_clean_node_mask(struct pfr_ktable *,
186 struct pfr_kentryworkq *);
187 static int pfr_skip_table(struct pfr_table *,
188 struct pfr_ktable *, int);
189 static struct pfr_kentry
190 *pfr_kentry_byidx(struct pfr_ktable *, int, int);
/* Red-black tree of all tables, keyed by pfr_ktable_compare(). */
192 static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
193 static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
/* Per-VNET state: the table tree, a zeroed template table, and a count. */
195 VNET_DEFINE_STATIC(struct pfr_ktablehead, pfr_ktables);
196 #define V_pfr_ktables VNET(pfr_ktables)
198 VNET_DEFINE_STATIC(struct pfr_table, pfr_nulltable);
199 #define V_pfr_nulltable VNET(pfr_nulltable)
201 VNET_DEFINE_STATIC(int, pfr_ktable_cnt);
202 #define V_pfr_ktable_cnt VNET(pfr_ktable_cnt)
/*
 * Fragments of the VNET init/teardown routines (enclosing function
 * signatures elided in this listing): create the kentry UMA zone, hook
 * it into the PF_LIMIT_TABLE_ENTRIES limit, and destroy it on cleanup.
 */
208 V_pfr_kentry_z = uma_zcreate("pf table entries",
209 sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
211 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
212 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
219 uma_zdestroy(V_pfr_kentry_z);
/*
 * Remove every address from table 'tbl'; *ndel receives the count.
 * Rejects unknown flags, invalid/missing/const tables.  Unless
 * PFR_FLAG_DUMMY is set the entries are actually removed.
 * (Return statements and some lines elided in this listing.)
 */
223 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
225 struct pfr_ktable *kt;
226 struct pfr_kentryworkq workq;
230 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
231 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
233 kt = pfr_lookup_table(tbl);
234 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
236 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
/* collect every entry, then delete outside the tree walk */
238 pfr_enqueue_addrs(kt, &workq, ndel, 0);
240 if (!(flags & PFR_FLAG_DUMMY)) {
241 pfr_remove_kentries(kt, &workq);
242 KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__));
/*
 * Add 'size' addresses from 'addr' to table 'tbl'.  New entries are
 * first routed into a scratch table (tmpkt) to detect duplicates
 * within the request itself; with PFR_FLAG_FEEDBACK each pfr_addr gets
 * a pfra_fback result code.  (Some lines elided in this listing.)
 */
248 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
249 int *nadd, int flags)
251 struct pfr_ktable *kt, *tmpkt;
252 struct pfr_kentryworkq workq;
253 struct pfr_kentry *p, *q;
256 long tzero = time_second;
260 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
261 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
263 kt = pfr_lookup_table(tbl);
264 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
266 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
268 tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
272 for (i = 0, ad = addr; i < size; i++, ad++) {
273 if (pfr_validate_addr(ad))
/* p: match in the real table, q: match in the scratch table */
275 p = pfr_lookup_addr(kt, ad, 1);
276 q = pfr_lookup_addr(tmpkt, ad, 1);
277 if (flags & PFR_FLAG_FEEDBACK) {
279 ad->pfra_fback = PFR_FB_DUPLICATE;
281 ad->pfra_fback = PFR_FB_ADDED;
282 else if (p->pfrke_not != ad->pfra_not)
283 ad->pfra_fback = PFR_FB_CONFLICT;
285 ad->pfra_fback = PFR_FB_NONE;
287 if (p == NULL && q == NULL) {
288 p = pfr_create_kentry(ad);
291 if (pfr_route_kentry(tmpkt, p)) {
292 pfr_destroy_kentry(p);
293 ad->pfra_fback = PFR_FB_NONE;
295 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
/* success path: detach from scratch table, commit unless dummy */
300 pfr_clean_node_mask(tmpkt, &workq);
301 if (!(flags & PFR_FLAG_DUMMY))
302 pfr_insert_kentries(kt, &workq, tzero);
304 pfr_destroy_kentries(&workq);
307 pfr_destroy_ktable(tmpkt, 0);
/* error path: undo scratch routing, free entries, reset feedback */
310 pfr_clean_node_mask(tmpkt, &workq);
311 pfr_destroy_kentries(&workq);
312 if (flags & PFR_FLAG_FEEDBACK)
313 pfr_reset_feedback(addr, size);
314 pfr_destroy_ktable(tmpkt, 0);
/*
 * Delete 'size' addresses from table 'tbl'; *ndel receives the count.
 * Chooses between a full-table mark/sweep and per-address lookups based
 * on log2 of the table size.  (Some lines elided in this listing.)
 */
319 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
320 int *ndel, int flags)
322 struct pfr_ktable *kt;
323 struct pfr_kentryworkq workq;
324 struct pfr_kentry *p;
326 int i, rv, xdel = 0, log = 1;
330 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
331 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
333 kt = pfr_lookup_table(tbl);
334 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
336 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
339 * there are two algorithms to choose from here.
341 * n: number of addresses to delete
342 * N: number of addresses in the table
344 * one is O(N) and is better for large 'n'
345 * one is O(n*LOG(N)) and is better for small 'n'
347 * following code try to decide which one is best.
/* log ends up as roughly log2(pfrkt_cnt) */
349 for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
351 if (size > kt->pfrkt_cnt/log) {
352 /* full table scan */
355 /* iterate over addresses to delete */
356 for (i = 0, ad = addr; i < size; i++, ad++) {
357 if (pfr_validate_addr(ad))
359 p = pfr_lookup_addr(kt, ad, 1);
/* second pass: classify each request and queue matching entries */
365 for (i = 0, ad = addr; i < size; i++, ad++) {
366 if (pfr_validate_addr(ad))
368 p = pfr_lookup_addr(kt, ad, 1);
369 if (flags & PFR_FLAG_FEEDBACK) {
371 ad->pfra_fback = PFR_FB_NONE;
372 else if (p->pfrke_not != ad->pfra_not)
373 ad->pfra_fback = PFR_FB_CONFLICT;
374 else if (p->pfrke_mark)
375 ad->pfra_fback = PFR_FB_DUPLICATE;
377 ad->pfra_fback = PFR_FB_DELETED;
379 if (p != NULL && p->pfrke_not == ad->pfra_not &&
382 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
386 if (!(flags & PFR_FLAG_DUMMY))
387 pfr_remove_kentries(kt, &workq);
392 if (flags & PFR_FLAG_FEEDBACK)
393 pfr_reset_feedback(addr, size);
/*
 * Replace the table contents with exactly the given address list:
 * computes add/delete/change work queues against the current contents,
 * optionally reporting per-address feedback (including deleted entries
 * appended after 'size' when *size2 allows).  Uses a scratch table to
 * detect duplicates in the request.  (Some lines elided in this listing.)
 */
398 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
399 int *size2, int *nadd, int *ndel, int *nchange, int flags,
400 u_int32_t ignore_pfrt_flags)
402 struct pfr_ktable *kt, *tmpkt;
403 struct pfr_kentryworkq addq, delq, changeq;
404 struct pfr_kentry *p, *q;
406 int i, rv, xadd = 0, xdel = 0, xchange = 0;
407 long tzero = time_second;
411 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
412 if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
415 kt = pfr_lookup_table(tbl);
416 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
418 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
420 tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
426 SLIST_INIT(&changeq);
427 for (i = 0; i < size; i++) {
429 * XXXGL: undertand pf_if usage of this function
430 * and make ad a moving pointer
432 bcopy(addr + i, &ad, sizeof(ad));
433 if (pfr_validate_addr(&ad))
435 ad.pfra_fback = PFR_FB_NONE;
436 p = pfr_lookup_addr(kt, &ad, 1);
439 ad.pfra_fback = PFR_FB_DUPLICATE;
/* existing entry whose negation flag differs -> change queue */
443 if (p->pfrke_not != ad.pfra_not) {
444 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
445 ad.pfra_fback = PFR_FB_CHANGED;
449 q = pfr_lookup_addr(tmpkt, &ad, 1);
451 ad.pfra_fback = PFR_FB_DUPLICATE;
454 p = pfr_create_kentry(&ad);
457 if (pfr_route_kentry(tmpkt, p)) {
458 pfr_destroy_kentry(p);
459 ad.pfra_fback = PFR_FB_NONE;
461 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
462 ad.pfra_fback = PFR_FB_ADDED;
467 if (flags & PFR_FLAG_FEEDBACK)
468 bcopy(&ad, addr + i, sizeof(ad));
/* everything left unmarked in the table is to be deleted */
470 pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
471 if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
472 if (*size2 < size+xdel) {
/* report deleted entries after the caller-supplied list */
477 SLIST_FOREACH(p, &delq, pfrke_workq) {
478 pfr_copyout_addr(&ad, p);
479 ad.pfra_fback = PFR_FB_DELETED;
480 bcopy(&ad, addr + size + i, sizeof(ad));
484 pfr_clean_node_mask(tmpkt, &addq);
485 if (!(flags & PFR_FLAG_DUMMY)) {
486 pfr_insert_kentries(kt, &addq, tzero);
487 pfr_remove_kentries(kt, &delq);
488 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
490 pfr_destroy_kentries(&addq);
497 if ((flags & PFR_FLAG_FEEDBACK) && size2)
499 pfr_destroy_ktable(tmpkt, 0);
/* error path: unroute and free pending adds, reset feedback */
502 pfr_clean_node_mask(tmpkt, &addq);
503 pfr_destroy_kentries(&addq);
504 if (flags & PFR_FLAG_FEEDBACK)
505 pfr_reset_feedback(addr, size);
506 pfr_destroy_ktable(tmpkt, 0);
/*
 * Test which of the given host addresses match the table; sets
 * pfra_fback to MATCH/NOTMATCH/NONE per address.  With
 * PFR_FLAG_REPLACE the matched entry is copied back to the caller.
 * (Some lines elided in this listing.)
 */
511 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
512 int *nmatch, int flags)
514 struct pfr_ktable *kt;
515 struct pfr_kentry *p;
521 ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
522 if (pfr_validate_table(tbl, 0, 0))
524 kt = pfr_lookup_table(tbl);
525 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
528 for (i = 0, ad = addr; i < size; i++, ad++) {
529 if (pfr_validate_addr(ad))
/* only host addresses can be tested, not networks */
531 if (ADDR_NETWORK(ad))
533 p = pfr_lookup_addr(kt, ad, 0);
534 if (flags & PFR_FLAG_REPLACE)
535 pfr_copyout_addr(ad, p);
536 ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
537 (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
538 if (p != NULL && !p->pfrke_not)
/*
 * Copy out all addresses of a table.  If the caller's buffer (*size) is
 * too small, only the required size is returned.  Walks both the IPv4
 * and IPv6 radix trees.  (Some lines elided in this listing.)
 */
547 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
550 struct pfr_ktable *kt;
551 struct pfr_walktree w;
556 ACCEPT_FLAGS(flags, 0);
557 if (pfr_validate_table(tbl, 0, 0))
559 kt = pfr_lookup_table(tbl);
560 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
562 if (kt->pfrkt_cnt > *size) {
563 *size = kt->pfrkt_cnt;
567 bzero(&w, sizeof(w));
568 w.pfrw_op = PFRW_GET_ADDRS;
570 w.pfrw_free = kt->pfrkt_cnt;
571 rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
573 rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
/* every slot must have been consumed by the walk */
578 KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
581 *size = kt->pfrkt_cnt;
/*
 * Copy out all addresses of a table together with their statistics;
 * optionally clears the per-entry stats afterwards (PFR_FLAG_CLSTATS).
 * (Some lines elided in this listing.)
 */
586 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
589 struct pfr_ktable *kt;
590 struct pfr_walktree w;
591 struct pfr_kentryworkq workq;
593 long tzero = time_second;
597 /* XXX PFR_FLAG_CLSTATS disabled */
598 ACCEPT_FLAGS(flags, 0);
599 if (pfr_validate_table(tbl, 0, 0))
601 kt = pfr_lookup_table(tbl);
602 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
604 if (kt->pfrkt_cnt > *size) {
605 *size = kt->pfrkt_cnt;
609 bzero(&w, sizeof(w));
610 w.pfrw_op = PFRW_GET_ASTATS;
611 w.pfrw_astats = addr;
612 w.pfrw_free = kt->pfrkt_cnt;
614 * Flags below are for backward compatibility. It was possible to have
615 * a table without per-entry counters. Now they are always allocated,
616 * we just discard data when reading it if table is not configured to
619 w.pfrw_flags = kt->pfrkt_flags;
620 rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
622 rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
624 if (!rv && (flags & PFR_FLAG_CLSTATS)) {
625 pfr_enqueue_addrs(kt, &workq, NULL, 0);
626 pfr_clstats_kentries(&workq, tzero, 0);
632 printf("pfr_get_astats: corruption detected (%d).\n",
636 *size = kt->pfrkt_cnt;
/*
 * Zero the statistics of the listed addresses; *nzero receives the
 * number of entries cleared.  With PFR_FLAG_FEEDBACK each address gets
 * CLEARED or NONE.  (Some lines elided in this listing.)
 */
641 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
642 int *nzero, int flags)
644 struct pfr_ktable *kt;
645 struct pfr_kentryworkq workq;
646 struct pfr_kentry *p;
648 int i, rv, xzero = 0;
652 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
653 if (pfr_validate_table(tbl, 0, 0))
655 kt = pfr_lookup_table(tbl);
656 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
659 for (i = 0, ad = addr; i < size; i++, ad++) {
660 if (pfr_validate_addr(ad))
662 p = pfr_lookup_addr(kt, ad, 1);
663 if (flags & PFR_FLAG_FEEDBACK) {
664 ad->pfra_fback = (p != NULL) ?
665 PFR_FB_CLEARED : PFR_FB_NONE;
668 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
673 if (!(flags & PFR_FLAG_DUMMY))
674 pfr_clstats_kentries(&workq, 0, 0);
679 if (flags & PFR_FLAG_FEEDBACK)
680 pfr_reset_feedback(addr, size);
/*
 * Sanity-check a user-supplied pfr_addr: prefix length within the
 * family's range, no host bits set past the prefix, no garbage in the
 * unused tail bytes, and a boolean negation flag.
 * (Some lines elided in this listing.)
 */
685 pfr_validate_addr(struct pfr_addr *ad)
689 switch (ad->pfra_af) {
692 if (ad->pfra_net > 32)
698 if (ad->pfra_net > 128)
/* reject set bits beyond the prefix boundary */
705 if (ad->pfra_net < 128 &&
706 (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
708 for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
709 if (((caddr_t)ad)[i])
711 if (ad->pfra_not && ad->pfra_not != 1)
/*
 * Walk both radix trees of 'kt' and put entries onto 'workq'
 * (PFRW_SWEEP when 'sweep' is set, otherwise PFRW_ENQUEUE).
 * Walk failures are only logged.
 */
719 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
720 int *naddr, int sweep)
722 struct pfr_walktree w;
725 bzero(&w, sizeof(w));
726 w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
727 w.pfrw_workq = workq;
728 if (kt->pfrkt_ip4 != NULL)
729 if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh,
731 printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
732 if (kt->pfrkt_ip6 != NULL)
733 if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
735 printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
/*
 * Walk both radix trees of 'kt' with PFRW_MARK (used to mark entries
 * before a set/delete pass).  Walk failures are only logged.
 */
741 pfr_mark_addrs(struct pfr_ktable *kt)
743 struct pfr_walktree w;
745 bzero(&w, sizeof(w));
746 w.pfrw_op = PFRW_MARK;
747 if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w))
748 printf("pfr_mark_addrs: IPv4 walktree failed.\n");
749 if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w))
750 printf("pfr_mark_addrs: IPv6 walktree failed.\n");
/*
 * Find the kentry matching 'ad' in table 'kt'.  Network addresses use
 * an exact rn_lookup with the prefix mask; host addresses use
 * rn_match.  RNF_ROOT sentinel nodes are ignored; with 'exact' a
 * network entry does not satisfy a host lookup.
 * (Some lines elided in this listing.)
 */
754 static struct pfr_kentry *
755 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
757 union sockaddr_union sa, mask;
758 struct radix_head *head = NULL;
759 struct pfr_kentry *ke;
763 bzero(&sa, sizeof(sa));
764 if (ad->pfra_af == AF_INET) {
765 FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
766 head = &kt->pfrkt_ip4->rh;
767 } else if ( ad->pfra_af == AF_INET6 ) {
768 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
769 head = &kt->pfrkt_ip6->rh;
771 if (ADDR_NETWORK(ad)) {
772 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
773 ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
774 if (ke && KENTRY_RNF_ROOT(ke))
777 ke = (struct pfr_kentry *)rn_match(&sa, head);
778 if (ke && KENTRY_RNF_ROOT(ke))
780 if (exact && ke && KENTRY_NETWORK(ke))
/*
 * Allocate the packet and byte counter(9) pair for one dir/op slot.
 * On partial failure the already-allocated counter is left for
 * pfr_destroy_kentry() to free.  (Return statements elided in listing.)
 */
787 pfr_create_kentry_counter(struct pfr_kcounters *kc, int pfr_dir, int pfr_op)
789 kc->pfrkc_packets[pfr_dir][pfr_op] = counter_u64_alloc(M_NOWAIT);
790 if (! kc->pfrkc_packets[pfr_dir][pfr_op])
793 kc->pfrkc_bytes[pfr_dir][pfr_op] = counter_u64_alloc(M_NOWAIT);
794 if (! kc->pfrkc_bytes[pfr_dir][pfr_op]) {
795 /* Previous allocation will be freed through
796 * pfr_destroy_kentry() */
/*
 * Allocate and initialize a kentry from a validated pfr_addr,
 * including its full set of per-direction/per-op counters.  Frees the
 * entry and returns failure if any counter allocation fails.
 * (Some lines elided in this listing.)
 */
805 static struct pfr_kentry *
806 pfr_create_kentry(struct pfr_addr *ad)
808 struct pfr_kentry *ke;
811 ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
815 if (ad->pfra_af == AF_INET)
816 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
817 else if (ad->pfra_af == AF_INET6)
818 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
819 ke->pfrke_af = ad->pfra_af;
820 ke->pfrke_net = ad->pfra_net;
821 ke->pfrke_not = ad->pfra_not;
822 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++)
823 for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++) {
824 if (! pfr_create_kentry_counter(&ke->pfrke_counters,
826 pfr_destroy_kentry(ke);
/*
 * Free every entry on the work queue.  The next pointer is fetched
 * before the free, so the list may be destroyed while iterating.
 */
834 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
836 struct pfr_kentry *p, *q;
838 for (p = SLIST_FIRST(workq); p != NULL; p = q) {
839 q = SLIST_NEXT(p, pfrke_workq);
840 pfr_destroy_kentry(p);
/* Free the packet/byte counter pair of one dir/op slot. */
845 pfr_destroy_kentry_counter(struct pfr_kcounters *kc, int pfr_dir, int pfr_op)
847 counter_u64_free(kc->pfrkc_packets[pfr_dir][pfr_op]);
848 counter_u64_free(kc->pfrkc_bytes[pfr_dir][pfr_op]);
/* Free all counters of a kentry, then return it to the UMA zone. */
852 pfr_destroy_kentry(struct pfr_kentry *ke)
856 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++)
857 for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++)
858 pfr_destroy_kentry_counter(&ke->pfrke_counters,
861 uma_zfree(V_pfr_kentry_z, ke);
/*
 * Route each queued entry into table 'kt' and stamp its creation time.
 * Routing failures are logged.  (Some lines elided in this listing.)
 */
865 pfr_insert_kentries(struct pfr_ktable *kt,
866 struct pfr_kentryworkq *workq, long tzero)
868 struct pfr_kentry *p;
871 SLIST_FOREACH(p, workq, pfrke_workq) {
872 rv = pfr_route_kentry(kt, p);
874 printf("pfr_insert_kentries: cannot route entry "
878 p->pfrke_counters.pfrkc_tzero = tzero;
/*
 * Insert a single address into 'kt' unless an exact match already
 * exists; stamps the creation time on success.
 * (Some lines elided in this listing.)
 */
885 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
887 struct pfr_kentry *p;
890 p = pfr_lookup_addr(kt, ad, 1);
893 p = pfr_create_kentry(ad);
897 rv = pfr_route_kentry(kt, p);
901 p->pfrke_counters.pfrkc_tzero = tzero;
/*
 * Unroute every queued entry from 'kt', then free them all.
 * (Some lines elided in this listing.)
 */
908 pfr_remove_kentries(struct pfr_ktable *kt,
909 struct pfr_kentryworkq *workq)
911 struct pfr_kentry *p;
914 SLIST_FOREACH(p, workq, pfrke_workq) {
915 pfr_unroute_kentry(kt, p);
919 pfr_destroy_kentries(workq);
/* Unroute every queued entry from 'kt' without freeing them. */
923 pfr_clean_node_mask(struct pfr_ktable *kt,
924 struct pfr_kentryworkq *workq)
926 struct pfr_kentry *p;
928 SLIST_FOREACH(p, workq, pfrke_workq)
929 pfr_unroute_kentry(kt, p);
/*
 * Zero all counters of the queued entries and reset their tzero;
 * with 'negchange' the negation flag is inverted first.
 * (Some lines elided in this listing.)
 */
933 pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
935 struct pfr_kentry *p;
938 SLIST_FOREACH(p, workq, pfrke_workq) {
940 p->pfrke_not = !p->pfrke_not;
941 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
942 for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++) {
943 counter_u64_zero(p->pfrke_counters.
944 pfrkc_packets[pfr_dir][pfr_op]);
945 counter_u64_zero(p->pfrke_counters.
946 pfrkc_bytes[pfr_dir][pfr_op]);
949 p->pfrke_counters.pfrkc_tzero = tzero;
/* Clear the feedback code of every address in the caller's array. */
954 pfr_reset_feedback(struct pfr_addr *addr, int size)
959 for (i = 0, ad = addr; i < size; i++, ad++)
960 ad->pfra_fback = PFR_FB_NONE;
/*
 * Build a sockaddr netmask of 'net' leading one-bits for the given
 * address family into 'sa' (IPv6 fills 32-bit words, all-ones before
 * the boundary word).  (Some lines elided in this listing.)
 */
964 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
968 bzero(sa, sizeof(*sa));
970 sa->sin.sin_len = sizeof(sa->sin);
971 sa->sin.sin_family = AF_INET;
972 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
973 } else if (af == AF_INET6) {
974 sa->sin6.sin6_len = sizeof(sa->sin6);
975 sa->sin6.sin6_family = AF_INET6;
976 for (i = 0; i < 4; i++) {
978 sa->sin6.sin6_addr.s6_addr32[i] =
979 net ? htonl(-1 << (32-net)) : 0;
982 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
/*
 * Insert 'ke' into the table's radix tree for its address family.
 * Network entries are added with an explicit prefix mask, host entries
 * without one.  Returns 0 on success, -1 if rn_addroute failed.
 * (Some lines elided in this listing.)
 */
989 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
991 union sockaddr_union mask;
992 struct radix_node *rn;
993 struct radix_head *head = NULL;
997 bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
998 if (ke->pfrke_af == AF_INET)
999 head = &kt->pfrkt_ip4->rh;
1000 else if (ke->pfrke_af == AF_INET6)
1001 head = &kt->pfrkt_ip6->rh;
1003 if (KENTRY_NETWORK(ke)) {
1004 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1005 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
1007 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
1009 return (rn == NULL ? -1 : 0);
/*
 * Remove 'ke' from the table's radix tree (mask for networks, none for
 * hosts); failures are only logged.  (Some lines elided in this listing.)
 */
1013 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
1015 union sockaddr_union mask;
1016 struct radix_node *rn;
1017 struct radix_head *head = NULL;
1019 if (ke->pfrke_af == AF_INET)
1020 head = &kt->pfrkt_ip4->rh;
1021 else if (ke->pfrke_af == AF_INET6)
1022 head = &kt->pfrkt_ip6->rh;
1024 if (KENTRY_NETWORK(ke)) {
1025 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1026 rn = rn_delete(&ke->pfrke_sa, &mask, head);
1028 rn = rn_delete(&ke->pfrke_sa, NULL, head);
1031 printf("pfr_unroute_kentry: delete failed.\n");
/*
 * Fill a zeroed user-visible pfr_addr from a kentry (family, prefix,
 * negation flag, and the family-appropriate address).
 * (Some lines elided in this listing.)
 */
1038 pfr_copyout_addr(struct pfr_addr *ad, const struct pfr_kentry *ke)
1040 bzero(ad, sizeof(*ad));
1043 ad->pfra_af = ke->pfrke_af;
1044 ad->pfra_net = ke->pfrke_net;
1045 ad->pfra_not = ke->pfrke_not;
1046 if (ad->pfra_af == AF_INET)
1047 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1048 else if (ad->pfra_af == AF_INET6)
1049 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
/*
 * Fill a user-visible pfr_astats from a kentry: the address, tzero,
 * and either the fetched counter values or zeroes plus PFR_FB_NOCOUNT
 * when the table has no per-entry counters configured.
 */
1053 pfr_copyout_astats(struct pfr_astats *as, const struct pfr_kentry *ke,
1054 const struct pfr_walktree *w)
1057 const struct pfr_kcounters *kc = &ke->pfrke_counters;
1059 pfr_copyout_addr(&as->pfras_a, ke);
1060 as->pfras_tzero = kc->pfrkc_tzero;
1062 if (! (w->pfrw_flags & PFR_TFLAG_COUNTERS)) {
1063 bzero(as->pfras_packets, sizeof(as->pfras_packets));
1064 bzero(as->pfras_bytes, sizeof(as->pfras_bytes));
1065 as->pfras_a.pfra_fback = PFR_FB_NOCOUNT;
1069 for (dir = 0; dir < PFR_DIR_MAX; dir ++) {
1070 for (op = 0; op < PFR_OP_ADDR_MAX; op ++) {
1071 as->pfras_packets[dir][op] =
1072 counter_u64_fetch(kc->pfrkc_packets[dir][op]);
1073 as->pfras_bytes[dir][op] =
1074 counter_u64_fetch(kc->pfrkc_bytes[dir][op]);
/*
 * radix-tree walk callback dispatching on w->pfrw_op: enqueue entries,
 * copy out addresses or stats, select an entry by index, or update a
 * dynamic-address record.  Returning non-zero stops the walk.
 * (Several case labels and lines elided in this listing.)
 */
1080 pfr_walktree(struct radix_node *rn, void *arg)
1082 struct pfr_kentry *ke = (struct pfr_kentry *)rn;
1083 struct pfr_walktree *w = arg;
1085 switch (w->pfrw_op) {
1094 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1097 case PFRW_GET_ADDRS:
1098 if (w->pfrw_free-- > 0) {
1099 pfr_copyout_addr(w->pfrw_addr, ke);
1103 case PFRW_GET_ASTATS:
1104 if (w->pfrw_free-- > 0) {
1105 struct pfr_astats as;
1107 pfr_copyout_astats(&as, ke, w);
1109 bcopy(&as, w->pfrw_astats, sizeof(as));
1115 break; /* negative entries are ignored */
1116 if (!w->pfrw_cnt--) {
1117 w->pfrw_kentry = ke;
1118 return (1); /* finish search */
1121 case PFRW_DYNADDR_UPDATE:
1123 union sockaddr_union pfr_mask;
1125 if (ke->pfrke_af == AF_INET) {
1126 if (w->pfrw_dyn->pfid_acnt4++ > 0)
1128 pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1129 w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa,
1131 w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask,
1133 } else if (ke->pfrke_af == AF_INET6){
1134 if (w->pfrw_dyn->pfid_acnt6++ > 0)
1136 pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1137 w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa,
1139 w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask,
/*
 * Deactivate all tables matching 'filter' (skipping the reserved
 * anchor) by clearing their ACTIVE nflag and applying via
 * pfr_setflags_ktables unless dummy.  (Some lines elided in this listing.)
 */
1149 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1151 struct pfr_ktableworkq workq;
1152 struct pfr_ktable *p;
1155 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
1156 if (pfr_fix_anchor(filter->pfrt_anchor))
1158 if (pfr_table_count(filter, flags) < 0)
1162 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1163 if (pfr_skip_table(filter, p, flags))
1165 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1167 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1169 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1170 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1173 if (!(flags & PFR_FLAG_DUMMY))
1174 pfr_setflags_ktables(&workq);
/*
 * Create 'size' tables; deduplicates against the RB tree and the
 * pending add queue, creates anchored tables' root tables on demand,
 * and reactivates existing inactive tables via the change queue.
 * (Some lines elided in this listing.)
 */
1181 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1183 struct pfr_ktableworkq addq, changeq;
1184 struct pfr_ktable *p, *q, *r, key;
1185 int i, rv, xadd = 0;
1186 long tzero = time_second;
1188 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1190 SLIST_INIT(&changeq);
1191 for (i = 0; i < size; i++) {
1192 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1193 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1194 flags & PFR_FLAG_USERIOCTL))
1196 key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1197 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1199 p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
/* drop duplicates already sitting on the add queue */
1202 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1203 if (!pfr_ktable_compare(p, q)) {
1204 pfr_destroy_ktable(p, 0);
1208 SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1210 if (!key.pfrkt_anchor[0])
1213 /* find or create root table */
1214 bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
1215 r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1220 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1221 if (!pfr_ktable_compare(&key, q)) {
1226 key.pfrkt_flags = 0;
1227 r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1230 SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
/* existing but inactive table: reactivate with user flags */
1232 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1233 SLIST_FOREACH(q, &changeq, pfrkt_workq)
1234 if (!pfr_ktable_compare(&key, q))
1236 p->pfrkt_nflags = (p->pfrkt_flags &
1237 ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1238 SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1244 if (!(flags & PFR_FLAG_DUMMY)) {
1245 pfr_insert_ktables(&addq);
1246 pfr_setflags_ktables(&changeq);
1248 pfr_destroy_ktables(&addq, 0);
/* error path: discard everything queued for addition */
1253 pfr_destroy_ktables(&addq, 0);
/*
 * Delete 'size' tables by clearing their ACTIVE nflag; skips tables
 * already queued.  (Some lines elided in this listing.)
 */
1258 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1260 struct pfr_ktableworkq workq;
1261 struct pfr_ktable *p, *q, key;
1264 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1266 for (i = 0; i < size; i++) {
1267 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1268 if (pfr_validate_table(&key.pfrkt_t, 0,
1269 flags & PFR_FLAG_USERIOCTL))
1271 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1272 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1273 SLIST_FOREACH(q, &workq, pfrkt_workq)
1274 if (!pfr_ktable_compare(p, q))
1276 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1277 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1284 if (!(flags & PFR_FLAG_DUMMY))
1285 pfr_setflags_ktables(&workq);
/*
 * Copy out the pfr_table headers of all tables matching 'filter'.
 * Reports the needed count when the caller's buffer is too small.
 * (Some lines elided in this listing.)
 */
1292 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1295 struct pfr_ktable *p;
1300 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1301 if (pfr_fix_anchor(filter->pfrt_anchor))
1303 n = nn = pfr_table_count(filter, flags);
1310 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1311 if (pfr_skip_table(filter, p, flags))
1315 bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
1318 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
/*
 * Copy out per-table statistics (fetched from counter(9) state) for
 * all tables matching 'filter'; optionally clears them afterwards.
 * (Some lines elided in this listing.)
 */
1325 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1328 struct pfr_ktable *p;
1329 struct pfr_ktableworkq workq;
1331 long tzero = time_second;
1332 int pfr_dir, pfr_op;
1334 /* XXX PFR_FLAG_CLSTATS disabled */
1335 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1336 if (pfr_fix_anchor(filter->pfrt_anchor))
1338 n = nn = pfr_table_count(filter, flags);
1346 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1347 if (pfr_skip_table(filter, p, flags))
1351 bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t,
1352 sizeof(struct pfr_table));
1353 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1354 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1355 tbl->pfrts_packets[pfr_dir][pfr_op] =
1357 p->pfrkt_packets[pfr_dir][pfr_op]);
1358 tbl->pfrts_bytes[pfr_dir][pfr_op] =
1360 p->pfrkt_bytes[pfr_dir][pfr_op]);
1363 tbl->pfrts_match = counter_u64_fetch(p->pfrkt_match);
1364 tbl->pfrts_nomatch = counter_u64_fetch(p->pfrkt_nomatch);
1365 tbl->pfrts_tzero = p->pfrkt_tzero;
1366 tbl->pfrts_cnt = p->pfrkt_cnt;
1367 for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++)
1368 tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op];
1370 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1372 if (flags & PFR_FLAG_CLSTATS)
1373 pfr_clstats_ktables(&workq, tzero,
1374 flags & PFR_FLAG_ADDRSTOO);
1376 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
/*
 * Zero the statistics of the listed tables; with PFR_FLAG_ADDRSTOO
 * the per-address stats are cleared as well.
 * (Some lines elided in this listing.)
 */
1383 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1385 struct pfr_ktableworkq workq;
1386 struct pfr_ktable *p, key;
1388 long tzero = time_second;
1390 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1392 for (i = 0; i < size; i++) {
1393 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1394 if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1396 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1398 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1402 if (!(flags & PFR_FLAG_DUMMY))
1403 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
/*
 * Set/clear user flags on the listed tables.  Rejects non-user flag
 * bits and overlapping set/clear masks.  Dropping PERSIST from an
 * unreferenced table counts it as a deletion.
 * (Some lines elided in this listing.)
 */
1410 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1411 int *nchange, int *ndel, int flags)
1413 struct pfr_ktableworkq workq;
1414 struct pfr_ktable *p, *q, key;
1415 int i, xchange = 0, xdel = 0;
1417 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1418 if ((setflag & ~PFR_TFLAG_USRMASK) ||
1419 (clrflag & ~PFR_TFLAG_USRMASK) ||
1420 (setflag & clrflag))
1423 for (i = 0; i < size; i++) {
1424 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1425 if (pfr_validate_table(&key.pfrkt_t, 0,
1426 flags & PFR_FLAG_USERIOCTL))
1428 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1429 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1430 p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
1432 if (p->pfrkt_nflags == p->pfrkt_flags)
1434 SLIST_FOREACH(q, &workq, pfrkt_workq)
1435 if (!pfr_ktable_compare(p, q))
1437 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq)
1437 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1438 if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
1439 (clrflag & PFR_TFLAG_PERSIST) &&
1440 !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1448 if (!(flags & PFR_FLAG_DUMMY))
1449 pfr_setflags_ktables(&workq)
1449 pfr_setflags_ktables(&workq);
1450 if (nchange != NULL)
/*
 * Begin a transaction on anchor 'trs': flush leftover INACTIVE tables,
 * hand out a fresh ruleset ticket, and garbage-collect the ruleset if
 * it ends up empty.  (Some lines elided in this listing.)
 */
1458 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1460 struct pfr_ktableworkq workq;
1461 struct pfr_ktable *p;
1462 struct pf_ruleset *rs;
1465 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1466 rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
1470 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1471 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1472 pfr_skip_table(trs, p, 0))
1474 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1475 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1478 if (!(flags & PFR_FLAG_DUMMY)) {
1479 pfr_setflags_ktables(&workq);
1481 *ticket = ++rs->tticket;
1484 pf_remove_if_empty_ruleset(rs);
/*
 * Define a table (and optionally its addresses) inside an open
 * transaction identified by `ticket`.  The addresses are staged in a
 * "shadow" ktable attached to the live table via pfrkt_shadow; nothing
 * becomes visible to the packet path until pfr_ina_commit().  For
 * anchored tables a root table of the same name is found or created and
 * linked through pfrkt_root.
 * NOTE(review): error returns and several cleanup lines between the
 * shown statements are missing from this excerpt.
 */
1491 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1492 int *nadd, int *naddr, u_int32_t ticket, int flags)
1494 struct pfr_ktableworkq tableq;
1495 struct pfr_kentryworkq addrq;
1496 struct pfr_ktable *kt, *rt, *shadow, key;
1497 struct pfr_kentry *p;
1498 struct pfr_addr *ad;
1499 struct pf_ruleset *rs;
1500 int i, rv, xadd = 0, xaddr = 0;
/* addresses may only be supplied together with PFR_FLAG_ADDRSTOO */
1504 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1505 if (size && !(flags & PFR_FLAG_ADDRSTOO))
1507 if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1508 flags & PFR_FLAG_USERIOCTL))
/* the ticket must match the transaction opened by pfr_ina_begin() */
1510 rs = pf_find_ruleset(tbl->pfrt_anchor);
1511 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1513 tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1514 SLIST_INIT(&tableq);
1515 kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
1517 kt = pfr_create_ktable(tbl, 0, 1);
1520 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
/* non-anchored tables need no root table */
1522 if (!tbl->pfrt_anchor[0])
1525 /* find or create root table */
1526 bzero(&key, sizeof(key));
1527 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1528 rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1530 kt->pfrkt_root = rt;
1533 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1535 pfr_destroy_ktables(&tableq, 0);
1538 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1539 kt->pfrkt_root = rt;
1540 } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
/* the shadow table stages the new address set off to the side */
1543 shadow = pfr_create_ktable(tbl, 0, 0);
1544 if (shadow == NULL) {
1545 pfr_destroy_ktables(&tableq, 0);
1549 for (i = 0, ad = addr; i < size; i++, ad++) {
1550 if (pfr_validate_addr(ad))
/* silently skip duplicates already routed into the shadow */
1552 if (pfr_lookup_addr(shadow, ad, 1) != NULL)
1554 p = pfr_create_kentry(ad);
1557 if (pfr_route_kentry(shadow, p)) {
1558 pfr_destroy_kentry(p);
1561 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1564 if (!(flags & PFR_FLAG_DUMMY)) {
/* replace any previously staged shadow for this table */
1565 if (kt->pfrkt_shadow != NULL)
1566 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1567 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1568 pfr_insert_ktables(&tableq);
/* NO_ADDRESSES marks "table defined without an address list" */
1569 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1570 xaddr : NO_ADDRESSES;
1571 kt->pfrkt_shadow = shadow;
/* dummy run: tear everything staged back down */
1573 pfr_clean_node_mask(shadow, &addrq);
1574 pfr_destroy_ktable(shadow, 0);
1575 pfr_destroy_ktables(&tableq, 0);
1576 pfr_destroy_kentries(&addrq);
/* error path: undo all partial work */
1584 pfr_destroy_ktable(shadow, 0);
1585 pfr_destroy_ktables(&tableq, 0);
1586 pfr_destroy_kentries(&addrq);
/*
 * Abort the open transaction identified by `ticket`: strip the
 * INACTIVE flag from every staged table in this anchor (which drops
 * the attached shadow tables via pfr_setflags_ktable()) and close
 * the transaction.  Mirrors pfr_ina_begin() without issuing a ticket.
 * NOTE(review): the *ndel write-back and error returns are among the
 * lines missing from this excerpt.
 */
1591 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1593 struct pfr_ktableworkq workq;
1594 struct pfr_ktable *p;
1595 struct pf_ruleset *rs;
1600 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
/* only a currently open, matching transaction may be rolled back */
1601 rs = pf_find_ruleset(trs->pfrt_anchor);
1602 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1605 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1606 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1607 pfr_skip_table(trs, p, 0))
1609 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1610 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1613 if (!(flags & PFR_FLAG_DUMMY)) {
1614 pfr_setflags_ktables(&workq);
1616 pf_remove_if_empty_ruleset(rs);
/*
 * Commit the open transaction identified by `ticket`: every table in
 * this anchor still carrying PFR_TFLAG_INACTIVE has its staged shadow
 * contents swapped in by pfr_commit_ktable(), then the transaction is
 * closed.  Uses the safe first/next iteration because commit may remove
 * the current table from the work queue's underlying structures.
 * NOTE(review): xadd/xchange accounting lines and error returns are
 * missing from this excerpt.
 */
1624 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1625 int *nchange, int flags)
1627 struct pfr_ktable *p, *q;
1628 struct pfr_ktableworkq workq;
1629 struct pf_ruleset *rs;
1630 int xadd = 0, xchange = 0;
1631 long tzero = time_second;
1635 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1636 rs = pf_find_ruleset(trs->pfrt_anchor);
1637 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1641 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1642 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1643 pfr_skip_table(trs, p, 0))
1645 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
/* an already-ACTIVE table counts as changed rather than added */
1646 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1652 if (!(flags & PFR_FLAG_DUMMY)) {
/* fetch next before commit: p may be torn down by the commit */
1653 for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
1654 q = SLIST_NEXT(p, pfrkt_workq);
1655 pfr_commit_ktable(p, tzero);
1658 pf_remove_if_empty_ruleset(rs);
1662 if (nchange != NULL)
/*
 * Make a staged shadow table live.  Three cases:
 *  - shadow has NO_ADDRESSES: only (re)activate the table, clearing
 *    stats if it was not active before;
 *  - live table is ACTIVE: merge, computing add/change/delete sets by
 *    looking each shadow entry up in the live table;
 *  - live table is inactive/empty: simply swap the radix heads.
 * Finally the shadow is destroyed and the table's flags recomputed
 * (ACTIVE set, INACTIVE cleared, user flags taken from the shadow).
 * NOTE(review): declarations of `ad`/`nflags` and some continue/else
 * lines fall in the gaps of this excerpt.
 */
1669 pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
1671 struct pfr_ktable *shadow = kt->pfrkt_shadow;
1676 if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1677 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1678 pfr_clstats_ktable(kt, tzero, 1);
1679 } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1680 /* kt might contain addresses */
1681 struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
1682 struct pfr_kentry *p, *q, *next;
1685 pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1688 SLIST_INIT(&changeq);
1690 SLIST_INIT(&garbageq);
/* detach shadow entries from their radix nodes before re-filing them */
1691 pfr_clean_node_mask(shadow, &addrq);
1692 for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
1693 next = SLIST_NEXT(p, pfrke_workq); /* XXX */
1694 pfr_copyout_addr(&ad, p);
/* exact match lookup in the live table decides add vs. change */
1695 q = pfr_lookup_addr(kt, &ad, 1);
1697 if (q->pfrke_not != p->pfrke_not)
1698 SLIST_INSERT_HEAD(&changeq, q,
/* duplicate of a live entry: shadow copy becomes garbage */
1701 SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
1703 p->pfrke_counters.pfrkc_tzero = tzero;
1704 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
/* live entries not marked during the loop are to be deleted */
1707 pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1708 pfr_insert_kentries(kt, &addq, tzero);
1709 pfr_remove_kentries(kt, &delq);
1710 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
1711 pfr_destroy_kentries(&garbageq);
1713 /* kt cannot contain addresses */
/* cheap path: exchange the whole radix heads with the shadow */
1714 SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1716 SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1718 SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1719 pfr_clstats_ktable(kt, tzero, 1);
1721 nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1722 (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1723 & ~PFR_TFLAG_INACTIVE;
1724 pfr_destroy_ktable(shadow, 0);
1725 kt->pfrkt_shadow = NULL;
1726 pfr_setflags_ktable(kt, nflags);
/*
 * Validate a user-supplied pfr_table: non-empty, NUL-terminated name
 * with no garbage after the terminator, optionally rejecting the
 * reserved anchor, a fixable anchor path, and no flag bits outside
 * `allowedflags`.  Returns non-zero on any violation (the individual
 * return statements fall in the gaps of this excerpt).
 */
1730 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1734 if (!tbl->pfrt_name[0])
1736 if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1738 if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
/* ensure the bytes past the NUL are all zero (no hidden data) */
1740 for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1741 if (tbl->pfrt_name[i])
1743 if (pfr_fix_anchor(tbl->pfrt_anchor))
1745 if (tbl->pfrt_flags & ~allowedflags)
1751 * Rewrite anchors referenced by tables to remove slashes
1752 * and check for validity.
/*
 * Canonicalize an anchor path in place: strip leading slashes by
 * shifting the string left and zero-filling the tail, then verify the
 * result is NUL-terminated and has no bytes after the terminator.
 * NOTE(review): the declarations of `path`/`off`/`i` and the return
 * statements fall in the gaps of this excerpt.
 */
1755 pfr_fix_anchor(char *anchor)
1757 size_t siz = MAXPATHLEN;
1760 if (anchor[0] == '/') {
/* skip all consecutive leading slashes */
1766 while (*++path == '/')
1768 bcopy(path, anchor, siz - off);
1769 memset(anchor + siz - off, 0, off);
1771 if (anchor[siz - 1])
1773 for (i = strlen(anchor); i < siz; i++)
/*
 * Return the number of tables visible through `filter`: the global
 * count with PFR_FLAG_ALLRSETS, the per-anchor count when an anchor is
 * named (-1 if that ruleset does not exist), else the main ruleset's.
 */
1780 pfr_table_count(struct pfr_table *filter, int flags)
1782 struct pf_ruleset *rs;
1786 if (flags & PFR_FLAG_ALLRSETS)
1787 return (V_pfr_ktable_cnt);
1788 if (filter->pfrt_anchor[0]) {
1789 rs = pf_find_ruleset(filter->pfrt_anchor);
1790 return ((rs != NULL) ? rs->tables : -1);
1792 return (pf_main_ruleset.tables);
/*
 * Decide whether `kt` falls outside `filter`'s scope: never skipped
 * with PFR_FLAG_ALLRSETS, otherwise skipped when the anchors differ.
 * (Return statements fall in the gaps of this excerpt.)
 */
1796 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1798 if (flags & PFR_FLAG_ALLRSETS)
1800 if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
/* Insert every ktable on the work queue via pfr_insert_ktable(). */
1806 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1808 struct pfr_ktable *p;
1810 SLIST_FOREACH(p, workq, pfrkt_workq)
1811 pfr_insert_ktable(p);
/*
 * Link a ktable into the global RB tree.  If it has a root table,
 * bump the root's anchor refcount; on the 0->1 transition mark the
 * root as referenced-by-anchor via pfr_setflags_ktable().
 */
1815 pfr_insert_ktable(struct pfr_ktable *kt)
1820 RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
1822 if (kt->pfrkt_root != NULL)
1823 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1824 pfr_setflags_ktable(kt->pfrkt_root,
1825 kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
/*
 * Apply the staged pfrkt_nflags of every queued table.  Safe
 * first/next iteration: pfr_setflags_ktable() may destroy `p`.
 */
1829 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1831 struct pfr_ktable *p, *q;
1833 for (p = SLIST_FIRST(workq); p; p = q) {
1834 q = SLIST_NEXT(p, pfrkt_workq);
1835 pfr_setflags_ktable(p, p->pfrkt_nflags);
/*
 * Apply a new flag word to a ktable, with the derived rules:
 *  - a table that is neither referenced, anchor-referenced, nor
 *    persistent cannot stay ACTIVE;
 *  - an inactive table keeps no user flags;
 *  - a table with no SETMASK flags left is removed from the tree and
 *    destroyed (dropping the root's anchor refcount on the way);
 *  - deactivation flushes all addresses; clearing INACTIVE drops any
 *    staged shadow table.
 */
1840 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1842 struct pfr_kentryworkq addrq;
1846 if (!(newf & PFR_TFLAG_REFERENCED) &&
1847 !(newf & PFR_TFLAG_REFDANCHOR) &&
1848 !(newf & PFR_TFLAG_PERSIST))
1849 newf &= ~PFR_TFLAG_ACTIVE;
1850 if (!(newf & PFR_TFLAG_ACTIVE))
1851 newf &= ~PFR_TFLAG_USRMASK;
1852 if (!(newf & PFR_TFLAG_SETMASK)) {
/* no reason left for this table to exist: unlink and destroy it */
1853 RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
1854 if (kt->pfrkt_root != NULL)
1855 if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1856 pfr_setflags_ktable(kt->pfrkt_root,
1857 kt->pfrkt_root->pfrkt_flags &
1858 ~PFR_TFLAG_REFDANCHOR)
1859 pfr_destroy_ktable(kt, 1);
/* going inactive: flush the address entries it still holds */
1863 if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1864 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1865 pfr_remove_kentries(kt, &addrq);
1867 if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1868 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1869 kt->pfrkt_shadow = NULL;
1871 kt->pfrkt_flags = newf;
/* Clear stats of every queued table; `recurse` also clears addresses. */
1875 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1877 struct pfr_ktable *p;
1879 SLIST_FOREACH(p, workq, pfrkt_workq)
1880 pfr_clstats_ktable(p, tzero, recurse);
/*
 * Zero a table's packet/byte counters for every direction/op pair plus
 * its match/nomatch counters, and stamp tzero as the reset time.  With
 * `recurse`, the per-address counters are cleared first.
 */
1884 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1886 struct pfr_kentryworkq addrq;
1887 int pfr_dir, pfr_op;
1890 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1891 pfr_clstats_kentries(&addrq, tzero, 0);
1893 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1894 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1895 counter_u64_zero(kt->pfrkt_packets[pfr_dir][pfr_op]);
1896 counter_u64_zero(kt->pfrkt_bytes[pfr_dir][pfr_op]);
1899 counter_u64_zero(kt->pfrkt_match);
1900 counter_u64_zero(kt->pfrkt_nomatch);
1901 kt->pfrkt_tzero = tzero;
/*
 * Allocate and initialize a kernel table: zeroed struct, optional
 * attachment to (or creation of) the anchor's ruleset, per-CPU
 * counter_u64 stats, and the IPv4/IPv6 radix heads.  Every allocation
 * is checked; any failure unwinds through pfr_destroy_ktable(kt, 0)
 * and returns NULL (the return statements fall in this excerpt's gaps).
 * M_NOWAIT throughout — callers must tolerate a NULL result.
 */
1904 static struct pfr_ktable *
1905 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
1907 struct pfr_ktable *kt;
1908 struct pf_ruleset *rs;
1909 int pfr_dir, pfr_op;
1913 kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
1918 if (attachruleset) {
1919 rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
1921 pfr_destroy_ktable(kt, 0);
/* one counter pair per (direction, operation) slot */
1928 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1929 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1930 kt->pfrkt_packets[pfr_dir][pfr_op] =
1931 counter_u64_alloc(M_NOWAIT);
1932 if (! kt->pfrkt_packets[pfr_dir][pfr_op]) {
1933 pfr_destroy_ktable(kt, 0);
1936 kt->pfrkt_bytes[pfr_dir][pfr_op] =
1937 counter_u64_alloc(M_NOWAIT);
1938 if (! kt->pfrkt_bytes[pfr_dir][pfr_op]) {
1939 pfr_destroy_ktable(kt, 0);
1944 kt->pfrkt_match = counter_u64_alloc(M_NOWAIT);
1945 if (! kt->pfrkt_match) {
1946 pfr_destroy_ktable(kt, 0);
1950 kt->pfrkt_nomatch = counter_u64_alloc(M_NOWAIT);
1951 if (! kt->pfrkt_nomatch) {
1952 pfr_destroy_ktable(kt, 0);
/* radix heads keyed on the full address width of each family */
1956 if (!rn_inithead((void **)&kt->pfrkt_ip4,
1957 offsetof(struct sockaddr_in, sin_addr) * 8) ||
1958 !rn_inithead((void **)&kt->pfrkt_ip6,
1959 offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
1960 pfr_destroy_ktable(kt, 0);
1963 kt->pfrkt_tzero = tzero;
/*
 * Destroy every table on the work queue.  Safe first/next iteration
 * because pfr_destroy_ktable() frees the current element.
 */
1969 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1971 struct pfr_ktable *p, *q;
1973 for (p = SLIST_FIRST(workq); p; p = q) {
1974 q = SLIST_NEXT(p, pfrkt_workq);
1975 pfr_destroy_ktable(p, flushaddr);
/*
 * Free a ktable and everything it owns: optionally its address entries
 * (`flushaddr`), both radix heads, a staged shadow table, its ruleset
 * reference (removing an emptied ruleset), and all counter_u64 stats.
 * counter_u64_free() tolerates NULL, so partially constructed tables
 * from pfr_create_ktable() unwind through here safely.
 */
1980 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
1982 struct pfr_kentryworkq addrq;
1983 int pfr_dir, pfr_op;
1986 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1987 pfr_clean_node_mask(kt, &addrq);
1988 pfr_destroy_kentries(&addrq);
1990 if (kt->pfrkt_ip4 != NULL)
1991 rn_detachhead((void **)&kt->pfrkt_ip4);
1992 if (kt->pfrkt_ip6 != NULL)
1993 rn_detachhead((void **)&kt->pfrkt_ip6);
1994 if (kt->pfrkt_shadow != NULL)
1995 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
1996 if (kt->pfrkt_rs != NULL) {
1997 kt->pfrkt_rs->tables--;
1998 pf_remove_if_empty_ruleset(kt->pfrkt_rs);
2000 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
2001 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
2002 counter_u64_free(kt->pfrkt_packets[pfr_dir][pfr_op]);
2003 counter_u64_free(kt->pfrkt_bytes[pfr_dir][pfr_op]);
2006 counter_u64_free(kt->pfrkt_match);
2007 counter_u64_free(kt->pfrkt_nomatch);
2009 free(kt, M_PFTABLE);
/*
 * RB-tree ordering for ktables: by table name first, then by anchor
 * path; equal iff both match.
 */
2013 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
2017 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
2019 return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
/* Look up a table by name/anchor in the global RB tree. */
2022 static struct pfr_ktable *
2023 pfr_lookup_table(struct pfr_table *tbl)
2025 /* struct pfr_ktable start like a struct pfr_table */
2026 return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
2027 (struct pfr_ktable *)tbl));
/*
 * Packet-path lookup: does address `a` (family `af`) match table `kt`?
 * Falls back to the root table when the given one is not ACTIVE.  The
 * address is wrapped in a sockaddr and matched against the family's
 * radix head; root nodes (KENTRY_RNF_ROOT) do not count as a match,
 * and negated entries (pfrke_not) invert the result.  Updates the
 * table's match/nomatch counters as a side effect.
 * NOTE(review): the `match` declaration, switch statement, and return
 * fall in the gaps of this excerpt.
 */
2031 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
2033 struct pfr_kentry *ke = NULL;
2038 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2039 kt = kt->pfrkt_root;
2040 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2047 struct sockaddr_in sin;
2049 bzero(&sin, sizeof(sin));
2050 sin.sin_len = sizeof(sin);
2051 sin.sin_family = AF_INET;
2052 sin.sin_addr.s_addr = a->addr32[0];
2053 ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
2054 if (ke && KENTRY_RNF_ROOT(ke))
2062 struct sockaddr_in6 sin6;
2064 bzero(&sin6, sizeof(sin6));
2065 sin6.sin6_len = sizeof(sin6);
2066 sin6.sin6_family = AF_INET6;
2067 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
2068 ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
2069 if (ke && KENTRY_RNF_ROOT(ke))
/* negated entries invert the sense of the hit */
2075 match = (ke && !ke->pfrke_not);
2077 counter_u64_add(kt->pfrkt_match, 1);
2079 counter_u64_add(kt->pfrkt_nomatch, 1);
/*
 * Account a packet of `len` bytes against table `kt` for the given
 * direction and pass/block operation.  Performs the same radix lookup
 * as pfr_match_addr(); when the lookup result disagrees with what the
 * rule expected (`notrule`), the packet is re-filed under PFR_OP_XPASS
 * (and a debug message is printed if it was not a pass op).  With
 * PFR_TFLAG_COUNTERS, per-entry counters are bumped as well.
 * NOTE(review): switch/case lines and early returns fall in the gaps
 * of this excerpt.
 */
2084 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
2085 u_int64_t len, int dir_out, int op_pass, int notrule)
2087 struct pfr_kentry *ke = NULL;
2089 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2090 kt = kt->pfrkt_root;
2091 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2098 struct sockaddr_in sin;
2100 bzero(&sin, sizeof(sin));
2101 sin.sin_len = sizeof(sin);
2102 sin.sin_family = AF_INET;
2103 sin.sin_addr.s_addr = a->addr32[0];
2104 ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
2105 if (ke && KENTRY_RNF_ROOT(ke))
2113 struct sockaddr_in6 sin6;
2115 bzero(&sin6, sizeof(sin6));
2116 sin6.sin6_len = sizeof(sin6);
2117 sin6.sin6_family = AF_INET6;
2118 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
2119 ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
2120 if (ke && KENTRY_RNF_ROOT(ke))
2126 panic("%s: unknown address family %u", __func__, af);
/* lookup outcome contradicts the rule: reclassify as XPASS */
2128 if ((ke == NULL || ke->pfrke_not) != notrule) {
2129 if (op_pass != PFR_OP_PASS)
2130 DPFPRINTF(PF_DEBUG_URGENT,
2131 ("pfr_update_stats: assertion failed.\n"));
2132 op_pass = PFR_OP_XPASS;
2134 counter_u64_add(kt->pfrkt_packets[dir_out][op_pass], 1);
2135 counter_u64_add(kt->pfrkt_bytes[dir_out][op_pass], len);
/* XPASS traffic is never charged to an individual entry */
2136 if (ke != NULL && op_pass != PFR_OP_XPASS &&
2137 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
2138 counter_u64_add(ke->pfrke_counters.
2139 pfrkc_packets[dir_out][op_pass], 1);
2140 counter_u64_add(ke->pfrke_counters.
2141 pfrkc_bytes[dir_out][op_pass], len);
/*
 * Resolve (or create) the table `name` for use by a rule in ruleset
 * `rs`, creating and linking a root table when the ruleset is anchored.
 * Takes a rule reference: the first reference flips the table to
 * PFR_TFLAG_REFERENCED via pfr_setflags_ktable().  Returns the ktable
 * (the return and some NULL checks fall in the gaps of this excerpt).
 */
2146 pfr_attach_table(struct pf_ruleset *rs, char *name)
2148 struct pfr_ktable *kt, *rt;
2149 struct pfr_table tbl;
2150 struct pf_anchor *ac = rs->anchor;
2154 bzero(&tbl, sizeof(tbl));
2155 strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2157 strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2158 kt = pfr_lookup_table(&tbl);
2160 kt = pfr_create_ktable(&tbl, time_second, 1);
/* anchored rule: also need the root table (empty anchor) */
2164 bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2165 rt = pfr_lookup_table(&tbl);
2167 rt = pfr_create_ktable(&tbl, 0, 1);
2169 pfr_destroy_ktable(kt, 0);
2172 pfr_insert_ktable(rt);
2174 kt->pfrkt_root = rt;
2176 pfr_insert_ktable(kt);
/* first rule reference marks the table as referenced */
2178 if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2179 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
/*
 * Drop one rule reference from a table; when the last reference goes
 * away, clear PFR_TFLAG_REFERENCED (which may destroy the table inside
 * pfr_setflags_ktable()).  Asserts the refcount was positive.
 */
2184 pfr_detach_table(struct pfr_ktable *kt)
2188 KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
2189 __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));
2191 if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2192 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
/*
 * Address-pool iterator used by round-robin source translation: pick
 * the next usable address from table `kt`, resuming from *pidx /
 * *counter.  Walks entry `idx` via pfr_kentry_byidx(); for network
 * entries it steps through the block, skipping nested sub-blocks by
 * probing the radix tree and advancing past them with PF_POOLMASK.
 * Updates the table's match/nomatch counters.
 * NOTE(review): the `af` parameter, loop labels, and return statements
 * fall in the gaps of this excerpt; the AF_INET literal on the
 * pfr_prepare_network call near the end differs from the surrounding
 * af-generic code — presumably intentional upstream, verify against
 * the full source before touching.
 */
2196 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2199 struct pf_addr *addr, *cur, *mask;
2200 union sockaddr_union uaddr, umask;
2201 struct pfr_kentry *ke, *ke2 = NULL;
2202 int idx = -1, use_counter = 0;
/* pre-build a sockaddr of the right family for radix probes */
2206 uaddr.sin.sin_len = sizeof(struct sockaddr_in);
2207 uaddr.sin.sin_family = AF_INET;
2210 uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
2211 uaddr.sin6.sin6_family = AF_INET6;
2214 addr = SUNION2PF(&uaddr, af);
2216 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2217 kt = kt->pfrkt_root;
2218 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
/* a saved counter lets us resume mid-block */
2223 if (counter != NULL && idx >= 0)
2229 ke = pfr_kentry_byidx(kt, idx, af);
2231 counter_u64_add(kt->pfrkt_nomatch, 1);
2234 pfr_prepare_network(&umask, af, ke->pfrke_net);
2235 cur = SUNION2PF(&ke->pfrke_sa, af);
2236 mask = SUNION2PF(&umask, af);
2239 /* is supplied address within block? */
2240 if (!PF_MATCHA(0, cur, mask, counter, af)) {
2241 /* no, go to next block in table */
2246 PF_ACPY(addr, counter, af);
2248 /* use first address of block */
2249 PF_ACPY(addr, cur, af);
2252 if (!KENTRY_NETWORK(ke)) {
2253 /* this is a single IP address - no possible nested block */
2254 PF_ACPY(counter, addr, af);
2256 counter_u64_add(kt->pfrkt_match, 1);
2260 /* we don't want to use a nested block */
2263 ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2264 &kt->pfrkt_ip4->rh);
2267 ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2268 &kt->pfrkt_ip6->rh);
2271 /* no need to check KENTRY_RNF_ROOT() here */
2273 /* lookup return the same block - perfect */
2274 PF_ACPY(counter, addr, af);
2276 counter_u64_add(kt->pfrkt_match, 1);
2280 /* we need to increase the counter past the nested block */
2281 pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net);
2282 PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af);
2284 if (!PF_MATCHA(0, cur, mask, addr, af)) {
2285 /* ok, we reached the end of our main block */
2286 /* go to next block in table */
/*
 * Return the idx-th entry of the table for the given address family by
 * walking the corresponding radix tree with a PFRW_POOL_GET walker.
 * NOTE(review): the switch/case lines and the walker's index setup
 * fall in the gaps of this excerpt.
 */
2294 static struct pfr_kentry *
2295 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2297 struct pfr_walktree w;
2299 bzero(&w, sizeof(w));
2300 w.pfrw_op = PFRW_POOL_GET;
2306 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2307 return (w.pfrw_kentry);
2311 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2312 return (w.pfrw_kentry);
2320 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2322 struct pfr_walktree w;
2324 bzero(&w, sizeof(w));
2325 w.pfrw_op = PFRW_DYNADDR_UPDATE;
2328 dyn->pfid_acnt4 = 0;
2329 dyn->pfid_acnt6 = 0;
2330 if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2331 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2332 if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2333 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);