/*	$OpenBSD: pf_table.c,v 1.68 2006/05/02 10:08:45 dhartmei Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
35 #include "opt_inet6.h"
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/socket.h>
45 #include <sys/kernel.h>
47 #include <sys/rwlock.h>
49 #include <sys/malloc.h>
53 #include <net/route.h>
54 #include <netinet/in.h>
56 #include <netinet/ip_ipsp.h>
59 #include <net/pfvar.h>
#define ACCEPT_FLAGS(oklist)			\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)
#if defined(__FreeBSD__)
static __inline int
_copyin(const void *uaddr, void *kaddr, size_t len)
{
	int r;

	PF_UNLOCK();
	r = copyin(uaddr, kaddr, len);
	PF_LOCK();
	return (r);
}

static __inline int
_copyout(const void *uaddr, void *kaddr, size_t len)
{
	int r;

	PF_UNLOCK();
	r = copyout(uaddr, kaddr, len);
	PF_LOCK();
	return (r);
}
#define COPYIN(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	_copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	_copyout((from), (to), (size)) :	\
	(bcopy((from), (to), (size)), 0))
#else

#define COPYIN(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#endif
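/*
 * Editor's sketch (not part of the original file): COPYIN/COPYOUT pick a
 * copy routine at run time, so the same table routines can serve both the
 * user ioctl path and in-kernel callers.  A caller is assumed to look like:
 *
 *	struct pfr_addr ad;
 *
 *	if (COPYIN(addr+i, &ad, sizeof(ad)))	- copyin() when
 *		senderr(EFAULT);		- PFR_FLAG_USERIOCTL is set,
 *						- plain bcopy() otherwise
 */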
#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)
#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)
#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define	NO_ADDRESSES		(-1)
#define	ENQUEUE_UNMARKED_ONLY	(1)
#define	INVERT_NEG_FLAG		(1)
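
/*
 * Editor's note (illustrative): AF_BITS() is 32 for AF_INET and 128 for
 * AF_INET6, so ADDR_NETWORK()/KENTRY_NETWORK() are true exactly for
 * prefixes shorter than a full host address - e.g. an AF_INET entry with
 * pfra_net == 24 is a network, one with pfra_net == 32 is a single host.
 */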
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK, PFRW_SWEEP, PFRW_ENQUEUE, PFRW_GET_ADDRS,
		PFRW_GET_ASTATS, PFRW_POOL_GET, PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
	int	 pfrw_flags;
};
#define	pfrw_addr	pfrw_1.pfrw1_addr
#define	pfrw_astats	pfrw_1.pfrw1_astats
#define	pfrw_workq	pfrw_1.pfrw1_workq
#define	pfrw_kentry	pfrw_1.pfrw1_kentry
#define	pfrw_dyn	pfrw_1.pfrw1_dyn
#define	pfrw_cnt	pfrw_free
#define	senderr(e)	do { rv = (e); goto _bad; } while (0)

#if defined(__FreeBSD__)
uma_zone_t		 pfr_ktable_pl;
uma_zone_t		 pfr_kentry_pl;
uma_zone_t		 pfr_kentry_pl2;
#else
struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;
struct pool		 pfr_kentry_pl2;
#endif
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;
void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *, int);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
int			 pfr_fix_anchor(char *);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);
RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;
void
pfr_initialize(void)
{
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL);

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}
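
/*
 * Editor's note (illustrative): pfr_sin and pfr_sin6 are preinitialized
 * scratch sockaddrs reused by the match routines below, and pfr_ffaddr is
 * an all-ones address used as the host-bits operand for PF_POOLMASK() in
 * pfr_pool_get().
 */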
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad, 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK) {
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
		}
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xdel = 0, log = 1;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * there are two algorithms to choose from here.
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n'
	 * one is O(n*LOG(N)) and is better for small 'n'
	 *
	 * the following code tries to decide which one is best.
	 */
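	/*
	 * Editor's worked example (illustrative): for N = 1024 the loop
	 * below runs 11 times, leaving log = 12, so the O(N) full scan is
	 * taken whenever size > 1024/12 (more than ~85 deletions), roughly
	 * the n > N/log2(N) crossover point.
	 */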
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof(ad)))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad, 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad)))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}
int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
#ifdef __FreeBSD__
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
#else
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
#endif
	if (!rv)
#ifdef __FreeBSD__
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
		    &w);
#else
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
#endif
	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
#ifdef __FreeBSD__
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
#else
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
#endif
	if (!rv)
#ifdef __FreeBSD__
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
		    &w);
#else
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
#endif
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xzero = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
			return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}
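
/*
 * Editor's example (illustrative): a valid AF_INET entry for 192.0.2.0/24
 * has pfra_net = 24 and every bit of pfra_u beyond the first three octets
 * zero; pfra_net > 32, stray trailing address bits, or a pfra_not value
 * outside {0,1} all fail the checks above.
 */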
void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	 w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
#ifdef __FreeBSD__
		if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree,
		    &w))
#else
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
#endif
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
#ifdef __FreeBSD__
		if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
		    &w))
#else
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
#endif
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}
void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	 w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
#ifdef __FreeBSD__
	if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
#else
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
#endif
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
#ifdef __FreeBSD__
	if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
#else
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
#endif
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head = NULL;	/* make the compiler happy */
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
#ifdef __FreeBSD__
		PF_ASSERT(MA_OWNED);
#endif
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}
struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
	struct pfr_kentry	*ke;

	if (intr)
		ke = pool_get(&pfr_kentry_pl2, PR_NOWAIT);
	else
		ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_intrpool = intr;
	return (ke);
}
void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}
void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_intrpool)
		pool_put(&pfr_kentry_pl2, ke);
	else
		pool_put(&pfr_kentry_pl, ke);
}
void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}
int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad, 1);
	if (p == NULL)
		return (EINVAL);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}
void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}
void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}
void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		splx(s);
		p->pfrke_tzero = tzero;
	}
}
void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	 ad;
	int		 i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			break;
	}
}
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	 i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}
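
/*
 * Editor's example (illustrative): for AF_INET and net = 20,
 * htonl(-1 << (32-20)) produces the network-order mask 0xfffff000
 * (255.255.240.0); for AF_INET6 the same 32-bit pattern is applied across
 * s6_addr32[0..3], one word per loop iteration.
 */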
int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = NULL;	/* make the compiler happy */
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_ASSERT(MA_OWNED);
#endif
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
	splx(s);

	return (rn == NULL ? -1 : 0);
}
int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = NULL;	/* make the compiler happy */
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_ASSERT(MA_OWNED);
#endif
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
#ifndef __FreeBSD__
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
#else
		rn = rn_delete(&ke->pfrke_sa, &mask, head, NULL);
#endif
	} else
#ifndef __FreeBSD__
		rn = rn_delete(&ke->pfrke_sa, NULL, head);
#else
		rn = rn_delete(&ke->pfrke_sa, NULL, head, NULL);
#endif
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}
void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s, flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (COPYOUT(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			s = splsoftnet();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as)))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6,
			    ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}
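
/*
 * Editor's note (illustrative): in the PFRW_DYNADDR_UPDATE case above, the
 * pfid_acnt4/pfid_acnt6 counters keep climbing past the first entry, but
 * only the first entry's address and mask are recorded - a dynaddr
 * collapses a table into one addr/mask pair only while the table holds
 * exactly one entry per address family.
 */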
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s = 0, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}
int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl)))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s = 0, n, nn;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
	/* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
			if (!(flags & PFR_FLAG_ATOMIC))
				splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s = 0, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}
int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 1;
		*ticket = ++rs->tticket;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}
int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s = 0, xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}
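
/*
 * Editor's note (illustrative): the last branch above commits an inactive
 * table cheaply - since kt holds no addresses, the shadow's radix heads
 * and counters are simply swapped in with SWAP() rather than merged entry
 * by entry.
 */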
int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}
/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char	*path;
		int	 off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}
int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}
int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}
void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}
void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}
void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	 addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}
void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}
void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 s;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	s = splsoftnet();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	splx(s);
	kt->pfrkt_tzero = tzero;
}
struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}
void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}
void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100)
	if (kt->pfrkt_ip4 != NULL) {
		RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip4);
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	}
	if (kt->pfrkt_ip6 != NULL) {
		RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip6);
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	}
#else
	if (kt->pfrkt_ip4 != NULL)
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
#endif
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}
int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}
struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}
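
/*
 * Editor's sketch (not in the original): the cast in pfr_lookup_table() is
 * only safe while the embedded pfr_table stays the first member of
 * struct pfr_ktable; a compile-time guard along these lines could make
 * that explicit:
 *
 *	CTASSERT(offsetof(struct pfr_ktable, pfrkt_t) == 0);
 */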
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}
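
/*
 * Editor's note (illustrative): when the lookup above disagrees with what
 * the matching rule expected (notrule), the packet is accounted under
 * PFR_OP_XPASS on the table only and the per-entry counters are skipped,
 * keeping table and entry statistics mutually consistent.
 */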
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}
void
pfr_detach_table(struct pfr_ktable *kt)
{
	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2 = NULL;
	struct pf_addr		*addr = NULL;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	if (af == AF_INET)
		addr = (struct pf_addr *)&pfr_sin.sin_addr;
	else if (af == AF_INET6)
		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL)
		return (1);
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		if (af == AF_INET)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
			    kt->pfrkt_ip4);
		else if (af == AF_INET6)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
			    kt->pfrkt_ip6);
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}
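
/*
 * Editor's walk-through (illustrative, hypothetical addresses): if a main
 * block 192.0.2.0/29 contains a nested host entry 192.0.2.4/32, the
 * PF_POOLMASK()/PF_AINC() sequence above saturates the host bits of addr
 * within the nested block and steps once past it, so the next address
 * handed out is 192.0.2.5 rather than one inside the nested entry.
 */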
struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	 w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
#ifdef __FreeBSD__
		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
#else
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
#endif
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
#ifdef __FreeBSD__
		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
#else
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
#endif
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	 w;
	int			 s;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	s = splsoftnet();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
#ifdef __FreeBSD__
		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
#else
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
#endif
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
#ifdef __FreeBSD__
		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
#else
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
#endif
	splx(s);
}