]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/netpfil/pf/pf_table.c
MFV r356143:
[FreeBSD/FreeBSD.git] / sys / netpfil / pf / pf_table.c
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002 Cedric Berger
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  *    - Redistributions of source code must retain the above copyright
12  *      notice, this list of conditions and the following disclaimer.
13  *    - Redistributions in binary form must reproduce the above
14  *      copyright notice, this list of conditions and the following
15  *      disclaimer in the documentation and/or other materials provided
16  *      with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  *      $OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
32  */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39
40 #include <sys/param.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
44 #include <sys/mbuf.h>
45 #include <sys/mutex.h>
46 #include <sys/refcount.h>
47 #include <sys/socket.h>
48 #include <vm/uma.h>
49
50 #include <net/if.h>
51 #include <net/vnet.h>
52 #include <net/pfvar.h>
53
/* Debug printf, gated on the vnet's pf debug level.  NB: bare 'if' with
 * no else — callers must not use this as the sole body of an outer if. */
#define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x

/*
 * Reject the request with EINVAL if any flag outside 'oklist' is set.
 * Only bits within PFR_FLAG_ALLMASK are considered.
 */
#define ACCEPT_FLAGS(flags, oklist)             \
	do {                                    \
		if ((flags & ~(oklist)) &       \
		    PFR_FLAG_ALLMASK)           \
			return (EINVAL);        \
	} while (0)

/* Initialize a sockaddr_in (len/family/address) for an IPv4 address. */
#define FILLIN_SIN(sin, addr)                   \
	do {                                    \
		(sin).sin_len = sizeof(sin);    \
		(sin).sin_family = AF_INET;     \
		(sin).sin_addr = (addr);        \
	} while (0)

/* Initialize a sockaddr_in6 (len/family/address) for an IPv6 address. */
#define FILLIN_SIN6(sin6, addr)                 \
	do {                                    \
		(sin6).sin6_len = sizeof(sin6); \
		(sin6).sin6_family = AF_INET6;  \
		(sin6).sin6_addr = (addr);      \
	} while (0)

/* Classic typed swap through a temporary. */
#define SWAP(type, a1, a2)                      \
	do {                                    \
		type tmp = a1;                  \
		a1 = a2;                        \
		a2 = tmp;                       \
	} while (0)

/* Pointer to the pf_addr embedded in a sockaddr_union, per family. */
#define SUNION2PF(su, af) (((af)==AF_INET) ?    \
    (struct pf_addr *)&(su)->sin.sin_addr :     \
    (struct pf_addr *)&(su)->sin6.sin6_addr)

/* Address width in bits for the given address family. */
#define AF_BITS(af)             (((af)==AF_INET)?32:128)
/* True when the prefix length denotes a network rather than a host. */
#define ADDR_NETWORK(ad)        ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define KENTRY_NETWORK(ke)      ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
/* True when 'ke' is one of the radix tree's internal root nodes, which
 * must never be treated as a table entry. */
#define KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES            (-1)
#define ENQUEUE_UNMARKED_ONLY   (1)
#define INVERT_NEG_FLAG         (1)
97
/*
 * State passed to pfr_walktree() when traversing a table's IPv4/IPv6
 * radix trees.  pfrw_op selects the action applied to each entry; the
 * union carries the per-operation destination.
 */
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,		/* clear the mark on every entry */
		PFRW_SWEEP,		/* enqueue entries left unmarked */
		PFRW_ENQUEUE,		/* enqueue every entry */
		PFRW_GET_ADDRS,		/* copy out pfr_addr records */
		PFRW_GET_ASTATS,	/* copy out pfr_astats records */
		PFRW_POOL_GET,		/* select the n-th suitable entry */
		PFRW_DYNADDR_UPDATE	/* refresh a dynamic address */
	}        pfrw_op;
	union {
		struct pfr_addr         *pfrw1_addr;
		struct pfr_astats       *pfrw1_astats;
		struct pfr_kentryworkq  *pfrw1_workq;
		struct pfr_kentry       *pfrw1_kentry;
		struct pfi_dynaddr      *pfrw1_dyn;
	}        pfrw_1;
	int      pfrw_free;	/* remaining room; doubles as a counter */
	int      pfrw_flags;	/* table flags, for backward compat copyout */
};
/* Convenience accessors for the union members above. */
#define pfrw_addr       pfrw_1.pfrw1_addr
#define pfrw_astats     pfrw_1.pfrw1_astats
#define pfrw_workq      pfrw_1.pfrw1_workq
#define pfrw_kentry     pfrw_1.pfrw1_kentry
#define pfrw_dyn        pfrw_1.pfrw1_dyn
#define pfrw_cnt        pfrw_free
/* Record the error code and jump to the function's cleanup label. */
#define senderr(e)      do { rv = (e); goto _bad; } while (0)

static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
/* Per-vnet UMA zone backing every table entry (struct pfr_kentry). */
VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_z);
#define V_pfr_kentry_z          VNET(pfr_kentry_z)

/* All-ones address, used as a full-width netmask. */
static struct pf_addr    pfr_ffaddr = {
	.addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
};
134
/* Forward declarations for the file-local helpers defined below. */
static void              pfr_copyout_astats(struct pfr_astats *,
                            const struct pfr_kentry *,
                            const struct pfr_walktree *);
static void              pfr_copyout_addr(struct pfr_addr *,
                            const struct pfr_kentry *ke);
static int               pfr_validate_addr(struct pfr_addr *);
static void              pfr_enqueue_addrs(struct pfr_ktable *,
                            struct pfr_kentryworkq *, int *, int);
static void              pfr_mark_addrs(struct pfr_ktable *);
static struct pfr_kentry
                        *pfr_lookup_addr(struct pfr_ktable *,
                            struct pfr_addr *, int);
static bool              pfr_create_kentry_counter(struct pfr_kcounters *,
                            int, int);
static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *);
static void              pfr_destroy_kentries(struct pfr_kentryworkq *);
static void              pfr_destroy_kentry_counter(struct pfr_kcounters *,
                            int, int);
static void              pfr_destroy_kentry(struct pfr_kentry *);
static void              pfr_insert_kentries(struct pfr_ktable *,
                            struct pfr_kentryworkq *, long);
static void              pfr_remove_kentries(struct pfr_ktable *,
                            struct pfr_kentryworkq *);
static void              pfr_clstats_kentries(struct pfr_kentryworkq *, long,
                            int);
static void              pfr_reset_feedback(struct pfr_addr *, int);
static void              pfr_prepare_network(union sockaddr_union *, int, int);
static int               pfr_route_kentry(struct pfr_ktable *,
                            struct pfr_kentry *);
static int               pfr_unroute_kentry(struct pfr_ktable *,
                            struct pfr_kentry *);
static int               pfr_walktree(struct radix_node *, void *);
static int               pfr_validate_table(struct pfr_table *, int, int);
static int               pfr_fix_anchor(char *);
static void              pfr_commit_ktable(struct pfr_ktable *, long);
static void              pfr_insert_ktables(struct pfr_ktableworkq *);
static void              pfr_insert_ktable(struct pfr_ktable *);
static void              pfr_setflags_ktables(struct pfr_ktableworkq *);
static void              pfr_setflags_ktable(struct pfr_ktable *, int);
static void              pfr_clstats_ktables(struct pfr_ktableworkq *, long,
                            int);
static void              pfr_clstats_ktable(struct pfr_ktable *, long, int);
static struct pfr_ktable
                        *pfr_create_ktable(struct pfr_table *, long, int);
static void              pfr_destroy_ktables(struct pfr_ktableworkq *, int);
static void              pfr_destroy_ktable(struct pfr_ktable *, int);
static int               pfr_ktable_compare(struct pfr_ktable *,
                            struct pfr_ktable *);
static struct pfr_ktable
                        *pfr_lookup_table(struct pfr_table *);
static void              pfr_clean_node_mask(struct pfr_ktable *,
                            struct pfr_kentryworkq *);
static int               pfr_skip_table(struct pfr_table *,
                            struct pfr_ktable *, int);
static struct pfr_kentry
                        *pfr_kentry_byidx(struct pfr_ktable *, int, int);

static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

/* Per-vnet red-black tree of all kernel tables. */
VNET_DEFINE_STATIC(struct pfr_ktablehead, pfr_ktables);
#define V_pfr_ktables   VNET(pfr_ktables)

/* Zeroed table template; used to create scratch (duplicate-check) tables. */
VNET_DEFINE_STATIC(struct pfr_table, pfr_nulltable);
#define V_pfr_nulltable VNET(pfr_nulltable)

/* Number of tables currently linked into V_pfr_ktables. */
VNET_DEFINE_STATIC(int, pfr_ktable_cnt);
#define V_pfr_ktable_cnt        VNET(pfr_ktable_cnt)
203
/*
 * Per-vnet initialization: create the UMA zone that backs table
 * entries and wire it to pf's PF_LIMIT_TABLE_ENTRIES resource limit.
 */
void
pfr_initialize(void)
{

	V_pfr_kentry_z = uma_zcreate("pf table entries",
	    sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    0);
	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
}
214
/*
 * Per-vnet teardown: destroy the table-entry zone created by
 * pfr_initialize().  All entries must have been freed by now.
 */
void
pfr_cleanup(void)
{

	uma_zdestroy(V_pfr_kentry_z);
}
221
/*
 * Flush every address from table 'tbl'.  *ndel receives the number of
 * entries removed.  With PFR_FLAG_DUMMY only the count is computed and
 * the table is left untouched.
 */
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable       *kt;
	struct pfr_kentryworkq   workq;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	/* Constant tables cannot be modified once created. */
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__));
	}
	return (0);
}
246
/*
 * Add 'size' addresses from 'addr' to table 'tbl'.  Addresses already
 * present and duplicates within the input are skipped.  With
 * PFR_FLAG_FEEDBACK a per-address result code is written back into
 * 'addr'; with PFR_FLAG_DUMMY nothing is committed.  *nadd receives
 * the number of entries (that would be) added.
 */
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable       *kt, *tmpkt;
	struct pfr_kentryworkq   workq;
	struct pfr_kentry       *p, *q;
	struct pfr_addr         *ad;
	int                      i, rv, xadd = 0;
	long                     tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/* Scratch table, used only to detect duplicates inside the input. */
	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);		/* in the table? */
		q = pfr_lookup_addr(tmpkt, ad, 1);	/* seen in this batch? */
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad->pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else
				ad->pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(ad);
			if (p == NULL)
				senderr(ENOMEM);
			/* Route into the scratch table to catch later dups. */
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad->pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
	}
	/* Detach the new entries from the scratch tree before committing. */
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_insert_kentries(kt, &workq, tzero);
	else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
317
/*
 * Delete 'size' addresses listed in 'addr' from table 'tbl'.  *ndel
 * receives the number of entries (that would be) removed.
 * PFR_FLAG_FEEDBACK and PFR_FLAG_DUMMY behave as in pfr_add_addrs().
 */
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable       *kt;
	struct pfr_kentryworkq   workq;
	struct pfr_kentry       *p;
	struct pfr_addr         *ad;
	int                      i, rv, xdel = 0, log = 1;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * there are two algorithms to choose from here.
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n'
	 * one is O(n*LOG(N)) and is better for small 'n'
	 *
	 * following code try to decide which one is best.
	 */
	/* log ends up as roughly log2(pfrkt_cnt) + 1. */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan: clear the mark on every entry */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete, clearing only their marks */
		for (i = 0, ad = addr; i < size; i++, ad++) {
			if (pfr_validate_addr(ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad->pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				/* already queued: listed twice in the input */
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else
				ad->pfra_fback = PFR_FB_DELETED;
		}
		/* The mark prevents enqueueing the same entry twice. */
		if (p != NULL && p->pfrke_not == ad->pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_remove_kentries(kt, &workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}
396
397 int
398 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
399     int *size2, int *nadd, int *ndel, int *nchange, int flags,
400     u_int32_t ignore_pfrt_flags)
401 {
402         struct pfr_ktable       *kt, *tmpkt;
403         struct pfr_kentryworkq   addq, delq, changeq;
404         struct pfr_kentry       *p, *q;
405         struct pfr_addr          ad;
406         int                      i, rv, xadd = 0, xdel = 0, xchange = 0;
407         long                     tzero = time_second;
408
409         PF_RULES_WASSERT();
410
411         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
412         if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
413             PFR_FLAG_USERIOCTL))
414                 return (EINVAL);
415         kt = pfr_lookup_table(tbl);
416         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
417                 return (ESRCH);
418         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
419                 return (EPERM);
420         tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
421         if (tmpkt == NULL)
422                 return (ENOMEM);
423         pfr_mark_addrs(kt);
424         SLIST_INIT(&addq);
425         SLIST_INIT(&delq);
426         SLIST_INIT(&changeq);
427         for (i = 0; i < size; i++) {
428                 /*
429                  * XXXGL: undertand pf_if usage of this function
430                  * and make ad a moving pointer
431                  */
432                 bcopy(addr + i, &ad, sizeof(ad));
433                 if (pfr_validate_addr(&ad))
434                         senderr(EINVAL);
435                 ad.pfra_fback = PFR_FB_NONE;
436                 p = pfr_lookup_addr(kt, &ad, 1);
437                 if (p != NULL) {
438                         if (p->pfrke_mark) {
439                                 ad.pfra_fback = PFR_FB_DUPLICATE;
440                                 goto _skip;
441                         }
442                         p->pfrke_mark = 1;
443                         if (p->pfrke_not != ad.pfra_not) {
444                                 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
445                                 ad.pfra_fback = PFR_FB_CHANGED;
446                                 xchange++;
447                         }
448                 } else {
449                         q = pfr_lookup_addr(tmpkt, &ad, 1);
450                         if (q != NULL) {
451                                 ad.pfra_fback = PFR_FB_DUPLICATE;
452                                 goto _skip;
453                         }
454                         p = pfr_create_kentry(&ad);
455                         if (p == NULL)
456                                 senderr(ENOMEM);
457                         if (pfr_route_kentry(tmpkt, p)) {
458                                 pfr_destroy_kentry(p);
459                                 ad.pfra_fback = PFR_FB_NONE;
460                         } else {
461                                 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
462                                 ad.pfra_fback = PFR_FB_ADDED;
463                                 xadd++;
464                         }
465                 }
466 _skip:
467                 if (flags & PFR_FLAG_FEEDBACK)
468                         bcopy(&ad, addr + i, sizeof(ad));
469         }
470         pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
471         if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
472                 if (*size2 < size+xdel) {
473                         *size2 = size+xdel;
474                         senderr(0);
475                 }
476                 i = 0;
477                 SLIST_FOREACH(p, &delq, pfrke_workq) {
478                         pfr_copyout_addr(&ad, p);
479                         ad.pfra_fback = PFR_FB_DELETED;
480                         bcopy(&ad, addr + size + i, sizeof(ad));
481                         i++;
482                 }
483         }
484         pfr_clean_node_mask(tmpkt, &addq);
485         if (!(flags & PFR_FLAG_DUMMY)) {
486                 pfr_insert_kentries(kt, &addq, tzero);
487                 pfr_remove_kentries(kt, &delq);
488                 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
489         } else
490                 pfr_destroy_kentries(&addq);
491         if (nadd != NULL)
492                 *nadd = xadd;
493         if (ndel != NULL)
494                 *ndel = xdel;
495         if (nchange != NULL)
496                 *nchange = xchange;
497         if ((flags & PFR_FLAG_FEEDBACK) && size2)
498                 *size2 = size+xdel;
499         pfr_destroy_ktable(tmpkt, 0);
500         return (0);
501 _bad:
502         pfr_clean_node_mask(tmpkt, &addq);
503         pfr_destroy_kentries(&addq);
504         if (flags & PFR_FLAG_FEEDBACK)
505                 pfr_reset_feedback(addr, size);
506         pfr_destroy_ktable(tmpkt, 0);
507         return (rv);
508 }
509
/*
 * Test which of the given host addresses match table 'tbl'.  Each
 * address' pfra_fback is set to MATCH, NOTMATCH or NONE; with
 * PFR_FLAG_REPLACE the matching table entry is copied back over the
 * input slot.  *nmatch receives the number of positive matches.
 * Network (prefix) addresses are rejected with EINVAL.
 */
int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
	int *nmatch, int flags)
{
	struct pfr_ktable       *kt;
	struct pfr_kentry       *p;
	struct pfr_addr         *ad;
	int                      i, xmatch = 0;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			return (EINVAL);
		if (ADDR_NETWORK(ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			/* NB: copyout handles p == NULL by zeroing *ad. */
			pfr_copyout_addr(ad, p);
		ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		/* Negated entries count as non-matches. */
		if (p != NULL && !p->pfrke_not)
			xmatch++;
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}
545
/*
 * Copy all addresses of table 'tbl' into the caller-supplied array.
 * On entry *size is the array capacity; if the table holds more
 * entries, *size is set to the required count and nothing is copied.
 * On success *size is set to the number of records written.
 */
int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
	int flags)
{
	struct pfr_ktable       *kt;
	struct pfr_walktree      w;
	int                      rv;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		/* Caller's buffer is too small: just report the size. */
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	/* Walk both address families; the walker fills 'addr' in order. */
	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w);
	if (rv)
		return (rv);

	/* pfrw_free must hit exactly zero if pfrkt_cnt was accurate. */
	KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
	    w.pfrw_free));

	*size = kt->pfrkt_cnt;
	return (0);
}
584
/*
 * Copy all addresses of table 'tbl' together with their statistics
 * into the caller-supplied array.  Buffer sizing via *size works as
 * in pfr_get_addrs().  PFR_FLAG_CLSTATS (clear after read) is
 * currently not accepted — see the note below.
 */
int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
	int flags)
{
	struct pfr_ktable       *kt;
	struct pfr_walktree      w;
	struct pfr_kentryworkq   workq;
	int                      rv;
	long                     tzero = time_second;

	PF_RULES_RASSERT();

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		/* Caller's buffer is too small: just report the size. */
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	/*
	 * Flags below are for backward compatibility. It was possible to have
	 * a table without per-entry counters. Now they are always allocated,
	 * we just discard data when reading it if table is not configured to
	 * have counters.
	 */
	w.pfrw_flags = kt->pfrkt_flags;
	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w);
	/* Dead code while CLSTATS is rejected by ACCEPT_FLAGS above. */
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
639
/*
 * Zero the statistics of the listed addresses in table 'tbl'.  *nzero
 * receives the number of entries affected.  PFR_FLAG_FEEDBACK and
 * PFR_FLAG_DUMMY behave as in pfr_add_addrs().
 */
int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable       *kt;
	struct pfr_kentryworkq   workq;
	struct pfr_kentry       *p;
	struct pfr_addr         *ad;
	int                      i, rv, xzero = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad->pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_clstats_kentries(&workq, 0, 0);
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}
683
/*
 * Sanity-check a pfr_addr coming from userland: known address family,
 * prefix length within range, no stray host bits beyond the prefix,
 * boolean negation flag, and a clean feedback field.  Returns 0 when
 * valid, -1 otherwise.
 */
static int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	/*
	 * The address union is the first member of struct pfr_addr, so
	 * byte-indexing the struct walks the address bytes directly.
	 * First check the partial byte at the prefix boundary ...
	 */
	if (ad->pfra_net < 128 &&
		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
			return (-1);
	/* ... then every byte after it, up to the end of the union. */
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	/* Feedback is output-only; it must arrive zeroed. */
	if (ad->pfra_fback)
		return (-1);
	return (0);
}
717
/*
 * Collect table entries on 'workq': all of them, or — when 'sweep' is
 * set — only those left unmarked by a prior pfr_mark_addrs().  *naddr,
 * if not NULL, receives the number of entries enqueued.
 */
static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree     w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}
739
/*
 * Clear the per-entry mark on every address in both radix trees, in
 * preparation for a mark-and-sweep style update.
 */
static void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree     w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}
752
753
/*
 * Look up address 'ad' in table 'kt' and return the matching kentry,
 * or NULL.  Network addresses are looked up with their prefix mask via
 * rn_lookup(); host addresses use rn_match().  With 'exact' set, a
 * host query will not accept a covering network entry as a match.
 */
static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_head	*head = NULL;
	struct pfr_kentry	*ke;

	PF_RULES_ASSERT();

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = &kt->pfrkt_ip4->rh;
	} else if ( ad->pfra_af == AF_INET6 ) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = &kt->pfrkt_ip6->rh;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		/* internal radix root nodes are not real table entries */
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}
785
786 static bool
787 pfr_create_kentry_counter(struct pfr_kcounters *kc, int pfr_dir, int pfr_op)
788 {
789         kc->pfrkc_packets[pfr_dir][pfr_op] = counter_u64_alloc(M_NOWAIT);
790         if (! kc->pfrkc_packets[pfr_dir][pfr_op])
791                 return (false);
792
793         kc->pfrkc_bytes[pfr_dir][pfr_op] = counter_u64_alloc(M_NOWAIT);
794         if (! kc->pfrkc_bytes[pfr_dir][pfr_op]) {
795                 /* Previous allocation will be freed through
796                  * pfr_destroy_kentry() */
797                 return (false);
798         }
799
800         kc->pfrkc_tzero = 0;
801
802         return (true);
803 }
804
805 static struct pfr_kentry *
806 pfr_create_kentry(struct pfr_addr *ad)
807 {
808         struct pfr_kentry       *ke;
809         int pfr_dir, pfr_op;
810
811         ke =  uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
812         if (ke == NULL)
813                 return (NULL);
814
815         if (ad->pfra_af == AF_INET)
816                 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
817         else if (ad->pfra_af == AF_INET6)
818                 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
819         ke->pfrke_af = ad->pfra_af;
820         ke->pfrke_net = ad->pfra_net;
821         ke->pfrke_not = ad->pfra_not;
822         for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++)
823                 for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++) {
824                         if (! pfr_create_kentry_counter(&ke->pfrke_counters,
825                             pfr_dir, pfr_op)) {
826                                 pfr_destroy_kentry(ke);
827                                 return (NULL);
828                         }
829                 }
830         return (ke);
831 }
832
833 static void
834 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
835 {
836         struct pfr_kentry       *p, *q;
837
838         for (p = SLIST_FIRST(workq); p != NULL; p = q) {
839                 q = SLIST_NEXT(p, pfrke_workq);
840                 pfr_destroy_kentry(p);
841         }
842 }
843
/* Free the packet and byte counters of one (direction, op) slot. */
static void
pfr_destroy_kentry_counter(struct pfr_kcounters *kc, int pfr_dir, int pfr_op)
{
	counter_u64_free(kc->pfrkc_packets[pfr_dir][pfr_op]);
	counter_u64_free(kc->pfrkc_bytes[pfr_dir][pfr_op]);
}
850
/*
 * Release a table entry: free all of its per-direction/per-op counters,
 * then return the entry itself to the UMA zone.
 */
static void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	int pfr_dir, pfr_op;

	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++)
		for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++)
			pfr_destroy_kentry_counter(&ke->pfrke_counters,
			    pfr_dir, pfr_op);

	uma_zfree(V_pfr_kentry_z, ke);
}
863
/*
 * Route every entry on 'workq' into the table's radix trees and stamp
 * it with 'tzero'.  The loop stops at the first routing failure; only
 * the entries routed so far are added to the table's address count.
 */
static void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_counters.pfrkc_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}
883
884 int
885 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
886 {
887         struct pfr_kentry       *p;
888         int                      rv;
889
890         p = pfr_lookup_addr(kt, ad, 1);
891         if (p != NULL)
892                 return (0);
893         p = pfr_create_kentry(ad);
894         if (p == NULL)
895                 return (ENOMEM);
896
897         rv = pfr_route_kentry(kt, p);
898         if (rv)
899                 return (rv);
900
901         p->pfrke_counters.pfrkc_tzero = tzero;
902         kt->pfrkt_cnt++;
903
904         return (0);
905 }
906
907 static void
908 pfr_remove_kentries(struct pfr_ktable *kt,
909     struct pfr_kentryworkq *workq)
910 {
911         struct pfr_kentry       *p;
912         int                      n = 0;
913
914         SLIST_FOREACH(p, workq, pfrke_workq) {
915                 pfr_unroute_kentry(kt, p);
916                 n++;
917         }
918         kt->pfrkt_cnt -= n;
919         pfr_destroy_kentries(workq);
920 }
921
/*
 * Unlink every entry on 'workq' from the table's radix trees without
 * destroying the entries or touching the table's address count.
 */
static void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}
931
/*
 * Reset the statistics of every entry on 'workq': zero all packet and
 * byte counters and restart the counter epoch at 'tzero'.  With
 * 'negchange' set, each entry's negation flag is toggled as well.
 */
static void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 pfr_dir, pfr_op;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
			for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++) {
				counter_u64_zero(p->pfrke_counters.
					pfrkc_packets[pfr_dir][pfr_op]);
				counter_u64_zero(p->pfrke_counters.
					pfrkc_bytes[pfr_dir][pfr_op]);
			}
		}
		p->pfrke_counters.pfrkc_tzero = tzero;
	}
}
952
953 static void
954 pfr_reset_feedback(struct pfr_addr *addr, int size)
955 {
956         struct pfr_addr *ad;
957         int             i;
958
959         for (i = 0, ad = addr; i < size; i++, ad++)
960                 ad->pfra_fback = PFR_FB_NONE;
961 }
962
963 static void
964 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
965 {
966         int     i;
967
968         bzero(sa, sizeof(*sa));
969         if (af == AF_INET) {
970                 sa->sin.sin_len = sizeof(sa->sin);
971                 sa->sin.sin_family = AF_INET;
972                 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
973         } else if (af == AF_INET6) {
974                 sa->sin6.sin6_len = sizeof(sa->sin6);
975                 sa->sin6.sin6_family = AF_INET6;
976                 for (i = 0; i < 4; i++) {
977                         if (net <= 32) {
978                                 sa->sin6.sin6_addr.s6_addr32[i] =
979                                     net ? htonl(-1 << (32-net)) : 0;
980                                 break;
981                         }
982                         sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
983                         net -= 32;
984                 }
985         }
986 }
987
/*
 * Link entry 'ke' into the proper (v4 or v6) radix tree of table 'kt',
 * using the prefix mask for network entries and a host route otherwise.
 * Returns 0 on success, -1 if rn_addroute() rejected the node.
 */
static int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_head	*head = NULL;

	PF_RULES_WASSERT();

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = &kt->pfrkt_ip4->rh;
	else if (ke->pfrke_af == AF_INET6)
		head = &kt->pfrkt_ip6->rh;

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);

	return (rn == NULL ? -1 : 0);
}
1011
/*
 * Remove entry 'ke' from the proper (v4 or v6) radix tree of table
 * 'kt'.  Returns 0 on success, -1 (with a console warning) if
 * rn_delete() could not find the node.
 */
static int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_head	*head = NULL;

	if (ke->pfrke_af == AF_INET)
		head = &kt->pfrkt_ip4->rh;
	else if (ke->pfrke_af == AF_INET6)
		head = &kt->pfrkt_ip6->rh;

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}
1036
1037 static void
1038 pfr_copyout_addr(struct pfr_addr *ad, const struct pfr_kentry *ke)
1039 {
1040         bzero(ad, sizeof(*ad));
1041         if (ke == NULL)
1042                 return;
1043         ad->pfra_af = ke->pfrke_af;
1044         ad->pfra_net = ke->pfrke_net;
1045         ad->pfra_not = ke->pfrke_not;
1046         if (ad->pfra_af == AF_INET)
1047                 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1048         else if (ad->pfra_af == AF_INET6)
1049                 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1050 }
1051
/*
 * Fill the user-visible pfr_astats record 'as' from entry 'ke'.  When
 * the walk context says the table keeps no counters, the statistics
 * fields are zeroed and PFR_FB_NOCOUNT is reported in the feedback
 * field instead.
 */
static void
pfr_copyout_astats(struct pfr_astats *as, const struct pfr_kentry *ke,
    const struct pfr_walktree *w)
{
	int dir, op;
	const struct pfr_kcounters *kc = &ke->pfrke_counters;

	pfr_copyout_addr(&as->pfras_a, ke);
	as->pfras_tzero = kc->pfrkc_tzero;

	if (! (w->pfrw_flags & PFR_TFLAG_COUNTERS)) {
		bzero(as->pfras_packets, sizeof(as->pfras_packets));
		bzero(as->pfras_bytes, sizeof(as->pfras_bytes));
		as->pfras_a.pfra_fback = PFR_FB_NOCOUNT;
		return;
	}

	for (dir = 0; dir < PFR_DIR_MAX; dir ++) {
		for (op = 0; op < PFR_OP_ADDR_MAX; op ++) {
			as->pfras_packets[dir][op] =
			    counter_u64_fetch(kc->pfrkc_packets[dir][op]);
			as->pfras_bytes[dir][op] =
			    counter_u64_fetch(kc->pfrkc_bytes[dir][op]);
		}
	}
}
1078
/*
 * Radix-tree walk callback; dispatches on the operation stored in the
 * pfr_walktree argument.  Returning non-zero aborts the walk early.
 */
static int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		/* reset mark for a later SWEEP pass */
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		/* collect only entries not re-marked since PFRW_MARK */
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		/* copy out plain addresses while output room remains */
		if (w->pfrw_free-- > 0) {
			pfr_copyout_addr(w->pfrw_addr, ke);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		/* copy out address + statistics records */
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_astats(&as, ke, w);

			bcopy(&as, w->pfrw_astats, sizeof(as));
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		/* stop at the pfrw_cnt'th non-negative entry */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
	    {
		union sockaddr_union	pfr_mask;

		/*
		 * Record only the first v4 and first v6 entry seen as
		 * the dynamic address/mask pair; later entries only
		 * bump the per-family count.
		 */
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa,
			    AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask,
			    AF_INET);
		} else if (ke->pfrke_af == AF_INET6){
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa,
			    AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask,
			    AF_INET6);
		}
		break;
	    }
	}
	return (0);
}
1147
/*
 * Deactivate all active, non-reserved tables matching 'filter'.  The
 * candidates are queued with their ACTIVE flag scheduled for removal
 * and committed through pfr_setflags_ktables() unless PFR_FLAG_DUMMY
 * is set.  '*ndel' returns the number of tables affected.
 */
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		/* never delete tables in the reserved anchor */
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1179
/*
 * Create the 'size' tables described by the array 'tbl'.  Genuinely
 * new tables are collected on 'addq'; existing but inactive tables
 * being re-activated go on 'changeq'.  Anchored tables get a root
 * table, found or created on demand.  Under PFR_FLAG_DUMMY nothing is
 * committed.  '*nadd' returns the number of tables added or changed.
 * On validation/allocation failure everything queued so far is torn
 * down (via the senderr/_bad path) and the error is returned.
 */
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			/* skip duplicates within this request */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q)) {
					pfr_destroy_ktable(p, 0);
					goto _skip;
				}
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			/* a root may already be queued on addq */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* existing inactive table: re-activate it */
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
	} else
		 pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}
1256
/*
 * Deactivate the 'size' tables named in 'tbl'.  Matching active tables
 * are queued (duplicates in the request are skipped) and their ACTIVE
 * flag removed via pfr_setflags_ktables() unless PFR_FLAG_DUMMY is
 * set.  '*ndel' returns the number of tables affected.
 */
int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* skip duplicates within this request */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1290
/*
 * Copy out the pfr_table record of every table matching 'filter'.
 * If the caller's buffer ('*size' entries) is too small, only the
 * required count is reported back in '*size' and nothing is copied.
 */
int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
	}

	/* the walk must have produced exactly the counted tables */
	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

	*size = nn;
	return (0);
}
1323
/*
 * Copy out a pfr_tstats record (table id, per-dir/per-op counters,
 * match/nomatch counts, refcounts) for every table matching 'filter'.
 * If the caller's buffer is too small only the required count is
 * reported in '*size'.  With PFR_FLAG_CLSTATS the visited tables'
 * statistics would be cleared afterwards, but that flag is currently
 * rejected by ACCEPT_FLAGS (see XXX below).
 */
int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	long			 tzero = time_second;
	int			 pfr_dir, pfr_op;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t,
		    sizeof(struct pfr_table));
		for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
			for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
				tbl->pfrts_packets[pfr_dir][pfr_op] =
				    counter_u64_fetch(
					p->pfrkt_packets[pfr_dir][pfr_op]);
				tbl->pfrts_bytes[pfr_dir][pfr_op] =
				    counter_u64_fetch(
					p->pfrkt_bytes[pfr_dir][pfr_op]);
			}
		}
		tbl->pfrts_match = counter_u64_fetch(p->pfrkt_match);
		tbl->pfrts_nomatch = counter_u64_fetch(p->pfrkt_nomatch);
		tbl->pfrts_tzero = p->pfrkt_tzero;
		tbl->pfrts_cnt = p->pfrkt_cnt;
		for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++)
			tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op];
		tbl++;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);

	/* the walk must have produced exactly the counted tables */
	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

	*size = nn;
	return (0);
}
1381
/*
 * Clear the statistics of the 'size' tables named in 'tbl'.  Existing
 * tables are queued and zeroed via pfr_clstats_ktables() unless
 * PFR_FLAG_DUMMY is set; PFR_FLAG_ADDRSTOO extends the clearing to the
 * tables' addresses.  '*nzero' returns the number of tables matched.
 */
int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}
1408
/*
 * Set/clear user flags on the 'size' tables named in 'tbl'.  Both flag
 * masks must stay within PFR_TFLAG_USRMASK and be disjoint.  Tables
 * whose flags actually change are queued and committed through
 * pfr_setflags_ktables() unless PFR_FLAG_DUMMY is set.  Clearing
 * PERSIST on an unreferenced persistent table counts as a deletion
 * ('*ndel'); every other change counts in '*nchange'.
 */
int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			/* no-op changes and request duplicates are skipped */
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1456
/*
 * Open a table transaction on the ruleset named by 'trs': discard any
 * INACTIVE tables left over from a previous transaction, mark the
 * ruleset open and hand back a fresh ticket.  '*ndel' returns the
 * number of stale inactive tables cleared.
 */
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		/* dummy run: drop the ruleset if we just created it */
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1489
/*
 * Define one table (and, with PFR_FLAG_ADDRSTOO, its addresses) inside
 * an open transaction identified by 'ticket'.  The addresses are loaded
 * into a private shadow ktable that replaces any previous shadow of the
 * same table; the real table (plus a root table for anchored tables) is
 * created but not populated until commit.  Under PFR_FLAG_DUMMY all
 * intermediate objects are torn down again.  '*nadd' returns the number
 * of tables defined, '*naddr' the number of addresses loaded.
 */
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	/* load the addresses into a detached shadow table */
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		/* duplicates within the request are silently dropped */
		if (pfr_lookup_addr(shadow, ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		/* replace any shadow left by an earlier define */
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}
1589
1590 int
1591 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1592 {
1593         struct pfr_ktableworkq   workq;
1594         struct pfr_ktable       *p;
1595         struct pf_ruleset       *rs;
1596         int                      xdel = 0;
1597
1598         PF_RULES_WASSERT();
1599
1600         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1601         rs = pf_find_ruleset(trs->pfrt_anchor);
1602         if (rs == NULL || !rs->topen || ticket != rs->tticket)
1603                 return (0);
1604         SLIST_INIT(&workq);
1605         RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1606                 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1607                     pfr_skip_table(trs, p, 0))
1608                         continue;
1609                 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1610                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1611                 xdel++;
1612         }
1613         if (!(flags & PFR_FLAG_DUMMY)) {
1614                 pfr_setflags_ktables(&workq);
1615                 rs->topen = 0;
1616                 pf_remove_if_empty_ruleset(rs);
1617         }
1618         if (ndel != NULL)
1619                 *ndel = xdel;
1620         return (0);
1621 }
1622
1623 int
1624 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1625     int *nchange, int flags)
1626 {
1627         struct pfr_ktable       *p, *q;
1628         struct pfr_ktableworkq   workq;
1629         struct pf_ruleset       *rs;
1630         int                      xadd = 0, xchange = 0;
1631         long                     tzero = time_second;
1632
1633         PF_RULES_WASSERT();
1634
1635         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1636         rs = pf_find_ruleset(trs->pfrt_anchor);
1637         if (rs == NULL || !rs->topen || ticket != rs->tticket)
1638                 return (EBUSY);
1639
1640         SLIST_INIT(&workq);
1641         RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1642                 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1643                     pfr_skip_table(trs, p, 0))
1644                         continue;
1645                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1646                 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1647                         xchange++;
1648                 else
1649                         xadd++;
1650         }
1651
1652         if (!(flags & PFR_FLAG_DUMMY)) {
1653                 for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
1654                         q = SLIST_NEXT(p, pfrkt_workq);
1655                         pfr_commit_ktable(p, tzero);
1656                 }
1657                 rs->topen = 0;
1658                 pf_remove_if_empty_ruleset(rs);
1659         }
1660         if (nadd != NULL)
1661                 *nadd = xadd;
1662         if (nchange != NULL)
1663                 *nchange = xchange;
1664
1665         return (0);
1666 }
1667
/*
 * Make the shadow (inactive) copy of table kt, built by a preceding
 * table-define transaction, the live contents of kt.  Called from
 * pfr_ina_commit() with the rules write lock held.
 */
static void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	PF_RULES_WASSERT();

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		/* No address payload: only reset stats if kt was inactive. */
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		/* Mark kt's entries; unmarked survivors are deleted below. */
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				/*
				 * Present in both: keep kt's entry q and
				 * discard shadow entry p; queue q for a
				 * stats reset if its negation flag flips.
				 */
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				/* Only in the shadow: move it into kt. */
				p->pfrke_counters.pfrkc_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		/* Adopt the shadow's radix heads and count wholesale. */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	/* User flags come from the shadow, set flags stay with kt. */
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
		& ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}
1728
1729 static int
1730 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1731 {
1732         int i;
1733
1734         if (!tbl->pfrt_name[0])
1735                 return (-1);
1736         if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1737                  return (-1);
1738         if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1739                 return (-1);
1740         for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1741                 if (tbl->pfrt_name[i])
1742                         return (-1);
1743         if (pfr_fix_anchor(tbl->pfrt_anchor))
1744                 return (-1);
1745         if (tbl->pfrt_flags & ~allowedflags)
1746                 return (-1);
1747         return (0);
1748 }
1749
1750 /*
1751  * Rewrite anchors referenced by tables to remove slashes
1752  * and check for validity.
1753  */
static int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	/* Strip leading slashes by shifting the buffer left. */
	if (anchor[0] == '/') {
		char *s = anchor + 1;
		int skip = 1;

		while (*s == '/') {
			s++;
			skip++;
		}
		bcopy(s, anchor, siz - skip);
		memset(anchor + siz - skip, 0, skip);
	}
	/* The buffer must be NUL-terminated and zero-padded throughout. */
	if (anchor[siz - 1] != '\0')
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i] != '\0')
			return (-1);
	return (0);
}
1778
1779 int
1780 pfr_table_count(struct pfr_table *filter, int flags)
1781 {
1782         struct pf_ruleset *rs;
1783
1784         PF_RULES_ASSERT();
1785
1786         if (flags & PFR_FLAG_ALLRSETS)
1787                 return (V_pfr_ktable_cnt);
1788         if (filter->pfrt_anchor[0]) {
1789                 rs = pf_find_ruleset(filter->pfrt_anchor);
1790                 return ((rs != NULL) ? rs->tables : -1);
1791         }
1792         return (pf_main_ruleset.tables);
1793 }
1794
1795 static int
1796 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1797 {
1798         if (flags & PFR_FLAG_ALLRSETS)
1799                 return (0);
1800         if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1801                 return (1);
1802         return (0);
1803 }
1804
1805 static void
1806 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1807 {
1808         struct pfr_ktable       *p;
1809
1810         SLIST_FOREACH(p, workq, pfrkt_workq)
1811                 pfr_insert_ktable(p);
1812 }
1813
1814 static void
1815 pfr_insert_ktable(struct pfr_ktable *kt)
1816 {
1817
1818         PF_RULES_WASSERT();
1819
1820         RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
1821         V_pfr_ktable_cnt++;
1822         if (kt->pfrkt_root != NULL)
1823                 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1824                         pfr_setflags_ktable(kt->pfrkt_root,
1825                             kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1826 }
1827
1828 static void
1829 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1830 {
1831         struct pfr_ktable       *p, *q;
1832
1833         for (p = SLIST_FIRST(workq); p; p = q) {
1834                 q = SLIST_NEXT(p, pfrkt_workq);
1835                 pfr_setflags_ktable(p, p->pfrkt_nflags);
1836         }
1837 }
1838
/*
 * Apply new flags to table kt.  A table that is no longer referenced
 * by a rule or anchor and is not persistent loses ACTIVE; a table with
 * no SETMASK flags left is removed and destroyed outright.
 */
static void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	PF_RULES_WASSERT();

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_REFDANCHOR) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	/* An inactive table keeps no user-settable flags. */
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		/* The table is going away entirely. */
		RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
		/* Dropping the last anchor reference un-marks the root. */
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
					~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		V_pfr_ktable_cnt--;
		return;
	}
	/* Deactivation flushes all addresses from the table. */
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	/* Clearing INACTIVE discards any pending shadow table. */
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}
1873
1874 static void
1875 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1876 {
1877         struct pfr_ktable       *p;
1878
1879         SLIST_FOREACH(p, workq, pfrkt_workq)
1880                 pfr_clstats_ktable(p, tzero, recurse);
1881 }
1882
1883 static void
1884 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1885 {
1886         struct pfr_kentryworkq   addrq;
1887         int                      pfr_dir, pfr_op;
1888
1889         if (recurse) {
1890                 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1891                 pfr_clstats_kentries(&addrq, tzero, 0);
1892         }
1893         for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1894                 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1895                         counter_u64_zero(kt->pfrkt_packets[pfr_dir][pfr_op]);
1896                         counter_u64_zero(kt->pfrkt_bytes[pfr_dir][pfr_op]);
1897                 }
1898         }
1899         counter_u64_zero(kt->pfrkt_match);
1900         counter_u64_zero(kt->pfrkt_nomatch);
1901         kt->pfrkt_tzero = tzero;
1902 }
1903
/*
 * Allocate and initialize a kernel table: the table itself, its
 * per-direction/per-op counters and its per-family radix heads.
 * Optionally attaches the table to its anchor's ruleset.  Returns
 * NULL on allocation failure; partially built state is released via
 * pfr_destroy_ktable().
 */
static struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;
	int			 pfr_dir, pfr_op;

	PF_RULES_WASSERT();

	/* M_ZERO so cleanup of a half-built table is well defined. */
	kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	/* Per-direction/per-op packet and byte counters. */
	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
			kt->pfrkt_packets[pfr_dir][pfr_op] =
			    counter_u64_alloc(M_NOWAIT);
			if (! kt->pfrkt_packets[pfr_dir][pfr_op]) {
				pfr_destroy_ktable(kt, 0);
				return (NULL);
			}
			kt->pfrkt_bytes[pfr_dir][pfr_op] =
			    counter_u64_alloc(M_NOWAIT);
			if (! kt->pfrkt_bytes[pfr_dir][pfr_op]) {
				pfr_destroy_ktable(kt, 0);
				return (NULL);
			}
		}
	}
	kt->pfrkt_match = counter_u64_alloc(M_NOWAIT);
	if (! kt->pfrkt_match) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}

	kt->pfrkt_nomatch = counter_u64_alloc(M_NOWAIT);
	if (! kt->pfrkt_nomatch) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}

	/* One radix tree per address family, keyed on the address bits. */
	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}
1967
1968 static void
1969 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1970 {
1971         struct pfr_ktable       *p, *q;
1972
1973         for (p = SLIST_FIRST(workq); p; p = q) {
1974                 q = SLIST_NEXT(p, pfrkt_workq);
1975                 pfr_destroy_ktable(p, flushaddr);
1976         }
1977 }
1978
/*
 * Free a table and everything it owns: optionally its addresses, its
 * radix heads, its shadow, its ruleset reference and its counters.
 * Also used on partially constructed tables, hence the NULL checks on
 * the radix heads.
 */
static void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;
	int			 pfr_dir, pfr_op;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		rn_detachhead((void **)&kt->pfrkt_ip4);
	if (kt->pfrkt_ip6 != NULL)
		rn_detachhead((void **)&kt->pfrkt_ip6);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	/*
	 * NOTE(review): for a half-built table some counters may still be
	 * NULL; this relies on counter_u64_free() accepting NULL — confirm.
	 */
	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
			counter_u64_free(kt->pfrkt_packets[pfr_dir][pfr_op]);
			counter_u64_free(kt->pfrkt_bytes[pfr_dir][pfr_op]);
		}
	}
	counter_u64_free(kt->pfrkt_match);
	counter_u64_free(kt->pfrkt_nomatch);

	free(kt, M_PFTABLE);
}
2011
2012 static int
2013 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
2014 {
2015         int d;
2016
2017         if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
2018                 return (d);
2019         return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
2020 }
2021
2022 static struct pfr_ktable *
2023 pfr_lookup_table(struct pfr_table *tbl)
2024 {
2025         /* struct pfr_ktable start like a struct pfr_table */
2026         return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
2027             (struct pfr_ktable *)tbl));
2028 }
2029
2030 int
2031 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
2032 {
2033         struct pfr_kentry       *ke = NULL;
2034         int                      match;
2035
2036         PF_RULES_RASSERT();
2037
2038         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2039                 kt = kt->pfrkt_root;
2040         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2041                 return (0);
2042
2043         switch (af) {
2044 #ifdef INET
2045         case AF_INET:
2046             {
2047                 struct sockaddr_in sin;
2048
2049                 bzero(&sin, sizeof(sin));
2050                 sin.sin_len = sizeof(sin);
2051                 sin.sin_family = AF_INET;
2052                 sin.sin_addr.s_addr = a->addr32[0];
2053                 ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
2054                 if (ke && KENTRY_RNF_ROOT(ke))
2055                         ke = NULL;
2056                 break;
2057             }
2058 #endif /* INET */
2059 #ifdef INET6
2060         case AF_INET6:
2061             {
2062                 struct sockaddr_in6 sin6;
2063
2064                 bzero(&sin6, sizeof(sin6));
2065                 sin6.sin6_len = sizeof(sin6);
2066                 sin6.sin6_family = AF_INET6;
2067                 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
2068                 ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
2069                 if (ke && KENTRY_RNF_ROOT(ke))
2070                         ke = NULL;
2071                 break;
2072             }
2073 #endif /* INET6 */
2074         }
2075         match = (ke && !ke->pfrke_not);
2076         if (match)
2077                 counter_u64_add(kt->pfrkt_match, 1);
2078         else
2079                 counter_u64_add(kt->pfrkt_nomatch, 1);
2080         return (match);
2081 }
2082
/*
 * Account a packet of len bytes against table kt for address a.
 * dir_out selects the direction row and op_pass the operation column
 * of the table's counter matrix; notrule says whether the rule using
 * the table expected a non-match.  A disagreement between the lookup
 * result and notrule is accounted under PFR_OP_XPASS.
 */
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	/* Inactive tables defer to their root table, if any. */
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
	    {
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET6 */
	default:
		panic("%s: unknown address family %u", __func__, af);
	}
	/* Lookup disagrees with the rule's expectation: count as XPASS. */
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("pfr_update_stats: assertion failed.\n"));
		op_pass = PFR_OP_XPASS;
	}
	counter_u64_add(kt->pfrkt_packets[dir_out][op_pass], 1);
	counter_u64_add(kt->pfrkt_bytes[dir_out][op_pass], len);
	/* Per-address counters only for tables flagged with COUNTERS. */
	if (ke != NULL && op_pass != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		counter_u64_add(ke->pfrke_counters.
		    pfrkc_packets[dir_out][op_pass], 1);
		counter_u64_add(ke->pfrke_counters.
		    pfrkc_bytes[dir_out][op_pass], len);
	}
}
2144
/*
 * Look up (or create) the table `name' for use by a rule in ruleset rs
 * and take a rule reference on it.  For an anchored ruleset the root
 * (unanchored) table is created as well and linked via pfrkt_root.
 * Returns NULL when a required table could not be allocated.
 */
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	PF_RULES_WASSERT();

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			/* Ensure the root table (empty anchor) exists too. */
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	/* First rule reference marks the table as referenced. */
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}
2182
2183 void
2184 pfr_detach_table(struct pfr_ktable *kt)
2185 {
2186
2187         PF_RULES_WASSERT();
2188         KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
2189             __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));
2190
2191         if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2192                 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2193 }
2194
2195 int
2196 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2197     sa_family_t af)
2198 {
2199         struct pf_addr           *addr, *cur, *mask;
2200         union sockaddr_union     uaddr, umask;
2201         struct pfr_kentry       *ke, *ke2 = NULL;
2202         int                      idx = -1, use_counter = 0;
2203
2204         switch (af) {
2205         case AF_INET:
2206                 uaddr.sin.sin_len = sizeof(struct sockaddr_in);
2207                 uaddr.sin.sin_family = AF_INET;
2208                 break;
2209         case AF_INET6:
2210                 uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
2211                 uaddr.sin6.sin6_family = AF_INET6;
2212                 break;
2213         }
2214         addr = SUNION2PF(&uaddr, af);
2215
2216         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2217                 kt = kt->pfrkt_root;
2218         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2219                 return (-1);
2220
2221         if (pidx != NULL)
2222                 idx = *pidx;
2223         if (counter != NULL && idx >= 0)
2224                 use_counter = 1;
2225         if (idx < 0)
2226                 idx = 0;
2227
2228 _next_block:
2229         ke = pfr_kentry_byidx(kt, idx, af);
2230         if (ke == NULL) {
2231                 counter_u64_add(kt->pfrkt_nomatch, 1);
2232                 return (1);
2233         }
2234         pfr_prepare_network(&umask, af, ke->pfrke_net);
2235         cur = SUNION2PF(&ke->pfrke_sa, af);
2236         mask = SUNION2PF(&umask, af);
2237
2238         if (use_counter) {
2239                 /* is supplied address within block? */
2240                 if (!PF_MATCHA(0, cur, mask, counter, af)) {
2241                         /* no, go to next block in table */
2242                         idx++;
2243                         use_counter = 0;
2244                         goto _next_block;
2245                 }
2246                 PF_ACPY(addr, counter, af);
2247         } else {
2248                 /* use first address of block */
2249                 PF_ACPY(addr, cur, af);
2250         }
2251
2252         if (!KENTRY_NETWORK(ke)) {
2253                 /* this is a single IP address - no possible nested block */
2254                 PF_ACPY(counter, addr, af);
2255                 *pidx = idx;
2256                 counter_u64_add(kt->pfrkt_match, 1);
2257                 return (0);
2258         }
2259         for (;;) {
2260                 /* we don't want to use a nested block */
2261                 switch (af) {
2262                 case AF_INET:
2263                         ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2264                             &kt->pfrkt_ip4->rh);
2265                         break;
2266                 case AF_INET6:
2267                         ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2268                             &kt->pfrkt_ip6->rh);
2269                         break;
2270                 }
2271                 /* no need to check KENTRY_RNF_ROOT() here */
2272                 if (ke2 == ke) {
2273                         /* lookup return the same block - perfect */
2274                         PF_ACPY(counter, addr, af);
2275                         *pidx = idx;
2276                         counter_u64_add(kt->pfrkt_match, 1);
2277                         return (0);
2278                 }
2279
2280                 /* we need to increase the counter past the nested block */
2281                 pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net);
2282                 PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af);
2283                 PF_AINC(addr, af);
2284                 if (!PF_MATCHA(0, cur, mask, addr, af)) {
2285                         /* ok, we reached the end of our main block */
2286                         /* go to next block in table */
2287                         idx++;
2288                         use_counter = 0;
2289                         goto _next_block;
2290                 }
2291         }
2292 }
2293
2294 static struct pfr_kentry *
2295 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2296 {
2297         struct pfr_walktree     w;
2298
2299         bzero(&w, sizeof(w));
2300         w.pfrw_op = PFRW_POOL_GET;
2301         w.pfrw_cnt = idx;
2302
2303         switch (af) {
2304 #ifdef INET
2305         case AF_INET:
2306                 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2307                 return (w.pfrw_kentry);
2308 #endif /* INET */
2309 #ifdef INET6
2310         case AF_INET6:
2311                 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2312                 return (w.pfrw_kentry);
2313 #endif /* INET6 */
2314         default:
2315                 return (NULL);
2316         }
2317 }
2318
2319 void
2320 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2321 {
2322         struct pfr_walktree     w;
2323
2324         bzero(&w, sizeof(w));
2325         w.pfrw_op = PFRW_DYNADDR_UPDATE;
2326         w.pfrw_dyn = dyn;
2327
2328         dyn->pfid_acnt4 = 0;
2329         dyn->pfid_acnt6 = 0;
2330         if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2331                 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2332         if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2333                 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2334 }