1 /*      $OpenBSD: pf_table.c,v 1.68 2006/05/02 10:08:45 dhartmei Exp $  */
2
3 /*
4  * Copyright (c) 2002 Cedric Berger
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  *    - Redistributions of source code must retain the above copyright
12  *      notice, this list of conditions and the following disclaimer.
13  *    - Redistributions in binary form must reproduce the above
14  *      copyright notice, this list of conditions and the following
15  *      disclaimer in the documentation and/or other materials provided
16  *      with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  */
32
33 #ifdef __FreeBSD__
34 #include "opt_inet.h"
35 #include "opt_inet6.h"
36
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39 #endif
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/socket.h>
44 #include <sys/mbuf.h>
45 #include <sys/kernel.h>
46 #include <sys/lock.h>
47 #include <sys/rwlock.h>
48 #ifdef __FreeBSD__
49 #include <sys/malloc.h>
50 #endif
51
52 #include <net/if.h>
53 #include <net/route.h>
54 #include <netinet/in.h>
55 #ifndef __FreeBSD__
56 #include <netinet/ip_ipsp.h>
57 #endif
58
59 #include <net/pfvar.h>
60
61 #define ACCEPT_FLAGS(oklist)                    \
62         do {                                    \
63                 if ((flags & ~(oklist)) &       \
64                     PFR_FLAG_ALLMASK)           \
65                         return (EINVAL);        \
66         } while (0)
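/*
 * Usage sketch (illustrative, not part of the original file): ACCEPT_FLAGS()
 * relies on the caller's local 'flags' argument and rejects any user-visible
 * flag (any bit inside PFR_FLAG_ALLMASK) that is not in its allowed list;
 * kernel-internal bits such as PFR_FLAG_USERIOCTL sit outside the mask and
 * are never rejected.  For example:
 *
 *	// flags == PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK
 *	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
 *	// expands to:
 *	//	if ((flags & ~(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY)) & PFR_FLAG_ALLMASK)
 *	//		return (EINVAL);
 *	// PFR_FLAG_FEEDBACK is not in the allowed list, so EINVAL is returned.
 */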
67
68 #ifdef __FreeBSD__
69 static inline int
70 _copyin(const void *uaddr, void *kaddr, size_t len)
71 {
72         int r;
73
74         PF_UNLOCK();
75         r = copyin(uaddr, kaddr, len);
76         PF_LOCK();
77
78         return (r);
79 }
80
81 static inline int
82 _copyout(const void *uaddr, void *kaddr, size_t len)
83 {
84         int r;
85
86         PF_UNLOCK();
87         r = copyout(uaddr, kaddr, len);
88         PF_LOCK();
89
90         return (r);
91 }
92
93 #define COPYIN(from, to, size)                  \
94         ((flags & PFR_FLAG_USERIOCTL) ?         \
95         _copyin((from), (to), (size)) :         \
96         (bcopy((from), (to), (size)), 0))
97
98 #define COPYOUT(from, to, size)                 \
99         ((flags & PFR_FLAG_USERIOCTL) ?         \
100         _copyout((from), (to), (size)) :        \
101         (bcopy((from), (to), (size)), 0))
102
103 #else
104
105 #define COPYIN(from, to, size)                  \
106         ((flags & PFR_FLAG_USERIOCTL) ?         \
107         copyin((from), (to), (size)) :          \
108         (bcopy((from), (to), (size)), 0))
109
110 #define COPYOUT(from, to, size)                 \
111         ((flags & PFR_FLAG_USERIOCTL) ?         \
112         copyout((from), (to), (size)) :         \
113         (bcopy((from), (to), (size)), 0))
114
115 #endif
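/*
 * Usage sketch (illustrative, not part of the original file): COPYIN/COPYOUT
 * pick the transfer primitive from the caller's 'flags'.  On the ioctl path
 * (PFR_FLAG_USERIOCTL set) the buffer is a userland pointer, so a faulting
 * copyin()/copyout() is used; the FreeBSD wrappers above drop the pf lock
 * around the copy because it may sleep on a page fault.  In-kernel callers
 * pass kernel buffers, so a plain bcopy() suffices and the trailing ", 0"
 * makes the expression report success.  Typical call site:
 *
 *	struct pfr_addr ad;
 *
 *	if (COPYIN(addr + i, &ad, sizeof(ad)))	// only the copyin() path can fail
 *		senderr(EFAULT);
 */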
116
117 #define FILLIN_SIN(sin, addr)                   \
118         do {                                    \
119                 (sin).sin_len = sizeof(sin);    \
120                 (sin).sin_family = AF_INET;     \
121                 (sin).sin_addr = (addr);        \
122         } while (0)
123
124 #define FILLIN_SIN6(sin6, addr)                 \
125         do {                                    \
126                 (sin6).sin6_len = sizeof(sin6); \
127                 (sin6).sin6_family = AF_INET6;  \
128                 (sin6).sin6_addr = (addr);      \
129         } while (0)
130
131 #define SWAP(type, a1, a2)                      \
132         do {                                    \
133                 type tmp = a1;                  \
134                 a1 = a2;                        \
135                 a2 = tmp;                       \
136         } while (0)
137
138 #define SUNION2PF(su, af) (((af)==AF_INET) ?    \
139     (struct pf_addr *)&(su)->sin.sin_addr :     \
140     (struct pf_addr *)&(su)->sin6.sin6_addr)
141
142 #define AF_BITS(af)             (((af)==AF_INET)?32:128)
143 #define ADDR_NETWORK(ad)        ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
144 #define KENTRY_NETWORK(ke)      ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
145 #define KENTRY_RNF_ROOT(ke) \
146                 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
147
148 #define NO_ADDRESSES            (-1)
149 #define ENQUEUE_UNMARKED_ONLY   (1)
150 #define INVERT_NEG_FLAG         (1)
151
152 struct pfr_walktree {
153         enum pfrw_op {
154                 PFRW_MARK,
155                 PFRW_SWEEP,
156                 PFRW_ENQUEUE,
157                 PFRW_GET_ADDRS,
158                 PFRW_GET_ASTATS,
159                 PFRW_POOL_GET,
160                 PFRW_DYNADDR_UPDATE
161         }        pfrw_op;
162         union {
163                 struct pfr_addr         *pfrw1_addr;
164                 struct pfr_astats       *pfrw1_astats;
165                 struct pfr_kentryworkq  *pfrw1_workq;
166                 struct pfr_kentry       *pfrw1_kentry;
167                 struct pfi_dynaddr      *pfrw1_dyn;
168         }        pfrw_1;
169         int      pfrw_free;
170         int      pfrw_flags;
171 };
172 #define pfrw_addr       pfrw_1.pfrw1_addr
173 #define pfrw_astats     pfrw_1.pfrw1_astats
174 #define pfrw_workq      pfrw_1.pfrw1_workq
175 #define pfrw_kentry     pfrw_1.pfrw1_kentry
176 #define pfrw_dyn        pfrw_1.pfrw1_dyn
177 #define pfrw_cnt        pfrw_free
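/*
 * Note (illustrative, not part of the original file): a pfr_walktree describes
 * one pass over a table's radix trees.  pfrw_cnt and pfrw_free alias the same
 * field: "get" operations count it down from the table size, enqueue/sweep
 * operations count matches up.  A dump is set up the way pfr_get_addrs() does
 * it below:
 *
 *	struct pfr_walktree w;
 *
 *	bzero(&w, sizeof(w));
 *	w.pfrw_op = PFRW_GET_ADDRS;
 *	w.pfrw_addr = addr;		// destination array
 *	w.pfrw_free = kt->pfrkt_cnt;	// room for this many entries
 *	w.pfrw_flags = flags;
 */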
178
179 #define senderr(e)      do { rv = (e); goto _bad; } while (0)
180
181 #ifdef __FreeBSD__
182 uma_zone_t               pfr_ktable_pl;
183 uma_zone_t               pfr_kentry_pl;
184 uma_zone_t               pfr_kentry_pl2;
185 #else
186 struct pool              pfr_ktable_pl;
187 struct pool              pfr_kentry_pl;
188 struct pool              pfr_kentry_pl2;
189 #endif
190 struct sockaddr_in       pfr_sin;
191 struct sockaddr_in6      pfr_sin6;
192 union sockaddr_union     pfr_mask;
193 struct pf_addr           pfr_ffaddr;
194
195 void                     pfr_copyout_addr(struct pfr_addr *,
196                             struct pfr_kentry *ke);
197 int                      pfr_validate_addr(struct pfr_addr *);
198 void                     pfr_enqueue_addrs(struct pfr_ktable *,
199                             struct pfr_kentryworkq *, int *, int);
200 void                     pfr_mark_addrs(struct pfr_ktable *);
201 struct pfr_kentry       *pfr_lookup_addr(struct pfr_ktable *,
202                             struct pfr_addr *, int);
203 struct pfr_kentry       *pfr_create_kentry(struct pfr_addr *, int);
204 void                     pfr_destroy_kentries(struct pfr_kentryworkq *);
205 void                     pfr_destroy_kentry(struct pfr_kentry *);
206 void                     pfr_insert_kentries(struct pfr_ktable *,
207                             struct pfr_kentryworkq *, long);
208 void                     pfr_remove_kentries(struct pfr_ktable *,
209                             struct pfr_kentryworkq *);
210 void                     pfr_clstats_kentries(struct pfr_kentryworkq *, long,
211                             int);
212 void                     pfr_reset_feedback(struct pfr_addr *, int, int);
213 void                     pfr_prepare_network(union sockaddr_union *, int, int);
214 int                      pfr_route_kentry(struct pfr_ktable *,
215                             struct pfr_kentry *);
216 int                      pfr_unroute_kentry(struct pfr_ktable *,
217                             struct pfr_kentry *);
218 int                      pfr_walktree(struct radix_node *, void *);
219 int                      pfr_validate_table(struct pfr_table *, int, int);
220 int                      pfr_fix_anchor(char *);
221 void                     pfr_commit_ktable(struct pfr_ktable *, long);
222 void                     pfr_insert_ktables(struct pfr_ktableworkq *);
223 void                     pfr_insert_ktable(struct pfr_ktable *);
224 void                     pfr_setflags_ktables(struct pfr_ktableworkq *);
225 void                     pfr_setflags_ktable(struct pfr_ktable *, int);
226 void                     pfr_clstats_ktables(struct pfr_ktableworkq *, long,
227                             int);
228 void                     pfr_clstats_ktable(struct pfr_ktable *, long, int);
229 struct pfr_ktable       *pfr_create_ktable(struct pfr_table *, long, int);
230 void                     pfr_destroy_ktables(struct pfr_ktableworkq *, int);
231 void                     pfr_destroy_ktable(struct pfr_ktable *, int);
232 int                      pfr_ktable_compare(struct pfr_ktable *,
233                             struct pfr_ktable *);
234 struct pfr_ktable       *pfr_lookup_table(struct pfr_table *);
235 void                     pfr_clean_node_mask(struct pfr_ktable *,
236                             struct pfr_kentryworkq *);
237 int                      pfr_table_count(struct pfr_table *, int);
238 int                      pfr_skip_table(struct pfr_table *,
239                             struct pfr_ktable *, int);
240 struct pfr_kentry       *pfr_kentry_byidx(struct pfr_ktable *, int, int);
241
242 RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
243 RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
244
245 struct pfr_ktablehead    pfr_ktables;
246 struct pfr_table         pfr_nulltable;
247 int                      pfr_ktable_cnt;
248
249 void
250 pfr_initialize(void)
251 {
252 #ifndef __FreeBSD__
253         pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
254             "pfrktable", &pool_allocator_oldnointr);
255         pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
256             "pfrkentry", &pool_allocator_oldnointr);
257         pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
258             "pfrkentry2", NULL);
259 #endif
260
261         pfr_sin.sin_len = sizeof(pfr_sin);
262         pfr_sin.sin_family = AF_INET;
263         pfr_sin6.sin6_len = sizeof(pfr_sin6);
264         pfr_sin6.sin6_family = AF_INET6;
265
266         memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
267 }
268
269 int
270 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
271 {
272         struct pfr_ktable       *kt;
273         struct pfr_kentryworkq   workq;
274         int                      s;
275
276         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
277         if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
278                 return (EINVAL);
279         kt = pfr_lookup_table(tbl);
280         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
281                 return (ESRCH);
282         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
283                 return (EPERM);
284         pfr_enqueue_addrs(kt, &workq, ndel, 0);
285
286         if (!(flags & PFR_FLAG_DUMMY)) {
287                 s = 0;
288                 if (flags & PFR_FLAG_ATOMIC)
289                         s = splsoftnet();
290                 pfr_remove_kentries(kt, &workq);
291                 if (flags & PFR_FLAG_ATOMIC)
292                         splx(s);
293                 if (kt->pfrkt_cnt) {
294                         printf("pfr_clr_addrs: corruption detected (%d).\n",
295                             kt->pfrkt_cnt);
296                         kt->pfrkt_cnt = 0;
297                 }
298         }
299         return (0);
300 }
301
302 int
303 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
304     int *nadd, int flags)
305 {
306         struct pfr_ktable       *kt, *tmpkt;
307         struct pfr_kentryworkq   workq;
308         struct pfr_kentry       *p, *q;
309         struct pfr_addr          ad;
310         int                      i, rv, s = 0, xadd = 0;
311         long                     tzero = time_second;
312
313         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
314         if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
315                 return (EINVAL);
316         kt = pfr_lookup_table(tbl);
317         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
318                 return (ESRCH);
319         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
320                 return (EPERM);
321         tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
322         if (tmpkt == NULL)
323                 return (ENOMEM);
324         SLIST_INIT(&workq);
325         for (i = 0; i < size; i++) {
326                 if (COPYIN(addr+i, &ad, sizeof(ad)))
327                         senderr(EFAULT);
328                 if (pfr_validate_addr(&ad))
329                         senderr(EINVAL);
330                 p = pfr_lookup_addr(kt, &ad, 1);
331                 q = pfr_lookup_addr(tmpkt, &ad, 1);
332                 if (flags & PFR_FLAG_FEEDBACK) {
333                         if (q != NULL)
334                                 ad.pfra_fback = PFR_FB_DUPLICATE;
335                         else if (p == NULL)
336                                 ad.pfra_fback = PFR_FB_ADDED;
337                         else if (p->pfrke_not != ad.pfra_not)
338                                 ad.pfra_fback = PFR_FB_CONFLICT;
339                         else
340                                 ad.pfra_fback = PFR_FB_NONE;
341                 }
342                 if (p == NULL && q == NULL) {
343                         p = pfr_create_kentry(&ad, 0);
344                         if (p == NULL)
345                                 senderr(ENOMEM);
346                         if (pfr_route_kentry(tmpkt, p)) {
347                                 pfr_destroy_kentry(p);
348                                 ad.pfra_fback = PFR_FB_NONE;
349                         } else {
350                                 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
351                                 xadd++;
352                         }
353                 }
354                 if (flags & PFR_FLAG_FEEDBACK) {
355                         if (COPYOUT(&ad, addr+i, sizeof(ad)))
356                                 senderr(EFAULT);
357                 }
358         }
359         pfr_clean_node_mask(tmpkt, &workq);
360         if (!(flags & PFR_FLAG_DUMMY)) {
361                 if (flags & PFR_FLAG_ATOMIC)
362                         s = splsoftnet();
363                 pfr_insert_kentries(kt, &workq, tzero);
364                 if (flags & PFR_FLAG_ATOMIC)
365                         splx(s);
366         } else
367                 pfr_destroy_kentries(&workq);
368         if (nadd != NULL)
369                 *nadd = xadd;
370         pfr_destroy_ktable(tmpkt, 0);
371         return (0);
372 _bad:
373         pfr_clean_node_mask(tmpkt, &workq);
374         pfr_destroy_kentries(&workq);
375         if (flags & PFR_FLAG_FEEDBACK)
376                 pfr_reset_feedback(addr, size, flags);
377         pfr_destroy_ktable(tmpkt, 0);
378         return (rv);
379 }
380
381 int
382 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
383     int *ndel, int flags)
384 {
385         struct pfr_ktable       *kt;
386         struct pfr_kentryworkq   workq;
387         struct pfr_kentry       *p;
388         struct pfr_addr          ad;
389         int                      i, rv, s = 0, xdel = 0, log = 1;
390
391         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
392         if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
393                 return (EINVAL);
394         kt = pfr_lookup_table(tbl);
395         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
396                 return (ESRCH);
397         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
398                 return (EPERM);
399         /*
400          * There are two algorithms to choose from here.
401          * With:
402          *   n: number of addresses to delete
403          *   N: number of addresses in the table
404          *
405          * one is O(N) and is better for large 'n',
406          * the other is O(n*LOG(N)) and is better for small 'n'.
407          *
408          * The following code tries to decide which one is best.
409          */
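        /*
         * Worked example (illustrative, not part of the original file): the
         * loop below leaves 'log' at roughly log2(N) + 2.  For a table of
         * N = 65536 entries it iterates 17 times, so log = 18 and the
         * threshold is 65536/18 = 3640: deleting more than 3640 addresses
         * takes the O(N) full-table mark, fewer takes one lookup per address.
         */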
410         for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
411                 log++;
412         if (size > kt->pfrkt_cnt/log) {
413                 /* full table scan */
414                 pfr_mark_addrs(kt);
415         } else {
416                 /* iterate over addresses to delete */
417                 for (i = 0; i < size; i++) {
418                         if (COPYIN(addr+i, &ad, sizeof(ad)))
419                                 return (EFAULT);
420                         if (pfr_validate_addr(&ad))
421                                 return (EINVAL);
422                         p = pfr_lookup_addr(kt, &ad, 1);
423                         if (p != NULL)
424                                 p->pfrke_mark = 0;
425                 }
426         }
427         SLIST_INIT(&workq);
428         for (i = 0; i < size; i++) {
429                 if (COPYIN(addr+i, &ad, sizeof(ad)))
430                         senderr(EFAULT);
431                 if (pfr_validate_addr(&ad))
432                         senderr(EINVAL);
433                 p = pfr_lookup_addr(kt, &ad, 1);
434                 if (flags & PFR_FLAG_FEEDBACK) {
435                         if (p == NULL)
436                                 ad.pfra_fback = PFR_FB_NONE;
437                         else if (p->pfrke_not != ad.pfra_not)
438                                 ad.pfra_fback = PFR_FB_CONFLICT;
439                         else if (p->pfrke_mark)
440                                 ad.pfra_fback = PFR_FB_DUPLICATE;
441                         else
442                                 ad.pfra_fback = PFR_FB_DELETED;
443                 }
444                 if (p != NULL && p->pfrke_not == ad.pfra_not &&
445                     !p->pfrke_mark) {
446                         p->pfrke_mark = 1;
447                         SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
448                         xdel++;
449                 }
450                 if (flags & PFR_FLAG_FEEDBACK)
451                         if (COPYOUT(&ad, addr+i, sizeof(ad)))
452                                 senderr(EFAULT);
453         }
454         if (!(flags & PFR_FLAG_DUMMY)) {
455                 if (flags & PFR_FLAG_ATOMIC)
456                         s = splsoftnet();
457                 pfr_remove_kentries(kt, &workq);
458                 if (flags & PFR_FLAG_ATOMIC)
459                         splx(s);
460         }
461         if (ndel != NULL)
462                 *ndel = xdel;
463         return (0);
464 _bad:
465         if (flags & PFR_FLAG_FEEDBACK)
466                 pfr_reset_feedback(addr, size, flags);
467         return (rv);
468 }
469
470 int
471 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
472     int *size2, int *nadd, int *ndel, int *nchange, int flags,
473     u_int32_t ignore_pfrt_flags)
474 {
475         struct pfr_ktable       *kt, *tmpkt;
476         struct pfr_kentryworkq   addq, delq, changeq;
477         struct pfr_kentry       *p, *q;
478         struct pfr_addr          ad;
479         int                      i, rv, s = 0, xadd = 0, xdel = 0, xchange = 0;
480         long                     tzero = time_second;
481
482         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
483         if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
484             PFR_FLAG_USERIOCTL))
485                 return (EINVAL);
486         kt = pfr_lookup_table(tbl);
487         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
488                 return (ESRCH);
489         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
490                 return (EPERM);
491         tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
492         if (tmpkt == NULL)
493                 return (ENOMEM);
494         pfr_mark_addrs(kt);
495         SLIST_INIT(&addq);
496         SLIST_INIT(&delq);
497         SLIST_INIT(&changeq);
498         for (i = 0; i < size; i++) {
499                 if (COPYIN(addr+i, &ad, sizeof(ad)))
500                         senderr(EFAULT);
501                 if (pfr_validate_addr(&ad))
502                         senderr(EINVAL);
503                 ad.pfra_fback = PFR_FB_NONE;
504                 p = pfr_lookup_addr(kt, &ad, 1);
505                 if (p != NULL) {
506                         if (p->pfrke_mark) {
507                                 ad.pfra_fback = PFR_FB_DUPLICATE;
508                                 goto _skip;
509                         }
510                         p->pfrke_mark = 1;
511                         if (p->pfrke_not != ad.pfra_not) {
512                                 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
513                                 ad.pfra_fback = PFR_FB_CHANGED;
514                                 xchange++;
515                         }
516                 } else {
517                         q = pfr_lookup_addr(tmpkt, &ad, 1);
518                         if (q != NULL) {
519                                 ad.pfra_fback = PFR_FB_DUPLICATE;
520                                 goto _skip;
521                         }
522                         p = pfr_create_kentry(&ad, 0);
523                         if (p == NULL)
524                                 senderr(ENOMEM);
525                         if (pfr_route_kentry(tmpkt, p)) {
526                                 pfr_destroy_kentry(p);
527                                 ad.pfra_fback = PFR_FB_NONE;
528                         } else {
529                                 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
530                                 ad.pfra_fback = PFR_FB_ADDED;
531                                 xadd++;
532                         }
533                 }
534 _skip:
535                 if (flags & PFR_FLAG_FEEDBACK)
536                         if (COPYOUT(&ad, addr+i, sizeof(ad)))
537                                 senderr(EFAULT);
538         }
539         pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
540         if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
541                 if (*size2 < size+xdel) {
542                         *size2 = size+xdel;
543                         senderr(0);
544                 }
545                 i = 0;
546                 SLIST_FOREACH(p, &delq, pfrke_workq) {
547                         pfr_copyout_addr(&ad, p);
548                         ad.pfra_fback = PFR_FB_DELETED;
549                         if (COPYOUT(&ad, addr+size+i, sizeof(ad)))
550                                 senderr(EFAULT);
551                         i++;
552                 }
553         }
554         pfr_clean_node_mask(tmpkt, &addq);
555         if (!(flags & PFR_FLAG_DUMMY)) {
556                 if (flags & PFR_FLAG_ATOMIC)
557                         s = splsoftnet();
558                 pfr_insert_kentries(kt, &addq, tzero);
559                 pfr_remove_kentries(kt, &delq);
560                 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
561                 if (flags & PFR_FLAG_ATOMIC)
562                         splx(s);
563         } else
564                 pfr_destroy_kentries(&addq);
565         if (nadd != NULL)
566                 *nadd = xadd;
567         if (ndel != NULL)
568                 *ndel = xdel;
569         if (nchange != NULL)
570                 *nchange = xchange;
571         if ((flags & PFR_FLAG_FEEDBACK) && size2)
572                 *size2 = size+xdel;
573         pfr_destroy_ktable(tmpkt, 0);
574         return (0);
575 _bad:
576         pfr_clean_node_mask(tmpkt, &addq);
577         pfr_destroy_kentries(&addq);
578         if (flags & PFR_FLAG_FEEDBACK)
579                 pfr_reset_feedback(addr, size, flags);
580         pfr_destroy_ktable(tmpkt, 0);
581         return (rv);
582 }
583
584 int
585 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
586         int *nmatch, int flags)
587 {
588         struct pfr_ktable       *kt;
589         struct pfr_kentry       *p;
590         struct pfr_addr          ad;
591         int                      i, xmatch = 0;
592
593         ACCEPT_FLAGS(PFR_FLAG_REPLACE);
594         if (pfr_validate_table(tbl, 0, 0))
595                 return (EINVAL);
596         kt = pfr_lookup_table(tbl);
597         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
598                 return (ESRCH);
599
600         for (i = 0; i < size; i++) {
601                 if (COPYIN(addr+i, &ad, sizeof(ad)))
602                         return (EFAULT);
603                 if (pfr_validate_addr(&ad))
604                         return (EINVAL);
605                 if (ADDR_NETWORK(&ad))
606                         return (EINVAL);
607                 p = pfr_lookup_addr(kt, &ad, 0);
608                 if (flags & PFR_FLAG_REPLACE)
609                         pfr_copyout_addr(&ad, p);
610                 ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
611                     (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
612                 if (p != NULL && !p->pfrke_not)
613                         xmatch++;
614                 if (COPYOUT(&ad, addr+i, sizeof(ad)))
615                         return (EFAULT);
616         }
617         if (nmatch != NULL)
618                 *nmatch = xmatch;
619         return (0);
620 }
621
622 int
623 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
624         int flags)
625 {
626         struct pfr_ktable       *kt;
627         struct pfr_walktree      w;
628         int                      rv;
629
630         ACCEPT_FLAGS(0);
631         if (pfr_validate_table(tbl, 0, 0))
632                 return (EINVAL);
633         kt = pfr_lookup_table(tbl);
634         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
635                 return (ESRCH);
636         if (kt->pfrkt_cnt > *size) {
637                 *size = kt->pfrkt_cnt;
638                 return (0);
639         }
640
641         bzero(&w, sizeof(w));
642         w.pfrw_op = PFRW_GET_ADDRS;
643         w.pfrw_addr = addr;
644         w.pfrw_free = kt->pfrkt_cnt;
645         w.pfrw_flags = flags;
646 #ifdef __FreeBSD__
647         rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
648 #else
649         rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
650 #endif
651         if (!rv)
652 #ifdef __FreeBSD__
653                 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, 
654                     &w);
655 #else
656                 rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
657 #endif
658         if (rv)
659                 return (rv);
660
661         if (w.pfrw_free) {
662                 printf("pfr_get_addrs: corruption detected (%d).\n",
663                     w.pfrw_free);
664                 return (ENOTTY);
665         }
666         *size = kt->pfrkt_cnt;
667         return (0);
668 }
669
670 int
671 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
672         int flags)
673 {
674         struct pfr_ktable       *kt;
675         struct pfr_walktree      w;
676         struct pfr_kentryworkq   workq;
677         int                      rv, s = 0;
678         long                     tzero = time_second;
679
680         ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
681         if (pfr_validate_table(tbl, 0, 0))
682                 return (EINVAL);
683         kt = pfr_lookup_table(tbl);
684         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
685                 return (ESRCH);
686         if (kt->pfrkt_cnt > *size) {
687                 *size = kt->pfrkt_cnt;
688                 return (0);
689         }
690
691         bzero(&w, sizeof(w));
692         w.pfrw_op = PFRW_GET_ASTATS;
693         w.pfrw_astats = addr;
694         w.pfrw_free = kt->pfrkt_cnt;
695         w.pfrw_flags = flags;
696         if (flags & PFR_FLAG_ATOMIC)
697                 s = splsoftnet();
698 #ifdef __FreeBSD__
699         rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
700 #else
701         rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
702 #endif
703         if (!rv)
704 #ifdef __FreeBSD__
705                 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, 
706                     &w);
707 #else
708                 rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
709 #endif
710         if (!rv && (flags & PFR_FLAG_CLSTATS)) {
711                 pfr_enqueue_addrs(kt, &workq, NULL, 0);
712                 pfr_clstats_kentries(&workq, tzero, 0);
713         }
714         if (flags & PFR_FLAG_ATOMIC)
715                 splx(s);
716         if (rv)
717                 return (rv);
718
719         if (w.pfrw_free) {
720                 printf("pfr_get_astats: corruption detected (%d).\n",
721                     w.pfrw_free);
722                 return (ENOTTY);
723         }
724         *size = kt->pfrkt_cnt;
725         return (0);
726 }
727
728 int
729 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
730     int *nzero, int flags)
731 {
732         struct pfr_ktable       *kt;
733         struct pfr_kentryworkq   workq;
734         struct pfr_kentry       *p;
735         struct pfr_addr          ad;
736         int                      i, rv, s = 0, xzero = 0;
737
738         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
739         if (pfr_validate_table(tbl, 0, 0))
740                 return (EINVAL);
741         kt = pfr_lookup_table(tbl);
742         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
743                 return (ESRCH);
744         SLIST_INIT(&workq);
745         for (i = 0; i < size; i++) {
746                 if (COPYIN(addr+i, &ad, sizeof(ad)))
747                         senderr(EFAULT);
748                 if (pfr_validate_addr(&ad))
749                         senderr(EINVAL);
750                 p = pfr_lookup_addr(kt, &ad, 1);
751                 if (flags & PFR_FLAG_FEEDBACK) {
752                         ad.pfra_fback = (p != NULL) ?
753                             PFR_FB_CLEARED : PFR_FB_NONE;
754                         if (COPYOUT(&ad, addr+i, sizeof(ad)))
755                                 senderr(EFAULT);
756                 }
757                 if (p != NULL) {
758                         SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
759                         xzero++;
760                 }
761         }
762
763         if (!(flags & PFR_FLAG_DUMMY)) {
764                 if (flags & PFR_FLAG_ATOMIC)
765                         s = splsoftnet();
766                 pfr_clstats_kentries(&workq, 0, 0);
767                 if (flags & PFR_FLAG_ATOMIC)
768                         splx(s);
769         }
770         if (nzero != NULL)
771                 *nzero = xzero;
772         return (0);
773 _bad:
774         if (flags & PFR_FLAG_FEEDBACK)
775                 pfr_reset_feedback(addr, size, flags);
776         return (rv);
777 }
778
779 int
780 pfr_validate_addr(struct pfr_addr *ad)
781 {
782         int i;
783
784         switch (ad->pfra_af) {
785 #ifdef INET
786         case AF_INET:
787                 if (ad->pfra_net > 32)
788                         return (-1);
789                 break;
790 #endif /* INET */
791 #ifdef INET6
792         case AF_INET6:
793                 if (ad->pfra_net > 128)
794                         return (-1);
795                 break;
796 #endif /* INET6 */
797         default:
798                 return (-1);
799         }
800         if (ad->pfra_net < 128 &&
801                 (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
802                         return (-1);
803         for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
804                 if (((caddr_t)ad)[i])
805                         return (-1);
806         if (ad->pfra_not && ad->pfra_not != 1)
807                 return (-1);
808         if (ad->pfra_fback)
809                 return (-1);
810         return (0);
811 }
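/*
 * Note (illustrative, not part of the original file): pfr_validate_addr()
 * insists that all host bits beyond the prefix are zero, since table entries
 * are networks.  For instance, { AF_INET, 10.0.0.0, net=24 } is accepted,
 * while { AF_INET, 10.0.0.1, net=24 } and any AF_INET entry with net > 32 are
 * rejected.  The byte-wise checks index the raw address because pfra_u, the
 * address union, is the first member of struct pfr_addr.
 */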
812
813 void
814 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
815         int *naddr, int sweep)
816 {
817         struct pfr_walktree     w;
818
819         SLIST_INIT(workq);
820         bzero(&w, sizeof(w));
821         w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
822         w.pfrw_workq = workq;
823         if (kt->pfrkt_ip4 != NULL)
824 #ifdef __FreeBSD__
825                 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, 
826                     &w))
827 #else
828                 if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
829 #endif
830                         printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
831         if (kt->pfrkt_ip6 != NULL)
832 #ifdef __FreeBSD__
833                 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, 
834                     &w))
835 #else
836                 if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
837 #endif
838                         printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
839         if (naddr != NULL)
840                 *naddr = w.pfrw_cnt;
841 }
842
843 void
844 pfr_mark_addrs(struct pfr_ktable *kt)
845 {
846         struct pfr_walktree     w;
847
848         bzero(&w, sizeof(w));
849         w.pfrw_op = PFRW_MARK;
850 #ifdef __FreeBSD__
851         if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
852 #else
853         if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
854 #endif
855                 printf("pfr_mark_addrs: IPv4 walktree failed.\n");
856 #ifdef __FreeBSD__
857         if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
858 #else
859         if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
860 #endif
861                 printf("pfr_mark_addrs: IPv6 walktree failed.\n");
862 }
863
864
865 struct pfr_kentry *
866 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
867 {
868         union sockaddr_union     sa, mask;
869         struct radix_node_head  *head = NULL;   /* make the compiler happy */
870         struct pfr_kentry       *ke;
871         int                      s;
872
873         bzero(&sa, sizeof(sa));
874         if (ad->pfra_af == AF_INET) {
875                 FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
876                 head = kt->pfrkt_ip4;
877         } else if (ad->pfra_af == AF_INET6) {
878                 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
879                 head = kt->pfrkt_ip6;
880         }
881         if (ADDR_NETWORK(ad)) {
882                 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
883                 s = splsoftnet(); /* rn_lookup makes use of globals */
884 #ifdef __FreeBSD__
885                 PF_ASSERT(MA_OWNED);
886 #endif
887                 ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
888                 splx(s);
889                 if (ke && KENTRY_RNF_ROOT(ke))
890                         ke = NULL;
891         } else {
892                 ke = (struct pfr_kentry *)rn_match(&sa, head);
893                 if (ke && KENTRY_RNF_ROOT(ke))
894                         ke = NULL;
895                 if (exact && ke && KENTRY_NETWORK(ke))
896                         ke = NULL;
897         }
898         return (ke);
899 }
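/*
 * Note (illustrative, not part of the original file): this is an exact-match
 * lookup.  Network entries go through rn_lookup() with their prefix mask;
 * host entries go through rn_match(), and with 'exact' set a host query that
 * only hits a covering network entry is discarded, e.g.:
 *
 *	p = pfr_lookup_addr(kt, &ad, 1);	// NULL unless 'ad' itself is in kt
 */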
900
901 struct pfr_kentry *
902 pfr_create_kentry(struct pfr_addr *ad, int intr)
903 {
904         struct pfr_kentry       *ke;
905
906         if (intr)
907                 ke = pool_get(&pfr_kentry_pl2, PR_NOWAIT);
908         else
909                 ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
910         if (ke == NULL)
911                 return (NULL);
912         bzero(ke, sizeof(*ke));
913
914         if (ad->pfra_af == AF_INET)
915                 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
916         else if (ad->pfra_af == AF_INET6)
917                 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
918         ke->pfrke_af = ad->pfra_af;
919         ke->pfrke_net = ad->pfra_net;
920         ke->pfrke_not = ad->pfra_not;
921         ke->pfrke_intrpool = intr;
922         return (ke);
923 }
924
925 void
926 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
927 {
928         struct pfr_kentry       *p, *q;
929
930         for (p = SLIST_FIRST(workq); p != NULL; p = q) {
931                 q = SLIST_NEXT(p, pfrke_workq);
932                 pfr_destroy_kentry(p);
933         }
934 }
935
936 void
937 pfr_destroy_kentry(struct pfr_kentry *ke)
938 {
939         if (ke->pfrke_intrpool)
940                 pool_put(&pfr_kentry_pl2, ke);
941         else
942                 pool_put(&pfr_kentry_pl, ke);
943 }
944
945 void
946 pfr_insert_kentries(struct pfr_ktable *kt,
947     struct pfr_kentryworkq *workq, long tzero)
948 {
949         struct pfr_kentry       *p;
950         int                      rv, n = 0;
951
952         SLIST_FOREACH(p, workq, pfrke_workq) {
953                 rv = pfr_route_kentry(kt, p);
954                 if (rv) {
955                         printf("pfr_insert_kentries: cannot route entry "
956                             "(code=%d).\n", rv);
957                         break;
958                 }
959                 p->pfrke_tzero = tzero;
960                 n++;
961         }
962         kt->pfrkt_cnt += n;
963 }
964
965 int
966 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
967 {
968         struct pfr_kentry       *p;
969         int                      rv;
970
971         p = pfr_lookup_addr(kt, ad, 1);
972         if (p != NULL)
973                 return (0);
974         p = pfr_create_kentry(ad, 1);
975         if (p == NULL)
976                 return (EINVAL);
977
978         rv = pfr_route_kentry(kt, p);
979         if (rv)
980                 return (rv);
981
982         p->pfrke_tzero = tzero;
983         kt->pfrkt_cnt++;
984
985         return (0);
986 }
987
988 void
989 pfr_remove_kentries(struct pfr_ktable *kt,
990     struct pfr_kentryworkq *workq)
991 {
992         struct pfr_kentry       *p;
993         int                      n = 0;
994
995         SLIST_FOREACH(p, workq, pfrke_workq) {
996                 pfr_unroute_kentry(kt, p);
997                 n++;
998         }
999         kt->pfrkt_cnt -= n;
1000         pfr_destroy_kentries(workq);
1001 }
1002
1003 void
1004 pfr_clean_node_mask(struct pfr_ktable *kt,
1005     struct pfr_kentryworkq *workq)
1006 {
1007         struct pfr_kentry       *p;
1008
1009         SLIST_FOREACH(p, workq, pfrke_workq)
1010                 pfr_unroute_kentry(kt, p);
1011 }
1012
1013 void
1014 pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
1015 {
1016         struct pfr_kentry       *p;
1017         int                      s;
1018
1019         SLIST_FOREACH(p, workq, pfrke_workq) {
1020                 s = splsoftnet();
1021                 if (negchange)
1022                         p->pfrke_not = !p->pfrke_not;
1023                 bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
1024                 bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
1025                 splx(s);
1026                 p->pfrke_tzero = tzero;
1027         }
1028 }
1029
1030 void
1031 pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
1032 {
1033         struct pfr_addr ad;
1034         int             i;
1035
1036         for (i = 0; i < size; i++) {
1037                 if (COPYIN(addr+i, &ad, sizeof(ad)))
1038                         break;
1039                 ad.pfra_fback = PFR_FB_NONE;
1040                 if (COPYOUT(&ad, addr+i, sizeof(ad)))
1041                         break;
1042         }
1043 }
1044
1045 void
1046 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
1047 {
1048         int     i;
1049
1050         bzero(sa, sizeof(*sa));
1051         if (af == AF_INET) {
1052                 sa->sin.sin_len = sizeof(sa->sin);
1053                 sa->sin.sin_family = AF_INET;
1054                 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
1055         } else if (af == AF_INET6) {
1056                 sa->sin6.sin6_len = sizeof(sa->sin6);
1057                 sa->sin6.sin6_family = AF_INET6;
1058                 for (i = 0; i < 4; i++) {
1059                         if (net <= 32) {
1060                                 sa->sin6.sin6_addr.s6_addr32[i] =
1061                                     net ? htonl(-1 << (32-net)) : 0;
1062                                 break;
1063                         }
1064                         sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
1065                         net -= 32;
1066                 }
1067         }
1068 }
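/*
 * Worked example (illustrative, not part of the original file): for AF_INET
 * and net = 20 this builds htonl(0xFFFFF000), i.e. 255.255.240.0; for
 * AF_INET6 and net = 33 it fills one whole 32-bit word with ones and the
 * next with the leading bit, giving ffff:ffff:8000::.
 */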
1069
1070 int
1071 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
1072 {
1073         union sockaddr_union     mask;
1074         struct radix_node       *rn;
1075         struct radix_node_head  *head = NULL;   /* make the compiler happy */
1076         int                      s;
1077
1078         bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
1079         if (ke->pfrke_af == AF_INET)
1080                 head = kt->pfrkt_ip4;
1081         else if (ke->pfrke_af == AF_INET6)
1082                 head = kt->pfrkt_ip6;
1083
1084         s = splsoftnet();
1085 #ifdef __FreeBSD__
1086         PF_ASSERT(MA_OWNED);
1087 #endif
1088         if (KENTRY_NETWORK(ke)) {
1089                 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1090                 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
1091         } else
1092                 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
1093         splx(s);
1094
1095         return (rn == NULL ? -1 : 0);
1096 }
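/*
 * Note (illustrative, not part of the original file): rn_addroute() returns
 * NULL when an identical key/mask is already in the tree, so a duplicate
 * insert maps to -1 here.  Callers such as pfr_add_addrs() pre-screen with
 * pfr_lookup_addr() and a scratch table precisely to avoid that case.
 */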
1097
1098 int
1099 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
1100 {
1101         union sockaddr_union     mask;
1102         struct radix_node       *rn;
1103         struct radix_node_head  *head = NULL;   /* make the compiler happy */
1104         int                      s;
1105
1106         if (ke->pfrke_af == AF_INET)
1107                 head = kt->pfrkt_ip4;
1108         else if (ke->pfrke_af == AF_INET6)
1109                 head = kt->pfrkt_ip6;
1110
1111         s = splsoftnet();
1112 #ifdef __FreeBSD__
1113         PF_ASSERT(MA_OWNED);
1114 #endif
1115         if (KENTRY_NETWORK(ke)) {
1116                 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1117 #ifdef __FreeBSD__
1118                 rn = rn_delete(&ke->pfrke_sa, &mask, head);
1119 #else
1120                 rn = rn_delete(&ke->pfrke_sa, &mask, head, NULL);
1121 #endif
1122         } else
1123 #ifdef __FreeBSD__
1124                 rn = rn_delete(&ke->pfrke_sa, NULL, head);
1125 #else
1126                 rn = rn_delete(&ke->pfrke_sa, NULL, head, NULL);
1127 #endif
1128         splx(s);
1129
1130         if (rn == NULL) {
1131                 printf("pfr_unroute_kentry: delete failed.\n");
1132                 return (-1);
1133         }
1134         return (0);
1135 }
1136
1137 void
1138 pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
1139 {
1140         bzero(ad, sizeof(*ad));
1141         if (ke == NULL)
1142                 return;
1143         ad->pfra_af = ke->pfrke_af;
1144         ad->pfra_net = ke->pfrke_net;
1145         ad->pfra_not = ke->pfrke_not;
1146         if (ad->pfra_af == AF_INET)
1147                 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1148         else if (ad->pfra_af == AF_INET6)
1149                 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1150 }
1151
1152 int
1153 pfr_walktree(struct radix_node *rn, void *arg)
1154 {
1155         struct pfr_kentry       *ke = (struct pfr_kentry *)rn;
1156         struct pfr_walktree     *w = arg;
1157         int                      s, flags = w->pfrw_flags;
1158
1159         switch (w->pfrw_op) {
1160         case PFRW_MARK:
1161                 ke->pfrke_mark = 0;
1162                 break;
1163         case PFRW_SWEEP:
1164                 if (ke->pfrke_mark)
1165                         break;
1166                 /* FALLTHROUGH */
1167         case PFRW_ENQUEUE:
1168                 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1169                 w->pfrw_cnt++;
1170                 break;
1171         case PFRW_GET_ADDRS:
1172                 if (w->pfrw_free-- > 0) {
1173                         struct pfr_addr ad;
1174
1175                         pfr_copyout_addr(&ad, ke);
1176                         if (COPYOUT(&ad, w->pfrw_addr, sizeof(ad)))
1177                                 return (EFAULT);
1178                         w->pfrw_addr++;
1179                 }
1180                 break;
1181         case PFRW_GET_ASTATS:
1182                 if (w->pfrw_free-- > 0) {
1183                         struct pfr_astats as;
1184
1185                         pfr_copyout_addr(&as.pfras_a, ke);
1186
1187                         s = splsoftnet();
1188                         bcopy(ke->pfrke_packets, as.pfras_packets,
1189                             sizeof(as.pfras_packets));
1190                         bcopy(ke->pfrke_bytes, as.pfras_bytes,
1191                             sizeof(as.pfras_bytes));
1192                         splx(s);
1193                         as.pfras_tzero = ke->pfrke_tzero;
1194
1195                         if (COPYOUT(&as, w->pfrw_astats, sizeof(as)))
1196                                 return (EFAULT);
1197                         w->pfrw_astats++;
1198                 }
1199                 break;
1200         case PFRW_POOL_GET:
1201                 if (ke->pfrke_not)
1202                         break; /* negative entries are ignored */
1203                 if (!w->pfrw_cnt--) {
1204                         w->pfrw_kentry = ke;
1205                         return (1); /* finish search */
1206                 }
1207                 break;
1208         case PFRW_DYNADDR_UPDATE:
1209                 if (ke->pfrke_af == AF_INET) {
1210                         if (w->pfrw_dyn->pfid_acnt4++ > 0)
1211                                 break;
1212                         pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1213                         w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
1214                             &ke->pfrke_sa, AF_INET);
1215                         w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
1216                             &pfr_mask, AF_INET);
1217                 } else if (ke->pfrke_af == AF_INET6){
1218                         if (w->pfrw_dyn->pfid_acnt6++ > 0)
1219                                 break;
1220                         pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1221                         w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
1222                             &ke->pfrke_sa, AF_INET6);
1223                         w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
1224                             &pfr_mask, AF_INET6);
1225                 }
1226                 break;
1227         }
1228         return (0);
1229 }
1230
1231 int
1232 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1233 {
1234         struct pfr_ktableworkq   workq;
1235         struct pfr_ktable       *p;
1236         int                      s = 0, xdel = 0;
1237
1238         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
1239         if (pfr_fix_anchor(filter->pfrt_anchor))
1240                 return (EINVAL);
1241         if (pfr_table_count(filter, flags) < 0)
1242                 return (ENOENT);
1243
1244         SLIST_INIT(&workq);
1245         RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1246                 if (pfr_skip_table(filter, p, flags))
1247                         continue;
1248                 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1249                         continue;
1250                 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1251                         continue;
1252                 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1253                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1254                 xdel++;
1255         }
1256         if (!(flags & PFR_FLAG_DUMMY)) {
1257                 if (flags & PFR_FLAG_ATOMIC)
1258                         s = splsoftnet();
1259                 pfr_setflags_ktables(&workq);
1260                 if (flags & PFR_FLAG_ATOMIC)
1261                         splx(s);
1262         }
1263         if (ndel != NULL)
1264                 *ndel = xdel;
1265         return (0);
1266 }
1267
1268 int
1269 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1270 {
1271         struct pfr_ktableworkq   addq, changeq;
1272         struct pfr_ktable       *p, *q, *r, key;
1273         int                      i, rv, s = 0, xadd = 0;
1274         long                     tzero = time_second;
1275
1276         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
1277         SLIST_INIT(&addq);
1278         SLIST_INIT(&changeq);
1279         for (i = 0; i < size; i++) {
1280                 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
1281                         senderr(EFAULT);
1282                 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1283                     flags & PFR_FLAG_USERIOCTL))
1284                         senderr(EINVAL);
1285                 key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1286                 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1287                 if (p == NULL) {
1288                         p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
1289                         if (p == NULL)
1290                                 senderr(ENOMEM);
1291                         SLIST_FOREACH(q, &addq, pfrkt_workq) {
1292                                 if (!pfr_ktable_compare(p, q))
1293                                         goto _skip;
1294                         }
1295                         SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1296                         xadd++;
1297                         if (!key.pfrkt_anchor[0])
1298                                 goto _skip;
1299
1300                         /* find or create root table */
1301                         bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
1302                         r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1303                         if (r != NULL) {
1304                                 p->pfrkt_root = r;
1305                                 goto _skip;
1306                         }
1307                         SLIST_FOREACH(q, &addq, pfrkt_workq) {
1308                                 if (!pfr_ktable_compare(&key, q)) {
1309                                         p->pfrkt_root = q;
1310                                         goto _skip;
1311                                 }
1312                         }
1313                         key.pfrkt_flags = 0;
1314                         r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1315                         if (r == NULL)
1316                                 senderr(ENOMEM);
1317                         SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
1318                         p->pfrkt_root = r;
1319                 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1320                         SLIST_FOREACH(q, &changeq, pfrkt_workq)
1321                                 if (!pfr_ktable_compare(&key, q))
1322                                         goto _skip;
1323                         p->pfrkt_nflags = (p->pfrkt_flags &
1324                             ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1325                         SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1326                         xadd++;
1327                 }
1328 _skip:
1329         ;
1330         }
1331         if (!(flags & PFR_FLAG_DUMMY)) {
1332                 if (flags & PFR_FLAG_ATOMIC)
1333                         s = splsoftnet();
1334                 pfr_insert_ktables(&addq);
1335                 pfr_setflags_ktables(&changeq);
1336                 if (flags & PFR_FLAG_ATOMIC)
1337                         splx(s);
1338         } else
1339                  pfr_destroy_ktables(&addq, 0);
1340         if (nadd != NULL)
1341                 *nadd = xadd;
1342         return (0);
1343 _bad:
1344         pfr_destroy_ktables(&addq, 0);
1345         return (rv);
1346 }
1347
1348 int
1349 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1350 {
1351         struct pfr_ktableworkq   workq;
1352         struct pfr_ktable       *p, *q, key;
1353         int                      i, s = 0, xdel = 0;
1354
1355         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
1356         SLIST_INIT(&workq);
1357         for (i = 0; i < size; i++) {
1358                 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
1359                         return (EFAULT);
1360                 if (pfr_validate_table(&key.pfrkt_t, 0,
1361                     flags & PFR_FLAG_USERIOCTL))
1362                         return (EINVAL);
1363                 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1364                 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1365                         SLIST_FOREACH(q, &workq, pfrkt_workq)
1366                                 if (!pfr_ktable_compare(p, q))
1367                                         goto _skip;
1368                         p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1369                         SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1370                         xdel++;
1371                 }
1372 _skip:
1373         ;
1374         }
1375
1376         if (!(flags & PFR_FLAG_DUMMY)) {
1377                 if (flags & PFR_FLAG_ATOMIC)
1378                         s = splsoftnet();
1379                 pfr_setflags_ktables(&workq);
1380                 if (flags & PFR_FLAG_ATOMIC)
1381                         splx(s);
1382         }
1383         if (ndel != NULL)
1384                 *ndel = xdel;
1385         return (0);
1386 }
1387
1388 int
1389 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1390         int flags)
1391 {
1392         struct pfr_ktable       *p;
1393         int                      n, nn;
1394
1395         ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
1396         if (pfr_fix_anchor(filter->pfrt_anchor))
1397                 return (EINVAL);
1398         n = nn = pfr_table_count(filter, flags);
1399         if (n < 0)
1400                 return (ENOENT);
1401         if (n > *size) {
1402                 *size = n;
1403                 return (0);
1404         }
1405         RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1406                 if (pfr_skip_table(filter, p, flags))
1407                         continue;
1408                 if (n-- <= 0)
1409                         continue;
1410                 if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl)))
1411                         return (EFAULT);
1412         }
1413         if (n) {
1414                 printf("pfr_get_tables: corruption detected (%d).\n", n);
1415                 return (ENOTTY);
1416         }
1417         *size = nn;
1418         return (0);
1419 }
1420
1421 int
1422 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1423         int flags)
1424 {
1425         struct pfr_ktable       *p;
1426         struct pfr_ktableworkq   workq;
1427         int                      s = 0, n, nn;
1428         long                     tzero = time_second;
1429
1430         ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
1431                                         /* XXX PFR_FLAG_CLSTATS disabled */
1432         if (pfr_fix_anchor(filter->pfrt_anchor))
1433                 return (EINVAL);
1434         n = nn = pfr_table_count(filter, flags);
1435         if (n < 0)
1436                 return (ENOENT);
1437         if (n > *size) {
1438                 *size = n;
1439                 return (0);
1440         }
1441         SLIST_INIT(&workq);
1442         if (flags & PFR_FLAG_ATOMIC)
1443                 s = splsoftnet();
1444         RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1445                 if (pfr_skip_table(filter, p, flags))
1446                         continue;
1447                 if (n-- <= 0)
1448                         continue;
1449                 if (!(flags & PFR_FLAG_ATOMIC))
1450                         s = splsoftnet();
1451                 if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
1452                         if (!(flags & PFR_FLAG_ATOMIC))
1453                                 splx(s);
1454                         return (EFAULT);
1455                 }
1456                 if (!(flags & PFR_FLAG_ATOMIC))
1457                         splx(s);
1458                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1459         }
1460         if (flags & PFR_FLAG_CLSTATS)
1461                 pfr_clstats_ktables(&workq, tzero,
1462                     flags & PFR_FLAG_ADDRSTOO);
1463         if (flags & PFR_FLAG_ATOMIC)
1464                 splx(s);
1465         if (n) {
1466                 printf("pfr_get_tstats: corruption detected (%d).\n", n);
1467                 return (ENOTTY);
1468         }
1469         *size = nn;
1470         return (0);
1471 }
1472
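/*
 * Zero the statistics of the listed tables.  Tables that do not exist
 * are silently skipped; *nzero reports how many tables were (or, with
 * PFR_FLAG_DUMMY, would have been) cleared.
 */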
1473 int
1474 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1475 {
1476         struct pfr_ktableworkq   workq;
1477         struct pfr_ktable       *p, key;
1478         int                      i, s = 0, xzero = 0;
1479         long                     tzero = time_second;
1480
1481         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
1482         SLIST_INIT(&workq);
1483         for (i = 0; i < size; i++) {
1484                 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
1485                         return (EFAULT);
1486                 if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1487                         return (EINVAL);
1488                 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1489                 if (p != NULL) {
1490                         SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1491                         xzero++;
1492                 }
1493         }
1494         if (!(flags & PFR_FLAG_DUMMY)) {
1495                 if (flags & PFR_FLAG_ATOMIC)
1496                         s = splsoftnet();
1497                 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1498                 if (flags & PFR_FLAG_ATOMIC)
1499                         splx(s);
1500         }
1501         if (nzero != NULL)
1502                 *nzero = xzero;
1503         return (0);
1504 }
1505
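/*
 * Set and/or clear user-settable flags (PFR_TFLAG_USRMASK) on the
 * listed active tables.  Clearing PFR_TFLAG_PERSIST on a table that is
 * not referenced by any rule effectively deletes it, which is counted
 * in *ndel instead of *nchange.
 */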
1506 int
1507 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1508         int *nchange, int *ndel, int flags)
1509 {
1510         struct pfr_ktableworkq   workq;
1511         struct pfr_ktable       *p, *q, key;
1512         int                      i, s = 0, xchange = 0, xdel = 0;
1513
1514         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
1515         if ((setflag & ~PFR_TFLAG_USRMASK) ||
1516             (clrflag & ~PFR_TFLAG_USRMASK) ||
1517             (setflag & clrflag))
1518                 return (EINVAL);
1519         SLIST_INIT(&workq);
1520         for (i = 0; i < size; i++) {
1521                 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
1522                         return (EFAULT);
1523                 if (pfr_validate_table(&key.pfrkt_t, 0,
1524                     flags & PFR_FLAG_USERIOCTL))
1525                         return (EINVAL);
1526                 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1527                 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1528                         p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
1529                             ~clrflag;
1530                         if (p->pfrkt_nflags == p->pfrkt_flags)
1531                                 goto _skip;
1532                         SLIST_FOREACH(q, &workq, pfrkt_workq)
1533                                 if (!pfr_ktable_compare(p, q))
1534                                         goto _skip;
1535                         SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1536                         if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
1537                             (clrflag & PFR_TFLAG_PERSIST) &&
1538                             !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1539                                 xdel++;
1540                         else
1541                                 xchange++;
1542                 }
1543 _skip:
1544         ;
1545         }
1546         if (!(flags & PFR_FLAG_DUMMY)) {
1547                 if (flags & PFR_FLAG_ATOMIC)
1548                         s = splsoftnet();
1549                 pfr_setflags_ktables(&workq);
1550                 if (flags & PFR_FLAG_ATOMIC)
1551                         splx(s);
1552         }
1553         if (nchange != NULL)
1554                 *nchange = xchange;
1555         if (ndel != NULL)
1556                 *ndel = xdel;
1557         return (0);
1558 }
1559
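/*
 * Open a table transaction for the given anchor: tables still flagged
 * inactive from an earlier, uncommitted transaction are scheduled for
 * cleanup, the ruleset is marked open and a fresh ticket is returned.
 */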
1560 int
1561 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1562 {
1563         struct pfr_ktableworkq   workq;
1564         struct pfr_ktable       *p;
1565         struct pf_ruleset       *rs;
1566         int                      xdel = 0;
1567
1568         ACCEPT_FLAGS(PFR_FLAG_DUMMY);
1569         rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
1570         if (rs == NULL)
1571                 return (ENOMEM);
1572         SLIST_INIT(&workq);
1573         RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1574                 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1575                     pfr_skip_table(trs, p, 0))
1576                         continue;
1577                 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1578                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1579                 xdel++;
1580         }
1581         if (!(flags & PFR_FLAG_DUMMY)) {
1582                 pfr_setflags_ktables(&workq);
1583                 if (ticket != NULL)
1584                         *ticket = ++rs->tticket;
1585                 rs->topen = 1;
1586         } else
1587                 pf_remove_if_empty_ruleset(rs);
1588         if (ndel != NULL)
1589                 *ndel = xdel;
1590         return (0);
1591 }
1592
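/*
 * Define the contents of one table inside an open transaction.  The
 * addresses are loaded into a shadow ktable attached to the (possibly
 * newly created) table; the shadow replaces the live contents only
 * when the transaction is committed.
 */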
1593 int
1594 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1595     int *nadd, int *naddr, u_int32_t ticket, int flags)
1596 {
1597         struct pfr_ktableworkq   tableq;
1598         struct pfr_kentryworkq   addrq;
1599         struct pfr_ktable       *kt, *rt, *shadow, key;
1600         struct pfr_kentry       *p;
1601         struct pfr_addr          ad;
1602         struct pf_ruleset       *rs;
1603         int                      i, rv, xadd = 0, xaddr = 0;
1604
1605         ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
1606         if (size && !(flags & PFR_FLAG_ADDRSTOO))
1607                 return (EINVAL);
1608         if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1609             flags & PFR_FLAG_USERIOCTL))
1610                 return (EINVAL);
1611         rs = pf_find_ruleset(tbl->pfrt_anchor);
1612         if (rs == NULL || !rs->topen || ticket != rs->tticket)
1613                 return (EBUSY);
1614         tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1615         SLIST_INIT(&tableq);
1616         kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
1617         if (kt == NULL) {
1618                 kt = pfr_create_ktable(tbl, 0, 1);
1619                 if (kt == NULL)
1620                         return (ENOMEM);
1621                 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1622                 xadd++;
1623                 if (!tbl->pfrt_anchor[0])
1624                         goto _skip;
1625
1626                 /* find or create root table */
1627                 bzero(&key, sizeof(key));
1628                 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1629                 rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1630                 if (rt != NULL) {
1631                         kt->pfrkt_root = rt;
1632                         goto _skip;
1633                 }
1634                 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1635                 if (rt == NULL) {
1636                         pfr_destroy_ktables(&tableq, 0);
1637                         return (ENOMEM);
1638                 }
1639                 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1640                 kt->pfrkt_root = rt;
1641         } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
1642                 xadd++;
1643 _skip:
1644         shadow = pfr_create_ktable(tbl, 0, 0);
1645         if (shadow == NULL) {
1646                 pfr_destroy_ktables(&tableq, 0);
1647                 return (ENOMEM);
1648         }
1649         SLIST_INIT(&addrq);
1650         for (i = 0; i < size; i++) {
1651                 if (COPYIN(addr+i, &ad, sizeof(ad)))
1652                         senderr(EFAULT);
1653                 if (pfr_validate_addr(&ad))
1654                         senderr(EINVAL);
1655                 if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
1656                         continue;
1657                 p = pfr_create_kentry(&ad, 0);
1658                 if (p == NULL)
1659                         senderr(ENOMEM);
1660                 if (pfr_route_kentry(shadow, p)) {
1661                         pfr_destroy_kentry(p);
1662                         continue;
1663                 }
1664                 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1665                 xaddr++;
1666         }
1667         if (!(flags & PFR_FLAG_DUMMY)) {
1668                 if (kt->pfrkt_shadow != NULL)
1669                         pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1670                 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1671                 pfr_insert_ktables(&tableq);
1672                 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1673                     xaddr : NO_ADDRESSES;
1674                 kt->pfrkt_shadow = shadow;
1675         } else {
1676                 pfr_clean_node_mask(shadow, &addrq);
1677                 pfr_destroy_ktable(shadow, 0);
1678                 pfr_destroy_ktables(&tableq, 0);
1679                 pfr_destroy_kentries(&addrq);
1680         }
1681         if (nadd != NULL)
1682                 *nadd = xadd;
1683         if (naddr != NULL)
1684                 *naddr = xaddr;
1685         return (0);
1686 _bad:
1687         pfr_destroy_ktable(shadow, 0);
1688         pfr_destroy_ktables(&tableq, 0);
1689         pfr_destroy_kentries(&addrq);
1690         return (rv);
1691 }
1692
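/*
 * Abort an open table transaction: clear the INACTIVE flag on all
 * affected tables, which releases their shadow copies via
 * pfr_setflags_ktable(), and close the ruleset again.
 */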
1693 int
1694 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1695 {
1696         struct pfr_ktableworkq   workq;
1697         struct pfr_ktable       *p;
1698         struct pf_ruleset       *rs;
1699         int                      xdel = 0;
1700
1701         ACCEPT_FLAGS(PFR_FLAG_DUMMY);
1702         rs = pf_find_ruleset(trs->pfrt_anchor);
1703         if (rs == NULL || !rs->topen || ticket != rs->tticket)
1704                 return (0);
1705         SLIST_INIT(&workq);
1706         RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1707                 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1708                     pfr_skip_table(trs, p, 0))
1709                         continue;
1710                 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1711                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1712                 xdel++;
1713         }
1714         if (!(flags & PFR_FLAG_DUMMY)) {
1715                 pfr_setflags_ktables(&workq);
1716                 rs->topen = 0;
1717                 pf_remove_if_empty_ruleset(rs);
1718         }
1719         if (ndel != NULL)
1720                 *ndel = xdel;
1721         return (0);
1722 }
1723
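/*
 * Commit an open table transaction: every table flagged inactive for
 * this anchor has its shadow contents merged into the active table by
 * pfr_commit_ktable().  *nadd counts brand-new tables, *nchange
 * tables that already existed.
 */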
1724 int
1725 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1726     int *nchange, int flags)
1727 {
1728         struct pfr_ktable       *p, *q;
1729         struct pfr_ktableworkq   workq;
1730         struct pf_ruleset       *rs;
1731         int                      s = 0, xadd = 0, xchange = 0;
1732         long                     tzero = time_second;
1733
1734         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
1735         rs = pf_find_ruleset(trs->pfrt_anchor);
1736         if (rs == NULL || !rs->topen || ticket != rs->tticket)
1737                 return (EBUSY);
1738
1739         SLIST_INIT(&workq);
1740         RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1741                 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1742                     pfr_skip_table(trs, p, 0))
1743                         continue;
1744                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1745                 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1746                         xchange++;
1747                 else
1748                         xadd++;
1749         }
1750
1751         if (!(flags & PFR_FLAG_DUMMY)) {
1752                 if (flags & PFR_FLAG_ATOMIC)
1753                         s = splsoftnet();
1754                 for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
1755                         q = SLIST_NEXT(p, pfrkt_workq);
1756                         pfr_commit_ktable(p, tzero);
1757                 }
1758                 if (flags & PFR_FLAG_ATOMIC)
1759                         splx(s);
1760                 rs->topen = 0;
1761                 pf_remove_if_empty_ruleset(rs);
1762         }
1763         if (nadd != NULL)
1764                 *nadd = xadd;
1765         if (nchange != NULL)
1766                 *nchange = xchange;
1767
1768         return (0);
1769 }
1770
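/*
 * Merge one shadow table into its active counterpart.  If the shadow
 * carries no address list, only the table statistics are touched; if
 * the active table already holds addresses, per-entry add/change/
 * delete work queues are computed; otherwise the radix heads are
 * simply swapped.  The shadow is then destroyed and the table flags
 * recomputed.
 */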
1771 void
1772 pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
1773 {
1774         struct pfr_ktable       *shadow = kt->pfrkt_shadow;
1775         int                      nflags;
1776
1777         if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1778                 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1779                         pfr_clstats_ktable(kt, tzero, 1);
1780         } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1781                 /* kt might contain addresses */
1782                 struct pfr_kentryworkq   addrq, addq, changeq, delq, garbageq;
1783                 struct pfr_kentry       *p, *q, *next;
1784                 struct pfr_addr          ad;
1785
1786                 pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1787                 pfr_mark_addrs(kt);
1788                 SLIST_INIT(&addq);
1789                 SLIST_INIT(&changeq);
1790                 SLIST_INIT(&delq);
1791                 SLIST_INIT(&garbageq);
1792                 pfr_clean_node_mask(shadow, &addrq);
1793                 for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
1794                         next = SLIST_NEXT(p, pfrke_workq);      /* XXX */
1795                         pfr_copyout_addr(&ad, p);
1796                         q = pfr_lookup_addr(kt, &ad, 1);
1797                         if (q != NULL) {
1798                                 if (q->pfrke_not != p->pfrke_not)
1799                                         SLIST_INSERT_HEAD(&changeq, q,
1800                                             pfrke_workq);
1801                                 q->pfrke_mark = 1;
1802                                 SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
1803                         } else {
1804                                 p->pfrke_tzero = tzero;
1805                                 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
1806                         }
1807                 }
1808                 pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1809                 pfr_insert_kentries(kt, &addq, tzero);
1810                 pfr_remove_kentries(kt, &delq);
1811                 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
1812                 pfr_destroy_kentries(&garbageq);
1813         } else {
1814                 /* kt cannot contain addresses */
1815                 SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1816                     shadow->pfrkt_ip4);
1817                 SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1818                     shadow->pfrkt_ip6);
1819                 SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1820                 pfr_clstats_ktable(kt, tzero, 1);
1821         }
1822         nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1823             (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1824                 & ~PFR_TFLAG_INACTIVE;
1825         pfr_destroy_ktable(shadow, 0);
1826         kt->pfrkt_shadow = NULL;
1827         pfr_setflags_ktable(kt, nflags);
1828 }
1829
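/*
 * Sanity-check a table descriptor coming from userland: non-empty,
 * NUL-terminated and NUL-padded name, a valid anchor path, and no
 * flags outside 'allowedflags'.  With 'no_reserved' set, the reserved
 * anchor may not be used.
 */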
1830 int
1831 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1832 {
1833         int i;
1834
1835         if (!tbl->pfrt_name[0])
1836                 return (-1);
1837         if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1838                 return (-1);
1839         if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1840                 return (-1);
1841         for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1842                 if (tbl->pfrt_name[i])
1843                         return (-1);
1844         if (pfr_fix_anchor(tbl->pfrt_anchor))
1845                 return (-1);
1846         if (tbl->pfrt_flags & ~allowedflags)
1847                 return (-1);
1848         return (0);
1849 }
1850
1851 /*
1852  * Rewrite anchors referenced by tables to remove slashes
1853  * and check for validity.
1854  */
1855 int
1856 pfr_fix_anchor(char *anchor)
1857 {
1858         size_t siz = MAXPATHLEN;
1859         int i;
1860
1861         if (anchor[0] == '/') {
1862                 char *path;
1863                 int off;
1864
1865                 path = anchor;
1866                 off = 1;
1867                 while (*++path == '/')
1868                         off++;
1869                 bcopy(path, anchor, siz - off);
1870                 memset(anchor + siz - off, 0, off);
1871         }
1872         if (anchor[siz - 1])
1873                 return (-1);
1874         for (i = strlen(anchor); i < siz; i++)
1875                 if (anchor[i])
1876                         return (-1);
1877         return (0);
1878 }
1879
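/*
 * Number of tables visible through 'filter': all tables with
 * PFR_FLAG_ALLRSETS, otherwise only those of the requested anchor (or
 * of the main ruleset).  Returns -1 if the anchor does not exist.
 */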
1880 int
1881 pfr_table_count(struct pfr_table *filter, int flags)
1882 {
1883         struct pf_ruleset *rs;
1884
1885         if (flags & PFR_FLAG_ALLRSETS)
1886                 return (pfr_ktable_cnt);
1887         if (filter->pfrt_anchor[0]) {
1888                 rs = pf_find_ruleset(filter->pfrt_anchor);
1889                 return ((rs != NULL) ? rs->tables : -1);
1890         }
1891         return (pf_main_ruleset.tables);
1892 }
1893
1894 int
1895 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1896 {
1897         if (flags & PFR_FLAG_ALLRSETS)
1898                 return (0);
1899         if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1900                 return (1);
1901         return (0);
1902 }
1903
1904 void
1905 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1906 {
1907         struct pfr_ktable       *p;
1908
1909         SLIST_FOREACH(p, workq, pfrkt_workq)
1910                 pfr_insert_ktable(p);
1911 }
1912
1913 void
1914 pfr_insert_ktable(struct pfr_ktable *kt)
1915 {
1916         RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1917         pfr_ktable_cnt++;
1918         if (kt->pfrkt_root != NULL)
1919                 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1920                         pfr_setflags_ktable(kt->pfrkt_root,
1921                             kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1922 }
1923
1924 void
1925 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1926 {
1927         struct pfr_ktable       *p, *q;
1928
1929         for (p = SLIST_FIRST(workq); p; p = q) {
1930                 q = SLIST_NEXT(p, pfrkt_workq);
1931                 pfr_setflags_ktable(p, p->pfrkt_nflags);
1932         }
1933 }
1934
1935 void
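/*
 * Apply a new flag word to a ktable.  A table left with none of the
 * PFR_TFLAG_SETMASK bits is removed from the tree and destroyed; one
 * that loses ACTIVE has its addresses flushed, and one that loses
 * INACTIVE has its shadow copy released.
 */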
1936 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1937 {
1938         struct pfr_kentryworkq  addrq;
1939
1940         if (!(newf & PFR_TFLAG_REFERENCED) &&
1941             !(newf & PFR_TFLAG_PERSIST))
1942                 newf &= ~PFR_TFLAG_ACTIVE;
1943         if (!(newf & PFR_TFLAG_ACTIVE))
1944                 newf &= ~PFR_TFLAG_USRMASK;
1945         if (!(newf & PFR_TFLAG_SETMASK)) {
1946                 RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
1947                 if (kt->pfrkt_root != NULL)
1948                         if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1949                                 pfr_setflags_ktable(kt->pfrkt_root,
1950                                     kt->pfrkt_root->pfrkt_flags &
1951                                         ~PFR_TFLAG_REFDANCHOR);
1952                 pfr_destroy_ktable(kt, 1);
1953                 pfr_ktable_cnt--;
1954                 return;
1955         }
1956         if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1957                 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1958                 pfr_remove_kentries(kt, &addrq);
1959         }
1960         if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1961                 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1962                 kt->pfrkt_shadow = NULL;
1963         }
1964         kt->pfrkt_flags = newf;
1965 }
1966
1967 void
1968 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1969 {
1970         struct pfr_ktable       *p;
1971
1972         SLIST_FOREACH(p, workq, pfrkt_workq)
1973                 pfr_clstats_ktable(p, tzero, recurse);
1974 }
1975
1976 void
1977 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1978 {
1979         struct pfr_kentryworkq   addrq;
1980         int                      s;
1981
1982         if (recurse) {
1983                 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1984                 pfr_clstats_kentries(&addrq, tzero, 0);
1985         }
1986         s = splsoftnet();
1987         bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
1988         bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
1989         kt->pfrkt_match = kt->pfrkt_nomatch = 0;
1990         splx(s);
1991         kt->pfrkt_tzero = tzero;
1992 }
1993
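/*
 * Allocate and initialize a ktable: copy the descriptor, optionally
 * attach it to its ruleset, and set up the IPv4 and IPv6 radix heads.
 * Returns NULL if memory (or the ruleset) cannot be obtained.
 */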
1994 struct pfr_ktable *
1995 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
1996 {
1997         struct pfr_ktable       *kt;
1998         struct pf_ruleset       *rs;
1999
2000         kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
2001         if (kt == NULL)
2002                 return (NULL);
2003         bzero(kt, sizeof(*kt));
2004         kt->pfrkt_t = *tbl;
2005
2006         if (attachruleset) {
2007                 rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
2008                 if (!rs) {
2009                         pfr_destroy_ktable(kt, 0);
2010                         return (NULL);
2011                 }
2012                 kt->pfrkt_rs = rs;
2013                 rs->tables++;
2014         }
2015
2016         if (!rn_inithead((void **)&kt->pfrkt_ip4,
2017             offsetof(struct sockaddr_in, sin_addr) * 8) ||
2018             !rn_inithead((void **)&kt->pfrkt_ip6,
2019             offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
2020                 pfr_destroy_ktable(kt, 0);
2021                 return (NULL);
2022         }
2023         kt->pfrkt_tzero = tzero;
2024
2025         return (kt);
2026 }
2027
2028 void
2029 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
2030 {
2031         struct pfr_ktable       *p, *q;
2032
2033         for (p = SLIST_FIRST(workq); p; p = q) {
2034                 q = SLIST_NEXT(p, pfrkt_workq);
2035                 pfr_destroy_ktable(p, flushaddr);
2036         }
2037 }
2038
2039 void
2040 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
2041 {
2042         struct pfr_kentryworkq   addrq;
2043
2044         if (flushaddr) {
2045                 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
2046                 pfr_clean_node_mask(kt, &addrq);
2047                 pfr_destroy_kentries(&addrq);
2048         }
2049 #if defined(__FreeBSD__) && (__FreeBSD_version >= 500100)
2050         if (kt->pfrkt_ip4 != NULL) {
2051                 RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip4);
2052                 free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
2053         }
2054         if (kt->pfrkt_ip6 != NULL) {
2055                 RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip6);
2056                 free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
2057         }
2058 #else
2059         if (kt->pfrkt_ip4 != NULL)
2060                 free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
2061         if (kt->pfrkt_ip6 != NULL)
2062                 free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
2063 #endif
2064         if (kt->pfrkt_shadow != NULL)
2065                 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
2066         if (kt->pfrkt_rs != NULL) {
2067                 kt->pfrkt_rs->tables--;
2068                 pf_remove_if_empty_ruleset(kt->pfrkt_rs);
2069         }
2070         pool_put(&pfr_ktable_pl, kt);
2071 }
2072
2073 int
2074 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
2075 {
2076         int d;
2077
2078         if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
2079                 return (d);
2080         return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
2081 }
2082
2083 struct pfr_ktable *
2084 pfr_lookup_table(struct pfr_table *tbl)
2085 {
2086         /* struct pfr_ktable starts like a struct pfr_table */
2087         return (RB_FIND(pfr_ktablehead, &pfr_ktables,
2088             (struct pfr_ktable *)tbl));
2089 }
2090
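/*
 * Fast-path lookup used during packet processing: does address 'a'
 * match table 'kt'?  Falls back to the root table if this one is not
 * active, honors negated entries, and updates the match/nomatch
 * counters.
 */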
2091 int
2092 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
2093 {
2094         struct pfr_kentry       *ke = NULL;
2095         int                      match;
2096
2097         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2098                 kt = kt->pfrkt_root;
2099         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2100                 return (0);
2101
2102         switch (af) {
2103 #ifdef INET
2104         case AF_INET:
2105                 pfr_sin.sin_addr.s_addr = a->addr32[0];
2106                 ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
2107                 if (ke && KENTRY_RNF_ROOT(ke))
2108                         ke = NULL;
2109                 break;
2110 #endif /* INET */
2111 #ifdef INET6
2112         case AF_INET6:
2113                 bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2114                 ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
2115                 if (ke && KENTRY_RNF_ROOT(ke))
2116                         ke = NULL;
2117                 break;
2118 #endif /* INET6 */
2119         }
2120         match = (ke && !ke->pfrke_not);
2121         if (match)
2122                 kt->pfrkt_match++;
2123         else
2124                 kt->pfrkt_nomatch++;
2125         return (match);
2126 }
2127
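/*
 * Account packet and byte counters against a table (and the matching
 * entry, if any).  A lookup result that contradicts 'notrule' is
 * recorded under PFR_OP_XPASS instead of the rule's own operation.
 */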
2128 void
2129 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
2130     u_int64_t len, int dir_out, int op_pass, int notrule)
2131 {
2132         struct pfr_kentry       *ke = NULL;
2133
2134         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2135                 kt = kt->pfrkt_root;
2136         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2137                 return;
2138
2139         switch (af) {
2140 #ifdef INET
2141         case AF_INET:
2142                 pfr_sin.sin_addr.s_addr = a->addr32[0];
2143                 ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
2144                 if (ke && KENTRY_RNF_ROOT(ke))
2145                         ke = NULL;
2146                 break;
2147 #endif /* INET */
2148 #ifdef INET6
2149         case AF_INET6:
2150                 bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2151                 ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
2152                 if (ke && KENTRY_RNF_ROOT(ke))
2153                         ke = NULL;
2154                 break;
2155 #endif /* INET6 */
2156         default:
2157                 ;
2158         }
2159         if ((ke == NULL || ke->pfrke_not) != notrule) {
2160                 if (op_pass != PFR_OP_PASS)
2161                         printf("pfr_update_stats: assertion failed.\n");
2162                 op_pass = PFR_OP_XPASS;
2163         }
2164         kt->pfrkt_packets[dir_out][op_pass]++;
2165         kt->pfrkt_bytes[dir_out][op_pass] += len;
2166         if (ke != NULL && op_pass != PFR_OP_XPASS) {
2167                 ke->pfrke_packets[dir_out][op_pass]++;
2168                 ke->pfrke_bytes[dir_out][op_pass] += len;
2169         }
2170 }
2171
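/*
 * Resolve (or lazily create) the table referenced by a rule and take
 * a rule reference on it; the first reference also marks the table
 * PFR_TFLAG_REFERENCED.
 */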
2172 struct pfr_ktable *
2173 pfr_attach_table(struct pf_ruleset *rs, char *name)
2174 {
2175         struct pfr_ktable       *kt, *rt;
2176         struct pfr_table         tbl;
2177         struct pf_anchor        *ac = rs->anchor;
2178
2179         bzero(&tbl, sizeof(tbl));
2180         strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2181         if (ac != NULL)
2182                 strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2183         kt = pfr_lookup_table(&tbl);
2184         if (kt == NULL) {
2185                 kt = pfr_create_ktable(&tbl, time_second, 1);
2186                 if (kt == NULL)
2187                         return (NULL);
2188                 if (ac != NULL) {
2189                         bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2190                         rt = pfr_lookup_table(&tbl);
2191                         if (rt == NULL) {
2192                                 rt = pfr_create_ktable(&tbl, 0, 1);
2193                                 if (rt == NULL) {
2194                                         pfr_destroy_ktable(kt, 0);
2195                                         return (NULL);
2196                                 }
2197                                 pfr_insert_ktable(rt);
2198                         }
2199                         kt->pfrkt_root = rt;
2200                 }
2201                 pfr_insert_ktable(kt);
2202         }
2203         if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2204                 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2205         return (kt);
2206 }
2207
2208 void
2209 pfr_detach_table(struct pfr_ktable *kt)
2210 {
2211         if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
2212                 printf("pfr_detach_table: refcount = %d.\n",
2213                     kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2214         else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2215                 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2216 }
2217
2218
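/*
 * Pick an address from a table for address pools.  *pidx is the
 * caller's position in the table and 'counter' remembers the last
 * address handed out within the current block; addresses covered by a
 * nested, more specific entry are stepped over so each address is only
 * served from its enclosing block.
 */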
2219 int
2220 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2221     struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
2222 {
2223         struct pfr_kentry       *ke, *ke2 = NULL;
2224         struct pf_addr          *addr = NULL;
2225         union sockaddr_union     mask;
2226         int                      idx = -1, use_counter = 0;
2227
2228         if (af == AF_INET)
2229                 addr = (struct pf_addr *)&pfr_sin.sin_addr;
2230         else if (af == AF_INET6)
2231                 addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
2232         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2233                 kt = kt->pfrkt_root;
2234         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2235                 return (-1);
2236
2237         if (pidx != NULL)
2238                 idx = *pidx;
2239         if (counter != NULL && idx >= 0)
2240                 use_counter = 1;
2241         if (idx < 0)
2242                 idx = 0;
2243
2244 _next_block:
2245         ke = pfr_kentry_byidx(kt, idx, af);
2246         if (ke == NULL)
2247                 return (1);
2248         pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2249         *raddr = SUNION2PF(&ke->pfrke_sa, af);
2250         *rmask = SUNION2PF(&pfr_mask, af);
2251
2252         if (use_counter) {
2253                 /* is supplied address within block? */
2254                 if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2255                         /* no, go to next block in table */
2256                         idx++;
2257                         use_counter = 0;
2258                         goto _next_block;
2259                 }
2260                 PF_ACPY(addr, counter, af);
2261         } else {
2262                 /* use first address of block */
2263                 PF_ACPY(addr, *raddr, af);
2264         }
2265
2266         if (!KENTRY_NETWORK(ke)) {
2267                 /* this is a single IP address - no possible nested block */
2268                 PF_ACPY(counter, addr, af);
2269                 *pidx = idx;
2270                 return (0);
2271         }
2272         for (;;) {
2273                 /* we don't want to use a nested block */
2274                 if (af == AF_INET)
2275                         ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
2276                             kt->pfrkt_ip4);
2277                 else if (af == AF_INET6)
2278                         ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
2279                             kt->pfrkt_ip6);
2280                 /* no need to check KENTRY_RNF_ROOT() here */
2281                 if (ke2 == ke) {
2282                         /* lookup returned the same block - perfect */
2283                         PF_ACPY(counter, addr, af);
2284                         *pidx = idx;
2285                         return (0);
2286                 }
2287
2288                 /* we need to increase the counter past the nested block */
2289                 pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
2290                 PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2291                 PF_AINC(addr, af);
2292                 if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2293                         /* ok, we reached the end of our main block */
2294                         /* go to next block in table */
2295                         idx++;
2296                         use_counter = 0;
2297                         goto _next_block;
2298                 }
2299         }
2300 }
2301
2302 struct pfr_kentry *
2303 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2304 {
2305         struct pfr_walktree     w;
2306
2307         bzero(&w, sizeof(w));
2308         w.pfrw_op = PFRW_POOL_GET;
2309         w.pfrw_cnt = idx;
2310
2311         switch (af) {
2312 #ifdef INET
2313         case AF_INET:
2314 #ifdef __FreeBSD__
2315                 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2316 #else
2317                 rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2318 #endif
2319                 return (w.pfrw_kentry);
2320 #endif /* INET */
2321 #ifdef INET6
2322         case AF_INET6:
2323 #ifdef __FreeBSD__
2324                 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2325 #else
2326                 rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2327 #endif
2328                 return (w.pfrw_kentry);
2329 #endif /* INET6 */
2330         default:
2331                 return (NULL);
2332         }
2333 }
2334
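/*
 * Walk both radix trees of 'kt' with PFRW_DYNADDR_UPDATE so that the
 * address counts (pfid_acnt4/pfid_acnt6) of the dynamic address 'dyn'
 * reflect the table's current contents.
 */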
2335 void
2336 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2337 {
2338         struct pfr_walktree     w;
2339         int                     s;
2340
2341         bzero(&w, sizeof(w));
2342         w.pfrw_op = PFRW_DYNADDR_UPDATE;
2343         w.pfrw_dyn = dyn;
2344
2345         s = splsoftnet();
2346         dyn->pfid_acnt4 = 0;
2347         dyn->pfid_acnt6 = 0;
2348         if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2349 #ifdef __FreeBSD__
2350                 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2351 #else
2352                 rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2353 #endif
2354         if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2355 #ifdef __FreeBSD__
2356                 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2357 #else
2358                 rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2359 #endif
2360         splx(s);
2361 }