FreeBSD releng/7.2: sys/contrib/pf/net/pf_table.c
1 /*      $OpenBSD: pf_table.c,v 1.68 2006/05/02 10:08:45 dhartmei Exp $  */
2
3 /*
4  * Copyright (c) 2002 Cedric Berger
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  *    - Redistributions of source code must retain the above copyright
12  *      notice, this list of conditions and the following disclaimer.
13  *    - Redistributions in binary form must reproduce the above
14  *      copyright notice, this list of conditions and the following
15  *      disclaimer in the documentation and/or other materials provided
16  *      with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  */
32
33 #ifdef __FreeBSD__
34 #include "opt_inet.h"
35 #include "opt_inet6.h"
36
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39 #endif
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/socket.h>
44 #include <sys/mbuf.h>
45 #include <sys/kernel.h>
46 #ifdef __FreeBSD__
47 #include <sys/malloc.h>
48 #endif
49
50 #include <net/if.h>
51 #include <net/route.h>
52 #include <netinet/in.h>
53 #ifndef __FreeBSD__
54 #include <netinet/ip_ipsp.h>
55 #endif
56
57 #include <net/pfvar.h>
58
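/*
 * ACCEPT_FLAGS() expands in the body of each ioctl handler below and
 * makes that handler fail with EINVAL if 'flags' carries any bit
 * outside 'oklist'.
 */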
59 #define ACCEPT_FLAGS(oklist)                    \
60         do {                                    \
61                 if ((flags & ~(oklist)) &       \
62                     PFR_FLAG_ALLMASK)           \
63                         return (EINVAL);        \
64         } while (0)
65
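/*
 * COPYIN()/COPYOUT() move data to or from userland only when the request
 * originates from a user ioctl (PFR_FLAG_USERIOCTL); otherwise the buffers
 * already live in kernel memory and a plain bcopy() is used.  On FreeBSD
 * the pf lock is dropped around copyin()/copyout(), since touching user
 * memory may fault and sleep.
 */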
66 #ifdef __FreeBSD__
67 static inline int
68 _copyin(const void *uaddr, void *kaddr, size_t len)
69 {
70         int r;
71
72         PF_UNLOCK();
73         r = copyin(uaddr, kaddr, len);
74         PF_LOCK();
75
76         return (r);
77 }
78
79 static inline int
80 _copyout(const void *uaddr, void *kaddr, size_t len)
81 {
82         int r;
83
84         PF_UNLOCK();
85         r = copyout(uaddr, kaddr, len);
86         PF_LOCK();
87
88         return (r);
89 }
90
91 #define COPYIN(from, to, size)                  \
92         ((flags & PFR_FLAG_USERIOCTL) ?         \
93         _copyin((from), (to), (size)) :         \
94         (bcopy((from), (to), (size)), 0))
95
96 #define COPYOUT(from, to, size)                 \
97         ((flags & PFR_FLAG_USERIOCTL) ?         \
98         _copyout((from), (to), (size)) :        \
99         (bcopy((from), (to), (size)), 0))
100
101 #else
102
103 #define COPYIN(from, to, size)                  \
104         ((flags & PFR_FLAG_USERIOCTL) ?         \
105         copyin((from), (to), (size)) :          \
106         (bcopy((from), (to), (size)), 0))
107
108 #define COPYOUT(from, to, size)                 \
109         ((flags & PFR_FLAG_USERIOCTL) ?         \
110         copyout((from), (to), (size)) :         \
111         (bcopy((from), (to), (size)), 0))
112
113 #endif
114
115 #define FILLIN_SIN(sin, addr)                   \
116         do {                                    \
117                 (sin).sin_len = sizeof(sin);    \
118                 (sin).sin_family = AF_INET;     \
119                 (sin).sin_addr = (addr);        \
120         } while (0)
121
122 #define FILLIN_SIN6(sin6, addr)                 \
123         do {                                    \
124                 (sin6).sin6_len = sizeof(sin6); \
125                 (sin6).sin6_family = AF_INET6;  \
126                 (sin6).sin6_addr = (addr);      \
127         } while (0)
128
129 #define SWAP(type, a1, a2)                      \
130         do {                                    \
131                 type tmp = a1;                  \
132                 a1 = a2;                        \
133                 a2 = tmp;                       \
134         } while (0)
135
136 #define SUNION2PF(su, af) (((af)==AF_INET) ?    \
137     (struct pf_addr *)&(su)->sin.sin_addr :     \
138     (struct pf_addr *)&(su)->sin6.sin6_addr)
139
140 #define AF_BITS(af)             (((af)==AF_INET)?32:128)
141 #define ADDR_NETWORK(ad)        ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
142 #define KENTRY_NETWORK(ke)      ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
143 #define KENTRY_RNF_ROOT(ke) \
144                 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
145
146 #define NO_ADDRESSES            (-1)
147 #define ENQUEUE_UNMARKED_ONLY   (1)
148 #define INVERT_NEG_FLAG         (1)
149
150 struct pfr_walktree {
151         enum pfrw_op {
152                 PFRW_MARK,
153                 PFRW_SWEEP,
154                 PFRW_ENQUEUE,
155                 PFRW_GET_ADDRS,
156                 PFRW_GET_ASTATS,
157                 PFRW_POOL_GET,
158                 PFRW_DYNADDR_UPDATE
159         }        pfrw_op;
160         union {
161                 struct pfr_addr         *pfrw1_addr;
162                 struct pfr_astats       *pfrw1_astats;
163                 struct pfr_kentryworkq  *pfrw1_workq;
164                 struct pfr_kentry       *pfrw1_kentry;
165                 struct pfi_dynaddr      *pfrw1_dyn;
166         }        pfrw_1;
167         int      pfrw_free;
168         int      pfrw_flags;
169 };
170 #define pfrw_addr       pfrw_1.pfrw1_addr
171 #define pfrw_astats     pfrw_1.pfrw1_astats
172 #define pfrw_workq      pfrw_1.pfrw1_workq
173 #define pfrw_kentry     pfrw_1.pfrw1_kentry
174 #define pfrw_dyn        pfrw_1.pfrw1_dyn
175 #define pfrw_cnt        pfrw_free
176
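/* senderr() records an error code and jumps to the common _bad cleanup path. */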
177 #define senderr(e)      do { rv = (e); goto _bad; } while (0)
178
179 #ifdef __FreeBSD__
180 uma_zone_t               pfr_ktable_pl;
181 uma_zone_t               pfr_kentry_pl;
182 uma_zone_t               pfr_kentry_pl2;
183 #else
184 struct pool              pfr_ktable_pl;
185 struct pool              pfr_kentry_pl;
186 struct pool              pfr_kentry_pl2;
187 #endif
188 struct sockaddr_in       pfr_sin;
189 struct sockaddr_in6      pfr_sin6;
190 union sockaddr_union     pfr_mask;
191 struct pf_addr           pfr_ffaddr;
192
193 void                     pfr_copyout_addr(struct pfr_addr *,
194                             struct pfr_kentry *ke);
195 int                      pfr_validate_addr(struct pfr_addr *);
196 void                     pfr_enqueue_addrs(struct pfr_ktable *,
197                             struct pfr_kentryworkq *, int *, int);
198 void                     pfr_mark_addrs(struct pfr_ktable *);
199 struct pfr_kentry       *pfr_lookup_addr(struct pfr_ktable *,
200                             struct pfr_addr *, int);
201 struct pfr_kentry       *pfr_create_kentry(struct pfr_addr *, int);
202 void                     pfr_destroy_kentries(struct pfr_kentryworkq *);
203 void                     pfr_destroy_kentry(struct pfr_kentry *);
204 void                     pfr_insert_kentries(struct pfr_ktable *,
205                             struct pfr_kentryworkq *, long);
206 void                     pfr_remove_kentries(struct pfr_ktable *,
207                             struct pfr_kentryworkq *);
208 void                     pfr_clstats_kentries(struct pfr_kentryworkq *, long,
209                             int);
210 void                     pfr_reset_feedback(struct pfr_addr *, int, int);
211 void                     pfr_prepare_network(union sockaddr_union *, int, int);
212 int                      pfr_route_kentry(struct pfr_ktable *,
213                             struct pfr_kentry *);
214 int                      pfr_unroute_kentry(struct pfr_ktable *,
215                             struct pfr_kentry *);
216 int                      pfr_walktree(struct radix_node *, void *);
217 int                      pfr_validate_table(struct pfr_table *, int, int);
218 int                      pfr_fix_anchor(char *);
219 void                     pfr_commit_ktable(struct pfr_ktable *, long);
220 void                     pfr_insert_ktables(struct pfr_ktableworkq *);
221 void                     pfr_insert_ktable(struct pfr_ktable *);
222 void                     pfr_setflags_ktables(struct pfr_ktableworkq *);
223 void                     pfr_setflags_ktable(struct pfr_ktable *, int);
224 void                     pfr_clstats_ktables(struct pfr_ktableworkq *, long,
225                             int);
226 void                     pfr_clstats_ktable(struct pfr_ktable *, long, int);
227 struct pfr_ktable       *pfr_create_ktable(struct pfr_table *, long, int);
228 void                     pfr_destroy_ktables(struct pfr_ktableworkq *, int);
229 void                     pfr_destroy_ktable(struct pfr_ktable *, int);
230 int                      pfr_ktable_compare(struct pfr_ktable *,
231                             struct pfr_ktable *);
232 struct pfr_ktable       *pfr_lookup_table(struct pfr_table *);
233 void                     pfr_clean_node_mask(struct pfr_ktable *,
234                             struct pfr_kentryworkq *);
235 int                      pfr_table_count(struct pfr_table *, int);
236 int                      pfr_skip_table(struct pfr_table *,
237                             struct pfr_ktable *, int);
238 struct pfr_kentry       *pfr_kentry_byidx(struct pfr_ktable *, int, int);
239
240 RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
241 RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
242
243 struct pfr_ktablehead    pfr_ktables;
244 struct pfr_table         pfr_nulltable;
245 int                      pfr_ktable_cnt;
246
247 void
248 pfr_initialize(void)
249 {
250 #ifndef __FreeBSD__
251         pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
252             "pfrktable", &pool_allocator_oldnointr);
253         pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
254             "pfrkentry", &pool_allocator_oldnointr);
255         pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
256             "pfrkentry2", NULL);
257 #endif
258
259         pfr_sin.sin_len = sizeof(pfr_sin);
260         pfr_sin.sin_family = AF_INET;
261         pfr_sin6.sin6_len = sizeof(pfr_sin6);
262         pfr_sin6.sin6_family = AF_INET6;
263
264         memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
265 }
266
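/*
 * Flush every address from a table.  *ndel reports how many entries were
 * (or, with PFR_FLAG_DUMMY, would have been) removed.
 */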
267 int
268 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
269 {
270         struct pfr_ktable       *kt;
271         struct pfr_kentryworkq   workq;
272         int                      s;
273
274         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
275         if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
276                 return (EINVAL);
277         kt = pfr_lookup_table(tbl);
278         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
279                 return (ESRCH);
280         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
281                 return (EPERM);
282         pfr_enqueue_addrs(kt, &workq, ndel, 0);
283
284         if (!(flags & PFR_FLAG_DUMMY)) {
285                 s = 0;
286                 if (flags & PFR_FLAG_ATOMIC)
287                         s = splsoftnet();
288                 pfr_remove_kentries(kt, &workq);
289                 if (flags & PFR_FLAG_ATOMIC)
290                         splx(s);
291                 if (kt->pfrkt_cnt) {
292                         printf("pfr_clr_addrs: corruption detected (%d).\n",
293                             kt->pfrkt_cnt);
294                         kt->pfrkt_cnt = 0;
295                 }
296         }
297         return (0);
298 }
299
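/*
 * Add the 'size' addresses in 'addr' to a table.  New entries are first
 * routed into a scratch table (tmpkt) so that duplicates within the request
 * are caught; with PFR_FLAG_FEEDBACK each pfr_addr gets a per-entry result
 * code copied back.  *nadd reports the number of additions.
 */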
300 int
301 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
302     int *nadd, int flags)
303 {
304         struct pfr_ktable       *kt, *tmpkt;
305         struct pfr_kentryworkq   workq;
306         struct pfr_kentry       *p, *q;
307         struct pfr_addr          ad;
308         int                      i, rv, s = 0, xadd = 0;
309         long                     tzero = time_second;
310
311         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
312         if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
313                 return (EINVAL);
314         kt = pfr_lookup_table(tbl);
315         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
316                 return (ESRCH);
317         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
318                 return (EPERM);
319         tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
320         if (tmpkt == NULL)
321                 return (ENOMEM);
322         SLIST_INIT(&workq);
323         for (i = 0; i < size; i++) {
324                 if (COPYIN(addr+i, &ad, sizeof(ad)))
325                         senderr(EFAULT);
326                 if (pfr_validate_addr(&ad))
327                         senderr(EINVAL);
328                 p = pfr_lookup_addr(kt, &ad, 1);
329                 q = pfr_lookup_addr(tmpkt, &ad, 1);
330                 if (flags & PFR_FLAG_FEEDBACK) {
331                         if (q != NULL)
332                                 ad.pfra_fback = PFR_FB_DUPLICATE;
333                         else if (p == NULL)
334                                 ad.pfra_fback = PFR_FB_ADDED;
335                         else if (p->pfrke_not != ad.pfra_not)
336                                 ad.pfra_fback = PFR_FB_CONFLICT;
337                         else
338                                 ad.pfra_fback = PFR_FB_NONE;
339                 }
340                 if (p == NULL && q == NULL) {
341                         p = pfr_create_kentry(&ad, 0);
342                         if (p == NULL)
343                                 senderr(ENOMEM);
344                         if (pfr_route_kentry(tmpkt, p)) {
345                                 pfr_destroy_kentry(p);
346                                 ad.pfra_fback = PFR_FB_NONE;
347                         } else {
348                                 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
349                                 xadd++;
350                         }
351                 }
352                 if (flags & PFR_FLAG_FEEDBACK) {
353                         if (COPYOUT(&ad, addr+i, sizeof(ad)))
354                                 senderr(EFAULT);
355                 }
356         }
357         pfr_clean_node_mask(tmpkt, &workq);
358         if (!(flags & PFR_FLAG_DUMMY)) {
359                 if (flags & PFR_FLAG_ATOMIC)
360                         s = splsoftnet();
361                 pfr_insert_kentries(kt, &workq, tzero);
362                 if (flags & PFR_FLAG_ATOMIC)
363                         splx(s);
364         } else
365                 pfr_destroy_kentries(&workq);
366         if (nadd != NULL)
367                 *nadd = xadd;
368         pfr_destroy_ktable(tmpkt, 0);
369         return (0);
370 _bad:
371         pfr_clean_node_mask(tmpkt, &workq);
372         pfr_destroy_kentries(&workq);
373         if (flags & PFR_FLAG_FEEDBACK)
374                 pfr_reset_feedback(addr, size, flags);
375         pfr_destroy_ktable(tmpkt, 0);
376         return (rv);
377 }
378
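/*
 * Delete the listed addresses from a table.  Entries to remove are marked
 * first, using either a full-table pass or per-address lookups, whichever
 * is cheaper (see the comment in the body).  *ndel reports the count.
 */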
379 int
380 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
381     int *ndel, int flags)
382 {
383         struct pfr_ktable       *kt;
384         struct pfr_kentryworkq   workq;
385         struct pfr_kentry       *p;
386         struct pfr_addr          ad;
387         int                      i, rv, s = 0, xdel = 0, log = 1;
388
389         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
390         if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
391                 return (EINVAL);
392         kt = pfr_lookup_table(tbl);
393         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
394                 return (ESRCH);
395         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
396                 return (EPERM);
397         /*
398          * there are two algorithms to choose from here.
399          * with:
400          *   n: number of addresses to delete
401          *   N: number of addresses in the table
402          *
403          * one is O(N) and is better for large 'n'
404          * one is O(n*LOG(N)) and is better for small 'n'
405          *
406          * the following code tries to decide which one is best.
407          */
408         for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
409                 log++;
410         if (size > kt->pfrkt_cnt/log) {
411                 /* full table scan */
412                 pfr_mark_addrs(kt);
413         } else {
414                 /* iterate over addresses to delete */
415                 for (i = 0; i < size; i++) {
416                         if (COPYIN(addr+i, &ad, sizeof(ad)))
417                                 return (EFAULT);
418                         if (pfr_validate_addr(&ad))
419                                 return (EINVAL);
420                         p = pfr_lookup_addr(kt, &ad, 1);
421                         if (p != NULL)
422                                 p->pfrke_mark = 0;
423                 }
424         }
425         SLIST_INIT(&workq);
426         for (i = 0; i < size; i++) {
427                 if (COPYIN(addr+i, &ad, sizeof(ad)))
428                         senderr(EFAULT);
429                 if (pfr_validate_addr(&ad))
430                         senderr(EINVAL);
431                 p = pfr_lookup_addr(kt, &ad, 1);
432                 if (flags & PFR_FLAG_FEEDBACK) {
433                         if (p == NULL)
434                                 ad.pfra_fback = PFR_FB_NONE;
435                         else if (p->pfrke_not != ad.pfra_not)
436                                 ad.pfra_fback = PFR_FB_CONFLICT;
437                         else if (p->pfrke_mark)
438                                 ad.pfra_fback = PFR_FB_DUPLICATE;
439                         else
440                                 ad.pfra_fback = PFR_FB_DELETED;
441                 }
442                 if (p != NULL && p->pfrke_not == ad.pfra_not &&
443                     !p->pfrke_mark) {
444                         p->pfrke_mark = 1;
445                         SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
446                         xdel++;
447                 }
448                 if (flags & PFR_FLAG_FEEDBACK)
449                         if (COPYOUT(&ad, addr+i, sizeof(ad)))
450                                 senderr(EFAULT);
451         }
452         if (!(flags & PFR_FLAG_DUMMY)) {
453                 if (flags & PFR_FLAG_ATOMIC)
454                         s = splsoftnet();
455                 pfr_remove_kentries(kt, &workq);
456                 if (flags & PFR_FLAG_ATOMIC)
457                         splx(s);
458         }
459         if (ndel != NULL)
460                 *ndel = xdel;
461         return (0);
462 _bad:
463         if (flags & PFR_FLAG_FEEDBACK)
464                 pfr_reset_feedback(addr, size, flags);
465         return (rv);
466 }
467
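/*
 * Replace the contents of a table with the given address list, computing
 * the sets of additions, deletions and negation changes.  With
 * PFR_FLAG_FEEDBACK and enough room (*size2), the deleted addresses are
 * appended to the caller's buffer after the input entries.
 */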
468 int
469 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
470     int *size2, int *nadd, int *ndel, int *nchange, int flags,
471     u_int32_t ignore_pfrt_flags)
472 {
473         struct pfr_ktable       *kt, *tmpkt;
474         struct pfr_kentryworkq   addq, delq, changeq;
475         struct pfr_kentry       *p, *q;
476         struct pfr_addr          ad;
477         int                      i, rv, s = 0, xadd = 0, xdel = 0, xchange = 0;
478         long                     tzero = time_second;
479
480         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
481         if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
482             PFR_FLAG_USERIOCTL))
483                 return (EINVAL);
484         kt = pfr_lookup_table(tbl);
485         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
486                 return (ESRCH);
487         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
488                 return (EPERM);
489         tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
490         if (tmpkt == NULL)
491                 return (ENOMEM);
492         pfr_mark_addrs(kt);
493         SLIST_INIT(&addq);
494         SLIST_INIT(&delq);
495         SLIST_INIT(&changeq);
496         for (i = 0; i < size; i++) {
497                 if (COPYIN(addr+i, &ad, sizeof(ad)))
498                         senderr(EFAULT);
499                 if (pfr_validate_addr(&ad))
500                         senderr(EINVAL);
501                 ad.pfra_fback = PFR_FB_NONE;
502                 p = pfr_lookup_addr(kt, &ad, 1);
503                 if (p != NULL) {
504                         if (p->pfrke_mark) {
505                                 ad.pfra_fback = PFR_FB_DUPLICATE;
506                                 goto _skip;
507                         }
508                         p->pfrke_mark = 1;
509                         if (p->pfrke_not != ad.pfra_not) {
510                                 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
511                                 ad.pfra_fback = PFR_FB_CHANGED;
512                                 xchange++;
513                         }
514                 } else {
515                         q = pfr_lookup_addr(tmpkt, &ad, 1);
516                         if (q != NULL) {
517                                 ad.pfra_fback = PFR_FB_DUPLICATE;
518                                 goto _skip;
519                         }
520                         p = pfr_create_kentry(&ad, 0);
521                         if (p == NULL)
522                                 senderr(ENOMEM);
523                         if (pfr_route_kentry(tmpkt, p)) {
524                                 pfr_destroy_kentry(p);
525                                 ad.pfra_fback = PFR_FB_NONE;
526                         } else {
527                                 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
528                                 ad.pfra_fback = PFR_FB_ADDED;
529                                 xadd++;
530                         }
531                 }
532 _skip:
533                 if (flags & PFR_FLAG_FEEDBACK)
534                         if (COPYOUT(&ad, addr+i, sizeof(ad)))
535                                 senderr(EFAULT);
536         }
537         pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
538         if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
539                 if (*size2 < size+xdel) {
540                         *size2 = size+xdel;
541                         senderr(0);
542                 }
543                 i = 0;
544                 SLIST_FOREACH(p, &delq, pfrke_workq) {
545                         pfr_copyout_addr(&ad, p);
546                         ad.pfra_fback = PFR_FB_DELETED;
547                         if (COPYOUT(&ad, addr+size+i, sizeof(ad)))
548                                 senderr(EFAULT);
549                         i++;
550                 }
551         }
552         pfr_clean_node_mask(tmpkt, &addq);
553         if (!(flags & PFR_FLAG_DUMMY)) {
554                 if (flags & PFR_FLAG_ATOMIC)
555                         s = splsoftnet();
556                 pfr_insert_kentries(kt, &addq, tzero);
557                 pfr_remove_kentries(kt, &delq);
558                 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
559                 if (flags & PFR_FLAG_ATOMIC)
560                         splx(s);
561         } else
562                 pfr_destroy_kentries(&addq);
563         if (nadd != NULL)
564                 *nadd = xadd;
565         if (ndel != NULL)
566                 *ndel = xdel;
567         if (nchange != NULL)
568                 *nchange = xchange;
569         if ((flags & PFR_FLAG_FEEDBACK) && size2)
570                 *size2 = size+xdel;
571         pfr_destroy_ktable(tmpkt, 0);
572         return (0);
573 _bad:
574         pfr_clean_node_mask(tmpkt, &addq);
575         pfr_destroy_kentries(&addq);
576         if (flags & PFR_FLAG_FEEDBACK)
577                 pfr_reset_feedback(addr, size, flags);
578         pfr_destroy_ktable(tmpkt, 0);
579         return (rv);
580 }
581
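/*
 * Test which of the given host addresses currently match the table.
 * *nmatch counts non-negated matches; with PFR_FLAG_REPLACE the matching
 * table entry is copied back into the caller's pfr_addr.
 */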
582 int
583 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
584         int *nmatch, int flags)
585 {
586         struct pfr_ktable       *kt;
587         struct pfr_kentry       *p;
588         struct pfr_addr          ad;
589         int                      i, xmatch = 0;
590
591         ACCEPT_FLAGS(PFR_FLAG_REPLACE);
592         if (pfr_validate_table(tbl, 0, 0))
593                 return (EINVAL);
594         kt = pfr_lookup_table(tbl);
595         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
596                 return (ESRCH);
597
598         for (i = 0; i < size; i++) {
599                 if (COPYIN(addr+i, &ad, sizeof(ad)))
600                         return (EFAULT);
601                 if (pfr_validate_addr(&ad))
602                         return (EINVAL);
603                 if (ADDR_NETWORK(&ad))
604                         return (EINVAL);
605                 p = pfr_lookup_addr(kt, &ad, 0);
606                 if (flags & PFR_FLAG_REPLACE)
607                         pfr_copyout_addr(&ad, p);
608                 ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
609                     (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
610                 if (p != NULL && !p->pfrke_not)
611                         xmatch++;
612                 if (COPYOUT(&ad, addr+i, sizeof(ad)))
613                         return (EFAULT);
614         }
615         if (nmatch != NULL)
616                 *nmatch = xmatch;
617         return (0);
618 }
619
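/*
 * Copy all addresses of a table out to the caller.  If the supplied buffer
 * is too small, only the required size is reported back in *size.
 */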
620 int
621 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
622         int flags)
623 {
624         struct pfr_ktable       *kt;
625         struct pfr_walktree      w;
626         int                      rv;
627
628         ACCEPT_FLAGS(0);
629         if (pfr_validate_table(tbl, 0, 0))
630                 return (EINVAL);
631         kt = pfr_lookup_table(tbl);
632         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
633                 return (ESRCH);
634         if (kt->pfrkt_cnt > *size) {
635                 *size = kt->pfrkt_cnt;
636                 return (0);
637         }
638
639         bzero(&w, sizeof(w));
640         w.pfrw_op = PFRW_GET_ADDRS;
641         w.pfrw_addr = addr;
642         w.pfrw_free = kt->pfrkt_cnt;
643         w.pfrw_flags = flags;
644 #ifdef __FreeBSD__
645         rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
646 #else
647         rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
648 #endif
649         if (!rv)
650 #ifdef __FreeBSD__
651                 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, 
652                     &w);
653 #else
654                 rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
655 #endif
656         if (rv)
657                 return (rv);
658
659         if (w.pfrw_free) {
660                 printf("pfr_get_addrs: corruption detected (%d).\n",
661                     w.pfrw_free);
662                 return (ENOTTY);
663         }
664         *size = kt->pfrkt_cnt;
665         return (0);
666 }
667
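/*
 * Like pfr_get_addrs(), but copies out pfr_astats records (address plus
 * packet/byte counters and tzero) instead of bare addresses.
 */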
668 int
669 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
670         int flags)
671 {
672         struct pfr_ktable       *kt;
673         struct pfr_walktree      w;
674         struct pfr_kentryworkq   workq;
675         int                      rv, s = 0;
676         long                     tzero = time_second;
677
678         ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
679         if (pfr_validate_table(tbl, 0, 0))
680                 return (EINVAL);
681         kt = pfr_lookup_table(tbl);
682         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
683                 return (ESRCH);
684         if (kt->pfrkt_cnt > *size) {
685                 *size = kt->pfrkt_cnt;
686                 return (0);
687         }
688
689         bzero(&w, sizeof(w));
690         w.pfrw_op = PFRW_GET_ASTATS;
691         w.pfrw_astats = addr;
692         w.pfrw_free = kt->pfrkt_cnt;
693         w.pfrw_flags = flags;
694         if (flags & PFR_FLAG_ATOMIC)
695                 s = splsoftnet();
696 #ifdef __FreeBSD__
697         rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
698 #else
699         rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
700 #endif
701         if (!rv)
702 #ifdef __FreeBSD__
703                 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, 
704                     &w);
705 #else
706                 rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
707 #endif
708         if (!rv && (flags & PFR_FLAG_CLSTATS)) {
709                 pfr_enqueue_addrs(kt, &workq, NULL, 0);
710                 pfr_clstats_kentries(&workq, tzero, 0);
711         }
712         if (flags & PFR_FLAG_ATOMIC)
713                 splx(s);
714         if (rv)
715                 return (rv);
716
717         if (w.pfrw_free) {
718                 printf("pfr_get_astats: corruption detected (%d).\n",
719                     w.pfrw_free);
720                 return (ENOTTY);
721         }
722         *size = kt->pfrkt_cnt;
723         return (0);
724 }
725
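/*
 * Zero the packet and byte counters of the listed addresses.  *nzero
 * reports how many entries were found and (unless PFR_FLAG_DUMMY) cleared.
 */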
726 int
727 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
728     int *nzero, int flags)
729 {
730         struct pfr_ktable       *kt;
731         struct pfr_kentryworkq   workq;
732         struct pfr_kentry       *p;
733         struct pfr_addr          ad;
734         int                      i, rv, s = 0, xzero = 0;
735
736         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
737         if (pfr_validate_table(tbl, 0, 0))
738                 return (EINVAL);
739         kt = pfr_lookup_table(tbl);
740         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
741                 return (ESRCH);
742         SLIST_INIT(&workq);
743         for (i = 0; i < size; i++) {
744                 if (COPYIN(addr+i, &ad, sizeof(ad)))
745                         senderr(EFAULT);
746                 if (pfr_validate_addr(&ad))
747                         senderr(EINVAL);
748                 p = pfr_lookup_addr(kt, &ad, 1);
749                 if (flags & PFR_FLAG_FEEDBACK) {
750                         ad.pfra_fback = (p != NULL) ?
751                             PFR_FB_CLEARED : PFR_FB_NONE;
752                         if (COPYOUT(&ad, addr+i, sizeof(ad)))
753                                 senderr(EFAULT);
754                 }
755                 if (p != NULL) {
756                         SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
757                         xzero++;
758                 }
759         }
760
761         if (!(flags & PFR_FLAG_DUMMY)) {
762                 if (flags & PFR_FLAG_ATOMIC)
763                         s = splsoftnet();
764                 pfr_clstats_kentries(&workq, 0, 0);
765                 if (flags & PFR_FLAG_ATOMIC)
766                         splx(s);
767         }
768         if (nzero != NULL)
769                 *nzero = xzero;
770         return (0);
771 _bad:
772         if (flags & PFR_FLAG_FEEDBACK)
773                 pfr_reset_feedback(addr, size, flags);
774         return (rv);
775 }
776
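/*
 * Sanity-check a pfr_addr coming from userland: known address family,
 * prefix length within bounds, no address bits set beyond the prefix,
 * and clean pfra_not/pfra_fback fields.
 */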
777 int
778 pfr_validate_addr(struct pfr_addr *ad)
779 {
780         int i;
781
782         switch (ad->pfra_af) {
783 #ifdef INET
784         case AF_INET:
785                 if (ad->pfra_net > 32)
786                         return (-1);
787                 break;
788 #endif /* INET */
789 #ifdef INET6
790         case AF_INET6:
791                 if (ad->pfra_net > 128)
792                         return (-1);
793                 break;
794 #endif /* INET6 */
795         default:
796                 return (-1);
797         }
798         if (ad->pfra_net < 128 &&
799                 (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
800                         return (-1);
801         for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
802                 if (((caddr_t)ad)[i])
803                         return (-1);
804         if (ad->pfra_not && ad->pfra_not != 1)
805                 return (-1);
806         if (ad->pfra_fback)
807                 return (-1);
808         return (0);
809 }
810
811 void
812 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
813         int *naddr, int sweep)
814 {
815         struct pfr_walktree     w;
816
817         SLIST_INIT(workq);
818         bzero(&w, sizeof(w));
819         w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
820         w.pfrw_workq = workq;
821         if (kt->pfrkt_ip4 != NULL)
822 #ifdef __FreeBSD__
823                 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, 
824                     &w))
825 #else
826                 if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
827 #endif
828                         printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
829         if (kt->pfrkt_ip6 != NULL)
830 #ifdef __FreeBSD__
831                 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, 
832                     &w))
833 #else
834                 if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
835 #endif
836                         printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
837         if (naddr != NULL)
838                 *naddr = w.pfrw_cnt;
839 }
840
841 void
842 pfr_mark_addrs(struct pfr_ktable *kt)
843 {
844         struct pfr_walktree     w;
845
846         bzero(&w, sizeof(w));
847         w.pfrw_op = PFRW_MARK;
848 #ifdef __FreeBSD__
849         if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
850 #else
851         if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
852 #endif
853                 printf("pfr_mark_addrs: IPv4 walktree failed.\n");
854 #ifdef __FreeBSD__
855         if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
856 #else
857         if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
858 #endif
859                 printf("pfr_mark_addrs: IPv6 walktree failed.\n");
860 }
861
862
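/*
 * Find the table entry for an address.  Network addresses use an exact
 * rn_lookup() with the prefix mask; host addresses use best-match
 * rn_match(), and with 'exact' set a hit on a network entry is discarded.
 */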
863 struct pfr_kentry *
864 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
865 {
866         union sockaddr_union     sa, mask;
867         struct radix_node_head  *head = NULL;   /* make the compiler happy */
868         struct pfr_kentry       *ke;
869         int                      s;
870
871         bzero(&sa, sizeof(sa));
872         if (ad->pfra_af == AF_INET) {
873                 FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
874                 head = kt->pfrkt_ip4;
875         } else if ( ad->pfra_af == AF_INET6 ) {
876                 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
877                 head = kt->pfrkt_ip6;
878         }
879         if (ADDR_NETWORK(ad)) {
880                 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
881                 s = splsoftnet(); /* rn_lookup makes use of globals */
882 #ifdef __FreeBSD__
883                 PF_ASSERT(MA_OWNED);
884 #endif
885                 ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
886                 splx(s);
887                 if (ke && KENTRY_RNF_ROOT(ke))
888                         ke = NULL;
889         } else {
890                 ke = (struct pfr_kentry *)rn_match(&sa, head);
891                 if (ke && KENTRY_RNF_ROOT(ke))
892                         ke = NULL;
893                 if (exact && ke && KENTRY_NETWORK(ke))
894                         ke = NULL;
895         }
896         return (ke);
897 }
898
899 struct pfr_kentry *
900 pfr_create_kentry(struct pfr_addr *ad, int intr)
901 {
902         struct pfr_kentry       *ke;
903
904         if (intr)
905                 ke = pool_get(&pfr_kentry_pl2, PR_NOWAIT);
906         else
907                 ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
908         if (ke == NULL)
909                 return (NULL);
910         bzero(ke, sizeof(*ke));
911
912         if (ad->pfra_af == AF_INET)
913                 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
914         else if (ad->pfra_af == AF_INET6)
915                 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
916         ke->pfrke_af = ad->pfra_af;
917         ke->pfrke_net = ad->pfra_net;
918         ke->pfrke_not = ad->pfra_not;
919         ke->pfrke_intrpool = intr;
920         return (ke);
921 }
922
923 void
924 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
925 {
926         struct pfr_kentry       *p, *q;
927
928         for (p = SLIST_FIRST(workq); p != NULL; p = q) {
929                 q = SLIST_NEXT(p, pfrke_workq);
930                 pfr_destroy_kentry(p);
931         }
932 }
933
934 void
935 pfr_destroy_kentry(struct pfr_kentry *ke)
936 {
937         if (ke->pfrke_intrpool)
938                 pool_put(&pfr_kentry_pl2, ke);
939         else
940                 pool_put(&pfr_kentry_pl, ke);
941 }
942
943 void
944 pfr_insert_kentries(struct pfr_ktable *kt,
945     struct pfr_kentryworkq *workq, long tzero)
946 {
947         struct pfr_kentry       *p;
948         int                      rv, n = 0;
949
950         SLIST_FOREACH(p, workq, pfrke_workq) {
951                 rv = pfr_route_kentry(kt, p);
952                 if (rv) {
953                         printf("pfr_insert_kentries: cannot route entry "
954                             "(code=%d).\n", rv);
955                         break;
956                 }
957                 p->pfrke_tzero = tzero;
958                 n++;
959         }
960         kt->pfrkt_cnt += n;
961 }
962
963 int
964 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
965 {
966         struct pfr_kentry       *p;
967         int                      rv;
968
969         p = pfr_lookup_addr(kt, ad, 1);
970         if (p != NULL)
971                 return (0);
972         p = pfr_create_kentry(ad, 1);
973         if (p == NULL)
974                 return (EINVAL);
975
976         rv = pfr_route_kentry(kt, p);
977         if (rv)
978                 return (rv);
979
980         p->pfrke_tzero = tzero;
981         kt->pfrkt_cnt++;
982
983         return (0);
984 }
985
986 void
987 pfr_remove_kentries(struct pfr_ktable *kt,
988     struct pfr_kentryworkq *workq)
989 {
990         struct pfr_kentry       *p;
991         int                      n = 0;
992
993         SLIST_FOREACH(p, workq, pfrke_workq) {
994                 pfr_unroute_kentry(kt, p);
995                 n++;
996         }
997         kt->pfrkt_cnt -= n;
998         pfr_destroy_kentries(workq);
999 }
1000
1001 void
1002 pfr_clean_node_mask(struct pfr_ktable *kt,
1003     struct pfr_kentryworkq *workq)
1004 {
1005         struct pfr_kentry       *p;
1006
1007         SLIST_FOREACH(p, workq, pfrke_workq)
1008                 pfr_unroute_kentry(kt, p);
1009 }
1010
1011 void
1012 pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
1013 {
1014         struct pfr_kentry       *p;
1015         int                      s;
1016
1017         SLIST_FOREACH(p, workq, pfrke_workq) {
1018                 s = splsoftnet();
1019                 if (negchange)
1020                         p->pfrke_not = !p->pfrke_not;
1021                 bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
1022                 bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
1023                 splx(s);
1024                 p->pfrke_tzero = tzero;
1025         }
1026 }
1027
1028 void
1029 pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
1030 {
1031         struct pfr_addr ad;
1032         int             i;
1033
1034         for (i = 0; i < size; i++) {
1035                 if (COPYIN(addr+i, &ad, sizeof(ad)))
1036                         break;
1037                 ad.pfra_fback = PFR_FB_NONE;
1038                 if (COPYOUT(&ad, addr+i, sizeof(ad)))
1039                         break;
1040         }
1041 }
1042
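/*
 * Build a sockaddr netmask for an 'af' prefix of length 'net'; e.g.
 * net=24 with AF_INET yields 0xffffff00 in network byte order, while for
 * AF_INET6 the mask is filled in 32 bits at a time.
 */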
1043 void
1044 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
1045 {
1046         int     i;
1047
1048         bzero(sa, sizeof(*sa));
1049         if (af == AF_INET) {
1050                 sa->sin.sin_len = sizeof(sa->sin);
1051                 sa->sin.sin_family = AF_INET;
1052                 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
1053         } else if (af == AF_INET6) {
1054                 sa->sin6.sin6_len = sizeof(sa->sin6);
1055                 sa->sin6.sin6_family = AF_INET6;
1056                 for (i = 0; i < 4; i++) {
1057                         if (net <= 32) {
1058                                 sa->sin6.sin6_addr.s6_addr32[i] =
1059                                     net ? htonl(-1 << (32-net)) : 0;
1060                                 break;
1061                         }
1062                         sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
1063                         net -= 32;
1064                 }
1065         }
1066 }
1067
1068 int
1069 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
1070 {
1071         union sockaddr_union     mask;
1072         struct radix_node       *rn;
1073         struct radix_node_head  *head = NULL;   /* make the compiler happy */
1074         int                      s;
1075
1076         bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
1077         if (ke->pfrke_af == AF_INET)
1078                 head = kt->pfrkt_ip4;
1079         else if (ke->pfrke_af == AF_INET6)
1080                 head = kt->pfrkt_ip6;
1081
1082         s = splsoftnet();
1083 #ifdef __FreeBSD__
1084         PF_ASSERT(MA_OWNED);
1085 #endif
1086         if (KENTRY_NETWORK(ke)) {
1087                 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1088                 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
1089         } else
1090                 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
1091         splx(s);
1092
1093         return (rn == NULL ? -1 : 0);
1094 }
1095
1096 int
1097 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
1098 {
1099         union sockaddr_union     mask;
1100         struct radix_node       *rn;
1101         struct radix_node_head  *head = NULL;   /* make the compiler happy */
1102         int                      s;
1103
1104         if (ke->pfrke_af == AF_INET)
1105                 head = kt->pfrkt_ip4;
1106         else if (ke->pfrke_af == AF_INET6)
1107                 head = kt->pfrkt_ip6;
1108
1109         s = splsoftnet();
1110 #ifdef __FreeBSD__
1111         PF_ASSERT(MA_OWNED);
1112 #endif
1113         if (KENTRY_NETWORK(ke)) {
1114                 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1115 #ifdef __FreeBSD__
1116                 rn = rn_delete(&ke->pfrke_sa, &mask, head);
1117 #else
1118                 rn = rn_delete(&ke->pfrke_sa, &mask, head, NULL);
1119 #endif
1120         } else
1121 #ifdef __FreeBSD__
1122                 rn = rn_delete(&ke->pfrke_sa, NULL, head);
1123 #else
1124                 rn = rn_delete(&ke->pfrke_sa, NULL, head, NULL);
1125 #endif
1126         splx(s);
1127
1128         if (rn == NULL) {
1129                 printf("pfr_unroute_kentry: delete failed.\n");
1130                 return (-1);
1131         }
1132         return (0);
1133 }
1134
1135 void
1136 pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
1137 {
1138         bzero(ad, sizeof(*ad));
1139         if (ke == NULL)
1140                 return;
1141         ad->pfra_af = ke->pfrke_af;
1142         ad->pfra_net = ke->pfrke_net;
1143         ad->pfra_not = ke->pfrke_not;
1144         if (ad->pfra_af == AF_INET)
1145                 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1146         else if (ad->pfra_af == AF_INET6)
1147                 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1148 }
1149
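/*
 * Callback for the radix-tree walks above: dispatches on pfrw_op to mark,
 * sweep or enqueue entries, copy out addresses or statistics, pick the
 * n-th entry for a pool lookup, or update a dynamic address.
 */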
1150 int
1151 pfr_walktree(struct radix_node *rn, void *arg)
1152 {
1153         struct pfr_kentry       *ke = (struct pfr_kentry *)rn;
1154         struct pfr_walktree     *w = arg;
1155         int                      s, flags = w->pfrw_flags;
1156
1157         switch (w->pfrw_op) {
1158         case PFRW_MARK:
1159                 ke->pfrke_mark = 0;
1160                 break;
1161         case PFRW_SWEEP:
1162                 if (ke->pfrke_mark)
1163                         break;
1164                 /* FALLTHROUGH */
1165         case PFRW_ENQUEUE:
1166                 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1167                 w->pfrw_cnt++;
1168                 break;
1169         case PFRW_GET_ADDRS:
1170                 if (w->pfrw_free-- > 0) {
1171                         struct pfr_addr ad;
1172
1173                         pfr_copyout_addr(&ad, ke);
1174                         if (COPYOUT(&ad, w->pfrw_addr, sizeof(ad)))
1175                                 return (EFAULT);
1176                         w->pfrw_addr++;
1177                 }
1178                 break;
1179         case PFRW_GET_ASTATS:
1180                 if (w->pfrw_free-- > 0) {
1181                         struct pfr_astats as;
1182
1183                         pfr_copyout_addr(&as.pfras_a, ke);
1184
1185                         s = splsoftnet();
1186                         bcopy(ke->pfrke_packets, as.pfras_packets,
1187                             sizeof(as.pfras_packets));
1188                         bcopy(ke->pfrke_bytes, as.pfras_bytes,
1189                             sizeof(as.pfras_bytes));
1190                         splx(s);
1191                         as.pfras_tzero = ke->pfrke_tzero;
1192
1193                         if (COPYOUT(&as, w->pfrw_astats, sizeof(as)))
1194                                 return (EFAULT);
1195                         w->pfrw_astats++;
1196                 }
1197                 break;
1198         case PFRW_POOL_GET:
1199                 if (ke->pfrke_not)
1200                         break; /* negative entries are ignored */
1201                 if (!w->pfrw_cnt--) {
1202                         w->pfrw_kentry = ke;
1203                         return (1); /* finish search */
1204                 }
1205                 break;
1206         case PFRW_DYNADDR_UPDATE:
1207                 if (ke->pfrke_af == AF_INET) {
1208                         if (w->pfrw_dyn->pfid_acnt4++ > 0)
1209                                 break;
1210                         pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1211                         w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
1212                             &ke->pfrke_sa, AF_INET);
1213                         w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
1214                             &pfr_mask, AF_INET);
1215                 } else if (ke->pfrke_af == AF_INET6){
1216                         if (w->pfrw_dyn->pfid_acnt6++ > 0)
1217                                 break;
1218                         pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1219                         w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
1220                             &ke->pfrke_sa, AF_INET6);
1221                         w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
1222                             &pfr_mask, AF_INET6);
1223                 }
1224                 break;
1225         }
1226         return (0);
1227 }
1228
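/*
 * Delete every table matching 'filter' (except those in the reserved
 * anchor) by clearing their ACTIVE flag.  *ndel reports the count.
 */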
1229 int
1230 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1231 {
1232         struct pfr_ktableworkq   workq;
1233         struct pfr_ktable       *p;
1234         int                      s = 0, xdel = 0;
1235
1236         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
1237         if (pfr_fix_anchor(filter->pfrt_anchor))
1238                 return (EINVAL);
1239         if (pfr_table_count(filter, flags) < 0)
1240                 return (ENOENT);
1241
1242         SLIST_INIT(&workq);
1243         RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1244                 if (pfr_skip_table(filter, p, flags))
1245                         continue;
1246                 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1247                         continue;
1248                 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1249                         continue;
1250                 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1251                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1252                 xdel++;
1253         }
1254         if (!(flags & PFR_FLAG_DUMMY)) {
1255                 if (flags & PFR_FLAG_ATOMIC)
1256                         s = splsoftnet();
1257                 pfr_setflags_ktables(&workq);
1258                 if (flags & PFR_FLAG_ATOMIC)
1259                         splx(s);
1260         }
1261         if (ndel != NULL)
1262                 *ndel = xdel;
1263         return (0);
1264 }
1265
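/*
 * Create the listed tables, or re-activate existing inactive ones.  A table
 * defined inside an anchor also gets a root table at the top level created
 * or linked via pfrkt_root.
 */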
1266 int
1267 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1268 {
1269         struct pfr_ktableworkq   addq, changeq;
1270         struct pfr_ktable       *p, *q, *r, key;
1271         int                      i, rv, s = 0, xadd = 0;
1272         long                     tzero = time_second;
1273
1274         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
1275         SLIST_INIT(&addq);
1276         SLIST_INIT(&changeq);
1277         for (i = 0; i < size; i++) {
1278                 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
1279                         senderr(EFAULT);
1280                 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1281                     flags & PFR_FLAG_USERIOCTL))
1282                         senderr(EINVAL);
1283                 key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1284                 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1285                 if (p == NULL) {
1286                         p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
1287                         if (p == NULL)
1288                                 senderr(ENOMEM);
1289                         SLIST_FOREACH(q, &addq, pfrkt_workq) {
1290                                 if (!pfr_ktable_compare(p, q))
1291                                         goto _skip;
1292                         }
1293                         SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1294                         xadd++;
1295                         if (!key.pfrkt_anchor[0])
1296                                 goto _skip;
1297
1298                         /* find or create root table */
1299                         bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
1300                         r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1301                         if (r != NULL) {
1302                                 p->pfrkt_root = r;
1303                                 goto _skip;
1304                         }
1305                         SLIST_FOREACH(q, &addq, pfrkt_workq) {
1306                                 if (!pfr_ktable_compare(&key, q)) {
1307                                         p->pfrkt_root = q;
1308                                         goto _skip;
1309                                 }
1310                         }
1311                         key.pfrkt_flags = 0;
1312                         r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1313                         if (r == NULL)
1314                                 senderr(ENOMEM);
1315                         SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
1316                         p->pfrkt_root = r;
1317                 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1318                         SLIST_FOREACH(q, &changeq, pfrkt_workq)
1319                                 if (!pfr_ktable_compare(&key, q))
1320                                         goto _skip;
1321                         p->pfrkt_nflags = (p->pfrkt_flags &
1322                             ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1323                         SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1324                         xadd++;
1325                 }
1326 _skip:
1327         ;
1328         }
1329         if (!(flags & PFR_FLAG_DUMMY)) {
1330                 if (flags & PFR_FLAG_ATOMIC)
1331                         s = splsoftnet();
1332                 pfr_insert_ktables(&addq);
1333                 pfr_setflags_ktables(&changeq);
1334                 if (flags & PFR_FLAG_ATOMIC)
1335                         splx(s);
1336         } else
1337                  pfr_destroy_ktables(&addq, 0);
1338         if (nadd != NULL)
1339                 *nadd = xadd;
1340         return (0);
1341 _bad:
1342         pfr_destroy_ktables(&addq, 0);
1343         return (rv);
1344 }
1345
1346 int
1347 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1348 {
1349         struct pfr_ktableworkq   workq;
1350         struct pfr_ktable       *p, *q, key;
1351         int                      i, s = 0, xdel = 0;
1352
1353         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
1354         SLIST_INIT(&workq);
1355         for (i = 0; i < size; i++) {
1356                 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
1357                         return (EFAULT);
1358                 if (pfr_validate_table(&key.pfrkt_t, 0,
1359                     flags & PFR_FLAG_USERIOCTL))
1360                         return (EINVAL);
1361                 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1362                 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1363                         SLIST_FOREACH(q, &workq, pfrkt_workq)
1364                                 if (!pfr_ktable_compare(p, q))
1365                                         goto _skip;
1366                         p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1367                         SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1368                         xdel++;
1369                 }
1370 _skip:
1371         ;
1372         }
1373
1374         if (!(flags & PFR_FLAG_DUMMY)) {
1375                 if (flags & PFR_FLAG_ATOMIC)
1376                         s = splsoftnet();
1377                 pfr_setflags_ktables(&workq);
1378                 if (flags & PFR_FLAG_ATOMIC)
1379                         splx(s);
1380         }
1381         if (ndel != NULL)
1382                 *ndel = xdel;
1383         return (0);
1384 }
1385
1386 int
1387 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1388         int flags)
1389 {
1390         struct pfr_ktable       *p;
1391         int                      n, nn;
1392
1393         ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
1394         if (pfr_fix_anchor(filter->pfrt_anchor))
1395                 return (EINVAL);
1396         n = nn = pfr_table_count(filter, flags);
1397         if (n < 0)
1398                 return (ENOENT);
1399         if (n > *size) {
1400                 *size = n;
1401                 return (0);
1402         }
1403         RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1404                 if (pfr_skip_table(filter, p, flags))
1405                         continue;
1406                 if (n-- <= 0)
1407                         continue;
1408                 if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl)))
1409                         return (EFAULT);
1410         }
1411         if (n) {
1412                 printf("pfr_get_tables: corruption detected (%d).\n", n);
1413                 return (ENOTTY);
1414         }
1415         *size = nn;
1416         return (0);
1417 }
1418
1419 int
1420 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1421         int flags)
1422 {
1423         struct pfr_ktable       *p;
1424         struct pfr_ktableworkq   workq;
1425         int                      s = 0, n, nn;
1426         long                     tzero = time_second;
1427
1428         ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
1429                                         /* XXX PFR_FLAG_CLSTATS disabled */
1430         if (pfr_fix_anchor(filter->pfrt_anchor))
1431                 return (EINVAL);
1432         n = nn = pfr_table_count(filter, flags);
1433         if (n < 0)
1434                 return (ENOENT);
1435         if (n > *size) {
1436                 *size = n;
1437                 return (0);
1438         }
1439         SLIST_INIT(&workq);
1440         if (flags & PFR_FLAG_ATOMIC)
1441                 s = splsoftnet();
1442         RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1443                 if (pfr_skip_table(filter, p, flags))
1444                         continue;
1445                 if (n-- <= 0)
1446                         continue;
1447                 if (!(flags & PFR_FLAG_ATOMIC))
1448                         s = splsoftnet();
1449                 if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
1450                         if (!(flags & PFR_FLAG_ATOMIC))
1451                                 splx(s);
1452                         return (EFAULT);
1453                 }
1454                 if (!(flags & PFR_FLAG_ATOMIC))
1455                         splx(s);
1456                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1457         }
1458         if (flags & PFR_FLAG_CLSTATS)
1459                 pfr_clstats_ktables(&workq, tzero,
1460                     flags & PFR_FLAG_ADDRSTOO);
1461         if (flags & PFR_FLAG_ATOMIC)
1462                 splx(s);
1463         if (n) {
1464                 printf("pfr_get_tstats: corruption detected (%d).\n", n);
1465                 return (ENOTTY);
1466         }
1467         *size = nn;
1468         return (0);
1469 }
1470
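/*
 * Clear the statistics of the listed tables; with PFR_FLAG_ADDRSTOO
 * the statistics of their addresses are cleared as well.  The number
 * of affected tables is returned through *nzero.
 */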
1471 int
1472 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1473 {
1474         struct pfr_ktableworkq   workq;
1475         struct pfr_ktable       *p, key;
1476         int                      i, s = 0, xzero = 0;
1477         long                     tzero = time_second;
1478
1479         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
1480         SLIST_INIT(&workq);
1481         for (i = 0; i < size; i++) {
1482                 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
1483                         return (EFAULT);
1484                 if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1485                         return (EINVAL);
1486                 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1487                 if (p != NULL) {
1488                         SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1489                         xzero++;
1490                 }
1491         }
1492         if (!(flags & PFR_FLAG_DUMMY)) {
1493                 if (flags & PFR_FLAG_ATOMIC)
1494                         s = splsoftnet();
1495                 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1496                 if (flags & PFR_FLAG_ATOMIC)
1497                         splx(s);
1498         }
1499         if (nzero != NULL)
1500                 *nzero = xzero;
1501         return (0);
1502 }
1503
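/*
 * Set and/or clear user-settable flags (PFR_TFLAG_USRMASK) on the
 * listed tables.  Tables that drop their PERSIST flag and are no
 * longer referenced are counted in *ndel, all other modified tables
 * in *nchange.
 */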
1504 int
1505 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1506         int *nchange, int *ndel, int flags)
1507 {
1508         struct pfr_ktableworkq   workq;
1509         struct pfr_ktable       *p, *q, key;
1510         int                      i, s = 0, xchange = 0, xdel = 0;
1511
1512         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
1513         if ((setflag & ~PFR_TFLAG_USRMASK) ||
1514             (clrflag & ~PFR_TFLAG_USRMASK) ||
1515             (setflag & clrflag))
1516                 return (EINVAL);
1517         SLIST_INIT(&workq);
1518         for (i = 0; i < size; i++) {
1519                 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
1520                         return (EFAULT);
1521                 if (pfr_validate_table(&key.pfrkt_t, 0,
1522                     flags & PFR_FLAG_USERIOCTL))
1523                         return (EINVAL);
1524                 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1525                 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1526                         p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
1527                             ~clrflag;
1528                         if (p->pfrkt_nflags == p->pfrkt_flags)
1529                                 goto _skip;
1530                         SLIST_FOREACH(q, &workq, pfrkt_workq)
1531                                 if (!pfr_ktable_compare(p, q))
1532                                         goto _skip;
1533                         SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1534                         if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
1535                             (clrflag & PFR_TFLAG_PERSIST) &&
1536                             !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1537                                 xdel++;
1538                         else
1539                                 xchange++;
1540                 }
1541 _skip:
1542         ;
1543         }
1544         if (!(flags & PFR_FLAG_DUMMY)) {
1545                 if (flags & PFR_FLAG_ATOMIC)
1546                         s = splsoftnet();
1547                 pfr_setflags_ktables(&workq);
1548                 if (flags & PFR_FLAG_ATOMIC)
1549                         splx(s);
1550         }
1551         if (nchange != NULL)
1552                 *nchange = xchange;
1553         if (ndel != NULL)
1554                 *ndel = xdel;
1555         return (0);
1556 }
1557
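/*
 * Open a table transaction for the given anchor: tables left in the
 * inactive state by a previous transaction are cleaned up (counted in
 * *ndel) and a new ticket is handed out through *ticket.
 */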
1558 int
1559 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1560 {
1561         struct pfr_ktableworkq   workq;
1562         struct pfr_ktable       *p;
1563         struct pf_ruleset       *rs;
1564         int                      xdel = 0;
1565
1566         ACCEPT_FLAGS(PFR_FLAG_DUMMY);
1567         rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
1568         if (rs == NULL)
1569                 return (ENOMEM);
1570         SLIST_INIT(&workq);
1571         RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1572                 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1573                     pfr_skip_table(trs, p, 0))
1574                         continue;
1575                 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1576                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1577                 xdel++;
1578         }
1579         if (!(flags & PFR_FLAG_DUMMY)) {
1580                 pfr_setflags_ktables(&workq);
1581                 if (ticket != NULL)
1582                         *ticket = ++rs->tticket;
1583                 rs->topen = 1;
1584         } else
1585                 pf_remove_if_empty_ruleset(rs);
1586         if (ndel != NULL)
1587                 *ndel = xdel;
1588         return (0);
1589 }
1590
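/*
 * Load the inactive (shadow) copy of a table within an open
 * transaction: the table and, for anchors, its root table are created
 * if necessary, the supplied addresses are collected into a shadow
 * table, and the shadow is attached so that pfr_ina_commit() can
 * later swap it in.
 */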
1591 int
1592 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1593     int *nadd, int *naddr, u_int32_t ticket, int flags)
1594 {
1595         struct pfr_ktableworkq   tableq;
1596         struct pfr_kentryworkq   addrq;
1597         struct pfr_ktable       *kt, *rt, *shadow, key;
1598         struct pfr_kentry       *p;
1599         struct pfr_addr          ad;
1600         struct pf_ruleset       *rs;
1601         int                      i, rv, xadd = 0, xaddr = 0;
1602
1603         ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
1604         if (size && !(flags & PFR_FLAG_ADDRSTOO))
1605                 return (EINVAL);
1606         if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1607             flags & PFR_FLAG_USERIOCTL))
1608                 return (EINVAL);
1609         rs = pf_find_ruleset(tbl->pfrt_anchor);
1610         if (rs == NULL || !rs->topen || ticket != rs->tticket)
1611                 return (EBUSY);
1612         tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1613         SLIST_INIT(&tableq);
1614         kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
1615         if (kt == NULL) {
1616                 kt = pfr_create_ktable(tbl, 0, 1);
1617                 if (kt == NULL)
1618                         return (ENOMEM);
1619                 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1620                 xadd++;
1621                 if (!tbl->pfrt_anchor[0])
1622                         goto _skip;
1623
1624                 /* find or create root table */
1625                 bzero(&key, sizeof(key));
1626                 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1627                 rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1628                 if (rt != NULL) {
1629                         kt->pfrkt_root = rt;
1630                         goto _skip;
1631                 }
1632                 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1633                 if (rt == NULL) {
1634                         pfr_destroy_ktables(&tableq, 0);
1635                         return (ENOMEM);
1636                 }
1637                 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1638                 kt->pfrkt_root = rt;
1639         } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
1640                 xadd++;
1641 _skip:
1642         shadow = pfr_create_ktable(tbl, 0, 0);
1643         if (shadow == NULL) {
1644                 pfr_destroy_ktables(&tableq, 0);
1645                 return (ENOMEM);
1646         }
1647         SLIST_INIT(&addrq);
1648         for (i = 0; i < size; i++) {
1649                 if (COPYIN(addr+i, &ad, sizeof(ad)))
1650                         senderr(EFAULT);
1651                 if (pfr_validate_addr(&ad))
1652                         senderr(EINVAL);
1653                 if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
1654                         continue;
1655                 p = pfr_create_kentry(&ad, 0);
1656                 if (p == NULL)
1657                         senderr(ENOMEM);
1658                 if (pfr_route_kentry(shadow, p)) {
1659                         pfr_destroy_kentry(p);
1660                         continue;
1661                 }
1662                 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1663                 xaddr++;
1664         }
1665         if (!(flags & PFR_FLAG_DUMMY)) {
1666                 if (kt->pfrkt_shadow != NULL)
1667                         pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1668                 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1669                 pfr_insert_ktables(&tableq);
1670                 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1671                     xaddr : NO_ADDRESSES;
1672                 kt->pfrkt_shadow = shadow;
1673         } else {
1674                 pfr_clean_node_mask(shadow, &addrq);
1675                 pfr_destroy_ktable(shadow, 0);
1676                 pfr_destroy_ktables(&tableq, 0);
1677                 pfr_destroy_kentries(&addrq);
1678         }
1679         if (nadd != NULL)
1680                 *nadd = xadd;
1681         if (naddr != NULL)
1682                 *naddr = xaddr;
1683         return (0);
1684 _bad:
1685         pfr_destroy_ktable(shadow, 0);
1686         pfr_destroy_ktables(&tableq, 0);
1687         pfr_destroy_kentries(&addrq);
1688         return (rv);
1689 }
1690
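/*
 * Abort an open table transaction: drop the inactive state (and
 * shadow copies) of all tables in the anchor and close the
 * transaction.
 */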
1691 int
1692 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1693 {
1694         struct pfr_ktableworkq   workq;
1695         struct pfr_ktable       *p;
1696         struct pf_ruleset       *rs;
1697         int                      xdel = 0;
1698
1699         ACCEPT_FLAGS(PFR_FLAG_DUMMY);
1700         rs = pf_find_ruleset(trs->pfrt_anchor);
1701         if (rs == NULL || !rs->topen || ticket != rs->tticket)
1702                 return (0);
1703         SLIST_INIT(&workq);
1704         RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1705                 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1706                     pfr_skip_table(trs, p, 0))
1707                         continue;
1708                 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1709                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1710                 xdel++;
1711         }
1712         if (!(flags & PFR_FLAG_DUMMY)) {
1713                 pfr_setflags_ktables(&workq);
1714                 rs->topen = 0;
1715                 pf_remove_if_empty_ruleset(rs);
1716         }
1717         if (ndel != NULL)
1718                 *ndel = xdel;
1719         return (0);
1720 }
1721
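/*
 * Commit an open table transaction: every inactive table in the
 * anchor has its shadow contents merged in by pfr_commit_ktable() and
 * the transaction is closed.  Newly created tables are counted in
 * *nadd, replaced ones in *nchange.
 */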
1722 int
1723 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1724     int *nchange, int flags)
1725 {
1726         struct pfr_ktable       *p, *q;
1727         struct pfr_ktableworkq   workq;
1728         struct pf_ruleset       *rs;
1729         int                      s = 0, xadd = 0, xchange = 0;
1730         long                     tzero = time_second;
1731
1732         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
1733         rs = pf_find_ruleset(trs->pfrt_anchor);
1734         if (rs == NULL || !rs->topen || ticket != rs->tticket)
1735                 return (EBUSY);
1736
1737         SLIST_INIT(&workq);
1738         RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1739                 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1740                     pfr_skip_table(trs, p, 0))
1741                         continue;
1742                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1743                 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1744                         xchange++;
1745                 else
1746                         xadd++;
1747         }
1748
1749         if (!(flags & PFR_FLAG_DUMMY)) {
1750                 if (flags & PFR_FLAG_ATOMIC)
1751                         s = splsoftnet();
1752                 for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
1753                         q = SLIST_NEXT(p, pfrkt_workq);
1754                         pfr_commit_ktable(p, tzero);
1755                 }
1756                 if (flags & PFR_FLAG_ATOMIC)
1757                         splx(s);
1758                 rs->topen = 0;
1759                 pf_remove_if_empty_ruleset(rs);
1760         }
1761         if (nadd != NULL)
1762                 *nadd = xadd;
1763         if (nchange != NULL)
1764                 *nchange = xchange;
1765
1766         return (0);
1767 }
1768
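/*
 * Merge the shadow copy of a table into its active copy.  If the
 * table is already active, its address list is diffed against the
 * shadow so that the statistics of unchanged entries survive;
 * otherwise the radix heads are swapped wholesale.
 */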
1769 void
1770 pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
1771 {
1772         struct pfr_ktable       *shadow = kt->pfrkt_shadow;
1773         int                      nflags;
1774
1775         if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1776                 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1777                         pfr_clstats_ktable(kt, tzero, 1);
1778         } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1779                 /* kt might contain addresses */
1780                 struct pfr_kentryworkq   addrq, addq, changeq, delq, garbageq;
1781                 struct pfr_kentry       *p, *q, *next;
1782                 struct pfr_addr          ad;
1783
1784                 pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1785                 pfr_mark_addrs(kt);
1786                 SLIST_INIT(&addq);
1787                 SLIST_INIT(&changeq);
1788                 SLIST_INIT(&delq);
1789                 SLIST_INIT(&garbageq);
1790                 pfr_clean_node_mask(shadow, &addrq);
1791                 for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
1792                         next = SLIST_NEXT(p, pfrke_workq);      /* XXX: p is re-linked below */
1793                         pfr_copyout_addr(&ad, p);
1794                         q = pfr_lookup_addr(kt, &ad, 1);
1795                         if (q != NULL) {
1796                                 if (q->pfrke_not != p->pfrke_not)
1797                                         SLIST_INSERT_HEAD(&changeq, q,
1798                                             pfrke_workq);
1799                                 q->pfrke_mark = 1;
1800                                 SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
1801                         } else {
1802                                 p->pfrke_tzero = tzero;
1803                                 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
1804                         }
1805                 }
1806                 pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1807                 pfr_insert_kentries(kt, &addq, tzero);
1808                 pfr_remove_kentries(kt, &delq);
1809                 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
1810                 pfr_destroy_kentries(&garbageq);
1811         } else {
1812                 /* kt cannot contain addresses */
1813                 SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1814                     shadow->pfrkt_ip4);
1815                 SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1816                     shadow->pfrkt_ip6);
1817                 SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1818                 pfr_clstats_ktable(kt, tzero, 1);
1819         }
1820         nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1821             (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1822                 & ~PFR_TFLAG_INACTIVE;
1823         pfr_destroy_ktable(shadow, 0);
1824         kt->pfrkt_shadow = NULL;
1825         pfr_setflags_ktable(kt, nflags);
1826 }
1827
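/*
 * Sanity-check a table definition supplied by the caller: the name
 * must be non-empty, NUL-terminated and zero padded, the anchor path
 * must be valid, and only flags from allowedflags may be set.
 */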
1828 int
1829 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1830 {
1831         int i;
1832
1833         if (!tbl->pfrt_name[0])
1834                 return (-1);
1835         if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1836                  return (-1);
1837         if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1838                 return (-1);
1839         for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1840                 if (tbl->pfrt_name[i])
1841                         return (-1);
1842         if (pfr_fix_anchor(tbl->pfrt_anchor))
1843                 return (-1);
1844         if (tbl->pfrt_flags & ~allowedflags)
1845                 return (-1);
1846         return (0);
1847 }
1848
1849 /*
1850  * Rewrite anchors referenced by tables to remove slashes
1851  * and check for validity.
1852  */
1853 int
1854 pfr_fix_anchor(char *anchor)
1855 {
1856         size_t siz = MAXPATHLEN;
1857         int i;
1858
1859         if (anchor[0] == '/') {
1860                 char *path;
1861                 int off;
1862
1863                 path = anchor;
1864                 off = 1;
1865                 while (*++path == '/')
1866                         off++;
1867                 bcopy(path, anchor, siz - off);
1868                 memset(anchor + siz - off, 0, off);
1869         }
1870         if (anchor[siz - 1])
1871                 return (-1);
1872         for (i = strlen(anchor); i < siz; i++)
1873                 if (anchor[i])
1874                         return (-1);
1875         return (0);
1876 }
1877
1878 int
1879 pfr_table_count(struct pfr_table *filter, int flags)
1880 {
1881         struct pf_ruleset *rs;
1882
1883         if (flags & PFR_FLAG_ALLRSETS)
1884                 return (pfr_ktable_cnt);
1885         if (filter->pfrt_anchor[0]) {
1886                 rs = pf_find_ruleset(filter->pfrt_anchor);
1887                 return ((rs != NULL) ? rs->tables : -1);
1888         }
1889         return (pf_main_ruleset.tables);
1890 }
1891
1892 int
1893 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1894 {
1895         if (flags & PFR_FLAG_ALLRSETS)
1896                 return (0);
1897         if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1898                 return (1);
1899         return (0);
1900 }
1901
1902 void
1903 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1904 {
1905         struct pfr_ktable       *p;
1906
1907         SLIST_FOREACH(p, workq, pfrkt_workq)
1908                 pfr_insert_ktable(p);
1909 }
1910
1911 void
1912 pfr_insert_ktable(struct pfr_ktable *kt)
1913 {
1914         RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1915         pfr_ktable_cnt++;
1916         if (kt->pfrkt_root != NULL)
1917                 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1918                         pfr_setflags_ktable(kt->pfrkt_root,
1919                             kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1920 }
1921
1922 void
1923 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1924 {
1925         struct pfr_ktable       *p, *q;
1926
1927         for (p = SLIST_FIRST(workq); p; p = q) {
1928                 q = SLIST_NEXT(p, pfrkt_workq);
1929                 pfr_setflags_ktable(p, p->pfrkt_nflags);
1930         }
1931 }
1932
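/*
 * Apply a new set of flags to a table.  A table that is neither
 * referenced nor persistent loses its ACTIVE flag; one with no
 * remaining SETMASK flags is removed from the tree and destroyed.
 */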
1933 void
1934 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1935 {
1936         struct pfr_kentryworkq  addrq;
1937
1938         if (!(newf & PFR_TFLAG_REFERENCED) &&
1939             !(newf & PFR_TFLAG_PERSIST))
1940                 newf &= ~PFR_TFLAG_ACTIVE;
1941         if (!(newf & PFR_TFLAG_ACTIVE))
1942                 newf &= ~PFR_TFLAG_USRMASK;
1943         if (!(newf & PFR_TFLAG_SETMASK)) {
1944                 RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
1945                 if (kt->pfrkt_root != NULL)
1946                         if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1947                                 pfr_setflags_ktable(kt->pfrkt_root,
1948                                     kt->pfrkt_root->pfrkt_flags &
1949                                         ~PFR_TFLAG_REFDANCHOR);
1950                 pfr_destroy_ktable(kt, 1);
1951                 pfr_ktable_cnt--;
1952                 return;
1953         }
1954         if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1955                 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1956                 pfr_remove_kentries(kt, &addrq);
1957         }
1958         if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1959                 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1960                 kt->pfrkt_shadow = NULL;
1961         }
1962         kt->pfrkt_flags = newf;
1963 }
1964
1965 void
1966 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1967 {
1968         struct pfr_ktable       *p;
1969
1970         SLIST_FOREACH(p, workq, pfrkt_workq)
1971                 pfr_clstats_ktable(p, tzero, recurse);
1972 }
1973
1974 void
1975 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1976 {
1977         struct pfr_kentryworkq   addrq;
1978         int                      s;
1979
1980         if (recurse) {
1981                 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1982                 pfr_clstats_kentries(&addrq, tzero, 0);
1983         }
1984         s = splsoftnet();
1985         bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
1986         bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
1987         kt->pfrkt_match = kt->pfrkt_nomatch = 0;
1988         splx(s);
1989         kt->pfrkt_tzero = tzero;
1990 }
1991
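/*
 * Allocate and initialize a kernel table: optionally attach it to
 * (creating, if necessary) the ruleset named by its anchor and set up
 * its IPv4 and IPv6 radix heads.
 */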
1992 struct pfr_ktable *
1993 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
1994 {
1995         struct pfr_ktable       *kt;
1996         struct pf_ruleset       *rs;
1997
1998         kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
1999         if (kt == NULL)
2000                 return (NULL);
2001         bzero(kt, sizeof(*kt));
2002         kt->pfrkt_t = *tbl;
2003
2004         if (attachruleset) {
2005                 rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
2006                 if (!rs) {
2007                         pfr_destroy_ktable(kt, 0);
2008                         return (NULL);
2009                 }
2010                 kt->pfrkt_rs = rs;
2011                 rs->tables++;
2012         }
2013
2014         if (!rn_inithead((void **)&kt->pfrkt_ip4,
2015             offsetof(struct sockaddr_in, sin_addr) * 8) ||
2016             !rn_inithead((void **)&kt->pfrkt_ip6,
2017             offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
2018                 pfr_destroy_ktable(kt, 0);
2019                 return (NULL);
2020         }
2021         kt->pfrkt_tzero = tzero;
2022
2023         return (kt);
2024 }
2025
2026 void
2027 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
2028 {
2029         struct pfr_ktable       *p, *q;
2030
2031         for (p = SLIST_FIRST(workq); p; p = q) {
2032                 q = SLIST_NEXT(p, pfrkt_workq);
2033                 pfr_destroy_ktable(p, flushaddr);
2034         }
2035 }
2036
2037 void
2038 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
2039 {
2040         struct pfr_kentryworkq   addrq;
2041
2042         if (flushaddr) {
2043                 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
2044                 pfr_clean_node_mask(kt, &addrq);
2045                 pfr_destroy_kentries(&addrq);
2046         }
2047 #if defined(__FreeBSD__) && (__FreeBSD_version >= 500100)
2048         if (kt->pfrkt_ip4 != NULL) {
2049                 RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip4);
2050                 free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
2051         }
2052         if (kt->pfrkt_ip6 != NULL) {
2053                 RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip6);
2054                 free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
2055         }
2056 #else
2057         if (kt->pfrkt_ip4 != NULL)
2058                 free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
2059         if (kt->pfrkt_ip6 != NULL)
2060                 free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
2061 #endif
2062         if (kt->pfrkt_shadow != NULL)
2063                 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
2064         if (kt->pfrkt_rs != NULL) {
2065                 kt->pfrkt_rs->tables--;
2066                 pf_remove_if_empty_ruleset(kt->pfrkt_rs);
2067         }
2068         pool_put(&pfr_ktable_pl, kt);
2069 }
2070
2071 int
2072 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
2073 {
2074         int d;
2075
2076         if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
2077                 return (d);
2078         return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
2079 }
2080
2081 struct pfr_ktable *
2082 pfr_lookup_table(struct pfr_table *tbl)
2083 {
2084         /* a struct pfr_ktable starts like a struct pfr_table, so the cast is safe */
2085         return (RB_FIND(pfr_ktablehead, &pfr_ktables,
2086             (struct pfr_ktable *)tbl));
2087 }
2088
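/*
 * Look up an address in a table and update the match/nomatch
 * counters.  Returns 1 if the address matches a non-negated entry.
 */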
2089 int
2090 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
2091 {
2092         struct pfr_kentry       *ke = NULL;
2093         int                      match;
2094
2095         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2096                 kt = kt->pfrkt_root;
2097         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2098                 return (0);
2099
2100         switch (af) {
2101 #ifdef INET
2102         case AF_INET:
2103                 pfr_sin.sin_addr.s_addr = a->addr32[0];
2104                 ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
2105                 if (ke && KENTRY_RNF_ROOT(ke))
2106                         ke = NULL;
2107                 break;
2108 #endif /* INET */
2109 #ifdef INET6
2110         case AF_INET6:
2111                 bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2112                 ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
2113                 if (ke && KENTRY_RNF_ROOT(ke))
2114                         ke = NULL;
2115                 break;
2116 #endif /* INET6 */
2117         }
2118         match = (ke && !ke->pfrke_not);
2119         if (match)
2120                 kt->pfrkt_match++;
2121         else
2122                 kt->pfrkt_nomatch++;
2123         return (match);
2124 }
2125
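/*
 * Account a packet against a table and, if possible, against the
 * matching address entry.  A mismatch between the lookup result and
 * what the rule expected (notrule) is recorded under PFR_OP_XPASS.
 */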
2126 void
2127 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
2128     u_int64_t len, int dir_out, int op_pass, int notrule)
2129 {
2130         struct pfr_kentry       *ke = NULL;
2131
2132         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2133                 kt = kt->pfrkt_root;
2134         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2135                 return;
2136
2137         switch (af) {
2138 #ifdef INET
2139         case AF_INET:
2140                 pfr_sin.sin_addr.s_addr = a->addr32[0];
2141                 ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
2142                 if (ke && KENTRY_RNF_ROOT(ke))
2143                         ke = NULL;
2144                 break;
2145 #endif /* INET */
2146 #ifdef INET6
2147         case AF_INET6:
2148                 bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2149                 ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
2150                 if (ke && KENTRY_RNF_ROOT(ke))
2151                         ke = NULL;
2152                 break;
2153 #endif /* INET6 */
2154         default:
2155                 ;
2156         }
2157         if ((ke == NULL || ke->pfrke_not) != notrule) {
2158                 if (op_pass != PFR_OP_PASS)
2159                         printf("pfr_update_stats: assertion failed.\n");
2160                 op_pass = PFR_OP_XPASS;
2161         }
2162         kt->pfrkt_packets[dir_out][op_pass]++;
2163         kt->pfrkt_bytes[dir_out][op_pass] += len;
2164         if (ke != NULL && op_pass != PFR_OP_XPASS) {
2165                 ke->pfrke_packets[dir_out][op_pass]++;
2166                 ke->pfrke_bytes[dir_out][op_pass] += len;
2167         }
2168 }
2169
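/*
 * Find or create the table referenced by a rule and take a rule
 * reference on it, setting PFR_TFLAG_REFERENCED on first use.  For
 * tables inside an anchor the corresponding root table is created as
 * well.
 */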
2170 struct pfr_ktable *
2171 pfr_attach_table(struct pf_ruleset *rs, char *name)
2172 {
2173         struct pfr_ktable       *kt, *rt;
2174         struct pfr_table         tbl;
2175         struct pf_anchor        *ac = rs->anchor;
2176
2177         bzero(&tbl, sizeof(tbl));
2178         strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2179         if (ac != NULL)
2180                 strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2181         kt = pfr_lookup_table(&tbl);
2182         if (kt == NULL) {
2183                 kt = pfr_create_ktable(&tbl, time_second, 1);
2184                 if (kt == NULL)
2185                         return (NULL);
2186                 if (ac != NULL) {
2187                         bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2188                         rt = pfr_lookup_table(&tbl);
2189                         if (rt == NULL) {
2190                                 rt = pfr_create_ktable(&tbl, 0, 1);
2191                                 if (rt == NULL) {
2192                                         pfr_destroy_ktable(kt, 0);
2193                                         return (NULL);
2194                                 }
2195                                 pfr_insert_ktable(rt);
2196                         }
2197                         kt->pfrkt_root = rt;
2198                 }
2199                 pfr_insert_ktable(kt);
2200         }
2201         if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2202                 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2203         return (kt);
2204 }
2205
2206 void
2207 pfr_detach_table(struct pfr_ktable *kt)
2208 {
2209         if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
2210                 printf("pfr_detach_table: refcount = %d.\n",
2211                     kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2212         else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2213                 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2214 }
2215
2216
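/*
 * Pick the next address from a table for a round-robin address pool.
 * *pidx selects the current block (network entry) and counter the
 * position within it; nested, more specific blocks are skipped by
 * advancing the counter past them.  Returns 0 on success, 1 once the
 * end of the table is reached and -1 if the table is not active.
 */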
2217 int
2218 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2219     struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
2220 {
2221         struct pfr_kentry       *ke, *ke2 = NULL;
2222         struct pf_addr          *addr = NULL;
2223         union sockaddr_union     mask;
2224         int                      idx = -1, use_counter = 0;
2225
2226         if (af == AF_INET)
2227                 addr = (struct pf_addr *)&pfr_sin.sin_addr;
2228         else if (af == AF_INET6)
2229                 addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
2230         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2231                 kt = kt->pfrkt_root;
2232         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2233                 return (-1);
2234
2235         if (pidx != NULL)
2236                 idx = *pidx;
2237         if (counter != NULL && idx >= 0)
2238                 use_counter = 1;
2239         if (idx < 0)
2240                 idx = 0;
2241
2242 _next_block:
2243         ke = pfr_kentry_byidx(kt, idx, af);
2244         if (ke == NULL)
2245                 return (1);
2246         pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2247         *raddr = SUNION2PF(&ke->pfrke_sa, af);
2248         *rmask = SUNION2PF(&pfr_mask, af);
2249
2250         if (use_counter) {
2251                 /* is supplied address within block? */
2252                 if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2253                         /* no, go to next block in table */
2254                         idx++;
2255                         use_counter = 0;
2256                         goto _next_block;
2257                 }
2258                 PF_ACPY(addr, counter, af);
2259         } else {
2260                 /* use first address of block */
2261                 PF_ACPY(addr, *raddr, af);
2262         }
2263
2264         if (!KENTRY_NETWORK(ke)) {
2265                 /* this is a single IP address - no possible nested block */
2266                 PF_ACPY(counter, addr, af);
2267                 *pidx = idx;
2268                 return (0);
2269         }
2270         for (;;) {
2271                 /* we don't want to use a nested block */
2272                 if (af == AF_INET)
2273                         ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
2274                             kt->pfrkt_ip4);
2275                 else if (af == AF_INET6)
2276                         ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
2277                             kt->pfrkt_ip6);
2278                 /* no need to check KENTRY_RNF_ROOT() here */
2279                 if (ke2 == ke) {
2280                         /* lookup returned the same block - perfect */
2281                         PF_ACPY(counter, addr, af);
2282                         *pidx = idx;
2283                         return (0);
2284                 }
2285
2286                 /* we need to increase the counter past the nested block */
2287                 pfr_prepare_network(&mask, af, ke2->pfrke_net);
2288                 PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2289                 PF_AINC(addr, af);
2290                 if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2291                         /* ok, we reached the end of our main block */
2292                         /* go to next block in table */
2293                         idx++;
2294                         use_counter = 0;
2295                         goto _next_block;
2296                 }
2297         }
2298 }
2299
2300 struct pfr_kentry *
2301 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2302 {
2303         struct pfr_walktree     w;
2304
2305         bzero(&w, sizeof(w));
2306         w.pfrw_op = PFRW_POOL_GET;
2307         w.pfrw_cnt = idx;
2308
2309         switch (af) {
2310 #ifdef INET
2311         case AF_INET:
2312 #ifdef __FreeBSD__
2313                 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2314 #else
2315                 rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2316 #endif
2317                 return (w.pfrw_kentry);
2318 #endif /* INET */
2319 #ifdef INET6
2320         case AF_INET6:
2321 #ifdef __FreeBSD__
2322                 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2323 #else
2324                 rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2325 #endif
2326                 return (w.pfrw_kentry);
2327 #endif /* INET6 */
2328         default:
2329                 return (NULL);
2330         }
2331 }
2332
2333 void
2334 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2335 {
2336         struct pfr_walktree     w;
2337         int                     s;
2338
2339         bzero(&w, sizeof(w));
2340         w.pfrw_op = PFRW_DYNADDR_UPDATE;
2341         w.pfrw_dyn = dyn;
2342
2343         s = splsoftnet();
2344         dyn->pfid_acnt4 = 0;
2345         dyn->pfid_acnt6 = 0;
2346         if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2347 #ifdef __FreeBSD__
2348                 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2349 #else
2350                 rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2351 #endif
2352         if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2353 #ifdef __FreeBSD__
2354                 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2355 #else
2356                 rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2357 #endif
2358         splx(s);
2359 }