/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *      $OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <vm/uma.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>

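/*
 * Validate ioctl flags: reject any bit that is within PFR_FLAG_ALLMASK
 * but not in the per-operation allow list.
 */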
#define ACCEPT_FLAGS(flags, oklist)             \
        do {                                    \
                if ((flags & ~(oklist)) &       \
                    PFR_FLAG_ALLMASK)           \
                        return (EINVAL);        \
        } while (0)

#define FILLIN_SIN(sin, addr)                   \
        do {                                    \
                (sin).sin_len = sizeof(sin);    \
                (sin).sin_family = AF_INET;     \
                (sin).sin_addr = (addr);        \
        } while (0)

#define FILLIN_SIN6(sin6, addr)                 \
        do {                                    \
                (sin6).sin6_len = sizeof(sin6); \
                (sin6).sin6_family = AF_INET6;  \
                (sin6).sin6_addr = (addr);      \
        } while (0)

#define SWAP(type, a1, a2)                      \
        do {                                    \
                type tmp = a1;                  \
                a1 = a2;                        \
                a2 = tmp;                       \
        } while (0)

#define SUNION2PF(su, af) (((af)==AF_INET) ?    \
    (struct pf_addr *)&(su)->sin.sin_addr :     \
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define AF_BITS(af)             (((af)==AF_INET)?32:128)
#define ADDR_NETWORK(ad)        ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define KENTRY_NETWORK(ke)      ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define KENTRY_RNF_ROOT(ke) \
                ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES            (-1)
#define ENQUEUE_UNMARKED_ONLY   (1)
#define INVERT_NEG_FLAG         (1)
struct pfr_walktree {
        enum pfrw_op {
                PFRW_MARK,
                PFRW_SWEEP,
                PFRW_ENQUEUE,
                PFRW_GET_ADDRS,
                PFRW_GET_ASTATS,
                PFRW_POOL_GET,
                PFRW_DYNADDR_UPDATE
        }        pfrw_op;
        union {
                struct pfr_addr         *pfrw1_addr;
                struct pfr_astats       *pfrw1_astats;
                struct pfr_kentryworkq  *pfrw1_workq;
                struct pfr_kentry       *pfrw1_kentry;
                struct pfi_dynaddr      *pfrw1_dyn;
        }        pfrw_1;
        int      pfrw_free;
};
#define pfrw_addr       pfrw_1.pfrw1_addr
#define pfrw_astats     pfrw_1.pfrw1_astats
#define pfrw_workq      pfrw_1.pfrw1_workq
#define pfrw_kentry     pfrw_1.pfrw1_kentry
#define pfrw_dyn        pfrw_1.pfrw1_dyn
#define pfrw_cnt        pfrw_free

#define senderr(e)      do { rv = (e); goto _bad; } while (0)

static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
static VNET_DEFINE(uma_zone_t, pfr_kentry_z);
#define V_pfr_kentry_z          VNET(pfr_kentry_z)
static VNET_DEFINE(uma_zone_t, pfr_kcounters_z);
#define V_pfr_kcounters_z       VNET(pfr_kcounters_z)

static struct pf_addr    pfr_ffaddr = {
        .addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
};

static void              pfr_copyout_addr(struct pfr_addr *,
                            struct pfr_kentry *ke);
static int               pfr_validate_addr(struct pfr_addr *);
static void              pfr_enqueue_addrs(struct pfr_ktable *,
                            struct pfr_kentryworkq *, int *, int);
static void              pfr_mark_addrs(struct pfr_ktable *);
static struct pfr_kentry
                        *pfr_lookup_addr(struct pfr_ktable *,
                            struct pfr_addr *, int);
static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *);
static void              pfr_destroy_kentries(struct pfr_kentryworkq *);
static void              pfr_destroy_kentry(struct pfr_kentry *);
static void              pfr_insert_kentries(struct pfr_ktable *,
                            struct pfr_kentryworkq *, long);
static void              pfr_remove_kentries(struct pfr_ktable *,
                            struct pfr_kentryworkq *);
static void              pfr_clstats_kentries(struct pfr_kentryworkq *, long,
                            int);
static void              pfr_reset_feedback(struct pfr_addr *, int);
static void              pfr_prepare_network(union sockaddr_union *, int, int);
static int               pfr_route_kentry(struct pfr_ktable *,
                            struct pfr_kentry *);
static int               pfr_unroute_kentry(struct pfr_ktable *,
                            struct pfr_kentry *);
static int               pfr_walktree(struct radix_node *, void *);
static int               pfr_validate_table(struct pfr_table *, int, int);
static int               pfr_fix_anchor(char *);
static void              pfr_commit_ktable(struct pfr_ktable *, long);
static void              pfr_insert_ktables(struct pfr_ktableworkq *);
static void              pfr_insert_ktable(struct pfr_ktable *);
static void              pfr_setflags_ktables(struct pfr_ktableworkq *);
static void              pfr_setflags_ktable(struct pfr_ktable *, int);
static void              pfr_clstats_ktables(struct pfr_ktableworkq *, long,
                            int);
static void              pfr_clstats_ktable(struct pfr_ktable *, long, int);
static struct pfr_ktable
                        *pfr_create_ktable(struct pfr_table *, long, int);
static void              pfr_destroy_ktables(struct pfr_ktableworkq *, int);
static void              pfr_destroy_ktable(struct pfr_ktable *, int);
static int               pfr_ktable_compare(struct pfr_ktable *,
                            struct pfr_ktable *);
static struct pfr_ktable
                        *pfr_lookup_table(struct pfr_table *);
static void              pfr_clean_node_mask(struct pfr_ktable *,
                            struct pfr_kentryworkq *);
static int               pfr_table_count(struct pfr_table *, int);
static int               pfr_skip_table(struct pfr_table *,
                            struct pfr_ktable *, int);
static struct pfr_kentry
                        *pfr_kentry_byidx(struct pfr_ktable *, int, int);

static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

static VNET_DEFINE(struct pfr_ktablehead, pfr_ktables);
#define V_pfr_ktables   VNET(pfr_ktables)

static VNET_DEFINE(struct pfr_table, pfr_nulltable);
#define V_pfr_nulltable VNET(pfr_nulltable)

static VNET_DEFINE(int, pfr_ktable_cnt);
#define V_pfr_ktable_cnt        VNET(pfr_ktable_cnt)

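/*
 * Create the per-VNET UMA zones for table entries and per-entry
 * counters, and hook the entry zone into pf's limit accounting.
 */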
void
pfr_initialize(void)
{

        V_pfr_kentry_z = uma_zcreate("pf table entries",
            sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
            0);
        V_pfr_kcounters_z = uma_zcreate("pf table counters",
            sizeof(struct pfr_kcounters), NULL, NULL, NULL, NULL,
            UMA_ALIGN_PTR, 0);
        V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
        V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
}

void
pfr_cleanup(void)
{

        uma_zdestroy(V_pfr_kentry_z);
        uma_zdestroy(V_pfr_kcounters_z);
}

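/*
 * Remove every address from a table.  The table must exist, be active
 * and not be constant; PFR_FLAG_DUMMY only reports what would be done.
 */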
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
        struct pfr_ktable       *kt;
        struct pfr_kentryworkq   workq;

        PF_RULES_WASSERT();

        ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
        if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
                return (EINVAL);
        kt = pfr_lookup_table(tbl);
        if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
                return (ESRCH);
        if (kt->pfrkt_flags & PFR_TFLAG_CONST)
                return (EPERM);
        pfr_enqueue_addrs(kt, &workq, ndel, 0);

        if (!(flags & PFR_FLAG_DUMMY)) {
                pfr_remove_kentries(kt, &workq);
                KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__));
        }
        return (0);
}

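/*
 * Add a list of addresses to a table.  A scratch ktable is used to
 * catch duplicates within the request itself; with PFR_FLAG_FEEDBACK
 * each pfr_addr gets a per-entry result code copied back.
 */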
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
        struct pfr_ktable       *kt, *tmpkt;
        struct pfr_kentryworkq   workq;
        struct pfr_kentry       *p, *q;
        struct pfr_addr         *ad;
        int                      i, rv, xadd = 0;
        long                     tzero = time_second;

        PF_RULES_WASSERT();

        ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
        if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
                return (EINVAL);
        kt = pfr_lookup_table(tbl);
        if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
                return (ESRCH);
        if (kt->pfrkt_flags & PFR_TFLAG_CONST)
                return (EPERM);
        tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
        if (tmpkt == NULL)
                return (ENOMEM);
        SLIST_INIT(&workq);
        for (i = 0, ad = addr; i < size; i++, ad++) {
                if (pfr_validate_addr(ad))
                        senderr(EINVAL);
                p = pfr_lookup_addr(kt, ad, 1);
                q = pfr_lookup_addr(tmpkt, ad, 1);
                if (flags & PFR_FLAG_FEEDBACK) {
                        if (q != NULL)
                                ad->pfra_fback = PFR_FB_DUPLICATE;
                        else if (p == NULL)
                                ad->pfra_fback = PFR_FB_ADDED;
                        else if (p->pfrke_not != ad->pfra_not)
                                ad->pfra_fback = PFR_FB_CONFLICT;
                        else
                                ad->pfra_fback = PFR_FB_NONE;
                }
                if (p == NULL && q == NULL) {
                        p = pfr_create_kentry(ad);
                        if (p == NULL)
                                senderr(ENOMEM);
                        if (pfr_route_kentry(tmpkt, p)) {
                                pfr_destroy_kentry(p);
                                ad->pfra_fback = PFR_FB_NONE;
                        } else {
                                SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
                                xadd++;
                        }
                }
        }
        pfr_clean_node_mask(tmpkt, &workq);
        if (!(flags & PFR_FLAG_DUMMY))
                pfr_insert_kentries(kt, &workq, tzero);
        else
                pfr_destroy_kentries(&workq);
        if (nadd != NULL)
                *nadd = xadd;
        pfr_destroy_ktable(tmpkt, 0);
        return (0);
_bad:
        pfr_clean_node_mask(tmpkt, &workq);
        pfr_destroy_kentries(&workq);
        if (flags & PFR_FLAG_FEEDBACK)
                pfr_reset_feedback(addr, size);
        pfr_destroy_ktable(tmpkt, 0);
        return (rv);
}

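/*
 * Delete a list of addresses.  Depending on the request size this
 * either marks the whole table (O(N)) or looks up each address
 * individually (O(n log N)); see the comment below.
 */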
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
        struct pfr_ktable       *kt;
        struct pfr_kentryworkq   workq;
        struct pfr_kentry       *p;
        struct pfr_addr         *ad;
        int                      i, rv, xdel = 0, log = 1;

        PF_RULES_WASSERT();

        ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
        if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
                return (EINVAL);
        kt = pfr_lookup_table(tbl);
        if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
                return (ESRCH);
        if (kt->pfrkt_flags & PFR_TFLAG_CONST)
                return (EPERM);
        /*
         * There are two algorithms to choose from here.
         * With:
         *   n: number of addresses to delete
         *   N: number of addresses in the table
         *
         * one is O(N) and is better for large 'n',
         * one is O(n*LOG(N)) and is better for small 'n'.
         *
         * The following code tries to decide which one is best.
         */
        for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
                log++;
        if (size > kt->pfrkt_cnt/log) {
                /* full table scan */
                pfr_mark_addrs(kt);
        } else {
                /* iterate over addresses to delete */
                for (i = 0, ad = addr; i < size; i++, ad++) {
                        if (pfr_validate_addr(ad))
                                return (EINVAL);
                        p = pfr_lookup_addr(kt, ad, 1);
                        if (p != NULL)
                                p->pfrke_mark = 0;
                }
        }
        SLIST_INIT(&workq);
        for (i = 0, ad = addr; i < size; i++, ad++) {
                if (pfr_validate_addr(ad))
                        senderr(EINVAL);
                p = pfr_lookup_addr(kt, ad, 1);
                if (flags & PFR_FLAG_FEEDBACK) {
                        if (p == NULL)
                                ad->pfra_fback = PFR_FB_NONE;
                        else if (p->pfrke_not != ad->pfra_not)
                                ad->pfra_fback = PFR_FB_CONFLICT;
                        else if (p->pfrke_mark)
                                ad->pfra_fback = PFR_FB_DUPLICATE;
                        else
                                ad->pfra_fback = PFR_FB_DELETED;
                }
                if (p != NULL && p->pfrke_not == ad->pfra_not &&
                    !p->pfrke_mark) {
                        p->pfrke_mark = 1;
                        SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
                        xdel++;
                }
        }
        if (!(flags & PFR_FLAG_DUMMY))
                pfr_remove_kentries(kt, &workq);
        if (ndel != NULL)
                *ndel = xdel;
        return (0);
_bad:
        if (flags & PFR_FLAG_FEEDBACK)
                pfr_reset_feedback(addr, size);
        return (rv);
}

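/*
 * Replace the contents of a table with the given address list: new
 * addresses are added, entries not in the list are deleted, and
 * entries whose negation flag differs are queued for change.
 */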
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
        struct pfr_ktable       *kt, *tmpkt;
        struct pfr_kentryworkq   addq, delq, changeq;
        struct pfr_kentry       *p, *q;
        struct pfr_addr          ad;
        int                      i, rv, xadd = 0, xdel = 0, xchange = 0;
        long                     tzero = time_second;

        PF_RULES_WASSERT();

        ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
        if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
            PFR_FLAG_USERIOCTL))
                return (EINVAL);
        kt = pfr_lookup_table(tbl);
        if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
                return (ESRCH);
        if (kt->pfrkt_flags & PFR_TFLAG_CONST)
                return (EPERM);
        tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
        if (tmpkt == NULL)
                return (ENOMEM);
        pfr_mark_addrs(kt);
        SLIST_INIT(&addq);
        SLIST_INIT(&delq);
        SLIST_INIT(&changeq);
        for (i = 0; i < size; i++) {
                /*
                 * XXXGL: understand pf_if usage of this function
                 * and make ad a moving pointer
                 */
                bcopy(addr + i, &ad, sizeof(ad));
                if (pfr_validate_addr(&ad))
                        senderr(EINVAL);
                ad.pfra_fback = PFR_FB_NONE;
                p = pfr_lookup_addr(kt, &ad, 1);
                if (p != NULL) {
                        if (p->pfrke_mark) {
                                ad.pfra_fback = PFR_FB_DUPLICATE;
                                goto _skip;
                        }
                        p->pfrke_mark = 1;
                        if (p->pfrke_not != ad.pfra_not) {
                                SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
                                ad.pfra_fback = PFR_FB_CHANGED;
                                xchange++;
                        }
                } else {
                        q = pfr_lookup_addr(tmpkt, &ad, 1);
                        if (q != NULL) {
                                ad.pfra_fback = PFR_FB_DUPLICATE;
                                goto _skip;
                        }
                        p = pfr_create_kentry(&ad);
                        if (p == NULL)
                                senderr(ENOMEM);
                        if (pfr_route_kentry(tmpkt, p)) {
                                pfr_destroy_kentry(p);
                                ad.pfra_fback = PFR_FB_NONE;
                        } else {
                                SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
                                ad.pfra_fback = PFR_FB_ADDED;
                                xadd++;
                        }
                }
_skip:
                if (flags & PFR_FLAG_FEEDBACK)
                        bcopy(&ad, addr + i, sizeof(ad));
        }
        pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
        if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
                if (*size2 < size+xdel) {
                        *size2 = size+xdel;
                        senderr(0);
                }
                i = 0;
                SLIST_FOREACH(p, &delq, pfrke_workq) {
                        pfr_copyout_addr(&ad, p);
                        ad.pfra_fback = PFR_FB_DELETED;
                        bcopy(&ad, addr + size + i, sizeof(ad));
                        i++;
                }
        }
        pfr_clean_node_mask(tmpkt, &addq);
        if (!(flags & PFR_FLAG_DUMMY)) {
                pfr_insert_kentries(kt, &addq, tzero);
                pfr_remove_kentries(kt, &delq);
                pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
        } else
                pfr_destroy_kentries(&addq);
        if (nadd != NULL)
                *nadd = xadd;
        if (ndel != NULL)
                *ndel = xdel;
        if (nchange != NULL)
                *nchange = xchange;
        if ((flags & PFR_FLAG_FEEDBACK) && size2)
                *size2 = size+xdel;
        pfr_destroy_ktable(tmpkt, 0);
        return (0);
_bad:
        pfr_clean_node_mask(tmpkt, &addq);
        pfr_destroy_kentries(&addq);
        if (flags & PFR_FLAG_FEEDBACK)
                pfr_reset_feedback(addr, size);
        pfr_destroy_ktable(tmpkt, 0);
        return (rv);
}

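/*
 * Test which of the given host addresses currently match the table;
 * with PFR_FLAG_REPLACE the matching entry is copied back to the caller.
 */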
int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
        int *nmatch, int flags)
{
        struct pfr_ktable       *kt;
        struct pfr_kentry       *p;
        struct pfr_addr         *ad;
        int                      i, xmatch = 0;

        PF_RULES_RASSERT();

        ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
        if (pfr_validate_table(tbl, 0, 0))
                return (EINVAL);
        kt = pfr_lookup_table(tbl);
        if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
                return (ESRCH);

        for (i = 0, ad = addr; i < size; i++, ad++) {
                if (pfr_validate_addr(ad))
                        return (EINVAL);
                if (ADDR_NETWORK(ad))
                        return (EINVAL);
                p = pfr_lookup_addr(kt, ad, 0);
                if (flags & PFR_FLAG_REPLACE)
                        pfr_copyout_addr(ad, p);
                ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
                    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
                if (p != NULL && !p->pfrke_not)
                        xmatch++;
        }
        if (nmatch != NULL)
                *nmatch = xmatch;
        return (0);
}

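/*
 * Copy a table's addresses out to userland.  If the caller's buffer is
 * too small, only the required size is returned.
 */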
int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
        int flags)
{
        struct pfr_ktable       *kt;
        struct pfr_walktree      w;
        int                      rv;

        PF_RULES_RASSERT();

        ACCEPT_FLAGS(flags, 0);
        if (pfr_validate_table(tbl, 0, 0))
                return (EINVAL);
        kt = pfr_lookup_table(tbl);
        if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
                return (ESRCH);
        if (kt->pfrkt_cnt > *size) {
                *size = kt->pfrkt_cnt;
                return (0);
        }

        bzero(&w, sizeof(w));
        w.pfrw_op = PFRW_GET_ADDRS;
        w.pfrw_addr = addr;
        w.pfrw_free = kt->pfrkt_cnt;
        rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
        if (!rv)
                rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
                    pfr_walktree, &w);
        if (rv)
                return (rv);

        KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
            w.pfrw_free));

        *size = kt->pfrkt_cnt;
        return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
        int flags)
{
        struct pfr_ktable       *kt;
        struct pfr_walktree      w;
        struct pfr_kentryworkq   workq;
        int                      rv;
        long                     tzero = time_second;

        PF_RULES_RASSERT();

        /* XXX PFR_FLAG_CLSTATS disabled */
        ACCEPT_FLAGS(flags, 0);
        if (pfr_validate_table(tbl, 0, 0))
                return (EINVAL);
        kt = pfr_lookup_table(tbl);
        if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
                return (ESRCH);
        if (kt->pfrkt_cnt > *size) {
                *size = kt->pfrkt_cnt;
                return (0);
        }

        bzero(&w, sizeof(w));
        w.pfrw_op = PFRW_GET_ASTATS;
        w.pfrw_astats = addr;
        w.pfrw_free = kt->pfrkt_cnt;
        rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
        if (!rv)
                rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
                    pfr_walktree, &w);
        if (!rv && (flags & PFR_FLAG_CLSTATS)) {
                pfr_enqueue_addrs(kt, &workq, NULL, 0);
                pfr_clstats_kentries(&workq, tzero, 0);
        }
        if (rv)
                return (rv);

        if (w.pfrw_free) {
                printf("pfr_get_astats: corruption detected (%d).\n",
                    w.pfrw_free);
                return (ENOTTY);
        }
        *size = kt->pfrkt_cnt;
        return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
        struct pfr_ktable       *kt;
        struct pfr_kentryworkq   workq;
        struct pfr_kentry       *p;
        struct pfr_addr         *ad;
        int                      i, rv, xzero = 0;

        PF_RULES_WASSERT();

        ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
        if (pfr_validate_table(tbl, 0, 0))
                return (EINVAL);
        kt = pfr_lookup_table(tbl);
        if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
                return (ESRCH);
        SLIST_INIT(&workq);
        for (i = 0, ad = addr; i < size; i++, ad++) {
                if (pfr_validate_addr(ad))
                        senderr(EINVAL);
                p = pfr_lookup_addr(kt, ad, 1);
                if (flags & PFR_FLAG_FEEDBACK) {
                        ad->pfra_fback = (p != NULL) ?
                            PFR_FB_CLEARED : PFR_FB_NONE;
                }
                if (p != NULL) {
                        SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
                        xzero++;
                }
        }

        if (!(flags & PFR_FLAG_DUMMY))
                pfr_clstats_kentries(&workq, 0, 0);
        if (nzero != NULL)
                *nzero = xzero;
        return (0);
_bad:
        if (flags & PFR_FLAG_FEEDBACK)
                pfr_reset_feedback(addr, size);
        return (rv);
}

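/*
 * Sanity-check an address coming from userland: known address family,
 * prefix length within range for that family, no address bits set
 * beyond the prefix, and no unexpected flag values.
 */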
static int
pfr_validate_addr(struct pfr_addr *ad)
{
        int i;

        switch (ad->pfra_af) {
#ifdef INET
        case AF_INET:
                if (ad->pfra_net > 32)
                        return (-1);
                break;
#endif /* INET */
#ifdef INET6
        case AF_INET6:
                if (ad->pfra_net > 128)
                        return (-1);
                break;
#endif /* INET6 */
        default:
                return (-1);
        }
        if (ad->pfra_net < 128 &&
                (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
                        return (-1);
        for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
                if (((caddr_t)ad)[i])
                        return (-1);
        if (ad->pfra_not && ad->pfra_not != 1)
                return (-1);
        if (ad->pfra_fback)
                return (-1);
        return (0);
}

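/*
 * Walk both radix trees and collect entries onto a work queue; in
 * sweep mode only entries left unmarked by pfr_mark_addrs() are taken.
 */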
static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
        int *naddr, int sweep)
{
        struct pfr_walktree     w;

        SLIST_INIT(workq);
        bzero(&w, sizeof(w));
        w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
        w.pfrw_workq = workq;
        if (kt->pfrkt_ip4 != NULL)
                if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh,
                    pfr_walktree, &w))
                        printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
        if (kt->pfrkt_ip6 != NULL)
                if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
                    pfr_walktree, &w))
                        printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
        if (naddr != NULL)
                *naddr = w.pfrw_cnt;
}

static void
pfr_mark_addrs(struct pfr_ktable *kt)
{
        struct pfr_walktree     w;

        bzero(&w, sizeof(w));
        w.pfrw_op = PFRW_MARK;
        if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w))
                printf("pfr_mark_addrs: IPv4 walktree failed.\n");
        if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w))
                printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

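/*
 * Look up an address in a table.  Network entries are found with an
 * exact rn_lookup() on address and mask; host addresses use rn_match(),
 * optionally rejecting network entries when an exact match is wanted.
 */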
static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
        union sockaddr_union     sa, mask;
        struct radix_head       *head = NULL;
        struct pfr_kentry       *ke;

        PF_RULES_ASSERT();

        bzero(&sa, sizeof(sa));
        if (ad->pfra_af == AF_INET) {
                FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
                head = &kt->pfrkt_ip4->rh;
        } else if (ad->pfra_af == AF_INET6) {
                FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
                head = &kt->pfrkt_ip6->rh;
        }
        if (ADDR_NETWORK(ad)) {
                pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
                ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
                if (ke && KENTRY_RNF_ROOT(ke))
                        ke = NULL;
        } else {
                ke = (struct pfr_kentry *)rn_match(&sa, head);
                if (ke && KENTRY_RNF_ROOT(ke))
                        ke = NULL;
                if (exact && ke && KENTRY_NETWORK(ke))
                        ke = NULL;
        }
        return (ke);
}

static struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
        struct pfr_kentry       *ke;

        ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
        if (ke == NULL)
                return (NULL);

        if (ad->pfra_af == AF_INET)
                FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
        else if (ad->pfra_af == AF_INET6)
                FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
        ke->pfrke_af = ad->pfra_af;
        ke->pfrke_net = ad->pfra_net;
        ke->pfrke_not = ad->pfra_not;
        return (ke);
}

static void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
        struct pfr_kentry       *p, *q;

        for (p = SLIST_FIRST(workq); p != NULL; p = q) {
                q = SLIST_NEXT(p, pfrke_workq);
                pfr_destroy_kentry(p);
        }
}

static void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
        if (ke->pfrke_counters)
                uma_zfree(V_pfr_kcounters_z, ke->pfrke_counters);
        uma_zfree(V_pfr_kentry_z, ke);
}

static void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
        struct pfr_kentry       *p;
        int                      rv, n = 0;

        SLIST_FOREACH(p, workq, pfrke_workq) {
                rv = pfr_route_kentry(kt, p);
                if (rv) {
                        printf("pfr_insert_kentries: cannot route entry "
                            "(code=%d).\n", rv);
                        break;
                }
                p->pfrke_tzero = tzero;
                n++;
        }
        kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
{
        struct pfr_kentry       *p;
        int                      rv;

        p = pfr_lookup_addr(kt, ad, 1);
        if (p != NULL)
                return (0);
        p = pfr_create_kentry(ad);
        if (p == NULL)
                return (ENOMEM);

        rv = pfr_route_kentry(kt, p);
        if (rv)
                return (rv);

        p->pfrke_tzero = tzero;
        kt->pfrkt_cnt++;

        return (0);
}

static void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
        struct pfr_kentry       *p;
        int                      n = 0;

        SLIST_FOREACH(p, workq, pfrke_workq) {
                pfr_unroute_kentry(kt, p);
                n++;
        }
        kt->pfrkt_cnt -= n;
        pfr_destroy_kentries(workq);
}

static void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
        struct pfr_kentry       *p;

        SLIST_FOREACH(p, workq, pfrke_workq)
                pfr_unroute_kentry(kt, p);
}

static void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
        struct pfr_kentry       *p;

        SLIST_FOREACH(p, workq, pfrke_workq) {
                if (negchange)
                        p->pfrke_not = !p->pfrke_not;
                if (p->pfrke_counters) {
                        uma_zfree(V_pfr_kcounters_z, p->pfrke_counters);
                        p->pfrke_counters = NULL;
                }
                p->pfrke_tzero = tzero;
        }
}

static void
pfr_reset_feedback(struct pfr_addr *addr, int size)
{
        struct pfr_addr *ad;
        int             i;

        for (i = 0, ad = addr; i < size; i++, ad++)
                ad->pfra_fback = PFR_FB_NONE;
}

static void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
        int     i;

        bzero(sa, sizeof(*sa));
        if (af == AF_INET) {
                sa->sin.sin_len = sizeof(sa->sin);
                sa->sin.sin_family = AF_INET;
                sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
        } else if (af == AF_INET6) {
                sa->sin6.sin6_len = sizeof(sa->sin6);
                sa->sin6.sin6_family = AF_INET6;
                for (i = 0; i < 4; i++) {
                        if (net <= 32) {
                                sa->sin6.sin6_addr.s6_addr32[i] =
                                    net ? htonl(-1 << (32-net)) : 0;
                                break;
                        }
                        sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
                        net -= 32;
                }
        }
}

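/*
 * Insert (route) or remove (unroute) an entry in the table's radix
 * tree for its address family; network entries carry an explicit mask.
 */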
static int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
        union sockaddr_union     mask;
        struct radix_node       *rn;
        struct radix_head       *head = NULL;

        PF_RULES_WASSERT();

        bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
        if (ke->pfrke_af == AF_INET)
                head = &kt->pfrkt_ip4->rh;
        else if (ke->pfrke_af == AF_INET6)
                head = &kt->pfrkt_ip6->rh;

        if (KENTRY_NETWORK(ke)) {
                pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
                rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
        } else
                rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);

        return (rn == NULL ? -1 : 0);
}

static int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
        union sockaddr_union     mask;
        struct radix_node       *rn;
        struct radix_head       *head = NULL;

        if (ke->pfrke_af == AF_INET)
                head = &kt->pfrkt_ip4->rh;
        else if (ke->pfrke_af == AF_INET6)
                head = &kt->pfrkt_ip6->rh;

        if (KENTRY_NETWORK(ke)) {
                pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
                rn = rn_delete(&ke->pfrke_sa, &mask, head);
        } else
                rn = rn_delete(&ke->pfrke_sa, NULL, head);

        if (rn == NULL) {
                printf("pfr_unroute_kentry: delete failed.\n");
                return (-1);
        }
        return (0);
}

static void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
        bzero(ad, sizeof(*ad));
        if (ke == NULL)
                return;
        ad->pfra_af = ke->pfrke_af;
        ad->pfra_net = ke->pfrke_net;
        ad->pfra_not = ke->pfrke_not;
        if (ad->pfra_af == AF_INET)
                ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
        else if (ad->pfra_af == AF_INET6)
                ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

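/*
 * Per-node callback for radix tree walks; dispatches on the operation
 * requested in the pfr_walktree context set up by the caller.
 */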
static int
pfr_walktree(struct radix_node *rn, void *arg)
{
        struct pfr_kentry       *ke = (struct pfr_kentry *)rn;
        struct pfr_walktree     *w = arg;

        switch (w->pfrw_op) {
        case PFRW_MARK:
                ke->pfrke_mark = 0;
                break;
        case PFRW_SWEEP:
                if (ke->pfrke_mark)
                        break;
                /* FALLTHROUGH */
        case PFRW_ENQUEUE:
                SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
                w->pfrw_cnt++;
                break;
        case PFRW_GET_ADDRS:
                if (w->pfrw_free-- > 0) {
                        pfr_copyout_addr(w->pfrw_addr, ke);
                        w->pfrw_addr++;
                }
                break;
        case PFRW_GET_ASTATS:
                if (w->pfrw_free-- > 0) {
                        struct pfr_astats as;

                        pfr_copyout_addr(&as.pfras_a, ke);

                        if (ke->pfrke_counters) {
                                bcopy(ke->pfrke_counters->pfrkc_packets,
                                    as.pfras_packets, sizeof(as.pfras_packets));
                                bcopy(ke->pfrke_counters->pfrkc_bytes,
                                    as.pfras_bytes, sizeof(as.pfras_bytes));
                        } else {
                                bzero(as.pfras_packets, sizeof(as.pfras_packets));
                                bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
                                as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
                        }
                        as.pfras_tzero = ke->pfrke_tzero;

                        bcopy(&as, w->pfrw_astats, sizeof(as));
                        w->pfrw_astats++;
                }
                break;
        case PFRW_POOL_GET:
                if (ke->pfrke_not)
                        break; /* negative entries are ignored */
                if (!w->pfrw_cnt--) {
                        w->pfrw_kentry = ke;
                        return (1); /* finish search */
                }
                break;
        case PFRW_DYNADDR_UPDATE:
            {
                union sockaddr_union    pfr_mask;

                if (ke->pfrke_af == AF_INET) {
                        if (w->pfrw_dyn->pfid_acnt4++ > 0)
                                break;
                        pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
                        w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa,
                            AF_INET);
                        w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask,
                            AF_INET);
                } else if (ke->pfrke_af == AF_INET6) {
                        if (w->pfrw_dyn->pfid_acnt6++ > 0)
                                break;
                        pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
                        w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa,
                            AF_INET6);
                        w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask,
                            AF_INET6);
                }
                break;
            }
        }
        return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
        struct pfr_ktableworkq   workq;
        struct pfr_ktable       *p;
        int                      xdel = 0;

        ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
        if (pfr_fix_anchor(filter->pfrt_anchor))
                return (EINVAL);
        if (pfr_table_count(filter, flags) < 0)
                return (ENOENT);

        SLIST_INIT(&workq);
        RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
                if (pfr_skip_table(filter, p, flags))
                        continue;
                if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
                        continue;
                if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
                        continue;
                p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
                SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
                xdel++;
        }
        if (!(flags & PFR_FLAG_DUMMY))
                pfr_setflags_ktables(&workq);
        if (ndel != NULL)
                *ndel = xdel;
        return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
        struct pfr_ktableworkq   addq, changeq;
        struct pfr_ktable       *p, *q, *r, key;
        int                      i, rv, xadd = 0;
        long                     tzero = time_second;

        ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
        SLIST_INIT(&addq);
        SLIST_INIT(&changeq);
        for (i = 0; i < size; i++) {
                bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
                if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
                    flags & PFR_FLAG_USERIOCTL))
                        senderr(EINVAL);
                key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
                p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
                if (p == NULL) {
                        p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
                        if (p == NULL)
                                senderr(ENOMEM);
                        SLIST_FOREACH(q, &addq, pfrkt_workq) {
                                if (!pfr_ktable_compare(p, q))
                                        goto _skip;
                        }
                        SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
                        xadd++;
                        if (!key.pfrkt_anchor[0])
                                goto _skip;

                        /* find or create root table */
                        bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
                        r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
                        if (r != NULL) {
                                p->pfrkt_root = r;
                                goto _skip;
                        }
                        SLIST_FOREACH(q, &addq, pfrkt_workq) {
                                if (!pfr_ktable_compare(&key, q)) {
                                        p->pfrkt_root = q;
                                        goto _skip;
                                }
                        }
                        key.pfrkt_flags = 0;
                        r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
                        if (r == NULL)
                                senderr(ENOMEM);
                        SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
                        p->pfrkt_root = r;
                } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
                        SLIST_FOREACH(q, &changeq, pfrkt_workq)
                                if (!pfr_ktable_compare(&key, q))
                                        goto _skip;
                        p->pfrkt_nflags = (p->pfrkt_flags &
                            ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
                        SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
                        xadd++;
                }
_skip:
        ;
        }
        if (!(flags & PFR_FLAG_DUMMY)) {
                pfr_insert_ktables(&addq);
                pfr_setflags_ktables(&changeq);
        } else
                pfr_destroy_ktables(&addq, 0);
        if (nadd != NULL)
                *nadd = xadd;
        return (0);
_bad:
        pfr_destroy_ktables(&addq, 0);
        return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
        struct pfr_ktableworkq   workq;
        struct pfr_ktable       *p, *q, key;
        int                      i, xdel = 0;

        ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
        SLIST_INIT(&workq);
        for (i = 0; i < size; i++) {
                bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
                if (pfr_validate_table(&key.pfrkt_t, 0,
                    flags & PFR_FLAG_USERIOCTL))
                        return (EINVAL);
                p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
                if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
                        SLIST_FOREACH(q, &workq, pfrkt_workq)
                                if (!pfr_ktable_compare(p, q))
                                        goto _skip;
                        p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
                        SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
                        xdel++;
                }
_skip:
        ;
        }

        if (!(flags & PFR_FLAG_DUMMY))
                pfr_setflags_ktables(&workq);
        if (ndel != NULL)
                *ndel = xdel;
        return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
        int flags)
{
        struct pfr_ktable       *p;
        int                      n, nn;

        PF_RULES_RASSERT();

        ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
        if (pfr_fix_anchor(filter->pfrt_anchor))
                return (EINVAL);
        n = nn = pfr_table_count(filter, flags);
        if (n < 0)
                return (ENOENT);
        if (n > *size) {
                *size = n;
                return (0);
        }
        RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
                if (pfr_skip_table(filter, p, flags))
                        continue;
                if (n-- <= 0)
                        continue;
                bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
        }

        KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

        *size = nn;
        return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
        int flags)
{
        struct pfr_ktable       *p;
        struct pfr_ktableworkq   workq;
        int                      n, nn;
        long                     tzero = time_second;

        /* XXX PFR_FLAG_CLSTATS disabled */
        ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
        if (pfr_fix_anchor(filter->pfrt_anchor))
                return (EINVAL);
        n = nn = pfr_table_count(filter, flags);
        if (n < 0)
                return (ENOENT);
        if (n > *size) {
                *size = n;
                return (0);
        }
        SLIST_INIT(&workq);
        RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
                if (pfr_skip_table(filter, p, flags))
                        continue;
                if (n-- <= 0)
                        continue;
                bcopy(&p->pfrkt_ts, tbl++, sizeof(*tbl));
                SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
        }
        if (flags & PFR_FLAG_CLSTATS)
                pfr_clstats_ktables(&workq, tzero,
                    flags & PFR_FLAG_ADDRSTOO);

        KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

        *size = nn;
        return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
        struct pfr_ktableworkq   workq;
        struct pfr_ktable       *p, key;
        int                      i, xzero = 0;
        long                     tzero = time_second;

        ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
        SLIST_INIT(&workq);
        for (i = 0; i < size; i++) {
                bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
                if (pfr_validate_table(&key.pfrkt_t, 0, 0))
                        return (EINVAL);
                p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
                if (p != NULL) {
                        SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
                        xzero++;
                }
        }
        if (!(flags & PFR_FLAG_DUMMY))
                pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
        if (nzero != NULL)
                *nzero = xzero;
        return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
        int *nchange, int *ndel, int flags)
{
        struct pfr_ktableworkq   workq;
        struct pfr_ktable       *p, *q, key;
        int                      i, xchange = 0, xdel = 0;

        ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
        if ((setflag & ~PFR_TFLAG_USRMASK) ||
            (clrflag & ~PFR_TFLAG_USRMASK) ||
            (setflag & clrflag))
                return (EINVAL);
        SLIST_INIT(&workq);
        for (i = 0; i < size; i++) {
                bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
                if (pfr_validate_table(&key.pfrkt_t, 0,
                    flags & PFR_FLAG_USERIOCTL))
                        return (EINVAL);
                p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
                if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
                        p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
                            ~clrflag;
                        if (p->pfrkt_nflags == p->pfrkt_flags)
                                goto _skip;
                        SLIST_FOREACH(q, &workq, pfrkt_workq)
                                if (!pfr_ktable_compare(p, q))
                                        goto _skip;
                        SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
                        if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
                            (clrflag & PFR_TFLAG_PERSIST) &&
                            !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
                                xdel++;
                        else
                                xchange++;
                }
_skip:
        ;
        }
        if (!(flags & PFR_FLAG_DUMMY))
                pfr_setflags_ktables(&workq);
        if (nchange != NULL)
                *nchange = xchange;
        if (ndel != NULL)
                *ndel = xdel;
        return (0);
}

int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
        struct pfr_ktableworkq   workq;
        struct pfr_ktable       *p;
        struct pf_ruleset       *rs;
        int                      xdel = 0;

        ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
        rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
        if (rs == NULL)
                return (ENOMEM);
        SLIST_INIT(&workq);
        RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
                if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
                    pfr_skip_table(trs, p, 0))
                        continue;
                p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
                SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
                xdel++;
        }
        if (!(flags & PFR_FLAG_DUMMY)) {
                pfr_setflags_ktables(&workq);
                if (ticket != NULL)
                        *ticket = ++rs->tticket;
                rs->topen = 1;
        } else
                pf_remove_if_empty_ruleset(rs);
        if (ndel != NULL)
                *ndel = xdel;
        return (0);
}

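/*
 * Define a table (and optionally its addresses) in the inactive set
 * identified by 'ticket'.  Addresses are staged in a shadow ktable
 * hung off the live table until the transaction is committed.
 */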
1400 int
1401 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1402     int *nadd, int *naddr, u_int32_t ticket, int flags)
1403 {
1404         struct pfr_ktableworkq   tableq;
1405         struct pfr_kentryworkq   addrq;
1406         struct pfr_ktable       *kt, *rt, *shadow, key;
1407         struct pfr_kentry       *p;
1408         struct pfr_addr         *ad;
1409         struct pf_ruleset       *rs;
1410         int                      i, rv, xadd = 0, xaddr = 0;
1411
1412         PF_RULES_WASSERT();
1413
1414         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1415         if (size && !(flags & PFR_FLAG_ADDRSTOO))
1416                 return (EINVAL);
1417         if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1418             flags & PFR_FLAG_USERIOCTL))
1419                 return (EINVAL);
1420         rs = pf_find_ruleset(tbl->pfrt_anchor);
1421         if (rs == NULL || !rs->topen || ticket != rs->tticket)
1422                 return (EBUSY);
1423         tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1424         SLIST_INIT(&tableq);
1425         kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
1426         if (kt == NULL) {
1427                 kt = pfr_create_ktable(tbl, 0, 1);
1428                 if (kt == NULL)
1429                         return (ENOMEM);
1430                 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1431                 xadd++;
1432                 if (!tbl->pfrt_anchor[0])
1433                         goto _skip;
1434
1435                 /* find or create root table */
1436                 bzero(&key, sizeof(key));
1437                 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1438                 rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1439                 if (rt != NULL) {
1440                         kt->pfrkt_root = rt;
1441                         goto _skip;
1442                 }
1443                 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1444                 if (rt == NULL) {
1445                         pfr_destroy_ktables(&tableq, 0);
1446                         return (ENOMEM);
1447                 }
1448                 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1449                 kt->pfrkt_root = rt;
1450         } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
1451                 xadd++;
1452 _skip:
1453         shadow = pfr_create_ktable(tbl, 0, 0);
1454         if (shadow == NULL) {
1455                 pfr_destroy_ktables(&tableq, 0);
1456                 return (ENOMEM);
1457         }
1458         SLIST_INIT(&addrq);
1459         for (i = 0, ad = addr; i < size; i++, ad++) {
1460                 if (pfr_validate_addr(ad))
1461                         senderr(EINVAL);
1462                 if (pfr_lookup_addr(shadow, ad, 1) != NULL)
1463                         continue;
1464                 p = pfr_create_kentry(ad);
1465                 if (p == NULL)
1466                         senderr(ENOMEM);
1467                 if (pfr_route_kentry(shadow, p)) {
1468                         pfr_destroy_kentry(p);
1469                         continue;
1470                 }
1471                 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1472                 xaddr++;
1473         }
1474         if (!(flags & PFR_FLAG_DUMMY)) {
1475                 if (kt->pfrkt_shadow != NULL)
1476                         pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1477                 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1478                 pfr_insert_ktables(&tableq);
1479                 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1480                     xaddr : NO_ADDRESSES;
1481                 kt->pfrkt_shadow = shadow;
1482         } else {
1483                 pfr_clean_node_mask(shadow, &addrq);
1484                 pfr_destroy_ktable(shadow, 0);
1485                 pfr_destroy_ktables(&tableq, 0);
1486                 pfr_destroy_kentries(&addrq);
1487         }
1488         if (nadd != NULL)
1489                 *nadd = xadd;
1490         if (naddr != NULL)
1491                 *naddr = xaddr;
1492         return (0);
1493 _bad:
1494         pfr_destroy_ktable(shadow, 0);
1495         pfr_destroy_ktables(&tableq, 0);
1496         pfr_destroy_kentries(&addrq);
1497         return (rv);
1498 }
1499
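/*
 * Abort an open table transaction: schedule every inactive table in the
 * anchor for teardown and close the ruleset.
 */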
1500 int
1501 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1502 {
1503         struct pfr_ktableworkq   workq;
1504         struct pfr_ktable       *p;
1505         struct pf_ruleset       *rs;
1506         int                      xdel = 0;
1507
1508         PF_RULES_WASSERT();
1509
1510         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1511         rs = pf_find_ruleset(trs->pfrt_anchor);
1512         if (rs == NULL || !rs->topen || ticket != rs->tticket)
1513                 return (0);
1514         SLIST_INIT(&workq);
1515         RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1516                 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1517                     pfr_skip_table(trs, p, 0))
1518                         continue;
1519                 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1520                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1521                 xdel++;
1522         }
1523         if (!(flags & PFR_FLAG_DUMMY)) {
1524                 pfr_setflags_ktables(&workq);
1525                 rs->topen = 0;
1526                 pf_remove_if_empty_ruleset(rs);
1527         }
1528         if (ndel != NULL)
1529                 *ndel = xdel;
1530         return (0);
1531 }
1532
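/*
 * Commit an open table transaction: activate every inactive table in the
 * anchor by merging or swapping in its shadow copy.
 */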
1533 int
1534 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1535     int *nchange, int flags)
1536 {
1537         struct pfr_ktable       *p, *q;
1538         struct pfr_ktableworkq   workq;
1539         struct pf_ruleset       *rs;
1540         int                      xadd = 0, xchange = 0;
1541         long                     tzero = time_second;
1542
1543         PF_RULES_WASSERT();
1544
1545         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1546         rs = pf_find_ruleset(trs->pfrt_anchor);
1547         if (rs == NULL || !rs->topen || ticket != rs->tticket)
1548                 return (EBUSY);
1549
1550         SLIST_INIT(&workq);
1551         RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1552                 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1553                     pfr_skip_table(trs, p, 0))
1554                         continue;
1555                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1556                 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1557                         xchange++;
1558                 else
1559                         xadd++;
1560         }
1561
1562         if (!(flags & PFR_FLAG_DUMMY)) {
1563                 for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
1564                         q = SLIST_NEXT(p, pfrkt_workq);
1565                         pfr_commit_ktable(p, tzero);
1566                 }
1567                 rs->topen = 0;
1568                 pf_remove_if_empty_ruleset(rs);
1569         }
1570         if (nadd != NULL)
1571                 *nadd = xadd;
1572         if (nchange != NULL)
1573                 *nchange = xchange;
1574
1575         return (0);
1576 }
1577
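/*
 * Make the shadow contents of kt active.  If kt is already active, the old
 * and new address sets are merged entry by entry so that entries present in
 * both keep their statistics; otherwise the radix heads are simply swapped in.
 */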
1578 static void
1579 pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
1580 {
1581         struct pfr_ktable       *shadow = kt->pfrkt_shadow;
1582         int                      nflags;
1583
1584         PF_RULES_WASSERT();
1585
1586         if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1587                 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1588                         pfr_clstats_ktable(kt, tzero, 1);
1589         } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1590                 /* kt might contain addresses */
1591                 struct pfr_kentryworkq   addrq, addq, changeq, delq, garbageq;
1592                 struct pfr_kentry       *p, *q, *next;
1593                 struct pfr_addr          ad;
1594
1595                 pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1596                 pfr_mark_addrs(kt);
1597                 SLIST_INIT(&addq);
1598                 SLIST_INIT(&changeq);
1599                 SLIST_INIT(&delq);
1600                 SLIST_INIT(&garbageq);
1601                 pfr_clean_node_mask(shadow, &addrq);
1602                 for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
1603                         next = SLIST_NEXT(p, pfrke_workq);      /* XXX */
1604                         pfr_copyout_addr(&ad, p);
1605                         q = pfr_lookup_addr(kt, &ad, 1);
1606                         if (q != NULL) {
1607                                 if (q->pfrke_not != p->pfrke_not)
1608                                         SLIST_INSERT_HEAD(&changeq, q,
1609                                             pfrke_workq);
1610                                 q->pfrke_mark = 1;
1611                                 SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
1612                         } else {
1613                                 p->pfrke_tzero = tzero;
1614                                 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
1615                         }
1616                 }
1617                 pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1618                 pfr_insert_kentries(kt, &addq, tzero);
1619                 pfr_remove_kentries(kt, &delq);
1620                 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
1621                 pfr_destroy_kentries(&garbageq);
1622         } else {
1623                 /* kt cannot contain addresses */
1624                 SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1625                     shadow->pfrkt_ip4);
1626                 SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1627                     shadow->pfrkt_ip6);
1628                 SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1629                 pfr_clstats_ktable(kt, tzero, 1);
1630         }
1631         nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1632             (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1633                 & ~PFR_TFLAG_INACTIVE;
1634         pfr_destroy_ktable(shadow, 0);
1635         kt->pfrkt_shadow = NULL;
1636         pfr_setflags_ktable(kt, nflags);
1637 }
1638
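/*
 * Sanity-check a table descriptor: the name must be non-empty,
 * NUL-terminated and zero-padded, the anchor path must be valid, only
 * flags in allowedflags may be set, and with no_reserved the reserved
 * anchor is rejected.
 */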
1639 static int
1640 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1641 {
1642         int i;
1643
1644         if (!tbl->pfrt_name[0])
1645                 return (-1);
1646         if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1647                 return (-1);
1648         if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1649                 return (-1);
1650         for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1651                 if (tbl->pfrt_name[i])
1652                         return (-1);
1653         if (pfr_fix_anchor(tbl->pfrt_anchor))
1654                 return (-1);
1655         if (tbl->pfrt_flags & ~allowedflags)
1656                 return (-1);
1657         return (0);
1658 }
1659
1660 /*
1661  * Rewrite anchors referenced by tables to remove slashes
1662  * and check for validity.
1663  */
1664 static int
1665 pfr_fix_anchor(char *anchor)
1666 {
1667         size_t siz = MAXPATHLEN;
1668         int i;
1669
1670         if (anchor[0] == '/') {
1671                 char *path;
1672                 int off;
1673
1674                 path = anchor;
1675                 off = 1;
1676                 while (*++path == '/')
1677                         off++;
1678                 bcopy(path, anchor, siz - off);
1679                 memset(anchor + siz - off, 0, off);
1680         }
1681         if (anchor[siz - 1])
1682                 return (-1);
1683         for (i = strlen(anchor); i < siz; i++)
1684                 if (anchor[i])
1685                         return (-1);
1686         return (0);
1687 }
1688
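/*
 * Return the number of tables visible through the filter: all rulesets,
 * a single anchor, or the main ruleset.
 */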
1689 static int
1690 pfr_table_count(struct pfr_table *filter, int flags)
1691 {
1692         struct pf_ruleset *rs;
1693
1694         PF_RULES_ASSERT();
1695
1696         if (flags & PFR_FLAG_ALLRSETS)
1697                 return (V_pfr_ktable_cnt);
1698         if (filter->pfrt_anchor[0]) {
1699                 rs = pf_find_ruleset(filter->pfrt_anchor);
1700                 return ((rs != NULL) ? rs->tables : -1);
1701         }
1702         return (pf_main_ruleset.tables);
1703 }
1704
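/*
 * Return non-zero if kt does not belong to the anchor selected by the
 * filter (unless PFR_FLAG_ALLRSETS is given).
 */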
1705 static int
1706 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1707 {
1708         if (flags & PFR_FLAG_ALLRSETS)
1709                 return (0);
1710         if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1711                 return (1);
1712         return (0);
1713 }
1714
1715 static void
1716 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1717 {
1718         struct pfr_ktable       *p;
1719
1720         SLIST_FOREACH(p, workq, pfrkt_workq)
1721                 pfr_insert_ktable(p);
1722 }
1723
1724 static void
1725 pfr_insert_ktable(struct pfr_ktable *kt)
1726 {
1727
1728         PF_RULES_WASSERT();
1729
1730         RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
1731         V_pfr_ktable_cnt++;
1732         if (kt->pfrkt_root != NULL)
1733                 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1734                         pfr_setflags_ktable(kt->pfrkt_root,
1735                             kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1736 }
1737
1738 static void
1739 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1740 {
1741         struct pfr_ktable       *p, *q;
1742
1743         for (p = SLIST_FIRST(workq); p; p = q) {
1744                 q = SLIST_NEXT(p, pfrkt_workq);
1745                 pfr_setflags_ktable(p, p->pfrkt_nflags);
1746         }
1747 }
1748
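/*
 * Apply a new set of flags to a table.  A table that loses all flags in
 * PFR_TFLAG_SETMASK is removed from the tree and destroyed; dropping
 * ACTIVE flushes its addresses and dropping INACTIVE destroys its shadow.
 */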
1749 static void
1750 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1751 {
1752         struct pfr_kentryworkq  addrq;
1753
1754         PF_RULES_WASSERT();
1755
1756         if (!(newf & PFR_TFLAG_REFERENCED) &&
1757             !(newf & PFR_TFLAG_PERSIST))
1758                 newf &= ~PFR_TFLAG_ACTIVE;
1759         if (!(newf & PFR_TFLAG_ACTIVE))
1760                 newf &= ~PFR_TFLAG_USRMASK;
1761         if (!(newf & PFR_TFLAG_SETMASK)) {
1762                 RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
1763                 if (kt->pfrkt_root != NULL)
1764                         if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1765                                 pfr_setflags_ktable(kt->pfrkt_root,
1766                                     kt->pfrkt_root->pfrkt_flags &
1767                                         ~PFR_TFLAG_REFDANCHOR);
1768                 pfr_destroy_ktable(kt, 1);
1769                 V_pfr_ktable_cnt--;
1770                 return;
1771         }
1772         if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1773                 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1774                 pfr_remove_kentries(kt, &addrq);
1775         }
1776         if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1777                 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1778                 kt->pfrkt_shadow = NULL;
1779         }
1780         kt->pfrkt_flags = newf;
1781 }
1782
1783 static void
1784 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1785 {
1786         struct pfr_ktable       *p;
1787
1788         SLIST_FOREACH(p, workq, pfrkt_workq)
1789                 pfr_clstats_ktable(p, tzero, recurse);
1790 }
1791
1792 static void
1793 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1794 {
1795         struct pfr_kentryworkq   addrq;
1796
1797         if (recurse) {
1798                 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1799                 pfr_clstats_kentries(&addrq, tzero, 0);
1800         }
1801         bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
1802         bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
1803         kt->pfrkt_match = kt->pfrkt_nomatch = 0;
1804         kt->pfrkt_tzero = tzero;
1805 }
1806
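/*
 * Allocate a new ktable: copy the descriptor, optionally attach it to its
 * ruleset, and initialize the IPv4 and IPv6 radix heads.
 */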
1807 static struct pfr_ktable *
1808 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
1809 {
1810         struct pfr_ktable       *kt;
1811         struct pf_ruleset       *rs;
1812
1813         PF_RULES_WASSERT();
1814
1815         kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
1816         if (kt == NULL)
1817                 return (NULL);
1818         kt->pfrkt_t = *tbl;
1819
1820         if (attachruleset) {
1821                 rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
1822                 if (!rs) {
1823                         pfr_destroy_ktable(kt, 0);
1824                         return (NULL);
1825                 }
1826                 kt->pfrkt_rs = rs;
1827                 rs->tables++;
1828         }
1829
1830         if (!rn_inithead((void **)&kt->pfrkt_ip4,
1831             offsetof(struct sockaddr_in, sin_addr) * 8) ||
1832             !rn_inithead((void **)&kt->pfrkt_ip6,
1833             offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
1834                 pfr_destroy_ktable(kt, 0);
1835                 return (NULL);
1836         }
1837         kt->pfrkt_tzero = tzero;
1838
1839         return (kt);
1840 }
1841
1842 static void
1843 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1844 {
1845         struct pfr_ktable       *p, *q;
1846
1847         for (p = SLIST_FIRST(workq); p; p = q) {
1848                 q = SLIST_NEXT(p, pfrkt_workq);
1849                 pfr_destroy_ktable(p, flushaddr);
1850         }
1851 }
1852
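/*
 * Free a ktable: optionally flush its addresses first, then detach the
 * radix heads, destroy any shadow, drop the ruleset reference and free
 * the structure.
 */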
1853 static void
1854 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
1855 {
1856         struct pfr_kentryworkq   addrq;
1857
1858         if (flushaddr) {
1859                 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1860                 pfr_clean_node_mask(kt, &addrq);
1861                 pfr_destroy_kentries(&addrq);
1862         }
1863         if (kt->pfrkt_ip4 != NULL)
1864                 rn_detachhead((void **)&kt->pfrkt_ip4);
1865         if (kt->pfrkt_ip6 != NULL)
1866                 rn_detachhead((void **)&kt->pfrkt_ip6);
1867         if (kt->pfrkt_shadow != NULL)
1868                 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
1869         if (kt->pfrkt_rs != NULL) {
1870                 kt->pfrkt_rs->tables--;
1871                 pf_remove_if_empty_ruleset(kt->pfrkt_rs);
1872         }
1873         free(kt, M_PFTABLE);
1874 }
1875
1876 static int
1877 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
1878 {
1879         int d;
1880
1881         if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
1882                 return (d);
1883         return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
1884 }
1885
1886 static struct pfr_ktable *
1887 pfr_lookup_table(struct pfr_table *tbl)
1888 {
1889         /* struct pfr_ktable starts like a struct pfr_table */
1890         return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
1891             (struct pfr_ktable *)tbl));
1892 }
1893
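/*
 * Look up an address in the table, falling back to the root table when an
 * anchored table is not active.  Updates the match/nomatch counters and
 * returns 1 on a non-negated match.
 */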
1894 int
1895 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
1896 {
1897         struct pfr_kentry       *ke = NULL;
1898         int                      match;
1899
1900         PF_RULES_RASSERT();
1901
1902         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1903                 kt = kt->pfrkt_root;
1904         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1905                 return (0);
1906
1907         switch (af) {
1908 #ifdef INET
1909         case AF_INET:
1910             {
1911                 struct sockaddr_in sin;
1912
1913                 bzero(&sin, sizeof(sin));
1914                 sin.sin_len = sizeof(sin);
1915                 sin.sin_family = AF_INET;
1916                 sin.sin_addr.s_addr = a->addr32[0];
1917                 ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
1918                 if (ke && KENTRY_RNF_ROOT(ke))
1919                         ke = NULL;
1920                 break;
1921             }
1922 #endif /* INET */
1923 #ifdef INET6
1924         case AF_INET6:
1925             {
1926                 struct sockaddr_in6 sin6;
1927
1928                 bzero(&sin6, sizeof(sin6));
1929                 sin6.sin6_len = sizeof(sin6);
1930                 sin6.sin6_family = AF_INET6;
1931                 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
1932                 ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
1933                 if (ke && KENTRY_RNF_ROOT(ke))
1934                         ke = NULL;
1935                 break;
1936             }
1937 #endif /* INET6 */
1938         }
1939         match = (ke && !ke->pfrke_not);
1940         if (match)
1941                 kt->pfrkt_match++;
1942         else
1943                 kt->pfrkt_nomatch++;
1944         return (match);
1945 }
1946
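/*
 * Account a packet of 'len' bytes against the table and, if the table has
 * PFR_TFLAG_COUNTERS, against the matching entry.  A lookup result that
 * contradicts the rule's expectation is counted as PFR_OP_XPASS.
 */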
1947 void
1948 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
1949     u_int64_t len, int dir_out, int op_pass, int notrule)
1950 {
1951         struct pfr_kentry       *ke = NULL;
1952
1953         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1954                 kt = kt->pfrkt_root;
1955         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1956                 return;
1957
1958         switch (af) {
1959 #ifdef INET
1960         case AF_INET:
1961             {
1962                 struct sockaddr_in sin;
1963
1964                 bzero(&sin, sizeof(sin));
1965                 sin.sin_len = sizeof(sin);
1966                 sin.sin_family = AF_INET;
1967                 sin.sin_addr.s_addr = a->addr32[0];
1968                 ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
1969                 if (ke && KENTRY_RNF_ROOT(ke))
1970                         ke = NULL;
1971                 break;
1972             }
1973 #endif /* INET */
1974 #ifdef INET6
1975         case AF_INET6:
1976             {
1977                 struct sockaddr_in6 sin6;
1978
1979                 bzero(&sin6, sizeof(sin6));
1980                 sin6.sin6_len = sizeof(sin6);
1981                 sin6.sin6_family = AF_INET6;
1982                 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
1983                 ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
1984                 if (ke && KENTRY_RNF_ROOT(ke))
1985                         ke = NULL;
1986                 break;
1987             }
1988 #endif /* INET6 */
1989         default:
1990                 panic("%s: unknown address family %u", __func__, af);
1991         }
1992         if ((ke == NULL || ke->pfrke_not) != notrule) {
1993                 if (op_pass != PFR_OP_PASS)
1994                         printf("pfr_update_stats: assertion failed.\n");
1995                 op_pass = PFR_OP_XPASS;
1996         }
1997         kt->pfrkt_packets[dir_out][op_pass]++;
1998         kt->pfrkt_bytes[dir_out][op_pass] += len;
1999         if (ke != NULL && op_pass != PFR_OP_XPASS &&
2000             (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
2001                 if (ke->pfrke_counters == NULL)
2002                         ke->pfrke_counters = uma_zalloc(V_pfr_kcounters_z,
2003                             M_NOWAIT | M_ZERO);
2004                 if (ke->pfrke_counters != NULL) {
2005                         ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
2006                         ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
2007                 }
2008         }
2009 }
2010
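/*
 * Find or create the table referenced by a rule (plus its root table for
 * anchored rules) and take a rule reference on it.
 */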
2011 struct pfr_ktable *
2012 pfr_attach_table(struct pf_ruleset *rs, char *name)
2013 {
2014         struct pfr_ktable       *kt, *rt;
2015         struct pfr_table         tbl;
2016         struct pf_anchor        *ac = rs->anchor;
2017
2018         PF_RULES_WASSERT();
2019
2020         bzero(&tbl, sizeof(tbl));
2021         strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2022         if (ac != NULL)
2023                 strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2024         kt = pfr_lookup_table(&tbl);
2025         if (kt == NULL) {
2026                 kt = pfr_create_ktable(&tbl, time_second, 1);
2027                 if (kt == NULL)
2028                         return (NULL);
2029                 if (ac != NULL) {
2030                         bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2031                         rt = pfr_lookup_table(&tbl);
2032                         if (rt == NULL) {
2033                                 rt = pfr_create_ktable(&tbl, 0, 1);
2034                                 if (rt == NULL) {
2035                                         pfr_destroy_ktable(kt, 0);
2036                                         return (NULL);
2037                                 }
2038                                 pfr_insert_ktable(rt);
2039                         }
2040                         kt->pfrkt_root = rt;
2041                 }
2042                 pfr_insert_ktable(kt);
2043         }
2044         if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2045                 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2046         return (kt);
2047 }
2048
2049 void
2050 pfr_detach_table(struct pfr_ktable *kt)
2051 {
2052
2053         PF_RULES_WASSERT();
2054         KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
2055             __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));
2056
2057         if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2058                 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2059 }
2060
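/*
 * Pick the next address from a table for address-pool use: *pidx selects
 * the block within the table and *counter the position inside it; nested
 * blocks are stepped over.  Returns 0 on success, 1 if the table is
 * exhausted and -1 if it is not active.
 */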
2061 int
2062 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2063     sa_family_t af)
2064 {
2065         struct pf_addr           *addr, *cur, *mask;
2066         union sockaddr_union     uaddr, umask;
2067         struct pfr_kentry       *ke, *ke2 = NULL;
2068         int                      idx = -1, use_counter = 0;
2069
2070         switch (af) {
2071         case AF_INET:
2072                 uaddr.sin.sin_len = sizeof(struct sockaddr_in);
2073                 uaddr.sin.sin_family = AF_INET;
2074                 break;
2075         case AF_INET6:
2076                 uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
2077                 uaddr.sin6.sin6_family = AF_INET6;
2078                 break;
2079         }
2080         addr = SUNION2PF(&uaddr, af);
2081
2082         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2083                 kt = kt->pfrkt_root;
2084         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2085                 return (-1);
2086
2087         if (pidx != NULL)
2088                 idx = *pidx;
2089         if (counter != NULL && idx >= 0)
2090                 use_counter = 1;
2091         if (idx < 0)
2092                 idx = 0;
2093
2094 _next_block:
2095         ke = pfr_kentry_byidx(kt, idx, af);
2096         if (ke == NULL) {
2097                 kt->pfrkt_nomatch++;
2098                 return (1);
2099         }
2100         pfr_prepare_network(&umask, af, ke->pfrke_net);
2101         cur = SUNION2PF(&ke->pfrke_sa, af);
2102         mask = SUNION2PF(&umask, af);
2103
2104         if (use_counter) {
2105                 /* is supplied address within block? */
2106                 if (!PF_MATCHA(0, cur, mask, counter, af)) {
2107                         /* no, go to next block in table */
2108                         idx++;
2109                         use_counter = 0;
2110                         goto _next_block;
2111                 }
2112                 PF_ACPY(addr, counter, af);
2113         } else {
2114                 /* use first address of block */
2115                 PF_ACPY(addr, cur, af);
2116         }
2117
2118         if (!KENTRY_NETWORK(ke)) {
2119                 /* this is a single IP address - no possible nested block */
2120                 PF_ACPY(counter, addr, af);
2121                 *pidx = idx;
2122                 kt->pfrkt_match++;
2123                 return (0);
2124         }
2125         for (;;) {
2126                 /* we don't want to use a nested block */
2127                 switch (af) {
2128                 case AF_INET:
2129                         ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2130                             &kt->pfrkt_ip4->rh);
2131                         break;
2132                 case AF_INET6:
2133                         ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2134                             &kt->pfrkt_ip6->rh);
2135                         break;
2136                 }
2137                 /* no need to check KENTRY_RNF_ROOT() here */
2138                 if (ke2 == ke) {
2139                         /* lookup returned the same block - perfect */
2140                         PF_ACPY(counter, addr, af);
2141                         *pidx = idx;
2142                         kt->pfrkt_match++;
2143                         return (0);
2144                 }
2145
2146                 /* we need to increase the counter past the nested block */
2147                 pfr_prepare_network(&umask, af, ke2->pfrke_net);
2148                 PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af);
2149                 PF_AINC(addr, af);
2150                 if (!PF_MATCHA(0, cur, mask, addr, af)) {
2151                         /* ok, we reached the end of our main block */
2152                         /* go to next block in table */
2153                         idx++;
2154                         use_counter = 0;
2155                         goto _next_block;
2156                 }
2157         }
2158 }
2159
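/*
 * Return the idx-th entry of the given address family by walking the
 * table's radix tree.
 */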
2160 static struct pfr_kentry *
2161 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2162 {
2163         struct pfr_walktree     w;
2164
2165         bzero(&w, sizeof(w));
2166         w.pfrw_op = PFRW_POOL_GET;
2167         w.pfrw_cnt = idx;
2168
2169         switch (af) {
2170 #ifdef INET
2171         case AF_INET:
2172                 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2173                 return (w.pfrw_kentry);
2174 #endif /* INET */
2175 #ifdef INET6
2176         case AF_INET6:
2177                 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2178                 return (w.pfrw_kentry);
2179 #endif /* INET6 */
2180         default:
2181                 return (NULL);
2182         }
2183 }
2184
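/*
 * Refresh a dynamic address from the table: walk the IPv4 and IPv6 radix
 * trees and recompute the per-family address counts.
 */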
2185 void
2186 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2187 {
2188         struct pfr_walktree     w;
2189
2190         bzero(&w, sizeof(w));
2191         w.pfrw_op = PFRW_DYNADDR_UPDATE;
2192         w.pfrw_dyn = dyn;
2193
2194         dyn->pfid_acnt4 = 0;
2195         dyn->pfid_acnt6 = 0;
2196         if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2197                 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2198         if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2199                 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2200 }