1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002 Cedric Berger
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  *    - Redistributions of source code must retain the above copyright
12  *      notice, this list of conditions and the following disclaimer.
13  *    - Redistributions in binary form must reproduce the above
14  *      copyright notice, this list of conditions and the following
15  *      disclaimer in the documentation and/or other materials provided
16  *      with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  *      $OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
32  */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39
40 #include <sys/param.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
44 #include <sys/mbuf.h>
45 #include <sys/mutex.h>
46 #include <sys/refcount.h>
47 #include <sys/socket.h>
48 #include <vm/uma.h>
49
50 #include <net/if.h>
51 #include <net/vnet.h>
52 #include <net/pfvar.h>
53
54 #define ACCEPT_FLAGS(flags, oklist)             \
55         do {                                    \
56                 if ((flags & ~(oklist)) &       \
57                     PFR_FLAG_ALLMASK)           \
58                         return (EINVAL);        \
59         } while (0)
60
61 #define FILLIN_SIN(sin, addr)                   \
62         do {                                    \
63                 (sin).sin_len = sizeof(sin);    \
64                 (sin).sin_family = AF_INET;     \
65                 (sin).sin_addr = (addr);        \
66         } while (0)
67
68 #define FILLIN_SIN6(sin6, addr)                 \
69         do {                                    \
70                 (sin6).sin6_len = sizeof(sin6); \
71                 (sin6).sin6_family = AF_INET6;  \
72                 (sin6).sin6_addr = (addr);      \
73         } while (0)
74
75 #define SWAP(type, a1, a2)                      \
76         do {                                    \
77                 type tmp = a1;                  \
78                 a1 = a2;                        \
79                 a2 = tmp;                       \
80         } while (0)
81
82 #define SUNION2PF(su, af) (((af)==AF_INET) ?    \
83     (struct pf_addr *)&(su)->sin.sin_addr :     \
84     (struct pf_addr *)&(su)->sin6.sin6_addr)
85
86 #define AF_BITS(af)             (((af)==AF_INET)?32:128)
87 #define ADDR_NETWORK(ad)        ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
88 #define KENTRY_NETWORK(ke)      ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
89 #define KENTRY_RNF_ROOT(ke) \
90                 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
91
92 #define NO_ADDRESSES            (-1)
93 #define ENQUEUE_UNMARKED_ONLY   (1)
94 #define INVERT_NEG_FLAG         (1)
95
96 struct pfr_walktree {
97         enum pfrw_op {
98                 PFRW_MARK,
99                 PFRW_SWEEP,
100                 PFRW_ENQUEUE,
101                 PFRW_GET_ADDRS,
102                 PFRW_GET_ASTATS,
103                 PFRW_POOL_GET,
104                 PFRW_DYNADDR_UPDATE
105         }        pfrw_op;
106         union {
107                 struct pfr_addr         *pfrw1_addr;
108                 struct pfr_astats       *pfrw1_astats;
109                 struct pfr_kentryworkq  *pfrw1_workq;
110                 struct pfr_kentry       *pfrw1_kentry;
111                 struct pfi_dynaddr      *pfrw1_dyn;
112         }        pfrw_1;
113         int      pfrw_free;
114 };
115 #define pfrw_addr       pfrw_1.pfrw1_addr
116 #define pfrw_astats     pfrw_1.pfrw1_astats
117 #define pfrw_workq      pfrw_1.pfrw1_workq
118 #define pfrw_kentry     pfrw_1.pfrw1_kentry
119 #define pfrw_dyn        pfrw_1.pfrw1_dyn
120 #define pfrw_cnt        pfrw_free
121
122 #define senderr(e)      do { rv = (e); goto _bad; } while (0)
123
124 static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
125 static VNET_DEFINE(uma_zone_t, pfr_kentry_z);
126 #define V_pfr_kentry_z          VNET(pfr_kentry_z)
127 static VNET_DEFINE(uma_zone_t, pfr_kcounters_z);
128 #define V_pfr_kcounters_z       VNET(pfr_kcounters_z)
129
130 static struct pf_addr    pfr_ffaddr = {
131         .addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
132 };
133
134 static void              pfr_copyout_addr(struct pfr_addr *,
135                             struct pfr_kentry *ke);
136 static int               pfr_validate_addr(struct pfr_addr *);
137 static void              pfr_enqueue_addrs(struct pfr_ktable *,
138                             struct pfr_kentryworkq *, int *, int);
139 static void              pfr_mark_addrs(struct pfr_ktable *);
140 static struct pfr_kentry
141                         *pfr_lookup_addr(struct pfr_ktable *,
142                             struct pfr_addr *, int);
143 static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *);
144 static void              pfr_destroy_kentries(struct pfr_kentryworkq *);
145 static void              pfr_destroy_kentry(struct pfr_kentry *);
146 static void              pfr_insert_kentries(struct pfr_ktable *,
147                             struct pfr_kentryworkq *, long);
148 static void              pfr_remove_kentries(struct pfr_ktable *,
149                             struct pfr_kentryworkq *);
150 static void              pfr_clstats_kentries(struct pfr_kentryworkq *, long,
151                             int);
152 static void              pfr_reset_feedback(struct pfr_addr *, int);
153 static void              pfr_prepare_network(union sockaddr_union *, int, int);
154 static int               pfr_route_kentry(struct pfr_ktable *,
155                             struct pfr_kentry *);
156 static int               pfr_unroute_kentry(struct pfr_ktable *,
157                             struct pfr_kentry *);
158 static int               pfr_walktree(struct radix_node *, void *);
159 static int               pfr_validate_table(struct pfr_table *, int, int);
160 static int               pfr_fix_anchor(char *);
161 static void              pfr_commit_ktable(struct pfr_ktable *, long);
162 static void              pfr_insert_ktables(struct pfr_ktableworkq *);
163 static void              pfr_insert_ktable(struct pfr_ktable *);
164 static void              pfr_setflags_ktables(struct pfr_ktableworkq *);
165 static void              pfr_setflags_ktable(struct pfr_ktable *, int);
166 static void              pfr_clstats_ktables(struct pfr_ktableworkq *, long,
167                             int);
168 static void              pfr_clstats_ktable(struct pfr_ktable *, long, int);
169 static struct pfr_ktable
170                         *pfr_create_ktable(struct pfr_table *, long, int);
171 static void              pfr_destroy_ktables(struct pfr_ktableworkq *, int);
172 static void              pfr_destroy_ktable(struct pfr_ktable *, int);
173 static int               pfr_ktable_compare(struct pfr_ktable *,
174                             struct pfr_ktable *);
175 static struct pfr_ktable
176                         *pfr_lookup_table(struct pfr_table *);
177 static void              pfr_clean_node_mask(struct pfr_ktable *,
178                             struct pfr_kentryworkq *);
179 static int               pfr_skip_table(struct pfr_table *,
180                             struct pfr_ktable *, int);
181 static struct pfr_kentry
182                         *pfr_kentry_byidx(struct pfr_ktable *, int, int);
183
184 static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
185 static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
186
187 static VNET_DEFINE(struct pfr_ktablehead, pfr_ktables);
188 #define V_pfr_ktables   VNET(pfr_ktables)
189
190 static VNET_DEFINE(struct pfr_table, pfr_nulltable);
191 #define V_pfr_nulltable VNET(pfr_nulltable)
192
193 static VNET_DEFINE(int, pfr_ktable_cnt);
194 #define V_pfr_ktable_cnt        VNET(pfr_ktable_cnt)
195
196 void
197 pfr_initialize(void)
198 {
199
200         V_pfr_kentry_z = uma_zcreate("pf table entries",
201             sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
202             0);
203         V_pfr_kcounters_z = uma_zcreate("pf table counters",
204             sizeof(struct pfr_kcounters), NULL, NULL, NULL, NULL,
205             UMA_ALIGN_PTR, 0);
206         V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
207         V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
208 }
209
210 void
211 pfr_cleanup(void)
212 {
213
214         uma_zdestroy(V_pfr_kentry_z);
215         uma_zdestroy(V_pfr_kcounters_z);
216 }
217
218 int
219 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
220 {
221         struct pfr_ktable       *kt;
222         struct pfr_kentryworkq   workq;
223
224         PF_RULES_WASSERT();
225
226         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
227         if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
228                 return (EINVAL);
229         kt = pfr_lookup_table(tbl);
230         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
231                 return (ESRCH);
232         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
233                 return (EPERM);
234         pfr_enqueue_addrs(kt, &workq, ndel, 0);
235
236         if (!(flags & PFR_FLAG_DUMMY)) {
237                 pfr_remove_kentries(kt, &workq);
238                 KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__));
239         }
240         return (0);
241 }
242
243 int
244 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
245     int *nadd, int flags)
246 {
247         struct pfr_ktable       *kt, *tmpkt;
248         struct pfr_kentryworkq   workq;
249         struct pfr_kentry       *p, *q;
250         struct pfr_addr         *ad;
251         int                      i, rv, xadd = 0;
252         long                     tzero = time_second;
253
254         PF_RULES_WASSERT();
255
256         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
257         if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
258                 return (EINVAL);
259         kt = pfr_lookup_table(tbl);
260         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
261                 return (ESRCH);
262         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
263                 return (EPERM);
264         tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
265         if (tmpkt == NULL)
266                 return (ENOMEM);
267         SLIST_INIT(&workq);
268         for (i = 0, ad = addr; i < size; i++, ad++) {
269                 if (pfr_validate_addr(ad))
270                         senderr(EINVAL);
271                 p = pfr_lookup_addr(kt, ad, 1);
272                 q = pfr_lookup_addr(tmpkt, ad, 1);
273                 if (flags & PFR_FLAG_FEEDBACK) {
274                         if (q != NULL)
275                                 ad->pfra_fback = PFR_FB_DUPLICATE;
276                         else if (p == NULL)
277                                 ad->pfra_fback = PFR_FB_ADDED;
278                         else if (p->pfrke_not != ad->pfra_not)
279                                 ad->pfra_fback = PFR_FB_CONFLICT;
280                         else
281                                 ad->pfra_fback = PFR_FB_NONE;
282                 }
283                 if (p == NULL && q == NULL) {
284                         p = pfr_create_kentry(ad);
285                         if (p == NULL)
286                                 senderr(ENOMEM);
287                         if (pfr_route_kentry(tmpkt, p)) {
288                                 pfr_destroy_kentry(p);
289                                 ad->pfra_fback = PFR_FB_NONE;
290                         } else {
291                                 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
292                                 xadd++;
293                         }
294                 }
295         }
296         pfr_clean_node_mask(tmpkt, &workq);
297         if (!(flags & PFR_FLAG_DUMMY))
298                 pfr_insert_kentries(kt, &workq, tzero);
299         else
300                 pfr_destroy_kentries(&workq);
301         if (nadd != NULL)
302                 *nadd = xadd;
303         pfr_destroy_ktable(tmpkt, 0);
304         return (0);
305 _bad:
306         pfr_clean_node_mask(tmpkt, &workq);
307         pfr_destroy_kentries(&workq);
308         if (flags & PFR_FLAG_FEEDBACK)
309                 pfr_reset_feedback(addr, size);
310         pfr_destroy_ktable(tmpkt, 0);
311         return (rv);
312 }
313
314 int
315 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
316     int *ndel, int flags)
317 {
318         struct pfr_ktable       *kt;
319         struct pfr_kentryworkq   workq;
320         struct pfr_kentry       *p;
321         struct pfr_addr         *ad;
322         int                      i, rv, xdel = 0, log = 1;
323
324         PF_RULES_WASSERT();
325
326         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
327         if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
328                 return (EINVAL);
329         kt = pfr_lookup_table(tbl);
330         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
331                 return (ESRCH);
332         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
333                 return (EPERM);
334         /*
335          * There are two algorithms to choose from here,
336          * with:
337          *   n: number of addresses to delete
338          *   N: number of addresses in the table
339          *
340          * One is O(N) and is better for large 'n'.
341          * One is O(n*LOG(N)) and is better for small 'n'.
342          *
343          * The following code tries to decide which one is best.
344          */
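        /*
         * For example, with N = 1000 entries the loop below yields
         * log = 11 (roughly log2(N) + 2), so a request deleting more
         * than 1000 / 11 = 90 addresses clears the marks with one
         * full-tree walk (pfr_mark_addrs()), while smaller batches
         * reset them with per-address lookups instead.
         */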
345         for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
346                 log++;
347         if (size > kt->pfrkt_cnt/log) {
348                 /* full table scan */
349                 pfr_mark_addrs(kt);
350         } else {
351                 /* iterate over addresses to delete */
352                 for (i = 0, ad = addr; i < size; i++, ad++) {
353                         if (pfr_validate_addr(ad))
354                                 return (EINVAL);
355                         p = pfr_lookup_addr(kt, ad, 1);
356                         if (p != NULL)
357                                 p->pfrke_mark = 0;
358                 }
359         }
360         SLIST_INIT(&workq);
361         for (i = 0, ad = addr; i < size; i++, ad++) {
362                 if (pfr_validate_addr(ad))
363                         senderr(EINVAL);
364                 p = pfr_lookup_addr(kt, ad, 1);
365                 if (flags & PFR_FLAG_FEEDBACK) {
366                         if (p == NULL)
367                                 ad->pfra_fback = PFR_FB_NONE;
368                         else if (p->pfrke_not != ad->pfra_not)
369                                 ad->pfra_fback = PFR_FB_CONFLICT;
370                         else if (p->pfrke_mark)
371                                 ad->pfra_fback = PFR_FB_DUPLICATE;
372                         else
373                                 ad->pfra_fback = PFR_FB_DELETED;
374                 }
375                 if (p != NULL && p->pfrke_not == ad->pfra_not &&
376                     !p->pfrke_mark) {
377                         p->pfrke_mark = 1;
378                         SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
379                         xdel++;
380                 }
381         }
382         if (!(flags & PFR_FLAG_DUMMY))
383                 pfr_remove_kentries(kt, &workq);
384         if (ndel != NULL)
385                 *ndel = xdel;
386         return (0);
387 _bad:
388         if (flags & PFR_FLAG_FEEDBACK)
389                 pfr_reset_feedback(addr, size);
390         return (rv);
391 }
392
393 int
394 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
395     int *size2, int *nadd, int *ndel, int *nchange, int flags,
396     u_int32_t ignore_pfrt_flags)
397 {
398         struct pfr_ktable       *kt, *tmpkt;
399         struct pfr_kentryworkq   addq, delq, changeq;
400         struct pfr_kentry       *p, *q;
401         struct pfr_addr          ad;
402         int                      i, rv, xadd = 0, xdel = 0, xchange = 0;
403         long                     tzero = time_second;
404
405         PF_RULES_WASSERT();
406
407         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
408         if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
409             PFR_FLAG_USERIOCTL))
410                 return (EINVAL);
411         kt = pfr_lookup_table(tbl);
412         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
413                 return (ESRCH);
414         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
415                 return (EPERM);
416         tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
417         if (tmpkt == NULL)
418                 return (ENOMEM);
419         pfr_mark_addrs(kt);
420         SLIST_INIT(&addq);
421         SLIST_INIT(&delq);
422         SLIST_INIT(&changeq);
423         for (i = 0; i < size; i++) {
424                 /*
425                  * XXXGL: understand pf_if usage of this function
426                  * and make ad a moving pointer
427                  */
428                 bcopy(addr + i, &ad, sizeof(ad));
429                 if (pfr_validate_addr(&ad))
430                         senderr(EINVAL);
431                 ad.pfra_fback = PFR_FB_NONE;
432                 p = pfr_lookup_addr(kt, &ad, 1);
433                 if (p != NULL) {
434                         if (p->pfrke_mark) {
435                                 ad.pfra_fback = PFR_FB_DUPLICATE;
436                                 goto _skip;
437                         }
438                         p->pfrke_mark = 1;
439                         if (p->pfrke_not != ad.pfra_not) {
440                                 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
441                                 ad.pfra_fback = PFR_FB_CHANGED;
442                                 xchange++;
443                         }
444                 } else {
445                         q = pfr_lookup_addr(tmpkt, &ad, 1);
446                         if (q != NULL) {
447                                 ad.pfra_fback = PFR_FB_DUPLICATE;
448                                 goto _skip;
449                         }
450                         p = pfr_create_kentry(&ad);
451                         if (p == NULL)
452                                 senderr(ENOMEM);
453                         if (pfr_route_kentry(tmpkt, p)) {
454                                 pfr_destroy_kentry(p);
455                                 ad.pfra_fback = PFR_FB_NONE;
456                         } else {
457                                 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
458                                 ad.pfra_fback = PFR_FB_ADDED;
459                                 xadd++;
460                         }
461                 }
462 _skip:
463                 if (flags & PFR_FLAG_FEEDBACK)
464                         bcopy(&ad, addr + i, sizeof(ad));
465         }
466         pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
467         if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
468                 if (*size2 < size+xdel) {
469                         *size2 = size+xdel;
470                         senderr(0);
471                 }
472                 i = 0;
473                 SLIST_FOREACH(p, &delq, pfrke_workq) {
474                         pfr_copyout_addr(&ad, p);
475                         ad.pfra_fback = PFR_FB_DELETED;
476                         bcopy(&ad, addr + size + i, sizeof(ad));
477                         i++;
478                 }
479         }
480         pfr_clean_node_mask(tmpkt, &addq);
481         if (!(flags & PFR_FLAG_DUMMY)) {
482                 pfr_insert_kentries(kt, &addq, tzero);
483                 pfr_remove_kentries(kt, &delq);
484                 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
485         } else
486                 pfr_destroy_kentries(&addq);
487         if (nadd != NULL)
488                 *nadd = xadd;
489         if (ndel != NULL)
490                 *ndel = xdel;
491         if (nchange != NULL)
492                 *nchange = xchange;
493         if ((flags & PFR_FLAG_FEEDBACK) && size2)
494                 *size2 = size+xdel;
495         pfr_destroy_ktable(tmpkt, 0);
496         return (0);
497 _bad:
498         pfr_clean_node_mask(tmpkt, &addq);
499         pfr_destroy_kentries(&addq);
500         if (flags & PFR_FLAG_FEEDBACK)
501                 pfr_reset_feedback(addr, size);
502         pfr_destroy_ktable(tmpkt, 0);
503         return (rv);
504 }
505
506 int
507 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
508         int *nmatch, int flags)
509 {
510         struct pfr_ktable       *kt;
511         struct pfr_kentry       *p;
512         struct pfr_addr         *ad;
513         int                      i, xmatch = 0;
514
515         PF_RULES_RASSERT();
516
517         ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
518         if (pfr_validate_table(tbl, 0, 0))
519                 return (EINVAL);
520         kt = pfr_lookup_table(tbl);
521         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
522                 return (ESRCH);
523
524         for (i = 0, ad = addr; i < size; i++, ad++) {
525                 if (pfr_validate_addr(ad))
526                         return (EINVAL);
527                 if (ADDR_NETWORK(ad))
528                         return (EINVAL);
529                 p = pfr_lookup_addr(kt, ad, 0);
530                 if (flags & PFR_FLAG_REPLACE)
531                         pfr_copyout_addr(ad, p);
532                 ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
533                     (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
534                 if (p != NULL && !p->pfrke_not)
535                         xmatch++;
536         }
537         if (nmatch != NULL)
538                 *nmatch = xmatch;
539         return (0);
540 }
541
542 int
543 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
544         int flags)
545 {
546         struct pfr_ktable       *kt;
547         struct pfr_walktree      w;
548         int                      rv;
549
550         PF_RULES_RASSERT();
551
552         ACCEPT_FLAGS(flags, 0);
553         if (pfr_validate_table(tbl, 0, 0))
554                 return (EINVAL);
555         kt = pfr_lookup_table(tbl);
556         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
557                 return (ESRCH);
558         if (kt->pfrkt_cnt > *size) {
559                 *size = kt->pfrkt_cnt;
560                 return (0);
561         }
562
563         bzero(&w, sizeof(w));
564         w.pfrw_op = PFRW_GET_ADDRS;
565         w.pfrw_addr = addr;
566         w.pfrw_free = kt->pfrkt_cnt;
567         rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
568         if (!rv)
569                 rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
570                     pfr_walktree, &w);
571         if (rv)
572                 return (rv);
573
574         KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
575             w.pfrw_free));
576
577         *size = kt->pfrkt_cnt;
578         return (0);
579 }
580
581 int
582 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
583         int flags)
584 {
585         struct pfr_ktable       *kt;
586         struct pfr_walktree      w;
587         struct pfr_kentryworkq   workq;
588         int                      rv;
589         long                     tzero = time_second;
590
591         PF_RULES_RASSERT();
592
593         /* XXX PFR_FLAG_CLSTATS disabled */
594         ACCEPT_FLAGS(flags, 0);
595         if (pfr_validate_table(tbl, 0, 0))
596                 return (EINVAL);
597         kt = pfr_lookup_table(tbl);
598         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
599                 return (ESRCH);
600         if (kt->pfrkt_cnt > *size) {
601                 *size = kt->pfrkt_cnt;
602                 return (0);
603         }
604
605         bzero(&w, sizeof(w));
606         w.pfrw_op = PFRW_GET_ASTATS;
607         w.pfrw_astats = addr;
608         w.pfrw_free = kt->pfrkt_cnt;
609         rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
610         if (!rv)
611                 rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
612                     pfr_walktree, &w);
613         if (!rv && (flags & PFR_FLAG_CLSTATS)) {
614                 pfr_enqueue_addrs(kt, &workq, NULL, 0);
615                 pfr_clstats_kentries(&workq, tzero, 0);
616         }
617         if (rv)
618                 return (rv);
619
620         if (w.pfrw_free) {
621                 printf("pfr_get_astats: corruption detected (%d).\n",
622                     w.pfrw_free);
623                 return (ENOTTY);
624         }
625         *size = kt->pfrkt_cnt;
626         return (0);
627 }
628
629 int
630 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
631     int *nzero, int flags)
632 {
633         struct pfr_ktable       *kt;
634         struct pfr_kentryworkq   workq;
635         struct pfr_kentry       *p;
636         struct pfr_addr         *ad;
637         int                      i, rv, xzero = 0;
638
639         PF_RULES_WASSERT();
640
641         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
642         if (pfr_validate_table(tbl, 0, 0))
643                 return (EINVAL);
644         kt = pfr_lookup_table(tbl);
645         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
646                 return (ESRCH);
647         SLIST_INIT(&workq);
648         for (i = 0, ad = addr; i < size; i++, ad++) {
649                 if (pfr_validate_addr(ad))
650                         senderr(EINVAL);
651                 p = pfr_lookup_addr(kt, ad, 1);
652                 if (flags & PFR_FLAG_FEEDBACK) {
653                         ad->pfra_fback = (p != NULL) ?
654                             PFR_FB_CLEARED : PFR_FB_NONE;
655                 }
656                 if (p != NULL) {
657                         SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
658                         xzero++;
659                 }
660         }
661
662         if (!(flags & PFR_FLAG_DUMMY))
663                 pfr_clstats_kentries(&workq, 0, 0);
664         if (nzero != NULL)
665                 *nzero = xzero;
666         return (0);
667 _bad:
668         if (flags & PFR_FLAG_FEEDBACK)
669                 pfr_reset_feedback(addr, size);
670         return (rv);
671 }
672
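/*
 * Sanity-check a pfr_addr coming from userland: the prefix length must
 * fit the address family, every address bit beyond the prefix must be
 * zero, pfra_not must be 0 or 1, and pfra_fback must be clear.
 */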
673 static int
674 pfr_validate_addr(struct pfr_addr *ad)
675 {
676         int i;
677
678         switch (ad->pfra_af) {
679 #ifdef INET
680         case AF_INET:
681                 if (ad->pfra_net > 32)
682                         return (-1);
683                 break;
684 #endif /* INET */
685 #ifdef INET6
686         case AF_INET6:
687                 if (ad->pfra_net > 128)
688                         return (-1);
689                 break;
690 #endif /* INET6 */
691         default:
692                 return (-1);
693         }
694         if (ad->pfra_net < 128 &&
695                 (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
696                         return (-1);
697         for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
698                 if (((caddr_t)ad)[i])
699                         return (-1);
700         if (ad->pfra_not && ad->pfra_not != 1)
701                 return (-1);
702         if (ad->pfra_fback)
703                 return (-1);
704         return (0);
705 }
706
707 static void
708 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
709         int *naddr, int sweep)
710 {
711         struct pfr_walktree     w;
712
713         SLIST_INIT(workq);
714         bzero(&w, sizeof(w));
715         w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
716         w.pfrw_workq = workq;
717         if (kt->pfrkt_ip4 != NULL)
718                 if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh,
719                     pfr_walktree, &w))
720                         printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
721         if (kt->pfrkt_ip6 != NULL)
722                 if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
723                     pfr_walktree, &w))
724                         printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
725         if (naddr != NULL)
726                 *naddr = w.pfrw_cnt;
727 }
728
729 static void
730 pfr_mark_addrs(struct pfr_ktable *kt)
731 {
732         struct pfr_walktree     w;
733
734         bzero(&w, sizeof(w));
735         w.pfrw_op = PFRW_MARK;
736         if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w))
737                 printf("pfr_mark_addrs: IPv4 walktree failed.\n");
738         if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w))
739                 printf("pfr_mark_addrs: IPv6 walktree failed.\n");
740 }
741
742
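/*
 * Look up an address in the table's per-AF radix tree.  Network
 * entries (prefix shorter than the AF's full width) are found with an
 * exact rn_lookup() on the prepared mask; host entries use rn_match(),
 * and with 'exact' set a covering network entry is not accepted as a
 * match for a host address.
 */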
743 static struct pfr_kentry *
744 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
745 {
746         union sockaddr_union     sa, mask;
747         struct radix_head       *head = NULL;
748         struct pfr_kentry       *ke;
749
750         PF_RULES_ASSERT();
751
752         bzero(&sa, sizeof(sa));
753         if (ad->pfra_af == AF_INET) {
754                 FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
755                 head = &kt->pfrkt_ip4->rh;
756         } else if (ad->pfra_af == AF_INET6) {
757                 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
758                 head = &kt->pfrkt_ip6->rh;
759         }
760         if (ADDR_NETWORK(ad)) {
761                 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
762                 ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
763                 if (ke && KENTRY_RNF_ROOT(ke))
764                         ke = NULL;
765         } else {
766                 ke = (struct pfr_kentry *)rn_match(&sa, head);
767                 if (ke && KENTRY_RNF_ROOT(ke))
768                         ke = NULL;
769                 if (exact && ke && KENTRY_NETWORK(ke))
770                         ke = NULL;
771         }
772         return (ke);
773 }
774
775 static struct pfr_kentry *
776 pfr_create_kentry(struct pfr_addr *ad)
777 {
778         struct pfr_kentry       *ke;
779
780         ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
781         if (ke == NULL)
782                 return (NULL);
783
784         if (ad->pfra_af == AF_INET)
785                 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
786         else if (ad->pfra_af == AF_INET6)
787                 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
788         ke->pfrke_af = ad->pfra_af;
789         ke->pfrke_net = ad->pfra_net;
790         ke->pfrke_not = ad->pfra_not;
791         return (ke);
792 }
793
794 static void
795 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
796 {
797         struct pfr_kentry       *p, *q;
798
799         for (p = SLIST_FIRST(workq); p != NULL; p = q) {
800                 q = SLIST_NEXT(p, pfrke_workq);
801                 pfr_destroy_kentry(p);
802         }
803 }
804
805 static void
806 pfr_destroy_kentry(struct pfr_kentry *ke)
807 {
808         if (ke->pfrke_counters)
809                 uma_zfree(V_pfr_kcounters_z, ke->pfrke_counters);
810         uma_zfree(V_pfr_kentry_z, ke);
811 }
812
813 static void
814 pfr_insert_kentries(struct pfr_ktable *kt,
815     struct pfr_kentryworkq *workq, long tzero)
816 {
817         struct pfr_kentry       *p;
818         int                      rv, n = 0;
819
820         SLIST_FOREACH(p, workq, pfrke_workq) {
821                 rv = pfr_route_kentry(kt, p);
822                 if (rv) {
823                         printf("pfr_insert_kentries: cannot route entry "
824                             "(code=%d).\n", rv);
825                         break;
826                 }
827                 p->pfrke_tzero = tzero;
828                 n++;
829         }
830         kt->pfrkt_cnt += n;
831 }
832
833 int
834 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
835 {
836         struct pfr_kentry       *p;
837         int                      rv;
838
839         p = pfr_lookup_addr(kt, ad, 1);
840         if (p != NULL)
841                 return (0);
842         p = pfr_create_kentry(ad);
843         if (p == NULL)
844                 return (ENOMEM);
845
846         rv = pfr_route_kentry(kt, p);
847         if (rv)
848                 return (rv);
849
850         p->pfrke_tzero = tzero;
851         kt->pfrkt_cnt++;
852
853         return (0);
854 }
855
856 static void
857 pfr_remove_kentries(struct pfr_ktable *kt,
858     struct pfr_kentryworkq *workq)
859 {
860         struct pfr_kentry       *p;
861         int                      n = 0;
862
863         SLIST_FOREACH(p, workq, pfrke_workq) {
864                 pfr_unroute_kentry(kt, p);
865                 n++;
866         }
867         kt->pfrkt_cnt -= n;
868         pfr_destroy_kentries(workq);
869 }
870
871 static void
872 pfr_clean_node_mask(struct pfr_ktable *kt,
873     struct pfr_kentryworkq *workq)
874 {
875         struct pfr_kentry       *p;
876
877         SLIST_FOREACH(p, workq, pfrke_workq)
878                 pfr_unroute_kentry(kt, p);
879 }
880
881 static void
882 pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
883 {
884         struct pfr_kentry       *p;
885
886         SLIST_FOREACH(p, workq, pfrke_workq) {
887                 if (negchange)
888                         p->pfrke_not = !p->pfrke_not;
889                 if (p->pfrke_counters) {
890                         uma_zfree(V_pfr_kcounters_z, p->pfrke_counters);
891                         p->pfrke_counters = NULL;
892                 }
893                 p->pfrke_tzero = tzero;
894         }
895 }
896
897 static void
898 pfr_reset_feedback(struct pfr_addr *addr, int size)
899 {
900         struct pfr_addr *ad;
901         int             i;
902
903         for (i = 0, ad = addr; i < size; i++, ad++)
904                 ad->pfra_fback = PFR_FB_NONE;
905 }
906
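/*
 * Build a netmask sockaddr for the given prefix length, e.g. for
 * AF_INET and net = 24 this yields 255.255.255.0, and for AF_INET6
 * and net = 40 the first 32 mask bits are all-ones and the next word
 * carries the remaining 8.  net = 0 produces an all-zero mask.
 */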
907 static void
908 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
909 {
910         int     i;
911
912         bzero(sa, sizeof(*sa));
913         if (af == AF_INET) {
914                 sa->sin.sin_len = sizeof(sa->sin);
915                 sa->sin.sin_family = AF_INET;
916                 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
917         } else if (af == AF_INET6) {
918                 sa->sin6.sin6_len = sizeof(sa->sin6);
919                 sa->sin6.sin6_family = AF_INET6;
920                 for (i = 0; i < 4; i++) {
921                         if (net <= 32) {
922                                 sa->sin6.sin6_addr.s6_addr32[i] =
923                                     net ? htonl(-1 << (32-net)) : 0;
924                                 break;
925                         }
926                         sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
927                         net -= 32;
928                 }
929         }
930 }
931
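/*
 * Link a kentry into the table's per-AF radix tree, with the prefix
 * mask for network entries and no mask for host entries.  Returns 0 on
 * success and -1 when rn_addroute() rejects the node (typically a
 * duplicate key).
 */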
932 static int
933 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
934 {
935         union sockaddr_union     mask;
936         struct radix_node       *rn;
937         struct radix_head       *head = NULL;
938
939         PF_RULES_WASSERT();
940
941         bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
942         if (ke->pfrke_af == AF_INET)
943                 head = &kt->pfrkt_ip4->rh;
944         else if (ke->pfrke_af == AF_INET6)
945                 head = &kt->pfrkt_ip6->rh;
946
947         if (KENTRY_NETWORK(ke)) {
948                 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
949                 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
950         } else
951                 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
952
953         return (rn == NULL ? -1 : 0);
954 }
955
956 static int
957 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
958 {
959         union sockaddr_union     mask;
960         struct radix_node       *rn;
961         struct radix_head       *head = NULL;
962
963         if (ke->pfrke_af == AF_INET)
964                 head = &kt->pfrkt_ip4->rh;
965         else if (ke->pfrke_af == AF_INET6)
966                 head = &kt->pfrkt_ip6->rh;
967
968         if (KENTRY_NETWORK(ke)) {
969                 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
970                 rn = rn_delete(&ke->pfrke_sa, &mask, head);
971         } else
972                 rn = rn_delete(&ke->pfrke_sa, NULL, head);
973
974         if (rn == NULL) {
975                 printf("pfr_unroute_kentry: delete failed.\n");
976                 return (-1);
977         }
978         return (0);
979 }
980
981 static void
982 pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
983 {
984         bzero(ad, sizeof(*ad));
985         if (ke == NULL)
986                 return;
987         ad->pfra_af = ke->pfrke_af;
988         ad->pfra_net = ke->pfrke_net;
989         ad->pfra_not = ke->pfrke_not;
990         if (ad->pfra_af == AF_INET)
991                 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
992         else if (ad->pfra_af == AF_INET6)
993                 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
994 }
995
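/*
 * rn_walktree() callback; dispatches on pfrw_op to reset marks,
 * collect entries on a work queue, copy addresses or statistics out to
 * the caller's buffer, pick the n-th usable entry for an address pool,
 * or refresh a dynamic address from the first matching entry of each
 * address family.
 */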
996 static int
997 pfr_walktree(struct radix_node *rn, void *arg)
998 {
999         struct pfr_kentry       *ke = (struct pfr_kentry *)rn;
1000         struct pfr_walktree     *w = arg;
1001
1002         switch (w->pfrw_op) {
1003         case PFRW_MARK:
1004                 ke->pfrke_mark = 0;
1005                 break;
1006         case PFRW_SWEEP:
1007                 if (ke->pfrke_mark)
1008                         break;
1009                 /* FALLTHROUGH */
1010         case PFRW_ENQUEUE:
1011                 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1012                 w->pfrw_cnt++;
1013                 break;
1014         case PFRW_GET_ADDRS:
1015                 if (w->pfrw_free-- > 0) {
1016                         pfr_copyout_addr(w->pfrw_addr, ke);
1017                         w->pfrw_addr++;
1018                 }
1019                 break;
1020         case PFRW_GET_ASTATS:
1021                 if (w->pfrw_free-- > 0) {
1022                         struct pfr_astats as;
1023
1024                         pfr_copyout_addr(&as.pfras_a, ke);
1025
1026                         if (ke->pfrke_counters) {
1027                                 bcopy(ke->pfrke_counters->pfrkc_packets,
1028                                     as.pfras_packets, sizeof(as.pfras_packets));
1029                                 bcopy(ke->pfrke_counters->pfrkc_bytes,
1030                                     as.pfras_bytes, sizeof(as.pfras_bytes));
1031                         } else {
1032                                 bzero(as.pfras_packets, sizeof(as.pfras_packets));
1033                                 bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
1034                                 as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
1035                         }
1036                         as.pfras_tzero = ke->pfrke_tzero;
1037
1038                         bcopy(&as, w->pfrw_astats, sizeof(as));
1039                         w->pfrw_astats++;
1040                 }
1041                 break;
1042         case PFRW_POOL_GET:
1043                 if (ke->pfrke_not)
1044                         break; /* negative entries are ignored */
1045                 if (!w->pfrw_cnt--) {
1046                         w->pfrw_kentry = ke;
1047                         return (1); /* finish search */
1048                 }
1049                 break;
1050         case PFRW_DYNADDR_UPDATE:
1051             {
1052                 union sockaddr_union    pfr_mask;
1053
1054                 if (ke->pfrke_af == AF_INET) {
1055                         if (w->pfrw_dyn->pfid_acnt4++ > 0)
1056                                 break;
1057                         pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1058                         w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa,
1059                             AF_INET);
1060                         w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask,
1061                             AF_INET);
1062                 } else if (ke->pfrke_af == AF_INET6) {
1063                         if (w->pfrw_dyn->pfid_acnt6++ > 0)
1064                                 break;
1065                         pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1066                         w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa,
1067                             AF_INET6);
1068                         w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask,
1069                             AF_INET6);
1070                 }
1071                 break;
1072             }
1073         }
1074         return (0);
1075 }
1076
1077 int
1078 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1079 {
1080         struct pfr_ktableworkq   workq;
1081         struct pfr_ktable       *p;
1082         int                      xdel = 0;
1083
1084         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
1085         if (pfr_fix_anchor(filter->pfrt_anchor))
1086                 return (EINVAL);
1087         if (pfr_table_count(filter, flags) < 0)
1088                 return (ENOENT);
1089
1090         SLIST_INIT(&workq);
1091         RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1092                 if (pfr_skip_table(filter, p, flags))
1093                         continue;
1094                 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1095                         continue;
1096                 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1097                         continue;
1098                 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1099                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1100                 xdel++;
1101         }
1102         if (!(flags & PFR_FLAG_DUMMY))
1103                 pfr_setflags_ktables(&workq);
1104         if (ndel != NULL)
1105                 *ndel = xdel;
1106         return (0);
1107 }
1108
1109 int
1110 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1111 {
1112         struct pfr_ktableworkq   addq, changeq;
1113         struct pfr_ktable       *p, *q, *r, key;
1114         int                      i, rv, xadd = 0;
1115         long                     tzero = time_second;
1116
1117         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1118         SLIST_INIT(&addq);
1119         SLIST_INIT(&changeq);
1120         for (i = 0; i < size; i++) {
1121                 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1122                 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1123                     flags & PFR_FLAG_USERIOCTL))
1124                         senderr(EINVAL);
1125                 key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1126                 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1127                 if (p == NULL) {
1128                         p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
1129                         if (p == NULL)
1130                                 senderr(ENOMEM);
1131                         SLIST_FOREACH(q, &addq, pfrkt_workq) {
1132                                 if (!pfr_ktable_compare(p, q)) {
1133                                         pfr_destroy_ktable(p, 0);
1134                                         goto _skip;
1135                                 }
1136                         }
1137                         SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1138                         xadd++;
1139                         if (!key.pfrkt_anchor[0])
1140                                 goto _skip;
1141
1142                         /* find or create root table */
1143                         bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
1144                         r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1145                         if (r != NULL) {
1146                                 p->pfrkt_root = r;
1147                                 goto _skip;
1148                         }
1149                         SLIST_FOREACH(q, &addq, pfrkt_workq) {
1150                                 if (!pfr_ktable_compare(&key, q)) {
1151                                         p->pfrkt_root = q;
1152                                         goto _skip;
1153                                 }
1154                         }
1155                         key.pfrkt_flags = 0;
1156                         r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1157                         if (r == NULL)
1158                                 senderr(ENOMEM);
1159                         SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
1160                         p->pfrkt_root = r;
1161                 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1162                         SLIST_FOREACH(q, &changeq, pfrkt_workq)
1163                                 if (!pfr_ktable_compare(&key, q))
1164                                         goto _skip;
1165                         p->pfrkt_nflags = (p->pfrkt_flags &
1166                             ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1167                         SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1168                         xadd++;
1169                 }
1170 _skip:
1171         ;
1172         }
1173         if (!(flags & PFR_FLAG_DUMMY)) {
1174                 pfr_insert_ktables(&addq);
1175                 pfr_setflags_ktables(&changeq);
1176         } else
1177                  pfr_destroy_ktables(&addq, 0);
1178         if (nadd != NULL)
1179                 *nadd = xadd;
1180         return (0);
1181 _bad:
1182         pfr_destroy_ktables(&addq, 0);
1183         return (rv);
1184 }
1185
1186 int
1187 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1188 {
1189         struct pfr_ktableworkq   workq;
1190         struct pfr_ktable       *p, *q, key;
1191         int                      i, xdel = 0;
1192
1193         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1194         SLIST_INIT(&workq);
1195         for (i = 0; i < size; i++) {
1196                 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1197                 if (pfr_validate_table(&key.pfrkt_t, 0,
1198                     flags & PFR_FLAG_USERIOCTL))
1199                         return (EINVAL);
1200                 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1201                 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1202                         SLIST_FOREACH(q, &workq, pfrkt_workq)
1203                                 if (!pfr_ktable_compare(p, q))
1204                                         goto _skip;
1205                         p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1206                         SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1207                         xdel++;
1208                 }
1209 _skip:
1210         ;
1211         }
1212
1213         if (!(flags & PFR_FLAG_DUMMY))
1214                 pfr_setflags_ktables(&workq);
1215         if (ndel != NULL)
1216                 *ndel = xdel;
1217         return (0);
1218 }
1219
1220 int
1221 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1222         int flags)
1223 {
1224         struct pfr_ktable       *p;
1225         int                      n, nn;
1226
1227         PF_RULES_RASSERT();
1228
1229         ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1230         if (pfr_fix_anchor(filter->pfrt_anchor))
1231                 return (EINVAL);
1232         n = nn = pfr_table_count(filter, flags);
1233         if (n < 0)
1234                 return (ENOENT);
1235         if (n > *size) {
1236                 *size = n;
1237                 return (0);
1238         }
1239         RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1240                 if (pfr_skip_table(filter, p, flags))
1241                         continue;
1242                 if (n-- <= 0)
1243                         continue;
1244                 bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
1245         }
1246
1247         KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
1248
1249         *size = nn;
1250         return (0);
1251 }
1252
1253 int
1254 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1255         int flags)
1256 {
1257         struct pfr_ktable       *p;
1258         struct pfr_ktableworkq   workq;
1259         int                      n, nn;
1260         long                     tzero = time_second;
1261
1262         /* XXX PFR_FLAG_CLSTATS disabled */
1263         ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1264         if (pfr_fix_anchor(filter->pfrt_anchor))
1265                 return (EINVAL);
1266         n = nn = pfr_table_count(filter, flags);
1267         if (n < 0)
1268                 return (ENOENT);
1269         if (n > *size) {
1270                 *size = n;
1271                 return (0);
1272         }
1273         SLIST_INIT(&workq);
1274         RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1275                 if (pfr_skip_table(filter, p, flags))
1276                         continue;
1277                 if (n-- <= 0)
1278                         continue;
1279                 bcopy(&p->pfrkt_ts, tbl++, sizeof(*tbl));
1280                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1281         }
1282         if (flags & PFR_FLAG_CLSTATS)
1283                 pfr_clstats_ktables(&workq, tzero,
1284                     flags & PFR_FLAG_ADDRSTOO);
1285
1286         KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
1287
1288         *size = nn;
1289         return (0);
1290 }
1291
1292 int
1293 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1294 {
1295         struct pfr_ktableworkq   workq;
1296         struct pfr_ktable       *p, key;
1297         int                      i, xzero = 0;
1298         long                     tzero = time_second;
1299
1300         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1301         SLIST_INIT(&workq);
1302         for (i = 0; i < size; i++) {
1303                 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1304                 if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1305                         return (EINVAL);
1306                 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1307                 if (p != NULL) {
1308                         SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1309                         xzero++;
1310                 }
1311         }
1312         if (!(flags & PFR_FLAG_DUMMY))
1313                 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1314         if (nzero != NULL)
1315                 *nzero = xzero;
1316         return (0);
1317 }
1318
1319 int
1320 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1321         int *nchange, int *ndel, int flags)
1322 {
1323         struct pfr_ktableworkq   workq;
1324         struct pfr_ktable       *p, *q, key;
1325         int                      i, xchange = 0, xdel = 0;
1326
1327         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1328         if ((setflag & ~PFR_TFLAG_USRMASK) ||
1329             (clrflag & ~PFR_TFLAG_USRMASK) ||
1330             (setflag & clrflag))
1331                 return (EINVAL);
1332         SLIST_INIT(&workq);
1333         for (i = 0; i < size; i++) {
1334                 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1335                 if (pfr_validate_table(&key.pfrkt_t, 0,
1336                     flags & PFR_FLAG_USERIOCTL))
1337                         return (EINVAL);
1338                 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1339                 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1340                         p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
1341                             ~clrflag;
1342                         if (p->pfrkt_nflags == p->pfrkt_flags)
1343                                 goto _skip;
1344                         SLIST_FOREACH(q, &workq, pfrkt_workq)
1345                                 if (!pfr_ktable_compare(p, q))
1346                                         goto _skip;
1347                         SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1348                         if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
1349                             (clrflag & PFR_TFLAG_PERSIST) &&
1350                             !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1351                                 xdel++;
1352                         else
1353                                 xchange++;
1354                 }
1355 _skip:
1356         ;
1357         }
1358         if (!(flags & PFR_FLAG_DUMMY))
1359                 pfr_setflags_ktables(&workq);
1360         if (nchange != NULL)
1361                 *nchange = xchange;
1362         if (ndel != NULL)
1363                 *ndel = xdel;
1364         return (0);
1365 }
1366
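/*
 * Inactive-table transactions: pfr_ina_begin() flushes any stale
 * inactive tables for the anchor and hands out a new ticket,
 * pfr_ina_define() builds shadow tables under that ticket, and
 * pfr_ina_rollback() discards them again if the ruleset load is
 * aborted.
 */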
1367 int
1368 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1369 {
1370         struct pfr_ktableworkq   workq;
1371         struct pfr_ktable       *p;
1372         struct pf_ruleset       *rs;
1373         int                      xdel = 0;
1374
1375         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1376         rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
1377         if (rs == NULL)
1378                 return (ENOMEM);
1379         SLIST_INIT(&workq);
1380         RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1381                 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1382                     pfr_skip_table(trs, p, 0))
1383                         continue;
1384                 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1385                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1386                 xdel++;
1387         }
1388         if (!(flags & PFR_FLAG_DUMMY)) {
1389                 pfr_setflags_ktables(&workq);
1390                 if (ticket != NULL)
1391                         *ticket = ++rs->tticket;
1392                 rs->topen = 1;
1393         } else
1394                 pf_remove_if_empty_ruleset(rs);
1395         if (ndel != NULL)
1396                 *ndel = xdel;
1397         return (0);
1398 }
1399
1400 int
1401 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1402     int *nadd, int *naddr, u_int32_t ticket, int flags)
1403 {
1404         struct pfr_ktableworkq   tableq;
1405         struct pfr_kentryworkq   addrq;
1406         struct pfr_ktable       *kt, *rt, *shadow, key;
1407         struct pfr_kentry       *p;
1408         struct pfr_addr         *ad;
1409         struct pf_ruleset       *rs;
1410         int                      i, rv, xadd = 0, xaddr = 0;
1411
1412         PF_RULES_WASSERT();
1413
1414         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1415         if (size && !(flags & PFR_FLAG_ADDRSTOO))
1416                 return (EINVAL);
1417         if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1418             flags & PFR_FLAG_USERIOCTL))
1419                 return (EINVAL);
1420         rs = pf_find_ruleset(tbl->pfrt_anchor);
1421         if (rs == NULL || !rs->topen || ticket != rs->tticket)
1422                 return (EBUSY);
1423         tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1424         SLIST_INIT(&tableq);
1425         kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
1426         if (kt == NULL) {
1427                 kt = pfr_create_ktable(tbl, 0, 1);
1428                 if (kt == NULL)
1429                         return (ENOMEM);
1430                 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1431                 xadd++;
1432                 if (!tbl->pfrt_anchor[0])
1433                         goto _skip;
1434
1435                 /* find or create root table */
1436                 bzero(&key, sizeof(key));
1437                 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1438                 rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1439                 if (rt != NULL) {
1440                         kt->pfrkt_root = rt;
1441                         goto _skip;
1442                 }
1443                 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1444                 if (rt == NULL) {
1445                         pfr_destroy_ktables(&tableq, 0);
1446                         return (ENOMEM);
1447                 }
1448                 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1449                 kt->pfrkt_root = rt;
1450         } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
1451                 xadd++;
1452 _skip:
1453         shadow = pfr_create_ktable(tbl, 0, 0);
1454         if (shadow == NULL) {
1455                 pfr_destroy_ktables(&tableq, 0);
1456                 return (ENOMEM);
1457         }
1458         SLIST_INIT(&addrq);
1459         for (i = 0, ad = addr; i < size; i++, ad++) {
1460                 if (pfr_validate_addr(ad))
1461                         senderr(EINVAL);
1462                 if (pfr_lookup_addr(shadow, ad, 1) != NULL)
1463                         continue;
1464                 p = pfr_create_kentry(ad);
1465                 if (p == NULL)
1466                         senderr(ENOMEM);
1467                 if (pfr_route_kentry(shadow, p)) {
1468                         pfr_destroy_kentry(p);
1469                         continue;
1470                 }
1471                 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1472                 xaddr++;
1473         }
1474         if (!(flags & PFR_FLAG_DUMMY)) {
1475                 if (kt->pfrkt_shadow != NULL)
1476                         pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1477                 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1478                 pfr_insert_ktables(&tableq);
1479                 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1480                     xaddr : NO_ADDRESSES;
1481                 kt->pfrkt_shadow = shadow;
1482         } else {
1483                 pfr_clean_node_mask(shadow, &addrq);
1484                 pfr_destroy_ktable(shadow, 0);
1485                 pfr_destroy_ktables(&tableq, 0);
1486                 pfr_destroy_kentries(&addrq);
1487         }
1488         if (nadd != NULL)
1489                 *nadd = xadd;
1490         if (naddr != NULL)
1491                 *naddr = xaddr;
1492         return (0);
1493 _bad:
1494         pfr_destroy_ktable(shadow, 0);
1495         pfr_destroy_ktables(&tableq, 0);
1496         pfr_destroy_kentries(&addrq);
1497         return (rv);
1498 }
1499
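     /*
      * Abort an open transaction: clear the INACTIVE flag on all of its
      * tables, which also destroys the attached shadow tables in
      * pfr_setflags_ktable(), and close the ruleset again.
      */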
1500 int
1501 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1502 {
1503         struct pfr_ktableworkq   workq;
1504         struct pfr_ktable       *p;
1505         struct pf_ruleset       *rs;
1506         int                      xdel = 0;
1507
1508         PF_RULES_WASSERT();
1509
1510         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1511         rs = pf_find_ruleset(trs->pfrt_anchor);
1512         if (rs == NULL || !rs->topen || ticket != rs->tticket)
1513                 return (0);
1514         SLIST_INIT(&workq);
1515         RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1516                 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1517                     pfr_skip_table(trs, p, 0))
1518                         continue;
1519                 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1520                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1521                 xdel++;
1522         }
1523         if (!(flags & PFR_FLAG_DUMMY)) {
1524                 pfr_setflags_ktables(&workq);
1525                 rs->topen = 0;
1526                 pf_remove_if_empty_ruleset(rs);
1527         }
1528         if (ndel != NULL)
1529                 *ndel = xdel;
1530         return (0);
1531 }
1532
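     /*
      * Commit an open transaction: every INACTIVE table of the anchor
      * is handed to pfr_commit_ktable(), which swaps or merges in its
      * shadow copy, and the transaction is closed.
      */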
1533 int
1534 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1535     int *nchange, int flags)
1536 {
1537         struct pfr_ktable       *p, *q;
1538         struct pfr_ktableworkq   workq;
1539         struct pf_ruleset       *rs;
1540         int                      xadd = 0, xchange = 0;
1541         long                     tzero = time_second;
1542
1543         PF_RULES_WASSERT();
1544
1545         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1546         rs = pf_find_ruleset(trs->pfrt_anchor);
1547         if (rs == NULL || !rs->topen || ticket != rs->tticket)
1548                 return (EBUSY);
1549
1550         SLIST_INIT(&workq);
1551         RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1552                 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1553                     pfr_skip_table(trs, p, 0))
1554                         continue;
1555                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1556                 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1557                         xchange++;
1558                 else
1559                         xadd++;
1560         }
1561
1562         if (!(flags & PFR_FLAG_DUMMY)) {
1563                 for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
1564                         q = SLIST_NEXT(p, pfrkt_workq);
1565                         pfr_commit_ktable(p, tzero);
1566                 }
1567                 rs->topen = 0;
1568                 pf_remove_if_empty_ruleset(rs);
1569         }
1570         if (nadd != NULL)
1571                 *nadd = xadd;
1572         if (nchange != NULL)
1573                 *nchange = xchange;
1574
1575         return (0);
1576 }
1577
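     /*
      * Fold a shadow table into its kernel table.  An already active
      * table has its address list merged entry by entry; an inactive
      * one simply swaps radix heads with the shadow.  A shadow defined
      * without addresses (NO_ADDRESSES) leaves the existing addresses
      * untouched.  The shadow is then destroyed and the flags recomputed.
      */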
1578 static void
1579 pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
1580 {
1581         struct pfr_ktable       *shadow = kt->pfrkt_shadow;
1582         int                      nflags;
1583
1584         PF_RULES_WASSERT();
1585
1586         if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1587                 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1588                         pfr_clstats_ktable(kt, tzero, 1);
1589         } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1590                 /* kt might contain addresses */
1591                 struct pfr_kentryworkq   addrq, addq, changeq, delq, garbageq;
1592                 struct pfr_kentry       *p, *q, *next;
1593                 struct pfr_addr          ad;
1594
1595                 pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1596                 pfr_mark_addrs(kt);
1597                 SLIST_INIT(&addq);
1598                 SLIST_INIT(&changeq);
1599                 SLIST_INIT(&delq);
1600                 SLIST_INIT(&garbageq);
1601                 pfr_clean_node_mask(shadow, &addrq);
1602                 for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
1603                         next = SLIST_NEXT(p, pfrke_workq);      /* XXX */
1604                         pfr_copyout_addr(&ad, p);
1605                         q = pfr_lookup_addr(kt, &ad, 1);
1606                         if (q != NULL) {
1607                                 if (q->pfrke_not != p->pfrke_not)
1608                                         SLIST_INSERT_HEAD(&changeq, q,
1609                                             pfrke_workq);
1610                                 q->pfrke_mark = 1;
1611                                 SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
1612                         } else {
1613                                 p->pfrke_tzero = tzero;
1614                                 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
1615                         }
1616                 }
1617                 pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1618                 pfr_insert_kentries(kt, &addq, tzero);
1619                 pfr_remove_kentries(kt, &delq);
1620                 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
1621                 pfr_destroy_kentries(&garbageq);
1622         } else {
1623                 /* kt cannot contain addresses */
1624                 SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1625                     shadow->pfrkt_ip4);
1626                 SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1627                     shadow->pfrkt_ip6);
1628                 SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1629                 pfr_clstats_ktable(kt, tzero, 1);
1630         }
1631         nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1632             (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1633                 & ~PFR_TFLAG_INACTIVE;
1634         pfr_destroy_ktable(shadow, 0);
1635         kt->pfrkt_shadow = NULL;
1636         pfr_setflags_ktable(kt, nflags);
1637 }
1638
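     /*
      * Validate a table specification from userland: the name must be
      * non-empty, NUL-terminated and zero-padded, the anchor must
      * normalize cleanly, only the allowed flag bits may be set, and
      * (with no_reserved) the reserved anchor is rejected.
      */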
1639 static int
1640 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1641 {
1642         int i;
1643
1644         if (!tbl->pfrt_name[0])
1645                 return (-1);
1646         if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1647                 return (-1);
1648         if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1649                 return (-1);
1650         for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1651                 if (tbl->pfrt_name[i])
1652                         return (-1);
1653         if (pfr_fix_anchor(tbl->pfrt_anchor))
1654                 return (-1);
1655         if (tbl->pfrt_flags & ~allowedflags)
1656                 return (-1);
1657         return (0);
1658 }
1659
1660 /*
1661  * Rewrite anchors referenced by tables to remove slashes
1662  * and check for validity.
1663  */
1664 static int
1665 pfr_fix_anchor(char *anchor)
1666 {
1667         size_t siz = MAXPATHLEN;
1668         int i;
1669
1670         if (anchor[0] == '/') {
1671                 char *path;
1672                 int off;
1673
1674                 path = anchor;
1675                 off = 1;
1676                 while (*++path == '/')
1677                         off++;
1678                 bcopy(path, anchor, siz - off);
1679                 memset(anchor + siz - off, 0, off);
1680         }
1681         if (anchor[siz - 1])
1682                 return (-1);
1683         for (i = strlen(anchor); i < siz; i++)
1684                 if (anchor[i])
1685                         return (-1);
1686         return (0);
1687 }
1688
1689 int
1690 pfr_table_count(struct pfr_table *filter, int flags)
1691 {
1692         struct pf_ruleset *rs;
1693
1694         PF_RULES_ASSERT();
1695
1696         if (flags & PFR_FLAG_ALLRSETS)
1697                 return (V_pfr_ktable_cnt);
1698         if (filter->pfrt_anchor[0]) {
1699                 rs = pf_find_ruleset(filter->pfrt_anchor);
1700                 return ((rs != NULL) ? rs->tables : -1);
1701         }
1702         return (pf_main_ruleset.tables);
1703 }
1704
1705 static int
1706 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1707 {
1708         if (flags & PFR_FLAG_ALLRSETS)
1709                 return (0);
1710         if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1711                 return (1);
1712         return (0);
1713 }
1714
1715 static void
1716 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1717 {
1718         struct pfr_ktable       *p;
1719
1720         SLIST_FOREACH(p, workq, pfrkt_workq)
1721                 pfr_insert_ktable(p);
1722 }
1723
1724 static void
1725 pfr_insert_ktable(struct pfr_ktable *kt)
1726 {
1727
1728         PF_RULES_WASSERT();
1729
1730         RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
1731         V_pfr_ktable_cnt++;
1732         if (kt->pfrkt_root != NULL)
1733                 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1734                         pfr_setflags_ktable(kt->pfrkt_root,
1735                             kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1736 }
1737
1738 static void
1739 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1740 {
1741         struct pfr_ktable       *p, *q;
1742
1743         for (p = SLIST_FIRST(workq); p; p = q) {
1744                 q = SLIST_NEXT(p, pfrkt_workq);
1745                 pfr_setflags_ktable(p, p->pfrkt_nflags);
1746         }
1747 }
1748
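     /*
      * Apply a new flag word to a table and perform the implied state
      * transitions: a table that is neither referenced nor persistent
      * loses ACTIVE, a non-active table loses its user flags, and a
      * table with no set flags left is removed from the tree and
      * destroyed, dropping the anchor reference on its root table.
      */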
1749 static void
1750 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1751 {
1752         struct pfr_kentryworkq  addrq;
1753
1754         PF_RULES_WASSERT();
1755
1756         if (!(newf & PFR_TFLAG_REFERENCED) &&
1757             !(newf & PFR_TFLAG_PERSIST))
1758                 newf &= ~PFR_TFLAG_ACTIVE;
1759         if (!(newf & PFR_TFLAG_ACTIVE))
1760                 newf &= ~PFR_TFLAG_USRMASK;
1761         if (!(newf & PFR_TFLAG_SETMASK)) {
1762                 RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
1763                 if (kt->pfrkt_root != NULL)
1764                         if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1765                                 pfr_setflags_ktable(kt->pfrkt_root,
1766                                     kt->pfrkt_root->pfrkt_flags &
1767                                         ~PFR_TFLAG_REFDANCHOR);
1768                 pfr_destroy_ktable(kt, 1);
1769                 V_pfr_ktable_cnt--;
1770                 return;
1771         }
1772         if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1773                 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1774                 pfr_remove_kentries(kt, &addrq);
1775         }
1776         if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1777                 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1778                 kt->pfrkt_shadow = NULL;
1779         }
1780         kt->pfrkt_flags = newf;
1781 }
1782
1783 static void
1784 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1785 {
1786         struct pfr_ktable       *p;
1787
1788         SLIST_FOREACH(p, workq, pfrkt_workq)
1789                 pfr_clstats_ktable(p, tzero, recurse);
1790 }
1791
1792 static void
1793 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1794 {
1795         struct pfr_kentryworkq   addrq;
1796
1797         if (recurse) {
1798                 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1799                 pfr_clstats_kentries(&addrq, tzero, 0);
1800         }
1801         bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
1802         bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
1803         kt->pfrkt_match = kt->pfrkt_nomatch = 0;
1804         kt->pfrkt_tzero = tzero;
1805 }
1806
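     /*
      * Allocate a new kernel table, optionally attaching it to its
      * ruleset, and initialize one radix head per address family.
      * Returns NULL if any allocation fails.
      */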
1807 static struct pfr_ktable *
1808 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
1809 {
1810         struct pfr_ktable       *kt;
1811         struct pf_ruleset       *rs;
1812
1813         PF_RULES_WASSERT();
1814
1815         kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
1816         if (kt == NULL)
1817                 return (NULL);
1818         kt->pfrkt_t = *tbl;
1819
1820         if (attachruleset) {
1821                 rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
1822                 if (!rs) {
1823                         pfr_destroy_ktable(kt, 0);
1824                         return (NULL);
1825                 }
1826                 kt->pfrkt_rs = rs;
1827                 rs->tables++;
1828         }
1829
1830         if (!rn_inithead((void **)&kt->pfrkt_ip4,
1831             offsetof(struct sockaddr_in, sin_addr) * 8) ||
1832             !rn_inithead((void **)&kt->pfrkt_ip6,
1833             offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
1834                 pfr_destroy_ktable(kt, 0);
1835                 return (NULL);
1836         }
1837         kt->pfrkt_tzero = tzero;
1838
1839         return (kt);
1840 }
1841
1842 static void
1843 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1844 {
1845         struct pfr_ktable       *p, *q;
1846
1847         for (p = SLIST_FIRST(workq); p; p = q) {
1848                 q = SLIST_NEXT(p, pfrkt_workq);
1849                 pfr_destroy_ktable(p, flushaddr);
1850         }
1851 }
1852
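     /*
      * Tear down a kernel table: optionally flush its addresses, detach
      * both radix heads, destroy any attached shadow table and drop the
      * per-ruleset table count taken in pfr_create_ktable().
      */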
1853 static void
1854 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
1855 {
1856         struct pfr_kentryworkq   addrq;
1857
1858         if (flushaddr) {
1859                 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1860                 pfr_clean_node_mask(kt, &addrq);
1861                 pfr_destroy_kentries(&addrq);
1862         }
1863         if (kt->pfrkt_ip4 != NULL)
1864                 rn_detachhead((void **)&kt->pfrkt_ip4);
1865         if (kt->pfrkt_ip6 != NULL)
1866                 rn_detachhead((void **)&kt->pfrkt_ip6);
1867         if (kt->pfrkt_shadow != NULL)
1868                 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
1869         if (kt->pfrkt_rs != NULL) {
1870                 kt->pfrkt_rs->tables--;
1871                 pf_remove_if_empty_ruleset(kt->pfrkt_rs);
1872         }
1873         free(kt, M_PFTABLE);
1874 }
1875
1876 static int
1877 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
1878 {
1879         int d;
1880
1881         if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
1882                 return (d);
1883         return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
1884 }
1885
1886 static struct pfr_ktable *
1887 pfr_lookup_table(struct pfr_table *tbl)
1888 {
1889         /* a struct pfr_ktable begins with a struct pfr_table */
1890         return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
1891             (struct pfr_ktable *)tbl));
1892 }
1893
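     /*
      * Main lookup: test whether address 'a' is covered by table 'kt'.
      * An inactive table defers to its root table.  Updates the table's
      * match/nomatch counters and returns 1 on a non-negated match.
      */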
1894 int
1895 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
1896 {
1897         struct pfr_kentry       *ke = NULL;
1898         int                      match;
1899
1900         PF_RULES_RASSERT();
1901
1902         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1903                 kt = kt->pfrkt_root;
1904         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1905                 return (0);
1906
1907         switch (af) {
1908 #ifdef INET
1909         case AF_INET:
1910             {
1911                 struct sockaddr_in sin;
1912
1913                 bzero(&sin, sizeof(sin));
1914                 sin.sin_len = sizeof(sin);
1915                 sin.sin_family = AF_INET;
1916                 sin.sin_addr.s_addr = a->addr32[0];
1917                 ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
1918                 if (ke && KENTRY_RNF_ROOT(ke))
1919                         ke = NULL;
1920                 break;
1921             }
1922 #endif /* INET */
1923 #ifdef INET6
1924         case AF_INET6:
1925             {
1926                 struct sockaddr_in6 sin6;
1927
1928                 bzero(&sin6, sizeof(sin6));
1929                 sin6.sin6_len = sizeof(sin6);
1930                 sin6.sin6_family = AF_INET6;
1931                 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
1932                 ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
1933                 if (ke && KENTRY_RNF_ROOT(ke))
1934                         ke = NULL;
1935                 break;
1936             }
1937 #endif /* INET6 */
1938         }
1939         match = (ke && !ke->pfrke_not);
1940         if (match)
1941                 kt->pfrkt_match++;
1942         else
1943                 kt->pfrkt_nomatch++;
1944         return (match);
1945 }
1946
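     /*
      * Charge one packet of 'len' bytes to the table counters and, if
      * PFR_TFLAG_COUNTERS is set, to the matching entry.  If the lookup
      * disagrees with 'notrule', the traffic is booked under
      * PFR_OP_XPASS instead of the caller-supplied operation.
      */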
1947 void
1948 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
1949     u_int64_t len, int dir_out, int op_pass, int notrule)
1950 {
1951         struct pfr_kentry       *ke = NULL;
1952
1953         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1954                 kt = kt->pfrkt_root;
1955         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1956                 return;
1957
1958         switch (af) {
1959 #ifdef INET
1960         case AF_INET:
1961             {
1962                 struct sockaddr_in sin;
1963
1964                 bzero(&sin, sizeof(sin));
1965                 sin.sin_len = sizeof(sin);
1966                 sin.sin_family = AF_INET;
1967                 sin.sin_addr.s_addr = a->addr32[0];
1968                 ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
1969                 if (ke && KENTRY_RNF_ROOT(ke))
1970                         ke = NULL;
1971                 break;
1972             }
1973 #endif /* INET */
1974 #ifdef INET6
1975         case AF_INET6:
1976             {
1977                 struct sockaddr_in6 sin6;
1978
1979                 bzero(&sin6, sizeof(sin6));
1980                 sin6.sin6_len = sizeof(sin6);
1981                 sin6.sin6_family = AF_INET6;
1982                 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
1983                 ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
1984                 if (ke && KENTRY_RNF_ROOT(ke))
1985                         ke = NULL;
1986                 break;
1987             }
1988 #endif /* INET6 */
1989         default:
1990                 panic("%s: unknown address family %u", __func__, af);
1991         }
1992         if ((ke == NULL || ke->pfrke_not) != notrule) {
1993                 if (op_pass != PFR_OP_PASS)
1994                         printf("pfr_update_stats: assertion failed.\n");
1995                 op_pass = PFR_OP_XPASS;
1996         }
1997         kt->pfrkt_packets[dir_out][op_pass]++;
1998         kt->pfrkt_bytes[dir_out][op_pass] += len;
1999         if (ke != NULL && op_pass != PFR_OP_XPASS &&
2000             (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
2001                 if (ke->pfrke_counters == NULL)
2002                         ke->pfrke_counters = uma_zalloc(V_pfr_kcounters_z,
2003                             M_NOWAIT | M_ZERO);
2004                 if (ke->pfrke_counters != NULL) {
2005                         ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
2006                         ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
2007                 }
2008         }
2009 }
2010
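     /*
      * Look up or create the table referenced by a rule and take a rule
      * reference on it; the first reference marks the table REFERENCED.
      * A table inside an anchor is additionally linked to a root table
      * of the same name in the main ruleset.
      */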
2011 struct pfr_ktable *
2012 pfr_attach_table(struct pf_ruleset *rs, char *name)
2013 {
2014         struct pfr_ktable       *kt, *rt;
2015         struct pfr_table         tbl;
2016         struct pf_anchor        *ac = rs->anchor;
2017
2018         PF_RULES_WASSERT();
2019
2020         bzero(&tbl, sizeof(tbl));
2021         strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2022         if (ac != NULL)
2023                 strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2024         kt = pfr_lookup_table(&tbl);
2025         if (kt == NULL) {
2026                 kt = pfr_create_ktable(&tbl, time_second, 1);
2027                 if (kt == NULL)
2028                         return (NULL);
2029                 if (ac != NULL) {
2030                         bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2031                         rt = pfr_lookup_table(&tbl);
2032                         if (rt == NULL) {
2033                                 rt = pfr_create_ktable(&tbl, 0, 1);
2034                                 if (rt == NULL) {
2035                                         pfr_destroy_ktable(kt, 0);
2036                                         return (NULL);
2037                                 }
2038                                 pfr_insert_ktable(rt);
2039                         }
2040                         kt->pfrkt_root = rt;
2041                 }
2042                 pfr_insert_ktable(kt);
2043         }
2044         if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2045                 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2046         return (kt);
2047 }
2048
2049 void
2050 pfr_detach_table(struct pfr_ktable *kt)
2051 {
2052
2053         PF_RULES_WASSERT();
2054         KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
2055             __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));
2056
2057         if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2058                 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2059 }
2060
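     /*
      * Pick an address from the table for address pools (round-robin):
      * *pidx selects the block, 'counter' the position inside it.
      * Nested, more specific blocks are skipped by advancing the
      * counter past them.  Returns 0 on success, 1 when the table is
      * exhausted and -1 when it is not active.
      */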
2061 int
2062 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2063     sa_family_t af)
2064 {
2065         struct pf_addr           *addr, *cur, *mask;
2066         union sockaddr_union     uaddr, umask;
2067         struct pfr_kentry       *ke, *ke2 = NULL;
2068         int                      idx = -1, use_counter = 0;
2069
2070         switch (af) {
2071         case AF_INET:
2072                 uaddr.sin.sin_len = sizeof(struct sockaddr_in);
2073                 uaddr.sin.sin_family = AF_INET;
2074                 break;
2075         case AF_INET6:
2076                 uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
2077                 uaddr.sin6.sin6_family = AF_INET6;
2078                 break;
2079         }
2080         addr = SUNION2PF(&uaddr, af);
2081
2082         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2083                 kt = kt->pfrkt_root;
2084         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2085                 return (-1);
2086
2087         if (pidx != NULL)
2088                 idx = *pidx;
2089         if (counter != NULL && idx >= 0)
2090                 use_counter = 1;
2091         if (idx < 0)
2092                 idx = 0;
2093
2094 _next_block:
2095         ke = pfr_kentry_byidx(kt, idx, af);
2096         if (ke == NULL) {
2097                 kt->pfrkt_nomatch++;
2098                 return (1);
2099         }
2100         pfr_prepare_network(&umask, af, ke->pfrke_net);
2101         cur = SUNION2PF(&ke->pfrke_sa, af);
2102         mask = SUNION2PF(&umask, af);
2103
2104         if (use_counter) {
2105                 /* is the supplied address within the block? */
2106                 if (!PF_MATCHA(0, cur, mask, counter, af)) {
2107                         /* no, go to next block in table */
2108                         idx++;
2109                         use_counter = 0;
2110                         goto _next_block;
2111                 }
2112                 PF_ACPY(addr, counter, af);
2113         } else {
2114                 /* use first address of block */
2115                 PF_ACPY(addr, cur, af);
2116         }
2117
2118         if (!KENTRY_NETWORK(ke)) {
2119                 /* this is a single IP address - no possible nested block */
2120                 PF_ACPY(counter, addr, af);
2121                 *pidx = idx;
2122                 kt->pfrkt_match++;
2123                 return (0);
2124         }
2125         for (;;) {
2126                 /* we don't want to use a nested block */
2127                 switch (af) {
2128                 case AF_INET:
2129                         ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2130                             &kt->pfrkt_ip4->rh);
2131                         break;
2132                 case AF_INET6:
2133                         ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2134                             &kt->pfrkt_ip6->rh);
2135                         break;
2136                 }
2137                 /* no need to check KENTRY_RNF_ROOT() here */
2138                 if (ke2 == ke) {
2139                         /* lookup returned the same block - perfect */
2140                         PF_ACPY(counter, addr, af);
2141                         *pidx = idx;
2142                         kt->pfrkt_match++;
2143                         return (0);
2144                 }
2145
2146                 /* we need to increase the counter past the nested block */
2147                 pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net);
2148                 PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af);
2149                 PF_AINC(addr, af);
2150                 if (!PF_MATCHA(0, cur, mask, addr, af)) {
2151                         /* ok, we reached the end of our main block */
2152                         /* go to next block in table */
2153                         idx++;
2154                         use_counter = 0;
2155                         goto _next_block;
2156                 }
2157         }
2158 }
2159
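     /*
      * Return the idx'th entry of the given address family, found by
      * walking the radix tree with the PFRW_POOL_GET walker.
      */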
2160 static struct pfr_kentry *
2161 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2162 {
2163         struct pfr_walktree     w;
2164
2165         bzero(&w, sizeof(w));
2166         w.pfrw_op = PFRW_POOL_GET;
2167         w.pfrw_cnt = idx;
2168
2169         switch (af) {
2170 #ifdef INET
2171         case AF_INET:
2172                 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2173                 return (w.pfrw_kentry);
2174 #endif /* INET */
2175 #ifdef INET6
2176         case AF_INET6:
2177                 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2178                 return (w.pfrw_kentry);
2179 #endif /* INET6 */
2180         default:
2181                 return (NULL);
2182         }
2183 }
2184
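     /*
      * Recompute the per-family address counts of a dynamic address
      * from its backing table by walking both radix trees with the
      * PFRW_DYNADDR_UPDATE walker.
      */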
2185 void
2186 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2187 {
2188         struct pfr_walktree     w;
2189
2190         bzero(&w, sizeof(w));
2191         w.pfrw_op = PFRW_DYNADDR_UPDATE;
2192         w.pfrw_dyn = dyn;
2193
2194         dyn->pfid_acnt4 = 0;
2195         dyn->pfid_acnt6 = 0;
2196         if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2197                 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2198         if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2199                 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2200 }