]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/netpfil/pf/pf_ioctl.c
Update to bmake-20200710
[FreeBSD/FreeBSD.git] / sys / netpfil / pf / pf_ioctl.c
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002,2003 Henning Brauer
6  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Effort sponsored in part by the Defense Advanced Research Projects
34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36  *
37  *      $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
38  */
39
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42
43 #include "opt_inet.h"
44 #include "opt_inet6.h"
45 #include "opt_bpf.h"
46 #include "opt_pf.h"
47
48 #include <sys/param.h>
49 #include <sys/_bitset.h>
50 #include <sys/bitset.h>
51 #include <sys/bus.h>
52 #include <sys/conf.h>
53 #include <sys/endian.h>
54 #include <sys/fcntl.h>
55 #include <sys/filio.h>
56 #include <sys/hash.h>
57 #include <sys/interrupt.h>
58 #include <sys/jail.h>
59 #include <sys/kernel.h>
60 #include <sys/kthread.h>
61 #include <sys/lock.h>
62 #include <sys/mbuf.h>
63 #include <sys/module.h>
64 #include <sys/proc.h>
65 #include <sys/smp.h>
66 #include <sys/socket.h>
67 #include <sys/sysctl.h>
68 #include <sys/md5.h>
69 #include <sys/ucred.h>
70
71 #include <net/if.h>
72 #include <net/if_var.h>
73 #include <net/vnet.h>
74 #include <net/route.h>
75 #include <net/pfil.h>
76 #include <net/pfvar.h>
77 #include <net/if_pfsync.h>
78 #include <net/if_pflog.h>
79
80 #include <netinet/in.h>
81 #include <netinet/ip.h>
82 #include <netinet/ip_var.h>
83 #include <netinet6/ip6_var.h>
84 #include <netinet/ip_icmp.h>
85
86 #ifdef INET6
87 #include <netinet/ip6.h>
88 #endif /* INET6 */
89
90 #ifdef ALTQ
91 #include <net/altq/altq.h>
92 #endif
93
94 static struct pf_pool   *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
95                             u_int8_t, u_int8_t, u_int8_t);
96
97 static void              pf_mv_pool(struct pf_palist *, struct pf_palist *);
98 static void              pf_empty_pool(struct pf_palist *);
99 static int               pfioctl(struct cdev *, u_long, caddr_t, int,
100                             struct thread *);
101 #ifdef ALTQ
102 static int               pf_begin_altq(u_int32_t *);
103 static int               pf_rollback_altq(u_int32_t);
104 static int               pf_commit_altq(u_int32_t);
105 static int               pf_enable_altq(struct pf_altq *);
106 static int               pf_disable_altq(struct pf_altq *);
107 static u_int32_t         pf_qname2qid(char *);
108 static void              pf_qid_unref(u_int32_t);
109 #endif /* ALTQ */
110 static int               pf_begin_rules(u_int32_t *, int, const char *);
111 static int               pf_rollback_rules(u_int32_t, int, char *);
112 static int               pf_setup_pfsync_matching(struct pf_ruleset *);
113 static void              pf_hash_rule(MD5_CTX *, struct pf_rule *);
114 static void              pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
115 static int               pf_commit_rules(u_int32_t, int, char *);
116 static int               pf_addr_setup(struct pf_ruleset *,
117                             struct pf_addr_wrap *, sa_family_t);
118 static void              pf_addr_copyout(struct pf_addr_wrap *);
119 #ifdef ALTQ
120 static int               pf_export_kaltq(struct pf_altq *,
121                             struct pfioc_altq_v1 *, size_t);
122 static int               pf_import_kaltq(struct pfioc_altq_v1 *,
123                             struct pf_altq *, size_t);
124 #endif /* ALTQ */
125
126 VNET_DEFINE(struct pf_rule,     pf_default_rule);
127
128 #ifdef ALTQ
129 VNET_DEFINE_STATIC(int,         pf_altq_running);
130 #define V_pf_altq_running       VNET(pf_altq_running)
131 #endif
132
#define TAGID_MAX        50000

/*
 * One tag (or ALTQ queue) name entry.  Each entry is linked into two
 * hash tables at once: one keyed by name, one keyed by numeric tag.
 */
struct pf_tagname {
        TAILQ_ENTRY(pf_tagname) namehash_entries;       /* name hash chain */
        TAILQ_ENTRY(pf_tagname) taghash_entries;        /* tag-id hash chain */
        char                    name[PF_TAG_NAME_SIZE]; /* tag/queue name */
        uint16_t                tag;                    /* numeric id, 1-based */
        int                     ref;                    /* reference count */
};

/*
 * A set of tag<->name mappings: dual hash lookup plus a bitmap of
 * available tag ids.  Valid tag ids are in the range [1..TAGID_MAX];
 * a set bit in 'avail' means the corresponding id is free.
 */
struct pf_tagset {
        TAILQ_HEAD(, pf_tagname)        *namehash;      /* hash on name */
        TAILQ_HEAD(, pf_tagname)        *taghash;       /* hash on tag id */
        unsigned int                     mask;          /* hashsize - 1 */
        uint32_t                         seed;          /* name hash seed */
        BITSET_DEFINE(, TAGID_MAX)       avail;         /* free tag ids */
};
149
150 VNET_DEFINE(struct pf_tagset, pf_tags);
151 #define V_pf_tags       VNET(pf_tags)
152 static unsigned int     pf_rule_tag_hashsize;
153 #define PF_RULE_TAG_HASH_SIZE_DEFAULT   128
154 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
155     &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
156     "Size of pf(4) rule tag hashtable");
157
158 #ifdef ALTQ
159 VNET_DEFINE(struct pf_tagset, pf_qids);
160 #define V_pf_qids       VNET(pf_qids)
161 static unsigned int     pf_queue_tag_hashsize;
162 #define PF_QUEUE_TAG_HASH_SIZE_DEFAULT  128
163 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
164     &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
165     "Size of pf(4) queue tag hashtable");
166 #endif
167 VNET_DEFINE(uma_zone_t,  pf_tag_z);
168 #define V_pf_tag_z               VNET(pf_tag_z)
169 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
170 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
171
172 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
173 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
174 #endif
175
176 static void              pf_init_tagset(struct pf_tagset *, unsigned int *,
177                             unsigned int);
178 static void              pf_cleanup_tagset(struct pf_tagset *);
179 static uint16_t          tagname2hashindex(const struct pf_tagset *, const char *);
180 static uint16_t          tag2hashindex(const struct pf_tagset *, uint16_t);
181 static u_int16_t         tagname2tag(struct pf_tagset *, char *);
182 static u_int16_t         pf_tagname2tag(char *);
183 static void              tag_unref(struct pf_tagset *, u_int16_t);
184
185 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
186
187 struct cdev *pf_dev;
188
189 /*
 * XXX - These are new and need to be checked when moving to a new version
191  */
192 static void              pf_clear_states(void);
193 static int               pf_clear_tables(void);
194 static void              pf_clear_srcnodes(struct pf_src_node *);
195 static void              pf_kill_srcnodes(struct pfioc_src_node_kill *);
196 static void              pf_tbladdr_copyout(struct pf_addr_wrap *);
197
198 /*
199  * Wrapper functions for pfil(9) hooks
200  */
201 #ifdef INET
202 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
203     int flags, void *ruleset __unused, struct inpcb *inp);
204 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
205     int flags, void *ruleset __unused, struct inpcb *inp);
206 #endif
207 #ifdef INET6
208 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
209     int flags, void *ruleset __unused, struct inpcb *inp);
210 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
211     int flags, void *ruleset __unused, struct inpcb *inp);
212 #endif
213
214 static int              hook_pf(void);
215 static int              dehook_pf(void);
216 static int              shutdown_pf(void);
217 static int              pf_load(void);
218 static void             pf_unload(void);
219
220 static struct cdevsw pf_cdevsw = {
221         .d_ioctl =      pfioctl,
222         .d_name =       PF_NAME,
223         .d_version =    D_VERSION,
224 };
225
226 volatile VNET_DEFINE_STATIC(int, pf_pfil_hooked);
227 #define V_pf_pfil_hooked        VNET(pf_pfil_hooked)
228
229 /*
230  * We need a flag that is neither hooked nor running to know when
231  * the VNET is "valid".  We primarily need this to control (global)
232  * external event, e.g., eventhandlers.
233  */
234 VNET_DEFINE(int, pf_vnet_active);
235 #define V_pf_vnet_active        VNET(pf_vnet_active)
236
237 int pf_end_threads;
238 struct proc *pf_purge_proc;
239
240 struct rmlock                   pf_rules_lock;
241 struct sx                       pf_ioctl_lock;
242 struct sx                       pf_end_lock;
243
244 /* pfsync */
245 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
246 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
247 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
248 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
249 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
250 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
251 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
252
253 /* pflog */
254 pflog_packet_t                  *pflog_packet_ptr = NULL;
255
256 extern u_long   pf_ioctl_maxcount;
257
/*
 * Per-VNET pf attach: initialize the pf subsystems, set up the default
 * rule with its counters, install default limits and timeouts, and
 * register the software interrupt used for deferred packet sending.
 */
static void
pfattach_vnet(void)
{
        u_int32_t *my_timeout = V_pf_default_rule.timeout;

        pf_initialize();
        pfr_initialize();
        pfi_initialize_vnet();
        pf_normalize_init();

        V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
        V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

        RB_INIT(&V_pf_anchors);
        pf_init_ruleset(&pf_main_ruleset);

        /* default rule should never be garbage collected */
        V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
        V_pf_default_rule.action = PF_DROP;
#else
        V_pf_default_rule.action = PF_PASS;
#endif
        V_pf_default_rule.nr = -1;
        V_pf_default_rule.rtableid = -1;

        V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
        V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
        V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

        /* initialize default timeouts */
        my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
        my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
        my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
        my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
        my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
        my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
        my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
        my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
        my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
        my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
        my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
        my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
        my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
        my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
        my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
        my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
        my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
        my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
        my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
        my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

        bzero(&V_pf_status, sizeof(V_pf_status));
        V_pf_status.debug = PF_DEBUG_URGENT;

        V_pf_pfil_hooked = 0;

        /* XXX do our best to avoid a conflict */
        V_pf_status.hostid = arc4random();

        /* Per-CPU statistics counters. */
        for (int i = 0; i < PFRES_MAX; i++)
                V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
        for (int i = 0; i < LCNT_MAX; i++)
                V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
        for (int i = 0; i < FCNT_MAX; i++)
                V_pf_status.fcounters[i] = counter_u64_alloc(M_WAITOK);
        for (int i = 0; i < SCNT_MAX; i++)
                V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

        if (swi_add(NULL, "pf send", pf_intr, curvnet, SWI_NET,
            INTR_MPSAFE, &V_pf_swi_cookie) != 0)
                /* XXXGL: leaked all above. */
                return;
}
332
333
/*
 * Look up the address pool of a single rule.  The rule is identified
 * by its anchor path, its action (mapped to a ruleset number) and
 * either its rule number or, with r_last set, its position at the end
 * of the queue.  'active' selects the active vs. inactive ruleset;
 * when check_ticket is set, 'ticket' must match that ruleset's ticket.
 * Returns NULL if the anchor, ruleset, ticket or rule does not match.
 */
static struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
        struct pf_ruleset       *ruleset;
        struct pf_rule          *rule;
        int                      rs_num;

        ruleset = pf_find_ruleset(anchor);
        if (ruleset == NULL)
                return (NULL);
        rs_num = pf_get_ruleset_number(rule_action);
        if (rs_num >= PF_RULESET_MAX)
                return (NULL);
        if (active) {
                if (check_ticket && ticket !=
                    ruleset->rules[rs_num].active.ticket)
                        return (NULL);
                if (r_last)
                        rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
                            pf_rulequeue);
                else
                        rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
        } else {
                if (check_ticket && ticket !=
                    ruleset->rules[rs_num].inactive.ticket)
                        return (NULL);
                if (r_last)
                        rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
                            pf_rulequeue);
                else
                        rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
        }
        if (!r_last) {
                /* Walk forward until the requested rule number is found. */
                while ((rule != NULL) && (rule->nr != rule_number))
                        rule = TAILQ_NEXT(rule, entries);
        }
        if (rule == NULL)
                return (NULL);

        return (&rule->rpool);
}
377
378 static void
379 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
380 {
381         struct pf_pooladdr      *mv_pool_pa;
382
383         while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
384                 TAILQ_REMOVE(poola, mv_pool_pa, entries);
385                 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
386         }
387 }
388
389 static void
390 pf_empty_pool(struct pf_palist *poola)
391 {
392         struct pf_pooladdr *pa;
393
394         while ((pa = TAILQ_FIRST(poola)) != NULL) {
395                 switch (pa->addr.type) {
396                 case PF_ADDR_DYNIFTL:
397                         pfi_dynaddr_remove(pa->addr.p.dyn);
398                         break;
399                 case PF_ADDR_TABLE:
400                         /* XXX: this could be unfinished pooladdr on pabuf */
401                         if (pa->addr.p.tbl != NULL)
402                                 pfr_detach_table(pa->addr.p.tbl);
403                         break;
404                 }
405                 if (pa->kif)
406                         pfi_kif_unref(pa->kif);
407                 TAILQ_REMOVE(poola, pa, entries);
408                 free(pa, M_PFRULE);
409         }
410 }
411
/*
 * Remove a rule from its ruleset queue and append it to the global
 * unlinked-rules list for deferred cleanup.  Caller must hold the
 * rules write lock.
 */
static void
pf_unlink_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{

        PF_RULES_WASSERT();

        TAILQ_REMOVE(rulequeue, rule, entries);

        /* The unlinked list has its own lock, separate from the rules lock. */
        PF_UNLNKDRULES_LOCK();
        rule->rule_flag |= PFRULE_REFS;
        TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
        PF_UNLNKDRULES_UNLOCK();
}
425
/*
 * Release every resource held by a rule and free the rule itself:
 * tag and queue-id references, dynamic-interface/table attachments of
 * the src and dst addresses, the overload table, the interface
 * reference, the anchor linkage, the address pool and the per-rule
 * counters.  Caller must hold the rules write lock.
 */
void
pf_free_rule(struct pf_rule *rule)
{

        PF_RULES_WASSERT();

        if (rule->tag)
                tag_unref(&V_pf_tags, rule->tag);
        if (rule->match_tag)
                tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
        /* pqid equal to qid means only one queue reference was taken. */
        if (rule->pqid != rule->qid)
                pf_qid_unref(rule->pqid);
        pf_qid_unref(rule->qid);
#endif
        switch (rule->src.addr.type) {
        case PF_ADDR_DYNIFTL:
                pfi_dynaddr_remove(rule->src.addr.p.dyn);
                break;
        case PF_ADDR_TABLE:
                pfr_detach_table(rule->src.addr.p.tbl);
                break;
        }
        switch (rule->dst.addr.type) {
        case PF_ADDR_DYNIFTL:
                pfi_dynaddr_remove(rule->dst.addr.p.dyn);
                break;
        case PF_ADDR_TABLE:
                pfr_detach_table(rule->dst.addr.p.tbl);
                break;
        }
        if (rule->overload_tbl)
                pfr_detach_table(rule->overload_tbl);
        if (rule->kif)
                pfi_kif_unref(rule->kif);
        pf_anchor_remove(rule);
        pf_empty_pool(&rule->rpool.list);
        counter_u64_free(rule->states_cur);
        counter_u64_free(rule->states_tot);
        counter_u64_free(rule->src_nodes);
        free(rule, M_PFRULE);
}
468
469 static void
470 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
471     unsigned int default_size)
472 {
473         unsigned int i;
474         unsigned int hashsize;
475         
476         if (*tunable_size == 0 || !powerof2(*tunable_size))
477                 *tunable_size = default_size;
478
479         hashsize = *tunable_size;
480         ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
481             M_WAITOK);
482         ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
483             M_WAITOK);
484         ts->mask = hashsize - 1;
485         ts->seed = arc4random();
486         for (i = 0; i < hashsize; i++) {
487                 TAILQ_INIT(&ts->namehash[i]);
488                 TAILQ_INIT(&ts->taghash[i]);
489         }
490         BIT_FILL(TAGID_MAX, &ts->avail);
491 }
492
/*
 * Free every entry in a tagset and the two hash table arrays.
 */
static void
pf_cleanup_tagset(struct pf_tagset *ts)
{
        unsigned int i;
        unsigned int hashsize;
        struct pf_tagname *t, *tmp;

        /*
         * Only need to clean up one of the hashes as each tag is hashed
         * into each table.
         */
        hashsize = ts->mask + 1;
        for (i = 0; i < hashsize; i++)
                TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
                        uma_zfree(V_pf_tag_z, t);

        free(ts->namehash, M_PFHASH);
        free(ts->taghash, M_PFHASH);
}
512
513 static uint16_t
514 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
515 {
516
517         return (murmur3_32_hash(tagname, strlen(tagname), ts->seed) & ts->mask);
518 }
519
520 static uint16_t
521 tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
522 {
523
524         return (tag & ts->mask);
525 }
526
527 static u_int16_t
528 tagname2tag(struct pf_tagset *ts, char *tagname)
529 {
530         struct pf_tagname       *tag;
531         u_int32_t                index;
532         u_int16_t                new_tagid;
533
534         PF_RULES_WASSERT();
535
536         index = tagname2hashindex(ts, tagname);
537         TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
538                 if (strcmp(tagname, tag->name) == 0) {
539                         tag->ref++;
540                         return (tag->tag);
541                 }
542
543         /*
544          * new entry
545          *
546          * to avoid fragmentation, we do a linear search from the beginning
547          * and take the first free slot we find.
548          */
549         new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
550         /*
551          * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
552          * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
553          * set.  It may also return a bit number greater than TAGID_MAX due
554          * to rounding of the number of bits in the vector up to a multiple
555          * of the vector word size at declaration/allocation time.
556          */
557         if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
558                 return (0);
559
560         /* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
561         BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
562         
563         /* allocate and fill new struct pf_tagname */
564         tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
565         if (tag == NULL)
566                 return (0);
567         strlcpy(tag->name, tagname, sizeof(tag->name));
568         tag->tag = new_tagid;
569         tag->ref = 1;
570
571         /* Insert into namehash */
572         TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
573
574         /* Insert into taghash */
575         index = tag2hashindex(ts, new_tagid);
576         TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
577         
578         return (tag->tag);
579 }
580
/*
 * Drop one reference on 'tag' in tagset 'ts'.  On the last release
 * the entry is unhooked from both hash tables, its id is returned to
 * the avail set and the entry is freed.  Caller must hold the rules
 * write lock.
 */
static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
        struct pf_tagname       *t;
        uint16_t                 index;

        PF_RULES_WASSERT();

        index = tag2hashindex(ts, tag);
        TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
                if (tag == t->tag) {
                        if (--t->ref == 0) {
                                TAILQ_REMOVE(&ts->taghash[index], t,
                                    taghash_entries);
                                /* 'index' is reused for the name hash here. */
                                index = tagname2hashindex(ts, t->name);
                                TAILQ_REMOVE(&ts->namehash[index], t,
                                    namehash_entries);
                                /* Bits are 0-based for BIT_SET() */
                                BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
                                uma_zfree(V_pf_tag_z, t);
                        }
                        break;
                }
}
605
/* Allocate or reference the tag id for 'tagname' in the rule tagset. */
static u_int16_t
pf_tagname2tag(char *tagname)
{
        return (tagname2tag(&V_pf_tags, tagname));
}
611
612 #ifdef ALTQ
/* Allocate or reference the queue id for 'qname' in the queue tagset. */
static u_int32_t
pf_qname2qid(char *qname)
{
        return ((u_int32_t)tagname2tag(&V_pf_qids, qname));
}
618
/* Drop one reference on queue id 'qid' in the queue tagset. */
static void
pf_qid_unref(u_int32_t qid)
{
        tag_unref(&V_pf_qids, (u_int16_t)qid);
}
624
/*
 * Begin an ALTQ transaction: purge anything left on the inactive altq
 * lists, then open a new inactive set under a fresh ticket.  Caller
 * must hold the rules write lock.
 */
static int
pf_begin_altq(u_int32_t *ticket)
{
        struct pf_altq  *altq, *tmp;
        int              error = 0;

        PF_RULES_WASSERT();

        /* Purge the old altq lists */
        TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
                if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
                        /* detach and destroy the discipline */
                        error = altq_remove(altq);
                }
                free(altq, M_PFALTQ);
        }
        TAILQ_INIT(V_pf_altq_ifs_inactive);
        /* Per-queue entries only hold a qid reference, no discipline. */
        TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
                pf_qid_unref(altq->qid);
                free(altq, M_PFALTQ);
        }
        TAILQ_INIT(V_pf_altqs_inactive);
        if (error)
                return (error);
        *ticket = ++V_ticket_altqs_inactive;
        V_altqs_inactive_open = 1;
        return (0);
}
653
/*
 * Abort an ALTQ transaction: if 'ticket' identifies the currently
 * open inactive set, purge the inactive lists and close the
 * transaction.  A stale ticket or closed transaction is silently
 * ignored (returns 0).  Caller must hold the rules write lock.
 */
static int
pf_rollback_altq(u_int32_t ticket)
{
        struct pf_altq  *altq, *tmp;
        int              error = 0;

        PF_RULES_WASSERT();

        if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
                return (0);
        /* Purge the old altq lists */
        TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
                if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
                        /* detach and destroy the discipline */
                        error = altq_remove(altq);
                }
                free(altq, M_PFALTQ);
        }
        TAILQ_INIT(V_pf_altq_ifs_inactive);
        /* Per-queue entries only hold a qid reference, no discipline. */
        TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
                pf_qid_unref(altq->qid);
                free(altq, M_PFALTQ);
        }
        TAILQ_INIT(V_pf_altqs_inactive);
        V_altqs_inactive_open = 0;
        return (error);
}
681
/*
 * Commit the inactive ALTQ set: swap the active and inactive lists,
 * attach/enable the new disciplines, then tear down the old ones.
 * Fails with EBUSY on a stale ticket.  Caller must hold the rules
 * write lock.
 */
static int
pf_commit_altq(u_int32_t ticket)
{
        struct pf_altqqueue     *old_altqs, *old_altq_ifs;
        struct pf_altq          *altq, *tmp;
        int                      err, error = 0;

        PF_RULES_WASSERT();

        if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
                return (EBUSY);

        /* swap altqs, keep the old. */
        old_altqs = V_pf_altqs_active;
        old_altq_ifs = V_pf_altq_ifs_active;
        V_pf_altqs_active = V_pf_altqs_inactive;
        V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
        V_pf_altqs_inactive = old_altqs;
        V_pf_altq_ifs_inactive = old_altq_ifs;
        V_ticket_altqs_active = V_ticket_altqs_inactive;

        /* Attach new disciplines */
        TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
                if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
                        /* attach the discipline */
                        error = altq_pfattach(altq);
                        if (error == 0 && V_pf_altq_running)
                                error = pf_enable_altq(altq);
                        /*
                         * NOTE(review): this early return leaves the lists
                         * already swapped and the transaction still open;
                         * the old (now-inactive) set is not purged here.
                         */
                        if (error != 0)
                                return (error);
                }
        }

        /* Purge the old altq lists */
        TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
                if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
                        /* detach and destroy the discipline */
                        if (V_pf_altq_running)
                                error = pf_disable_altq(altq);
                        err = altq_pfdetach(altq);
                        if (err != 0 && error == 0)
                                error = err;
                        err = altq_remove(altq);
                        if (err != 0 && error == 0)
                                error = err;
                }
                free(altq, M_PFALTQ);
        }
        TAILQ_INIT(V_pf_altq_ifs_inactive);
        /* Per-queue entries only hold a qid reference, no discipline. */
        TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
                pf_qid_unref(altq->qid);
                free(altq, M_PFALTQ);
        }
        TAILQ_INIT(V_pf_altqs_inactive);

        V_altqs_inactive_open = 0;
        return (error);
}
740
741 static int
742 pf_enable_altq(struct pf_altq *altq)
743 {
744         struct ifnet            *ifp;
745         struct tb_profile        tb;
746         int                      error = 0;
747
748         if ((ifp = ifunit(altq->ifname)) == NULL)
749                 return (EINVAL);
750
751         if (ifp->if_snd.altq_type != ALTQT_NONE)
752                 error = altq_enable(&ifp->if_snd);
753
754         /* set tokenbucket regulator */
755         if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
756                 tb.rate = altq->ifbandwidth;
757                 tb.depth = altq->tbrsize;
758                 error = tbr_set(&ifp->if_snd, &tb);
759         }
760
761         return (error);
762 }
763
764 static int
765 pf_disable_altq(struct pf_altq *altq)
766 {
767         struct ifnet            *ifp;
768         struct tb_profile        tb;
769         int                      error;
770
771         if ((ifp = ifunit(altq->ifname)) == NULL)
772                 return (EINVAL);
773
774         /*
775          * when the discipline is no longer referenced, it was overridden
776          * by a new one.  if so, just return.
777          */
778         if (altq->altq_disc != ifp->if_snd.altq_disc)
779                 return (0);
780
781         error = altq_disable(&ifp->if_snd);
782
783         if (error == 0) {
784                 /* clear tokenbucket regulator */
785                 tb.rate = 0;
786                 error = tbr_set(&ifp->if_snd, &tb);
787         }
788
789         return (error);
790 }
791
/*
 * Re-instantiate one altq entry for an interface arrival/departure
 * event.  When the entry's interface is absent (or is the departing
 * interface), it is only marked PFALTQ_FLAG_IF_REMOVED; otherwise the
 * discipline is added on that interface.  On error 'altq' is freed.
 */
static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
        struct ifnet    *ifp1;
        int              error = 0;

        /* Start from a clean flag; it is set again below when needed. */
        altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
        if ((ifp1 = ifunit(altq->ifname)) == NULL ||
            (remove && ifp1 == ifp)) {
                altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
        } else {
                error = altq_add(ifp1, altq);

                /* A stale ticket overrides any altq_add() success. */
                if (ticket != V_ticket_altqs_inactive)
                        error = EBUSY;

                if (error)
                        free(altq, M_PFALTQ);
        }

        return (error);
}
816
/*
 * Handle an interface arrival/departure for ALTQ: rebuild the current
 * active configuration into a fresh inactive set (marking entries
 * whose interface is gone) and commit it, or roll the transaction
 * back on failure.
 */
void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
        struct pf_altq  *a1, *a2, *a3;
        u_int32_t        ticket;
        int              error = 0;

        /*
         * No need to re-evaluate the configuration for events on interfaces
         * that do not support ALTQ, as it's not possible for such
         * interfaces to be part of the configuration.
         */
        if (!ALTQ_IS_READY(&ifp->if_snd))
                return;

        /* Interrupt userland queue modifications */
        if (V_altqs_inactive_open)
                pf_rollback_altq(V_ticket_altqs_inactive);

        /* Start new altq ruleset */
        if (pf_begin_altq(&ticket))
                return;

        /* Copy the current active set */
        TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
                a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
                if (a2 == NULL) {
                        error = ENOMEM;
                        break;
                }
                bcopy(a1, a2, sizeof(struct pf_altq));

                /* On error a2 has already been freed by the callee. */
                error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
                if (error)
                        break;

                TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
        }
        if (error)
                goto out;
        /* Copy the per-queue entries, re-resolving qids and disciplines. */
        TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
                a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
                if (a2 == NULL) {
                        error = ENOMEM;
                        break;
                }
                bcopy(a1, a2, sizeof(struct pf_altq));

                if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
                        error = EBUSY;
                        free(a2, M_PFALTQ);
                        break;
                }
                a2->altq_disc = NULL;
                /* Borrow the discipline from the matching interface entry. */
                TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
                        if (strncmp(a3->ifname, a2->ifname,
                                IFNAMSIZ) == 0) {
                                a2->altq_disc = a3->altq_disc;
                                break;
                        }
                }
                error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
                if (error)
                        break;

                TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
        }

out:
        if (error != 0)
                pf_rollback_altq(ticket);
        else
                pf_commit_altq(ticket);
}
891 #endif /* ALTQ */
892
893 static int
894 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
895 {
896         struct pf_ruleset       *rs;
897         struct pf_rule          *rule;
898
899         PF_RULES_WASSERT();
900
901         if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
902                 return (EINVAL);
903         rs = pf_find_or_create_ruleset(anchor);
904         if (rs == NULL)
905                 return (EINVAL);
906         while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
907                 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
908                 rs->rules[rs_num].inactive.rcount--;
909         }
910         *ticket = ++rs->rules[rs_num].inactive.ticket;
911         rs->rules[rs_num].inactive.open = 1;
912         return (0);
913 }
914
915 static int
916 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
917 {
918         struct pf_ruleset       *rs;
919         struct pf_rule          *rule;
920
921         PF_RULES_WASSERT();
922
923         if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
924                 return (EINVAL);
925         rs = pf_find_ruleset(anchor);
926         if (rs == NULL || !rs->rules[rs_num].inactive.open ||
927             rs->rules[rs_num].inactive.ticket != ticket)
928                 return (0);
929         while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
930                 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
931                 rs->rules[rs_num].inactive.rcount--;
932         }
933         rs->rules[rs_num].inactive.open = 0;
934         return (0);
935 }
936
/*
 * Helpers that feed individual rule fields into an MD5 context for the
 * ruleset checksum.  Multi-byte integers are converted to network byte
 * order first so the digest does not depend on host endianness.
 */
#define PF_MD5_UPD(st, elm)                                             \
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

/* Hash a NUL-terminated string field (terminator excluded). */
#define PF_MD5_UPD_STR(st, elm)                                         \
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

/* Hash a 32-bit field in network byte order; `stor' is caller scratch. */
#define PF_MD5_UPD_HTONL(st, elm, stor) do {                            \
		(stor) = htonl((st)->elm);                              \
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

/* Hash a 16-bit field in network byte order; `stor' is caller scratch. */
#define PF_MD5_UPD_HTONS(st, elm, stor) do {                            \
		(stor) = htons((st)->elm);                              \
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)
952
/*
 * Fold the match-relevant fields of a rule address into the MD5
 * context.  Only the fields meaningful for the given address type are
 * hashed.  The update sequence defines the digest, so field order must
 * never change — presumably peers comparing pf_chksum values depend on
 * it (see pf_setup_pfsync_matching); confirm before reordering.
 */
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
		case PF_ADDR_DYNIFTL:
			PF_MD5_UPD(pfr, addr.v.ifname);
			PF_MD5_UPD(pfr, addr.iflags);
			break;
		case PF_ADDR_TABLE:
			PF_MD5_UPD(pfr, addr.v.tblname);
			break;
		case PF_ADDR_ADDRMASK:
			/* XXX ignore af? */
			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
			break;
	}

	/* Port range and its matching operator apply to every type. */
	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}
977
/*
 * Fold one rule's identity-defining fields into the MD5 context.
 * Counters and other runtime state are deliberately excluded, so two
 * configurations with identical rules hash the same.  As with
 * pf_hash_rule_addr(), the field order is part of the digest format
 * and must not be changed.  `x' and `y' are byte-order scratch space
 * for the HTONS/HTONL macros.
 */
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
1016
/*
 * Commit a rule transaction: after validating the caller's ticket,
 * swap the staged inactive rule list of `anchor'/`rs_num' into place
 * as the active one (the old active list moves to the inactive slot),
 * recompute skip steps, then purge the retired rules.  Returns EBUSY
 * on a stale ticket, or an error from checksum setup.
 */
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 error;
	u_int32_t		 old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	/* The retired list parks in the inactive slot until purged below. */
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);

	return (0);
}
1074
/*
 * Walk every inactive ruleset (scrub excluded) to (a) rebuild its rule
 * pointer array, indexed by rule number, and (b) accumulate an MD5
 * digest of all rules, stored into V_pf_status.pf_chksum.  Called from
 * pf_commit_rules() for the main ruleset just before the swap.
 *
 * NOTE(review): ptr_array is sized from inactive.rcount and indexed by
 * rule->nr — assumes every nr is < rcount (i.e. numbering has no gaps
 * above rcount); presumably maintained by the rule-add path — confirm.
 */
static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		/* Discard any stale array before rebuilding. */
		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}
1114
1115 static int
1116 pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
1117     sa_family_t af)
1118 {
1119         int error = 0;
1120
1121         switch (addr->type) {
1122         case PF_ADDR_TABLE:
1123                 addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
1124                 if (addr->p.tbl == NULL)
1125                         error = ENOMEM;
1126                 break;
1127         case PF_ADDR_DYNIFTL:
1128                 error = pfi_dynaddr_setup(addr, af);
1129                 break;
1130         }
1131
1132         return (error);
1133 }
1134
1135 static void
1136 pf_addr_copyout(struct pf_addr_wrap *addr)
1137 {
1138
1139         switch (addr->type) {
1140         case PF_ADDR_DYNIFTL:
1141                 pfi_dynaddr_copyout(addr);
1142                 break;
1143         case PF_ADDR_TABLE:
1144                 pf_tbladdr_copyout(addr);
1145                 break;
1146         }
1147 }
1148
1149 #ifdef ALTQ
1150 /*
1151  * Handle export of struct pf_kaltq to user binaries that may be using any
1152  * version of struct pf_altq.
1153  */
1154 static int
1155 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
1156 {
1157         u_int32_t version;
1158         
1159         if (ioc_size == sizeof(struct pfioc_altq_v0))
1160                 version = 0;
1161         else
1162                 version = pa->version;
1163
1164         if (version > PFIOC_ALTQ_VERSION)
1165                 return (EINVAL);
1166
1167 #define ASSIGN(x) exported_q->x = q->x
1168 #define COPY(x) \
1169         bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
1170 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
1171 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
1172
1173         switch (version) {
1174         case 0: {
1175                 struct pf_altq_v0 *exported_q =
1176                     &((struct pfioc_altq_v0 *)pa)->altq;
1177
1178                 COPY(ifname);
1179
1180                 ASSIGN(scheduler);
1181                 ASSIGN(tbrsize);
1182                 exported_q->tbrsize = SATU16(q->tbrsize);
1183                 exported_q->ifbandwidth = SATU32(q->ifbandwidth);
1184
1185                 COPY(qname);
1186                 COPY(parent);
1187                 ASSIGN(parent_qid);
1188                 exported_q->bandwidth = SATU32(q->bandwidth);
1189                 ASSIGN(priority);
1190                 ASSIGN(local_flags);
1191
1192                 ASSIGN(qlimit);
1193                 ASSIGN(flags);
1194
1195                 if (q->scheduler == ALTQT_HFSC) {
1196 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
1197 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
1198                             SATU32(q->pq_u.hfsc_opts.x)
1199                         
1200                         ASSIGN_OPT_SATU32(rtsc_m1);
1201                         ASSIGN_OPT(rtsc_d);
1202                         ASSIGN_OPT_SATU32(rtsc_m2);
1203
1204                         ASSIGN_OPT_SATU32(lssc_m1);
1205                         ASSIGN_OPT(lssc_d);
1206                         ASSIGN_OPT_SATU32(lssc_m2);
1207
1208                         ASSIGN_OPT_SATU32(ulsc_m1);
1209                         ASSIGN_OPT(ulsc_d);
1210                         ASSIGN_OPT_SATU32(ulsc_m2);
1211
1212                         ASSIGN_OPT(flags);
1213                         
1214 #undef ASSIGN_OPT
1215 #undef ASSIGN_OPT_SATU32
1216                 } else
1217                         COPY(pq_u);
1218
1219                 ASSIGN(qid);
1220                 break;
1221         }
1222         case 1: {
1223                 struct pf_altq_v1 *exported_q =
1224                     &((struct pfioc_altq_v1 *)pa)->altq;
1225
1226                 COPY(ifname);
1227
1228                 ASSIGN(scheduler);
1229                 ASSIGN(tbrsize);
1230                 ASSIGN(ifbandwidth);
1231
1232                 COPY(qname);
1233                 COPY(parent);
1234                 ASSIGN(parent_qid);
1235                 ASSIGN(bandwidth);
1236                 ASSIGN(priority);
1237                 ASSIGN(local_flags);
1238
1239                 ASSIGN(qlimit);
1240                 ASSIGN(flags);
1241                 COPY(pq_u);
1242
1243                 ASSIGN(qid);
1244                 break;
1245         }
1246         default:
1247                 panic("%s: unhandled struct pfioc_altq version", __func__);
1248                 break;
1249         }
1250
1251 #undef ASSIGN
1252 #undef COPY
1253 #undef SATU16
1254 #undef SATU32
1255
1256         return (0);
1257 }
1258
1259 /*
1260  * Handle import to struct pf_kaltq of struct pf_altq from user binaries
1261  * that may be using any version of it.
1262  */
static int
pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
{
	u_int32_t version;

	/* v0 predates the version field; its size is the only marker. */
	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	/* Reject structs from a userland newer than this kernel. */
	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) q->x = imported_q->x
#define COPY(x) \
	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))

	switch (version) {
	case 0: {
		/* v0 fields are narrower; all widenings below are lossless. */
		struct pf_altq_v0 *imported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (imported_q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x

			/*
			 * The m1 and m2 parameters are being copied from
			 * 32-bit to 64-bit.
			 */
			ASSIGN_OPT(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT(rtsc_m2);

			ASSIGN_OPT(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT(lssc_m2);

			ASSIGN_OPT(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		/* v1 matches the kernel layout; straight field copies. */
		struct pf_altq_v1 *imported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY

	return (0);
}
1363
1364 static struct pf_altq *
1365 pf_altq_get_nth_active(u_int32_t n)
1366 {
1367         struct pf_altq          *altq;
1368         u_int32_t                nr;
1369
1370         nr = 0;
1371         TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1372                 if (nr == n)
1373                         return (altq);
1374                 nr++;
1375         }
1376
1377         TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1378                 if (nr == n)
1379                         return (altq);
1380                 nr++;
1381         }
1382
1383         return (NULL);
1384 }
1385 #endif /* ALTQ */
1386
1387 static int
1388 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
1389 {
1390         int                      error = 0;
1391         PF_RULES_RLOCK_TRACKER;
1392
1393         /* XXX keep in sync with switch() below */
1394         if (securelevel_gt(td->td_ucred, 2))
1395                 switch (cmd) {
1396                 case DIOCGETRULES:
1397                 case DIOCGETRULE:
1398                 case DIOCGETADDRS:
1399                 case DIOCGETADDR:
1400                 case DIOCGETSTATE:
1401                 case DIOCSETSTATUSIF:
1402                 case DIOCGETSTATUS:
1403                 case DIOCCLRSTATUS:
1404                 case DIOCNATLOOK:
1405                 case DIOCSETDEBUG:
1406                 case DIOCGETSTATES:
1407                 case DIOCGETTIMEOUT:
1408                 case DIOCCLRRULECTRS:
1409                 case DIOCGETLIMIT:
1410                 case DIOCGETALTQSV0:
1411                 case DIOCGETALTQSV1:
1412                 case DIOCGETALTQV0:
1413                 case DIOCGETALTQV1:
1414                 case DIOCGETQSTATSV0:
1415                 case DIOCGETQSTATSV1:
1416                 case DIOCGETRULESETS:
1417                 case DIOCGETRULESET:
1418                 case DIOCRGETTABLES:
1419                 case DIOCRGETTSTATS:
1420                 case DIOCRCLRTSTATS:
1421                 case DIOCRCLRADDRS:
1422                 case DIOCRADDADDRS:
1423                 case DIOCRDELADDRS:
1424                 case DIOCRSETADDRS:
1425                 case DIOCRGETADDRS:
1426                 case DIOCRGETASTATS:
1427                 case DIOCRCLRASTATS:
1428                 case DIOCRTSTADDRS:
1429                 case DIOCOSFPGET:
1430                 case DIOCGETSRCNODES:
1431                 case DIOCCLRSRCNODES:
1432                 case DIOCIGETIFACES:
1433                 case DIOCGIFSPEEDV0:
1434                 case DIOCGIFSPEEDV1:
1435                 case DIOCSETIFFLAG:
1436                 case DIOCCLRIFFLAG:
1437                         break;
1438                 case DIOCRCLRTABLES:
1439                 case DIOCRADDTABLES:
1440                 case DIOCRDELTABLES:
1441                 case DIOCRSETTFLAGS:
1442                         if (((struct pfioc_table *)addr)->pfrio_flags &
1443                             PFR_FLAG_DUMMY)
1444                                 break; /* dummy operation ok */
1445                         return (EPERM);
1446                 default:
1447                         return (EPERM);
1448                 }
1449
1450         if (!(flags & FWRITE))
1451                 switch (cmd) {
1452                 case DIOCGETRULES:
1453                 case DIOCGETADDRS:
1454                 case DIOCGETADDR:
1455                 case DIOCGETSTATE:
1456                 case DIOCGETSTATUS:
1457                 case DIOCGETSTATES:
1458                 case DIOCGETTIMEOUT:
1459                 case DIOCGETLIMIT:
1460                 case DIOCGETALTQSV0:
1461                 case DIOCGETALTQSV1:
1462                 case DIOCGETALTQV0:
1463                 case DIOCGETALTQV1:
1464                 case DIOCGETQSTATSV0:
1465                 case DIOCGETQSTATSV1:
1466                 case DIOCGETRULESETS:
1467                 case DIOCGETRULESET:
1468                 case DIOCNATLOOK:
1469                 case DIOCRGETTABLES:
1470                 case DIOCRGETTSTATS:
1471                 case DIOCRGETADDRS:
1472                 case DIOCRGETASTATS:
1473                 case DIOCRTSTADDRS:
1474                 case DIOCOSFPGET:
1475                 case DIOCGETSRCNODES:
1476                 case DIOCIGETIFACES:
1477                 case DIOCGIFSPEEDV1:
1478                 case DIOCGIFSPEEDV0:
1479                         break;
1480                 case DIOCRCLRTABLES:
1481                 case DIOCRADDTABLES:
1482                 case DIOCRDELTABLES:
1483                 case DIOCRCLRTSTATS:
1484                 case DIOCRCLRADDRS:
1485                 case DIOCRADDADDRS:
1486                 case DIOCRDELADDRS:
1487                 case DIOCRSETADDRS:
1488                 case DIOCRSETTFLAGS:
1489                         if (((struct pfioc_table *)addr)->pfrio_flags &
1490                             PFR_FLAG_DUMMY) {
1491                                 flags |= FWRITE; /* need write lock for dummy */
1492                                 break; /* dummy operation ok */
1493                         }
1494                         return (EACCES);
1495                 case DIOCGETRULE:
1496                         if (((struct pfioc_rule *)addr)->action ==
1497                             PF_GET_CLR_CNTR)
1498                                 return (EACCES);
1499                         break;
1500                 default:
1501                         return (EACCES);
1502                 }
1503
1504         CURVNET_SET(TD_TO_VNET(td));
1505
1506         switch (cmd) {
1507         case DIOCSTART:
1508                 sx_xlock(&pf_ioctl_lock);
1509                 if (V_pf_status.running)
1510                         error = EEXIST;
1511                 else {
1512                         int cpu;
1513
1514                         error = hook_pf();
1515                         if (error) {
1516                                 DPFPRINTF(PF_DEBUG_MISC,
1517                                     ("pf: pfil registration failed\n"));
1518                                 break;
1519                         }
1520                         V_pf_status.running = 1;
1521                         V_pf_status.since = time_second;
1522
1523                         CPU_FOREACH(cpu)
1524                                 V_pf_stateid[cpu] = time_second;
1525
1526                         DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1527                 }
1528                 break;
1529
1530         case DIOCSTOP:
1531                 sx_xlock(&pf_ioctl_lock);
1532                 if (!V_pf_status.running)
1533                         error = ENOENT;
1534                 else {
1535                         V_pf_status.running = 0;
1536                         error = dehook_pf();
1537                         if (error) {
1538                                 V_pf_status.running = 1;
1539                                 DPFPRINTF(PF_DEBUG_MISC,
1540                                     ("pf: pfil unregistration failed\n"));
1541                         }
1542                         V_pf_status.since = time_second;
1543                         DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1544                 }
1545                 break;
1546
1547         case DIOCADDRULE: {
1548                 struct pfioc_rule       *pr = (struct pfioc_rule *)addr;
1549                 struct pf_ruleset       *ruleset;
1550                 struct pf_rule          *rule, *tail;
1551                 struct pf_pooladdr      *pa;
1552                 struct pfi_kif          *kif = NULL;
1553                 int                      rs_num;
1554
1555                 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1556                         error = EINVAL;
1557                         break;
1558                 }
1559                 if (pr->rule.src.addr.p.dyn != NULL ||
1560                     pr->rule.dst.addr.p.dyn != NULL) {
1561                         error = EINVAL;
1562                         break;
1563                 }
1564 #ifndef INET
1565                 if (pr->rule.af == AF_INET) {
1566                         error = EAFNOSUPPORT;
1567                         break;
1568                 }
1569 #endif /* INET */
1570 #ifndef INET6
1571                 if (pr->rule.af == AF_INET6) {
1572                         error = EAFNOSUPPORT;
1573                         break;
1574                 }
1575 #endif /* INET6 */
1576
1577                 rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
1578                 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1579                 if (rule->ifname[0])
1580                         kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
1581                 rule->states_cur = counter_u64_alloc(M_WAITOK);
1582                 rule->states_tot = counter_u64_alloc(M_WAITOK);
1583                 rule->src_nodes = counter_u64_alloc(M_WAITOK);
1584                 rule->cuid = td->td_ucred->cr_ruid;
1585                 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
1586                 TAILQ_INIT(&rule->rpool.list);
1587
1588 #define ERROUT(x)       { error = (x); goto DIOCADDRULE_error; }
1589
1590                 PF_RULES_WLOCK();
1591                 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1592                 ruleset = pf_find_ruleset(pr->anchor);
1593                 if (ruleset == NULL)
1594                         ERROUT(EINVAL);
1595                 rs_num = pf_get_ruleset_number(pr->rule.action);
1596                 if (rs_num >= PF_RULESET_MAX)
1597                         ERROUT(EINVAL);
1598                 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1599                         DPFPRINTF(PF_DEBUG_MISC,
1600                             ("ticket: %d != [%d]%d\n", pr->ticket, rs_num,
1601                             ruleset->rules[rs_num].inactive.ticket));
1602                         ERROUT(EBUSY);
1603                 }
1604                 if (pr->pool_ticket != V_ticket_pabuf) {
1605                         DPFPRINTF(PF_DEBUG_MISC,
1606                             ("pool_ticket: %d != %d\n", pr->pool_ticket,
1607                             V_ticket_pabuf));
1608                         ERROUT(EBUSY);
1609                 }
1610
1611                 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1612                     pf_rulequeue);
1613                 if (tail)
1614                         rule->nr = tail->nr + 1;
1615                 else
1616                         rule->nr = 0;
1617                 if (rule->ifname[0]) {
1618                         rule->kif = pfi_kif_attach(kif, rule->ifname);
1619                         pfi_kif_ref(rule->kif);
1620                 } else
1621                         rule->kif = NULL;
1622
1623                 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
1624                         error = EBUSY;
1625
1626 #ifdef ALTQ
1627                 /* set queue IDs */
1628                 if (rule->qname[0] != 0) {
1629                         if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1630                                 error = EBUSY;
1631                         else if (rule->pqname[0] != 0) {
1632                                 if ((rule->pqid =
1633                                     pf_qname2qid(rule->pqname)) == 0)
1634                                         error = EBUSY;
1635                         } else
1636                                 rule->pqid = rule->qid;
1637                 }
1638 #endif
		/*
		 * Resolve tag names into numeric tags; a return of 0 from
		 * pf_tagname2tag() indicates failure and is mapped to EBUSY.
		 */
		if (rule->tagname[0])
			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
				error = EBUSY;
		if (rule->match_tagname[0])
			if ((rule->match_tag =
			    pf_tagname2tag(rule->match_tagname)) == 0)
				error = EBUSY;
		/* Route options are meaningless without a direction. */
		if (rule->rt && !rule->direction)
			error = EINVAL;
		if (!rule->log)
			rule->logif = 0;
		if (rule->logif >= PFLOGIFS_MAX)
			error = EINVAL;
		/* Set up dynamic/table addresses for both endpoints. */
		if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
			error = ENOMEM;
		if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
			error = ENOMEM;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		if (rule->scrub_flags & PFSTATE_SETPRIO &&
		    (rule->set_prio[0] > PF_PRIO_MAX ||
		    rule->set_prio[1] > PF_PRIO_MAX))
			error = EINVAL;
		/* Attach any tables referenced from the pool address buffer. */
		TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
			if (pa->addr.type == PF_ADDR_TABLE) {
				pa->addr.p.tbl = pfr_attach_table(ruleset,
				    pa->addr.v.tblname);
				if (pa->addr.p.tbl == NULL)
					error = ENOMEM;
			}

		rule->overload_tbl = NULL;
		if (rule->overload_tblname[0]) {
			if ((rule->overload_tbl = pfr_attach_table(ruleset,
			    rule->overload_tblname)) == NULL)
				error = EINVAL;
			else
				rule->overload_tbl->pfrkt_flags |=
				    PFR_TFLAG_ACTIVE;
		}

		/* Move the staged pool addresses onto the rule itself. */
		pf_mv_pool(&V_pf_pabuf, &rule->rpool.list);
		/*
		 * Translation rules outside an anchor, and rules with a
		 * route option, require a non-empty address pool.
		 */
		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
		    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
		    (rule->rt > PF_NOPFROUTE)) &&
		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
			error = EINVAL;

		if (error) {
			/*
			 * The rule acquired tags/tables/pool above;
			 * pf_free_rule() releases all of them.
			 */
			pf_free_rule(rule);
			PF_RULES_WUNLOCK();
			break;
		}

		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
		rule->evaluations = rule->packets[0] = rule->packets[1] =
		    rule->bytes[0] = rule->bytes[1] = 0;
		/*
		 * Link onto the inactive list; presumably swapped in by a
		 * later commit ioctl — not visible in this chunk.
		 */
		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
		    rule, entries);
		ruleset->rules[rs_num].inactive.rcount++;
		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCADDRULE_error:
		/*
		 * Early-error path: the rule was never linked or given
		 * resources beyond its counters, so free those manually.
		 */
		PF_RULES_WUNLOCK();
		counter_u64_free(rule->states_cur);
		counter_u64_free(rule->states_tot);
		counter_u64_free(rule->src_nodes);
		free(rule, M_PFRULE);
		if (kif)
			free(kif, PFI_MTYPE);
		break;
	}
1713
	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;
		int			 rs_num;

		/*
		 * Report the rule count of the active ruleset and hand out
		 * the ticket that DIOCGETRULE requires on each fetch.
		 */
		PF_RULES_WLOCK();
		/* Ensure the copied-in anchor path is NUL-terminated. */
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		/* Rules are numbered sequentially; last nr + 1 == count. */
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		PF_RULES_WUNLOCK();
		break;
	}
1744
	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 rs_num, i;

		/*
		 * Copy out rule number pr->nr of the active ruleset.
		 * The write lock is needed because PF_GET_CLR_CNTR
		 * modifies the rule's counters below.
		 */
		PF_RULES_WLOCK();
		/* Ensure the copied-in anchor path is NUL-terminated. */
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		/*
		 * A stale ticket means the ruleset changed since the
		 * caller's DIOCGETRULES; make them start over.
		 */
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}
		/* Linear scan for the requested rule number. */
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}
		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
		/* Snapshot the per-CPU counters into plain integers. */
		pr->rule.u_states_cur = counter_u64_fetch(rule->states_cur);
		pr->rule.u_states_tot = counter_u64_fetch(rule->states_tot);
		pr->rule.u_src_nodes = counter_u64_fetch(rule->src_nodes);
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}
		pf_addr_copyout(&pr->rule.src.addr);
		pf_addr_copyout(&pr->rule.dst.addr);
		/* Translate skip-step pointers to rule numbers (-1 = none). */
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = -1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		if (pr->action == PF_GET_CLR_CNTR) {
			/*
			 * Reset evaluation/packet/byte stats and the total
			 * state counter; the current-states counter is
			 * intentionally left alone.
			 */
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
			counter_u64_zero(rule->states_tot);
		}
		PF_RULES_WUNLOCK();
		break;
	}
1805
	case DIOCCHANGERULE: {
		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*oldrule = NULL, *newrule = NULL;
		struct pfi_kif		*kif = NULL;
		struct pf_pooladdr	*pa;
		u_int32_t		 nr = 0;
		int			 rs_num;

		/*
		 * Modify the active ruleset in place: add a rule at
		 * head/tail/before/after an existing one, remove one, or
		 * just hand out a change ticket (PF_CHANGE_GET_TICKET).
		 */
		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}
		/* return_icmp carries the ICMP type in its high byte. */
		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
#ifndef INET
			if (pcr->rule.af == AF_INET) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (pcr->rule.af == AF_INET6) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			/*
			 * Perform all sleepable (M_WAITOK) allocations
			 * before taking the rules lock.
			 */
			newrule = malloc(sizeof(*newrule), M_PFRULE, M_WAITOK);
			bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
			if (newrule->ifname[0])
				kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
			newrule->states_cur = counter_u64_alloc(M_WAITOK);
			newrule->states_tot = counter_u64_alloc(M_WAITOK);
			newrule->src_nodes = counter_u64_alloc(M_WAITOK);
			/* Record creator credentials for rule accounting. */
			newrule->cuid = td->td_ucred->cr_ruid;
			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
			TAILQ_INIT(&newrule->rpool.list);
		}

/* Jump to the common error exit below, releasing the lock there. */
#define ERROUT(x)	{ error = (x); goto DIOCCHANGERULE_error; }

		PF_RULES_WLOCK();
		/* Actions that consume the pool buffer need a valid ticket. */
		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != V_ticket_pabuf)
			ERROUT(EBUSY);

		ruleset = pf_find_ruleset(pcr->anchor);
		if (ruleset == NULL)
			ERROUT(EINVAL);

		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX)
			ERROUT(EINVAL);

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			/* Bump and return the ticket; ERROUT(0) is success. */
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			ERROUT(0);
		} else if (pcr->ticket !=
			    ruleset->rules[rs_num].active.ticket)
				ERROUT(EINVAL);

		if (pcr->action != PF_CHANGE_REMOVE) {
			/*
			 * Validation and resource attachment mirrors
			 * DIOCADDRULE; errors accumulate in 'error' and
			 * are checked once at the end.
			 */
			if (newrule->ifname[0]) {
				newrule->kif = pfi_kif_attach(kif,
				    newrule->ifname);
				pfi_kif_ref(newrule->kif);
			} else
				newrule->kif = NULL;

			if (newrule->rtableid > 0 &&
			    newrule->rtableid >= rt_numfibs)
				error = EBUSY;

#ifdef ALTQ
			/* set queue IDs */
			if (newrule->qname[0] != 0) {
				if ((newrule->qid =
				    pf_qname2qid(newrule->qname)) == 0)
					error = EBUSY;
				else if (newrule->pqname[0] != 0) {
					if ((newrule->pqid =
					    pf_qname2qid(newrule->pqname)) == 0)
						error = EBUSY;
				} else
					newrule->pqid = newrule->qid;
			}
#endif /* ALTQ */
			if (newrule->tagname[0])
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0)
					error = EBUSY;
			if (newrule->match_tagname[0])
				if ((newrule->match_tag = pf_tagname2tag(
				    newrule->match_tagname)) == 0)
					error = EBUSY;
			if (newrule->rt && !newrule->direction)
				error = EINVAL;
			if (!newrule->log)
				newrule->logif = 0;
			if (newrule->logif >= PFLOGIFS_MAX)
				error = EINVAL;
			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
				error = ENOMEM;
			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
				error = ENOMEM;
			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
				error = EINVAL;
			/* Attach tables referenced from the pool buffer. */
			TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
				if (pa->addr.type == PF_ADDR_TABLE) {
					pa->addr.p.tbl =
					    pfr_attach_table(ruleset,
					    pa->addr.v.tblname);
					if (pa->addr.p.tbl == NULL)
						error = ENOMEM;
				}

			newrule->overload_tbl = NULL;
			if (newrule->overload_tblname[0]) {
				if ((newrule->overload_tbl = pfr_attach_table(
				    ruleset, newrule->overload_tblname)) ==
				    NULL)
					error = EINVAL;
				else
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
			}

			pf_mv_pool(&V_pf_pabuf, &newrule->rpool.list);
			/*
			 * Translation/route rules outside an anchor need a
			 * non-empty address pool.  NOTE(review): the
			 * grouping here differs subtly from DIOCADDRULE
			 * (rt > PF_NOPFROUTE sits inside the !anchor
			 * clause) — confirm this is intended.
			 */
			if (((((newrule->action == PF_NAT) ||
			    (newrule->action == PF_RDR) ||
			    (newrule->action == PF_BINAT) ||
			    (newrule->rt > PF_NOPFROUTE)) &&
			    !newrule->anchor)) &&
			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
				error = EINVAL;

			if (error) {
				/* Rule owns tags/tables/pool; free it all. */
				pf_free_rule(newrule);
				PF_RULES_WUNLOCK();
				break;
			}

			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
			newrule->evaluations = 0;
			newrule->packets[0] = newrule->packets[1] = 0;
			newrule->bytes[0] = newrule->bytes[1] = 0;
		}
		/* Discard whatever is left in the staging pool buffer. */
		pf_empty_pool(&V_pf_pabuf);

		/* Locate the anchor rule for the insertion/removal point. */
		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(
			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
		else {
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				if (newrule != NULL)
					pf_free_rule(newrule);
				PF_RULES_WUNLOCK();
				error = EINVAL;
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
			    oldrule);
			ruleset->rules[rs_num].active.rcount--;
		} else {
			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(
				    ruleset->rules[rs_num].active.ptr,
				    newrule, entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(
				    ruleset->rules[rs_num].active.ptr,
				    oldrule, newrule, entries);
			ruleset->rules[rs_num].active.rcount++;
		}

		/* Renumber the whole list so rule->nr stays sequential. */
		nr = 0;
		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
			oldrule->nr = nr++;

		/* Invalidate tickets held by concurrent readers. */
		ruleset->rules[rs_num].active.ticket++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_ruleset(ruleset);

		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCCHANGERULE_error:
		/*
		 * Early-error path: newrule was never linked, so release
		 * only its counters and backing memory.
		 */
		PF_RULES_WUNLOCK();
		if (newrule != NULL) {
			counter_u64_free(newrule->states_cur);
			counter_u64_free(newrule->states_tot);
			counter_u64_free(newrule->src_nodes);
			free(newrule, M_PFRULE);
		}
		if (kif != NULL)
			free(kif, PFI_MTYPE);
		break;
	}
2026
	case DIOCCLRSTATES: {
		struct pf_state		*s;
		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
		u_int			 i, killed = 0;

		/*
		 * Remove all states, optionally restricted to those bound
		 * to the interface named in psk_ifname.  Walk every bucket
		 * of the state-ID hash.
		 */
		for (i = 0; i <= pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];

relock_DIOCCLRSTATES:
			PF_HASHROW_LOCK(ih);
			LIST_FOREACH(s, &ih->states, entry)
				if (!psk->psk_ifname[0] ||
				    !strcmp(psk->psk_ifname,
				    s->kif->pfik_name)) {
					/*
					 * Don't send out individual
					 * delete messages.
					 */
					s->state_flags |= PFSTATE_NOSYNC;
					/*
					 * pf_unlink_state() consumes the
					 * row lock, so restart the bucket
					 * scan from a fresh lock.
					 */
					pf_unlink_state(s, PF_ENTER_LOCKED);
					killed++;
					goto relock_DIOCCLRSTATES;
				}
			PF_HASHROW_UNLOCK(ih);
		}
		psk->psk_killed = killed;
		/* One bulk clear notification to pfsync, if it is loaded. */
		if (V_pfsync_clear_states_ptr != NULL)
			V_pfsync_clear_states_ptr(V_pf_status.hostid, psk->psk_ifname);
		break;
	}
2057
	case DIOCKILLSTATES: {
		struct pf_state		*s;
		struct pf_state_key	*sk;
		struct pf_addr		*srcaddr, *dstaddr;
		u_int16_t		 srcport, dstport;
		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
		u_int			 i, killed = 0;

		/*
		 * Kill states matching the caller's filter.  A non-zero
		 * state ID selects exactly one state by (id, creatorid);
		 * otherwise every state is matched against the supplied
		 * af/proto/address/port/label/ifname criteria.
		 */
		if (psk->psk_pfcmp.id) {
			if (psk->psk_pfcmp.creatorid == 0)
				psk->psk_pfcmp.creatorid = V_pf_status.hostid;
			if ((s = pf_find_state_byid(psk->psk_pfcmp.id,
			    psk->psk_pfcmp.creatorid))) {
				/* Found state comes back locked. */
				pf_unlink_state(s, PF_ENTER_LOCKED);
				psk->psk_killed = 1;
			}
			break;
		}

		for (i = 0; i <= pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];

relock_DIOCKILLSTATES:
			PF_HASHROW_LOCK(ih);
			LIST_FOREACH(s, &ih->states, entry) {
				/*
				 * Compare against the wire-side key; for
				 * outbound states the src/dst sense of the
				 * key is reversed.
				 */
				sk = s->key[PF_SK_WIRE];
				if (s->direction == PF_OUT) {
					srcaddr = &sk->addr[1];
					dstaddr = &sk->addr[0];
					srcport = sk->port[1];
					dstport = sk->port[0];
				} else {
					srcaddr = &sk->addr[0];
					dstaddr = &sk->addr[1];
					srcport = sk->port[0];
					dstport = sk->port[1];
				}

				/* Zero/empty criteria act as wildcards. */
				if ((!psk->psk_af || sk->af == psk->psk_af)
				    && (!psk->psk_proto || psk->psk_proto ==
				    sk->proto) &&
				    PF_MATCHA(psk->psk_src.neg,
				    &psk->psk_src.addr.v.a.addr,
				    &psk->psk_src.addr.v.a.mask,
				    srcaddr, sk->af) &&
				    PF_MATCHA(psk->psk_dst.neg,
				    &psk->psk_dst.addr.v.a.addr,
				    &psk->psk_dst.addr.v.a.mask,
				    dstaddr, sk->af) &&
				    (psk->psk_src.port_op == 0 ||
				    pf_match_port(psk->psk_src.port_op,
				    psk->psk_src.port[0], psk->psk_src.port[1],
				    srcport)) &&
				    (psk->psk_dst.port_op == 0 ||
				    pf_match_port(psk->psk_dst.port_op,
				    psk->psk_dst.port[0], psk->psk_dst.port[1],
				    dstport)) &&
				    (!psk->psk_label[0] ||
				    (s->rule.ptr->label[0] &&
				    !strcmp(psk->psk_label,
				    s->rule.ptr->label))) &&
				    (!psk->psk_ifname[0] ||
				    !strcmp(psk->psk_ifname,
				    s->kif->pfik_name))) {
					/*
					 * Unlinking consumes the row lock;
					 * restart this bucket's scan.
					 */
					pf_unlink_state(s, PF_ENTER_LOCKED);
					killed++;
					goto relock_DIOCKILLSTATES;
				}
			}
			PF_HASHROW_UNLOCK(ih);
		}
		psk->psk_killed = killed;
		break;
	}
2132
2133         case DIOCADDSTATE: {
2134                 struct pfioc_state      *ps = (struct pfioc_state *)addr;
2135                 struct pfsync_state     *sp = &ps->state;
2136
2137                 if (sp->timeout >= PFTM_MAX) {
2138                         error = EINVAL;
2139                         break;
2140                 }
2141                 if (V_pfsync_state_import_ptr != NULL) {
2142                         PF_RULES_RLOCK();
2143                         error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL);
2144                         PF_RULES_RUNLOCK();
2145                 } else
2146                         error = EOPNOTSUPP;
2147                 break;
2148         }
2149
	case DIOCGETSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*s;

		/* Export one state selected by (id, creatorid). */
		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		/* pf_find_state_byid() returned the state locked. */
		pfsync_state_export(&ps->state, s);
		PF_STATE_UNLOCK(s);
		break;
	}
2164
	case DIOCGETSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pf_state		*s;
		struct pfsync_state	*pstore, *p;
		int i, nr;

		/*
		 * Export all states.  A zero/negative ps_len is a size
		 * probe: report the buffer size needed and return.
		 */
		if (ps->ps_len <= 0) {
			nr = uma_zone_get_cur(V_pf_state_z);
			ps->ps_len = sizeof(struct pfsync_state) * nr;
			break;
		}

		/*
		 * NOTE(review): ps_len is caller-controlled and sizes a
		 * kernel M_WAITOK allocation without an upper bound here —
		 * confirm a limit is enforced elsewhere.
		 */
		p = pstore = malloc(ps->ps_len, M_TEMP, M_WAITOK | M_ZERO);
		nr = 0;

		for (i = 0; i <= pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];

			PF_HASHROW_LOCK(ih);
			LIST_FOREACH(s, &ih->states, entry) {

				/* Skip states already torn down. */
				if (s->timeout == PFTM_UNLINKED)
					continue;

				/* Stop once the caller's buffer is full. */
				if ((nr+1) * sizeof(*p) > ps->ps_len) {
					PF_HASHROW_UNLOCK(ih);
					goto DIOCGETSTATES_full;
				}
				pfsync_state_export(p, s);
				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(ih);
		}
DIOCGETSTATES_full:
		/* Copy out only the entries actually filled in. */
		error = copyout(pstore, ps->ps_states,
		    sizeof(struct pfsync_state) * nr);
		if (error) {
			free(pstore, M_TEMP);
			break;
		}
		ps->ps_len = sizeof(struct pfsync_state) * nr;
		free(pstore, M_TEMP);

		break;
	}
2211
	case DIOCGETSTATUS: {
		struct pf_status *s = (struct pf_status *)addr;

		/*
		 * Snapshot the global pf status, converting the per-CPU
		 * counter(9) objects into plain u64 values for userland.
		 */
		PF_RULES_RLOCK();
		s->running = V_pf_status.running;
		s->since   = V_pf_status.since;
		s->debug   = V_pf_status.debug;
		s->hostid  = V_pf_status.hostid;
		s->states  = V_pf_status.states;
		s->src_nodes = V_pf_status.src_nodes;

		for (int i = 0; i < PFRES_MAX; i++)
			s->counters[i] =
			    counter_u64_fetch(V_pf_status.counters[i]);
		for (int i = 0; i < LCNT_MAX; i++)
			s->lcounters[i] =
			    counter_u64_fetch(V_pf_status.lcounters[i]);
		for (int i = 0; i < FCNT_MAX; i++)
			s->fcounters[i] =
			    counter_u64_fetch(V_pf_status.fcounters[i]);
		for (int i = 0; i < SCNT_MAX; i++)
			s->scounters[i] =
			    counter_u64_fetch(V_pf_status.scounters[i]);

		bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ);
		bcopy(V_pf_status.pf_chksum, s->pf_chksum,
		    PF_MD5_DIGEST_LENGTH);

		/* Fill in per-interface stats for the status interface. */
		pfi_update_status(s->ifname, s);
		PF_RULES_RUNLOCK();
		break;
	}
2244
2245         case DIOCSETSTATUSIF: {
2246                 struct pfioc_if *pi = (struct pfioc_if *)addr;
2247
2248                 if (pi->ifname[0] == 0) {
2249                         bzero(V_pf_status.ifname, IFNAMSIZ);
2250                         break;
2251                 }
2252                 PF_RULES_WLOCK();
2253                 strlcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
2254                 PF_RULES_WUNLOCK();
2255                 break;
2256         }
2257
	case DIOCCLRSTATUS: {
		/*
		 * Zero all global pf statistics and restart the uptime
		 * ("since") clock; interface stats for the status
		 * interface are cleared as well.
		 */
		PF_RULES_WLOCK();
		for (int i = 0; i < PFRES_MAX; i++)
			counter_u64_zero(V_pf_status.counters[i]);
		for (int i = 0; i < FCNT_MAX; i++)
			counter_u64_zero(V_pf_status.fcounters[i]);
		for (int i = 0; i < SCNT_MAX; i++)
			counter_u64_zero(V_pf_status.scounters[i]);
		for (int i = 0; i < LCNT_MAX; i++)
			counter_u64_zero(V_pf_status.lcounters[i]);
		V_pf_status.since = time_second;
		/* NULL status arg asks pfi to clear rather than fill. */
		if (*V_pf_status.ifname)
			pfi_update_status(V_pf_status.ifname, NULL);
		PF_RULES_WUNLOCK();
		break;
	}
2274
	case DIOCNATLOOK: {
		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
		struct pf_state_key	*sk;
		struct pf_state		*state;
		struct pf_state_key_cmp	 key;
		int			 m = 0, direction = pnl->direction;
		int			 sidx, didx;

		/*
		 * Look up the translated addresses/ports of an existing
		 * state (e.g. for transparent proxies).
		 */
		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
		sidx = (direction == PF_IN) ? 1 : 0;
		didx = (direction == PF_IN) ? 0 : 1;

		/*
		 * Reject an incomplete query: proto and both addresses
		 * are mandatory; TCP/UDP additionally need both ports.
		 */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dport || !pnl->sport)))
			error = EINVAL;
		else {
			bzero(&key, sizeof(key));
			key.af = pnl->af;
			key.proto = pnl->proto;
			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
			key.port[sidx] = pnl->sport;
			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
			key.port[didx] = pnl->dport;

			/* 'm' returns the number of matching states. */
			state = pf_find_state_all(&key, direction, &m);

			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				/* XXXGL: not locked read */
				sk = state->key[sidx];
				PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
				pnl->rsport = sk->port[sidx];
				PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
				pnl->rdport = sk->port[didx];
			} else
				error = ENOENT;
		}
		break;
	}
2319
	case DIOCSETTIMEOUT: {
		struct pfioc_tm *pt = (struct pfioc_tm *)addr;
		int		 old;

		/*
		 * Set one timeout in the default rule; the previous value
		 * is handed back to the caller in pt->seconds.
		 */
		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			break;
		}
		PF_RULES_WLOCK();
		old = V_pf_default_rule.timeout[pt->timeout];
		/* The purge interval must be at least one second. */
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
		/*
		 * A shortened purge interval takes effect immediately:
		 * wake the purge thread instead of waiting out the old
		 * (longer) sleep.
		 */
		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
			wakeup(pf_purge_thread);
		/* Return the previous value. */
		pt->seconds = old;
		PF_RULES_WUNLOCK();
		break;
	}
2340
2341         case DIOCGETTIMEOUT: {
2342                 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
2343
2344                 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
2345                         error = EINVAL;
2346                         break;
2347                 }
2348                 PF_RULES_RLOCK();
2349                 pt->seconds = V_pf_default_rule.timeout[pt->timeout];
2350                 PF_RULES_RUNLOCK();
2351                 break;
2352         }
2353
2354         case DIOCGETLIMIT: {
2355                 struct pfioc_limit      *pl = (struct pfioc_limit *)addr;
2356
2357                 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
2358                         error = EINVAL;
2359                         break;
2360                 }
2361                 PF_RULES_RLOCK();
2362                 pl->limit = V_pf_limits[pl->index].limit;
2363                 PF_RULES_RUNLOCK();
2364                 break;
2365         }
2366
2367         case DIOCSETLIMIT: {
2368                 struct pfioc_limit      *pl = (struct pfioc_limit *)addr;
2369                 int                      old_limit;
2370
2371                 PF_RULES_WLOCK();
2372                 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
2373                     V_pf_limits[pl->index].zone == NULL) {
2374                         PF_RULES_WUNLOCK();
2375                         error = EINVAL;
2376                         break;
2377                 }
2378                 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
2379                 old_limit = V_pf_limits[pl->index].limit;
2380                 V_pf_limits[pl->index].limit = pl->limit;
2381                 pl->limit = old_limit;
2382                 PF_RULES_WUNLOCK();
2383                 break;
2384         }
2385
2386         case DIOCSETDEBUG: {
2387                 u_int32_t       *level = (u_int32_t *)addr;
2388
2389                 PF_RULES_WLOCK();
2390                 V_pf_status.debug = *level;
2391                 PF_RULES_WUNLOCK();
2392                 break;
2393         }
2394
2395         case DIOCCLRRULECTRS: {
2396                 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
2397                 struct pf_ruleset       *ruleset = &pf_main_ruleset;
2398                 struct pf_rule          *rule;
2399
2400                 PF_RULES_WLOCK();
2401                 TAILQ_FOREACH(rule,
2402                     ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
2403                         rule->evaluations = 0;
2404                         rule->packets[0] = rule->packets[1] = 0;
2405                         rule->bytes[0] = rule->bytes[1] = 0;
2406                 }
2407                 PF_RULES_WUNLOCK();
2408                 break;
2409         }
2410
2411         case DIOCGIFSPEEDV0:
2412         case DIOCGIFSPEEDV1: {
2413                 struct pf_ifspeed_v1    *psp = (struct pf_ifspeed_v1 *)addr;
2414                 struct pf_ifspeed_v1    ps;
2415                 struct ifnet            *ifp;
2416
2417                 if (psp->ifname[0] != 0) {
2418                         /* Can we completely trust user-land? */
2419                         strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
2420                         ifp = ifunit(ps.ifname);
2421                         if (ifp != NULL) {
2422                                 psp->baudrate32 =
2423                                     (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
2424                                 if (cmd == DIOCGIFSPEEDV1)
2425                                         psp->baudrate = ifp->if_baudrate;
2426                         } else
2427                                 error = EINVAL;
2428                 } else
2429                         error = EINVAL;
2430                 break;
2431         }
2432
2433 #ifdef ALTQ
	case DIOCSTARTALTQ: {
		struct pf_altq		*altq;

		PF_RULES_WLOCK();
		/* enable all altq interfaces on active list */
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
				error = pf_enable_altq(altq);
				/*
				 * A failure aborts the walk: interfaces
				 * enabled so far are left enabled, but the
				 * running flag below is not set.
				 */
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			V_pf_altq_running = 1;
		PF_RULES_WUNLOCK();
		/* Logged unconditionally, even on partial failure. */
		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
		break;
	}
2452
	case DIOCSTOPALTQ: {
		struct pf_altq		*altq;

		PF_RULES_WLOCK();
		/* disable all altq interfaces on active list */
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
				error = pf_disable_altq(altq);
				/*
				 * A failure aborts the walk; the running
				 * flag below is only cleared on full success.
				 */
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			V_pf_altq_running = 0;
		PF_RULES_WUNLOCK();
		/* Logged unconditionally, even on partial failure. */
		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
		break;
	}
2471
	case DIOCADDALTQV0:
	case DIOCADDALTQV1: {
		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq		*altq, *a;
		struct ifnet		*ifp;

		/*
		 * Import the user-supplied altq spec; IOCPARM_LEN(cmd)
		 * tells the importer which struct version userland used.
		 */
		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
		if (error)
			break;
		altq->local_flags = 0;	/* never trust flags from userland */

		PF_RULES_WLOCK();
		/* Only the holder of the current inactive ticket may add. */
		if (pa->ticket != V_ticket_altqs_inactive) {
			PF_RULES_WUNLOCK();
			free(altq, M_PFALTQ);
			error = EBUSY;
			break;
		}

		/*
		 * if this is for a queue, find the discipline and
		 * copy the necessary fields
		 */
		if (altq->qname[0] != 0) {
			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
				PF_RULES_WUNLOCK();
				error = EBUSY;
				free(altq, M_PFALTQ);
				break;
			}
			/* Share the discipline of the matching interface. */
			altq->altq_disc = NULL;
			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
				if (strncmp(a->ifname, altq->ifname,
				    IFNAMSIZ) == 0) {
					altq->altq_disc = a->altq_disc;
					break;
				}
			}
		}

		/*
		 * A missing interface is not fatal: mark the entry so it
		 * can be attached if the interface appears later.
		 */
		if ((ifp = ifunit(altq->ifname)) == NULL)
			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
		else
			error = altq_add(ifp, altq);

		if (error) {
			PF_RULES_WUNLOCK();
			free(altq, M_PFALTQ);
			break;
		}

		/* Queues and interfaces live on separate inactive lists. */
		if (altq->qname[0] != 0)
			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
		else
			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
		/* version error check done on import above */
		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
		PF_RULES_WUNLOCK();
		break;
	}
2533
2534         case DIOCGETALTQSV0:
2535         case DIOCGETALTQSV1: {
2536                 struct pfioc_altq_v1    *pa = (struct pfioc_altq_v1 *)addr;
2537                 struct pf_altq          *altq;
2538
2539                 PF_RULES_RLOCK();
2540                 pa->nr = 0;
2541                 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
2542                         pa->nr++;
2543                 TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
2544                         pa->nr++;
2545                 pa->ticket = V_ticket_altqs_active;
2546                 PF_RULES_RUNLOCK();
2547                 break;
2548         }
2549
	case DIOCGETALTQV0:
	case DIOCGETALTQV1: {
		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq		*altq;

		PF_RULES_RLOCK();
		/* The caller's ticket must match the active generation. */
		if (pa->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		altq = pf_altq_get_nth_active(pa->nr);
		/* EBUSY (not ENOENT) is also used for an out-of-range nr. */
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		/* Export in whichever struct version the ioctl implies. */
		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
		PF_RULES_RUNLOCK();
		break;
	}
2571
	case DIOCCHANGEALTQV0:
	case DIOCCHANGEALTQV1:
		/*
		 * CHANGEALTQ not supported yet!  In-place modification of
		 * an altq entry is unimplemented; userland must remove and
		 * re-add instead.
		 */
		error = ENODEV;
		break;
2577
	case DIOCGETQSTATSV0:
	case DIOCGETQSTATSV1: {
		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
		struct pf_altq		*altq;
		int			 nbytes;
		u_int32_t		 version;

		PF_RULES_RLOCK();
		/* The caller's ticket must match the active generation. */
		if (pq->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		nbytes = pq->nbytes;
		altq = pf_altq_get_nth_active(pq->nr);
		/* EBUSY (not ENOENT) is also used for an out-of-range nr. */
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}

		/* No stats to report for a vanished interface. */
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
			PF_RULES_RUNLOCK();
			error = ENXIO;
			break;
		}
		/*
		 * NOTE(review): altq is dereferenced below after the rules
		 * lock is dropped — confirm nothing can free active altq
		 * entries concurrently with this ioctl.
		 */
		PF_RULES_RUNLOCK();
		if (cmd == DIOCGETQSTATSV0)
			version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
		else
			version = pq->version;
		error = altq_getqstats(altq, pq->buf, &nbytes, version);
		if (error == 0) {
			pq->scheduler = altq->scheduler;
			pq->nbytes = nbytes;
		}
		break;
	}
2616 #endif /* ALTQ */
2617
2618         case DIOCBEGINADDRS: {
2619                 struct pfioc_pooladdr   *pp = (struct pfioc_pooladdr *)addr;
2620
2621                 PF_RULES_WLOCK();
2622                 pf_empty_pool(&V_pf_pabuf);
2623                 pp->ticket = ++V_ticket_pabuf;
2624                 PF_RULES_WUNLOCK();
2625                 break;
2626         }
2627
	case DIOCADDADDR: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
		struct pf_pooladdr	*pa;
		struct pfi_kif		*kif = NULL;

#ifndef INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (pp->af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		/* Userland must not smuggle in a kernel pointer. */
		if (pp->addr.addr.p.dyn != NULL) {
			error = EINVAL;
			break;
		}
		/* Allocate before taking the lock; M_WAITOK may sleep. */
		pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
		bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
		if (pa->ifname[0])
			kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
		PF_RULES_WLOCK();
		/* The ticket may have changed while we slept in malloc(). */
		if (pp->ticket != V_ticket_pabuf) {
			PF_RULES_WUNLOCK();
			if (pa->ifname[0])
				free(kif, PFI_MTYPE);
			free(pa, M_PFRULE);
			error = EBUSY;
			break;
		}
		if (pa->ifname[0]) {
			/* pfi_kif_attach() takes over kif (not freed here). */
			pa->kif = pfi_kif_attach(kif, pa->ifname);
			pfi_kif_ref(pa->kif);
		} else
			pa->kif = NULL;
		if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
		    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
			/* Unwind the kif reference taken above. */
			if (pa->ifname[0])
				pfi_kif_unref(pa->kif);
			PF_RULES_WUNLOCK();
			free(pa, M_PFRULE);
			break;
		}
		TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
		PF_RULES_WUNLOCK();
		break;
	}
2685
2686         case DIOCGETADDRS: {
2687                 struct pfioc_pooladdr   *pp = (struct pfioc_pooladdr *)addr;
2688                 struct pf_pool          *pool;
2689                 struct pf_pooladdr      *pa;
2690
2691                 PF_RULES_RLOCK();
2692                 pp->nr = 0;
2693                 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2694                     pp->r_num, 0, 1, 0);
2695                 if (pool == NULL) {
2696                         PF_RULES_RUNLOCK();
2697                         error = EBUSY;
2698                         break;
2699                 }
2700                 TAILQ_FOREACH(pa, &pool->list, entries)
2701                         pp->nr++;
2702                 PF_RULES_RUNLOCK();
2703                 break;
2704         }
2705
2706         case DIOCGETADDR: {
2707                 struct pfioc_pooladdr   *pp = (struct pfioc_pooladdr *)addr;
2708                 struct pf_pool          *pool;
2709                 struct pf_pooladdr      *pa;
2710                 u_int32_t                nr = 0;
2711
2712                 PF_RULES_RLOCK();
2713                 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2714                     pp->r_num, 0, 1, 1);
2715                 if (pool == NULL) {
2716                         PF_RULES_RUNLOCK();
2717                         error = EBUSY;
2718                         break;
2719                 }
2720                 pa = TAILQ_FIRST(&pool->list);
2721                 while ((pa != NULL) && (nr < pp->nr)) {
2722                         pa = TAILQ_NEXT(pa, entries);
2723                         nr++;
2724                 }
2725                 if (pa == NULL) {
2726                         PF_RULES_RUNLOCK();
2727                         error = EBUSY;
2728                         break;
2729                 }
2730                 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2731                 pf_addr_copyout(&pp->addr.addr);
2732                 PF_RULES_RUNLOCK();
2733                 break;
2734         }
2735
	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
		struct pf_pool		*pool;
		struct pf_pooladdr	*oldpa = NULL, *newpa = NULL;
		struct pf_ruleset	*ruleset;
		struct pfi_kif		*kif = NULL;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		/* Userland must not smuggle in a kernel pointer. */
		if (pca->addr.addr.p.dyn != NULL) {
			error = EINVAL;
			break;
		}

		if (pca->action != PF_CHANGE_REMOVE) {
#ifndef INET
			if (pca->af == AF_INET) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (pca->af == AF_INET6) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			/* Allocate before taking the lock; M_WAITOK sleeps. */
			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
			if (newpa->ifname[0])
				kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
			newpa->kif = NULL;
		}

#define ERROUT(x)	{ error = (x); goto DIOCCHANGEADDR_error; }
		PF_RULES_WLOCK();
		ruleset = pf_find_ruleset(pca->anchor);
		if (ruleset == NULL)
			ERROUT(EBUSY);

		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL)
			ERROUT(EBUSY);

		if (pca->action != PF_CHANGE_REMOVE) {
			if (newpa->ifname[0]) {
				/* pfi_kif_attach() takes over kif. */
				newpa->kif = pfi_kif_attach(kif, newpa->ifname);
				pfi_kif_ref(newpa->kif);
				kif = NULL;
			}

			switch (newpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				error = pfi_dynaddr_setup(&newpa->addr,
				    pca->af);
				break;
			case PF_ADDR_TABLE:
				newpa->addr.p.tbl = pfr_attach_table(ruleset,
				    newpa->addr.v.tblname);
				if (newpa->addr.p.tbl == NULL)
					error = ENOMEM;
				break;
			}
			if (error)
				goto DIOCCHANGEADDR_error;
		}

		/* Locate the existing entry the action is relative to. */
		switch (pca->action) {
		case PF_CHANGE_ADD_HEAD:
			oldpa = TAILQ_FIRST(&pool->list);
			break;
		case PF_CHANGE_ADD_TAIL:
			oldpa = TAILQ_LAST(&pool->list, pf_palist);
			break;
		default:
			/* ADD_BEFORE/ADD_AFTER/REMOVE use the nr'th entry. */
			oldpa = TAILQ_FIRST(&pool->list);
			for (int i = 0; oldpa && i < pca->nr; i++)
				oldpa = TAILQ_NEXT(oldpa, entries);

			if (oldpa == NULL)
				ERROUT(EINVAL);
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			switch (oldpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				pfi_dynaddr_remove(oldpa->addr.p.dyn);
				break;
			case PF_ADDR_TABLE:
				pfr_detach_table(oldpa->addr.p.tbl);
				break;
			}
			if (oldpa->kif)
				pfi_kif_unref(oldpa->kif);
			free(oldpa, M_PFRULE);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		/*
		 * NOTE(review): if the last entry was just removed,
		 * pool->cur is NULL and the PF_ACPY below dereferences
		 * it — confirm an empty pool cannot be reached here.
		 */
		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCCHANGEADDR_error:
		/* Unwind allocations from a partially-completed change. */
		if (newpa != NULL) {
			if (newpa->kif)
				pfi_kif_unref(newpa->kif);
			free(newpa, M_PFRULE);
		}
		PF_RULES_WUNLOCK();
		if (kif != NULL)
			free(kif, PFI_MTYPE);
		break;
	}
2870
	case DIOCGETRULESETS: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;

		PF_RULES_RLOCK();
		/* Force NUL termination of the userland-supplied path. */
		pr->path[sizeof(pr->path) - 1] = 0;
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			PF_RULES_RUNLOCK();
			error = ENOENT;
			break;
		}
		/* Count the immediate child anchors of this ruleset. */
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		PF_RULES_RUNLOCK();
		break;
	}
2897
	case DIOCGETRULESET: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;
		u_int32_t		 nr = 0;

		PF_RULES_RLOCK();
		/* Force NUL termination of the userland-supplied path. */
		pr->path[sizeof(pr->path) - 1] = 0;
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			PF_RULES_RUNLOCK();
			error = ENOENT;
			break;
		}
		/* Return the name of the pr->nr'th child anchor. */
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		/* An empty name means pr->nr was out of range. */
		if (!pr->name[0])
			error = EBUSY;
		PF_RULES_RUNLOCK();
		break;
	}
2934
2935         case DIOCRCLRTABLES: {
2936                 struct pfioc_table *io = (struct pfioc_table *)addr;
2937
2938                 if (io->pfrio_esize != 0) {
2939                         error = ENODEV;
2940                         break;
2941                 }
2942                 PF_RULES_WLOCK();
2943                 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2944                     io->pfrio_flags | PFR_FLAG_USERIOCTL);
2945                 PF_RULES_WUNLOCK();
2946                 break;
2947         }
2948
	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		/* Element size must match exactly; reject other ABIs. */
		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		/* Bound the request and guard the size multiplication. */
		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			break;
		}

		/* Copy the table list in before taking the lock. */
		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_add_tables(pfrts, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}
2980
	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		/* Element size must match exactly; reject other ABIs. */
		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		/* Bound the request and guard the size multiplication. */
		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			break;
		}

		/* Copy the table list in before taking the lock. */
		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_del_tables(pfrts, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}
3012
	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		/* Element size must match exactly; reject other ABIs. */
		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			error = EINVAL;
			break;
		}
		/* Never hand back more entries than actually exist. */
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_table);

		/* M_NOWAIT: we cannot sleep while holding the rules lock. */
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_NOWAIT);
		if (pfrts == NULL) {
			error = ENOMEM;
			PF_RULES_RUNLOCK();
			break;
		}
		error = pfr_get_tables(&io->pfrio_table, pfrts,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		/*
		 * NOTE(review): totlen was computed before pfr_get_tables()
		 * possibly shrank io->pfrio_size; the copyout below may copy
		 * trailing bytes that were never filled in — confirm.
		 */
		if (error == 0)
			error = copyout(pfrts, io->pfrio_buffer, totlen);
		free(pfrts, M_TEMP);
		break;
	}
3049
	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_tstats *pfrtstats;
		size_t totlen;
		int n;

		/* Element size must match exactly; reject other ABIs. */
		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		/*
		 * NOTE(review): the write lock is taken for a "get" —
		 * presumably because pfr_get_tstats() may also modify
		 * counters depending on flags; confirm.
		 */
		PF_RULES_WLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		/* Never hand back more entries than actually exist. */
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
		/* M_NOWAIT: we cannot sleep while holding the rules lock. */
		pfrtstats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT);
		if (pfrtstats == NULL) {
			error = ENOMEM;
			PF_RULES_WUNLOCK();
			break;
		}
		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0)
			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
		free(pfrtstats, M_TEMP);
		break;
	}
3085
3086         case DIOCRCLRTSTATS: {
3087                 struct pfioc_table *io = (struct pfioc_table *)addr;
3088                 struct pfr_table *pfrts;
3089                 size_t totlen;
3090                 int n;
3091
3092                 if (io->pfrio_esize != sizeof(struct pfr_table)) {
3093                         error = ENODEV;
3094                         break;
3095                 }
3096
3097                 PF_RULES_WLOCK();
3098                 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
3099                 if (n < 0) {
3100                         PF_RULES_WUNLOCK();
3101                         error = EINVAL;
3102                         break;
3103                 }
3104                 io->pfrio_size = min(io->pfrio_size, n);
3105
3106                 totlen = io->pfrio_size * sizeof(struct pfr_table);
3107                 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
3108                     M_TEMP, M_NOWAIT);
3109                 if (pfrts == NULL) {
3110                         error = ENOMEM;
3111                         PF_RULES_WUNLOCK();
3112                         break;
3113                 }
3114                 error = copyin(io->pfrio_buffer, pfrts, totlen);
3115                 if (error) {
3116                         free(pfrts, M_TEMP);
3117                         PF_RULES_WUNLOCK();
3118                         break;
3119                 }
3120                 error = pfr_clr_tstats(pfrts, io->pfrio_size,
3121                     &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3122                 PF_RULES_WUNLOCK();
3123                 free(pfrts, M_TEMP);
3124                 break;
3125         }
3126
	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		/* Userland must describe elements of the exact kernel size. */
		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		/*
		 * Clamp the request to the current table count under the
		 * reader lock, then drop the lock so that the sleepable
		 * M_WAITOK allocation and copyin() run unlocked.  The table
		 * set may change in the window before the writer lock is
		 * retaken; presumably pfr_set_tflags() revalidates its
		 * inputs — confirm against its implementation.
		 */
		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			error = EINVAL;
			break;
		}

		io->pfrio_size = min(io->pfrio_size, n);
		PF_RULES_RUNLOCK();

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_set_tflags(pfrts, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}
3165
	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		/* No element buffer is exchanged; esize must be zero. */
		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		/* Remove all addresses from the named table. */
		PF_RULES_WLOCK();
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		break;
	}
3179
3180         case DIOCRADDADDRS: {
3181                 struct pfioc_table *io = (struct pfioc_table *)addr;
3182                 struct pfr_addr *pfras;
3183                 size_t totlen;
3184
3185                 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3186                         error = ENODEV;
3187                         break;
3188                 }
3189                 if (io->pfrio_size < 0 ||
3190                     io->pfrio_size > pf_ioctl_maxcount ||
3191                     WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
3192                         error = EINVAL;
3193                         break;
3194                 }
3195                 totlen = io->pfrio_size * sizeof(struct pfr_addr);
3196                 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
3197                     M_TEMP, M_NOWAIT);
3198                 if (! pfras) {
3199                         error = ENOMEM;
3200                         break;
3201                 }
3202                 error = copyin(io->pfrio_buffer, pfras, totlen);
3203                 if (error) {
3204                         free(pfras, M_TEMP);
3205                         break;
3206                 }
3207                 PF_RULES_WLOCK();
3208                 error = pfr_add_addrs(&io->pfrio_table, pfras,
3209                     io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
3210                     PFR_FLAG_USERIOCTL);
3211                 PF_RULES_WUNLOCK();
3212                 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
3213                         error = copyout(pfras, io->pfrio_buffer, totlen);
3214                 free(pfras, M_TEMP);
3215                 break;
3216         }
3217
	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		/* Userland must describe elements of the exact kernel size. */
		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before sizing the kernel buffer. */
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_NOWAIT);
		if (! pfras) {
			error = ENOMEM;
			break;
		}
		/* Copy the address list in before taking the writer lock. */
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_del_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		/* With PFR_FLAG_FEEDBACK, per-address results go back out. */
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
3255
	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen, count;

		/* Userland must describe elements of the exact kernel size. */
		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
			error = EINVAL;
			break;
		}
		/*
		 * The buffer must hold the larger of the input count
		 * (pfrio_size) and the feedback count (pfrio_size2).
		 */
		count = max(io->pfrio_size, io->pfrio_size2);
		if (count > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = count * sizeof(struct pfr_addr);
		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
		    M_NOWAIT);
		if (! pfras) {
			error = ENOMEM;
			break;
		}
		/* Copy the replacement list in before taking the lock. */
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_set_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		PF_RULES_WUNLOCK();
		/* With PFR_FLAG_FEEDBACK, per-address results go back out. */
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
3298
	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		/* Userland must describe elements of the exact kernel size. */
		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before sizing the kernel buffer. */
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_NOWAIT);
		if (! pfras) {
			error = ENOMEM;
			break;
		}
		PF_RULES_RLOCK();
		error = pfr_get_addrs(&io->pfrio_table, pfras,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		/* Copy out only after the lock is dropped. */
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
3330
	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_astats *pfrastats;
		size_t totlen;

		/* Userland must describe elements of the exact kernel size. */
		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before sizing the kernel buffer. */
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_astats);
		pfrastats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_astats), M_TEMP, M_NOWAIT);
		if (! pfrastats) {
			error = ENOMEM;
			break;
		}
		PF_RULES_RLOCK();
		error = pfr_get_astats(&io->pfrio_table, pfrastats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		/* Copy out only after the lock is dropped. */
		if (error == 0)
			error = copyout(pfrastats, io->pfrio_buffer, totlen);
		free(pfrastats, M_TEMP);
		break;
	}
3362
	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		/* Userland must describe elements of the exact kernel size. */
		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before sizing the kernel buffer. */
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_NOWAIT);
		if (! pfras) {
			error = ENOMEM;
			break;
		}
		/* Copy the address list in before taking the writer lock. */
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_astats(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		/* With PFR_FLAG_FEEDBACK, per-address results go back out. */
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
3400
	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		/* Userland must describe elements of the exact kernel size. */
		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before sizing the kernel buffer. */
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_NOWAIT);
		if (! pfras) {
			error = ENOMEM;
			break;
		}
		/* Copy the candidate addresses in before taking the lock. */
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		/* Read-only membership test. */
		PF_RULES_RLOCK();
		error = pfr_tst_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		/* Per-address match results are always copied back out. */
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
3438
	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		/* Userland must describe elements of the exact kernel size. */
		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before sizing the kernel buffer. */
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_NOWAIT);
		if (! pfras) {
			error = ENOMEM;
			break;
		}
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		/*
		 * Stage the addresses against the transaction identified by
		 * pfrio_ticket (used with the DIOCX* transaction ioctls).
		 */
		PF_RULES_WLOCK();
		error = pfr_ina_define(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfras, M_TEMP);
		break;
	}
3474
	case DIOCOSFPADD: {
		/* Add an OS fingerprint entry; pf_osfp_add() does the work. */
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		PF_RULES_WLOCK();
		error = pf_osfp_add(io);
		PF_RULES_WUNLOCK();
		break;
	}
3482
	case DIOCOSFPGET: {
		/* Look up an OS fingerprint entry; read lock suffices. */
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		PF_RULES_RLOCK();
		error = pf_osfp_get(io);
		PF_RULES_RUNLOCK();
		break;
	}
3490
	case DIOCXBEGIN: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioes, *ioe;
		size_t			 totlen;
		int			 i;

		/* Userland must describe elements of the exact kernel size. */
		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before sizing the kernel buffer. */
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_NOWAIT);
		if (! ioes) {
			error = ENOMEM;
			break;
		}
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		/*
		 * Open a transaction for every element, filling in each
		 * element's ticket.  Any failure unwinds: unlock, free and
		 * jump to the common fail label (outside this excerpt).
		 */
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				/* ALTQ transactions are global: no anchor. */
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_begin(&table,
				    &ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			    }
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		/* Return the assigned tickets to userland. */
		error = copyout(ioes, io->array, totlen);
		free(ioes, M_TEMP);
		break;
	}
3567
	case DIOCXROLLBACK: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe, *ioes;
		size_t			 totlen;
		int			 i;

		/* Userland must describe elements of the exact kernel size. */
		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before sizing the kernel buffer. */
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_NOWAIT);
		if (! ioes) {
			error = ENOMEM;
			break;
		}
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		/*
		 * Roll back each element's transaction by its ticket.
		 * A failure mid-way leaves earlier elements already rolled
		 * back — hence the "really bad" remarks.
		 */
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				/* ALTQ transactions are global: no anchor. */
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_rollback_altq(ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_rollback(&table,
				    ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			    }
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		free(ioes, M_TEMP);
		break;
	}
3643
	case DIOCXCOMMIT: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe, *ioes;
		struct pf_ruleset	*rs;
		size_t			 totlen;
		int			 i;

		/* Userland must describe elements of the exact kernel size. */
		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}

		/* Bound the request before sizing the kernel buffer. */
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}

		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_NOWAIT);
		if (ioes == NULL) {
			error = ENOMEM;
			break;
		}
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		/*
		 * Two-pass commit: the first pass validates every element's
		 * ticket while holding the writer lock, so the second pass
		 * can commit without expecting failures (a mid-commit error
		 * cannot be unwound — hence "really bad").
		 */
		PF_RULES_WLOCK();
		/* First makes sure everything will succeed. */
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				/* ALTQ transactions are global: no anchor. */
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if (!V_altqs_inactive_open || ioe->ticket !=
				    V_ticket_altqs_inactive) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* Now do the commit - no errors should happen here. */
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if ((error = pf_commit_altq(ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_commit(&table,
				    ioe->ticket, NULL, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			    }
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		free(ioes, M_TEMP);
		break;
	}
3768
3769         case DIOCGETSRCNODES: {
3770                 struct pfioc_src_nodes  *psn = (struct pfioc_src_nodes *)addr;
3771                 struct pf_srchash       *sh;
3772                 struct pf_src_node      *n, *p, *pstore;
3773                 uint32_t                 i, nr = 0;
3774
3775                 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
3776                                 i++, sh++) {
3777                         PF_HASHROW_LOCK(sh);
3778                         LIST_FOREACH(n, &sh->nodes, entry)
3779                                 nr++;
3780                         PF_HASHROW_UNLOCK(sh);
3781                 }
3782
3783                 psn->psn_len = min(psn->psn_len,
3784                     sizeof(struct pf_src_node) * nr);
3785
3786                 if (psn->psn_len == 0) {
3787                         psn->psn_len = sizeof(struct pf_src_node) * nr;
3788                         break;
3789                 }
3790
3791                 nr = 0;
3792
3793                 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
3794                 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
3795                     i++, sh++) {
3796                     PF_HASHROW_LOCK(sh);
3797                     LIST_FOREACH(n, &sh->nodes, entry) {
3798                         int     secs = time_uptime, diff;
3799
3800                         if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
3801                                 break;
3802
3803                         bcopy(n, p, sizeof(struct pf_src_node));
3804                         if (n->rule.ptr != NULL)
3805                                 p->rule.nr = n->rule.ptr->nr;
3806                         p->creation = secs - p->creation;
3807                         if (p->expire > secs)
3808                                 p->expire -= secs;
3809                         else
3810                                 p->expire = 0;
3811
3812                         /* Adjust the connection rate estimate. */
3813                         diff = secs - n->conn_rate.last;
3814                         if (diff >= n->conn_rate.seconds)
3815                                 p->conn_rate.count = 0;
3816                         else
3817                                 p->conn_rate.count -=
3818                                     n->conn_rate.count * diff /
3819                                     n->conn_rate.seconds;
3820                         p++;
3821                         nr++;
3822                     }
3823                     PF_HASHROW_UNLOCK(sh);
3824                 }
3825                 error = copyout(pstore, psn->psn_src_nodes,
3826                     sizeof(struct pf_src_node) * nr);
3827                 if (error) {
3828                         free(pstore, M_TEMP);
3829                         break;
3830                 }
3831                 psn->psn_len = sizeof(struct pf_src_node) * nr;
3832                 free(pstore, M_TEMP);
3833                 break;
3834         }
3835
3836         case DIOCCLRSRCNODES: {
3837
3838                 pf_clear_srcnodes(NULL);
3839                 pf_purge_expired_src_nodes();
3840                 break;
3841         }
3842
3843         case DIOCKILLSRCNODES:
3844                 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
3845                 break;
3846
3847         case DIOCSETHOSTID: {
3848                 u_int32_t       *hostid = (u_int32_t *)addr;
3849
3850                 PF_RULES_WLOCK();
3851                 if (*hostid == 0)
3852                         V_pf_status.hostid = arc4random();
3853                 else
3854                         V_pf_status.hostid = *hostid;
3855                 PF_RULES_WUNLOCK();
3856                 break;
3857         }
3858
3859         case DIOCOSFPFLUSH:
3860                 PF_RULES_WLOCK();
3861                 pf_osfp_flush();
3862                 PF_RULES_WUNLOCK();
3863                 break;
3864
3865         case DIOCIGETIFACES: {
3866                 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3867                 struct pfi_kif *ifstore;
3868                 size_t bufsiz;
3869
3870                 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
3871                         error = ENODEV;
3872                         break;
3873                 }
3874
3875                 if (io->pfiio_size < 0 ||
3876                     io->pfiio_size > pf_ioctl_maxcount ||
3877                     WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
3878                         error = EINVAL;
3879                         break;
3880                 }
3881
3882                 bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
3883                 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
3884                     M_TEMP, M_NOWAIT);
3885                 if (ifstore == NULL) {
3886                         error = ENOMEM;
3887                         break;
3888                 }
3889
3890                 PF_RULES_RLOCK();
3891                 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
3892                 PF_RULES_RUNLOCK();
3893                 error = copyout(ifstore, io->pfiio_buffer, bufsiz);
3894                 free(ifstore, M_TEMP);
3895                 break;
3896         }
3897
3898         case DIOCSETIFFLAG: {
3899                 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3900
3901                 PF_RULES_WLOCK();
3902                 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
3903                 PF_RULES_WUNLOCK();
3904                 break;
3905         }
3906
3907         case DIOCCLRIFFLAG: {
3908                 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3909
3910                 PF_RULES_WLOCK();
3911                 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
3912                 PF_RULES_WUNLOCK();
3913                 break;
3914         }
3915
3916         default:
3917                 error = ENODEV;
3918                 break;
3919         }
3920 fail:
3921         if (sx_xlocked(&pf_ioctl_lock))
3922                 sx_xunlock(&pf_ioctl_lock);
3923         CURVNET_RESTORE();
3924
3925         return (error);
3926 }
3927
/*
 * Export kernel state st into the pfsync wire representation sp.
 *
 * sp is zeroed first so struct padding never leaks kernel memory.
 * Addresses and ports are taken from the WIRE and STACK state keys;
 * creation/expire are converted to relative seconds and, like the
 * rule numbers and counters, stored in network byte order.  A rule
 * number of htonl(-1) means "no rule" on the wire.
 *
 * NOTE(review): assumes the caller keeps st, its state keys and rule
 * pointers stable for the duration of the copy — confirm at call sites.
 */
void
pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
{
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	/* Protocol and address family come from the wire-side key. */
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	/* Age since creation, in seconds, network byte order. */
	sp->creation = htonl(time_uptime - st->creation);
	/* Absolute expiry converted to a relative countdown, clamped at 0. */
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_uptime)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_uptime);

	sp->direction = st->direction;
	sp->log = st->log;
	sp->timeout = st->timeout;
	sp->state_flags = st->state_flags;
	/* Tell the peer which source-node attachments exist. */
	if (st->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	sp->id = st->id;
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	/* Rule numbers: htonl(-1) encodes "no rule". */
	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	pf_state_counter_hton(st->packets[0], sp->packets[0]);
	pf_state_counter_hton(st->packets[1], sp->packets[1]);
	pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
	pf_state_counter_hton(st->bytes[1], sp->bytes[1]);

}
3988
3989 static void
3990 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
3991 {
3992         struct pfr_ktable *kt;
3993
3994         KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
3995
3996         kt = aw->p.tbl;
3997         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
3998                 kt = kt->pfrkt_root;
3999         aw->p.tbl = NULL;
4000         aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
4001                 kt->pfrkt_cnt : -1;
4002 }
4003
/*
 * XXX - Check for version mismatch!!!
 */
/*
 * Flush every entry in the state table.
 *
 * Each state is marked PFTM_PURGE and flagged PFSTATE_NOSYNC so that
 * pfsync does not emit one delete message per state, then unlinked.
 * pf_unlink_state() is entered with the hash row locked
 * (PF_ENTER_LOCKED) and releases that lock before returning — hence
 * the goto back to relock, which re-acquires the row lock and rescans
 * the bucket from its head until it is empty.
 */
static void
pf_clear_states(void)
{
	struct pf_state *s;
	u_int i;

	for (i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			s->timeout = PFTM_PURGE;
			/* Don't send out individual delete messages. */
			s->state_flags |= PFSTATE_NOSYNC;
			pf_unlink_state(s, PF_ENTER_LOCKED);
			goto relock;
		}
		PF_HASHROW_UNLOCK(ih);
	}
}
4027
4028 static int
4029 pf_clear_tables(void)
4030 {
4031         struct pfioc_table io;
4032         int error;
4033
4034         bzero(&io, sizeof(io));
4035
4036         error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
4037             io.pfrio_flags);
4038
4039         return (error);
4040 }
4041
4042 static void
4043 pf_clear_srcnodes(struct pf_src_node *n)
4044 {
4045         struct pf_state *s;
4046         int i;
4047
4048         for (i = 0; i <= pf_hashmask; i++) {
4049                 struct pf_idhash *ih = &V_pf_idhash[i];
4050
4051                 PF_HASHROW_LOCK(ih);
4052                 LIST_FOREACH(s, &ih->states, entry) {
4053                         if (n == NULL || n == s->src_node)
4054                                 s->src_node = NULL;
4055                         if (n == NULL || n == s->nat_src_node)
4056                                 s->nat_src_node = NULL;
4057                 }
4058                 PF_HASHROW_UNLOCK(ih);
4059         }
4060
4061         if (n == NULL) {
4062                 struct pf_srchash *sh;
4063
4064                 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
4065                     i++, sh++) {
4066                         PF_HASHROW_LOCK(sh);
4067                         LIST_FOREACH(n, &sh->nodes, entry) {
4068                                 n->expire = 1;
4069                                 n->states = 0;
4070                         }
4071                         PF_HASHROW_UNLOCK(sh);
4072                 }
4073         } else {
4074                 /* XXX: hash slot should already be locked here. */
4075                 n->expire = 1;
4076                 n->states = 0;
4077         }
4078 }
4079
/*
 * Kill every source node whose source/destination addresses match the
 * (possibly negated, masked) addresses in psnk, detach such nodes from
 * the states that reference them, and free them.  The number of nodes
 * killed is reported back through psnk->psnk_killed.
 */
static void
pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
{
	struct pf_src_node_list  kill;

	LIST_INIT(&kill);
	/*
	 * Pass 1: walk the source-node hash; unlink matching nodes and
	 * collect them on the local kill list.  expire = 1 tags a node
	 * so pass 2 can recognize it without repeating the address
	 * comparison.
	 */
	for (int i = 0; i <= pf_srchashmask; i++) {
		struct pf_srchash *sh = &V_pf_srchash[i];
		struct pf_src_node *sn, *tmp;

		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
			if (PF_MATCHA(psnk->psnk_src.neg,
			      &psnk->psnk_src.addr.v.a.addr,
			      &psnk->psnk_src.addr.v.a.mask,
			      &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			      &psnk->psnk_dst.addr.v.a.addr,
			      &psnk->psnk_dst.addr.v.a.mask,
			      &sn->raddr, sn->af)) {
				pf_unlink_src_node(sn);
				LIST_INSERT_HEAD(&kill, sn, entry);
				sn->expire = 1;
			}
		PF_HASHROW_UNLOCK(sh);
	}

	/*
	 * Pass 2: clear the back-pointers of all states still referring
	 * to a node tagged for removal above.
	 */
	for (int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_state *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (s->src_node && s->src_node->expire == 1)
				s->src_node = NULL;
			if (s->nat_src_node && s->nat_src_node->expire == 1)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	/* Free the collected nodes and report how many were killed. */
	psnk->psnk_killed = pf_free_src_nodes(&kill);
}
4123
/*
 * XXX - Check for version mismatch!!!
 */
4127
4128 /*
4129  * Duplicate pfctl -Fa operation to get rid of as much as we can.
4130  */
4131 static int
4132 shutdown_pf(void)
4133 {
4134         int error = 0;
4135         u_int32_t t[5];
4136         char nn = '\0';
4137
4138         do {
4139                 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
4140                     != 0) {
4141                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
4142                         break;
4143                 }
4144                 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
4145                     != 0) {
4146                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
4147                         break;          /* XXX: rollback? */
4148                 }
4149                 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
4150                     != 0) {
4151                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
4152                         break;          /* XXX: rollback? */
4153                 }
4154                 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
4155                     != 0) {
4156                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
4157                         break;          /* XXX: rollback? */
4158                 }
4159                 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
4160                     != 0) {
4161                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
4162                         break;          /* XXX: rollback? */
4163                 }
4164
4165                 /* XXX: these should always succeed here */
4166                 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
4167                 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
4168                 pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
4169                 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
4170                 pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
4171
4172                 if ((error = pf_clear_tables()) != 0)
4173                         break;
4174
4175 #ifdef ALTQ
4176                 if ((error = pf_begin_altq(&t[0])) != 0) {
4177                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
4178                         break;
4179                 }
4180                 pf_commit_altq(t[0]);
4181 #endif
4182
4183                 pf_clear_states();
4184
4185                 pf_clear_srcnodes(NULL);
4186
4187                 /* status does not use malloced mem so no need to cleanup */
4188                 /* fingerprints and interfaces have their own cleanup code */
4189         } while(0);
4190
4191         return (error);
4192 }
4193
4194 static pfil_return_t
4195 pf_check_return(int chk, struct mbuf **m)
4196 {
4197
4198         switch (chk) {
4199         case PF_PASS:
4200                 if (*m == NULL)
4201                         return (PFIL_CONSUMED);
4202                 else
4203                         return (PFIL_PASS);
4204                 break;
4205         default:
4206                 if (*m != NULL) {
4207                         m_freem(*m);
4208                         *m = NULL;
4209                 }
4210                 return (PFIL_DROPPED);
4211         }
4212 }
4213
4214 #ifdef INET
4215 static pfil_return_t
4216 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
4217     void *ruleset __unused, struct inpcb *inp)
4218 {
4219         int chk;
4220
4221         chk = pf_test(PF_IN, flags, ifp, m, inp);
4222
4223         return (pf_check_return(chk, m));
4224 }
4225
4226 static pfil_return_t
4227 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
4228     void *ruleset __unused,  struct inpcb *inp)
4229 {
4230         int chk;
4231
4232         chk = pf_test(PF_OUT, flags, ifp, m, inp);
4233
4234         return (pf_check_return(chk, m));
4235 }
4236 #endif
4237
4238 #ifdef INET6
4239 static pfil_return_t
4240 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
4241     void *ruleset __unused,  struct inpcb *inp)
4242 {
4243         int chk;
4244
4245         /*
4246          * In case of loopback traffic IPv6 uses the real interface in
4247          * order to support scoped addresses. In order to support stateful
4248          * filtering we have change this to lo0 as it is the case in IPv4.
4249          */
4250         CURVNET_SET(ifp->if_vnet);
4251         chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp);
4252         CURVNET_RESTORE();
4253
4254         return (pf_check_return(chk, m));
4255 }
4256
4257 static pfil_return_t
4258 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
4259     void *ruleset __unused,  struct inpcb *inp)
4260 {
4261         int chk;
4262
4263         CURVNET_SET(ifp->if_vnet);
4264         chk = pf_test6(PF_OUT, flags, ifp, m, inp);
4265         CURVNET_RESTORE();
4266
4267         return (pf_check_return(chk, m));
4268 }
4269 #endif /* INET6 */
4270
#ifdef INET
/* Per-vnet pfil hook handles for the IPv4 input and output paths,
 * registered by hook_pf() and removed by dehook_pf(). */
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
#define V_pf_ip4_in_hook        VNET(pf_ip4_in_hook)
#define V_pf_ip4_out_hook       VNET(pf_ip4_out_hook)
#endif
#ifdef INET6
/* Per-vnet pfil hook handles for the IPv6 input and output paths. */
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
#define V_pf_ip6_in_hook        VNET(pf_ip6_in_hook)
#define V_pf_ip6_out_hook       VNET(pf_ip6_out_hook)
#endif
4283
/*
 * Register pf's pfil(9) hooks for the IPv4 and IPv6 input/output paths
 * and link them into this vnet's pfil heads.  Idempotent: returns 0
 * immediately if the hooks are already installed.  The pha/pla structs
 * are reused across registrations; only the fields that differ are
 * reassigned between calls.
 */
static int
hook_pf(void)
{
	struct pfil_hook_args pha;
	struct pfil_link_args pla;

	if (V_pf_pfil_hooked)
		return (0);

	/* Fields common to all four hook registrations. */
	pha.pa_version = PFIL_VERSION;
	pha.pa_modname = "pf";
	pha.pa_ruleset = NULL;

	pla.pa_version = PFIL_VERSION;

#ifdef INET
	/* IPv4 inbound hook, linked into the inet pfil head. */
	pha.pa_type = PFIL_TYPE_IP4;
	pha.pa_func = pf_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in";
	V_pf_ip4_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_in_hook;
	(void)pfil_link(&pla);
	/* IPv4 outbound hook. */
	pha.pa_func = pf_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "default-out";
	V_pf_ip4_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_out_hook;
	(void)pfil_link(&pla);
#endif
#ifdef INET6
	/* IPv6 inbound hook, linked into the inet6 pfil head. */
	pha.pa_type = PFIL_TYPE_IP6;
	pha.pa_func = pf_check6_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in6";
	V_pf_ip6_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_in_hook;
	(void)pfil_link(&pla);
	/* IPv6 outbound hook. */
	pha.pa_func = pf_check6_out;
	pha.pa_rulname = "default-out6";
	pha.pa_flags = PFIL_OUT;
	V_pf_ip6_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_out_hook;
	(void)pfil_link(&pla);
#endif

	V_pf_pfil_hooked = 1;
	return (0);
}
4341
4342 static int
4343 dehook_pf(void)
4344 {
4345
4346         if (V_pf_pfil_hooked == 0)
4347                 return (0);
4348
4349 #ifdef INET
4350         pfil_remove_hook(V_pf_ip4_in_hook);
4351         pfil_remove_hook(V_pf_ip4_out_hook);
4352 #endif
4353 #ifdef INET6
4354         pfil_remove_hook(V_pf_ip6_in_hook);
4355         pfil_remove_hook(V_pf_ip6_out_hook);
4356 #endif
4357
4358         V_pf_pfil_hooked = 0;
4359         return (0);
4360 }
4361
/*
 * Per-vnet initialization: create the tag-name zone, set up the rule
 * (and, under ALTQ, queue) tag hashes, and attach pf to this vnet.
 * The companion teardown is pf_unload_vnet().
 */
static void
pf_load_vnet(void)
{
	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
#ifdef ALTQ
	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
#endif

	pfattach_vnet();
	/* Mark this vnet live; cleared again by pf_unload_vnet(). */
	V_pf_vnet_active = 1;
}
4378
/*
 * Global (non-vnet) module initialization: create the rules/ioctl/end
 * locks, the mbuf-tag machinery, the /dev/pf device node, the purge
 * kthread, and the interface layer.
 */
static int
pf_load(void)
{
	int error;

	rm_init(&pf_rules_lock, "pf rulesets");
	sx_init(&pf_ioctl_lock, "pf ioctl");
	sx_init(&pf_end_lock, "pf end thread");

	pf_mtag_initialize();

	/*
	 * NOTE(review): on this error path the locks and mtag state
	 * initialized above are not torn down — confirm whether a failed
	 * MOD_LOAD recovers them elsewhere.
	 */
	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
	if (pf_dev == NULL)
		return (ENOMEM);

	pf_end_threads = 0;
	/* NOTE(review): pf_dev is not destroyed if kproc_create() fails. */
	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
	if (error != 0)
		return (error);

	pfi_initialize();

	return (0);
}
4403
/*
 * Per-vnet teardown: unhook from pfil, flush all rulesets, tables,
 * states and source nodes, then release every per-vnet resource.
 * Counters are freed last because the shutdown path still updates
 * them.  Bails out early (leaving everything else in place) if the
 * pfil hooks cannot be removed, which should never happen.
 */
static void
pf_unload_vnet(void)
{
	int error;

	V_pf_vnet_active = 0;
	V_pf_status.running = 0;
	error = dehook_pf();
	if (error) {
		/*
		 * Should not happen!
		 * XXX Due to error code ESRCH, kldunload will show
		 * a message like 'No such process'.
		 */
		printf("%s : pfil unregisteration fail\n", __FUNCTION__);
		return;
	}

	/* Flush configuration and runtime state (pfctl -Fa equivalent). */
	PF_RULES_WLOCK();
	shutdown_pf();
	PF_RULES_WUNLOCK();

	swi_remove(V_pf_swi_cookie);

	pf_unload_vnet_purge();

	pf_normalize_cleanup();
	PF_RULES_WLOCK();
	pfi_cleanup_vnet();
	PF_RULES_WUNLOCK();
	pfr_cleanup();
	pf_osfp_flush();
	pf_cleanup();
	/* The mbuf-tag machinery is global; only the default vnet frees it. */
	if (IS_DEFAULT_VNET(curvnet))
		pf_mtag_cleanup();

	pf_cleanup_tagset(&V_pf_tags);
#ifdef ALTQ
	pf_cleanup_tagset(&V_pf_qids);
#endif
	uma_zdestroy(V_pf_tag_z);

	/* Free counters last as we updated them during shutdown. */
	counter_u64_free(V_pf_default_rule.states_cur);
	counter_u64_free(V_pf_default_rule.states_tot);
	counter_u64_free(V_pf_default_rule.src_nodes);

	for (int i = 0; i < PFRES_MAX; i++)
		counter_u64_free(V_pf_status.counters[i]);
	for (int i = 0; i < LCNT_MAX; i++)
		counter_u64_free(V_pf_status.lcounters[i]);
	for (int i = 0; i < FCNT_MAX; i++)
		counter_u64_free(V_pf_status.fcounters[i]);
	for (int i = 0; i < SCNT_MAX; i++)
		counter_u64_free(V_pf_status.scounters[i]);
}
4460
/*
 * Global module teardown, run after the per-vnet pf_unload_vnet()
 * calls: stop the purge thread, destroy /dev/pf, clean up the
 * interface layer and destroy the global locks.
 */
static void
pf_unload(void)
{

	sx_xlock(&pf_end_lock);
	pf_end_threads = 1;
	/* Wake the purge thread and wait until it bumps pf_end_threads
	 * to 2 to acknowledge its exit. */
	while (pf_end_threads < 2) {
		wakeup_one(pf_purge_thread);
		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
	}
	sx_xunlock(&pf_end_lock);

	if (pf_dev != NULL)
		destroy_dev(pf_dev);

	pfi_cleanup();

	rm_destroy(&pf_rules_lock);
	sx_destroy(&pf_ioctl_lock);
	sx_destroy(&pf_end_lock);
}
4482
/*
 * VNET_SYSINIT glue: bring pf up in each vnet as it is created.
 */
static void
vnet_pf_init(void *unused __unused)
{

	pf_load_vnet();
}
VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_init, NULL);
4491
4492 static void
4493 vnet_pf_uninit(const void *unused __unused)
4494 {
4495
4496         pf_unload_vnet();
4497
4498 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
4499 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
4500     vnet_pf_uninit, NULL);
4501
4502
4503 static int
4504 pf_modevent(module_t mod, int type, void *data)
4505 {
4506         int error = 0;
4507
4508         switch(type) {
4509         case MOD_LOAD:
4510                 error = pf_load();
4511                 break;
4512         case MOD_UNLOAD:
4513                 /* Handled in SYSUNINIT(pf_unload) to ensure it's done after
4514                  * the vnet_pf_uninit()s */
4515                 break;
4516         default:
4517                 error = EINVAL;
4518                 break;
4519         }
4520
4521         return (error);
4522 }
4523
/* Module descriptor: name, event handler, no private payload. */
static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
MODULE_VERSION(pf, PF_MODVER);