]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/netpfil/pf/pf_ioctl.c
pf: Support killing 'matching' states
[FreeBSD/FreeBSD.git] / sys / netpfil / pf / pf_ioctl.c
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002,2003 Henning Brauer
6  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Effort sponsored in part by the Defense Advanced Research Projects
34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36  *
37  *      $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
38  */
39
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42
43 #include "opt_inet.h"
44 #include "opt_inet6.h"
45 #include "opt_bpf.h"
46 #include "opt_pf.h"
47
48 #include <sys/param.h>
49 #include <sys/_bitset.h>
50 #include <sys/bitset.h>
51 #include <sys/bus.h>
52 #include <sys/conf.h>
53 #include <sys/endian.h>
54 #include <sys/fcntl.h>
55 #include <sys/filio.h>
56 #include <sys/hash.h>
57 #include <sys/interrupt.h>
58 #include <sys/jail.h>
59 #include <sys/kernel.h>
60 #include <sys/kthread.h>
61 #include <sys/lock.h>
62 #include <sys/mbuf.h>
63 #include <sys/module.h>
64 #include <sys/nv.h>
65 #include <sys/proc.h>
66 #include <sys/sdt.h>
67 #include <sys/smp.h>
68 #include <sys/socket.h>
69 #include <sys/sysctl.h>
70 #include <sys/md5.h>
71 #include <sys/ucred.h>
72
73 #include <net/if.h>
74 #include <net/if_var.h>
75 #include <net/vnet.h>
76 #include <net/route.h>
77 #include <net/pfil.h>
78 #include <net/pfvar.h>
79 #include <net/if_pfsync.h>
80 #include <net/if_pflog.h>
81
82 #include <netinet/in.h>
83 #include <netinet/ip.h>
84 #include <netinet/ip_var.h>
85 #include <netinet6/ip6_var.h>
86 #include <netinet/ip_icmp.h>
87 #include <netpfil/pf/pf_nv.h>
88
89 #ifdef INET6
90 #include <netinet/ip6.h>
91 #endif /* INET6 */
92
93 #ifdef ALTQ
94 #include <net/altq/altq.h>
95 #endif
96
97 SDT_PROVIDER_DECLARE(pf);
98 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
99 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
100 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
101 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
102
103 static struct pf_kpool  *pf_get_kpool(char *, u_int32_t, u_int8_t, u_int32_t,
104                             u_int8_t, u_int8_t, u_int8_t);
105
106 static void              pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
107 static void              pf_empty_kpool(struct pf_kpalist *);
108 static int               pfioctl(struct cdev *, u_long, caddr_t, int,
109                             struct thread *);
110 #ifdef ALTQ
111 static int               pf_begin_altq(u_int32_t *);
112 static int               pf_rollback_altq(u_int32_t);
113 static int               pf_commit_altq(u_int32_t);
114 static int               pf_enable_altq(struct pf_altq *);
115 static int               pf_disable_altq(struct pf_altq *);
116 static u_int32_t         pf_qname2qid(char *);
117 static void              pf_qid_unref(u_int32_t);
118 #endif /* ALTQ */
119 static int               pf_begin_rules(u_int32_t *, int, const char *);
120 static int               pf_rollback_rules(u_int32_t, int, char *);
121 static int               pf_setup_pfsync_matching(struct pf_kruleset *);
122 static void              pf_hash_rule(MD5_CTX *, struct pf_krule *);
123 static void              pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
124 static int               pf_commit_rules(u_int32_t, int, char *);
125 static int               pf_addr_setup(struct pf_kruleset *,
126                             struct pf_addr_wrap *, sa_family_t);
127 static void              pf_addr_copyout(struct pf_addr_wrap *);
128 static void              pf_src_node_copy(const struct pf_ksrc_node *,
129                             struct pf_src_node *);
130 #ifdef ALTQ
131 static int               pf_export_kaltq(struct pf_altq *,
132                             struct pfioc_altq_v1 *, size_t);
133 static int               pf_import_kaltq(struct pfioc_altq_v1 *,
134                             struct pf_altq *, size_t);
135 #endif /* ALTQ */
136
137 VNET_DEFINE(struct pf_krule,    pf_default_rule);
138
139 #ifdef ALTQ
140 VNET_DEFINE_STATIC(int,         pf_altq_running);
141 #define V_pf_altq_running       VNET(pf_altq_running)
142 #endif
143
144 #define TAGID_MAX        50000
145 struct pf_tagname {
146         TAILQ_ENTRY(pf_tagname) namehash_entries;
147         TAILQ_ENTRY(pf_tagname) taghash_entries;
148         char                    name[PF_TAG_NAME_SIZE];
149         uint16_t                tag;
150         int                     ref;
151 };
152
153 struct pf_tagset {
154         TAILQ_HEAD(, pf_tagname)        *namehash;
155         TAILQ_HEAD(, pf_tagname)        *taghash;
156         unsigned int                     mask;
157         uint32_t                         seed;
158         BITSET_DEFINE(, TAGID_MAX)       avail;
159 };
160
161 VNET_DEFINE(struct pf_tagset, pf_tags);
162 #define V_pf_tags       VNET(pf_tags)
163 static unsigned int     pf_rule_tag_hashsize;
164 #define PF_RULE_TAG_HASH_SIZE_DEFAULT   128
165 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
166     &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
167     "Size of pf(4) rule tag hashtable");
168
169 #ifdef ALTQ
170 VNET_DEFINE(struct pf_tagset, pf_qids);
171 #define V_pf_qids       VNET(pf_qids)
172 static unsigned int     pf_queue_tag_hashsize;
173 #define PF_QUEUE_TAG_HASH_SIZE_DEFAULT  128
174 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
175     &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
176     "Size of pf(4) queue tag hashtable");
177 #endif
178 VNET_DEFINE(uma_zone_t,  pf_tag_z);
179 #define V_pf_tag_z               VNET(pf_tag_z)
180 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
181 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
182
183 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
184 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
185 #endif
186
187 static void              pf_init_tagset(struct pf_tagset *, unsigned int *,
188                             unsigned int);
189 static void              pf_cleanup_tagset(struct pf_tagset *);
190 static uint16_t          tagname2hashindex(const struct pf_tagset *, const char *);
191 static uint16_t          tag2hashindex(const struct pf_tagset *, uint16_t);
192 static u_int16_t         tagname2tag(struct pf_tagset *, char *);
193 static u_int16_t         pf_tagname2tag(char *);
194 static void              tag_unref(struct pf_tagset *, u_int16_t);
195
196 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
197
198 struct cdev *pf_dev;
199
200 /*
201  * XXX - These are new and need to be checked when moveing to a new version
202  */
203 static void              pf_clear_all_states(void);
204 static unsigned int      pf_clear_states(const struct pf_kstate_kill *);
205 static int               pf_killstates(struct pf_kstate_kill *,
206                             unsigned int *);
207 static int               pf_killstates_row(struct pf_kstate_kill *,
208                             struct pf_idhash *);
209 static int               pf_killstates_nv(struct pfioc_nv *);
210 static int               pf_clearstates_nv(struct pfioc_nv *);
211 static int               pf_clear_tables(void);
212 static void              pf_clear_srcnodes(struct pf_ksrc_node *);
213 static void              pf_kill_srcnodes(struct pfioc_src_node_kill *);
214 static int               pf_keepcounters(struct pfioc_nv *);
215 static void              pf_tbladdr_copyout(struct pf_addr_wrap *);
216
217 /*
218  * Wrapper functions for pfil(9) hooks
219  */
220 #ifdef INET
221 static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
222     int dir, int flags, struct inpcb *inp);
223 static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
224     int dir, int flags, struct inpcb *inp);
225 #endif
226 #ifdef INET6
227 static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
228     int dir, int flags, struct inpcb *inp);
229 static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
230     int dir, int flags, struct inpcb *inp);
231 #endif
232
233 static int              hook_pf(void);
234 static int              dehook_pf(void);
235 static int              shutdown_pf(void);
236 static int              pf_load(void);
237 static void             pf_unload(void);
238
239 static struct cdevsw pf_cdevsw = {
240         .d_ioctl =      pfioctl,
241         .d_name =       PF_NAME,
242         .d_version =    D_VERSION,
243 };
244
245 volatile VNET_DEFINE_STATIC(int, pf_pfil_hooked);
246 #define V_pf_pfil_hooked        VNET(pf_pfil_hooked)
247
248 /*
249  * We need a flag that is neither hooked nor running to know when
250  * the VNET is "valid".  We primarily need this to control (global)
251  * external event, e.g., eventhandlers.
252  */
253 VNET_DEFINE(int, pf_vnet_active);
254 #define V_pf_vnet_active        VNET(pf_vnet_active)
255
256 int pf_end_threads;
257 struct proc *pf_purge_proc;
258
259 struct rmlock                   pf_rules_lock;
260 struct sx                       pf_ioctl_lock;
261 struct sx                       pf_end_lock;
262
263 /* pfsync */
264 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
265 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
266 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
267 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
268 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
269 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
270 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
271
272 /* pflog */
273 pflog_packet_t                  *pflog_packet_ptr = NULL;
274
275 extern u_long   pf_ioctl_maxcount;
276
/*
 * Record an ioctl-handler error through the pf:ioctl:function:error SDT
 * probe (function name, error value, line number) and jump to the given
 * cleanup label.  An 'int error' must be in scope at the expansion site.
 */
#define ERROUT_FUNCTION(target, x)                                      \
        do {                                                            \
                error = (x);                                            \
                SDT_PROBE3(pf, ioctl, function, error, __func__, error, \
                    __LINE__);                                          \
                goto target;                                            \
        } while (0)
284
/*
 * Per-VNET attach: bring up the pf subsystems (state tables, radix
 * tables, interface handling, normalization), construct the default
 * rule with its counters, install the default timeout values and
 * initialize the per-vnet status block.
 *
 * NOTE(review): as the XXXGL comment below says, a swi_add() failure
 * leaks everything allocated above; confirm this is acceptable for the
 * callers of this path.
 */
static void
pfattach_vnet(void)
{
        u_int32_t *my_timeout = V_pf_default_rule.timeout;

        pf_initialize();
        pfr_initialize();
        pfi_initialize_vnet();
        pf_normalize_init();

        /* Default hard limits for states and source nodes. */
        V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
        V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

        RB_INIT(&V_pf_anchors);
        pf_init_kruleset(&pf_main_ruleset);

        /* default rule should never be garbage collected */
        V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
        V_pf_default_rule.action = PF_DROP;
#else
        V_pf_default_rule.action = PF_PASS;
#endif
        V_pf_default_rule.nr = -1;
        V_pf_default_rule.rtableid = -1;

        /* M_WAITOK: attach context may sleep, allocations cannot fail. */
        V_pf_default_rule.evaluations = counter_u64_alloc(M_WAITOK);
        for (int i = 0; i < 2; i++) {
                V_pf_default_rule.packets[i] = counter_u64_alloc(M_WAITOK);
                V_pf_default_rule.bytes[i] = counter_u64_alloc(M_WAITOK);
        }
        V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
        V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
        V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

        /* initialize default timeouts */
        my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
        my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
        my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
        my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
        my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
        my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
        my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
        my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
        my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
        my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
        my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
        my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
        my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
        my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
        my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
        my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
        my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
        my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
        my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
        my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

        bzero(&V_pf_status, sizeof(V_pf_status));
        V_pf_status.debug = PF_DEBUG_URGENT;

        V_pf_pfil_hooked = 0;

        /* XXX do our best to avoid a conflict */
        V_pf_status.hostid = arc4random();

        for (int i = 0; i < PFRES_MAX; i++)
                V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
        for (int i = 0; i < LCNT_MAX; i++)
                V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
        for (int i = 0; i < FCNT_MAX; i++)
                V_pf_status.fcounters[i] = counter_u64_alloc(M_WAITOK);
        for (int i = 0; i < SCNT_MAX; i++)
                V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

        /* Software interrupt for deferred packet sends (pf_intr). */
        if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
            INTR_MPSAFE, &V_pf_swi_cookie) != 0)
                /* XXXGL: leaked all above. */
                return;
}
364
365 static struct pf_kpool *
366 pf_get_kpool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
367     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
368     u_int8_t check_ticket)
369 {
370         struct pf_kruleset      *ruleset;
371         struct pf_krule         *rule;
372         int                      rs_num;
373
374         ruleset = pf_find_kruleset(anchor);
375         if (ruleset == NULL)
376                 return (NULL);
377         rs_num = pf_get_ruleset_number(rule_action);
378         if (rs_num >= PF_RULESET_MAX)
379                 return (NULL);
380         if (active) {
381                 if (check_ticket && ticket !=
382                     ruleset->rules[rs_num].active.ticket)
383                         return (NULL);
384                 if (r_last)
385                         rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
386                             pf_krulequeue);
387                 else
388                         rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
389         } else {
390                 if (check_ticket && ticket !=
391                     ruleset->rules[rs_num].inactive.ticket)
392                         return (NULL);
393                 if (r_last)
394                         rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
395                             pf_krulequeue);
396                 else
397                         rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
398         }
399         if (!r_last) {
400                 while ((rule != NULL) && (rule->nr != rule_number))
401                         rule = TAILQ_NEXT(rule, entries);
402         }
403         if (rule == NULL)
404                 return (NULL);
405
406         return (&rule->rpool);
407 }
408
409 static void
410 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
411 {
412         struct pf_kpooladdr     *mv_pool_pa;
413
414         while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
415                 TAILQ_REMOVE(poola, mv_pool_pa, entries);
416                 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
417         }
418 }
419
420 static void
421 pf_empty_kpool(struct pf_kpalist *poola)
422 {
423         struct pf_kpooladdr *pa;
424
425         while ((pa = TAILQ_FIRST(poola)) != NULL) {
426                 switch (pa->addr.type) {
427                 case PF_ADDR_DYNIFTL:
428                         pfi_dynaddr_remove(pa->addr.p.dyn);
429                         break;
430                 case PF_ADDR_TABLE:
431                         /* XXX: this could be unfinished pooladdr on pabuf */
432                         if (pa->addr.p.tbl != NULL)
433                                 pfr_detach_table(pa->addr.p.tbl);
434                         break;
435                 }
436                 if (pa->kif)
437                         pfi_kkif_unref(pa->kif);
438                 TAILQ_REMOVE(poola, pa, entries);
439                 free(pa, M_PFRULE);
440         }
441 }
442
/*
 * Detach a rule from its queue and park it on the per-vnet list of
 * unlinked rules for deferred destruction.  Caller must hold the rules
 * write lock.
 */
static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

        PF_RULES_WASSERT();

        TAILQ_REMOVE(rulequeue, rule, entries);

        PF_UNLNKDRULES_LOCK();
        /*
         * Flag the rule with PFRULE_REFS -- presumably consumed by the
         * purge logic that walks V_pf_unlinked_rules; confirm against
         * pf_purge_unlinked_rules().
         */
        rule->rule_ref |= PFRULE_REFS;
        TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
        PF_UNLNKDRULES_UNLOCK();
}
456
/*
 * Release every resource a rule holds -- tag references, ALTQ queue id
 * references, dynamic-interface/table references on the source and
 * destination addresses, the overload table, the kif reference and the
 * rule's address pool -- then free the rule itself.  Caller must hold
 * the rules write lock.
 */
void
pf_free_rule(struct pf_krule *rule)
{

        PF_RULES_WASSERT();

        if (rule->tag)
                tag_unref(&V_pf_tags, rule->tag);
        if (rule->match_tag)
                tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
        /* Drop pqid separately only when it differs from qid, so an
         * identical id is not unreferenced twice. */
        if (rule->pqid != rule->qid)
                pf_qid_unref(rule->pqid);
        pf_qid_unref(rule->qid);
#endif
        switch (rule->src.addr.type) {
        case PF_ADDR_DYNIFTL:
                pfi_dynaddr_remove(rule->src.addr.p.dyn);
                break;
        case PF_ADDR_TABLE:
                pfr_detach_table(rule->src.addr.p.tbl);
                break;
        }
        switch (rule->dst.addr.type) {
        case PF_ADDR_DYNIFTL:
                pfi_dynaddr_remove(rule->dst.addr.p.dyn);
                break;
        case PF_ADDR_TABLE:
                pfr_detach_table(rule->dst.addr.p.tbl);
                break;
        }
        if (rule->overload_tbl)
                pfr_detach_table(rule->overload_tbl);
        if (rule->kif)
                pfi_kkif_unref(rule->kif);
        pf_kanchor_remove(rule);
        pf_empty_kpool(&rule->rpool.list);

        pf_krule_free(rule);
}
497
498 static void
499 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
500     unsigned int default_size)
501 {
502         unsigned int i;
503         unsigned int hashsize;
504         
505         if (*tunable_size == 0 || !powerof2(*tunable_size))
506                 *tunable_size = default_size;
507
508         hashsize = *tunable_size;
509         ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
510             M_WAITOK);
511         ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
512             M_WAITOK);
513         ts->mask = hashsize - 1;
514         ts->seed = arc4random();
515         for (i = 0; i < hashsize; i++) {
516                 TAILQ_INIT(&ts->namehash[i]);
517                 TAILQ_INIT(&ts->taghash[i]);
518         }
519         BIT_FILL(TAGID_MAX, &ts->avail);
520 }
521
/*
 * Tear down a tag set: free every pf_tagname entry and both hash
 * tables.  Inverse of pf_init_tagset().
 */
static void
pf_cleanup_tagset(struct pf_tagset *ts)
{
        unsigned int i;
        unsigned int hashsize;
        struct pf_tagname *t, *tmp;

        /*
         * Only need to clean up one of the hashes as each tag is hashed
         * into each table.
         */
        hashsize = ts->mask + 1;
        for (i = 0; i < hashsize; i++)
                TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
                        uma_zfree(V_pf_tag_z, t);

        free(ts->namehash, M_PFHASH);
        free(ts->taghash, M_PFHASH);
}
541
542 static uint16_t
543 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
544 {
545         size_t len;
546
547         len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
548         return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
549 }
550
/*
 * Map a tag number to its bucket in the tag hash.  Ids are handed out
 * first-free from a bit vector, so masking with the power-of-two table
 * mask distributes them without further hashing.
 */
static uint16_t
tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
{

        return (tag & ts->mask);
}
557
558 static u_int16_t
559 tagname2tag(struct pf_tagset *ts, char *tagname)
560 {
561         struct pf_tagname       *tag;
562         u_int32_t                index;
563         u_int16_t                new_tagid;
564
565         PF_RULES_WASSERT();
566
567         index = tagname2hashindex(ts, tagname);
568         TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
569                 if (strcmp(tagname, tag->name) == 0) {
570                         tag->ref++;
571                         return (tag->tag);
572                 }
573
574         /*
575          * new entry
576          *
577          * to avoid fragmentation, we do a linear search from the beginning
578          * and take the first free slot we find.
579          */
580         new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
581         /*
582          * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
583          * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
584          * set.  It may also return a bit number greater than TAGID_MAX due
585          * to rounding of the number of bits in the vector up to a multiple
586          * of the vector word size at declaration/allocation time.
587          */
588         if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
589                 return (0);
590
591         /* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
592         BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
593         
594         /* allocate and fill new struct pf_tagname */
595         tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
596         if (tag == NULL)
597                 return (0);
598         strlcpy(tag->name, tagname, sizeof(tag->name));
599         tag->tag = new_tagid;
600         tag->ref = 1;
601
602         /* Insert into namehash */
603         TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
604
605         /* Insert into taghash */
606         index = tag2hashindex(ts, new_tagid);
607         TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
608         
609         return (tag->tag);
610 }
611
/*
 * Drop one reference on a tag.  When the last reference goes away the
 * entry is removed from both hash tables, its id is returned to the
 * 'avail' bit vector and the entry is freed.  Caller must hold the
 * rules write lock.
 */
static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
        struct pf_tagname       *t;
        uint16_t                 index;

        PF_RULES_WASSERT();

        index = tag2hashindex(ts, tag);
        TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
                if (tag == t->tag) {
                        if (--t->ref == 0) {
                                TAILQ_REMOVE(&ts->taghash[index], t,
                                    taghash_entries);
                                /* The name hash uses a different index. */
                                index = tagname2hashindex(ts, t->name);
                                TAILQ_REMOVE(&ts->namehash[index], t,
                                    namehash_entries);
                                /* Bits are 0-based for BIT_SET() */
                                BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
                                uma_zfree(V_pf_tag_z, t);
                        }
                        break;
                }
}
636
/*
 * Resolve a rule tag name against the per-vnet rule tag set, taking a
 * reference.  Returns 0 on failure (see tagname2tag()).
 */
static u_int16_t
pf_tagname2tag(char *tagname)
{
        return (tagname2tag(&V_pf_tags, tagname));
}
642
643 #ifdef ALTQ
/*
 * Resolve an ALTQ queue name to a queue id via the per-vnet queue tag
 * set, taking a reference.  Returns 0 on failure (see tagname2tag()).
 */
static u_int32_t
pf_qname2qid(char *qname)
{
        return ((u_int32_t)tagname2tag(&V_pf_qids, qname));
}
649
/*
 * Drop one reference on an ALTQ queue id; the id is recycled once the
 * last reference is gone (see tag_unref()).
 */
static void
pf_qid_unref(u_int32_t qid)
{
        tag_unref(&V_pf_qids, (u_int16_t)qid);
}
655
/*
 * Open a new inactive ALTQ transaction: purge whatever a previous,
 * uncommitted transaction left on the inactive lists, then hand back a
 * fresh ticket.  Caller must hold the rules write lock.
 */
static int
pf_begin_altq(u_int32_t *ticket)
{
        struct pf_altq  *altq, *tmp;
        int              error = 0;

        PF_RULES_WASSERT();

        /* Purge the old altq lists */
        TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
                if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
                        /* detach and destroy the discipline */
                        /* Note: only the last altq_remove() error survives. */
                        error = altq_remove(altq);
                }
                free(altq, M_PFALTQ);
        }
        TAILQ_INIT(V_pf_altq_ifs_inactive);
        TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
                pf_qid_unref(altq->qid);
                free(altq, M_PFALTQ);
        }
        TAILQ_INIT(V_pf_altqs_inactive);
        if (error)
                return (error);
        *ticket = ++V_ticket_altqs_inactive;
        V_altqs_inactive_open = 1;
        return (0);
}
684
/*
 * Abort the inactive ALTQ transaction identified by 'ticket': discard
 * both inactive lists and close the transaction.  A stale ticket (or
 * no open transaction) is silently ignored.  Caller must hold the
 * rules write lock.
 */
static int
pf_rollback_altq(u_int32_t ticket)
{
        struct pf_altq  *altq, *tmp;
        int              error = 0;

        PF_RULES_WASSERT();

        if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
                return (0);
        /* Purge the old altq lists */
        TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
                if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
                        /* detach and destroy the discipline */
                        /* Note: only the last altq_remove() error survives. */
                        error = altq_remove(altq);
                }
                free(altq, M_PFALTQ);
        }
        TAILQ_INIT(V_pf_altq_ifs_inactive);
        TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
                pf_qid_unref(altq->qid);
                free(altq, M_PFALTQ);
        }
        TAILQ_INIT(V_pf_altqs_inactive);
        V_altqs_inactive_open = 0;
        return (error);
}
712
/*
 * Commit the inactive ALTQ transaction identified by 'ticket': swap
 * the inactive lists into the active slots, attach (and, if ALTQ is
 * running, enable) the new disciplines, then purge the now-inactive
 * old lists.  Returns EBUSY on a stale ticket.  Caller must hold the
 * rules write lock.
 */
static int
pf_commit_altq(u_int32_t ticket)
{
        struct pf_altqqueue     *old_altqs, *old_altq_ifs;
        struct pf_altq          *altq, *tmp;
        int                      err, error = 0;

        PF_RULES_WASSERT();

        if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
                return (EBUSY);

        /* swap altqs, keep the old. */
        old_altqs = V_pf_altqs_active;
        old_altq_ifs = V_pf_altq_ifs_active;
        V_pf_altqs_active = V_pf_altqs_inactive;
        V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
        V_pf_altqs_inactive = old_altqs;
        V_pf_altq_ifs_inactive = old_altq_ifs;
        V_ticket_altqs_active = V_ticket_altqs_inactive;

        /* Attach new disciplines */
        TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
                if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
                        /* attach the discipline */
                        error = altq_pfattach(altq);
                        if (error == 0 && V_pf_altq_running)
                                error = pf_enable_altq(altq);
                        /*
                         * NOTE(review): this early return skips the purge
                         * below and leaves V_altqs_inactive_open set --
                         * confirm callers recover via a later begin/rollback.
                         */
                        if (error != 0)
                                return (error);
                }
        }

        /* Purge the old altq lists */
        TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
                if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
                        /* detach and destroy the discipline */
                        if (V_pf_altq_running)
                                error = pf_disable_altq(altq);
                        err = altq_pfdetach(altq);
                        if (err != 0 && error == 0)
                                error = err;
                        err = altq_remove(altq);
                        if (err != 0 && error == 0)
                                error = err;
                }
                free(altq, M_PFALTQ);
        }
        TAILQ_INIT(V_pf_altq_ifs_inactive);
        TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
                pf_qid_unref(altq->qid);
                free(altq, M_PFALTQ);
        }
        TAILQ_INIT(V_pf_altqs_inactive);

        V_altqs_inactive_open = 0;
        return (error);
}
771
772 static int
773 pf_enable_altq(struct pf_altq *altq)
774 {
775         struct ifnet            *ifp;
776         struct tb_profile        tb;
777         int                      error = 0;
778
779         if ((ifp = ifunit(altq->ifname)) == NULL)
780                 return (EINVAL);
781
782         if (ifp->if_snd.altq_type != ALTQT_NONE)
783                 error = altq_enable(&ifp->if_snd);
784
785         /* set tokenbucket regulator */
786         if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
787                 tb.rate = altq->ifbandwidth;
788                 tb.depth = altq->tbrsize;
789                 error = tbr_set(&ifp->if_snd, &tb);
790         }
791
792         return (error);
793 }
794
/*
 * Disable ALTQ on the interface named by 'altq' and clear its
 * token-bucket regulator.  Returns EINVAL when the interface does not
 * exist; silently succeeds when the discipline was already replaced.
 */
static int
pf_disable_altq(struct pf_altq *altq)
{
        struct ifnet            *ifp;
        struct tb_profile        tb;
        int                      error;

        if ((ifp = ifunit(altq->ifname)) == NULL)
                return (EINVAL);

        /*
         * when the discipline is no longer referenced, it was overridden
         * by a new one.  if so, just return.
         */
        if (altq->altq_disc != ifp->if_snd.altq_disc)
                return (0);

        error = altq_disable(&ifp->if_snd);

        if (error == 0) {
                /*
                 * clear tokenbucket regulator
                 * NOTE(review): only tb.rate is set; presumably tbr_set()
                 * treats rate == 0 as "remove" and ignores tb.depth --
                 * confirm, since tb.depth is left uninitialized here.
                 */
                tb.rate = 0;
                error = tbr_set(&ifp->if_snd, &tb);
        }

        return (error);
}
822
/*
 * Re-instantiate one copied altq entry while rebuilding the inactive
 * configuration after an interface arrival/departure.  If the named
 * interface is absent (or is exactly the interface being removed),
 * the entry is merely flagged PFALTQ_FLAG_IF_REMOVED; otherwise the
 * discipline is added to the interface.  Frees 'altq' on error.
 */
static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
        struct ifnet    *ifp1;
        int              error = 0;

        /* Deactivate the interface in question */
        altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
        if ((ifp1 = ifunit(altq->ifname)) == NULL ||
            (remove && ifp1 == ifp)) {
                altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
        } else {
                error = altq_add(ifp1, altq);

                /*
                 * NOTE(review): the ticket is validated only after
                 * altq_add(); on a stale ticket the discipline just added
                 * is not removed before the entry is freed -- confirm
                 * this matches the intended transaction semantics.
                 */
                if (ticket != V_ticket_altqs_inactive)
                        error = EBUSY;

                if (error)
                        free(altq, M_PFALTQ);
        }

        return (error);
}
847
/*
 * React to an interface attach/detach event by rebuilding the ALTQ
 * configuration: clone the entire active queue set into a fresh inactive
 * transaction (re-resolving each queue's interface along the way) and
 * commit it, so queues on departed interfaces are flagged removed and
 * queues on (re)arrived interfaces are re-attached.
 */
void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/*
	 * No need to re-evaluate the configuration for events on interfaces
	 * that do not support ALTQ, as it's not possible for such
	 * interfaces to be part of the configuration.
	 */
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set (per-interface queue list first). */
	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		/* On failure a2 has already been freed by the callee. */
		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
	}
	if (error)
		goto out;
	/* Now clone the per-queue list, re-resolving qids and disciplines. */
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
			error = EBUSY;
			free(a2, M_PFALTQ);
			break;
		}
		/* Inherit the discipline from the matching interface entry. */
		a2->altq_disc = NULL;
		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
			if (strncmp(a3->ifname, a2->ifname,
				IFNAMSIZ) == 0) {
				a2->altq_disc = a3->altq_disc;
				break;
			}
		}
		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

out:
	/* All-or-nothing: roll back the transaction on any failure. */
	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
922 #endif /* ALTQ */
923
924 static int
925 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
926 {
927         struct pf_kruleset      *rs;
928         struct pf_krule         *rule;
929
930         PF_RULES_WASSERT();
931
932         if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
933                 return (EINVAL);
934         rs = pf_find_or_create_kruleset(anchor);
935         if (rs == NULL)
936                 return (EINVAL);
937         while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
938                 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
939                 rs->rules[rs_num].inactive.rcount--;
940         }
941         *ticket = ++rs->rules[rs_num].inactive.ticket;
942         rs->rules[rs_num].inactive.open = 1;
943         return (0);
944 }
945
946 static int
947 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
948 {
949         struct pf_kruleset      *rs;
950         struct pf_krule         *rule;
951
952         PF_RULES_WASSERT();
953
954         if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
955                 return (EINVAL);
956         rs = pf_find_kruleset(anchor);
957         if (rs == NULL || !rs->rules[rs_num].inactive.open ||
958             rs->rules[rs_num].inactive.ticket != ticket)
959                 return (0);
960         while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
961                 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
962                 rs->rules[rs_num].inactive.rcount--;
963         }
964         rs->rules[rs_num].inactive.open = 0;
965         return (0);
966 }
967
/* Feed a fixed-size rule field into the running MD5 digest ('ctx' is
 * expected in scope at the expansion site). */
#define PF_MD5_UPD(st, elm)                                             \
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

/* Feed a NUL-terminated string field (excluding the NUL) into the digest. */
#define PF_MD5_UPD_STR(st, elm)                                         \
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

/* Hash a 32-bit field in network byte order so the digest is
 * byte-order independent; 'stor' is caller-supplied scratch space. */
#define PF_MD5_UPD_HTONL(st, elm, stor) do {                            \
		(stor) = htonl((st)->elm);                              \
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

/* As PF_MD5_UPD_HTONL, but for 16-bit fields. */
#define PF_MD5_UPD_HTONS(st, elm, stor) do {                            \
		(stor) = htons((st)->elm);                              \
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)
983
/*
 * Fold one rule address (src or dst) into the rule digest.  Only the
 * fields relevant for the active address type are hashed.
 *
 * NOTE(review): the exact set and order of fields hashed here defines
 * the ruleset checksum compared across pfsync peers — do not reorder
 * or extend without considering cross-host compatibility.
 */
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
		case PF_ADDR_DYNIFTL:
			PF_MD5_UPD(pfr, addr.v.ifname);
			PF_MD5_UPD(pfr, addr.iflags);
			break;
		case PF_ADDR_TABLE:
			PF_MD5_UPD(pfr, addr.v.tblname);
			break;
		case PF_ADDR_ADDRMASK:
			/* XXX ignore af? */
			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
			break;
	}

	/* Common match criteria, hashed for every address type. */
	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}
1008
/*
 * Fold the match-relevant fields of a rule into an MD5 digest.  Used
 * both for the pfsync ruleset checksum and for matching "equivalent"
 * rules when preserving counters across a commit (pf_krule_compare()).
 *
 * Multi-byte scalars are hashed via the HTONL/HTONS helpers so the
 * digest is identical across byte orders.  Field order is part of the
 * checksum contract — keep it stable.
 */
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_krule *rule)
{
	u_int16_t x;	/* scratch for 16-bit network-order conversions */
	u_int32_t y;	/* scratch for 32-bit network-order conversions */

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
		PF_MD5_UPD_STR(rule, label[i]);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
1048
1049 static bool
1050 pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
1051 {
1052         MD5_CTX         ctx[2];
1053         u_int8_t        digest[2][PF_MD5_DIGEST_LENGTH];
1054
1055         MD5Init(&ctx[0]);
1056         MD5Init(&ctx[1]);
1057         pf_hash_rule(&ctx[0], a);
1058         pf_hash_rule(&ctx[1], b);
1059         MD5Final(digest[0], &ctx[0]);
1060         MD5Final(digest[1], &ctx[1]);
1061
1062         return (memcmp(digest[0], digest[1], PF_MD5_DIGEST_LENGTH) == 0);
1063 }
1064
/*
 * Atomically activate an open inactive-ruleset transaction: swap the
 * inactive rule list into the active slot, optionally migrate counters
 * from equivalent old rules, then destroy the previous active set.
 *
 * Returns EINVAL on a bad ruleset number, EBUSY on a stale/closed
 * transaction, or the pf_setup_pfsync_matching() error.  Caller holds
 * the rules write lock.
 */
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule, **old_array, *tail;
	struct pf_krulequeue	*old_rules;
	int			 error;
	u_int32_t		 old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;

	/*
	 * Attempt to preserve counter information.  For every new rule,
	 * scan the old list ('tail' is a search cursor, not the list tail)
	 * for an MD5-equivalent rule and carry its counters over.  This is
	 * O(new * old); acceptable since commits are a slow path.
	 */
	if (V_pf_status.keep_counters) {
		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
		    entries) {
			tail = TAILQ_FIRST(old_rules);
			while ((tail != NULL) && ! pf_krule_compare(tail, rule))
				tail = TAILQ_NEXT(tail, entries);
			if (tail != NULL) {
				counter_u64_add(rule->evaluations,
				    counter_u64_fetch(tail->evaluations));
				counter_u64_add(rule->packets[0],
				    counter_u64_fetch(tail->packets[0]));
				counter_u64_add(rule->packets[1],
				    counter_u64_fetch(tail->packets[1]));
				counter_u64_add(rule->bytes[0],
				    counter_u64_fetch(tail->bytes[0]));
				counter_u64_add(rule->bytes[1],
				    counter_u64_fetch(tail->bytes[1]));
			}
		}
	}

	/* Park the old set in the (now free) inactive slot for teardown. */
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_kruleset(rs);

	return (0);
}
1145
/*
 * Compute the pfsync ruleset checksum over all inactive (to-be-committed)
 * rulesets and rebuild the per-ruleset rule-pointer arrays used for
 * rule-number lookup.  Stores the digest in V_pf_status.pf_chksum.
 *
 * Returns ENOMEM if a pointer array cannot be allocated, else 0.
 */
static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_krule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		/* Throw away any stale array before rebuilding. */
		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		/*
		 * Index each rule by its number.  NOTE(review): this
		 * assumes rule->nr < rcount for every queued rule; the
		 * numbering is presumably maintained by the add path —
		 * confirm before touching rcount bookkeeping.
		 */
		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}
1185
1186 static int
1187 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
1188     sa_family_t af)
1189 {
1190         int error = 0;
1191
1192         switch (addr->type) {
1193         case PF_ADDR_TABLE:
1194                 addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
1195                 if (addr->p.tbl == NULL)
1196                         error = ENOMEM;
1197                 break;
1198         case PF_ADDR_DYNIFTL:
1199                 error = pfi_dynaddr_setup(addr, af);
1200                 break;
1201         }
1202
1203         return (error);
1204 }
1205
1206 static void
1207 pf_addr_copyout(struct pf_addr_wrap *addr)
1208 {
1209
1210         switch (addr->type) {
1211         case PF_ADDR_DYNIFTL:
1212                 pfi_dynaddr_copyout(addr);
1213                 break;
1214         case PF_ADDR_TABLE:
1215                 pf_tbladdr_copyout(addr);
1216                 break;
1217         }
1218 }
1219
1220 static void
1221 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
1222 {
1223         int     secs = time_uptime, diff;
1224
1225         bzero(out, sizeof(struct pf_src_node));
1226
1227         bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
1228         bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
1229
1230         if (in->rule.ptr != NULL)
1231                 out->rule.nr = in->rule.ptr->nr;
1232
1233         for (int i = 0; i < 2; i++) {
1234                 out->bytes[i] = counter_u64_fetch(in->bytes[i]);
1235                 out->packets[i] = counter_u64_fetch(in->packets[i]);
1236         }
1237
1238         out->states = in->states;
1239         out->conn = in->conn;
1240         out->af = in->af;
1241         out->ruletype = in->ruletype;
1242
1243         out->creation = secs - in->creation;
1244         if (out->expire > secs)
1245                 out->expire -= secs;
1246         else
1247                 out->expire = 0;
1248
1249         /* Adjust the connection rate estimate. */
1250         diff = secs - in->conn_rate.last;
1251         if (diff >= in->conn_rate.seconds)
1252                 out->conn_rate.count = 0;
1253         else
1254                 out->conn_rate.count -=
1255                     in->conn_rate.count * diff /
1256                     in->conn_rate.seconds;
1257 }
1258
1259 #ifdef ALTQ
1260 /*
1261  * Handle export of struct pf_kaltq to user binaries that may be using any
1262  * version of struct pf_altq.
1263  */
1264 static int
1265 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
1266 {
1267         u_int32_t version;
1268         
1269         if (ioc_size == sizeof(struct pfioc_altq_v0))
1270                 version = 0;
1271         else
1272                 version = pa->version;
1273
1274         if (version > PFIOC_ALTQ_VERSION)
1275                 return (EINVAL);
1276
1277 #define ASSIGN(x) exported_q->x = q->x
1278 #define COPY(x) \
1279         bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
1280 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
1281 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
1282
1283         switch (version) {
1284         case 0: {
1285                 struct pf_altq_v0 *exported_q =
1286                     &((struct pfioc_altq_v0 *)pa)->altq;
1287
1288                 COPY(ifname);
1289
1290                 ASSIGN(scheduler);
1291                 ASSIGN(tbrsize);
1292                 exported_q->tbrsize = SATU16(q->tbrsize);
1293                 exported_q->ifbandwidth = SATU32(q->ifbandwidth);
1294
1295                 COPY(qname);
1296                 COPY(parent);
1297                 ASSIGN(parent_qid);
1298                 exported_q->bandwidth = SATU32(q->bandwidth);
1299                 ASSIGN(priority);
1300                 ASSIGN(local_flags);
1301
1302                 ASSIGN(qlimit);
1303                 ASSIGN(flags);
1304
1305                 if (q->scheduler == ALTQT_HFSC) {
1306 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
1307 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
1308                             SATU32(q->pq_u.hfsc_opts.x)
1309                         
1310                         ASSIGN_OPT_SATU32(rtsc_m1);
1311                         ASSIGN_OPT(rtsc_d);
1312                         ASSIGN_OPT_SATU32(rtsc_m2);
1313
1314                         ASSIGN_OPT_SATU32(lssc_m1);
1315                         ASSIGN_OPT(lssc_d);
1316                         ASSIGN_OPT_SATU32(lssc_m2);
1317
1318                         ASSIGN_OPT_SATU32(ulsc_m1);
1319                         ASSIGN_OPT(ulsc_d);
1320                         ASSIGN_OPT_SATU32(ulsc_m2);
1321
1322                         ASSIGN_OPT(flags);
1323                         
1324 #undef ASSIGN_OPT
1325 #undef ASSIGN_OPT_SATU32
1326                 } else
1327                         COPY(pq_u);
1328
1329                 ASSIGN(qid);
1330                 break;
1331         }
1332         case 1: {
1333                 struct pf_altq_v1 *exported_q =
1334                     &((struct pfioc_altq_v1 *)pa)->altq;
1335
1336                 COPY(ifname);
1337
1338                 ASSIGN(scheduler);
1339                 ASSIGN(tbrsize);
1340                 ASSIGN(ifbandwidth);
1341
1342                 COPY(qname);
1343                 COPY(parent);
1344                 ASSIGN(parent_qid);
1345                 ASSIGN(bandwidth);
1346                 ASSIGN(priority);
1347                 ASSIGN(local_flags);
1348
1349                 ASSIGN(qlimit);
1350                 ASSIGN(flags);
1351                 COPY(pq_u);
1352
1353                 ASSIGN(qid);
1354                 break;
1355         }
1356         default:
1357                 panic("%s: unhandled struct pfioc_altq version", __func__);
1358                 break;
1359         }
1360
1361 #undef ASSIGN
1362 #undef COPY
1363 #undef SATU16
1364 #undef SATU32
1365
1366         return (0);
1367 }
1368
1369 /*
1370  * Handle import to struct pf_kaltq of struct pf_altq from user binaries
1371  * that may be using any version of it.
1372  */
static int
pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
{
	u_int32_t version;

	/* A v0-sized ioctl has no version field; infer version 0. */
	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

/* Field-wise copy helpers; widening from older layouts is implicit. */
#define ASSIGN(x) q->x = imported_q->x
#define COPY(x) \
	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))

	switch (version) {
	case 0: {
		struct pf_altq_v0 *imported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (imported_q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x

			/*
			 * The m1 and m2 parameters are being copied from
			 * 32-bit to 64-bit.
			 */
			ASSIGN_OPT(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT(rtsc_m2);

			ASSIGN_OPT(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT(lssc_m2);

			ASSIGN_OPT(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		/* v1 matches the kernel layout field-for-field. */
		struct pf_altq_v1 *imported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY

	return (0);
}
1473
1474 static struct pf_altq *
1475 pf_altq_get_nth_active(u_int32_t n)
1476 {
1477         struct pf_altq          *altq;
1478         u_int32_t                nr;
1479
1480         nr = 0;
1481         TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1482                 if (nr == n)
1483                         return (altq);
1484                 nr++;
1485         }
1486
1487         TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1488                 if (nr == n)
1489                         return (altq);
1490                 nr++;
1491         }
1492
1493         return (NULL);
1494 }
1495 #endif /* ALTQ */
1496
1497 void
1498 pf_krule_free(struct pf_krule *rule)
1499 {
1500         if (rule == NULL)
1501                 return;
1502
1503         counter_u64_free(rule->evaluations);
1504         for (int i = 0; i < 2; i++) {
1505                 counter_u64_free(rule->packets[i]);
1506                 counter_u64_free(rule->bytes[i]);
1507         }
1508         counter_u64_free(rule->states_cur);
1509         counter_u64_free(rule->states_tot);
1510         counter_u64_free(rule->src_nodes);
1511         free(rule, M_PFRULE);
1512 }
1513
1514 static void
1515 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1516     struct pf_pooladdr *pool)
1517 {
1518
1519         bzero(pool, sizeof(*pool));
1520         bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1521         strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1522 }
1523
1524 static void
1525 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1526     struct pf_kpooladdr *kpool)
1527 {
1528
1529         bzero(kpool, sizeof(*kpool));
1530         bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1531         strlcpy(kpool->ifname, pool->ifname, sizeof(kpool->ifname));
1532 }
1533
1534 static void
1535 pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool)
1536 {
1537         bzero(pool, sizeof(*pool));
1538
1539         bcopy(&kpool->key, &pool->key, sizeof(pool->key));
1540         bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter));
1541
1542         pool->tblidx = kpool->tblidx;
1543         pool->proxy_port[0] = kpool->proxy_port[0];
1544         pool->proxy_port[1] = kpool->proxy_port[1];
1545         pool->opts = kpool->opts;
1546 }
1547
1548 static int
1549 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
1550 {
1551         _Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
1552         _Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
1553
1554         bzero(kpool, sizeof(*kpool));
1555
1556         bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
1557         bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
1558
1559         kpool->tblidx = pool->tblidx;
1560         kpool->proxy_port[0] = pool->proxy_port[0];
1561         kpool->proxy_port[1] = pool->proxy_port[1];
1562         kpool->opts = pool->opts;
1563
1564         return (0);
1565 }
1566
1567 static void
1568 pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
1569 {
1570
1571         bzero(rule, sizeof(*rule));
1572
1573         bcopy(&krule->src, &rule->src, sizeof(rule->src));
1574         bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));
1575
1576         for (int i = 0; i < PF_SKIP_COUNT; ++i) {
1577                 if (rule->skip[i].ptr == NULL)
1578                         rule->skip[i].nr = -1;
1579                 else
1580                         rule->skip[i].nr = krule->skip[i].ptr->nr;
1581         }
1582
1583         strlcpy(rule->label, krule->label[0], sizeof(rule->label));
1584         strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
1585         strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
1586         strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
1587         strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
1588         strlcpy(rule->match_tagname, krule->match_tagname,
1589             sizeof(rule->match_tagname));
1590         strlcpy(rule->overload_tblname, krule->overload_tblname,
1591             sizeof(rule->overload_tblname));
1592
1593         pf_kpool_to_pool(&krule->rpool, &rule->rpool);
1594
1595         rule->evaluations = counter_u64_fetch(krule->evaluations);
1596         for (int i = 0; i < 2; i++) {
1597                 rule->packets[i] = counter_u64_fetch(krule->packets[i]);
1598                 rule->bytes[i] = counter_u64_fetch(krule->bytes[i]);
1599         }
1600
1601         /* kif, anchor, overload_tbl are not copied over. */
1602
1603         rule->os_fingerprint = krule->os_fingerprint;
1604
1605         rule->rtableid = krule->rtableid;
1606         bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout));
1607         rule->max_states = krule->max_states;
1608         rule->max_src_nodes = krule->max_src_nodes;
1609         rule->max_src_states = krule->max_src_states;
1610         rule->max_src_conn = krule->max_src_conn;
1611         rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit;
1612         rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds;
1613         rule->qid = krule->qid;
1614         rule->pqid = krule->pqid;
1615         rule->nr = krule->nr;
1616         rule->prob = krule->prob;
1617         rule->cuid = krule->cuid;
1618         rule->cpid = krule->cpid;
1619
1620         rule->return_icmp = krule->return_icmp;
1621         rule->return_icmp6 = krule->return_icmp6;
1622         rule->max_mss = krule->max_mss;
1623         rule->tag = krule->tag;
1624         rule->match_tag = krule->match_tag;
1625         rule->scrub_flags = krule->scrub_flags;
1626
1627         bcopy(&krule->uid, &rule->uid, sizeof(krule->uid));
1628         bcopy(&krule->gid, &rule->gid, sizeof(krule->gid));
1629
1630         rule->rule_flag = krule->rule_flag;
1631         rule->action = krule->action;
1632         rule->direction = krule->direction;
1633         rule->log = krule->log;
1634         rule->logif = krule->logif;
1635         rule->quick = krule->quick;
1636         rule->ifnot = krule->ifnot;
1637         rule->match_tag_not = krule->match_tag_not;
1638         rule->natpass = krule->natpass;
1639
1640         rule->keep_state = krule->keep_state;
1641         rule->af = krule->af;
1642         rule->proto = krule->proto;
1643         rule->type = krule->type;
1644         rule->code = krule->code;
1645         rule->flags = krule->flags;
1646         rule->flagset = krule->flagset;
1647         rule->min_ttl = krule->min_ttl;
1648         rule->allow_opts = krule->allow_opts;
1649         rule->rt = krule->rt;
1650         rule->return_ttl = krule->return_ttl;
1651         rule->tos = krule->tos;
1652         rule->set_tos = krule->set_tos;
1653         rule->anchor_relative = krule->anchor_relative;
1654         rule->anchor_wildcard = krule->anchor_wildcard;
1655
1656         rule->flush = krule->flush;
1657         rule->prio = krule->prio;
1658         rule->set_prio[0] = krule->set_prio[0];
1659         rule->set_prio[1] = krule->set_prio[1];
1660
1661         bcopy(&krule->divert, &rule->divert, sizeof(krule->divert));
1662
1663         rule->u_states_cur = counter_u64_fetch(krule->states_cur);
1664         rule->u_states_tot = counter_u64_fetch(krule->states_tot);
1665         rule->u_src_nodes = counter_u64_fetch(krule->src_nodes);
1666 }
1667
1668 static int
1669 pf_check_rule_addr(const struct pf_rule_addr *addr)
1670 {
1671
1672         switch (addr->addr.type) {
1673         case PF_ADDR_ADDRMASK:
1674         case PF_ADDR_NOROUTE:
1675         case PF_ADDR_DYNIFTL:
1676         case PF_ADDR_TABLE:
1677         case PF_ADDR_URPFFAILED:
1678         case PF_ADDR_RANGE:
1679                 break;
1680         default:
1681                 return (EINVAL);
1682         }
1683
1684         if (addr->addr.p.dyn != NULL) {
1685                 return (EINVAL);
1686         }
1687
1688         return (0);
1689 }
1690
/*
 * Extract a struct pf_addr from the "addr" binary field of an nvlist.
 * Returns 0 on success or the pf_nvbinary() error (e.g. wrong size or
 * missing key).
 */
static int
pf_nvaddr_to_addr(const nvlist_t *nvl, struct pf_addr *paddr)
{
	return (pf_nvbinary(nvl, "addr", paddr, sizeof(*paddr)));
}
1696
1697 static nvlist_t *
1698 pf_addr_to_nvaddr(const struct pf_addr *paddr)
1699 {
1700         nvlist_t *nvl;
1701
1702         nvl = nvlist_create(0);
1703         if (nvl == NULL)
1704                 return (NULL);
1705
1706         nvlist_add_binary(nvl, "addr", paddr, sizeof(*paddr));
1707
1708         return (nvl);
1709 }
1710
1711 static int
1712 pf_nvmape_to_mape(const nvlist_t *nvl, struct pf_mape_portset *mape)
1713 {
1714         int error = 0;
1715
1716         bzero(mape, sizeof(*mape));
1717         PFNV_CHK(pf_nvuint8(nvl, "offset", &mape->offset));
1718         PFNV_CHK(pf_nvuint8(nvl, "psidlen", &mape->psidlen));
1719         PFNV_CHK(pf_nvuint16(nvl, "psid", &mape->psid));
1720
1721 errout:
1722         return (error);
1723 }
1724
1725 static nvlist_t *
1726 pf_mape_to_nvmape(const struct pf_mape_portset *mape)
1727 {
1728         nvlist_t *nvl;
1729
1730         nvl = nvlist_create(0);
1731         if (nvl == NULL)
1732                 return (NULL);
1733
1734         nvlist_add_number(nvl, "offset", mape->offset);
1735         nvlist_add_number(nvl, "psidlen", mape->psidlen);
1736         nvlist_add_number(nvl, "psid", mape->psid);
1737
1738         return (nvl);
1739 }
1740
1741 static int
1742 pf_nvpool_to_pool(const nvlist_t *nvl, struct pf_kpool *kpool)
1743 {
1744         int error = 0;
1745
1746         bzero(kpool, sizeof(*kpool));
1747
1748         PFNV_CHK(pf_nvbinary(nvl, "key", &kpool->key, sizeof(kpool->key)));
1749
1750         if (nvlist_exists_nvlist(nvl, "counter")) {
1751                 PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "counter"),
1752                     &kpool->counter));
1753         }
1754
1755         PFNV_CHK(pf_nvint(nvl, "tblidx", &kpool->tblidx));
1756         PFNV_CHK(pf_nvuint16_array(nvl, "proxy_port", kpool->proxy_port, 2,
1757             NULL));
1758         PFNV_CHK(pf_nvuint8(nvl, "opts", &kpool->opts));
1759
1760         if (nvlist_exists_nvlist(nvl, "mape")) {
1761                 PFNV_CHK(pf_nvmape_to_mape(nvlist_get_nvlist(nvl, "mape"),
1762                     &kpool->mape));
1763         }
1764
1765 errout:
1766         return (error);
1767 }
1768
1769 static nvlist_t *
1770 pf_pool_to_nvpool(const struct pf_kpool *pool)
1771 {
1772         nvlist_t *nvl;
1773         nvlist_t *tmp;
1774
1775         nvl = nvlist_create(0);
1776         if (nvl == NULL)
1777                 return (NULL);
1778
1779         nvlist_add_binary(nvl, "key", &pool->key, sizeof(pool->key));
1780         tmp = pf_addr_to_nvaddr(&pool->counter);
1781         if (tmp == NULL)
1782                 goto error;
1783         nvlist_add_nvlist(nvl, "counter", tmp);
1784
1785         nvlist_add_number(nvl, "tblidx", pool->tblidx);
1786         pf_uint16_array_nv(nvl, "proxy_port", pool->proxy_port, 2);
1787         nvlist_add_number(nvl, "opts", pool->opts);
1788
1789         tmp = pf_mape_to_nvmape(&pool->mape);
1790         if (tmp == NULL)
1791                 goto error;
1792         nvlist_add_nvlist(nvl, "mape", tmp);
1793
1794         return (nvl);
1795
1796 error:
1797         nvlist_destroy(nvl);
1798         return (NULL);
1799 }
1800
1801 static int
1802 pf_nvaddr_wrap_to_addr_wrap(const nvlist_t *nvl, struct pf_addr_wrap *addr)
1803 {
1804         int error = 0;
1805
1806         bzero(addr, sizeof(*addr));
1807
1808         PFNV_CHK(pf_nvuint8(nvl, "type", &addr->type));
1809         PFNV_CHK(pf_nvuint8(nvl, "iflags", &addr->iflags));
1810         if (addr->type == PF_ADDR_DYNIFTL)
1811                 PFNV_CHK(pf_nvstring(nvl, "ifname", addr->v.ifname,
1812                     sizeof(addr->v.ifname)));
1813         if (addr->type == PF_ADDR_TABLE)
1814                 PFNV_CHK(pf_nvstring(nvl, "tblname", addr->v.tblname,
1815                     sizeof(addr->v.tblname)));
1816
1817         if (! nvlist_exists_nvlist(nvl, "addr"))
1818                 return (EINVAL);
1819         PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "addr"),
1820             &addr->v.a.addr));
1821
1822         if (! nvlist_exists_nvlist(nvl, "mask"))
1823                 return (EINVAL);
1824         PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "mask"),
1825             &addr->v.a.mask));
1826
1827         switch (addr->type) {
1828         case PF_ADDR_DYNIFTL:
1829         case PF_ADDR_TABLE:
1830         case PF_ADDR_RANGE:
1831         case PF_ADDR_ADDRMASK:
1832         case PF_ADDR_NOROUTE:
1833         case PF_ADDR_URPFFAILED:
1834                 break;
1835         default:
1836                 return (EINVAL);
1837         }
1838
1839 errout:
1840         return (error);
1841 }
1842
1843 static nvlist_t *
1844 pf_addr_wrap_to_nvaddr_wrap(const struct pf_addr_wrap *addr)
1845 {
1846         nvlist_t *nvl;
1847         nvlist_t *tmp;
1848
1849         nvl = nvlist_create(0);
1850         if (nvl == NULL)
1851                 return (NULL);
1852
1853         nvlist_add_number(nvl, "type", addr->type);
1854         nvlist_add_number(nvl, "iflags", addr->iflags);
1855         if (addr->type == PF_ADDR_DYNIFTL)
1856                 nvlist_add_string(nvl, "ifname", addr->v.ifname);
1857         if (addr->type == PF_ADDR_TABLE)
1858                 nvlist_add_string(nvl, "tblname", addr->v.tblname);
1859
1860         tmp = pf_addr_to_nvaddr(&addr->v.a.addr);
1861         if (tmp == NULL)
1862                 goto error;
1863         nvlist_add_nvlist(nvl, "addr", tmp);
1864         tmp = pf_addr_to_nvaddr(&addr->v.a.mask);
1865         if (tmp == NULL)
1866                 goto error;
1867         nvlist_add_nvlist(nvl, "mask", tmp);
1868
1869         return (nvl);
1870
1871 error:
1872         nvlist_destroy(nvl);
1873         return (NULL);
1874 }
1875
1876 static int
1877 pf_validate_op(uint8_t op)
1878 {
1879         switch (op) {
1880         case PF_OP_NONE:
1881         case PF_OP_IRG:
1882         case PF_OP_EQ:
1883         case PF_OP_NE:
1884         case PF_OP_LT:
1885         case PF_OP_LE:
1886         case PF_OP_GT:
1887         case PF_OP_GE:
1888         case PF_OP_XRG:
1889         case PF_OP_RRG:
1890                 break;
1891         default:
1892                 return (EINVAL);
1893         }
1894
1895         return (0);
1896 }
1897
1898 static int
1899 pf_nvrule_addr_to_rule_addr(const nvlist_t *nvl, struct pf_rule_addr *addr)
1900 {
1901         int error = 0;
1902
1903         if (! nvlist_exists_nvlist(nvl, "addr"))
1904                 return (EINVAL);
1905
1906         PFNV_CHK(pf_nvaddr_wrap_to_addr_wrap(nvlist_get_nvlist(nvl, "addr"),
1907             &addr->addr));
1908         PFNV_CHK(pf_nvuint16_array(nvl, "port", addr->port, 2, NULL));
1909         PFNV_CHK(pf_nvuint8(nvl, "neg", &addr->neg));
1910         PFNV_CHK(pf_nvuint8(nvl, "port_op", &addr->port_op));
1911
1912         PFNV_CHK(pf_validate_op(addr->port_op));
1913
1914 errout:
1915         return (error);
1916 }
1917
1918 static nvlist_t *
1919 pf_rule_addr_to_nvrule_addr(const struct pf_rule_addr *addr)
1920 {
1921         nvlist_t *nvl;
1922         nvlist_t *tmp;
1923
1924         nvl = nvlist_create(0);
1925         if (nvl == NULL)
1926                 return (NULL);
1927
1928         tmp = pf_addr_wrap_to_nvaddr_wrap(&addr->addr);
1929         if (tmp == NULL)
1930                 goto error;
1931         nvlist_add_nvlist(nvl, "addr", tmp);
1932         pf_uint16_array_nv(nvl, "port", addr->port, 2);
1933         nvlist_add_number(nvl, "neg", addr->neg);
1934         nvlist_add_number(nvl, "port_op", addr->port_op);
1935
1936         return (nvl);
1937
1938 error:
1939         nvlist_destroy(nvl);
1940         return (NULL);
1941 }
1942
1943 static int
1944 pf_nvrule_uid_to_rule_uid(const nvlist_t *nvl, struct pf_rule_uid *uid)
1945 {
1946         int error = 0;
1947
1948         bzero(uid, sizeof(*uid));
1949
1950         PFNV_CHK(pf_nvuint32_array(nvl, "uid", uid->uid, 2, NULL));
1951         PFNV_CHK(pf_nvuint8(nvl, "op", &uid->op));
1952
1953         PFNV_CHK(pf_validate_op(uid->op));
1954
1955 errout:
1956         return (error);
1957 }
1958
1959 static nvlist_t *
1960 pf_rule_uid_to_nvrule_uid(const struct pf_rule_uid *uid)
1961 {
1962         nvlist_t *nvl;
1963
1964         nvl = nvlist_create(0);
1965         if (nvl == NULL)
1966                 return (NULL);
1967
1968         pf_uint32_array_nv(nvl, "uid", uid->uid, 2);
1969         nvlist_add_number(nvl, "op", uid->op);
1970
1971         return (nvl);
1972 }
1973
1974 static int
1975 pf_nvrule_gid_to_rule_gid(const nvlist_t *nvl, struct pf_rule_gid *gid)
1976 {
1977         /* Cheat a little. These stucts are the same, other than the name of
1978          * the first field. */
1979         return (pf_nvrule_uid_to_rule_uid(nvl, (struct pf_rule_uid *)gid));
1980 }
1981
1982 static int
1983 pf_nvrule_to_krule(const nvlist_t *nvl, struct pf_krule **prule)
1984 {
1985         struct pf_krule *rule;
1986         int error = 0;
1987
1988 #define ERROUT(x)       ERROUT_FUNCTION(errout, x)
1989
1990         rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK | M_ZERO);
1991
1992         PFNV_CHK(pf_nvuint32(nvl, "nr", &rule->nr));
1993
1994         if (! nvlist_exists_nvlist(nvl, "src"))
1995                 ERROUT(EINVAL);
1996
1997         error = pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "src"),
1998             &rule->src);
1999         if (error != 0)
2000                 ERROUT(error);
2001
2002         if (! nvlist_exists_nvlist(nvl, "dst"))
2003                 ERROUT(EINVAL);
2004
2005         PFNV_CHK(pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "dst"),
2006             &rule->dst));
2007
2008         if (nvlist_exists_string(nvl, "label")) {
2009                 PFNV_CHK(pf_nvstring(nvl, "label", rule->label[0],
2010                     sizeof(rule->label[0])));
2011         } else if (nvlist_exists_string_array(nvl, "labels")) {
2012                 const char *const *strs;
2013                 size_t items;
2014                 int ret;
2015
2016                 strs = nvlist_get_string_array(nvl, "labels", &items);
2017                 if (items > PF_RULE_MAX_LABEL_COUNT)
2018                         ERROUT(E2BIG);
2019
2020                 for (size_t i = 0; i < items; i++) {
2021                         ret = strlcpy(rule->label[i], strs[i],
2022                             sizeof(rule->label[0]));
2023                         if (ret >= sizeof(rule->label[0]))
2024                                 ERROUT(E2BIG);
2025                 }
2026         }
2027
2028         PFNV_CHK(pf_nvstring(nvl, "ifname", rule->ifname,
2029             sizeof(rule->ifname)));
2030         PFNV_CHK(pf_nvstring(nvl, "qname", rule->qname, sizeof(rule->qname)));
2031         PFNV_CHK(pf_nvstring(nvl, "pqname", rule->pqname,
2032             sizeof(rule->pqname)));
2033         PFNV_CHK(pf_nvstring(nvl, "tagname", rule->tagname,
2034             sizeof(rule->tagname)));
2035         PFNV_CHK(pf_nvstring(nvl, "match_tagname", rule->match_tagname,
2036             sizeof(rule->match_tagname)));
2037         PFNV_CHK(pf_nvstring(nvl, "overload_tblname", rule->overload_tblname,
2038             sizeof(rule->overload_tblname)));
2039
2040         if (! nvlist_exists_nvlist(nvl, "rpool"))
2041                 ERROUT(EINVAL);
2042         PFNV_CHK(pf_nvpool_to_pool(nvlist_get_nvlist(nvl, "rpool"),
2043             &rule->rpool));
2044
2045         PFNV_CHK(pf_nvuint32(nvl, "os_fingerprint", &rule->os_fingerprint));
2046
2047         PFNV_CHK(pf_nvint(nvl, "rtableid", &rule->rtableid));
2048         PFNV_CHK(pf_nvuint32_array(nvl, "timeout", rule->timeout, PFTM_MAX, NULL));
2049         PFNV_CHK(pf_nvuint32(nvl, "max_states", &rule->max_states));
2050         PFNV_CHK(pf_nvuint32(nvl, "max_src_nodes", &rule->max_src_nodes));
2051         PFNV_CHK(pf_nvuint32(nvl, "max_src_states", &rule->max_src_states));
2052         PFNV_CHK(pf_nvuint32(nvl, "max_src_conn", &rule->max_src_conn));
2053         PFNV_CHK(pf_nvuint32(nvl, "max_src_conn_rate.limit",
2054             &rule->max_src_conn_rate.limit));
2055         PFNV_CHK(pf_nvuint32(nvl, "max_src_conn_rate.seconds",
2056             &rule->max_src_conn_rate.seconds));
2057         PFNV_CHK(pf_nvuint32(nvl, "prob", &rule->prob));
2058         PFNV_CHK(pf_nvuint32(nvl, "cuid", &rule->cuid));
2059         PFNV_CHK(pf_nvuint32(nvl, "cpid", &rule->cpid));
2060
2061         PFNV_CHK(pf_nvuint16(nvl, "return_icmp", &rule->return_icmp));
2062         PFNV_CHK(pf_nvuint16(nvl, "return_icmp6", &rule->return_icmp6));
2063
2064         PFNV_CHK(pf_nvuint16(nvl, "max_mss", &rule->max_mss));
2065         PFNV_CHK(pf_nvuint16(nvl, "scrub_flags", &rule->scrub_flags));
2066
2067         if (! nvlist_exists_nvlist(nvl, "uid"))
2068                 ERROUT(EINVAL);
2069         PFNV_CHK(pf_nvrule_uid_to_rule_uid(nvlist_get_nvlist(nvl, "uid"),
2070             &rule->uid));
2071
2072         if (! nvlist_exists_nvlist(nvl, "gid"))
2073                 ERROUT(EINVAL);
2074         PFNV_CHK(pf_nvrule_gid_to_rule_gid(nvlist_get_nvlist(nvl, "gid"),
2075             &rule->gid));
2076
2077         PFNV_CHK(pf_nvuint32(nvl, "rule_flag", &rule->rule_flag));
2078         PFNV_CHK(pf_nvuint8(nvl, "action", &rule->action));
2079         PFNV_CHK(pf_nvuint8(nvl, "direction", &rule->direction));
2080         PFNV_CHK(pf_nvuint8(nvl, "log", &rule->log));
2081         PFNV_CHK(pf_nvuint8(nvl, "logif", &rule->logif));
2082         PFNV_CHK(pf_nvuint8(nvl, "quick", &rule->quick));
2083         PFNV_CHK(pf_nvuint8(nvl, "ifnot", &rule->ifnot));
2084         PFNV_CHK(pf_nvuint8(nvl, "match_tag_not", &rule->match_tag_not));
2085         PFNV_CHK(pf_nvuint8(nvl, "natpass", &rule->natpass));
2086
2087         PFNV_CHK(pf_nvuint8(nvl, "keep_state", &rule->keep_state));
2088         PFNV_CHK(pf_nvuint8(nvl, "af", &rule->af));
2089         PFNV_CHK(pf_nvuint8(nvl, "proto", &rule->proto));
2090         PFNV_CHK(pf_nvuint8(nvl, "type", &rule->type));
2091         PFNV_CHK(pf_nvuint8(nvl, "code", &rule->code));
2092         PFNV_CHK(pf_nvuint8(nvl, "flags", &rule->flags));
2093         PFNV_CHK(pf_nvuint8(nvl, "flagset", &rule->flagset));
2094         PFNV_CHK(pf_nvuint8(nvl, "min_ttl", &rule->min_ttl));
2095         PFNV_CHK(pf_nvuint8(nvl, "allow_opts", &rule->allow_opts));
2096         PFNV_CHK(pf_nvuint8(nvl, "rt", &rule->rt));
2097         PFNV_CHK(pf_nvuint8(nvl, "return_ttl", &rule->return_ttl));
2098         PFNV_CHK(pf_nvuint8(nvl, "tos", &rule->tos));
2099         PFNV_CHK(pf_nvuint8(nvl, "set_tos", &rule->set_tos));
2100         PFNV_CHK(pf_nvuint8(nvl, "anchor_relative", &rule->anchor_relative));
2101         PFNV_CHK(pf_nvuint8(nvl, "anchor_wildcard", &rule->anchor_wildcard));
2102
2103         PFNV_CHK(pf_nvuint8(nvl, "flush", &rule->flush));
2104         PFNV_CHK(pf_nvuint8(nvl, "prio", &rule->prio));
2105
2106         PFNV_CHK(pf_nvuint8_array(nvl, "set_prio", &rule->prio, 2, NULL));
2107
2108         if (nvlist_exists_nvlist(nvl, "divert")) {
2109                 const nvlist_t *nvldivert = nvlist_get_nvlist(nvl, "divert");
2110
2111                 if (! nvlist_exists_nvlist(nvldivert, "addr"))
2112                         ERROUT(EINVAL);
2113                 PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvldivert, "addr"),
2114                     &rule->divert.addr));
2115                 PFNV_CHK(pf_nvuint16(nvldivert, "port", &rule->divert.port));
2116         }
2117
2118         /* Validation */
2119 #ifndef INET
2120         if (rule->af == AF_INET)
2121                 ERROUT(EAFNOSUPPORT);
2122 #endif /* INET */
2123 #ifndef INET6
2124         if (rule->af == AF_INET6)
2125                 ERROUT(EAFNOSUPPORT);
2126 #endif /* INET6 */
2127
2128         PFNV_CHK(pf_check_rule_addr(&rule->src));
2129         PFNV_CHK(pf_check_rule_addr(&rule->dst));
2130
2131         *prule = rule;
2132
2133         return (0);
2134
2135 #undef ERROUT
2136 errout:
2137         pf_krule_free(rule);
2138         *prule = NULL;
2139
2140         return (error);
2141 }
2142
2143 static nvlist_t *
2144 pf_divert_to_nvdivert(const struct pf_krule *rule)
2145 {
2146         nvlist_t *nvl;
2147         nvlist_t *tmp;
2148
2149         nvl = nvlist_create(0);
2150         if (nvl == NULL)
2151                 return (NULL);
2152
2153         tmp = pf_addr_to_nvaddr(&rule->divert.addr);
2154         if (tmp == NULL)
2155                 goto error;
2156         nvlist_add_nvlist(nvl, "addr", tmp);
2157         nvlist_add_number(nvl, "port", rule->divert.port);
2158
2159         return (nvl);
2160
2161 error:
2162         nvlist_destroy(nvl);
2163         return (NULL);
2164 }
2165
2166 static nvlist_t *
2167 pf_krule_to_nvrule(const struct pf_krule *rule)
2168 {
2169         nvlist_t *nvl, *tmp;
2170
2171         nvl = nvlist_create(0);
2172         if (nvl == NULL)
2173                 return (nvl);
2174
2175         nvlist_add_number(nvl, "nr", rule->nr);
2176         tmp = pf_rule_addr_to_nvrule_addr(&rule->src);
2177         if (tmp == NULL)
2178                 goto error;
2179         nvlist_add_nvlist(nvl, "src", tmp);
2180         tmp = pf_rule_addr_to_nvrule_addr(&rule->dst);
2181         if (tmp == NULL)
2182                 goto error;
2183         nvlist_add_nvlist(nvl, "dst", tmp);
2184
2185         for (int i = 0; i < PF_SKIP_COUNT; i++) {
2186                 nvlist_append_number_array(nvl, "skip",
2187                     rule->skip[i].ptr ? rule->skip[i].ptr->nr : -1);
2188         }
2189
2190         for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++) {
2191                 nvlist_append_string_array(nvl, "labels", rule->label[i]);
2192         }
2193         nvlist_add_string(nvl, "label", rule->label[0]);
2194         nvlist_add_string(nvl, "ifname", rule->ifname);
2195         nvlist_add_string(nvl, "qname", rule->qname);
2196         nvlist_add_string(nvl, "pqname", rule->pqname);
2197         nvlist_add_string(nvl, "tagname", rule->tagname);
2198         nvlist_add_string(nvl, "match_tagname", rule->match_tagname);
2199         nvlist_add_string(nvl, "overload_tblname", rule->overload_tblname);
2200
2201         tmp = pf_pool_to_nvpool(&rule->rpool);
2202         if (tmp == NULL)
2203                 goto error;
2204         nvlist_add_nvlist(nvl, "rpool", tmp);
2205
2206         nvlist_add_number(nvl, "evaluations",
2207             counter_u64_fetch(rule->evaluations));
2208         for (int i = 0; i < 2; i++) {
2209                 nvlist_append_number_array(nvl, "packets",
2210                     counter_u64_fetch(rule->packets[i]));
2211                 nvlist_append_number_array(nvl, "bytes",
2212                     counter_u64_fetch(rule->bytes[i]));
2213         }
2214
2215         nvlist_add_number(nvl, "os_fingerprint", rule->os_fingerprint);
2216
2217         nvlist_add_number(nvl, "rtableid", rule->rtableid);
2218         pf_uint32_array_nv(nvl, "timeout", rule->timeout, PFTM_MAX);
2219         nvlist_add_number(nvl, "max_states", rule->max_states);
2220         nvlist_add_number(nvl, "max_src_nodes", rule->max_src_nodes);
2221         nvlist_add_number(nvl, "max_src_states", rule->max_src_states);
2222         nvlist_add_number(nvl, "max_src_conn", rule->max_src_conn);
2223         nvlist_add_number(nvl, "max_src_conn_rate.limit",
2224             rule->max_src_conn_rate.limit);
2225         nvlist_add_number(nvl, "max_src_conn_rate.seconds",
2226             rule->max_src_conn_rate.seconds);
2227         nvlist_add_number(nvl, "qid", rule->qid);
2228         nvlist_add_number(nvl, "pqid", rule->pqid);
2229         nvlist_add_number(nvl, "prob", rule->prob);
2230         nvlist_add_number(nvl, "cuid", rule->cuid);
2231         nvlist_add_number(nvl, "cpid", rule->cpid);
2232
2233         nvlist_add_number(nvl, "states_cur",
2234             counter_u64_fetch(rule->states_cur));
2235         nvlist_add_number(nvl, "states_tot",
2236             counter_u64_fetch(rule->states_tot));
2237         nvlist_add_number(nvl, "src_nodes",
2238             counter_u64_fetch(rule->src_nodes));
2239
2240         nvlist_add_number(nvl, "return_icmp", rule->return_icmp);
2241         nvlist_add_number(nvl, "return_icmp6", rule->return_icmp6);
2242
2243         nvlist_add_number(nvl, "max_mss", rule->max_mss);
2244         nvlist_add_number(nvl, "scrub_flags", rule->scrub_flags);
2245
2246         tmp = pf_rule_uid_to_nvrule_uid(&rule->uid);
2247         if (tmp == NULL)
2248                 goto error;
2249         nvlist_add_nvlist(nvl, "uid", tmp);
2250         tmp = pf_rule_uid_to_nvrule_uid((const struct pf_rule_uid *)&rule->gid);
2251         if (tmp == NULL)
2252                 goto error;
2253         nvlist_add_nvlist(nvl, "gid", tmp);
2254
2255         nvlist_add_number(nvl, "rule_flag", rule->rule_flag);
2256         nvlist_add_number(nvl, "action", rule->action);
2257         nvlist_add_number(nvl, "direction", rule->direction);
2258         nvlist_add_number(nvl, "log", rule->log);
2259         nvlist_add_number(nvl, "logif", rule->logif);
2260         nvlist_add_number(nvl, "quick", rule->quick);
2261         nvlist_add_number(nvl, "ifnot", rule->ifnot);
2262         nvlist_add_number(nvl, "match_tag_not", rule->match_tag_not);
2263         nvlist_add_number(nvl, "natpass", rule->natpass);
2264
2265         nvlist_add_number(nvl, "keep_state", rule->keep_state);
2266         nvlist_add_number(nvl, "af", rule->af);
2267         nvlist_add_number(nvl, "proto", rule->proto);
2268         nvlist_add_number(nvl, "type", rule->type);
2269         nvlist_add_number(nvl, "code", rule->code);
2270         nvlist_add_number(nvl, "flags", rule->flags);
2271         nvlist_add_number(nvl, "flagset", rule->flagset);
2272         nvlist_add_number(nvl, "min_ttl", rule->min_ttl);
2273         nvlist_add_number(nvl, "allow_opts", rule->allow_opts);
2274         nvlist_add_number(nvl, "rt", rule->rt);
2275         nvlist_add_number(nvl, "return_ttl", rule->return_ttl);
2276         nvlist_add_number(nvl, "tos", rule->tos);
2277         nvlist_add_number(nvl, "set_tos", rule->set_tos);
2278         nvlist_add_number(nvl, "anchor_relative", rule->anchor_relative);
2279         nvlist_add_number(nvl, "anchor_wildcard", rule->anchor_wildcard);
2280
2281         nvlist_add_number(nvl, "flush", rule->flush);
2282         nvlist_add_number(nvl, "prio", rule->prio);
2283
2284         pf_uint8_array_nv(nvl, "set_prio", &rule->prio, 2);
2285
2286         tmp = pf_divert_to_nvdivert(rule);
2287         if (tmp == NULL)
2288                 goto error;
2289         nvlist_add_nvlist(nvl, "divert", tmp);
2290
2291         return (nvl);
2292
2293 error:
2294         nvlist_destroy(nvl);
2295         return (NULL);
2296 }
2297
/*
 * Convert a legacy (ioctl ABI) struct pf_rule from userspace into the
 * kernel's struct pf_krule.  Validates the address family against the
 * compiled-in protocols and the src/dst addresses before copying; on
 * any failure the target may be partially written but the caller is
 * expected to discard it.  Returns 0 or an errno.
 */
static int
pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
{
	int ret;

#ifndef INET
	if (rule->af == AF_INET) {
		return (EAFNOSUPPORT);
	}
#endif /* INET */
#ifndef INET6
	if (rule->af == AF_INET6) {
		return (EAFNOSUPPORT);
	}
#endif /* INET6 */

	/* Reject unknown address types and preset dynaddr pointers. */
	ret = pf_check_rule_addr(&rule->src);
	if (ret != 0)
		return (ret);
	ret = pf_check_rule_addr(&rule->dst);
	if (ret != 0)
		return (ret);

	bzero(krule, sizeof(*krule));

	bcopy(&rule->src, &krule->src, sizeof(rule->src));
	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));

	/*
	 * The legacy ABI carries a single label; it becomes the first
	 * entry of the krule's label array.  NOTE(review): the strlcpy
	 * bounds below use the *source* struct's field sizes — this is
	 * only safe while the pf_rule and pf_krule string fields have
	 * identical sizes; confirm if either struct changes.
	 */
	strlcpy(krule->label[0], rule->label, sizeof(rule->label));
	strlcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
	strlcpy(krule->qname, rule->qname, sizeof(rule->qname));
	strlcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
	strlcpy(krule->tagname, rule->tagname, sizeof(rule->tagname));
	strlcpy(krule->match_tagname, rule->match_tagname,
	    sizeof(rule->match_tagname));
	strlcpy(krule->overload_tblname, rule->overload_tblname,
	    sizeof(rule->overload_tblname));

	ret = pf_pool_to_kpool(&rule->rpool, &krule->rpool);
	if (ret != 0)
		return (ret);

	/* Don't allow userspace to set evaluations, packets or bytes. */
	/* kif, anchor, overload_tbl are not copied over. */

	krule->os_fingerprint = rule->os_fingerprint;

	krule->rtableid = rule->rtableid;
	bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout));
	krule->max_states = rule->max_states;
	krule->max_src_nodes = rule->max_src_nodes;
	krule->max_src_states = rule->max_src_states;
	krule->max_src_conn = rule->max_src_conn;
	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
	krule->qid = rule->qid;
	krule->pqid = rule->pqid;
	krule->nr = rule->nr;
	krule->prob = rule->prob;
	krule->cuid = rule->cuid;
	krule->cpid = rule->cpid;

	krule->return_icmp = rule->return_icmp;
	krule->return_icmp6 = rule->return_icmp6;
	krule->max_mss = rule->max_mss;
	krule->tag = rule->tag;
	krule->match_tag = rule->match_tag;
	krule->scrub_flags = rule->scrub_flags;

	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));

	krule->rule_flag = rule->rule_flag;
	krule->action = rule->action;
	krule->direction = rule->direction;
	krule->log = rule->log;
	krule->logif = rule->logif;
	krule->quick = rule->quick;
	krule->ifnot = rule->ifnot;
	krule->match_tag_not = rule->match_tag_not;
	krule->natpass = rule->natpass;

	krule->keep_state = rule->keep_state;
	krule->af = rule->af;
	krule->proto = rule->proto;
	krule->type = rule->type;
	krule->code = rule->code;
	krule->flags = rule->flags;
	krule->flagset = rule->flagset;
	krule->min_ttl = rule->min_ttl;
	krule->allow_opts = rule->allow_opts;
	krule->rt = rule->rt;
	krule->return_ttl = rule->return_ttl;
	krule->tos = rule->tos;
	krule->set_tos = rule->set_tos;
	krule->anchor_relative = rule->anchor_relative;
	krule->anchor_wildcard = rule->anchor_wildcard;

	krule->flush = rule->flush;
	krule->prio = rule->prio;
	krule->set_prio[0] = rule->set_prio[0];
	krule->set_prio[1] = rule->set_prio[1];

	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));

	return (0);
}
2405
2406 static bool
2407 pf_label_match(const struct pf_krule *rule, const char *label)
2408 {
2409         int i = 0;
2410
2411         while (*rule->label[i]) {
2412                 if (strcmp(rule->label[i], label) == 0)
2413                         return (true);
2414                 i++;
2415         }
2416
2417         return (false);
2418 }
2419
/*
 * Kill the state that exactly matches the given key/direction, but
 * only when the match is unique.  Used by the 'kill matching' path to
 * remove the counterpart of a state that was just unlinked; a
 * non-unique match is left alone rather than guessing.  Returns the
 * number of states killed (0 or 1).
 */
static unsigned int
pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
{
	struct pf_state *match;
	int more = 0;
	unsigned int killed = 0;

	/* Call with unlocked hashrow */

	/*
	 * NOTE(review): pf_find_state_all() appears to return the state
	 * with its hashrow held and reports additional matches via
	 * 'more'; when a non-unique match is found nothing is done with
	 * 'match' here — confirm no row lock is leaked in that branch.
	 */
	match = pf_find_state_all(key, dir, &more);
	if (match && !more) {
		pf_unlink_state(match, 0);
		killed++;
	}

	return (killed);
}
2437
/*
 * Scan one id-hash row and unlink every state matching the kill
 * criteria in 'psk' (address family, protocol, src/dst address and
 * port, route-to address, label, interface).  With psk_kill_match set,
 * the uniquely-matching counterpart state (e.g. the NAT pair) is
 * killed as well.  Returns the number of states killed.
 *
 * Locking: the row lock cannot be held across pf_unlink_state(), which
 * drops it, so after each kill the scan restarts from the top of the
 * row via the relock label.
 */
static int
pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
{
	struct pf_state		*s;
	struct pf_state_key	*sk;
	struct pf_addr		*srcaddr, *dstaddr;
	struct pf_state_key_cmp	 match_key;
	int			 idx, killed = 0;
	unsigned int		 dir;
	u_int16_t		 srcport, dstport;

relock_DIOCKILLSTATES:
	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(s, &ih->states, entry) {
		/*
		 * Pick src/dst as seen by the user: for outbound states
		 * the wire key stores them swapped.
		 */
		sk = s->key[PF_SK_WIRE];
		if (s->direction == PF_OUT) {
			srcaddr = &sk->addr[1];
			dstaddr = &sk->addr[0];
			srcport = sk->port[1];
			dstport = sk->port[0];
		} else {
			srcaddr = &sk->addr[0];
			dstaddr = &sk->addr[1];
			srcport = sk->port[0];
			dstport = sk->port[1];
		}

		/* Zero/empty criteria fields act as wildcards. */
		if (psk->psk_af && sk->af != psk->psk_af)
			continue;

		if (psk->psk_proto && psk->psk_proto != sk->proto)
			continue;

		if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
			continue;

		if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
			continue;

		if (!  PF_MATCHA(psk->psk_rt_addr.neg,
		    &psk->psk_rt_addr.addr.v.a.addr,
		    &psk->psk_rt_addr.addr.v.a.mask,
		    &s->rt_addr, sk->af))
			continue;

		if (psk->psk_src.port_op != 0 &&
		    ! pf_match_port(psk->psk_src.port_op,
		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
			continue;

		if (psk->psk_dst.port_op != 0 &&
		    ! pf_match_port(psk->psk_dst.port_op,
		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
			continue;

		if (psk->psk_label[0] &&
		    ! pf_label_match(s->rule.ptr, psk->psk_label))
			continue;

		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
		    s->kif->pfik_name))
			continue;

		if (psk->psk_kill_match) {
			/* Create the key to find matching states, with lock
			 * held. */

			bzero(&match_key, sizeof(match_key));

			/*
			 * The counterpart is looked up in the opposite
			 * direction, using the other side's key with
			 * its addresses/ports reversed.
			 */
			if (s->direction == PF_OUT) {
				dir = PF_IN;
				idx = PF_SK_STACK;
			} else {
				dir = PF_OUT;
				idx = PF_SK_WIRE;
			}

			match_key.af = s->key[idx]->af;
			match_key.proto = s->key[idx]->proto;
			PF_ACPY(&match_key.addr[0],
			    &s->key[idx]->addr[1], match_key.af);
			match_key.port[0] = s->key[idx]->port[1];
			PF_ACPY(&match_key.addr[1],
			    &s->key[idx]->addr[0], match_key.af);
			match_key.port[1] = s->key[idx]->port[0];
		}

		/* Drops the row lock; hence the restart below. */
		pf_unlink_state(s, PF_ENTER_LOCKED);
		killed++;

		if (psk->psk_kill_match)
			killed += pf_kill_matching_state(&match_key, dir);

		goto relock_DIOCKILLSTATES;
	}
	PF_HASHROW_UNLOCK(ih);

	return (killed);
}
2539
2540 static int
2541 pf_state_kill_to_kstate_kill(const struct pfioc_state_kill *psk,
2542     struct pf_kstate_kill *kill)
2543 {
2544         bzero(kill, sizeof(*kill));
2545
2546         bcopy(&psk->psk_pfcmp, &kill->psk_pfcmp, sizeof(kill->psk_pfcmp));
2547         kill->psk_af = psk->psk_af;
2548         kill->psk_proto = psk->psk_proto;
2549         bcopy(&psk->psk_src, &kill->psk_src, sizeof(kill->psk_src));
2550         bcopy(&psk->psk_dst, &kill->psk_dst, sizeof(kill->psk_dst));
2551         strlcpy(kill->psk_ifname, psk->psk_ifname, sizeof(kill->psk_ifname));
2552         strlcpy(kill->psk_label, psk->psk_label, sizeof(kill->psk_label));
2553
2554         return (0);
2555 }
2556
/*
 * Decode a state-comparison key from an nvlist into a zeroed
 * struct pf_state_cmp.  All three fields ("id", "creatorid",
 * "direction") are required; PFNV_CHK() records the first extraction
 * failure in 'error' and jumps to errout.
 *
 * Returns 0 on success or the errno from the failing extraction.
 */
static int
pf_nvstate_cmp_to_state_cmp(const nvlist_t *nvl, struct pf_state_cmp *cmp)
{
	int error = 0;

	bzero(cmp, sizeof(*cmp));

	PFNV_CHK(pf_nvuint64(nvl, "id", &cmp->id));
	PFNV_CHK(pf_nvuint32(nvl, "creatorid", &cmp->creatorid));
	PFNV_CHK(pf_nvuint8(nvl, "direction", &cmp->direction));

errout:
	return (error);
}
2571
/*
 * Decode a state-kill request from an nvlist (DIOCKILLSTATESNV /
 * DIOCCLRSTATESNV) into a zeroed struct pf_kstate_kill.
 *
 * "cmp", "src" and "dst" are mandatory sub-nvlists; their absence is
 * EINVAL.  "rt_addr" and "kill_match" are optional and leave their
 * zeroed defaults when missing.  PFNV_CHK() jumps to errout with the
 * first extraction error.
 *
 * Returns 0 on success or an errno value.
 */
static int
pf_nvstate_kill_to_kstate_kill(const nvlist_t *nvl,
    struct pf_kstate_kill *kill)
{
	int error = 0;

	bzero(kill, sizeof(*kill));

	if (! nvlist_exists_nvlist(nvl, "cmp"))
		return (EINVAL);

	PFNV_CHK(pf_nvstate_cmp_to_state_cmp(nvlist_get_nvlist(nvl, "cmp"),
	    &kill->psk_pfcmp));
	PFNV_CHK(pf_nvuint8(nvl, "af", &kill->psk_af));
	PFNV_CHK(pf_nvint(nvl, "proto", &kill->psk_proto));

	/* Source and destination address/port matchers are required. */
	if (! nvlist_exists_nvlist(nvl, "src"))
		return (EINVAL);
	PFNV_CHK(pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "src"),
	    &kill->psk_src));
	if (! nvlist_exists_nvlist(nvl, "dst"))
		return (EINVAL);
	PFNV_CHK(pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "dst"),
	    &kill->psk_dst));
	/* Optional route-to address matcher. */
	if (nvlist_exists_nvlist(nvl, "rt_addr")) {
		PFNV_CHK(pf_nvrule_addr_to_rule_addr(
		    nvlist_get_nvlist(nvl, "rt_addr"), &kill->psk_rt_addr));
	}

	PFNV_CHK(pf_nvstring(nvl, "ifname", kill->psk_ifname,
	    sizeof(kill->psk_ifname)));
	PFNV_CHK(pf_nvstring(nvl, "label", kill->psk_label,
	    sizeof(kill->psk_label)));
	/* Optional: also kill states matching the opposite direction
	 * of the ones selected above (e.g. the NAT-ed counterpart). */
	if (nvlist_exists_bool(nvl, "kill_match"))
		kill->psk_kill_match = nvlist_get_bool(nvl, "kill_match");

errout:
	return (error);
}
2611
/*
 * Validate a candidate rule and append it to the inactive (pending)
 * ruleset of 'anchor', where it waits for a later commit to become
 * active.  Shared by DIOCADDRULE and DIOCADDRULENV.
 *
 * Ownership: this function consumes 'rule'.  On success the rule is
 * linked into the inactive queue; on any failure both the rule and the
 * provisional interface descriptor are freed before returning, so the
 * caller must not touch 'rule' afterwards.
 *
 * 'ticket' must match the inactive ruleset's ticket and 'pool_ticket'
 * the address-pool buffer ticket, proving the caller holds a current
 * transaction; otherwise EBUSY is returned.
 */
static int
pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
    uint32_t pool_ticket, const char *anchor, const char *anchor_call,
    struct thread *td)
{
	struct pf_kruleset	*ruleset;
	struct pf_krule		*tail;
	struct pf_kpooladdr	*pa;
	struct pfi_kkif		*kif = NULL;
	int			 rs_num;
	int			 error = 0;

	/* ICMP return type lives in the high byte of return_icmp. */
	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
		error = EINVAL;
		goto errout_unlocked;
	}

#define	ERROUT(x)	ERROUT_FUNCTION(errout, x)

	/*
	 * Perform all M_WAITOK allocations before taking the rules
	 * write lock, so we never sleep while holding it.
	 */
	if (rule->ifname[0])
		kif = pf_kkif_create(M_WAITOK);
	rule->evaluations = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < 2; i++) {
		rule->packets[i] = counter_u64_alloc(M_WAITOK);
		rule->bytes[i] = counter_u64_alloc(M_WAITOK);
	}
	rule->states_cur = counter_u64_alloc(M_WAITOK);
	rule->states_tot = counter_u64_alloc(M_WAITOK);
	rule->src_nodes = counter_u64_alloc(M_WAITOK);
	/* Record the creating user and process for pfctl -vvs rules. */
	rule->cuid = td->td_ucred->cr_ruid;
	rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
	TAILQ_INIT(&rule->rpool.list);

	PF_RULES_WLOCK();
	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		ERROUT(EINVAL);
	rs_num = pf_get_ruleset_number(rule->action);
	if (rs_num >= PF_RULESET_MAX)
		ERROUT(EINVAL);
	/* Both tickets must match the in-progress transaction. */
	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
		DPFPRINTF(PF_DEBUG_MISC,
		    ("ticket: %d != [%d]%d\n", ticket, rs_num,
		    ruleset->rules[rs_num].inactive.ticket));
		ERROUT(EBUSY);
	}
	if (pool_ticket != V_ticket_pabuf) {
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pool_ticket: %d != %d\n", pool_ticket,
		    V_ticket_pabuf));
		ERROUT(EBUSY);
	}

	/* New rule goes at the tail; number it after the current last. */
	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
	    pf_krulequeue);
	if (tail)
		rule->nr = tail->nr + 1;
	else
		rule->nr = 0;
	if (rule->ifname[0]) {
		/* kif ownership passes to the interface layer here. */
		rule->kif = pfi_kkif_attach(kif, rule->ifname);
		kif = NULL;
		pfi_kkif_ref(rule->kif);
	} else
		rule->kif = NULL;

	/*
	 * From here on, validation failures accumulate in 'error'
	 * rather than bailing out immediately; side effects (tag and
	 * table references) are undone by pf_free_rule() below.
	 */
	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
		error = EBUSY;

#ifdef ALTQ
	/* set queue IDs */
	if (rule->qname[0] != 0) {
		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
			error = EBUSY;
		else if (rule->pqname[0] != 0) {
			if ((rule->pqid =
			    pf_qname2qid(rule->pqname)) == 0)
				error = EBUSY;
		} else
			/* No priority queue given: reuse the main one. */
			rule->pqid = rule->qid;
	}
#endif
	if (rule->tagname[0])
		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
			error = EBUSY;
	if (rule->match_tagname[0])
		if ((rule->match_tag =
		    pf_tagname2tag(rule->match_tagname)) == 0)
			error = EBUSY;
	/* route-to and friends require an explicit direction. */
	if (rule->rt && !rule->direction)
		error = EINVAL;
	if (!rule->log)
		rule->logif = 0;
	if (rule->logif >= PFLOGIFS_MAX)
		error = EINVAL;
	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
		error = ENOMEM;
	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
		error = ENOMEM;
	if (pf_kanchor_setup(rule, ruleset, anchor_call))
		error = EINVAL;
	if (rule->scrub_flags & PFSTATE_SETPRIO &&
	    (rule->set_prio[0] > PF_PRIO_MAX ||
	    rule->set_prio[1] > PF_PRIO_MAX))
		error = EINVAL;
	/* Resolve table references in the staged pool addresses. */
	TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
		if (pa->addr.type == PF_ADDR_TABLE) {
			pa->addr.p.tbl = pfr_attach_table(ruleset,
			    pa->addr.v.tblname);
			if (pa->addr.p.tbl == NULL)
				error = ENOMEM;
		}

	rule->overload_tbl = NULL;
	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    rule->overload_tblname)) == NULL)
			error = EINVAL;
		else
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
	}

	/* Move the staged address pool onto the rule. */
	pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list);
	/* Translation rules and route-to rules need a non-empty pool. */
	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
	    (rule->rt > PF_NOPFROUTE)) &&
	    (TAILQ_FIRST(&rule->rpool.list) == NULL))
		error = EINVAL;

	if (error) {
		pf_free_rule(rule);
		/* NULL it so pf_krule_free() in errout is a no-op. */
		rule = NULL;
		ERROUT(error);
	}

	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
	counter_u64_zero(rule->evaluations);
	for (int i = 0; i < 2; i++) {
		counter_u64_zero(rule->packets[i]);
		counter_u64_zero(rule->bytes[i]);
	}
	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
	    rule, entries);
	ruleset->rules[rs_num].inactive.rcount++;
	PF_RULES_WUNLOCK();

	return (0);

#undef ERROUT
errout:
	PF_RULES_WUNLOCK();
errout_unlocked:
	/* Free whatever the rule did not take ownership of. */
	pf_kkif_free(kif);
	pf_krule_free(rule);
	return (error);
}
2769
2770 static int
2771 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2772 {
2773         int                      error = 0;
2774         PF_RULES_RLOCK_TRACKER;
2775
2776 #define ERROUT_IOCTL(target, x)                                 \
2777     do {                                                                \
2778             error = (x);                                                \
2779             SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);  \
2780             goto target;                                                \
2781     } while (0)
2782
2783
2784         /* XXX keep in sync with switch() below */
2785         if (securelevel_gt(td->td_ucred, 2))
2786                 switch (cmd) {
2787                 case DIOCGETRULES:
2788                 case DIOCGETRULE:
2789                 case DIOCGETRULENV:
2790                 case DIOCGETADDRS:
2791                 case DIOCGETADDR:
2792                 case DIOCGETSTATE:
2793                 case DIOCSETSTATUSIF:
2794                 case DIOCGETSTATUS:
2795                 case DIOCCLRSTATUS:
2796                 case DIOCNATLOOK:
2797                 case DIOCSETDEBUG:
2798                 case DIOCGETSTATES:
2799                 case DIOCGETTIMEOUT:
2800                 case DIOCCLRRULECTRS:
2801                 case DIOCGETLIMIT:
2802                 case DIOCGETALTQSV0:
2803                 case DIOCGETALTQSV1:
2804                 case DIOCGETALTQV0:
2805                 case DIOCGETALTQV1:
2806                 case DIOCGETQSTATSV0:
2807                 case DIOCGETQSTATSV1:
2808                 case DIOCGETRULESETS:
2809                 case DIOCGETRULESET:
2810                 case DIOCRGETTABLES:
2811                 case DIOCRGETTSTATS:
2812                 case DIOCRCLRTSTATS:
2813                 case DIOCRCLRADDRS:
2814                 case DIOCRADDADDRS:
2815                 case DIOCRDELADDRS:
2816                 case DIOCRSETADDRS:
2817                 case DIOCRGETADDRS:
2818                 case DIOCRGETASTATS:
2819                 case DIOCRCLRASTATS:
2820                 case DIOCRTSTADDRS:
2821                 case DIOCOSFPGET:
2822                 case DIOCGETSRCNODES:
2823                 case DIOCCLRSRCNODES:
2824                 case DIOCIGETIFACES:
2825                 case DIOCGIFSPEEDV0:
2826                 case DIOCGIFSPEEDV1:
2827                 case DIOCSETIFFLAG:
2828                 case DIOCCLRIFFLAG:
2829                         break;
2830                 case DIOCRCLRTABLES:
2831                 case DIOCRADDTABLES:
2832                 case DIOCRDELTABLES:
2833                 case DIOCRSETTFLAGS:
2834                         if (((struct pfioc_table *)addr)->pfrio_flags &
2835                             PFR_FLAG_DUMMY)
2836                                 break; /* dummy operation ok */
2837                         return (EPERM);
2838                 default:
2839                         return (EPERM);
2840                 }
2841
2842         if (!(flags & FWRITE))
2843                 switch (cmd) {
2844                 case DIOCGETRULES:
2845                 case DIOCGETADDRS:
2846                 case DIOCGETADDR:
2847                 case DIOCGETSTATE:
2848                 case DIOCGETSTATUS:
2849                 case DIOCGETSTATES:
2850                 case DIOCGETTIMEOUT:
2851                 case DIOCGETLIMIT:
2852                 case DIOCGETALTQSV0:
2853                 case DIOCGETALTQSV1:
2854                 case DIOCGETALTQV0:
2855                 case DIOCGETALTQV1:
2856                 case DIOCGETQSTATSV0:
2857                 case DIOCGETQSTATSV1:
2858                 case DIOCGETRULESETS:
2859                 case DIOCGETRULESET:
2860                 case DIOCNATLOOK:
2861                 case DIOCRGETTABLES:
2862                 case DIOCRGETTSTATS:
2863                 case DIOCRGETADDRS:
2864                 case DIOCRGETASTATS:
2865                 case DIOCRTSTADDRS:
2866                 case DIOCOSFPGET:
2867                 case DIOCGETSRCNODES:
2868                 case DIOCIGETIFACES:
2869                 case DIOCGIFSPEEDV1:
2870                 case DIOCGIFSPEEDV0:
2871                 case DIOCGETRULENV:
2872                         break;
2873                 case DIOCRCLRTABLES:
2874                 case DIOCRADDTABLES:
2875                 case DIOCRDELTABLES:
2876                 case DIOCRCLRTSTATS:
2877                 case DIOCRCLRADDRS:
2878                 case DIOCRADDADDRS:
2879                 case DIOCRDELADDRS:
2880                 case DIOCRSETADDRS:
2881                 case DIOCRSETTFLAGS:
2882                         if (((struct pfioc_table *)addr)->pfrio_flags &
2883                             PFR_FLAG_DUMMY) {
2884                                 flags |= FWRITE; /* need write lock for dummy */
2885                                 break; /* dummy operation ok */
2886                         }
2887                         return (EACCES);
2888                 case DIOCGETRULE:
2889                         if (((struct pfioc_rule *)addr)->action ==
2890                             PF_GET_CLR_CNTR)
2891                                 return (EACCES);
2892                         break;
2893                 default:
2894                         return (EACCES);
2895                 }
2896
2897         CURVNET_SET(TD_TO_VNET(td));
2898
2899         switch (cmd) {
2900         case DIOCSTART:
2901                 sx_xlock(&pf_ioctl_lock);
2902                 if (V_pf_status.running)
2903                         error = EEXIST;
2904                 else {
2905                         int cpu;
2906
2907                         error = hook_pf();
2908                         if (error) {
2909                                 DPFPRINTF(PF_DEBUG_MISC,
2910                                     ("pf: pfil registration failed\n"));
2911                                 break;
2912                         }
2913                         V_pf_status.running = 1;
2914                         V_pf_status.since = time_second;
2915
2916                         CPU_FOREACH(cpu)
2917                                 V_pf_stateid[cpu] = time_second;
2918
2919                         DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
2920                 }
2921                 break;
2922
2923         case DIOCSTOP:
2924                 sx_xlock(&pf_ioctl_lock);
2925                 if (!V_pf_status.running)
2926                         error = ENOENT;
2927                 else {
2928                         V_pf_status.running = 0;
2929                         error = dehook_pf();
2930                         if (error) {
2931                                 V_pf_status.running = 1;
2932                                 DPFPRINTF(PF_DEBUG_MISC,
2933                                     ("pf: pfil unregistration failed\n"));
2934                         }
2935                         V_pf_status.since = time_second;
2936                         DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
2937                 }
2938                 break;
2939
2940         case DIOCADDRULENV: {
2941                 struct pfioc_nv *nv = (struct pfioc_nv *)addr;
2942                 nvlist_t        *nvl = NULL;
2943                 void            *nvlpacked = NULL;
2944                 struct pf_krule *rule = NULL;
2945                 const char      *anchor = "", *anchor_call = "";
2946                 uint32_t         ticket = 0, pool_ticket = 0;
2947
2948 #define ERROUT(x)       ERROUT_IOCTL(DIOCADDRULENV_error, x)
2949
2950                 if (nv->len > pf_ioctl_maxcount)
2951                         ERROUT(ENOMEM);
2952
2953                 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
2954                 error = copyin(nv->data, nvlpacked, nv->len);
2955                 if (error)
2956                         ERROUT(error);
2957
2958                 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2959                 if (nvl == NULL)
2960                         ERROUT(EBADMSG);
2961
2962                 if (! nvlist_exists_number(nvl, "ticket"))
2963                         ERROUT(EINVAL);
2964                 ticket = nvlist_get_number(nvl, "ticket");
2965
2966                 if (! nvlist_exists_number(nvl, "pool_ticket"))
2967                         ERROUT(EINVAL);
2968                 pool_ticket = nvlist_get_number(nvl, "pool_ticket");
2969
2970                 if (! nvlist_exists_nvlist(nvl, "rule"))
2971                         ERROUT(EINVAL);
2972
2973                 error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
2974                     &rule);
2975                 if (error)
2976                         ERROUT(error);
2977
2978                 if (nvlist_exists_string(nvl, "anchor"))
2979                         anchor = nvlist_get_string(nvl, "anchor");
2980                 if (nvlist_exists_string(nvl, "anchor_call"))
2981                         anchor_call = nvlist_get_string(nvl, "anchor_call");
2982
2983                 if ((error = nvlist_error(nvl)))
2984                         ERROUT(error);
2985
2986                 /* Frees rule on error */
2987                 error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
2988                     anchor_call, td);
2989
2990                 nvlist_destroy(nvl);
2991                 free(nvlpacked, M_TEMP);
2992                 break;
2993 #undef ERROUT
2994 DIOCADDRULENV_error:
2995                 pf_krule_free(rule);
2996                 nvlist_destroy(nvl);
2997                 free(nvlpacked, M_TEMP);
2998
2999                 break;
3000         }
3001         case DIOCADDRULE: {
3002                 struct pfioc_rule       *pr = (struct pfioc_rule *)addr;
3003                 struct pf_krule         *rule;
3004
3005                 rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
3006                 error = pf_rule_to_krule(&pr->rule, rule);
3007                 if (error != 0) {
3008                         free(rule, M_PFRULE);
3009                         break;
3010                 }
3011
3012                 pr->anchor[sizeof(pr->anchor) - 1] = 0;
3013
3014                 /* Frees rule on error */
3015                 error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
3016                     pr->anchor, pr->anchor_call, td);
3017                 break;
3018         }
3019
3020         case DIOCGETRULES: {
3021                 struct pfioc_rule       *pr = (struct pfioc_rule *)addr;
3022                 struct pf_kruleset      *ruleset;
3023                 struct pf_krule         *tail;
3024                 int                      rs_num;
3025
3026                 PF_RULES_WLOCK();
3027                 pr->anchor[sizeof(pr->anchor) - 1] = 0;
3028                 ruleset = pf_find_kruleset(pr->anchor);
3029                 if (ruleset == NULL) {
3030                         PF_RULES_WUNLOCK();
3031                         error = EINVAL;
3032                         break;
3033                 }
3034                 rs_num = pf_get_ruleset_number(pr->rule.action);
3035                 if (rs_num >= PF_RULESET_MAX) {
3036                         PF_RULES_WUNLOCK();
3037                         error = EINVAL;
3038                         break;
3039                 }
3040                 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3041                     pf_krulequeue);
3042                 if (tail)
3043                         pr->nr = tail->nr + 1;
3044                 else
3045                         pr->nr = 0;
3046                 pr->ticket = ruleset->rules[rs_num].active.ticket;
3047                 PF_RULES_WUNLOCK();
3048                 break;
3049         }
3050
3051         case DIOCGETRULE: {
3052                 struct pfioc_rule       *pr = (struct pfioc_rule *)addr;
3053                 struct pf_kruleset      *ruleset;
3054                 struct pf_krule         *rule;
3055                 int                      rs_num;
3056
3057                 PF_RULES_WLOCK();
3058                 pr->anchor[sizeof(pr->anchor) - 1] = 0;
3059                 ruleset = pf_find_kruleset(pr->anchor);
3060                 if (ruleset == NULL) {
3061                         PF_RULES_WUNLOCK();
3062                         error = EINVAL;
3063                         break;
3064                 }
3065                 rs_num = pf_get_ruleset_number(pr->rule.action);
3066                 if (rs_num >= PF_RULESET_MAX) {
3067                         PF_RULES_WUNLOCK();
3068                         error = EINVAL;
3069                         break;
3070                 }
3071                 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
3072                         PF_RULES_WUNLOCK();
3073                         error = EBUSY;
3074                         break;
3075                 }
3076                 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3077                 while ((rule != NULL) && (rule->nr != pr->nr))
3078                         rule = TAILQ_NEXT(rule, entries);
3079                 if (rule == NULL) {
3080                         PF_RULES_WUNLOCK();
3081                         error = EBUSY;
3082                         break;
3083                 }
3084
3085                 pf_krule_to_rule(rule, &pr->rule);
3086
3087                 if (pf_kanchor_copyout(ruleset, rule, pr)) {
3088                         PF_RULES_WUNLOCK();
3089                         error = EBUSY;
3090                         break;
3091                 }
3092                 pf_addr_copyout(&pr->rule.src.addr);
3093                 pf_addr_copyout(&pr->rule.dst.addr);
3094
3095                 if (pr->action == PF_GET_CLR_CNTR) {
3096                         counter_u64_zero(rule->evaluations);
3097                         for (int i = 0; i < 2; i++) {
3098                                 counter_u64_zero(rule->packets[i]);
3099                                 counter_u64_zero(rule->bytes[i]);
3100                         }
3101                         counter_u64_zero(rule->states_tot);
3102                 }
3103                 PF_RULES_WUNLOCK();
3104                 break;
3105         }
3106
3107         case DIOCGETRULENV: {
3108                 struct pfioc_nv         *nv = (struct pfioc_nv *)addr;
3109                 nvlist_t                *nvrule = NULL;
3110                 nvlist_t                *nvl = NULL;
3111                 struct pf_kruleset      *ruleset;
3112                 struct pf_krule         *rule;
3113                 void                    *nvlpacked = NULL;
3114                 int                      rs_num, nr;
3115                 bool                     clear_counter = false;
3116
3117 #define ERROUT(x)       ERROUT_IOCTL(DIOCGETRULENV_error, x)
3118
3119                 if (nv->len > pf_ioctl_maxcount)
3120                         ERROUT(ENOMEM);
3121
3122                 /* Copy the request in */
3123                 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
3124                 if (nvlpacked == NULL)
3125                         ERROUT(ENOMEM);
3126
3127                 error = copyin(nv->data, nvlpacked, nv->len);
3128                 if (error)
3129                         ERROUT(error);
3130
3131                 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3132                 if (nvl == NULL)
3133                         ERROUT(EBADMSG);
3134
3135                 if (! nvlist_exists_string(nvl, "anchor"))
3136                         ERROUT(EBADMSG);
3137                 if (! nvlist_exists_number(nvl, "ruleset"))
3138                         ERROUT(EBADMSG);
3139                 if (! nvlist_exists_number(nvl, "ticket"))
3140                         ERROUT(EBADMSG);
3141                 if (! nvlist_exists_number(nvl, "nr"))
3142                         ERROUT(EBADMSG);
3143
3144                 if (nvlist_exists_bool(nvl, "clear_counter"))
3145                         clear_counter = nvlist_get_bool(nvl, "clear_counter");
3146
3147                 if (clear_counter && !(flags & FWRITE))
3148                         ERROUT(EACCES);
3149
3150                 nr = nvlist_get_number(nvl, "nr");
3151
3152                 PF_RULES_WLOCK();
3153                 ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
3154                 if (ruleset == NULL) {
3155                         PF_RULES_WUNLOCK();
3156                         ERROUT(ENOENT);
3157                 }
3158
3159                 rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
3160                 if (rs_num >= PF_RULESET_MAX) {
3161                         PF_RULES_WUNLOCK();
3162                         ERROUT(EINVAL);
3163                 }
3164
3165                 if (nvlist_get_number(nvl, "ticket") !=
3166                     ruleset->rules[rs_num].active.ticket) {
3167                         PF_RULES_WUNLOCK();
3168                         ERROUT(EBUSY);
3169                         break;
3170                 }
3171
3172                 if ((error = nvlist_error(nvl))) {
3173                         PF_RULES_WUNLOCK();
3174                         ERROUT(error);
3175                 }
3176
3177                 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3178                 while ((rule != NULL) && (rule->nr != nr))
3179                         rule = TAILQ_NEXT(rule, entries);
3180                 if (rule == NULL) {
3181                         PF_RULES_WUNLOCK();
3182                         ERROUT(EBUSY);
3183                         break;
3184                 }
3185
3186                 nvrule = pf_krule_to_nvrule(rule);
3187
3188                 nvlist_destroy(nvl);
3189                 nvl = nvlist_create(0);
3190                 if (nvl == NULL) {
3191                         PF_RULES_WUNLOCK();
3192                         ERROUT(ENOMEM);
3193                 }
3194                 nvlist_add_number(nvl, "nr", nr);
3195                 nvlist_add_nvlist(nvl, "rule", nvrule);
3196                 nvrule = NULL;
3197                 if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
3198                         PF_RULES_WUNLOCK();
3199                         ERROUT(EBUSY);
3200                 }
3201
3202                 free(nvlpacked, M_TEMP);
3203                 nvlpacked = nvlist_pack(nvl, &nv->len);
3204                 if (nvlpacked == NULL) {
3205                         PF_RULES_WUNLOCK();
3206                         ERROUT(ENOMEM);
3207                 }
3208
3209                 if (nv->size == 0) {
3210                         PF_RULES_WUNLOCK();
3211                         ERROUT(0);
3212                 }
3213                 else if (nv->size < nv->len) {
3214                         PF_RULES_WUNLOCK();
3215                         ERROUT(ENOSPC);
3216                 }
3217
3218                 error = copyout(nvlpacked, nv->data, nv->len);
3219
3220                 if (clear_counter) {
3221                         counter_u64_zero(rule->evaluations);
3222                         for (int i = 0; i < 2; i++) {
3223                                 counter_u64_zero(rule->packets[i]);
3224                                 counter_u64_zero(rule->bytes[i]);
3225                         }
3226                         counter_u64_zero(rule->states_tot);
3227                 }
3228                 PF_RULES_WUNLOCK();
3229
3230 #undef ERROUT
3231 DIOCGETRULENV_error:
3232                 free(nvlpacked, M_TEMP);
3233                 nvlist_destroy(nvrule);
3234                 nvlist_destroy(nvl);
3235
3236                 break;
3237         }
3238
3239         case DIOCCHANGERULE: {
3240                 struct pfioc_rule       *pcr = (struct pfioc_rule *)addr;
3241                 struct pf_kruleset      *ruleset;
3242                 struct pf_krule         *oldrule = NULL, *newrule = NULL;
3243                 struct pfi_kkif         *kif = NULL;
3244                 struct pf_kpooladdr     *pa;
3245                 u_int32_t                nr = 0;
3246                 int                      rs_num;
3247
3248                 if (pcr->action < PF_CHANGE_ADD_HEAD ||
3249                     pcr->action > PF_CHANGE_GET_TICKET) {
3250                         error = EINVAL;
3251                         break;
3252                 }
3253                 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3254                         error = EINVAL;
3255                         break;
3256                 }
3257
3258                 if (pcr->action != PF_CHANGE_REMOVE) {
3259                         newrule = malloc(sizeof(*newrule), M_PFRULE, M_WAITOK);
3260                         error = pf_rule_to_krule(&pcr->rule, newrule);
3261                         if (error != 0) {
3262                                 free(newrule, M_PFRULE);
3263                                 break;
3264                         }
3265
3266                         if (newrule->ifname[0])
3267                                 kif = pf_kkif_create(M_WAITOK);
3268                         newrule->evaluations = counter_u64_alloc(M_WAITOK);
3269                         for (int i = 0; i < 2; i++) {
3270                                 newrule->packets[i] =
3271                                     counter_u64_alloc(M_WAITOK);
3272                                 newrule->bytes[i] =
3273                                     counter_u64_alloc(M_WAITOK);
3274                         }
3275                         newrule->states_cur = counter_u64_alloc(M_WAITOK);
3276                         newrule->states_tot = counter_u64_alloc(M_WAITOK);
3277                         newrule->src_nodes = counter_u64_alloc(M_WAITOK);
3278                         newrule->cuid = td->td_ucred->cr_ruid;
3279                         newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
3280                         TAILQ_INIT(&newrule->rpool.list);
3281                 }
3282
3283 #define ERROUT(x)       { error = (x); goto DIOCCHANGERULE_error; }
3284
3285                 PF_RULES_WLOCK();
3286                 if (!(pcr->action == PF_CHANGE_REMOVE ||
3287                     pcr->action == PF_CHANGE_GET_TICKET) &&
3288                     pcr->pool_ticket != V_ticket_pabuf)
3289                         ERROUT(EBUSY);
3290
3291                 ruleset = pf_find_kruleset(pcr->anchor);
3292                 if (ruleset == NULL)
3293                         ERROUT(EINVAL);
3294
3295                 rs_num = pf_get_ruleset_number(pcr->rule.action);
3296                 if (rs_num >= PF_RULESET_MAX)
3297                         ERROUT(EINVAL);
3298
3299                 if (pcr->action == PF_CHANGE_GET_TICKET) {
3300                         pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3301                         ERROUT(0);
3302                 } else if (pcr->ticket !=
3303                             ruleset->rules[rs_num].active.ticket)
3304                                 ERROUT(EINVAL);
3305
3306                 if (pcr->action != PF_CHANGE_REMOVE) {
3307                         if (newrule->ifname[0]) {
3308                                 newrule->kif = pfi_kkif_attach(kif,
3309                                     newrule->ifname);
3310                                 kif = NULL;
3311                                 pfi_kkif_ref(newrule->kif);
3312                         } else
3313                                 newrule->kif = NULL;
3314
3315                         if (newrule->rtableid > 0 &&
3316                             newrule->rtableid >= rt_numfibs)
3317                                 error = EBUSY;
3318
3319 #ifdef ALTQ
3320                         /* set queue IDs */
3321                         if (newrule->qname[0] != 0) {
3322                                 if ((newrule->qid =
3323                                     pf_qname2qid(newrule->qname)) == 0)
3324                                         error = EBUSY;
3325                                 else if (newrule->pqname[0] != 0) {
3326                                         if ((newrule->pqid =
3327                                             pf_qname2qid(newrule->pqname)) == 0)
3328                                                 error = EBUSY;
3329                                 } else
3330                                         newrule->pqid = newrule->qid;
3331                         }
3332 #endif /* ALTQ */
3333                         if (newrule->tagname[0])
3334                                 if ((newrule->tag =
3335                                     pf_tagname2tag(newrule->tagname)) == 0)
3336                                         error = EBUSY;
3337                         if (newrule->match_tagname[0])
3338                                 if ((newrule->match_tag = pf_tagname2tag(
3339                                     newrule->match_tagname)) == 0)
3340                                         error = EBUSY;
3341                         if (newrule->rt && !newrule->direction)
3342                                 error = EINVAL;
3343                         if (!newrule->log)
3344                                 newrule->logif = 0;
3345                         if (newrule->logif >= PFLOGIFS_MAX)
3346                                 error = EINVAL;
3347                         if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3348                                 error = ENOMEM;
3349                         if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3350                                 error = ENOMEM;
3351                         if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3352                                 error = EINVAL;
3353                         TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
3354                                 if (pa->addr.type == PF_ADDR_TABLE) {
3355                                         pa->addr.p.tbl =
3356                                             pfr_attach_table(ruleset,
3357                                             pa->addr.v.tblname);
3358                                         if (pa->addr.p.tbl == NULL)
3359                                                 error = ENOMEM;
3360                                 }
3361
3362                         newrule->overload_tbl = NULL;
3363                         if (newrule->overload_tblname[0]) {
3364                                 if ((newrule->overload_tbl = pfr_attach_table(
3365                                     ruleset, newrule->overload_tblname)) ==
3366                                     NULL)
3367                                         error = EINVAL;
3368                                 else
3369                                         newrule->overload_tbl->pfrkt_flags |=
3370                                             PFR_TFLAG_ACTIVE;
3371                         }
3372
3373                         pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list);
3374                         if (((((newrule->action == PF_NAT) ||
3375                             (newrule->action == PF_RDR) ||
3376                             (newrule->action == PF_BINAT) ||
3377                             (newrule->rt > PF_NOPFROUTE)) &&
3378                             !newrule->anchor)) &&
3379                             (TAILQ_FIRST(&newrule->rpool.list) == NULL))
3380                                 error = EINVAL;
3381
3382                         if (error) {
3383                                 pf_free_rule(newrule);
3384                                 PF_RULES_WUNLOCK();
3385                                 break;
3386                         }
3387
3388                         newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3389                 }
3390                 pf_empty_kpool(&V_pf_pabuf);
3391
3392                 if (pcr->action == PF_CHANGE_ADD_HEAD)
3393                         oldrule = TAILQ_FIRST(
3394                             ruleset->rules[rs_num].active.ptr);
3395                 else if (pcr->action == PF_CHANGE_ADD_TAIL)
3396                         oldrule = TAILQ_LAST(
3397                             ruleset->rules[rs_num].active.ptr, pf_krulequeue);
3398                 else {
3399                         oldrule = TAILQ_FIRST(
3400                             ruleset->rules[rs_num].active.ptr);
3401                         while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3402                                 oldrule = TAILQ_NEXT(oldrule, entries);
3403                         if (oldrule == NULL) {
3404                                 if (newrule != NULL)
3405                                         pf_free_rule(newrule);
3406                                 PF_RULES_WUNLOCK();
3407                                 error = EINVAL;
3408                                 break;
3409                         }
3410                 }
3411
3412                 if (pcr->action == PF_CHANGE_REMOVE) {
3413                         pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
3414                             oldrule);
3415                         ruleset->rules[rs_num].active.rcount--;
3416                 } else {
3417                         if (oldrule == NULL)
3418                                 TAILQ_INSERT_TAIL(
3419                                     ruleset->rules[rs_num].active.ptr,
3420                                     newrule, entries);
3421                         else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3422                             pcr->action == PF_CHANGE_ADD_BEFORE)
3423                                 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3424                         else
3425                                 TAILQ_INSERT_AFTER(
3426                                     ruleset->rules[rs_num].active.ptr,
3427                                     oldrule, newrule, entries);
3428                         ruleset->rules[rs_num].active.rcount++;
3429                 }
3430
3431                 nr = 0;
3432                 TAILQ_FOREACH(oldrule,
3433                     ruleset->rules[rs_num].active.ptr, entries)
3434                         oldrule->nr = nr++;
3435
3436                 ruleset->rules[rs_num].active.ticket++;
3437
3438                 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3439                 pf_remove_if_empty_kruleset(ruleset);
3440
3441                 PF_RULES_WUNLOCK();
3442                 break;
3443
3444 #undef ERROUT
3445 DIOCCHANGERULE_error:
3446                 PF_RULES_WUNLOCK();
3447                 pf_krule_free(newrule);
3448                 pf_kkif_free(kif);
3449                 break;
3450         }
3451
3452         case DIOCCLRSTATES: {
3453                 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
3454                 struct pf_kstate_kill    kill;
3455
                     /*
                      * Translate the legacy userland kill request into the
                      * kernel representation before flushing states.
                      */
3456                 error = pf_state_kill_to_kstate_kill(psk, &kill);
3457                 if (error)
3458                         break;
3459
                     /* Report the number of states flushed back to userland. */
3460                 psk->psk_killed = pf_clear_states(&kill);
3461                 break;
3462         }
3463
3464         case DIOCCLRSTATESNV: {
                     /* nvlist flavour of DIOCCLRSTATES; helper does all parsing. */
3465                 error = pf_clearstates_nv((struct pfioc_nv *)addr);
3466                 break;
3467         }
3468
3469         case DIOCKILLSTATES: {
3470                 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
3471                 struct pf_kstate_kill    kill;
3472
3473                 error = pf_state_kill_to_kstate_kill(psk, &kill);
3474                 if (error)
3475                         break;
3476
                     /* Zero first: pf_killstates() accumulates into psk_killed. */
3477                 psk->psk_killed = 0;
3478                 error = pf_killstates(&kill, &psk->psk_killed);
3479                 break;
3480         }
3481
3482         case DIOCKILLSTATESNV: {
                     /* nvlist flavour of DIOCKILLSTATES; helper does all parsing. */
3483                 error = pf_killstates_nv((struct pfioc_nv *)addr);
3484                 break;
3485         }
3486
3487         case DIOCADDSTATE: {
3488                 struct pfioc_state      *ps = (struct pfioc_state *)addr;
3489                 struct pfsync_state     *sp = &ps->state;
3490
                     /* Reject out-of-range timeout indices from userland. */
3491                 if (sp->timeout >= PFTM_MAX) {
3492                         error = EINVAL;
3493                         break;
3494                 }
                     /*
                      * State injection is delegated to pfsync's import hook;
                      * when the pfsync module is not loaded the pointer is
                      * NULL and the operation is unsupported.
                      */
3495                 if (V_pfsync_state_import_ptr != NULL) {
3496                         PF_RULES_RLOCK();
3497                         error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL);
3498                         PF_RULES_RUNLOCK();
3499                 } else
3500                         error = EOPNOTSUPP;
3501                 break;
3502         }
3503
3504         case DIOCGETSTATE: {
3505                 struct pfioc_state      *ps = (struct pfioc_state *)addr;
3506                 struct pf_state         *s;
3507
                     /*
                      * Look up a single state by (id, creatorid) and export
                      * it.  The lookup returns the state locked, hence the
                      * PF_STATE_UNLOCK() after the export.
                      */
3508                 s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
3509                 if (s == NULL) {
3510                         error = ENOENT;
3511                         break;
3512                 }
3513
3514                 pfsync_state_export(&ps->state, s);
3515                 PF_STATE_UNLOCK(s);
3516                 break;
3517         }
3518
3519         case DIOCGETSTATES: {
3520                 struct pfioc_states     *ps = (struct pfioc_states *)addr;
3521                 struct pf_state         *s;
3522                 struct pfsync_state     *pstore, *p;
3523                 int i, nr;
3524
                     /*
                      * A non-positive ps_len is a size probe: report the
                      * space needed for the current state count and return
                      * without copying anything.
                      */
3525                 if (ps->ps_len <= 0) {
3526                         nr = uma_zone_get_cur(V_pf_state_z);
3527                         ps->ps_len = sizeof(struct pfsync_state) * nr;
3528                         break;
3529                 }
3530
                     /* NOTE(review): ps_len is user-controlled and sized here
                      * with M_WAITOK — confirm an upper bound is enforced by
                      * the caller. */
3531                 p = pstore = malloc(ps->ps_len, M_TEMP, M_WAITOK | M_ZERO);
3532                 nr = 0;
3533
                     /* Walk every ID-hash row under its per-row lock. */
3534                 for (i = 0; i <= pf_hashmask; i++) {
3535                         struct pf_idhash *ih = &V_pf_idhash[i];
3536
3537                         PF_HASHROW_LOCK(ih);
3538                         LIST_FOREACH(s, &ih->states, entry) {
3539
                                     /* Skip states already torn down. */
3540                                 if (s->timeout == PFTM_UNLINKED)
3541                                         continue;
3542
                                     /* Stop early once the user buffer is full. */
3543                                 if ((nr+1) * sizeof(*p) > ps->ps_len) {
3544                                         PF_HASHROW_UNLOCK(ih);
3545                                         goto DIOCGETSTATES_full;
3546                                 }
3547                                 pfsync_state_export(p, s);
3548                                 p++;
3549                                 nr++;
3550                         }
3551                         PF_HASHROW_UNLOCK(ih);
3552                 }
3553 DIOCGETSTATES_full:
3554                 error = copyout(pstore, ps->ps_states,
3555                     sizeof(struct pfsync_state) * nr);
3556                 if (error) {
3557                         free(pstore, M_TEMP);
3558                         break;
3559                 }
                     /* Tell userland how many bytes were actually exported. */
3560                 ps->ps_len = sizeof(struct pfsync_state) * nr;
3561                 free(pstore, M_TEMP);
3562
3563                 break;
3564         }
3565
3566         case DIOCGETSTATUS: {
3567                 struct pf_status *s = (struct pf_status *)addr;
3568
                     /*
                      * Snapshot the global pf status for userland.  Taken
                      * under the rules read lock for a consistent view.
                      */
3569                 PF_RULES_RLOCK();
3570                 s->running = V_pf_status.running;
3571                 s->since   = V_pf_status.since;
3572                 s->debug   = V_pf_status.debug;
3573                 s->hostid  = V_pf_status.hostid;
3574                 s->states  = V_pf_status.states;
3575                 s->src_nodes = V_pf_status.src_nodes;
3576
                     /* Flatten the per-CPU counter(9) values into plain u64s. */
3577                 for (int i = 0; i < PFRES_MAX; i++)
3578                         s->counters[i] =
3579                             counter_u64_fetch(V_pf_status.counters[i]);
3580                 for (int i = 0; i < LCNT_MAX; i++)
3581                         s->lcounters[i] =
3582                             counter_u64_fetch(V_pf_status.lcounters[i]);
3583                 for (int i = 0; i < FCNT_MAX; i++)
3584                         s->fcounters[i] =
3585                             counter_u64_fetch(V_pf_status.fcounters[i]);
3586                 for (int i = 0; i < SCNT_MAX; i++)
3587                         s->scounters[i] =
3588                             counter_u64_fetch(V_pf_status.scounters[i]);
3589
3590                 bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ);
3591                 bcopy(V_pf_status.pf_chksum, s->pf_chksum,
3592                     PF_MD5_DIGEST_LENGTH);
3593
                     /* Fold in per-interface stats for the status interface. */
3594                 pfi_update_status(s->ifname, s);
3595                 PF_RULES_RUNLOCK();
3596                 break;
3597         }
3598
3599         case DIOCSETSTATUSIF: {
3600                 struct pfioc_if *pi = (struct pfioc_if *)addr;
3601
                     /*
                      * Set (or, with an empty name, clear) the interface whose
                      * statistics are reported in pf status.
                      * NOTE(review): the clear path writes ifname without the
                      * rules lock taken by the set path below — confirm
                      * intended.
                      */
3602                 if (pi->ifname[0] == 0) {
3603                         bzero(V_pf_status.ifname, IFNAMSIZ);
3604                         break;
3605                 }
3606                 PF_RULES_WLOCK();
3607                 strlcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
3608                 PF_RULES_WUNLOCK();
3609                 break;
3610         }
3611
3612         case DIOCCLRSTATUS: {
                     /* Zero all global pf counters and reset "since". */
3613                 PF_RULES_WLOCK();
3614                 for (int i = 0; i < PFRES_MAX; i++)
3615                         counter_u64_zero(V_pf_status.counters[i]);
3616                 for (int i = 0; i < FCNT_MAX; i++)
3617                         counter_u64_zero(V_pf_status.fcounters[i]);
3618                 for (int i = 0; i < SCNT_MAX; i++)
3619                         counter_u64_zero(V_pf_status.scounters[i]);
3620                 for (int i = 0; i < LCNT_MAX; i++)
3621                         counter_u64_zero(V_pf_status.lcounters[i]);
3622                 V_pf_status.since = time_second;
                     /* Also reset stats on the status interface, if one is set. */
3623                 if (*V_pf_status.ifname)
3624                         pfi_update_status(V_pf_status.ifname, NULL);
3625                 PF_RULES_WUNLOCK();
3626                 break;
3627         }
3628
3629         case DIOCNATLOOK: {
3630                 struct pfioc_natlook    *pnl = (struct pfioc_natlook *)addr;
3631                 struct pf_state_key     *sk;
3632                 struct pf_state         *state;
3633                 struct pf_state_key_cmp  key;
3634                 int                      m = 0, direction = pnl->direction;
3635                 int                      sidx, didx;
3636
3637                 /* NATLOOK src and dst are reversed, so reverse sidx/didx */
3638                 sidx = (direction == PF_IN) ? 1 : 0;
3639                 didx = (direction == PF_IN) ? 0 : 1;
3640
                     /*
                      * Require a protocol and non-zero addresses; for TCP/UDP
                      * both ports must be supplied as well.
                      */
3641                 if (!pnl->proto ||
3642                     PF_AZERO(&pnl->saddr, pnl->af) ||
3643                     PF_AZERO(&pnl->daddr, pnl->af) ||
3644                     ((pnl->proto == IPPROTO_TCP ||
3645                     pnl->proto == IPPROTO_UDP) &&
3646                     (!pnl->dport || !pnl->sport)))
3647                         error = EINVAL;
3648                 else {
                             /* Build a lookup key from the user request. */
3649                         bzero(&key, sizeof(key));
3650                         key.af = pnl->af;
3651                         key.proto = pnl->proto;
3652                         PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
3653                         key.port[sidx] = pnl->sport;
3654                         PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
3655                         key.port[didx] = pnl->dport;
3656
                             /* m counts matches; the lookup is ambiguous if >1. */
3657                         state = pf_find_state_all(&key, direction, &m);
3658
3659                         if (m > 1)
3660                                 error = E2BIG;  /* more than one state */
3661                         else if (state != NULL) {
3662                                 /* XXXGL: not locked read */
3663                                 sk = state->key[sidx];
3664                                 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
3665                                 pnl->rsport = sk->port[sidx];
3666                                 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
3667                                 pnl->rdport = sk->port[didx];
3668                         } else
3669                                 error = ENOENT;
3670                 }
3671                 break;
3672         }
3673
3674         case DIOCSETTIMEOUT: {
3675                 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
3676                 int              old;
3677
3678                 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3679                     pt->seconds < 0) {
3680                         error = EINVAL;
3681                         break;
3682                 }
3683                 PF_RULES_WLOCK();
3684                 old = V_pf_default_rule.timeout[pt->timeout];
                     /* The purge interval must never be zero; clamp to 1s. */
3685                 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
3686                         pt->seconds = 1;
3687                 V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
                     /* Kick the purge thread if its interval just shrank. */
3688                 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
3689                         wakeup(pf_purge_thread);
                     /* Return the previous value to the caller. */
3690                 pt->seconds = old;
3691                 PF_RULES_WUNLOCK();
3692                 break;
3693         }
3694
3695         case DIOCGETTIMEOUT: {
3696                 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
3697
3698                 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3699                         error = EINVAL;
3700                         break;
3701                 }
3702                 PF_RULES_RLOCK();
3703                 pt->seconds = V_pf_default_rule.timeout[pt->timeout];
3704                 PF_RULES_RUNLOCK();
3705                 break;
3706         }
3707
3708         case DIOCGETLIMIT: {
3709                 struct pfioc_limit      *pl = (struct pfioc_limit *)addr;
3710
3711                 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3712                         error = EINVAL;
3713                         break;
3714                 }
3715                 PF_RULES_RLOCK();
3716                 pl->limit = V_pf_limits[pl->index].limit;
3717                 PF_RULES_RUNLOCK();
3718                 break;
3719         }
3720
3721         case DIOCSETLIMIT: {
3722                 struct pfioc_limit      *pl = (struct pfioc_limit *)addr;
3723                 int                      old_limit;
3724
3725                 PF_RULES_WLOCK();
3726                 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3727                     V_pf_limits[pl->index].zone == NULL) {
3728                         PF_RULES_WUNLOCK();
3729                         error = EINVAL;
3730                         break;
3731                 }
                     /* Apply the new cap to the backing UMA zone. */
3732                 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
3733                 old_limit = V_pf_limits[pl->index].limit;
3734                 V_pf_limits[pl->index].limit = pl->limit;
                     /* Return the previous limit to the caller. */
3735                 pl->limit = old_limit;
3736                 PF_RULES_WUNLOCK();
3737                 break;
3738         }
3739
3740         case DIOCSETDEBUG: {
3741                 u_int32_t       *level = (u_int32_t *)addr;
3742
3743                 PF_RULES_WLOCK();
3744                 V_pf_status.debug = *level;
3745                 PF_RULES_WUNLOCK();
3746                 break;
3747         }
3748
3749         case DIOCCLRRULECTRS: {
3750                 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
3751                 struct pf_kruleset      *ruleset = &pf_main_ruleset;
3752                 struct pf_krule         *rule;
3753
                     /* Zero eval/packet/byte counters on every active filter rule. */
3754                 PF_RULES_WLOCK();
3755                 TAILQ_FOREACH(rule,
3756                     ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
3757                         counter_u64_zero(rule->evaluations);
3758                         for (int i = 0; i < 2; i++) {
3759                                 counter_u64_zero(rule->packets[i]);
3760                                 counter_u64_zero(rule->bytes[i]);
3761                         }
3762                 }
3763                 PF_RULES_WUNLOCK();
3764                 break;
3765         }
3766
3767         case DIOCGIFSPEEDV0:
3768         case DIOCGIFSPEEDV1: {
3769                 struct pf_ifspeed_v1    *psp = (struct pf_ifspeed_v1 *)addr;
3770                 struct pf_ifspeed_v1    ps;
3771                 struct ifnet            *ifp;
3772
                     /*
                      * Report the link speed of the named interface.  V0 only
                      * carries a 32-bit baudrate (clamped to UINT_MAX); V1
                      * additionally fills in the full 64-bit value.
                      */
3773                 if (psp->ifname[0] != 0) {
3774                         /* Can we completely trust user-land? */
3775                         strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
3776                         ifp = ifunit(ps.ifname);
3777                         if (ifp != NULL) {
3778                                 psp->baudrate32 =
3779                                     (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
3780                                 if (cmd == DIOCGIFSPEEDV1)
3781                                         psp->baudrate = ifp->if_baudrate;
3782                         } else
3783                                 error = EINVAL;
3784                 } else
3785                         error = EINVAL;
3786                 break;
3787         }
3788
3789 #ifdef ALTQ
3790         case DIOCSTARTALTQ: {
3791                 struct pf_altq          *altq;
3792
3793                 PF_RULES_WLOCK();
3794                 /* enable all altq interfaces on active list */
3795                 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
                             /* Skip interfaces that have disappeared. */
3796                         if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
3797                                 error = pf_enable_altq(altq);
3798                                 if (error != 0)
3799                                         break;
3800                         }
3801                 }
                     /* Only mark altq running if every enable succeeded. */
3802                 if (error == 0)
3803                         V_pf_altq_running = 1;
3804                 PF_RULES_WUNLOCK();
3805                 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
3806                 break;
3807         }
3808
3809         case DIOCSTOPALTQ: {
3810                 struct pf_altq          *altq;
3811
3812                 PF_RULES_WLOCK();
3813                 /* disable all altq interfaces on active list */
3814                 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
3815                         if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
3816                                 error = pf_disable_altq(altq);
3817                                 if (error != 0)
3818                                         break;
3819                         }
3820                 }
3821                 if (error == 0)
3822                         V_pf_altq_running = 0;
3823                 PF_RULES_WUNLOCK();
3824                 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
3825                 break;
3826         }
3827
3828         case DIOCADDALTQV0:
3829         case DIOCADDALTQV1: {
3830                 struct pfioc_altq_v1    *pa = (struct pfioc_altq_v1 *)addr;
3831                 struct pf_altq          *altq, *a;
3832                 struct ifnet            *ifp;
3833
                     /*
                      * Add one altq discipline/queue definition to the
                      * inactive (in-preparation) lists; it becomes active on
                      * commit.  Import converts the versioned userland struct
                      * into the kernel pf_altq.
                      */
3834                 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
3835                 error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
3836                 if (error) {
                             /* Fix: don't leak the freshly allocated altq when
                              * the userland struct fails to import. */
                             free(altq, M_PFALTQ);
3837                         break;
                     }
3838                 altq->local_flags = 0;
3839
3840                 PF_RULES_WLOCK();
                     /* The caller must hold the current inactive-list ticket. */
3841                 if (pa->ticket != V_ticket_altqs_inactive) {
3842                         PF_RULES_WUNLOCK();
3843                         free(altq, M_PFALTQ);
3844                         error = EBUSY;
3845                         break;
3846                 }
3847
3848                 /*
3849                  * if this is for a queue, find the discipline and
3850                  * copy the necessary fields
3851                  */
3852                 if (altq->qname[0] != 0) {
3853                         if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
3854                                 PF_RULES_WUNLOCK();
3855                                 error = EBUSY;
3856                                 free(altq, M_PFALTQ);
3857                                 break;
3858                         }
3859                         altq->altq_disc = NULL;
                             /* Inherit the discipline from the matching interface. */
3860                         TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
3861                                 if (strncmp(a->ifname, altq->ifname,
3862                                     IFNAMSIZ) == 0) {
3863                                         altq->altq_disc = a->altq_disc;
3864                                         break;
3865                                 }
3866                         }
3867                 }
3868
                     /* A missing interface is tolerated and flagged, not fatal. */
3869                 if ((ifp = ifunit(altq->ifname)) == NULL)
3870                         altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
3871                 else
3872                         error = altq_add(ifp, altq);
3873
3874                 if (error) {
3875                         PF_RULES_WUNLOCK();
3876                         free(altq, M_PFALTQ);
3877                         break;
3878                 }
3879
                     /* Queues and per-interface disciplines live on separate lists. */
3880                 if (altq->qname[0] != 0)
3881                         TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
3882                 else
3883                         TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
3884                 /* version error check done on import above */
3885                 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
3886                 PF_RULES_WUNLOCK();
3887                 break;
3888         }
3889
3890         case DIOCGETALTQSV0:
3891         case DIOCGETALTQSV1: {
3892                 struct pfioc_altq_v1    *pa = (struct pfioc_altq_v1 *)addr;
3893                 struct pf_altq          *altq;
3894
                     /*
                      * Count active altq entries (both per-interface
                      * disciplines and queues) and hand out the active-list
                      * ticket for subsequent DIOCGETALTQ calls.
                      */
3895                 PF_RULES_RLOCK();
3896                 pa->nr = 0;
3897                 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
3898                         pa->nr++;
3899                 TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
3900                         pa->nr++;
3901                 pa->ticket = V_ticket_altqs_active;
3902                 PF_RULES_RUNLOCK();
3903                 break;
3904         }
3905
3906         case DIOCGETALTQV0:
3907         case DIOCGETALTQV1: {
3908                 struct pfioc_altq_v1    *pa = (struct pfioc_altq_v1 *)addr;
3909                 struct pf_altq          *altq;
3910
                     /* Fetch the pa->nr'th active altq entry by ticket. */
3911                 PF_RULES_RLOCK();
3912                 if (pa->ticket != V_ticket_altqs_active) {
3913                         PF_RULES_RUNLOCK();
3914                         error = EBUSY;
3915                         break;
3916                 }
3917                 altq = pf_altq_get_nth_active(pa->nr);
3918                 if (altq == NULL) {
3919                         PF_RULES_RUNLOCK();
3920                         error = EBUSY;
3921                         break;
3922                 }
3923                 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
3924                 PF_RULES_RUNLOCK();
3925                 break;
3926         }
3927
3928         case DIOCCHANGEALTQV0:
3929         case DIOCCHANGEALTQV1:
3930                 /* CHANGEALTQ not supported yet! */
3931                 error = ENODEV;
3932                 break;
3933
3934         case DIOCGETQSTATSV0:
3935         case DIOCGETQSTATSV1: {
3936                 struct pfioc_qstats_v1  *pq = (struct pfioc_qstats_v1 *)addr;
3937                 struct pf_altq          *altq;
3938                 int                      nbytes;
3939                 u_int32_t                version;
3940
                     /* Fetch scheduler statistics for the pq->nr'th active queue. */
3941                 PF_RULES_RLOCK();
3942                 if (pq->ticket != V_ticket_altqs_active) {
3943                         PF_RULES_RUNLOCK();
3944                         error = EBUSY;
3945                         break;
3946                 }
3947                 nbytes = pq->nbytes;
3948                 altq = pf_altq_get_nth_active(pq->nr);
3949                 if (altq == NULL) {
3950                         PF_RULES_RUNLOCK();
3951                         error = EBUSY;
3952                         break;
3953                 }
3954
                     /* No stats for a queue whose interface has gone away. */
3955                 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
3956                         PF_RULES_RUNLOCK();
3957                         error = ENXIO;
3958                         break;
3959                 }
3960                 PF_RULES_RUNLOCK();
3961                 if (cmd == DIOCGETQSTATSV0)
3962                         version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
3963                 else
3964                         version = pq->version;
                     /* NOTE(review): altq is dereferenced after dropping the
                      * rules lock — confirm its lifetime is guaranteed here. */
3965                 error = altq_getqstats(altq, pq->buf, &nbytes, version);
3966                 if (error == 0) {
3967                         pq->scheduler = altq->scheduler;
3968                         pq->nbytes = nbytes;
3969                 }
3970                 break;
3971         }
3972 #endif /* ALTQ */
3973
3974         case DIOCBEGINADDRS: {
3975                 struct pfioc_pooladdr   *pp = (struct pfioc_pooladdr *)addr;
3976
                     /*
                      * Start a new pool-address transaction: flush any staged
                      * addresses and hand out a fresh ticket that DIOCADDADDR
                      * must present.
                      */
3977                 PF_RULES_WLOCK();
3978                 pf_empty_kpool(&V_pf_pabuf);
3979                 pp->ticket = ++V_ticket_pabuf;
3980                 PF_RULES_WUNLOCK();
3981                 break;
3982         }
3983
3984         case DIOCADDADDR: {
3985                 struct pfioc_pooladdr   *pp = (struct pfioc_pooladdr *)addr;
3986                 struct pf_kpooladdr     *pa;
3987                 struct pfi_kkif         *kif = NULL;
3988
                     /* Reject address families the kernel wasn't built for. */
3989 #ifndef INET
3990                 if (pp->af == AF_INET) {
3991                         error = EAFNOSUPPORT;
3992                         break;
3993                 }
3994 #endif /* INET */
3995 #ifndef INET6
3996                 if (pp->af == AF_INET6) {
3997                         error = EAFNOSUPPORT;
3998                         break;
3999                 }
4000 #endif /* INET6 */
                     /* Only plain masks, dynamic-interface and table addresses. */
4001                 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
4002                     pp->addr.addr.type != PF_ADDR_DYNIFTL &&
4003                     pp->addr.addr.type != PF_ADDR_TABLE) {
4004                         error = EINVAL;
4005                         break;
4006                 }
                     /* Userland must not smuggle in a kernel pointer. */
4007                 if (pp->addr.addr.p.dyn != NULL) {
4008                         error = EINVAL;
4009                         break;
4010                 }
                     /* Allocate (possibly sleeping) before taking the lock. */
4011                 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
4012                 pf_pooladdr_to_kpooladdr(&pp->addr, pa);
4013                 if (pa->ifname[0])
4014                         kif = pf_kkif_create(M_WAITOK);
4015                 PF_RULES_WLOCK();
                     /* Stale ticket means another transaction superseded us. */
4016                 if (pp->ticket != V_ticket_pabuf) {
4017                         PF_RULES_WUNLOCK();
4018                         if (pa->ifname[0])
4019                                 pf_kkif_free(kif);
4020                         free(pa, M_PFRULE);
4021                         error = EBUSY;
4022                         break;
4023                 }
4024                 if (pa->ifname[0]) {
                             /* attach consumes kif; NULL it so we can't double-use. */
4025                         pa->kif = pfi_kkif_attach(kif, pa->ifname);
4026                         kif = NULL;
4027                         pfi_kkif_ref(pa->kif);
4028                 } else
4029                         pa->kif = NULL;
4030                 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
4031                     pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
                             /* Unwind the interface reference taken above. */
4032                         if (pa->ifname[0])
4033                                 pfi_kkif_unref(pa->kif);
4034                         PF_RULES_WUNLOCK();
4035                         free(pa, M_PFRULE);
4036                         break;
4037                 }
                     /* Stage the address; it is committed with the rule later. */
4038                 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
4039                 PF_RULES_WUNLOCK();
4040                 break;
4041         }
4042
	case DIOCGETADDRS: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpool		*pool;
		struct pf_kpooladdr	*pa;

		/*
		 * Report in pp->nr how many addresses the pool identified by
		 * anchor/ticket/r_action/r_num holds, so userland can size
		 * its subsequent DIOCGETADDR iteration.
		 */
		PF_RULES_RLOCK();
		pp->nr = 0;
		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
			pp->nr++;
		PF_RULES_RUNLOCK();
		break;
	}
4062
	case DIOCGETADDR: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpool		*pool;
		struct pf_kpooladdr	*pa;
		u_int32_t		 nr = 0;

		/*
		 * Copy out the pp->nr'th address of the selected pool.
		 * An out-of-range index yields EBUSY (historical errno,
		 * matching the pool-not-found case).
		 */
		PF_RULES_RLOCK();
		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		/* Walk to the requested index. */
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pf_kpooladdr_to_pooladdr(pa, &pp->addr);
		pf_addr_copyout(&pp->addr.addr);
		PF_RULES_RUNLOCK();
		break;
	}
4092
4093         case DIOCCHANGEADDR: {
4094                 struct pfioc_pooladdr   *pca = (struct pfioc_pooladdr *)addr;
4095                 struct pf_kpool         *pool;
4096                 struct pf_kpooladdr     *oldpa = NULL, *newpa = NULL;
4097                 struct pf_kruleset      *ruleset;
4098                 struct pfi_kkif         *kif = NULL;
4099
4100                 if (pca->action < PF_CHANGE_ADD_HEAD ||
4101                     pca->action > PF_CHANGE_REMOVE) {
4102                         error = EINVAL;
4103                         break;
4104                 }
4105                 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4106                     pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4107                     pca->addr.addr.type != PF_ADDR_TABLE) {
4108                         error = EINVAL;
4109                         break;
4110                 }
4111                 if (pca->addr.addr.p.dyn != NULL) {
4112                         error = EINVAL;
4113                         break;
4114                 }
4115
4116                 if (pca->action != PF_CHANGE_REMOVE) {
4117 #ifndef INET
4118                         if (pca->af == AF_INET) {
4119                                 error = EAFNOSUPPORT;
4120                                 break;
4121                         }
4122 #endif /* INET */
4123 #ifndef INET6
4124                         if (pca->af == AF_INET6) {
4125                                 error = EAFNOSUPPORT;
4126                                 break;
4127                         }
4128 #endif /* INET6 */
4129                         newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
4130                         bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
4131                         if (newpa->ifname[0])
4132                                 kif = pf_kkif_create(M_WAITOK);
4133                         newpa->kif = NULL;
4134                 }
4135
4136 #define ERROUT(x)       ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
4137                 PF_RULES_WLOCK();
4138                 ruleset = pf_find_kruleset(pca->anchor);
4139                 if (ruleset == NULL)
4140                         ERROUT(EBUSY);
4141
4142                 pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
4143                     pca->r_num, pca->r_last, 1, 1);
4144                 if (pool == NULL)
4145                         ERROUT(EBUSY);
4146
4147                 if (pca->action != PF_CHANGE_REMOVE) {
4148                         if (newpa->ifname[0]) {
4149                                 newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
4150                                 pfi_kkif_ref(newpa->kif);
4151                                 kif = NULL;
4152                         }
4153
4154                         switch (newpa->addr.type) {
4155                         case PF_ADDR_DYNIFTL:
4156                                 error = pfi_dynaddr_setup(&newpa->addr,
4157                                     pca->af);
4158                                 break;
4159                         case PF_ADDR_TABLE:
4160                                 newpa->addr.p.tbl = pfr_attach_table(ruleset,
4161                                     newpa->addr.v.tblname);
4162                                 if (newpa->addr.p.tbl == NULL)
4163                                         error = ENOMEM;
4164                                 break;
4165                         }
4166                         if (error)
4167                                 goto DIOCCHANGEADDR_error;
4168                 }
4169
4170                 switch (pca->action) {
4171                 case PF_CHANGE_ADD_HEAD:
4172                         oldpa = TAILQ_FIRST(&pool->list);
4173                         break;
4174                 case PF_CHANGE_ADD_TAIL:
4175                         oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
4176                         break;
4177                 default:
4178                         oldpa = TAILQ_FIRST(&pool->list);
4179                         for (int i = 0; oldpa && i < pca->nr; i++)
4180                                 oldpa = TAILQ_NEXT(oldpa, entries);
4181
4182                         if (oldpa == NULL)
4183                                 ERROUT(EINVAL);
4184                 }
4185
4186                 if (pca->action == PF_CHANGE_REMOVE) {
4187                         TAILQ_REMOVE(&pool->list, oldpa, entries);
4188                         switch (oldpa->addr.type) {
4189                         case PF_ADDR_DYNIFTL:
4190                                 pfi_dynaddr_remove(oldpa->addr.p.dyn);
4191                                 break;
4192                         case PF_ADDR_TABLE:
4193                                 pfr_detach_table(oldpa->addr.p.tbl);
4194                                 break;
4195                         }
4196                         if (oldpa->kif)
4197                                 pfi_kkif_unref(oldpa->kif);
4198                         free(oldpa, M_PFRULE);
4199                 } else {
4200                         if (oldpa == NULL)
4201                                 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4202                         else if (pca->action == PF_CHANGE_ADD_HEAD ||
4203                             pca->action == PF_CHANGE_ADD_BEFORE)
4204                                 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4205                         else
4206                                 TAILQ_INSERT_AFTER(&pool->list, oldpa,
4207                                     newpa, entries);
4208                 }
4209
4210                 pool->cur = TAILQ_FIRST(&pool->list);
4211                 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
4212                 PF_RULES_WUNLOCK();
4213                 break;
4214
4215 #undef ERROUT
4216 DIOCCHANGEADDR_error:
4217                 if (newpa != NULL) {
4218                         if (newpa->kif)
4219                                 pfi_kkif_unref(newpa->kif);
4220                         free(newpa, M_PFRULE);
4221                 }
4222                 PF_RULES_WUNLOCK();
4223                 pf_kkif_free(kif);
4224                 break;
4225         }
4226
	case DIOCGETRULESETS: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_kanchor	*anchor;

		/*
		 * Count the immediate child anchors of the ruleset named by
		 * pr->path and return the count in pr->nr.
		 */
		PF_RULES_RLOCK();
		/* Ensure the user-supplied path is NUL-terminated. */
		pr->path[sizeof(pr->path) - 1] = 0;
		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
			PF_RULES_RUNLOCK();
			error = ENOENT;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_kanchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		PF_RULES_RUNLOCK();
		break;
	}
4253
	case DIOCGETRULESET: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_kanchor	*anchor;
		u_int32_t		 nr = 0;

		/*
		 * Return in pr->name the name of the pr->nr'th immediate
		 * child anchor of the ruleset named by pr->path (companion
		 * to DIOCGETRULESETS).  EBUSY for an out-of-range index.
		 */
		PF_RULES_RLOCK();
		/* Ensure the user-supplied path is NUL-terminated. */
		pr->path[sizeof(pr->path) - 1] = 0;
		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
			PF_RULES_RUNLOCK();
			error = ENOENT;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_kanchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		/* Empty name means the index was never reached. */
		if (!pr->name[0])
			error = EBUSY;
		PF_RULES_RUNLOCK();
		break;
	}
4290
	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		/*
		 * Delete all tables matching the filter in io->pfrio_table;
		 * the number of deleted tables is returned in pfrio_ndel.
		 * This ioctl carries no element array, so esize must be 0.
		 */
		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		break;
	}
4304
	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		/*
		 * Create the tables described by the pfr_table array in
		 * io->pfrio_buffer; pfrio_nadd reports how many were added.
		 */
		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		/* Bound the request before computing the allocation size. */
		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_add_tables(pfrts, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}
4336
	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		/*
		 * Delete the tables named by the pfr_table array in
		 * io->pfrio_buffer; pfrio_ndel reports how many were removed.
		 */
		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		/* Bound the request before computing the allocation size. */
		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_del_tables(pfrts, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}
4368
	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		/*
		 * Copy out up to io->pfrio_size table descriptors matching
		 * the filter; pfrio_size is updated to the number returned.
		 */
		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			error = EINVAL;
			break;
		}
		/* Never copy out more than actually exist. */
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_table);

		/* M_NOWAIT: we may not sleep while holding the rules lock. */
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_NOWAIT);
		if (pfrts == NULL) {
			error = ENOMEM;
			PF_RULES_RUNLOCK();
			break;
		}
		error = pfr_get_tables(&io->pfrio_table, pfrts,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		/* copyout happens after the lock is dropped. */
		if (error == 0)
			error = copyout(pfrts, io->pfrio_buffer, totlen);
		free(pfrts, M_TEMP);
		break;
	}
4405
	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_tstats *pfrtstats;
		size_t totlen;
		int n;

		/*
		 * Copy out per-table statistics.  Takes the writer lock
		 * because pfr_get_tstats() may clear counters depending on
		 * its flags.
		 */
		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		PF_RULES_WLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
		/* M_NOWAIT: we may not sleep while holding the rules lock. */
		pfrtstats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT);
		if (pfrtstats == NULL) {
			error = ENOMEM;
			PF_RULES_WUNLOCK();
			break;
		}
		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0)
			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
		free(pfrtstats, M_TEMP);
		break;
	}
4441
	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		/*
		 * Zero the statistics of the tables listed in
		 * io->pfrio_buffer; pfrio_nzero reports how many were
		 * cleared.
		 */
		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			/* We used to count tables and use the minimum required
			 * size, so we didn't fail on overly large requests.
			 * Keep doing so. */
			io->pfrio_size = pf_ioctl_maxcount;
			/*
			 * NOTE(review): this break leaves the handler with
			 * error == 0 and nothing cleared — confirm that
			 * "clamp and do no work" (rather than clamping and
			 * proceeding) is the intended compat behavior.
			 */
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_NOWAIT);
		if (pfrts == NULL) {
			error = ENOMEM;
			break;
		}
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}

		PF_RULES_WLOCK();
		error = pfr_clr_tstats(pfrts, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}
4481
	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		/*
		 * Set/clear flags on the tables listed in io->pfrio_buffer.
		 */
		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			error = EINVAL;
			break;
		}

		/*
		 * Clamp the request to the current table count; the read
		 * lock is dropped before copyin (which may fault/sleep).
		 * NOTE(review): the count may change before the write lock
		 * below is taken — presumably pfr_set_tflags() copes with a
		 * stale size; verify.
		 */
		io->pfrio_size = min(io->pfrio_size, n);
		PF_RULES_RUNLOCK();

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_set_tflags(pfrts, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}
4520
	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		/*
		 * Remove all addresses from the table in io->pfrio_table;
		 * pfrio_ndel reports how many were removed.  No element
		 * array accompanies this ioctl, so esize must be 0.
		 */
		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		break;
	}
4534
	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		/*
		 * Add the pfr_addr array in io->pfrio_buffer to a table;
		 * pfrio_nadd reports how many were added.  With
		 * PFR_FLAG_FEEDBACK, per-entry results are copied back.
		 */
		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before computing the allocation size. */
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_NOWAIT);
		if (! pfras) {
			error = ENOMEM;
			break;
		}
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_add_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
4572
	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		/*
		 * Delete the pfr_addr array in io->pfrio_buffer from a
		 * table; pfrio_ndel reports how many were removed.  With
		 * PFR_FLAG_FEEDBACK, per-entry results are copied back.
		 */
		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before computing the allocation size. */
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_NOWAIT);
		if (! pfras) {
			error = ENOMEM;
			break;
		}
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_del_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
4610
	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen, count;

		/*
		 * Replace a table's contents with the pfr_addr array in
		 * io->pfrio_buffer.  pfrio_size is the input count and
		 * pfrio_size2 the feedback-buffer capacity; the buffer must
		 * therefore hold max(size, size2) entries.
		 */
		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
			error = EINVAL;
			break;
		}
		count = max(io->pfrio_size, io->pfrio_size2);
		/* Bound the request before computing the allocation size. */
		if (count > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = count * sizeof(struct pfr_addr);
		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
		    M_NOWAIT);
		if (! pfras) {
			error = ENOMEM;
			break;
		}
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_set_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
4653
	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		/*
		 * Copy out up to io->pfrio_size addresses of a table;
		 * pfrio_size is updated to the number actually returned.
		 */
		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before computing the allocation size. */
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_NOWAIT);
		if (! pfras) {
			error = ENOMEM;
			break;
		}
		PF_RULES_RLOCK();
		error = pfr_get_addrs(&io->pfrio_table, pfras,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		/* copyout happens after the lock is dropped. */
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
4685
	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_astats *pfrastats;
		size_t totlen;

		/*
		 * Copy out per-address statistics of a table; pfrio_size is
		 * updated to the number of entries actually returned.
		 */
		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before computing the allocation size. */
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_astats);
		pfrastats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_astats), M_TEMP, M_NOWAIT);
		if (! pfrastats) {
			error = ENOMEM;
			break;
		}
		PF_RULES_RLOCK();
		error = pfr_get_astats(&io->pfrio_table, pfrastats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		/* copyout happens after the lock is dropped. */
		if (error == 0)
			error = copyout(pfrastats, io->pfrio_buffer, totlen);
		free(pfrastats, M_TEMP);
		break;
	}
4717
	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		/*
		 * Zero the statistics of the addresses listed in
		 * io->pfrio_buffer; pfrio_nzero reports how many were
		 * cleared.  With PFR_FLAG_FEEDBACK, per-entry results are
		 * copied back.
		 */
		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before computing the allocation size. */
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_NOWAIT);
		if (! pfras) {
			error = ENOMEM;
			break;
		}
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_astats(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
4755
4756         case DIOCRTSTADDRS: {
4757                 struct pfioc_table *io = (struct pfioc_table *)addr;
4758                 struct pfr_addr *pfras;
4759                 size_t totlen;
4760
4761                 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4762                         error = ENODEV;
4763                         break;
4764                 }
4765                 if (io->pfrio_size < 0 ||
4766                     io->pfrio_size > pf_ioctl_maxcount ||
4767                     WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4768                         error = EINVAL;
4769                         break;
4770                 }
4771                 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4772                 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4773                     M_TEMP, M_NOWAIT);
4774                 if (! pfras) {
4775                         error = ENOMEM;
4776                         break;
4777                 }
4778                 error = copyin(io->pfrio_buffer, pfras, totlen);
4779                 if (error) {
4780                         free(pfras, M_TEMP);
4781                         break;
4782                 }
4783                 PF_RULES_RLOCK();
4784                 error = pfr_tst_addrs(&io->pfrio_table, pfras,
4785                     io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
4786                     PFR_FLAG_USERIOCTL);
4787                 PF_RULES_RUNLOCK();
4788                 if (error == 0)
4789                         error = copyout(pfras, io->pfrio_buffer, totlen);
4790                 free(pfras, M_TEMP);
4791                 break;
4792         }
4793
4794         case DIOCRINADEFINE: {
4795                 struct pfioc_table *io = (struct pfioc_table *)addr;
4796                 struct pfr_addr *pfras;
4797                 size_t totlen;
4798
4799                 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4800                         error = ENODEV;
4801                         break;
4802                 }
4803                 if (io->pfrio_size < 0 ||
4804                     io->pfrio_size > pf_ioctl_maxcount ||
4805                     WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4806                         error = EINVAL;
4807                         break;
4808                 }
4809                 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4810                 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4811                     M_TEMP, M_NOWAIT);
4812                 if (! pfras) {
4813                         error = ENOMEM;
4814                         break;
4815                 }
4816                 error = copyin(io->pfrio_buffer, pfras, totlen);
4817                 if (error) {
4818                         free(pfras, M_TEMP);
4819                         break;
4820                 }
4821                 PF_RULES_WLOCK();
4822                 error = pfr_ina_define(&io->pfrio_table, pfras,
4823                     io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
4824                     io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4825                 PF_RULES_WUNLOCK();
4826                 free(pfras, M_TEMP);
4827                 break;
4828         }
4829
4830         case DIOCOSFPADD: {
4831                 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
4832                 PF_RULES_WLOCK();
4833                 error = pf_osfp_add(io);
4834                 PF_RULES_WUNLOCK();
4835                 break;
4836         }
4837
4838         case DIOCOSFPGET: {
4839                 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
4840                 PF_RULES_RLOCK();
4841                 error = pf_osfp_get(io);
4842                 PF_RULES_RUNLOCK();
4843                 break;
4844         }
4845
4846         case DIOCXBEGIN: {
4847                 struct pfioc_trans      *io = (struct pfioc_trans *)addr;
4848                 struct pfioc_trans_e    *ioes, *ioe;
4849                 size_t                   totlen;
4850                 int                      i;
4851
4852                 if (io->esize != sizeof(*ioe)) {
4853                         error = ENODEV;
4854                         break;
4855                 }
4856                 if (io->size < 0 ||
4857                     io->size > pf_ioctl_maxcount ||
4858                     WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
4859                         error = EINVAL;
4860                         break;
4861                 }
4862                 totlen = sizeof(struct pfioc_trans_e) * io->size;
4863                 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
4864                     M_TEMP, M_NOWAIT);
4865                 if (! ioes) {
4866                         error = ENOMEM;
4867                         break;
4868                 }
4869                 error = copyin(io->array, ioes, totlen);
4870                 if (error) {
4871                         free(ioes, M_TEMP);
4872                         break;
4873                 }
4874                 PF_RULES_WLOCK();
4875                 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
4876                         switch (ioe->rs_num) {
4877 #ifdef ALTQ
4878                         case PF_RULESET_ALTQ:
4879                                 if (ioe->anchor[0]) {
4880                                         PF_RULES_WUNLOCK();
4881                                         free(ioes, M_TEMP);
4882                                         error = EINVAL;
4883                                         goto fail;
4884                                 }
4885                                 if ((error = pf_begin_altq(&ioe->ticket))) {
4886                                         PF_RULES_WUNLOCK();
4887                                         free(ioes, M_TEMP);
4888                                         goto fail;
4889                                 }
4890                                 break;
4891 #endif /* ALTQ */
4892                         case PF_RULESET_TABLE:
4893                             {
4894                                 struct pfr_table table;
4895
4896                                 bzero(&table, sizeof(table));
4897                                 strlcpy(table.pfrt_anchor, ioe->anchor,
4898                                     sizeof(table.pfrt_anchor));
4899                                 if ((error = pfr_ina_begin(&table,
4900                                     &ioe->ticket, NULL, 0))) {
4901                                         PF_RULES_WUNLOCK();
4902                                         free(ioes, M_TEMP);
4903                                         goto fail;
4904                                 }
4905                                 break;
4906                             }
4907                         default:
4908                                 if ((error = pf_begin_rules(&ioe->ticket,
4909                                     ioe->rs_num, ioe->anchor))) {
4910                                         PF_RULES_WUNLOCK();
4911                                         free(ioes, M_TEMP);
4912                                         goto fail;
4913                                 }
4914                                 break;
4915                         }
4916                 }
4917                 PF_RULES_WUNLOCK();
4918                 error = copyout(ioes, io->array, totlen);
4919                 free(ioes, M_TEMP);
4920                 break;
4921         }
4922
4923         case DIOCXROLLBACK: {
4924                 struct pfioc_trans      *io = (struct pfioc_trans *)addr;
4925                 struct pfioc_trans_e    *ioe, *ioes;
4926                 size_t                   totlen;
4927                 int                      i;
4928
4929                 if (io->esize != sizeof(*ioe)) {
4930                         error = ENODEV;
4931                         break;
4932                 }
4933                 if (io->size < 0 ||
4934                     io->size > pf_ioctl_maxcount ||
4935                     WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
4936                         error = EINVAL;
4937                         break;
4938                 }
4939                 totlen = sizeof(struct pfioc_trans_e) * io->size;
4940                 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
4941                     M_TEMP, M_NOWAIT);
4942                 if (! ioes) {
4943                         error = ENOMEM;
4944                         break;
4945                 }
4946                 error = copyin(io->array, ioes, totlen);
4947                 if (error) {
4948                         free(ioes, M_TEMP);
4949                         break;
4950                 }
4951                 PF_RULES_WLOCK();
4952                 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
4953                         switch (ioe->rs_num) {
4954 #ifdef ALTQ
4955                         case PF_RULESET_ALTQ:
4956                                 if (ioe->anchor[0]) {
4957                                         PF_RULES_WUNLOCK();
4958                                         free(ioes, M_TEMP);
4959                                         error = EINVAL;
4960                                         goto fail;
4961                                 }
4962                                 if ((error = pf_rollback_altq(ioe->ticket))) {
4963                                         PF_RULES_WUNLOCK();
4964                                         free(ioes, M_TEMP);
4965                                         goto fail; /* really bad */
4966                                 }
4967                                 break;
4968 #endif /* ALTQ */
4969                         case PF_RULESET_TABLE:
4970                             {
4971                                 struct pfr_table table;
4972
4973                                 bzero(&table, sizeof(table));
4974                                 strlcpy(table.pfrt_anchor, ioe->anchor,
4975                                     sizeof(table.pfrt_anchor));
4976                                 if ((error = pfr_ina_rollback(&table,
4977                                     ioe->ticket, NULL, 0))) {
4978                                         PF_RULES_WUNLOCK();
4979                                         free(ioes, M_TEMP);
4980                                         goto fail; /* really bad */
4981                                 }
4982                                 break;
4983                             }
4984                         default:
4985                                 if ((error = pf_rollback_rules(ioe->ticket,
4986                                     ioe->rs_num, ioe->anchor))) {
4987                                         PF_RULES_WUNLOCK();
4988                                         free(ioes, M_TEMP);
4989                                         goto fail; /* really bad */
4990                                 }
4991                                 break;
4992                         }
4993                 }
4994                 PF_RULES_WUNLOCK();
4995                 free(ioes, M_TEMP);
4996                 break;
4997         }
4998
4999         case DIOCXCOMMIT: {
5000                 struct pfioc_trans      *io = (struct pfioc_trans *)addr;
5001                 struct pfioc_trans_e    *ioe, *ioes;
5002                 struct pf_kruleset      *rs;
5003                 size_t                   totlen;
5004                 int                      i;
5005
5006                 if (io->esize != sizeof(*ioe)) {
5007                         error = ENODEV;
5008                         break;
5009                 }
5010
5011                 if (io->size < 0 ||
5012                     io->size > pf_ioctl_maxcount ||
5013                     WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5014                         error = EINVAL;
5015                         break;
5016                 }
5017
5018                 totlen = sizeof(struct pfioc_trans_e) * io->size;
5019                 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5020                     M_TEMP, M_NOWAIT);
5021                 if (ioes == NULL) {
5022                         error = ENOMEM;
5023                         break;
5024                 }
5025                 error = copyin(io->array, ioes, totlen);
5026                 if (error) {
5027                         free(ioes, M_TEMP);
5028                         break;
5029                 }
5030                 PF_RULES_WLOCK();
5031                 /* First makes sure everything will succeed. */
5032                 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5033                         switch (ioe->rs_num) {
5034 #ifdef ALTQ
5035                         case PF_RULESET_ALTQ:
5036                                 if (ioe->anchor[0]) {
5037                                         PF_RULES_WUNLOCK();
5038                                         free(ioes, M_TEMP);
5039                                         error = EINVAL;
5040                                         goto fail;
5041                                 }
5042                                 if (!V_altqs_inactive_open || ioe->ticket !=
5043                                     V_ticket_altqs_inactive) {
5044                                         PF_RULES_WUNLOCK();
5045                                         free(ioes, M_TEMP);
5046                                         error = EBUSY;
5047                                         goto fail;
5048                                 }
5049                                 break;
5050 #endif /* ALTQ */
5051                         case PF_RULESET_TABLE:
5052                                 rs = pf_find_kruleset(ioe->anchor);
5053                                 if (rs == NULL || !rs->topen || ioe->ticket !=
5054                                     rs->tticket) {
5055                                         PF_RULES_WUNLOCK();
5056                                         free(ioes, M_TEMP);
5057                                         error = EBUSY;
5058                                         goto fail;
5059                                 }
5060                                 break;
5061                         default:
5062                                 if (ioe->rs_num < 0 || ioe->rs_num >=
5063                                     PF_RULESET_MAX) {
5064                                         PF_RULES_WUNLOCK();
5065                                         free(ioes, M_TEMP);
5066                                         error = EINVAL;
5067                                         goto fail;
5068                                 }
5069                                 rs = pf_find_kruleset(ioe->anchor);
5070                                 if (rs == NULL ||
5071                                     !rs->rules[ioe->rs_num].inactive.open ||
5072                                     rs->rules[ioe->rs_num].inactive.ticket !=
5073                                     ioe->ticket) {
5074                                         PF_RULES_WUNLOCK();
5075                                         free(ioes, M_TEMP);
5076                                         error = EBUSY;
5077                                         goto fail;
5078                                 }
5079                                 break;
5080                         }
5081                 }
5082                 /* Now do the commit - no errors should happen here. */
5083                 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5084                         switch (ioe->rs_num) {
5085 #ifdef ALTQ
5086                         case PF_RULESET_ALTQ:
5087                                 if ((error = pf_commit_altq(ioe->ticket))) {
5088                                         PF_RULES_WUNLOCK();
5089                                         free(ioes, M_TEMP);
5090                                         goto fail; /* really bad */
5091                                 }
5092                                 break;
5093 #endif /* ALTQ */
5094                         case PF_RULESET_TABLE:
5095                             {
5096                                 struct pfr_table table;
5097
5098                                 bzero(&table, sizeof(table));
5099                                 strlcpy(table.pfrt_anchor, ioe->anchor,
5100                                     sizeof(table.pfrt_anchor));
5101                                 if ((error = pfr_ina_commit(&table,
5102                                     ioe->ticket, NULL, NULL, 0))) {
5103                                         PF_RULES_WUNLOCK();
5104                                         free(ioes, M_TEMP);
5105                                         goto fail; /* really bad */
5106                                 }
5107                                 break;
5108                             }
5109                         default:
5110                                 if ((error = pf_commit_rules(ioe->ticket,
5111                                     ioe->rs_num, ioe->anchor))) {
5112                                         PF_RULES_WUNLOCK();
5113                                         free(ioes, M_TEMP);
5114                                         goto fail; /* really bad */
5115                                 }
5116                                 break;
5117                         }
5118                 }
5119                 PF_RULES_WUNLOCK();
5120                 free(ioes, M_TEMP);
5121                 break;
5122         }
5123
5124         case DIOCGETSRCNODES: {
5125                 struct pfioc_src_nodes  *psn = (struct pfioc_src_nodes *)addr;
5126                 struct pf_srchash       *sh;
5127                 struct pf_ksrc_node     *n;
5128                 struct pf_src_node      *p, *pstore;
5129                 uint32_t                 i, nr = 0;
5130
5131                 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5132                                 i++, sh++) {
5133                         PF_HASHROW_LOCK(sh);
5134                         LIST_FOREACH(n, &sh->nodes, entry)
5135                                 nr++;
5136                         PF_HASHROW_UNLOCK(sh);
5137                 }
5138
5139                 psn->psn_len = min(psn->psn_len,
5140                     sizeof(struct pf_src_node) * nr);
5141
5142                 if (psn->psn_len == 0) {
5143                         psn->psn_len = sizeof(struct pf_src_node) * nr;
5144                         break;
5145                 }
5146
5147                 nr = 0;
5148
5149                 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
5150                 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5151                     i++, sh++) {
5152                     PF_HASHROW_LOCK(sh);
5153                     LIST_FOREACH(n, &sh->nodes, entry) {
5154
5155                         if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
5156                                 break;
5157
5158                         pf_src_node_copy(n, p);
5159
5160                         p++;
5161                         nr++;
5162                     }
5163                     PF_HASHROW_UNLOCK(sh);
5164                 }
5165                 error = copyout(pstore, psn->psn_src_nodes,
5166                     sizeof(struct pf_src_node) * nr);
5167                 if (error) {
5168                         free(pstore, M_TEMP);
5169                         break;
5170                 }
5171                 psn->psn_len = sizeof(struct pf_src_node) * nr;
5172                 free(pstore, M_TEMP);
5173                 break;
5174         }
5175
5176         case DIOCCLRSRCNODES: {
5177
5178                 pf_clear_srcnodes(NULL);
5179                 pf_purge_expired_src_nodes();
5180                 break;
5181         }
5182
5183         case DIOCKILLSRCNODES:
5184                 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5185                 break;
5186
5187         case DIOCKEEPCOUNTERS:
5188                 error = pf_keepcounters((struct pfioc_nv *)addr);
5189                 break;
5190
5191         case DIOCSETHOSTID: {
5192                 u_int32_t       *hostid = (u_int32_t *)addr;
5193
5194                 PF_RULES_WLOCK();
5195                 if (*hostid == 0)
5196                         V_pf_status.hostid = arc4random();
5197                 else
5198                         V_pf_status.hostid = *hostid;
5199                 PF_RULES_WUNLOCK();
5200                 break;
5201         }
5202
5203         case DIOCOSFPFLUSH:
5204                 PF_RULES_WLOCK();
5205                 pf_osfp_flush();
5206                 PF_RULES_WUNLOCK();
5207                 break;
5208
5209         case DIOCIGETIFACES: {
5210                 struct pfioc_iface *io = (struct pfioc_iface *)addr;
5211                 struct pfi_kif *ifstore;
5212                 size_t bufsiz;
5213
5214                 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
5215                         error = ENODEV;
5216                         break;
5217                 }
5218
5219                 if (io->pfiio_size < 0 ||
5220                     io->pfiio_size > pf_ioctl_maxcount ||
5221                     WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
5222                         error = EINVAL;
5223                         break;
5224                 }
5225
5226                 bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
5227                 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
5228                     M_TEMP, M_NOWAIT);
5229                 if (ifstore == NULL) {
5230                         error = ENOMEM;
5231                         break;
5232                 }
5233
5234                 PF_RULES_RLOCK();
5235                 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
5236                 PF_RULES_RUNLOCK();
5237                 error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5238                 free(ifstore, M_TEMP);
5239                 break;
5240         }
5241
5242         case DIOCSETIFFLAG: {
5243                 struct pfioc_iface *io = (struct pfioc_iface *)addr;
5244
5245                 PF_RULES_WLOCK();
5246                 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5247                 PF_RULES_WUNLOCK();
5248                 break;
5249         }
5250
5251         case DIOCCLRIFFLAG: {
5252                 struct pfioc_iface *io = (struct pfioc_iface *)addr;
5253
5254                 PF_RULES_WLOCK();
5255                 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5256                 PF_RULES_WUNLOCK();
5257                 break;
5258         }
5259
5260         default:
5261                 error = ENODEV;
5262                 break;
5263         }
5264 fail:
5265         if (sx_xlocked(&pf_ioctl_lock))
5266                 sx_xunlock(&pf_ioctl_lock);
5267         CURVNET_RESTORE();
5268
5269 #undef ERROUT_IOCTL
5270
5271         return (error);
5272 }
5273
/*
 * Export a kernel state (struct pf_state) into the pfsync export
 * representation (struct pfsync_state).  Multi-byte fields are
 * converted to network byte order.  The caller provides both
 * structures; sp is fully overwritten.
 */
void
pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
{
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	/* Export age of the state, not its absolute creation time. */
	sp->creation = htonl(time_uptime - st->creation);
	/*
	 * Convert the absolute expiry time into seconds remaining;
	 * already-expired states export as 0.
	 */
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_uptime)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_uptime);

	sp->direction = st->direction;
	sp->log = st->log;
	sp->timeout = st->timeout;
	sp->state_flags = st->state_flags;
	/* Flag attached source nodes so the peer knows to expect them. */
	if (st->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	sp->id = st->id;
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	/* Rule numbers; -1 (all ones after htonl) means "no rule". */
	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	/*
	 * Snapshot the per-direction 64-bit packet/byte counters into
	 * the exported (network byte order) counter representation.
	 */
	pf_state_counter_hton(counter_u64_fetch(st->packets[0]),
	    sp->packets[0]);
	pf_state_counter_hton(counter_u64_fetch(st->packets[1]),
	    sp->packets[1]);
	pf_state_counter_hton(counter_u64_fetch(st->bytes[0]), sp->bytes[0]);
	pf_state_counter_hton(counter_u64_fetch(st->bytes[1]), sp->bytes[1]);

}
5336
5337 static void
5338 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
5339 {
5340         struct pfr_ktable *kt;
5341
5342         KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
5343
5344         kt = aw->p.tbl;
5345         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
5346                 kt = kt->pfrkt_root;
5347         aw->p.tbl = NULL;
5348         aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
5349                 kt->pfrkt_cnt : -1;
5350 }
5351
/*
 * XXX - Check for version mismatch!!!
 */
/* Mark every state for immediate purge and unlink it from the hash. */
static void
pf_clear_all_states(void)
{
	struct pf_state *s;
	u_int i;

	/* Walk every row of the state id hash. */
	for (i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			s->timeout = PFTM_PURGE;
			/* Don't send out individual delete messages. */
			s->state_flags |= PFSTATE_NOSYNC;
			pf_unlink_state(s, PF_ENTER_LOCKED);
			/*
			 * NOTE(review): unlinking presumably invalidates
			 * the iterator / releases the row lock, so restart
			 * the scan of this row -- confirm against
			 * pf_unlink_state()'s locking contract.
			 */
			goto relock;
		}
		PF_HASHROW_UNLOCK(ih);
	}
}
5375
5376 static int
5377 pf_clear_tables(void)
5378 {
5379         struct pfioc_table io;
5380         int error;
5381
5382         bzero(&io, sizeof(io));
5383
5384         error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
5385             io.pfrio_flags);
5386
5387         return (error);
5388 }
5389
/*
 * Detach states from the given source node and mark it expired, or,
 * when n is NULL, do the same for every source node in the system.
 */
static void
pf_clear_srcnodes(struct pf_ksrc_node *n)
{
	struct pf_state *s;
	int i;

	/* First drop every state's reference to the node(s) being cleared. */
	for (i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (n == NULL || n == s->src_node)
				s->src_node = NULL;
			if (n == NULL || n == s->nat_src_node)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	if (n == NULL) {
		struct pf_srchash *sh;

		/*
		 * Expire every node in every source hash row.  Note the
		 * parameter 'n' is reused as the loop iterator here.
		 * expire = 1 presumably makes the purge scan reclaim the
		 * node on its next pass -- confirm.
		 */
		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry) {
				n->expire = 1;
				n->states = 0;
			}
			PF_HASHROW_UNLOCK(sh);
		}
	} else {
		/* XXX: hash slot should already be locked here. */
		n->expire = 1;
		n->states = 0;
	}
}
5427
/*
 * Free all source nodes whose source/destination addresses match the
 * (possibly negated) address/mask filters in psnk.  The number of
 * nodes killed is returned to the caller via psnk->psnk_killed.
 */
static void
pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
{
	struct pf_ksrc_node_list	 kill;

	LIST_INIT(&kill);
	/*
	 * Pass 1: unlink matching nodes from their hash rows onto a
	 * private kill list, flagging each with expire = 1 so pass 2
	 * can recognize them.
	 */
	for (int i = 0; i <= pf_srchashmask; i++) {
		struct pf_srchash *sh = &V_pf_srchash[i];
		struct pf_ksrc_node *sn, *tmp;

		PF_HASHROW_LOCK(sh);
		/* _SAFE variant: entries are removed during traversal. */
		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
			if (PF_MATCHA(psnk->psnk_src.neg,
			      &psnk->psnk_src.addr.v.a.addr,
			      &psnk->psnk_src.addr.v.a.mask,
			      &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			      &psnk->psnk_dst.addr.v.a.addr,
			      &psnk->psnk_dst.addr.v.a.mask,
			      &sn->raddr, sn->af)) {
				pf_unlink_src_node(sn);
				LIST_INSERT_HEAD(&kill, sn, entry);
				sn->expire = 1;
			}
		PF_HASHROW_UNLOCK(sh);
	}

	/*
	 * Pass 2: clear state back-pointers to the nodes flagged above
	 * so no state keeps a dangling reference once they are freed.
	 */
	for (int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_state *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (s->src_node && s->src_node->expire == 1)
				s->src_node = NULL;
			if (s->nat_src_node && s->nat_src_node->expire == 1)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	/* Free the collected nodes and report how many were killed. */
	psnk->psnk_killed = pf_free_src_nodes(&kill);
}
5471
/*
 * DIOCKEEPCOUNTERS handler: copy a packed nvlist in from userland and
 * set V_pf_status.keep_counters from its required "keep_counters"
 * boolean.  Returns 0 on success or an errno value (ENOMEM on
 * oversized input, EBADMSG on a malformed nvlist).
 */
static int
pf_keepcounters(struct pfioc_nv *nv)
{
	nvlist_t	*nvl = NULL;
	void		*nvlpacked = NULL;
	int		 error = 0;

	/* Set error and jump to the common cleanup label. */
#define ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	/* Bound the copyin so userland cannot force a huge allocation. */
	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
	/* M_WAITOK malloc does not fail; this check is defensive only. */
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	if (! nvlist_exists_bool(nvl, "keep_counters"))
		ERROUT(EBADMSG);

	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");

on_error:
	/* nvlist_destroy(NULL) and free(NULL) are both safe no-ops. */
	nvlist_destroy(nvl);
	free(nvlpacked, M_TEMP);
	return (error);
}
5506
/*
 * Remove states, optionally restricted to an interface name given in
 * @kill, and optionally also killing states that match the reverse of
 * each killed state's key (psk_kill_match).  Returns the number of
 * states removed.
 */
static unsigned int
pf_clear_states(const struct pf_kstate_kill *kill)
{
        struct pf_state_key_cmp  match_key;
        struct pf_state *s;
        int              idx;
        unsigned int     killed = 0, dir;

        for (unsigned int i = 0; i <= pf_hashmask; i++) {
                struct pf_idhash *ih = &V_pf_idhash[i];

                /*
                 * The row lock is not held across a kill (the goto below
                 * re-takes it), so the scan of this row restarts from the
                 * head after every state removed.
                 */
relock_DIOCCLRSTATES:
                PF_HASHROW_LOCK(ih);
                LIST_FOREACH(s, &ih->states, entry) {
                        /* Honour the optional interface filter. */
                        if (kill->psk_ifname[0] &&
                            strcmp(kill->psk_ifname,
                            s->kif->pfik_name))
                                continue;

                        if (kill->psk_kill_match) {
                                /*
                                 * Build the key of the opposite side of
                                 * this state (addresses and ports
                                 * swapped, opposite direction and key
                                 * index) so related states for the same
                                 * flow can be killed as well.
                                 */
                                bzero(&match_key, sizeof(match_key));

                                if (s->direction == PF_OUT) {
                                        dir = PF_IN;
                                        idx = PF_SK_STACK;
                                } else {
                                        dir = PF_OUT;
                                        idx = PF_SK_WIRE;
                                }

                                match_key.af = s->key[idx]->af;
                                match_key.proto = s->key[idx]->proto;
                                PF_ACPY(&match_key.addr[0],
                                    &s->key[idx]->addr[1], match_key.af);
                                match_key.port[0] = s->key[idx]->port[1];
                                PF_ACPY(&match_key.addr[1],
                                    &s->key[idx]->addr[0], match_key.af);
                                match_key.port[1] = s->key[idx]->port[0];
                        }

                        /*
                         * Don't send out individual
                         * delete messages.
                         */
                        s->state_flags |= PFSTATE_NOSYNC;
                        pf_unlink_state(s, PF_ENTER_LOCKED);
                        killed++;

                        if (kill->psk_kill_match)
                                killed += pf_kill_matching_state(&match_key,
                                    dir);

                        goto relock_DIOCCLRSTATES;
                }
                PF_HASHROW_UNLOCK(ih);
        }

        /* Notify pfsync peers of the bulk clear in a single message. */
        if (V_pfsync_clear_states_ptr != NULL)
                V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);

        return (killed);
}
5569
5570 static int
5571 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
5572 {
5573         struct pf_state         *s;
5574
5575         if (kill->psk_pfcmp.id) {
5576                 if (kill->psk_pfcmp.creatorid == 0)
5577                         kill->psk_pfcmp.creatorid = V_pf_status.hostid;
5578                 if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
5579                     kill->psk_pfcmp.creatorid))) {
5580                         pf_unlink_state(s, PF_ENTER_LOCKED);
5581                         *killed = 1;
5582                 }
5583                 return (0);
5584         }
5585
5586         for (unsigned int i = 0; i <= pf_hashmask; i++)
5587                 *killed += pf_killstates_row(kill, &V_pf_idhash[i]);
5588
5589         return (0);
5590 }
5591
/*
 * nvlist-based kill-states ioctl handler: unpack a state-kill
 * description from userland, perform the kill via pf_killstates() and
 * return the count in a packed reply nvlist under the "killed" key.
 */
static int
pf_killstates_nv(struct pfioc_nv *nv)
{
        struct pf_kstate_kill    kill;
        nvlist_t                *nvl = NULL;
        void                    *nvlpacked = NULL;
        int                      error = 0;
        unsigned int             killed = 0;

#define ERROUT(x)       ERROUT_FUNCTION(on_error, x)

        /* Bound the request size before allocating kernel memory. */
        if (nv->len > pf_ioctl_maxcount)
                ERROUT(ENOMEM);

        nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
        if (nvlpacked == NULL)
                ERROUT(ENOMEM);

        error = copyin(nv->data, nvlpacked, nv->len);
        if (error)
                ERROUT(error);

        nvl = nvlist_unpack(nvlpacked, nv->len, 0);
        if (nvl == NULL)
                ERROUT(EBADMSG);

        error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
        if (error)
                ERROUT(error);

        error = pf_killstates(&kill, &killed);

        /* Reuse nvl/nvlpacked to build the reply. */
        free(nvlpacked, M_TEMP);
        nvlpacked = NULL;
        nvlist_destroy(nvl);
        nvl = nvlist_create(0);
        if (nvl == NULL)
                ERROUT(ENOMEM);

        nvlist_add_number(nvl, "killed", killed);

        nvlpacked = nvlist_pack(nvl, &nv->len);
        if (nvlpacked == NULL)
                ERROUT(ENOMEM);

        /*
         * nv->size == 0 appears to be a size probe: nv->len has been
         * updated above, so only the length is returned — confirm
         * against the userland counterpart.
         */
        if (nv->size == 0)
                ERROUT(0);
        else if (nv->size < nv->len)
                ERROUT(ENOSPC);

        error = copyout(nvlpacked, nv->data, nv->len);

on_error:
        nvlist_destroy(nvl);
        free(nvlpacked, M_TEMP);
        return (error);
}
5649
5650 static int
5651 pf_clearstates_nv(struct pfioc_nv *nv)
5652 {
5653         struct pf_kstate_kill    kill;
5654         nvlist_t                *nvl = NULL;
5655         void                    *nvlpacked = NULL;
5656         int                      error = 0;
5657         unsigned int             killed;
5658
5659 #define ERROUT(x)       ERROUT_FUNCTION(on_error, x)
5660
5661         if (nv->len > pf_ioctl_maxcount)
5662                 ERROUT(ENOMEM);
5663
5664         nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
5665         if (nvlpacked == NULL)
5666                 ERROUT(ENOMEM);
5667
5668         error = copyin(nv->data, nvlpacked, nv->len);
5669         if (error)
5670                 ERROUT(error);
5671
5672         nvl = nvlist_unpack(nvlpacked, nv->len, 0);
5673         if (nvl == NULL)
5674                 ERROUT(EBADMSG);
5675
5676         error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
5677         if (error)
5678                 ERROUT(error);
5679
5680         killed = pf_clear_states(&kill);
5681
5682         free(nvlpacked, M_TEMP);
5683         nvlpacked = NULL;
5684         nvlist_destroy(nvl);
5685         nvl = nvlist_create(0);
5686         if (nvl == NULL)
5687                 ERROUT(ENOMEM);
5688
5689         nvlist_add_number(nvl, "killed", killed);
5690
5691         nvlpacked = nvlist_pack(nvl, &nv->len);
5692         if (nvlpacked == NULL)
5693                 ERROUT(ENOMEM);
5694
5695         if (nv->size == 0)
5696                 ERROUT(0);
5697         else if (nv->size < nv->len)
5698                 ERROUT(ENOSPC);
5699
5700         error = copyout(nvlpacked, nv->data, nv->len);
5701
5702 on_error:
5703         nvlist_destroy(nvl);
5704         free(nvlpacked, M_TEMP);
5705         return (error);
5706 }
5707
/*
 * XXX - Check for version mismatch!!!
 */
5711
5712 /*
5713  * Duplicate pfctl -Fa operation to get rid of as much as we can.
5714  */
static int
shutdown_pf(void)
{
        int error = 0;
        u_int32_t t[5];
        char nn = '\0';

        do {
                /*
                 * Open a transaction on each of the five ruleset types;
                 * committing them below with no rules staged flushes the
                 * active rules.  Any failure aborts the sequence.
                 */
                if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
                    != 0) {
                        DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
                        break;
                }
                if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
                    != 0) {
                        DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
                        break;          /* XXX: rollback? */
                }
                if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
                    != 0) {
                        DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
                        break;          /* XXX: rollback? */
                }
                if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
                    != 0) {
                        DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
                        break;          /* XXX: rollback? */
                }
                if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
                    != 0) {
                        DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
                        break;          /* XXX: rollback? */
                }

                /* XXX: these should always succeed here */
                pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
                pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
                pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
                pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
                pf_commit_rules(t[4], PF_RULESET_RDR, &nn);

                if ((error = pf_clear_tables()) != 0)
                        break;

#ifdef ALTQ
                /* Same begin/commit-empty trick for the ALTQ queues. */
                if ((error = pf_begin_altq(&t[0])) != 0) {
                        DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
                        break;
                }
                pf_commit_altq(t[0]);
#endif

                pf_clear_all_states();

                pf_clear_srcnodes(NULL);

                /* status does not use malloced mem so no need to cleanup */
                /* fingerprints and interfaces have their own cleanup code */
        } while(0);

        return (error);
}
5777
5778 #ifdef INET
5779 static int
5780 pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
5781     struct inpcb *inp)
5782 {
5783         int chk;
5784
5785         chk = pf_test(PF_IN, flags, ifp, m, inp);
5786         if (chk && *m) {
5787                 m_freem(*m);
5788                 *m = NULL;
5789         }
5790
5791         if (chk != PF_PASS)
5792                 return (EACCES);
5793         return (0);
5794 }
5795
5796 static int
5797 pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
5798     struct inpcb *inp)
5799 {
5800         int chk;
5801
5802         chk = pf_test(PF_OUT, flags, ifp, m, inp);
5803         if (chk && *m) {
5804                 m_freem(*m);
5805                 *m = NULL;
5806         }
5807
5808         if (chk != PF_PASS)
5809                 return (EACCES);
5810         return (0);
5811 }
5812 #endif
5813
5814 #ifdef INET6
5815 static int
5816 pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
5817     struct inpcb *inp)
5818 {
5819         int chk;
5820
5821         /*
5822          * In case of loopback traffic IPv6 uses the real interface in
5823          * order to support scoped addresses. In order to support stateful
5824          * filtering we have change this to lo0 as it is the case in IPv4.
5825          */
5826         CURVNET_SET(ifp->if_vnet);
5827         chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp);
5828         CURVNET_RESTORE();
5829         if (chk && *m) {
5830                 m_freem(*m);
5831                 *m = NULL;
5832         }
5833         if (chk != PF_PASS)
5834                 return (EACCES);
5835         return (0);
5836 }
5837
5838 static int
5839 pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
5840     struct inpcb *inp)
5841 {
5842         int chk;
5843
5844         CURVNET_SET(ifp->if_vnet);
5845         chk = pf_test6(PF_OUT, flags, ifp, m, inp);
5846         CURVNET_RESTORE();
5847         if (chk && *m) {
5848                 m_freem(*m);
5849                 *m = NULL;
5850         }
5851         if (chk != PF_PASS)
5852                 return (EACCES);
5853         return (0);
5854 }
5855 #endif /* INET6 */
5856
/*
 * Register pf's pfil(9) hooks for IPv4 and IPv6.  All-or-nothing: if
 * the IPv6 head cannot be found after the IPv4 hooks were installed,
 * the IPv4 hooks are removed again before returning the error.
 * Idempotent via V_pf_pfil_hooked.
 */
static int
hook_pf(void)
{
#ifdef INET
        struct pfil_head *pfh_inet;
#endif
#ifdef INET6
        struct pfil_head *pfh_inet6;
#endif

        /* Already hooked; nothing to do. */
        if (V_pf_pfil_hooked)
                return (0);

#ifdef INET
        pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
        if (pfh_inet == NULL)
                return (ESRCH); /* XXX */
        pfil_add_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
        pfil_add_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
#endif
#ifdef INET6
        pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
        if (pfh_inet6 == NULL) {
#ifdef INET
                /* Undo the IPv4 hooks installed above. */
                pfil_remove_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
                    pfh_inet);
                pfil_remove_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
                    pfh_inet);
#endif
                return (ESRCH); /* XXX */
        }
        pfil_add_hook_flags(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
        pfil_add_hook_flags(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
#endif

        V_pf_pfil_hooked = 1;
        return (0);
}
5895
/*
 * Unregister the pfil(9) hooks installed by hook_pf().  Idempotent via
 * V_pf_pfil_hooked.  Returns ESRCH if a pfil head has vanished, which
 * should not happen in practice.
 */
static int
dehook_pf(void)
{
#ifdef INET
        struct pfil_head *pfh_inet;
#endif
#ifdef INET6
        struct pfil_head *pfh_inet6;
#endif

        /* Not hooked; nothing to do. */
        if (V_pf_pfil_hooked == 0)
                return (0);

#ifdef INET
        pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
        if (pfh_inet == NULL)
                return (ESRCH); /* XXX */
        pfil_remove_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
            pfh_inet);
        pfil_remove_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
            pfh_inet);
#endif
#ifdef INET6
        pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
        if (pfh_inet6 == NULL)
                return (ESRCH); /* XXX */
        pfil_remove_hook_flags(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
            pfh_inet6);
        pfil_remove_hook_flags(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
            pfh_inet6);
#endif

        V_pf_pfil_hooked = 0;
        return (0);
}
5931
/*
 * Per-vnet load: create the tag UMA zone and the rule (and, with ALTQ,
 * queue) tag sets, attach pf to this vnet and mark the vnet active.
 */
static void
pf_load_vnet(void)
{
        V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

        pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
            PF_RULE_TAG_HASH_SIZE_DEFAULT);
#ifdef ALTQ
        pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
            PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
#endif

        pfattach_vnet();
        V_pf_vnet_active = 1;
}
5948
5949 static int
5950 pf_load(void)
5951 {
5952         int error;
5953
5954         rm_init(&pf_rules_lock, "pf rulesets");
5955         sx_init(&pf_ioctl_lock, "pf ioctl");
5956         sx_init(&pf_end_lock, "pf end thread");
5957
5958         pf_mtag_initialize();
5959
5960         pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
5961         if (pf_dev == NULL)
5962                 return (ENOMEM);
5963
5964         pf_end_threads = 0;
5965         error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
5966         if (error != 0)
5967                 return (error);
5968
5969         pfi_initialize();
5970
5971         return (0);
5972 }
5973
/*
 * Per-vnet unload: stop pf, unhook pfil, flush rules/states, tear down
 * the software interrupt, purge remaining objects and free per-vnet
 * resources.  The teardown order matters; counters are freed last
 * because shutdown_pf() still updates them.
 */
static void
pf_unload_vnet(void)
{
        int error, ret;

        V_pf_vnet_active = 0;
        V_pf_status.running = 0;
        error = dehook_pf();
        if (error) {
                /*
                 * Should not happen!
                 * XXX Due to error code ESRCH, kldunload will show
                 * a message like 'No such process'.
                 */
                printf("%s : pfil unregisteration fail\n", __FUNCTION__);
                return;
        }

        PF_RULES_WLOCK();
        shutdown_pf();
        PF_RULES_WUNLOCK();

        /* Tear down the swi and its interrupt event. */
        ret = swi_remove(V_pf_swi_cookie);
        MPASS(ret == 0);
        ret = intr_event_destroy(V_pf_swi_ie);
        MPASS(ret == 0);

        pf_unload_vnet_purge();

        pf_normalize_cleanup();
        PF_RULES_WLOCK();
        pfi_cleanup_vnet();
        PF_RULES_WUNLOCK();
        pfr_cleanup();
        pf_osfp_flush();
        pf_cleanup();
        /* The mbuf tag handler is global; only the default vnet frees it. */
        if (IS_DEFAULT_VNET(curvnet))
                pf_mtag_cleanup();

        pf_cleanup_tagset(&V_pf_tags);
#ifdef ALTQ
        pf_cleanup_tagset(&V_pf_qids);
#endif
        uma_zdestroy(V_pf_tag_z);

        /* Free counters last as we updated them during shutdown. */
        counter_u64_free(V_pf_default_rule.evaluations);
        for (int i = 0; i < 2; i++) {
                counter_u64_free(V_pf_default_rule.packets[i]);
                counter_u64_free(V_pf_default_rule.bytes[i]);
        }
        counter_u64_free(V_pf_default_rule.states_cur);
        counter_u64_free(V_pf_default_rule.states_tot);
        counter_u64_free(V_pf_default_rule.src_nodes);

        for (int i = 0; i < PFRES_MAX; i++)
                counter_u64_free(V_pf_status.counters[i]);
        for (int i = 0; i < LCNT_MAX; i++)
                counter_u64_free(V_pf_status.lcounters[i]);
        for (int i = 0; i < FCNT_MAX; i++)
                counter_u64_free(V_pf_status.fcounters[i]);
        for (int i = 0; i < SCNT_MAX; i++)
                counter_u64_free(V_pf_status.scounters[i]);
}
6038
/*
 * Global unload: stop the purge kthread, destroy /dev/pf and free the
 * global locks.  Runs after every vnet has been torn down.
 */
static void
pf_unload(void)
{

        sx_xlock(&pf_end_lock);
        /*
         * Signal the purge thread to exit and wait for it; the thread
         * presumably advances pf_end_threads past 1 when it terminates
         * (see pf_purge_thread) — the loop exits on that handshake.
         */
        pf_end_threads = 1;
        while (pf_end_threads < 2) {
                wakeup_one(pf_purge_thread);
                sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
        }
        sx_xunlock(&pf_end_lock);

        if (pf_dev != NULL)
                destroy_dev(pf_dev);

        pfi_cleanup();

        rm_destroy(&pf_rules_lock);
        sx_destroy(&pf_ioctl_lock);
        sx_destroy(&pf_end_lock);
}
6060
/* VNET constructor: initialize pf for each new vnet. */
static void
vnet_pf_init(void *unused __unused)
{

        pf_load_vnet();
}
VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_init, NULL);
6069
6070 static void
6071 vnet_pf_uninit(const void *unused __unused)
6072 {
6073
6074         pf_unload_vnet();
6075
6076 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
6077 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6078     vnet_pf_uninit, NULL);
6079
6080
6081 static int
6082 pf_modevent(module_t mod, int type, void *data)
6083 {
6084         int error = 0;
6085
6086         switch(type) {
6087         case MOD_LOAD:
6088                 error = pf_load();
6089                 break;
6090         case MOD_UNLOAD:
6091                 /* Handled in SYSUNINIT(pf_unload) to ensure it's done after
6092                  * the vnet_pf_uninit()s */
6093                 break;
6094         default:
6095                 error = EINVAL;
6096                 break;
6097         }
6098
6099         return (error);
6100 }
6101
/* Module descriptor: name, event handler, no extra argument. */
static moduledata_t pf_mod = {
        "pf",
        pf_modevent,
        0
};

/*
 * Load at SI_SUB_PROTO_FIREWALL / SI_ORDER_SECOND, i.e. before the
 * per-vnet SI_ORDER_THIRD initializers above run.
 */
DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
MODULE_VERSION(pf, PF_MODVER);