/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *      $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/md5.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#ifdef ALTQ
#include <net/altq/altq.h>
#endif

static struct pf_pool	*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
			    u_int8_t, u_int8_t, u_int8_t);

static void		 pf_mv_pool(struct pf_palist *, struct pf_palist *);
static void		 pf_empty_pool(struct pf_palist *);
static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
			    struct thread *);
#ifdef ALTQ
static int		 pf_begin_altq(u_int32_t *);
static int		 pf_rollback_altq(u_int32_t);
static int		 pf_commit_altq(u_int32_t);
static int		 pf_enable_altq(struct pf_altq *);
static int		 pf_disable_altq(struct pf_altq *);
static u_int32_t	 pf_qname2qid(char *);
static void		 pf_qid_unref(u_int32_t);
#endif /* ALTQ */
static int		 pf_begin_rules(u_int32_t *, int, const char *);
static int		 pf_rollback_rules(u_int32_t, int, char *);
static int		 pf_setup_pfsync_matching(struct pf_ruleset *);
static void		 pf_hash_rule(MD5_CTX *, struct pf_rule *);
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int		 pf_commit_rules(u_int32_t, int, char *);
static int		 pf_addr_setup(struct pf_ruleset *,
			    struct pf_addr_wrap *, sa_family_t);
static void		 pf_addr_copyout(struct pf_addr_wrap *);

VNET_DEFINE(struct pf_rule,	pf_default_rule);

#ifdef ALTQ
static VNET_DEFINE(int,		pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)
#endif

#define	TAGID_MAX	 50000
struct pf_tagname {
	TAILQ_ENTRY(pf_tagname)	entries;
	char			name[PF_TAG_NAME_SIZE];
	uint16_t		tag;
	int			ref;
};

TAILQ_HEAD(pf_tags, pf_tagname);
#define	V_pf_tags		VNET(pf_tags)
VNET_DEFINE(struct pf_tags, pf_tags);
#define	V_pf_qids		VNET(pf_qids)
VNET_DEFINE(struct pf_tags, pf_qids);
static MALLOC_DEFINE(M_PFTAG, "pf_tag", "pf(4) tag names");
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

static u_int16_t	 tagname2tag(struct pf_tags *, char *);
static u_int16_t	 pf_tagname2tag(char *);
static void		 tag_unref(struct pf_tags *, u_int16_t);

#define	DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x

struct cdev *pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_states(void);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(struct pf_src_node *);
static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);

/*
 * Wrapper functions for pfil(9) hooks
 */
#ifdef INET
static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, int flags, struct inpcb *inp);
static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, int flags, struct inpcb *inp);
#endif
#ifdef INET6
static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, int flags, struct inpcb *inp);
static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, int flags, struct inpcb *inp);
#endif

static int		hook_pf(void);
static int		dehook_pf(void);
static int		shutdown_pf(void);
static int		pf_load(void);
static void		pf_unload(void);

static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

static volatile VNET_DEFINE(int, pf_pfil_hooked);
#define V_pf_pfil_hooked	VNET(pf_pfil_hooked)

/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid".  We primarily need this to control (global)
 * external event, e.g., eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define V_pf_vnet_active	VNET(pf_vnet_active)

int pf_end_threads;
struct proc *pf_purge_proc;

struct rwlock			pf_rules_lock;
struct sx			pf_ioctl_lock;
struct sx			pf_end_lock;

/* pfsync */
pfsync_state_import_t		*pfsync_state_import_ptr = NULL;
pfsync_insert_state_t		*pfsync_insert_state_ptr = NULL;
pfsync_update_state_t		*pfsync_update_state_ptr = NULL;
pfsync_delete_state_t		*pfsync_delete_state_ptr = NULL;
pfsync_clear_states_t		*pfsync_clear_states_ptr = NULL;
pfsync_defer_t			*pfsync_defer_ptr = NULL;
/* pflog */
pflog_packet_t			*pflog_packet_ptr = NULL;

extern u_long	pf_ioctl_maxcount;

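/*
 * Per-VNET attach: initialize the pf subsystems, default limits, the
 * default rule and its counters, state timeouts, status counters and
 * the "pf send" software interrupt handler.
 */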
static void
pfattach_vnet(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;

	pf_initialize();
	pfr_initialize();
	pfi_initialize_vnet();
	pf_normalize_init();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
	V_pf_default_rule.action = PF_DROP;
#else
	V_pf_default_rule.action = PF_PASS;
#endif
	V_pf_default_rule.nr = -1;
	V_pf_default_rule.rtableid = -1;

	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	bzero(&V_pf_status, sizeof(V_pf_status));
	V_pf_status.debug = PF_DEBUG_URGENT;

	V_pf_pfil_hooked = 0;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	for (int i = 0; i < PFRES_MAX; i++)
		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < LCNT_MAX; i++)
		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < FCNT_MAX; i++)
		V_pf_status.fcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < SCNT_MAX; i++)
		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

	if (swi_add(NULL, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
		/* XXXGL: leaked all above. */
		return;
}

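/*
 * Find the address pool of the rule identified by rule_action and
 * rule_number (or of the last rule when r_last is set) in the given
 * anchor, looking at either the active or the inactive ruleset and
 * optionally validating the caller's ticket.
 */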
static struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

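/* Move every pool address from poola to the tail of poolb. */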
static void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

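/*
 * Free all pool addresses in poola, dropping any dynamic-interface,
 * table and kif references they hold.
 */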
static void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr *pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			/* XXX: this could be unfinished pooladdr on pabuf */
			if (pa->addr.p.tbl != NULL)
				pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}

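/*
 * Detach a rule from its queue and park it on the unlinked list; the
 * actual free is deferred until no references remain.
 */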
static void
pf_unlink_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{

	PF_RULES_WASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	PF_UNLNKDRULES_LOCK();
	rule->rule_flag |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
	PF_UNLNKDRULES_UNLOCK();
}

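/* Release every resource held by a rule and free it. */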
void
pf_free_rule(struct pf_rule *rule)
{

	PF_RULES_WASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kif_unref(rule->kif);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	counter_u64_free(rule->states_cur);
	counter_u64_free(rule->states_tot);
	counter_u64_free(rule->src_nodes);
	free(rule, M_PFRULE);
}

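/*
 * Return the tag id for tagname, taking a new reference.  If the name
 * is unknown, allocate the lowest free id (keeping the list sorted by
 * tag) up to TAGID_MAX; returns 0 on failure.
 */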
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	PF_RULES_WASSERT();

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = malloc(sizeof(*tag), M_PFTAG, M_NOWAIT|M_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else	/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

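/* Drop a reference on tag, freeing its entry once the count reaches zero. */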
static void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	PF_RULES_WASSERT();

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				free(p, M_PFTAG);
			}
			break;
		}
	}
}

static u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&V_pf_tags, tagname));
}

#ifdef ALTQ
static u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&V_pf_qids, qname));
}

static void
pf_qid_unref(u_int32_t qid)
{
	tag_unref(&V_pf_qids, (u_int16_t)qid);
}

static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}

static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = V_pf_altqs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}

	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one.  if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

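/*
 * Rebuild the ALTQ configuration after an interface arrives or departs.
 * Queues whose interface is gone (or, when remove is set, bound to ifp)
 * are flagged PFALTQ_FLAG_IF_REMOVED.
 */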
void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct ifnet	*ifp1;
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if (a2->qname[0] != 0) {
			if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
				error = EBUSY;
				free(a2, M_PFALTQ);
				break;
			}
			a2->altq_disc = NULL;
			TAILQ_FOREACH(a3, V_pf_altqs_inactive, entries) {
				if (strncmp(a3->ifname, a2->ifname,
				    IFNAMSIZ) == 0 && a3->qname[0] == 0) {
					a2->altq_disc = a3->altq_disc;
					break;
				}
			}
		}
		/* Deactivate the interface in question */
		a2->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
		if ((ifp1 = ifunit(a2->ifname)) == NULL ||
		    (remove && ifp1 == ifp)) {
			a2->local_flags |= PFALTQ_FLAG_IF_REMOVED;
		} else {
			error = altq_add(a2);

			if (ticket != V_ticket_altqs_inactive)
				error = EBUSY;

			if (error) {
				free(a2, M_PFALTQ);
				break;
			}
		}

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
#endif /* ALTQ */

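/*
 * Open a fresh inactive ruleset for the anchor: flush any leftover
 * inactive rules and hand back a new ticket.
 */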
static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

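/* Helpers that feed rule fields into the MD5 ruleset checksum. */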
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)

static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
		case PF_ADDR_DYNIFTL:
			PF_MD5_UPD(pfr, addr.v.ifname);
			PF_MD5_UPD(pfr, addr.iflags);
			break;
		case PF_ADDR_TABLE:
			PF_MD5_UPD(pfr, addr.v.tblname);
			break;
		case PF_ADDR_ADDRMASK:
			/* XXX ignore af? */
			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
			break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

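/*
 * Swap the inactive ruleset into place as the new active ruleset and
 * purge the previous active rules.  For the main ruleset the pfsync
 * checksum is recomputed first.
 */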
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 error;
	u_int32_t		 old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);

	return (0);
}

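/*
 * Hash all inactive rulesets (except scrub) into the checksum exported
 * to pfsync and build per-ruleset arrays of rule pointers indexed by
 * rule number.
 */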
static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}

static int
pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}

static void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}

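/*
 * Main pf(4) ioctl entry point.  Securelevel and open-mode checks are
 * performed up front; each command is then handled in the switch below.
 */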
static int
pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int			 error = 0;

	/* XXX keep in sync with switch() below */
	if (securelevel_gt(td->td_ucred, 2))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEED:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		default:
			return (EPERM);
		}

	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEED:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return (EACCES);
		case DIOCGETRULE:
			if (((struct pfioc_rule *)addr)->action ==
			    PF_GET_CLR_CNTR)
				return (EACCES);
			break;
		default:
			return (EACCES);
		}

	CURVNET_SET(TD_TO_VNET(td));

	switch (cmd) {
	case DIOCSTART:
		sx_xlock(&pf_ioctl_lock);
		if (V_pf_status.running)
			error = EEXIST;
		else {
			int cpu;

			error = hook_pf();
			if (error) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: pfil registration failed\n"));
				break;
			}
			V_pf_status.running = 1;
			V_pf_status.since = time_second;

			CPU_FOREACH(cpu)
				V_pf_stateid[cpu] = time_second;

			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
		}
		break;

	case DIOCSTOP:
		sx_xlock(&pf_ioctl_lock);
		if (!V_pf_status.running)
			error = ENOENT;
		else {
			V_pf_status.running = 0;
			error = dehook_pf();
			if (error) {
				V_pf_status.running = 1;
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: pfil unregistration failed\n"));
			}
			V_pf_status.since = time_second;
			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
		}
		break;

	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail;
		struct pf_pooladdr	*pa;
		struct pfi_kif		*kif = NULL;
		int			 rs_num;

		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}
#ifndef INET
		if (pr->rule.af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (pr->rule.af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */

		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
		bcopy(&pr->rule, rule, sizeof(struct pf_rule));
		if (rule->ifname[0])
			kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
		rule->states_cur = counter_u64_alloc(M_WAITOK);
		rule->states_tot = counter_u64_alloc(M_WAITOK);
		rule->src_nodes = counter_u64_alloc(M_WAITOK);
		rule->cuid = td->td_ucred->cr_ruid;
		rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
		TAILQ_INIT(&rule->rpool.list);

#define	ERROUT(x)	{ error = (x); goto DIOCADDRULE_error; }

		PF_RULES_WLOCK();
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL)
			ERROUT(EINVAL);
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX)
			ERROUT(EINVAL);
		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
			DPFPRINTF(PF_DEBUG_MISC,
			    ("ticket: %d != [%d]%d\n", pr->ticket, rs_num,
			    ruleset->rules[rs_num].inactive.ticket));
			ERROUT(EBUSY);
		}
		if (pr->pool_ticket != V_ticket_pabuf) {
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pool_ticket: %d != %d\n", pr->pool_ticket,
			    V_ticket_pabuf));
			ERROUT(EBUSY);
		}

		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;
		if (rule->ifname[0]) {
			rule->kif = pfi_kif_attach(kif, rule->ifname);
			pfi_kif_ref(rule->kif);
		} else
			rule->kif = NULL;

		if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
			error = EBUSY;

#ifdef ALTQ
		/* set queue IDs */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
			else if (rule->pqname[0] != 0) {
				if ((rule->pqid =
				    pf_qname2qid(rule->pqname)) == 0)
					error = EBUSY;
			} else
				rule->pqid = rule->qid;
		}
#endif
		if (rule->tagname[0])
			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
				error = EBUSY;
		if (rule->match_tagname[0])
			if ((rule->match_tag =
			    pf_tagname2tag(rule->match_tagname)) == 0)
				error = EBUSY;
		if (rule->rt && !rule->direction)
			error = EINVAL;
		if (!rule->log)
			rule->logif = 0;
		if (rule->logif >= PFLOGIFS_MAX)
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
			error = ENOMEM;
		if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
			error = ENOMEM;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		if (rule->scrub_flags & PFSTATE_SETPRIO &&
		    (rule->set_prio[0] > PF_PRIO_MAX ||
		    rule->set_prio[1] > PF_PRIO_MAX))
			error = EINVAL;
		TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
			if (pa->addr.type == PF_ADDR_TABLE) {
				pa->addr.p.tbl = pfr_attach_table(ruleset,
				    pa->addr.v.tblname);
				if (pa->addr.p.tbl == NULL)
					error = ENOMEM;
			}

		rule->overload_tbl = NULL;
		if (rule->overload_tblname[0]) {
			if ((rule->overload_tbl = pfr_attach_table(ruleset,
			    rule->overload_tblname)) == NULL)
				error = EINVAL;
			else
				rule->overload_tbl->pfrkt_flags |=
				    PFR_TFLAG_ACTIVE;
		}

		pf_mv_pool(&V_pf_pabuf, &rule->rpool.list);
		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
		    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
		    (rule->rt > PF_NOPFROUTE)) &&
		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
			error = EINVAL;

		if (error) {
			pf_free_rule(rule);
			PF_RULES_WUNLOCK();
			break;
		}

		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
		rule->evaluations = rule->packets[0] = rule->packets[1] =
		    rule->bytes[0] = rule->bytes[1] = 0;
		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
		    rule, entries);
		ruleset->rules[rs_num].inactive.rcount++;
		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCADDRULE_error:
		PF_RULES_WUNLOCK();
		counter_u64_free(rule->states_cur);
		counter_u64_free(rule->states_tot);
		counter_u64_free(rule->src_nodes);
		free(rule, M_PFRULE);
		if (kif)
			free(kif, PFI_MTYPE);
		break;
	}

	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;
		int			 rs_num;

		PF_RULES_WLOCK();
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 rs_num, i;

		PF_RULES_WLOCK();
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}
		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
		pr->rule.u_states_cur = counter_u64_fetch(rule->states_cur);
		pr->rule.u_states_tot = counter_u64_fetch(rule->states_tot);
		pr->rule.u_src_nodes = counter_u64_fetch(rule->src_nodes);
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}
		pf_addr_copyout(&pr->rule.src.addr);
		pf_addr_copyout(&pr->rule.dst.addr);
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = -1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
			counter_u64_zero(rule->states_tot);
		}
		PF_RULES_WUNLOCK();
		break;
	}

1399         case DIOCCHANGERULE: {
1400                 struct pfioc_rule       *pcr = (struct pfioc_rule *)addr;
1401                 struct pf_ruleset       *ruleset;
1402                 struct pf_rule          *oldrule = NULL, *newrule = NULL;
1403                 struct pfi_kif          *kif = NULL;
1404                 struct pf_pooladdr      *pa;
1405                 u_int32_t                nr = 0;
1406                 int                      rs_num;
1407
1408                 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1409                     pcr->action > PF_CHANGE_GET_TICKET) {
1410                         error = EINVAL;
1411                         break;
1412                 }
1413                 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1414                         error = EINVAL;
1415                         break;
1416                 }
1417
1418                 if (pcr->action != PF_CHANGE_REMOVE) {
1419 #ifndef INET
1420                         if (pcr->rule.af == AF_INET) {
1421                                 error = EAFNOSUPPORT;
1422                                 break;
1423                         }
1424 #endif /* INET */
1425 #ifndef INET6
1426                         if (pcr->rule.af == AF_INET6) {
1427                                 error = EAFNOSUPPORT;
1428                                 break;
1429                         }
1430 #endif /* INET6 */
1431                         newrule = malloc(sizeof(*newrule), M_PFRULE, M_WAITOK);
1432                         bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1433                         if (newrule->ifname[0])
1434                                 kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
1435                         newrule->states_cur = counter_u64_alloc(M_WAITOK);
1436                         newrule->states_tot = counter_u64_alloc(M_WAITOK);
1437                         newrule->src_nodes = counter_u64_alloc(M_WAITOK);
1438                         newrule->cuid = td->td_ucred->cr_ruid;
1439                         newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
1440                         TAILQ_INIT(&newrule->rpool.list);
1441                 }
1442
1443 #define ERROUT(x)       { error = (x); goto DIOCCHANGERULE_error; }
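/*
 * ERROUT() records the error and jumps to the common cleanup below, which
 * drops the rules lock and frees any newly allocated rule and kif.
 */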
1444
1445                 PF_RULES_WLOCK();
1446                 if (!(pcr->action == PF_CHANGE_REMOVE ||
1447                     pcr->action == PF_CHANGE_GET_TICKET) &&
1448                     pcr->pool_ticket != V_ticket_pabuf)
1449                         ERROUT(EBUSY);
1450
1451                 ruleset = pf_find_ruleset(pcr->anchor);
1452                 if (ruleset == NULL)
1453                         ERROUT(EINVAL);
1454
1455                 rs_num = pf_get_ruleset_number(pcr->rule.action);
1456                 if (rs_num >= PF_RULESET_MAX)
1457                         ERROUT(EINVAL);
1458
1459                 if (pcr->action == PF_CHANGE_GET_TICKET) {
1460                         pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1461                         ERROUT(0);
1462                 } else if (pcr->ticket !=
1463                     ruleset->rules[rs_num].active.ticket)
1464                         ERROUT(EINVAL);
1465
1466                 if (pcr->action != PF_CHANGE_REMOVE) {
1467                         if (newrule->ifname[0]) {
1468                                 newrule->kif = pfi_kif_attach(kif,
1469                                     newrule->ifname);
1470                                 pfi_kif_ref(newrule->kif);
1471                         } else
1472                                 newrule->kif = NULL;
1473
1474                         if (newrule->rtableid > 0 &&
1475                             newrule->rtableid >= rt_numfibs)
1476                                 error = EBUSY;
1477
1478 #ifdef ALTQ
1479                         /* set queue IDs */
1480                         if (newrule->qname[0] != 0) {
1481                                 if ((newrule->qid =
1482                                     pf_qname2qid(newrule->qname)) == 0)
1483                                         error = EBUSY;
1484                                 else if (newrule->pqname[0] != 0) {
1485                                         if ((newrule->pqid =
1486                                             pf_qname2qid(newrule->pqname)) == 0)
1487                                                 error = EBUSY;
1488                                 } else
1489                                         newrule->pqid = newrule->qid;
1490                         }
1491 #endif /* ALTQ */
1492                         if (newrule->tagname[0])
1493                                 if ((newrule->tag =
1494                                     pf_tagname2tag(newrule->tagname)) == 0)
1495                                         error = EBUSY;
1496                         if (newrule->match_tagname[0])
1497                                 if ((newrule->match_tag = pf_tagname2tag(
1498                                     newrule->match_tagname)) == 0)
1499                                         error = EBUSY;
1500                         if (newrule->rt && !newrule->direction)
1501                                 error = EINVAL;
1502                         if (!newrule->log)
1503                                 newrule->logif = 0;
1504                         if (newrule->logif >= PFLOGIFS_MAX)
1505                                 error = EINVAL;
1506                         if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
1507                                 error = ENOMEM;
1508                         if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
1509                                 error = ENOMEM;
1510                         if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1511                                 error = EINVAL;
1512                         TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
1513                                 if (pa->addr.type == PF_ADDR_TABLE) {
1514                                         pa->addr.p.tbl =
1515                                             pfr_attach_table(ruleset,
1516                                             pa->addr.v.tblname);
1517                                         if (pa->addr.p.tbl == NULL)
1518                                                 error = ENOMEM;
1519                                 }
1520
1521                         newrule->overload_tbl = NULL;
1522                         if (newrule->overload_tblname[0]) {
1523                                 if ((newrule->overload_tbl = pfr_attach_table(
1524                                     ruleset, newrule->overload_tblname)) ==
1525                                     NULL)
1526                                         error = EINVAL;
1527                                 else
1528                                         newrule->overload_tbl->pfrkt_flags |=
1529                                             PFR_TFLAG_ACTIVE;
1530                         }
1531
1532                         pf_mv_pool(&V_pf_pabuf, &newrule->rpool.list);
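                             /*
                              * NAT, redirect, binat and route-to rules outside
                              * of an anchor must have at least one pool
                              * address to translate or route to.
                              */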
1533                         if ((newrule->action == PF_NAT ||
1534                             newrule->action == PF_RDR ||
1535                             newrule->action == PF_BINAT ||
1536                             newrule->rt > PF_NOPFROUTE) &&
1537                             !newrule->anchor &&
1538                             TAILQ_FIRST(&newrule->rpool.list) == NULL)
1539                                 error = EINVAL;
1540
1541                         if (error) {
1542                                 pf_free_rule(newrule);
1543                                 PF_RULES_WUNLOCK();
1544                                 break;
1545                         }
1546
1547                         newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1548                         newrule->evaluations = 0;
1549                         newrule->packets[0] = newrule->packets[1] = 0;
1550                         newrule->bytes[0] = newrule->bytes[1] = 0;
1551                 }
1552                 pf_empty_pool(&V_pf_pabuf);
1553
1554                 if (pcr->action == PF_CHANGE_ADD_HEAD)
1555                         oldrule = TAILQ_FIRST(
1556                             ruleset->rules[rs_num].active.ptr);
1557                 else if (pcr->action == PF_CHANGE_ADD_TAIL)
1558                         oldrule = TAILQ_LAST(
1559                             ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1560                 else {
1561                         oldrule = TAILQ_FIRST(
1562                             ruleset->rules[rs_num].active.ptr);
1563                         while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1564                                 oldrule = TAILQ_NEXT(oldrule, entries);
1565                         if (oldrule == NULL) {
1566                                 if (newrule != NULL)
1567                                         pf_free_rule(newrule);
1568                                 PF_RULES_WUNLOCK();
1569                                 error = EINVAL;
1570                                 break;
1571                         }
1572                 }
1573
1574                 if (pcr->action == PF_CHANGE_REMOVE) {
1575                         pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
1576                             oldrule);
1577                         ruleset->rules[rs_num].active.rcount--;
1578                 } else {
1579                         if (oldrule == NULL)
1580                                 TAILQ_INSERT_TAIL(
1581                                     ruleset->rules[rs_num].active.ptr,
1582                                     newrule, entries);
1583                         else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1584                             pcr->action == PF_CHANGE_ADD_BEFORE)
1585                                 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1586                         else
1587                                 TAILQ_INSERT_AFTER(
1588                                     ruleset->rules[rs_num].active.ptr,
1589                                     oldrule, newrule, entries);
1590                         ruleset->rules[rs_num].active.rcount++;
1591                 }
1592
1593                 nr = 0;
1594                 TAILQ_FOREACH(oldrule,
1595                     ruleset->rules[rs_num].active.ptr, entries)
1596                         oldrule->nr = nr++;
1597
1598                 ruleset->rules[rs_num].active.ticket++;
1599
1600                 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1601                 pf_remove_if_empty_ruleset(ruleset);
1602
1603                 PF_RULES_WUNLOCK();
1604                 break;
1605
1606 #undef ERROUT
1607 DIOCCHANGERULE_error:
1608                 PF_RULES_WUNLOCK();
1609                 if (newrule != NULL) {
1610                         counter_u64_free(newrule->states_cur);
1611                         counter_u64_free(newrule->states_tot);
1612                         counter_u64_free(newrule->src_nodes);
1613                         free(newrule, M_PFRULE);
1614                 }
1615                 if (kif != NULL)
1616                         free(kif, PFI_MTYPE);
1617                 break;
1618         }
1619
1620         case DIOCCLRSTATES: {
1621                 struct pf_state         *s;
1622                 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1623                 u_int                    i, killed = 0;
1624
1625                 for (i = 0; i <= pf_hashmask; i++) {
1626                         struct pf_idhash *ih = &V_pf_idhash[i];
1627
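                             /*
                              * pf_unlink_state() returns with the hash row
                              * lock dropped, so the list iteration cannot
                              * simply continue; restart the scan of this
                              * bucket after every kill.
                              */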
1628 relock_DIOCCLRSTATES:
1629                         PF_HASHROW_LOCK(ih);
1630                         LIST_FOREACH(s, &ih->states, entry)
1631                                 if (!psk->psk_ifname[0] ||
1632                                     !strcmp(psk->psk_ifname,
1633                                     s->kif->pfik_name)) {
1634                                         /*
1635                                          * Don't send out individual
1636                                          * delete messages.
1637                                          */
1638                                         s->state_flags |= PFSTATE_NOSYNC;
1639                                         pf_unlink_state(s, PF_ENTER_LOCKED);
1640                                         killed++;
1641                                         goto relock_DIOCCLRSTATES;
1642                                 }
1643                         PF_HASHROW_UNLOCK(ih);
1644                 }
1645                 psk->psk_killed = killed;
1646                 if (pfsync_clear_states_ptr != NULL)
1647                         pfsync_clear_states_ptr(V_pf_status.hostid, psk->psk_ifname);
1648                 break;
1649         }
1650
1651         case DIOCKILLSTATES: {
1652                 struct pf_state         *s;
1653                 struct pf_state_key     *sk;
1654                 struct pf_addr          *srcaddr, *dstaddr;
1655                 u_int16_t                srcport, dstport;
1656                 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1657                 u_int                    i, killed = 0;
1658
1659                 if (psk->psk_pfcmp.id) {
1660                         if (psk->psk_pfcmp.creatorid == 0)
1661                                 psk->psk_pfcmp.creatorid = V_pf_status.hostid;
1662                         if ((s = pf_find_state_byid(psk->psk_pfcmp.id,
1663                             psk->psk_pfcmp.creatorid))) {
1664                                 pf_unlink_state(s, PF_ENTER_LOCKED);
1665                                 psk->psk_killed = 1;
1666                         }
1667                         break;
1668                 }
1669
1670                 for (i = 0; i <= pf_hashmask; i++) {
1671                         struct pf_idhash *ih = &V_pf_idhash[i];
1672
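                             /*
                              * Compare against the wire-side state key; for
                              * outbound states the source address and port are
                              * stored at index 1, so they are swapped below
                              * before matching the user-supplied filter.
                              * Unlinking a state drops the hash row lock, so
                              * the bucket scan is restarted after every kill.
                              */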
1673 relock_DIOCKILLSTATES:
1674                         PF_HASHROW_LOCK(ih);
1675                         LIST_FOREACH(s, &ih->states, entry) {
1676                                 sk = s->key[PF_SK_WIRE];
1677                                 if (s->direction == PF_OUT) {
1678                                         srcaddr = &sk->addr[1];
1679                                         dstaddr = &sk->addr[0];
1680                                         srcport = sk->port[1];
1681                                         dstport = sk->port[0];
1682                                 } else {
1683                                         srcaddr = &sk->addr[0];
1684                                         dstaddr = &sk->addr[1];
1685                                         srcport = sk->port[0];
1686                                         dstport = sk->port[1];
1687                                 }
1688
1689                                 if ((!psk->psk_af || sk->af == psk->psk_af) &&
1690                                     (!psk->psk_proto || psk->psk_proto ==
1691                                     sk->proto) &&
1692                                     PF_MATCHA(psk->psk_src.neg,
1693                                     &psk->psk_src.addr.v.a.addr,
1694                                     &psk->psk_src.addr.v.a.mask,
1695                                     srcaddr, sk->af) &&
1696                                     PF_MATCHA(psk->psk_dst.neg,
1697                                     &psk->psk_dst.addr.v.a.addr,
1698                                     &psk->psk_dst.addr.v.a.mask,
1699                                     dstaddr, sk->af) &&
1700                                     (psk->psk_src.port_op == 0 ||
1701                                     pf_match_port(psk->psk_src.port_op,
1702                                     psk->psk_src.port[0], psk->psk_src.port[1],
1703                                     srcport)) &&
1704                                     (psk->psk_dst.port_op == 0 ||
1705                                     pf_match_port(psk->psk_dst.port_op,
1706                                     psk->psk_dst.port[0], psk->psk_dst.port[1],
1707                                     dstport)) &&
1708                                     (!psk->psk_label[0] ||
1709                                     (s->rule.ptr->label[0] &&
1710                                     !strcmp(psk->psk_label,
1711                                     s->rule.ptr->label))) &&
1712                                     (!psk->psk_ifname[0] ||
1713                                     !strcmp(psk->psk_ifname,
1714                                     s->kif->pfik_name))) {
1715                                         pf_unlink_state(s, PF_ENTER_LOCKED);
1716                                         killed++;
1717                                         goto relock_DIOCKILLSTATES;
1718                                 }
1719                         }
1720                         PF_HASHROW_UNLOCK(ih);
1721                 }
1722                 psk->psk_killed = killed;
1723                 break;
1724         }
1725
1726         case DIOCADDSTATE: {
1727                 struct pfioc_state      *ps = (struct pfioc_state *)addr;
1728                 struct pfsync_state     *sp = &ps->state;
1729
1730                 if (sp->timeout >= PFTM_MAX) {
1731                         error = EINVAL;
1732                         break;
1733                 }
1734                 if (pfsync_state_import_ptr != NULL) {
1735                         PF_RULES_RLOCK();
1736                         error = pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL);
1737                         PF_RULES_RUNLOCK();
1738                 } else
1739                         error = EOPNOTSUPP;
1740                 break;
1741         }
1742
1743         case DIOCGETSTATE: {
1744                 struct pfioc_state      *ps = (struct pfioc_state *)addr;
1745                 struct pf_state         *s;
1746
1747                 s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
1748                 if (s == NULL) {
1749                         error = ENOENT;
1750                         break;
1751                 }
1752
1753                 pfsync_state_export(&ps->state, s);
1754                 PF_STATE_UNLOCK(s);
1755                 break;
1756         }
1757
1758         case DIOCGETSTATES: {
1759                 struct pfioc_states     *ps = (struct pfioc_states *)addr;
1760                 struct pf_state         *s;
1761                 struct pfsync_state     *pstore, *p;
1762                 int i, nr;
1763
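                     /*
                      * Two-step protocol: a call with ps_len == 0 only reports
                      * the buffer size needed for the current number of
                      * states; otherwise export as many states as fit into the
                      * supplied buffer and return the length actually used.
                      */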
1764                 if (ps->ps_len == 0) {
1765                         nr = uma_zone_get_cur(V_pf_state_z);
1766                         ps->ps_len = sizeof(struct pfsync_state) * nr;
1767                         break;
1768                 }
1769
1770                 p = pstore = malloc(ps->ps_len, M_TEMP, M_WAITOK);
1771                 nr = 0;
1772
1773                 for (i = 0; i <= pf_hashmask; i++) {
1774                         struct pf_idhash *ih = &V_pf_idhash[i];
1775
1776                         PF_HASHROW_LOCK(ih);
1777                         LIST_FOREACH(s, &ih->states, entry) {
1778
1779                                 if (s->timeout == PFTM_UNLINKED)
1780                                         continue;
1781
1782                                 if ((nr+1) * sizeof(*p) > ps->ps_len) {
1783                                         PF_HASHROW_UNLOCK(ih);
1784                                         goto DIOCGETSTATES_full;
1785                                 }
1786                                 pfsync_state_export(p, s);
1787                                 p++;
1788                                 nr++;
1789                         }
1790                         PF_HASHROW_UNLOCK(ih);
1791                 }
1792 DIOCGETSTATES_full:
1793                 error = copyout(pstore, ps->ps_states,
1794                     sizeof(struct pfsync_state) * nr);
1795                 if (error) {
1796                         free(pstore, M_TEMP);
1797                         break;
1798                 }
1799                 ps->ps_len = sizeof(struct pfsync_state) * nr;
1800                 free(pstore, M_TEMP);
1801
1802                 break;
1803         }
1804
1805         case DIOCGETSTATUS: {
1806                 struct pf_status *s = (struct pf_status *)addr;
1807
1808                 PF_RULES_RLOCK();
1809                 s->running = V_pf_status.running;
1810                 s->since   = V_pf_status.since;
1811                 s->debug   = V_pf_status.debug;
1812                 s->hostid  = V_pf_status.hostid;
1813                 s->states  = V_pf_status.states;
1814                 s->src_nodes = V_pf_status.src_nodes;
1815
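                     /*
                      * The statistics are kept in per-CPU counter(9) objects;
                      * fetch them into the flat arrays of the pf_status
                      * structure handed back to userland.
                      */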
1816                 for (int i = 0; i < PFRES_MAX; i++)
1817                         s->counters[i] =
1818                             counter_u64_fetch(V_pf_status.counters[i]);
1819                 for (int i = 0; i < LCNT_MAX; i++)
1820                         s->lcounters[i] =
1821                             counter_u64_fetch(V_pf_status.lcounters[i]);
1822                 for (int i = 0; i < FCNT_MAX; i++)
1823                         s->fcounters[i] =
1824                             counter_u64_fetch(V_pf_status.fcounters[i]);
1825                 for (int i = 0; i < SCNT_MAX; i++)
1826                         s->scounters[i] =
1827                             counter_u64_fetch(V_pf_status.scounters[i]);
1828
1829                 bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ);
1830                 bcopy(V_pf_status.pf_chksum, s->pf_chksum,
1831                     PF_MD5_DIGEST_LENGTH);
1832
1833                 pfi_update_status(s->ifname, s);
1834                 PF_RULES_RUNLOCK();
1835                 break;
1836         }
1837
1838         case DIOCSETSTATUSIF: {
1839                 struct pfioc_if *pi = (struct pfioc_if *)addr;
1840
1841                 if (pi->ifname[0] == 0) {
1842                         bzero(V_pf_status.ifname, IFNAMSIZ);
1843                         break;
1844                 }
1845                 PF_RULES_WLOCK();
1846                 strlcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
1847                 PF_RULES_WUNLOCK();
1848                 break;
1849         }
1850
1851         case DIOCCLRSTATUS: {
1852                 PF_RULES_WLOCK();
1853                 for (int i = 0; i < PFRES_MAX; i++)
1854                         counter_u64_zero(V_pf_status.counters[i]);
1855                 for (int i = 0; i < FCNT_MAX; i++)
1856                         counter_u64_zero(V_pf_status.fcounters[i]);
1857                 for (int i = 0; i < SCNT_MAX; i++)
1858                         counter_u64_zero(V_pf_status.scounters[i]);
1859                 for (int i = 0; i < LCNT_MAX; i++)
1860                         counter_u64_zero(V_pf_status.lcounters[i]);
1861                 V_pf_status.since = time_second;
1862                 if (*V_pf_status.ifname)
1863                         pfi_update_status(V_pf_status.ifname, NULL);
1864                 PF_RULES_WUNLOCK();
1865                 break;
1866         }
1867
1868         case DIOCNATLOOK: {
1869                 struct pfioc_natlook    *pnl = (struct pfioc_natlook *)addr;
1870                 struct pf_state_key     *sk;
1871                 struct pf_state         *state;
1872                 struct pf_state_key_cmp  key;
1873                 int                      m = 0, direction = pnl->direction;
1874                 int                      sidx, didx;
1875
1876                 /* NATLOOK src and dst are reversed, so reverse sidx/didx */
1877                 sidx = (direction == PF_IN) ? 1 : 0;
1878                 didx = (direction == PF_IN) ? 0 : 1;
1879
1880                 if (!pnl->proto ||
1881                     PF_AZERO(&pnl->saddr, pnl->af) ||
1882                     PF_AZERO(&pnl->daddr, pnl->af) ||
1883                     ((pnl->proto == IPPROTO_TCP ||
1884                     pnl->proto == IPPROTO_UDP) &&
1885                     (!pnl->dport || !pnl->sport)))
1886                         error = EINVAL;
1887                 else {
1888                         bzero(&key, sizeof(key));
1889                         key.af = pnl->af;
1890                         key.proto = pnl->proto;
1891                         PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
1892                         key.port[sidx] = pnl->sport;
1893                         PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
1894                         key.port[didx] = pnl->dport;
1895
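                             /*
                              * pf_find_state_all() reports the number of
                              * matching states in m; a NAT lookup is only
                              * meaningful for a unique match.
                              */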
1896                         state = pf_find_state_all(&key, direction, &m);
1897
1898                         if (m > 1)
1899                                 error = E2BIG;  /* more than one state */
1900                         else if (state != NULL) {
1901                                 /* XXXGL: not locked read */
1902                                 sk = state->key[sidx];
1903                                 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
1904                                 pnl->rsport = sk->port[sidx];
1905                                 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
1906                                 pnl->rdport = sk->port[didx];
1907                         } else
1908                                 error = ENOENT;
1909                 }
1910                 break;
1911         }
1912
1913         case DIOCSETTIMEOUT: {
1914                 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1915                 int              old;
1916
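                     /*
                      * Set the new default timeout and hand the previous value
                      * back in pt->seconds.  Shortening PFTM_INTERVAL wakes
                      * the purge thread so it picks up the new interval right
                      * away.
                      */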
1917                 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1918                     pt->seconds < 0) {
1919                         error = EINVAL;
1920                         break;
1921                 }
1922                 PF_RULES_WLOCK();
1923                 old = V_pf_default_rule.timeout[pt->timeout];
1924                 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
1925                         pt->seconds = 1;
1926                 V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
1927                 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
1928                         wakeup(pf_purge_thread);
1929                 pt->seconds = old;
1930                 PF_RULES_WUNLOCK();
1931                 break;
1932         }
1933
1934         case DIOCGETTIMEOUT: {
1935                 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1936
1937                 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1938                         error = EINVAL;
1939                         break;
1940                 }
1941                 PF_RULES_RLOCK();
1942                 pt->seconds = V_pf_default_rule.timeout[pt->timeout];
1943                 PF_RULES_RUNLOCK();
1944                 break;
1945         }
1946
1947         case DIOCGETLIMIT: {
1948                 struct pfioc_limit      *pl = (struct pfioc_limit *)addr;
1949
1950                 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1951                         error = EINVAL;
1952                         break;
1953                 }
1954                 PF_RULES_RLOCK();
1955                 pl->limit = V_pf_limits[pl->index].limit;
1956                 PF_RULES_RUNLOCK();
1957                 break;
1958         }
1959
1960         case DIOCSETLIMIT: {
1961                 struct pfioc_limit      *pl = (struct pfioc_limit *)addr;
1962                 int                      old_limit;
1963
1964                 PF_RULES_WLOCK();
1965                 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1966                     V_pf_limits[pl->index].zone == NULL) {
1967                         PF_RULES_WUNLOCK();
1968                         error = EINVAL;
1969                         break;
1970                 }
1971                 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
1972                 old_limit = V_pf_limits[pl->index].limit;
1973                 V_pf_limits[pl->index].limit = pl->limit;
1974                 pl->limit = old_limit;
1975                 PF_RULES_WUNLOCK();
1976                 break;
1977         }
1978
1979         case DIOCSETDEBUG: {
1980                 u_int32_t       *level = (u_int32_t *)addr;
1981
1982                 PF_RULES_WLOCK();
1983                 V_pf_status.debug = *level;
1984                 PF_RULES_WUNLOCK();
1985                 break;
1986         }
1987
1988         case DIOCCLRRULECTRS: {
1989                 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1990                 struct pf_ruleset       *ruleset = &pf_main_ruleset;
1991                 struct pf_rule          *rule;
1992
1993                 PF_RULES_WLOCK();
1994                 TAILQ_FOREACH(rule,
1995                     ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
1996                         rule->evaluations = 0;
1997                         rule->packets[0] = rule->packets[1] = 0;
1998                         rule->bytes[0] = rule->bytes[1] = 0;
1999                 }
2000                 PF_RULES_WUNLOCK();
2001                 break;
2002         }
2003
2004         case DIOCGIFSPEED: {
2005                 struct pf_ifspeed       *psp = (struct pf_ifspeed *)addr;
2006                 struct pf_ifspeed       ps;
2007                 struct ifnet            *ifp;
2008
2009                 if (psp->ifname[0] != 0) {
2010                         /* Can we completely trust user-land? */
2011                         strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
2012                         ifp = ifunit(ps.ifname);
2013                         if (ifp != NULL)
2014                                 psp->baudrate = ifp->if_baudrate;
2015                         else
2016                                 error = EINVAL;
2017                 } else
2018                         error = EINVAL;
2019                 break;
2020         }
2021
2022 #ifdef ALTQ
2023         case DIOCSTARTALTQ: {
2024                 struct pf_altq          *altq;
2025
2026                 PF_RULES_WLOCK();
2027                 /* enable all altq interfaces on active list */
2028                 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
2029                         if (altq->qname[0] == 0 && (altq->local_flags &
2030                             PFALTQ_FLAG_IF_REMOVED) == 0) {
2031                                 error = pf_enable_altq(altq);
2032                                 if (error != 0)
2033                                         break;
2034                         }
2035                 }
2036                 if (error == 0)
2037                         V_pf_altq_running = 1;
2038                 PF_RULES_WUNLOCK();
2039                 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2040                 break;
2041         }
2042
2043         case DIOCSTOPALTQ: {
2044                 struct pf_altq          *altq;
2045
2046                 PF_RULES_WLOCK();
2047                 /* disable all altq interfaces on active list */
2048                 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
2049                         if (altq->qname[0] == 0 && (altq->local_flags &
2050                             PFALTQ_FLAG_IF_REMOVED) == 0) {
2051                                 error = pf_disable_altq(altq);
2052                                 if (error != 0)
2053                                         break;
2054                         }
2055                 }
2056                 if (error == 0)
2057                         V_pf_altq_running = 0;
2058                 PF_RULES_WUNLOCK();
2059                 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2060                 break;
2061         }
2062
2063         case DIOCADDALTQ: {
2064                 struct pfioc_altq       *pa = (struct pfioc_altq *)addr;
2065                 struct pf_altq          *altq, *a;
2066                 struct ifnet            *ifp;
2067
2068                 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK);
2069                 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
2070                 altq->local_flags = 0;
2071
2072                 PF_RULES_WLOCK();
2073                 if (pa->ticket != V_ticket_altqs_inactive) {
2074                         PF_RULES_WUNLOCK();
2075                         free(altq, M_PFALTQ);
2076                         error = EBUSY;
2077                         break;
2078                 }
2079
2080                 /*
2081                  * if this is for a queue, find the discipline and
2082                  * copy the necessary fields
2083                  */
2084                 if (altq->qname[0] != 0) {
2085                         if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2086                                 PF_RULES_WUNLOCK();
2087                                 error = EBUSY;
2088                                 free(altq, M_PFALTQ);
2089                                 break;
2090                         }
2091                         altq->altq_disc = NULL;
2092                         TAILQ_FOREACH(a, V_pf_altqs_inactive, entries) {
2093                                 if (strncmp(a->ifname, altq->ifname,
2094                                     IFNAMSIZ) == 0 && a->qname[0] == 0) {
2095                                         altq->altq_disc = a->altq_disc;
2096                                         break;
2097                                 }
2098                         }
2099                 }
2100
2101                 if ((ifp = ifunit(altq->ifname)) == NULL)
2102                         altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
2103                 else
2104                         error = altq_add(altq);
2105
2106                 if (error) {
2107                         PF_RULES_WUNLOCK();
2108                         free(altq, M_PFALTQ);
2109                         break;
2110                 }
2111
2112                 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
2113                 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2114                 PF_RULES_WUNLOCK();
2115                 break;
2116         }
2117
2118         case DIOCGETALTQS: {
2119                 struct pfioc_altq       *pa = (struct pfioc_altq *)addr;
2120                 struct pf_altq          *altq;
2121
2122                 PF_RULES_RLOCK();
2123                 pa->nr = 0;
2124                 TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
2125                         pa->nr++;
2126                 pa->ticket = V_ticket_altqs_active;
2127                 PF_RULES_RUNLOCK();
2128                 break;
2129         }
2130
2131         case DIOCGETALTQ: {
2132                 struct pfioc_altq       *pa = (struct pfioc_altq *)addr;
2133                 struct pf_altq          *altq;
2134                 u_int32_t                nr;
2135
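                     /*
                      * Return the pa->nr'th entry of the active ALTQ list.
                      * The ticket must match the one handed out by
                      * DIOCGETALTQS, so the caller's index still refers to the
                      * same list.
                      */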
2136                 PF_RULES_RLOCK();
2137                 if (pa->ticket != V_ticket_altqs_active) {
2138                         PF_RULES_RUNLOCK();
2139                         error = EBUSY;
2140                         break;
2141                 }
2142                 nr = 0;
2143                 altq = TAILQ_FIRST(V_pf_altqs_active);
2144                 while ((altq != NULL) && (nr < pa->nr)) {
2145                         altq = TAILQ_NEXT(altq, entries);
2146                         nr++;
2147                 }
2148                 if (altq == NULL) {
2149                         PF_RULES_RUNLOCK();
2150                         error = EBUSY;
2151                         break;
2152                 }
2153                 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2154                 PF_RULES_RUNLOCK();
2155                 break;
2156         }
2157
2158         case DIOCCHANGEALTQ:
2159                 /* CHANGEALTQ not supported yet! */
2160                 error = ENODEV;
2161                 break;
2162
2163         case DIOCGETQSTATS: {
2164                 struct pfioc_qstats     *pq = (struct pfioc_qstats *)addr;
2165                 struct pf_altq          *altq;
2166                 u_int32_t                nr;
2167                 int                      nbytes;
2168
2169                 PF_RULES_RLOCK();
2170                 if (pq->ticket != V_ticket_altqs_active) {
2171                         PF_RULES_RUNLOCK();
2172                         error = EBUSY;
2173                         break;
2174                 }
2175                 nbytes = pq->nbytes;
2176                 nr = 0;
2177                 altq = TAILQ_FIRST(V_pf_altqs_active);
2178                 while ((altq != NULL) && (nr < pq->nr)) {
2179                         altq = TAILQ_NEXT(altq, entries);
2180                         nr++;
2181                 }
2182                 if (altq == NULL) {
2183                         PF_RULES_RUNLOCK();
2184                         error = EBUSY;
2185                         break;
2186                 }
2187
2188                 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
2189                         PF_RULES_RUNLOCK();
2190                         error = ENXIO;
2191                         break;
2192                 }
2193                 PF_RULES_RUNLOCK();
2194                 error = altq_getqstats(altq, pq->buf, &nbytes);
2195                 if (error == 0) {
2196                         pq->scheduler = altq->scheduler;
2197                         pq->nbytes = nbytes;
2198                 }
2199                 break;
2200         }
2201 #endif /* ALTQ */
2202
2203         case DIOCBEGINADDRS: {
2204                 struct pfioc_pooladdr   *pp = (struct pfioc_pooladdr *)addr;
2205
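                     /*
                      * Start a fresh pool address list: discard anything
                      * staged so far and hand out a new ticket.  Subsequent
                      * DIOCADDADDR calls must present this ticket; the staged
                      * addresses are consumed when the rule that owns the pool
                      * is installed.
                      */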
2206                 PF_RULES_WLOCK();
2207                 pf_empty_pool(&V_pf_pabuf);
2208                 pp->ticket = ++V_ticket_pabuf;
2209                 PF_RULES_WUNLOCK();
2210                 break;
2211         }
2212
2213         case DIOCADDADDR: {
2214                 struct pfioc_pooladdr   *pp = (struct pfioc_pooladdr *)addr;
2215                 struct pf_pooladdr      *pa;
2216                 struct pfi_kif          *kif = NULL;
2217
2218 #ifndef INET
2219                 if (pp->af == AF_INET) {
2220                         error = EAFNOSUPPORT;
2221                         break;
2222                 }
2223 #endif /* INET */
2224 #ifndef INET6
2225                 if (pp->af == AF_INET6) {
2226                         error = EAFNOSUPPORT;
2227                         break;
2228                 }
2229 #endif /* INET6 */
2230                 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2231                     pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2232                     pp->addr.addr.type != PF_ADDR_TABLE) {
2233                         error = EINVAL;
2234                         break;
2235                 }
2236                 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
2237                 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2238                 if (pa->ifname[0])
2239                         kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
2240                 PF_RULES_WLOCK();
2241                 if (pp->ticket != V_ticket_pabuf) {
2242                         PF_RULES_WUNLOCK();
2243                         if (pa->ifname[0])
2244                                 free(kif, PFI_MTYPE);
2245                         free(pa, M_PFRULE);
2246                         error = EBUSY;
2247                         break;
2248                 }
2249                 if (pa->ifname[0]) {
2250                         pa->kif = pfi_kif_attach(kif, pa->ifname);
2251                         pfi_kif_ref(pa->kif);
2252                 } else
2253                         pa->kif = NULL;
2254                 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
2255                     pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
2256                         if (pa->ifname[0])
2257                                 pfi_kif_unref(pa->kif);
2258                         PF_RULES_WUNLOCK();
2259                         free(pa, M_PFRULE);
2260                         break;
2261                 }
2262                 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
2263                 PF_RULES_WUNLOCK();
2264                 break;
2265         }
2266
2267         case DIOCGETADDRS: {
2268                 struct pfioc_pooladdr   *pp = (struct pfioc_pooladdr *)addr;
2269                 struct pf_pool          *pool;
2270                 struct pf_pooladdr      *pa;
2271
2272                 PF_RULES_RLOCK();
2273                 pp->nr = 0;
2274                 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2275                     pp->r_num, 0, 1, 0);
2276                 if (pool == NULL) {
2277                         PF_RULES_RUNLOCK();
2278                         error = EBUSY;
2279                         break;
2280                 }
2281                 TAILQ_FOREACH(pa, &pool->list, entries)
2282                         pp->nr++;
2283                 PF_RULES_RUNLOCK();
2284                 break;
2285         }
2286
2287         case DIOCGETADDR: {
2288                 struct pfioc_pooladdr   *pp = (struct pfioc_pooladdr *)addr;
2289                 struct pf_pool          *pool;
2290                 struct pf_pooladdr      *pa;
2291                 u_int32_t                nr = 0;
2292
2293                 PF_RULES_RLOCK();
2294                 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2295                     pp->r_num, 0, 1, 1);
2296                 if (pool == NULL) {
2297                         PF_RULES_RUNLOCK();
2298                         error = EBUSY;
2299                         break;
2300                 }
2301                 pa = TAILQ_FIRST(&pool->list);
2302                 while ((pa != NULL) && (nr < pp->nr)) {
2303                         pa = TAILQ_NEXT(pa, entries);
2304                         nr++;
2305                 }
2306                 if (pa == NULL) {
2307                         PF_RULES_RUNLOCK();
2308                         error = EBUSY;
2309                         break;
2310                 }
2311                 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2312                 pf_addr_copyout(&pp->addr.addr);
2313                 PF_RULES_RUNLOCK();
2314                 break;
2315         }
2316
2317         case DIOCCHANGEADDR: {
2318                 struct pfioc_pooladdr   *pca = (struct pfioc_pooladdr *)addr;
2319                 struct pf_pool          *pool;
2320                 struct pf_pooladdr      *oldpa = NULL, *newpa = NULL;
2321                 struct pf_ruleset       *ruleset;
2322                 struct pfi_kif          *kif = NULL;
2323
2324                 if (pca->action < PF_CHANGE_ADD_HEAD ||
2325                     pca->action > PF_CHANGE_REMOVE) {
2326                         error = EINVAL;
2327                         break;
2328                 }
2329                 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2330                     pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2331                     pca->addr.addr.type != PF_ADDR_TABLE) {
2332                         error = EINVAL;
2333                         break;
2334                 }
2335
2336                 if (pca->action != PF_CHANGE_REMOVE) {
2337 #ifndef INET
2338                         if (pca->af == AF_INET) {
2339                                 error = EAFNOSUPPORT;
2340                                 break;
2341                         }
2342 #endif /* INET */
2343 #ifndef INET6
2344                         if (pca->af == AF_INET6) {
2345                                 error = EAFNOSUPPORT;
2346                                 break;
2347                         }
2348 #endif /* INET6 */
2349                         newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
2350                         bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2351                         if (newpa->ifname[0])
2352                                 kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
2353                         newpa->kif = NULL;
2354                 }
2355
2356 #define ERROUT(x)       { error = (x); goto DIOCCHANGEADDR_error; }
2357                 PF_RULES_WLOCK();
2358                 ruleset = pf_find_ruleset(pca->anchor);
2359                 if (ruleset == NULL)
2360                         ERROUT(EBUSY);
2361
2362                 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2363                     pca->r_num, pca->r_last, 1, 1);
2364                 if (pool == NULL)
2365                         ERROUT(EBUSY);
2366
2367                 if (pca->action != PF_CHANGE_REMOVE) {
2368                         if (newpa->ifname[0]) {
2369                                 newpa->kif = pfi_kif_attach(kif, newpa->ifname);
2370                                 pfi_kif_ref(newpa->kif);
2371                                 kif = NULL;
2372                         }
2373
2374                         switch (newpa->addr.type) {
2375                         case PF_ADDR_DYNIFTL:
2376                                 error = pfi_dynaddr_setup(&newpa->addr,
2377                                     pca->af);
2378                                 break;
2379                         case PF_ADDR_TABLE:
2380                                 newpa->addr.p.tbl = pfr_attach_table(ruleset,
2381                                     newpa->addr.v.tblname);
2382                                 if (newpa->addr.p.tbl == NULL)
2383                                         error = ENOMEM;
2384                                 break;
2385                         }
2386                         if (error)
2387                                 goto DIOCCHANGEADDR_error;
2388                 }
2389
2390                 switch (pca->action) {
2391                 case PF_CHANGE_ADD_HEAD:
2392                         oldpa = TAILQ_FIRST(&pool->list);
2393                         break;
2394                 case PF_CHANGE_ADD_TAIL:
2395                         oldpa = TAILQ_LAST(&pool->list, pf_palist);
2396                         break;
2397                 default:
2398                         oldpa = TAILQ_FIRST(&pool->list);
2399                         for (int i = 0; oldpa && i < pca->nr; i++)
2400                                 oldpa = TAILQ_NEXT(oldpa, entries);
2401
2402                         if (oldpa == NULL)
2403                                 ERROUT(EINVAL);
2404                 }
2405
2406                 if (pca->action == PF_CHANGE_REMOVE) {
2407                         TAILQ_REMOVE(&pool->list, oldpa, entries);
2408                         switch (oldpa->addr.type) {
2409                         case PF_ADDR_DYNIFTL:
2410                                 pfi_dynaddr_remove(oldpa->addr.p.dyn);
2411                                 break;
2412                         case PF_ADDR_TABLE:
2413                                 pfr_detach_table(oldpa->addr.p.tbl);
2414                                 break;
2415                         }
2416                         if (oldpa->kif)
2417                                 pfi_kif_unref(oldpa->kif);
2418                         free(oldpa, M_PFRULE);
2419                 } else {
2420                         if (oldpa == NULL)
2421                                 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2422                         else if (pca->action == PF_CHANGE_ADD_HEAD ||
2423                             pca->action == PF_CHANGE_ADD_BEFORE)
2424                                 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2425                         else
2426                                 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2427                                     newpa, entries);
2428                 }
2429
2430                 pool->cur = TAILQ_FIRST(&pool->list);
2431                 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
2432                 PF_RULES_WUNLOCK();
2433                 break;
2434
2435 #undef ERROUT
2436 DIOCCHANGEADDR_error:
2437                 if (newpa != NULL) {
2438                         if (newpa->kif)
2439                                 pfi_kif_unref(newpa->kif);
2440                         free(newpa, M_PFRULE);
2441                 }
2442                 PF_RULES_WUNLOCK();
2443                 if (kif != NULL)
2444                         free(kif, PFI_MTYPE);
2445                 break;
2446         }
2447
2448         case DIOCGETRULESETS: {
2449                 struct pfioc_ruleset    *pr = (struct pfioc_ruleset *)addr;
2450                 struct pf_ruleset       *ruleset;
2451                 struct pf_anchor        *anchor;
2452
2453                 PF_RULES_RLOCK();
2454                 pr->path[sizeof(pr->path) - 1] = 0;
2455                 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2456                         PF_RULES_RUNLOCK();
2457                         error = ENOENT;
2458                         break;
2459                 }
2460                 pr->nr = 0;
2461                 if (ruleset->anchor == NULL) {
2462                         /* XXX kludge for pf_main_ruleset */
2463                         RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors)
2464                                 if (anchor->parent == NULL)
2465                                         pr->nr++;
2466                 } else {
2467                         RB_FOREACH(anchor, pf_anchor_node,
2468                             &ruleset->anchor->children)
2469                                 pr->nr++;
2470                 }
2471                 PF_RULES_RUNLOCK();
2472                 break;
2473         }
2474
2475         case DIOCGETRULESET: {
2476                 struct pfioc_ruleset    *pr = (struct pfioc_ruleset *)addr;
2477                 struct pf_ruleset       *ruleset;
2478                 struct pf_anchor        *anchor;
2479                 u_int32_t                nr = 0;
2480
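                     /*
                      * Return the name of the pr->nr'th child anchor under
                      * pr->path; userland obtains the count via
                      * DIOCGETRULESETS and iterates nr over it.
                      */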
2481                 PF_RULES_RLOCK();
2482                 pr->path[sizeof(pr->path) - 1] = 0;
2483                 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2484                         PF_RULES_RUNLOCK();
2485                         error = ENOENT;
2486                         break;
2487                 }
2488                 pr->name[0] = 0;
2489                 if (ruleset->anchor == NULL) {
2490                         /* XXX kludge for pf_main_ruleset */
2491                         RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors)
2492                                 if (anchor->parent == NULL && nr++ == pr->nr) {
2493                                         strlcpy(pr->name, anchor->name,
2494                                             sizeof(pr->name));
2495                                         break;
2496                                 }
2497                 } else {
2498                         RB_FOREACH(anchor, pf_anchor_node,
2499                             &ruleset->anchor->children)
2500                                 if (nr++ == pr->nr) {
2501                                         strlcpy(pr->name, anchor->name,
2502                                             sizeof(pr->name));
2503                                         break;
2504                                 }
2505                 }
2506                 if (!pr->name[0])
2507                         error = EBUSY;
2508                 PF_RULES_RUNLOCK();
2509                 break;
2510         }
2511
2512         case DIOCRCLRTABLES: {
2513                 struct pfioc_table *io = (struct pfioc_table *)addr;
2514
2515                 if (io->pfrio_esize != 0) {
2516                         error = ENODEV;
2517                         break;
2518                 }
2519                 PF_RULES_WLOCK();
2520                 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2521                     io->pfrio_flags | PFR_FLAG_USERIOCTL);
2522                 PF_RULES_WUNLOCK();
2523                 break;
2524         }
2525
2526         case DIOCRADDTABLES: {
2527                 struct pfioc_table *io = (struct pfioc_table *)addr;
2528                 struct pfr_table *pfrts;
2529                 size_t totlen;
2530
2531                 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2532                         error = ENODEV;
2533                         break;
2534                 }
2535
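                     /*
                      * Refuse element counts above pf_ioctl_maxcount, or
                      * requests whose total byte size would overflow, before
                      * allocating the copyin buffer.
                      */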
2536                 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
2537                     WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
2538                         error = ENOMEM;
2539                         break;
2540                 }
2541
2542                 totlen = io->pfrio_size * sizeof(struct pfr_table);
2543                 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
2544                     M_TEMP, M_WAITOK);
2545                 error = copyin(io->pfrio_buffer, pfrts, totlen);
2546                 if (error) {
2547                         free(pfrts, M_TEMP);
2548                         break;
2549                 }
2550                 PF_RULES_WLOCK();
2551                 error = pfr_add_tables(pfrts, io->pfrio_size,
2552                     &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2553                 PF_RULES_WUNLOCK();
2554                 free(pfrts, M_TEMP);
2555                 break;
2556         }
2557
2558         case DIOCRDELTABLES: {
2559                 struct pfioc_table *io = (struct pfioc_table *)addr;
2560                 struct pfr_table *pfrts;
2561                 size_t totlen;
2562
2563                 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2564                         error = ENODEV;
2565                         break;
2566                 }
2567
2568                 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
2569                     WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
2570                         error = ENOMEM;
2571                         break;
2572                 }
2573
2574                 totlen = io->pfrio_size * sizeof(struct pfr_table);
2575                 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
2576                     M_TEMP, M_WAITOK);
2577                 error = copyin(io->pfrio_buffer, pfrts, totlen);
2578                 if (error) {
2579                         free(pfrts, M_TEMP);
2580                         break;
2581                 }
2582                 PF_RULES_WLOCK();
2583                 error = pfr_del_tables(pfrts, io->pfrio_size,
2584                     &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2585                 PF_RULES_WUNLOCK();
2586                 free(pfrts, M_TEMP);
2587                 break;
2588         }
2589
2590         case DIOCRGETTABLES: {
2591                 struct pfioc_table *io = (struct pfioc_table *)addr;
2592                 struct pfr_table *pfrts;
2593                 size_t totlen, n;
2594
2595                 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2596                         error = ENODEV;
2597                         break;
2598                 }
2599                 PF_RULES_RLOCK();
2600                 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
2601                 io->pfrio_size = min(io->pfrio_size, n);
2602
2603                 totlen = io->pfrio_size * sizeof(struct pfr_table);
2604
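                     /*
                      * The rules lock is held across the allocation and may
                      * not be slept under, hence M_NOWAIT; fail with ENOMEM
                      * instead of blocking.
                      */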
2605                 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
2606                     M_TEMP, M_NOWAIT);
2607                 if (pfrts == NULL) {
2608                         error = ENOMEM;
2609                         PF_RULES_RUNLOCK();
2610                         break;
2611                 }
2612                 error = pfr_get_tables(&io->pfrio_table, pfrts,
2613                     &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2614                 PF_RULES_RUNLOCK();
2615                 if (error == 0)
2616                         error = copyout(pfrts, io->pfrio_buffer, totlen);
2617                 free(pfrts, M_TEMP);
2618                 break;
2619         }
2620
2621         case DIOCRGETTSTATS: {
2622                 struct pfioc_table *io = (struct pfioc_table *)addr;
2623                 struct pfr_tstats *pfrtstats;
2624                 size_t totlen, n;
2625
2626                 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2627                         error = ENODEV;
2628                         break;
2629                 }
2630                 PF_RULES_WLOCK();
2631                 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
2632                 io->pfrio_size = min(io->pfrio_size, n);
2633
2634                 totlen = io->pfrio_size * sizeof(struct pfr_tstats);
2635                 pfrtstats = mallocarray(io->pfrio_size,
2636                     sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT);
2637                 if (pfrtstats == NULL) {
2638                         error = ENOMEM;
2639                         PF_RULES_WUNLOCK();
2640                         break;
2641                 }
2642                 error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
2643                     &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2644                 PF_RULES_WUNLOCK();
2645                 if (error == 0)
2646                         error = copyout(pfrtstats, io->pfrio_buffer, totlen);
2647                 free(pfrtstats, M_TEMP);
2648                 break;
2649         }
2650
2651         case DIOCRCLRTSTATS: {
2652                 struct pfioc_table *io = (struct pfioc_table *)addr;
2653                 struct pfr_table *pfrts;
2654                 size_t totlen, n;
2655
2656                 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2657                         error = ENODEV;
2658                         break;
2659                 }
2660
2661                 PF_RULES_WLOCK();
2662                 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
2663                 io->pfrio_size = min(io->pfrio_size, n);
2664
2665                 totlen = io->pfrio_size * sizeof(struct pfr_table);
2666                 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
2667                     M_TEMP, M_NOWAIT);
2668                 if (pfrts == NULL) {
2669                         error = ENOMEM;
2670                         PF_RULES_WUNLOCK();
2671                         break;
2672                 }
2673                 error = copyin(io->pfrio_buffer, pfrts, totlen);
2674                 if (error) {
2675                         free(pfrts, M_TEMP);
2676                         PF_RULES_WUNLOCK();
2677                         break;
2678                 }
2679                 error = pfr_clr_tstats(pfrts, io->pfrio_size,
2680                     &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2681                 PF_RULES_WUNLOCK();
2682                 free(pfrts, M_TEMP);
2683                 break;
2684         }
2685
2686         case DIOCRSETTFLAGS: {
2687                 struct pfioc_table *io = (struct pfioc_table *)addr;
2688                 struct pfr_table *pfrts;
2689                 size_t totlen, n;
2690
2691                 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2692                         error = ENODEV;
2693                         break;
2694                 }
2695
2696                 PF_RULES_WLOCK();
2697                 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
2698                 io->pfrio_size = min(io->pfrio_size, n);
2699
2700                 totlen = io->pfrio_size * sizeof(struct pfr_table);
2701                 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
2702                     M_TEMP, M_NOWAIT);
2703                 if (pfrts == NULL) {
2704                         error = ENOMEM;
2705                         PF_RULES_WUNLOCK();
2706                         break;
2707                 }
2708                 error = copyin(io->pfrio_buffer, pfrts, totlen);
2709                 if (error) {
2710                         free(pfrts, M_TEMP);
2711                         PF_RULES_WUNLOCK();
2712                         break;
2713                 }
2714                 error = pfr_set_tflags(pfrts, io->pfrio_size,
2715                     io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2716                     &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2717                 PF_RULES_WUNLOCK();
2718                 free(pfrts, M_TEMP);
2719                 break;
2720         }
2721
2722         case DIOCRCLRADDRS: {
2723                 struct pfioc_table *io = (struct pfioc_table *)addr;
2724
2725                 if (io->pfrio_esize != 0) {
2726                         error = ENODEV;
2727                         break;
2728                 }
2729                 PF_RULES_WLOCK();
2730                 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2731                     io->pfrio_flags | PFR_FLAG_USERIOCTL);
2732                 PF_RULES_WUNLOCK();
2733                 break;
2734         }
2735
2736         case DIOCRADDADDRS: {
2737                 struct pfioc_table *io = (struct pfioc_table *)addr;
2738                 struct pfr_addr *pfras;
2739                 size_t totlen;
2740
2741                 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2742                         error = ENODEV;
2743                         break;
2744                 }
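                /*
                 * Bound the request: the element count must not be negative,
                 * must not exceed the pf_ioctl_maxcount tunable, and must not
                 * overflow the buffer length calculation.
                 */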
2745                 if (io->pfrio_size < 0 ||
2746                     io->pfrio_size > pf_ioctl_maxcount ||
2747                     WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
2748                         error = EINVAL;
2749                         break;
2750                 }
2751                 totlen = io->pfrio_size * sizeof(struct pfr_addr);
2752                 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
2753                     M_TEMP, M_NOWAIT);
2754                 if (pfras == NULL) {
2755                         error = ENOMEM;
2756                         break;
2757                 }
2758                 error = copyin(io->pfrio_buffer, pfras, totlen);
2759                 if (error) {
2760                         free(pfras, M_TEMP);
2761                         break;
2762                 }
2763                 PF_RULES_WLOCK();
2764                 error = pfr_add_addrs(&io->pfrio_table, pfras,
2765                     io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2766                     PFR_FLAG_USERIOCTL);
2767                 PF_RULES_WUNLOCK();
2768                 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
2769                         error = copyout(pfras, io->pfrio_buffer, totlen);
2770                 free(pfras, M_TEMP);
2771                 break;
2772         }
2773
2774         case DIOCRDELADDRS: {
2775                 struct pfioc_table *io = (struct pfioc_table *)addr;
2776                 struct pfr_addr *pfras;
2777                 size_t totlen;
2778
2779                 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2780                         error = ENODEV;
2781                         break;
2782                 }
2783                 if (io->pfrio_size < 0 ||
2784                     io->pfrio_size > pf_ioctl_maxcount ||
2785                     WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
2786                         error = EINVAL;
2787                         break;
2788                 }
2789                 totlen = io->pfrio_size * sizeof(struct pfr_addr);
2790                 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
2791                     M_TEMP, M_NOWAIT);
2792                 if (pfras == NULL) {
2793                         error = ENOMEM;
2794                         break;
2795                 }
2796                 error = copyin(io->pfrio_buffer, pfras, totlen);
2797                 if (error) {
2798                         free(pfras, M_TEMP);
2799                         break;
2800                 }
2801                 PF_RULES_WLOCK();
2802                 error = pfr_del_addrs(&io->pfrio_table, pfras,
2803                     io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2804                     PFR_FLAG_USERIOCTL);
2805                 PF_RULES_WUNLOCK();
2806                 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
2807                         error = copyout(pfras, io->pfrio_buffer, totlen);
2808                 free(pfras, M_TEMP);
2809                 break;
2810         }
2811
2812         case DIOCRSETADDRS: {
2813                 struct pfioc_table *io = (struct pfioc_table *)addr;
2814                 struct pfr_addr *pfras;
2815                 size_t totlen, count;
2816
2817                 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2818                         error = ENODEV;
2819                         break;
2820                 }
2821                 if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
2822                         error = EINVAL;
2823                         break;
2824                 }
2825                 count = max(io->pfrio_size, io->pfrio_size2);
2826                 if (count > pf_ioctl_maxcount ||
2827                     WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
2828                         error = EINVAL;
2829                         break;
2830                 }
2831                 totlen = count * sizeof(struct pfr_addr);
2832                 pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
2833                     M_NOWAIT);
2834                 if (pfras == NULL) {
2835                         error = ENOMEM;
2836                         break;
2837                 }
2838                 error = copyin(io->pfrio_buffer, pfras, totlen);
2839                 if (error) {
2840                         free(pfras, M_TEMP);
2841                         break;
2842                 }
2843                 PF_RULES_WLOCK();
2844                 error = pfr_set_addrs(&io->pfrio_table, pfras,
2845                     io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2846                     &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2847                     PFR_FLAG_USERIOCTL, 0);
2848                 PF_RULES_WUNLOCK();
2849                 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
2850                         error = copyout(pfras, io->pfrio_buffer, totlen);
2851                 free(pfras, M_TEMP);
2852                 break;
2853         }
2854
2855         case DIOCRGETADDRS: {
2856                 struct pfioc_table *io = (struct pfioc_table *)addr;
2857                 struct pfr_addr *pfras;
2858                 size_t totlen;
2859
2860                 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2861                         error = ENODEV;
2862                         break;
2863                 }
2864                 if (io->pfrio_size < 0 ||
2865                     io->pfrio_size > pf_ioctl_maxcount ||
2866                     WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
2867                         error = EINVAL;
2868                         break;
2869                 }
2870                 totlen = io->pfrio_size * sizeof(struct pfr_addr);
2871                 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
2872                     M_TEMP, M_NOWAIT);
2873                 if (pfras == NULL) {
2874                         error = ENOMEM;
2875                         break;
2876                 }
2877                 PF_RULES_RLOCK();
2878                 error = pfr_get_addrs(&io->pfrio_table, pfras,
2879                     &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2880                 PF_RULES_RUNLOCK();
2881                 if (error == 0)
2882                         error = copyout(pfras, io->pfrio_buffer, totlen);
2883                 free(pfras, M_TEMP);
2884                 break;
2885         }
2886
2887         case DIOCRGETASTATS: {
2888                 struct pfioc_table *io = (struct pfioc_table *)addr;
2889                 struct pfr_astats *pfrastats;
2890                 size_t totlen;
2891
2892                 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2893                         error = ENODEV;
2894                         break;
2895                 }
2896                 if (io->pfrio_size < 0 ||
2897                     io->pfrio_size > pf_ioctl_maxcount ||
2898                     WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
2899                         error = EINVAL;
2900                         break;
2901                 }
2902                 totlen = io->pfrio_size * sizeof(struct pfr_astats);
2903                 pfrastats = mallocarray(io->pfrio_size,
2904                     sizeof(struct pfr_astats), M_TEMP, M_NOWAIT);
2905                 if (pfrastats == NULL) {
2906                         error = ENOMEM;
2907                         break;
2908                 }
2909                 PF_RULES_RLOCK();
2910                 error = pfr_get_astats(&io->pfrio_table, pfrastats,
2911                     &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2912                 PF_RULES_RUNLOCK();
2913                 if (error == 0)
2914                         error = copyout(pfrastats, io->pfrio_buffer, totlen);
2915                 free(pfrastats, M_TEMP);
2916                 break;
2917         }
2918
2919         case DIOCRCLRASTATS: {
2920                 struct pfioc_table *io = (struct pfioc_table *)addr;
2921                 struct pfr_addr *pfras;
2922                 size_t totlen;
2923
2924                 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2925                         error = ENODEV;
2926                         break;
2927                 }
2928                 if (io->pfrio_size < 0 ||
2929                     io->pfrio_size > pf_ioctl_maxcount ||
2930                     WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
2931                         error = EINVAL;
2932                         break;
2933                 }
2934                 totlen = io->pfrio_size * sizeof(struct pfr_addr);
2935                 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
2936                     M_TEMP, M_NOWAIT);
2937                 if (pfras == NULL) {
2938                         error = ENOMEM;
2939                         break;
2940                 }
2941                 error = copyin(io->pfrio_buffer, pfras, totlen);
2942                 if (error) {
2943                         free(pfras, M_TEMP);
2944                         break;
2945                 }
2946                 PF_RULES_WLOCK();
2947                 error = pfr_clr_astats(&io->pfrio_table, pfras,
2948                     io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2949                     PFR_FLAG_USERIOCTL);
2950                 PF_RULES_WUNLOCK();
2951                 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
2952                         error = copyout(pfras, io->pfrio_buffer, totlen);
2953                 free(pfras, M_TEMP);
2954                 break;
2955         }
2956
2957         case DIOCRTSTADDRS: {
2958                 struct pfioc_table *io = (struct pfioc_table *)addr;
2959                 struct pfr_addr *pfras;
2960                 size_t totlen;
2961
2962                 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2963                         error = ENODEV;
2964                         break;
2965                 }
2966                 if (io->pfrio_size < 0 ||
2967                     io->pfrio_size > pf_ioctl_maxcount ||
2968                     WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
2969                         error = EINVAL;
2970                         break;
2971                 }
2972                 totlen = io->pfrio_size * sizeof(struct pfr_addr);
2973                 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
2974                     M_TEMP, M_NOWAIT);
2975                 if (pfras == NULL) {
2976                         error = ENOMEM;
2977                         break;
2978                 }
2979                 error = copyin(io->pfrio_buffer, pfras, totlen);
2980                 if (error) {
2981                         free(pfras, M_TEMP);
2982                         break;
2983                 }
2984                 PF_RULES_RLOCK();
2985                 error = pfr_tst_addrs(&io->pfrio_table, pfras,
2986                     io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2987                     PFR_FLAG_USERIOCTL);
2988                 PF_RULES_RUNLOCK();
2989                 if (error == 0)
2990                         error = copyout(pfras, io->pfrio_buffer, totlen);
2991                 free(pfras, M_TEMP);
2992                 break;
2993         }
2994
2995         case DIOCRINADEFINE: {
2996                 struct pfioc_table *io = (struct pfioc_table *)addr;
2997                 struct pfr_addr *pfras;
2998                 size_t totlen;
2999
3000                 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3001                         error = ENODEV;
3002                         break;
3003                 }
3004                 if (io->pfrio_size < 0 ||
3005                     io->pfrio_size > pf_ioctl_maxcount ||
3006                     WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
3007                         error = EINVAL;
3008                         break;
3009                 }
3010                 totlen = io->pfrio_size * sizeof(struct pfr_addr);
3011                 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
3012                     M_TEMP, M_NOWAIT);
3013                 if (pfras == NULL) {
3014                         error = ENOMEM;
3015                         break;
3016                 }
3017                 error = copyin(io->pfrio_buffer, pfras, totlen);
3018                 if (error) {
3019                         free(pfras, M_TEMP);
3020                         break;
3021                 }
3022                 PF_RULES_WLOCK();
3023                 error = pfr_ina_define(&io->pfrio_table, pfras,
3024                     io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
3025                     io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3026                 PF_RULES_WUNLOCK();
3027                 free(pfras, M_TEMP);
3028                 break;
3029         }
3030
3031         case DIOCOSFPADD: {
3032                 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
3033                 PF_RULES_WLOCK();
3034                 error = pf_osfp_add(io);
3035                 PF_RULES_WUNLOCK();
3036                 break;
3037         }
3038
3039         case DIOCOSFPGET: {
3040                 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
3041                 PF_RULES_RLOCK();
3042                 error = pf_osfp_get(io);
3043                 PF_RULES_RUNLOCK();
3044                 break;
3045         }
3046
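        /*
         * DIOCXBEGIN, DIOCXROLLBACK and DIOCXCOMMIT implement transactional
         * ruleset loading: DIOCXBEGIN opens an inactive copy of every listed
         * ruleset element and returns one ticket per element, and the whole
         * batch is later committed or rolled back under a single write lock.
         *
         * A minimal userland sketch (assuming a "dev" descriptor open on
         * /dev/pf; error handling omitted):
         *
         *	struct pfioc_trans_e es[1] = {
         *		{ .rs_num = PF_RULESET_FILTER, .anchor = "" }
         *	};
         *	struct pfioc_trans tr = {
         *		.size = 1, .esize = sizeof(es[0]), .array = es
         *	};
         *	ioctl(dev, DIOCXBEGIN, &tr);	(fills in es[0].ticket)
         *	... load rules against es[0].ticket ...
         *	ioctl(dev, DIOCXCOMMIT, &tr);	(or DIOCXROLLBACK on failure)
         */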
3047         case DIOCXBEGIN: {
3048                 struct pfioc_trans      *io = (struct pfioc_trans *)addr;
3049                 struct pfioc_trans_e    *ioes, *ioe;
3050                 size_t                   totlen;
3051                 int                      i;
3052
3053                 if (io->esize != sizeof(*ioe)) {
3054                         error = ENODEV;
3055                         break;
3056                 }
3057                 if (io->size < 0 ||
3058                     io->size > pf_ioctl_maxcount ||
3059                     WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
3060                         error = EINVAL;
3061                         break;
3062                 }
3063                 totlen = sizeof(struct pfioc_trans_e) * io->size;
3064                 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
3065                     M_TEMP, M_NOWAIT);
3066                 if (ioes == NULL) {
3067                         error = ENOMEM;
3068                         break;
3069                 }
3070                 error = copyin(io->array, ioes, totlen);
3071                 if (error) {
3072                         free(ioes, M_TEMP);
3073                         break;
3074                 }
3075                 PF_RULES_WLOCK();
3076                 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
3077                         switch (ioe->rs_num) {
3078 #ifdef ALTQ
3079                         case PF_RULESET_ALTQ:
3080                                 if (ioe->anchor[0]) {
3081                                         PF_RULES_WUNLOCK();
3082                                         free(ioes, M_TEMP);
3083                                         error = EINVAL;
3084                                         goto fail;
3085                                 }
3086                                 if ((error = pf_begin_altq(&ioe->ticket))) {
3087                                         PF_RULES_WUNLOCK();
3088                                         free(ioes, M_TEMP);
3089                                         goto fail;
3090                                 }
3091                                 break;
3092 #endif /* ALTQ */
3093                         case PF_RULESET_TABLE:
3094                             {
3095                                 struct pfr_table table;
3096
3097                                 bzero(&table, sizeof(table));
3098                                 strlcpy(table.pfrt_anchor, ioe->anchor,
3099                                     sizeof(table.pfrt_anchor));
3100                                 if ((error = pfr_ina_begin(&table,
3101                                     &ioe->ticket, NULL, 0))) {
3102                                         PF_RULES_WUNLOCK();
3103                                         free(ioes, M_TEMP);
3104                                         goto fail;
3105                                 }
3106                                 break;
3107                             }
3108                         default:
3109                                 if ((error = pf_begin_rules(&ioe->ticket,
3110                                     ioe->rs_num, ioe->anchor))) {
3111                                         PF_RULES_WUNLOCK();
3112                                         free(ioes, M_TEMP);
3113                                         goto fail;
3114                                 }
3115                                 break;
3116                         }
3117                 }
3118                 PF_RULES_WUNLOCK();
3119                 error = copyout(ioes, io->array, totlen);
3120                 free(ioes, M_TEMP);
3121                 break;
3122         }
3123
3124         case DIOCXROLLBACK: {
3125                 struct pfioc_trans      *io = (struct pfioc_trans *)addr;
3126                 struct pfioc_trans_e    *ioe, *ioes;
3127                 size_t                   totlen;
3128                 int                      i;
3129
3130                 if (io->esize != sizeof(*ioe)) {
3131                         error = ENODEV;
3132                         break;
3133                 }
3134                 if (io->size < 0 ||
3135                     io->size > pf_ioctl_maxcount ||
3136                     WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
3137                         error = EINVAL;
3138                         break;
3139                 }
3140                 totlen = sizeof(struct pfioc_trans_e) * io->size;
3141                 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
3142                     M_TEMP, M_NOWAIT);
3143                 if (ioes == NULL) {
3144                         error = ENOMEM;
3145                         break;
3146                 }
3147                 error = copyin(io->array, ioes, totlen);
3148                 if (error) {
3149                         free(ioes, M_TEMP);
3150                         break;
3151                 }
3152                 PF_RULES_WLOCK();
3153                 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
3154                         switch (ioe->rs_num) {
3155 #ifdef ALTQ
3156                         case PF_RULESET_ALTQ:
3157                                 if (ioe->anchor[0]) {
3158                                         PF_RULES_WUNLOCK();
3159                                         free(ioes, M_TEMP);
3160                                         error = EINVAL;
3161                                         goto fail;
3162                                 }
3163                                 if ((error = pf_rollback_altq(ioe->ticket))) {
3164                                         PF_RULES_WUNLOCK();
3165                                         free(ioes, M_TEMP);
3166                                         goto fail; /* really bad */
3167                                 }
3168                                 break;
3169 #endif /* ALTQ */
3170                         case PF_RULESET_TABLE:
3171                             {
3172                                 struct pfr_table table;
3173
3174                                 bzero(&table, sizeof(table));
3175                                 strlcpy(table.pfrt_anchor, ioe->anchor,
3176                                     sizeof(table.pfrt_anchor));
3177                                 if ((error = pfr_ina_rollback(&table,
3178                                     ioe->ticket, NULL, 0))) {
3179                                         PF_RULES_WUNLOCK();
3180                                         free(ioes, M_TEMP);
3181                                         goto fail; /* really bad */
3182                                 }
3183                                 break;
3184                             }
3185                         default:
3186                                 if ((error = pf_rollback_rules(ioe->ticket,
3187                                     ioe->rs_num, ioe->anchor))) {
3188                                         PF_RULES_WUNLOCK();
3189                                         free(ioes, M_TEMP);
3190                                         goto fail; /* really bad */
3191                                 }
3192                                 break;
3193                         }
3194                 }
3195                 PF_RULES_WUNLOCK();
3196                 free(ioes, M_TEMP);
3197                 break;
3198         }
3199
3200         case DIOCXCOMMIT: {
3201                 struct pfioc_trans      *io = (struct pfioc_trans *)addr;
3202                 struct pfioc_trans_e    *ioe, *ioes;
3203                 struct pf_ruleset       *rs;
3204                 size_t                   totlen;
3205                 int                      i;
3206
3207                 if (io->esize != sizeof(*ioe)) {
3208                         error = ENODEV;
3209                         break;
3210                 }
3211
3212                 if (io->size < 0 ||
3213                     io->size > pf_ioctl_maxcount ||
3214                     WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
3215                         error = EINVAL;
3216                         break;
3217                 }
3218
3219                 totlen = sizeof(struct pfioc_trans_e) * io->size;
3220                 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
3221                     M_TEMP, M_NOWAIT);
3222                 if (ioes == NULL) {
3223                         error = ENOMEM;
3224                         break;
3225                 }
3226                 error = copyin(io->array, ioes, totlen);
3227                 if (error) {
3228                         free(ioes, M_TEMP);
3229                         break;
3230                 }
3231                 PF_RULES_WLOCK();
3232                 /* First make sure everything will succeed. */
3233                 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
3234                         switch (ioe->rs_num) {
3235 #ifdef ALTQ
3236                         case PF_RULESET_ALTQ:
3237                                 if (ioe->anchor[0]) {
3238                                         PF_RULES_WUNLOCK();
3239                                         free(ioes, M_TEMP);
3240                                         error = EINVAL;
3241                                         goto fail;
3242                                 }
3243                                 if (!V_altqs_inactive_open || ioe->ticket !=
3244                                     V_ticket_altqs_inactive) {
3245                                         PF_RULES_WUNLOCK();
3246                                         free(ioes, M_TEMP);
3247                                         error = EBUSY;
3248                                         goto fail;
3249                                 }
3250                                 break;
3251 #endif /* ALTQ */
3252                         case PF_RULESET_TABLE:
3253                                 rs = pf_find_ruleset(ioe->anchor);
3254                                 if (rs == NULL || !rs->topen || ioe->ticket !=
3255                                     rs->tticket) {
3256                                         PF_RULES_WUNLOCK();
3257                                         free(ioes, M_TEMP);
3258                                         error = EBUSY;
3259                                         goto fail;
3260                                 }
3261                                 break;
3262                         default:
3263                                 if (ioe->rs_num < 0 || ioe->rs_num >=
3264                                     PF_RULESET_MAX) {
3265                                         PF_RULES_WUNLOCK();
3266                                         free(ioes, M_TEMP);
3267                                         error = EINVAL;
3268                                         goto fail;
3269                                 }
3270                                 rs = pf_find_ruleset(ioe->anchor);
3271                                 if (rs == NULL ||
3272                                     !rs->rules[ioe->rs_num].inactive.open ||
3273                                     rs->rules[ioe->rs_num].inactive.ticket !=
3274                                     ioe->ticket) {
3275                                         PF_RULES_WUNLOCK();
3276                                         free(ioes, M_TEMP);
3277                                         error = EBUSY;
3278                                         goto fail;
3279                                 }
3280                                 break;
3281                         }
3282                 }
3283                 /* Now do the commit - no errors should happen here. */
3284                 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
3285                         switch (ioe->rs_num) {
3286 #ifdef ALTQ
3287                         case PF_RULESET_ALTQ:
3288                                 if ((error = pf_commit_altq(ioe->ticket))) {
3289                                         PF_RULES_WUNLOCK();
3290                                         free(ioes, M_TEMP);
3291                                         goto fail; /* really bad */
3292                                 }
3293                                 break;
3294 #endif /* ALTQ */
3295                         case PF_RULESET_TABLE:
3296                             {
3297                                 struct pfr_table table;
3298
3299                                 bzero(&table, sizeof(table));
3300                                 strlcpy(table.pfrt_anchor, ioe->anchor,
3301                                     sizeof(table.pfrt_anchor));
3302                                 if ((error = pfr_ina_commit(&table,
3303                                     ioe->ticket, NULL, NULL, 0))) {
3304                                         PF_RULES_WUNLOCK();
3305                                         free(ioes, M_TEMP);
3306                                         goto fail; /* really bad */
3307                                 }
3308                                 break;
3309                             }
3310                         default:
3311                                 if ((error = pf_commit_rules(ioe->ticket,
3312                                     ioe->rs_num, ioe->anchor))) {
3313                                         PF_RULES_WUNLOCK();
3314                                         free(ioes, M_TEMP);
3315                                         goto fail; /* really bad */
3316                                 }
3317                                 break;
3318                         }
3319                 }
3320                 PF_RULES_WUNLOCK();
3321                 free(ioes, M_TEMP);
3322                 break;
3323         }
3324
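        /*
         * DIOCGETSRCNODES is a two-step interface: a request with
         * psn_len == 0 only reports the buffer size needed for all
         * source nodes, while a sized request copies out as many
         * records as fit, with timestamps and connection-rate counters
         * rebased to the current uptime.
         */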
3325         case DIOCGETSRCNODES: {
3326                 struct pfioc_src_nodes  *psn = (struct pfioc_src_nodes *)addr;
3327                 struct pf_srchash       *sh;
3328                 struct pf_src_node      *n, *p, *pstore;
3329                 uint32_t                 i, nr = 0;
3330
3331                 if (psn->psn_len == 0) {
3332                         for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
3333                             i++, sh++) {
3334                                 PF_HASHROW_LOCK(sh);
3335                                 LIST_FOREACH(n, &sh->nodes, entry)
3336                                         nr++;
3337                                 PF_HASHROW_UNLOCK(sh);
3338                         }
3339                         psn->psn_len = sizeof(struct pf_src_node) * nr;
3340                         break;
3341                 }
3342
3343                 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK);
3344                 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
3345                     i++, sh++) {
3346                     PF_HASHROW_LOCK(sh);
3347                     LIST_FOREACH(n, &sh->nodes, entry) {
3348                         int     secs = time_uptime, diff;
3349
3350                         if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
3351                                 break;
3352
3353                         bcopy(n, p, sizeof(struct pf_src_node));
3354                         if (n->rule.ptr != NULL)
3355                                 p->rule.nr = n->rule.ptr->nr;
3356                         p->creation = secs - p->creation;
3357                         if (p->expire > secs)
3358                                 p->expire -= secs;
3359                         else
3360                                 p->expire = 0;
3361
3362                         /* Adjust the connection rate estimate. */
3363                         diff = secs - n->conn_rate.last;
3364                         if (diff >= n->conn_rate.seconds)
3365                                 p->conn_rate.count = 0;
3366                         else
3367                                 p->conn_rate.count -=
3368                                     n->conn_rate.count * diff /
3369                                     n->conn_rate.seconds;
3370                         p++;
3371                         nr++;
3372                     }
3373                     PF_HASHROW_UNLOCK(sh);
3374                 }
3375                 error = copyout(pstore, psn->psn_src_nodes,
3376                     sizeof(struct pf_src_node) * nr);
3377                 if (error) {
3378                         free(pstore, M_TEMP);
3379                         break;
3380                 }
3381                 psn->psn_len = sizeof(struct pf_src_node) * nr;
3382                 free(pstore, M_TEMP);
3383                 break;
3384         }
3385
3386         case DIOCCLRSRCNODES: {
3387
3388                 pf_clear_srcnodes(NULL);
3389                 pf_purge_expired_src_nodes();
3390                 break;
3391         }
3392
3393         case DIOCKILLSRCNODES:
3394                 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
3395                 break;
3396
3397         case DIOCSETHOSTID: {
3398                 u_int32_t       *hostid = (u_int32_t *)addr;
3399
3400                 PF_RULES_WLOCK();
3401                 if (*hostid == 0)
3402                         V_pf_status.hostid = arc4random();
3403                 else
3404                         V_pf_status.hostid = *hostid;
3405                 PF_RULES_WUNLOCK();
3406                 break;
3407         }
3408
3409         case DIOCOSFPFLUSH:
3410                 PF_RULES_WLOCK();
3411                 pf_osfp_flush();
3412                 PF_RULES_WUNLOCK();
3413                 break;
3414
3415         case DIOCIGETIFACES: {
3416                 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3417                 struct pfi_kif *ifstore;
3418                 size_t bufsiz;
3419
3420                 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
3421                         error = ENODEV;
3422                         break;
3423                 }
3424
3425                 if (io->pfiio_size < 0 ||
3426                     io->pfiio_size > pf_ioctl_maxcount ||
3427                     WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
3428                         error = EINVAL;
3429                         break;
3430                 }
3431
3432                 bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
3433                 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
3434                     M_TEMP, M_NOWAIT);
3435                 if (ifstore == NULL) {
3436                         error = ENOMEM;
3437                         break;
3438                 }
3439
3440                 PF_RULES_RLOCK();
3441                 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
3442                 PF_RULES_RUNLOCK();
3443                 error = copyout(ifstore, io->pfiio_buffer, bufsiz);
3444                 free(ifstore, M_TEMP);
3445                 break;
3446         }
3447
3448         case DIOCSETIFFLAG: {
3449                 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3450
3451                 PF_RULES_WLOCK();
3452                 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
3453                 PF_RULES_WUNLOCK();
3454                 break;
3455         }
3456
3457         case DIOCCLRIFFLAG: {
3458                 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3459
3460                 PF_RULES_WLOCK();
3461                 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
3462                 PF_RULES_WUNLOCK();
3463                 break;
3464         }
3465
3466         default:
3467                 error = ENODEV;
3468                 break;
3469         }
3470 fail:
3471         if (sx_xlocked(&pf_ioctl_lock))
3472                 sx_xunlock(&pf_ioctl_lock);
3473         CURVNET_RESTORE();
3474
3475         return (error);
3476 }
3477
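/*
 * Export a kernel pf_state into the wire-format pfsync_state used by
 * pfsync and the state export ioctls: the state keys are copied, rule
 * numbers and counters are converted to network byte order, and the
 * creation and expiry times become intervals relative to the current
 * uptime.
 */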
3478 void
3479 pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
3480 {
3481         bzero(sp, sizeof(struct pfsync_state));
3482
3483         /* copy from state key */
3484         sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
3485         sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
3486         sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
3487         sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
3488         sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
3489         sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
3490         sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
3491         sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
3492         sp->proto = st->key[PF_SK_WIRE]->proto;
3493         sp->af = st->key[PF_SK_WIRE]->af;
3494
3495         /* copy from state */
3496         strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
3497         bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
3498         sp->creation = htonl(time_uptime - st->creation);
3499         sp->expire = pf_state_expires(st);
3500         if (sp->expire <= time_uptime)
3501                 sp->expire = htonl(0);
3502         else
3503                 sp->expire = htonl(sp->expire - time_uptime);
3504
3505         sp->direction = st->direction;
3506         sp->log = st->log;
3507         sp->timeout = st->timeout;
3508         sp->state_flags = st->state_flags;
3509         if (st->src_node)
3510                 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
3511         if (st->nat_src_node)
3512                 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
3513
3514         sp->id = st->id;
3515         sp->creatorid = st->creatorid;
3516         pf_state_peer_hton(&st->src, &sp->src);
3517         pf_state_peer_hton(&st->dst, &sp->dst);
3518
3519         if (st->rule.ptr == NULL)
3520                 sp->rule = htonl(-1);
3521         else
3522                 sp->rule = htonl(st->rule.ptr->nr);
3523         if (st->anchor.ptr == NULL)
3524                 sp->anchor = htonl(-1);
3525         else
3526                 sp->anchor = htonl(st->anchor.ptr->nr);
3527         if (st->nat_rule.ptr == NULL)
3528                 sp->nat_rule = htonl(-1);
3529         else
3530                 sp->nat_rule = htonl(st->nat_rule.ptr->nr);
3531
3532         pf_state_counter_hton(st->packets[0], sp->packets[0]);
3533         pf_state_counter_hton(st->packets[1], sp->packets[1]);
3534         pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
3535         pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
3536
3537 }
3538
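/*
 * Prepare a table address wrapper for copyout: drop the kernel table
 * pointer and report the active table's address count instead, or -1
 * if the table is not active.
 */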
3539 static void
3540 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
3541 {
3542         struct pfr_ktable *kt;
3543
3544         KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
3545
3546         kt = aw->p.tbl;
3547         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
3548                 kt = kt->pfrkt_root;
3549         aw->p.tbl = NULL;
3550         aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
3551                 kt->pfrkt_cnt : -1;
3552 }
3553
3554 /*
3555  * XXX - Check for version mismatch!!!
3556  */
3557 static void
3558 pf_clear_states(void)
3559 {
3560         struct pf_state *s;
3561         u_int i;
3562
3563         for (i = 0; i <= pf_hashmask; i++) {
3564                 struct pf_idhash *ih = &V_pf_idhash[i];
3565 relock:
3566                 PF_HASHROW_LOCK(ih);
3567                 LIST_FOREACH(s, &ih->states, entry) {
3568                         s->timeout = PFTM_PURGE;
3569                         /* Don't send out individual delete messages. */
3570                         s->state_flags |= PFSTATE_NOSYNC;
3571                         pf_unlink_state(s, PF_ENTER_LOCKED);
3572                         goto relock;
3573                 }
3574                 PF_HASHROW_UNLOCK(ih);
3575         }
3576 }
3577
3578 static int
3579 pf_clear_tables(void)
3580 {
3581         struct pfioc_table io;
3582         int error;
3583
3584         bzero(&io, sizeof(io));
3585
3586         error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
3587             io.pfrio_flags);
3588
3589         return (error);
3590 }
3591
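/*
 * Detach source nodes from states and mark them expired.  A NULL
 * argument clears every source node; otherwise only the given node is
 * cleared, and its hash row is expected to be locked by the caller.
 */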
3592 static void
3593 pf_clear_srcnodes(struct pf_src_node *n)
3594 {
3595         struct pf_state *s;
3596         int i;
3597
3598         for (i = 0; i <= pf_hashmask; i++) {
3599                 struct pf_idhash *ih = &V_pf_idhash[i];
3600
3601                 PF_HASHROW_LOCK(ih);
3602                 LIST_FOREACH(s, &ih->states, entry) {
3603                         if (n == NULL || n == s->src_node)
3604                                 s->src_node = NULL;
3605                         if (n == NULL || n == s->nat_src_node)
3606                                 s->nat_src_node = NULL;
3607                 }
3608                 PF_HASHROW_UNLOCK(ih);
3609         }
3610
3611         if (n == NULL) {
3612                 struct pf_srchash *sh;
3613
3614                 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
3615                     i++, sh++) {
3616                         PF_HASHROW_LOCK(sh);
3617                         LIST_FOREACH(n, &sh->nodes, entry) {
3618                                 n->expire = 1;
3619                                 n->states = 0;
3620                         }
3621                         PF_HASHROW_UNLOCK(sh);
3622                 }
3623         } else {
3624                 /* XXX: hash slot should already be locked here. */
3625                 n->expire = 1;
3626                 n->states = 0;
3627         }
3628 }
3629
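/*
 * Kill the source nodes matching the request's source and destination
 * masks: unlink matching nodes onto a local list, clear any state
 * references to them, then free the collected nodes and report the
 * count back to userland.
 */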
3630 static void
3631 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
3632 {
3633         struct pf_src_node_list  kill;
3634
3635         LIST_INIT(&kill);
3636         for (int i = 0; i <= pf_srchashmask; i++) {
3637                 struct pf_srchash *sh = &V_pf_srchash[i];
3638                 struct pf_src_node *sn, *tmp;
3639
3640                 PF_HASHROW_LOCK(sh);
3641                 LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
3642                         if (PF_MATCHA(psnk->psnk_src.neg,
3643                               &psnk->psnk_src.addr.v.a.addr,
3644                               &psnk->psnk_src.addr.v.a.mask,
3645                               &sn->addr, sn->af) &&
3646                             PF_MATCHA(psnk->psnk_dst.neg,
3647                               &psnk->psnk_dst.addr.v.a.addr,
3648                               &psnk->psnk_dst.addr.v.a.mask,
3649                               &sn->raddr, sn->af)) {
3650                                 pf_unlink_src_node(sn);
3651                                 LIST_INSERT_HEAD(&kill, sn, entry);
3652                                 sn->expire = 1;
3653                         }
3654                 PF_HASHROW_UNLOCK(sh);
3655         }
3656
3657         for (int i = 0; i <= pf_hashmask; i++) {
3658                 struct pf_idhash *ih = &V_pf_idhash[i];
3659                 struct pf_state *s;
3660
3661                 PF_HASHROW_LOCK(ih);
3662                 LIST_FOREACH(s, &ih->states, entry) {
3663                         if (s->src_node && s->src_node->expire == 1)
3664                                 s->src_node = NULL;
3665                         if (s->nat_src_node && s->nat_src_node->expire == 1)
3666                                 s->nat_src_node = NULL;
3667                 }
3668                 PF_HASHROW_UNLOCK(ih);
3669         }
3670
3671         psnk->psnk_killed = pf_free_src_nodes(&kill);
3672 }
3673
3674 /*
3675  * XXX - Check for version mismatch!!!
3676  */
3677
3678 /*
3679  * Duplicate pfctl -Fa operation to get rid of as much as we can.
3680  */
3681 static int
3682 shutdown_pf(void)
3683 {
3684         int error = 0;
3685         u_int32_t t[5];
3686         char nn = '\0';
3687
3688         do {
3689                 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
3690                     != 0) {
3691                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
3692                         break;
3693                 }
3694                 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
3695                     != 0) {
3696                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
3697                         break;          /* XXX: rollback? */
3698                 }
3699                 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
3700                     != 0) {
3701                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
3702                         break;          /* XXX: rollback? */
3703                 }
3704                 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
3705                     != 0) {
3706                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
3707                         break;          /* XXX: rollback? */
3708                 }
3709                 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
3710                     != 0) {
3711                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
3712                         break;          /* XXX: rollback? */
3713                 }
3714
3715                 /* XXX: these should always succeed here */
3716                 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
3717                 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
3718                 pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
3719                 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
3720                 pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
3721
3722                 if ((error = pf_clear_tables()) != 0)
3723                         break;
3724
3725 #ifdef ALTQ
3726                 if ((error = pf_begin_altq(&t[0])) != 0) {
3727                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
3728                         break;
3729                 }
3730                 pf_commit_altq(t[0]);
3731 #endif
3732
3733                 pf_clear_states();
3734
3735                 pf_clear_srcnodes(NULL);
3736
3737                 /* status does not use malloc'd memory, so no cleanup is needed */
3738                 /* fingerprints and interfaces have their own cleanup code */
3739
3740                 /* Free counters last as we updated them during shutdown. */
3741                 counter_u64_free(V_pf_default_rule.states_cur);
3742                 counter_u64_free(V_pf_default_rule.states_tot);
3743                 counter_u64_free(V_pf_default_rule.src_nodes);
3744
3745                 for (int i = 0; i < PFRES_MAX; i++)
3746                         counter_u64_free(V_pf_status.counters[i]);
3747                 for (int i = 0; i < LCNT_MAX; i++)
3748                         counter_u64_free(V_pf_status.lcounters[i]);
3749                 for (int i = 0; i < FCNT_MAX; i++)
3750                         counter_u64_free(V_pf_status.fcounters[i]);
3751                 for (int i = 0; i < SCNT_MAX; i++)
3752                         counter_u64_free(V_pf_status.scounters[i]);
3753         } while (0);
3754
3755         return (error);
3756 }
3757
3758 #ifdef INET
3759 static int
3760 pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
3761     struct inpcb *inp)
3762 {
3763         int chk;
3764
3765         chk = pf_test(PF_IN, flags, ifp, m, inp);
3766         if (chk && *m) {
3767                 m_freem(*m);
3768                 *m = NULL;
3769         }
3770
3771         if (chk != PF_PASS)
3772                 return (EACCES);
3773         return (0);
3774 }
3775
3776 static int
3777 pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
3778     struct inpcb *inp)
3779 {
3780         int chk;
3781
3782         chk = pf_test(PF_OUT, flags, ifp, m, inp);
3783         if (chk && *m) {
3784                 m_freem(*m);
3785                 *m = NULL;
3786         }
3787
3788         if (chk != PF_PASS)
3789                 return (EACCES);
3790         return (0);
3791 }
3792 #endif
3793
3794 #ifdef INET6
3795 static int
3796 pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
3797     struct inpcb *inp)
3798 {
3799         int chk;
3800
3801         /*
3802          * In the case of loopback traffic, IPv6 uses the real interface
3803          * in order to support scoped addresses.  To support stateful
3804          * filtering we change this to lo0, as is already the case for IPv4.
3805          */
3806         CURVNET_SET(ifp->if_vnet);
3807         chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp);
3808         CURVNET_RESTORE();
3809         if (chk && *m) {
3810                 m_freem(*m);
3811                 *m = NULL;
3812         }
3813         if (chk != PF_PASS)
3814                 return (EACCES);
3815         return (0);
3816 }
3817
3818 static int
3819 pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
3820     struct inpcb *inp)
3821 {
3822         int chk;
3823
3824         CURVNET_SET(ifp->if_vnet);
3825         chk = pf_test6(PF_OUT, flags, ifp, m, inp);
3826         CURVNET_RESTORE();
3827         if (chk && *m) {
3828                 m_freem(*m);
3829                 *m = NULL;
3830         }
3831         if (chk != PF_PASS)
3832                 return (EACCES);
3833         return (0);
3834 }
3835 #endif /* INET6 */
3836
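/*
 * Attach the pf input and output checks to the pfil(9) heads for
 * AF_INET and AF_INET6.  The hooks are installed at most once per
 * vnet, as tracked by V_pf_pfil_hooked.
 */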
3837 static int
3838 hook_pf(void)
3839 {
3840 #ifdef INET
3841         struct pfil_head *pfh_inet;
3842 #endif
3843 #ifdef INET6
3844         struct pfil_head *pfh_inet6;
3845 #endif
3846
3847         if (V_pf_pfil_hooked)
3848                 return (0);
3849
3850 #ifdef INET
3851         pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3852         if (pfh_inet == NULL)
3853                 return (ESRCH); /* XXX */
3854         pfil_add_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
3855         pfil_add_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
3856 #endif
3857 #ifdef INET6
3858         pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3859         if (pfh_inet6 == NULL) {
3860 #ifdef INET
3861                 pfil_remove_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
3862                     pfh_inet);
3863                 pfil_remove_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
3864                     pfh_inet);
3865 #endif
3866                 return (ESRCH); /* XXX */
3867         }
3868         pfil_add_hook_flags(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
3869         pfil_add_hook_flags(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
3870 #endif
3871
3872         V_pf_pfil_hooked = 1;
3873         return (0);
3874 }
3875
3876 static int
3877 dehook_pf(void)
3878 {
3879 #ifdef INET
3880         struct pfil_head *pfh_inet;
3881 #endif
3882 #ifdef INET6
3883         struct pfil_head *pfh_inet6;
3884 #endif
3885
3886         if (V_pf_pfil_hooked == 0)
3887                 return (0);
3888
3889 #ifdef INET
3890         pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3891         if (pfh_inet == NULL)
3892                 return (ESRCH); /* XXX */
3893         pfil_remove_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
3894             pfh_inet);
3895         pfil_remove_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
3896             pfh_inet);
3897 #endif
3898 #ifdef INET6
3899         pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3900         if (pfh_inet6 == NULL)
3901                 return (ESRCH); /* XXX */
3902         pfil_remove_hook_flags(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
3903             pfh_inet6);
3904         pfil_remove_hook_flags(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
3905             pfh_inet6);
3906 #endif
3907
3908         V_pf_pfil_hooked = 0;
3909         return (0);
3910 }
3911
3912 static void
3913 pf_load_vnet(void)
3914 {
3915         TAILQ_INIT(&V_pf_tags);
3916         TAILQ_INIT(&V_pf_qids);
3917
3918         pfattach_vnet();
3919         V_pf_vnet_active = 1;
3920 }
3921
3922 static int
3923 pf_load(void)
3924 {
3925         int error;
3926
3927         rw_init(&pf_rules_lock, "pf rulesets");
3928         sx_init(&pf_ioctl_lock, "pf ioctl");
3929         sx_init(&pf_end_lock, "pf end thread");
3930
3931         pf_mtag_initialize();
3932
3933         pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
3934         if (pf_dev == NULL)
3935                 return (ENOMEM);
3936
3937         pf_end_threads = 0;
3938         error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
3939         if (error != 0)
3940                 return (error);
3941
3942         pfi_initialize();
3943
3944         return (0);
3945 }
3946
3947 static void
3948 pf_unload_vnet(void)
3949 {
3950         int error;
3951
3952         V_pf_vnet_active = 0;
3953         V_pf_status.running = 0;
3954         swi_remove(V_pf_swi_cookie);
3955         error = dehook_pf();
3956         if (error) {
3957                 /*
3958                  * Should not happen!
3959                  * XXX Due to error code ESRCH, kldunload will show
3960                  * a message like 'No such process'.
3961                  */
3962                 printf("%s: pfil unregistration failed\n", __func__);
3963                 return;
3964         }
3965
3966         PF_RULES_WLOCK();
3967         shutdown_pf();
3968         PF_RULES_WUNLOCK();
3969
3970         pf_unload_vnet_purge();
3971
3972         pf_normalize_cleanup();
3973         PF_RULES_WLOCK();
3974         pfi_cleanup_vnet();
3975         PF_RULES_WUNLOCK();
3976         pfr_cleanup();
3977         pf_osfp_flush();
3978         pf_cleanup();
3979         if (IS_DEFAULT_VNET(curvnet))
3980                 pf_mtag_cleanup();
3981 }
3982
3983 static void
3984 pf_unload(void)
3985 {
3986
3987         sx_xlock(&pf_end_lock);
3988         pf_end_threads = 1;
3989         while (pf_end_threads < 2) {
3990                 wakeup_one(pf_purge_thread);
3991                 sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
3992         }
3993         sx_xunlock(&pf_end_lock);
3994
3995         if (pf_dev != NULL)
3996                 destroy_dev(pf_dev);
3997
3998         pfi_cleanup();
3999
4000         rw_destroy(&pf_rules_lock);
4001         sx_destroy(&pf_ioctl_lock);
4002         sx_destroy(&pf_end_lock);
4003 }
4004
4005 static void
4006 vnet_pf_init(void *unused __unused)
4007 {
4008
4009         pf_load_vnet();
4010 }
4011 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD, 
4012     vnet_pf_init, NULL);
4013
4014 static void
4015 vnet_pf_uninit(const void *unused __unused)
4016 {
4017
4018         pf_unload_vnet();
4019 }
4020 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
4021 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
4022     vnet_pf_uninit, NULL);
4023
4024
4025 static int
4026 pf_modevent(module_t mod, int type, void *data)
4027 {
4028         int error = 0;
4029
4030         switch (type) {
4031         case MOD_LOAD:
4032                 error = pf_load();
4033                 break;
4034         case MOD_UNLOAD:
4035                 /* Handled in SYSUNINIT(pf_unload) to ensure it's done after
4036                  * the vnet_pf_uninit()s */
4037                 break;
4038         default:
4039                 error = EINVAL;
4040                 break;
4041         }
4042
4043         return (error);
4044 }
4045
4046 static moduledata_t pf_mod = {
4047         "pf",
4048         pf_modevent,
4049         0
4050 };
4051
4052 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
4053 MODULE_VERSION(pf, PF_MODVER);