2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2001 Daniel Hartmeier
5 * Copyright (c) 2002,2003 Henning Brauer
6 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
13 * - Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
33 * Effort sponsored in part by the Defense Advanced Research Projects
34 * Agency (DARPA) and Air Force Research Laboratory, Air Force
35 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
37 * $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
44 #include "opt_inet6.h"
48 #include <sys/param.h>
49 #include <sys/_bitset.h>
50 #include <sys/bitset.h>
53 #include <sys/endian.h>
54 #include <sys/fcntl.h>
55 #include <sys/filio.h>
57 #include <sys/interrupt.h>
59 #include <sys/kernel.h>
60 #include <sys/kthread.h>
63 #include <sys/module.h>
68 #include <sys/socket.h>
69 #include <sys/sysctl.h>
71 #include <sys/ucred.h>
74 #include <net/if_var.h>
76 #include <net/route.h>
78 #include <net/pfvar.h>
79 #include <net/if_pfsync.h>
80 #include <net/if_pflog.h>
82 #include <netinet/in.h>
83 #include <netinet/ip.h>
84 #include <netinet/ip_var.h>
85 #include <netinet6/ip6_var.h>
86 #include <netinet/ip_icmp.h>
87 #include <netpfil/pf/pf_nv.h>
90 #include <netinet/ip6.h>
94 #include <net/altq/altq.h>
97 SDT_PROVIDER_DECLARE(pf);
98 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
99 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
100 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
101 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
103 static struct pf_kpool *pf_get_kpool(char *, u_int32_t, u_int8_t, u_int32_t,
104 u_int8_t, u_int8_t, u_int8_t);
106 static void pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
107 static void pf_empty_kpool(struct pf_kpalist *);
108 static int pfioctl(struct cdev *, u_long, caddr_t, int,
111 static int pf_begin_altq(u_int32_t *);
112 static int pf_rollback_altq(u_int32_t);
113 static int pf_commit_altq(u_int32_t);
114 static int pf_enable_altq(struct pf_altq *);
115 static int pf_disable_altq(struct pf_altq *);
116 static u_int32_t pf_qname2qid(char *);
117 static void pf_qid_unref(u_int32_t);
119 static int pf_begin_rules(u_int32_t *, int, const char *);
120 static int pf_rollback_rules(u_int32_t, int, char *);
121 static int pf_setup_pfsync_matching(struct pf_kruleset *);
122 static void pf_hash_rule(MD5_CTX *, struct pf_krule *);
123 static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
124 static int pf_commit_rules(u_int32_t, int, char *);
125 static int pf_addr_setup(struct pf_kruleset *,
126 struct pf_addr_wrap *, sa_family_t);
127 static void pf_addr_copyout(struct pf_addr_wrap *);
128 static void pf_src_node_copy(const struct pf_ksrc_node *,
129 struct pf_src_node *);
131 static int pf_export_kaltq(struct pf_altq *,
132 struct pfioc_altq_v1 *, size_t);
133 static int pf_import_kaltq(struct pfioc_altq_v1 *,
134 struct pf_altq *, size_t);
137 VNET_DEFINE(struct pf_krule, pf_default_rule);
140 VNET_DEFINE_STATIC(int, pf_altq_running);
141 #define V_pf_altq_running VNET(pf_altq_running)
144 #define TAGID_MAX 50000
146 TAILQ_ENTRY(pf_tagname) namehash_entries;
147 TAILQ_ENTRY(pf_tagname) taghash_entries;
148 char name[PF_TAG_NAME_SIZE];
154 TAILQ_HEAD(, pf_tagname) *namehash;
155 TAILQ_HEAD(, pf_tagname) *taghash;
158 BITSET_DEFINE(, TAGID_MAX) avail;
161 VNET_DEFINE(struct pf_tagset, pf_tags);
162 #define V_pf_tags VNET(pf_tags)
163 static unsigned int pf_rule_tag_hashsize;
164 #define PF_RULE_TAG_HASH_SIZE_DEFAULT 128
165 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
166 &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
167 "Size of pf(4) rule tag hashtable");
170 VNET_DEFINE(struct pf_tagset, pf_qids);
171 #define V_pf_qids VNET(pf_qids)
172 static unsigned int pf_queue_tag_hashsize;
173 #define PF_QUEUE_TAG_HASH_SIZE_DEFAULT 128
174 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
175 &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
176 "Size of pf(4) queue tag hashtable");
178 VNET_DEFINE(uma_zone_t, pf_tag_z);
179 #define V_pf_tag_z VNET(pf_tag_z)
180 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
181 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
183 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
184 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
187 static void pf_init_tagset(struct pf_tagset *, unsigned int *,
189 static void pf_cleanup_tagset(struct pf_tagset *);
190 static uint16_t tagname2hashindex(const struct pf_tagset *, const char *);
191 static uint16_t tag2hashindex(const struct pf_tagset *, uint16_t);
192 static u_int16_t tagname2tag(struct pf_tagset *, char *);
193 static u_int16_t pf_tagname2tag(char *);
194 static void tag_unref(struct pf_tagset *, u_int16_t);
196 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
201 * XXX - These are new and need to be checked when moveing to a new version
203 static void pf_clear_all_states(void);
204 static unsigned int pf_clear_states(const struct pf_kstate_kill *);
205 static int pf_killstates(struct pf_kstate_kill *,
207 static int pf_killstates_row(struct pf_kstate_kill *,
209 static int pf_killstates_nv(struct pfioc_nv *);
210 static int pf_clearstates_nv(struct pfioc_nv *);
211 static int pf_clear_tables(void);
212 static void pf_clear_srcnodes(struct pf_ksrc_node *);
213 static void pf_kill_srcnodes(struct pfioc_src_node_kill *);
214 static int pf_keepcounters(struct pfioc_nv *);
215 static void pf_tbladdr_copyout(struct pf_addr_wrap *);
218 * Wrapper functions for pfil(9) hooks
221 static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
222 int dir, int flags, struct inpcb *inp);
223 static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
224 int dir, int flags, struct inpcb *inp);
227 static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
228 int dir, int flags, struct inpcb *inp);
229 static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
230 int dir, int flags, struct inpcb *inp);
233 static int hook_pf(void);
234 static int dehook_pf(void);
235 static int shutdown_pf(void);
236 static int pf_load(void);
237 static void pf_unload(void);
239 static struct cdevsw pf_cdevsw = {
242 .d_version = D_VERSION,
245 volatile VNET_DEFINE_STATIC(int, pf_pfil_hooked);
246 #define V_pf_pfil_hooked VNET(pf_pfil_hooked)
249 * We need a flag that is neither hooked nor running to know when
250 * the VNET is "valid". We primarily need this to control (global)
251 * external event, e.g., eventhandlers.
253 VNET_DEFINE(int, pf_vnet_active);
254 #define V_pf_vnet_active VNET(pf_vnet_active)
257 struct proc *pf_purge_proc;
259 struct rmlock pf_rules_lock;
260 struct sx pf_ioctl_lock;
261 struct sx pf_end_lock;
264 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
265 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
266 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
267 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
268 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
269 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
270 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
273 pflog_packet_t *pflog_packet_ptr = NULL;
275 extern u_long pf_ioctl_maxcount;
277 #define ERROUT_FUNCTION(target, x) \
280 SDT_PROBE3(pf, ioctl, function, error, __func__, error, \
288 u_int32_t *my_timeout = V_pf_default_rule.timeout;
292 pfi_initialize_vnet();
295 V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
296 V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
298 RB_INIT(&V_pf_anchors);
299 pf_init_kruleset(&pf_main_ruleset);
301 /* default rule should never be garbage collected */
302 V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
303 #ifdef PF_DEFAULT_TO_DROP
304 V_pf_default_rule.action = PF_DROP;
306 V_pf_default_rule.action = PF_PASS;
308 V_pf_default_rule.nr = -1;
309 V_pf_default_rule.rtableid = -1;
311 V_pf_default_rule.evaluations = counter_u64_alloc(M_WAITOK);
312 for (int i = 0; i < 2; i++) {
313 V_pf_default_rule.packets[i] = counter_u64_alloc(M_WAITOK);
314 V_pf_default_rule.bytes[i] = counter_u64_alloc(M_WAITOK);
316 V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
317 V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
318 V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);
320 /* initialize default timeouts */
321 my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
322 my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
323 my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
324 my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
325 my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
326 my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
327 my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
328 my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
329 my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
330 my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
331 my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
332 my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
333 my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
334 my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
335 my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
336 my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
337 my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
338 my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
339 my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
340 my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
342 bzero(&V_pf_status, sizeof(V_pf_status));
343 V_pf_status.debug = PF_DEBUG_URGENT;
345 V_pf_pfil_hooked = 0;
347 /* XXX do our best to avoid a conflict */
348 V_pf_status.hostid = arc4random();
350 for (int i = 0; i < PFRES_MAX; i++)
351 V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
352 for (int i = 0; i < LCNT_MAX; i++)
353 V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
354 for (int i = 0; i < FCNT_MAX; i++)
355 V_pf_status.fcounters[i] = counter_u64_alloc(M_WAITOK);
356 for (int i = 0; i < SCNT_MAX; i++)
357 V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
359 if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
360 INTR_MPSAFE, &V_pf_swi_cookie) != 0)
361 /* XXXGL: leaked all above. */
365 static struct pf_kpool *
366 pf_get_kpool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
367 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
368 u_int8_t check_ticket)
370 struct pf_kruleset *ruleset;
371 struct pf_krule *rule;
374 ruleset = pf_find_kruleset(anchor);
377 rs_num = pf_get_ruleset_number(rule_action);
378 if (rs_num >= PF_RULESET_MAX)
381 if (check_ticket && ticket !=
382 ruleset->rules[rs_num].active.ticket)
385 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
388 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
390 if (check_ticket && ticket !=
391 ruleset->rules[rs_num].inactive.ticket)
394 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
397 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
400 while ((rule != NULL) && (rule->nr != rule_number))
401 rule = TAILQ_NEXT(rule, entries);
406 return (&rule->rpool);
410 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
412 struct pf_kpooladdr *mv_pool_pa;
414 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
415 TAILQ_REMOVE(poola, mv_pool_pa, entries);
416 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
421 pf_empty_kpool(struct pf_kpalist *poola)
423 struct pf_kpooladdr *pa;
425 while ((pa = TAILQ_FIRST(poola)) != NULL) {
426 switch (pa->addr.type) {
427 case PF_ADDR_DYNIFTL:
428 pfi_dynaddr_remove(pa->addr.p.dyn);
431 /* XXX: this could be unfinished pooladdr on pabuf */
432 if (pa->addr.p.tbl != NULL)
433 pfr_detach_table(pa->addr.p.tbl);
437 pfi_kkif_unref(pa->kif);
438 TAILQ_REMOVE(poola, pa, entries);
444 pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
449 TAILQ_REMOVE(rulequeue, rule, entries);
451 PF_UNLNKDRULES_LOCK();
452 rule->rule_ref |= PFRULE_REFS;
453 TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
454 PF_UNLNKDRULES_UNLOCK();
458 pf_free_rule(struct pf_krule *rule)
464 tag_unref(&V_pf_tags, rule->tag);
466 tag_unref(&V_pf_tags, rule->match_tag);
468 if (rule->pqid != rule->qid)
469 pf_qid_unref(rule->pqid);
470 pf_qid_unref(rule->qid);
472 switch (rule->src.addr.type) {
473 case PF_ADDR_DYNIFTL:
474 pfi_dynaddr_remove(rule->src.addr.p.dyn);
477 pfr_detach_table(rule->src.addr.p.tbl);
480 switch (rule->dst.addr.type) {
481 case PF_ADDR_DYNIFTL:
482 pfi_dynaddr_remove(rule->dst.addr.p.dyn);
485 pfr_detach_table(rule->dst.addr.p.tbl);
488 if (rule->overload_tbl)
489 pfr_detach_table(rule->overload_tbl);
491 pfi_kkif_unref(rule->kif);
492 pf_kanchor_remove(rule);
493 pf_empty_kpool(&rule->rpool.list);
499 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
500 unsigned int default_size)
503 unsigned int hashsize;
505 if (*tunable_size == 0 || !powerof2(*tunable_size))
506 *tunable_size = default_size;
508 hashsize = *tunable_size;
509 ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
511 ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
513 ts->mask = hashsize - 1;
514 ts->seed = arc4random();
515 for (i = 0; i < hashsize; i++) {
516 TAILQ_INIT(&ts->namehash[i]);
517 TAILQ_INIT(&ts->taghash[i]);
519 BIT_FILL(TAGID_MAX, &ts->avail);
523 pf_cleanup_tagset(struct pf_tagset *ts)
526 unsigned int hashsize;
527 struct pf_tagname *t, *tmp;
530 * Only need to clean up one of the hashes as each tag is hashed
533 hashsize = ts->mask + 1;
534 for (i = 0; i < hashsize; i++)
535 TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
536 uma_zfree(V_pf_tag_z, t);
538 free(ts->namehash, M_PFHASH);
539 free(ts->taghash, M_PFHASH);
543 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
547 len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
548 return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
552 tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
555 return (tag & ts->mask);
559 tagname2tag(struct pf_tagset *ts, char *tagname)
561 struct pf_tagname *tag;
567 index = tagname2hashindex(ts, tagname);
568 TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
569 if (strcmp(tagname, tag->name) == 0) {
577 * to avoid fragmentation, we do a linear search from the beginning
578 * and take the first free slot we find.
580 new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
582 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
583 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
584 * set. It may also return a bit number greater than TAGID_MAX due
585 * to rounding of the number of bits in the vector up to a multiple
586 * of the vector word size at declaration/allocation time.
588 if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
591 /* Mark the tag as in use. Bits are 0-based for BIT_CLR() */
592 BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
594 /* allocate and fill new struct pf_tagname */
595 tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
598 strlcpy(tag->name, tagname, sizeof(tag->name));
599 tag->tag = new_tagid;
602 /* Insert into namehash */
603 TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
605 /* Insert into taghash */
606 index = tag2hashindex(ts, new_tagid);
607 TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
613 tag_unref(struct pf_tagset *ts, u_int16_t tag)
615 struct pf_tagname *t;
620 index = tag2hashindex(ts, tag);
621 TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
624 TAILQ_REMOVE(&ts->taghash[index], t,
626 index = tagname2hashindex(ts, t->name);
627 TAILQ_REMOVE(&ts->namehash[index], t,
629 /* Bits are 0-based for BIT_SET() */
630 BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
631 uma_zfree(V_pf_tag_z, t);
638 pf_tagname2tag(char *tagname)
640 return (tagname2tag(&V_pf_tags, tagname));
645 pf_qname2qid(char *qname)
647 return ((u_int32_t)tagname2tag(&V_pf_qids, qname));
651 pf_qid_unref(u_int32_t qid)
653 tag_unref(&V_pf_qids, (u_int16_t)qid);
657 pf_begin_altq(u_int32_t *ticket)
659 struct pf_altq *altq, *tmp;
664 /* Purge the old altq lists */
665 TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
666 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
667 /* detach and destroy the discipline */
668 error = altq_remove(altq);
670 free(altq, M_PFALTQ);
672 TAILQ_INIT(V_pf_altq_ifs_inactive);
673 TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
674 pf_qid_unref(altq->qid);
675 free(altq, M_PFALTQ);
677 TAILQ_INIT(V_pf_altqs_inactive);
680 *ticket = ++V_ticket_altqs_inactive;
681 V_altqs_inactive_open = 1;
686 pf_rollback_altq(u_int32_t ticket)
688 struct pf_altq *altq, *tmp;
693 if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
695 /* Purge the old altq lists */
696 TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
697 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
698 /* detach and destroy the discipline */
699 error = altq_remove(altq);
701 free(altq, M_PFALTQ);
703 TAILQ_INIT(V_pf_altq_ifs_inactive);
704 TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
705 pf_qid_unref(altq->qid);
706 free(altq, M_PFALTQ);
708 TAILQ_INIT(V_pf_altqs_inactive);
709 V_altqs_inactive_open = 0;
714 pf_commit_altq(u_int32_t ticket)
716 struct pf_altqqueue *old_altqs, *old_altq_ifs;
717 struct pf_altq *altq, *tmp;
722 if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
725 /* swap altqs, keep the old. */
726 old_altqs = V_pf_altqs_active;
727 old_altq_ifs = V_pf_altq_ifs_active;
728 V_pf_altqs_active = V_pf_altqs_inactive;
729 V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
730 V_pf_altqs_inactive = old_altqs;
731 V_pf_altq_ifs_inactive = old_altq_ifs;
732 V_ticket_altqs_active = V_ticket_altqs_inactive;
734 /* Attach new disciplines */
735 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
736 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
737 /* attach the discipline */
738 error = altq_pfattach(altq);
739 if (error == 0 && V_pf_altq_running)
740 error = pf_enable_altq(altq);
746 /* Purge the old altq lists */
747 TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
748 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
749 /* detach and destroy the discipline */
750 if (V_pf_altq_running)
751 error = pf_disable_altq(altq);
752 err = altq_pfdetach(altq);
753 if (err != 0 && error == 0)
755 err = altq_remove(altq);
756 if (err != 0 && error == 0)
759 free(altq, M_PFALTQ);
761 TAILQ_INIT(V_pf_altq_ifs_inactive);
762 TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
763 pf_qid_unref(altq->qid);
764 free(altq, M_PFALTQ);
766 TAILQ_INIT(V_pf_altqs_inactive);
768 V_altqs_inactive_open = 0;
773 pf_enable_altq(struct pf_altq *altq)
776 struct tb_profile tb;
779 if ((ifp = ifunit(altq->ifname)) == NULL)
782 if (ifp->if_snd.altq_type != ALTQT_NONE)
783 error = altq_enable(&ifp->if_snd);
785 /* set tokenbucket regulator */
786 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
787 tb.rate = altq->ifbandwidth;
788 tb.depth = altq->tbrsize;
789 error = tbr_set(&ifp->if_snd, &tb);
796 pf_disable_altq(struct pf_altq *altq)
799 struct tb_profile tb;
802 if ((ifp = ifunit(altq->ifname)) == NULL)
806 * when the discipline is no longer referenced, it was overridden
807 * by a new one. if so, just return.
809 if (altq->altq_disc != ifp->if_snd.altq_disc)
812 error = altq_disable(&ifp->if_snd);
815 /* clear tokenbucket regulator */
817 error = tbr_set(&ifp->if_snd, &tb);
824 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
825 struct pf_altq *altq)
830 /* Deactivate the interface in question */
831 altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
832 if ((ifp1 = ifunit(altq->ifname)) == NULL ||
833 (remove && ifp1 == ifp)) {
834 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
836 error = altq_add(ifp1, altq);
838 if (ticket != V_ticket_altqs_inactive)
842 free(altq, M_PFALTQ);
849 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
851 struct pf_altq *a1, *a2, *a3;
856 * No need to re-evaluate the configuration for events on interfaces
857 * that do not support ALTQ, as it's not possible for such
858 * interfaces to be part of the configuration.
860 if (!ALTQ_IS_READY(&ifp->if_snd))
863 /* Interrupt userland queue modifications */
864 if (V_altqs_inactive_open)
865 pf_rollback_altq(V_ticket_altqs_inactive);
867 /* Start new altq ruleset */
868 if (pf_begin_altq(&ticket))
871 /* Copy the current active set */
872 TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
873 a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
878 bcopy(a1, a2, sizeof(struct pf_altq));
880 error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
884 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
888 TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
889 a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
894 bcopy(a1, a2, sizeof(struct pf_altq));
896 if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
901 a2->altq_disc = NULL;
902 TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
903 if (strncmp(a3->ifname, a2->ifname,
905 a2->altq_disc = a3->altq_disc;
909 error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
913 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
918 pf_rollback_altq(ticket);
920 pf_commit_altq(ticket);
925 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
927 struct pf_kruleset *rs;
928 struct pf_krule *rule;
932 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
934 rs = pf_find_or_create_kruleset(anchor);
937 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
938 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
939 rs->rules[rs_num].inactive.rcount--;
941 *ticket = ++rs->rules[rs_num].inactive.ticket;
942 rs->rules[rs_num].inactive.open = 1;
947 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
949 struct pf_kruleset *rs;
950 struct pf_krule *rule;
954 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
956 rs = pf_find_kruleset(anchor);
957 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
958 rs->rules[rs_num].inactive.ticket != ticket)
960 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
961 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
962 rs->rules[rs_num].inactive.rcount--;
964 rs->rules[rs_num].inactive.open = 0;
968 #define PF_MD5_UPD(st, elm) \
969 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
971 #define PF_MD5_UPD_STR(st, elm) \
972 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
974 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \
975 (stor) = htonl((st)->elm); \
976 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
979 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \
980 (stor) = htons((st)->elm); \
981 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
985 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
987 PF_MD5_UPD(pfr, addr.type);
988 switch (pfr->addr.type) {
989 case PF_ADDR_DYNIFTL:
990 PF_MD5_UPD(pfr, addr.v.ifname);
991 PF_MD5_UPD(pfr, addr.iflags);
994 PF_MD5_UPD(pfr, addr.v.tblname);
996 case PF_ADDR_ADDRMASK:
998 PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
999 PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
1003 PF_MD5_UPD(pfr, port[0]);
1004 PF_MD5_UPD(pfr, port[1]);
1005 PF_MD5_UPD(pfr, neg);
1006 PF_MD5_UPD(pfr, port_op);
1010 pf_hash_rule(MD5_CTX *ctx, struct pf_krule *rule)
1015 pf_hash_rule_addr(ctx, &rule->src);
1016 pf_hash_rule_addr(ctx, &rule->dst);
1017 for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
1018 PF_MD5_UPD_STR(rule, label[i]);
1019 PF_MD5_UPD_STR(rule, ifname);
1020 PF_MD5_UPD_STR(rule, match_tagname);
1021 PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
1022 PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
1023 PF_MD5_UPD_HTONL(rule, prob, y);
1024 PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
1025 PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
1026 PF_MD5_UPD(rule, uid.op);
1027 PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
1028 PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
1029 PF_MD5_UPD(rule, gid.op);
1030 PF_MD5_UPD_HTONL(rule, rule_flag, y);
1031 PF_MD5_UPD(rule, action);
1032 PF_MD5_UPD(rule, direction);
1033 PF_MD5_UPD(rule, af);
1034 PF_MD5_UPD(rule, quick);
1035 PF_MD5_UPD(rule, ifnot);
1036 PF_MD5_UPD(rule, match_tag_not);
1037 PF_MD5_UPD(rule, natpass);
1038 PF_MD5_UPD(rule, keep_state);
1039 PF_MD5_UPD(rule, proto);
1040 PF_MD5_UPD(rule, type);
1041 PF_MD5_UPD(rule, code);
1042 PF_MD5_UPD(rule, flags);
1043 PF_MD5_UPD(rule, flagset);
1044 PF_MD5_UPD(rule, allow_opts);
1045 PF_MD5_UPD(rule, rt);
1046 PF_MD5_UPD(rule, tos);
1050 pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
1053 u_int8_t digest[2][PF_MD5_DIGEST_LENGTH];
1057 pf_hash_rule(&ctx[0], a);
1058 pf_hash_rule(&ctx[1], b);
1059 MD5Final(digest[0], &ctx[0]);
1060 MD5Final(digest[1], &ctx[1]);
1062 return (memcmp(digest[0], digest[1], PF_MD5_DIGEST_LENGTH) == 0);
1066 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1068 struct pf_kruleset *rs;
1069 struct pf_krule *rule, **old_array, *tail;
1070 struct pf_krulequeue *old_rules;
1072 u_int32_t old_rcount;
1076 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1078 rs = pf_find_kruleset(anchor);
1079 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1080 ticket != rs->rules[rs_num].inactive.ticket)
1083 /* Calculate checksum for the main ruleset */
1084 if (rs == &pf_main_ruleset) {
1085 error = pf_setup_pfsync_matching(rs);
1090 /* Swap rules, keep the old. */
1091 old_rules = rs->rules[rs_num].active.ptr;
1092 old_rcount = rs->rules[rs_num].active.rcount;
1093 old_array = rs->rules[rs_num].active.ptr_array;
1095 rs->rules[rs_num].active.ptr =
1096 rs->rules[rs_num].inactive.ptr;
1097 rs->rules[rs_num].active.ptr_array =
1098 rs->rules[rs_num].inactive.ptr_array;
1099 rs->rules[rs_num].active.rcount =
1100 rs->rules[rs_num].inactive.rcount;
1102 /* Attempt to preserve counter information. */
1103 if (V_pf_status.keep_counters) {
1104 TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
1106 tail = TAILQ_FIRST(old_rules);
1107 while ((tail != NULL) && ! pf_krule_compare(tail, rule))
1108 tail = TAILQ_NEXT(tail, entries);
1110 counter_u64_add(rule->evaluations,
1111 counter_u64_fetch(tail->evaluations));
1112 counter_u64_add(rule->packets[0],
1113 counter_u64_fetch(tail->packets[0]));
1114 counter_u64_add(rule->packets[1],
1115 counter_u64_fetch(tail->packets[1]));
1116 counter_u64_add(rule->bytes[0],
1117 counter_u64_fetch(tail->bytes[0]));
1118 counter_u64_add(rule->bytes[1],
1119 counter_u64_fetch(tail->bytes[1]));
1124 rs->rules[rs_num].inactive.ptr = old_rules;
1125 rs->rules[rs_num].inactive.ptr_array = old_array;
1126 rs->rules[rs_num].inactive.rcount = old_rcount;
1128 rs->rules[rs_num].active.ticket =
1129 rs->rules[rs_num].inactive.ticket;
1130 pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1133 /* Purge the old rule list. */
1134 while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1135 pf_unlink_rule(old_rules, rule);
1136 if (rs->rules[rs_num].inactive.ptr_array)
1137 free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
1138 rs->rules[rs_num].inactive.ptr_array = NULL;
1139 rs->rules[rs_num].inactive.rcount = 0;
1140 rs->rules[rs_num].inactive.open = 0;
1141 pf_remove_if_empty_kruleset(rs);
1147 pf_setup_pfsync_matching(struct pf_kruleset *rs)
1150 struct pf_krule *rule;
1152 u_int8_t digest[PF_MD5_DIGEST_LENGTH];
1155 for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1156 /* XXX PF_RULESET_SCRUB as well? */
1157 if (rs_cnt == PF_RULESET_SCRUB)
1160 if (rs->rules[rs_cnt].inactive.ptr_array)
1161 free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
1162 rs->rules[rs_cnt].inactive.ptr_array = NULL;
1164 if (rs->rules[rs_cnt].inactive.rcount) {
1165 rs->rules[rs_cnt].inactive.ptr_array =
1166 malloc(sizeof(caddr_t) *
1167 rs->rules[rs_cnt].inactive.rcount,
1170 if (!rs->rules[rs_cnt].inactive.ptr_array)
1174 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1176 pf_hash_rule(&ctx, rule);
1177 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
1181 MD5Final(digest, &ctx);
1182 memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
1187 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
1192 switch (addr->type) {
1194 addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
1195 if (addr->p.tbl == NULL)
1198 case PF_ADDR_DYNIFTL:
1199 error = pfi_dynaddr_setup(addr, af);
1207 pf_addr_copyout(struct pf_addr_wrap *addr)
1210 switch (addr->type) {
1211 case PF_ADDR_DYNIFTL:
1212 pfi_dynaddr_copyout(addr);
1215 pf_tbladdr_copyout(addr);
1221 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
1223 int secs = time_uptime, diff;
1225 bzero(out, sizeof(struct pf_src_node));
1227 bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
1228 bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
1230 if (in->rule.ptr != NULL)
1231 out->rule.nr = in->rule.ptr->nr;
1233 for (int i = 0; i < 2; i++) {
1234 out->bytes[i] = counter_u64_fetch(in->bytes[i]);
1235 out->packets[i] = counter_u64_fetch(in->packets[i]);
1238 out->states = in->states;
1239 out->conn = in->conn;
1241 out->ruletype = in->ruletype;
1243 out->creation = secs - in->creation;
1244 if (out->expire > secs)
1245 out->expire -= secs;
1249 /* Adjust the connection rate estimate. */
1250 diff = secs - in->conn_rate.last;
1251 if (diff >= in->conn_rate.seconds)
1252 out->conn_rate.count = 0;
1254 out->conn_rate.count -=
1255 in->conn_rate.count * diff /
1256 in->conn_rate.seconds;
1261 * Handle export of struct pf_kaltq to user binaries that may be using any
1262 * version of struct pf_altq.
1265 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
1269 if (ioc_size == sizeof(struct pfioc_altq_v0))
1272 version = pa->version;
1274 if (version > PFIOC_ALTQ_VERSION)
1277 #define ASSIGN(x) exported_q->x = q->x
1279 bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
1280 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
1281 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
1285 struct pf_altq_v0 *exported_q =
1286 &((struct pfioc_altq_v0 *)pa)->altq;
1292 exported_q->tbrsize = SATU16(q->tbrsize);
1293 exported_q->ifbandwidth = SATU32(q->ifbandwidth);
1298 exported_q->bandwidth = SATU32(q->bandwidth);
1300 ASSIGN(local_flags);
1305 if (q->scheduler == ALTQT_HFSC) {
1306 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
1307 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
1308 SATU32(q->pq_u.hfsc_opts.x)
1310 ASSIGN_OPT_SATU32(rtsc_m1);
1312 ASSIGN_OPT_SATU32(rtsc_m2);
1314 ASSIGN_OPT_SATU32(lssc_m1);
1316 ASSIGN_OPT_SATU32(lssc_m2);
1318 ASSIGN_OPT_SATU32(ulsc_m1);
1320 ASSIGN_OPT_SATU32(ulsc_m2);
1325 #undef ASSIGN_OPT_SATU32
1333 struct pf_altq_v1 *exported_q =
1334 &((struct pfioc_altq_v1 *)pa)->altq;
1340 ASSIGN(ifbandwidth);
1347 ASSIGN(local_flags);
1357 panic("%s: unhandled struct pfioc_altq version", __func__);
1370 * Handle import to struct pf_kaltq of struct pf_altq from user binaries
1371 * that may be using any version of it.
1374 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
1378 if (ioc_size == sizeof(struct pfioc_altq_v0))
1381 version = pa->version;
1383 if (version > PFIOC_ALTQ_VERSION)
1386 #define ASSIGN(x) q->x = imported_q->x
1388 bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
1392 struct pf_altq_v0 *imported_q =
1393 &((struct pfioc_altq_v0 *)pa)->altq;
1398 ASSIGN(tbrsize); /* 16-bit -> 32-bit */
1399 ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
1404 ASSIGN(bandwidth); /* 32-bit -> 64-bit */
1406 ASSIGN(local_flags);
1411 if (imported_q->scheduler == ALTQT_HFSC) {
1412 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
1415 * The m1 and m2 parameters are being copied from
1418 ASSIGN_OPT(rtsc_m1);
1420 ASSIGN_OPT(rtsc_m2);
1422 ASSIGN_OPT(lssc_m1);
1424 ASSIGN_OPT(lssc_m2);
1426 ASSIGN_OPT(ulsc_m1);
1428 ASSIGN_OPT(ulsc_m2);
1440 struct pf_altq_v1 *imported_q =
1441 &((struct pfioc_altq_v1 *)pa)->altq;
1447 ASSIGN(ifbandwidth);
1454 ASSIGN(local_flags);
1464 panic("%s: unhandled struct pfioc_altq version", __func__);
1474 static struct pf_altq *
1475 pf_altq_get_nth_active(u_int32_t n)
1477 struct pf_altq *altq;
1481 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1487 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1498 pf_krule_free(struct pf_krule *rule)
1503 counter_u64_free(rule->evaluations);
1504 for (int i = 0; i < 2; i++) {
1505 counter_u64_free(rule->packets[i]);
1506 counter_u64_free(rule->bytes[i]);
1508 counter_u64_free(rule->states_cur);
1509 counter_u64_free(rule->states_tot);
1510 counter_u64_free(rule->src_nodes);
1511 free(rule, M_PFRULE);
1515 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1516 struct pf_pooladdr *pool)
1519 bzero(pool, sizeof(*pool));
1520 bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1521 strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1525 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1526 struct pf_kpooladdr *kpool)
1529 bzero(kpool, sizeof(*kpool));
1530 bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1531 strlcpy(kpool->ifname, pool->ifname, sizeof(kpool->ifname));
1535 pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool)
1537 bzero(pool, sizeof(*pool));
1539 bcopy(&kpool->key, &pool->key, sizeof(pool->key));
1540 bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter));
1542 pool->tblidx = kpool->tblidx;
1543 pool->proxy_port[0] = kpool->proxy_port[0];
1544 pool->proxy_port[1] = kpool->proxy_port[1];
1545 pool->opts = kpool->opts;
1549 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
1551 _Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
1552 _Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
1554 bzero(kpool, sizeof(*kpool));
1556 bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
1557 bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
1559 kpool->tblidx = pool->tblidx;
1560 kpool->proxy_port[0] = pool->proxy_port[0];
1561 kpool->proxy_port[1] = pool->proxy_port[1];
1562 kpool->opts = pool->opts;
1568 pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
1571 bzero(rule, sizeof(*rule));
1573 bcopy(&krule->src, &rule->src, sizeof(rule->src));
1574 bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));
1576 for (int i = 0; i < PF_SKIP_COUNT; ++i) {
1577 if (rule->skip[i].ptr == NULL)
1578 rule->skip[i].nr = -1;
1580 rule->skip[i].nr = krule->skip[i].ptr->nr;
1583 strlcpy(rule->label, krule->label[0], sizeof(rule->label));
1584 strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
1585 strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
1586 strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
1587 strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
1588 strlcpy(rule->match_tagname, krule->match_tagname,
1589 sizeof(rule->match_tagname));
1590 strlcpy(rule->overload_tblname, krule->overload_tblname,
1591 sizeof(rule->overload_tblname));
1593 pf_kpool_to_pool(&krule->rpool, &rule->rpool);
1595 rule->evaluations = counter_u64_fetch(krule->evaluations);
1596 for (int i = 0; i < 2; i++) {
1597 rule->packets[i] = counter_u64_fetch(krule->packets[i]);
1598 rule->bytes[i] = counter_u64_fetch(krule->bytes[i]);
1601 /* kif, anchor, overload_tbl are not copied over. */
1603 rule->os_fingerprint = krule->os_fingerprint;
1605 rule->rtableid = krule->rtableid;
1606 bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout));
1607 rule->max_states = krule->max_states;
1608 rule->max_src_nodes = krule->max_src_nodes;
1609 rule->max_src_states = krule->max_src_states;
1610 rule->max_src_conn = krule->max_src_conn;
1611 rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit;
1612 rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds;
1613 rule->qid = krule->qid;
1614 rule->pqid = krule->pqid;
1615 rule->nr = krule->nr;
1616 rule->prob = krule->prob;
1617 rule->cuid = krule->cuid;
1618 rule->cpid = krule->cpid;
1620 rule->return_icmp = krule->return_icmp;
1621 rule->return_icmp6 = krule->return_icmp6;
1622 rule->max_mss = krule->max_mss;
1623 rule->tag = krule->tag;
1624 rule->match_tag = krule->match_tag;
1625 rule->scrub_flags = krule->scrub_flags;
1627 bcopy(&krule->uid, &rule->uid, sizeof(krule->uid));
1628 bcopy(&krule->gid, &rule->gid, sizeof(krule->gid));
1630 rule->rule_flag = krule->rule_flag;
1631 rule->action = krule->action;
1632 rule->direction = krule->direction;
1633 rule->log = krule->log;
1634 rule->logif = krule->logif;
1635 rule->quick = krule->quick;
1636 rule->ifnot = krule->ifnot;
1637 rule->match_tag_not = krule->match_tag_not;
1638 rule->natpass = krule->natpass;
1640 rule->keep_state = krule->keep_state;
1641 rule->af = krule->af;
1642 rule->proto = krule->proto;
1643 rule->type = krule->type;
1644 rule->code = krule->code;
1645 rule->flags = krule->flags;
1646 rule->flagset = krule->flagset;
1647 rule->min_ttl = krule->min_ttl;
1648 rule->allow_opts = krule->allow_opts;
1649 rule->rt = krule->rt;
1650 rule->return_ttl = krule->return_ttl;
1651 rule->tos = krule->tos;
1652 rule->set_tos = krule->set_tos;
1653 rule->anchor_relative = krule->anchor_relative;
1654 rule->anchor_wildcard = krule->anchor_wildcard;
1656 rule->flush = krule->flush;
1657 rule->prio = krule->prio;
1658 rule->set_prio[0] = krule->set_prio[0];
1659 rule->set_prio[1] = krule->set_prio[1];
1661 bcopy(&krule->divert, &rule->divert, sizeof(krule->divert));
1663 rule->u_states_cur = counter_u64_fetch(krule->states_cur);
1664 rule->u_states_tot = counter_u64_fetch(krule->states_tot);
1665 rule->u_src_nodes = counter_u64_fetch(krule->src_nodes);
1669 pf_check_rule_addr(const struct pf_rule_addr *addr)
1672 switch (addr->addr.type) {
1673 case PF_ADDR_ADDRMASK:
1674 case PF_ADDR_NOROUTE:
1675 case PF_ADDR_DYNIFTL:
1677 case PF_ADDR_URPFFAILED:
1684 if (addr->addr.p.dyn != NULL) {
1692 pf_nvaddr_to_addr(const nvlist_t *nvl, struct pf_addr *paddr)
1694 return (pf_nvbinary(nvl, "addr", paddr, sizeof(*paddr)));
1698 pf_addr_to_nvaddr(const struct pf_addr *paddr)
1702 nvl = nvlist_create(0);
1706 nvlist_add_binary(nvl, "addr", paddr, sizeof(*paddr));
1712 pf_nvmape_to_mape(const nvlist_t *nvl, struct pf_mape_portset *mape)
1716 bzero(mape, sizeof(*mape));
1717 PFNV_CHK(pf_nvuint8(nvl, "offset", &mape->offset));
1718 PFNV_CHK(pf_nvuint8(nvl, "psidlen", &mape->psidlen));
1719 PFNV_CHK(pf_nvuint16(nvl, "psid", &mape->psid));
1726 pf_mape_to_nvmape(const struct pf_mape_portset *mape)
1730 nvl = nvlist_create(0);
1734 nvlist_add_number(nvl, "offset", mape->offset);
1735 nvlist_add_number(nvl, "psidlen", mape->psidlen);
1736 nvlist_add_number(nvl, "psid", mape->psid);
1742 pf_nvpool_to_pool(const nvlist_t *nvl, struct pf_kpool *kpool)
1746 bzero(kpool, sizeof(*kpool));
1748 PFNV_CHK(pf_nvbinary(nvl, "key", &kpool->key, sizeof(kpool->key)));
1750 if (nvlist_exists_nvlist(nvl, "counter")) {
1751 PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "counter"),
1755 PFNV_CHK(pf_nvint(nvl, "tblidx", &kpool->tblidx));
1756 PFNV_CHK(pf_nvuint16_array(nvl, "proxy_port", kpool->proxy_port, 2,
1758 PFNV_CHK(pf_nvuint8(nvl, "opts", &kpool->opts));
1760 if (nvlist_exists_nvlist(nvl, "mape")) {
1761 PFNV_CHK(pf_nvmape_to_mape(nvlist_get_nvlist(nvl, "mape"),
1770 pf_pool_to_nvpool(const struct pf_kpool *pool)
1775 nvl = nvlist_create(0);
1779 nvlist_add_binary(nvl, "key", &pool->key, sizeof(pool->key));
1780 tmp = pf_addr_to_nvaddr(&pool->counter);
1783 nvlist_add_nvlist(nvl, "counter", tmp);
1785 nvlist_add_number(nvl, "tblidx", pool->tblidx);
1786 pf_uint16_array_nv(nvl, "proxy_port", pool->proxy_port, 2);
1787 nvlist_add_number(nvl, "opts", pool->opts);
1789 tmp = pf_mape_to_nvmape(&pool->mape);
1792 nvlist_add_nvlist(nvl, "mape", tmp);
1797 nvlist_destroy(nvl);
1802 pf_nvaddr_wrap_to_addr_wrap(const nvlist_t *nvl, struct pf_addr_wrap *addr)
1806 bzero(addr, sizeof(*addr));
1808 PFNV_CHK(pf_nvuint8(nvl, "type", &addr->type));
1809 PFNV_CHK(pf_nvuint8(nvl, "iflags", &addr->iflags));
1810 if (addr->type == PF_ADDR_DYNIFTL)
1811 PFNV_CHK(pf_nvstring(nvl, "ifname", addr->v.ifname,
1812 sizeof(addr->v.ifname)));
1813 if (addr->type == PF_ADDR_TABLE)
1814 PFNV_CHK(pf_nvstring(nvl, "tblname", addr->v.tblname,
1815 sizeof(addr->v.tblname)));
1817 if (! nvlist_exists_nvlist(nvl, "addr"))
1819 PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "addr"),
1822 if (! nvlist_exists_nvlist(nvl, "mask"))
1824 PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "mask"),
1827 switch (addr->type) {
1828 case PF_ADDR_DYNIFTL:
1831 case PF_ADDR_ADDRMASK:
1832 case PF_ADDR_NOROUTE:
1833 case PF_ADDR_URPFFAILED:
1844 pf_addr_wrap_to_nvaddr_wrap(const struct pf_addr_wrap *addr)
1849 nvl = nvlist_create(0);
1853 nvlist_add_number(nvl, "type", addr->type);
1854 nvlist_add_number(nvl, "iflags", addr->iflags);
1855 if (addr->type == PF_ADDR_DYNIFTL)
1856 nvlist_add_string(nvl, "ifname", addr->v.ifname);
1857 if (addr->type == PF_ADDR_TABLE)
1858 nvlist_add_string(nvl, "tblname", addr->v.tblname);
1860 tmp = pf_addr_to_nvaddr(&addr->v.a.addr);
1863 nvlist_add_nvlist(nvl, "addr", tmp);
1864 tmp = pf_addr_to_nvaddr(&addr->v.a.mask);
1867 nvlist_add_nvlist(nvl, "mask", tmp);
1872 nvlist_destroy(nvl);
1877 pf_validate_op(uint8_t op)
1899 pf_nvrule_addr_to_rule_addr(const nvlist_t *nvl, struct pf_rule_addr *addr)
1903 if (! nvlist_exists_nvlist(nvl, "addr"))
1906 PFNV_CHK(pf_nvaddr_wrap_to_addr_wrap(nvlist_get_nvlist(nvl, "addr"),
1908 PFNV_CHK(pf_nvuint16_array(nvl, "port", addr->port, 2, NULL));
1909 PFNV_CHK(pf_nvuint8(nvl, "neg", &addr->neg));
1910 PFNV_CHK(pf_nvuint8(nvl, "port_op", &addr->port_op));
1912 PFNV_CHK(pf_validate_op(addr->port_op));
1919 pf_rule_addr_to_nvrule_addr(const struct pf_rule_addr *addr)
1924 nvl = nvlist_create(0);
1928 tmp = pf_addr_wrap_to_nvaddr_wrap(&addr->addr);
1931 nvlist_add_nvlist(nvl, "addr", tmp);
1932 pf_uint16_array_nv(nvl, "port", addr->port, 2);
1933 nvlist_add_number(nvl, "neg", addr->neg);
1934 nvlist_add_number(nvl, "port_op", addr->port_op);
1939 nvlist_destroy(nvl);
1944 pf_nvrule_uid_to_rule_uid(const nvlist_t *nvl, struct pf_rule_uid *uid)
1948 bzero(uid, sizeof(*uid));
1950 PFNV_CHK(pf_nvuint32_array(nvl, "uid", uid->uid, 2, NULL));
1951 PFNV_CHK(pf_nvuint8(nvl, "op", &uid->op));
1953 PFNV_CHK(pf_validate_op(uid->op));
1960 pf_rule_uid_to_nvrule_uid(const struct pf_rule_uid *uid)
1964 nvl = nvlist_create(0);
1968 pf_uint32_array_nv(nvl, "uid", uid->uid, 2);
1969 nvlist_add_number(nvl, "op", uid->op);
1975 pf_nvrule_gid_to_rule_gid(const nvlist_t *nvl, struct pf_rule_gid *gid)
1977 /* Cheat a little. These stucts are the same, other than the name of
1978 * the first field. */
1979 return (pf_nvrule_uid_to_rule_uid(nvl, (struct pf_rule_uid *)gid));
1983 pf_nvrule_to_krule(const nvlist_t *nvl, struct pf_krule **prule)
1985 struct pf_krule *rule;
1988 #define ERROUT(x) ERROUT_FUNCTION(errout, x)
1990 rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK | M_ZERO);
1992 PFNV_CHK(pf_nvuint32(nvl, "nr", &rule->nr));
1994 if (! nvlist_exists_nvlist(nvl, "src"))
1997 error = pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "src"),
2002 if (! nvlist_exists_nvlist(nvl, "dst"))
2005 PFNV_CHK(pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "dst"),
2008 if (nvlist_exists_string(nvl, "label")) {
2009 PFNV_CHK(pf_nvstring(nvl, "label", rule->label[0],
2010 sizeof(rule->label[0])));
2011 } else if (nvlist_exists_string_array(nvl, "labels")) {
2012 const char *const *strs;
2016 strs = nvlist_get_string_array(nvl, "labels", &items);
2017 if (items > PF_RULE_MAX_LABEL_COUNT)
2020 for (size_t i = 0; i < items; i++) {
2021 ret = strlcpy(rule->label[i], strs[i],
2022 sizeof(rule->label[0]));
2023 if (ret >= sizeof(rule->label[0]))
2028 PFNV_CHK(pf_nvstring(nvl, "ifname", rule->ifname,
2029 sizeof(rule->ifname)));
2030 PFNV_CHK(pf_nvstring(nvl, "qname", rule->qname, sizeof(rule->qname)));
2031 PFNV_CHK(pf_nvstring(nvl, "pqname", rule->pqname,
2032 sizeof(rule->pqname)));
2033 PFNV_CHK(pf_nvstring(nvl, "tagname", rule->tagname,
2034 sizeof(rule->tagname)));
2035 PFNV_CHK(pf_nvstring(nvl, "match_tagname", rule->match_tagname,
2036 sizeof(rule->match_tagname)));
2037 PFNV_CHK(pf_nvstring(nvl, "overload_tblname", rule->overload_tblname,
2038 sizeof(rule->overload_tblname)));
2040 if (! nvlist_exists_nvlist(nvl, "rpool"))
2042 PFNV_CHK(pf_nvpool_to_pool(nvlist_get_nvlist(nvl, "rpool"),
2045 PFNV_CHK(pf_nvuint32(nvl, "os_fingerprint", &rule->os_fingerprint));
2047 PFNV_CHK(pf_nvint(nvl, "rtableid", &rule->rtableid));
2048 PFNV_CHK(pf_nvuint32_array(nvl, "timeout", rule->timeout, PFTM_MAX, NULL));
2049 PFNV_CHK(pf_nvuint32(nvl, "max_states", &rule->max_states));
2050 PFNV_CHK(pf_nvuint32(nvl, "max_src_nodes", &rule->max_src_nodes));
2051 PFNV_CHK(pf_nvuint32(nvl, "max_src_states", &rule->max_src_states));
2052 PFNV_CHK(pf_nvuint32(nvl, "max_src_conn", &rule->max_src_conn));
2053 PFNV_CHK(pf_nvuint32(nvl, "max_src_conn_rate.limit",
2054 &rule->max_src_conn_rate.limit));
2055 PFNV_CHK(pf_nvuint32(nvl, "max_src_conn_rate.seconds",
2056 &rule->max_src_conn_rate.seconds));
2057 PFNV_CHK(pf_nvuint32(nvl, "prob", &rule->prob));
2058 PFNV_CHK(pf_nvuint32(nvl, "cuid", &rule->cuid));
2059 PFNV_CHK(pf_nvuint32(nvl, "cpid", &rule->cpid));
2061 PFNV_CHK(pf_nvuint16(nvl, "return_icmp", &rule->return_icmp));
2062 PFNV_CHK(pf_nvuint16(nvl, "return_icmp6", &rule->return_icmp6));
2064 PFNV_CHK(pf_nvuint16(nvl, "max_mss", &rule->max_mss));
2065 PFNV_CHK(pf_nvuint16(nvl, "scrub_flags", &rule->scrub_flags));
2067 if (! nvlist_exists_nvlist(nvl, "uid"))
2069 PFNV_CHK(pf_nvrule_uid_to_rule_uid(nvlist_get_nvlist(nvl, "uid"),
2072 if (! nvlist_exists_nvlist(nvl, "gid"))
2074 PFNV_CHK(pf_nvrule_gid_to_rule_gid(nvlist_get_nvlist(nvl, "gid"),
2077 PFNV_CHK(pf_nvuint32(nvl, "rule_flag", &rule->rule_flag));
2078 PFNV_CHK(pf_nvuint8(nvl, "action", &rule->action));
2079 PFNV_CHK(pf_nvuint8(nvl, "direction", &rule->direction));
2080 PFNV_CHK(pf_nvuint8(nvl, "log", &rule->log));
2081 PFNV_CHK(pf_nvuint8(nvl, "logif", &rule->logif));
2082 PFNV_CHK(pf_nvuint8(nvl, "quick", &rule->quick));
2083 PFNV_CHK(pf_nvuint8(nvl, "ifnot", &rule->ifnot));
2084 PFNV_CHK(pf_nvuint8(nvl, "match_tag_not", &rule->match_tag_not));
2085 PFNV_CHK(pf_nvuint8(nvl, "natpass", &rule->natpass));
2087 PFNV_CHK(pf_nvuint8(nvl, "keep_state", &rule->keep_state));
2088 PFNV_CHK(pf_nvuint8(nvl, "af", &rule->af));
2089 PFNV_CHK(pf_nvuint8(nvl, "proto", &rule->proto));
2090 PFNV_CHK(pf_nvuint8(nvl, "type", &rule->type));
2091 PFNV_CHK(pf_nvuint8(nvl, "code", &rule->code));
2092 PFNV_CHK(pf_nvuint8(nvl, "flags", &rule->flags));
2093 PFNV_CHK(pf_nvuint8(nvl, "flagset", &rule->flagset));
2094 PFNV_CHK(pf_nvuint8(nvl, "min_ttl", &rule->min_ttl));
2095 PFNV_CHK(pf_nvuint8(nvl, "allow_opts", &rule->allow_opts));
2096 PFNV_CHK(pf_nvuint8(nvl, "rt", &rule->rt));
2097 PFNV_CHK(pf_nvuint8(nvl, "return_ttl", &rule->return_ttl));
2098 PFNV_CHK(pf_nvuint8(nvl, "tos", &rule->tos));
2099 PFNV_CHK(pf_nvuint8(nvl, "set_tos", &rule->set_tos));
2100 PFNV_CHK(pf_nvuint8(nvl, "anchor_relative", &rule->anchor_relative));
2101 PFNV_CHK(pf_nvuint8(nvl, "anchor_wildcard", &rule->anchor_wildcard));
2103 PFNV_CHK(pf_nvuint8(nvl, "flush", &rule->flush));
2104 PFNV_CHK(pf_nvuint8(nvl, "prio", &rule->prio));
2106 PFNV_CHK(pf_nvuint8_array(nvl, "set_prio", &rule->prio, 2, NULL));
2108 if (nvlist_exists_nvlist(nvl, "divert")) {
2109 const nvlist_t *nvldivert = nvlist_get_nvlist(nvl, "divert");
2111 if (! nvlist_exists_nvlist(nvldivert, "addr"))
2113 PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvldivert, "addr"),
2114 &rule->divert.addr));
2115 PFNV_CHK(pf_nvuint16(nvldivert, "port", &rule->divert.port));
2120 if (rule->af == AF_INET)
2121 ERROUT(EAFNOSUPPORT);
2124 if (rule->af == AF_INET6)
2125 ERROUT(EAFNOSUPPORT);
2128 PFNV_CHK(pf_check_rule_addr(&rule->src));
2129 PFNV_CHK(pf_check_rule_addr(&rule->dst));
2137 pf_krule_free(rule);
2144 pf_divert_to_nvdivert(const struct pf_krule *rule)
2149 nvl = nvlist_create(0);
2153 tmp = pf_addr_to_nvaddr(&rule->divert.addr);
2156 nvlist_add_nvlist(nvl, "addr", tmp);
2157 nvlist_add_number(nvl, "port", rule->divert.port);
2162 nvlist_destroy(nvl);
2167 pf_krule_to_nvrule(const struct pf_krule *rule)
2169 nvlist_t *nvl, *tmp;
2171 nvl = nvlist_create(0);
2175 nvlist_add_number(nvl, "nr", rule->nr);
2176 tmp = pf_rule_addr_to_nvrule_addr(&rule->src);
2179 nvlist_add_nvlist(nvl, "src", tmp);
2180 tmp = pf_rule_addr_to_nvrule_addr(&rule->dst);
2183 nvlist_add_nvlist(nvl, "dst", tmp);
2185 for (int i = 0; i < PF_SKIP_COUNT; i++) {
2186 nvlist_append_number_array(nvl, "skip",
2187 rule->skip[i].ptr ? rule->skip[i].ptr->nr : -1);
2190 for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++) {
2191 nvlist_append_string_array(nvl, "labels", rule->label[i]);
2193 nvlist_add_string(nvl, "label", rule->label[0]);
2194 nvlist_add_string(nvl, "ifname", rule->ifname);
2195 nvlist_add_string(nvl, "qname", rule->qname);
2196 nvlist_add_string(nvl, "pqname", rule->pqname);
2197 nvlist_add_string(nvl, "tagname", rule->tagname);
2198 nvlist_add_string(nvl, "match_tagname", rule->match_tagname);
2199 nvlist_add_string(nvl, "overload_tblname", rule->overload_tblname);
2201 tmp = pf_pool_to_nvpool(&rule->rpool);
2204 nvlist_add_nvlist(nvl, "rpool", tmp);
2206 nvlist_add_number(nvl, "evaluations",
2207 counter_u64_fetch(rule->evaluations));
2208 for (int i = 0; i < 2; i++) {
2209 nvlist_append_number_array(nvl, "packets",
2210 counter_u64_fetch(rule->packets[i]));
2211 nvlist_append_number_array(nvl, "bytes",
2212 counter_u64_fetch(rule->bytes[i]));
2215 nvlist_add_number(nvl, "os_fingerprint", rule->os_fingerprint);
2217 nvlist_add_number(nvl, "rtableid", rule->rtableid);
2218 pf_uint32_array_nv(nvl, "timeout", rule->timeout, PFTM_MAX);
2219 nvlist_add_number(nvl, "max_states", rule->max_states);
2220 nvlist_add_number(nvl, "max_src_nodes", rule->max_src_nodes);
2221 nvlist_add_number(nvl, "max_src_states", rule->max_src_states);
2222 nvlist_add_number(nvl, "max_src_conn", rule->max_src_conn);
2223 nvlist_add_number(nvl, "max_src_conn_rate.limit",
2224 rule->max_src_conn_rate.limit);
2225 nvlist_add_number(nvl, "max_src_conn_rate.seconds",
2226 rule->max_src_conn_rate.seconds);
2227 nvlist_add_number(nvl, "qid", rule->qid);
2228 nvlist_add_number(nvl, "pqid", rule->pqid);
2229 nvlist_add_number(nvl, "prob", rule->prob);
2230 nvlist_add_number(nvl, "cuid", rule->cuid);
2231 nvlist_add_number(nvl, "cpid", rule->cpid);
2233 nvlist_add_number(nvl, "states_cur",
2234 counter_u64_fetch(rule->states_cur));
2235 nvlist_add_number(nvl, "states_tot",
2236 counter_u64_fetch(rule->states_tot));
2237 nvlist_add_number(nvl, "src_nodes",
2238 counter_u64_fetch(rule->src_nodes));
2240 nvlist_add_number(nvl, "return_icmp", rule->return_icmp);
2241 nvlist_add_number(nvl, "return_icmp6", rule->return_icmp6);
2243 nvlist_add_number(nvl, "max_mss", rule->max_mss);
2244 nvlist_add_number(nvl, "scrub_flags", rule->scrub_flags);
2246 tmp = pf_rule_uid_to_nvrule_uid(&rule->uid);
2249 nvlist_add_nvlist(nvl, "uid", tmp);
2250 tmp = pf_rule_uid_to_nvrule_uid((const struct pf_rule_uid *)&rule->gid);
2253 nvlist_add_nvlist(nvl, "gid", tmp);
2255 nvlist_add_number(nvl, "rule_flag", rule->rule_flag);
2256 nvlist_add_number(nvl, "action", rule->action);
2257 nvlist_add_number(nvl, "direction", rule->direction);
2258 nvlist_add_number(nvl, "log", rule->log);
2259 nvlist_add_number(nvl, "logif", rule->logif);
2260 nvlist_add_number(nvl, "quick", rule->quick);
2261 nvlist_add_number(nvl, "ifnot", rule->ifnot);
2262 nvlist_add_number(nvl, "match_tag_not", rule->match_tag_not);
2263 nvlist_add_number(nvl, "natpass", rule->natpass);
2265 nvlist_add_number(nvl, "keep_state", rule->keep_state);
2266 nvlist_add_number(nvl, "af", rule->af);
2267 nvlist_add_number(nvl, "proto", rule->proto);
2268 nvlist_add_number(nvl, "type", rule->type);
2269 nvlist_add_number(nvl, "code", rule->code);
2270 nvlist_add_number(nvl, "flags", rule->flags);
2271 nvlist_add_number(nvl, "flagset", rule->flagset);
2272 nvlist_add_number(nvl, "min_ttl", rule->min_ttl);
2273 nvlist_add_number(nvl, "allow_opts", rule->allow_opts);
2274 nvlist_add_number(nvl, "rt", rule->rt);
2275 nvlist_add_number(nvl, "return_ttl", rule->return_ttl);
2276 nvlist_add_number(nvl, "tos", rule->tos);
2277 nvlist_add_number(nvl, "set_tos", rule->set_tos);
2278 nvlist_add_number(nvl, "anchor_relative", rule->anchor_relative);
2279 nvlist_add_number(nvl, "anchor_wildcard", rule->anchor_wildcard);
2281 nvlist_add_number(nvl, "flush", rule->flush);
2282 nvlist_add_number(nvl, "prio", rule->prio);
2284 pf_uint8_array_nv(nvl, "set_prio", &rule->prio, 2);
2286 tmp = pf_divert_to_nvdivert(rule);
2289 nvlist_add_nvlist(nvl, "divert", tmp);
2294 nvlist_destroy(nvl);
2299 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
2304 if (rule->af == AF_INET) {
2305 return (EAFNOSUPPORT);
2309 if (rule->af == AF_INET6) {
2310 return (EAFNOSUPPORT);
2314 ret = pf_check_rule_addr(&rule->src);
2317 ret = pf_check_rule_addr(&rule->dst);
2321 bzero(krule, sizeof(*krule));
2323 bcopy(&rule->src, &krule->src, sizeof(rule->src));
2324 bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
2326 strlcpy(krule->label[0], rule->label, sizeof(rule->label));
2327 strlcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
2328 strlcpy(krule->qname, rule->qname, sizeof(rule->qname));
2329 strlcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
2330 strlcpy(krule->tagname, rule->tagname, sizeof(rule->tagname));
2331 strlcpy(krule->match_tagname, rule->match_tagname,
2332 sizeof(rule->match_tagname));
2333 strlcpy(krule->overload_tblname, rule->overload_tblname,
2334 sizeof(rule->overload_tblname));
2336 ret = pf_pool_to_kpool(&rule->rpool, &krule->rpool);
2340 /* Don't allow userspace to set evaulations, packets or bytes. */
2341 /* kif, anchor, overload_tbl are not copied over. */
2343 krule->os_fingerprint = rule->os_fingerprint;
2345 krule->rtableid = rule->rtableid;
2346 bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout));
2347 krule->max_states = rule->max_states;
2348 krule->max_src_nodes = rule->max_src_nodes;
2349 krule->max_src_states = rule->max_src_states;
2350 krule->max_src_conn = rule->max_src_conn;
2351 krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
2352 krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
2353 krule->qid = rule->qid;
2354 krule->pqid = rule->pqid;
2355 krule->nr = rule->nr;
2356 krule->prob = rule->prob;
2357 krule->cuid = rule->cuid;
2358 krule->cpid = rule->cpid;
2360 krule->return_icmp = rule->return_icmp;
2361 krule->return_icmp6 = rule->return_icmp6;
2362 krule->max_mss = rule->max_mss;
2363 krule->tag = rule->tag;
2364 krule->match_tag = rule->match_tag;
2365 krule->scrub_flags = rule->scrub_flags;
2367 bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
2368 bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
2370 krule->rule_flag = rule->rule_flag;
2371 krule->action = rule->action;
2372 krule->direction = rule->direction;
2373 krule->log = rule->log;
2374 krule->logif = rule->logif;
2375 krule->quick = rule->quick;
2376 krule->ifnot = rule->ifnot;
2377 krule->match_tag_not = rule->match_tag_not;
2378 krule->natpass = rule->natpass;
2380 krule->keep_state = rule->keep_state;
2381 krule->af = rule->af;
2382 krule->proto = rule->proto;
2383 krule->type = rule->type;
2384 krule->code = rule->code;
2385 krule->flags = rule->flags;
2386 krule->flagset = rule->flagset;
2387 krule->min_ttl = rule->min_ttl;
2388 krule->allow_opts = rule->allow_opts;
2389 krule->rt = rule->rt;
2390 krule->return_ttl = rule->return_ttl;
2391 krule->tos = rule->tos;
2392 krule->set_tos = rule->set_tos;
2393 krule->anchor_relative = rule->anchor_relative;
2394 krule->anchor_wildcard = rule->anchor_wildcard;
2396 krule->flush = rule->flush;
2397 krule->prio = rule->prio;
2398 krule->set_prio[0] = rule->set_prio[0];
2399 krule->set_prio[1] = rule->set_prio[1];
2401 bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
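/*
 * Check whether any of the rule's labels matches the given label.
 */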
2407 pf_label_match(const struct pf_krule *rule, const char *label)
2411 while (*rule->label[i]) {
2412 if (strcmp(rule->label[i], label) == 0)
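/*
 * Look up the state matching the given key and direction and unlink it,
 * provided it is the only match. Returns the number of states killed.
 */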
2421 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
2423 struct pf_state *match;
2425 unsigned int killed = 0;
2427 /* Call with the hashrow unlocked. */
2429 match = pf_find_state_all(key, dir, &more);
2430 if (match && !more) {
2431 pf_unlink_state(match, 0);
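/*
 * Walk a single ID-hash row and unlink every state matching the kill
 * criteria (address family, protocol, addresses, ports, label and
 * interface). With psk_kill_match set, also kill the states matching
 * the reverse flow. Returns the number of states killed.
 */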
2439 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
2442 struct pf_state_key *sk;
2443 struct pf_addr *srcaddr, *dstaddr;
2444 struct pf_state_key_cmp match_key;
2445 int idx, killed = 0;
2447 u_int16_t srcport, dstport;
2449 relock_DIOCKILLSTATES:
2450 PF_HASHROW_LOCK(ih);
2451 LIST_FOREACH(s, &ih->states, entry) {
2452 sk = s->key[PF_SK_WIRE];
2453 if (s->direction == PF_OUT) {
2454 srcaddr = &sk->addr[1];
2455 dstaddr = &sk->addr[0];
2456 srcport = sk->port[1];
2457 dstport = sk->port[0];
2459 srcaddr = &sk->addr[0];
2460 dstaddr = &sk->addr[1];
2461 srcport = sk->port[0];
2462 dstport = sk->port[1];
2465 if (psk->psk_af && sk->af != psk->psk_af)
2468 if (psk->psk_proto && psk->psk_proto != sk->proto)
2471 if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
2472 &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
2475 if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
2476 &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
2479 if (! PF_MATCHA(psk->psk_rt_addr.neg,
2480 &psk->psk_rt_addr.addr.v.a.addr,
2481 &psk->psk_rt_addr.addr.v.a.mask,
2482 &s->rt_addr, sk->af))
2485 if (psk->psk_src.port_op != 0 &&
2486 ! pf_match_port(psk->psk_src.port_op,
2487 psk->psk_src.port[0], psk->psk_src.port[1], srcport))
2490 if (psk->psk_dst.port_op != 0 &&
2491 ! pf_match_port(psk->psk_dst.port_op,
2492 psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
2495 if (psk->psk_label[0] &&
2496 ! pf_label_match(s->rule.ptr, psk->psk_label))
2499 if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
2503 if (psk->psk_kill_match) {
2504 /* Create the key to find matching states, with lock held. */
2507 bzero(&match_key, sizeof(match_key));
2509 if (s->direction == PF_OUT) {
2517 match_key.af = s->key[idx]->af;
2518 match_key.proto = s->key[idx]->proto;
2519 PF_ACPY(&match_key.addr[0],
2520 &s->key[idx]->addr[1], match_key.af);
2521 match_key.port[0] = s->key[idx]->port[1];
2522 PF_ACPY(&match_key.addr[1],
2523 &s->key[idx]->addr[0], match_key.af);
2524 match_key.port[1] = s->key[idx]->port[0];
2527 pf_unlink_state(s, PF_ENTER_LOCKED);
2530 if (psk->psk_kill_match)
2531 killed += pf_kill_matching_state(&match_key, dir);
2533 goto relock_DIOCKILLSTATES;
2535 PF_HASHROW_UNLOCK(ih);
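/*
 * Translate the ioctl's struct pfioc_state_kill into the kernel's
 * struct pf_kstate_kill.
 */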
2541 pf_state_kill_to_kstate_kill(const struct pfioc_state_kill *psk,
2542 struct pf_kstate_kill *kill)
2544 bzero(kill, sizeof(*kill));
2546 bcopy(&psk->psk_pfcmp, &kill->psk_pfcmp, sizeof(kill->psk_pfcmp));
2547 kill->psk_af = psk->psk_af;
2548 kill->psk_proto = psk->psk_proto;
2549 bcopy(&psk->psk_src, &kill->psk_src, sizeof(kill->psk_src));
2550 bcopy(&psk->psk_dst, &kill->psk_dst, sizeof(kill->psk_dst));
2551 strlcpy(kill->psk_ifname, psk->psk_ifname, sizeof(kill->psk_ifname));
2552 strlcpy(kill->psk_label, psk->psk_label, sizeof(kill->psk_label));
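/*
 * Extract a struct pf_state_cmp (id, creator id, direction) from an nvlist.
 */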
2558 pf_nvstate_cmp_to_state_cmp(const nvlist_t *nvl, struct pf_state_cmp *cmp)
2562 bzero(cmp, sizeof(*cmp));
2564 PFNV_CHK(pf_nvuint64(nvl, "id", &cmp->id));
2565 PFNV_CHK(pf_nvuint32(nvl, "creatorid", &cmp->creatorid));
2566 PFNV_CHK(pf_nvuint8(nvl, "direction", &cmp->direction));
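/*
 * Build a struct pf_kstate_kill from the nvlist supplied by userspace,
 * checking that the mandatory fields are present.
 */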
2573 pf_nvstate_kill_to_kstate_kill(const nvlist_t *nvl,
2574 struct pf_kstate_kill *kill)
2578 bzero(kill, sizeof(*kill));
2580 if (! nvlist_exists_nvlist(nvl, "cmp"))
2583 PFNV_CHK(pf_nvstate_cmp_to_state_cmp(nvlist_get_nvlist(nvl, "cmp"),
2585 PFNV_CHK(pf_nvuint8(nvl, "af", &kill->psk_af));
2586 PFNV_CHK(pf_nvint(nvl, "proto", &kill->psk_proto));
2588 if (! nvlist_exists_nvlist(nvl, "src"))
2590 PFNV_CHK(pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "src"),
2592 if (! nvlist_exists_nvlist(nvl, "dst"))
2594 PFNV_CHK(pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "dst"),
2596 if (nvlist_exists_nvlist(nvl, "rt_addr")) {
2597 PFNV_CHK(pf_nvrule_addr_to_rule_addr(
2598 nvlist_get_nvlist(nvl, "rt_addr"), &kill->psk_rt_addr));
2601 PFNV_CHK(pf_nvstring(nvl, "ifname", kill->psk_ifname,
2602 sizeof(kill->psk_ifname)));
2603 PFNV_CHK(pf_nvstring(nvl, "label", kill->psk_label,
2604 sizeof(kill->psk_label)));
2605 if (nvlist_exists_bool(nvl, "kill_match"))
2606 kill->psk_kill_match = nvlist_get_bool(nvl, "kill_match");
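/*
 * Common backend for DIOCADDRULE and DIOCADDRULENV: allocate the rule's
 * counters, validate tickets, queues, tags and addresses, attach the
 * interface and tables, and append the rule to the inactive ruleset.
 * The rule is freed on error.
 */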
2613 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
2614 uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2617 struct pf_kruleset *ruleset;
2618 struct pf_krule *tail;
2619 struct pf_kpooladdr *pa;
2620 struct pfi_kkif *kif = NULL;
2624 if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
2626 goto errout_unlocked;
2629 #define ERROUT(x) ERROUT_FUNCTION(errout, x)
2631 if (rule->ifname[0])
2632 kif = pf_kkif_create(M_WAITOK);
2633 rule->evaluations = counter_u64_alloc(M_WAITOK);
2634 for (int i = 0; i < 2; i++) {
2635 rule->packets[i] = counter_u64_alloc(M_WAITOK);
2636 rule->bytes[i] = counter_u64_alloc(M_WAITOK);
2638 rule->states_cur = counter_u64_alloc(M_WAITOK);
2639 rule->states_tot = counter_u64_alloc(M_WAITOK);
2640 rule->src_nodes = counter_u64_alloc(M_WAITOK);
2641 rule->cuid = td->td_ucred->cr_ruid;
2642 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
2643 TAILQ_INIT(&rule->rpool.list);
2646 ruleset = pf_find_kruleset(anchor);
2647 if (ruleset == NULL)
2649 rs_num = pf_get_ruleset_number(rule->action);
2650 if (rs_num >= PF_RULESET_MAX)
2652 if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2653 DPFPRINTF(PF_DEBUG_MISC,
2654 ("ticket: %d != [%d]%d\n", ticket, rs_num,
2655 ruleset->rules[rs_num].inactive.ticket));
2658 if (pool_ticket != V_ticket_pabuf) {
2659 DPFPRINTF(PF_DEBUG_MISC,
2660 ("pool_ticket: %d != %d\n", pool_ticket,
2665 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2668 rule->nr = tail->nr + 1;
2671 if (rule->ifname[0]) {
2672 rule->kif = pfi_kkif_attach(kif, rule->ifname);
2674 pfi_kkif_ref(rule->kif);
2678 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2683 if (rule->qname[0] != 0) {
2684 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2686 else if (rule->pqname[0] != 0) {
2688 pf_qname2qid(rule->pqname)) == 0)
2691 rule->pqid = rule->qid;
2694 if (rule->tagname[0])
2695 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2697 if (rule->match_tagname[0])
2698 if ((rule->match_tag =
2699 pf_tagname2tag(rule->match_tagname)) == 0)
2701 if (rule->rt && !rule->direction)
2705 if (rule->logif >= PFLOGIFS_MAX)
2707 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2709 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2711 if (pf_kanchor_setup(rule, ruleset, anchor_call))
2713 if (rule->scrub_flags & PFSTATE_SETPRIO &&
2714 (rule->set_prio[0] > PF_PRIO_MAX ||
2715 rule->set_prio[1] > PF_PRIO_MAX))
2717 TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
2718 if (pa->addr.type == PF_ADDR_TABLE) {
2719 pa->addr.p.tbl = pfr_attach_table(ruleset,
2720 pa->addr.v.tblname);
2721 if (pa->addr.p.tbl == NULL)
2725 rule->overload_tbl = NULL;
2726 if (rule->overload_tblname[0]) {
2727 if ((rule->overload_tbl = pfr_attach_table(ruleset,
2728 rule->overload_tblname)) == NULL)
2731 rule->overload_tbl->pfrkt_flags |=
2735 pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list);
2736 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2737 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
2738 (rule->rt > PF_NOPFROUTE)) &&
2739 (TAILQ_FIRST(&rule->rpool.list) == NULL))
2748 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
2749 counter_u64_zero(rule->evaluations);
2750 for (int i = 0; i < 2; i++) {
2751 counter_u64_zero(rule->packets[i]);
2752 counter_u64_zero(rule->bytes[i]);
2754 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2756 ruleset->rules[rs_num].inactive.rcount++;
2766 pf_krule_free(rule);
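/*
 * Main ioctl entry point for /dev/pf. Performs the securelevel and
 * permission checks up front, then dispatches on the command.
 */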
2771 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2774 PF_RULES_RLOCK_TRACKER;
2776 #define ERROUT_IOCTL(target, x) \
2779 SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__); \
2784 /* XXX keep in sync with switch() below */
2785 if (securelevel_gt(td->td_ucred, 2))
2793 case DIOCSETSTATUSIF:
2799 case DIOCGETTIMEOUT:
2800 case DIOCCLRRULECTRS:
2802 case DIOCGETALTQSV0:
2803 case DIOCGETALTQSV1:
2806 case DIOCGETQSTATSV0:
2807 case DIOCGETQSTATSV1:
2808 case DIOCGETRULESETS:
2809 case DIOCGETRULESET:
2810 case DIOCRGETTABLES:
2811 case DIOCRGETTSTATS:
2812 case DIOCRCLRTSTATS:
2818 case DIOCRGETASTATS:
2819 case DIOCRCLRASTATS:
2822 case DIOCGETSRCNODES:
2823 case DIOCCLRSRCNODES:
2824 case DIOCIGETIFACES:
2825 case DIOCGIFSPEEDV0:
2826 case DIOCGIFSPEEDV1:
2830 case DIOCRCLRTABLES:
2831 case DIOCRADDTABLES:
2832 case DIOCRDELTABLES:
2833 case DIOCRSETTFLAGS:
2834 if (((struct pfioc_table *)addr)->pfrio_flags &
2836 break; /* dummy operation ok */
2842 if (!(flags & FWRITE))
2850 case DIOCGETTIMEOUT:
2852 case DIOCGETALTQSV0:
2853 case DIOCGETALTQSV1:
2856 case DIOCGETQSTATSV0:
2857 case DIOCGETQSTATSV1:
2858 case DIOCGETRULESETS:
2859 case DIOCGETRULESET:
2861 case DIOCRGETTABLES:
2862 case DIOCRGETTSTATS:
2864 case DIOCRGETASTATS:
2867 case DIOCGETSRCNODES:
2868 case DIOCIGETIFACES:
2869 case DIOCGIFSPEEDV1:
2870 case DIOCGIFSPEEDV0:
2873 case DIOCRCLRTABLES:
2874 case DIOCRADDTABLES:
2875 case DIOCRDELTABLES:
2876 case DIOCRCLRTSTATS:
2881 case DIOCRSETTFLAGS:
2882 if (((struct pfioc_table *)addr)->pfrio_flags &
2884 flags |= FWRITE; /* need write lock for dummy */
2885 break; /* dummy operation ok */
2889 if (((struct pfioc_rule *)addr)->action ==
2897 CURVNET_SET(TD_TO_VNET(td));
2901 sx_xlock(&pf_ioctl_lock);
2902 if (V_pf_status.running)
2909 DPFPRINTF(PF_DEBUG_MISC,
2910 ("pf: pfil registration failed\n"));
2913 V_pf_status.running = 1;
2914 V_pf_status.since = time_second;
2917 V_pf_stateid[cpu] = time_second;
2919 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
2924 sx_xlock(&pf_ioctl_lock);
2925 if (!V_pf_status.running)
2928 V_pf_status.running = 0;
2929 error = dehook_pf();
2931 V_pf_status.running = 1;
2932 DPFPRINTF(PF_DEBUG_MISC,
2933 ("pf: pfil unregistration failed\n"));
2935 V_pf_status.since = time_second;
2936 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
2940 case DIOCADDRULENV: {
2941 struct pfioc_nv *nv = (struct pfioc_nv *)addr;
2942 nvlist_t *nvl = NULL;
2943 void *nvlpacked = NULL;
2944 struct pf_krule *rule = NULL;
2945 const char *anchor = "", *anchor_call = "";
2946 uint32_t ticket = 0, pool_ticket = 0;
2948 #define ERROUT(x) ERROUT_IOCTL(DIOCADDRULENV_error, x)
2950 if (nv->len > pf_ioctl_maxcount)
2953 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
2954 error = copyin(nv->data, nvlpacked, nv->len);
2958 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2962 if (! nvlist_exists_number(nvl, "ticket"))
2964 ticket = nvlist_get_number(nvl, "ticket");
2966 if (! nvlist_exists_number(nvl, "pool_ticket"))
2968 pool_ticket = nvlist_get_number(nvl, "pool_ticket");
2970 if (! nvlist_exists_nvlist(nvl, "rule"))
2973 error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
2978 if (nvlist_exists_string(nvl, "anchor"))
2979 anchor = nvlist_get_string(nvl, "anchor");
2980 if (nvlist_exists_string(nvl, "anchor_call"))
2981 anchor_call = nvlist_get_string(nvl, "anchor_call");
2983 if ((error = nvlist_error(nvl)))
2986 /* Frees rule on error */
2987 error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
2990 nvlist_destroy(nvl);
2991 free(nvlpacked, M_TEMP);
2994 DIOCADDRULENV_error:
2995 pf_krule_free(rule);
2996 nvlist_destroy(nvl);
2997 free(nvlpacked, M_TEMP);
3002 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
3003 struct pf_krule *rule;
3005 rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
3006 error = pf_rule_to_krule(&pr->rule, rule);
3008 free(rule, M_PFRULE);
3012 pr->anchor[sizeof(pr->anchor) - 1] = 0;
3014 /* Frees rule on error */
3015 error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
3016 pr->anchor, pr->anchor_call, td);
3020 case DIOCGETRULES: {
3021 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
3022 struct pf_kruleset *ruleset;
3023 struct pf_krule *tail;
3027 pr->anchor[sizeof(pr->anchor) - 1] = 0;
3028 ruleset = pf_find_kruleset(pr->anchor);
3029 if (ruleset == NULL) {
3034 rs_num = pf_get_ruleset_number(pr->rule.action);
3035 if (rs_num >= PF_RULESET_MAX) {
3040 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3043 pr->nr = tail->nr + 1;
3046 pr->ticket = ruleset->rules[rs_num].active.ticket;
3052 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
3053 struct pf_kruleset *ruleset;
3054 struct pf_krule *rule;
3058 pr->anchor[sizeof(pr->anchor) - 1] = 0;
3059 ruleset = pf_find_kruleset(pr->anchor);
3060 if (ruleset == NULL) {
3065 rs_num = pf_get_ruleset_number(pr->rule.action);
3066 if (rs_num >= PF_RULESET_MAX) {
3071 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
3076 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3077 while ((rule != NULL) && (rule->nr != pr->nr))
3078 rule = TAILQ_NEXT(rule, entries);
3085 pf_krule_to_rule(rule, &pr->rule);
3087 if (pf_kanchor_copyout(ruleset, rule, pr)) {
3092 pf_addr_copyout(&pr->rule.src.addr);
3093 pf_addr_copyout(&pr->rule.dst.addr);
3095 if (pr->action == PF_GET_CLR_CNTR) {
3096 counter_u64_zero(rule->evaluations);
3097 for (int i = 0; i < 2; i++) {
3098 counter_u64_zero(rule->packets[i]);
3099 counter_u64_zero(rule->bytes[i]);
3101 counter_u64_zero(rule->states_tot);
3107 case DIOCGETRULENV: {
3108 struct pfioc_nv *nv = (struct pfioc_nv *)addr;
3109 nvlist_t *nvrule = NULL;
3110 nvlist_t *nvl = NULL;
3111 struct pf_kruleset *ruleset;
3112 struct pf_krule *rule;
3113 void *nvlpacked = NULL;
3115 bool clear_counter = false;
3117 #define ERROUT(x) ERROUT_IOCTL(DIOCGETRULENV_error, x)
3119 if (nv->len > pf_ioctl_maxcount)
3122 /* Copy the request in */
3123 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
3124 if (nvlpacked == NULL)
3127 error = copyin(nv->data, nvlpacked, nv->len);
3131 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3135 if (! nvlist_exists_string(nvl, "anchor"))
3137 if (! nvlist_exists_number(nvl, "ruleset"))
3139 if (! nvlist_exists_number(nvl, "ticket"))
3141 if (! nvlist_exists_number(nvl, "nr"))
3144 if (nvlist_exists_bool(nvl, "clear_counter"))
3145 clear_counter = nvlist_get_bool(nvl, "clear_counter");
3147 if (clear_counter && !(flags & FWRITE))
3150 nr = nvlist_get_number(nvl, "nr");
3153 ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
3154 if (ruleset == NULL) {
3159 rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
3160 if (rs_num >= PF_RULESET_MAX) {
3165 if (nvlist_get_number(nvl, "ticket") !=
3166 ruleset->rules[rs_num].active.ticket) {
3172 if ((error = nvlist_error(nvl))) {
3177 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3178 while ((rule != NULL) && (rule->nr != nr))
3179 rule = TAILQ_NEXT(rule, entries);
3186 nvrule = pf_krule_to_nvrule(rule);
3188 nvlist_destroy(nvl);
3189 nvl = nvlist_create(0);
3194 nvlist_add_number(nvl, "nr", nr);
3195 nvlist_add_nvlist(nvl, "rule", nvrule);
3197 if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
3202 free(nvlpacked, M_TEMP);
3203 nvlpacked = nvlist_pack(nvl, &nv->len);
3204 if (nvlpacked == NULL) {
3209 if (nv->size == 0) {
3213 else if (nv->size < nv->len) {
3218 error = copyout(nvlpacked, nv->data, nv->len);
3220 if (clear_counter) {
3221 counter_u64_zero(rule->evaluations);
3222 for (int i = 0; i < 2; i++) {
3223 counter_u64_zero(rule->packets[i]);
3224 counter_u64_zero(rule->bytes[i]);
3226 counter_u64_zero(rule->states_tot);
3231 DIOCGETRULENV_error:
3232 free(nvlpacked, M_TEMP);
3233 nvlist_destroy(nvrule);
3234 nvlist_destroy(nvl);
3239 case DIOCCHANGERULE: {
3240 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
3241 struct pf_kruleset *ruleset;
3242 struct pf_krule *oldrule = NULL, *newrule = NULL;
3243 struct pfi_kkif *kif = NULL;
3244 struct pf_kpooladdr *pa;
3248 if (pcr->action < PF_CHANGE_ADD_HEAD ||
3249 pcr->action > PF_CHANGE_GET_TICKET) {
3253 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3258 if (pcr->action != PF_CHANGE_REMOVE) {
3259 newrule = malloc(sizeof(*newrule), M_PFRULE, M_WAITOK);
3260 error = pf_rule_to_krule(&pcr->rule, newrule);
3262 free(newrule, M_PFRULE);
3266 if (newrule->ifname[0])
3267 kif = pf_kkif_create(M_WAITOK);
3268 newrule->evaluations = counter_u64_alloc(M_WAITOK);
3269 for (int i = 0; i < 2; i++) {
3270 newrule->packets[i] =
3271 counter_u64_alloc(M_WAITOK);
3273 counter_u64_alloc(M_WAITOK);
3275 newrule->states_cur = counter_u64_alloc(M_WAITOK);
3276 newrule->states_tot = counter_u64_alloc(M_WAITOK);
3277 newrule->src_nodes = counter_u64_alloc(M_WAITOK);
3278 newrule->cuid = td->td_ucred->cr_ruid;
3279 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
3280 TAILQ_INIT(&newrule->rpool.list);
3283 #define ERROUT(x) { error = (x); goto DIOCCHANGERULE_error; }
3286 if (!(pcr->action == PF_CHANGE_REMOVE ||
3287 pcr->action == PF_CHANGE_GET_TICKET) &&
3288 pcr->pool_ticket != V_ticket_pabuf)
3291 ruleset = pf_find_kruleset(pcr->anchor);
3292 if (ruleset == NULL)
3295 rs_num = pf_get_ruleset_number(pcr->rule.action);
3296 if (rs_num >= PF_RULESET_MAX)
3299 if (pcr->action == PF_CHANGE_GET_TICKET) {
3300 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3302 } else if (pcr->ticket !=
3303 ruleset->rules[rs_num].active.ticket)
3306 if (pcr->action != PF_CHANGE_REMOVE) {
3307 if (newrule->ifname[0]) {
3308 newrule->kif = pfi_kkif_attach(kif,
3311 pfi_kkif_ref(newrule->kif);
3313 newrule->kif = NULL;
3315 if (newrule->rtableid > 0 &&
3316 newrule->rtableid >= rt_numfibs)
3321 if (newrule->qname[0] != 0) {
3323 pf_qname2qid(newrule->qname)) == 0)
3325 else if (newrule->pqname[0] != 0) {
3326 if ((newrule->pqid =
3327 pf_qname2qid(newrule->pqname)) == 0)
3330 newrule->pqid = newrule->qid;
3333 if (newrule->tagname[0])
3335 pf_tagname2tag(newrule->tagname)) == 0)
3337 if (newrule->match_tagname[0])
3338 if ((newrule->match_tag = pf_tagname2tag(
3339 newrule->match_tagname)) == 0)
3341 if (newrule->rt && !newrule->direction)
3345 if (newrule->logif >= PFLOGIFS_MAX)
3347 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3349 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3351 if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3353 TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
3354 if (pa->addr.type == PF_ADDR_TABLE) {
3356 pfr_attach_table(ruleset,
3357 pa->addr.v.tblname);
3358 if (pa->addr.p.tbl == NULL)
3362 newrule->overload_tbl = NULL;
3363 if (newrule->overload_tblname[0]) {
3364 if ((newrule->overload_tbl = pfr_attach_table(
3365 ruleset, newrule->overload_tblname)) ==
3369 newrule->overload_tbl->pfrkt_flags |=
3373 pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list);
3374 if (((((newrule->action == PF_NAT) ||
3375 (newrule->action == PF_RDR) ||
3376 (newrule->action == PF_BINAT) ||
3377 (newrule->rt > PF_NOPFROUTE)) &&
3378 !newrule->anchor)) &&
3379 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
3383 pf_free_rule(newrule);
3388 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3390 pf_empty_kpool(&V_pf_pabuf);
3392 if (pcr->action == PF_CHANGE_ADD_HEAD)
3393 oldrule = TAILQ_FIRST(
3394 ruleset->rules[rs_num].active.ptr);
3395 else if (pcr->action == PF_CHANGE_ADD_TAIL)
3396 oldrule = TAILQ_LAST(
3397 ruleset->rules[rs_num].active.ptr, pf_krulequeue);
3399 oldrule = TAILQ_FIRST(
3400 ruleset->rules[rs_num].active.ptr);
3401 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3402 oldrule = TAILQ_NEXT(oldrule, entries);
3403 if (oldrule == NULL) {
3404 if (newrule != NULL)
3405 pf_free_rule(newrule);
3412 if (pcr->action == PF_CHANGE_REMOVE) {
3413 pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
3415 ruleset->rules[rs_num].active.rcount--;
3417 if (oldrule == NULL)
3419 ruleset->rules[rs_num].active.ptr,
3421 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3422 pcr->action == PF_CHANGE_ADD_BEFORE)
3423 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3426 ruleset->rules[rs_num].active.ptr,
3427 oldrule, newrule, entries);
3428 ruleset->rules[rs_num].active.rcount++;
3432 TAILQ_FOREACH(oldrule,
3433 ruleset->rules[rs_num].active.ptr, entries)
3436 ruleset->rules[rs_num].active.ticket++;
3438 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3439 pf_remove_if_empty_kruleset(ruleset);
3445 DIOCCHANGERULE_error:
3447 pf_krule_free(newrule);
3452 case DIOCCLRSTATES: {
3453 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
3454 struct pf_kstate_kill kill;
3456 error = pf_state_kill_to_kstate_kill(psk, &kill);
3460 psk->psk_killed = pf_clear_states(&kill);
3464 case DIOCCLRSTATESNV: {
3465 error = pf_clearstates_nv((struct pfioc_nv *)addr);
3469 case DIOCKILLSTATES: {
3470 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
3471 struct pf_kstate_kill kill;
3473 error = pf_state_kill_to_kstate_kill(psk, &kill);
3477 psk->psk_killed = 0;
3478 error = pf_killstates(&kill, &psk->psk_killed);
3482 case DIOCKILLSTATESNV: {
3483 error = pf_killstates_nv((struct pfioc_nv *)addr);
3487 case DIOCADDSTATE: {
3488 struct pfioc_state *ps = (struct pfioc_state *)addr;
3489 struct pfsync_state *sp = &ps->state;
3491 if (sp->timeout >= PFTM_MAX) {
3495 if (V_pfsync_state_import_ptr != NULL) {
3497 error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL);
3504 case DIOCGETSTATE: {
3505 struct pfioc_state *ps = (struct pfioc_state *)addr;
3508 s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
3514 pfsync_state_export(&ps->state, s);
3519 case DIOCGETSTATES: {
3520 struct pfioc_states *ps = (struct pfioc_states *)addr;
3522 struct pfsync_state *pstore, *p;
3525 if (ps->ps_len <= 0) {
3526 nr = uma_zone_get_cur(V_pf_state_z);
3527 ps->ps_len = sizeof(struct pfsync_state) * nr;
3531 p = pstore = malloc(ps->ps_len, M_TEMP, M_WAITOK | M_ZERO);
3534 for (i = 0; i <= pf_hashmask; i++) {
3535 struct pf_idhash *ih = &V_pf_idhash[i];
3537 PF_HASHROW_LOCK(ih);
3538 LIST_FOREACH(s, &ih->states, entry) {
3540 if (s->timeout == PFTM_UNLINKED)
3543 if ((nr+1) * sizeof(*p) > ps->ps_len) {
3544 PF_HASHROW_UNLOCK(ih);
3545 goto DIOCGETSTATES_full;
3547 pfsync_state_export(p, s);
3551 PF_HASHROW_UNLOCK(ih);
3554 error = copyout(pstore, ps->ps_states,
3555 sizeof(struct pfsync_state) * nr);
3557 free(pstore, M_TEMP);
3560 ps->ps_len = sizeof(struct pfsync_state) * nr;
3561 free(pstore, M_TEMP);
3566 case DIOCGETSTATUS: {
3567 struct pf_status *s = (struct pf_status *)addr;
3570 s->running = V_pf_status.running;
3571 s->since = V_pf_status.since;
3572 s->debug = V_pf_status.debug;
3573 s->hostid = V_pf_status.hostid;
3574 s->states = V_pf_status.states;
3575 s->src_nodes = V_pf_status.src_nodes;
3577 for (int i = 0; i < PFRES_MAX; i++)
3579 counter_u64_fetch(V_pf_status.counters[i]);
3580 for (int i = 0; i < LCNT_MAX; i++)
3582 counter_u64_fetch(V_pf_status.lcounters[i]);
3583 for (int i = 0; i < FCNT_MAX; i++)
3585 counter_u64_fetch(V_pf_status.fcounters[i]);
3586 for (int i = 0; i < SCNT_MAX; i++)
3588 counter_u64_fetch(V_pf_status.scounters[i]);
3590 bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ);
3591 bcopy(V_pf_status.pf_chksum, s->pf_chksum,
3592 PF_MD5_DIGEST_LENGTH);
3594 pfi_update_status(s->ifname, s);
3599 case DIOCSETSTATUSIF: {
3600 struct pfioc_if *pi = (struct pfioc_if *)addr;
3602 if (pi->ifname[0] == 0) {
3603 bzero(V_pf_status.ifname, IFNAMSIZ);
3607 strlcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
3612 case DIOCCLRSTATUS: {
3614 for (int i = 0; i < PFRES_MAX; i++)
3615 counter_u64_zero(V_pf_status.counters[i]);
3616 for (int i = 0; i < FCNT_MAX; i++)
3617 counter_u64_zero(V_pf_status.fcounters[i]);
3618 for (int i = 0; i < SCNT_MAX; i++)
3619 counter_u64_zero(V_pf_status.scounters[i]);
3620 for (int i = 0; i < LCNT_MAX; i++)
3621 counter_u64_zero(V_pf_status.lcounters[i]);
3622 V_pf_status.since = time_second;
3623 if (*V_pf_status.ifname)
3624 pfi_update_status(V_pf_status.ifname, NULL);
3630 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
3631 struct pf_state_key *sk;
3632 struct pf_state *state;
3633 struct pf_state_key_cmp key;
3634 int m = 0, direction = pnl->direction;
3637 /* NATLOOK src and dst are reversed, so reverse sidx/didx */
3638 sidx = (direction == PF_IN) ? 1 : 0;
3639 didx = (direction == PF_IN) ? 0 : 1;
3642 PF_AZERO(&pnl->saddr, pnl->af) ||
3643 PF_AZERO(&pnl->daddr, pnl->af) ||
3644 ((pnl->proto == IPPROTO_TCP ||
3645 pnl->proto == IPPROTO_UDP) &&
3646 (!pnl->dport || !pnl->sport)))
3649 bzero(&key, sizeof(key));
3651 key.proto = pnl->proto;
3652 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
3653 key.port[sidx] = pnl->sport;
3654 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
3655 key.port[didx] = pnl->dport;
3657 state = pf_find_state_all(&key, direction, &m);
3660 error = E2BIG; /* more than one state */
3661 else if (state != NULL) {
3662 /* XXXGL: not locked read */
3663 sk = state->key[sidx];
3664 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
3665 pnl->rsport = sk->port[sidx];
3666 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
3667 pnl->rdport = sk->port[didx];
3674 case DIOCSETTIMEOUT: {
3675 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
3678 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3684 old = V_pf_default_rule.timeout[pt->timeout];
3685 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
3687 V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
3688 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
3689 wakeup(pf_purge_thread);
3695 case DIOCGETTIMEOUT: {
3696 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
3698 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3703 pt->seconds = V_pf_default_rule.timeout[pt->timeout];
3708 case DIOCGETLIMIT: {
3709 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
3711 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3716 pl->limit = V_pf_limits[pl->index].limit;
3721 case DIOCSETLIMIT: {
3722 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
3726 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3727 V_pf_limits[pl->index].zone == NULL) {
3732 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
3733 old_limit = V_pf_limits[pl->index].limit;
3734 V_pf_limits[pl->index].limit = pl->limit;
3735 pl->limit = old_limit;
3740 case DIOCSETDEBUG: {
3741 u_int32_t *level = (u_int32_t *)addr;
3744 V_pf_status.debug = *level;
3749 case DIOCCLRRULECTRS: {
3750 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
3751 struct pf_kruleset *ruleset = &pf_main_ruleset;
3752 struct pf_krule *rule;
3756 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
3757 counter_u64_zero(rule->evaluations);
3758 for (int i = 0; i < 2; i++) {
3759 counter_u64_zero(rule->packets[i]);
3760 counter_u64_zero(rule->bytes[i]);
3767 case DIOCGIFSPEEDV0:
3768 case DIOCGIFSPEEDV1: {
3769 struct pf_ifspeed_v1 *psp = (struct pf_ifspeed_v1 *)addr;
3770 struct pf_ifspeed_v1 ps;
3773 if (psp->ifname[0] != 0) {
3774 /* Can we completely trust user-land? */
3775 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
3776 ifp = ifunit(ps.ifname);
3779 (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
3780 if (cmd == DIOCGIFSPEEDV1)
3781 psp->baudrate = ifp->if_baudrate;
3790 case DIOCSTARTALTQ: {
3791 struct pf_altq *altq;
3794 /* enable all altq interfaces on active list */
3795 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
3796 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
3797 error = pf_enable_altq(altq);
3803 V_pf_altq_running = 1;
3805 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
3809 case DIOCSTOPALTQ: {
3810 struct pf_altq *altq;
3813 /* disable all altq interfaces on active list */
3814 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
3815 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
3816 error = pf_disable_altq(altq);
3822 V_pf_altq_running = 0;
3824 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
3829 case DIOCADDALTQV1: {
3830 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
3831 struct pf_altq *altq, *a;
3834 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
3835 error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
3838 altq->local_flags = 0;
3841 if (pa->ticket != V_ticket_altqs_inactive) {
3843 free(altq, M_PFALTQ);
3849 /* if this is for a queue, find the discipline and
3850  * copy the necessary fields */
3852 if (altq->qname[0] != 0) {
3853 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
3856 free(altq, M_PFALTQ);
3859 altq->altq_disc = NULL;
3860 TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
3861 if (strncmp(a->ifname, altq->ifname,
3863 altq->altq_disc = a->altq_disc;
3869 if ((ifp = ifunit(altq->ifname)) == NULL)
3870 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
3872 error = altq_add(ifp, altq);
3876 free(altq, M_PFALTQ);
3880 if (altq->qname[0] != 0)
3881 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
3883 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
3884 /* version error check done on import above */
3885 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
3890 case DIOCGETALTQSV0:
3891 case DIOCGETALTQSV1: {
3892 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
3893 struct pf_altq *altq;
3897 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
3899 TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
3901 pa->ticket = V_ticket_altqs_active;
3907 case DIOCGETALTQV1: {
3908 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
3909 struct pf_altq *altq;
3912 if (pa->ticket != V_ticket_altqs_active) {
3917 altq = pf_altq_get_nth_active(pa->nr);
3923 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
3928 case DIOCCHANGEALTQV0:
3929 case DIOCCHANGEALTQV1:
3930 /* CHANGEALTQ not supported yet! */
3934 case DIOCGETQSTATSV0:
3935 case DIOCGETQSTATSV1: {
3936 struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr;
3937 struct pf_altq *altq;
3942 if (pq->ticket != V_ticket_altqs_active) {
3947 nbytes = pq->nbytes;
3948 altq = pf_altq_get_nth_active(pq->nr);
3955 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
3961 if (cmd == DIOCGETQSTATSV0)
3962 version = 0; /* DIOCGETQSTATSV0 means stats struct v0 */
3964 version = pq->version;
3965 error = altq_getqstats(altq, pq->buf, &nbytes, version);
3967 pq->scheduler = altq->scheduler;
3968 pq->nbytes = nbytes;
3974 case DIOCBEGINADDRS: {
3975 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
3978 pf_empty_kpool(&V_pf_pabuf);
3979 pp->ticket = ++V_ticket_pabuf;
3985 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
3986 struct pf_kpooladdr *pa;
3987 struct pfi_kkif *kif = NULL;
3990 if (pp->af == AF_INET) {
3991 error = EAFNOSUPPORT;
3996 if (pp->af == AF_INET6) {
3997 error = EAFNOSUPPORT;
4001 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
4002 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
4003 pp->addr.addr.type != PF_ADDR_TABLE) {
4007 if (pp->addr.addr.p.dyn != NULL) {
4011 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
4012 pf_pooladdr_to_kpooladdr(&pp->addr, pa);
4014 kif = pf_kkif_create(M_WAITOK);
4016 if (pp->ticket != V_ticket_pabuf) {
4024 if (pa->ifname[0]) {
4025 pa->kif = pfi_kkif_attach(kif, pa->ifname);
4027 pfi_kkif_ref(pa->kif);
4030 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
4031 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
4033 pfi_kkif_unref(pa->kif);
4038 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
4043 case DIOCGETADDRS: {
4044 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
4045 struct pf_kpool *pool;
4046 struct pf_kpooladdr *pa;
4050 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
4051 pp->r_num, 0, 1, 0);
4057 TAILQ_FOREACH(pa, &pool->list, entries)
4064 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
4065 struct pf_kpool *pool;
4066 struct pf_kpooladdr *pa;
4070 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
4071 pp->r_num, 0, 1, 1);
4077 pa = TAILQ_FIRST(&pool->list);
4078 while ((pa != NULL) && (nr < pp->nr)) {
4079 pa = TAILQ_NEXT(pa, entries);
4087 pf_kpooladdr_to_pooladdr(pa, &pp->addr);
4088 pf_addr_copyout(&pp->addr.addr);
4093 case DIOCCHANGEADDR: {
4094 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
4095 struct pf_kpool *pool;
4096 struct pf_kpooladdr *oldpa = NULL, *newpa = NULL;
4097 struct pf_kruleset *ruleset;
4098 struct pfi_kkif *kif = NULL;
4100 if (pca->action < PF_CHANGE_ADD_HEAD ||
4101 pca->action > PF_CHANGE_REMOVE) {
4105 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4106 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4107 pca->addr.addr.type != PF_ADDR_TABLE) {
4111 if (pca->addr.addr.p.dyn != NULL) {
4116 if (pca->action != PF_CHANGE_REMOVE) {
4118 if (pca->af == AF_INET) {
4119 error = EAFNOSUPPORT;
4124 if (pca->af == AF_INET6) {
4125 error = EAFNOSUPPORT;
4129 newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
4130 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
4131 if (newpa->ifname[0])
4132 kif = pf_kkif_create(M_WAITOK);
4136 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
4138 ruleset = pf_find_kruleset(pca->anchor);
4139 if (ruleset == NULL)
4142 pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
4143 pca->r_num, pca->r_last, 1, 1);
4147 if (pca->action != PF_CHANGE_REMOVE) {
4148 if (newpa->ifname[0]) {
4149 newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
4150 pfi_kkif_ref(newpa->kif);
4154 switch (newpa->addr.type) {
4155 case PF_ADDR_DYNIFTL:
4156 error = pfi_dynaddr_setup(&newpa->addr,
4160 newpa->addr.p.tbl = pfr_attach_table(ruleset,
4161 newpa->addr.v.tblname);
4162 if (newpa->addr.p.tbl == NULL)
4167 goto DIOCCHANGEADDR_error;
4170 switch (pca->action) {
4171 case PF_CHANGE_ADD_HEAD:
4172 oldpa = TAILQ_FIRST(&pool->list);
4174 case PF_CHANGE_ADD_TAIL:
4175 oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
4178 oldpa = TAILQ_FIRST(&pool->list);
4179 for (int i = 0; oldpa && i < pca->nr; i++)
4180 oldpa = TAILQ_NEXT(oldpa, entries);
4186 if (pca->action == PF_CHANGE_REMOVE) {
4187 TAILQ_REMOVE(&pool->list, oldpa, entries);
4188 switch (oldpa->addr.type) {
4189 case PF_ADDR_DYNIFTL:
4190 pfi_dynaddr_remove(oldpa->addr.p.dyn);
4193 pfr_detach_table(oldpa->addr.p.tbl);
4197 pfi_kkif_unref(oldpa->kif);
4198 free(oldpa, M_PFRULE);
4201 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4202 else if (pca->action == PF_CHANGE_ADD_HEAD ||
4203 pca->action == PF_CHANGE_ADD_BEFORE)
4204 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4206 TAILQ_INSERT_AFTER(&pool->list, oldpa,
4210 pool->cur = TAILQ_FIRST(&pool->list);
4211 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
4216 DIOCCHANGEADDR_error:
4217 if (newpa != NULL) {
4219 pfi_kkif_unref(newpa->kif);
4220 free(newpa, M_PFRULE);
4227 case DIOCGETRULESETS: {
4228 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
4229 struct pf_kruleset *ruleset;
4230 struct pf_kanchor *anchor;
4233 pr->path[sizeof(pr->path) - 1] = 0;
4234 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4240 if (ruleset->anchor == NULL) {
4241 /* XXX kludge for pf_main_ruleset */
4242 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4243 if (anchor->parent == NULL)
4246 RB_FOREACH(anchor, pf_kanchor_node,
4247 &ruleset->anchor->children)
4254 case DIOCGETRULESET: {
4255 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
4256 struct pf_kruleset *ruleset;
4257 struct pf_kanchor *anchor;
4261 pr->path[sizeof(pr->path) - 1] = 0;
4262 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4268 if (ruleset->anchor == NULL) {
4269 /* XXX kludge for pf_main_ruleset */
4270 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4271 if (anchor->parent == NULL && nr++ == pr->nr) {
4272 strlcpy(pr->name, anchor->name,
4277 RB_FOREACH(anchor, pf_kanchor_node,
4278 &ruleset->anchor->children)
4279 if (nr++ == pr->nr) {
4280 strlcpy(pr->name, anchor->name,
4291 case DIOCRCLRTABLES: {
4292 struct pfioc_table *io = (struct pfioc_table *)addr;
4294 if (io->pfrio_esize != 0) {
4299 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
4300 io->pfrio_flags | PFR_FLAG_USERIOCTL);
4305 case DIOCRADDTABLES: {
4306 struct pfioc_table *io = (struct pfioc_table *)addr;
4307 struct pfr_table *pfrts;
4310 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4315 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4316 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4321 totlen = io->pfrio_size * sizeof(struct pfr_table);
4322 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4324 error = copyin(io->pfrio_buffer, pfrts, totlen);
4326 free(pfrts, M_TEMP);
4330 error = pfr_add_tables(pfrts, io->pfrio_size,
4331 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4333 free(pfrts, M_TEMP);
4337 case DIOCRDELTABLES: {
4338 struct pfioc_table *io = (struct pfioc_table *)addr;
4339 struct pfr_table *pfrts;
4342 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4347 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4348 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4353 totlen = io->pfrio_size * sizeof(struct pfr_table);
4354 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4356 error = copyin(io->pfrio_buffer, pfrts, totlen);
4358 free(pfrts, M_TEMP);
4362 error = pfr_del_tables(pfrts, io->pfrio_size,
4363 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4365 free(pfrts, M_TEMP);
4369 case DIOCRGETTABLES: {
4370 struct pfioc_table *io = (struct pfioc_table *)addr;
4371 struct pfr_table *pfrts;
4375 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4380 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4386 io->pfrio_size = min(io->pfrio_size, n);
4388 totlen = io->pfrio_size * sizeof(struct pfr_table);
4390 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4392 if (pfrts == NULL) {
4397 error = pfr_get_tables(&io->pfrio_table, pfrts,
4398 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4401 error = copyout(pfrts, io->pfrio_buffer, totlen);
4402 free(pfrts, M_TEMP);
4406 case DIOCRGETTSTATS: {
4407 struct pfioc_table *io = (struct pfioc_table *)addr;
4408 struct pfr_tstats *pfrtstats;
4412 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
4417 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4423 io->pfrio_size = min(io->pfrio_size, n);
4425 totlen = io->pfrio_size * sizeof(struct pfr_tstats);
4426 pfrtstats = mallocarray(io->pfrio_size,
4427 sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT);
4428 if (pfrtstats == NULL) {
4433 error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
4434 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4437 error = copyout(pfrtstats, io->pfrio_buffer, totlen);
4438 free(pfrtstats, M_TEMP);
4442 case DIOCRCLRTSTATS: {
4443 struct pfioc_table *io = (struct pfioc_table *)addr;
4444 struct pfr_table *pfrts;
4447 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4452 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4453 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4454 /* We used to count tables and use the minimum required
4455 * size, so we didn't fail on overly large requests. */
4457 io->pfrio_size = pf_ioctl_maxcount;
4461 totlen = io->pfrio_size * sizeof(struct pfr_table);
4462 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4464 if (pfrts == NULL) {
4468 error = copyin(io->pfrio_buffer, pfrts, totlen);
4470 free(pfrts, M_TEMP);
4475 error = pfr_clr_tstats(pfrts, io->pfrio_size,
4476 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4478 free(pfrts, M_TEMP);
4482 case DIOCRSETTFLAGS: {
4483 struct pfioc_table *io = (struct pfioc_table *)addr;
4484 struct pfr_table *pfrts;
4488 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4494 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4501 io->pfrio_size = min(io->pfrio_size, n);
4504 totlen = io->pfrio_size * sizeof(struct pfr_table);
4505 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4507 error = copyin(io->pfrio_buffer, pfrts, totlen);
4509 free(pfrts, M_TEMP);
4513 error = pfr_set_tflags(pfrts, io->pfrio_size,
4514 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
4515 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4517 free(pfrts, M_TEMP);
4521 case DIOCRCLRADDRS: {
4522 struct pfioc_table *io = (struct pfioc_table *)addr;
4524 if (io->pfrio_esize != 0) {
4529 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
4530 io->pfrio_flags | PFR_FLAG_USERIOCTL);
4535 case DIOCRADDADDRS: {
4536 struct pfioc_table *io = (struct pfioc_table *)addr;
4537 struct pfr_addr *pfras;
4540 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4544 if (io->pfrio_size < 0 ||
4545 io->pfrio_size > pf_ioctl_maxcount ||
4546 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4550 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4551 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4557 error = copyin(io->pfrio_buffer, pfras, totlen);
4559 free(pfras, M_TEMP);
4563 error = pfr_add_addrs(&io->pfrio_table, pfras,
4564 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
4565 PFR_FLAG_USERIOCTL);
4567 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4568 error = copyout(pfras, io->pfrio_buffer, totlen);
4569 free(pfras, M_TEMP);
4573 case DIOCRDELADDRS: {
4574 struct pfioc_table *io = (struct pfioc_table *)addr;
4575 struct pfr_addr *pfras;
4578 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4582 if (io->pfrio_size < 0 ||
4583 io->pfrio_size > pf_ioctl_maxcount ||
4584 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4588 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4589 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4595 error = copyin(io->pfrio_buffer, pfras, totlen);
4597 free(pfras, M_TEMP);
4601 error = pfr_del_addrs(&io->pfrio_table, pfras,
4602 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
4603 PFR_FLAG_USERIOCTL);
4605 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4606 error = copyout(pfras, io->pfrio_buffer, totlen);
4607 free(pfras, M_TEMP);
4611 case DIOCRSETADDRS: {
4612 struct pfioc_table *io = (struct pfioc_table *)addr;
4613 struct pfr_addr *pfras;
4614 size_t totlen, count;
4616 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4620 if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
4624 count = max(io->pfrio_size, io->pfrio_size2);
4625 if (count > pf_ioctl_maxcount ||
4626 WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
4630 totlen = count * sizeof(struct pfr_addr);
4631 pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
4637 error = copyin(io->pfrio_buffer, pfras, totlen);
4639 free(pfras, M_TEMP);
4643 error = pfr_set_addrs(&io->pfrio_table, pfras,
4644 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
4645 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
4646 PFR_FLAG_USERIOCTL, 0);
4648 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4649 error = copyout(pfras, io->pfrio_buffer, totlen);
4650 free(pfras, M_TEMP);
4654 case DIOCRGETADDRS: {
4655 struct pfioc_table *io = (struct pfioc_table *)addr;
4656 struct pfr_addr *pfras;
4659 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4663 if (io->pfrio_size < 0 ||
4664 io->pfrio_size > pf_ioctl_maxcount ||
4665 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4669 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4670 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4677 error = pfr_get_addrs(&io->pfrio_table, pfras,
4678 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4681 error = copyout(pfras, io->pfrio_buffer, totlen);
4682 free(pfras, M_TEMP);
4686 case DIOCRGETASTATS: {
4687 struct pfioc_table *io = (struct pfioc_table *)addr;
4688 struct pfr_astats *pfrastats;
4691 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
4695 if (io->pfrio_size < 0 ||
4696 io->pfrio_size > pf_ioctl_maxcount ||
4697 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
4701 totlen = io->pfrio_size * sizeof(struct pfr_astats);
4702 pfrastats = mallocarray(io->pfrio_size,
4703 sizeof(struct pfr_astats), M_TEMP, M_NOWAIT);
4709 error = pfr_get_astats(&io->pfrio_table, pfrastats,
4710 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4713 error = copyout(pfrastats, io->pfrio_buffer, totlen);
4714 free(pfrastats, M_TEMP);
4718 case DIOCRCLRASTATS: {
4719 struct pfioc_table *io = (struct pfioc_table *)addr;
4720 struct pfr_addr *pfras;
4723 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4727 if (io->pfrio_size < 0 ||
4728 io->pfrio_size > pf_ioctl_maxcount ||
4729 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4733 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4734 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4740 error = copyin(io->pfrio_buffer, pfras, totlen);
4742 free(pfras, M_TEMP);
4746 error = pfr_clr_astats(&io->pfrio_table, pfras,
4747 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
4748 PFR_FLAG_USERIOCTL);
4750 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4751 error = copyout(pfras, io->pfrio_buffer, totlen);
4752 free(pfras, M_TEMP);
4756 case DIOCRTSTADDRS: {
4757 struct pfioc_table *io = (struct pfioc_table *)addr;
4758 struct pfr_addr *pfras;
4761 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4765 if (io->pfrio_size < 0 ||
4766 io->pfrio_size > pf_ioctl_maxcount ||
4767 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4771 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4772 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4778 error = copyin(io->pfrio_buffer, pfras, totlen);
4780 free(pfras, M_TEMP);
4784 error = pfr_tst_addrs(&io->pfrio_table, pfras,
4785 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
4786 PFR_FLAG_USERIOCTL);
4789 error = copyout(pfras, io->pfrio_buffer, totlen);
4790 free(pfras, M_TEMP);
4794 case DIOCRINADEFINE: {
4795 struct pfioc_table *io = (struct pfioc_table *)addr;
4796 struct pfr_addr *pfras;
4799 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4803 if (io->pfrio_size < 0 ||
4804 io->pfrio_size > pf_ioctl_maxcount ||
4805 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4809 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4810 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4816 error = copyin(io->pfrio_buffer, pfras, totlen);
4818 free(pfras, M_TEMP);
4822 error = pfr_ina_define(&io->pfrio_table, pfras,
4823 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
4824 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4826 free(pfras, M_TEMP);
4831 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
4833 error = pf_osfp_add(io);
4839 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
4841 error = pf_osfp_get(io);
4847 struct pfioc_trans *io = (struct pfioc_trans *)addr;
4848 struct pfioc_trans_e *ioes, *ioe;
4852 if (io->esize != sizeof(*ioe)) {
4857 io->size > pf_ioctl_maxcount ||
4858 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
4862 totlen = sizeof(struct pfioc_trans_e) * io->size;
4863 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
4869 error = copyin(io->array, ioes, totlen);
4875 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
4876 switch (ioe->rs_num) {
4878 case PF_RULESET_ALTQ:
4879 if (ioe->anchor[0]) {
4885 if ((error = pf_begin_altq(&ioe->ticket))) {
4892 case PF_RULESET_TABLE:
4894 struct pfr_table table;
4896 bzero(&table, sizeof(table));
4897 strlcpy(table.pfrt_anchor, ioe->anchor,
4898 sizeof(table.pfrt_anchor));
4899 if ((error = pfr_ina_begin(&table,
4900 &ioe->ticket, NULL, 0))) {
4908 if ((error = pf_begin_rules(&ioe->ticket,
4909 ioe->rs_num, ioe->anchor))) {
4918 error = copyout(ioes, io->array, totlen);
4923 case DIOCXROLLBACK: {
4924 struct pfioc_trans *io = (struct pfioc_trans *)addr;
4925 struct pfioc_trans_e *ioe, *ioes;
4929 if (io->esize != sizeof(*ioe)) {
4934 io->size > pf_ioctl_maxcount ||
4935 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
4939 totlen = sizeof(struct pfioc_trans_e) * io->size;
4940 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
4946 error = copyin(io->array, ioes, totlen);
4952 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
4953 switch (ioe->rs_num) {
4955 case PF_RULESET_ALTQ:
4956 if (ioe->anchor[0]) {
4962 if ((error = pf_rollback_altq(ioe->ticket))) {
4965 goto fail; /* really bad */
4969 case PF_RULESET_TABLE:
4971 struct pfr_table table;
4973 bzero(&table, sizeof(table));
4974 strlcpy(table.pfrt_anchor, ioe->anchor,
4975 sizeof(table.pfrt_anchor));
4976 if ((error = pfr_ina_rollback(&table,
4977 ioe->ticket, NULL, 0))) {
4980 goto fail; /* really bad */
4985 if ((error = pf_rollback_rules(ioe->ticket,
4986 ioe->rs_num, ioe->anchor))) {
4989 goto fail; /* really bad */
5000 struct pfioc_trans *io = (struct pfioc_trans *)addr;
5001 struct pfioc_trans_e *ioe, *ioes;
5002 struct pf_kruleset *rs;
5006 if (io->esize != sizeof(*ioe)) {
5012 io->size > pf_ioctl_maxcount ||
5013 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5018 totlen = sizeof(struct pfioc_trans_e) * io->size;
5019 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5025 error = copyin(io->array, ioes, totlen);
5031 /* First, make sure everything will succeed. */
5032 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5033 switch (ioe->rs_num) {
5035 case PF_RULESET_ALTQ:
5036 if (ioe->anchor[0]) {
5042 if (!V_altqs_inactive_open || ioe->ticket !=
5043 V_ticket_altqs_inactive) {
5051 case PF_RULESET_TABLE:
5052 rs = pf_find_kruleset(ioe->anchor);
5053 if (rs == NULL || !rs->topen || ioe->ticket !=
5062 if (ioe->rs_num < 0 || ioe->rs_num >=
5069 rs = pf_find_kruleset(ioe->anchor);
5071 !rs->rules[ioe->rs_num].inactive.open ||
5072 rs->rules[ioe->rs_num].inactive.ticket !=
5082 /* Now do the commit - no errors should happen here. */
5083 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5084 switch (ioe->rs_num) {
5086 case PF_RULESET_ALTQ:
5087 if ((error = pf_commit_altq(ioe->ticket))) {
5090 goto fail; /* really bad */
5094 case PF_RULESET_TABLE:
5096 struct pfr_table table;
5098 bzero(&table, sizeof(table));
5099 strlcpy(table.pfrt_anchor, ioe->anchor,
5100 sizeof(table.pfrt_anchor));
5101 if ((error = pfr_ina_commit(&table,
5102 ioe->ticket, NULL, NULL, 0))) {
5105 goto fail; /* really bad */
5110 if ((error = pf_commit_rules(ioe->ticket,
5111 ioe->rs_num, ioe->anchor))) {
5114 goto fail; /* really bad */
5124 case DIOCGETSRCNODES: {
5125 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
5126 struct pf_srchash *sh;
5127 struct pf_ksrc_node *n;
5128 struct pf_src_node *p, *pstore;
5131 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5133 PF_HASHROW_LOCK(sh);
5134 LIST_FOREACH(n, &sh->nodes, entry)
5136 PF_HASHROW_UNLOCK(sh);
5139 psn->psn_len = min(psn->psn_len,
5140 sizeof(struct pf_src_node) * nr);
5142 if (psn->psn_len == 0) {
5143 psn->psn_len = sizeof(struct pf_src_node) * nr;
5149 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
5150 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5152 PF_HASHROW_LOCK(sh);
5153 LIST_FOREACH(n, &sh->nodes, entry) {
5155 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
5158 pf_src_node_copy(n, p);
5163 PF_HASHROW_UNLOCK(sh);
5165 error = copyout(pstore, psn->psn_src_nodes,
5166 sizeof(struct pf_src_node) * nr);
5168 free(pstore, M_TEMP);
5171 psn->psn_len = sizeof(struct pf_src_node) * nr;
5172 free(pstore, M_TEMP);
5176 case DIOCCLRSRCNODES: {
5178 pf_clear_srcnodes(NULL);
5179 pf_purge_expired_src_nodes();
5183 case DIOCKILLSRCNODES:
5184 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5187 case DIOCKEEPCOUNTERS:
5188 error = pf_keepcounters((struct pfioc_nv *)addr);
5191 case DIOCSETHOSTID: {
5192 u_int32_t *hostid = (u_int32_t *)addr;
5196 V_pf_status.hostid = arc4random();
5198 V_pf_status.hostid = *hostid;
5209 case DIOCIGETIFACES: {
5210 struct pfioc_iface *io = (struct pfioc_iface *)addr;
5211 struct pfi_kif *ifstore;
5214 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
5219 if (io->pfiio_size < 0 ||
5220 io->pfiio_size > pf_ioctl_maxcount ||
5221 WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
5226 bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
5227 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
5229 if (ifstore == NULL) {
5235 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
5237 error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5238 free(ifstore, M_TEMP);
5242 case DIOCSETIFFLAG: {
5243 struct pfioc_iface *io = (struct pfioc_iface *)addr;
5246 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5251 case DIOCCLRIFFLAG: {
5252 struct pfioc_iface *io = (struct pfioc_iface *)addr;
5255 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5265 if (sx_xlocked(&pf_ioctl_lock))
5266 sx_xunlock(&pf_ioctl_lock);
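/*
 * Export a kernel state into the wire format shared with pfsync,
 * converting counters and times to network byte order.
 */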
5275 pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
5277 bzero(sp, sizeof(struct pfsync_state));
5279 /* copy from state key */
5280 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5281 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5282 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5283 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5284 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5285 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5286 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5287 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5288 sp->proto = st->key[PF_SK_WIRE]->proto;
5289 sp->af = st->key[PF_SK_WIRE]->af;
5291 /* copy from state */
5292 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5293 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
5294 sp->creation = htonl(time_uptime - st->creation);
5295 sp->expire = pf_state_expires(st);
5296 if (sp->expire <= time_uptime)
5297 sp->expire = htonl(0);
5299 sp->expire = htonl(sp->expire - time_uptime);
5301 sp->direction = st->direction;
5303 sp->timeout = st->timeout;
5304 sp->state_flags = st->state_flags;
5306 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5307 if (st->nat_src_node)
5308 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5311 sp->creatorid = st->creatorid;
5312 pf_state_peer_hton(&st->src, &sp->src);
5313 pf_state_peer_hton(&st->dst, &sp->dst);
5315 if (st->rule.ptr == NULL)
5316 sp->rule = htonl(-1);
5318 sp->rule = htonl(st->rule.ptr->nr);
5319 if (st->anchor.ptr == NULL)
5320 sp->anchor = htonl(-1);
5322 sp->anchor = htonl(st->anchor.ptr->nr);
5323 if (st->nat_rule.ptr == NULL)
5324 sp->nat_rule = htonl(-1);
5326 sp->nat_rule = htonl(st->nat_rule.ptr->nr);
5328 pf_state_counter_hton(counter_u64_fetch(st->packets[0]),
5330 pf_state_counter_hton(counter_u64_fetch(st->packets[1]),
5332 pf_state_counter_hton(counter_u64_fetch(st->bytes[0]), sp->bytes[0]);
5333 pf_state_counter_hton(counter_u64_fetch(st->bytes[1]), sp->bytes[1]);
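/*
 * Report the active address count of a PF_ADDR_TABLE address back to
 * userspace.
 */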
5338 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
5340 struct pfr_ktable *kt;
5342 KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
5345 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
5346 kt = kt->pfrkt_root;
5348 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
5353 /* XXX - Check for version mismatch!!! */
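/*
 * Mark every state for purging and unlink it, suppressing individual
 * pfsync delete messages.
 */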
5356 pf_clear_all_states(void)
5361 for (i = 0; i <= pf_hashmask; i++) {
5362 struct pf_idhash *ih = &V_pf_idhash[i];
5364 PF_HASHROW_LOCK(ih);
5365 LIST_FOREACH(s, &ih->states, entry) {
5366 s->timeout = PFTM_PURGE;
5367 /* Don't send out individual delete messages. */
5368 s->state_flags |= PFSTATE_NOSYNC;
5369 pf_unlink_state(s, PF_ENTER_LOCKED);
5372 PF_HASHROW_UNLOCK(ih);
5377 pf_clear_tables(void)
5379 struct pfioc_table io;
5382 bzero(&io, sizeof(io));
5384 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
5391 pf_clear_srcnodes(struct pf_ksrc_node *n)
5396 for (i = 0; i <= pf_hashmask; i++) {
5397 struct pf_idhash *ih = &V_pf_idhash[i];
5399 PF_HASHROW_LOCK(ih);
5400 LIST_FOREACH(s, &ih->states, entry) {
5401 if (n == NULL || n == s->src_node)
5403 if (n == NULL || n == s->nat_src_node)
5404 s->nat_src_node = NULL;
5406 PF_HASHROW_UNLOCK(ih);
5410 struct pf_srchash *sh;
5412 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5414 PF_HASHROW_LOCK(sh);
5415 LIST_FOREACH(n, &sh->nodes, entry) {
5419 PF_HASHROW_UNLOCK(sh);
5422 /* XXX: hash slot should already be locked here. */
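/*
 * DIOCKILLSRCNODES: collect every source node whose source/destination
 * addresses match the request, clear the state pointers that still
 * reference the collected nodes, then free them and report the count.
 */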
5429 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
5431 struct pf_ksrc_node_list kill;
5434 for (int i = 0; i <= pf_srchashmask; i++) {
5435 struct pf_srchash *sh = &V_pf_srchash[i];
5436 struct pf_ksrc_node *sn, *tmp;
5438 PF_HASHROW_LOCK(sh);
5439 LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
5440 if (PF_MATCHA(psnk->psnk_src.neg,
5441 &psnk->psnk_src.addr.v.a.addr,
5442 &psnk->psnk_src.addr.v.a.mask,
5443 &sn->addr, sn->af) &&
5444 PF_MATCHA(psnk->psnk_dst.neg,
5445 &psnk->psnk_dst.addr.v.a.addr,
5446 &psnk->psnk_dst.addr.v.a.mask,
5447 &sn->raddr, sn->af)) {
5448 pf_unlink_src_node(sn);
5449 LIST_INSERT_HEAD(&kill, sn, entry);
5450 sn->expire = 1;
5451 }
5452 PF_HASHROW_UNLOCK(sh);
5453 }
5455 for (int i = 0; i <= pf_hashmask; i++) {
5456 struct pf_idhash *ih = &V_pf_idhash[i];
5457 struct pf_state *s;
5459 PF_HASHROW_LOCK(ih);
5460 LIST_FOREACH(s, &ih->states, entry) {
5461 if (s->src_node && s->src_node->expire == 1)
5462 s->src_node = NULL;
5463 if (s->nat_src_node && s->nat_src_node->expire == 1)
5464 s->nat_src_node = NULL;
5465 }
5466 PF_HASHROW_UNLOCK(ih);
5467 }
5469 psnk->psnk_killed = pf_free_src_nodes(&kill);
5470 }
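/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * roughly how a userland tool could drive pf_kill_srcnodes() above via the
 * pf device.  The ioctl name DIOCKILLSRCNODES and the psnk_af member are
 * assumptions taken from the pf(4) interface; the psnk_src address paths and
 * psnk_killed are taken directly from the code above.  Abridged includes:
 * <sys/ioctl.h>, <net/pfvar.h>, <arpa/inet.h>, <err.h>.
 *
 *	struct pfioc_src_node_kill psnk;
 *	int dev = open("/dev/pf", O_RDWR);
 *
 *	memset(&psnk, 0, sizeof(psnk));
 *	psnk.psnk_af = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &psnk.psnk_src.addr.v.a.addr.v4);
 *	memset(&psnk.psnk_src.addr.v.a.mask, 0xff, 4);	(exact-match mask)
 *	if (ioctl(dev, DIOCKILLSRCNODES, &psnk) == -1)
 *		err(1, "DIOCKILLSRCNODES");
 *	printf("killed %u source nodes\n", psnk.psnk_killed);
 */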
5472 static int
5473 pf_keepcounters(struct pfioc_nv *nv)
5474 {
5475 nvlist_t *nvl = NULL;
5476 void *nvlpacked = NULL;
5477 int error = 0;
5479 #define ERROUT(x) ERROUT_FUNCTION(on_error, x)
5481 if (nv->len > pf_ioctl_maxcount)
5482 ERROUT(ENOMEM);
5484 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
5485 if (nvlpacked == NULL)
5486 ERROUT(ENOMEM);
5488 error = copyin(nv->data, nvlpacked, nv->len);
5489 if (error)
5490 ERROUT(error);
5492 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
5493 if (nvl == NULL)
5494 ERROUT(EBADMSG);
5496 if (! nvlist_exists_bool(nvl, "keep_counters"))
5497 ERROUT(EBADMSG);
5499 V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");
5501 on_error:
5502 nvlist_destroy(nvl);
5503 free(nvlpacked, M_TEMP);
5504 return (error);
5505 }
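/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * the expected userland side of the request handled by pf_keepcounters()
 * above, built with libnv.  The ioctl name DIOCKEEPCOUNTERS is an
 * assumption; the "keep_counters" key and the pfioc_nv data/len/size
 * fields come from the handler itself.
 *
 *	nvlist_t *nvl = nvlist_create(0);
 *	struct pfioc_nv nv;
 *
 *	nvlist_add_bool(nvl, "keep_counters", true);
 *	nv.data = nvlist_pack(nvl, &nv.len);
 *	nv.size = nv.len;
 *	if (ioctl(dev, DIOCKEEPCOUNTERS, &nv) == -1)
 *		err(1, "DIOCKEEPCOUNTERS");
 *	nvlist_destroy(nvl);
 *	free(nv.data);
 */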
5507 static unsigned int
5508 pf_clear_states(const struct pf_kstate_kill *kill)
5509 {
5510 struct pf_state_key_cmp match_key;
5511 struct pf_state *s;
5512 int idx;
5513 unsigned int killed = 0, dir;
5515 for (unsigned int i = 0; i <= pf_hashmask; i++) {
5516 struct pf_idhash *ih = &V_pf_idhash[i];
5518 relock_DIOCCLRSTATES:
5519 PF_HASHROW_LOCK(ih);
5520 LIST_FOREACH(s, &ih->states, entry) {
5521 if (kill->psk_ifname[0] &&
5522 strcmp(kill->psk_ifname,
5523 s->kif->pfik_name))
5524 continue;
5526 if (kill->psk_kill_match) {
5527 bzero(&match_key, sizeof(match_key));
5529 if (s->direction == PF_OUT) {
5530 dir = PF_IN;
5531 idx = PF_SK_STACK;
5532 } else {
5533 dir = PF_OUT;
5534 idx = PF_SK_WIRE;
5535 }
5537 match_key.af = s->key[idx]->af;
5538 match_key.proto = s->key[idx]->proto;
5539 PF_ACPY(&match_key.addr[0],
5540 &s->key[idx]->addr[1], match_key.af);
5541 match_key.port[0] = s->key[idx]->port[1];
5542 PF_ACPY(&match_key.addr[1],
5543 &s->key[idx]->addr[0], match_key.af);
5544 match_key.port[1] = s->key[idx]->port[0];
5545 }
5547 /*
5548 * Don't send out individual
5549 * delete messages.
5550 */
5551 s->state_flags |= PFSTATE_NOSYNC;
5552 pf_unlink_state(s, PF_ENTER_LOCKED);
5553 killed++;
5555 if (kill->psk_kill_match)
5556 killed += pf_kill_matching_state(&match_key,
5557 dir);
5559 goto relock_DIOCCLRSTATES;
5560 }
5561 PF_HASHROW_UNLOCK(ih);
5562 }
5564 if (V_pfsync_clear_states_ptr != NULL)
5565 V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);
5567 return (killed);
5568 }
5570 static int
5571 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
5572 {
5573 struct pf_state *s;
5575 if (kill->psk_pfcmp.id) {
5576 if (kill->psk_pfcmp.creatorid == 0)
5577 kill->psk_pfcmp.creatorid = V_pf_status.hostid;
5578 if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
5579 kill->psk_pfcmp.creatorid))) {
5580 pf_unlink_state(s, PF_ENTER_LOCKED);
5581 *killed = 1;
5582 }
5584 return (0);
5585 }
5586 for (unsigned int i = 0; i <= pf_hashmask; i++)
5587 *killed += pf_killstates_row(kill, &V_pf_idhash[i]);
5589 return (0);
5590 }
5592 static int
5593 pf_killstates_nv(struct pfioc_nv *nv)
5594 {
5595 struct pf_kstate_kill kill;
5596 nvlist_t *nvl = NULL;
5597 void *nvlpacked = NULL;
5598 int error = 0;
5599 unsigned int killed = 0;
5601 #define ERROUT(x) ERROUT_FUNCTION(on_error, x)
5603 if (nv->len > pf_ioctl_maxcount)
5604 ERROUT(ENOMEM);
5606 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
5607 if (nvlpacked == NULL)
5608 ERROUT(ENOMEM);
5610 error = copyin(nv->data, nvlpacked, nv->len);
5611 if (error)
5612 ERROUT(error);
5614 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
5615 if (nvl == NULL)
5616 ERROUT(EBADMSG);
5618 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
5619 if (error)
5620 ERROUT(error);
5622 error = pf_killstates(&kill, &killed);
5624 free(nvlpacked, M_TEMP);
5625 nvlpacked = NULL;
5626 nvlist_destroy(nvl);
5627 nvl = nvlist_create(0);
5628 if (nvl == NULL)
5629 ERROUT(ENOMEM);
5631 nvlist_add_number(nvl, "killed", killed);
5633 nvlpacked = nvlist_pack(nvl, &nv->len);
5634 if (nvlpacked == NULL)
5635 ERROUT(ENOMEM);
5637 if (nv->size == 0)
5638 ERROUT(0);
5639 else if (nv->size < nv->len)
5640 ERROUT(ENOSPC);
5642 error = copyout(nvlpacked, nv->data, nv->len);
5644 on_error:
5645 nvlist_destroy(nvl);
5646 free(nvlpacked, M_TEMP);
5647 return (error);
5648 }
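/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * how a caller might invoke pf_killstates_nv() above and read back the
 * "killed" count.  DIOCKILLSTATESNV and the "ifname" key are assumptions
 * based on pf_nvstate_kill_to_kstate_kill(); the size/len handling mirrors
 * the ENOSPC check in the handler (the reply is packed into the same user
 * buffer, so that buffer must be large enough).
 *
 *	nvlist_t *nvl = nvlist_create(0);
 *	struct pfioc_nv nv;
 *	void *packed;
 *
 *	nvlist_add_string(nvl, "ifname", "em0");
 *	packed = nvlist_pack(nvl, &nv.len);
 *	nv.size = 4096;			(room for the reply nvlist)
 *	nv.data = malloc(nv.size);
 *	memcpy(nv.data, packed, nv.len);
 *	if (ioctl(dev, DIOCKILLSTATESNV, &nv) == -1)
 *		err(1, "DIOCKILLSTATESNV");
 *	nvlist_destroy(nvl);
 *	nvl = nvlist_unpack(nv.data, nv.len, 0);
 *	printf("killed %ju states\n",
 *	    (uintmax_t)nvlist_get_number(nvl, "killed"));
 */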
5650 static int
5651 pf_clearstates_nv(struct pfioc_nv *nv)
5652 {
5653 struct pf_kstate_kill kill;
5654 nvlist_t *nvl = NULL;
5655 void *nvlpacked = NULL;
5656 int error = 0;
5657 unsigned int killed;
5659 #define ERROUT(x) ERROUT_FUNCTION(on_error, x)
5661 if (nv->len > pf_ioctl_maxcount)
5662 ERROUT(ENOMEM);
5664 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
5665 if (nvlpacked == NULL)
5666 ERROUT(ENOMEM);
5668 error = copyin(nv->data, nvlpacked, nv->len);
5669 if (error)
5670 ERROUT(error);
5672 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
5673 if (nvl == NULL)
5674 ERROUT(EBADMSG);
5676 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
5677 if (error)
5678 ERROUT(error);
5680 killed = pf_clear_states(&kill);
5682 free(nvlpacked, M_TEMP);
5683 nvlpacked = NULL;
5684 nvlist_destroy(nvl);
5685 nvl = nvlist_create(0);
5686 if (nvl == NULL)
5687 ERROUT(ENOMEM);
5689 nvlist_add_number(nvl, "killed", killed);
5691 nvlpacked = nvlist_pack(nvl, &nv->len);
5692 if (nvlpacked == NULL)
5693 ERROUT(ENOMEM);
5695 if (nv->size == 0)
5696 ERROUT(0);
5697 else if (nv->size < nv->len)
5698 ERROUT(ENOSPC);
5700 error = copyout(nvlpacked, nv->data, nv->len);
5702 on_error:
5703 nvlist_destroy(nvl);
5704 free(nvlpacked, M_TEMP);
5705 return (error);
5706 }
5708 /*
5709 * XXX - Check for version mismatch!!!
5710 */
5712 /*
5713 * Duplicate pfctl -Fa operation to get rid of as much as we can.
5714 */
5715 static int
5716 shutdown_pf(void)
5717 {
5718 int error = 0;
5719 u_int32_t t[5];
5720 char nn = '\0';
5722 do {
5723 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
5724 != 0) {
5725 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
5726 break;
5727 }
5728 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
5729 != 0) {
5730 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
5731 break; /* XXX: rollback? */
5732 }
5733 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
5734 != 0) {
5735 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
5736 break; /* XXX: rollback? */
5737 }
5738 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
5739 != 0) {
5740 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
5741 break; /* XXX: rollback? */
5742 }
5743 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
5744 != 0) {
5745 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
5746 break; /* XXX: rollback? */
5747 }
5749 /* XXX: these should always succeed here */
5750 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
5751 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
5752 pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
5753 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
5754 pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
5756 if ((error = pf_clear_tables()) != 0)
5757 break;
5759 #ifdef ALTQ
5760 if ((error = pf_begin_altq(&t[0])) != 0) {
5761 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
5762 break;
5763 }
5764 pf_commit_altq(t[0]);
5765 #endif
5767 pf_clear_all_states();
5769 pf_clear_srcnodes(NULL);
5771 /* status does not use malloced mem so no need to cleanup */
5772 /* fingerprints and interfaces have their own cleanup code */
5773 } while(0);
5775 return (error);
5776 }
5780 pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
5785 chk = pf_test(PF_IN, flags, ifp, m, inp);
5797 pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
5802 chk = pf_test(PF_OUT, flags, ifp, m, inp);
5816 pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
5822 * In the case of loopback traffic, IPv6 uses the real interface in
5823 * order to support scoped addresses. To support stateful
5824 * filtering we have to change this to lo0, as is the case in IPv4.
5826 CURVNET_SET(ifp->if_vnet);
5827 chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp);
5839 pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
5844 CURVNET_SET(ifp->if_vnet);
5845 chk = pf_test6(PF_OUT, flags, ifp, m, inp);
5861 struct pfil_head *pfh_inet;
5864 struct pfil_head *pfh_inet6;
5867 if (V_pf_pfil_hooked)
5868 return (0);
5871 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
5872 if (pfh_inet == NULL)
5873 return (ESRCH); /* XXX */
5874 pfil_add_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
5875 pfil_add_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
5878 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
5879 if (pfh_inet6 == NULL) {
5880 #ifdef INET
5881 pfil_remove_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
5882 pfh_inet);
5883 pfil_remove_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
5884 pfh_inet);
5885 #endif
5886 return (ESRCH); /* XXX */
5887 }
5888 pfil_add_hook_flags(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
5889 pfil_add_hook_flags(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
5892 V_pf_pfil_hooked = 1;
5894 return (0);
5895 }
5897 static int
5898 dehook_pf(void)
5899 {
5900 struct pfil_head *pfh_inet;
5903 struct pfil_head *pfh_inet6;
5906 if (V_pf_pfil_hooked == 0)
5907 return (0);
5910 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
5911 if (pfh_inet == NULL)
5912 return (ESRCH); /* XXX */
5913 pfil_remove_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
5914 pfh_inet);
5915 pfil_remove_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
5916 pfh_inet);
5917 #endif
5918 #ifdef INET6
5919 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
5920 if (pfh_inet6 == NULL)
5921 return (ESRCH); /* XXX */
5922 pfil_remove_hook_flags(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
5923 pfh_inet6);
5924 pfil_remove_hook_flags(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
5925 pfh_inet6);
5926 #endif
5928 V_pf_pfil_hooked = 0;
5930 return (0);
5931 }
5935 V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
5936 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
5938 pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
5939 PF_RULE_TAG_HASH_SIZE_DEFAULT);
5941 pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
5942 PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
5946 V_pf_vnet_active = 1;
5954 rm_init(&pf_rules_lock, "pf rulesets");
5955 sx_init(&pf_ioctl_lock, "pf ioctl");
5956 sx_init(&pf_end_lock, "pf end thread");
5958 pf_mtag_initialize();
5960 pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
5965 error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
5974 static void
5975 pf_unload_vnet(void)
5976 {
5977 int error, ret;
5979 V_pf_vnet_active = 0;
5980 V_pf_status.running = 0;
5981 error = dehook_pf();
5982 if (error) {
5983 /*
5984 * Should not happen!
5985 * XXX Due to error code ESRCH, kldunload will show
5986 * a message like 'No such process'.
5987 */
5988 printf("%s: pfil unregistration failed\n", __FUNCTION__);
5989 return;
5990 }
5996 ret = swi_remove(V_pf_swi_cookie);
5997 MPASS(ret == 0);
5998 ret = intr_event_destroy(V_pf_swi_ie);
5999 MPASS(ret == 0);
6001 pf_unload_vnet_purge();
6003 pf_normalize_cleanup();
6010 if (IS_DEFAULT_VNET(curvnet))
6011 pf_mtag_cleanup();
6013 pf_cleanup_tagset(&V_pf_tags);
6015 pf_cleanup_tagset(&V_pf_qids);
6017 uma_zdestroy(V_pf_tag_z);
6019 /* Free counters last as we updated them during shutdown. */
6020 counter_u64_free(V_pf_default_rule.evaluations);
6021 for (int i = 0; i < 2; i++) {
6022 counter_u64_free(V_pf_default_rule.packets[i]);
6023 counter_u64_free(V_pf_default_rule.bytes[i]);
6025 counter_u64_free(V_pf_default_rule.states_cur);
6026 counter_u64_free(V_pf_default_rule.states_tot);
6027 counter_u64_free(V_pf_default_rule.src_nodes);
6029 for (int i = 0; i < PFRES_MAX; i++)
6030 counter_u64_free(V_pf_status.counters[i]);
6031 for (int i = 0; i < LCNT_MAX; i++)
6032 counter_u64_free(V_pf_status.lcounters[i]);
6033 for (int i = 0; i < FCNT_MAX; i++)
6034 counter_u64_free(V_pf_status.fcounters[i]);
6035 for (int i = 0; i < SCNT_MAX; i++)
6036 counter_u64_free(V_pf_status.scounters[i]);
6037 }
6039 static void
6040 pf_unload(void)
6041 {
6043 sx_xlock(&pf_end_lock);
6044 pf_end_threads = 1;
6045 while (pf_end_threads < 2) {
6046 wakeup_one(pf_purge_thread);
6047 sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
6048 }
6049 sx_xunlock(&pf_end_lock);
6051 if (pf_dev != NULL)
6052 destroy_dev(pf_dev);
6056 rm_destroy(&pf_rules_lock);
6057 sx_destroy(&pf_ioctl_lock);
6058 sx_destroy(&pf_end_lock);
6059 }
6061 static void
6062 vnet_pf_init(void *unused __unused)
6063 {
6064 pf_load_vnet();
6065 }
6067 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6068 vnet_pf_init, NULL);
6070 static void
6071 vnet_pf_uninit(const void *unused __unused)
6072 {
6073 pf_unload_vnet();
6074 }
6076 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
6077 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6078 vnet_pf_uninit, NULL);
6081 static int
6082 pf_modevent(module_t mod, int type, void *data)
6083 {
6084 int error = 0;
6086 switch(type) {
6087 case MOD_LOAD:
6088 error = pf_load();
6089 break;
6090 case MOD_UNLOAD:
6091 /* Handled in SYSUNINIT(pf_unload) to ensure it's done after
6092 * the vnet_pf_uninit()s */
6093 break;
6094 default:
6095 error = EINVAL;
6096 break;
6097 }
6099 return (error);
6100 }
6102 static moduledata_t pf_mod = {
6103 "pf",
6104 pf_modevent,
6105 0
6106 };
6108 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
6109 MODULE_VERSION(pf, PF_MODVER);