2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2001 Daniel Hartmeier
5 * Copyright (c) 2002,2003 Henning Brauer
6 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
13 * - Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
33 * Effort sponsored in part by the Defense Advanced Research Projects
34 * Agency (DARPA) and Air Force Research Laboratory, Air Force
35 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
37 * $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
44 #include "opt_inet6.h"
48 #include <sys/param.h>
49 #include <sys/_bitset.h>
50 #include <sys/bitset.h>
53 #include <sys/endian.h>
54 #include <sys/fcntl.h>
55 #include <sys/filio.h>
57 #include <sys/interrupt.h>
59 #include <sys/kernel.h>
60 #include <sys/kthread.h>
63 #include <sys/module.h>
68 #include <sys/socket.h>
69 #include <sys/sysctl.h>
71 #include <sys/ucred.h>
74 #include <net/if_var.h>
76 #include <net/route.h>
78 #include <net/pfvar.h>
79 #include <net/if_pfsync.h>
80 #include <net/if_pflog.h>
82 #include <netinet/in.h>
83 #include <netinet/ip.h>
84 #include <netinet/ip_var.h>
85 #include <netinet6/ip6_var.h>
86 #include <netinet/ip_icmp.h>
87 #include <netpfil/pf/pf_nv.h>
90 #include <netinet/ip6.h>
94 #include <net/altq/altq.h>
97 SDT_PROVIDER_DECLARE(pf);
98 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
99 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
100 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
101 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
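/*
 * These DTrace probes fire on ioctl error paths so failures can be observed
 * without a debug kernel; an illustrative (unverified) one-liner:
 *
 *	dtrace -n 'pf:ioctl::error { trace(arg0); trace(arg1); }'
 */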
103 static struct pf_kpool *pf_get_kpool(char *, u_int32_t, u_int8_t, u_int32_t,
104 u_int8_t, u_int8_t, u_int8_t);
106 static void pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
107 static void pf_empty_kpool(struct pf_kpalist *);
108 static int pfioctl(struct cdev *, u_long, caddr_t, int,
111 static int pf_begin_altq(u_int32_t *);
112 static int pf_rollback_altq(u_int32_t);
113 static int pf_commit_altq(u_int32_t);
114 static int pf_enable_altq(struct pf_altq *);
115 static int pf_disable_altq(struct pf_altq *);
116 static u_int32_t pf_qname2qid(char *);
117 static void pf_qid_unref(u_int32_t);
119 static int pf_begin_rules(u_int32_t *, int, const char *);
120 static int pf_rollback_rules(u_int32_t, int, char *);
121 static int pf_setup_pfsync_matching(struct pf_kruleset *);
122 static void pf_hash_rule(MD5_CTX *, struct pf_krule *);
123 static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
124 static int pf_commit_rules(u_int32_t, int, char *);
125 static int pf_addr_setup(struct pf_kruleset *,
126 struct pf_addr_wrap *, sa_family_t);
127 static void pf_addr_copyout(struct pf_addr_wrap *);
128 static void pf_src_node_copy(const struct pf_ksrc_node *,
129 struct pf_src_node *);
131 static int pf_export_kaltq(struct pf_altq *,
132 struct pfioc_altq_v1 *, size_t);
133 static int pf_import_kaltq(struct pfioc_altq_v1 *,
134 struct pf_altq *, size_t);
137 VNET_DEFINE(struct pf_krule, pf_default_rule);
140 VNET_DEFINE_STATIC(int, pf_altq_running);
141 #define V_pf_altq_running VNET(pf_altq_running)
144 #define TAGID_MAX 50000
146 TAILQ_ENTRY(pf_tagname) namehash_entries;
147 TAILQ_ENTRY(pf_tagname) taghash_entries;
148 char name[PF_TAG_NAME_SIZE];
154 TAILQ_HEAD(, pf_tagname) *namehash;
155 TAILQ_HEAD(, pf_tagname) *taghash;
158 BITSET_DEFINE(, TAGID_MAX) avail;
161 VNET_DEFINE(struct pf_tagset, pf_tags);
162 #define V_pf_tags VNET(pf_tags)
163 static unsigned int pf_rule_tag_hashsize;
164 #define PF_RULE_TAG_HASH_SIZE_DEFAULT 128
165 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
166 &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
167 "Size of pf(4) rule tag hashtable");
170 VNET_DEFINE(struct pf_tagset, pf_qids);
171 #define V_pf_qids VNET(pf_qids)
172 static unsigned int pf_queue_tag_hashsize;
173 #define PF_QUEUE_TAG_HASH_SIZE_DEFAULT 128
174 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
175 &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
176 "Size of pf(4) queue tag hashtable");
178 VNET_DEFINE(uma_zone_t, pf_tag_z);
179 #define V_pf_tag_z VNET(pf_tag_z)
180 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
181 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
183 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
184 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
187 static void pf_init_tagset(struct pf_tagset *, unsigned int *,
189 static void pf_cleanup_tagset(struct pf_tagset *);
190 static uint16_t tagname2hashindex(const struct pf_tagset *, const char *);
191 static uint16_t tag2hashindex(const struct pf_tagset *, uint16_t);
192 static u_int16_t tagname2tag(struct pf_tagset *, char *);
193 static u_int16_t pf_tagname2tag(char *);
194 static void tag_unref(struct pf_tagset *, u_int16_t);
196 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
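/*
 * Typical DPFPRINTF usage (illustrative): the second argument carries the
 * whole printf() argument list in its own parentheses, e.g.
 *
 *	DPFPRINTF(PF_DEBUG_MISC, ("pf: ticket mismatch\n"));
 */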
201 * XXX - These are new and need to be checked when moving to a new version
203 static void pf_clear_all_states(void);
204 static unsigned int pf_clear_states(const struct pf_kstate_kill *);
205 static int pf_killstates(struct pf_kstate_kill *,
207 static int pf_killstates_row(struct pf_kstate_kill *,
209 static int pf_killstates_nv(struct pfioc_nv *);
210 static int pf_clearstates_nv(struct pfioc_nv *);
211 static int pf_getstate(struct pfioc_nv *);
212 static int pf_clear_tables(void);
213 static void pf_clear_srcnodes(struct pf_ksrc_node *);
214 static void pf_kill_srcnodes(struct pfioc_src_node_kill *);
215 static int pf_keepcounters(struct pfioc_nv *);
216 static void pf_tbladdr_copyout(struct pf_addr_wrap *);
219 * Wrapper functions for pfil(9) hooks
222 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
223 int flags, void *ruleset __unused, struct inpcb *inp);
224 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
225 int flags, void *ruleset __unused, struct inpcb *inp);
228 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
229 int flags, void *ruleset __unused, struct inpcb *inp);
230 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
231 int flags, void *ruleset __unused, struct inpcb *inp);
234 static void hook_pf(void);
235 static void dehook_pf(void);
236 static int shutdown_pf(void);
237 static int pf_load(void);
238 static void pf_unload(void);
240 static struct cdevsw pf_cdevsw = {
243 .d_version = D_VERSION,
246 volatile VNET_DEFINE_STATIC(int, pf_pfil_hooked);
247 #define V_pf_pfil_hooked VNET(pf_pfil_hooked)
250 * We need a flag that is neither hooked nor running to know when
251 * the VNET is "valid". We primarily need this to control (global)
252 * external events, e.g., eventhandlers.
254 VNET_DEFINE(int, pf_vnet_active);
255 #define V_pf_vnet_active VNET(pf_vnet_active)
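/*
 * Illustrative use of the flag: global event handlers are expected to bail
 * out early for VNETs where pf is not fully set up, e.g.
 *
 *	if (V_pf_vnet_active == 0)
 *		return;
 */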
258 struct proc *pf_purge_proc;
260 struct rmlock pf_rules_lock;
261 struct sx pf_ioctl_lock;
262 struct sx pf_end_lock;
265 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
266 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
267 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
268 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
269 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
270 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
271 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
274 pflog_packet_t *pflog_packet_ptr = NULL;
276 extern u_long pf_ioctl_maxcount;
278 #define ERROUT_FUNCTION(target, x) \
281 SDT_PROBE3(pf, ioctl, function, error, __func__, error, \
289 u_int32_t *my_timeout = V_pf_default_rule.timeout;
293 pfi_initialize_vnet();
296 V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
297 V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
299 RB_INIT(&V_pf_anchors);
300 pf_init_kruleset(&pf_main_ruleset);
302 /* default rule should never be garbage collected */
303 V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
304 #ifdef PF_DEFAULT_TO_DROP
305 V_pf_default_rule.action = PF_DROP;
307 V_pf_default_rule.action = PF_PASS;
309 V_pf_default_rule.nr = -1;
310 V_pf_default_rule.rtableid = -1;
312 V_pf_default_rule.evaluations = counter_u64_alloc(M_WAITOK);
313 for (int i = 0; i < 2; i++) {
314 V_pf_default_rule.packets[i] = counter_u64_alloc(M_WAITOK);
315 V_pf_default_rule.bytes[i] = counter_u64_alloc(M_WAITOK);
317 V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
318 V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
319 V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);
321 /* initialize default timeouts */
322 my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
323 my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
324 my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
325 my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
326 my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
327 my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
328 my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
329 my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
330 my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
331 my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
332 my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
333 my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
334 my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
335 my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
336 my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
337 my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
338 my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
339 my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
340 my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
341 my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
343 bzero(&V_pf_status, sizeof(V_pf_status));
344 V_pf_status.debug = PF_DEBUG_URGENT;
346 V_pf_pfil_hooked = 0;
348 /* XXX do our best to avoid a conflict */
349 V_pf_status.hostid = arc4random();
351 for (int i = 0; i < PFRES_MAX; i++)
352 V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
353 for (int i = 0; i < LCNT_MAX; i++)
354 V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
355 for (int i = 0; i < FCNT_MAX; i++)
356 V_pf_status.fcounters[i] = counter_u64_alloc(M_WAITOK);
357 for (int i = 0; i < SCNT_MAX; i++)
358 V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
360 if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
361 INTR_MPSAFE, &V_pf_swi_cookie) != 0)
362 /* XXXGL: leaked all above. */
366 static struct pf_kpool *
367 pf_get_kpool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
368 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
369 u_int8_t check_ticket)
371 struct pf_kruleset *ruleset;
372 struct pf_krule *rule;
375 ruleset = pf_find_kruleset(anchor);
378 rs_num = pf_get_ruleset_number(rule_action);
379 if (rs_num >= PF_RULESET_MAX)
382 if (check_ticket && ticket !=
383 ruleset->rules[rs_num].active.ticket)
386 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
389 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
391 if (check_ticket && ticket !=
392 ruleset->rules[rs_num].inactive.ticket)
395 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
398 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
401 while ((rule != NULL) && (rule->nr != rule_number))
402 rule = TAILQ_NEXT(rule, entries);
407 return (&rule->rpool);
411 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
413 struct pf_kpooladdr *mv_pool_pa;
415 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
416 TAILQ_REMOVE(poola, mv_pool_pa, entries);
417 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
422 pf_empty_kpool(struct pf_kpalist *poola)
424 struct pf_kpooladdr *pa;
426 while ((pa = TAILQ_FIRST(poola)) != NULL) {
427 switch (pa->addr.type) {
428 case PF_ADDR_DYNIFTL:
429 pfi_dynaddr_remove(pa->addr.p.dyn);
432 /* XXX: this could be unfinished pooladdr on pabuf */
433 if (pa->addr.p.tbl != NULL)
434 pfr_detach_table(pa->addr.p.tbl);
438 pfi_kkif_unref(pa->kif);
439 TAILQ_REMOVE(poola, pa, entries);
445 pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
450 TAILQ_REMOVE(rulequeue, rule, entries);
452 PF_UNLNKDRULES_LOCK();
453 rule->rule_ref |= PFRULE_REFS;
454 TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
455 PF_UNLNKDRULES_UNLOCK();
459 pf_free_rule(struct pf_krule *rule)
465 tag_unref(&V_pf_tags, rule->tag);
467 tag_unref(&V_pf_tags, rule->match_tag);
469 if (rule->pqid != rule->qid)
470 pf_qid_unref(rule->pqid);
471 pf_qid_unref(rule->qid);
473 switch (rule->src.addr.type) {
474 case PF_ADDR_DYNIFTL:
475 pfi_dynaddr_remove(rule->src.addr.p.dyn);
478 pfr_detach_table(rule->src.addr.p.tbl);
481 switch (rule->dst.addr.type) {
482 case PF_ADDR_DYNIFTL:
483 pfi_dynaddr_remove(rule->dst.addr.p.dyn);
486 pfr_detach_table(rule->dst.addr.p.tbl);
489 if (rule->overload_tbl)
490 pfr_detach_table(rule->overload_tbl);
492 pfi_kkif_unref(rule->kif);
493 pf_kanchor_remove(rule);
494 pf_empty_kpool(&rule->rpool.list);
500 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
501 unsigned int default_size)
504 unsigned int hashsize;
506 if (*tunable_size == 0 || !powerof2(*tunable_size))
507 *tunable_size = default_size;
509 hashsize = *tunable_size;
510 ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
512 ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
514 ts->mask = hashsize - 1;
515 ts->seed = arc4random();
516 for (i = 0; i < hashsize; i++) {
517 TAILQ_INIT(&ts->namehash[i]);
518 TAILQ_INIT(&ts->taghash[i]);
520 BIT_FILL(TAGID_MAX, &ts->avail);
524 pf_cleanup_tagset(struct pf_tagset *ts)
527 unsigned int hashsize;
528 struct pf_tagname *t, *tmp;
531 * Only need to clean up one of the hashes as each tag is hashed
534 hashsize = ts->mask + 1;
535 for (i = 0; i < hashsize; i++)
536 TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
537 uma_zfree(V_pf_tag_z, t);
539 free(ts->namehash, M_PFHASH);
540 free(ts->taghash, M_PFHASH);
544 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
548 len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
549 return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
553 tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
556 return (tag & ts->mask);
560 tagname2tag(struct pf_tagset *ts, char *tagname)
562 struct pf_tagname *tag;
568 index = tagname2hashindex(ts, tagname);
569 TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
570 if (strcmp(tagname, tag->name) == 0) {
578 * to avoid fragmentation, we do a linear search from the beginning
579 * and take the first free slot we find.
581 new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
583 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
584 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
585 * set. It may also return a bit number greater than TAGID_MAX due
586 * to rounding of the number of bits in the vector up to a multiple
587 * of the vector word size at declaration/allocation time.
589 if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
592 /* Mark the tag as in use. Bits are 0-based for BIT_CLR() */
593 BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
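/*
 * Worked example (illustrative): with TAGID_MAX == 50000 and 64-bit bitset
 * words the vector is rounded up to 50048 bits, so once all real tags are
 * taken BIT_FFS() can return 50001..50048 for the padding bits; the range
 * check above rejects those.  Tag 1 maps to bit 0, hence "new_tagid - 1".
 */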
595 /* allocate and fill new struct pf_tagname */
596 tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
599 strlcpy(tag->name, tagname, sizeof(tag->name));
600 tag->tag = new_tagid;
603 /* Insert into namehash */
604 TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
606 /* Insert into taghash */
607 index = tag2hashindex(ts, new_tagid);
608 TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
614 tag_unref(struct pf_tagset *ts, u_int16_t tag)
616 struct pf_tagname *t;
621 index = tag2hashindex(ts, tag);
622 TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
625 TAILQ_REMOVE(&ts->taghash[index], t,
627 index = tagname2hashindex(ts, t->name);
628 TAILQ_REMOVE(&ts->namehash[index], t,
630 /* Bits are 0-based for BIT_SET() */
631 BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
632 uma_zfree(V_pf_tag_z, t);
639 pf_tagname2tag(char *tagname)
641 return (tagname2tag(&V_pf_tags, tagname));
646 pf_qname2qid(char *qname)
648 return ((u_int32_t)tagname2tag(&V_pf_qids, qname));
652 pf_qid_unref(u_int32_t qid)
654 tag_unref(&V_pf_qids, (u_int16_t)qid);
658 pf_begin_altq(u_int32_t *ticket)
660 struct pf_altq *altq, *tmp;
665 /* Purge the old altq lists */
666 TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
667 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
668 /* detach and destroy the discipline */
669 error = altq_remove(altq);
671 free(altq, M_PFALTQ);
673 TAILQ_INIT(V_pf_altq_ifs_inactive);
674 TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
675 pf_qid_unref(altq->qid);
676 free(altq, M_PFALTQ);
678 TAILQ_INIT(V_pf_altqs_inactive);
681 *ticket = ++V_ticket_altqs_inactive;
682 V_altqs_inactive_open = 1;
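/*
 * The returned ticket identifies this in-progress transaction: subsequent
 * altq additions, a commit or a rollback must present the same ticket or
 * they are rejected as stale.
 */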
687 pf_rollback_altq(u_int32_t ticket)
689 struct pf_altq *altq, *tmp;
694 if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
696 /* Purge the old altq lists */
697 TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
698 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
699 /* detach and destroy the discipline */
700 error = altq_remove(altq);
702 free(altq, M_PFALTQ);
704 TAILQ_INIT(V_pf_altq_ifs_inactive);
705 TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
706 pf_qid_unref(altq->qid);
707 free(altq, M_PFALTQ);
709 TAILQ_INIT(V_pf_altqs_inactive);
710 V_altqs_inactive_open = 0;
715 pf_commit_altq(u_int32_t ticket)
717 struct pf_altqqueue *old_altqs, *old_altq_ifs;
718 struct pf_altq *altq, *tmp;
723 if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
726 /* swap altqs, keep the old. */
727 old_altqs = V_pf_altqs_active;
728 old_altq_ifs = V_pf_altq_ifs_active;
729 V_pf_altqs_active = V_pf_altqs_inactive;
730 V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
731 V_pf_altqs_inactive = old_altqs;
732 V_pf_altq_ifs_inactive = old_altq_ifs;
733 V_ticket_altqs_active = V_ticket_altqs_inactive;
735 /* Attach new disciplines */
736 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
737 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
738 /* attach the discipline */
739 error = altq_pfattach(altq);
740 if (error == 0 && V_pf_altq_running)
741 error = pf_enable_altq(altq);
747 /* Purge the old altq lists */
748 TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
749 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
750 /* detach and destroy the discipline */
751 if (V_pf_altq_running)
752 error = pf_disable_altq(altq);
753 err = altq_pfdetach(altq);
754 if (err != 0 && error == 0)
756 err = altq_remove(altq);
757 if (err != 0 && error == 0)
760 free(altq, M_PFALTQ);
762 TAILQ_INIT(V_pf_altq_ifs_inactive);
763 TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
764 pf_qid_unref(altq->qid);
765 free(altq, M_PFALTQ);
767 TAILQ_INIT(V_pf_altqs_inactive);
769 V_altqs_inactive_open = 0;
774 pf_enable_altq(struct pf_altq *altq)
777 struct tb_profile tb;
780 if ((ifp = ifunit(altq->ifname)) == NULL)
783 if (ifp->if_snd.altq_type != ALTQT_NONE)
784 error = altq_enable(&ifp->if_snd);
786 /* set tokenbucket regulator */
787 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
788 tb.rate = altq->ifbandwidth;
789 tb.depth = altq->tbrsize;
790 error = tbr_set(&ifp->if_snd, &tb);
797 pf_disable_altq(struct pf_altq *altq)
800 struct tb_profile tb;
803 if ((ifp = ifunit(altq->ifname)) == NULL)
807 * If the discipline is no longer referenced, it was overridden
808 * by a new one; in that case, just return.
810 if (altq->altq_disc != ifp->if_snd.altq_disc)
813 error = altq_disable(&ifp->if_snd);
816 /* clear tokenbucket regulator */
818 error = tbr_set(&ifp->if_snd, &tb);
825 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
826 struct pf_altq *altq)
831 /* Deactivate the interface in question */
832 altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
833 if ((ifp1 = ifunit(altq->ifname)) == NULL ||
834 (remove && ifp1 == ifp)) {
835 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
837 error = altq_add(ifp1, altq);
839 if (ticket != V_ticket_altqs_inactive)
843 free(altq, M_PFALTQ);
850 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
852 struct pf_altq *a1, *a2, *a3;
857 * No need to re-evaluate the configuration for events on interfaces
858 * that do not support ALTQ, as it's not possible for such
859 * interfaces to be part of the configuration.
861 if (!ALTQ_IS_READY(&ifp->if_snd))
864 /* Interrupt userland queue modifications */
865 if (V_altqs_inactive_open)
866 pf_rollback_altq(V_ticket_altqs_inactive);
868 /* Start new altq ruleset */
869 if (pf_begin_altq(&ticket))
872 /* Copy the current active set */
873 TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
874 a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
879 bcopy(a1, a2, sizeof(struct pf_altq));
881 error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
885 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
889 TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
890 a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
895 bcopy(a1, a2, sizeof(struct pf_altq));
897 if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
902 a2->altq_disc = NULL;
903 TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
904 if (strncmp(a3->ifname, a2->ifname,
906 a2->altq_disc = a3->altq_disc;
910 error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
914 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
919 pf_rollback_altq(ticket);
921 pf_commit_altq(ticket);
926 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
928 struct pf_kruleset *rs;
929 struct pf_krule *rule;
933 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
935 rs = pf_find_or_create_kruleset(anchor);
938 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
939 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
940 rs->rules[rs_num].inactive.rcount--;
942 *ticket = ++rs->rules[rs_num].inactive.ticket;
943 rs->rules[rs_num].inactive.open = 1;
948 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
950 struct pf_kruleset *rs;
951 struct pf_krule *rule;
955 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
957 rs = pf_find_kruleset(anchor);
958 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
959 rs->rules[rs_num].inactive.ticket != ticket)
961 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
962 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
963 rs->rules[rs_num].inactive.rcount--;
965 rs->rules[rs_num].inactive.open = 0;
969 #define PF_MD5_UPD(st, elm) \
970 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
972 #define PF_MD5_UPD_STR(st, elm) \
973 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
975 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \
976 (stor) = htonl((st)->elm); \
977 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
980 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \
981 (stor) = htons((st)->elm); \
982 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
986 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
988 PF_MD5_UPD(pfr, addr.type);
989 switch (pfr->addr.type) {
990 case PF_ADDR_DYNIFTL:
991 PF_MD5_UPD(pfr, addr.v.ifname);
992 PF_MD5_UPD(pfr, addr.iflags);
995 PF_MD5_UPD(pfr, addr.v.tblname);
997 case PF_ADDR_ADDRMASK:
999 PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
1000 PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
1004 PF_MD5_UPD(pfr, port[0]);
1005 PF_MD5_UPD(pfr, port[1]);
1006 PF_MD5_UPD(pfr, neg);
1007 PF_MD5_UPD(pfr, port_op);
1011 pf_hash_rule(MD5_CTX *ctx, struct pf_krule *rule)
1016 pf_hash_rule_addr(ctx, &rule->src);
1017 pf_hash_rule_addr(ctx, &rule->dst);
1018 for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
1019 PF_MD5_UPD_STR(rule, label[i]);
1020 PF_MD5_UPD_STR(rule, ifname);
1021 PF_MD5_UPD_STR(rule, match_tagname);
1022 PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
1023 PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
1024 PF_MD5_UPD_HTONL(rule, prob, y);
1025 PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
1026 PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
1027 PF_MD5_UPD(rule, uid.op);
1028 PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
1029 PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
1030 PF_MD5_UPD(rule, gid.op);
1031 PF_MD5_UPD_HTONL(rule, rule_flag, y);
1032 PF_MD5_UPD(rule, action);
1033 PF_MD5_UPD(rule, direction);
1034 PF_MD5_UPD(rule, af);
1035 PF_MD5_UPD(rule, quick);
1036 PF_MD5_UPD(rule, ifnot);
1037 PF_MD5_UPD(rule, match_tag_not);
1038 PF_MD5_UPD(rule, natpass);
1039 PF_MD5_UPD(rule, keep_state);
1040 PF_MD5_UPD(rule, proto);
1041 PF_MD5_UPD(rule, type);
1042 PF_MD5_UPD(rule, code);
1043 PF_MD5_UPD(rule, flags);
1044 PF_MD5_UPD(rule, flagset);
1045 PF_MD5_UPD(rule, allow_opts);
1046 PF_MD5_UPD(rule, rt);
1047 PF_MD5_UPD(rule, tos);
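/*
 * Only configuration fields are fed into the digest; counters and kernel
 * pointers are skipped, and multi-byte fields go through the htonl()/htons()
 * wrappers above so the hash is identical regardless of host byte order.
 */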
1051 pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
1054 u_int8_t digest[2][PF_MD5_DIGEST_LENGTH];
1058 pf_hash_rule(&ctx[0], a);
1059 pf_hash_rule(&ctx[1], b);
1060 MD5Final(digest[0], &ctx[0]);
1061 MD5Final(digest[1], &ctx[1]);
1063 return (memcmp(digest[0], digest[1], PF_MD5_DIGEST_LENGTH) == 0);
1067 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1069 struct pf_kruleset *rs;
1070 struct pf_krule *rule, **old_array, *tail;
1071 struct pf_krulequeue *old_rules;
1073 u_int32_t old_rcount;
1077 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1079 rs = pf_find_kruleset(anchor);
1080 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1081 ticket != rs->rules[rs_num].inactive.ticket)
1084 /* Calculate checksum for the main ruleset */
1085 if (rs == &pf_main_ruleset) {
1086 error = pf_setup_pfsync_matching(rs);
1091 /* Swap rules, keep the old. */
1092 old_rules = rs->rules[rs_num].active.ptr;
1093 old_rcount = rs->rules[rs_num].active.rcount;
1094 old_array = rs->rules[rs_num].active.ptr_array;
1096 rs->rules[rs_num].active.ptr =
1097 rs->rules[rs_num].inactive.ptr;
1098 rs->rules[rs_num].active.ptr_array =
1099 rs->rules[rs_num].inactive.ptr_array;
1100 rs->rules[rs_num].active.rcount =
1101 rs->rules[rs_num].inactive.rcount;
1103 /* Attempt to preserve counter information. */
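/*
 * Old and new rules are matched structurally: pf_krule_compare() treats two
 * rules as equal when their pf_hash_rule() digests match, so an unchanged
 * rule survives a ruleset reload with its counters intact.
 */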
1104 if (V_pf_status.keep_counters) {
1105 TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
1107 tail = TAILQ_FIRST(old_rules);
1108 while ((tail != NULL) && ! pf_krule_compare(tail, rule))
1109 tail = TAILQ_NEXT(tail, entries);
1111 counter_u64_add(rule->evaluations,
1112 counter_u64_fetch(tail->evaluations));
1113 counter_u64_add(rule->packets[0],
1114 counter_u64_fetch(tail->packets[0]));
1115 counter_u64_add(rule->packets[1],
1116 counter_u64_fetch(tail->packets[1]));
1117 counter_u64_add(rule->bytes[0],
1118 counter_u64_fetch(tail->bytes[0]));
1119 counter_u64_add(rule->bytes[1],
1120 counter_u64_fetch(tail->bytes[1]));
1125 rs->rules[rs_num].inactive.ptr = old_rules;
1126 rs->rules[rs_num].inactive.ptr_array = old_array;
1127 rs->rules[rs_num].inactive.rcount = old_rcount;
1129 rs->rules[rs_num].active.ticket =
1130 rs->rules[rs_num].inactive.ticket;
1131 pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1133 /* Purge the old rule list. */
1134 while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1135 pf_unlink_rule(old_rules, rule);
1136 if (rs->rules[rs_num].inactive.ptr_array)
1137 free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
1138 rs->rules[rs_num].inactive.ptr_array = NULL;
1139 rs->rules[rs_num].inactive.rcount = 0;
1140 rs->rules[rs_num].inactive.open = 0;
1141 pf_remove_if_empty_kruleset(rs);
1147 pf_setup_pfsync_matching(struct pf_kruleset *rs)
1150 struct pf_krule *rule;
1152 u_int8_t digest[PF_MD5_DIGEST_LENGTH];
1155 for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1156 /* XXX PF_RULESET_SCRUB as well? */
1157 if (rs_cnt == PF_RULESET_SCRUB)
1160 if (rs->rules[rs_cnt].inactive.ptr_array)
1161 free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
1162 rs->rules[rs_cnt].inactive.ptr_array = NULL;
1164 if (rs->rules[rs_cnt].inactive.rcount) {
1165 rs->rules[rs_cnt].inactive.ptr_array =
1166 malloc(sizeof(caddr_t) *
1167 rs->rules[rs_cnt].inactive.rcount,
1170 if (!rs->rules[rs_cnt].inactive.ptr_array)
1174 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1176 pf_hash_rule(&ctx, rule);
1177 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
1181 MD5Final(digest, &ctx);
1182 memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
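/*
 * The resulting checksum is kept in the exported status (struct pf_status),
 * which lets pfsync peers check that they are running matching rulesets.
 */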
1187 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
1192 switch (addr->type) {
1194 addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
1195 if (addr->p.tbl == NULL)
1198 case PF_ADDR_DYNIFTL:
1199 error = pfi_dynaddr_setup(addr, af);
1207 pf_addr_copyout(struct pf_addr_wrap *addr)
1210 switch (addr->type) {
1211 case PF_ADDR_DYNIFTL:
1212 pfi_dynaddr_copyout(addr);
1215 pf_tbladdr_copyout(addr);
1221 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
1223 int secs = time_uptime, diff;
1225 bzero(out, sizeof(struct pf_src_node));
1227 bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
1228 bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
1230 if (in->rule.ptr != NULL)
1231 out->rule.nr = in->rule.ptr->nr;
1233 for (int i = 0; i < 2; i++) {
1234 out->bytes[i] = counter_u64_fetch(in->bytes[i]);
1235 out->packets[i] = counter_u64_fetch(in->packets[i]);
1238 out->states = in->states;
1239 out->conn = in->conn;
1241 out->ruletype = in->ruletype;
1243 out->creation = secs - in->creation;
1244 if (out->expire > secs)
1245 out->expire -= secs;
1249 /* Adjust the connection rate estimate. */
1250 diff = secs - in->conn_rate.last;
1251 if (diff >= in->conn_rate.seconds)
1252 out->conn_rate.count = 0;
1254 out->conn_rate.count -=
1255 in->conn_rate.count * diff /
1256 in->conn_rate.seconds;
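/*
 * Worked example (illustrative): with seconds == 10, count == 40 and
 * diff == 4, the exported count is reduced by 40 * 4 / 10 == 16, i.e.
 * scaled down by the fraction of the interval that has already elapsed.
 */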
1261 * Handle export of struct pf_kaltq to user binaries that may be using any
1262 * version of struct pf_altq.
1265 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
1269 if (ioc_size == sizeof(struct pfioc_altq_v0))
1272 version = pa->version;
1274 if (version > PFIOC_ALTQ_VERSION)
1277 #define ASSIGN(x) exported_q->x = q->x
1279 bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
1280 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
1281 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
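/*
 * SATU16()/SATU32() saturate instead of truncating when a wide kernel value
 * is exported into a narrower v0 field, e.g. SATU16(70000) yields 65535.
 */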
1285 struct pf_altq_v0 *exported_q =
1286 &((struct pfioc_altq_v0 *)pa)->altq;
1292 exported_q->tbrsize = SATU16(q->tbrsize);
1293 exported_q->ifbandwidth = SATU32(q->ifbandwidth);
1298 exported_q->bandwidth = SATU32(q->bandwidth);
1300 ASSIGN(local_flags);
1305 if (q->scheduler == ALTQT_HFSC) {
1306 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
1307 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
1308 SATU32(q->pq_u.hfsc_opts.x)
1310 ASSIGN_OPT_SATU32(rtsc_m1);
1312 ASSIGN_OPT_SATU32(rtsc_m2);
1314 ASSIGN_OPT_SATU32(lssc_m1);
1316 ASSIGN_OPT_SATU32(lssc_m2);
1318 ASSIGN_OPT_SATU32(ulsc_m1);
1320 ASSIGN_OPT_SATU32(ulsc_m2);
1325 #undef ASSIGN_OPT_SATU32
1333 struct pf_altq_v1 *exported_q =
1334 &((struct pfioc_altq_v1 *)pa)->altq;
1340 ASSIGN(ifbandwidth);
1347 ASSIGN(local_flags);
1357 panic("%s: unhandled struct pfioc_altq version", __func__);
1370 * Handle import to struct pf_kaltq of struct pf_altq from user binaries
1371 * that may be using any version of it.
1374 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
1378 if (ioc_size == sizeof(struct pfioc_altq_v0))
1381 version = pa->version;
1383 if (version > PFIOC_ALTQ_VERSION)
1386 #define ASSIGN(x) q->x = imported_q->x
1388 bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
1392 struct pf_altq_v0 *imported_q =
1393 &((struct pfioc_altq_v0 *)pa)->altq;
1398 ASSIGN(tbrsize); /* 16-bit -> 32-bit */
1399 ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
1404 ASSIGN(bandwidth); /* 32-bit -> 64-bit */
1406 ASSIGN(local_flags);
1411 if (imported_q->scheduler == ALTQT_HFSC) {
1412 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
1415 * The m1 and m2 parameters are being copied from
1418 ASSIGN_OPT(rtsc_m1);
1420 ASSIGN_OPT(rtsc_m2);
1422 ASSIGN_OPT(lssc_m1);
1424 ASSIGN_OPT(lssc_m2);
1426 ASSIGN_OPT(ulsc_m1);
1428 ASSIGN_OPT(ulsc_m2);
1440 struct pf_altq_v1 *imported_q =
1441 &((struct pfioc_altq_v1 *)pa)->altq;
1447 ASSIGN(ifbandwidth);
1454 ASSIGN(local_flags);
1464 panic("%s: unhandled struct pfioc_altq version", __func__);
1474 static struct pf_altq *
1475 pf_altq_get_nth_active(u_int32_t n)
1477 struct pf_altq *altq;
1481 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1487 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1498 pf_krule_free(struct pf_krule *rule)
1503 counter_u64_free(rule->evaluations);
1504 for (int i = 0; i < 2; i++) {
1505 counter_u64_free(rule->packets[i]);
1506 counter_u64_free(rule->bytes[i]);
1508 counter_u64_free(rule->states_cur);
1509 counter_u64_free(rule->states_tot);
1510 counter_u64_free(rule->src_nodes);
1511 free(rule, M_PFRULE);
1515 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1516 struct pf_pooladdr *pool)
1519 bzero(pool, sizeof(*pool));
1520 bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1521 strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1525 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1526 struct pf_kpooladdr *kpool)
1529 bzero(kpool, sizeof(*kpool));
1530 bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1531 strlcpy(kpool->ifname, pool->ifname, sizeof(kpool->ifname));
1535 pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool)
1537 bzero(pool, sizeof(*pool));
1539 bcopy(&kpool->key, &pool->key, sizeof(pool->key));
1540 bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter));
1542 pool->tblidx = kpool->tblidx;
1543 pool->proxy_port[0] = kpool->proxy_port[0];
1544 pool->proxy_port[1] = kpool->proxy_port[1];
1545 pool->opts = kpool->opts;
1549 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
1551 _Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
1552 _Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
1554 bzero(kpool, sizeof(*kpool));
1556 bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
1557 bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
1559 kpool->tblidx = pool->tblidx;
1560 kpool->proxy_port[0] = pool->proxy_port[0];
1561 kpool->proxy_port[1] = pool->proxy_port[1];
1562 kpool->opts = pool->opts;
1568 pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
1571 bzero(rule, sizeof(*rule));
1573 bcopy(&krule->src, &rule->src, sizeof(rule->src));
1574 bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));
1576 for (int i = 0; i < PF_SKIP_COUNT; ++i) {
1577 if (rule->skip[i].ptr == NULL)
1578 rule->skip[i].nr = -1;
1580 rule->skip[i].nr = krule->skip[i].ptr->nr;
1583 strlcpy(rule->label, krule->label[0], sizeof(rule->label));
1584 strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
1585 strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
1586 strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
1587 strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
1588 strlcpy(rule->match_tagname, krule->match_tagname,
1589 sizeof(rule->match_tagname));
1590 strlcpy(rule->overload_tblname, krule->overload_tblname,
1591 sizeof(rule->overload_tblname));
1593 pf_kpool_to_pool(&krule->rpool, &rule->rpool);
1595 rule->evaluations = counter_u64_fetch(krule->evaluations);
1596 for (int i = 0; i < 2; i++) {
1597 rule->packets[i] = counter_u64_fetch(krule->packets[i]);
1598 rule->bytes[i] = counter_u64_fetch(krule->bytes[i]);
1601 /* kif, anchor, overload_tbl are not copied over. */
1603 rule->os_fingerprint = krule->os_fingerprint;
1605 rule->rtableid = krule->rtableid;
1606 bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout));
1607 rule->max_states = krule->max_states;
1608 rule->max_src_nodes = krule->max_src_nodes;
1609 rule->max_src_states = krule->max_src_states;
1610 rule->max_src_conn = krule->max_src_conn;
1611 rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit;
1612 rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds;
1613 rule->qid = krule->qid;
1614 rule->pqid = krule->pqid;
1615 rule->nr = krule->nr;
1616 rule->prob = krule->prob;
1617 rule->cuid = krule->cuid;
1618 rule->cpid = krule->cpid;
1620 rule->return_icmp = krule->return_icmp;
1621 rule->return_icmp6 = krule->return_icmp6;
1622 rule->max_mss = krule->max_mss;
1623 rule->tag = krule->tag;
1624 rule->match_tag = krule->match_tag;
1625 rule->scrub_flags = krule->scrub_flags;
1627 bcopy(&krule->uid, &rule->uid, sizeof(krule->uid));
1628 bcopy(&krule->gid, &rule->gid, sizeof(krule->gid));
1630 rule->rule_flag = krule->rule_flag;
1631 rule->action = krule->action;
1632 rule->direction = krule->direction;
1633 rule->log = krule->log;
1634 rule->logif = krule->logif;
1635 rule->quick = krule->quick;
1636 rule->ifnot = krule->ifnot;
1637 rule->match_tag_not = krule->match_tag_not;
1638 rule->natpass = krule->natpass;
1640 rule->keep_state = krule->keep_state;
1641 rule->af = krule->af;
1642 rule->proto = krule->proto;
1643 rule->type = krule->type;
1644 rule->code = krule->code;
1645 rule->flags = krule->flags;
1646 rule->flagset = krule->flagset;
1647 rule->min_ttl = krule->min_ttl;
1648 rule->allow_opts = krule->allow_opts;
1649 rule->rt = krule->rt;
1650 rule->return_ttl = krule->return_ttl;
1651 rule->tos = krule->tos;
1652 rule->set_tos = krule->set_tos;
1653 rule->anchor_relative = krule->anchor_relative;
1654 rule->anchor_wildcard = krule->anchor_wildcard;
1656 rule->flush = krule->flush;
1657 rule->prio = krule->prio;
1658 rule->set_prio[0] = krule->set_prio[0];
1659 rule->set_prio[1] = krule->set_prio[1];
1661 bcopy(&krule->divert, &rule->divert, sizeof(krule->divert));
1663 rule->u_states_cur = counter_u64_fetch(krule->states_cur);
1664 rule->u_states_tot = counter_u64_fetch(krule->states_tot);
1665 rule->u_src_nodes = counter_u64_fetch(krule->src_nodes);
1669 pf_check_rule_addr(const struct pf_rule_addr *addr)
1672 switch (addr->addr.type) {
1673 case PF_ADDR_ADDRMASK:
1674 case PF_ADDR_NOROUTE:
1675 case PF_ADDR_DYNIFTL:
1677 case PF_ADDR_URPFFAILED:
1684 if (addr->addr.p.dyn != NULL) {
1692 pf_nvaddr_to_addr(const nvlist_t *nvl, struct pf_addr *paddr)
1694 return (pf_nvbinary(nvl, "addr", paddr, sizeof(*paddr)));
1698 pf_addr_to_nvaddr(const struct pf_addr *paddr)
1702 nvl = nvlist_create(0);
1706 nvlist_add_binary(nvl, "addr", paddr, sizeof(*paddr));
1712 pf_nvmape_to_mape(const nvlist_t *nvl, struct pf_mape_portset *mape)
1716 bzero(mape, sizeof(*mape));
1717 PFNV_CHK(pf_nvuint8(nvl, "offset", &mape->offset));
1718 PFNV_CHK(pf_nvuint8(nvl, "psidlen", &mape->psidlen));
1719 PFNV_CHK(pf_nvuint16(nvl, "psid", &mape->psid));
1726 pf_mape_to_nvmape(const struct pf_mape_portset *mape)
1730 nvl = nvlist_create(0);
1734 nvlist_add_number(nvl, "offset", mape->offset);
1735 nvlist_add_number(nvl, "psidlen", mape->psidlen);
1736 nvlist_add_number(nvl, "psid", mape->psid);
1742 pf_nvpool_to_pool(const nvlist_t *nvl, struct pf_kpool *kpool)
1746 bzero(kpool, sizeof(*kpool));
1748 PFNV_CHK(pf_nvbinary(nvl, "key", &kpool->key, sizeof(kpool->key)));
1750 if (nvlist_exists_nvlist(nvl, "counter")) {
1751 PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "counter"),
1755 PFNV_CHK(pf_nvint(nvl, "tblidx", &kpool->tblidx));
1756 PFNV_CHK(pf_nvuint16_array(nvl, "proxy_port", kpool->proxy_port, 2,
1758 PFNV_CHK(pf_nvuint8(nvl, "opts", &kpool->opts));
1760 if (nvlist_exists_nvlist(nvl, "mape")) {
1761 PFNV_CHK(pf_nvmape_to_mape(nvlist_get_nvlist(nvl, "mape"),
1770 pf_pool_to_nvpool(const struct pf_kpool *pool)
1775 nvl = nvlist_create(0);
1779 nvlist_add_binary(nvl, "key", &pool->key, sizeof(pool->key));
1780 tmp = pf_addr_to_nvaddr(&pool->counter);
1783 nvlist_add_nvlist(nvl, "counter", tmp);
1785 nvlist_add_number(nvl, "tblidx", pool->tblidx);
1786 pf_uint16_array_nv(nvl, "proxy_port", pool->proxy_port, 2);
1787 nvlist_add_number(nvl, "opts", pool->opts);
1789 tmp = pf_mape_to_nvmape(&pool->mape);
1792 nvlist_add_nvlist(nvl, "mape", tmp);
1797 nvlist_destroy(nvl);
1802 pf_nvaddr_wrap_to_addr_wrap(const nvlist_t *nvl, struct pf_addr_wrap *addr)
1806 bzero(addr, sizeof(*addr));
1808 PFNV_CHK(pf_nvuint8(nvl, "type", &addr->type));
1809 PFNV_CHK(pf_nvuint8(nvl, "iflags", &addr->iflags));
1810 if (addr->type == PF_ADDR_DYNIFTL)
1811 PFNV_CHK(pf_nvstring(nvl, "ifname", addr->v.ifname,
1812 sizeof(addr->v.ifname)));
1813 if (addr->type == PF_ADDR_TABLE)
1814 PFNV_CHK(pf_nvstring(nvl, "tblname", addr->v.tblname,
1815 sizeof(addr->v.tblname)));
1817 if (! nvlist_exists_nvlist(nvl, "addr"))
1819 PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "addr"),
1822 if (! nvlist_exists_nvlist(nvl, "mask"))
1824 PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "mask"),
1827 switch (addr->type) {
1828 case PF_ADDR_DYNIFTL:
1831 case PF_ADDR_ADDRMASK:
1832 case PF_ADDR_NOROUTE:
1833 case PF_ADDR_URPFFAILED:
1844 pf_addr_wrap_to_nvaddr_wrap(const struct pf_addr_wrap *addr)
1849 nvl = nvlist_create(0);
1853 nvlist_add_number(nvl, "type", addr->type);
1854 nvlist_add_number(nvl, "iflags", addr->iflags);
1855 if (addr->type == PF_ADDR_DYNIFTL)
1856 nvlist_add_string(nvl, "ifname", addr->v.ifname);
1857 if (addr->type == PF_ADDR_TABLE)
1858 nvlist_add_string(nvl, "tblname", addr->v.tblname);
1860 tmp = pf_addr_to_nvaddr(&addr->v.a.addr);
1863 nvlist_add_nvlist(nvl, "addr", tmp);
1864 tmp = pf_addr_to_nvaddr(&addr->v.a.mask);
1867 nvlist_add_nvlist(nvl, "mask", tmp);
1872 nvlist_destroy(nvl);
1877 pf_validate_op(uint8_t op)
1899 pf_nvrule_addr_to_rule_addr(const nvlist_t *nvl, struct pf_rule_addr *addr)
1903 if (! nvlist_exists_nvlist(nvl, "addr"))
1906 PFNV_CHK(pf_nvaddr_wrap_to_addr_wrap(nvlist_get_nvlist(nvl, "addr"),
1908 PFNV_CHK(pf_nvuint16_array(nvl, "port", addr->port, 2, NULL));
1909 PFNV_CHK(pf_nvuint8(nvl, "neg", &addr->neg));
1910 PFNV_CHK(pf_nvuint8(nvl, "port_op", &addr->port_op));
1912 PFNV_CHK(pf_validate_op(addr->port_op));
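/*
 * pf_validate_op() rejects comparison operators outside the known PF_OP_*
 * range, so a malformed nvlist cannot inject an out-of-range port_op.
 */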
1919 pf_rule_addr_to_nvrule_addr(const struct pf_rule_addr *addr)
1924 nvl = nvlist_create(0);
1928 tmp = pf_addr_wrap_to_nvaddr_wrap(&addr->addr);
1931 nvlist_add_nvlist(nvl, "addr", tmp);
1932 pf_uint16_array_nv(nvl, "port", addr->port, 2);
1933 nvlist_add_number(nvl, "neg", addr->neg);
1934 nvlist_add_number(nvl, "port_op", addr->port_op);
1939 nvlist_destroy(nvl);
1944 pf_nvrule_uid_to_rule_uid(const nvlist_t *nvl, struct pf_rule_uid *uid)
1948 bzero(uid, sizeof(*uid));
1950 PFNV_CHK(pf_nvuint32_array(nvl, "uid", uid->uid, 2, NULL));
1951 PFNV_CHK(pf_nvuint8(nvl, "op", &uid->op));
1953 PFNV_CHK(pf_validate_op(uid->op));
1960 pf_rule_uid_to_nvrule_uid(const struct pf_rule_uid *uid)
1964 nvl = nvlist_create(0);
1968 pf_uint32_array_nv(nvl, "uid", uid->uid, 2);
1969 nvlist_add_number(nvl, "op", uid->op);
1975 pf_nvrule_gid_to_rule_gid(const nvlist_t *nvl, struct pf_rule_gid *gid)
1977 /* Cheat a little. These structs are the same, other than the name of
1978 * the first field. */
1979 return (pf_nvrule_uid_to_rule_uid(nvl, (struct pf_rule_uid *)gid));
1983 pf_nvrule_to_krule(const nvlist_t *nvl, struct pf_krule **prule)
1985 struct pf_krule *rule;
1988 #define ERROUT(x) ERROUT_FUNCTION(errout, x)
1990 rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK | M_ZERO);
1992 PFNV_CHK(pf_nvuint32(nvl, "nr", &rule->nr));
1994 if (! nvlist_exists_nvlist(nvl, "src"))
1997 error = pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "src"),
2002 if (! nvlist_exists_nvlist(nvl, "dst"))
2005 PFNV_CHK(pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "dst"),
2008 if (nvlist_exists_string(nvl, "label")) {
2009 PFNV_CHK(pf_nvstring(nvl, "label", rule->label[0],
2010 sizeof(rule->label[0])));
2011 } else if (nvlist_exists_string_array(nvl, "labels")) {
2012 const char *const *strs;
2016 strs = nvlist_get_string_array(nvl, "labels", &items);
2017 if (items > PF_RULE_MAX_LABEL_COUNT)
2020 for (size_t i = 0; i < items; i++) {
2021 ret = strlcpy(rule->label[i], strs[i],
2022 sizeof(rule->label[0]));
2023 if (ret >= sizeof(rule->label[0]))
2028 PFNV_CHK(pf_nvstring(nvl, "ifname", rule->ifname,
2029 sizeof(rule->ifname)));
2030 PFNV_CHK(pf_nvstring(nvl, "qname", rule->qname, sizeof(rule->qname)));
2031 PFNV_CHK(pf_nvstring(nvl, "pqname", rule->pqname,
2032 sizeof(rule->pqname)));
2033 PFNV_CHK(pf_nvstring(nvl, "tagname", rule->tagname,
2034 sizeof(rule->tagname)));
2035 PFNV_CHK(pf_nvstring(nvl, "match_tagname", rule->match_tagname,
2036 sizeof(rule->match_tagname)));
2037 PFNV_CHK(pf_nvstring(nvl, "overload_tblname", rule->overload_tblname,
2038 sizeof(rule->overload_tblname)));
2040 if (! nvlist_exists_nvlist(nvl, "rpool"))
2042 PFNV_CHK(pf_nvpool_to_pool(nvlist_get_nvlist(nvl, "rpool"),
2045 PFNV_CHK(pf_nvuint32(nvl, "os_fingerprint", &rule->os_fingerprint));
2047 PFNV_CHK(pf_nvint(nvl, "rtableid", &rule->rtableid));
2048 PFNV_CHK(pf_nvuint32_array(nvl, "timeout", rule->timeout, PFTM_MAX, NULL));
2049 PFNV_CHK(pf_nvuint32(nvl, "max_states", &rule->max_states));
2050 PFNV_CHK(pf_nvuint32(nvl, "max_src_nodes", &rule->max_src_nodes));
2051 PFNV_CHK(pf_nvuint32(nvl, "max_src_states", &rule->max_src_states));
2052 PFNV_CHK(pf_nvuint32(nvl, "max_src_conn", &rule->max_src_conn));
2053 PFNV_CHK(pf_nvuint32(nvl, "max_src_conn_rate.limit",
2054 &rule->max_src_conn_rate.limit));
2055 PFNV_CHK(pf_nvuint32(nvl, "max_src_conn_rate.seconds",
2056 &rule->max_src_conn_rate.seconds));
2057 PFNV_CHK(pf_nvuint32(nvl, "prob", &rule->prob));
2058 PFNV_CHK(pf_nvuint32(nvl, "cuid", &rule->cuid));
2059 PFNV_CHK(pf_nvuint32(nvl, "cpid", &rule->cpid));
2061 PFNV_CHK(pf_nvuint16(nvl, "return_icmp", &rule->return_icmp));
2062 PFNV_CHK(pf_nvuint16(nvl, "return_icmp6", &rule->return_icmp6));
2064 PFNV_CHK(pf_nvuint16(nvl, "max_mss", &rule->max_mss));
2065 PFNV_CHK(pf_nvuint16(nvl, "scrub_flags", &rule->scrub_flags));
2067 if (! nvlist_exists_nvlist(nvl, "uid"))
2069 PFNV_CHK(pf_nvrule_uid_to_rule_uid(nvlist_get_nvlist(nvl, "uid"),
2072 if (! nvlist_exists_nvlist(nvl, "gid"))
2074 PFNV_CHK(pf_nvrule_gid_to_rule_gid(nvlist_get_nvlist(nvl, "gid"),
2077 PFNV_CHK(pf_nvuint32(nvl, "rule_flag", &rule->rule_flag));
2078 PFNV_CHK(pf_nvuint8(nvl, "action", &rule->action));
2079 PFNV_CHK(pf_nvuint8(nvl, "direction", &rule->direction));
2080 PFNV_CHK(pf_nvuint8(nvl, "log", &rule->log));
2081 PFNV_CHK(pf_nvuint8(nvl, "logif", &rule->logif));
2082 PFNV_CHK(pf_nvuint8(nvl, "quick", &rule->quick));
2083 PFNV_CHK(pf_nvuint8(nvl, "ifnot", &rule->ifnot));
2084 PFNV_CHK(pf_nvuint8(nvl, "match_tag_not", &rule->match_tag_not));
2085 PFNV_CHK(pf_nvuint8(nvl, "natpass", &rule->natpass));
2087 PFNV_CHK(pf_nvuint8(nvl, "keep_state", &rule->keep_state));
2088 PFNV_CHK(pf_nvuint8(nvl, "af", &rule->af));
2089 PFNV_CHK(pf_nvuint8(nvl, "proto", &rule->proto));
2090 PFNV_CHK(pf_nvuint8(nvl, "type", &rule->type));
2091 PFNV_CHK(pf_nvuint8(nvl, "code", &rule->code));
2092 PFNV_CHK(pf_nvuint8(nvl, "flags", &rule->flags));
2093 PFNV_CHK(pf_nvuint8(nvl, "flagset", &rule->flagset));
2094 PFNV_CHK(pf_nvuint8(nvl, "min_ttl", &rule->min_ttl));
2095 PFNV_CHK(pf_nvuint8(nvl, "allow_opts", &rule->allow_opts));
2096 PFNV_CHK(pf_nvuint8(nvl, "rt", &rule->rt));
2097 PFNV_CHK(pf_nvuint8(nvl, "return_ttl", &rule->return_ttl));
2098 PFNV_CHK(pf_nvuint8(nvl, "tos", &rule->tos));
2099 PFNV_CHK(pf_nvuint8(nvl, "set_tos", &rule->set_tos));
2100 PFNV_CHK(pf_nvuint8(nvl, "anchor_relative", &rule->anchor_relative));
2101 PFNV_CHK(pf_nvuint8(nvl, "anchor_wildcard", &rule->anchor_wildcard));
2103 PFNV_CHK(pf_nvuint8(nvl, "flush", &rule->flush));
2104 PFNV_CHK(pf_nvuint8(nvl, "prio", &rule->prio));
2106 PFNV_CHK(pf_nvuint8_array(nvl, "set_prio", rule->set_prio, 2, NULL));
2108 if (nvlist_exists_nvlist(nvl, "divert")) {
2109 const nvlist_t *nvldivert = nvlist_get_nvlist(nvl, "divert");
2111 if (! nvlist_exists_nvlist(nvldivert, "addr"))
2113 PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvldivert, "addr"),
2114 &rule->divert.addr));
2115 PFNV_CHK(pf_nvuint16(nvldivert, "port", &rule->divert.port));
2120 if (rule->af == AF_INET)
2121 ERROUT(EAFNOSUPPORT);
2124 if (rule->af == AF_INET6)
2125 ERROUT(EAFNOSUPPORT);
2128 PFNV_CHK(pf_check_rule_addr(&rule->src));
2129 PFNV_CHK(pf_check_rule_addr(&rule->dst));
2137 pf_krule_free(rule);
2144 pf_divert_to_nvdivert(const struct pf_krule *rule)
2149 nvl = nvlist_create(0);
2153 tmp = pf_addr_to_nvaddr(&rule->divert.addr);
2156 nvlist_add_nvlist(nvl, "addr", tmp);
2157 nvlist_add_number(nvl, "port", rule->divert.port);
2162 nvlist_destroy(nvl);
2167 pf_krule_to_nvrule(const struct pf_krule *rule)
2169 nvlist_t *nvl, *tmp;
2171 nvl = nvlist_create(0);
2175 nvlist_add_number(nvl, "nr", rule->nr);
2176 tmp = pf_rule_addr_to_nvrule_addr(&rule->src);
2179 nvlist_add_nvlist(nvl, "src", tmp);
2180 tmp = pf_rule_addr_to_nvrule_addr(&rule->dst);
2183 nvlist_add_nvlist(nvl, "dst", tmp);
2185 for (int i = 0; i < PF_SKIP_COUNT; i++) {
2186 nvlist_append_number_array(nvl, "skip",
2187 rule->skip[i].ptr ? rule->skip[i].ptr->nr : -1);
2190 for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++) {
2191 nvlist_append_string_array(nvl, "labels", rule->label[i]);
2193 nvlist_add_string(nvl, "label", rule->label[0]);
2194 nvlist_add_string(nvl, "ifname", rule->ifname);
2195 nvlist_add_string(nvl, "qname", rule->qname);
2196 nvlist_add_string(nvl, "pqname", rule->pqname);
2197 nvlist_add_string(nvl, "tagname", rule->tagname);
2198 nvlist_add_string(nvl, "match_tagname", rule->match_tagname);
2199 nvlist_add_string(nvl, "overload_tblname", rule->overload_tblname);
2201 tmp = pf_pool_to_nvpool(&rule->rpool);
2204 nvlist_add_nvlist(nvl, "rpool", tmp);
2206 nvlist_add_number(nvl, "evaluations",
2207 counter_u64_fetch(rule->evaluations));
2208 for (int i = 0; i < 2; i++) {
2209 nvlist_append_number_array(nvl, "packets",
2210 counter_u64_fetch(rule->packets[i]));
2211 nvlist_append_number_array(nvl, "bytes",
2212 counter_u64_fetch(rule->bytes[i]));
2215 nvlist_add_number(nvl, "os_fingerprint", rule->os_fingerprint);
2217 nvlist_add_number(nvl, "rtableid", rule->rtableid);
2218 pf_uint32_array_nv(nvl, "timeout", rule->timeout, PFTM_MAX);
2219 nvlist_add_number(nvl, "max_states", rule->max_states);
2220 nvlist_add_number(nvl, "max_src_nodes", rule->max_src_nodes);
2221 nvlist_add_number(nvl, "max_src_states", rule->max_src_states);
2222 nvlist_add_number(nvl, "max_src_conn", rule->max_src_conn);
2223 nvlist_add_number(nvl, "max_src_conn_rate.limit",
2224 rule->max_src_conn_rate.limit);
2225 nvlist_add_number(nvl, "max_src_conn_rate.seconds",
2226 rule->max_src_conn_rate.seconds);
2227 nvlist_add_number(nvl, "qid", rule->qid);
2228 nvlist_add_number(nvl, "pqid", rule->pqid);
2229 nvlist_add_number(nvl, "prob", rule->prob);
2230 nvlist_add_number(nvl, "cuid", rule->cuid);
2231 nvlist_add_number(nvl, "cpid", rule->cpid);
2233 nvlist_add_number(nvl, "states_cur",
2234 counter_u64_fetch(rule->states_cur));
2235 nvlist_add_number(nvl, "states_tot",
2236 counter_u64_fetch(rule->states_tot));
2237 nvlist_add_number(nvl, "src_nodes",
2238 counter_u64_fetch(rule->src_nodes));
2240 nvlist_add_number(nvl, "return_icmp", rule->return_icmp);
2241 nvlist_add_number(nvl, "return_icmp6", rule->return_icmp6);
2243 nvlist_add_number(nvl, "max_mss", rule->max_mss);
2244 nvlist_add_number(nvl, "scrub_flags", rule->scrub_flags);
2246 tmp = pf_rule_uid_to_nvrule_uid(&rule->uid);
2249 nvlist_add_nvlist(nvl, "uid", tmp);
2250 tmp = pf_rule_uid_to_nvrule_uid((const struct pf_rule_uid *)&rule->gid);
2253 nvlist_add_nvlist(nvl, "gid", tmp);
2255 nvlist_add_number(nvl, "rule_flag", rule->rule_flag);
2256 nvlist_add_number(nvl, "action", rule->action);
2257 nvlist_add_number(nvl, "direction", rule->direction);
2258 nvlist_add_number(nvl, "log", rule->log);
2259 nvlist_add_number(nvl, "logif", rule->logif);
2260 nvlist_add_number(nvl, "quick", rule->quick);
2261 nvlist_add_number(nvl, "ifnot", rule->ifnot);
2262 nvlist_add_number(nvl, "match_tag_not", rule->match_tag_not);
2263 nvlist_add_number(nvl, "natpass", rule->natpass);
2265 nvlist_add_number(nvl, "keep_state", rule->keep_state);
2266 nvlist_add_number(nvl, "af", rule->af);
2267 nvlist_add_number(nvl, "proto", rule->proto);
2268 nvlist_add_number(nvl, "type", rule->type);
2269 nvlist_add_number(nvl, "code", rule->code);
2270 nvlist_add_number(nvl, "flags", rule->flags);
2271 nvlist_add_number(nvl, "flagset", rule->flagset);
2272 nvlist_add_number(nvl, "min_ttl", rule->min_ttl);
2273 nvlist_add_number(nvl, "allow_opts", rule->allow_opts);
2274 nvlist_add_number(nvl, "rt", rule->rt);
2275 nvlist_add_number(nvl, "return_ttl", rule->return_ttl);
2276 nvlist_add_number(nvl, "tos", rule->tos);
2277 nvlist_add_number(nvl, "set_tos", rule->set_tos);
2278 nvlist_add_number(nvl, "anchor_relative", rule->anchor_relative);
2279 nvlist_add_number(nvl, "anchor_wildcard", rule->anchor_wildcard);
2281 nvlist_add_number(nvl, "flush", rule->flush);
2282 nvlist_add_number(nvl, "prio", rule->prio);
2284 pf_uint8_array_nv(nvl, "set_prio", rule->set_prio, 2);
2286 tmp = pf_divert_to_nvdivert(rule);
2289 nvlist_add_nvlist(nvl, "divert", tmp);
2294 nvlist_destroy(nvl);
2299 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
2304 if (rule->af == AF_INET) {
2305 return (EAFNOSUPPORT);
2309 if (rule->af == AF_INET6) {
2310 return (EAFNOSUPPORT);
2314 ret = pf_check_rule_addr(&rule->src);
2317 ret = pf_check_rule_addr(&rule->dst);
2321 bzero(krule, sizeof(*krule));
2323 bcopy(&rule->src, &krule->src, sizeof(rule->src));
2324 bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
2326 strlcpy(krule->label[0], rule->label, sizeof(rule->label));
2327 strlcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
2328 strlcpy(krule->qname, rule->qname, sizeof(rule->qname));
2329 strlcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
2330 strlcpy(krule->tagname, rule->tagname, sizeof(rule->tagname));
2331 strlcpy(krule->match_tagname, rule->match_tagname,
2332 sizeof(rule->match_tagname));
2333 strlcpy(krule->overload_tblname, rule->overload_tblname,
2334 sizeof(rule->overload_tblname));
2336 ret = pf_pool_to_kpool(&rule->rpool, &krule->rpool);
2340 /* Don't allow userspace to set evaluations, packets or bytes. */
2341 /* kif, anchor, overload_tbl are not copied over. */
2343 krule->os_fingerprint = rule->os_fingerprint;
2345 krule->rtableid = rule->rtableid;
2346 bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout));
2347 krule->max_states = rule->max_states;
2348 krule->max_src_nodes = rule->max_src_nodes;
2349 krule->max_src_states = rule->max_src_states;
2350 krule->max_src_conn = rule->max_src_conn;
2351 krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
2352 krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
2353 krule->qid = rule->qid;
2354 krule->pqid = rule->pqid;
2355 krule->nr = rule->nr;
2356 krule->prob = rule->prob;
2357 krule->cuid = rule->cuid;
2358 krule->cpid = rule->cpid;
2360 krule->return_icmp = rule->return_icmp;
2361 krule->return_icmp6 = rule->return_icmp6;
2362 krule->max_mss = rule->max_mss;
2363 krule->tag = rule->tag;
2364 krule->match_tag = rule->match_tag;
2365 krule->scrub_flags = rule->scrub_flags;
2367 bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
2368 bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
2370 krule->rule_flag = rule->rule_flag;
2371 krule->action = rule->action;
2372 krule->direction = rule->direction;
2373 krule->log = rule->log;
2374 krule->logif = rule->logif;
2375 krule->quick = rule->quick;
2376 krule->ifnot = rule->ifnot;
2377 krule->match_tag_not = rule->match_tag_not;
2378 krule->natpass = rule->natpass;
2380 krule->keep_state = rule->keep_state;
2381 krule->af = rule->af;
2382 krule->proto = rule->proto;
2383 krule->type = rule->type;
2384 krule->code = rule->code;
2385 krule->flags = rule->flags;
2386 krule->flagset = rule->flagset;
2387 krule->min_ttl = rule->min_ttl;
2388 krule->allow_opts = rule->allow_opts;
2389 krule->rt = rule->rt;
2390 krule->return_ttl = rule->return_ttl;
2391 krule->tos = rule->tos;
2392 krule->set_tos = rule->set_tos;
2393 krule->anchor_relative = rule->anchor_relative;
2394 krule->anchor_wildcard = rule->anchor_wildcard;
2396 krule->flush = rule->flush;
2397 krule->prio = rule->prio;
2398 krule->set_prio[0] = rule->set_prio[0];
2399 krule->set_prio[1] = rule->set_prio[1];
2401 bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
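/* Translate the legacy pfioc_state_kill request into a pf_kstate_kill. */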
2407 pf_state_kill_to_kstate_kill(const struct pfioc_state_kill *psk,
2408 struct pf_kstate_kill *kill)
2410 bzero(kill, sizeof(*kill));
2412 bcopy(&psk->psk_pfcmp, &kill->psk_pfcmp, sizeof(kill->psk_pfcmp));
2413 kill->psk_af = psk->psk_af;
2414 kill->psk_proto = psk->psk_proto;
2415 bcopy(&psk->psk_src, &kill->psk_src, sizeof(kill->psk_src));
2416 bcopy(&psk->psk_dst, &kill->psk_dst, sizeof(kill->psk_dst));
2417 strlcpy(kill->psk_ifname, psk->psk_ifname, sizeof(kill->psk_ifname));
2418 strlcpy(kill->psk_label, psk->psk_label, sizeof(kill->psk_label));
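/* Pull a state comparison key (id, creatorid, direction) out of an nvlist. */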
2424 pf_nvstate_cmp_to_state_cmp(const nvlist_t *nvl, struct pf_state_cmp *cmp)
2428 bzero(cmp, sizeof(*cmp));
2430 PFNV_CHK(pf_nvuint64(nvl, "id", &cmp->id));
2431 PFNV_CHK(pf_nvuint32(nvl, "creatorid", &cmp->creatorid));
2432 PFNV_CHK(pf_nvuint8(nvl, "direction", &cmp->direction));
2439 pf_nvstate_kill_to_kstate_kill(const nvlist_t *nvl,
2440 struct pf_kstate_kill *kill)
2444 bzero(kill, sizeof(*kill));
2446 if (! nvlist_exists_nvlist(nvl, "cmp"))
2449 PFNV_CHK(pf_nvstate_cmp_to_state_cmp(nvlist_get_nvlist(nvl, "cmp"),
2451 PFNV_CHK(pf_nvuint8(nvl, "af", &kill->psk_af));
2452 PFNV_CHK(pf_nvint(nvl, "proto", &kill->psk_proto));
2454 if (! nvlist_exists_nvlist(nvl, "src"))
2456 PFNV_CHK(pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "src"),
2458 if (! nvlist_exists_nvlist(nvl, "dst"))
2460 PFNV_CHK(pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "dst"),
2462 if (nvlist_exists_nvlist(nvl, "rt_addr")) {
2463 PFNV_CHK(pf_nvrule_addr_to_rule_addr(
2464 nvlist_get_nvlist(nvl, "rt_addr"), &kill->psk_rt_addr));
2467 PFNV_CHK(pf_nvstring(nvl, "ifname", kill->psk_ifname,
2468 sizeof(kill->psk_ifname)));
2469 PFNV_CHK(pf_nvstring(nvl, "label", kill->psk_label,
2470 sizeof(kill->psk_label)));
2471 if (nvlist_exists_bool(nvl, "kill_match"))
2472 kill->psk_kill_match = nvlist_get_bool(nvl, "kill_match");
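/* Export a state key (both addresses and ports, af, proto) as an nvlist. */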
2479 pf_state_key_to_nvstate_key(const struct pf_state_key *key)
2481 nvlist_t *nvl, *tmp;
2483 nvl = nvlist_create(0);
2487 for (int i = 0; i < 2; i++) {
2488 tmp = pf_addr_to_nvaddr(&key->addr[i]);
2491 nvlist_append_nvlist_array(nvl, "addr", tmp);
2492 nvlist_append_number_array(nvl, "port", key->port[i]);
2494 nvlist_add_number(nvl, "af", key->af);
2495 nvlist_add_number(nvl, "proto", key->proto);
2500 nvlist_destroy(nvl);
2505 pf_state_scrub_to_nvstate_scrub(const struct pf_state_scrub *scrub)
2509 nvl = nvlist_create(0);
2513 nvlist_add_bool(nvl, "timestamp", scrub->pfss_flags & PFSS_TIMESTAMP);
2514 nvlist_add_number(nvl, "ttl", scrub->pfss_ttl);
2515 nvlist_add_number(nvl, "ts_mod", scrub->pfss_ts_mod);
2521 pf_state_peer_to_nvstate_peer(const struct pf_state_peer *peer)
2523 nvlist_t *nvl, *tmp;
2525 nvl = nvlist_create(0);
2530 tmp = pf_state_scrub_to_nvstate_scrub(peer->scrub);
2533 nvlist_add_nvlist(nvl, "scrub", tmp);
2536 nvlist_add_number(nvl, "seqlo", peer->seqlo);
2537 nvlist_add_number(nvl, "seqhi", peer->seqhi);
2538 nvlist_add_number(nvl, "seqdiff", peer->seqdiff);
2539 nvlist_add_number(nvl, "max_win", peer->max_win);
2540 nvlist_add_number(nvl, "mss", peer->mss);
2541 nvlist_add_number(nvl, "state", peer->state);
2542 nvlist_add_number(nvl, "wscale", peer->wscale);
2547 nvlist_destroy(nvl);
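/*
 * Export a complete state entry as an nvlist: keys, peers, rule numbers,
 * timing information and per-direction packet/byte counters.
 */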
2553 pf_state_to_nvstate(const struct pf_state *s)
2555 nvlist_t *nvl, *tmp;
2556 uint32_t expire, flags = 0;
2558 nvl = nvlist_create(0);
2562 nvlist_add_number(nvl, "id", s->id);
2563 nvlist_add_string(nvl, "ifname", s->kif->pfik_name);
2565 tmp = pf_state_key_to_nvstate_key(s->key[PF_SK_STACK]);
2568 nvlist_add_nvlist(nvl, "stack_key", tmp);
2570 tmp = pf_state_key_to_nvstate_key(s->key[PF_SK_WIRE]);
2573 nvlist_add_nvlist(nvl, "wire_key", tmp);
2575 tmp = pf_state_peer_to_nvstate_peer(&s->src);
2578 nvlist_add_nvlist(nvl, "src", tmp);
2580 tmp = pf_state_peer_to_nvstate_peer(&s->dst);
2583 nvlist_add_nvlist(nvl, "dst", tmp);
2585 tmp = pf_addr_to_nvaddr(&s->rt_addr);
2588 nvlist_add_nvlist(nvl, "rt_addr", tmp);
2590 nvlist_add_number(nvl, "rule", s->rule.ptr ? s->rule.ptr->nr : -1);
2591 nvlist_add_number(nvl, "anchor",
2592 s->anchor.ptr ? s->anchor.ptr->nr : -1);
2593 nvlist_add_number(nvl, "nat_rule",
2594 s->nat_rule.ptr ? s->nat_rule.ptr->nr : -1);
2595 nvlist_add_number(nvl, "creation", s->creation);
2597 expire = pf_state_expires(s);
2598 if (expire <= time_uptime)
2601 expire = expire - time_uptime;
2602 nvlist_add_number(nvl, "expire", expire);
2604 for (int i = 0; i < 2; i++) {
2605 nvlist_append_number_array(nvl, "packets",
2606 counter_u64_fetch(s->packets[i]));
2607 nvlist_append_number_array(nvl, "bytes",
2608 counter_u64_fetch(s->bytes[i]));
2611 nvlist_add_number(nvl, "creatorid", s->creatorid);
2612 nvlist_add_number(nvl, "direction", s->direction);
2613 nvlist_add_number(nvl, "log", s->log);
2614 nvlist_add_number(nvl, "state_flags", s->state_flags);
2615 nvlist_add_number(nvl, "timeout", s->timeout);
2617 flags |= PFSYNC_FLAG_SRCNODE;
2618 if (s->nat_src_node)
2619 flags |= PFSYNC_FLAG_NATSRCNODE;
2620 nvlist_add_number(nvl, "sync_flags", flags);
2625 nvlist_destroy(nvl);
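/*
 * Common rule-insertion path shared by DIOCADDRULE and DIOCADDRULENV:
 * allocate the rule's counters, resolve interface, queue, tag and table
 * references, and append the rule to the inactive ruleset selected by the
 * ticket.  The rule is freed on error.
 */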
2630 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
2631 uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2634 struct pf_kruleset *ruleset;
2635 struct pf_krule *tail;
2636 struct pf_kpooladdr *pa;
2637 struct pfi_kkif *kif = NULL;
2641 if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
2643 goto errout_unlocked;
2646 #define ERROUT(x) ERROUT_FUNCTION(errout, x)
2648 if (rule->ifname[0])
2649 kif = pf_kkif_create(M_WAITOK);
2650 rule->evaluations = counter_u64_alloc(M_WAITOK);
2651 for (int i = 0; i < 2; i++) {
2652 rule->packets[i] = counter_u64_alloc(M_WAITOK);
2653 rule->bytes[i] = counter_u64_alloc(M_WAITOK);
2655 rule->states_cur = counter_u64_alloc(M_WAITOK);
2656 rule->states_tot = counter_u64_alloc(M_WAITOK);
2657 rule->src_nodes = counter_u64_alloc(M_WAITOK);
2658 rule->cuid = td->td_ucred->cr_ruid;
2659 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
2660 TAILQ_INIT(&rule->rpool.list);
2663 ruleset = pf_find_kruleset(anchor);
2664 if (ruleset == NULL)
2666 rs_num = pf_get_ruleset_number(rule->action);
2667 if (rs_num >= PF_RULESET_MAX)
2669 if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2670 DPFPRINTF(PF_DEBUG_MISC,
2671 ("ticket: %d != [%d]%d\n", ticket, rs_num,
2672 ruleset->rules[rs_num].inactive.ticket));
2675 if (pool_ticket != V_ticket_pabuf) {
2676 DPFPRINTF(PF_DEBUG_MISC,
2677 ("pool_ticket: %d != %d\n", pool_ticket,
2682 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2685 rule->nr = tail->nr + 1;
2688 if (rule->ifname[0]) {
2689 rule->kif = pfi_kkif_attach(kif, rule->ifname);
2691 pfi_kkif_ref(rule->kif);
2695 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2700 if (rule->qname[0] != 0) {
2701 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2703 else if (rule->pqname[0] != 0) {
2705 pf_qname2qid(rule->pqname)) == 0)
2708 rule->pqid = rule->qid;
2711 if (rule->tagname[0])
2712 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2714 if (rule->match_tagname[0])
2715 if ((rule->match_tag =
2716 pf_tagname2tag(rule->match_tagname)) == 0)
2718 if (rule->rt && !rule->direction)
2722 if (rule->logif >= PFLOGIFS_MAX)
2724 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2726 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2728 if (pf_kanchor_setup(rule, ruleset, anchor_call))
2730 if (rule->scrub_flags & PFSTATE_SETPRIO &&
2731 (rule->set_prio[0] > PF_PRIO_MAX ||
2732 rule->set_prio[1] > PF_PRIO_MAX))
2734 TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
2735 if (pa->addr.type == PF_ADDR_TABLE) {
2736 pa->addr.p.tbl = pfr_attach_table(ruleset,
2737 pa->addr.v.tblname);
2738 if (pa->addr.p.tbl == NULL)
2742 rule->overload_tbl = NULL;
2743 if (rule->overload_tblname[0]) {
2744 if ((rule->overload_tbl = pfr_attach_table(ruleset,
2745 rule->overload_tblname)) == NULL)
2748 rule->overload_tbl->pfrkt_flags |=
2752 pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list);
2753 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2754 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
2755 (rule->rt > PF_NOPFROUTE)) &&
2756 (TAILQ_FIRST(&rule->rpool.list) == NULL))
2765 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
2766 counter_u64_zero(rule->evaluations);
2767 for (int i = 0; i < 2; i++) {
2768 counter_u64_zero(rule->packets[i]);
2769 counter_u64_zero(rule->bytes[i]);
2771 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2773 ruleset->rules[rs_num].inactive.rcount++;
2783 pf_krule_free(rule);
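/* Check whether any of the rule's labels matches the given label. */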
2788 pf_label_match(const struct pf_krule *rule, const char *label)
2792 while (*rule->label[i]) {
2793 if (strcmp(rule->label[i], label) == 0)
2802 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
2804 struct pf_state *match;
2806 unsigned int killed = 0;
2808 /* Call with unlocked hashrow */
2810 match = pf_find_state_all(key, dir, &more);
2811 if (match && !more) {
2812 pf_unlink_state(match, 0);
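/* Walk a single ID-hash row and unlink every state matching the kill request. */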
2820 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
2823 struct pf_state_key *sk;
2824 struct pf_addr *srcaddr, *dstaddr;
2825 struct pf_state_key_cmp match_key;
2826 int idx, killed = 0;
2828 u_int16_t srcport, dstport;
2830 relock_DIOCKILLSTATES:
2831 PF_HASHROW_LOCK(ih);
2832 LIST_FOREACH(s, &ih->states, entry) {
2833 sk = s->key[PF_SK_WIRE];
2834 if (s->direction == PF_OUT) {
2835 srcaddr = &sk->addr[1];
2836 dstaddr = &sk->addr[0];
2837 srcport = sk->port[1];
2838 dstport = sk->port[0];
2840 srcaddr = &sk->addr[0];
2841 dstaddr = &sk->addr[1];
2842 srcport = sk->port[0];
2843 dstport = sk->port[1];
2846 if (psk->psk_af && sk->af != psk->psk_af)
2849 if (psk->psk_proto && psk->psk_proto != sk->proto)
2852 if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
2853 &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
2856 if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
2857 &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
2860 if (! PF_MATCHA(psk->psk_rt_addr.neg,
2861 &psk->psk_rt_addr.addr.v.a.addr,
2862 &psk->psk_rt_addr.addr.v.a.mask,
2863 &s->rt_addr, sk->af))
2866 if (psk->psk_src.port_op != 0 &&
2867 ! pf_match_port(psk->psk_src.port_op,
2868 psk->psk_src.port[0], psk->psk_src.port[1], srcport))
2871 if (psk->psk_dst.port_op != 0 &&
2872 ! pf_match_port(psk->psk_dst.port_op,
2873 psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
2876 if (psk->psk_label[0] &&
2877 ! pf_label_match(s->rule.ptr, psk->psk_label))
2880 if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
2884 if (psk->psk_kill_match) {
2885 /* Create the key to find matching states, with lock held on this state. */
2888 bzero(&match_key, sizeof(match_key));
2890 if (s->direction == PF_OUT) {
2898 match_key.af = s->key[idx]->af;
2899 match_key.proto = s->key[idx]->proto;
2900 PF_ACPY(&match_key.addr[0],
2901 &s->key[idx]->addr[1], match_key.af);
2902 match_key.port[0] = s->key[idx]->port[1];
2903 PF_ACPY(&match_key.addr[1],
2904 &s->key[idx]->addr[0], match_key.af);
2905 match_key.port[1] = s->key[idx]->port[0];
2908 pf_unlink_state(s, PF_ENTER_LOCKED);
2911 if (psk->psk_kill_match)
2912 killed += pf_kill_matching_state(&match_key, dir);
2914 goto relock_DIOCKILLSTATES;
2916 PF_HASHROW_UNLOCK(ih);
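/*
 * Main character-device ioctl entry point: permission and securelevel checks
 * come first, followed by one large switch over the DIOC* commands.
 */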
2922 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2925 PF_RULES_RLOCK_TRACKER;
2927 #define ERROUT_IOCTL(target, x) \
2930 SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__); \
2935 /* XXX keep in sync with switch() below */
2936 if (securelevel_gt(td->td_ucred, 2))
2944 case DIOCGETSTATENV:
2945 case DIOCSETSTATUSIF:
2951 case DIOCGETTIMEOUT:
2952 case DIOCCLRRULECTRS:
2954 case DIOCGETALTQSV0:
2955 case DIOCGETALTQSV1:
2958 case DIOCGETQSTATSV0:
2959 case DIOCGETQSTATSV1:
2960 case DIOCGETRULESETS:
2961 case DIOCGETRULESET:
2962 case DIOCRGETTABLES:
2963 case DIOCRGETTSTATS:
2964 case DIOCRCLRTSTATS:
2970 case DIOCRGETASTATS:
2971 case DIOCRCLRASTATS:
2974 case DIOCGETSRCNODES:
2975 case DIOCCLRSRCNODES:
2976 case DIOCIGETIFACES:
2977 case DIOCGIFSPEEDV0:
2978 case DIOCGIFSPEEDV1:
2982 case DIOCRCLRTABLES:
2983 case DIOCRADDTABLES:
2984 case DIOCRDELTABLES:
2985 case DIOCRSETTFLAGS:
2986 if (((struct pfioc_table *)addr)->pfrio_flags &
2988 break; /* dummy operation ok */
2994 if (!(flags & FWRITE))
3000 case DIOCGETSTATENV:
3003 case DIOCGETTIMEOUT:
3005 case DIOCGETALTQSV0:
3006 case DIOCGETALTQSV1:
3009 case DIOCGETQSTATSV0:
3010 case DIOCGETQSTATSV1:
3011 case DIOCGETRULESETS:
3012 case DIOCGETRULESET:
3014 case DIOCRGETTABLES:
3015 case DIOCRGETTSTATS:
3017 case DIOCRGETASTATS:
3020 case DIOCGETSRCNODES:
3021 case DIOCIGETIFACES:
3022 case DIOCGIFSPEEDV1:
3023 case DIOCGIFSPEEDV0:
3026 case DIOCRCLRTABLES:
3027 case DIOCRADDTABLES:
3028 case DIOCRDELTABLES:
3029 case DIOCRCLRTSTATS:
3034 case DIOCRSETTFLAGS:
3035 if (((struct pfioc_table *)addr)->pfrio_flags &
3037 flags |= FWRITE; /* need write lock for dummy */
3038 break; /* dummy operation ok */
3042 if (((struct pfioc_rule *)addr)->action ==
3050 CURVNET_SET(TD_TO_VNET(td));
3054 sx_xlock(&pf_ioctl_lock);
3055 if (V_pf_status.running)
3061 V_pf_status.running = 1;
3062 V_pf_status.since = time_second;
3065 V_pf_stateid[cpu] = time_second;
3067 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
3072 sx_xlock(&pf_ioctl_lock);
3073 if (!V_pf_status.running)
3076 V_pf_status.running = 0;
3078 V_pf_status.since = time_second;
3079 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
3083 case DIOCADDRULENV: {
3084 struct pfioc_nv *nv = (struct pfioc_nv *)addr;
3085 nvlist_t *nvl = NULL;
3086 void *nvlpacked = NULL;
3087 struct pf_krule *rule = NULL;
3088 const char *anchor = "", *anchor_call = "";
3089 uint32_t ticket = 0, pool_ticket = 0;
3091 #define ERROUT(x) ERROUT_IOCTL(DIOCADDRULENV_error, x)
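/* Copy the packed nvlist in from userland, bounded by pf_ioctl_maxcount. */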
3093 if (nv->len > pf_ioctl_maxcount)
3096 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
3097 error = copyin(nv->data, nvlpacked, nv->len);
3101 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3105 if (! nvlist_exists_number(nvl, "ticket"))
3107 ticket = nvlist_get_number(nvl, "ticket");
3109 if (! nvlist_exists_number(nvl, "pool_ticket"))
3111 pool_ticket = nvlist_get_number(nvl, "pool_ticket");
3113 if (! nvlist_exists_nvlist(nvl, "rule"))
3116 error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
3121 if (nvlist_exists_string(nvl, "anchor"))
3122 anchor = nvlist_get_string(nvl, "anchor");
3123 if (nvlist_exists_string(nvl, "anchor_call"))
3124 anchor_call = nvlist_get_string(nvl, "anchor_call");
3126 if ((error = nvlist_error(nvl)))
3129 /* Frees rule on error */
3130 error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
3133 nvlist_destroy(nvl);
3134 free(nvlpacked, M_TEMP);
3137 DIOCADDRULENV_error:
3138 pf_krule_free(rule);
3139 nvlist_destroy(nvl);
3140 free(nvlpacked, M_TEMP);
3145 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
3146 struct pf_krule *rule;
3148 rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
3149 error = pf_rule_to_krule(&pr->rule, rule);
3151 free(rule, M_PFRULE);
3155 pr->anchor[sizeof(pr->anchor) - 1] = 0;
3157 /* Frees rule on error */
3158 error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
3159 pr->anchor, pr->anchor_call, td);
3163 case DIOCGETRULES: {
3164 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
3165 struct pf_kruleset *ruleset;
3166 struct pf_krule *tail;
3170 pr->anchor[sizeof(pr->anchor) - 1] = 0;
3171 ruleset = pf_find_kruleset(pr->anchor);
3172 if (ruleset == NULL) {
3177 rs_num = pf_get_ruleset_number(pr->rule.action);
3178 if (rs_num >= PF_RULESET_MAX) {
3183 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3186 pr->nr = tail->nr + 1;
3189 pr->ticket = ruleset->rules[rs_num].active.ticket;
3195 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
3196 struct pf_kruleset *ruleset;
3197 struct pf_krule *rule;
3201 pr->anchor[sizeof(pr->anchor) - 1] = 0;
3202 ruleset = pf_find_kruleset(pr->anchor);
3203 if (ruleset == NULL) {
3208 rs_num = pf_get_ruleset_number(pr->rule.action);
3209 if (rs_num >= PF_RULESET_MAX) {
3214 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
3219 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3220 while ((rule != NULL) && (rule->nr != pr->nr))
3221 rule = TAILQ_NEXT(rule, entries);
3228 pf_krule_to_rule(rule, &pr->rule);
3230 if (pf_kanchor_copyout(ruleset, rule, pr)) {
3235 pf_addr_copyout(&pr->rule.src.addr);
3236 pf_addr_copyout(&pr->rule.dst.addr);
3238 if (pr->action == PF_GET_CLR_CNTR) {
3239 counter_u64_zero(rule->evaluations);
3240 for (int i = 0; i < 2; i++) {
3241 counter_u64_zero(rule->packets[i]);
3242 counter_u64_zero(rule->bytes[i]);
3244 counter_u64_zero(rule->states_tot);
3250 case DIOCGETRULENV: {
3251 struct pfioc_nv *nv = (struct pfioc_nv *)addr;
3252 nvlist_t *nvrule = NULL;
3253 nvlist_t *nvl = NULL;
3254 struct pf_kruleset *ruleset;
3255 struct pf_krule *rule;
3256 void *nvlpacked = NULL;
3258 bool clear_counter = false;
3260 #define ERROUT(x) ERROUT_IOCTL(DIOCGETRULENV_error, x)
3262 if (nv->len > pf_ioctl_maxcount)
3265 /* Copy the request in */
3266 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
3267 if (nvlpacked == NULL)
3270 error = copyin(nv->data, nvlpacked, nv->len);
3274 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3278 if (! nvlist_exists_string(nvl, "anchor"))
3280 if (! nvlist_exists_number(nvl, "ruleset"))
3282 if (! nvlist_exists_number(nvl, "ticket"))
3284 if (! nvlist_exists_number(nvl, "nr"))
3287 if (nvlist_exists_bool(nvl, "clear_counter"))
3288 clear_counter = nvlist_get_bool(nvl, "clear_counter");
3290 if (clear_counter && !(flags & FWRITE))
3293 nr = nvlist_get_number(nvl, "nr");
3296 ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
3297 if (ruleset == NULL) {
3302 rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
3303 if (rs_num >= PF_RULESET_MAX) {
3308 if (nvlist_get_number(nvl, "ticket") !=
3309 ruleset->rules[rs_num].active.ticket) {
3315 if ((error = nvlist_error(nvl))) {
3320 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3321 while ((rule != NULL) && (rule->nr != nr))
3322 rule = TAILQ_NEXT(rule, entries);
3329 nvrule = pf_krule_to_nvrule(rule);
3331 nvlist_destroy(nvl);
3332 nvl = nvlist_create(0);
3337 nvlist_add_number(nvl, "nr", nr);
3338 nvlist_add_nvlist(nvl, "rule", nvrule);
3340 if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
3345 free(nvlpacked, M_TEMP);
3346 nvlpacked = nvlist_pack(nvl, &nv->len);
3347 if (nvlpacked == NULL) {
3352 if (nv->size == 0) {
3356 else if (nv->size < nv->len) {
3361 error = copyout(nvlpacked, nv->data, nv->len);
3363 if (clear_counter) {
3364 counter_u64_zero(rule->evaluations);
3365 for (int i = 0; i < 2; i++) {
3366 counter_u64_zero(rule->packets[i]);
3367 counter_u64_zero(rule->bytes[i]);
3369 counter_u64_zero(rule->states_tot);
3374 DIOCGETRULENV_error:
3375 free(nvlpacked, M_TEMP);
3376 nvlist_destroy(nvrule);
3377 nvlist_destroy(nvl);
3382 case DIOCCHANGERULE: {
3383 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
3384 struct pf_kruleset *ruleset;
3385 struct pf_krule *oldrule = NULL, *newrule = NULL;
3386 struct pfi_kkif *kif = NULL;
3387 struct pf_kpooladdr *pa;
3391 if (pcr->action < PF_CHANGE_ADD_HEAD ||
3392 pcr->action > PF_CHANGE_GET_TICKET) {
3396 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3401 if (pcr->action != PF_CHANGE_REMOVE) {
3402 newrule = malloc(sizeof(*newrule), M_PFRULE, M_WAITOK);
3403 error = pf_rule_to_krule(&pcr->rule, newrule);
3405 free(newrule, M_PFRULE);
3409 if (newrule->ifname[0])
3410 kif = pf_kkif_create(M_WAITOK);
3411 newrule->evaluations = counter_u64_alloc(M_WAITOK);
3412 for (int i = 0; i < 2; i++) {
3413 newrule->packets[i] =
3414 counter_u64_alloc(M_WAITOK);
3416 counter_u64_alloc(M_WAITOK);
3418 newrule->states_cur = counter_u64_alloc(M_WAITOK);
3419 newrule->states_tot = counter_u64_alloc(M_WAITOK);
3420 newrule->src_nodes = counter_u64_alloc(M_WAITOK);
3421 newrule->cuid = td->td_ucred->cr_ruid;
3422 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
3423 TAILQ_INIT(&newrule->rpool.list);
3425 #define ERROUT(x) { error = (x); goto DIOCCHANGERULE_error; }
3428 if (!(pcr->action == PF_CHANGE_REMOVE ||
3429 pcr->action == PF_CHANGE_GET_TICKET) &&
3430 pcr->pool_ticket != V_ticket_pabuf)
3433 ruleset = pf_find_kruleset(pcr->anchor);
3434 if (ruleset == NULL)
3437 rs_num = pf_get_ruleset_number(pcr->rule.action);
3438 if (rs_num >= PF_RULESET_MAX)
3441 if (pcr->action == PF_CHANGE_GET_TICKET) {
3442 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3444 } else if (pcr->ticket !=
3445 ruleset->rules[rs_num].active.ticket)
3448 if (pcr->action != PF_CHANGE_REMOVE) {
3449 if (newrule->ifname[0]) {
3450 newrule->kif = pfi_kkif_attach(kif,
3453 pfi_kkif_ref(newrule->kif);
3455 newrule->kif = NULL;
3457 if (newrule->rtableid > 0 &&
3458 newrule->rtableid >= rt_numfibs)
3463 if (newrule->qname[0] != 0) {
3465 pf_qname2qid(newrule->qname)) == 0)
3467 else if (newrule->pqname[0] != 0) {
3468 if ((newrule->pqid =
3469 pf_qname2qid(newrule->pqname)) == 0)
3472 newrule->pqid = newrule->qid;
3475 if (newrule->tagname[0])
3477 pf_tagname2tag(newrule->tagname)) == 0)
3479 if (newrule->match_tagname[0])
3480 if ((newrule->match_tag = pf_tagname2tag(
3481 newrule->match_tagname)) == 0)
3483 if (newrule->rt && !newrule->direction)
3487 if (newrule->logif >= PFLOGIFS_MAX)
3489 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3491 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3493 if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3495 TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
3496 if (pa->addr.type == PF_ADDR_TABLE) {
3498 pfr_attach_table(ruleset,
3499 pa->addr.v.tblname);
3500 if (pa->addr.p.tbl == NULL)
3504 newrule->overload_tbl = NULL;
3505 if (newrule->overload_tblname[0]) {
3506 if ((newrule->overload_tbl = pfr_attach_table(
3507 ruleset, newrule->overload_tblname)) ==
3511 newrule->overload_tbl->pfrkt_flags |=
3515 pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list);
3516 if (((((newrule->action == PF_NAT) ||
3517 (newrule->action == PF_RDR) ||
3518 (newrule->action == PF_BINAT) ||
3519 (newrule->rt > PF_NOPFROUTE)) &&
3520 !newrule->anchor)) &&
3521 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
3525 pf_free_rule(newrule);
3530 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3532 pf_empty_kpool(&V_pf_pabuf);
3534 if (pcr->action == PF_CHANGE_ADD_HEAD)
3535 oldrule = TAILQ_FIRST(
3536 ruleset->rules[rs_num].active.ptr);
3537 else if (pcr->action == PF_CHANGE_ADD_TAIL)
3538 oldrule = TAILQ_LAST(
3539 ruleset->rules[rs_num].active.ptr, pf_krulequeue);
3541 oldrule = TAILQ_FIRST(
3542 ruleset->rules[rs_num].active.ptr);
3543 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3544 oldrule = TAILQ_NEXT(oldrule, entries);
3545 if (oldrule == NULL) {
3546 if (newrule != NULL)
3547 pf_free_rule(newrule);
3554 if (pcr->action == PF_CHANGE_REMOVE) {
3555 pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
3557 ruleset->rules[rs_num].active.rcount--;
3559 if (oldrule == NULL)
3561 ruleset->rules[rs_num].active.ptr,
3563 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3564 pcr->action == PF_CHANGE_ADD_BEFORE)
3565 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3568 ruleset->rules[rs_num].active.ptr,
3569 oldrule, newrule, entries);
3570 ruleset->rules[rs_num].active.rcount++;
3574 TAILQ_FOREACH(oldrule,
3575 ruleset->rules[rs_num].active.ptr, entries)
3578 ruleset->rules[rs_num].active.ticket++;
3580 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3581 pf_remove_if_empty_kruleset(ruleset);
3587 DIOCCHANGERULE_error:
3589 pf_krule_free(newrule);
3594 case DIOCCLRSTATES: {
3595 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
3596 struct pf_kstate_kill kill;
3598 error = pf_state_kill_to_kstate_kill(psk, &kill);
3602 psk->psk_killed = pf_clear_states(&kill);
3606 case DIOCCLRSTATESNV: {
3607 error = pf_clearstates_nv((struct pfioc_nv *)addr);
3611 case DIOCKILLSTATES: {
3612 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
3613 struct pf_kstate_kill kill;
3615 error = pf_state_kill_to_kstate_kill(psk, &kill);
3619 psk->psk_killed = 0;
3620 error = pf_killstates(&kill, &psk->psk_killed);
3624 case DIOCKILLSTATESNV: {
3625 error = pf_killstates_nv((struct pfioc_nv *)addr);
3629 case DIOCADDSTATE: {
3630 struct pfioc_state *ps = (struct pfioc_state *)addr;
3631 struct pfsync_state *sp = &ps->state;
3633 if (sp->timeout >= PFTM_MAX) {
3637 if (V_pfsync_state_import_ptr != NULL) {
3639 error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL);
3646 case DIOCGETSTATE: {
3647 struct pfioc_state *ps = (struct pfioc_state *)addr;
3650 s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
3656 pfsync_state_export(&ps->state, s);
3661 case DIOCGETSTATENV: {
3662 error = pf_getstate((struct pfioc_nv *)addr);
3666 case DIOCGETSTATES: {
3667 struct pfioc_states *ps = (struct pfioc_states *)addr;
3669 struct pfsync_state *pstore, *p;
3672 if (ps->ps_len <= 0) {
3673 nr = uma_zone_get_cur(V_pf_state_z);
3674 ps->ps_len = sizeof(struct pfsync_state) * nr;
3678 p = pstore = malloc(ps->ps_len, M_TEMP, M_WAITOK | M_ZERO);
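/* Walk every ID-hash bucket and export each state into the local buffer. */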
3681 for (i = 0; i <= pf_hashmask; i++) {
3682 struct pf_idhash *ih = &V_pf_idhash[i];
3684 PF_HASHROW_LOCK(ih);
3685 LIST_FOREACH(s, &ih->states, entry) {
3686 if (s->timeout == PFTM_UNLINKED)
3689 if ((nr+1) * sizeof(*p) > ps->ps_len) {
3690 PF_HASHROW_UNLOCK(ih);
3691 goto DIOCGETSTATES_full;
3693 pfsync_state_export(p, s);
3697 PF_HASHROW_UNLOCK(ih);
3700 error = copyout(pstore, ps->ps_states,
3701 sizeof(struct pfsync_state) * nr);
3703 free(pstore, M_TEMP);
3706 ps->ps_len = sizeof(struct pfsync_state) * nr;
3707 free(pstore, M_TEMP);
3712 case DIOCGETSTATUS: {
3713 struct pf_status *s = (struct pf_status *)addr;
3716 s->running = V_pf_status.running;
3717 s->since = V_pf_status.since;
3718 s->debug = V_pf_status.debug;
3719 s->hostid = V_pf_status.hostid;
3720 s->states = V_pf_status.states;
3721 s->src_nodes = V_pf_status.src_nodes;
3723 for (int i = 0; i < PFRES_MAX; i++)
3725 counter_u64_fetch(V_pf_status.counters[i]);
3726 for (int i = 0; i < LCNT_MAX; i++)
3728 counter_u64_fetch(V_pf_status.lcounters[i]);
3729 for (int i = 0; i < FCNT_MAX; i++)
3731 counter_u64_fetch(V_pf_status.fcounters[i]);
3732 for (int i = 0; i < SCNT_MAX; i++)
3734 counter_u64_fetch(V_pf_status.scounters[i]);
3736 bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ);
3737 bcopy(V_pf_status.pf_chksum, s->pf_chksum,
3738 PF_MD5_DIGEST_LENGTH);
3740 pfi_update_status(s->ifname, s);
3745 case DIOCSETSTATUSIF: {
3746 struct pfioc_if *pi = (struct pfioc_if *)addr;
3748 if (pi->ifname[0] == 0) {
3749 bzero(V_pf_status.ifname, IFNAMSIZ);
3753 strlcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
3758 case DIOCCLRSTATUS: {
3760 for (int i = 0; i < PFRES_MAX; i++)
3761 counter_u64_zero(V_pf_status.counters[i]);
3762 for (int i = 0; i < FCNT_MAX; i++)
3763 counter_u64_zero(V_pf_status.fcounters[i]);
3764 for (int i = 0; i < SCNT_MAX; i++)
3765 counter_u64_zero(V_pf_status.scounters[i]);
3766 for (int i = 0; i < LCNT_MAX; i++)
3767 counter_u64_zero(V_pf_status.lcounters[i]);
3768 V_pf_status.since = time_second;
3769 if (*V_pf_status.ifname)
3770 pfi_update_status(V_pf_status.ifname, NULL);
3776 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
3777 struct pf_state_key *sk;
3778 struct pf_state *state;
3779 struct pf_state_key_cmp key;
3780 int m = 0, direction = pnl->direction;
3783 /* NATLOOK src and dst are reversed, so reverse sidx/didx */
3784 sidx = (direction == PF_IN) ? 1 : 0;
3785 didx = (direction == PF_IN) ? 0 : 1;
3788 PF_AZERO(&pnl->saddr, pnl->af) ||
3789 PF_AZERO(&pnl->daddr, pnl->af) ||
3790 ((pnl->proto == IPPROTO_TCP ||
3791 pnl->proto == IPPROTO_UDP) &&
3792 (!pnl->dport || !pnl->sport)))
3795 bzero(&key, sizeof(key));
3797 key.proto = pnl->proto;
3798 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
3799 key.port[sidx] = pnl->sport;
3800 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
3801 key.port[didx] = pnl->dport;
3803 state = pf_find_state_all(&key, direction, &m);
3806 error = E2BIG; /* more than one state */
3807 else if (state != NULL) {
3808 /* XXXGL: not locked read */
3809 sk = state->key[sidx];
3810 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
3811 pnl->rsport = sk->port[sidx];
3812 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
3813 pnl->rdport = sk->port[didx];
3820 case DIOCSETTIMEOUT: {
3821 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
3824 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3830 old = V_pf_default_rule.timeout[pt->timeout];
3831 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
3833 V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
3834 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
3835 wakeup(pf_purge_thread);
3841 case DIOCGETTIMEOUT: {
3842 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
3844 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3849 pt->seconds = V_pf_default_rule.timeout[pt->timeout];
3854 case DIOCGETLIMIT: {
3855 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
3857 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3862 pl->limit = V_pf_limits[pl->index].limit;
3867 case DIOCSETLIMIT: {
3868 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
3872 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3873 V_pf_limits[pl->index].zone == NULL) {
3878 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
3879 old_limit = V_pf_limits[pl->index].limit;
3880 V_pf_limits[pl->index].limit = pl->limit;
3881 pl->limit = old_limit;
3886 case DIOCSETDEBUG: {
3887 u_int32_t *level = (u_int32_t *)addr;
3890 V_pf_status.debug = *level;
3895 case DIOCCLRRULECTRS: {
3896 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
3897 struct pf_kruleset *ruleset = &pf_main_ruleset;
3898 struct pf_krule *rule;
3902 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
3903 counter_u64_zero(rule->evaluations);
3904 for (int i = 0; i < 2; i++) {
3905 counter_u64_zero(rule->packets[i]);
3906 counter_u64_zero(rule->bytes[i]);
3913 case DIOCGIFSPEEDV0:
3914 case DIOCGIFSPEEDV1: {
3915 struct pf_ifspeed_v1 *psp = (struct pf_ifspeed_v1 *)addr;
3916 struct pf_ifspeed_v1 ps;
3919 if (psp->ifname[0] != 0) {
3920 /* Can we completely trust user-land? */
3921 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
3922 ifp = ifunit(ps.ifname);
3925 (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
3926 if (cmd == DIOCGIFSPEEDV1)
3927 psp->baudrate = ifp->if_baudrate;
3936 case DIOCSTARTALTQ: {
3937 struct pf_altq *altq;
3940 /* enable all altq interfaces on active list */
3941 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
3942 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
3943 error = pf_enable_altq(altq);
3949 V_pf_altq_running = 1;
3951 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
3955 case DIOCSTOPALTQ: {
3956 struct pf_altq *altq;
3959 /* disable all altq interfaces on active list */
3960 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
3961 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
3962 error = pf_disable_altq(altq);
3968 V_pf_altq_running = 0;
3970 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
3975 case DIOCADDALTQV1: {
3976 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
3977 struct pf_altq *altq, *a;
3980 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
3981 error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
3984 altq->local_flags = 0;
3987 if (pa->ticket != V_ticket_altqs_inactive) {
3989 free(altq, M_PFALTQ);
3995 /* if this is for a queue, find the discipline and
3996  * copy the necessary fields */
3998 if (altq->qname[0] != 0) {
3999 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
4002 free(altq, M_PFALTQ);
4005 altq->altq_disc = NULL;
4006 TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
4007 if (strncmp(a->ifname, altq->ifname,
4009 altq->altq_disc = a->altq_disc;
4015 if ((ifp = ifunit(altq->ifname)) == NULL)
4016 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
4018 error = altq_add(ifp, altq);
4022 free(altq, M_PFALTQ);
4026 if (altq->qname[0] != 0)
4027 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
4029 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
4030 /* version error check done on import above */
4031 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4036 case DIOCGETALTQSV0:
4037 case DIOCGETALTQSV1: {
4038 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
4039 struct pf_altq *altq;
4043 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
4045 TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
4047 pa->ticket = V_ticket_altqs_active;
4053 case DIOCGETALTQV1: {
4054 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
4055 struct pf_altq *altq;
4058 if (pa->ticket != V_ticket_altqs_active) {
4063 altq = pf_altq_get_nth_active(pa->nr);
4069 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4074 case DIOCCHANGEALTQV0:
4075 case DIOCCHANGEALTQV1:
4076 /* CHANGEALTQ not supported yet! */
4080 case DIOCGETQSTATSV0:
4081 case DIOCGETQSTATSV1: {
4082 struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr;
4083 struct pf_altq *altq;
4088 if (pq->ticket != V_ticket_altqs_active) {
4093 nbytes = pq->nbytes;
4094 altq = pf_altq_get_nth_active(pq->nr);
4101 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
4107 if (cmd == DIOCGETQSTATSV0)
4108 version = 0; /* DIOCGETQSTATSV0 means stats struct v0 */
4110 version = pq->version;
4111 error = altq_getqstats(altq, pq->buf, &nbytes, version);
4113 pq->scheduler = altq->scheduler;
4114 pq->nbytes = nbytes;
4120 case DIOCBEGINADDRS: {
4121 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
4124 pf_empty_kpool(&V_pf_pabuf);
4125 pp->ticket = ++V_ticket_pabuf;
4131 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
4132 struct pf_kpooladdr *pa;
4133 struct pfi_kkif *kif = NULL;
4136 if (pp->af == AF_INET) {
4137 error = EAFNOSUPPORT;
4142 if (pp->af == AF_INET6) {
4143 error = EAFNOSUPPORT;
4147 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
4148 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
4149 pp->addr.addr.type != PF_ADDR_TABLE) {
4153 if (pp->addr.addr.p.dyn != NULL) {
4157 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
4158 pf_pooladdr_to_kpooladdr(&pp->addr, pa);
4160 kif = pf_kkif_create(M_WAITOK);
4162 if (pp->ticket != V_ticket_pabuf) {
4170 if (pa->ifname[0]) {
4171 pa->kif = pfi_kkif_attach(kif, pa->ifname);
4173 pfi_kkif_ref(pa->kif);
4176 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
4177 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
4179 pfi_kkif_unref(pa->kif);
4184 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
4189 case DIOCGETADDRS: {
4190 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
4191 struct pf_kpool *pool;
4192 struct pf_kpooladdr *pa;
4196 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
4197 pp->r_num, 0, 1, 0);
4203 TAILQ_FOREACH(pa, &pool->list, entries)
4210 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
4211 struct pf_kpool *pool;
4212 struct pf_kpooladdr *pa;
4216 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
4217 pp->r_num, 0, 1, 1);
4223 pa = TAILQ_FIRST(&pool->list);
4224 while ((pa != NULL) && (nr < pp->nr)) {
4225 pa = TAILQ_NEXT(pa, entries);
4233 pf_kpooladdr_to_pooladdr(pa, &pp->addr);
4234 pf_addr_copyout(&pp->addr.addr);
4239 case DIOCCHANGEADDR: {
4240 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
4241 struct pf_kpool *pool;
4242 struct pf_kpooladdr *oldpa = NULL, *newpa = NULL;
4243 struct pf_kruleset *ruleset;
4244 struct pfi_kkif *kif = NULL;
4246 if (pca->action < PF_CHANGE_ADD_HEAD ||
4247 pca->action > PF_CHANGE_REMOVE) {
4251 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4252 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4253 pca->addr.addr.type != PF_ADDR_TABLE) {
4257 if (pca->addr.addr.p.dyn != NULL) {
4262 if (pca->action != PF_CHANGE_REMOVE) {
4264 if (pca->af == AF_INET) {
4265 error = EAFNOSUPPORT;
4270 if (pca->af == AF_INET6) {
4271 error = EAFNOSUPPORT;
4275 newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
4276 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
4277 if (newpa->ifname[0])
4278 kif = pf_kkif_create(M_WAITOK);
4281 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
4283 ruleset = pf_find_kruleset(pca->anchor);
4284 if (ruleset == NULL)
4287 pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
4288 pca->r_num, pca->r_last, 1, 1);
4292 if (pca->action != PF_CHANGE_REMOVE) {
4293 if (newpa->ifname[0]) {
4294 newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
4295 pfi_kkif_ref(newpa->kif);
4299 switch (newpa->addr.type) {
4300 case PF_ADDR_DYNIFTL:
4301 error = pfi_dynaddr_setup(&newpa->addr,
4305 newpa->addr.p.tbl = pfr_attach_table(ruleset,
4306 newpa->addr.v.tblname);
4307 if (newpa->addr.p.tbl == NULL)
4312 goto DIOCCHANGEADDR_error;
4315 switch (pca->action) {
4316 case PF_CHANGE_ADD_HEAD:
4317 oldpa = TAILQ_FIRST(&pool->list);
4319 case PF_CHANGE_ADD_TAIL:
4320 oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
4323 oldpa = TAILQ_FIRST(&pool->list);
4324 for (int i = 0; oldpa && i < pca->nr; i++)
4325 oldpa = TAILQ_NEXT(oldpa, entries);
4331 if (pca->action == PF_CHANGE_REMOVE) {
4332 TAILQ_REMOVE(&pool->list, oldpa, entries);
4333 switch (oldpa->addr.type) {
4334 case PF_ADDR_DYNIFTL:
4335 pfi_dynaddr_remove(oldpa->addr.p.dyn);
4338 pfr_detach_table(oldpa->addr.p.tbl);
4342 pfi_kkif_unref(oldpa->kif);
4343 free(oldpa, M_PFRULE);
4346 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4347 else if (pca->action == PF_CHANGE_ADD_HEAD ||
4348 pca->action == PF_CHANGE_ADD_BEFORE)
4349 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4351 TAILQ_INSERT_AFTER(&pool->list, oldpa,
4355 pool->cur = TAILQ_FIRST(&pool->list);
4356 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
4361 DIOCCHANGEADDR_error:
4362 if (newpa != NULL) {
4364 pfi_kkif_unref(newpa->kif);
4365 free(newpa, M_PFRULE);
4372 case DIOCGETRULESETS: {
4373 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
4374 struct pf_kruleset *ruleset;
4375 struct pf_kanchor *anchor;
4378 pr->path[sizeof(pr->path) - 1] = 0;
4379 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4385 if (ruleset->anchor == NULL) {
4386 /* XXX kludge for pf_main_ruleset */
4387 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4388 if (anchor->parent == NULL)
4391 RB_FOREACH(anchor, pf_kanchor_node,
4392 &ruleset->anchor->children)
4399 case DIOCGETRULESET: {
4400 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
4401 struct pf_kruleset *ruleset;
4402 struct pf_kanchor *anchor;
4406 pr->path[sizeof(pr->path) - 1] = 0;
4407 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4413 if (ruleset->anchor == NULL) {
4414 /* XXX kludge for pf_main_ruleset */
4415 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4416 if (anchor->parent == NULL && nr++ == pr->nr) {
4417 strlcpy(pr->name, anchor->name,
4422 RB_FOREACH(anchor, pf_kanchor_node,
4423 &ruleset->anchor->children)
4424 if (nr++ == pr->nr) {
4425 strlcpy(pr->name, anchor->name,
4436 case DIOCRCLRTABLES: {
4437 struct pfioc_table *io = (struct pfioc_table *)addr;
4439 if (io->pfrio_esize != 0) {
4444 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
4445 io->pfrio_flags | PFR_FLAG_USERIOCTL);
4450 case DIOCRADDTABLES: {
4451 struct pfioc_table *io = (struct pfioc_table *)addr;
4452 struct pfr_table *pfrts;
4455 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4460 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4461 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4466 totlen = io->pfrio_size * sizeof(struct pfr_table);
4467 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4469 error = copyin(io->pfrio_buffer, pfrts, totlen);
4471 free(pfrts, M_TEMP);
4475 error = pfr_add_tables(pfrts, io->pfrio_size,
4476 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4478 free(pfrts, M_TEMP);
4482 case DIOCRDELTABLES: {
4483 struct pfioc_table *io = (struct pfioc_table *)addr;
4484 struct pfr_table *pfrts;
4487 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4492 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4493 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4498 totlen = io->pfrio_size * sizeof(struct pfr_table);
4499 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4501 error = copyin(io->pfrio_buffer, pfrts, totlen);
4503 free(pfrts, M_TEMP);
4507 error = pfr_del_tables(pfrts, io->pfrio_size,
4508 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4510 free(pfrts, M_TEMP);
4514 case DIOCRGETTABLES: {
4515 struct pfioc_table *io = (struct pfioc_table *)addr;
4516 struct pfr_table *pfrts;
4520 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4525 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4531 io->pfrio_size = min(io->pfrio_size, n);
4533 totlen = io->pfrio_size * sizeof(struct pfr_table);
4535 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4537 if (pfrts == NULL) {
4542 error = pfr_get_tables(&io->pfrio_table, pfrts,
4543 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4546 error = copyout(pfrts, io->pfrio_buffer, totlen);
4547 free(pfrts, M_TEMP);
4551 case DIOCRGETTSTATS: {
4552 struct pfioc_table *io = (struct pfioc_table *)addr;
4553 struct pfr_tstats *pfrtstats;
4557 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
4562 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4568 io->pfrio_size = min(io->pfrio_size, n);
4570 totlen = io->pfrio_size * sizeof(struct pfr_tstats);
4571 pfrtstats = mallocarray(io->pfrio_size,
4572 sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT);
4573 if (pfrtstats == NULL) {
4578 error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
4579 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4582 error = copyout(pfrtstats, io->pfrio_buffer, totlen);
4583 free(pfrtstats, M_TEMP);
4587 case DIOCRCLRTSTATS: {
4588 struct pfioc_table *io = (struct pfioc_table *)addr;
4589 struct pfr_table *pfrts;
4592 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4597 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4598 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4599 /* We used to count tables and use the minimum required
4600 * size, so we didn't fail on overly large requests. */
4602 io->pfrio_size = pf_ioctl_maxcount;
4606 totlen = io->pfrio_size * sizeof(struct pfr_table);
4607 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4609 if (pfrts == NULL) {
4613 error = copyin(io->pfrio_buffer, pfrts, totlen);
4615 free(pfrts, M_TEMP);
4620 error = pfr_clr_tstats(pfrts, io->pfrio_size,
4621 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4623 free(pfrts, M_TEMP);
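/* DIOCRSETTFLAGS: set and clear per-table flags for an array of tables. */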
4627 case DIOCRSETTFLAGS: {
4628 struct pfioc_table *io = (struct pfioc_table *)addr;
4629 struct pfr_table *pfrts;
4633 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4639 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4646 io->pfrio_size = min(io->pfrio_size, n);
4649 totlen = io->pfrio_size * sizeof(struct pfr_table);
4650 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4652 error = copyin(io->pfrio_buffer, pfrts, totlen);
4654 free(pfrts, M_TEMP);
4658 error = pfr_set_tflags(pfrts, io->pfrio_size,
4659 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
4660 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4662 free(pfrts, M_TEMP);
4666 case DIOCRCLRADDRS: {
4667 struct pfioc_table *io = (struct pfioc_table *)addr;
4669 if (io->pfrio_esize != 0) {
4674 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
4675 io->pfrio_flags | PFR_FLAG_USERIOCTL);
4680 case DIOCRADDADDRS: {
4681 struct pfioc_table *io = (struct pfioc_table *)addr;
4682 struct pfr_addr *pfras;
4685 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4689 if (io->pfrio_size < 0 ||
4690 io->pfrio_size > pf_ioctl_maxcount ||
4691 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4695 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4696 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4702 error = copyin(io->pfrio_buffer, pfras, totlen);
4704 free(pfras, M_TEMP);
4708 error = pfr_add_addrs(&io->pfrio_table, pfras,
4709 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
4710 PFR_FLAG_USERIOCTL);
4712 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4713 error = copyout(pfras, io->pfrio_buffer, totlen);
4714 free(pfras, M_TEMP);
4718 case DIOCRDELADDRS: {
4719 struct pfioc_table *io = (struct pfioc_table *)addr;
4720 struct pfr_addr *pfras;
4723 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4727 if (io->pfrio_size < 0 ||
4728 io->pfrio_size > pf_ioctl_maxcount ||
4729 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4733 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4734 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4740 error = copyin(io->pfrio_buffer, pfras, totlen);
4742 free(pfras, M_TEMP);
4746 error = pfr_del_addrs(&io->pfrio_table, pfras,
4747 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
4748 PFR_FLAG_USERIOCTL);
4750 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4751 error = copyout(pfras, io->pfrio_buffer, totlen);
4752 free(pfras, M_TEMP);
4756 case DIOCRSETADDRS: {
4757 struct pfioc_table *io = (struct pfioc_table *)addr;
4758 struct pfr_addr *pfras;
4759 size_t totlen, count;
4761 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4765 if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
4769 count = max(io->pfrio_size, io->pfrio_size2);
4770 if (count > pf_ioctl_maxcount ||
4771 WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
4775 totlen = count * sizeof(struct pfr_addr);
4776 pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
4782 error = copyin(io->pfrio_buffer, pfras, totlen);
4784 free(pfras, M_TEMP);
4788 error = pfr_set_addrs(&io->pfrio_table, pfras,
4789 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
4790 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
4791 PFR_FLAG_USERIOCTL, 0);
4793 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4794 error = copyout(pfras, io->pfrio_buffer, totlen);
4795 free(pfras, M_TEMP);
4799 case DIOCRGETADDRS: {
4800 struct pfioc_table *io = (struct pfioc_table *)addr;
4801 struct pfr_addr *pfras;
4804 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4808 if (io->pfrio_size < 0 ||
4809 io->pfrio_size > pf_ioctl_maxcount ||
4810 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4814 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4815 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4822 error = pfr_get_addrs(&io->pfrio_table, pfras,
4823 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4826 error = copyout(pfras, io->pfrio_buffer, totlen);
4827 free(pfras, M_TEMP);
4831 case DIOCRGETASTATS: {
4832 struct pfioc_table *io = (struct pfioc_table *)addr;
4833 struct pfr_astats *pfrastats;
4836 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
4840 if (io->pfrio_size < 0 ||
4841 io->pfrio_size > pf_ioctl_maxcount ||
4842 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
4846 totlen = io->pfrio_size * sizeof(struct pfr_astats);
4847 pfrastats = mallocarray(io->pfrio_size,
4848 sizeof(struct pfr_astats), M_TEMP, M_NOWAIT);
4854 error = pfr_get_astats(&io->pfrio_table, pfrastats,
4855 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4858 error = copyout(pfrastats, io->pfrio_buffer, totlen);
4859 free(pfrastats, M_TEMP);
4863 case DIOCRCLRASTATS: {
4864 struct pfioc_table *io = (struct pfioc_table *)addr;
4865 struct pfr_addr *pfras;
4868 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4872 if (io->pfrio_size < 0 ||
4873 io->pfrio_size > pf_ioctl_maxcount ||
4874 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4878 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4879 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4885 error = copyin(io->pfrio_buffer, pfras, totlen);
4887 free(pfras, M_TEMP);
4891 error = pfr_clr_astats(&io->pfrio_table, pfras,
4892 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
4893 PFR_FLAG_USERIOCTL);
4895 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4896 error = copyout(pfras, io->pfrio_buffer, totlen);
4897 free(pfras, M_TEMP);
4901 case DIOCRTSTADDRS: {
4902 struct pfioc_table *io = (struct pfioc_table *)addr;
4903 struct pfr_addr *pfras;
4906 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4910 if (io->pfrio_size < 0 ||
4911 io->pfrio_size > pf_ioctl_maxcount ||
4912 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4916 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4917 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4923 error = copyin(io->pfrio_buffer, pfras, totlen);
4925 free(pfras, M_TEMP);
4929 error = pfr_tst_addrs(&io->pfrio_table, pfras,
4930 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
4931 PFR_FLAG_USERIOCTL);
4934 error = copyout(pfras, io->pfrio_buffer, totlen);
4935 free(pfras, M_TEMP);
4939 case DIOCRINADEFINE: {
4940 struct pfioc_table *io = (struct pfioc_table *)addr;
4941 struct pfr_addr *pfras;
4944 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4948 if (io->pfrio_size < 0 ||
4949 io->pfrio_size > pf_ioctl_maxcount ||
4950 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4954 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4955 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4961 error = copyin(io->pfrio_buffer, pfras, totlen);
4963 free(pfras, M_TEMP);
4967 error = pfr_ina_define(&io->pfrio_table, pfras,
4968 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
4969 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4971 free(pfras, M_TEMP);
4976 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
4978 error = pf_osfp_add(io);
4984 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
4986 error = pf_osfp_get(io);
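/*
 * The DIOCXBEGIN / DIOCXROLLBACK / DIOCXCOMMIT handlers below implement
 * transactional loading: each element of the pfioc_trans array names a
 * ruleset, table or altq set to begin, roll back or commit under one ticket.
 */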
4992 struct pfioc_trans *io = (struct pfioc_trans *)addr;
4993 struct pfioc_trans_e *ioes, *ioe;
4997 if (io->esize != sizeof(*ioe)) {
5002 io->size > pf_ioctl_maxcount ||
5003 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5007 totlen = sizeof(struct pfioc_trans_e) * io->size;
5008 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5014 error = copyin(io->array, ioes, totlen);
5020 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5021 switch (ioe->rs_num) {
5023 case PF_RULESET_ALTQ:
5024 if (ioe->anchor[0]) {
5030 if ((error = pf_begin_altq(&ioe->ticket))) {
5037 case PF_RULESET_TABLE:
5039 struct pfr_table table;
5041 bzero(&table, sizeof(table));
5042 strlcpy(table.pfrt_anchor, ioe->anchor,
5043 sizeof(table.pfrt_anchor));
5044 if ((error = pfr_ina_begin(&table,
5045 &ioe->ticket, NULL, 0))) {
5053 if ((error = pf_begin_rules(&ioe->ticket,
5054 ioe->rs_num, ioe->anchor))) {
5063 error = copyout(ioes, io->array, totlen);
5068 case DIOCXROLLBACK: {
5069 struct pfioc_trans *io = (struct pfioc_trans *)addr;
5070 struct pfioc_trans_e *ioe, *ioes;
5074 if (io->esize != sizeof(*ioe)) {
5079 io->size > pf_ioctl_maxcount ||
5080 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5084 totlen = sizeof(struct pfioc_trans_e) * io->size;
5085 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5091 error = copyin(io->array, ioes, totlen);
5097 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5098 switch (ioe->rs_num) {
5100 case PF_RULESET_ALTQ:
5101 if (ioe->anchor[0]) {
5107 if ((error = pf_rollback_altq(ioe->ticket))) {
5110 goto fail; /* really bad */
5114 case PF_RULESET_TABLE:
5116 struct pfr_table table;
5118 bzero(&table, sizeof(table));
5119 strlcpy(table.pfrt_anchor, ioe->anchor,
5120 sizeof(table.pfrt_anchor));
5121 if ((error = pfr_ina_rollback(&table,
5122 ioe->ticket, NULL, 0))) {
5125 goto fail; /* really bad */
5130 if ((error = pf_rollback_rules(ioe->ticket,
5131 ioe->rs_num, ioe->anchor))) {
5134 goto fail; /* really bad */
5145 struct pfioc_trans *io = (struct pfioc_trans *)addr;
5146 struct pfioc_trans_e *ioe, *ioes;
5147 struct pf_kruleset *rs;
5151 if (io->esize != sizeof(*ioe)) {
5157 io->size > pf_ioctl_maxcount ||
5158 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5163 totlen = sizeof(struct pfioc_trans_e) * io->size;
5164 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5170 error = copyin(io->array, ioes, totlen);
5176 /* First, make sure everything will succeed. */
5177 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5178 switch (ioe->rs_num) {
5180 case PF_RULESET_ALTQ:
5181 if (ioe->anchor[0]) {
5187 if (!V_altqs_inactive_open || ioe->ticket !=
5188 V_ticket_altqs_inactive) {
5196 case PF_RULESET_TABLE:
5197 rs = pf_find_kruleset(ioe->anchor);
5198 if (rs == NULL || !rs->topen || ioe->ticket !=
5207 if (ioe->rs_num < 0 || ioe->rs_num >=
5214 rs = pf_find_kruleset(ioe->anchor);
5216 !rs->rules[ioe->rs_num].inactive.open ||
5217 rs->rules[ioe->rs_num].inactive.ticket !=
5227 /* Now do the commit - no errors should happen here. */
5228 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5229 switch (ioe->rs_num) {
5231 case PF_RULESET_ALTQ:
5232 if ((error = pf_commit_altq(ioe->ticket))) {
5235 goto fail; /* really bad */
5239 case PF_RULESET_TABLE:
5241 struct pfr_table table;
5243 bzero(&table, sizeof(table));
5244 strlcpy(table.pfrt_anchor, ioe->anchor,
5245 sizeof(table.pfrt_anchor));
5246 if ((error = pfr_ina_commit(&table,
5247 ioe->ticket, NULL, NULL, 0))) {
5250 goto fail; /* really bad */
5255 if ((error = pf_commit_rules(ioe->ticket,
5256 ioe->rs_num, ioe->anchor))) {
5259 goto fail; /* really bad */
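/* DIOCGETSRCNODES: export all source-tracking nodes to the caller's buffer. */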
5269 case DIOCGETSRCNODES: {
5270 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
5271 struct pf_srchash *sh;
5272 struct pf_ksrc_node *n;
5273 struct pf_src_node *p, *pstore;
5276 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5278 PF_HASHROW_LOCK(sh);
5279 LIST_FOREACH(n, &sh->nodes, entry)
5281 PF_HASHROW_UNLOCK(sh);
5284 psn->psn_len = min(psn->psn_len,
5285 sizeof(struct pf_src_node) * nr);
5287 if (psn->psn_len == 0) {
5288 psn->psn_len = sizeof(struct pf_src_node) * nr;
5294 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
5295 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5297 PF_HASHROW_LOCK(sh);
5298 LIST_FOREACH(n, &sh->nodes, entry) {
5300 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
5303 pf_src_node_copy(n, p);
5308 PF_HASHROW_UNLOCK(sh);
5310 error = copyout(pstore, psn->psn_src_nodes,
5311 sizeof(struct pf_src_node) * nr);
5313 free(pstore, M_TEMP);
5316 psn->psn_len = sizeof(struct pf_src_node) * nr;
5317 free(pstore, M_TEMP);
5321 case DIOCCLRSRCNODES: {
5322 pf_clear_srcnodes(NULL);
5323 pf_purge_expired_src_nodes();
5327 case DIOCKILLSRCNODES:
5328 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5331 case DIOCKEEPCOUNTERS:
5332 error = pf_keepcounters((struct pfioc_nv *)addr);
5335 case DIOCSETHOSTID: {
5336 u_int32_t *hostid = (u_int32_t *)addr;
5340 V_pf_status.hostid = arc4random();
5342 V_pf_status.hostid = *hostid;
5353 case DIOCIGETIFACES: {
5354 struct pfioc_iface *io = (struct pfioc_iface *)addr;
5355 struct pfi_kif *ifstore;
5358 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
5363 if (io->pfiio_size < 0 ||
5364 io->pfiio_size > pf_ioctl_maxcount ||
5365 WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
5370 bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
5371 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
5373 if (ifstore == NULL) {
5379 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
5381 error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5382 free(ifstore, M_TEMP);
5386 case DIOCSETIFFLAG: {
5387 struct pfioc_iface *io = (struct pfioc_iface *)addr;
5390 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5395 case DIOCCLRIFFLAG: {
5396 struct pfioc_iface *io = (struct pfioc_iface *)addr;
5399 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5409 if (sx_xlocked(&pf_ioctl_lock))
5410 sx_xunlock(&pf_ioctl_lock);
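/*
 * Serialize a kernel state into the pfsync export format, converting rule
 * numbers, timers and counters to network byte order.
 */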
5419 pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
5421 bzero(sp, sizeof(struct pfsync_state));
5423 /* copy from state key */
5424 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5425 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5426 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5427 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5428 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5429 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5430 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5431 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5432 sp->proto = st->key[PF_SK_WIRE]->proto;
5433 sp->af = st->key[PF_SK_WIRE]->af;
5435 /* copy from state */
5436 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5437 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
5438 sp->creation = htonl(time_uptime - st->creation);
5439 sp->expire = pf_state_expires(st);
5440 if (sp->expire <= time_uptime)
5441 sp->expire = htonl(0);
5443 sp->expire = htonl(sp->expire - time_uptime);
5445 sp->direction = st->direction;
5447 sp->timeout = st->timeout;
5448 sp->state_flags = st->state_flags;
5450 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5451 if (st->nat_src_node)
5452 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5455 sp->creatorid = st->creatorid;
5456 pf_state_peer_hton(&st->src, &sp->src);
5457 pf_state_peer_hton(&st->dst, &sp->dst);
5459 if (st->rule.ptr == NULL)
5460 sp->rule = htonl(-1);
5462 sp->rule = htonl(st->rule.ptr->nr);
5463 if (st->anchor.ptr == NULL)
5464 sp->anchor = htonl(-1);
5466 sp->anchor = htonl(st->anchor.ptr->nr);
5467 if (st->nat_rule.ptr == NULL)
5468 sp->nat_rule = htonl(-1);
5470 sp->nat_rule = htonl(st->nat_rule.ptr->nr);
5472 pf_state_counter_hton(counter_u64_fetch(st->packets[0]),
5474 pf_state_counter_hton(counter_u64_fetch(st->packets[1]),
5476 pf_state_counter_hton(counter_u64_fetch(st->bytes[0]), sp->bytes[0]);
5477 pf_state_counter_hton(counter_u64_fetch(st->bytes[1]), sp->bytes[1]);
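/*
 * Report the address count of the table behind a PF_ADDR_TABLE address
 * wrapper, falling back to the root table when the attached one is not
 * active.
 */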
5482 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
5484 struct pfr_ktable *kt;
5486 KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
5489 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
5490 kt = kt->pfrkt_root;
5492 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
5497 	 * XXX - Check for version mismatch!!!
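/*
 * Tear down every state in the id hash: each state is marked
 * PFTM_PURGE and PFSTATE_NOSYNC, then unlinked in place.
 */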
5500 pf_clear_all_states(void)
5505 for (i = 0; i <= pf_hashmask; i++) {
5506 struct pf_idhash *ih = &V_pf_idhash[i];
5508 PF_HASHROW_LOCK(ih);
5509 LIST_FOREACH(s, &ih->states, entry) {
5510 s->timeout = PFTM_PURGE;
5511 /* Don't send out individual delete messages. */
5512 s->state_flags |= PFSTATE_NOSYNC;
5513 pf_unlink_state(s, PF_ENTER_LOCKED);
5516 PF_HASHROW_UNLOCK(ih);
5521 pf_clear_tables(void)
5523 struct pfioc_table io;
5526 bzero(&io, sizeof(io));
5528 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
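/*
 * Detach source nodes from the states referencing them; with n == NULL
 * all nodes are cleared, otherwise only the given node.
 */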
5535 pf_clear_srcnodes(struct pf_ksrc_node *n)
5540 for (i = 0; i <= pf_hashmask; i++) {
5541 struct pf_idhash *ih = &V_pf_idhash[i];
5543 PF_HASHROW_LOCK(ih);
5544 LIST_FOREACH(s, &ih->states, entry) {
5545 if (n == NULL || n == s->src_node)
5547 if (n == NULL || n == s->nat_src_node)
5548 s->nat_src_node = NULL;
5550 PF_HASHROW_UNLOCK(ih);
5554 struct pf_srchash *sh;
5556 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5558 PF_HASHROW_LOCK(sh);
5559 LIST_FOREACH(n, &sh->nodes, entry) {
5563 PF_HASHROW_UNLOCK(sh);
5566 /* XXX: hash slot should already be locked here. */
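/*
 * Unlink every source node matching the src/dst filter in the request,
 * clear the state pointers that still reference the unlinked nodes and
 * report how many nodes were freed.
 */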
5573 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
5575 struct pf_ksrc_node_list kill;
5578 for (int i = 0; i <= pf_srchashmask; i++) {
5579 struct pf_srchash *sh = &V_pf_srchash[i];
5580 struct pf_ksrc_node *sn, *tmp;
5582 PF_HASHROW_LOCK(sh);
5583 LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
5584 if (PF_MATCHA(psnk->psnk_src.neg,
5585 &psnk->psnk_src.addr.v.a.addr,
5586 &psnk->psnk_src.addr.v.a.mask,
5587 &sn->addr, sn->af) &&
5588 PF_MATCHA(psnk->psnk_dst.neg,
5589 &psnk->psnk_dst.addr.v.a.addr,
5590 &psnk->psnk_dst.addr.v.a.mask,
5591 &sn->raddr, sn->af)) {
5592 pf_unlink_src_node(sn);
5593 LIST_INSERT_HEAD(&kill, sn, entry);
5596 PF_HASHROW_UNLOCK(sh);
5599 for (int i = 0; i <= pf_hashmask; i++) {
5600 struct pf_idhash *ih = &V_pf_idhash[i];
5603 PF_HASHROW_LOCK(ih);
5604 LIST_FOREACH(s, &ih->states, entry) {
5605 if (s->src_node && s->src_node->expire == 1)
5607 if (s->nat_src_node && s->nat_src_node->expire == 1)
5608 s->nat_src_node = NULL;
5610 PF_HASHROW_UNLOCK(ih);
5613 psnk->psnk_killed = pf_free_src_nodes(&kill);
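/*
 * DIOCKEEPCOUNTERS: copy in a packed nvlist carrying a single boolean,
 * "keep_counters", and latch it into V_pf_status.  A userland caller
 * might build the request with libnv roughly as follows (sketch only,
 * error handling omitted, "fd" being an open /dev/pf descriptor):
 *
 *	nvlist_t *nvl = nvlist_create(0);
 *	struct pfioc_nv nv;
 *
 *	nvlist_add_bool(nvl, "keep_counters", true);
 *	nv.data = nvlist_pack(nvl, &nv.len);
 *	nv.size = nv.len;
 *	ioctl(fd, DIOCKEEPCOUNTERS, &nv);
 */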
5617 pf_keepcounters(struct pfioc_nv *nv)
5619 nvlist_t *nvl = NULL;
5620 void *nvlpacked = NULL;
5623 #define ERROUT(x) ERROUT_FUNCTION(on_error, x)
5625 if (nv->len > pf_ioctl_maxcount)
5628 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
5629 if (nvlpacked == NULL)
5632 error = copyin(nv->data, nvlpacked, nv->len);
5636 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
5640 if (! nvlist_exists_bool(nvl, "keep_counters"))
5643 V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");
5646 nvlist_destroy(nvl);
5647 free(nvlpacked, M_TEMP);
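/*
 * Flush states, optionally restricted to a single interface.  With
 * psk_kill_match set a reversed copy of each state key is built and any
 * states matching it are removed as well.
 */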
5652 pf_clear_states(const struct pf_kstate_kill *kill)
5654 struct pf_state_key_cmp match_key;
5657 unsigned int killed = 0, dir;
5659 for (unsigned int i = 0; i <= pf_hashmask; i++) {
5660 struct pf_idhash *ih = &V_pf_idhash[i];
5662 relock_DIOCCLRSTATES:
5663 PF_HASHROW_LOCK(ih);
5664 LIST_FOREACH(s, &ih->states, entry) {
5665 if (kill->psk_ifname[0] &&
5666 strcmp(kill->psk_ifname,
5670 if (kill->psk_kill_match) {
5671 bzero(&match_key, sizeof(match_key));
5673 if (s->direction == PF_OUT) {
5681 match_key.af = s->key[idx]->af;
5682 match_key.proto = s->key[idx]->proto;
5683 PF_ACPY(&match_key.addr[0],
5684 &s->key[idx]->addr[1], match_key.af);
5685 match_key.port[0] = s->key[idx]->port[1];
5686 PF_ACPY(&match_key.addr[1],
5687 &s->key[idx]->addr[0], match_key.af);
5688 match_key.port[1] = s->key[idx]->port[0];
5692 	 * Don't send out individual delete messages.
5695 s->state_flags |= PFSTATE_NOSYNC;
5696 pf_unlink_state(s, PF_ENTER_LOCKED);
5699 if (kill->psk_kill_match)
5700 killed += pf_kill_matching_state(&match_key,
5703 goto relock_DIOCCLRSTATES;
5705 PF_HASHROW_UNLOCK(ih);
5708 if (V_pfsync_clear_states_ptr != NULL)
5709 V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);
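/*
 * Kill states either by an explicit id/creatorid pair or, absent one,
 * by walking every id hash row and applying the filter from the request.
 */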
5715 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
5719 if (kill->psk_pfcmp.id) {
5720 if (kill->psk_pfcmp.creatorid == 0)
5721 kill->psk_pfcmp.creatorid = V_pf_status.hostid;
5722 if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
5723 kill->psk_pfcmp.creatorid))) {
5724 pf_unlink_state(s, PF_ENTER_LOCKED);
5730 for (unsigned int i = 0; i <= pf_hashmask; i++)
5731 *killed += pf_killstates_row(kill, &V_pf_idhash[i]);
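/*
 * nvlist front end for pf_killstates(): unpack the kill description from
 * userland, run it and hand the "killed" count back in a packed nvlist.
 */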
5737 pf_killstates_nv(struct pfioc_nv *nv)
5739 struct pf_kstate_kill kill;
5740 nvlist_t *nvl = NULL;
5741 void *nvlpacked = NULL;
5743 unsigned int killed = 0;
5745 #define ERROUT(x) ERROUT_FUNCTION(on_error, x)
5747 if (nv->len > pf_ioctl_maxcount)
5750 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
5751 if (nvlpacked == NULL)
5754 error = copyin(nv->data, nvlpacked, nv->len);
5758 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
5762 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
5766 error = pf_killstates(&kill, &killed);
5768 free(nvlpacked, M_TEMP);
5770 nvlist_destroy(nvl);
5771 nvl = nvlist_create(0);
5775 nvlist_add_number(nvl, "killed", killed);
5777 nvlpacked = nvlist_pack(nvl, &nv->len);
5778 if (nvlpacked == NULL)
5783 else if (nv->size < nv->len)
5786 error = copyout(nvlpacked, nv->data, nv->len);
5789 nvlist_destroy(nvl);
5790 free(nvlpacked, M_TEMP);
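/*
 * nvlist front end for pf_clear_states(): unpack the kill description,
 * perform the flush and return the number of removed states.
 */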
5795 pf_clearstates_nv(struct pfioc_nv *nv)
5797 struct pf_kstate_kill kill;
5798 nvlist_t *nvl = NULL;
5799 void *nvlpacked = NULL;
5801 unsigned int killed;
5803 #define ERROUT(x) ERROUT_FUNCTION(on_error, x)
5805 if (nv->len > pf_ioctl_maxcount)
5808 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
5809 if (nvlpacked == NULL)
5812 error = copyin(nv->data, nvlpacked, nv->len);
5816 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
5820 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
5824 killed = pf_clear_states(&kill);
5826 free(nvlpacked, M_TEMP);
5828 nvlist_destroy(nvl);
5829 nvl = nvlist_create(0);
5833 nvlist_add_number(nvl, "killed", killed);
5835 nvlpacked = nvlist_pack(nvl, &nv->len);
5836 if (nvlpacked == NULL)
5841 else if (nv->size < nv->len)
5844 error = copyout(nvlpacked, nv->data, nv->len);
5848 nvlist_destroy(nvl);
5849 free(nvlpacked, M_TEMP);
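/*
 * Look up a single state by the id/creatorid pair taken from a packed
 * nvlist and return its nvlist representation to userland.
 */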
5854 pf_getstate(struct pfioc_nv *nv)
5856 nvlist_t *nvl = NULL, *nvls;
5857 void *nvlpacked = NULL;
5858 struct pf_state *s = NULL;
5860 uint64_t id, creatorid;
5862 #define ERROUT(x) ERROUT_FUNCTION(errout, x)
5864 if (nv->len > pf_ioctl_maxcount)
5867 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
5868 if (nvlpacked == NULL)
5871 error = copyin(nv->data, nvlpacked, nv->len);
5875 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
5879 PFNV_CHK(pf_nvuint64(nvl, "id", &id));
5880 PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));
5882 s = pf_find_state_byid(id, creatorid);
5886 free(nvlpacked, M_TEMP);
5888 nvlist_destroy(nvl);
5889 nvl = nvlist_create(0);
5893 nvls = pf_state_to_nvstate(s);
5897 nvlist_add_nvlist(nvl, "state", nvls);
5899 nvlpacked = nvlist_pack(nvl, &nv->len);
5900 if (nvlpacked == NULL)
5905 else if (nv->size < nv->len)
5908 error = copyout(nvlpacked, nv->data, nv->len);
5914 free(nvlpacked, M_TEMP);
5915 nvlist_destroy(nvl);
5920 	 * XXX - Check for version mismatch!!!
5924 	 * Duplicate the pfctl -Fa operation to get rid of as much as we can.
5934 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
5936 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
5939 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
5941 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
5942 break; /* XXX: rollback? */
5944 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
5946 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
5947 break; /* XXX: rollback? */
5949 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
5951 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
5952 break; /* XXX: rollback? */
5954 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
5956 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
5957 break; /* XXX: rollback? */
5960 /* XXX: these should always succeed here */
5961 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
5962 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
5963 pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
5964 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
5965 pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
5967 if ((error = pf_clear_tables()) != 0)
5971 if ((error = pf_begin_altq(&t[0])) != 0) {
5972 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
5975 pf_commit_altq(t[0]);
5978 pf_clear_all_states();
5980 pf_clear_srcnodes(NULL);
5982 	/* status does not use malloc'ed memory, so there is nothing to clean up */
5983 /* fingerprints and interfaces have their own cleanup code */
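/*
 * Translate a pf_test()/pf_test6() verdict into a pfil(9) return code:
 * a consumed mbuf is reported as PFIL_CONSUMED, any verdict other than
 * PF_PASS drops the packet.
 */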
5989 static pfil_return_t
5990 pf_check_return(int chk, struct mbuf **m)
5996 return (PFIL_CONSUMED);
6005 return (PFIL_DROPPED);
6010 static pfil_return_t
6011 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6012 void *ruleset __unused, struct inpcb *inp)
6016 chk = pf_test(PF_IN, flags, ifp, m, inp);
6018 return (pf_check_return(chk, m));
6021 static pfil_return_t
6022 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6023 void *ruleset __unused, struct inpcb *inp)
6027 chk = pf_test(PF_OUT, flags, ifp, m, inp);
6029 return (pf_check_return(chk, m));
6034 static pfil_return_t
6035 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
6036 void *ruleset __unused, struct inpcb *inp)
6041 	 * In case of loopback traffic IPv6 uses the real interface in
6042 	 * order to support scoped addresses.  To support stateful
6043 	 * filtering we change this to lo0, as is the case in IPv4.
6045 CURVNET_SET(ifp->if_vnet);
6046 chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp);
6049 return (pf_check_return(chk, m));
6052 static pfil_return_t
6053 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
6054 void *ruleset __unused, struct inpcb *inp)
6058 CURVNET_SET(ifp->if_vnet);
6059 chk = pf_test6(PF_OUT, flags, ifp, m, inp);
6062 return (pf_check_return(chk, m));
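/*
 * Per-vnet pfil hook handles for the IPv4 and IPv6 input/output paths;
 * they are registered and linked below and removed when pf is unhooked.
 */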
6067 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
6068 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
6069 #define V_pf_ip4_in_hook VNET(pf_ip4_in_hook)
6070 #define V_pf_ip4_out_hook VNET(pf_ip4_out_hook)
6073 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
6074 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
6075 #define V_pf_ip6_in_hook VNET(pf_ip6_in_hook)
6076 #define V_pf_ip6_out_hook VNET(pf_ip6_out_hook)
6082 struct pfil_hook_args pha;
6083 struct pfil_link_args pla;
6086 if (V_pf_pfil_hooked)
6089 pha.pa_version = PFIL_VERSION;
6090 pha.pa_modname = "pf";
6091 pha.pa_ruleset = NULL;
6093 pla.pa_version = PFIL_VERSION;
6096 pha.pa_type = PFIL_TYPE_IP4;
6097 pha.pa_func = pf_check_in;
6098 pha.pa_flags = PFIL_IN;
6099 pha.pa_rulname = "default-in";
6100 V_pf_ip4_in_hook = pfil_add_hook(&pha);
6101 pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6102 pla.pa_head = V_inet_pfil_head;
6103 pla.pa_hook = V_pf_ip4_in_hook;
6104 ret = pfil_link(&pla);
6106 pha.pa_func = pf_check_out;
6107 pha.pa_flags = PFIL_OUT;
6108 pha.pa_rulname = "default-out";
6109 V_pf_ip4_out_hook = pfil_add_hook(&pha);
6110 pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6111 pla.pa_head = V_inet_pfil_head;
6112 pla.pa_hook = V_pf_ip4_out_hook;
6113 ret = pfil_link(&pla);
6117 pha.pa_type = PFIL_TYPE_IP6;
6118 pha.pa_func = pf_check6_in;
6119 pha.pa_flags = PFIL_IN;
6120 pha.pa_rulname = "default-in6";
6121 V_pf_ip6_in_hook = pfil_add_hook(&pha);
6122 pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6123 pla.pa_head = V_inet6_pfil_head;
6124 pla.pa_hook = V_pf_ip6_in_hook;
6125 ret = pfil_link(&pla);
6127 pha.pa_func = pf_check6_out;
6128 pha.pa_rulname = "default-out6";
6129 pha.pa_flags = PFIL_OUT;
6130 V_pf_ip6_out_hook = pfil_add_hook(&pha);
6131 pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6132 pla.pa_head = V_inet6_pfil_head;
6133 pla.pa_hook = V_pf_ip6_out_hook;
6134 ret = pfil_link(&pla);
6138 V_pf_pfil_hooked = 1;
6145 if (V_pf_pfil_hooked == 0)
6149 pfil_remove_hook(V_pf_ip4_in_hook);
6150 pfil_remove_hook(V_pf_ip4_out_hook);
6153 pfil_remove_hook(V_pf_ip6_in_hook);
6154 pfil_remove_hook(V_pf_ip6_out_hook);
6157 V_pf_pfil_hooked = 0;
6163 V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
6164 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
6166 pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
6167 PF_RULE_TAG_HASH_SIZE_DEFAULT);
6169 pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
6170 PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
6174 V_pf_vnet_active = 1;
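/*
 * Global (non-vnet) load path: set up the rule, ioctl and end-thread
 * locks, create /dev/pf and start the purge kernel process.
 */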
6182 rm_init(&pf_rules_lock, "pf rulesets");
6183 sx_init(&pf_ioctl_lock, "pf ioctl");
6184 sx_init(&pf_end_lock, "pf end thread");
6186 pf_mtag_initialize();
6188 pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
6193 error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
6203 pf_unload_vnet(void)
6207 V_pf_vnet_active = 0;
6208 V_pf_status.running = 0;
6215 ret = swi_remove(V_pf_swi_cookie);
6217 ret = intr_event_destroy(V_pf_swi_ie);
6220 pf_unload_vnet_purge();
6222 pf_normalize_cleanup();
6229 if (IS_DEFAULT_VNET(curvnet))
6232 pf_cleanup_tagset(&V_pf_tags);
6234 pf_cleanup_tagset(&V_pf_qids);
6236 uma_zdestroy(V_pf_tag_z);
6238 /* Free counters last as we updated them during shutdown. */
6239 counter_u64_free(V_pf_default_rule.evaluations);
6240 for (int i = 0; i < 2; i++) {
6241 counter_u64_free(V_pf_default_rule.packets[i]);
6242 counter_u64_free(V_pf_default_rule.bytes[i]);
6244 counter_u64_free(V_pf_default_rule.states_cur);
6245 counter_u64_free(V_pf_default_rule.states_tot);
6246 counter_u64_free(V_pf_default_rule.src_nodes);
6248 for (int i = 0; i < PFRES_MAX; i++)
6249 counter_u64_free(V_pf_status.counters[i]);
6250 for (int i = 0; i < LCNT_MAX; i++)
6251 counter_u64_free(V_pf_status.lcounters[i]);
6252 for (int i = 0; i < FCNT_MAX; i++)
6253 counter_u64_free(V_pf_status.fcounters[i]);
6254 for (int i = 0; i < SCNT_MAX; i++)
6255 counter_u64_free(V_pf_status.scounters[i]);
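/*
 * Final unload: wake the purge thread and wait for it to exit before
 * destroying the control device and the global locks.
 */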
6262 sx_xlock(&pf_end_lock);
6264 while (pf_end_threads < 2) {
6265 wakeup_one(pf_purge_thread);
6266 sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
6268 sx_xunlock(&pf_end_lock);
6271 destroy_dev(pf_dev);
6275 rm_destroy(&pf_rules_lock);
6276 sx_destroy(&pf_ioctl_lock);
6277 sx_destroy(&pf_end_lock);
6281 vnet_pf_init(void *unused __unused)
6286 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6287 vnet_pf_init, NULL);
6290 vnet_pf_uninit(const void *unused __unused)
6295 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
6296 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6297 vnet_pf_uninit, NULL);
6300 pf_modevent(module_t mod, int type, void *data)
6309 /* Handled in SYSUNINIT(pf_unload) to ensure it's done after
6310 * the vnet_pf_uninit()s */
6320 static moduledata_t pf_mod = {
6326 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
6327 MODULE_VERSION(pf, PF_MODVER);