2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2001 Daniel Hartmeier
5 * Copyright (c) 2002,2003 Henning Brauer
6 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
13 * - Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
33 * Effort sponsored in part by the Defense Advanced Research Projects
34 * Agency (DARPA) and Air Force Research Laboratory, Air Force
35 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
37 * $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
44 #include "opt_inet6.h"
48 #include <sys/param.h>
49 #include <sys/_bitset.h>
50 #include <sys/bitset.h>
53 #include <sys/endian.h>
54 #include <sys/fcntl.h>
55 #include <sys/filio.h>
57 #include <sys/interrupt.h>
59 #include <sys/kernel.h>
60 #include <sys/kthread.h>
63 #include <sys/module.h>
67 #include <sys/socket.h>
68 #include <sys/sysctl.h>
70 #include <sys/ucred.h>
73 #include <net/if_var.h>
75 #include <net/route.h>
77 #include <net/pfvar.h>
78 #include <net/if_pfsync.h>
79 #include <net/if_pflog.h>
81 #include <netinet/in.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip_var.h>
84 #include <netinet6/ip6_var.h>
85 #include <netinet/ip_icmp.h>
86 #include <netpfil/pf/pf_nv.h>
89 #include <netinet/ip6.h>
93 #include <net/altq/altq.h>
96 static struct pf_kpool *pf_get_kpool(char *, u_int32_t, u_int8_t, u_int32_t,
97 u_int8_t, u_int8_t, u_int8_t);
99 static void pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
100 static void pf_empty_kpool(struct pf_kpalist *);
101 static int pfioctl(struct cdev *, u_long, caddr_t, int,
104 static int pf_begin_altq(u_int32_t *);
105 static int pf_rollback_altq(u_int32_t);
106 static int pf_commit_altq(u_int32_t);
107 static int pf_enable_altq(struct pf_altq *);
108 static int pf_disable_altq(struct pf_altq *);
109 static u_int32_t pf_qname2qid(char *);
110 static void pf_qid_unref(u_int32_t);
112 static int pf_begin_rules(u_int32_t *, int, const char *);
113 static int pf_rollback_rules(u_int32_t, int, char *);
114 static int pf_setup_pfsync_matching(struct pf_kruleset *);
115 static void pf_hash_rule(MD5_CTX *, struct pf_krule *);
116 static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
117 static int pf_commit_rules(u_int32_t, int, char *);
118 static int pf_addr_setup(struct pf_kruleset *,
119 struct pf_addr_wrap *, sa_family_t);
120 static void pf_addr_copyout(struct pf_addr_wrap *);
121 static void pf_src_node_copy(const struct pf_ksrc_node *,
122 struct pf_src_node *);
124 static int pf_export_kaltq(struct pf_altq *,
125 struct pfioc_altq_v1 *, size_t);
126 static int pf_import_kaltq(struct pfioc_altq_v1 *,
127 struct pf_altq *, size_t);
130 VNET_DEFINE(struct pf_krule, pf_default_rule);
133 VNET_DEFINE_STATIC(int, pf_altq_running);
134 #define V_pf_altq_running VNET(pf_altq_running)
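/*
 * Note on the per-vnet pattern used throughout this file:
 * VNET_DEFINE()/VNET_DEFINE_STATIC() give each virtual network stack (see
 * VNET(9)) its own instance of a variable, and the accompanying V_foo
 * macro expands to the current vnet's copy of it.
 */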
137 #define TAGID_MAX 50000
139 TAILQ_ENTRY(pf_tagname) namehash_entries;
140 TAILQ_ENTRY(pf_tagname) taghash_entries;
141 char name[PF_TAG_NAME_SIZE];
147 TAILQ_HEAD(, pf_tagname) *namehash;
148 TAILQ_HEAD(, pf_tagname) *taghash;
151 BITSET_DEFINE(, TAGID_MAX) avail;
154 VNET_DEFINE(struct pf_tagset, pf_tags);
155 #define V_pf_tags VNET(pf_tags)
156 static unsigned int pf_rule_tag_hashsize;
157 #define PF_RULE_TAG_HASH_SIZE_DEFAULT 128
158 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
159 &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
160 "Size of pf(4) rule tag hashtable");
163 VNET_DEFINE(struct pf_tagset, pf_qids);
164 #define V_pf_qids VNET(pf_qids)
165 static unsigned int pf_queue_tag_hashsize;
166 #define PF_QUEUE_TAG_HASH_SIZE_DEFAULT 128
167 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
168 &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
169 "Size of pf(4) queue tag hashtable");
171 VNET_DEFINE(uma_zone_t, pf_tag_z);
172 #define V_pf_tag_z VNET(pf_tag_z)
173 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
174 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
176 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
177 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
180 static void pf_init_tagset(struct pf_tagset *, unsigned int *,
182 static void pf_cleanup_tagset(struct pf_tagset *);
183 static uint16_t tagname2hashindex(const struct pf_tagset *, const char *);
184 static uint16_t tag2hashindex(const struct pf_tagset *, uint16_t);
185 static u_int16_t tagname2tag(struct pf_tagset *, char *);
186 static u_int16_t pf_tagname2tag(char *);
187 static void tag_unref(struct pf_tagset *, u_int16_t);
189 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
194  * XXX - These are new and need to be checked when moving to a new version
196 static void pf_clear_states(void);
197 static int pf_clear_tables(void);
198 static void pf_clear_srcnodes(struct pf_ksrc_node *);
199 static void pf_kill_srcnodes(struct pfioc_src_node_kill *);
200 static void pf_tbladdr_copyout(struct pf_addr_wrap *);
203 * Wrapper functions for pfil(9) hooks
206 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
207 int flags, void *ruleset __unused, struct inpcb *inp);
208 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
209 int flags, void *ruleset __unused, struct inpcb *inp);
212 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
213 int flags, void *ruleset __unused, struct inpcb *inp);
214 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
215 int flags, void *ruleset __unused, struct inpcb *inp);
218 static void hook_pf(void);
219 static void dehook_pf(void);
220 static int shutdown_pf(void);
221 static int pf_load(void);
222 static void pf_unload(void);
224 static struct cdevsw pf_cdevsw = {
227 .d_version = D_VERSION,
230 volatile VNET_DEFINE_STATIC(int, pf_pfil_hooked);
231 #define V_pf_pfil_hooked VNET(pf_pfil_hooked)
234  * We need a flag, distinct from "hooked" and "running", to know when
235  * the VNET is "valid".  We primarily need this to control (global)
236  * external events, e.g., eventhandlers.
238 VNET_DEFINE(int, pf_vnet_active);
239 #define V_pf_vnet_active VNET(pf_vnet_active)
242 struct proc *pf_purge_proc;
244 struct rmlock pf_rules_lock;
245 struct sx pf_ioctl_lock;
246 struct sx pf_end_lock;
249 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
250 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
251 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
252 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
253 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
254 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
255 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
258 pflog_packet_t *pflog_packet_ptr = NULL;
260 extern u_long pf_ioctl_maxcount;
265 u_int32_t *my_timeout = V_pf_default_rule.timeout;
269 pfi_initialize_vnet();
272 V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
273 V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
275 RB_INIT(&V_pf_anchors);
276 pf_init_kruleset(&pf_main_ruleset);
278 /* default rule should never be garbage collected */
279 V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
280 #ifdef PF_DEFAULT_TO_DROP
281 V_pf_default_rule.action = PF_DROP;
283 V_pf_default_rule.action = PF_PASS;
285 V_pf_default_rule.nr = -1;
286 V_pf_default_rule.rtableid = -1;
288 V_pf_default_rule.evaluations = counter_u64_alloc(M_WAITOK);
289 for (int i = 0; i < 2; i++) {
290 V_pf_default_rule.packets[i] = counter_u64_alloc(M_WAITOK);
291 V_pf_default_rule.bytes[i] = counter_u64_alloc(M_WAITOK);
293 V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
294 V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
295 V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);
297 /* initialize default timeouts */
298 my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
299 my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
300 my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
301 my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
302 my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
303 my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
304 my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
305 my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
306 my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
307 my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
308 my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
309 my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
310 my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
311 my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
312 my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
313 my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
314 my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
315 my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
316 my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
317 my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
319 bzero(&V_pf_status, sizeof(V_pf_status));
320 V_pf_status.debug = PF_DEBUG_URGENT;
322 V_pf_pfil_hooked = 0;
324 /* XXX do our best to avoid a conflict */
325 V_pf_status.hostid = arc4random();
327 for (int i = 0; i < PFRES_MAX; i++)
328 V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
329 for (int i = 0; i < LCNT_MAX; i++)
330 V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
331 for (int i = 0; i < FCNT_MAX; i++)
332 V_pf_status.fcounters[i] = counter_u64_alloc(M_WAITOK);
333 for (int i = 0; i < SCNT_MAX; i++)
334 V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
336 if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
337 INTR_MPSAFE, &V_pf_swi_cookie) != 0)
338 /* XXXGL: leaked all above. */
342 static struct pf_kpool *
343 pf_get_kpool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
344 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
345 u_int8_t check_ticket)
347 struct pf_kruleset *ruleset;
348 struct pf_krule *rule;
351 ruleset = pf_find_kruleset(anchor);
354 rs_num = pf_get_ruleset_number(rule_action);
355 if (rs_num >= PF_RULESET_MAX)
358 if (check_ticket && ticket !=
359 ruleset->rules[rs_num].active.ticket)
362 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
365 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
367 if (check_ticket && ticket !=
368 ruleset->rules[rs_num].inactive.ticket)
371 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
374 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
377 while ((rule != NULL) && (rule->nr != rule_number))
378 rule = TAILQ_NEXT(rule, entries);
383 return (&rule->rpool);
387 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
389 struct pf_kpooladdr *mv_pool_pa;
391 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
392 TAILQ_REMOVE(poola, mv_pool_pa, entries);
393 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
398 pf_empty_kpool(struct pf_kpalist *poola)
400 struct pf_kpooladdr *pa;
402 while ((pa = TAILQ_FIRST(poola)) != NULL) {
403 switch (pa->addr.type) {
404 case PF_ADDR_DYNIFTL:
405 pfi_dynaddr_remove(pa->addr.p.dyn);
408 			/* XXX: this could be an unfinished pooladdr on the pabuf */
409 if (pa->addr.p.tbl != NULL)
410 pfr_detach_table(pa->addr.p.tbl);
414 pfi_kkif_unref(pa->kif);
415 TAILQ_REMOVE(poola, pa, entries);
421 pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
426 TAILQ_REMOVE(rulequeue, rule, entries);
428 PF_UNLNKDRULES_LOCK();
429 rule->rule_flag |= PFRULE_REFS;
430 TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
431 PF_UNLNKDRULES_UNLOCK();
435 pf_free_rule(struct pf_krule *rule)
441 tag_unref(&V_pf_tags, rule->tag);
443 tag_unref(&V_pf_tags, rule->match_tag);
445 if (rule->pqid != rule->qid)
446 pf_qid_unref(rule->pqid);
447 pf_qid_unref(rule->qid);
449 switch (rule->src.addr.type) {
450 case PF_ADDR_DYNIFTL:
451 pfi_dynaddr_remove(rule->src.addr.p.dyn);
454 pfr_detach_table(rule->src.addr.p.tbl);
457 switch (rule->dst.addr.type) {
458 case PF_ADDR_DYNIFTL:
459 pfi_dynaddr_remove(rule->dst.addr.p.dyn);
462 pfr_detach_table(rule->dst.addr.p.tbl);
465 if (rule->overload_tbl)
466 pfr_detach_table(rule->overload_tbl);
468 pfi_kkif_unref(rule->kif);
469 pf_kanchor_remove(rule);
470 pf_empty_kpool(&rule->rpool.list);
476 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
477 unsigned int default_size)
480 unsigned int hashsize;
482 if (*tunable_size == 0 || !powerof2(*tunable_size))
483 *tunable_size = default_size;
485 hashsize = *tunable_size;
486 ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
488 ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
490 ts->mask = hashsize - 1;
491 ts->seed = arc4random();
492 for (i = 0; i < hashsize; i++) {
493 TAILQ_INIT(&ts->namehash[i]);
494 TAILQ_INIT(&ts->taghash[i]);
496 BIT_FILL(TAGID_MAX, &ts->avail);
500 pf_cleanup_tagset(struct pf_tagset *ts)
503 unsigned int hashsize;
504 struct pf_tagname *t, *tmp;
507 	 * Only need to clean up one of the hashes, as each tag is hashed
510 hashsize = ts->mask + 1;
511 for (i = 0; i < hashsize; i++)
512 TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
513 uma_zfree(V_pf_tag_z, t);
515 free(ts->namehash, M_PFHASH);
516 free(ts->taghash, M_PFHASH);
520 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
524 len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
525 return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
529 tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
532 return (tag & ts->mask);
536 tagname2tag(struct pf_tagset *ts, char *tagname)
538 struct pf_tagname *tag;
544 index = tagname2hashindex(ts, tagname);
545 TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
546 if (strcmp(tagname, tag->name) == 0) {
554 * to avoid fragmentation, we do a linear search from the beginning
555 * and take the first free slot we find.
557 new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
559 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
560 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
561 * set. It may also return a bit number greater than TAGID_MAX due
562 * to rounding of the number of bits in the vector up to a multiple
563 * of the vector word size at declaration/allocation time.
565 if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
568 /* Mark the tag as in use. Bits are 0-based for BIT_CLR() */
569 BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
571 /* allocate and fill new struct pf_tagname */
572 tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
575 strlcpy(tag->name, tagname, sizeof(tag->name));
576 tag->tag = new_tagid;
579 /* Insert into namehash */
580 TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
582 /* Insert into taghash */
583 index = tag2hashindex(ts, new_tagid);
584 TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
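/*
 * Illustrative sequence for the bitset bookkeeping used above and in
 * tag_unref() below ("avail" starts out filled by pf_init_tagset(), so a
 * set bit means a free tag):
 *
 *	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);	// e.g. returns 1
 *	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);	// tag 1 now in use
 *	...
 *	BIT_SET(TAGID_MAX, tag - 1, &ts->avail);	// tag 1 free again
 */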
590 tag_unref(struct pf_tagset *ts, u_int16_t tag)
592 struct pf_tagname *t;
597 index = tag2hashindex(ts, tag);
598 TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
601 TAILQ_REMOVE(&ts->taghash[index], t,
603 index = tagname2hashindex(ts, t->name);
604 TAILQ_REMOVE(&ts->namehash[index], t,
606 /* Bits are 0-based for BIT_SET() */
607 BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
608 uma_zfree(V_pf_tag_z, t);
615 pf_tagname2tag(char *tagname)
617 return (tagname2tag(&V_pf_tags, tagname));
622 pf_qname2qid(char *qname)
624 return ((u_int32_t)tagname2tag(&V_pf_qids, qname));
628 pf_qid_unref(u_int32_t qid)
630 tag_unref(&V_pf_qids, (u_int16_t)qid);
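/*
 * Queue names reuse the same tag machinery via the V_pf_qids tagset: a qid
 * is simply a tag widened to u_int32_t on lookup and narrowed back to
 * u_int16_t on release, as seen above.
 */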
634 pf_begin_altq(u_int32_t *ticket)
636 struct pf_altq *altq, *tmp;
641 /* Purge the old altq lists */
642 TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
643 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
644 /* detach and destroy the discipline */
645 error = altq_remove(altq);
647 free(altq, M_PFALTQ);
649 TAILQ_INIT(V_pf_altq_ifs_inactive);
650 TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
651 pf_qid_unref(altq->qid);
652 free(altq, M_PFALTQ);
654 TAILQ_INIT(V_pf_altqs_inactive);
657 *ticket = ++V_ticket_altqs_inactive;
658 V_altqs_inactive_open = 1;
663 pf_rollback_altq(u_int32_t ticket)
665 struct pf_altq *altq, *tmp;
670 if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
672 /* Purge the old altq lists */
673 TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
674 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
675 /* detach and destroy the discipline */
676 error = altq_remove(altq);
678 free(altq, M_PFALTQ);
680 TAILQ_INIT(V_pf_altq_ifs_inactive);
681 TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
682 pf_qid_unref(altq->qid);
683 free(altq, M_PFALTQ);
685 TAILQ_INIT(V_pf_altqs_inactive);
686 V_altqs_inactive_open = 0;
691 pf_commit_altq(u_int32_t ticket)
693 struct pf_altqqueue *old_altqs, *old_altq_ifs;
694 struct pf_altq *altq, *tmp;
699 if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
702 /* swap altqs, keep the old. */
703 old_altqs = V_pf_altqs_active;
704 old_altq_ifs = V_pf_altq_ifs_active;
705 V_pf_altqs_active = V_pf_altqs_inactive;
706 V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
707 V_pf_altqs_inactive = old_altqs;
708 V_pf_altq_ifs_inactive = old_altq_ifs;
709 V_ticket_altqs_active = V_ticket_altqs_inactive;
711 /* Attach new disciplines */
712 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
713 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
714 /* attach the discipline */
715 error = altq_pfattach(altq);
716 if (error == 0 && V_pf_altq_running)
717 error = pf_enable_altq(altq);
723 /* Purge the old altq lists */
724 TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
725 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
726 /* detach and destroy the discipline */
727 if (V_pf_altq_running)
728 error = pf_disable_altq(altq);
729 err = altq_pfdetach(altq);
730 if (err != 0 && error == 0)
732 err = altq_remove(altq);
733 if (err != 0 && error == 0)
736 free(altq, M_PFALTQ);
738 TAILQ_INIT(V_pf_altq_ifs_inactive);
739 TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
740 pf_qid_unref(altq->qid);
741 free(altq, M_PFALTQ);
743 TAILQ_INIT(V_pf_altqs_inactive);
745 V_altqs_inactive_open = 0;
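/*
 * Summary of the ALTQ transaction model above: an update calls
 * pf_begin_altq() to purge and reopen the inactive lists under a fresh
 * ticket, loads the new queues into those lists, and then either calls
 * pf_commit_altq() -- which swaps the active and inactive lists, attaches
 * and enables the new disciplines, and tears down the old ones -- or
 * pf_rollback_altq(), which discards the inactive lists.
 * pf_altq_ifnet_event() below follows this same begin/copy/commit sequence
 * when an interface arrives or departs.
 */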
750 pf_enable_altq(struct pf_altq *altq)
753 struct tb_profile tb;
756 if ((ifp = ifunit(altq->ifname)) == NULL)
759 if (ifp->if_snd.altq_type != ALTQT_NONE)
760 error = altq_enable(&ifp->if_snd);
762 /* set tokenbucket regulator */
763 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
764 tb.rate = altq->ifbandwidth;
765 tb.depth = altq->tbrsize;
766 error = tbr_set(&ifp->if_snd, &tb);
773 pf_disable_altq(struct pf_altq *altq)
776 struct tb_profile tb;
779 if ((ifp = ifunit(altq->ifname)) == NULL)
783 	 * If the discipline is no longer referenced, it was overridden
784 	 * by a new one; in that case, just return.
786 if (altq->altq_disc != ifp->if_snd.altq_disc)
789 error = altq_disable(&ifp->if_snd);
792 /* clear tokenbucket regulator */
794 error = tbr_set(&ifp->if_snd, &tb);
801 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
802 struct pf_altq *altq)
807 /* Deactivate the interface in question */
808 altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
809 if ((ifp1 = ifunit(altq->ifname)) == NULL ||
810 (remove && ifp1 == ifp)) {
811 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
813 error = altq_add(ifp1, altq);
815 if (ticket != V_ticket_altqs_inactive)
819 free(altq, M_PFALTQ);
826 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
828 struct pf_altq *a1, *a2, *a3;
833 * No need to re-evaluate the configuration for events on interfaces
834 * that do not support ALTQ, as it's not possible for such
835 * interfaces to be part of the configuration.
837 if (!ALTQ_IS_READY(&ifp->if_snd))
840 /* Interrupt userland queue modifications */
841 if (V_altqs_inactive_open)
842 pf_rollback_altq(V_ticket_altqs_inactive);
844 /* Start new altq ruleset */
845 if (pf_begin_altq(&ticket))
848 /* Copy the current active set */
849 TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
850 a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
855 bcopy(a1, a2, sizeof(struct pf_altq));
857 error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
861 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
865 TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
866 a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
871 bcopy(a1, a2, sizeof(struct pf_altq));
873 if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
878 a2->altq_disc = NULL;
879 TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
880 if (strncmp(a3->ifname, a2->ifname,
882 a2->altq_disc = a3->altq_disc;
886 error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
890 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
895 pf_rollback_altq(ticket);
897 pf_commit_altq(ticket);
902 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
904 struct pf_kruleset *rs;
905 struct pf_krule *rule;
909 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
911 rs = pf_find_or_create_kruleset(anchor);
914 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
915 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
916 rs->rules[rs_num].inactive.rcount--;
918 *ticket = ++rs->rules[rs_num].inactive.ticket;
919 rs->rules[rs_num].inactive.open = 1;
924 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
926 struct pf_kruleset *rs;
927 struct pf_krule *rule;
931 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
933 rs = pf_find_kruleset(anchor);
934 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
935 rs->rules[rs_num].inactive.ticket != ticket)
937 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
938 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
939 rs->rules[rs_num].inactive.rcount--;
941 rs->rules[rs_num].inactive.open = 0;
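/*
 * Rulesets follow the same two-phase pattern as ALTQ: pf_begin_rules()
 * empties the inactive queue and hands out a ticket, pf_ioctl_addrule()
 * appends new rules to the inactive queue after verifying that ticket, and
 * pf_commit_rules() swaps the active and inactive queues and recomputes
 * the skip steps.  pf_rollback_rules() above simply flushes the inactive
 * queue when the transaction is abandoned.
 */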
945 #define PF_MD5_UPD(st, elm) \
946 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
948 #define PF_MD5_UPD_STR(st, elm) \
949 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
951 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \
952 (stor) = htonl((st)->elm); \
953 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
956 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \
957 (stor) = htons((st)->elm); \
958 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
962 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
964 PF_MD5_UPD(pfr, addr.type);
965 switch (pfr->addr.type) {
966 case PF_ADDR_DYNIFTL:
967 PF_MD5_UPD(pfr, addr.v.ifname);
968 PF_MD5_UPD(pfr, addr.iflags);
971 PF_MD5_UPD(pfr, addr.v.tblname);
973 case PF_ADDR_ADDRMASK:
975 PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
976 PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
980 PF_MD5_UPD(pfr, port[0]);
981 PF_MD5_UPD(pfr, port[1]);
982 PF_MD5_UPD(pfr, neg);
983 PF_MD5_UPD(pfr, port_op);
987 pf_hash_rule(MD5_CTX *ctx, struct pf_krule *rule)
992 pf_hash_rule_addr(ctx, &rule->src);
993 pf_hash_rule_addr(ctx, &rule->dst);
994 PF_MD5_UPD_STR(rule, label);
995 PF_MD5_UPD_STR(rule, ifname);
996 PF_MD5_UPD_STR(rule, match_tagname);
997 PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
998 PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
999 PF_MD5_UPD_HTONL(rule, prob, y);
1000 PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
1001 PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
1002 PF_MD5_UPD(rule, uid.op);
1003 PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
1004 PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
1005 PF_MD5_UPD(rule, gid.op);
1006 PF_MD5_UPD_HTONL(rule, rule_flag, y);
1007 PF_MD5_UPD(rule, action);
1008 PF_MD5_UPD(rule, direction);
1009 PF_MD5_UPD(rule, af);
1010 PF_MD5_UPD(rule, quick);
1011 PF_MD5_UPD(rule, ifnot);
1012 PF_MD5_UPD(rule, match_tag_not);
1013 PF_MD5_UPD(rule, natpass);
1014 PF_MD5_UPD(rule, keep_state);
1015 PF_MD5_UPD(rule, proto);
1016 PF_MD5_UPD(rule, type);
1017 PF_MD5_UPD(rule, code);
1018 PF_MD5_UPD(rule, flags);
1019 PF_MD5_UPD(rule, flagset);
1020 PF_MD5_UPD(rule, allow_opts);
1021 PF_MD5_UPD(rule, rt);
1022 PF_MD5_UPD(rule, tos);
1026 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1028 struct pf_kruleset *rs;
1029 struct pf_krule *rule, **old_array;
1030 struct pf_krulequeue *old_rules;
1032 u_int32_t old_rcount;
1036 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1038 rs = pf_find_kruleset(anchor);
1039 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1040 ticket != rs->rules[rs_num].inactive.ticket)
1043 /* Calculate checksum for the main ruleset */
1044 if (rs == &pf_main_ruleset) {
1045 error = pf_setup_pfsync_matching(rs);
1050 /* Swap rules, keep the old. */
1051 old_rules = rs->rules[rs_num].active.ptr;
1052 old_rcount = rs->rules[rs_num].active.rcount;
1053 old_array = rs->rules[rs_num].active.ptr_array;
1055 rs->rules[rs_num].active.ptr =
1056 rs->rules[rs_num].inactive.ptr;
1057 rs->rules[rs_num].active.ptr_array =
1058 rs->rules[rs_num].inactive.ptr_array;
1059 rs->rules[rs_num].active.rcount =
1060 rs->rules[rs_num].inactive.rcount;
1061 rs->rules[rs_num].inactive.ptr = old_rules;
1062 rs->rules[rs_num].inactive.ptr_array = old_array;
1063 rs->rules[rs_num].inactive.rcount = old_rcount;
1065 rs->rules[rs_num].active.ticket =
1066 rs->rules[rs_num].inactive.ticket;
1067 pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1069 /* Purge the old rule list. */
1070 while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1071 pf_unlink_rule(old_rules, rule);
1072 if (rs->rules[rs_num].inactive.ptr_array)
1073 free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
1074 rs->rules[rs_num].inactive.ptr_array = NULL;
1075 rs->rules[rs_num].inactive.rcount = 0;
1076 rs->rules[rs_num].inactive.open = 0;
1077 pf_remove_if_empty_kruleset(rs);
1083 pf_setup_pfsync_matching(struct pf_kruleset *rs)
1086 struct pf_krule *rule;
1088 u_int8_t digest[PF_MD5_DIGEST_LENGTH];
1091 for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1092 /* XXX PF_RULESET_SCRUB as well? */
1093 if (rs_cnt == PF_RULESET_SCRUB)
1096 if (rs->rules[rs_cnt].inactive.ptr_array)
1097 free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
1098 rs->rules[rs_cnt].inactive.ptr_array = NULL;
1100 if (rs->rules[rs_cnt].inactive.rcount) {
1101 rs->rules[rs_cnt].inactive.ptr_array =
1102 malloc(sizeof(caddr_t) *
1103 rs->rules[rs_cnt].inactive.rcount,
1106 if (!rs->rules[rs_cnt].inactive.ptr_array)
1110 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1112 pf_hash_rule(&ctx, rule);
1113 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
1117 MD5Final(digest, &ctx);
1118 memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
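/*
 * The MD5 digest computed above ends up in V_pf_status.pf_chksum.  It
 * covers the filter rules of the main ruleset and is intended for pfsync,
 * so peers can verify they are running a matching ruleset (hence the
 * function name).  pf_hash_rule() deliberately hashes only configuration
 * fields, not counters or other runtime state, so identical configurations
 * hash identically.
 */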
1123 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
1128 switch (addr->type) {
1130 addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
1131 if (addr->p.tbl == NULL)
1134 case PF_ADDR_DYNIFTL:
1135 error = pfi_dynaddr_setup(addr, af);
1143 pf_addr_copyout(struct pf_addr_wrap *addr)
1146 switch (addr->type) {
1147 case PF_ADDR_DYNIFTL:
1148 pfi_dynaddr_copyout(addr);
1151 pf_tbladdr_copyout(addr);
1157 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
1159 int secs = time_uptime, diff;
1161 bzero(out, sizeof(struct pf_src_node));
1163 bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
1164 bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
1166 if (in->rule.ptr != NULL)
1167 out->rule.nr = in->rule.ptr->nr;
1169 for (int i = 0; i < 2; i++) {
1170 out->bytes[i] = counter_u64_fetch(in->bytes[i]);
1171 out->packets[i] = counter_u64_fetch(in->packets[i]);
1174 out->states = in->states;
1175 out->conn = in->conn;
1177 out->ruletype = in->ruletype;
1179 out->creation = secs - in->creation;
1180 if (out->expire > secs)
1181 out->expire -= secs;
1185 /* Adjust the connection rate estimate. */
1186 diff = secs - in->conn_rate.last;
1187 if (diff >= in->conn_rate.seconds)
1188 out->conn_rate.count = 0;
1190 out->conn_rate.count -=
1191 in->conn_rate.count * diff /
1192 in->conn_rate.seconds;
1197 * Handle export of struct pf_kaltq to user binaries that may be using any
1198 * version of struct pf_altq.
1201 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
1205 if (ioc_size == sizeof(struct pfioc_altq_v0))
1208 version = pa->version;
1210 if (version > PFIOC_ALTQ_VERSION)
1213 #define ASSIGN(x) exported_q->x = q->x
1215 bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
1216 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
1217 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
1221 struct pf_altq_v0 *exported_q =
1222 &((struct pfioc_altq_v0 *)pa)->altq;
1228 exported_q->tbrsize = SATU16(q->tbrsize);
1229 exported_q->ifbandwidth = SATU32(q->ifbandwidth);
1234 exported_q->bandwidth = SATU32(q->bandwidth);
1236 ASSIGN(local_flags);
1241 if (q->scheduler == ALTQT_HFSC) {
1242 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
1243 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
1244 SATU32(q->pq_u.hfsc_opts.x)
1246 ASSIGN_OPT_SATU32(rtsc_m1);
1248 ASSIGN_OPT_SATU32(rtsc_m2);
1250 ASSIGN_OPT_SATU32(lssc_m1);
1252 ASSIGN_OPT_SATU32(lssc_m2);
1254 ASSIGN_OPT_SATU32(ulsc_m1);
1256 ASSIGN_OPT_SATU32(ulsc_m2);
1261 #undef ASSIGN_OPT_SATU32
1269 struct pf_altq_v1 *exported_q =
1270 &((struct pfioc_altq_v1 *)pa)->altq;
1276 ASSIGN(ifbandwidth);
1283 ASSIGN(local_flags);
1293 panic("%s: unhandled struct pfioc_altq version", __func__);
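/*
 * Version-0 consumers carry narrower fields than the kernel's struct
 * pf_altq, so the export path above saturates rather than truncates: for
 * example, a 10 Gb/s ifbandwidth (10000000000) does not fit in the 32-bit
 * v0 field and is exported as UINT_MAX via SATU32(), and an oversized
 * tbrsize is clamped to USHRT_MAX via SATU16().
 */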
1306 * Handle import to struct pf_kaltq of struct pf_altq from user binaries
1307 * that may be using any version of it.
1310 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
1314 if (ioc_size == sizeof(struct pfioc_altq_v0))
1317 version = pa->version;
1319 if (version > PFIOC_ALTQ_VERSION)
1322 #define ASSIGN(x) q->x = imported_q->x
1324 bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
1328 struct pf_altq_v0 *imported_q =
1329 &((struct pfioc_altq_v0 *)pa)->altq;
1334 ASSIGN(tbrsize); /* 16-bit -> 32-bit */
1335 ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
1340 ASSIGN(bandwidth); /* 32-bit -> 64-bit */
1342 ASSIGN(local_flags);
1347 if (imported_q->scheduler == ALTQT_HFSC) {
1348 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
1351 			 * The m1 and m2 parameters are being copied from
1354 ASSIGN_OPT(rtsc_m1);
1356 ASSIGN_OPT(rtsc_m2);
1358 ASSIGN_OPT(lssc_m1);
1360 ASSIGN_OPT(lssc_m2);
1362 ASSIGN_OPT(ulsc_m1);
1364 ASSIGN_OPT(ulsc_m2);
1376 struct pf_altq_v1 *imported_q =
1377 &((struct pfioc_altq_v1 *)pa)->altq;
1383 ASSIGN(ifbandwidth);
1390 ASSIGN(local_flags);
1400 panic("%s: unhandled struct pfioc_altq version", __func__);
1410 static struct pf_altq *
1411 pf_altq_get_nth_active(u_int32_t n)
1413 struct pf_altq *altq;
1417 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1423 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1434 pf_krule_free(struct pf_krule *rule)
1439 counter_u64_free(rule->evaluations);
1440 for (int i = 0; i < 2; i++) {
1441 counter_u64_free(rule->packets[i]);
1442 counter_u64_free(rule->bytes[i]);
1444 counter_u64_free(rule->states_cur);
1445 counter_u64_free(rule->states_tot);
1446 counter_u64_free(rule->src_nodes);
1447 free(rule, M_PFRULE);
1451 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1452 struct pf_pooladdr *pool)
1455 bzero(pool, sizeof(*pool));
1456 bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1457 strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1461 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1462 struct pf_kpooladdr *kpool)
1465 bzero(kpool, sizeof(*kpool));
1466 bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1467 strlcpy(kpool->ifname, pool->ifname, sizeof(kpool->ifname));
1471 pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool)
1473 bzero(pool, sizeof(*pool));
1475 bcopy(&kpool->key, &pool->key, sizeof(pool->key));
1476 bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter));
1478 pool->tblidx = kpool->tblidx;
1479 pool->proxy_port[0] = kpool->proxy_port[0];
1480 pool->proxy_port[1] = kpool->proxy_port[1];
1481 pool->opts = kpool->opts;
1485 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
1487 _Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
1488 _Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
1490 bzero(kpool, sizeof(*kpool));
1492 bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
1493 bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
1495 kpool->tblidx = pool->tblidx;
1496 kpool->proxy_port[0] = pool->proxy_port[0];
1497 kpool->proxy_port[1] = pool->proxy_port[1];
1498 kpool->opts = pool->opts;
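/*
 * pf_kpool_to_pool()/pf_pool_to_kpool() shuttle the address pool between
 * the kernel representation and the ioctl ABI; the _Static_asserts at the
 * top of pf_pool_to_kpool() pin the key and counter layouts so the
 * bcopy()s stay safe if either structure is ever changed.
 */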
1504 pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
1507 bzero(rule, sizeof(*rule));
1509 bcopy(&krule->src, &rule->src, sizeof(rule->src));
1510 bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));
1512 for (int i = 0; i < PF_SKIP_COUNT; ++i) {
1513 if (rule->skip[i].ptr == NULL)
1514 rule->skip[i].nr = -1;
1516 rule->skip[i].nr = krule->skip[i].ptr->nr;
1519 strlcpy(rule->label, krule->label, sizeof(rule->label));
1520 strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
1521 strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
1522 strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
1523 strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
1524 strlcpy(rule->match_tagname, krule->match_tagname,
1525 sizeof(rule->match_tagname));
1526 strlcpy(rule->overload_tblname, krule->overload_tblname,
1527 sizeof(rule->overload_tblname));
1529 pf_kpool_to_pool(&krule->rpool, &rule->rpool);
1531 rule->evaluations = counter_u64_fetch(krule->evaluations);
1532 for (int i = 0; i < 2; i++) {
1533 rule->packets[i] = counter_u64_fetch(krule->packets[i]);
1534 rule->bytes[i] = counter_u64_fetch(krule->bytes[i]);
1537 /* kif, anchor, overload_tbl are not copied over. */
1539 rule->os_fingerprint = krule->os_fingerprint;
1541 rule->rtableid = krule->rtableid;
1542 bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout));
1543 rule->max_states = krule->max_states;
1544 rule->max_src_nodes = krule->max_src_nodes;
1545 rule->max_src_states = krule->max_src_states;
1546 rule->max_src_conn = krule->max_src_conn;
1547 rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit;
1548 rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds;
1549 rule->qid = krule->qid;
1550 rule->pqid = krule->pqid;
1551 rule->nr = krule->nr;
1552 rule->prob = krule->prob;
1553 rule->cuid = krule->cuid;
1554 rule->cpid = krule->cpid;
1556 rule->return_icmp = krule->return_icmp;
1557 rule->return_icmp6 = krule->return_icmp6;
1558 rule->max_mss = krule->max_mss;
1559 rule->tag = krule->tag;
1560 rule->match_tag = krule->match_tag;
1561 rule->scrub_flags = krule->scrub_flags;
1563 bcopy(&krule->uid, &rule->uid, sizeof(krule->uid));
1564 bcopy(&krule->gid, &rule->gid, sizeof(krule->gid));
1566 rule->rule_flag = krule->rule_flag;
1567 rule->action = krule->action;
1568 rule->direction = krule->direction;
1569 rule->log = krule->log;
1570 rule->logif = krule->logif;
1571 rule->quick = krule->quick;
1572 rule->ifnot = krule->ifnot;
1573 rule->match_tag_not = krule->match_tag_not;
1574 rule->natpass = krule->natpass;
1576 rule->keep_state = krule->keep_state;
1577 rule->af = krule->af;
1578 rule->proto = krule->proto;
1579 rule->type = krule->type;
1580 rule->code = krule->code;
1581 rule->flags = krule->flags;
1582 rule->flagset = krule->flagset;
1583 rule->min_ttl = krule->min_ttl;
1584 rule->allow_opts = krule->allow_opts;
1585 rule->rt = krule->rt;
1586 rule->return_ttl = krule->return_ttl;
1587 rule->tos = krule->tos;
1588 rule->set_tos = krule->set_tos;
1589 rule->anchor_relative = krule->anchor_relative;
1590 rule->anchor_wildcard = krule->anchor_wildcard;
1592 rule->flush = krule->flush;
1593 rule->prio = krule->prio;
1594 rule->set_prio[0] = krule->set_prio[0];
1595 rule->set_prio[1] = krule->set_prio[1];
1597 bcopy(&krule->divert, &rule->divert, sizeof(krule->divert));
1599 rule->u_states_cur = counter_u64_fetch(krule->states_cur);
1600 rule->u_states_tot = counter_u64_fetch(krule->states_tot);
1601 rule->u_src_nodes = counter_u64_fetch(krule->src_nodes);
1605 pf_check_rule_addr(const struct pf_rule_addr *addr)
1608 switch (addr->addr.type) {
1609 case PF_ADDR_ADDRMASK:
1610 case PF_ADDR_NOROUTE:
1611 case PF_ADDR_DYNIFTL:
1613 case PF_ADDR_URPFFAILED:
1620 if (addr->addr.p.dyn != NULL) {
1628 pf_nvaddr_to_addr(const nvlist_t *nvl, struct pf_addr *paddr)
1630 return (pf_nvbinary(nvl, "addr", paddr, sizeof(*paddr)));
1634 pf_addr_to_nvaddr(const struct pf_addr *paddr)
1638 nvl = nvlist_create(0);
1642 nvlist_add_binary(nvl, "addr", paddr, sizeof(*paddr));
1648 pf_nvmape_to_mape(const nvlist_t *nvl, struct pf_mape_portset *mape)
1652 bzero(mape, sizeof(*mape));
1653 PFNV_CHK(pf_nvuint8(nvl, "offset", &mape->offset));
1654 PFNV_CHK(pf_nvuint8(nvl, "psidlen", &mape->psidlen));
1655 PFNV_CHK(pf_nvuint16(nvl, "psid", &mape->psid));
1662 pf_mape_to_nvmape(const struct pf_mape_portset *mape)
1666 nvl = nvlist_create(0);
1670 nvlist_add_number(nvl, "offset", mape->offset);
1671 nvlist_add_number(nvl, "psidlen", mape->psidlen);
1672 nvlist_add_number(nvl, "psid", mape->psid);
1678 pf_nvpool_to_pool(const nvlist_t *nvl, struct pf_kpool *kpool)
1682 bzero(kpool, sizeof(*kpool));
1684 PFNV_CHK(pf_nvbinary(nvl, "key", &kpool->key, sizeof(kpool->key)));
1686 if (nvlist_exists_nvlist(nvl, "counter")) {
1687 PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "counter"),
1691 PFNV_CHK(pf_nvint(nvl, "tblidx", &kpool->tblidx));
1692 PFNV_CHK(pf_nvuint16_array(nvl, "proxy_port", kpool->proxy_port, 2,
1694 PFNV_CHK(pf_nvuint8(nvl, "opts", &kpool->opts));
1696 if (nvlist_exists_nvlist(nvl, "mape")) {
1697 PFNV_CHK(pf_nvmape_to_mape(nvlist_get_nvlist(nvl, "mape"),
1706 pf_pool_to_nvpool(const struct pf_kpool *pool)
1711 nvl = nvlist_create(0);
1715 nvlist_add_binary(nvl, "key", &pool->key, sizeof(pool->key));
1716 tmp = pf_addr_to_nvaddr(&pool->counter);
1719 nvlist_add_nvlist(nvl, "counter", tmp);
1721 nvlist_add_number(nvl, "tblidx", pool->tblidx);
1722 pf_uint16_array_nv(nvl, "proxy_port", pool->proxy_port, 2);
1723 nvlist_add_number(nvl, "opts", pool->opts);
1725 tmp = pf_mape_to_nvmape(&pool->mape);
1728 nvlist_add_nvlist(nvl, "mape", tmp);
1733 nvlist_destroy(nvl);
1738 pf_nvaddr_wrap_to_addr_wrap(const nvlist_t *nvl, struct pf_addr_wrap *addr)
1742 bzero(addr, sizeof(*addr));
1744 PFNV_CHK(pf_nvuint8(nvl, "type", &addr->type));
1745 PFNV_CHK(pf_nvuint8(nvl, "iflags", &addr->iflags));
1746 PFNV_CHK(pf_nvstring(nvl, "ifname", addr->v.ifname,
1747 sizeof(addr->v.ifname)));
1748 PFNV_CHK(pf_nvstring(nvl, "tblname", addr->v.tblname,
1749 sizeof(addr->v.tblname)));
1751 if (! nvlist_exists_nvlist(nvl, "addr"))
1753 PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "addr"),
1756 if (! nvlist_exists_nvlist(nvl, "mask"))
1758 PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "mask"),
1761 switch (addr->type) {
1762 case PF_ADDR_DYNIFTL:
1765 case PF_ADDR_ADDRMASK:
1766 case PF_ADDR_NOROUTE:
1767 case PF_ADDR_URPFFAILED:
1778 pf_addr_wrap_to_nvaddr_wrap(const struct pf_addr_wrap *addr)
1783 nvl = nvlist_create(0);
1787 nvlist_add_number(nvl, "type", addr->type);
1788 nvlist_add_number(nvl, "iflags", addr->iflags);
1789 nvlist_add_string(nvl, "ifname", addr->v.ifname);
1790 nvlist_add_string(nvl, "tblname", addr->v.tblname);
1792 tmp = pf_addr_to_nvaddr(&addr->v.a.addr);
1795 nvlist_add_nvlist(nvl, "addr", tmp);
1796 tmp = pf_addr_to_nvaddr(&addr->v.a.mask);
1799 nvlist_add_nvlist(nvl, "mask", tmp);
1804 nvlist_destroy(nvl);
1809 pf_validate_op(uint8_t op)
1831 pf_nvrule_addr_to_rule_addr(const nvlist_t *nvl, struct pf_rule_addr *addr)
1835 if (! nvlist_exists_nvlist(nvl, "addr"))
1838 PFNV_CHK(pf_nvaddr_wrap_to_addr_wrap(nvlist_get_nvlist(nvl, "addr"),
1840 PFNV_CHK(pf_nvuint16_array(nvl, "port", addr->port, 2, NULL));
1841 PFNV_CHK(pf_nvuint8(nvl, "neg", &addr->neg));
1842 PFNV_CHK(pf_nvuint8(nvl, "port_op", &addr->port_op));
1844 PFNV_CHK(pf_validate_op(addr->port_op));
1851 pf_rule_addr_to_nvrule_addr(const struct pf_rule_addr *addr)
1856 nvl = nvlist_create(0);
1860 tmp = pf_addr_wrap_to_nvaddr_wrap(&addr->addr);
1863 nvlist_add_nvlist(nvl, "addr", tmp);
1864 pf_uint16_array_nv(nvl, "port", addr->port, 2);
1865 nvlist_add_number(nvl, "neg", addr->neg);
1866 nvlist_add_number(nvl, "port_op", addr->port_op);
1871 nvlist_destroy(nvl);
1876 pf_nvrule_uid_to_rule_uid(const nvlist_t *nvl, struct pf_rule_uid *uid)
1880 bzero(uid, sizeof(*uid));
1882 PFNV_CHK(pf_nvuint32_array(nvl, "uid", uid->uid, 2, NULL));
1883 PFNV_CHK(pf_nvuint8(nvl, "op", &uid->op));
1885 PFNV_CHK(pf_validate_op(uid->op));
1892 pf_rule_uid_to_nvrule_uid(const struct pf_rule_uid *uid)
1896 nvl = nvlist_create(0);
1900 pf_uint32_array_nv(nvl, "uid", uid->uid, 2);
1901 nvlist_add_number(nvl, "op", uid->op);
1907 pf_nvrule_gid_to_rule_gid(const nvlist_t *nvl, struct pf_rule_gid *gid)
1909 	/* Cheat a little. These structs are the same, other than the name of
1910 	 * the first field. */
1911 return (pf_nvrule_uid_to_rule_uid(nvl, (struct pf_rule_uid *)gid));
1915 pf_nvrule_to_krule(const nvlist_t *nvl, struct pf_krule **prule)
1917 struct pf_krule *rule;
1920 rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK | M_ZERO);
1922 PFNV_CHK(pf_nvuint32(nvl, "nr", &rule->nr));
1924 if (! nvlist_exists_nvlist(nvl, "src")) {
1928 error = pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "src"),
1933 if (! nvlist_exists_nvlist(nvl, "dst")) {
1937 PFNV_CHK(pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "dst"),
1940 PFNV_CHK(pf_nvstring(nvl, "label", rule->label, sizeof(rule->label)));
1941 PFNV_CHK(pf_nvstring(nvl, "ifname", rule->ifname,
1942 sizeof(rule->ifname)));
1943 PFNV_CHK(pf_nvstring(nvl, "qname", rule->qname, sizeof(rule->qname)));
1944 PFNV_CHK(pf_nvstring(nvl, "pqname", rule->pqname,
1945 sizeof(rule->pqname)));
1946 PFNV_CHK(pf_nvstring(nvl, "tagname", rule->tagname,
1947 sizeof(rule->tagname)));
1948 PFNV_CHK(pf_nvstring(nvl, "match_tagname", rule->match_tagname,
1949 sizeof(rule->match_tagname)));
1950 PFNV_CHK(pf_nvstring(nvl, "overload_tblname", rule->overload_tblname,
1951 sizeof(rule->overload_tblname)));
1953 if (! nvlist_exists_nvlist(nvl, "rpool")) {
1957 PFNV_CHK(pf_nvpool_to_pool(nvlist_get_nvlist(nvl, "rpool"),
1960 PFNV_CHK(pf_nvuint32(nvl, "os_fingerprint", &rule->os_fingerprint));
1962 PFNV_CHK(pf_nvint(nvl, "rtableid", &rule->rtableid));
1963 PFNV_CHK(pf_nvuint32_array(nvl, "timeout", rule->timeout, PFTM_MAX, NULL));
1964 PFNV_CHK(pf_nvuint32(nvl, "max_states", &rule->max_states));
1965 PFNV_CHK(pf_nvuint32(nvl, "max_src_nodes", &rule->max_src_nodes));
1966 PFNV_CHK(pf_nvuint32(nvl, "max_src_states", &rule->max_src_states));
1967 PFNV_CHK(pf_nvuint32(nvl, "max_src_conn", &rule->max_src_conn));
1968 PFNV_CHK(pf_nvuint32(nvl, "max_src_conn_rate.limit",
1969 &rule->max_src_conn_rate.limit));
1970 PFNV_CHK(pf_nvuint32(nvl, "max_src_conn_rate.seconds",
1971 &rule->max_src_conn_rate.seconds));
1972 PFNV_CHK(pf_nvuint32(nvl, "prob", &rule->prob));
1973 PFNV_CHK(pf_nvuint32(nvl, "cuid", &rule->cuid));
1974 PFNV_CHK(pf_nvuint32(nvl, "cpid", &rule->cpid));
1976 PFNV_CHK(pf_nvuint16(nvl, "return_icmp", &rule->return_icmp));
1977 PFNV_CHK(pf_nvuint16(nvl, "return_icmp6", &rule->return_icmp6));
1979 PFNV_CHK(pf_nvuint16(nvl, "max_mss", &rule->max_mss));
1980 PFNV_CHK(pf_nvuint16(nvl, "scrub_flags", &rule->scrub_flags));
1982 if (! nvlist_exists_nvlist(nvl, "uid")) {
1986 PFNV_CHK(pf_nvrule_uid_to_rule_uid(nvlist_get_nvlist(nvl, "uid"),
1989 if (! nvlist_exists_nvlist(nvl, "gid")) {
1993 PFNV_CHK(pf_nvrule_gid_to_rule_gid(nvlist_get_nvlist(nvl, "gid"),
1996 PFNV_CHK(pf_nvuint32(nvl, "rule_flag", &rule->rule_flag));
1997 PFNV_CHK(pf_nvuint8(nvl, "action", &rule->action));
1998 PFNV_CHK(pf_nvuint8(nvl, "direction", &rule->direction));
1999 PFNV_CHK(pf_nvuint8(nvl, "log", &rule->log));
2000 PFNV_CHK(pf_nvuint8(nvl, "logif", &rule->logif));
2001 PFNV_CHK(pf_nvuint8(nvl, "quick", &rule->quick));
2002 PFNV_CHK(pf_nvuint8(nvl, "ifnot", &rule->ifnot));
2003 PFNV_CHK(pf_nvuint8(nvl, "match_tag_not", &rule->match_tag_not));
2004 PFNV_CHK(pf_nvuint8(nvl, "natpass", &rule->natpass));
2006 PFNV_CHK(pf_nvuint8(nvl, "keep_state", &rule->keep_state));
2007 PFNV_CHK(pf_nvuint8(nvl, "af", &rule->af));
2008 PFNV_CHK(pf_nvuint8(nvl, "proto", &rule->proto));
2009 PFNV_CHK(pf_nvuint8(nvl, "type", &rule->type));
2010 PFNV_CHK(pf_nvuint8(nvl, "code", &rule->code));
2011 PFNV_CHK(pf_nvuint8(nvl, "flags", &rule->flags));
2012 PFNV_CHK(pf_nvuint8(nvl, "flagset", &rule->flagset));
2013 PFNV_CHK(pf_nvuint8(nvl, "min_ttl", &rule->min_ttl));
2014 PFNV_CHK(pf_nvuint8(nvl, "allow_opts", &rule->allow_opts));
2015 PFNV_CHK(pf_nvuint8(nvl, "rt", &rule->rt));
2016 PFNV_CHK(pf_nvuint8(nvl, "return_ttl", &rule->return_ttl));
2017 PFNV_CHK(pf_nvuint8(nvl, "tos", &rule->tos));
2018 PFNV_CHK(pf_nvuint8(nvl, "set_tos", &rule->set_tos));
2019 PFNV_CHK(pf_nvuint8(nvl, "anchor_relative", &rule->anchor_relative));
2020 PFNV_CHK(pf_nvuint8(nvl, "anchor_wildcard", &rule->anchor_wildcard));
2022 PFNV_CHK(pf_nvuint8(nvl, "flush", &rule->flush));
2023 PFNV_CHK(pf_nvuint8(nvl, "prio", &rule->prio));
2025 PFNV_CHK(pf_nvuint8_array(nvl, "set_prio", &rule->prio, 2, NULL));
2027 if (nvlist_exists_nvlist(nvl, "divert")) {
2028 const nvlist_t *nvldivert = nvlist_get_nvlist(nvl, "divert");
2030 if (! nvlist_exists_nvlist(nvldivert, "addr")) {
2034 PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvldivert, "addr"),
2035 &rule->divert.addr));
2036 PFNV_CHK(pf_nvuint16(nvldivert, "port", &rule->divert.port));
2041 if (rule->af == AF_INET) {
2042 error = EAFNOSUPPORT;
2047 if (rule->af == AF_INET6) {
2048 error = EAFNOSUPPORT;
2053 PFNV_CHK(pf_check_rule_addr(&rule->src));
2054 PFNV_CHK(pf_check_rule_addr(&rule->dst));
2061 pf_krule_free(rule);
2068 pf_divert_to_nvdivert(const struct pf_krule *rule)
2073 nvl = nvlist_create(0);
2077 tmp = pf_addr_to_nvaddr(&rule->divert.addr);
2080 nvlist_add_nvlist(nvl, "addr", tmp);
2081 nvlist_add_number(nvl, "port", rule->divert.port);
2086 nvlist_destroy(nvl);
2091 pf_krule_to_nvrule(const struct pf_krule *rule)
2093 nvlist_t *nvl, *tmp;
2095 nvl = nvlist_create(0);
2099 nvlist_add_number(nvl, "nr", rule->nr);
2100 tmp = pf_rule_addr_to_nvrule_addr(&rule->src);
2103 nvlist_add_nvlist(nvl, "src", tmp);
2104 tmp = pf_rule_addr_to_nvrule_addr(&rule->dst);
2107 nvlist_add_nvlist(nvl, "dst", tmp);
2109 for (int i = 0; i < PF_SKIP_COUNT; i++) {
2110 nvlist_append_number_array(nvl, "skip",
2111 rule->skip[i].ptr ? rule->skip[i].ptr->nr : -1);
2114 nvlist_add_string(nvl, "label", rule->label);
2115 nvlist_add_string(nvl, "ifname", rule->ifname);
2116 nvlist_add_string(nvl, "qname", rule->qname);
2117 nvlist_add_string(nvl, "pqname", rule->pqname);
2118 nvlist_add_string(nvl, "tagname", rule->tagname);
2119 nvlist_add_string(nvl, "match_tagname", rule->match_tagname);
2120 nvlist_add_string(nvl, "overload_tblname", rule->overload_tblname);
2122 tmp = pf_pool_to_nvpool(&rule->rpool);
2125 nvlist_add_nvlist(nvl, "rpool", tmp);
2127 nvlist_add_number(nvl, "evaluations",
2128 counter_u64_fetch(rule->evaluations));
2129 for (int i = 0; i < 2; i++) {
2130 nvlist_append_number_array(nvl, "packets",
2131 counter_u64_fetch(rule->packets[i]));
2132 nvlist_append_number_array(nvl, "bytes",
2133 counter_u64_fetch(rule->bytes[i]));
2136 nvlist_add_number(nvl, "os_fingerprint", rule->os_fingerprint);
2138 nvlist_add_number(nvl, "rtableid", rule->rtableid);
2139 pf_uint32_array_nv(nvl, "timeout", rule->timeout, PFTM_MAX);
2140 nvlist_add_number(nvl, "max_states", rule->max_states);
2141 nvlist_add_number(nvl, "max_src_nodes", rule->max_src_nodes);
2142 nvlist_add_number(nvl, "max_src_states", rule->max_src_states);
2143 nvlist_add_number(nvl, "max_src_conn", rule->max_src_conn);
2144 nvlist_add_number(nvl, "max_src_conn_rate.limit",
2145 rule->max_src_conn_rate.limit);
2146 nvlist_add_number(nvl, "max_src_conn_rate.seconds",
2147 rule->max_src_conn_rate.seconds);
2148 nvlist_add_number(nvl, "qid", rule->qid);
2149 nvlist_add_number(nvl, "pqid", rule->pqid);
2150 nvlist_add_number(nvl, "prob", rule->prob);
2151 nvlist_add_number(nvl, "cuid", rule->cuid);
2152 nvlist_add_number(nvl, "cpid", rule->cpid);
2154 nvlist_add_number(nvl, "states_cur",
2155 counter_u64_fetch(rule->states_cur));
2156 nvlist_add_number(nvl, "states_tot",
2157 counter_u64_fetch(rule->states_tot));
2158 nvlist_add_number(nvl, "src_nodes",
2159 counter_u64_fetch(rule->src_nodes));
2161 nvlist_add_number(nvl, "return_icmp", rule->return_icmp);
2162 nvlist_add_number(nvl, "return_icmp6", rule->return_icmp6);
2164 nvlist_add_number(nvl, "max_mss", rule->max_mss);
2165 nvlist_add_number(nvl, "scrub_flags", rule->scrub_flags);
2167 tmp = pf_rule_uid_to_nvrule_uid(&rule->uid);
2170 nvlist_add_nvlist(nvl, "uid", tmp);
2171 tmp = pf_rule_uid_to_nvrule_uid((const struct pf_rule_uid *)&rule->gid);
2174 nvlist_add_nvlist(nvl, "gid", tmp);
2176 nvlist_add_number(nvl, "rule_flag", rule->rule_flag);
2177 nvlist_add_number(nvl, "action", rule->action);
2178 nvlist_add_number(nvl, "direction", rule->direction);
2179 nvlist_add_number(nvl, "log", rule->log);
2180 nvlist_add_number(nvl, "logif", rule->logif);
2181 nvlist_add_number(nvl, "quick", rule->quick);
2182 nvlist_add_number(nvl, "ifnot", rule->ifnot);
2183 nvlist_add_number(nvl, "match_tag_not", rule->match_tag_not);
2184 nvlist_add_number(nvl, "natpass", rule->natpass);
2186 nvlist_add_number(nvl, "keep_state", rule->keep_state);
2187 nvlist_add_number(nvl, "af", rule->af);
2188 nvlist_add_number(nvl, "proto", rule->proto);
2189 nvlist_add_number(nvl, "type", rule->type);
2190 nvlist_add_number(nvl, "code", rule->code);
2191 nvlist_add_number(nvl, "flags", rule->flags);
2192 nvlist_add_number(nvl, "flagset", rule->flagset);
2193 nvlist_add_number(nvl, "min_ttl", rule->min_ttl);
2194 nvlist_add_number(nvl, "allow_opts", rule->allow_opts);
2195 nvlist_add_number(nvl, "rt", rule->rt);
2196 nvlist_add_number(nvl, "return_ttl", rule->return_ttl);
2197 nvlist_add_number(nvl, "tos", rule->tos);
2198 nvlist_add_number(nvl, "set_tos", rule->set_tos);
2199 nvlist_add_number(nvl, "anchor_relative", rule->anchor_relative);
2200 nvlist_add_number(nvl, "anchor_wildcard", rule->anchor_wildcard);
2202 nvlist_add_number(nvl, "flush", rule->flush);
2203 nvlist_add_number(nvl, "prio", rule->prio);
2205 pf_uint8_array_nv(nvl, "set_prio", &rule->prio, 2);
2207 tmp = pf_divert_to_nvdivert(rule);
2210 nvlist_add_nvlist(nvl, "divert", tmp);
2215 nvlist_destroy(nvl);
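/*
 * The pf_*_to_nv*() helpers above share one convention: allocate with
 * nvlist_create(0), add fields, and on any allocation failure destroy the
 * partially built nvlist and return NULL.  The matching pf_nv*_to_*()
 * parsers populate a zeroed kernel structure field by field; PFNV_CHK()
 * (defined in pf_nv.h, not shown here) is assumed to abort the conversion
 * on the first field that is missing or fails to parse.
 */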
2220 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
2225 if (rule->af == AF_INET) {
2226 return (EAFNOSUPPORT);
2230 if (rule->af == AF_INET6) {
2231 return (EAFNOSUPPORT);
2235 ret = pf_check_rule_addr(&rule->src);
2238 ret = pf_check_rule_addr(&rule->dst);
2242 bzero(krule, sizeof(*krule));
2244 bcopy(&rule->src, &krule->src, sizeof(rule->src));
2245 bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
2247 strlcpy(krule->label, rule->label, sizeof(rule->label));
2248 strlcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
2249 strlcpy(krule->qname, rule->qname, sizeof(rule->qname));
2250 strlcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
2251 strlcpy(krule->tagname, rule->tagname, sizeof(rule->tagname));
2252 strlcpy(krule->match_tagname, rule->match_tagname,
2253 sizeof(rule->match_tagname));
2254 strlcpy(krule->overload_tblname, rule->overload_tblname,
2255 sizeof(rule->overload_tblname));
2257 ret = pf_pool_to_kpool(&rule->rpool, &krule->rpool);
2261 	/* Don't allow userspace to set evaluations, packets or bytes. */
2262 /* kif, anchor, overload_tbl are not copied over. */
2264 krule->os_fingerprint = rule->os_fingerprint;
2266 krule->rtableid = rule->rtableid;
2267 bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout));
2268 krule->max_states = rule->max_states;
2269 krule->max_src_nodes = rule->max_src_nodes;
2270 krule->max_src_states = rule->max_src_states;
2271 krule->max_src_conn = rule->max_src_conn;
2272 krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
2273 krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
2274 krule->qid = rule->qid;
2275 krule->pqid = rule->pqid;
2276 krule->nr = rule->nr;
2277 krule->prob = rule->prob;
2278 krule->cuid = rule->cuid;
2279 krule->cpid = rule->cpid;
2281 krule->return_icmp = rule->return_icmp;
2282 krule->return_icmp6 = rule->return_icmp6;
2283 krule->max_mss = rule->max_mss;
2284 krule->tag = rule->tag;
2285 krule->match_tag = rule->match_tag;
2286 krule->scrub_flags = rule->scrub_flags;
2288 bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
2289 bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
2291 krule->rule_flag = rule->rule_flag;
2292 krule->action = rule->action;
2293 krule->direction = rule->direction;
2294 krule->log = rule->log;
2295 krule->logif = rule->logif;
2296 krule->quick = rule->quick;
2297 krule->ifnot = rule->ifnot;
2298 krule->match_tag_not = rule->match_tag_not;
2299 krule->natpass = rule->natpass;
2301 krule->keep_state = rule->keep_state;
2302 krule->af = rule->af;
2303 krule->proto = rule->proto;
2304 krule->type = rule->type;
2305 krule->code = rule->code;
2306 krule->flags = rule->flags;
2307 krule->flagset = rule->flagset;
2308 krule->min_ttl = rule->min_ttl;
2309 krule->allow_opts = rule->allow_opts;
2310 krule->rt = rule->rt;
2311 krule->return_ttl = rule->return_ttl;
2312 krule->tos = rule->tos;
2313 krule->set_tos = rule->set_tos;
2314 krule->anchor_relative = rule->anchor_relative;
2315 krule->anchor_wildcard = rule->anchor_wildcard;
2317 krule->flush = rule->flush;
2318 krule->prio = rule->prio;
2319 krule->set_prio[0] = rule->set_prio[0];
2320 krule->set_prio[1] = rule->set_prio[1];
2322 bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
2328 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
2329 uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2332 struct pf_kruleset *ruleset;
2333 struct pf_krule *tail;
2334 struct pf_kpooladdr *pa;
2335 struct pfi_kkif *kif = NULL;
2339 if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
2341 goto errout_unlocked;
2344 #define ERROUT(x) { error = (x); goto errout; }
2346 if (rule->ifname[0])
2347 kif = pf_kkif_create(M_WAITOK);
2348 rule->evaluations = counter_u64_alloc(M_WAITOK);
2349 for (int i = 0; i < 2; i++) {
2350 rule->packets[i] = counter_u64_alloc(M_WAITOK);
2351 rule->bytes[i] = counter_u64_alloc(M_WAITOK);
2353 rule->states_cur = counter_u64_alloc(M_WAITOK);
2354 rule->states_tot = counter_u64_alloc(M_WAITOK);
2355 rule->src_nodes = counter_u64_alloc(M_WAITOK);
2356 rule->cuid = td->td_ucred->cr_ruid;
2357 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
2358 TAILQ_INIT(&rule->rpool.list);
2361 ruleset = pf_find_kruleset(anchor);
2362 if (ruleset == NULL)
2364 rs_num = pf_get_ruleset_number(rule->action);
2365 if (rs_num >= PF_RULESET_MAX)
2367 if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2368 DPFPRINTF(PF_DEBUG_MISC,
2369 ("ticket: %d != [%d]%d\n", ticket, rs_num,
2370 ruleset->rules[rs_num].inactive.ticket));
2373 if (pool_ticket != V_ticket_pabuf) {
2374 DPFPRINTF(PF_DEBUG_MISC,
2375 ("pool_ticket: %d != %d\n", pool_ticket,
2380 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2383 rule->nr = tail->nr + 1;
2386 if (rule->ifname[0]) {
2387 rule->kif = pfi_kkif_attach(kif, rule->ifname);
2389 pfi_kkif_ref(rule->kif);
2393 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2398 if (rule->qname[0] != 0) {
2399 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2401 else if (rule->pqname[0] != 0) {
2403 pf_qname2qid(rule->pqname)) == 0)
2406 rule->pqid = rule->qid;
2409 if (rule->tagname[0])
2410 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2412 if (rule->match_tagname[0])
2413 if ((rule->match_tag =
2414 pf_tagname2tag(rule->match_tagname)) == 0)
2416 if (rule->rt && !rule->direction)
2420 if (rule->logif >= PFLOGIFS_MAX)
2422 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2424 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2426 if (pf_kanchor_setup(rule, ruleset, anchor_call))
2428 if (rule->scrub_flags & PFSTATE_SETPRIO &&
2429 (rule->set_prio[0] > PF_PRIO_MAX ||
2430 rule->set_prio[1] > PF_PRIO_MAX))
2432 TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
2433 if (pa->addr.type == PF_ADDR_TABLE) {
2434 pa->addr.p.tbl = pfr_attach_table(ruleset,
2435 pa->addr.v.tblname);
2436 if (pa->addr.p.tbl == NULL)
2440 rule->overload_tbl = NULL;
2441 if (rule->overload_tblname[0]) {
2442 if ((rule->overload_tbl = pfr_attach_table(ruleset,
2443 rule->overload_tblname)) == NULL)
2446 rule->overload_tbl->pfrkt_flags |=
2450 pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list);
2451 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2452 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
2453 (rule->rt > PF_NOPFROUTE)) &&
2454 (TAILQ_FIRST(&rule->rpool.list) == NULL))
2463 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
2464 counter_u64_zero(rule->evaluations);
2465 for (int i = 0; i < 2; i++) {
2466 counter_u64_zero(rule->packets[i]);
2467 counter_u64_zero(rule->bytes[i]);
2469 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2471 ruleset->rules[rs_num].inactive.rcount++;
2481 pf_krule_free(rule);
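/*
 * pfioctl() is the /dev/pf ioctl entry point.  It first rejects commands
 * that are not permitted at the current securelevel or on a descriptor
 * opened without FWRITE, then dispatches on the command in one large
 * switch with the caller's vnet set.
 */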
2486 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2489 PF_RULES_RLOCK_TRACKER;
2491 /* XXX keep in sync with switch() below */
2492 if (securelevel_gt(td->td_ucred, 2))
2500 case DIOCSETSTATUSIF:
2506 case DIOCGETTIMEOUT:
2507 case DIOCCLRRULECTRS:
2509 case DIOCGETALTQSV0:
2510 case DIOCGETALTQSV1:
2513 case DIOCGETQSTATSV0:
2514 case DIOCGETQSTATSV1:
2515 case DIOCGETRULESETS:
2516 case DIOCGETRULESET:
2517 case DIOCRGETTABLES:
2518 case DIOCRGETTSTATS:
2519 case DIOCRCLRTSTATS:
2525 case DIOCRGETASTATS:
2526 case DIOCRCLRASTATS:
2529 case DIOCGETSRCNODES:
2530 case DIOCCLRSRCNODES:
2531 case DIOCIGETIFACES:
2532 case DIOCGIFSPEEDV0:
2533 case DIOCGIFSPEEDV1:
2537 case DIOCRCLRTABLES:
2538 case DIOCRADDTABLES:
2539 case DIOCRDELTABLES:
2540 case DIOCRSETTFLAGS:
2541 if (((struct pfioc_table *)addr)->pfrio_flags &
2543 break; /* dummy operation ok */
2549 if (!(flags & FWRITE))
2557 case DIOCGETTIMEOUT:
2559 case DIOCGETALTQSV0:
2560 case DIOCGETALTQSV1:
2563 case DIOCGETQSTATSV0:
2564 case DIOCGETQSTATSV1:
2565 case DIOCGETRULESETS:
2566 case DIOCGETRULESET:
2568 case DIOCRGETTABLES:
2569 case DIOCRGETTSTATS:
2571 case DIOCRGETASTATS:
2574 case DIOCGETSRCNODES:
2575 case DIOCIGETIFACES:
2576 case DIOCGIFSPEEDV1:
2577 case DIOCGIFSPEEDV0:
2580 case DIOCRCLRTABLES:
2581 case DIOCRADDTABLES:
2582 case DIOCRDELTABLES:
2583 case DIOCRCLRTSTATS:
2588 case DIOCRSETTFLAGS:
2589 if (((struct pfioc_table *)addr)->pfrio_flags &
2591 flags |= FWRITE; /* need write lock for dummy */
2592 break; /* dummy operation ok */
2596 if (((struct pfioc_rule *)addr)->action ==
2604 CURVNET_SET(TD_TO_VNET(td));
2608 sx_xlock(&pf_ioctl_lock);
2609 if (V_pf_status.running)
2615 V_pf_status.running = 1;
2616 V_pf_status.since = time_second;
2619 V_pf_stateid[cpu] = time_second;
2621 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
2626 sx_xlock(&pf_ioctl_lock);
2627 if (!V_pf_status.running)
2630 V_pf_status.running = 0;
2632 V_pf_status.since = time_second;
2633 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
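/*
 * DIOCADDRULENV adds a rule supplied as a packed nvlist.  The handler
 * copies the packed buffer in, unpacks it, extracts the "ticket",
 * "pool_ticket" and "rule" elements plus the optional "anchor" and
 * "anchor_call" strings, converts the rule and hands it to
 * pf_ioctl_addrule().  A userland caller does roughly the following
 * (illustrative sketch only, not lifted from pfctl; error handling
 * omitted, "nvrule" is the rule nvlist built elsewhere and "dev" an
 * open /dev/pf descriptor):
 *
 *	struct pfioc_nv nv;
 *	nvlist_t *nvl = nvlist_create(0);
 *	nvlist_add_number(nvl, "ticket", ticket);
 *	nvlist_add_number(nvl, "pool_ticket", pool_ticket);
 *	nvlist_add_nvlist(nvl, "rule", nvrule);
 *	nv.data = nvlist_pack(nvl, &nv.len);
 *	ioctl(dev, DIOCADDRULENV, &nv);
 */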
2637 case DIOCADDRULENV: {
2638 struct pfioc_nv *nv = (struct pfioc_nv *)addr;
2639 nvlist_t *nvl = NULL;
2640 void *nvlpacked = NULL;
2641 struct pf_krule *rule = NULL;
2642 const char *anchor = "", *anchor_call = "";
2643 uint32_t ticket = 0, pool_ticket = 0;
2645 #define ERROUT(x) do { error = (x); goto DIOCADDRULENV_error; } while (0)
2647 if (nv->len > pf_ioctl_maxcount)
2650 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
2651 error = copyin(nv->data, nvlpacked, nv->len);
2655 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2659 if (! nvlist_exists_number(nvl, "ticket"))
2661 ticket = nvlist_get_number(nvl, "ticket");
2663 if (! nvlist_exists_number(nvl, "pool_ticket"))
2665 pool_ticket = nvlist_get_number(nvl, "pool_ticket");
2667 if (! nvlist_exists_nvlist(nvl, "rule"))
2670 error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
2675 if (nvlist_exists_string(nvl, "anchor"))
2676 anchor = nvlist_get_string(nvl, "anchor");
2677 if (nvlist_exists_string(nvl, "anchor_call"))
2678 anchor_call = nvlist_get_string(nvl, "anchor_call");
2680 if ((error = nvlist_error(nvl)))
2683 /* Frees rule on error */
2684 error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
2687 nvlist_destroy(nvl);
2688 free(nvlpacked, M_TEMP);
2691 DIOCADDRULENV_error:
2692 pf_krule_free(rule);
2693 nvlist_destroy(nvl);
2694 free(nvlpacked, M_TEMP);
2699 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
2700 struct pf_krule *rule;
2702 rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
2703 error = pf_rule_to_krule(&pr->rule, rule);
2705 free(rule, M_PFRULE);
2709 pr->anchor[sizeof(pr->anchor) - 1] = 0;
2711 /* Frees rule on error */
2712 error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
2713 pr->anchor, pr->anchor_call, td);
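/*
 * DIOCGETRULES reports the number of rules in the requested active
 * ruleset (in pr->nr) together with the ticket that subsequent
 * DIOCGETRULE calls must present.
 */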
2717 case DIOCGETRULES: {
2718 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
2719 struct pf_kruleset *ruleset;
2720 struct pf_krule *tail;
2724 pr->anchor[sizeof(pr->anchor) - 1] = 0;
2725 ruleset = pf_find_kruleset(pr->anchor);
2726 if (ruleset == NULL) {
2731 rs_num = pf_get_ruleset_number(pr->rule.action);
2732 if (rs_num >= PF_RULESET_MAX) {
2737 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2740 pr->nr = tail->nr + 1;
2743 pr->ticket = ruleset->rules[rs_num].active.ticket;
2749 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
2750 struct pf_kruleset *ruleset;
2751 struct pf_krule *rule;
2755 pr->anchor[sizeof(pr->anchor) - 1] = 0;
2756 ruleset = pf_find_kruleset(pr->anchor);
2757 if (ruleset == NULL) {
2762 rs_num = pf_get_ruleset_number(pr->rule.action);
2763 if (rs_num >= PF_RULESET_MAX) {
2768 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
2773 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2774 while ((rule != NULL) && (rule->nr != pr->nr))
2775 rule = TAILQ_NEXT(rule, entries);
2782 pf_krule_to_rule(rule, &pr->rule);
2784 if (pf_kanchor_copyout(ruleset, rule, pr)) {
2789 pf_addr_copyout(&pr->rule.src.addr);
2790 pf_addr_copyout(&pr->rule.dst.addr);
2792 if (pr->action == PF_GET_CLR_CNTR) {
2793 counter_u64_zero(rule->evaluations);
2794 for (int i = 0; i < 2; i++) {
2795 counter_u64_zero(rule->packets[i]);
2796 counter_u64_zero(rule->bytes[i]);
2798 counter_u64_zero(rule->states_tot);
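/*
 * DIOCGETRULENV returns a single rule, selected by anchor, ruleset
 * number, ticket and rule number, as a packed nvlist.  If the request
 * sets "clear_counter" the rule's counters are zeroed afterwards, which
 * requires the descriptor to be open for writing.
 */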
2804 case DIOCGETRULENV: {
2805 struct pfioc_nv *nv = (struct pfioc_nv *)addr;
2806 nvlist_t *nvrule = NULL;
2807 nvlist_t *nvl = NULL;
2808 struct pf_kruleset *ruleset;
2809 struct pf_krule *rule;
2810 void *nvlpacked = NULL;
2812 bool clear_counter = false;
2814 #define ERROUT(x) do { error = (x); goto DIOCGETRULENV_error; } while (0)
2816 if (nv->len > pf_ioctl_maxcount)
2819 /* Copy the request in */
2820 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
2821 if (nvlpacked == NULL)
2824 error = copyin(nv->data, nvlpacked, nv->len);
2828 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2832 if (! nvlist_exists_string(nvl, "anchor"))
2834 if (! nvlist_exists_number(nvl, "ruleset"))
2836 if (! nvlist_exists_number(nvl, "ticket"))
2838 if (! nvlist_exists_number(nvl, "nr"))
2841 if (nvlist_exists_bool(nvl, "clear_counter"))
2842 clear_counter = nvlist_get_bool(nvl, "clear_counter");
2844 if (clear_counter && !(flags & FWRITE))
2847 nr = nvlist_get_number(nvl, "nr");
2850 ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
2851 if (ruleset == NULL) {
2856 rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
2857 if (rs_num >= PF_RULESET_MAX) {
2862 if (nvlist_get_number(nvl, "ticket") !=
2863 ruleset->rules[rs_num].active.ticket) {
2869 if ((error = nvlist_error(nvl))) {
2874 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2875 while ((rule != NULL) && (rule->nr != nr))
2876 rule = TAILQ_NEXT(rule, entries);
2883 nvrule = pf_krule_to_nvrule(rule);
2885 nvlist_destroy(nvl);
2886 nvl = nvlist_create(0);
2891 nvlist_add_number(nvl, "nr", nr);
2892 nvlist_add_nvlist(nvl, "rule", nvrule);
2894 if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
2899 free(nvlpacked, M_TEMP);
2900 nvlpacked = nvlist_pack(nvl, &nv->len);
2901 if (nvlpacked == NULL) {
2906 if (nv->size == 0) {
2910 else if (nv->size < nv->len) {
2915 error = copyout(nvlpacked, nv->data, nv->len);
2917 if (clear_counter) {
2918 counter_u64_zero(rule->evaluations);
2919 for (int i = 0; i < 2; i++) {
2920 counter_u64_zero(rule->packets[i]);
2921 counter_u64_zero(rule->bytes[i]);
2923 counter_u64_zero(rule->states_tot);
2928 DIOCGETRULENV_error:
2929 free(nvlpacked, M_TEMP);
2930 nvlist_destroy(nvrule);
2931 nvlist_destroy(nvl);
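/*
 * DIOCCHANGERULE edits the active ruleset in place: depending on
 * pcr->action it inserts a new rule at the head or tail, before or
 * after an existing rule, removes a rule, or merely bumps and returns
 * the ruleset ticket (PF_CHANGE_GET_TICKET).
 */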
2936 case DIOCCHANGERULE: {
2937 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
2938 struct pf_kruleset *ruleset;
2939 struct pf_krule *oldrule = NULL, *newrule = NULL;
2940 struct pfi_kkif *kif = NULL;
2941 struct pf_kpooladdr *pa;
2945 if (pcr->action < PF_CHANGE_ADD_HEAD ||
2946 pcr->action > PF_CHANGE_GET_TICKET) {
2950 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
2955 if (pcr->action != PF_CHANGE_REMOVE) {
2956 newrule = malloc(sizeof(*newrule), M_PFRULE, M_WAITOK);
2957 error = pf_rule_to_krule(&pcr->rule, newrule);
2959 free(newrule, M_PFRULE);
2963 if (newrule->ifname[0])
2964 kif = pf_kkif_create(M_WAITOK);
2965 newrule->evaluations = counter_u64_alloc(M_WAITOK);
2966 for (int i = 0; i < 2; i++) {
2967 newrule->packets[i] =
2968 counter_u64_alloc(M_WAITOK);
2970 counter_u64_alloc(M_WAITOK);
2972 newrule->states_cur = counter_u64_alloc(M_WAITOK);
2973 newrule->states_tot = counter_u64_alloc(M_WAITOK);
2974 newrule->src_nodes = counter_u64_alloc(M_WAITOK);
2975 newrule->cuid = td->td_ucred->cr_ruid;
2976 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
2977 TAILQ_INIT(&newrule->rpool.list);
2979 #define ERROUT(x) { error = (x); goto DIOCCHANGERULE_error; }
2982 if (!(pcr->action == PF_CHANGE_REMOVE ||
2983 pcr->action == PF_CHANGE_GET_TICKET) &&
2984 pcr->pool_ticket != V_ticket_pabuf)
2987 ruleset = pf_find_kruleset(pcr->anchor);
2988 if (ruleset == NULL)
2991 rs_num = pf_get_ruleset_number(pcr->rule.action);
2992 if (rs_num >= PF_RULESET_MAX)
2995 if (pcr->action == PF_CHANGE_GET_TICKET) {
2996 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
2998 } else if (pcr->ticket !=
2999 ruleset->rules[rs_num].active.ticket)
3002 if (pcr->action != PF_CHANGE_REMOVE) {
3003 if (newrule->ifname[0]) {
3004 newrule->kif = pfi_kkif_attach(kif,
3007 pfi_kkif_ref(newrule->kif);
3009 newrule->kif = NULL;
3011 if (newrule->rtableid > 0 &&
3012 newrule->rtableid >= rt_numfibs)
3017 if (newrule->qname[0] != 0) {
3019 pf_qname2qid(newrule->qname)) == 0)
3021 else if (newrule->pqname[0] != 0) {
3022 if ((newrule->pqid =
3023 pf_qname2qid(newrule->pqname)) == 0)
3026 newrule->pqid = newrule->qid;
3029 if (newrule->tagname[0])
3031 pf_tagname2tag(newrule->tagname)) == 0)
3033 if (newrule->match_tagname[0])
3034 if ((newrule->match_tag = pf_tagname2tag(
3035 newrule->match_tagname)) == 0)
3037 if (newrule->rt && !newrule->direction)
3041 if (newrule->logif >= PFLOGIFS_MAX)
3043 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3045 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3047 if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3049 TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
3050 if (pa->addr.type == PF_ADDR_TABLE) {
3052 pfr_attach_table(ruleset,
3053 pa->addr.v.tblname);
3054 if (pa->addr.p.tbl == NULL)
3058 newrule->overload_tbl = NULL;
3059 if (newrule->overload_tblname[0]) {
3060 if ((newrule->overload_tbl = pfr_attach_table(
3061 ruleset, newrule->overload_tblname)) ==
3065 newrule->overload_tbl->pfrkt_flags |=
3069 pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list);
3070 if (((((newrule->action == PF_NAT) ||
3071 (newrule->action == PF_RDR) ||
3072 (newrule->action == PF_BINAT) ||
3073 (newrule->rt > PF_NOPFROUTE)) &&
3074 !newrule->anchor)) &&
3075 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
3079 pf_free_rule(newrule);
3084 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3086 pf_empty_kpool(&V_pf_pabuf);
3088 if (pcr->action == PF_CHANGE_ADD_HEAD)
3089 oldrule = TAILQ_FIRST(
3090 ruleset->rules[rs_num].active.ptr);
3091 else if (pcr->action == PF_CHANGE_ADD_TAIL)
3092 oldrule = TAILQ_LAST(
3093 ruleset->rules[rs_num].active.ptr, pf_krulequeue);
3095 oldrule = TAILQ_FIRST(
3096 ruleset->rules[rs_num].active.ptr);
3097 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3098 oldrule = TAILQ_NEXT(oldrule, entries);
3099 if (oldrule == NULL) {
3100 if (newrule != NULL)
3101 pf_free_rule(newrule);
3108 if (pcr->action == PF_CHANGE_REMOVE) {
3109 pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
3111 ruleset->rules[rs_num].active.rcount--;
3113 if (oldrule == NULL)
3115 ruleset->rules[rs_num].active.ptr,
3117 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3118 pcr->action == PF_CHANGE_ADD_BEFORE)
3119 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3122 ruleset->rules[rs_num].active.ptr,
3123 oldrule, newrule, entries);
3124 ruleset->rules[rs_num].active.rcount++;
3128 TAILQ_FOREACH(oldrule,
3129 ruleset->rules[rs_num].active.ptr, entries)
3132 ruleset->rules[rs_num].active.ticket++;
3134 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3135 pf_remove_if_empty_kruleset(ruleset);
3141 DIOCCHANGERULE_error:
3143 pf_krule_free(newrule);
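/*
 * DIOCCLRSTATES flushes the state table, optionally restricted to
 * states created on the interface named in psk_ifname.  PFSTATE_NOSYNC
 * keeps pfsync from emitting one delete message per state; pfsync is
 * notified once through V_pfsync_clear_states_ptr() instead.
 */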
3148 case DIOCCLRSTATES: {
3150 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
3151 u_int i, killed = 0;
3153 for (i = 0; i <= pf_hashmask; i++) {
3154 struct pf_idhash *ih = &V_pf_idhash[i];
3156 relock_DIOCCLRSTATES:
3157 PF_HASHROW_LOCK(ih);
3158 LIST_FOREACH(s, &ih->states, entry)
3159 if (!psk->psk_ifname[0] ||
3160 !strcmp(psk->psk_ifname,
3161 s->kif->pfik_name)) {
3163 * Don't send out individual delete messages.
3166 s->state_flags |= PFSTATE_NOSYNC;
3167 pf_unlink_state(s, PF_ENTER_LOCKED);
3169 goto relock_DIOCCLRSTATES;
3171 PF_HASHROW_UNLOCK(ih);
3173 psk->psk_killed = killed;
3174 if (V_pfsync_clear_states_ptr != NULL)
3175 V_pfsync_clear_states_ptr(V_pf_status.hostid, psk->psk_ifname);
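/*
 * DIOCKILLSTATES removes the states matching the filter in psk: either
 * a single state identified by id/creatorid, or every state matching
 * the given address family, protocol, source/destination addresses and
 * port ranges, rule label and interface.
 */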
3179 case DIOCKILLSTATES: {
3181 struct pf_state_key *sk;
3182 struct pf_addr *srcaddr, *dstaddr;
3183 u_int16_t srcport, dstport;
3184 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
3185 u_int i, killed = 0;
3187 if (psk->psk_pfcmp.id) {
3188 if (psk->psk_pfcmp.creatorid == 0)
3189 psk->psk_pfcmp.creatorid = V_pf_status.hostid;
3190 if ((s = pf_find_state_byid(psk->psk_pfcmp.id,
3191 psk->psk_pfcmp.creatorid))) {
3192 pf_unlink_state(s, PF_ENTER_LOCKED);
3193 psk->psk_killed = 1;
3198 for (i = 0; i <= pf_hashmask; i++) {
3199 struct pf_idhash *ih = &V_pf_idhash[i];
3201 relock_DIOCKILLSTATES:
3202 PF_HASHROW_LOCK(ih);
3203 LIST_FOREACH(s, &ih->states, entry) {
3204 sk = s->key[PF_SK_WIRE];
3205 if (s->direction == PF_OUT) {
3206 srcaddr = &sk->addr[1];
3207 dstaddr = &sk->addr[0];
3208 srcport = sk->port[1];
3209 dstport = sk->port[0];
3211 srcaddr = &sk->addr[0];
3212 dstaddr = &sk->addr[1];
3213 srcport = sk->port[0];
3214 dstport = sk->port[1];
3217 if ((!psk->psk_af || sk->af == psk->psk_af)
3218 && (!psk->psk_proto || psk->psk_proto ==
3220 PF_MATCHA(psk->psk_src.neg,
3221 &psk->psk_src.addr.v.a.addr,
3222 &psk->psk_src.addr.v.a.mask,
3224 PF_MATCHA(psk->psk_dst.neg,
3225 &psk->psk_dst.addr.v.a.addr,
3226 &psk->psk_dst.addr.v.a.mask,
3228 (psk->psk_src.port_op == 0 ||
3229 pf_match_port(psk->psk_src.port_op,
3230 psk->psk_src.port[0], psk->psk_src.port[1],
3232 (psk->psk_dst.port_op == 0 ||
3233 pf_match_port(psk->psk_dst.port_op,
3234 psk->psk_dst.port[0], psk->psk_dst.port[1],
3236 (!psk->psk_label[0] ||
3237 (s->rule.ptr->label[0] &&
3238 !strcmp(psk->psk_label,
3239 s->rule.ptr->label))) &&
3240 (!psk->psk_ifname[0] ||
3241 !strcmp(psk->psk_ifname,
3242 s->kif->pfik_name))) {
3243 pf_unlink_state(s, PF_ENTER_LOCKED);
3245 goto relock_DIOCKILLSTATES;
3248 PF_HASHROW_UNLOCK(ih);
3250 psk->psk_killed = killed;
3254 case DIOCADDSTATE: {
3255 struct pfioc_state *ps = (struct pfioc_state *)addr;
3256 struct pfsync_state *sp = &ps->state;
3258 if (sp->timeout >= PFTM_MAX) {
3262 if (V_pfsync_state_import_ptr != NULL) {
3264 error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL);
3271 case DIOCGETSTATE: {
3272 struct pfioc_state *ps = (struct pfioc_state *)addr;
3275 s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
3281 pfsync_state_export(&ps->state, s);
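/*
 * DIOCGETSTATES exports the state table in pfsync_state format.  A zero
 * ps_len only reports the buffer size required for a full dump;
 * otherwise as many states as fit in the supplied buffer are copied
 * out.
 */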
3286 case DIOCGETSTATES: {
3287 struct pfioc_states *ps = (struct pfioc_states *)addr;
3289 struct pfsync_state *pstore, *p;
3292 if (ps->ps_len <= 0) {
3293 nr = uma_zone_get_cur(V_pf_state_z);
3294 ps->ps_len = sizeof(struct pfsync_state) * nr;
3298 p = pstore = malloc(ps->ps_len, M_TEMP, M_WAITOK | M_ZERO);
3301 for (i = 0; i <= pf_hashmask; i++) {
3302 struct pf_idhash *ih = &V_pf_idhash[i];
3304 PF_HASHROW_LOCK(ih);
3305 LIST_FOREACH(s, &ih->states, entry) {
3306 if (s->timeout == PFTM_UNLINKED)
3309 if ((nr+1) * sizeof(*p) > ps->ps_len) {
3310 PF_HASHROW_UNLOCK(ih);
3311 goto DIOCGETSTATES_full;
3313 pfsync_state_export(p, s);
3317 PF_HASHROW_UNLOCK(ih);
3320 error = copyout(pstore, ps->ps_states,
3321 sizeof(struct pfsync_state) * nr);
3323 free(pstore, M_TEMP);
3326 ps->ps_len = sizeof(struct pfsync_state) * nr;
3327 free(pstore, M_TEMP);
3332 case DIOCGETSTATUS: {
3333 struct pf_status *s = (struct pf_status *)addr;
3336 s->running = V_pf_status.running;
3337 s->since = V_pf_status.since;
3338 s->debug = V_pf_status.debug;
3339 s->hostid = V_pf_status.hostid;
3340 s->states = V_pf_status.states;
3341 s->src_nodes = V_pf_status.src_nodes;
3343 for (int i = 0; i < PFRES_MAX; i++)
3345 counter_u64_fetch(V_pf_status.counters[i]);
3346 for (int i = 0; i < LCNT_MAX; i++)
3348 counter_u64_fetch(V_pf_status.lcounters[i]);
3349 for (int i = 0; i < FCNT_MAX; i++)
3351 counter_u64_fetch(V_pf_status.fcounters[i]);
3352 for (int i = 0; i < SCNT_MAX; i++)
3354 counter_u64_fetch(V_pf_status.scounters[i]);
3356 bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ);
3357 bcopy(V_pf_status.pf_chksum, s->pf_chksum,
3358 PF_MD5_DIGEST_LENGTH);
3360 pfi_update_status(s->ifname, s);
3365 case DIOCSETSTATUSIF: {
3366 struct pfioc_if *pi = (struct pfioc_if *)addr;
3368 if (pi->ifname[0] == 0) {
3369 bzero(V_pf_status.ifname, IFNAMSIZ);
3373 strlcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
3378 case DIOCCLRSTATUS: {
3380 for (int i = 0; i < PFRES_MAX; i++)
3381 counter_u64_zero(V_pf_status.counters[i]);
3382 for (int i = 0; i < FCNT_MAX; i++)
3383 counter_u64_zero(V_pf_status.fcounters[i]);
3384 for (int i = 0; i < SCNT_MAX; i++)
3385 counter_u64_zero(V_pf_status.scounters[i]);
3386 for (int i = 0; i < LCNT_MAX; i++)
3387 counter_u64_zero(V_pf_status.lcounters[i]);
3388 V_pf_status.since = time_second;
3389 if (*V_pf_status.ifname)
3390 pfi_update_status(V_pf_status.ifname, NULL);
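/*
 * DIOCNATLOOK resolves the NAT translation of an existing connection:
 * given the addresses, ports, protocol and direction as seen by the
 * caller, it looks up the matching state and reports the translated
 * endpoints in rsaddr/rsport and rdaddr/rdport.  E2BIG is returned when
 * more than one state matches.
 */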
3396 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
3397 struct pf_state_key *sk;
3398 struct pf_state *state;
3399 struct pf_state_key_cmp key;
3400 int m = 0, direction = pnl->direction;
3403 /* NATLOOK src and dst are reversed, so reverse sidx/didx */
3404 sidx = (direction == PF_IN) ? 1 : 0;
3405 didx = (direction == PF_IN) ? 0 : 1;
3408 PF_AZERO(&pnl->saddr, pnl->af) ||
3409 PF_AZERO(&pnl->daddr, pnl->af) ||
3410 ((pnl->proto == IPPROTO_TCP ||
3411 pnl->proto == IPPROTO_UDP) &&
3412 (!pnl->dport || !pnl->sport)))
3415 bzero(&key, sizeof(key));
3417 key.proto = pnl->proto;
3418 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
3419 key.port[sidx] = pnl->sport;
3420 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
3421 key.port[didx] = pnl->dport;
3423 state = pf_find_state_all(&key, direction, &m);
3426 error = E2BIG; /* more than one state */
3427 else if (state != NULL) {
3428 /* XXXGL: not locked read */
3429 sk = state->key[sidx];
3430 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
3431 pnl->rsport = sk->port[sidx];
3432 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
3433 pnl->rdport = sk->port[didx];
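/*
 * DIOCSETTIMEOUT updates one entry of the default rule's timeout table;
 * shrinking PFTM_INTERVAL wakes the purge thread so the shorter
 * interval takes effect immediately.  A caller might use it roughly
 * like this (sketch only, "dev" is an open /dev/pf descriptor):
 *
 *	struct pfioc_tm pt = { .timeout = PFTM_INTERVAL, .seconds = 10 };
 *	ioctl(dev, DIOCSETTIMEOUT, &pt);
 */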
3440 case DIOCSETTIMEOUT: {
3441 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
3444 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3450 old = V_pf_default_rule.timeout[pt->timeout];
3451 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
3453 V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
3454 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
3455 wakeup(pf_purge_thread);
3461 case DIOCGETTIMEOUT: {
3462 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
3464 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3469 pt->seconds = V_pf_default_rule.timeout[pt->timeout];
3474 case DIOCGETLIMIT: {
3475 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
3477 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3482 pl->limit = V_pf_limits[pl->index].limit;
3487 case DIOCSETLIMIT: {
3488 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
3492 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3493 V_pf_limits[pl->index].zone == NULL) {
3498 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
3499 old_limit = V_pf_limits[pl->index].limit;
3500 V_pf_limits[pl->index].limit = pl->limit;
3501 pl->limit = old_limit;
3506 case DIOCSETDEBUG: {
3507 u_int32_t *level = (u_int32_t *)addr;
3510 V_pf_status.debug = *level;
3515 case DIOCCLRRULECTRS: {
3516 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
3517 struct pf_kruleset *ruleset = &pf_main_ruleset;
3518 struct pf_krule *rule;
3522 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
3523 counter_u64_zero(rule->evaluations);
3524 for (int i = 0; i < 2; i++) {
3525 counter_u64_zero(rule->packets[i]);
3526 counter_u64_zero(rule->bytes[i]);
3533 case DIOCGIFSPEEDV0:
3534 case DIOCGIFSPEEDV1: {
3535 struct pf_ifspeed_v1 *psp = (struct pf_ifspeed_v1 *)addr;
3536 struct pf_ifspeed_v1 ps;
3539 if (psp->ifname[0] != 0) {
3540 /* Can we completely trust user-land? */
3541 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
3542 ifp = ifunit(ps.ifname);
3545 (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
3546 if (cmd == DIOCGIFSPEEDV1)
3547 psp->baudrate = ifp->if_baudrate;
3556 case DIOCSTARTALTQ: {
3557 struct pf_altq *altq;
3560 /* enable all altq interfaces on active list */
3561 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
3562 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
3563 error = pf_enable_altq(altq);
3569 V_pf_altq_running = 1;
3571 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
3575 case DIOCSTOPALTQ: {
3576 struct pf_altq *altq;
3579 /* disable all altq interfaces on active list */
3580 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
3581 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
3582 error = pf_disable_altq(altq);
3588 V_pf_altq_running = 0;
3590 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
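/*
 * DIOCADDALTQV1 follows the same transaction model as the rulesets: the
 * queue definition is staged on the inactive ALTQ lists under
 * V_ticket_altqs_inactive and only becomes effective once the
 * transaction is committed.
 */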
3595 case DIOCADDALTQV1: {
3596 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
3597 struct pf_altq *altq, *a;
3600 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
3601 error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
3604 altq->local_flags = 0;
3607 if (pa->ticket != V_ticket_altqs_inactive) {
3609 free(altq, M_PFALTQ);
3615 * if this is for a queue, find the discipline and
3616 * copy the necessary fields
3618 if (altq->qname[0] != 0) {
3619 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
3622 free(altq, M_PFALTQ);
3625 altq->altq_disc = NULL;
3626 TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
3627 if (strncmp(a->ifname, altq->ifname,
3629 altq->altq_disc = a->altq_disc;
3635 if ((ifp = ifunit(altq->ifname)) == NULL)
3636 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
3638 error = altq_add(ifp, altq);
3642 free(altq, M_PFALTQ);
3646 if (altq->qname[0] != 0)
3647 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
3649 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
3650 /* version error check done on import above */
3651 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
3656 case DIOCGETALTQSV0:
3657 case DIOCGETALTQSV1: {
3658 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
3659 struct pf_altq *altq;
3663 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
3665 TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
3667 pa->ticket = V_ticket_altqs_active;
3673 case DIOCGETALTQV1: {
3674 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
3675 struct pf_altq *altq;
3678 if (pa->ticket != V_ticket_altqs_active) {
3683 altq = pf_altq_get_nth_active(pa->nr);
3689 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
3694 case DIOCCHANGEALTQV0:
3695 case DIOCCHANGEALTQV1:
3696 /* CHANGEALTQ not supported yet! */
3700 case DIOCGETQSTATSV0:
3701 case DIOCGETQSTATSV1: {
3702 struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr;
3703 struct pf_altq *altq;
3708 if (pq->ticket != V_ticket_altqs_active) {
3713 nbytes = pq->nbytes;
3714 altq = pf_altq_get_nth_active(pq->nr);
3721 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
3727 if (cmd == DIOCGETQSTATSV0)
3728 version = 0; /* DIOCGETQSTATSV0 means stats struct v0 */
3730 version = pq->version;
3731 error = altq_getqstats(altq, pq->buf, &nbytes, version);
3733 pq->scheduler = altq->scheduler;
3734 pq->nbytes = nbytes;
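/*
 * DIOCBEGINADDRS and DIOCADDADDR build the pool address buffer consumed
 * by a following DIOCADDRULE/DIOCCHANGERULE: BEGINADDRS empties the
 * buffer and issues a fresh ticket, ADDADDR appends one validated pool
 * address under that ticket.
 */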
3740 case DIOCBEGINADDRS: {
3741 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
3744 pf_empty_kpool(&V_pf_pabuf);
3745 pp->ticket = ++V_ticket_pabuf;
3751 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
3752 struct pf_kpooladdr *pa;
3753 struct pfi_kkif *kif = NULL;
3756 if (pp->af == AF_INET) {
3757 error = EAFNOSUPPORT;
3762 if (pp->af == AF_INET6) {
3763 error = EAFNOSUPPORT;
3767 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
3768 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
3769 pp->addr.addr.type != PF_ADDR_TABLE) {
3773 if (pp->addr.addr.p.dyn != NULL) {
3777 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
3778 pf_pooladdr_to_kpooladdr(&pp->addr, pa);
3780 kif = pf_kkif_create(M_WAITOK);
3782 if (pp->ticket != V_ticket_pabuf) {
3790 if (pa->ifname[0]) {
3791 pa->kif = pfi_kkif_attach(kif, pa->ifname);
3793 pfi_kkif_ref(pa->kif);
3796 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
3797 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
3799 pfi_kkif_unref(pa->kif);
3804 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
3809 case DIOCGETADDRS: {
3810 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
3811 struct pf_kpool *pool;
3812 struct pf_kpooladdr *pa;
3816 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
3817 pp->r_num, 0, 1, 0);
3823 TAILQ_FOREACH(pa, &pool->list, entries)
3830 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
3831 struct pf_kpool *pool;
3832 struct pf_kpooladdr *pa;
3836 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
3837 pp->r_num, 0, 1, 1);
3843 pa = TAILQ_FIRST(&pool->list);
3844 while ((pa != NULL) && (nr < pp->nr)) {
3845 pa = TAILQ_NEXT(pa, entries);
3853 pf_kpooladdr_to_pooladdr(pa, &pp->addr);
3854 pf_addr_copyout(&pp->addr.addr);
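/*
 * DIOCCHANGEADDR edits the address pool of an existing rule in place,
 * mirroring DIOCCHANGERULE: add at head or tail, insert before or after
 * an existing entry, or remove one, then reset the pool's current
 * address pointer to the head of the list.
 */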
3859 case DIOCCHANGEADDR: {
3860 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
3861 struct pf_kpool *pool;
3862 struct pf_kpooladdr *oldpa = NULL, *newpa = NULL;
3863 struct pf_kruleset *ruleset;
3864 struct pfi_kkif *kif = NULL;
3866 if (pca->action < PF_CHANGE_ADD_HEAD ||
3867 pca->action > PF_CHANGE_REMOVE) {
3871 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
3872 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
3873 pca->addr.addr.type != PF_ADDR_TABLE) {
3877 if (pca->addr.addr.p.dyn != NULL) {
3882 if (pca->action != PF_CHANGE_REMOVE) {
3884 if (pca->af == AF_INET) {
3885 error = EAFNOSUPPORT;
3890 if (pca->af == AF_INET6) {
3891 error = EAFNOSUPPORT;
3895 newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
3896 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
3897 if (newpa->ifname[0])
3898 kif = pf_kkif_create(M_WAITOK);
3901 #define ERROUT(x) { error = (x); goto DIOCCHANGEADDR_error; }
3903 ruleset = pf_find_kruleset(pca->anchor);
3904 if (ruleset == NULL)
3907 pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
3908 pca->r_num, pca->r_last, 1, 1);
3912 if (pca->action != PF_CHANGE_REMOVE) {
3913 if (newpa->ifname[0]) {
3914 newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
3915 pfi_kkif_ref(newpa->kif);
3919 switch (newpa->addr.type) {
3920 case PF_ADDR_DYNIFTL:
3921 error = pfi_dynaddr_setup(&newpa->addr,
3925 newpa->addr.p.tbl = pfr_attach_table(ruleset,
3926 newpa->addr.v.tblname);
3927 if (newpa->addr.p.tbl == NULL)
3932 goto DIOCCHANGEADDR_error;
3935 switch (pca->action) {
3936 case PF_CHANGE_ADD_HEAD:
3937 oldpa = TAILQ_FIRST(&pool->list);
3939 case PF_CHANGE_ADD_TAIL:
3940 oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
3943 oldpa = TAILQ_FIRST(&pool->list);
3944 for (int i = 0; oldpa && i < pca->nr; i++)
3945 oldpa = TAILQ_NEXT(oldpa, entries);
3951 if (pca->action == PF_CHANGE_REMOVE) {
3952 TAILQ_REMOVE(&pool->list, oldpa, entries);
3953 switch (oldpa->addr.type) {
3954 case PF_ADDR_DYNIFTL:
3955 pfi_dynaddr_remove(oldpa->addr.p.dyn);
3958 pfr_detach_table(oldpa->addr.p.tbl);
3962 pfi_kkif_unref(oldpa->kif);
3963 free(oldpa, M_PFRULE);
3966 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
3967 else if (pca->action == PF_CHANGE_ADD_HEAD ||
3968 pca->action == PF_CHANGE_ADD_BEFORE)
3969 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
3971 TAILQ_INSERT_AFTER(&pool->list, oldpa,
3975 pool->cur = TAILQ_FIRST(&pool->list);
3976 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
3981 DIOCCHANGEADDR_error:
3982 if (newpa != NULL) {
3984 pfi_kkif_unref(newpa->kif);
3985 free(newpa, M_PFRULE);
3992 case DIOCGETRULESETS: {
3993 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
3994 struct pf_kruleset *ruleset;
3995 struct pf_kanchor *anchor;
3998 pr->path[sizeof(pr->path) - 1] = 0;
3999 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4005 if (ruleset->anchor == NULL) {
4006 /* XXX kludge for pf_main_ruleset */
4007 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4008 if (anchor->parent == NULL)
4011 RB_FOREACH(anchor, pf_kanchor_node,
4012 &ruleset->anchor->children)
4019 case DIOCGETRULESET: {
4020 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
4021 struct pf_kruleset *ruleset;
4022 struct pf_kanchor *anchor;
4026 pr->path[sizeof(pr->path) - 1] = 0;
4027 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4033 if (ruleset->anchor == NULL) {
4034 /* XXX kludge for pf_main_ruleset */
4035 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4036 if (anchor->parent == NULL && nr++ == pr->nr) {
4037 strlcpy(pr->name, anchor->name,
4042 RB_FOREACH(anchor, pf_kanchor_node,
4043 &ruleset->anchor->children)
4044 if (nr++ == pr->nr) {
4045 strlcpy(pr->name, anchor->name,
4056 case DIOCRCLRTABLES: {
4057 struct pfioc_table *io = (struct pfioc_table *)addr;
4059 if (io->pfrio_esize != 0) {
4064 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
4065 io->pfrio_flags | PFR_FLAG_USERIOCTL);
4070 case DIOCRADDTABLES: {
4071 struct pfioc_table *io = (struct pfioc_table *)addr;
4072 struct pfr_table *pfrts;
4075 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4080 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4081 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4086 totlen = io->pfrio_size * sizeof(struct pfr_table);
4087 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4089 error = copyin(io->pfrio_buffer, pfrts, totlen);
4091 free(pfrts, M_TEMP);
4095 error = pfr_add_tables(pfrts, io->pfrio_size,
4096 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4098 free(pfrts, M_TEMP);
4102 case DIOCRDELTABLES: {
4103 struct pfioc_table *io = (struct pfioc_table *)addr;
4104 struct pfr_table *pfrts;
4107 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4112 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4113 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4118 totlen = io->pfrio_size * sizeof(struct pfr_table);
4119 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4121 error = copyin(io->pfrio_buffer, pfrts, totlen);
4123 free(pfrts, M_TEMP);
4127 error = pfr_del_tables(pfrts, io->pfrio_size,
4128 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4130 free(pfrts, M_TEMP);
4134 case DIOCRGETTABLES: {
4135 struct pfioc_table *io = (struct pfioc_table *)addr;
4136 struct pfr_table *pfrts;
4140 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4145 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4151 io->pfrio_size = min(io->pfrio_size, n);
4153 totlen = io->pfrio_size * sizeof(struct pfr_table);
4155 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4157 if (pfrts == NULL) {
4162 error = pfr_get_tables(&io->pfrio_table, pfrts,
4163 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4166 error = copyout(pfrts, io->pfrio_buffer, totlen);
4167 free(pfrts, M_TEMP);
4171 case DIOCRGETTSTATS: {
4172 struct pfioc_table *io = (struct pfioc_table *)addr;
4173 struct pfr_tstats *pfrtstats;
4177 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
4182 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4188 io->pfrio_size = min(io->pfrio_size, n);
4190 totlen = io->pfrio_size * sizeof(struct pfr_tstats);
4191 pfrtstats = mallocarray(io->pfrio_size,
4192 sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT);
4193 if (pfrtstats == NULL) {
4198 error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
4199 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4202 error = copyout(pfrtstats, io->pfrio_buffer, totlen);
4203 free(pfrtstats, M_TEMP);
4207 case DIOCRCLRTSTATS: {
4208 struct pfioc_table *io = (struct pfioc_table *)addr;
4209 struct pfr_table *pfrts;
4212 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4217 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4218 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4219 /* We used to count tables and use the minimum required
4220 * size, so we didn't fail on overly large requests.
4222 io->pfrio_size = pf_ioctl_maxcount;
4226 totlen = io->pfrio_size * sizeof(struct pfr_table);
4227 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4229 if (pfrts == NULL) {
4233 error = copyin(io->pfrio_buffer, pfrts, totlen);
4235 free(pfrts, M_TEMP);
4240 error = pfr_clr_tstats(pfrts, io->pfrio_size,
4241 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4243 free(pfrts, M_TEMP);
4247 case DIOCRSETTFLAGS: {
4248 struct pfioc_table *io = (struct pfioc_table *)addr;
4249 struct pfr_table *pfrts;
4253 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4259 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4266 io->pfrio_size = min(io->pfrio_size, n);
4269 totlen = io->pfrio_size * sizeof(struct pfr_table);
4270 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4272 error = copyin(io->pfrio_buffer, pfrts, totlen);
4274 free(pfrts, M_TEMP);
4278 error = pfr_set_tflags(pfrts, io->pfrio_size,
4279 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
4280 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4282 free(pfrts, M_TEMP);
4286 case DIOCRCLRADDRS: {
4287 struct pfioc_table *io = (struct pfioc_table *)addr;
4289 if (io->pfrio_esize != 0) {
4294 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
4295 io->pfrio_flags | PFR_FLAG_USERIOCTL);
4300 case DIOCRADDADDRS: {
4301 struct pfioc_table *io = (struct pfioc_table *)addr;
4302 struct pfr_addr *pfras;
4305 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4309 if (io->pfrio_size < 0 ||
4310 io->pfrio_size > pf_ioctl_maxcount ||
4311 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4315 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4316 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4322 error = copyin(io->pfrio_buffer, pfras, totlen);
4324 free(pfras, M_TEMP);
4328 error = pfr_add_addrs(&io->pfrio_table, pfras,
4329 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
4330 PFR_FLAG_USERIOCTL);
4332 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4333 error = copyout(pfras, io->pfrio_buffer, totlen);
4334 free(pfras, M_TEMP);
4338 case DIOCRDELADDRS: {
4339 struct pfioc_table *io = (struct pfioc_table *)addr;
4340 struct pfr_addr *pfras;
4343 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4347 if (io->pfrio_size < 0 ||
4348 io->pfrio_size > pf_ioctl_maxcount ||
4349 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4353 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4354 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4360 error = copyin(io->pfrio_buffer, pfras, totlen);
4362 free(pfras, M_TEMP);
4366 error = pfr_del_addrs(&io->pfrio_table, pfras,
4367 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
4368 PFR_FLAG_USERIOCTL);
4370 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4371 error = copyout(pfras, io->pfrio_buffer, totlen);
4372 free(pfras, M_TEMP);
4376 case DIOCRSETADDRS: {
4377 struct pfioc_table *io = (struct pfioc_table *)addr;
4378 struct pfr_addr *pfras;
4379 size_t totlen, count;
4381 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4385 if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
4389 count = max(io->pfrio_size, io->pfrio_size2);
4390 if (count > pf_ioctl_maxcount ||
4391 WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
4395 totlen = count * sizeof(struct pfr_addr);
4396 pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
4402 error = copyin(io->pfrio_buffer, pfras, totlen);
4404 free(pfras, M_TEMP);
4408 error = pfr_set_addrs(&io->pfrio_table, pfras,
4409 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
4410 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
4411 PFR_FLAG_USERIOCTL, 0);
4413 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4414 error = copyout(pfras, io->pfrio_buffer, totlen);
4415 free(pfras, M_TEMP);
4419 case DIOCRGETADDRS: {
4420 struct pfioc_table *io = (struct pfioc_table *)addr;
4421 struct pfr_addr *pfras;
4424 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4428 if (io->pfrio_size < 0 ||
4429 io->pfrio_size > pf_ioctl_maxcount ||
4430 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4434 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4435 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4442 error = pfr_get_addrs(&io->pfrio_table, pfras,
4443 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4446 error = copyout(pfras, io->pfrio_buffer, totlen);
4447 free(pfras, M_TEMP);
4451 case DIOCRGETASTATS: {
4452 struct pfioc_table *io = (struct pfioc_table *)addr;
4453 struct pfr_astats *pfrastats;
4456 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
4460 if (io->pfrio_size < 0 ||
4461 io->pfrio_size > pf_ioctl_maxcount ||
4462 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
4466 totlen = io->pfrio_size * sizeof(struct pfr_astats);
4467 pfrastats = mallocarray(io->pfrio_size,
4468 sizeof(struct pfr_astats), M_TEMP, M_NOWAIT);
4474 error = pfr_get_astats(&io->pfrio_table, pfrastats,
4475 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4478 error = copyout(pfrastats, io->pfrio_buffer, totlen);
4479 free(pfrastats, M_TEMP);
4483 case DIOCRCLRASTATS: {
4484 struct pfioc_table *io = (struct pfioc_table *)addr;
4485 struct pfr_addr *pfras;
4488 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4492 if (io->pfrio_size < 0 ||
4493 io->pfrio_size > pf_ioctl_maxcount ||
4494 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4498 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4499 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4505 error = copyin(io->pfrio_buffer, pfras, totlen);
4507 free(pfras, M_TEMP);
4511 error = pfr_clr_astats(&io->pfrio_table, pfras,
4512 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
4513 PFR_FLAG_USERIOCTL);
4515 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4516 error = copyout(pfras, io->pfrio_buffer, totlen);
4517 free(pfras, M_TEMP);
4521 case DIOCRTSTADDRS: {
4522 struct pfioc_table *io = (struct pfioc_table *)addr;
4523 struct pfr_addr *pfras;
4526 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4530 if (io->pfrio_size < 0 ||
4531 io->pfrio_size > pf_ioctl_maxcount ||
4532 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4536 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4537 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4543 error = copyin(io->pfrio_buffer, pfras, totlen);
4545 free(pfras, M_TEMP);
4549 error = pfr_tst_addrs(&io->pfrio_table, pfras,
4550 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
4551 PFR_FLAG_USERIOCTL);
4554 error = copyout(pfras, io->pfrio_buffer, totlen);
4555 free(pfras, M_TEMP);
4559 case DIOCRINADEFINE: {
4560 struct pfioc_table *io = (struct pfioc_table *)addr;
4561 struct pfr_addr *pfras;
4564 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4568 if (io->pfrio_size < 0 ||
4569 io->pfrio_size > pf_ioctl_maxcount ||
4570 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4574 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4575 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4581 error = copyin(io->pfrio_buffer, pfras, totlen);
4583 free(pfras, M_TEMP);
4587 error = pfr_ina_define(&io->pfrio_table, pfras,
4588 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
4589 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4591 free(pfras, M_TEMP);
4596 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
4598 error = pf_osfp_add(io);
4604 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
4606 error = pf_osfp_get(io);
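/*
 * DIOCXBEGIN, DIOCXROLLBACK and DIOCXCOMMIT implement the transaction
 * interface used to replace rulesets, tables and ALTQ configuration
 * atomically: BEGIN hands out one ticket per transaction element,
 * ROLLBACK discards the staged inactive state, and COMMIT first
 * verifies every ticket and only then applies all elements.
 */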
4612 struct pfioc_trans *io = (struct pfioc_trans *)addr;
4613 struct pfioc_trans_e *ioes, *ioe;
4617 if (io->esize != sizeof(*ioe)) {
4622 io->size > pf_ioctl_maxcount ||
4623 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
4627 totlen = sizeof(struct pfioc_trans_e) * io->size;
4628 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
4634 error = copyin(io->array, ioes, totlen);
4640 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
4641 switch (ioe->rs_num) {
4643 case PF_RULESET_ALTQ:
4644 if (ioe->anchor[0]) {
4650 if ((error = pf_begin_altq(&ioe->ticket))) {
4657 case PF_RULESET_TABLE:
4659 struct pfr_table table;
4661 bzero(&table, sizeof(table));
4662 strlcpy(table.pfrt_anchor, ioe->anchor,
4663 sizeof(table.pfrt_anchor));
4664 if ((error = pfr_ina_begin(&table,
4665 &ioe->ticket, NULL, 0))) {
4673 if ((error = pf_begin_rules(&ioe->ticket,
4674 ioe->rs_num, ioe->anchor))) {
4683 error = copyout(ioes, io->array, totlen);
4688 case DIOCXROLLBACK: {
4689 struct pfioc_trans *io = (struct pfioc_trans *)addr;
4690 struct pfioc_trans_e *ioe, *ioes;
4694 if (io->esize != sizeof(*ioe)) {
4699 io->size > pf_ioctl_maxcount ||
4700 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
4704 totlen = sizeof(struct pfioc_trans_e) * io->size;
4705 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
4711 error = copyin(io->array, ioes, totlen);
4717 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
4718 switch (ioe->rs_num) {
4720 case PF_RULESET_ALTQ:
4721 if (ioe->anchor[0]) {
4727 if ((error = pf_rollback_altq(ioe->ticket))) {
4730 goto fail; /* really bad */
4734 case PF_RULESET_TABLE:
4736 struct pfr_table table;
4738 bzero(&table, sizeof(table));
4739 strlcpy(table.pfrt_anchor, ioe->anchor,
4740 sizeof(table.pfrt_anchor));
4741 if ((error = pfr_ina_rollback(&table,
4742 ioe->ticket, NULL, 0))) {
4745 goto fail; /* really bad */
4750 if ((error = pf_rollback_rules(ioe->ticket,
4751 ioe->rs_num, ioe->anchor))) {
4754 goto fail; /* really bad */
4765 struct pfioc_trans *io = (struct pfioc_trans *)addr;
4766 struct pfioc_trans_e *ioe, *ioes;
4767 struct pf_kruleset *rs;
4771 if (io->esize != sizeof(*ioe)) {
4777 io->size > pf_ioctl_maxcount ||
4778 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
4783 totlen = sizeof(struct pfioc_trans_e) * io->size;
4784 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
4790 error = copyin(io->array, ioes, totlen);
4796 /* First, make sure everything will succeed. */
4797 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
4798 switch (ioe->rs_num) {
4800 case PF_RULESET_ALTQ:
4801 if (ioe->anchor[0]) {
4807 if (!V_altqs_inactive_open || ioe->ticket !=
4808 V_ticket_altqs_inactive) {
4816 case PF_RULESET_TABLE:
4817 rs = pf_find_kruleset(ioe->anchor);
4818 if (rs == NULL || !rs->topen || ioe->ticket !=
4827 if (ioe->rs_num < 0 || ioe->rs_num >=
4834 rs = pf_find_kruleset(ioe->anchor);
4836 !rs->rules[ioe->rs_num].inactive.open ||
4837 rs->rules[ioe->rs_num].inactive.ticket !=
4847 /* Now do the commit - no errors should happen here. */
4848 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
4849 switch (ioe->rs_num) {
4851 case PF_RULESET_ALTQ:
4852 if ((error = pf_commit_altq(ioe->ticket))) {
4855 goto fail; /* really bad */
4859 case PF_RULESET_TABLE:
4861 struct pfr_table table;
4863 bzero(&table, sizeof(table));
4864 strlcpy(table.pfrt_anchor, ioe->anchor,
4865 sizeof(table.pfrt_anchor));
4866 if ((error = pfr_ina_commit(&table,
4867 ioe->ticket, NULL, NULL, 0))) {
4870 goto fail; /* really bad */
4875 if ((error = pf_commit_rules(ioe->ticket,
4876 ioe->rs_num, ioe->anchor))) {
4879 goto fail; /* really bad */
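/*
 * DIOCGETSRCNODES copies the source node table out to userland; as with
 * DIOCGETSTATES, a zero-length buffer only reports the size needed for
 * a full dump.
 */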
4889 case DIOCGETSRCNODES: {
4890 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
4891 struct pf_srchash *sh;
4892 struct pf_ksrc_node *n;
4893 struct pf_src_node *p, *pstore;
4896 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
4898 PF_HASHROW_LOCK(sh);
4899 LIST_FOREACH(n, &sh->nodes, entry)
4901 PF_HASHROW_UNLOCK(sh);
4904 psn->psn_len = min(psn->psn_len,
4905 sizeof(struct pf_src_node) * nr);
4907 if (psn->psn_len == 0) {
4908 psn->psn_len = sizeof(struct pf_src_node) * nr;
4914 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
4915 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
4917 PF_HASHROW_LOCK(sh);
4918 LIST_FOREACH(n, &sh->nodes, entry) {
4920 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
4923 pf_src_node_copy(n, p);
4928 PF_HASHROW_UNLOCK(sh);
4930 error = copyout(pstore, psn->psn_src_nodes,
4931 sizeof(struct pf_src_node) * nr);
4933 free(pstore, M_TEMP);
4936 psn->psn_len = sizeof(struct pf_src_node) * nr;
4937 free(pstore, M_TEMP);
4941 case DIOCCLRSRCNODES: {
4942 pf_clear_srcnodes(NULL);
4943 pf_purge_expired_src_nodes();
4947 case DIOCKILLSRCNODES:
4948 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
4951 case DIOCSETHOSTID: {
4952 u_int32_t *hostid = (u_int32_t *)addr;
4956 V_pf_status.hostid = arc4random();
4958 V_pf_status.hostid = *hostid;
4969 case DIOCIGETIFACES: {
4970 struct pfioc_iface *io = (struct pfioc_iface *)addr;
4971 struct pfi_kif *ifstore;
4974 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
4979 if (io->pfiio_size < 0 ||
4980 io->pfiio_size > pf_ioctl_maxcount ||
4981 WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
4986 bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
4987 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
4989 if (ifstore == NULL) {
4995 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
4997 error = copyout(ifstore, io->pfiio_buffer, bufsiz);
4998 free(ifstore, M_TEMP);
5002 case DIOCSETIFFLAG: {
5003 struct pfioc_iface *io = (struct pfioc_iface *)addr;
5006 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5011 case DIOCCLRIFFLAG: {
5012 struct pfioc_iface *io = (struct pfioc_iface *)addr;
5015 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5025 if (sx_xlocked(&pf_ioctl_lock))
5026 sx_xunlock(&pf_ioctl_lock);
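/*
 * pfsync_state_export() flattens a kernel state into the pfsync_state
 * representation used on the wire and by the state ioctls: state keys,
 * interface, peers, rule numbers, timers and counters, with times, rule
 * numbers and counters converted to network byte order.
 */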
5033 pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
5035 bzero(sp, sizeof(struct pfsync_state));
5037 /* copy from state key */
5038 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5039 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5040 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5041 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5042 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5043 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5044 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5045 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5046 sp->proto = st->key[PF_SK_WIRE]->proto;
5047 sp->af = st->key[PF_SK_WIRE]->af;
5049 /* copy from state */
5050 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5051 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
5052 sp->creation = htonl(time_uptime - st->creation);
5053 sp->expire = pf_state_expires(st);
5054 if (sp->expire <= time_uptime)
5055 sp->expire = htonl(0);
5057 sp->expire = htonl(sp->expire - time_uptime);
5059 sp->direction = st->direction;
5061 sp->timeout = st->timeout;
5062 sp->state_flags = st->state_flags;
5064 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5065 if (st->nat_src_node)
5066 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5069 sp->creatorid = st->creatorid;
5070 pf_state_peer_hton(&st->src, &sp->src);
5071 pf_state_peer_hton(&st->dst, &sp->dst);
5073 if (st->rule.ptr == NULL)
5074 sp->rule = htonl(-1);
5076 sp->rule = htonl(st->rule.ptr->nr);
5077 if (st->anchor.ptr == NULL)
5078 sp->anchor = htonl(-1);
5080 sp->anchor = htonl(st->anchor.ptr->nr);
5081 if (st->nat_rule.ptr == NULL)
5082 sp->nat_rule = htonl(-1);
5084 sp->nat_rule = htonl(st->nat_rule.ptr->nr);
5086 pf_state_counter_hton(counter_u64_fetch(st->packets[0]),
5088 pf_state_counter_hton(counter_u64_fetch(st->packets[1]),
5090 pf_state_counter_hton(counter_u64_fetch(st->bytes[0]), sp->bytes[0]);
5091 pf_state_counter_hton(counter_u64_fetch(st->bytes[1]), sp->bytes[1]);
5096 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
5098 struct pfr_ktable *kt;
5100 KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
5103 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
5104 kt = kt->pfrkt_root;
5106 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
5111 * XXX - Check for version mismatch!!!
5114 pf_clear_states(void)
5119 for (i = 0; i <= pf_hashmask; i++) {
5120 struct pf_idhash *ih = &V_pf_idhash[i];
5122 PF_HASHROW_LOCK(ih);
5123 LIST_FOREACH(s, &ih->states, entry) {
5124 s->timeout = PFTM_PURGE;
5125 /* Don't send out individual delete messages. */
5126 s->state_flags |= PFSTATE_NOSYNC;
5127 pf_unlink_state(s, PF_ENTER_LOCKED);
5130 PF_HASHROW_UNLOCK(ih);
5135 pf_clear_tables(void)
5137 struct pfioc_table io;
5140 bzero(&io, sizeof(io));
5142 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
5149 pf_clear_srcnodes(struct pf_ksrc_node *n)
5154 for (i = 0; i <= pf_hashmask; i++) {
5155 struct pf_idhash *ih = &V_pf_idhash[i];
5157 PF_HASHROW_LOCK(ih);
5158 LIST_FOREACH(s, &ih->states, entry) {
5159 if (n == NULL || n == s->src_node)
5161 if (n == NULL || n == s->nat_src_node)
5162 s->nat_src_node = NULL;
5164 PF_HASHROW_UNLOCK(ih);
5168 struct pf_srchash *sh;
5170 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5172 PF_HASHROW_LOCK(sh);
5173 LIST_FOREACH(n, &sh->nodes, entry) {
5177 PF_HASHROW_UNLOCK(sh);
5180 /* XXX: hash slot should already be locked here. */
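/*
 * pf_kill_srcnodes() unlinks every source node whose source and
 * destination addresses match the filter in psnk, drops the references
 * that existing states still hold on those nodes, and reports the
 * number of nodes freed in psnk_killed.
 */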
5187 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
5189 struct pf_ksrc_node_list kill;
5192 for (int i = 0; i <= pf_srchashmask; i++) {
5193 struct pf_srchash *sh = &V_pf_srchash[i];
5194 struct pf_ksrc_node *sn, *tmp;
5196 PF_HASHROW_LOCK(sh);
5197 LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
5198 if (PF_MATCHA(psnk->psnk_src.neg,
5199 &psnk->psnk_src.addr.v.a.addr,
5200 &psnk->psnk_src.addr.v.a.mask,
5201 &sn->addr, sn->af) &&
5202 PF_MATCHA(psnk->psnk_dst.neg,
5203 &psnk->psnk_dst.addr.v.a.addr,
5204 &psnk->psnk_dst.addr.v.a.mask,
5205 &sn->raddr, sn->af)) {
5206 pf_unlink_src_node(sn);
5207 LIST_INSERT_HEAD(&kill, sn, entry);
5210 PF_HASHROW_UNLOCK(sh);
5213 for (int i = 0; i <= pf_hashmask; i++) {
5214 struct pf_idhash *ih = &V_pf_idhash[i];
5217 PF_HASHROW_LOCK(ih);
5218 LIST_FOREACH(s, &ih->states, entry) {
5219 if (s->src_node && s->src_node->expire == 1)
5221 if (s->nat_src_node && s->nat_src_node->expire == 1)
5222 s->nat_src_node = NULL;
5224 PF_HASHROW_UNLOCK(ih);
5227 psnk->psnk_killed = pf_free_src_nodes(&kill);
5231 * XXX - Check for version mismatch!!!
5235 * Duplicate pfctl -Fa operation to get rid of as much as we can.
5245 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
5247 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
5250 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
5252 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
5253 break; /* XXX: rollback? */
5255 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
5257 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
5258 break; /* XXX: rollback? */
5260 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
5262 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
5263 break; /* XXX: rollback? */
5265 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
5267 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
5268 break; /* XXX: rollback? */
5271 /* XXX: these should always succeed here */
5272 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
5273 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
5274 pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
5275 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
5276 pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
5278 if ((error = pf_clear_tables()) != 0)
5282 if ((error = pf_begin_altq(&t[0])) != 0) {
5283 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
5286 pf_commit_altq(t[0]);
5291 pf_clear_srcnodes(NULL);
5293 /* status does not use malloc'ed memory, so no cleanup is needed */
5294 /* fingerprints and interfaces have their own cleanup code */
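/*
 * The functions below are the pfil(9) hooks that feed packets into pf:
 * pf_check_in/pf_check_out and their IPv6 counterparts run pf_test() or
 * pf_test6() on each packet and translate the verdict into a pfil
 * return value via pf_check_return().
 */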
5300 static pfil_return_t
5301 pf_check_return(int chk, struct mbuf **m)
5307 return (PFIL_CONSUMED);
5316 return (PFIL_DROPPED);
5321 static pfil_return_t
5322 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
5323 void *ruleset __unused, struct inpcb *inp)
5327 chk = pf_test(PF_IN, flags, ifp, m, inp);
5329 return (pf_check_return(chk, m));
5332 static pfil_return_t
5333 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
5334 void *ruleset __unused, struct inpcb *inp)
5338 chk = pf_test(PF_OUT, flags, ifp, m, inp);
5340 return (pf_check_return(chk, m));
5345 static pfil_return_t
5346 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
5347 void *ruleset __unused, struct inpcb *inp)
5352 * In case of loopback traffic IPv6 uses the real interface in
5353 * order to support scoped addresses. In order to support stateful
5354 * filtering we have to change this to lo0, as is the case in IPv4.
5356 CURVNET_SET(ifp->if_vnet);
5357 chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp);
5360 return (pf_check_return(chk, m));
5363 static pfil_return_t
5364 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
5365 void *ruleset __unused, struct inpcb *inp)
5369 CURVNET_SET(ifp->if_vnet);
5370 chk = pf_test6(PF_OUT, flags, ifp, m, inp);
5373 return (pf_check_return(chk, m));
5378 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
5379 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
5380 #define V_pf_ip4_in_hook VNET(pf_ip4_in_hook)
5381 #define V_pf_ip4_out_hook VNET(pf_ip4_out_hook)
5384 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
5385 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
5386 #define V_pf_ip6_in_hook VNET(pf_ip6_in_hook)
5387 #define V_pf_ip6_out_hook VNET(pf_ip6_out_hook)
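/*
 * hook_pf() registers the IPv4 and IPv6 input and output hooks on the
 * corresponding pfil heads, guarded by V_pf_pfil_hooked; dehook_pf()
 * removes them again when pf is unhooked or unloaded.
 */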
5393 struct pfil_hook_args pha;
5394 struct pfil_link_args pla;
5397 if (V_pf_pfil_hooked)
5400 pha.pa_version = PFIL_VERSION;
5401 pha.pa_modname = "pf";
5402 pha.pa_ruleset = NULL;
5404 pla.pa_version = PFIL_VERSION;
5407 pha.pa_type = PFIL_TYPE_IP4;
5408 pha.pa_func = pf_check_in;
5409 pha.pa_flags = PFIL_IN;
5410 pha.pa_rulname = "default-in";
5411 V_pf_ip4_in_hook = pfil_add_hook(&pha);
5412 pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
5413 pla.pa_head = V_inet_pfil_head;
5414 pla.pa_hook = V_pf_ip4_in_hook;
5415 ret = pfil_link(&pla);
5417 pha.pa_func = pf_check_out;
5418 pha.pa_flags = PFIL_OUT;
5419 pha.pa_rulname = "default-out";
5420 V_pf_ip4_out_hook = pfil_add_hook(&pha);
5421 pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
5422 pla.pa_head = V_inet_pfil_head;
5423 pla.pa_hook = V_pf_ip4_out_hook;
5424 ret = pfil_link(&pla);
5428 pha.pa_type = PFIL_TYPE_IP6;
5429 pha.pa_func = pf_check6_in;
5430 pha.pa_flags = PFIL_IN;
5431 pha.pa_rulname = "default-in6";
5432 V_pf_ip6_in_hook = pfil_add_hook(&pha);
5433 pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
5434 pla.pa_head = V_inet6_pfil_head;
5435 pla.pa_hook = V_pf_ip6_in_hook;
5436 ret = pfil_link(&pla);
5438 pha.pa_func = pf_check6_out;
5439 pha.pa_rulname = "default-out6";
5440 pha.pa_flags = PFIL_OUT;
5441 V_pf_ip6_out_hook = pfil_add_hook(&pha);
5442 pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
5443 pla.pa_head = V_inet6_pfil_head;
5444 pla.pa_hook = V_pf_ip6_out_hook;
5445 ret = pfil_link(&pla);
5449 V_pf_pfil_hooked = 1;
5456 if (V_pf_pfil_hooked == 0)
5460 pfil_remove_hook(V_pf_ip4_in_hook);
5461 pfil_remove_hook(V_pf_ip4_out_hook);
5464 pfil_remove_hook(V_pf_ip6_in_hook);
5465 pfil_remove_hook(V_pf_ip6_out_hook);
5468 V_pf_pfil_hooked = 0;
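/*
 * Load path: the per-vnet initialization below creates the tag UMA zone
 * and the rule and queue tag sets and marks the vnet active, while the
 * global initialization sets up the pf locks, the /dev/pf device node
 * and the purge kernel thread.
 */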
5474 V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
5475 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
5477 pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
5478 PF_RULE_TAG_HASH_SIZE_DEFAULT);
5480 pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
5481 PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
5485 V_pf_vnet_active = 1;
5493 rm_init(&pf_rules_lock, "pf rulesets");
5494 sx_init(&pf_ioctl_lock, "pf ioctl");
5495 sx_init(&pf_end_lock, "pf end thread");
5497 pf_mtag_initialize();
5499 pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
5504 error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
5514 pf_unload_vnet(void)
5518 V_pf_vnet_active = 0;
5519 V_pf_status.running = 0;
5526 ret = swi_remove(V_pf_swi_cookie);
5528 ret = intr_event_destroy(V_pf_swi_ie);
5531 pf_unload_vnet_purge();
5533 pf_normalize_cleanup();
5540 if (IS_DEFAULT_VNET(curvnet))
5543 pf_cleanup_tagset(&V_pf_tags);
5545 pf_cleanup_tagset(&V_pf_qids);
5547 uma_zdestroy(V_pf_tag_z);
5549 /* Free counters last as we updated them during shutdown. */
5550 counter_u64_free(V_pf_default_rule.evaluations);
5551 for (int i = 0; i < 2; i++) {
5552 counter_u64_free(V_pf_default_rule.packets[i]);
5553 counter_u64_free(V_pf_default_rule.bytes[i]);
5555 counter_u64_free(V_pf_default_rule.states_cur);
5556 counter_u64_free(V_pf_default_rule.states_tot);
5557 counter_u64_free(V_pf_default_rule.src_nodes);
5559 for (int i = 0; i < PFRES_MAX; i++)
5560 counter_u64_free(V_pf_status.counters[i]);
5561 for (int i = 0; i < LCNT_MAX; i++)
5562 counter_u64_free(V_pf_status.lcounters[i]);
5563 for (int i = 0; i < FCNT_MAX; i++)
5564 counter_u64_free(V_pf_status.fcounters[i]);
5565 for (int i = 0; i < SCNT_MAX; i++)
5566 counter_u64_free(V_pf_status.scounters[i]);
5573 sx_xlock(&pf_end_lock);
5575 while (pf_end_threads < 2) {
5576 wakeup_one(pf_purge_thread);
5577 sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
5579 sx_xunlock(&pf_end_lock);
5582 destroy_dev(pf_dev);
5586 rm_destroy(&pf_rules_lock);
5587 sx_destroy(&pf_ioctl_lock);
5588 sx_destroy(&pf_end_lock);
5592 vnet_pf_init(void *unused __unused)
5597 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
5598 vnet_pf_init, NULL);
5601 vnet_pf_uninit(const void *unused __unused)
5606 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
5607 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
5608 vnet_pf_uninit, NULL);
5611 pf_modevent(module_t mod, int type, void *data)
5620 /* Handled in SYSUNINIT(pf_unload) to ensure it's done after
5621 * the vnet_pf_uninit()s */
5631 static moduledata_t pf_mod = {
5637 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
5638 MODULE_VERSION(pf, PF_MODVER);