2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2001 Daniel Hartmeier
5 * Copyright (c) 2002,2003 Henning Brauer
6 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
13 * - Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
33 * Effort sponsored in part by the Defense Advanced Research Projects
34 * Agency (DARPA) and Air Force Research Laboratory, Air Force
35 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
37 * $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
44 #include "opt_inet6.h"
48 #include <sys/param.h>
49 #include <sys/_bitset.h>
50 #include <sys/bitset.h>
53 #include <sys/endian.h>
54 #include <sys/fcntl.h>
55 #include <sys/filio.h>
57 #include <sys/interrupt.h>
59 #include <sys/kernel.h>
60 #include <sys/kthread.h>
63 #include <sys/module.h>
66 #include <sys/socket.h>
67 #include <sys/sysctl.h>
69 #include <sys/ucred.h>
72 #include <net/if_var.h>
74 #include <net/route.h>
76 #include <net/pfvar.h>
77 #include <net/if_pfsync.h>
78 #include <net/if_pflog.h>
80 #include <netinet/in.h>
81 #include <netinet/ip.h>
82 #include <netinet/ip_var.h>
83 #include <netinet6/ip6_var.h>
84 #include <netinet/ip_icmp.h>
87 #include <netinet/ip6.h>
91 #include <net/altq/altq.h>
94 static struct pf_kpool *pf_get_kpool(char *, u_int32_t, u_int8_t, u_int32_t,
95 u_int8_t, u_int8_t, u_int8_t);
97 static void pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
98 static void pf_empty_kpool(struct pf_kpalist *);
99 static int pfioctl(struct cdev *, u_long, caddr_t, int,
102 static int pf_begin_altq(u_int32_t *);
103 static int pf_rollback_altq(u_int32_t);
104 static int pf_commit_altq(u_int32_t);
105 static int pf_enable_altq(struct pf_altq *);
106 static int pf_disable_altq(struct pf_altq *);
107 static u_int32_t pf_qname2qid(char *);
108 static void pf_qid_unref(u_int32_t);
110 static int pf_begin_rules(u_int32_t *, int, const char *);
111 static int pf_rollback_rules(u_int32_t, int, char *);
112 static int pf_setup_pfsync_matching(struct pf_kruleset *);
113 static void pf_hash_rule(MD5_CTX *, struct pf_krule *);
114 static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
115 static int pf_commit_rules(u_int32_t, int, char *);
116 static int pf_addr_setup(struct pf_kruleset *,
117 struct pf_addr_wrap *, sa_family_t);
118 static void pf_addr_copyout(struct pf_addr_wrap *);
119 static void pf_src_node_copy(const struct pf_ksrc_node *,
120 struct pf_src_node *);
122 static int pf_export_kaltq(struct pf_altq *,
123 struct pfioc_altq_v1 *, size_t);
124 static int pf_import_kaltq(struct pfioc_altq_v1 *,
125 struct pf_altq *, size_t);
128 VNET_DEFINE(struct pf_krule, pf_default_rule);
131 VNET_DEFINE_STATIC(int, pf_altq_running);
132 #define V_pf_altq_running VNET(pf_altq_running)
135 #define TAGID_MAX 50000
137 TAILQ_ENTRY(pf_tagname) namehash_entries;
138 TAILQ_ENTRY(pf_tagname) taghash_entries;
139 char name[PF_TAG_NAME_SIZE];
145 TAILQ_HEAD(, pf_tagname) *namehash;
146 TAILQ_HEAD(, pf_tagname) *taghash;
149 BITSET_DEFINE(, TAGID_MAX) avail;
152 VNET_DEFINE(struct pf_tagset, pf_tags);
153 #define V_pf_tags VNET(pf_tags)
154 static unsigned int pf_rule_tag_hashsize;
155 #define PF_RULE_TAG_HASH_SIZE_DEFAULT 128
156 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
157 &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
158 "Size of pf(4) rule tag hashtable");
161 VNET_DEFINE(struct pf_tagset, pf_qids);
162 #define V_pf_qids VNET(pf_qids)
163 static unsigned int pf_queue_tag_hashsize;
164 #define PF_QUEUE_TAG_HASH_SIZE_DEFAULT 128
165 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
166 &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
167 "Size of pf(4) queue tag hashtable");
169 VNET_DEFINE(uma_zone_t, pf_tag_z);
170 #define V_pf_tag_z VNET(pf_tag_z)
171 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
172 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
174 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
175 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
178 static void pf_init_tagset(struct pf_tagset *, unsigned int *,
180 static void pf_cleanup_tagset(struct pf_tagset *);
181 static uint16_t tagname2hashindex(const struct pf_tagset *, const char *);
182 static uint16_t tag2hashindex(const struct pf_tagset *, uint16_t);
183 static u_int16_t tagname2tag(struct pf_tagset *, char *);
184 static u_int16_t pf_tagname2tag(char *);
185 static void tag_unref(struct pf_tagset *, u_int16_t);
187 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
192 * XXX - These are new and need to be checked when moveing to a new version
194 static void pf_clear_states(void);
195 static int pf_clear_tables(void);
196 static void pf_clear_srcnodes(struct pf_ksrc_node *);
197 static void pf_kill_srcnodes(struct pfioc_src_node_kill *);
198 static void pf_tbladdr_copyout(struct pf_addr_wrap *);
201 * Wrapper functions for pfil(9) hooks
204 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
205 int flags, void *ruleset __unused, struct inpcb *inp);
206 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
207 int flags, void *ruleset __unused, struct inpcb *inp);
210 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
211 int flags, void *ruleset __unused, struct inpcb *inp);
212 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
213 int flags, void *ruleset __unused, struct inpcb *inp);
216 static void hook_pf(void);
217 static void dehook_pf(void);
218 static int shutdown_pf(void);
219 static int pf_load(void);
220 static void pf_unload(void);
222 static struct cdevsw pf_cdevsw = {
225 .d_version = D_VERSION,
228 volatile VNET_DEFINE_STATIC(int, pf_pfil_hooked);
229 #define V_pf_pfil_hooked VNET(pf_pfil_hooked)
232 * We need a flag that is neither hooked nor running to know when
233 * the VNET is "valid". We primarily need this to control (global)
234 * external event, e.g., eventhandlers.
236 VNET_DEFINE(int, pf_vnet_active);
237 #define V_pf_vnet_active VNET(pf_vnet_active)
240 struct proc *pf_purge_proc;
242 struct rmlock pf_rules_lock;
243 struct sx pf_ioctl_lock;
244 struct sx pf_end_lock;
247 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
248 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
249 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
250 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
251 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
252 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
253 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
256 pflog_packet_t *pflog_packet_ptr = NULL;
258 extern u_long pf_ioctl_maxcount;
263 u_int32_t *my_timeout = V_pf_default_rule.timeout;
267 pfi_initialize_vnet();
270 V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
271 V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
273 RB_INIT(&V_pf_anchors);
274 pf_init_kruleset(&pf_main_ruleset);
276 /* default rule should never be garbage collected */
277 V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
278 #ifdef PF_DEFAULT_TO_DROP
279 V_pf_default_rule.action = PF_DROP;
281 V_pf_default_rule.action = PF_PASS;
283 V_pf_default_rule.nr = -1;
284 V_pf_default_rule.rtableid = -1;
286 V_pf_default_rule.evaluations = counter_u64_alloc(M_WAITOK);
287 for (int i = 0; i < 2; i++) {
288 V_pf_default_rule.packets[i] = counter_u64_alloc(M_WAITOK);
289 V_pf_default_rule.bytes[i] = counter_u64_alloc(M_WAITOK);
291 V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
292 V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
293 V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);
295 /* initialize default timeouts */
296 my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
297 my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
298 my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
299 my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
300 my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
301 my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
302 my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
303 my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
304 my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
305 my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
306 my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
307 my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
308 my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
309 my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
310 my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
311 my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
312 my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
313 my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
314 my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
315 my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
317 bzero(&V_pf_status, sizeof(V_pf_status));
318 V_pf_status.debug = PF_DEBUG_URGENT;
320 V_pf_pfil_hooked = 0;
322 /* XXX do our best to avoid a conflict */
323 V_pf_status.hostid = arc4random();
325 for (int i = 0; i < PFRES_MAX; i++)
326 V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
327 for (int i = 0; i < LCNT_MAX; i++)
328 V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
329 for (int i = 0; i < FCNT_MAX; i++)
330 V_pf_status.fcounters[i] = counter_u64_alloc(M_WAITOK);
331 for (int i = 0; i < SCNT_MAX; i++)
332 V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
334 if (swi_add(NULL, "pf send", pf_intr, curvnet, SWI_NET,
335 INTR_MPSAFE, &V_pf_swi_cookie) != 0)
336 /* XXXGL: leaked all above. */
340 static struct pf_kpool *
341 pf_get_kpool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
342 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
343 u_int8_t check_ticket)
345 struct pf_kruleset *ruleset;
346 struct pf_krule *rule;
349 ruleset = pf_find_kruleset(anchor);
352 rs_num = pf_get_ruleset_number(rule_action);
353 if (rs_num >= PF_RULESET_MAX)
356 if (check_ticket && ticket !=
357 ruleset->rules[rs_num].active.ticket)
360 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
363 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
365 if (check_ticket && ticket !=
366 ruleset->rules[rs_num].inactive.ticket)
369 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
372 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
375 while ((rule != NULL) && (rule->nr != rule_number))
376 rule = TAILQ_NEXT(rule, entries);
381 return (&rule->rpool);
385 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
387 struct pf_kpooladdr *mv_pool_pa;
389 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
390 TAILQ_REMOVE(poola, mv_pool_pa, entries);
391 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
396 pf_empty_kpool(struct pf_kpalist *poola)
398 struct pf_kpooladdr *pa;
400 while ((pa = TAILQ_FIRST(poola)) != NULL) {
401 switch (pa->addr.type) {
402 case PF_ADDR_DYNIFTL:
403 pfi_dynaddr_remove(pa->addr.p.dyn);
406 /* XXX: this could be unfinished pooladdr on pabuf */
407 if (pa->addr.p.tbl != NULL)
408 pfr_detach_table(pa->addr.p.tbl);
412 pfi_kkif_unref(pa->kif);
413 TAILQ_REMOVE(poola, pa, entries);
419 pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
424 TAILQ_REMOVE(rulequeue, rule, entries);
426 PF_UNLNKDRULES_LOCK();
427 rule->rule_flag |= PFRULE_REFS;
428 TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
429 PF_UNLNKDRULES_UNLOCK();
433 pf_free_rule(struct pf_krule *rule)
439 tag_unref(&V_pf_tags, rule->tag);
441 tag_unref(&V_pf_tags, rule->match_tag);
443 if (rule->pqid != rule->qid)
444 pf_qid_unref(rule->pqid);
445 pf_qid_unref(rule->qid);
447 switch (rule->src.addr.type) {
448 case PF_ADDR_DYNIFTL:
449 pfi_dynaddr_remove(rule->src.addr.p.dyn);
452 pfr_detach_table(rule->src.addr.p.tbl);
455 switch (rule->dst.addr.type) {
456 case PF_ADDR_DYNIFTL:
457 pfi_dynaddr_remove(rule->dst.addr.p.dyn);
460 pfr_detach_table(rule->dst.addr.p.tbl);
463 if (rule->overload_tbl)
464 pfr_detach_table(rule->overload_tbl);
466 pfi_kkif_unref(rule->kif);
467 pf_kanchor_remove(rule);
468 pf_empty_kpool(&rule->rpool.list);
469 counter_u64_free(rule->evaluations);
470 for (int i = 0; i < 2; i++) {
471 counter_u64_free(rule->packets[i]);
472 counter_u64_free(rule->bytes[i]);
474 counter_u64_free(rule->states_cur);
475 counter_u64_free(rule->states_tot);
476 counter_u64_free(rule->src_nodes);
477 free(rule, M_PFRULE);
481 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
482 unsigned int default_size)
485 unsigned int hashsize;
487 if (*tunable_size == 0 || !powerof2(*tunable_size))
488 *tunable_size = default_size;
490 hashsize = *tunable_size;
491 ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
493 ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
495 ts->mask = hashsize - 1;
496 ts->seed = arc4random();
497 for (i = 0; i < hashsize; i++) {
498 TAILQ_INIT(&ts->namehash[i]);
499 TAILQ_INIT(&ts->taghash[i]);
501 BIT_FILL(TAGID_MAX, &ts->avail);
505 pf_cleanup_tagset(struct pf_tagset *ts)
508 unsigned int hashsize;
509 struct pf_tagname *t, *tmp;
512 * Only need to clean up one of the hashes as each tag is hashed
515 hashsize = ts->mask + 1;
516 for (i = 0; i < hashsize; i++)
517 TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
518 uma_zfree(V_pf_tag_z, t);
520 free(ts->namehash, M_PFHASH);
521 free(ts->taghash, M_PFHASH);
525 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
529 len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
530 return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
534 tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
537 return (tag & ts->mask);
541 tagname2tag(struct pf_tagset *ts, char *tagname)
543 struct pf_tagname *tag;
549 index = tagname2hashindex(ts, tagname);
550 TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
551 if (strcmp(tagname, tag->name) == 0) {
559 * to avoid fragmentation, we do a linear search from the beginning
560 * and take the first free slot we find.
562 new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
564 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
565 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
566 * set. It may also return a bit number greater than TAGID_MAX due
567 * to rounding of the number of bits in the vector up to a multiple
568 * of the vector word size at declaration/allocation time.
570 if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
573 /* Mark the tag as in use. Bits are 0-based for BIT_CLR() */
574 BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
576 /* allocate and fill new struct pf_tagname */
577 tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
580 strlcpy(tag->name, tagname, sizeof(tag->name));
581 tag->tag = new_tagid;
584 /* Insert into namehash */
585 TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
587 /* Insert into taghash */
588 index = tag2hashindex(ts, new_tagid);
589 TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
595 tag_unref(struct pf_tagset *ts, u_int16_t tag)
597 struct pf_tagname *t;
602 index = tag2hashindex(ts, tag);
603 TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
606 TAILQ_REMOVE(&ts->taghash[index], t,
608 index = tagname2hashindex(ts, t->name);
609 TAILQ_REMOVE(&ts->namehash[index], t,
611 /* Bits are 0-based for BIT_SET() */
612 BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
613 uma_zfree(V_pf_tag_z, t);
620 pf_tagname2tag(char *tagname)
622 return (tagname2tag(&V_pf_tags, tagname));
627 pf_qname2qid(char *qname)
629 return ((u_int32_t)tagname2tag(&V_pf_qids, qname));
633 pf_qid_unref(u_int32_t qid)
635 tag_unref(&V_pf_qids, (u_int16_t)qid);
639 pf_begin_altq(u_int32_t *ticket)
641 struct pf_altq *altq, *tmp;
646 /* Purge the old altq lists */
647 TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
648 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
649 /* detach and destroy the discipline */
650 error = altq_remove(altq);
652 free(altq, M_PFALTQ);
654 TAILQ_INIT(V_pf_altq_ifs_inactive);
655 TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
656 pf_qid_unref(altq->qid);
657 free(altq, M_PFALTQ);
659 TAILQ_INIT(V_pf_altqs_inactive);
662 *ticket = ++V_ticket_altqs_inactive;
663 V_altqs_inactive_open = 1;
668 pf_rollback_altq(u_int32_t ticket)
670 struct pf_altq *altq, *tmp;
675 if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
677 /* Purge the old altq lists */
678 TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
679 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
680 /* detach and destroy the discipline */
681 error = altq_remove(altq);
683 free(altq, M_PFALTQ);
685 TAILQ_INIT(V_pf_altq_ifs_inactive);
686 TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
687 pf_qid_unref(altq->qid);
688 free(altq, M_PFALTQ);
690 TAILQ_INIT(V_pf_altqs_inactive);
691 V_altqs_inactive_open = 0;
696 pf_commit_altq(u_int32_t ticket)
698 struct pf_altqqueue *old_altqs, *old_altq_ifs;
699 struct pf_altq *altq, *tmp;
704 if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
707 /* swap altqs, keep the old. */
708 old_altqs = V_pf_altqs_active;
709 old_altq_ifs = V_pf_altq_ifs_active;
710 V_pf_altqs_active = V_pf_altqs_inactive;
711 V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
712 V_pf_altqs_inactive = old_altqs;
713 V_pf_altq_ifs_inactive = old_altq_ifs;
714 V_ticket_altqs_active = V_ticket_altqs_inactive;
716 /* Attach new disciplines */
717 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
718 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
719 /* attach the discipline */
720 error = altq_pfattach(altq);
721 if (error == 0 && V_pf_altq_running)
722 error = pf_enable_altq(altq);
728 /* Purge the old altq lists */
729 TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
730 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
731 /* detach and destroy the discipline */
732 if (V_pf_altq_running)
733 error = pf_disable_altq(altq);
734 err = altq_pfdetach(altq);
735 if (err != 0 && error == 0)
737 err = altq_remove(altq);
738 if (err != 0 && error == 0)
741 free(altq, M_PFALTQ);
743 TAILQ_INIT(V_pf_altq_ifs_inactive);
744 TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
745 pf_qid_unref(altq->qid);
746 free(altq, M_PFALTQ);
748 TAILQ_INIT(V_pf_altqs_inactive);
750 V_altqs_inactive_open = 0;
755 pf_enable_altq(struct pf_altq *altq)
758 struct tb_profile tb;
761 if ((ifp = ifunit(altq->ifname)) == NULL)
764 if (ifp->if_snd.altq_type != ALTQT_NONE)
765 error = altq_enable(&ifp->if_snd);
767 /* set tokenbucket regulator */
768 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
769 tb.rate = altq->ifbandwidth;
770 tb.depth = altq->tbrsize;
771 error = tbr_set(&ifp->if_snd, &tb);
778 pf_disable_altq(struct pf_altq *altq)
781 struct tb_profile tb;
784 if ((ifp = ifunit(altq->ifname)) == NULL)
788 * when the discipline is no longer referenced, it was overridden
789 * by a new one. if so, just return.
791 if (altq->altq_disc != ifp->if_snd.altq_disc)
794 error = altq_disable(&ifp->if_snd);
797 /* clear tokenbucket regulator */
799 error = tbr_set(&ifp->if_snd, &tb);
806 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
807 struct pf_altq *altq)
812 /* Deactivate the interface in question */
813 altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
814 if ((ifp1 = ifunit(altq->ifname)) == NULL ||
815 (remove && ifp1 == ifp)) {
816 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
818 error = altq_add(ifp1, altq);
820 if (ticket != V_ticket_altqs_inactive)
824 free(altq, M_PFALTQ);
831 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
833 struct pf_altq *a1, *a2, *a3;
838 * No need to re-evaluate the configuration for events on interfaces
839 * that do not support ALTQ, as it's not possible for such
840 * interfaces to be part of the configuration.
842 if (!ALTQ_IS_READY(&ifp->if_snd))
845 /* Interrupt userland queue modifications */
846 if (V_altqs_inactive_open)
847 pf_rollback_altq(V_ticket_altqs_inactive);
849 /* Start new altq ruleset */
850 if (pf_begin_altq(&ticket))
853 /* Copy the current active set */
854 TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
855 a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
860 bcopy(a1, a2, sizeof(struct pf_altq));
862 error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
866 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
870 TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
871 a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
876 bcopy(a1, a2, sizeof(struct pf_altq));
878 if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
883 a2->altq_disc = NULL;
884 TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
885 if (strncmp(a3->ifname, a2->ifname,
887 a2->altq_disc = a3->altq_disc;
891 error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
895 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
900 pf_rollback_altq(ticket);
902 pf_commit_altq(ticket);
907 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
909 struct pf_kruleset *rs;
910 struct pf_krule *rule;
914 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
916 rs = pf_find_or_create_kruleset(anchor);
919 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
920 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
921 rs->rules[rs_num].inactive.rcount--;
923 *ticket = ++rs->rules[rs_num].inactive.ticket;
924 rs->rules[rs_num].inactive.open = 1;
929 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
931 struct pf_kruleset *rs;
932 struct pf_krule *rule;
936 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
938 rs = pf_find_kruleset(anchor);
939 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
940 rs->rules[rs_num].inactive.ticket != ticket)
942 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
943 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
944 rs->rules[rs_num].inactive.rcount--;
946 rs->rules[rs_num].inactive.open = 0;
950 #define PF_MD5_UPD(st, elm) \
951 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
953 #define PF_MD5_UPD_STR(st, elm) \
954 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
956 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \
957 (stor) = htonl((st)->elm); \
958 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
961 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \
962 (stor) = htons((st)->elm); \
963 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
967 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
969 PF_MD5_UPD(pfr, addr.type);
970 switch (pfr->addr.type) {
971 case PF_ADDR_DYNIFTL:
972 PF_MD5_UPD(pfr, addr.v.ifname);
973 PF_MD5_UPD(pfr, addr.iflags);
976 PF_MD5_UPD(pfr, addr.v.tblname);
978 case PF_ADDR_ADDRMASK:
980 PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
981 PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
985 PF_MD5_UPD(pfr, port[0]);
986 PF_MD5_UPD(pfr, port[1]);
987 PF_MD5_UPD(pfr, neg);
988 PF_MD5_UPD(pfr, port_op);
992 pf_hash_rule(MD5_CTX *ctx, struct pf_krule *rule)
997 pf_hash_rule_addr(ctx, &rule->src);
998 pf_hash_rule_addr(ctx, &rule->dst);
999 PF_MD5_UPD_STR(rule, label);
1000 PF_MD5_UPD_STR(rule, ifname);
1001 PF_MD5_UPD_STR(rule, match_tagname);
1002 PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
1003 PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
1004 PF_MD5_UPD_HTONL(rule, prob, y);
1005 PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
1006 PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
1007 PF_MD5_UPD(rule, uid.op);
1008 PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
1009 PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
1010 PF_MD5_UPD(rule, gid.op);
1011 PF_MD5_UPD_HTONL(rule, rule_flag, y);
1012 PF_MD5_UPD(rule, action);
1013 PF_MD5_UPD(rule, direction);
1014 PF_MD5_UPD(rule, af);
1015 PF_MD5_UPD(rule, quick);
1016 PF_MD5_UPD(rule, ifnot);
1017 PF_MD5_UPD(rule, match_tag_not);
1018 PF_MD5_UPD(rule, natpass);
1019 PF_MD5_UPD(rule, keep_state);
1020 PF_MD5_UPD(rule, proto);
1021 PF_MD5_UPD(rule, type);
1022 PF_MD5_UPD(rule, code);
1023 PF_MD5_UPD(rule, flags);
1024 PF_MD5_UPD(rule, flagset);
1025 PF_MD5_UPD(rule, allow_opts);
1026 PF_MD5_UPD(rule, rt);
1027 PF_MD5_UPD(rule, tos);
1031 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1033 struct pf_kruleset *rs;
1034 struct pf_krule *rule, **old_array;
1035 struct pf_krulequeue *old_rules;
1037 u_int32_t old_rcount;
1041 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1043 rs = pf_find_kruleset(anchor);
1044 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1045 ticket != rs->rules[rs_num].inactive.ticket)
1048 /* Calculate checksum for the main ruleset */
1049 if (rs == &pf_main_ruleset) {
1050 error = pf_setup_pfsync_matching(rs);
1055 /* Swap rules, keep the old. */
1056 old_rules = rs->rules[rs_num].active.ptr;
1057 old_rcount = rs->rules[rs_num].active.rcount;
1058 old_array = rs->rules[rs_num].active.ptr_array;
1060 rs->rules[rs_num].active.ptr =
1061 rs->rules[rs_num].inactive.ptr;
1062 rs->rules[rs_num].active.ptr_array =
1063 rs->rules[rs_num].inactive.ptr_array;
1064 rs->rules[rs_num].active.rcount =
1065 rs->rules[rs_num].inactive.rcount;
1066 rs->rules[rs_num].inactive.ptr = old_rules;
1067 rs->rules[rs_num].inactive.ptr_array = old_array;
1068 rs->rules[rs_num].inactive.rcount = old_rcount;
1070 rs->rules[rs_num].active.ticket =
1071 rs->rules[rs_num].inactive.ticket;
1072 pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1074 /* Purge the old rule list. */
1075 while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1076 pf_unlink_rule(old_rules, rule);
1077 if (rs->rules[rs_num].inactive.ptr_array)
1078 free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
1079 rs->rules[rs_num].inactive.ptr_array = NULL;
1080 rs->rules[rs_num].inactive.rcount = 0;
1081 rs->rules[rs_num].inactive.open = 0;
1082 pf_remove_if_empty_kruleset(rs);
1088 pf_setup_pfsync_matching(struct pf_kruleset *rs)
1091 struct pf_krule *rule;
1093 u_int8_t digest[PF_MD5_DIGEST_LENGTH];
1096 for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1097 /* XXX PF_RULESET_SCRUB as well? */
1098 if (rs_cnt == PF_RULESET_SCRUB)
1101 if (rs->rules[rs_cnt].inactive.ptr_array)
1102 free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
1103 rs->rules[rs_cnt].inactive.ptr_array = NULL;
1105 if (rs->rules[rs_cnt].inactive.rcount) {
1106 rs->rules[rs_cnt].inactive.ptr_array =
1107 malloc(sizeof(caddr_t) *
1108 rs->rules[rs_cnt].inactive.rcount,
1111 if (!rs->rules[rs_cnt].inactive.ptr_array)
1115 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1117 pf_hash_rule(&ctx, rule);
1118 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
1122 MD5Final(digest, &ctx);
1123 memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
1128 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
1133 switch (addr->type) {
1135 addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
1136 if (addr->p.tbl == NULL)
1139 case PF_ADDR_DYNIFTL:
1140 error = pfi_dynaddr_setup(addr, af);
1148 pf_addr_copyout(struct pf_addr_wrap *addr)
1151 switch (addr->type) {
1152 case PF_ADDR_DYNIFTL:
1153 pfi_dynaddr_copyout(addr);
1156 pf_tbladdr_copyout(addr);
1162 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
1164 int secs = time_uptime, diff;
1166 bzero(out, sizeof(struct pf_src_node));
1168 bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
1169 bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
1171 if (in->rule.ptr != NULL)
1172 out->rule.nr = in->rule.ptr->nr;
1174 for (int i = 0; i < 2; i++) {
1175 out->bytes[i] = counter_u64_fetch(in->bytes[i]);
1176 out->packets[i] = counter_u64_fetch(in->packets[i]);
1179 out->states = in->states;
1180 out->conn = in->conn;
1182 out->ruletype = in->ruletype;
1184 out->creation = secs - in->creation;
1185 if (out->expire > secs)
1186 out->expire -= secs;
1190 /* Adjust the connection rate estimate. */
1191 diff = secs - in->conn_rate.last;
1192 if (diff >= in->conn_rate.seconds)
1193 out->conn_rate.count = 0;
1195 out->conn_rate.count -=
1196 in->conn_rate.count * diff /
1197 in->conn_rate.seconds;
1202 * Handle export of struct pf_kaltq to user binaries that may be using any
1203 * version of struct pf_altq.
1206 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
1210 if (ioc_size == sizeof(struct pfioc_altq_v0))
1213 version = pa->version;
1215 if (version > PFIOC_ALTQ_VERSION)
1218 #define ASSIGN(x) exported_q->x = q->x
1220 bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
1221 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
1222 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
1226 struct pf_altq_v0 *exported_q =
1227 &((struct pfioc_altq_v0 *)pa)->altq;
1233 exported_q->tbrsize = SATU16(q->tbrsize);
1234 exported_q->ifbandwidth = SATU32(q->ifbandwidth);
1239 exported_q->bandwidth = SATU32(q->bandwidth);
1241 ASSIGN(local_flags);
1246 if (q->scheduler == ALTQT_HFSC) {
1247 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
1248 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
1249 SATU32(q->pq_u.hfsc_opts.x)
1251 ASSIGN_OPT_SATU32(rtsc_m1);
1253 ASSIGN_OPT_SATU32(rtsc_m2);
1255 ASSIGN_OPT_SATU32(lssc_m1);
1257 ASSIGN_OPT_SATU32(lssc_m2);
1259 ASSIGN_OPT_SATU32(ulsc_m1);
1261 ASSIGN_OPT_SATU32(ulsc_m2);
1266 #undef ASSIGN_OPT_SATU32
1274 struct pf_altq_v1 *exported_q =
1275 &((struct pfioc_altq_v1 *)pa)->altq;
1281 ASSIGN(ifbandwidth);
1288 ASSIGN(local_flags);
1298 panic("%s: unhandled struct pfioc_altq version", __func__);
1311 * Handle import to struct pf_kaltq of struct pf_altq from user binaries
1312 * that may be using any version of it.
1315 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
1319 if (ioc_size == sizeof(struct pfioc_altq_v0))
1322 version = pa->version;
1324 if (version > PFIOC_ALTQ_VERSION)
1327 #define ASSIGN(x) q->x = imported_q->x
1329 bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
1333 struct pf_altq_v0 *imported_q =
1334 &((struct pfioc_altq_v0 *)pa)->altq;
1339 ASSIGN(tbrsize); /* 16-bit -> 32-bit */
1340 ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
1345 ASSIGN(bandwidth); /* 32-bit -> 64-bit */
1347 ASSIGN(local_flags);
1352 if (imported_q->scheduler == ALTQT_HFSC) {
1353 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
1356 * The m1 and m2 parameters are being copied from
1359 ASSIGN_OPT(rtsc_m1);
1361 ASSIGN_OPT(rtsc_m2);
1363 ASSIGN_OPT(lssc_m1);
1365 ASSIGN_OPT(lssc_m2);
1367 ASSIGN_OPT(ulsc_m1);
1369 ASSIGN_OPT(ulsc_m2);
1381 struct pf_altq_v1 *imported_q =
1382 &((struct pfioc_altq_v1 *)pa)->altq;
1388 ASSIGN(ifbandwidth);
1395 ASSIGN(local_flags);
1405 panic("%s: unhandled struct pfioc_altq version", __func__);
1415 static struct pf_altq *
1416 pf_altq_get_nth_active(u_int32_t n)
1418 struct pf_altq *altq;
1422 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1428 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1439 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1440 struct pf_pooladdr *pool)
1443 bzero(pool, sizeof(*pool));
1444 bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1445 strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1449 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1450 struct pf_kpooladdr *kpool)
1453 bzero(kpool, sizeof(*kpool));
1454 bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1455 strlcpy(kpool->ifname, pool->ifname, sizeof(kpool->ifname));
1459 pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
1462 bzero(rule, sizeof(*rule));
1464 bcopy(&krule->src, &rule->src, sizeof(rule->src));
1465 bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));
1467 for (int i = 0; i < PF_SKIP_COUNT; ++i) {
1468 if (rule->skip[i].ptr == NULL)
1469 rule->skip[i].nr = -1;
1471 rule->skip[i].nr = krule->skip[i].ptr->nr;
1474 strlcpy(rule->label, krule->label, sizeof(rule->label));
1475 strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
1476 strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
1477 strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
1478 strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
1479 strlcpy(rule->match_tagname, krule->match_tagname,
1480 sizeof(rule->match_tagname));
1481 strlcpy(rule->overload_tblname, krule->overload_tblname,
1482 sizeof(rule->overload_tblname));
1484 bcopy(&krule->rpool, &rule->rpool, sizeof(krule->rpool));
1486 rule->evaluations = counter_u64_fetch(krule->evaluations);
1487 for (int i = 0; i < 2; i++) {
1488 rule->packets[i] = counter_u64_fetch(krule->packets[i]);
1489 rule->bytes[i] = counter_u64_fetch(krule->bytes[i]);
1492 /* kif, anchor, overload_tbl are not copied over. */
1494 rule->os_fingerprint = krule->os_fingerprint;
1496 rule->rtableid = krule->rtableid;
1497 bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout));
1498 rule->max_states = krule->max_states;
1499 rule->max_src_nodes = krule->max_src_nodes;
1500 rule->max_src_states = krule->max_src_states;
1501 rule->max_src_conn = krule->max_src_conn;
1502 rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit;
1503 rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds;
1504 rule->qid = krule->qid;
1505 rule->pqid = krule->pqid;
1506 rule->rt_listid = krule->rt_listid;
1507 rule->nr = krule->nr;
1508 rule->prob = krule->prob;
1509 rule->cuid = krule->cuid;
1510 rule->cpid = krule->cpid;
1512 rule->return_icmp = krule->return_icmp;
1513 rule->return_icmp6 = krule->return_icmp6;
1514 rule->max_mss = krule->max_mss;
1515 rule->tag = krule->tag;
1516 rule->match_tag = krule->match_tag;
1517 rule->scrub_flags = krule->scrub_flags;
1519 bcopy(&krule->uid, &rule->uid, sizeof(krule->uid));
1520 bcopy(&krule->gid, &rule->gid, sizeof(krule->gid));
1522 rule->rule_flag = krule->rule_flag;
1523 rule->action = krule->action;
1524 rule->direction = krule->direction;
1525 rule->log = krule->log;
1526 rule->logif = krule->logif;
1527 rule->quick = krule->quick;
1528 rule->ifnot = krule->ifnot;
1529 rule->match_tag_not = krule->match_tag_not;
1530 rule->natpass = krule->natpass;
1532 rule->keep_state = krule->keep_state;
1533 rule->af = krule->af;
1534 rule->proto = krule->proto;
1535 rule->type = krule->type;
1536 rule->code = krule->code;
1537 rule->flags = krule->flags;
1538 rule->flagset = krule->flagset;
1539 rule->min_ttl = krule->min_ttl;
1540 rule->allow_opts = krule->allow_opts;
1541 rule->rt = krule->rt;
1542 rule->return_ttl = krule->return_ttl;
1543 rule->tos = krule->tos;
1544 rule->set_tos = krule->set_tos;
1545 rule->anchor_relative = krule->anchor_relative;
1546 rule->anchor_wildcard = krule->anchor_wildcard;
1548 rule->flush = krule->flush;
1549 rule->prio = krule->prio;
1550 rule->set_prio[0] = krule->set_prio[0];
1551 rule->set_prio[1] = krule->set_prio[1];
1553 bcopy(&krule->divert, &rule->divert, sizeof(krule->divert));
1555 rule->u_states_cur = counter_u64_fetch(krule->states_cur);
1556 rule->u_states_tot = counter_u64_fetch(krule->states_tot);
1557 rule->u_src_nodes = counter_u64_fetch(krule->src_nodes);
1561 pf_check_rule_addr(const struct pf_rule_addr *addr)
1564 switch (addr->addr.type) {
1565 case PF_ADDR_ADDRMASK:
1566 case PF_ADDR_NOROUTE:
1567 case PF_ADDR_DYNIFTL:
1569 case PF_ADDR_URPFFAILED:
1576 if (addr->addr.p.dyn != NULL) {
1584 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
1589 if (rule->af == AF_INET) {
1590 return (EAFNOSUPPORT);
1594 if (rule->af == AF_INET6) {
1595 return (EAFNOSUPPORT);
1599 ret = pf_check_rule_addr(&rule->src);
1602 ret = pf_check_rule_addr(&rule->dst);
1606 bzero(krule, sizeof(*krule));
1608 bcopy(&rule->src, &krule->src, sizeof(rule->src));
1609 bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
1611 strlcpy(krule->label, rule->label, sizeof(rule->label));
1612 strlcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
1613 strlcpy(krule->qname, rule->qname, sizeof(rule->qname));
1614 strlcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
1615 strlcpy(krule->tagname, rule->tagname, sizeof(rule->tagname));
1616 strlcpy(krule->match_tagname, rule->match_tagname,
1617 sizeof(rule->match_tagname));
1618 strlcpy(krule->overload_tblname, rule->overload_tblname,
1619 sizeof(rule->overload_tblname));
1621 bcopy(&rule->rpool, &krule->rpool, sizeof(krule->rpool));
1623 /* Don't allow userspace to set evaulations, packets or bytes. */
1624 /* kif, anchor, overload_tbl are not copied over. */
1626 krule->os_fingerprint = rule->os_fingerprint;
1628 krule->rtableid = rule->rtableid;
1629 bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout));
1630 krule->max_states = rule->max_states;
1631 krule->max_src_nodes = rule->max_src_nodes;
1632 krule->max_src_states = rule->max_src_states;
1633 krule->max_src_conn = rule->max_src_conn;
1634 krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
1635 krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
1636 krule->qid = rule->qid;
1637 krule->pqid = rule->pqid;
1638 krule->rt_listid = rule->rt_listid;
1639 krule->nr = rule->nr;
1640 krule->prob = rule->prob;
1641 krule->cuid = rule->cuid;
1642 krule->cpid = rule->cpid;
1644 krule->return_icmp = rule->return_icmp;
1645 krule->return_icmp6 = rule->return_icmp6;
1646 krule->max_mss = rule->max_mss;
1647 krule->tag = rule->tag;
1648 krule->match_tag = rule->match_tag;
1649 krule->scrub_flags = rule->scrub_flags;
1651 bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
1652 bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
1654 krule->rule_flag = rule->rule_flag;
1655 krule->action = rule->action;
1656 krule->direction = rule->direction;
1657 krule->log = rule->log;
1658 krule->logif = rule->logif;
1659 krule->quick = rule->quick;
1660 krule->ifnot = rule->ifnot;
1661 krule->match_tag_not = rule->match_tag_not;
1662 krule->natpass = rule->natpass;
1664 krule->keep_state = rule->keep_state;
1665 krule->af = rule->af;
1666 krule->proto = rule->proto;
1667 krule->type = rule->type;
1668 krule->code = rule->code;
1669 krule->flags = rule->flags;
1670 krule->flagset = rule->flagset;
1671 krule->min_ttl = rule->min_ttl;
1672 krule->allow_opts = rule->allow_opts;
1673 krule->rt = rule->rt;
1674 krule->return_ttl = rule->return_ttl;
1675 krule->tos = rule->tos;
1676 krule->set_tos = rule->set_tos;
1677 krule->anchor_relative = rule->anchor_relative;
1678 krule->anchor_wildcard = rule->anchor_wildcard;
1680 krule->flush = rule->flush;
1681 krule->prio = rule->prio;
1682 krule->set_prio[0] = rule->set_prio[0];
1683 krule->set_prio[1] = rule->set_prio[1];
1685 bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
1691 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
1694 PF_RULES_RLOCK_TRACKER;
1696 /* XXX keep in sync with switch() below */
1697 if (securelevel_gt(td->td_ucred, 2))
1704 case DIOCSETSTATUSIF:
1710 case DIOCGETTIMEOUT:
1711 case DIOCCLRRULECTRS:
1713 case DIOCGETALTQSV0:
1714 case DIOCGETALTQSV1:
1717 case DIOCGETQSTATSV0:
1718 case DIOCGETQSTATSV1:
1719 case DIOCGETRULESETS:
1720 case DIOCGETRULESET:
1721 case DIOCRGETTABLES:
1722 case DIOCRGETTSTATS:
1723 case DIOCRCLRTSTATS:
1729 case DIOCRGETASTATS:
1730 case DIOCRCLRASTATS:
1733 case DIOCGETSRCNODES:
1734 case DIOCCLRSRCNODES:
1735 case DIOCIGETIFACES:
1736 case DIOCGIFSPEEDV0:
1737 case DIOCGIFSPEEDV1:
1741 case DIOCRCLRTABLES:
1742 case DIOCRADDTABLES:
1743 case DIOCRDELTABLES:
1744 case DIOCRSETTFLAGS:
1745 if (((struct pfioc_table *)addr)->pfrio_flags &
1747 break; /* dummy operation ok */
1753 if (!(flags & FWRITE))
1761 case DIOCGETTIMEOUT:
1763 case DIOCGETALTQSV0:
1764 case DIOCGETALTQSV1:
1767 case DIOCGETQSTATSV0:
1768 case DIOCGETQSTATSV1:
1769 case DIOCGETRULESETS:
1770 case DIOCGETRULESET:
1772 case DIOCRGETTABLES:
1773 case DIOCRGETTSTATS:
1775 case DIOCRGETASTATS:
1778 case DIOCGETSRCNODES:
1779 case DIOCIGETIFACES:
1780 case DIOCGIFSPEEDV1:
1781 case DIOCGIFSPEEDV0:
1783 case DIOCRCLRTABLES:
1784 case DIOCRADDTABLES:
1785 case DIOCRDELTABLES:
1786 case DIOCRCLRTSTATS:
1791 case DIOCRSETTFLAGS:
1792 if (((struct pfioc_table *)addr)->pfrio_flags &
1794 flags |= FWRITE; /* need write lock for dummy */
1795 break; /* dummy operation ok */
1799 if (((struct pfioc_rule *)addr)->action ==
1807 CURVNET_SET(TD_TO_VNET(td));
1811 sx_xlock(&pf_ioctl_lock);
1812 if (V_pf_status.running)
1818 V_pf_status.running = 1;
1819 V_pf_status.since = time_second;
1822 V_pf_stateid[cpu] = time_second;
1824 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1829 sx_xlock(&pf_ioctl_lock);
1830 if (!V_pf_status.running)
1833 V_pf_status.running = 0;
1835 V_pf_status.since = time_second;
1836 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1841 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1842 struct pf_kruleset *ruleset;
1843 struct pf_krule *rule, *tail;
1844 struct pf_kpooladdr *pa;
1845 struct pfi_kkif *kif = NULL;
1848 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1853 rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
1854 error = pf_rule_to_krule(&pr->rule, rule);
1856 free(rule, M_PFRULE);
1860 if (rule->ifname[0])
1861 kif = pf_kkif_create(M_WAITOK);
1862 rule->evaluations = counter_u64_alloc(M_WAITOK);
1863 for (int i = 0; i < 2; i++) {
1864 rule->packets[i] = counter_u64_alloc(M_WAITOK);
1865 rule->bytes[i] = counter_u64_alloc(M_WAITOK);
1867 rule->states_cur = counter_u64_alloc(M_WAITOK);
1868 rule->states_tot = counter_u64_alloc(M_WAITOK);
1869 rule->src_nodes = counter_u64_alloc(M_WAITOK);
1870 rule->cuid = td->td_ucred->cr_ruid;
1871 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
1872 TAILQ_INIT(&rule->rpool.list);
1873 #define ERROUT(x) { error = (x); goto DIOCADDRULE_error; }
1876 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1877 ruleset = pf_find_kruleset(pr->anchor);
1878 if (ruleset == NULL)
1880 rs_num = pf_get_ruleset_number(pr->rule.action);
1881 if (rs_num >= PF_RULESET_MAX)
1883 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1884 DPFPRINTF(PF_DEBUG_MISC,
1885 ("ticket: %d != [%d]%d\n", pr->ticket, rs_num,
1886 ruleset->rules[rs_num].inactive.ticket));
1889 if (pr->pool_ticket != V_ticket_pabuf) {
1890 DPFPRINTF(PF_DEBUG_MISC,
1891 ("pool_ticket: %d != %d\n", pr->pool_ticket,
1896 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1899 rule->nr = tail->nr + 1;
1902 if (rule->ifname[0]) {
1903 rule->kif = pfi_kkif_attach(kif, rule->ifname);
1904 pfi_kkif_ref(rule->kif);
1908 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
1913 if (rule->qname[0] != 0) {
1914 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1916 else if (rule->pqname[0] != 0) {
1918 pf_qname2qid(rule->pqname)) == 0)
1921 rule->pqid = rule->qid;
1924 if (rule->tagname[0])
1925 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1927 if (rule->match_tagname[0])
1928 if ((rule->match_tag =
1929 pf_tagname2tag(rule->match_tagname)) == 0)
1931 if (rule->rt && !rule->direction)
1935 if (rule->logif >= PFLOGIFS_MAX)
1937 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
1939 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
1941 if (pf_kanchor_setup(rule, ruleset, pr->anchor_call))
1943 if (rule->scrub_flags & PFSTATE_SETPRIO &&
1944 (rule->set_prio[0] > PF_PRIO_MAX ||
1945 rule->set_prio[1] > PF_PRIO_MAX))
1947 TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
1948 if (pa->addr.type == PF_ADDR_TABLE) {
1949 pa->addr.p.tbl = pfr_attach_table(ruleset,
1950 pa->addr.v.tblname);
1951 if (pa->addr.p.tbl == NULL)
1955 rule->overload_tbl = NULL;
1956 if (rule->overload_tblname[0]) {
1957 if ((rule->overload_tbl = pfr_attach_table(ruleset,
1958 rule->overload_tblname)) == NULL)
1961 rule->overload_tbl->pfrkt_flags |=
1965 pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list);
1966 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1967 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1968 (rule->rt > PF_NOPFROUTE)) &&
1969 (TAILQ_FIRST(&rule->rpool.list) == NULL))
1978 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1979 counter_u64_zero(rule->evaluations);
1980 for (int i = 0; i < 2; i++) {
1981 counter_u64_zero(rule->packets[i]);
1982 counter_u64_zero(rule->bytes[i]);
1984 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1986 ruleset->rules[rs_num].inactive.rcount++;
1993 counter_u64_free(rule->evaluations);
1994 for (int i = 0; i < 2; i++) {
1995 counter_u64_free(rule->packets[i]);
1996 counter_u64_free(rule->bytes[i]);
1998 counter_u64_free(rule->states_cur);
1999 counter_u64_free(rule->states_tot);
2000 counter_u64_free(rule->src_nodes);
2001 free(rule, M_PFRULE);
2007 case DIOCGETRULES: {
2008 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
2009 struct pf_kruleset *ruleset;
2010 struct pf_krule *tail;
2014 pr->anchor[sizeof(pr->anchor) - 1] = 0;
2015 ruleset = pf_find_kruleset(pr->anchor);
2016 if (ruleset == NULL) {
2021 rs_num = pf_get_ruleset_number(pr->rule.action);
2022 if (rs_num >= PF_RULESET_MAX) {
2027 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2030 pr->nr = tail->nr + 1;
2033 pr->ticket = ruleset->rules[rs_num].active.ticket;
2039 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
2040 struct pf_kruleset *ruleset;
2041 struct pf_krule *rule;
2045 pr->anchor[sizeof(pr->anchor) - 1] = 0;
2046 ruleset = pf_find_kruleset(pr->anchor);
2047 if (ruleset == NULL) {
2052 rs_num = pf_get_ruleset_number(pr->rule.action);
2053 if (rs_num >= PF_RULESET_MAX) {
2058 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
2063 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2064 while ((rule != NULL) && (rule->nr != pr->nr))
2065 rule = TAILQ_NEXT(rule, entries);
2072 pf_krule_to_rule(rule, &pr->rule);
2074 if (pf_kanchor_copyout(ruleset, rule, pr)) {
2079 pf_addr_copyout(&pr->rule.src.addr);
2080 pf_addr_copyout(&pr->rule.dst.addr);
2082 if (pr->action == PF_GET_CLR_CNTR) {
2083 counter_u64_zero(rule->evaluations);
2084 for (int i = 0; i < 2; i++) {
2085 counter_u64_zero(rule->packets[i]);
2086 counter_u64_zero(rule->bytes[i]);
2088 counter_u64_zero(rule->states_tot);
2094 case DIOCCHANGERULE: {
2095 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
2096 struct pf_kruleset *ruleset;
2097 struct pf_krule *oldrule = NULL, *newrule = NULL;
2098 struct pfi_kkif *kif = NULL;
2099 struct pf_kpooladdr *pa;
2103 if (pcr->action < PF_CHANGE_ADD_HEAD ||
2104 pcr->action > PF_CHANGE_GET_TICKET) {
2108 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
2113 if (pcr->action != PF_CHANGE_REMOVE) {
2114 newrule = malloc(sizeof(*newrule), M_PFRULE, M_WAITOK);
2115 error = pf_rule_to_krule(&pcr->rule, newrule);
2117 free(newrule, M_PFRULE);
2121 if (newrule->ifname[0])
2122 kif = pf_kkif_create(M_WAITOK);
2123 newrule->evaluations = counter_u64_alloc(M_WAITOK);
2124 for (int i = 0; i < 2; i++) {
2125 newrule->packets[i] =
2126 counter_u64_alloc(M_WAITOK);
2128 counter_u64_alloc(M_WAITOK);
2130 newrule->states_cur = counter_u64_alloc(M_WAITOK);
2131 newrule->states_tot = counter_u64_alloc(M_WAITOK);
2132 newrule->src_nodes = counter_u64_alloc(M_WAITOK);
2133 newrule->cuid = td->td_ucred->cr_ruid;
2134 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
2135 TAILQ_INIT(&newrule->rpool.list);
2137 #define ERROUT(x) { error = (x); goto DIOCCHANGERULE_error; }
2140 if (!(pcr->action == PF_CHANGE_REMOVE ||
2141 pcr->action == PF_CHANGE_GET_TICKET) &&
2142 pcr->pool_ticket != V_ticket_pabuf)
2145 ruleset = pf_find_kruleset(pcr->anchor);
2146 if (ruleset == NULL)
2149 rs_num = pf_get_ruleset_number(pcr->rule.action);
2150 if (rs_num >= PF_RULESET_MAX)
2153 if (pcr->action == PF_CHANGE_GET_TICKET) {
2154 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
2156 } else if (pcr->ticket !=
2157 ruleset->rules[rs_num].active.ticket)
2160 if (pcr->action != PF_CHANGE_REMOVE) {
2161 if (newrule->ifname[0]) {
2162 newrule->kif = pfi_kkif_attach(kif,
2164 pfi_kkif_ref(newrule->kif);
2166 newrule->kif = NULL;
2168 if (newrule->rtableid > 0 &&
2169 newrule->rtableid >= rt_numfibs)
2174 if (newrule->qname[0] != 0) {
2176 pf_qname2qid(newrule->qname)) == 0)
2178 else if (newrule->pqname[0] != 0) {
2179 if ((newrule->pqid =
2180 pf_qname2qid(newrule->pqname)) == 0)
2183 newrule->pqid = newrule->qid;
2186 if (newrule->tagname[0])
2188 pf_tagname2tag(newrule->tagname)) == 0)
2190 if (newrule->match_tagname[0])
2191 if ((newrule->match_tag = pf_tagname2tag(
2192 newrule->match_tagname)) == 0)
2194 if (newrule->rt && !newrule->direction)
2198 if (newrule->logif >= PFLOGIFS_MAX)
2200 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
2202 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
2204 if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
2206 TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
2207 if (pa->addr.type == PF_ADDR_TABLE) {
2209 pfr_attach_table(ruleset,
2210 pa->addr.v.tblname);
2211 if (pa->addr.p.tbl == NULL)
2215 newrule->overload_tbl = NULL;
2216 if (newrule->overload_tblname[0]) {
2217 if ((newrule->overload_tbl = pfr_attach_table(
2218 ruleset, newrule->overload_tblname)) ==
2222 newrule->overload_tbl->pfrkt_flags |=
2226 pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list);
2227 if (((((newrule->action == PF_NAT) ||
2228 (newrule->action == PF_RDR) ||
2229 (newrule->action == PF_BINAT) ||
2230 (newrule->rt > PF_NOPFROUTE)) &&
2231 !newrule->anchor)) &&
2232 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
2236 pf_free_rule(newrule);
2241 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
2243 pf_empty_kpool(&V_pf_pabuf);
2245 if (pcr->action == PF_CHANGE_ADD_HEAD)
2246 oldrule = TAILQ_FIRST(
2247 ruleset->rules[rs_num].active.ptr);
2248 else if (pcr->action == PF_CHANGE_ADD_TAIL)
2249 oldrule = TAILQ_LAST(
2250 ruleset->rules[rs_num].active.ptr, pf_krulequeue);
2252 oldrule = TAILQ_FIRST(
2253 ruleset->rules[rs_num].active.ptr);
2254 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
2255 oldrule = TAILQ_NEXT(oldrule, entries);
2256 if (oldrule == NULL) {
2257 if (newrule != NULL)
2258 pf_free_rule(newrule);
2265 if (pcr->action == PF_CHANGE_REMOVE) {
2266 pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
2268 ruleset->rules[rs_num].active.rcount--;
2270 if (oldrule == NULL)
2272 ruleset->rules[rs_num].active.ptr,
2274 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
2275 pcr->action == PF_CHANGE_ADD_BEFORE)
2276 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
2279 ruleset->rules[rs_num].active.ptr,
2280 oldrule, newrule, entries);
2281 ruleset->rules[rs_num].active.rcount++;
2285 TAILQ_FOREACH(oldrule,
2286 ruleset->rules[rs_num].active.ptr, entries)
2289 ruleset->rules[rs_num].active.ticket++;
2291 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
2292 pf_remove_if_empty_kruleset(ruleset);
2298 DIOCCHANGERULE_error:
2300 if (newrule != NULL) {
2301 counter_u64_free(newrule->evaluations);
2302 for (int i = 0; i < 2; i++) {
2303 counter_u64_free(newrule->packets[i]);
2304 counter_u64_free(newrule->bytes[i]);
2306 counter_u64_free(newrule->states_cur);
2307 counter_u64_free(newrule->states_tot);
2308 counter_u64_free(newrule->src_nodes);
2309 free(newrule, M_PFRULE);
2316 case DIOCCLRSTATES: {
2318 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
2319 u_int i, killed = 0;
2321 for (i = 0; i <= pf_hashmask; i++) {
2322 struct pf_idhash *ih = &V_pf_idhash[i];
2324 relock_DIOCCLRSTATES:
2325 PF_HASHROW_LOCK(ih);
2326 LIST_FOREACH(s, &ih->states, entry)
2327 if (!psk->psk_ifname[0] ||
2328 !strcmp(psk->psk_ifname,
2329 s->kif->pfik_name)) {
2331 * Don't send out individual
2334 s->state_flags |= PFSTATE_NOSYNC;
2335 pf_unlink_state(s, PF_ENTER_LOCKED);
2337 goto relock_DIOCCLRSTATES;
2339 PF_HASHROW_UNLOCK(ih);
2341 psk->psk_killed = killed;
2342 if (V_pfsync_clear_states_ptr != NULL)
2343 V_pfsync_clear_states_ptr(V_pf_status.hostid, psk->psk_ifname);
2347 case DIOCKILLSTATES: {
2349 struct pf_state_key *sk;
2350 struct pf_addr *srcaddr, *dstaddr;
2351 u_int16_t srcport, dstport;
2352 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
2353 u_int i, killed = 0;
2355 if (psk->psk_pfcmp.id) {
2356 if (psk->psk_pfcmp.creatorid == 0)
2357 psk->psk_pfcmp.creatorid = V_pf_status.hostid;
2358 if ((s = pf_find_state_byid(psk->psk_pfcmp.id,
2359 psk->psk_pfcmp.creatorid))) {
2360 pf_unlink_state(s, PF_ENTER_LOCKED);
2361 psk->psk_killed = 1;
2366 for (i = 0; i <= pf_hashmask; i++) {
2367 struct pf_idhash *ih = &V_pf_idhash[i];
2369 relock_DIOCKILLSTATES:
2370 PF_HASHROW_LOCK(ih);
2371 LIST_FOREACH(s, &ih->states, entry) {
2372 sk = s->key[PF_SK_WIRE];
2373 if (s->direction == PF_OUT) {
2374 srcaddr = &sk->addr[1];
2375 dstaddr = &sk->addr[0];
2376 srcport = sk->port[1];
2377 dstport = sk->port[0];
2379 srcaddr = &sk->addr[0];
2380 dstaddr = &sk->addr[1];
2381 srcport = sk->port[0];
2382 dstport = sk->port[1];
2385 if ((!psk->psk_af || sk->af == psk->psk_af)
2386 && (!psk->psk_proto || psk->psk_proto ==
2388 PF_MATCHA(psk->psk_src.neg,
2389 &psk->psk_src.addr.v.a.addr,
2390 &psk->psk_src.addr.v.a.mask,
2392 PF_MATCHA(psk->psk_dst.neg,
2393 &psk->psk_dst.addr.v.a.addr,
2394 &psk->psk_dst.addr.v.a.mask,
2396 (psk->psk_src.port_op == 0 ||
2397 pf_match_port(psk->psk_src.port_op,
2398 psk->psk_src.port[0], psk->psk_src.port[1],
2400 (psk->psk_dst.port_op == 0 ||
2401 pf_match_port(psk->psk_dst.port_op,
2402 psk->psk_dst.port[0], psk->psk_dst.port[1],
2404 (!psk->psk_label[0] ||
2405 (s->rule.ptr->label[0] &&
2406 !strcmp(psk->psk_label,
2407 s->rule.ptr->label))) &&
2408 (!psk->psk_ifname[0] ||
2409 !strcmp(psk->psk_ifname,
2410 s->kif->pfik_name))) {
2411 pf_unlink_state(s, PF_ENTER_LOCKED);
2413 goto relock_DIOCKILLSTATES;
2416 PF_HASHROW_UNLOCK(ih);
2418 psk->psk_killed = killed;
2422 case DIOCADDSTATE: {
2423 struct pfioc_state *ps = (struct pfioc_state *)addr;
2424 struct pfsync_state *sp = &ps->state;
2426 if (sp->timeout >= PFTM_MAX) {
2430 if (V_pfsync_state_import_ptr != NULL) {
2432 error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL);
2439 case DIOCGETSTATE: {
2440 struct pfioc_state *ps = (struct pfioc_state *)addr;
2443 s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
2449 pfsync_state_export(&ps->state, s);
2454 case DIOCGETSTATES: {
2455 struct pfioc_states *ps = (struct pfioc_states *)addr;
2457 struct pfsync_state *pstore, *p;
2460 if (ps->ps_len <= 0) {
2461 nr = uma_zone_get_cur(V_pf_state_z);
2462 ps->ps_len = sizeof(struct pfsync_state) * nr;
2466 p = pstore = malloc(ps->ps_len, M_TEMP, M_WAITOK | M_ZERO);
2469 for (i = 0; i <= pf_hashmask; i++) {
2470 struct pf_idhash *ih = &V_pf_idhash[i];
2472 PF_HASHROW_LOCK(ih);
2473 LIST_FOREACH(s, &ih->states, entry) {
2474 if (s->timeout == PFTM_UNLINKED)
2477 if ((nr+1) * sizeof(*p) > ps->ps_len) {
2478 PF_HASHROW_UNLOCK(ih);
2479 goto DIOCGETSTATES_full;
2481 pfsync_state_export(p, s);
2485 PF_HASHROW_UNLOCK(ih);
2488 error = copyout(pstore, ps->ps_states,
2489 sizeof(struct pfsync_state) * nr);
2491 free(pstore, M_TEMP);
2494 ps->ps_len = sizeof(struct pfsync_state) * nr;
2495 free(pstore, M_TEMP);
2500 case DIOCGETSTATUS: {
2501 struct pf_status *s = (struct pf_status *)addr;
2504 s->running = V_pf_status.running;
2505 s->since = V_pf_status.since;
2506 s->debug = V_pf_status.debug;
2507 s->hostid = V_pf_status.hostid;
2508 s->states = V_pf_status.states;
2509 s->src_nodes = V_pf_status.src_nodes;
2511 for (int i = 0; i < PFRES_MAX; i++)
2513 counter_u64_fetch(V_pf_status.counters[i]);
2514 for (int i = 0; i < LCNT_MAX; i++)
2516 counter_u64_fetch(V_pf_status.lcounters[i]);
2517 for (int i = 0; i < FCNT_MAX; i++)
2519 counter_u64_fetch(V_pf_status.fcounters[i]);
2520 for (int i = 0; i < SCNT_MAX; i++)
2522 counter_u64_fetch(V_pf_status.scounters[i]);
2524 bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ);
2525 bcopy(V_pf_status.pf_chksum, s->pf_chksum,
2526 PF_MD5_DIGEST_LENGTH);
2528 pfi_update_status(s->ifname, s);
2533 case DIOCSETSTATUSIF: {
2534 struct pfioc_if *pi = (struct pfioc_if *)addr;
2536 if (pi->ifname[0] == 0) {
2537 bzero(V_pf_status.ifname, IFNAMSIZ);
2541 strlcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
2546 case DIOCCLRSTATUS: {
2548 for (int i = 0; i < PFRES_MAX; i++)
2549 counter_u64_zero(V_pf_status.counters[i]);
2550 for (int i = 0; i < FCNT_MAX; i++)
2551 counter_u64_zero(V_pf_status.fcounters[i]);
2552 for (int i = 0; i < SCNT_MAX; i++)
2553 counter_u64_zero(V_pf_status.scounters[i]);
2554 for (int i = 0; i < LCNT_MAX; i++)
2555 counter_u64_zero(V_pf_status.lcounters[i]);
2556 V_pf_status.since = time_second;
2557 if (*V_pf_status.ifname)
2558 pfi_update_status(V_pf_status.ifname, NULL);
2564 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
2565 struct pf_state_key *sk;
2566 struct pf_state *state;
2567 struct pf_state_key_cmp key;
2568 int m = 0, direction = pnl->direction;
2571 /* NATLOOK src and dst are reversed, so reverse sidx/didx */
2572 sidx = (direction == PF_IN) ? 1 : 0;
2573 didx = (direction == PF_IN) ? 0 : 1;
2576 PF_AZERO(&pnl->saddr, pnl->af) ||
2577 PF_AZERO(&pnl->daddr, pnl->af) ||
2578 ((pnl->proto == IPPROTO_TCP ||
2579 pnl->proto == IPPROTO_UDP) &&
2580 (!pnl->dport || !pnl->sport)))
2583 bzero(&key, sizeof(key));
2585 key.proto = pnl->proto;
2586 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
2587 key.port[sidx] = pnl->sport;
2588 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
2589 key.port[didx] = pnl->dport;
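/*
 * Exactly one state may match the constructed key;
 * pf_find_state_all() reports the number of matches in 'm'.
 */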
2591 state = pf_find_state_all(&key, direction, &m);
2593 if (m > 1)
2594 error = E2BIG; /* more than one state */
2595 else if (state != NULL) {
2596 /* XXXGL: not locked read */
2597 sk = state->key[sidx];
2598 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
2599 pnl->rsport = sk->port[sidx];
2600 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
2601 pnl->rdport = sk->port[didx];
2608 case DIOCSETTIMEOUT: {
2609 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
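/*
 * Update the default-rule timeout and wake the purge thread
 * early if the purge interval just became shorter.
 */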
2612 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
2618 old = V_pf_default_rule.timeout[pt->timeout];
2619 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
2620 pt->seconds = 1;
2621 V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
2622 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
2623 wakeup(pf_purge_thread);
2629 case DIOCGETTIMEOUT: {
2630 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
2632 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
2637 pt->seconds = V_pf_default_rule.timeout[pt->timeout];
2642 case DIOCGETLIMIT: {
2643 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2645 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
2650 pl->limit = V_pf_limits[pl->index].limit;
2655 case DIOCSETLIMIT: {
2656 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
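/*
 * Apply the new limit to the backing UMA zone and return the
 * previous value to the caller.
 */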
2660 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
2661 V_pf_limits[pl->index].zone == NULL) {
2666 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
2667 old_limit = V_pf_limits[pl->index].limit;
2668 V_pf_limits[pl->index].limit = pl->limit;
2669 pl->limit = old_limit;
2674 case DIOCSETDEBUG: {
2675 u_int32_t *level = (u_int32_t *)addr;
2678 V_pf_status.debug = *level;
2683 case DIOCCLRRULECTRS: {
2684 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
2685 struct pf_kruleset *ruleset = &pf_main_ruleset;
2686 struct pf_krule *rule;
2690 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
2691 counter_u64_zero(rule->evaluations);
2692 for (int i = 0; i < 2; i++) {
2693 counter_u64_zero(rule->packets[i]);
2694 counter_u64_zero(rule->bytes[i]);
2701 case DIOCGIFSPEEDV0:
2702 case DIOCGIFSPEEDV1: {
2703 struct pf_ifspeed_v1 *psp = (struct pf_ifspeed_v1 *)addr;
2704 struct pf_ifspeed_v1 ps;
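/*
 * V0 clamps the interface baudrate to 32 bits for the old ABI;
 * V1 reports the full 64-bit value.
 */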
2707 if (psp->ifname[0] != 0) {
2708 /* Can we completely trust user-land? */
2709 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
2710 ifp = ifunit(ps.ifname);
2713 (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
2714 if (cmd == DIOCGIFSPEEDV1)
2715 psp->baudrate = ifp->if_baudrate;
2724 case DIOCSTARTALTQ: {
2725 struct pf_altq *altq;
2728 /* enable all altq interfaces on active list */
2729 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
2730 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
2731 error = pf_enable_altq(altq);
2737 V_pf_altq_running = 1;
2739 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2743 case DIOCSTOPALTQ: {
2744 struct pf_altq *altq;
2747 /* disable all altq interfaces on active list */
2748 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
2749 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
2750 error = pf_disable_altq(altq);
2756 V_pf_altq_running = 0;
2758 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2763 case DIOCADDALTQV1: {
2764 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
2765 struct pf_altq *altq, *a;
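/*
 * Import the user-supplied altq, validate it against the
 * inactive ticket, resolve the queue id and discipline for
 * queue entries, attach it to the interface and append it to
 * the appropriate inactive list.
 */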
2768 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
2769 error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
2772 altq->local_flags = 0;
2775 if (pa->ticket != V_ticket_altqs_inactive) {
2777 free(altq, M_PFALTQ);
2783 * if this is for a queue, find the discipline and
2784 * copy the necessary fields
2786 if (altq->qname[0] != 0) {
2787 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2790 free(altq, M_PFALTQ);
2793 altq->altq_disc = NULL;
2794 TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
2795 if (strncmp(a->ifname, altq->ifname,
2797 altq->altq_disc = a->altq_disc;
2803 if ((ifp = ifunit(altq->ifname)) == NULL)
2804 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
2806 error = altq_add(ifp, altq);
2810 free(altq, M_PFALTQ);
2814 if (altq->qname[0] != 0)
2815 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
2817 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
2818 /* version error check done on import above */
2819 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
2824 case DIOCGETALTQSV0:
2825 case DIOCGETALTQSV1: {
2826 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
2827 struct pf_altq *altq;
2831 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
2833 TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
2835 pa->ticket = V_ticket_altqs_active;
2841 case DIOCGETALTQV1: {
2842 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
2843 struct pf_altq *altq;
2846 if (pa->ticket != V_ticket_altqs_active) {
2851 altq = pf_altq_get_nth_active(pa->nr);
2857 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
2862 case DIOCCHANGEALTQV0:
2863 case DIOCCHANGEALTQV1:
2864 /* CHANGEALTQ not supported yet! */
2868 case DIOCGETQSTATSV0:
2869 case DIOCGETQSTATSV1: {
2870 struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr;
2871 struct pf_altq *altq;
2876 if (pq->ticket != V_ticket_altqs_active) {
2881 nbytes = pq->nbytes;
2882 altq = pf_altq_get_nth_active(pq->nr);
2889 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
2895 if (cmd == DIOCGETQSTATSV0)
2896 version = 0; /* DIOCGETQSTATSV0 means stats struct v0 */
2898 version = pq->version;
2899 error = altq_getqstats(altq, pq->buf, &nbytes, version);
2901 pq->scheduler = altq->scheduler;
2902 pq->nbytes = nbytes;
2908 case DIOCBEGINADDRS: {
2909 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
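/*
 * Start a fresh pool address transaction: flush the staging
 * buffer and hand out a new ticket.
 */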
2912 pf_empty_kpool(&V_pf_pabuf);
2913 pp->ticket = ++V_ticket_pabuf;
2919 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2920 struct pf_kpooladdr *pa;
2921 struct pfi_kkif *kif = NULL;
2924 if (pp->af == AF_INET) {
2925 error = EAFNOSUPPORT;
2930 if (pp->af == AF_INET6) {
2931 error = EAFNOSUPPORT;
2935 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2936 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2937 pp->addr.addr.type != PF_ADDR_TABLE) {
2941 if (pp->addr.addr.p.dyn != NULL) {
2945 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
2946 pf_pooladdr_to_kpooladdr(&pp->addr, pa);
2948 kif = pf_kkif_create(M_WAITOK);
2950 if (pp->ticket != V_ticket_pabuf) {
2958 if (pa->ifname[0]) {
2959 pa->kif = pfi_kkif_attach(kif, pa->ifname);
2960 pfi_kkif_ref(pa->kif);
2963 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
2964 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
2966 pfi_kkif_unref(pa->kif);
2971 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
2976 case DIOCGETADDRS: {
2977 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2978 struct pf_kpool *pool;
2979 struct pf_kpooladdr *pa;
2983 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2984 pp->r_num, 0, 1, 0);
2990 TAILQ_FOREACH(pa, &pool->list, entries)
2997 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2998 struct pf_kpool *pool;
2999 struct pf_kpooladdr *pa;
3003 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
3004 pp->r_num, 0, 1, 1);
3010 pa = TAILQ_FIRST(&pool->list);
3011 while ((pa != NULL) && (nr < pp->nr)) {
3012 pa = TAILQ_NEXT(pa, entries);
3020 pf_kpooladdr_to_pooladdr(pa, &pp->addr);
3021 pf_addr_copyout(&pp->addr.addr);
3026 case DIOCCHANGEADDR: {
3027 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
3028 struct pf_kpool *pool;
3029 struct pf_kpooladdr *oldpa = NULL, *newpa = NULL;
3030 struct pf_kruleset *ruleset;
3031 struct pfi_kkif *kif = NULL;
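/*
 * Insert or remove a single address in an existing rule's pool,
 * located via anchor, ticket and rule number.
 */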
3033 if (pca->action < PF_CHANGE_ADD_HEAD ||
3034 pca->action > PF_CHANGE_REMOVE) {
3038 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
3039 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
3040 pca->addr.addr.type != PF_ADDR_TABLE) {
3044 if (pca->addr.addr.p.dyn != NULL) {
3049 if (pca->action != PF_CHANGE_REMOVE) {
3051 if (pca->af == AF_INET) {
3052 error = EAFNOSUPPORT;
3057 if (pca->af == AF_INET6) {
3058 error = EAFNOSUPPORT;
3062 newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
3063 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
3064 if (newpa->ifname[0])
3065 kif = pf_kkif_create(M_WAITOK);
3068 #define ERROUT(x) { error = (x); goto DIOCCHANGEADDR_error; }
3070 ruleset = pf_find_kruleset(pca->anchor);
3071 if (ruleset == NULL)
3074 pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
3075 pca->r_num, pca->r_last, 1, 1);
3079 if (pca->action != PF_CHANGE_REMOVE) {
3080 if (newpa->ifname[0]) {
3081 newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
3082 pfi_kkif_ref(newpa->kif);
3086 switch (newpa->addr.type) {
3087 case PF_ADDR_DYNIFTL:
3088 error = pfi_dynaddr_setup(&newpa->addr,
3092 newpa->addr.p.tbl = pfr_attach_table(ruleset,
3093 newpa->addr.v.tblname);
3094 if (newpa->addr.p.tbl == NULL)
3099 goto DIOCCHANGEADDR_error;
3102 switch (pca->action) {
3103 case PF_CHANGE_ADD_HEAD:
3104 oldpa = TAILQ_FIRST(&pool->list);
3106 case PF_CHANGE_ADD_TAIL:
3107 oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
3110 oldpa = TAILQ_FIRST(&pool->list);
3111 for (int i = 0; oldpa && i < pca->nr; i++)
3112 oldpa = TAILQ_NEXT(oldpa, entries);
3118 if (pca->action == PF_CHANGE_REMOVE) {
3119 TAILQ_REMOVE(&pool->list, oldpa, entries);
3120 switch (oldpa->addr.type) {
3121 case PF_ADDR_DYNIFTL:
3122 pfi_dynaddr_remove(oldpa->addr.p.dyn);
3125 pfr_detach_table(oldpa->addr.p.tbl);
3129 pfi_kkif_unref(oldpa->kif);
3130 free(oldpa, M_PFRULE);
3133 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
3134 else if (pca->action == PF_CHANGE_ADD_HEAD ||
3135 pca->action == PF_CHANGE_ADD_BEFORE)
3136 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
3138 TAILQ_INSERT_AFTER(&pool->list, oldpa,
3142 pool->cur = TAILQ_FIRST(&pool->list);
3143 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
3148 DIOCCHANGEADDR_error:
3149 if (newpa != NULL) {
3151 pfi_kkif_unref(newpa->kif);
3152 free(newpa, M_PFRULE);
3160 case DIOCGETRULESETS: {
3161 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
3162 struct pf_kruleset *ruleset;
3163 struct pf_kanchor *anchor;
3166 pr->path[sizeof(pr->path) - 1] = 0;
3167 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
3173 if (ruleset->anchor == NULL) {
3174 /* XXX kludge for pf_main_ruleset */
3175 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
3176 if (anchor->parent == NULL)
3179 RB_FOREACH(anchor, pf_kanchor_node,
3180 &ruleset->anchor->children)
3187 case DIOCGETRULESET: {
3188 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
3189 struct pf_kruleset *ruleset;
3190 struct pf_kanchor *anchor;
3194 pr->path[sizeof(pr->path) - 1] = 0;
3195 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
3201 if (ruleset->anchor == NULL) {
3202 /* XXX kludge for pf_main_ruleset */
3203 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
3204 if (anchor->parent == NULL && nr++ == pr->nr) {
3205 strlcpy(pr->name, anchor->name,
3210 RB_FOREACH(anchor, pf_kanchor_node,
3211 &ruleset->anchor->children)
3212 if (nr++ == pr->nr) {
3213 strlcpy(pr->name, anchor->name,
3224 case DIOCRCLRTABLES: {
3225 struct pfioc_table *io = (struct pfioc_table *)addr;
3227 if (io->pfrio_esize != 0) {
3232 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
3233 io->pfrio_flags | PFR_FLAG_USERIOCTL);
3238 case DIOCRADDTABLES: {
3239 struct pfioc_table *io = (struct pfioc_table *)addr;
3240 struct pfr_table *pfrts;
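/*
 * The table ioctls share a common pattern: validate the element
 * size and count, copy the array in from userland, call into
 * pfr_*() and free the temporary buffer.
 */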
3243 if (io->pfrio_esize != sizeof(struct pfr_table)) {
3248 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
3249 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
3254 totlen = io->pfrio_size * sizeof(struct pfr_table);
3255 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
3257 error = copyin(io->pfrio_buffer, pfrts, totlen);
3259 free(pfrts, M_TEMP);
3263 error = pfr_add_tables(pfrts, io->pfrio_size,
3264 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3266 free(pfrts, M_TEMP);
3270 case DIOCRDELTABLES: {
3271 struct pfioc_table *io = (struct pfioc_table *)addr;
3272 struct pfr_table *pfrts;
3275 if (io->pfrio_esize != sizeof(struct pfr_table)) {
3280 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
3281 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
3286 totlen = io->pfrio_size * sizeof(struct pfr_table);
3287 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
3289 error = copyin(io->pfrio_buffer, pfrts, totlen);
3291 free(pfrts, M_TEMP);
3295 error = pfr_del_tables(pfrts, io->pfrio_size,
3296 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3298 free(pfrts, M_TEMP);
3302 case DIOCRGETTABLES: {
3303 struct pfioc_table *io = (struct pfioc_table *)addr;
3304 struct pfr_table *pfrts;
3308 if (io->pfrio_esize != sizeof(struct pfr_table)) {
3313 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
3319 io->pfrio_size = min(io->pfrio_size, n);
3321 totlen = io->pfrio_size * sizeof(struct pfr_table);
3323 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
3325 if (pfrts == NULL) {
3330 error = pfr_get_tables(&io->pfrio_table, pfrts,
3331 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3334 error = copyout(pfrts, io->pfrio_buffer, totlen);
3335 free(pfrts, M_TEMP);
3339 case DIOCRGETTSTATS: {
3340 struct pfioc_table *io = (struct pfioc_table *)addr;
3341 struct pfr_tstats *pfrtstats;
3345 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
3350 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
3356 io->pfrio_size = min(io->pfrio_size, n);
3358 totlen = io->pfrio_size * sizeof(struct pfr_tstats);
3359 pfrtstats = mallocarray(io->pfrio_size,
3360 sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT);
3361 if (pfrtstats == NULL) {
3366 error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
3367 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3370 error = copyout(pfrtstats, io->pfrio_buffer, totlen);
3371 free(pfrtstats, M_TEMP);
3375 case DIOCRCLRTSTATS: {
3376 struct pfioc_table *io = (struct pfioc_table *)addr;
3377 struct pfr_table *pfrts;
3380 if (io->pfrio_esize != sizeof(struct pfr_table)) {
3385 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
3386 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
3387 /* We used to count tables and use the minimum required
3388 * size, so we didn't fail on overly large requests.
3389 * Keep doing so. */
3390 io->pfrio_size = pf_ioctl_maxcount;
3394 totlen = io->pfrio_size * sizeof(struct pfr_table);
3395 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
3397 if (pfrts == NULL) {
3401 error = copyin(io->pfrio_buffer, pfrts, totlen);
3403 free(pfrts, M_TEMP);
3408 error = pfr_clr_tstats(pfrts, io->pfrio_size,
3409 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3411 free(pfrts, M_TEMP);
3415 case DIOCRSETTFLAGS: {
3416 struct pfioc_table *io = (struct pfioc_table *)addr;
3417 struct pfr_table *pfrts;
3421 if (io->pfrio_esize != sizeof(struct pfr_table)) {
3427 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
3434 io->pfrio_size = min(io->pfrio_size, n);
3437 totlen = io->pfrio_size * sizeof(struct pfr_table);
3438 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
3440 error = copyin(io->pfrio_buffer, pfrts, totlen);
3442 free(pfrts, M_TEMP);
3446 error = pfr_set_tflags(pfrts, io->pfrio_size,
3447 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
3448 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3450 free(pfrts, M_TEMP);
3454 case DIOCRCLRADDRS: {
3455 struct pfioc_table *io = (struct pfioc_table *)addr;
3457 if (io->pfrio_esize != 0) {
3462 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
3463 io->pfrio_flags | PFR_FLAG_USERIOCTL);
3468 case DIOCRADDADDRS: {
3469 struct pfioc_table *io = (struct pfioc_table *)addr;
3470 struct pfr_addr *pfras;
3473 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3477 if (io->pfrio_size < 0 ||
3478 io->pfrio_size > pf_ioctl_maxcount ||
3479 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
3483 totlen = io->pfrio_size * sizeof(struct pfr_addr);
3484 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
3490 error = copyin(io->pfrio_buffer, pfras, totlen);
3492 free(pfras, M_TEMP);
3496 error = pfr_add_addrs(&io->pfrio_table, pfras,
3497 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
3498 PFR_FLAG_USERIOCTL);
3500 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
3501 error = copyout(pfras, io->pfrio_buffer, totlen);
3502 free(pfras, M_TEMP);
3506 case DIOCRDELADDRS: {
3507 struct pfioc_table *io = (struct pfioc_table *)addr;
3508 struct pfr_addr *pfras;
3511 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3515 if (io->pfrio_size < 0 ||
3516 io->pfrio_size > pf_ioctl_maxcount ||
3517 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
3521 totlen = io->pfrio_size * sizeof(struct pfr_addr);
3522 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
3528 error = copyin(io->pfrio_buffer, pfras, totlen);
3530 free(pfras, M_TEMP);
3534 error = pfr_del_addrs(&io->pfrio_table, pfras,
3535 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
3536 PFR_FLAG_USERIOCTL);
3538 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
3539 error = copyout(pfras, io->pfrio_buffer, totlen);
3540 free(pfras, M_TEMP);
3544 case DIOCRSETADDRS: {
3545 struct pfioc_table *io = (struct pfioc_table *)addr;
3546 struct pfr_addr *pfras;
3547 size_t totlen, count;
3549 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3553 if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
3557 count = max(io->pfrio_size, io->pfrio_size2);
3558 if (count > pf_ioctl_maxcount ||
3559 WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
3563 totlen = count * sizeof(struct pfr_addr);
3564 pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
3570 error = copyin(io->pfrio_buffer, pfras, totlen);
3572 free(pfras, M_TEMP);
3576 error = pfr_set_addrs(&io->pfrio_table, pfras,
3577 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
3578 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
3579 PFR_FLAG_USERIOCTL, 0);
3581 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
3582 error = copyout(pfras, io->pfrio_buffer, totlen);
3583 free(pfras, M_TEMP);
3587 case DIOCRGETADDRS: {
3588 struct pfioc_table *io = (struct pfioc_table *)addr;
3589 struct pfr_addr *pfras;
3592 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3596 if (io->pfrio_size < 0 ||
3597 io->pfrio_size > pf_ioctl_maxcount ||
3598 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
3602 totlen = io->pfrio_size * sizeof(struct pfr_addr);
3603 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
3610 error = pfr_get_addrs(&io->pfrio_table, pfras,
3611 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3614 error = copyout(pfras, io->pfrio_buffer, totlen);
3615 free(pfras, M_TEMP);
3619 case DIOCRGETASTATS: {
3620 struct pfioc_table *io = (struct pfioc_table *)addr;
3621 struct pfr_astats *pfrastats;
3624 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
3628 if (io->pfrio_size < 0 ||
3629 io->pfrio_size > pf_ioctl_maxcount ||
3630 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
3634 totlen = io->pfrio_size * sizeof(struct pfr_astats);
3635 pfrastats = mallocarray(io->pfrio_size,
3636 sizeof(struct pfr_astats), M_TEMP, M_NOWAIT);
3642 error = pfr_get_astats(&io->pfrio_table, pfrastats,
3643 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3646 error = copyout(pfrastats, io->pfrio_buffer, totlen);
3647 free(pfrastats, M_TEMP);
3651 case DIOCRCLRASTATS: {
3652 struct pfioc_table *io = (struct pfioc_table *)addr;
3653 struct pfr_addr *pfras;
3656 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3660 if (io->pfrio_size < 0 ||
3661 io->pfrio_size > pf_ioctl_maxcount ||
3662 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
3666 totlen = io->pfrio_size * sizeof(struct pfr_addr);
3667 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
3673 error = copyin(io->pfrio_buffer, pfras, totlen);
3675 free(pfras, M_TEMP);
3679 error = pfr_clr_astats(&io->pfrio_table, pfras,
3680 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
3681 PFR_FLAG_USERIOCTL);
3683 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
3684 error = copyout(pfras, io->pfrio_buffer, totlen);
3685 free(pfras, M_TEMP);
3689 case DIOCRTSTADDRS: {
3690 struct pfioc_table *io = (struct pfioc_table *)addr;
3691 struct pfr_addr *pfras;
3694 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3698 if (io->pfrio_size < 0 ||
3699 io->pfrio_size > pf_ioctl_maxcount ||
3700 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
3704 totlen = io->pfrio_size * sizeof(struct pfr_addr);
3705 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
3711 error = copyin(io->pfrio_buffer, pfras, totlen);
3713 free(pfras, M_TEMP);
3717 error = pfr_tst_addrs(&io->pfrio_table, pfras,
3718 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
3719 PFR_FLAG_USERIOCTL);
3722 error = copyout(pfras, io->pfrio_buffer, totlen);
3723 free(pfras, M_TEMP);
3727 case DIOCRINADEFINE: {
3728 struct pfioc_table *io = (struct pfioc_table *)addr;
3729 struct pfr_addr *pfras;
3732 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3736 if (io->pfrio_size < 0 ||
3737 io->pfrio_size > pf_ioctl_maxcount ||
3738 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
3742 totlen = io->pfrio_size * sizeof(struct pfr_addr);
3743 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
3749 error = copyin(io->pfrio_buffer, pfras, totlen);
3751 free(pfras, M_TEMP);
3755 error = pfr_ina_define(&io->pfrio_table, pfras,
3756 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
3757 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3759 free(pfras, M_TEMP);
3764 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
3766 error = pf_osfp_add(io);
3772 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
3774 error = pf_osfp_get(io);
3780 struct pfioc_trans *io = (struct pfioc_trans *)addr;
3781 struct pfioc_trans_e *ioes, *ioe;
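/*
 * Begin a transaction: for every element open an inactive
 * ruleset, ALTQ list or table set and return its ticket.
 */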
3785 if (io->esize != sizeof(*ioe)) {
3790 io->size > pf_ioctl_maxcount ||
3791 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
3795 totlen = sizeof(struct pfioc_trans_e) * io->size;
3796 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
3802 error = copyin(io->array, ioes, totlen);
3808 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
3809 switch (ioe->rs_num) {
3811 case PF_RULESET_ALTQ:
3812 if (ioe->anchor[0]) {
3818 if ((error = pf_begin_altq(&ioe->ticket))) {
3825 case PF_RULESET_TABLE:
3827 struct pfr_table table;
3829 bzero(&table, sizeof(table));
3830 strlcpy(table.pfrt_anchor, ioe->anchor,
3831 sizeof(table.pfrt_anchor));
3832 if ((error = pfr_ina_begin(&table,
3833 &ioe->ticket, NULL, 0))) {
3841 if ((error = pf_begin_rules(&ioe->ticket,
3842 ioe->rs_num, ioe->anchor))) {
3851 error = copyout(ioes, io->array, totlen);
3856 case DIOCXROLLBACK: {
3857 struct pfioc_trans *io = (struct pfioc_trans *)addr;
3858 struct pfioc_trans_e *ioe, *ioes;
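/*
 * Roll back a transaction, discarding the inactive rulesets,
 * ALTQ list and tables named by the tickets.
 */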
3862 if (io->esize != sizeof(*ioe)) {
3867 io->size > pf_ioctl_maxcount ||
3868 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
3872 totlen = sizeof(struct pfioc_trans_e) * io->size;
3873 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
3879 error = copyin(io->array, ioes, totlen);
3885 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
3886 switch (ioe->rs_num) {
3888 case PF_RULESET_ALTQ:
3889 if (ioe->anchor[0]) {
3895 if ((error = pf_rollback_altq(ioe->ticket))) {
3898 goto fail; /* really bad */
3902 case PF_RULESET_TABLE:
3904 struct pfr_table table;
3906 bzero(&table, sizeof(table));
3907 strlcpy(table.pfrt_anchor, ioe->anchor,
3908 sizeof(table.pfrt_anchor));
3909 if ((error = pfr_ina_rollback(&table,
3910 ioe->ticket, NULL, 0))) {
3913 goto fail; /* really bad */
3918 if ((error = pf_rollback_rules(ioe->ticket,
3919 ioe->rs_num, ioe->anchor))) {
3922 goto fail; /* really bad */
3933 struct pfioc_trans *io = (struct pfioc_trans *)addr;
3934 struct pfioc_trans_e *ioe, *ioes;
3935 struct pf_kruleset *rs;
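/*
 * Commit in two phases: verify that every ticket is still valid
 * before swapping any of the inactive sets in.
 */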
3939 if (io->esize != sizeof(*ioe)) {
3945 io->size > pf_ioctl_maxcount ||
3946 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
3951 totlen = sizeof(struct pfioc_trans_e) * io->size;
3952 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
3958 error = copyin(io->array, ioes, totlen);
3964 /* First make sure everything will succeed. */
3965 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
3966 switch (ioe->rs_num) {
3968 case PF_RULESET_ALTQ:
3969 if (ioe->anchor[0]) {
3975 if (!V_altqs_inactive_open || ioe->ticket !=
3976 V_ticket_altqs_inactive) {
3984 case PF_RULESET_TABLE:
3985 rs = pf_find_kruleset(ioe->anchor);
3986 if (rs == NULL || !rs->topen || ioe->ticket !=
3995 if (ioe->rs_num < 0 || ioe->rs_num >=
4002 rs = pf_find_kruleset(ioe->anchor);
4004 !rs->rules[ioe->rs_num].inactive.open ||
4005 rs->rules[ioe->rs_num].inactive.ticket !=
4015 /* Now do the commit - no errors should happen here. */
4016 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
4017 switch (ioe->rs_num) {
4019 case PF_RULESET_ALTQ:
4020 if ((error = pf_commit_altq(ioe->ticket))) {
4023 goto fail; /* really bad */
4027 case PF_RULESET_TABLE:
4029 struct pfr_table table;
4031 bzero(&table, sizeof(table));
4032 strlcpy(table.pfrt_anchor, ioe->anchor,
4033 sizeof(table.pfrt_anchor));
4034 if ((error = pfr_ina_commit(&table,
4035 ioe->ticket, NULL, NULL, 0))) {
4038 goto fail; /* really bad */
4043 if ((error = pf_commit_rules(ioe->ticket,
4044 ioe->rs_num, ioe->anchor))) {
4047 goto fail; /* really bad */
4057 case DIOCGETSRCNODES: {
4058 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
4059 struct pf_srchash *sh;
4060 struct pf_ksrc_node *n;
4061 struct pf_src_node *p, *pstore;
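/*
 * Count the source nodes, let the caller size its buffer, then
 * copy the nodes out through a temporary kernel buffer.
 */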
4064 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
4066 PF_HASHROW_LOCK(sh);
4067 LIST_FOREACH(n, &sh->nodes, entry)
4069 PF_HASHROW_UNLOCK(sh);
4072 psn->psn_len = min(psn->psn_len,
4073 sizeof(struct pf_src_node) * nr);
4075 if (psn->psn_len == 0) {
4076 psn->psn_len = sizeof(struct pf_src_node) * nr;
4082 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
4083 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
4085 PF_HASHROW_LOCK(sh);
4086 LIST_FOREACH(n, &sh->nodes, entry) {
4088 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
4089 break;
4091 pf_src_node_copy(n, p);
4096 PF_HASHROW_UNLOCK(sh);
4098 error = copyout(pstore, psn->psn_src_nodes,
4099 sizeof(struct pf_src_node) * nr);
4101 free(pstore, M_TEMP);
4104 psn->psn_len = sizeof(struct pf_src_node) * nr;
4105 free(pstore, M_TEMP);
4109 case DIOCCLRSRCNODES: {
4110 pf_clear_srcnodes(NULL);
4111 pf_purge_expired_src_nodes();
4115 case DIOCKILLSRCNODES:
4116 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
4119 case DIOCSETHOSTID: {
4120 u_int32_t *hostid = (u_int32_t *)addr;
4124 V_pf_status.hostid = arc4random();
4126 V_pf_status.hostid = *hostid;
4137 case DIOCIGETIFACES: {
4138 struct pfioc_iface *io = (struct pfioc_iface *)addr;
4139 struct pfi_kif *ifstore;
4142 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
4147 if (io->pfiio_size < 0 ||
4148 io->pfiio_size > pf_ioctl_maxcount ||
4149 WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
4154 bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
4155 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
4157 if (ifstore == NULL) {
4163 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
4165 error = copyout(ifstore, io->pfiio_buffer, bufsiz);
4166 free(ifstore, M_TEMP);
4170 case DIOCSETIFFLAG: {
4171 struct pfioc_iface *io = (struct pfioc_iface *)addr;
4174 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
4179 case DIOCCLRIFFLAG: {
4180 struct pfioc_iface *io = (struct pfioc_iface *)addr;
4183 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
4193 if (sx_xlocked(&pf_ioctl_lock))
4194 sx_xunlock(&pf_ioctl_lock);
4201 pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
4203 bzero(sp, sizeof(struct pfsync_state));
4205 /* copy from state key */
4206 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
4207 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
4208 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
4209 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
4210 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
4211 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
4212 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
4213 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
4214 sp->proto = st->key[PF_SK_WIRE]->proto;
4215 sp->af = st->key[PF_SK_WIRE]->af;
4217 /* copy from state */
4218 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
4219 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
4220 sp->creation = htonl(time_uptime - st->creation);
4221 sp->expire = pf_state_expires(st);
4222 if (sp->expire <= time_uptime)
4223 sp->expire = htonl(0);
4224 else
4225 sp->expire = htonl(sp->expire - time_uptime);
4227 sp->direction = st->direction;
4229 sp->timeout = st->timeout;
4230 sp->state_flags = st->state_flags;
4232 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
4233 if (st->nat_src_node)
4234 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
4237 sp->creatorid = st->creatorid;
4238 pf_state_peer_hton(&st->src, &sp->src);
4239 pf_state_peer_hton(&st->dst, &sp->dst);
4241 if (st->rule.ptr == NULL)
4242 sp->rule = htonl(-1);
4243 else
4244 sp->rule = htonl(st->rule.ptr->nr);
4245 if (st->anchor.ptr == NULL)
4246 sp->anchor = htonl(-1);
4247 else
4248 sp->anchor = htonl(st->anchor.ptr->nr);
4249 if (st->nat_rule.ptr == NULL)
4250 sp->nat_rule = htonl(-1);
4251 else
4252 sp->nat_rule = htonl(st->nat_rule.ptr->nr);
4254 pf_state_counter_hton(counter_u64_fetch(st->packets[0]),
4256 pf_state_counter_hton(counter_u64_fetch(st->packets[1]),
4258 pf_state_counter_hton(counter_u64_fetch(st->bytes[0]), sp->bytes[0]);
4259 pf_state_counter_hton(counter_u64_fetch(st->bytes[1]), sp->bytes[1]);
4264 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
4266 struct pfr_ktable *kt;
4268 KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
4271 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
4272 kt = kt->pfrkt_root;
4274 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
4279 * XXX - Check for version mismatch!!!
4282 pf_clear_states(void)
4287 for (i = 0; i <= pf_hashmask; i++) {
4288 struct pf_idhash *ih = &V_pf_idhash[i];
4290 PF_HASHROW_LOCK(ih);
4291 LIST_FOREACH(s, &ih->states, entry) {
4292 s->timeout = PFTM_PURGE;
4293 /* Don't send out individual delete messages. */
4294 s->state_flags |= PFSTATE_NOSYNC;
4295 pf_unlink_state(s, PF_ENTER_LOCKED);
4298 PF_HASHROW_UNLOCK(ih);
4303 pf_clear_tables(void)
4305 struct pfioc_table io;
4308 bzero(&io, sizeof(io));
4310 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
4317 pf_clear_srcnodes(struct pf_ksrc_node *n)
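/*
 * Detach the given source node (or all nodes when n is NULL)
 * from every state, then mark the node(s) for removal by the
 * purge code.
 */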
4322 for (i = 0; i <= pf_hashmask; i++) {
4323 struct pf_idhash *ih = &V_pf_idhash[i];
4325 PF_HASHROW_LOCK(ih);
4326 LIST_FOREACH(s, &ih->states, entry) {
4327 if (n == NULL || n == s->src_node)
4328 s->src_node = NULL;
4329 if (n == NULL || n == s->nat_src_node)
4330 s->nat_src_node = NULL;
4332 PF_HASHROW_UNLOCK(ih);
4336 struct pf_srchash *sh;
4338 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
4340 PF_HASHROW_LOCK(sh);
4341 LIST_FOREACH(n, &sh->nodes, entry) {
4345 PF_HASHROW_UNLOCK(sh);
4348 /* XXX: hash slot should already be locked here. */
4355 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
4357 struct pf_ksrc_node_list kill;
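/*
 * Unlink all source nodes matching the src/dst masks onto a
 * local kill list, clear the references held by states, then
 * free the collected nodes.
 */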
4360 for (int i = 0; i <= pf_srchashmask; i++) {
4361 struct pf_srchash *sh = &V_pf_srchash[i];
4362 struct pf_ksrc_node *sn, *tmp;
4364 PF_HASHROW_LOCK(sh);
4365 LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
4366 if (PF_MATCHA(psnk->psnk_src.neg,
4367 &psnk->psnk_src.addr.v.a.addr,
4368 &psnk->psnk_src.addr.v.a.mask,
4369 &sn->addr, sn->af) &&
4370 PF_MATCHA(psnk->psnk_dst.neg,
4371 &psnk->psnk_dst.addr.v.a.addr,
4372 &psnk->psnk_dst.addr.v.a.mask,
4373 &sn->raddr, sn->af)) {
4374 pf_unlink_src_node(sn);
4375 LIST_INSERT_HEAD(&kill, sn, entry);
4378 PF_HASHROW_UNLOCK(sh);
4381 for (int i = 0; i <= pf_hashmask; i++) {
4382 struct pf_idhash *ih = &V_pf_idhash[i];
4385 PF_HASHROW_LOCK(ih);
4386 LIST_FOREACH(s, &ih->states, entry) {
4387 if (s->src_node && s->src_node->expire == 1)
4388 s->src_node = NULL;
4389 if (s->nat_src_node && s->nat_src_node->expire == 1)
4390 s->nat_src_node = NULL;
4392 PF_HASHROW_UNLOCK(ih);
4395 psnk->psnk_killed = pf_free_src_nodes(&kill);
4399 * XXX - Check for version mismatch!!!
4403 * Duplicate pfctl -Fa operation to get rid of as much as we can.
4413 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
4415 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
4418 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
4420 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
4421 break; /* XXX: rollback? */
4423 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
4425 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
4426 break; /* XXX: rollback? */
4428 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
4430 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
4431 break; /* XXX: rollback? */
4433 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
4435 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
4436 break; /* XXX: rollback? */
4439 /* XXX: these should always succeed here */
4440 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
4441 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
4442 pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
4443 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
4444 pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
4446 if ((error = pf_clear_tables()) != 0)
4450 if ((error = pf_begin_altq(&t[0])) != 0) {
4451 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
4454 pf_commit_altq(t[0]);
4459 pf_clear_srcnodes(NULL);
4461 /* status does not use malloc'ed mem so no need to clean up */
4462 /* fingerprints and interfaces have their own cleanup code */
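/*
 * Translate a pf_test()/pf_test6() verdict into the pfil(9)
 * return code expected by the hook framework.
 */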
4468 static pfil_return_t
4469 pf_check_return(int chk, struct mbuf **m)
4475 return (PFIL_CONSUMED);
4484 return (PFIL_DROPPED);
4489 static pfil_return_t
4490 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
4491 void *ruleset __unused, struct inpcb *inp)
4495 chk = pf_test(PF_IN, flags, ifp, m, inp);
4497 return (pf_check_return(chk, m));
4500 static pfil_return_t
4501 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
4502 void *ruleset __unused, struct inpcb *inp)
4506 chk = pf_test(PF_OUT, flags, ifp, m, inp);
4508 return (pf_check_return(chk, m));
4513 static pfil_return_t
4514 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
4515 void *ruleset __unused, struct inpcb *inp)
4520 * In case of loopback traffic IPv6 uses the real interface in
4521 * order to support scoped addresses. In order to support stateful
4522 * filtering we have to change this to lo0, as is the case in IPv4.
4524 CURVNET_SET(ifp->if_vnet);
4525 chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp);
4528 return (pf_check_return(chk, m));
4531 static pfil_return_t
4532 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
4533 void *ruleset __unused, struct inpcb *inp)
4537 CURVNET_SET(ifp->if_vnet);
4538 chk = pf_test6(PF_OUT, flags, ifp, m, inp);
4541 return (pf_check_return(chk, m));
4546 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
4547 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
4548 #define V_pf_ip4_in_hook VNET(pf_ip4_in_hook)
4549 #define V_pf_ip4_out_hook VNET(pf_ip4_out_hook)
4552 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
4553 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
4554 #define V_pf_ip6_in_hook VNET(pf_ip6_in_hook)
4555 #define V_pf_ip6_out_hook VNET(pf_ip6_out_hook)
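/*
 * Register the pf input/output hooks with pfil(9) and link them
 * onto the inet and inet6 heads.
 */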
4561 struct pfil_hook_args pha;
4562 struct pfil_link_args pla;
4565 if (V_pf_pfil_hooked)
4568 pha.pa_version = PFIL_VERSION;
4569 pha.pa_modname = "pf";
4570 pha.pa_ruleset = NULL;
4572 pla.pa_version = PFIL_VERSION;
4575 pha.pa_type = PFIL_TYPE_IP4;
4576 pha.pa_func = pf_check_in;
4577 pha.pa_flags = PFIL_IN;
4578 pha.pa_rulname = "default-in";
4579 V_pf_ip4_in_hook = pfil_add_hook(&pha);
4580 pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
4581 pla.pa_head = V_inet_pfil_head;
4582 pla.pa_hook = V_pf_ip4_in_hook;
4583 ret = pfil_link(&pla);
4585 pha.pa_func = pf_check_out;
4586 pha.pa_flags = PFIL_OUT;
4587 pha.pa_rulname = "default-out";
4588 V_pf_ip4_out_hook = pfil_add_hook(&pha);
4589 pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
4590 pla.pa_head = V_inet_pfil_head;
4591 pla.pa_hook = V_pf_ip4_out_hook;
4592 ret = pfil_link(&pla);
4596 pha.pa_type = PFIL_TYPE_IP6;
4597 pha.pa_func = pf_check6_in;
4598 pha.pa_flags = PFIL_IN;
4599 pha.pa_rulname = "default-in6";
4600 V_pf_ip6_in_hook = pfil_add_hook(&pha);
4601 pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
4602 pla.pa_head = V_inet6_pfil_head;
4603 pla.pa_hook = V_pf_ip6_in_hook;
4604 ret = pfil_link(&pla);
4606 pha.pa_func = pf_check6_out;
4607 pha.pa_rulname = "default-out6";
4608 pha.pa_flags = PFIL_OUT;
4609 V_pf_ip6_out_hook = pfil_add_hook(&pha);
4610 pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
4611 pla.pa_head = V_inet6_pfil_head;
4612 pla.pa_hook = V_pf_ip6_out_hook;
4613 ret = pfil_link(&pla);
4617 V_pf_pfil_hooked = 1;
4624 if (V_pf_pfil_hooked == 0)
4628 pfil_remove_hook(V_pf_ip4_in_hook);
4629 pfil_remove_hook(V_pf_ip4_out_hook);
4632 pfil_remove_hook(V_pf_ip6_in_hook);
4633 pfil_remove_hook(V_pf_ip6_out_hook);
4636 V_pf_pfil_hooked = 0;
4642 V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
4643 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
4645 pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
4646 PF_RULE_TAG_HASH_SIZE_DEFAULT);
4648 pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
4649 PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
4653 V_pf_vnet_active = 1;
4661 rm_init(&pf_rules_lock, "pf rulesets");
4662 sx_init(&pf_ioctl_lock, "pf ioctl");
4663 sx_init(&pf_end_lock, "pf end thread");
4665 pf_mtag_initialize();
4667 pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
4672 error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
4682 pf_unload_vnet(void)
4685 V_pf_vnet_active = 0;
4686 V_pf_status.running = 0;
4693 swi_remove(V_pf_swi_cookie);
4695 pf_unload_vnet_purge();
4697 pf_normalize_cleanup();
4704 if (IS_DEFAULT_VNET(curvnet))
4707 pf_cleanup_tagset(&V_pf_tags);
4709 pf_cleanup_tagset(&V_pf_qids);
4711 uma_zdestroy(V_pf_tag_z);
4713 /* Free counters last as we updated them during shutdown. */
4714 counter_u64_free(V_pf_default_rule.evaluations);
4715 for (int i = 0; i < 2; i++) {
4716 counter_u64_free(V_pf_default_rule.packets[i]);
4717 counter_u64_free(V_pf_default_rule.bytes[i]);
4719 counter_u64_free(V_pf_default_rule.states_cur);
4720 counter_u64_free(V_pf_default_rule.states_tot);
4721 counter_u64_free(V_pf_default_rule.src_nodes);
4723 for (int i = 0; i < PFRES_MAX; i++)
4724 counter_u64_free(V_pf_status.counters[i]);
4725 for (int i = 0; i < LCNT_MAX; i++)
4726 counter_u64_free(V_pf_status.lcounters[i]);
4727 for (int i = 0; i < FCNT_MAX; i++)
4728 counter_u64_free(V_pf_status.fcounters[i]);
4729 for (int i = 0; i < SCNT_MAX; i++)
4730 counter_u64_free(V_pf_status.scounters[i]);
4737 sx_xlock(&pf_end_lock);
4739 while (pf_end_threads < 2) {
4740 wakeup_one(pf_purge_thread);
4741 sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
4743 sx_xunlock(&pf_end_lock);
4746 destroy_dev(pf_dev);
4750 rm_destroy(&pf_rules_lock);
4751 sx_destroy(&pf_ioctl_lock);
4752 sx_destroy(&pf_end_lock);
4756 vnet_pf_init(void *unused __unused)
4761 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
4762 vnet_pf_init, NULL);
4765 vnet_pf_uninit(const void *unused __unused)
4770 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
4771 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
4772 vnet_pf_uninit, NULL);
4775 pf_modevent(module_t mod, int type, void *data)
4784 /* Handled in SYSUNINIT(pf_unload) to ensure it's done after
4785 * the vnet_pf_uninit()s */
4795 static moduledata_t pf_mod = {
4801 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
4802 MODULE_VERSION(pf, PF_MODVER);