/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include <sys/param.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/ucred.h>

#include <net/if_var.h>
#include <net/route.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>

#include <netinet/ip6.h>

#ifdef ALTQ
#include <net/altq/altq.h>
#endif
static struct pf_kpool	*pf_get_kpool(char *, u_int32_t, u_int8_t, u_int32_t,
			    u_int8_t, u_int8_t, u_int8_t);

static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
static void		 pf_empty_kpool(struct pf_kpalist *);
static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
			    struct thread *);
#ifdef ALTQ
static int		 pf_begin_altq(u_int32_t *);
static int		 pf_rollback_altq(u_int32_t);
static int		 pf_commit_altq(u_int32_t);
static int		 pf_enable_altq(struct pf_altq *);
static int		 pf_disable_altq(struct pf_altq *);
static u_int32_t	 pf_qname2qid(char *);
static void		 pf_qid_unref(u_int32_t);
#endif /* ALTQ */
static int		 pf_begin_rules(u_int32_t *, int, const char *);
static int		 pf_rollback_rules(u_int32_t, int, char *);
static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
static void		 pf_hash_rule(MD5_CTX *, struct pf_krule *);
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int		 pf_commit_rules(u_int32_t, int, char *);
static int		 pf_addr_setup(struct pf_kruleset *,
			    struct pf_addr_wrap *, sa_family_t);
static void		 pf_addr_copyout(struct pf_addr_wrap *);
static void		 pf_src_node_copy(const struct pf_ksrc_node *,
			    struct pf_src_node *);
#ifdef ALTQ
static int		 pf_export_kaltq(struct pf_altq *,
			    struct pfioc_altq_v1 *, size_t);
static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
			    struct pf_altq *, size_t);
#endif /* ALTQ */
VNET_DEFINE(struct pf_krule,	pf_default_rule);

#ifdef ALTQ
VNET_DEFINE_STATIC(int,		pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)
#endif

#define	TAGID_MAX	50000
struct pf_tagname {
	TAILQ_ENTRY(pf_tagname)	namehash_entries;
	TAILQ_ENTRY(pf_tagname)	taghash_entries;
	char			name[PF_TAG_NAME_SIZE];
	uint16_t		tag;
	int			ref;
};

struct pf_tagset {
	TAILQ_HEAD(, pf_tagname)	*namehash;
	TAILQ_HEAD(, pf_tagname)	*taghash;
	unsigned int			 mask;
	uint32_t			 seed;
	BITSET_DEFINE(, TAGID_MAX)	 avail;
};

VNET_DEFINE(struct pf_tagset, pf_tags);
#define	V_pf_tags	VNET(pf_tags)
static unsigned int pf_rule_tag_hashsize;
#define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
    &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) rule tag hashtable");

#ifdef ALTQ
VNET_DEFINE(struct pf_tagset, pf_qids);
#define	V_pf_qids	VNET(pf_qids)
static unsigned int pf_queue_tag_hashsize;
#define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
    &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) queue tag hashtable");
#endif

VNET_DEFINE(uma_zone_t,	pf_tag_z);
#define	V_pf_tag_z	VNET(pf_tag_z)
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
			    unsigned int);
static void		 pf_cleanup_tagset(struct pf_tagset *);
static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
static u_int16_t	 tagname2tag(struct pf_tagset *, char *);
static u_int16_t	 pf_tagname2tag(char *);
static void		 tag_unref(struct pf_tagset *, u_int16_t);

#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x
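
/*
 * Usage note: DPFPRINTF() pastes "x" verbatim as a complete printf()
 * argument list, so callers wrap the format string and its arguments
 * in an extra set of parentheses, e.g.:
 *
 *	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
 */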
/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_states(void);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(struct pf_ksrc_node *);
static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);
/*
 * Wrapper functions for pfil(9) hooks
 */
#ifdef INET
static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, int flags, struct inpcb *inp);
static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, int flags, struct inpcb *inp);
#endif
#ifdef INET6
static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, int flags, struct inpcb *inp);
static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, int flags, struct inpcb *inp);
#endif
static int		 hook_pf(void);
static int		 dehook_pf(void);
static int		 shutdown_pf(void);
static int		 pf_load(void);
static void		 pf_unload(void);

static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

volatile VNET_DEFINE_STATIC(int, pf_pfil_hooked);
#define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid".  We primarily need this to control (global)
 * external events, e.g., eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define V_pf_vnet_active	VNET(pf_vnet_active)

struct proc		*pf_purge_proc;

struct rmlock		 pf_rules_lock;
struct sx		 pf_ioctl_lock;
struct sx		 pf_end_lock;
VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;

pflog_packet_t *pflog_packet_ptr = NULL;

extern u_long pf_ioctl_maxcount;
static void
pfattach_vnet(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;

	pfi_initialize_vnet();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_kruleset(&pf_main_ruleset);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
	V_pf_default_rule.action = PF_DROP;
#else
	V_pf_default_rule.action = PF_PASS;
#endif
	V_pf_default_rule.nr = -1;
	V_pf_default_rule.rtableid = -1;

	V_pf_default_rule.evaluations = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < 2; i++) {
		V_pf_default_rule.packets[i] = counter_u64_alloc(M_WAITOK);
		V_pf_default_rule.bytes[i] = counter_u64_alloc(M_WAITOK);
	}
	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	bzero(&V_pf_status, sizeof(V_pf_status));
	V_pf_status.debug = PF_DEBUG_URGENT;

	V_pf_pfil_hooked = 0;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	for (int i = 0; i < PFRES_MAX; i++)
		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < LCNT_MAX; i++)
		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < FCNT_MAX; i++)
		V_pf_status.fcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < SCNT_MAX; i++)
		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
		/* XXXGL: leaked all above. */
		return;
}
static struct pf_kpool *
pf_get_kpool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_kruleset	*ruleset;
	struct pf_krule		*rule;
	int			 rs_num;

	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}
static void
pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
{
	struct pf_kpooladdr *mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}
static void
pf_empty_kpool(struct pf_kpalist *poola)
{
	struct pf_kpooladdr *pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			/* XXX: this could be unfinished pooladdr on pabuf */
			if (pa->addr.p.tbl != NULL)
				pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kkif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}
static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	TAILQ_REMOVE(rulequeue, rule, entries);

	PF_UNLNKDRULES_LOCK();
	rule->rule_flag |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
	PF_UNLNKDRULES_UNLOCK();
}
void
pf_free_rule(struct pf_krule *rule)
{

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);
	pf_kanchor_remove(rule);
	pf_empty_kpool(&rule->rpool.list);
}
static void
pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
    unsigned int default_size)
{
	unsigned int i;
	unsigned int hashsize;

	if (*tunable_size == 0 || !powerof2(*tunable_size))
		*tunable_size = default_size;

	hashsize = *tunable_size;
	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
	    M_WAITOK);
	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
	    M_WAITOK);
	ts->mask = hashsize - 1;
	ts->seed = arc4random();
	for (i = 0; i < hashsize; i++) {
		TAILQ_INIT(&ts->namehash[i]);
		TAILQ_INIT(&ts->taghash[i]);
	}
	BIT_FILL(TAGID_MAX, &ts->avail);
}
static void
pf_cleanup_tagset(struct pf_tagset *ts)
{
	unsigned int i;
	unsigned int hashsize;
	struct pf_tagname *t, *tmp;

	/*
	 * Only need to clean up one of the hashes as each tag is hashed
	 * into both tables.
	 */
	hashsize = ts->mask + 1;
	for (i = 0; i < hashsize; i++)
		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
			uma_zfree(V_pf_tag_z, t);

	free(ts->namehash, M_PFHASH);
	free(ts->taghash, M_PFHASH);
}
static uint16_t
tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
{
	size_t len;

	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
}

static uint16_t
tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
{

	return (tag & ts->mask);
}
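
/*
 * Each pf_tagname is linked into both hashes above: namehash is keyed
 * on the tag name (for tagname2tag() lookups when rules are loaded)
 * and taghash on the numeric tag (for tag_unref() when rules are
 * freed), so both directions resolve in O(1) on average.
 */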
static u_int16_t
tagname2tag(struct pf_tagset *ts, char *tagname)
{
	struct pf_tagname	*tag;
	u_int32_t		 index;
	u_int16_t		 new_tagid;

	index = tagname2hashindex(ts, tagname);
	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * new entry
	 *
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find.
	 */
	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
	/*
	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
	 * set.  It may also return a bit number greater than TAGID_MAX due
	 * to rounding of the number of bits in the vector up to a multiple
	 * of the vector word size at declaration/allocation time.
	 */
	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
		return (0);

	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);

	/* allocate and fill new struct pf_tagname */
	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref = 1;

	/* Insert into namehash */
	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);

	/* Insert into taghash */
	index = tag2hashindex(ts, new_tagid);
	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);

	return (tag->tag);
}
static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
	struct pf_tagname	*t;
	uint16_t		 index;

	index = tag2hashindex(ts, tag);
	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
		if (tag == t->tag) {
			if (--t->ref == 0) {
				TAILQ_REMOVE(&ts->taghash[index], t,
				    taghash_entries);
				index = tagname2hashindex(ts, t->name);
				TAILQ_REMOVE(&ts->namehash[index], t,
				    namehash_entries);
				/* Bits are 0-based for BIT_SET() */
				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
				uma_zfree(V_pf_tag_z, t);
			}
			break;
		}
}
static u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&V_pf_tags, tagname));
}

#ifdef ALTQ
static u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&V_pf_qids, qname));
}

static void
pf_qid_unref(u_int32_t qid)
{
	tag_unref(&V_pf_qids, (u_int16_t)qid);
}
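
/*
 * Queue IDs share the 16-bit tag allocator even though the pf_altq
 * ABI carries them as u_int32_t, hence the widening and narrowing
 * casts in pf_qname2qid() and pf_qid_unref() above.
 */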
static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}
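
/*
 * ALTQ configuration changes are transactional: pf_begin_altq() clears
 * the inactive lists and hands out a ticket, userland then loads new
 * queues against that ticket, and pf_commit_altq() swaps the inactive
 * and active lists if the ticket still matches.  pf_rollback_altq()
 * discards the inactive lists without touching the active ones.
 */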
static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	V_altqs_inactive_open = 0;
	return (error);
}
static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
	struct pf_altq		*altq, *tmp;
	int			 err, error = 0;

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = V_pf_altqs_active;
	old_altq_ifs = V_pf_altq_ifs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_pf_altq_ifs_inactive = old_altq_ifs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);

	V_altqs_inactive_open = 0;
	return (error);
}
static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}
static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one.  if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}
static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
	struct ifnet	*ifp1;
	int		 error = 0;

	/* Deactivate the interface in question */
	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
	    (remove && ifp1 == ifp)) {
		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
	} else {
		error = altq_add(ifp1, altq);

		if (ticket != V_ticket_altqs_inactive)
			error = EBUSY;

		if (error)
			free(altq, M_PFALTQ);
	}

	return (error);
}
void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/*
	 * No need to re-evaluate the configuration for events on interfaces
	 * that do not support ALTQ, as it's not possible for such
	 * interfaces to be part of the configuration.
	 */
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
	}
	if (error)
		goto out;
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
			error = EBUSY;
			free(a2, M_PFALTQ);
			break;
		}
		a2->altq_disc = NULL;
		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
			if (strncmp(a3->ifname, a2->ifname,
			    IFNAMSIZ) == 0) {
				a2->altq_disc = a3->altq_disc;
				break;
			}
		}
		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

out:
	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
#endif /* ALTQ */
static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_kruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}
static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)
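
/*
 * The PF_MD5_UPD_HTON*() variants hash multi-byte fields in network
 * byte order so that the resulting ruleset checksum is comparable
 * between hosts of different endianness (e.g., pfsync peers).
 */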
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{

	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_krule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule, **old_array;
	struct pf_krulequeue	*old_rules;
	int			 error;
	u_int32_t		 old_rcount;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_kruleset(rs);

	return (0);
}
static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_krule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}
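
/*
 * The checksum computed above is stored in V_pf_status.pf_chksum and
 * exported via DIOCGETSTATUS; identical checksums on two hosts are a
 * strong hint that they run identical main rulesets, which pfsync
 * peers rely on when matching states to rules.
 */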
static int
pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}
static void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}
static void
pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
{
	int	secs = time_uptime, diff;

	bzero(out, sizeof(struct pf_src_node));

	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));

	if (in->rule.ptr != NULL)
		out->rule.nr = in->rule.ptr->nr;

	for (int i = 0; i < 2; i++) {
		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
		out->packets[i] = counter_u64_fetch(in->packets[i]);
	}

	out->states = in->states;
	out->conn = in->conn;
	out->af = in->af;
	out->ruletype = in->ruletype;
	out->expire = in->expire;

	out->creation = secs - in->creation;
	if (out->expire > secs)
		out->expire -= secs;
	else
		out->expire = 0;

	/* Adjust the connection rate estimate. */
	out->conn_rate = in->conn_rate;
	diff = secs - in->conn_rate.last;
	if (diff >= in->conn_rate.seconds)
		out->conn_rate.count = 0;
	else
		out->conn_rate.count -=
		    in->conn_rate.count * diff /
		    in->conn_rate.seconds;
}
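
/*
 * The conn_rate adjustment above linearly decays the counted
 * connections by the fraction of the rate-limit window that has
 * already elapsed; e.g., with seconds = 10 and diff = 5, half of
 * the recorded count is credited back.
 */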
/*
 * Handle export of struct pf_kaltq to user binaries that may be using any
 * version of struct pf_altq.
 */
static int
pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
{
	int version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) exported_q->x = q->x
#define COPY(x) \
	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
#define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
#define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)

	switch (version) {
	case 0: {
		struct pf_altq_v0 *exported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		exported_q->tbrsize = SATU16(q->tbrsize);
		exported_q->ifbandwidth = SATU32(q->ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		exported_q->bandwidth = SATU32(q->bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
#define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
	    SATU32(q->pq_u.hfsc_opts.x)
			ASSIGN_OPT_SATU32(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT_SATU32(rtsc_m2);

			ASSIGN_OPT_SATU32(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT_SATU32(lssc_m2);

			ASSIGN_OPT_SATU32(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT_SATU32(ulsc_m2);

			ASSIGN_OPT(flags);
#undef ASSIGN_OPT
#undef ASSIGN_OPT_SATU32
		}
		break;
	}
	case 1: {
		struct pf_altq_v1 *exported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
	}

#undef ASSIGN
#undef COPY
#undef SATU16
#undef SATU32

	return (0);
}
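
/*
 * SATU16()/SATU32() saturate instead of truncating: a 64-bit kernel
 * bandwidth value that no longer fits the narrower v0 ABI field is
 * exported as the field's maximum rather than as a wrapped-around
 * small number.
 */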
/*
 * Handle import to struct pf_kaltq of struct pf_altq from user binaries
 * that may be using any version of it.
 */
static int
pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
{
	int version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) q->x = imported_q->x
#define COPY(x) \
	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))

	switch (version) {
	case 0: {
		struct pf_altq_v0 *imported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (imported_q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x

			/*
			 * The m1 and m2 parameters are being copied from
			 * 32-bit to 64-bit fields.
			 */
			ASSIGN_OPT(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT(rtsc_m2);

			ASSIGN_OPT(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT(lssc_m2);

			ASSIGN_OPT(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT(ulsc_m2);
#undef ASSIGN_OPT
		}
		break;
	}
	case 1: {
		struct pf_altq_v1 *imported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
	}

#undef ASSIGN
#undef COPY

	return (0);
}
static struct pf_altq *
pf_altq_get_nth_active(u_int32_t n)
{
	struct pf_altq	*altq;
	u_int32_t	 nr;

	nr = 0;
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if (nr == n)
			return (altq);
		nr++;
	}
	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
		if (nr == n)
			return (altq);
		nr++;
	}
	return (NULL);
}
#endif /* ALTQ */
void
pf_krule_free(struct pf_krule *rule)
{
	if (rule == NULL)
		return;

	counter_u64_free(rule->evaluations);
	for (int i = 0; i < 2; i++) {
		counter_u64_free(rule->packets[i]);
		counter_u64_free(rule->bytes[i]);
	}
	counter_u64_free(rule->states_cur);
	counter_u64_free(rule->states_tot);
	counter_u64_free(rule->src_nodes);
	free(rule, M_PFRULE);
}
static void
pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
    struct pf_pooladdr *pool)
{

	bzero(pool, sizeof(*pool));
	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
}

static void
pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
    struct pf_kpooladdr *kpool)
{

	bzero(kpool, sizeof(*kpool));
	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
	strlcpy(kpool->ifname, pool->ifname, sizeof(kpool->ifname));
}
static void
pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool)
{
	bzero(pool, sizeof(*pool));

	bcopy(&kpool->key, &pool->key, sizeof(pool->key));
	bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter));

	pool->tblidx = kpool->tblidx;
	pool->proxy_port[0] = kpool->proxy_port[0];
	pool->proxy_port[1] = kpool->proxy_port[1];
	pool->opts = kpool->opts;
}

static int
pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
{
	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");

	bzero(kpool, sizeof(*kpool));

	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));

	kpool->tblidx = pool->tblidx;
	kpool->proxy_port[0] = pool->proxy_port[0];
	kpool->proxy_port[1] = pool->proxy_port[1];
	kpool->opts = pool->opts;

	return (0);
}
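
/*
 * pf_kpool_to_pool()/pf_pool_to_kpool() convert between the kernel
 * pool representation and the user-visible struct pf_pool, copying
 * only the fields that are part of the ioctl ABI; kernel-only state
 * (e.g., the current pool member) is deliberately left behind the
 * bzero().
 */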
static void
pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
{

	bzero(rule, sizeof(*rule));

	bcopy(&krule->src, &rule->src, sizeof(rule->src));
	bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));

	for (int i = 0; i < PF_SKIP_COUNT; ++i) {
		if (krule->skip[i].ptr == NULL)
			rule->skip[i].nr = -1;
		else
			rule->skip[i].nr = krule->skip[i].ptr->nr;
	}

	strlcpy(rule->label, krule->label, sizeof(rule->label));
	strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
	strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
	strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
	strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
	strlcpy(rule->match_tagname, krule->match_tagname,
	    sizeof(rule->match_tagname));
	strlcpy(rule->overload_tblname, krule->overload_tblname,
	    sizeof(rule->overload_tblname));

	pf_kpool_to_pool(&krule->rpool, &rule->rpool);

	rule->evaluations = counter_u64_fetch(krule->evaluations);
	for (int i = 0; i < 2; i++) {
		rule->packets[i] = counter_u64_fetch(krule->packets[i]);
		rule->bytes[i] = counter_u64_fetch(krule->bytes[i]);
	}

	/* kif, anchor, overload_tbl are not copied over. */

	rule->os_fingerprint = krule->os_fingerprint;

	rule->rtableid = krule->rtableid;
	bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout));
	rule->max_states = krule->max_states;
	rule->max_src_nodes = krule->max_src_nodes;
	rule->max_src_states = krule->max_src_states;
	rule->max_src_conn = krule->max_src_conn;
	rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit;
	rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds;
	rule->qid = krule->qid;
	rule->pqid = krule->pqid;
	rule->rt_listid = krule->rt_listid;
	rule->nr = krule->nr;
	rule->prob = krule->prob;
	rule->cuid = krule->cuid;
	rule->cpid = krule->cpid;

	rule->return_icmp = krule->return_icmp;
	rule->return_icmp6 = krule->return_icmp6;
	rule->max_mss = krule->max_mss;
	rule->tag = krule->tag;
	rule->match_tag = krule->match_tag;
	rule->scrub_flags = krule->scrub_flags;

	bcopy(&krule->uid, &rule->uid, sizeof(krule->uid));
	bcopy(&krule->gid, &rule->gid, sizeof(krule->gid));

	rule->rule_flag = krule->rule_flag;
	rule->action = krule->action;
	rule->direction = krule->direction;
	rule->log = krule->log;
	rule->logif = krule->logif;
	rule->quick = krule->quick;
	rule->ifnot = krule->ifnot;
	rule->match_tag_not = krule->match_tag_not;
	rule->natpass = krule->natpass;

	rule->keep_state = krule->keep_state;
	rule->af = krule->af;
	rule->proto = krule->proto;
	rule->type = krule->type;
	rule->code = krule->code;
	rule->flags = krule->flags;
	rule->flagset = krule->flagset;
	rule->min_ttl = krule->min_ttl;
	rule->allow_opts = krule->allow_opts;
	rule->rt = krule->rt;
	rule->return_ttl = krule->return_ttl;
	rule->tos = krule->tos;
	rule->set_tos = krule->set_tos;
	rule->anchor_relative = krule->anchor_relative;
	rule->anchor_wildcard = krule->anchor_wildcard;

	rule->flush = krule->flush;
	rule->prio = krule->prio;
	rule->set_prio[0] = krule->set_prio[0];
	rule->set_prio[1] = krule->set_prio[1];

	bcopy(&krule->divert, &rule->divert, sizeof(krule->divert));

	rule->u_states_cur = counter_u64_fetch(krule->states_cur);
	rule->u_states_tot = counter_u64_fetch(krule->states_tot);
	rule->u_src_nodes = counter_u64_fetch(krule->src_nodes);
}
static int
pf_check_rule_addr(const struct pf_rule_addr *addr)
{

	switch (addr->addr.type) {
	case PF_ADDR_ADDRMASK:
	case PF_ADDR_NOROUTE:
	case PF_ADDR_DYNIFTL:
	case PF_ADDR_TABLE:
	case PF_ADDR_URPFFAILED:
		break;
	default:
		return (EINVAL);
	}

	if (addr->addr.p.dyn != NULL) {
		return (EINVAL);
	}

	return (0);
}
static int
pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
{
	int ret;

#ifndef INET
	if (rule->af == AF_INET) {
		return (EAFNOSUPPORT);
	}
#endif /* INET */
#ifndef INET6
	if (rule->af == AF_INET6) {
		return (EAFNOSUPPORT);
	}
#endif /* INET6 */

	ret = pf_check_rule_addr(&rule->src);
	if (ret != 0)
		return (ret);
	ret = pf_check_rule_addr(&rule->dst);
	if (ret != 0)
		return (ret);

	bzero(krule, sizeof(*krule));

	bcopy(&rule->src, &krule->src, sizeof(rule->src));
	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));

	strlcpy(krule->label, rule->label, sizeof(rule->label));
	strlcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
	strlcpy(krule->qname, rule->qname, sizeof(rule->qname));
	strlcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
	strlcpy(krule->tagname, rule->tagname, sizeof(rule->tagname));
	strlcpy(krule->match_tagname, rule->match_tagname,
	    sizeof(rule->match_tagname));
	strlcpy(krule->overload_tblname, rule->overload_tblname,
	    sizeof(rule->overload_tblname));

	ret = pf_pool_to_kpool(&rule->rpool, &krule->rpool);
	if (ret != 0)
		return (ret);

	/* Don't allow userspace to set evaluations, packets or bytes. */
	/* kif, anchor, overload_tbl are not copied over. */

	krule->os_fingerprint = rule->os_fingerprint;

	krule->rtableid = rule->rtableid;
	bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout));
	krule->max_states = rule->max_states;
	krule->max_src_nodes = rule->max_src_nodes;
	krule->max_src_states = rule->max_src_states;
	krule->max_src_conn = rule->max_src_conn;
	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
	krule->qid = rule->qid;
	krule->pqid = rule->pqid;
	krule->rt_listid = rule->rt_listid;
	krule->nr = rule->nr;
	krule->prob = rule->prob;
	krule->cuid = rule->cuid;
	krule->cpid = rule->cpid;

	krule->return_icmp = rule->return_icmp;
	krule->return_icmp6 = rule->return_icmp6;
	krule->max_mss = rule->max_mss;
	krule->tag = rule->tag;
	krule->match_tag = rule->match_tag;
	krule->scrub_flags = rule->scrub_flags;

	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));

	krule->rule_flag = rule->rule_flag;
	krule->action = rule->action;
	krule->direction = rule->direction;
	krule->log = rule->log;
	krule->logif = rule->logif;
	krule->quick = rule->quick;
	krule->ifnot = rule->ifnot;
	krule->match_tag_not = rule->match_tag_not;
	krule->natpass = rule->natpass;

	krule->keep_state = rule->keep_state;
	krule->af = rule->af;
	krule->proto = rule->proto;
	krule->type = rule->type;
	krule->code = rule->code;
	krule->flags = rule->flags;
	krule->flagset = rule->flagset;
	krule->min_ttl = rule->min_ttl;
	krule->allow_opts = rule->allow_opts;
	krule->rt = rule->rt;
	krule->return_ttl = rule->return_ttl;
	krule->tos = rule->tos;
	krule->set_tos = rule->set_tos;
	krule->anchor_relative = rule->anchor_relative;
	krule->anchor_wildcard = rule->anchor_wildcard;

	krule->flush = rule->flush;
	krule->prio = rule->prio;
	krule->set_prio[0] = rule->set_prio[0];
	krule->set_prio[1] = rule->set_prio[1];

	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));

	return (0);
}
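
/*
 * Scan one ID-hash row and kill every state matching the filter in
 * "psk".  pf_unlink_state() drops the row lock, so after each kill
 * the scan restarts from the head of the row (relock_DIOCKILLSTATES)
 * rather than continuing from a possibly stale list pointer.
 */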
static u_int
pf_killstates_row(struct pfioc_state_kill *psk, struct pf_idhash *ih)
{
	struct pf_state		*s;
	struct pf_state_key	*sk;
	struct pf_addr		*srcaddr, *dstaddr;
	u_int16_t		 srcport, dstport;
	u_int			 killed = 0;

relock_DIOCKILLSTATES:
	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(s, &ih->states, entry) {
		sk = s->key[PF_SK_WIRE];
		if (s->direction == PF_OUT) {
			srcaddr = &sk->addr[1];
			dstaddr = &sk->addr[0];
			srcport = sk->port[1];
			dstport = sk->port[0];
		} else {
			srcaddr = &sk->addr[0];
			dstaddr = &sk->addr[1];
			srcport = sk->port[0];
			dstport = sk->port[1];
		}

		if (psk->psk_af && sk->af != psk->psk_af)
			continue;

		if (psk->psk_proto && psk->psk_proto != sk->proto)
			continue;

		if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
			continue;

		if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
			continue;

		if (psk->psk_src.port_op != 0 &&
		    ! pf_match_port(psk->psk_src.port_op,
		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
			continue;

		if (psk->psk_dst.port_op != 0 &&
		    ! pf_match_port(psk->psk_dst.port_op,
		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
			continue;

		if (psk->psk_label[0] && (! s->rule.ptr->label[0] ||
		    strcmp(psk->psk_label, s->rule.ptr->label)))
			continue;

		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
		    s->kif->pfik_name))
			continue;

		pf_unlink_state(s, PF_ENTER_LOCKED);
		killed++;
		goto relock_DIOCKILLSTATES;
	}
	PF_HASHROW_UNLOCK(ih);

	return (killed);
}
static int
pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int		 error = 0;
	PF_RULES_RLOCK_TRACKER;

	/* XXX keep in sync with switch() below */
	if (securelevel_gt(td->td_ucred, 2))
		switch (cmd) {
		case DIOCSETSTATUSIF:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETALTQSV0:
		case DIOCGETALTQSV1:
		case DIOCGETQSTATSV0:
		case DIOCGETQSTATSV1:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEEDV0:
		case DIOCGIFSPEEDV1:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		default:
			return (EPERM);
		}
	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETTIMEOUT:
		case DIOCGETALTQSV0:
		case DIOCGETALTQSV1:
		case DIOCGETQSTATSV0:
		case DIOCGETQSTATSV1:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETASTATS:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEEDV1:
		case DIOCGIFSPEEDV0:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return (EACCES);
		case DIOCGETRULE:
			if (((struct pfioc_rule *)addr)->action ==
			    PF_GET_CLR_CNTR)
				return (EACCES);
			break;
		default:
			return (EACCES);
		}
	CURVNET_SET(TD_TO_VNET(td));

	switch (cmd) {
	case DIOCSTART:
		sx_xlock(&pf_ioctl_lock);
		if (V_pf_status.running)
			error = EEXIST;
		else {
			int cpu;

			error = hook_pf();
			if (error) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: pfil registration failed\n"));
				break;
			}
			V_pf_status.running = 1;
			V_pf_status.since = time_second;

			CPU_FOREACH(cpu)
				V_pf_stateid[cpu] = time_second;

			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
		}
		break;

	case DIOCSTOP:
		sx_xlock(&pf_ioctl_lock);
		if (!V_pf_status.running)
			error = ENOENT;
		else {
			V_pf_status.running = 0;
			error = dehook_pf();
			if (error) {
				V_pf_status.running = 1;
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: pfil unregistration failed\n"));
			}
			V_pf_status.since = time_second;
			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
		}
		break;
	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_krule		*rule, *tail;
		struct pf_kpooladdr	*pa;
		struct pfi_kkif		*kif = NULL;
		int			 rs_num;

		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}

		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
		error = pf_rule_to_krule(&pr->rule, rule);
		if (error != 0) {
			free(rule, M_PFRULE);
			break;
		}

		if (rule->ifname[0])
			kif = pf_kkif_create(M_WAITOK);
		rule->evaluations = counter_u64_alloc(M_WAITOK);
		for (int i = 0; i < 2; i++) {
			rule->packets[i] = counter_u64_alloc(M_WAITOK);
			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
		}
		rule->states_cur = counter_u64_alloc(M_WAITOK);
		rule->states_tot = counter_u64_alloc(M_WAITOK);
		rule->src_nodes = counter_u64_alloc(M_WAITOK);
		rule->cuid = td->td_ucred->cr_ruid;
		rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
		TAILQ_INIT(&rule->rpool.list);

#define	ERROUT(x)	{ error = (x); goto DIOCADDRULE_error; }

		PF_RULES_WLOCK();
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_kruleset(pr->anchor);
		if (ruleset == NULL)
			ERROUT(EINVAL);
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX)
			ERROUT(EINVAL);
		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
			DPFPRINTF(PF_DEBUG_MISC,
			    ("ticket: %d != [%d]%d\n", pr->ticket, rs_num,
			    ruleset->rules[rs_num].inactive.ticket));
			ERROUT(EBUSY);
		}
		if (pr->pool_ticket != V_ticket_pabuf) {
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pool_ticket: %d != %d\n", pr->pool_ticket,
			    V_ticket_pabuf));
			ERROUT(EBUSY);
		}

		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
		    pf_krulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;
		if (rule->ifname[0]) {
			rule->kif = pfi_kkif_attach(kif, rule->ifname);
			pfi_kkif_ref(rule->kif);
		} else
			rule->kif = NULL;

		if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
			error = EBUSY;

#ifdef ALTQ
		/* set queue IDs */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
			else if (rule->pqname[0] != 0) {
				if ((rule->pqid =
				    pf_qname2qid(rule->pqname)) == 0)
					error = EBUSY;
			} else
				rule->pqid = rule->qid;
		}
#endif /* ALTQ */
		if (rule->tagname[0])
			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
				error = EBUSY;
		if (rule->match_tagname[0])
			if ((rule->match_tag =
			    pf_tagname2tag(rule->match_tagname)) == 0)
				error = EBUSY;
		if (rule->rt && !rule->direction)
			error = EINVAL;
		if (rule->logif >= PFLOGIFS_MAX)
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
			error = ENOMEM;
		if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
			error = ENOMEM;
		if (pf_kanchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		if (rule->scrub_flags & PFSTATE_SETPRIO &&
		    (rule->set_prio[0] > PF_PRIO_MAX ||
		    rule->set_prio[1] > PF_PRIO_MAX))
			error = EINVAL;
		TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
			if (pa->addr.type == PF_ADDR_TABLE) {
				pa->addr.p.tbl = pfr_attach_table(ruleset,
				    pa->addr.v.tblname);
				if (pa->addr.p.tbl == NULL)
					error = ENOMEM;
			}

		rule->overload_tbl = NULL;
		if (rule->overload_tblname[0]) {
			if ((rule->overload_tbl = pfr_attach_table(ruleset,
			    rule->overload_tblname)) == NULL)
				error = EINVAL;
			else
				rule->overload_tbl->pfrkt_flags |=
				    PFR_TFLAG_ACTIVE;
		}

		pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list);
		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
		    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
		    (rule->rt > PF_NOPFROUTE)) &&
		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
			error = EINVAL;

		if (error) {
			pf_free_rule(rule);
			PF_RULES_WUNLOCK();
			break;
		}

		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
		counter_u64_zero(rule->evaluations);
		for (int i = 0; i < 2; i++) {
			counter_u64_zero(rule->packets[i]);
			counter_u64_zero(rule->bytes[i]);
		}
		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
		    rule, entries);
		ruleset->rules[rs_num].inactive.rcount++;
		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCADDRULE_error:
		PF_RULES_WUNLOCK();
		pf_krule_free(rule);
		break;
	}
	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_krule		*tail;
		int			 rs_num;

		PF_RULES_WLOCK();
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_kruleset(pr->anchor);
		if (ruleset == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_krulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		PF_RULES_WUNLOCK();
		break;
	}
	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_krule		*rule;
		int			 rs_num;

		PF_RULES_WLOCK();
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_kruleset(pr->anchor);
		if (ruleset == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}

		pf_krule_to_rule(rule, &pr->rule);

		if (pf_kanchor_copyout(ruleset, rule, pr)) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}
		pf_addr_copyout(&pr->rule.src.addr);
		pf_addr_copyout(&pr->rule.dst.addr);

		if (pr->action == PF_GET_CLR_CNTR) {
			counter_u64_zero(rule->evaluations);
			for (int i = 0; i < 2; i++) {
				counter_u64_zero(rule->packets[i]);
				counter_u64_zero(rule->bytes[i]);
			}
			counter_u64_zero(rule->states_tot);
		}
		PF_RULES_WUNLOCK();
		break;
	}
	case DIOCCHANGERULE: {
		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_krule		*oldrule = NULL, *newrule = NULL;
		struct pfi_kkif		*kif = NULL;
		struct pf_kpooladdr	*pa;
		u_int32_t		 nr = 0;
		int			 rs_num;

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}
		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			newrule = malloc(sizeof(*newrule), M_PFRULE, M_WAITOK);
			error = pf_rule_to_krule(&pcr->rule, newrule);
			if (error != 0) {
				free(newrule, M_PFRULE);
				break;
			}

			if (newrule->ifname[0])
				kif = pf_kkif_create(M_WAITOK);
			newrule->evaluations = counter_u64_alloc(M_WAITOK);
			for (int i = 0; i < 2; i++) {
				newrule->packets[i] =
				    counter_u64_alloc(M_WAITOK);
				newrule->bytes[i] =
				    counter_u64_alloc(M_WAITOK);
			}
			newrule->states_cur = counter_u64_alloc(M_WAITOK);
			newrule->states_tot = counter_u64_alloc(M_WAITOK);
			newrule->src_nodes = counter_u64_alloc(M_WAITOK);
			newrule->cuid = td->td_ucred->cr_ruid;
			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
			TAILQ_INIT(&newrule->rpool.list);
		}

#define	ERROUT(x)	{ error = (x); goto DIOCCHANGERULE_error; }

		PF_RULES_WLOCK();
		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != V_ticket_pabuf)
			ERROUT(EBUSY);

		ruleset = pf_find_kruleset(pcr->anchor);
		if (ruleset == NULL)
			ERROUT(EINVAL);

		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX)
			ERROUT(EINVAL);

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			ERROUT(0);
		} else if (pcr->ticket !=
		    ruleset->rules[rs_num].active.ticket)
			ERROUT(EINVAL);

		if (pcr->action != PF_CHANGE_REMOVE) {
			if (newrule->ifname[0]) {
				newrule->kif = pfi_kkif_attach(kif,
				    newrule->ifname);
				pfi_kkif_ref(newrule->kif);
			} else
				newrule->kif = NULL;

			if (newrule->rtableid > 0 &&
			    newrule->rtableid >= rt_numfibs)
				error = EBUSY;

#ifdef ALTQ
			/* set queue IDs */
			if (newrule->qname[0] != 0) {
				if ((newrule->qid =
				    pf_qname2qid(newrule->qname)) == 0)
					error = EBUSY;
				else if (newrule->pqname[0] != 0) {
					if ((newrule->pqid =
					    pf_qname2qid(newrule->pqname)) == 0)
						error = EBUSY;
				} else
					newrule->pqid = newrule->qid;
			}
#endif /* ALTQ */
			if (newrule->tagname[0])
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0)
					error = EBUSY;
			if (newrule->match_tagname[0])
				if ((newrule->match_tag = pf_tagname2tag(
				    newrule->match_tagname)) == 0)
					error = EBUSY;
			if (newrule->rt && !newrule->direction)
				error = EINVAL;
			if (newrule->logif >= PFLOGIFS_MAX)
				error = EINVAL;
			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
				error = ENOMEM;
			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
				error = ENOMEM;
			if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
				error = EINVAL;
			TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
				if (pa->addr.type == PF_ADDR_TABLE) {
					pa->addr.p.tbl =
					    pfr_attach_table(ruleset,
					    pa->addr.v.tblname);
					if (pa->addr.p.tbl == NULL)
						error = ENOMEM;
				}

			newrule->overload_tbl = NULL;
			if (newrule->overload_tblname[0]) {
				if ((newrule->overload_tbl = pfr_attach_table(
				    ruleset, newrule->overload_tblname)) ==
				    NULL)
					error = EINVAL;
				else
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
			}

			pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list);
			if (((((newrule->action == PF_NAT) ||
			    (newrule->action == PF_RDR) ||
			    (newrule->action == PF_BINAT) ||
			    (newrule->rt > PF_NOPFROUTE)) &&
			    !newrule->anchor)) &&
			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
				error = EINVAL;

			if (error) {
				pf_free_rule(newrule);
				PF_RULES_WUNLOCK();
				break;
			}

			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
		}
		pf_empty_kpool(&V_pf_pabuf);

		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(
			    ruleset->rules[rs_num].active.ptr, pf_krulequeue);
		else {
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				if (newrule != NULL)
					pf_free_rule(newrule);
				PF_RULES_WUNLOCK();
				error = EINVAL;
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
			    oldrule);
			ruleset->rules[rs_num].active.rcount--;
		} else {
			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(
				    ruleset->rules[rs_num].active.ptr,
				    newrule, entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(
				    ruleset->rules[rs_num].active.ptr,
				    oldrule, newrule, entries);
			ruleset->rules[rs_num].active.rcount++;
		}

		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
			oldrule->nr = nr++;

		ruleset->rules[rs_num].active.ticket++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_kruleset(ruleset);
		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCCHANGERULE_error:
		PF_RULES_WUNLOCK();
		pf_krule_free(newrule);
		break;
	}
	case DIOCCLRSTATES: {
		struct pf_state		*s;
		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
		u_int			 i, killed = 0;

		for (i = 0; i <= pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];

relock_DIOCCLRSTATES:
			PF_HASHROW_LOCK(ih);
			LIST_FOREACH(s, &ih->states, entry)
				if (!psk->psk_ifname[0] ||
				    !strcmp(psk->psk_ifname,
				    s->kif->pfik_name)) {
					/*
					 * Don't send out individual
					 * delete messages.
					 */
					s->state_flags |= PFSTATE_NOSYNC;
					pf_unlink_state(s, PF_ENTER_LOCKED);
					killed++;
					goto relock_DIOCCLRSTATES;
				}
			PF_HASHROW_UNLOCK(ih);
		}
		psk->psk_killed = killed;
		if (V_pfsync_clear_states_ptr != NULL)
			V_pfsync_clear_states_ptr(V_pf_status.hostid, psk->psk_ifname);
		break;
	}
	case DIOCKILLSTATES: {
		struct pf_state		*s;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		u_int			 i, killed = 0;

		if (psk->psk_pfcmp.id) {
			if (psk->psk_pfcmp.creatorid == 0)
				psk->psk_pfcmp.creatorid = V_pf_status.hostid;
			if ((s = pf_find_state_byid(psk->psk_pfcmp.id,
			    psk->psk_pfcmp.creatorid))) {
				pf_unlink_state(s, PF_ENTER_LOCKED);
				psk->psk_killed = 1;
			}
			break;
		}

		for (i = 0; i <= pf_hashmask; i++)
			killed += pf_killstates_row(psk, &V_pf_idhash[i]);

		psk->psk_killed = killed;
		break;
	}
	case DIOCADDSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pfsync_state	*sp = &ps->state;

		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		if (V_pfsync_state_import_ptr != NULL) {
			PF_RULES_RLOCK();
			error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL);
			PF_RULES_RUNLOCK();
		} else
			error = EOPNOTSUPP;
		break;
	}
	case DIOCGETSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*s;

		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pfsync_state_export(&ps->state, s);
		PF_STATE_UNLOCK(s);
		break;
	}
	case DIOCGETSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pf_state		*s;
		struct pfsync_state	*pstore, *p;
		int			 i, nr;

		if (ps->ps_len <= 0) {
			nr = uma_zone_get_cur(V_pf_state_z);
			ps->ps_len = sizeof(struct pfsync_state) * nr;
			break;
		}

		p = pstore = malloc(ps->ps_len, M_TEMP, M_WAITOK | M_ZERO);
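		/*
		 * Walk every ID-hash row and export states until the
		 * user buffer is exhausted; DIOCGETSTATES_full truncates
		 * the result rather than failing.  A caller passing
		 * ps_len <= 0 above gets back a size estimate instead.
		 */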
		nr = 0;
		for (i = 0; i <= pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];

			PF_HASHROW_LOCK(ih);
			LIST_FOREACH(s, &ih->states, entry) {

				if (s->timeout == PFTM_UNLINKED)
					continue;

				if ((nr+1) * sizeof(*p) > ps->ps_len) {
					PF_HASHROW_UNLOCK(ih);
					goto DIOCGETSTATES_full;
				}
				pfsync_state_export(p, s);
				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(ih);
		}
DIOCGETSTATES_full:
		error = copyout(pstore, ps->ps_states,
		    sizeof(struct pfsync_state) * nr);
		if (error) {
			free(pstore, M_TEMP);
			break;
		}
		ps->ps_len = sizeof(struct pfsync_state) * nr;
		free(pstore, M_TEMP);

		break;
	}
2553 case DIOCGETSTATUS: {
2554 struct pf_status *s = (struct pf_status *)addr;
2557 s->running = V_pf_status.running;
2558 s->since = V_pf_status.since;
2559 s->debug = V_pf_status.debug;
2560 s->hostid = V_pf_status.hostid;
2561 s->states = V_pf_status.states;
2562 s->src_nodes = V_pf_status.src_nodes;
2564 for (int i = 0; i < PFRES_MAX; i++)
2565 s->counters[i] =
2566 counter_u64_fetch(V_pf_status.counters[i]);
2567 for (int i = 0; i < LCNT_MAX; i++)
2568 s->lcounters[i] =
2569 counter_u64_fetch(V_pf_status.lcounters[i]);
2570 for (int i = 0; i < FCNT_MAX; i++)
2571 s->fcounters[i] =
2572 counter_u64_fetch(V_pf_status.fcounters[i]);
2573 for (int i = 0; i < SCNT_MAX; i++)
2574 s->scounters[i] =
2575 counter_u64_fetch(V_pf_status.scounters[i]);
2577 bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ);
2578 bcopy(V_pf_status.pf_chksum, s->pf_chksum,
2579 PF_MD5_DIGEST_LENGTH);
2581 pfi_update_status(s->ifname, s);
2586 case DIOCSETSTATUSIF: {
2587 struct pfioc_if *pi = (struct pfioc_if *)addr;
2589 if (pi->ifname[0] == 0) {
2590 bzero(V_pf_status.ifname, IFNAMSIZ);
2594 strlcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
2599 case DIOCCLRSTATUS: {
2601 for (int i = 0; i < PFRES_MAX; i++)
2602 counter_u64_zero(V_pf_status.counters[i]);
2603 for (int i = 0; i < FCNT_MAX; i++)
2604 counter_u64_zero(V_pf_status.fcounters[i]);
2605 for (int i = 0; i < SCNT_MAX; i++)
2606 counter_u64_zero(V_pf_status.scounters[i]);
2607 for (int i = 0; i < LCNT_MAX; i++)
2608 counter_u64_zero(V_pf_status.lcounters[i]);
2609 V_pf_status.since = time_second;
2610 if (*V_pf_status.ifname)
2611 pfi_update_status(V_pf_status.ifname, NULL);
2616 case DIOCNATLOOK: {
2617 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
2618 struct pf_state_key *sk;
2619 struct pf_state *state;
2620 struct pf_state_key_cmp key;
2621 int m = 0, direction = pnl->direction;
2624 /* NATLOOK src and dst are reversed, so reverse sidx/didx */
2625 sidx = (direction == PF_IN) ? 1 : 0;
2626 didx = (direction == PF_IN) ? 0 : 1;
2629 PF_AZERO(&pnl->saddr, pnl->af) ||
2630 PF_AZERO(&pnl->daddr, pnl->af) ||
2631 ((pnl->proto == IPPROTO_TCP ||
2632 pnl->proto == IPPROTO_UDP) &&
2633 (!pnl->dport || !pnl->sport)))
2636 bzero(&key, sizeof(key));
2638 key.proto = pnl->proto;
2639 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
2640 key.port[sidx] = pnl->sport;
2641 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
2642 key.port[didx] = pnl->dport;
2644 state = pf_find_state_all(&key, direction, &m);
2647 error = E2BIG; /* more than one state */
2648 else if (state != NULL) {
2649 /* XXXGL: not locked read */
2650 sk = state->key[sidx];
2651 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
2652 pnl->rsport = sk->port[sidx];
2653 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
2654 pnl->rdport = sk->port[didx];
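/*
 * Explanatory note: DIOCNATLOOK (used by transparent proxies such
 * as ftp-proxy) answers "what does the other side of this
 * translation look like": the caller passes the addresses it
 * observed plus the direction, the state key is built with
 * sidx/didx swapped accordingly, and the matching state's opposite
 * key is copied back in rsaddr/rdaddr and rsport/rdport.
 */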
2661 case DIOCSETTIMEOUT: {
2662 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
2665 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
2671 old = V_pf_default_rule.timeout[pt->timeout];
2672 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
2673 pt->seconds = 1;
2674 V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
2675 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
2676 wakeup(pf_purge_thread);
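/*
 * Explanatory note: PFTM_INTERVAL is the purge thread's own period.
 * If it was just shortened, the thread may still be sleeping on the
 * old, longer interval, so it is woken up here to pick up the new
 * value promptly.
 */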
2682 case DIOCGETTIMEOUT: {
2683 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
2685 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
2690 pt->seconds = V_pf_default_rule.timeout[pt->timeout];
2695 case DIOCGETLIMIT: {
2696 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2698 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
2703 pl->limit = V_pf_limits[pl->index].limit;
2708 case DIOCSETLIMIT: {
2709 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2713 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
2714 V_pf_limits[pl->index].zone == NULL) {
2719 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
2720 old_limit = V_pf_limits[pl->index].limit;
2721 V_pf_limits[pl->index].limit = pl->limit;
2722 pl->limit = old_limit;
2727 case DIOCSETDEBUG: {
2728 u_int32_t *level = (u_int32_t *)addr;
2731 V_pf_status.debug = *level;
2736 case DIOCCLRRULECTRS: {
2737 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
2738 struct pf_kruleset *ruleset = &pf_main_ruleset;
2739 struct pf_krule *rule;
2743 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
2744 counter_u64_zero(rule->evaluations);
2745 for (int i = 0; i < 2; i++) {
2746 counter_u64_zero(rule->packets[i]);
2747 counter_u64_zero(rule->bytes[i]);
2754 case DIOCGIFSPEEDV0:
2755 case DIOCGIFSPEEDV1: {
2756 struct pf_ifspeed_v1 *psp = (struct pf_ifspeed_v1 *)addr;
2757 struct pf_ifspeed_v1 ps;
2760 if (psp->ifname[0] != 0) {
2761 /* Can we completely trust user-land? */
2762 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
2763 ifp = ifunit(ps.ifname);
2764 if (ifp != NULL) {
2765 psp->baudrate32 =
2766 (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
2767 if (cmd == DIOCGIFSPEEDV1)
2768 psp->baudrate = ifp->if_baudrate;
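/*
 * Explanatory note: if_baudrate is 64 bits wide, but the v0 layout
 * carries only a 32-bit rate, hence the uqmin() clamp to UINT_MAX
 * above; only DIOCGIFSPEEDV1 reports the full 64-bit value.
 */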
2777 case DIOCSTARTALTQ: {
2778 struct pf_altq *altq;
2781 /* enable all altq interfaces on active list */
2782 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
2783 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
2784 error = pf_enable_altq(altq);
2790 V_pf_altq_running = 1;
2792 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2796 case DIOCSTOPALTQ: {
2797 struct pf_altq *altq;
2800 /* disable all altq interfaces on active list */
2801 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
2802 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
2803 error = pf_disable_altq(altq);
2809 V_pf_altq_running = 0;
2811 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2816 case DIOCADDALTQV1: {
2817 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
2818 struct pf_altq *altq, *a;
2821 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
2822 error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
2825 altq->local_flags = 0;
2828 if (pa->ticket != V_ticket_altqs_inactive) {
2830 free(altq, M_PFALTQ);
2836 * if this is for a queue, find the discipline and
2837 * copy the necessary fields
2839 if (altq->qname[0] != 0) {
2840 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2843 free(altq, M_PFALTQ);
2846 altq->altq_disc = NULL;
2847 TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
2848 if (strncmp(a->ifname, altq->ifname,
2849 IFNAMSIZ) == 0) {
2850 altq->altq_disc = a->altq_disc;
2856 if ((ifp = ifunit(altq->ifname)) == NULL)
2857 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
2858 else
2859 error = altq_add(ifp, altq);
2863 free(altq, M_PFALTQ);
2867 if (altq->qname[0] != 0)
2868 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
2869 else
2870 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
2871 /* version error check done on import above */
2872 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
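/*
 * Explanatory note: new altqs are staged on the inactive lists under
 * the ticket handed out when the transaction was begun; they only
 * take effect when pf_commit_altq() swaps the inactive and active
 * lists.
 */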
2877 case DIOCGETALTQSV0:
2878 case DIOCGETALTQSV1: {
2879 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
2880 struct pf_altq *altq;
2884 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
2885 pa->nr++;
2886 TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
2887 pa->nr++;
2888 pa->ticket = V_ticket_altqs_active;
2894 case DIOCGETALTQV1: {
2895 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
2896 struct pf_altq *altq;
2899 if (pa->ticket != V_ticket_altqs_active) {
2904 altq = pf_altq_get_nth_active(pa->nr);
2910 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
2915 case DIOCCHANGEALTQV0:
2916 case DIOCCHANGEALTQV1:
2917 /* CHANGEALTQ not supported yet! */
2921 case DIOCGETQSTATSV0:
2922 case DIOCGETQSTATSV1: {
2923 struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr;
2924 struct pf_altq *altq;
2929 if (pq->ticket != V_ticket_altqs_active) {
2934 nbytes = pq->nbytes;
2935 altq = pf_altq_get_nth_active(pq->nr);
2942 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
2948 if (cmd == DIOCGETQSTATSV0)
2949 version = 0; /* DIOCGETQSTATSV0 means stats struct v0 */
2951 version = pq->version;
2952 error = altq_getqstats(altq, pq->buf, &nbytes, version);
2954 pq->scheduler = altq->scheduler;
2955 pq->nbytes = nbytes;
2961 case DIOCBEGINADDRS: {
2962 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2965 pf_empty_kpool(&V_pf_pabuf);
2966 pp->ticket = ++V_ticket_pabuf;
2971 case DIOCADDADDR: {
2972 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2973 struct pf_kpooladdr *pa;
2974 struct pfi_kkif *kif = NULL;
2977 if (pp->af == AF_INET) {
2978 error = EAFNOSUPPORT;
2983 if (pp->af == AF_INET6) {
2984 error = EAFNOSUPPORT;
2988 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2989 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2990 pp->addr.addr.type != PF_ADDR_TABLE) {
2994 if (pp->addr.addr.p.dyn != NULL) {
2998 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
2999 pf_pooladdr_to_kpooladdr(&pp->addr, pa);
3001 kif = pf_kkif_create(M_WAITOK);
3003 if (pp->ticket != V_ticket_pabuf) {
3011 if (pa->ifname[0]) {
3012 pa->kif = pfi_kkif_attach(kif, pa->ifname);
3013 pfi_kkif_ref(pa->kif);
3016 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
3017 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
3019 pfi_kkif_unref(pa->kif);
3024 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
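/*
 * Explanatory note: DIOCBEGINADDRS empties V_pf_pabuf and hands out
 * a ticket; DIOCADDADDR then appends pool addresses to that buffer
 * under the same ticket.  The buffer is later moved wholesale into
 * the pool of the rule being added (cf. pf_mv_kpool()).
 */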
3029 case DIOCGETADDRS: {
3030 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
3031 struct pf_kpool *pool;
3032 struct pf_kpooladdr *pa;
3036 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
3037 pp->r_num, 0, 1, 0);
3043 TAILQ_FOREACH(pa, &pool->list, entries)
3044 pp->nr++;
3049 case DIOCGETADDR: {
3050 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
3051 struct pf_kpool *pool;
3052 struct pf_kpooladdr *pa;
3056 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
3057 pp->r_num, 0, 1, 1);
3063 pa = TAILQ_FIRST(&pool->list);
3064 while ((pa != NULL) && (nr < pp->nr)) {
3065 pa = TAILQ_NEXT(pa, entries);
3066 nr++;
3073 pf_kpooladdr_to_pooladdr(pa, &pp->addr);
3074 pf_addr_copyout(&pp->addr.addr);
3079 case DIOCCHANGEADDR: {
3080 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
3081 struct pf_kpool *pool;
3082 struct pf_kpooladdr *oldpa = NULL, *newpa = NULL;
3083 struct pf_kruleset *ruleset;
3084 struct pfi_kkif *kif = NULL;
3086 if (pca->action < PF_CHANGE_ADD_HEAD ||
3087 pca->action > PF_CHANGE_REMOVE) {
3091 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
3092 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
3093 pca->addr.addr.type != PF_ADDR_TABLE) {
3097 if (pca->addr.addr.p.dyn != NULL) {
3102 if (pca->action != PF_CHANGE_REMOVE) {
3104 if (pca->af == AF_INET) {
3105 error = EAFNOSUPPORT;
3110 if (pca->af == AF_INET6) {
3111 error = EAFNOSUPPORT;
3115 newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
3116 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
3117 if (newpa->ifname[0])
3118 kif = pf_kkif_create(M_WAITOK);
3122 #define ERROUT(x) { error = (x); goto DIOCCHANGEADDR_error; }
3124 ruleset = pf_find_kruleset(pca->anchor);
3125 if (ruleset == NULL)
3128 pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
3129 pca->r_num, pca->r_last, 1, 1);
3133 if (pca->action != PF_CHANGE_REMOVE) {
3134 if (newpa->ifname[0]) {
3135 newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
3136 pfi_kkif_ref(newpa->kif);
3140 switch (newpa->addr.type) {
3141 case PF_ADDR_DYNIFTL:
3142 error = pfi_dynaddr_setup(&newpa->addr,
3146 newpa->addr.p.tbl = pfr_attach_table(ruleset,
3147 newpa->addr.v.tblname);
3148 if (newpa->addr.p.tbl == NULL)
3153 goto DIOCCHANGEADDR_error;
3156 switch (pca->action) {
3157 case PF_CHANGE_ADD_HEAD:
3158 oldpa = TAILQ_FIRST(&pool->list);
3160 case PF_CHANGE_ADD_TAIL:
3161 oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
3164 oldpa = TAILQ_FIRST(&pool->list);
3165 for (int i = 0; oldpa && i < pca->nr; i++)
3166 oldpa = TAILQ_NEXT(oldpa, entries);
3172 if (pca->action == PF_CHANGE_REMOVE) {
3173 TAILQ_REMOVE(&pool->list, oldpa, entries);
3174 switch (oldpa->addr.type) {
3175 case PF_ADDR_DYNIFTL:
3176 pfi_dynaddr_remove(oldpa->addr.p.dyn);
3179 pfr_detach_table(oldpa->addr.p.tbl);
3183 pfi_kkif_unref(oldpa->kif);
3184 free(oldpa, M_PFRULE);
3185 } else {
3186 if (oldpa == NULL)
3187 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
3188 else if (pca->action == PF_CHANGE_ADD_HEAD ||
3189 pca->action == PF_CHANGE_ADD_BEFORE)
3190 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
3191 else
3192 TAILQ_INSERT_AFTER(&pool->list, oldpa,
3193 newpa, entries);
3196 pool->cur = TAILQ_FIRST(&pool->list);
3197 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
3202 DIOCCHANGEADDR_error:
3203 if (newpa != NULL) {
3205 pfi_kkif_unref(newpa->kif);
3206 free(newpa, M_PFRULE);
3213 case DIOCGETRULESETS: {
3214 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
3215 struct pf_kruleset *ruleset;
3216 struct pf_kanchor *anchor;
3219 pr->path[sizeof(pr->path) - 1] = 0;
3220 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
3226 if (ruleset->anchor == NULL) {
3227 /* XXX kludge for pf_main_ruleset */
3228 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
3229 if (anchor->parent == NULL)
3232 RB_FOREACH(anchor, pf_kanchor_node,
3233 &ruleset->anchor->children)
3240 case DIOCGETRULESET: {
3241 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
3242 struct pf_kruleset *ruleset;
3243 struct pf_kanchor *anchor;
3247 pr->path[sizeof(pr->path) - 1] = 0;
3248 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
3254 if (ruleset->anchor == NULL) {
3255 /* XXX kludge for pf_main_ruleset */
3256 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
3257 if (anchor->parent == NULL && nr++ == pr->nr) {
3258 strlcpy(pr->name, anchor->name,
3263 RB_FOREACH(anchor, pf_kanchor_node,
3264 &ruleset->anchor->children)
3265 if (nr++ == pr->nr) {
3266 strlcpy(pr->name, anchor->name,
3277 case DIOCRCLRTABLES: {
3278 struct pfioc_table *io = (struct pfioc_table *)addr;
3280 if (io->pfrio_esize != 0) {
3285 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
3286 io->pfrio_flags | PFR_FLAG_USERIOCTL);
3291 case DIOCRADDTABLES: {
3292 struct pfioc_table *io = (struct pfioc_table *)addr;
3293 struct pfr_table *pfrts;
3296 if (io->pfrio_esize != sizeof(struct pfr_table)) {
3301 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
3302 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
3307 totlen = io->pfrio_size * sizeof(struct pfr_table);
3308 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
3310 error = copyin(io->pfrio_buffer, pfrts, totlen);
3312 free(pfrts, M_TEMP);
3316 error = pfr_add_tables(pfrts, io->pfrio_size,
3317 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3319 free(pfrts, M_TEMP);
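/*
 * Explanatory note: the pfr_*() table ioctls here all share one
 * pattern: validate pfrio_esize against the expected element size,
 * bound pfrio_size by pf_ioctl_maxcount and WOULD_OVERFLOW(),
 * mallocarray() a kernel copy, copyin(), call into the table code,
 * and copyout() results where the operation produces feedback.
 */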
3323 case DIOCRDELTABLES: {
3324 struct pfioc_table *io = (struct pfioc_table *)addr;
3325 struct pfr_table *pfrts;
3328 if (io->pfrio_esize != sizeof(struct pfr_table)) {
3333 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
3334 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
3339 totlen = io->pfrio_size * sizeof(struct pfr_table);
3340 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
3342 error = copyin(io->pfrio_buffer, pfrts, totlen);
3344 free(pfrts, M_TEMP);
3348 error = pfr_del_tables(pfrts, io->pfrio_size,
3349 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3351 free(pfrts, M_TEMP);
3355 case DIOCRGETTABLES: {
3356 struct pfioc_table *io = (struct pfioc_table *)addr;
3357 struct pfr_table *pfrts;
3361 if (io->pfrio_esize != sizeof(struct pfr_table)) {
3366 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
3372 io->pfrio_size = min(io->pfrio_size, n);
3374 totlen = io->pfrio_size * sizeof(struct pfr_table);
3376 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
3378 if (pfrts == NULL) {
3383 error = pfr_get_tables(&io->pfrio_table, pfrts,
3384 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3387 error = copyout(pfrts, io->pfrio_buffer, totlen);
3388 free(pfrts, M_TEMP);
3392 case DIOCRGETTSTATS: {
3393 struct pfioc_table *io = (struct pfioc_table *)addr;
3394 struct pfr_tstats *pfrtstats;
3398 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
3403 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
3409 io->pfrio_size = min(io->pfrio_size, n);
3411 totlen = io->pfrio_size * sizeof(struct pfr_tstats);
3412 pfrtstats = mallocarray(io->pfrio_size,
3413 sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT);
3414 if (pfrtstats == NULL) {
3419 error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
3420 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3423 error = copyout(pfrtstats, io->pfrio_buffer, totlen);
3424 free(pfrtstats, M_TEMP);
3428 case DIOCRCLRTSTATS: {
3429 struct pfioc_table *io = (struct pfioc_table *)addr;
3430 struct pfr_table *pfrts;
3433 if (io->pfrio_esize != sizeof(struct pfr_table)) {
3438 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
3439 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
3440 /* We used to count tables and use the minimum required
3441 * size, so we didn't fail on overly large requests.
3442 * Keep doing so. */
3443 io->pfrio_size = pf_ioctl_maxcount;
3447 totlen = io->pfrio_size * sizeof(struct pfr_table);
3448 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
3450 if (pfrts == NULL) {
3454 error = copyin(io->pfrio_buffer, pfrts, totlen);
3456 free(pfrts, M_TEMP);
3461 error = pfr_clr_tstats(pfrts, io->pfrio_size,
3462 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3464 free(pfrts, M_TEMP);
3468 case DIOCRSETTFLAGS: {
3469 struct pfioc_table *io = (struct pfioc_table *)addr;
3470 struct pfr_table *pfrts;
3474 if (io->pfrio_esize != sizeof(struct pfr_table)) {
3480 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
3487 io->pfrio_size = min(io->pfrio_size, n);
3490 totlen = io->pfrio_size * sizeof(struct pfr_table);
3491 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
3493 error = copyin(io->pfrio_buffer, pfrts, totlen);
3495 free(pfrts, M_TEMP);
3499 error = pfr_set_tflags(pfrts, io->pfrio_size,
3500 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
3501 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3503 free(pfrts, M_TEMP);
3507 case DIOCRCLRADDRS: {
3508 struct pfioc_table *io = (struct pfioc_table *)addr;
3510 if (io->pfrio_esize != 0) {
3515 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
3516 io->pfrio_flags | PFR_FLAG_USERIOCTL);
3521 case DIOCRADDADDRS: {
3522 struct pfioc_table *io = (struct pfioc_table *)addr;
3523 struct pfr_addr *pfras;
3526 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3530 if (io->pfrio_size < 0 ||
3531 io->pfrio_size > pf_ioctl_maxcount ||
3532 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
3536 totlen = io->pfrio_size * sizeof(struct pfr_addr);
3537 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
3543 error = copyin(io->pfrio_buffer, pfras, totlen);
3545 free(pfras, M_TEMP);
3549 error = pfr_add_addrs(&io->pfrio_table, pfras,
3550 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
3551 PFR_FLAG_USERIOCTL);
3553 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
3554 error = copyout(pfras, io->pfrio_buffer, totlen);
3555 free(pfras, M_TEMP);
3559 case DIOCRDELADDRS: {
3560 struct pfioc_table *io = (struct pfioc_table *)addr;
3561 struct pfr_addr *pfras;
3564 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3568 if (io->pfrio_size < 0 ||
3569 io->pfrio_size > pf_ioctl_maxcount ||
3570 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
3574 totlen = io->pfrio_size * sizeof(struct pfr_addr);
3575 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
3581 error = copyin(io->pfrio_buffer, pfras, totlen);
3583 free(pfras, M_TEMP);
3587 error = pfr_del_addrs(&io->pfrio_table, pfras,
3588 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
3589 PFR_FLAG_USERIOCTL);
3591 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
3592 error = copyout(pfras, io->pfrio_buffer, totlen);
3593 free(pfras, M_TEMP);
3597 case DIOCRSETADDRS: {
3598 struct pfioc_table *io = (struct pfioc_table *)addr;
3599 struct pfr_addr *pfras;
3600 size_t totlen, count;
3602 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3606 if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
3610 count = max(io->pfrio_size, io->pfrio_size2);
3611 if (count > pf_ioctl_maxcount ||
3612 WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
3616 totlen = count * sizeof(struct pfr_addr);
3617 pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
3623 error = copyin(io->pfrio_buffer, pfras, totlen);
3625 free(pfras, M_TEMP);
3629 error = pfr_set_addrs(&io->pfrio_table, pfras,
3630 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
3631 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
3632 PFR_FLAG_USERIOCTL, 0);
3634 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
3635 error = copyout(pfras, io->pfrio_buffer, totlen);
3636 free(pfras, M_TEMP);
3640 case DIOCRGETADDRS: {
3641 struct pfioc_table *io = (struct pfioc_table *)addr;
3642 struct pfr_addr *pfras;
3645 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3649 if (io->pfrio_size < 0 ||
3650 io->pfrio_size > pf_ioctl_maxcount ||
3651 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
3655 totlen = io->pfrio_size * sizeof(struct pfr_addr);
3656 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
3663 error = pfr_get_addrs(&io->pfrio_table, pfras,
3664 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3667 error = copyout(pfras, io->pfrio_buffer, totlen);
3668 free(pfras, M_TEMP);
3672 case DIOCRGETASTATS: {
3673 struct pfioc_table *io = (struct pfioc_table *)addr;
3674 struct pfr_astats *pfrastats;
3677 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
3681 if (io->pfrio_size < 0 ||
3682 io->pfrio_size > pf_ioctl_maxcount ||
3683 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
3687 totlen = io->pfrio_size * sizeof(struct pfr_astats);
3688 pfrastats = mallocarray(io->pfrio_size,
3689 sizeof(struct pfr_astats), M_TEMP, M_NOWAIT);
3695 error = pfr_get_astats(&io->pfrio_table, pfrastats,
3696 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3699 error = copyout(pfrastats, io->pfrio_buffer, totlen);
3700 free(pfrastats, M_TEMP);
3704 case DIOCRCLRASTATS: {
3705 struct pfioc_table *io = (struct pfioc_table *)addr;
3706 struct pfr_addr *pfras;
3709 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3713 if (io->pfrio_size < 0 ||
3714 io->pfrio_size > pf_ioctl_maxcount ||
3715 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
3719 totlen = io->pfrio_size * sizeof(struct pfr_addr);
3720 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
3726 error = copyin(io->pfrio_buffer, pfras, totlen);
3728 free(pfras, M_TEMP);
3732 error = pfr_clr_astats(&io->pfrio_table, pfras,
3733 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
3734 PFR_FLAG_USERIOCTL);
3736 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
3737 error = copyout(pfras, io->pfrio_buffer, totlen);
3738 free(pfras, M_TEMP);
3742 case DIOCRTSTADDRS: {
3743 struct pfioc_table *io = (struct pfioc_table *)addr;
3744 struct pfr_addr *pfras;
3747 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3751 if (io->pfrio_size < 0 ||
3752 io->pfrio_size > pf_ioctl_maxcount ||
3753 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
3757 totlen = io->pfrio_size * sizeof(struct pfr_addr);
3758 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
3764 error = copyin(io->pfrio_buffer, pfras, totlen);
3766 free(pfras, M_TEMP);
3770 error = pfr_tst_addrs(&io->pfrio_table, pfras,
3771 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
3772 PFR_FLAG_USERIOCTL);
3775 error = copyout(pfras, io->pfrio_buffer, totlen);
3776 free(pfras, M_TEMP);
3780 case DIOCRINADEFINE: {
3781 struct pfioc_table *io = (struct pfioc_table *)addr;
3782 struct pfr_addr *pfras;
3785 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3789 if (io->pfrio_size < 0 ||
3790 io->pfrio_size > pf_ioctl_maxcount ||
3791 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
3795 totlen = io->pfrio_size * sizeof(struct pfr_addr);
3796 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
3802 error = copyin(io->pfrio_buffer, pfras, totlen);
3804 free(pfras, M_TEMP);
3808 error = pfr_ina_define(&io->pfrio_table, pfras,
3809 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
3810 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3812 free(pfras, M_TEMP);
3816 case DIOCOSFPADD: {
3817 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
3819 error = pf_osfp_add(io);
3824 case DIOCOSFPGET: {
3825 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
3827 error = pf_osfp_get(io);
3832 case DIOCXBEGIN: {
3833 struct pfioc_trans *io = (struct pfioc_trans *)addr;
3834 struct pfioc_trans_e *ioes, *ioe;
3838 if (io->esize != sizeof(*ioe)) {
3843 io->size > pf_ioctl_maxcount ||
3844 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
3848 totlen = sizeof(struct pfioc_trans_e) * io->size;
3849 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
3855 error = copyin(io->array, ioes, totlen);
3861 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
3862 switch (ioe->rs_num) {
3864 case PF_RULESET_ALTQ:
3865 if (ioe->anchor[0]) {
3871 if ((error = pf_begin_altq(&ioe->ticket))) {
3878 case PF_RULESET_TABLE:
3880 struct pfr_table table;
3882 bzero(&table, sizeof(table));
3883 strlcpy(table.pfrt_anchor, ioe->anchor,
3884 sizeof(table.pfrt_anchor));
3885 if ((error = pfr_ina_begin(&table,
3886 &ioe->ticket, NULL, 0))) {
3894 if ((error = pf_begin_rules(&ioe->ticket,
3895 ioe->rs_num, ioe->anchor))) {
3904 error = copyout(ioes, io->array, totlen);
3909 case DIOCXROLLBACK: {
3910 struct pfioc_trans *io = (struct pfioc_trans *)addr;
3911 struct pfioc_trans_e *ioe, *ioes;
3915 if (io->esize != sizeof(*ioe)) {
3920 io->size > pf_ioctl_maxcount ||
3921 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
3925 totlen = sizeof(struct pfioc_trans_e) * io->size;
3926 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
3932 error = copyin(io->array, ioes, totlen);
3938 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
3939 switch (ioe->rs_num) {
3941 case PF_RULESET_ALTQ:
3942 if (ioe->anchor[0]) {
3948 if ((error = pf_rollback_altq(ioe->ticket))) {
3951 goto fail; /* really bad */
3955 case PF_RULESET_TABLE:
3957 struct pfr_table table;
3959 bzero(&table, sizeof(table));
3960 strlcpy(table.pfrt_anchor, ioe->anchor,
3961 sizeof(table.pfrt_anchor));
3962 if ((error = pfr_ina_rollback(&table,
3963 ioe->ticket, NULL, 0))) {
3966 goto fail; /* really bad */
3971 if ((error = pf_rollback_rules(ioe->ticket,
3972 ioe->rs_num, ioe->anchor))) {
3975 goto fail; /* really bad */
3985 case DIOCXCOMMIT: {
3986 struct pfioc_trans *io = (struct pfioc_trans *)addr;
3987 struct pfioc_trans_e *ioe, *ioes;
3988 struct pf_kruleset *rs;
3992 if (io->esize != sizeof(*ioe)) {
3998 io->size > pf_ioctl_maxcount ||
3999 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
4004 totlen = sizeof(struct pfioc_trans_e) * io->size;
4005 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
4011 error = copyin(io->array, ioes, totlen);
4017 /* First make sure everything will succeed. */
4018 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
4019 switch (ioe->rs_num) {
4021 case PF_RULESET_ALTQ:
4022 if (ioe->anchor[0]) {
4028 if (!V_altqs_inactive_open || ioe->ticket !=
4029 V_ticket_altqs_inactive) {
4037 case PF_RULESET_TABLE:
4038 rs = pf_find_kruleset(ioe->anchor);
4039 if (rs == NULL || !rs->topen || ioe->ticket !=
4040 rs->tticket) {
4048 if (ioe->rs_num < 0 || ioe->rs_num >=
4055 rs = pf_find_kruleset(ioe->anchor);
4056 if (rs == NULL ||
4057 !rs->rules[ioe->rs_num].inactive.open ||
4058 rs->rules[ioe->rs_num].inactive.ticket !=
4059 ioe->ticket) {
4068 /* Now do the commit - no errors should happen here. */
4069 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
4070 switch (ioe->rs_num) {
4072 case PF_RULESET_ALTQ:
4073 if ((error = pf_commit_altq(ioe->ticket))) {
4076 goto fail; /* really bad */
4080 case PF_RULESET_TABLE:
4082 struct pfr_table table;
4084 bzero(&table, sizeof(table));
4085 strlcpy(table.pfrt_anchor, ioe->anchor,
4086 sizeof(table.pfrt_anchor));
4087 if ((error = pfr_ina_commit(&table,
4088 ioe->ticket, NULL, NULL, 0))) {
4091 goto fail; /* really bad */
4096 if ((error = pf_commit_rules(ioe->ticket,
4097 ioe->rs_num, ioe->anchor))) {
4100 goto fail; /* really bad */
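/*
 * Explanatory note: DIOCXCOMMIT is deliberately two-phase.  The
 * first loop only validates every ticket, so a stale transaction is
 * rejected before anything is modified; the second loop performs
 * the actual commits, where errors are unexpected and cannot be
 * rolled back (hence "really bad").
 */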
4110 case DIOCGETSRCNODES: {
4111 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
4112 struct pf_srchash *sh;
4113 struct pf_ksrc_node *n;
4114 struct pf_src_node *p, *pstore;
4117 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
4118 i++, sh++) {
4119 PF_HASHROW_LOCK(sh);
4120 LIST_FOREACH(n, &sh->nodes, entry)
4121 nr++;
4122 PF_HASHROW_UNLOCK(sh);
4125 psn->psn_len = min(psn->psn_len,
4126 sizeof(struct pf_src_node) * nr);
4128 if (psn->psn_len == 0) {
4129 psn->psn_len = sizeof(struct pf_src_node) * nr;
4135 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
4136 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
4137 i++, sh++) {
4138 PF_HASHROW_LOCK(sh);
4139 LIST_FOREACH(n, &sh->nodes, entry) {
4141 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
4144 pf_src_node_copy(n, p);
4149 PF_HASHROW_UNLOCK(sh);
4151 error = copyout(pstore, psn->psn_src_nodes,
4152 sizeof(struct pf_src_node) * nr);
4154 free(pstore, M_TEMP);
4157 psn->psn_len = sizeof(struct pf_src_node) * nr;
4158 free(pstore, M_TEMP);
4162 case DIOCCLRSRCNODES: {
4164 pf_clear_srcnodes(NULL);
4165 pf_purge_expired_src_nodes();
4169 case DIOCKILLSRCNODES:
4170 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
4173 case DIOCSETHOSTID: {
4174 u_int32_t *hostid = (u_int32_t *)addr;
4177 if (*hostid == 0)
4178 V_pf_status.hostid = arc4random();
4179 else
4180 V_pf_status.hostid = *hostid;
4191 case DIOCIGETIFACES: {
4192 struct pfioc_iface *io = (struct pfioc_iface *)addr;
4193 struct pfi_kif *ifstore;
4196 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
4201 if (io->pfiio_size < 0 ||
4202 io->pfiio_size > pf_ioctl_maxcount ||
4203 WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
4208 bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
4209 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
4211 if (ifstore == NULL) {
4217 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
4219 error = copyout(ifstore, io->pfiio_buffer, bufsiz);
4220 free(ifstore, M_TEMP);
4224 case DIOCSETIFFLAG: {
4225 struct pfioc_iface *io = (struct pfioc_iface *)addr;
4228 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
4233 case DIOCCLRIFFLAG: {
4234 struct pfioc_iface *io = (struct pfioc_iface *)addr;
4237 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
4247 if (sx_xlocked(&pf_ioctl_lock))
4248 sx_xunlock(&pf_ioctl_lock);
4255 pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
4257 bzero(sp, sizeof(struct pfsync_state));
4259 /* copy from state key */
4260 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
4261 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
4262 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
4263 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
4264 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
4265 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
4266 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
4267 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
4268 sp->proto = st->key[PF_SK_WIRE]->proto;
4269 sp->af = st->key[PF_SK_WIRE]->af;
4271 /* copy from state */
4272 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
4273 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
4274 sp->creation = htonl(time_uptime - st->creation);
4275 sp->expire = pf_state_expires(st);
4276 if (sp->expire <= time_uptime)
4277 sp->expire = htonl(0);
4278 else
4279 sp->expire = htonl(sp->expire - time_uptime);
4281 sp->direction = st->direction;
4283 sp->timeout = st->timeout;
4284 sp->state_flags = st->state_flags;
4285 if (st->src_node)
4286 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
4287 if (st->nat_src_node)
4288 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
4291 sp->creatorid = st->creatorid;
4292 pf_state_peer_hton(&st->src, &sp->src);
4293 pf_state_peer_hton(&st->dst, &sp->dst);
4295 if (st->rule.ptr == NULL)
4296 sp->rule = htonl(-1);
4297 else
4298 sp->rule = htonl(st->rule.ptr->nr);
4299 if (st->anchor.ptr == NULL)
4300 sp->anchor = htonl(-1);
4301 else
4302 sp->anchor = htonl(st->anchor.ptr->nr);
4303 if (st->nat_rule.ptr == NULL)
4304 sp->nat_rule = htonl(-1);
4305 else
4306 sp->nat_rule = htonl(st->nat_rule.ptr->nr);
4308 pf_state_counter_hton(counter_u64_fetch(st->packets[0]),
4310 pf_state_counter_hton(counter_u64_fetch(st->packets[1]),
4312 pf_state_counter_hton(counter_u64_fetch(st->bytes[0]), sp->bytes[0]);
4313 pf_state_counter_hton(counter_u64_fetch(st->bytes[1]), sp->bytes[1]);
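/*
 * Explanatory note: this produces the pfsync wire/export format, so
 * multi-byte scalars are converted to network byte order and a rule
 * number of htonl(-1) means "no rule".  The inverse conversion is
 * done on import, e.g. via V_pfsync_state_import_ptr in
 * DIOCADDSTATE.
 */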
4318 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
4320 struct pfr_ktable *kt;
4322 KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
4325 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
4326 kt = kt->pfrkt_root;
4328 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
4333 * XXX - Check for version mismatch!!!
4336 pf_clear_states(void)
4341 for (i = 0; i <= pf_hashmask; i++) {
4342 struct pf_idhash *ih = &V_pf_idhash[i];
4344 PF_HASHROW_LOCK(ih);
4345 LIST_FOREACH(s, &ih->states, entry) {
4346 s->timeout = PFTM_PURGE;
4347 /* Don't send out individual delete messages. */
4348 s->state_flags |= PFSTATE_NOSYNC;
4349 pf_unlink_state(s, PF_ENTER_LOCKED);
4352 PF_HASHROW_UNLOCK(ih);
4357 pf_clear_tables(void)
4359 struct pfioc_table io;
4362 bzero(&io, sizeof(io));
4364 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
4371 pf_clear_srcnodes(struct pf_ksrc_node *n)
4376 for (i = 0; i <= pf_hashmask; i++) {
4377 struct pf_idhash *ih = &V_pf_idhash[i];
4379 PF_HASHROW_LOCK(ih);
4380 LIST_FOREACH(s, &ih->states, entry) {
4381 if (n == NULL || n == s->src_node)
4382 s->src_node = NULL;
4383 if (n == NULL || n == s->nat_src_node)
4384 s->nat_src_node = NULL;
4386 PF_HASHROW_UNLOCK(ih);
4390 struct pf_srchash *sh;
4392 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
4393 i++, sh++) {
4394 PF_HASHROW_LOCK(sh);
4395 LIST_FOREACH(n, &sh->nodes, entry) {
4399 PF_HASHROW_UNLOCK(sh);
4402 /* XXX: hash slot should already be locked here. */
4409 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
4411 struct pf_ksrc_node_list kill;
4414 for (int i = 0; i <= pf_srchashmask; i++) {
4415 struct pf_srchash *sh = &V_pf_srchash[i];
4416 struct pf_ksrc_node *sn, *tmp;
4418 PF_HASHROW_LOCK(sh);
4419 LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
4420 if (PF_MATCHA(psnk->psnk_src.neg,
4421 &psnk->psnk_src.addr.v.a.addr,
4422 &psnk->psnk_src.addr.v.a.mask,
4423 &sn->addr, sn->af) &&
4424 PF_MATCHA(psnk->psnk_dst.neg,
4425 &psnk->psnk_dst.addr.v.a.addr,
4426 &psnk->psnk_dst.addr.v.a.mask,
4427 &sn->raddr, sn->af)) {
4428 pf_unlink_src_node(sn);
4429 LIST_INSERT_HEAD(&kill, sn, entry);
4432 PF_HASHROW_UNLOCK(sh);
4435 for (int i = 0; i <= pf_hashmask; i++) {
4436 struct pf_idhash *ih = &V_pf_idhash[i];
4439 PF_HASHROW_LOCK(ih);
4440 LIST_FOREACH(s, &ih->states, entry) {
4441 if (s->src_node && s->src_node->expire == 1)
4442 s->src_node = NULL;
4443 if (s->nat_src_node && s->nat_src_node->expire == 1)
4444 s->nat_src_node = NULL;
4446 PF_HASHROW_UNLOCK(ih);
4449 psnk->psnk_killed = pf_free_src_nodes(&kill);
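/*
 * Explanatory note: the first pass above unlinks matching source
 * nodes, marks them (expire == 1) and collects them on the kill
 * list; the second pass strips the now-dangling pointers from
 * states before pf_free_src_nodes() finally frees the nodes.
 */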
4453 * XXX - Check for version mismatch!!!
4457 * Duplicate pfctl -Fa operation to get rid of as much as we can.
4467 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
4469 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
4472 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
4474 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
4475 break; /* XXX: rollback? */
4477 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
4479 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
4480 break; /* XXX: rollback? */
4482 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
4484 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
4485 break; /* XXX: rollback? */
4487 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
4489 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
4490 break; /* XXX: rollback? */
4493 /* XXX: these should always succeed here */
4494 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
4495 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
4496 pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
4497 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
4498 pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
4500 if ((error = pf_clear_tables()) != 0)
4504 if ((error = pf_begin_altq(&t[0])) != 0) {
4505 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
4508 pf_commit_altq(t[0]);
4513 pf_clear_srcnodes(NULL);
4515 /* status does not use malloc'ed memory, so no cleanup is needed */
4516 /* fingerprints and interfaces have their own cleanup code */
4524 pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
4529 chk = pf_test(PF_IN, flags, ifp, m, inp);
4541 pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
4546 chk = pf_test(PF_OUT, flags, ifp, m, inp);
4560 pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
4566 * In case of loopback traffic IPv6 uses the real interface in
4567 * order to support scoped addresses. In order to support stateful
4568 * filtering we have to change this to lo0, as is the case in IPv4.
4570 CURVNET_SET(ifp->if_vnet);
4571 chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp);
4583 pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
4588 CURVNET_SET(ifp->if_vnet);
4589 chk = pf_test6(PF_OUT, flags, ifp, m, inp);
4605 struct pfil_head *pfh_inet;
4608 struct pfil_head *pfh_inet6;
4611 if (V_pf_pfil_hooked)
4615 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
4616 if (pfh_inet == NULL)
4617 return (ESRCH); /* XXX */
4618 pfil_add_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
4619 pfil_add_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
4622 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
4623 if (pfh_inet6 == NULL) {
4625 pfil_remove_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
4627 pfil_remove_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
4630 return (ESRCH); /* XXX */
4632 pfil_add_hook_flags(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
4633 pfil_add_hook_flags(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
4636 V_pf_pfil_hooked = 1;
4644 struct pfil_head *pfh_inet;
4647 struct pfil_head *pfh_inet6;
4650 if (V_pf_pfil_hooked == 0)
4654 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
4655 if (pfh_inet == NULL)
4656 return (ESRCH); /* XXX */
4657 pfil_remove_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
4659 pfil_remove_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
4663 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
4664 if (pfh_inet6 == NULL)
4665 return (ESRCH); /* XXX */
4666 pfil_remove_hook_flags(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
4668 pfil_remove_hook_flags(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
4672 V_pf_pfil_hooked = 0;
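/*
 * Explanatory note: hook_pf()/dehook_pf() (de)register the
 * pf_check*() input/output functions with pfil(9) for AF_INET and
 * AF_INET6; the per-vnet V_pf_pfil_hooked flag makes both
 * operations idempotent.
 */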
4679 V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
4680 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
4682 pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
4683 PF_RULE_TAG_HASH_SIZE_DEFAULT);
4685 pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
4686 PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
4690 V_pf_vnet_active = 1;
4698 rm_init(&pf_rules_lock, "pf rulesets");
4699 sx_init(&pf_ioctl_lock, "pf ioctl");
4700 sx_init(&pf_end_lock, "pf end thread");
4702 pf_mtag_initialize();
4704 pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
4709 error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
4719 pf_unload_vnet(void)
4723 V_pf_vnet_active = 0;
4724 V_pf_status.running = 0;
4725 error = dehook_pf();
4728 * Should not happen!
4729 * XXX Due to error code ESRCH, kldunload will show
4730 * a message like 'No such process'.
4732 printf("%s: pfil unregistration failed\n", __FUNCTION__);
4740 ret = swi_remove(V_pf_swi_cookie);
4742 ret = intr_event_destroy(V_pf_swi_ie);
4745 pf_unload_vnet_purge();
4747 pf_normalize_cleanup();
4754 if (IS_DEFAULT_VNET(curvnet))
4757 pf_cleanup_tagset(&V_pf_tags);
4759 pf_cleanup_tagset(&V_pf_qids);
4761 uma_zdestroy(V_pf_tag_z);
4763 /* Free counters last as we updated them during shutdown. */
4764 counter_u64_free(V_pf_default_rule.evaluations);
4765 for (int i = 0; i < 2; i++) {
4766 counter_u64_free(V_pf_default_rule.packets[i]);
4767 counter_u64_free(V_pf_default_rule.bytes[i]);
4769 counter_u64_free(V_pf_default_rule.states_cur);
4770 counter_u64_free(V_pf_default_rule.states_tot);
4771 counter_u64_free(V_pf_default_rule.src_nodes);
4773 for (int i = 0; i < PFRES_MAX; i++)
4774 counter_u64_free(V_pf_status.counters[i]);
4775 for (int i = 0; i < LCNT_MAX; i++)
4776 counter_u64_free(V_pf_status.lcounters[i]);
4777 for (int i = 0; i < FCNT_MAX; i++)
4778 counter_u64_free(V_pf_status.fcounters[i]);
4779 for (int i = 0; i < SCNT_MAX; i++)
4780 counter_u64_free(V_pf_status.scounters[i]);
4787 sx_xlock(&pf_end_lock);
4789 while (pf_end_threads < 2) {
4790 wakeup_one(pf_purge_thread);
4791 sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
4793 sx_xunlock(&pf_end_lock);
4796 destroy_dev(pf_dev);
4800 rm_destroy(&pf_rules_lock);
4801 sx_destroy(&pf_ioctl_lock);
4802 sx_destroy(&pf_end_lock);
4806 vnet_pf_init(void *unused __unused)
4811 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
4812 vnet_pf_init, NULL);
4815 vnet_pf_uninit(const void *unused __unused)
4820 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
4821 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
4822 vnet_pf_uninit, NULL);
4826 pf_modevent(module_t mod, int type, void *data)
4835 /* Handled in SYSUNINIT(pf_unload) to ensure it's done after
4836 * the vnet_pf_uninit()s */
4846 static moduledata_t pf_mod = {
4852 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
4853 MODULE_VERSION(pf, PF_MODVER);