2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2001 Daniel Hartmeier
5 * Copyright (c) 2002,2003 Henning Brauer
6 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
13 * - Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
33 * Effort sponsored in part by the Defense Advanced Research Projects
34 * Agency (DARPA) and Air Force Research Laboratory, Air Force
35 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
37 * $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
44 #include "opt_inet6.h"
48 #include <sys/param.h>
51 #include <sys/endian.h>
52 #include <sys/fcntl.h>
53 #include <sys/filio.h>
54 #include <sys/interrupt.h>
56 #include <sys/kernel.h>
57 #include <sys/kthread.h>
60 #include <sys/module.h>
62 #include <sys/rwlock.h>
64 #include <sys/socket.h>
65 #include <sys/sysctl.h>
67 #include <sys/ucred.h>
70 #include <net/if_var.h>
72 #include <net/route.h>
74 #include <net/pfvar.h>
75 #include <net/if_pfsync.h>
76 #include <net/if_pflog.h>
78 #include <netinet/in.h>
79 #include <netinet/ip.h>
80 #include <netinet/ip_var.h>
81 #include <netinet6/ip6_var.h>
82 #include <netinet/ip_icmp.h>
85 #include <netinet/ip6.h>
89 #include <net/altq/altq.h>
92 static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
93 u_int8_t, u_int8_t, u_int8_t);
95 static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
96 static void pf_empty_pool(struct pf_palist *);
97 static int pfioctl(struct cdev *, u_long, caddr_t, int,
100 static int pf_begin_altq(u_int32_t *);
101 static int pf_rollback_altq(u_int32_t);
102 static int pf_commit_altq(u_int32_t);
103 static int pf_enable_altq(struct pf_altq *);
104 static int pf_disable_altq(struct pf_altq *);
105 static u_int32_t pf_qname2qid(char *);
106 static void pf_qid_unref(u_int32_t);
108 static int pf_begin_rules(u_int32_t *, int, const char *);
109 static int pf_rollback_rules(u_int32_t, int, char *);
110 static int pf_setup_pfsync_matching(struct pf_ruleset *);
111 static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
112 static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
113 static int pf_commit_rules(u_int32_t, int, char *);
114 static int pf_addr_setup(struct pf_ruleset *,
115 struct pf_addr_wrap *, sa_family_t);
116 static void pf_addr_copyout(struct pf_addr_wrap *);
118 VNET_DEFINE(struct pf_rule, pf_default_rule);
121 static VNET_DEFINE(int, pf_altq_running);
122 #define V_pf_altq_running VNET(pf_altq_running)
125 #define TAGID_MAX 50000
127 TAILQ_ENTRY(pf_tagname) entries;
128 char name[PF_TAG_NAME_SIZE];
133 TAILQ_HEAD(pf_tags, pf_tagname);
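/* Per-VNET lists mapping tag and queue names to small numeric IDs. */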
134 #define V_pf_tags VNET(pf_tags)
135 VNET_DEFINE(struct pf_tags, pf_tags);
136 #define V_pf_qids VNET(pf_qids)
137 VNET_DEFINE(struct pf_tags, pf_qids);
138 static MALLOC_DEFINE(M_PFTAG, "pf_tag", "pf(4) tag names");
139 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
140 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
142 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
143 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
146 static u_int16_t tagname2tag(struct pf_tags *, char *);
147 static u_int16_t pf_tagname2tag(char *);
148 static void tag_unref(struct pf_tags *, u_int16_t);
150 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
155 * XXX - These are new and need to be checked when moving to a new version
157 static void pf_clear_states(void);
158 static int pf_clear_tables(void);
159 static void pf_clear_srcnodes(struct pf_src_node *);
160 static void pf_kill_srcnodes(struct pfioc_src_node_kill *);
161 static void pf_tbladdr_copyout(struct pf_addr_wrap *);
164 * Wrapper functions for pfil(9) hooks
167 static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
168 int dir, int flags, struct inpcb *inp);
169 static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
170 int dir, int flags, struct inpcb *inp);
173 static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
174 int dir, int flags, struct inpcb *inp);
175 static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
176 int dir, int flags, struct inpcb *inp);
179 static int hook_pf(void);
180 static int dehook_pf(void);
181 static int shutdown_pf(void);
182 static int pf_load(void);
183 static void pf_unload(void);
185 static struct cdevsw pf_cdevsw = {
188 .d_version = D_VERSION,
191 static volatile VNET_DEFINE(int, pf_pfil_hooked);
192 #define V_pf_pfil_hooked VNET(pf_pfil_hooked)
195 * We need a flag that is neither hooked nor running to know when
196 * the VNET is "valid". We primarily need this to control (global)
197 * external events, e.g., eventhandlers.
199 VNET_DEFINE(int, pf_vnet_active);
200 #define V_pf_vnet_active VNET(pf_vnet_active)
203 struct proc *pf_purge_proc;
205 struct rwlock pf_rules_lock;
206 struct sx pf_ioctl_lock;
207 struct sx pf_end_lock;
210 pfsync_state_import_t *pfsync_state_import_ptr = NULL;
211 pfsync_insert_state_t *pfsync_insert_state_ptr = NULL;
212 pfsync_update_state_t *pfsync_update_state_ptr = NULL;
213 pfsync_delete_state_t *pfsync_delete_state_ptr = NULL;
214 pfsync_clear_states_t *pfsync_clear_states_ptr = NULL;
215 pfsync_defer_t *pfsync_defer_ptr = NULL;
217 pflog_packet_t *pflog_packet_ptr = NULL;
219 extern u_long pf_ioctl_maxcount;
224 u_int32_t *my_timeout = V_pf_default_rule.timeout;
228 pfi_initialize_vnet();
231 V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
232 V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
234 RB_INIT(&V_pf_anchors);
235 pf_init_ruleset(&pf_main_ruleset);
237 /* default rule should never be garbage collected */
238 V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
239 #ifdef PF_DEFAULT_TO_DROP
240 V_pf_default_rule.action = PF_DROP;
242 V_pf_default_rule.action = PF_PASS;
244 V_pf_default_rule.nr = -1;
245 V_pf_default_rule.rtableid = -1;
247 V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
248 V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
249 V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);
251 /* initialize default timeouts */
252 my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
253 my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
254 my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
255 my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
256 my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
257 my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
258 my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
259 my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
260 my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
261 my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
262 my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
263 my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
264 my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
265 my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
266 my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
267 my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
268 my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
269 my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
270 my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
271 my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
273 bzero(&V_pf_status, sizeof(V_pf_status));
274 V_pf_status.debug = PF_DEBUG_URGENT;
276 V_pf_pfil_hooked = 0;
278 /* XXX do our best to avoid a conflict */
279 V_pf_status.hostid = arc4random();
281 for (int i = 0; i < PFRES_MAX; i++)
282 V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
283 for (int i = 0; i < LCNT_MAX; i++)
284 V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
285 for (int i = 0; i < FCNT_MAX; i++)
286 V_pf_status.fcounters[i] = counter_u64_alloc(M_WAITOK);
287 for (int i = 0; i < SCNT_MAX; i++)
288 V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
290 if (swi_add(NULL, "pf send", pf_intr, curvnet, SWI_NET,
291 INTR_MPSAFE, &V_pf_swi_cookie) != 0)
292 /* XXXGL: leaked all above. */
297 static struct pf_pool *
298 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
299 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
300 u_int8_t check_ticket)
302 struct pf_ruleset *ruleset;
303 struct pf_rule *rule;
306 ruleset = pf_find_ruleset(anchor);
309 rs_num = pf_get_ruleset_number(rule_action);
310 if (rs_num >= PF_RULESET_MAX)
313 if (check_ticket && ticket !=
314 ruleset->rules[rs_num].active.ticket)
317 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
320 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
322 if (check_ticket && ticket !=
323 ruleset->rules[rs_num].inactive.ticket)
326 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
329 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
332 while ((rule != NULL) && (rule->nr != rule_number))
333 rule = TAILQ_NEXT(rule, entries);
338 return (&rule->rpool);
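/* Move every pool address from poola to the tail of poolb. */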
342 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
344 struct pf_pooladdr *mv_pool_pa;
346 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
347 TAILQ_REMOVE(poola, mv_pool_pa, entries);
348 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
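/*
 * Release every address in the pool: drop the dynaddr/table and kif
 * references and free each entry.
 */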
353 pf_empty_pool(struct pf_palist *poola)
355 struct pf_pooladdr *pa;
357 while ((pa = TAILQ_FIRST(poola)) != NULL) {
358 switch (pa->addr.type) {
359 case PF_ADDR_DYNIFTL:
360 pfi_dynaddr_remove(pa->addr.p.dyn);
363 /* XXX: this could be an unfinished pooladdr still on the pabuf */
364 if (pa->addr.p.tbl != NULL)
365 pfr_detach_table(pa->addr.p.tbl);
369 pfi_kif_unref(pa->kif);
370 TAILQ_REMOVE(poola, pa, entries);
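/*
 * Take a rule off its queue and park it on V_pf_unlinked_rules; it is
 * freed later by the purge logic once nothing references it anymore.
 */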
376 pf_unlink_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
381 TAILQ_REMOVE(rulequeue, rule, entries);
383 PF_UNLNKDRULES_LOCK();
384 rule->rule_flag |= PFRULE_REFS;
385 TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
386 PF_UNLNKDRULES_UNLOCK();
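/*
 * Drop every reference the rule holds (tags, queue IDs, tables, kif,
 * anchor, address pool) and free it.
 */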
390 pf_free_rule(struct pf_rule *rule)
396 tag_unref(&V_pf_tags, rule->tag);
398 tag_unref(&V_pf_tags, rule->match_tag);
400 if (rule->pqid != rule->qid)
401 pf_qid_unref(rule->pqid);
402 pf_qid_unref(rule->qid);
404 switch (rule->src.addr.type) {
405 case PF_ADDR_DYNIFTL:
406 pfi_dynaddr_remove(rule->src.addr.p.dyn);
409 pfr_detach_table(rule->src.addr.p.tbl);
412 switch (rule->dst.addr.type) {
413 case PF_ADDR_DYNIFTL:
414 pfi_dynaddr_remove(rule->dst.addr.p.dyn);
417 pfr_detach_table(rule->dst.addr.p.tbl);
420 if (rule->overload_tbl)
421 pfr_detach_table(rule->overload_tbl);
423 pfi_kif_unref(rule->kif);
424 pf_anchor_remove(rule);
425 pf_empty_pool(&rule->rpool.list);
426 counter_u64_free(rule->states_cur);
427 counter_u64_free(rule->states_tot);
428 counter_u64_free(rule->src_nodes);
429 free(rule, M_PFRULE);
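/*
 * Return the numeric tag for tagname, allocating a new reference-counted
 * entry if the name is not known yet.
 */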
433 tagname2tag(struct pf_tags *head, char *tagname)
435 struct pf_tagname *tag, *p = NULL;
436 u_int16_t new_tagid = 1;
440 TAILQ_FOREACH(tag, head, entries)
441 if (strcmp(tagname, tag->name) == 0) {
447 * to avoid fragmentation, we do a linear search from the beginning
448 * and take the first free slot we find. if there is none or the list
449 * is empty, append a new entry at the end.
453 if (!TAILQ_EMPTY(head))
454 for (p = TAILQ_FIRST(head); p != NULL &&
455 p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
456 new_tagid = p->tag + 1;
458 if (new_tagid > TAGID_MAX)
461 /* allocate and fill new struct pf_tagname */
462 tag = malloc(sizeof(*tag), M_PFTAG, M_NOWAIT|M_ZERO);
465 strlcpy(tag->name, tagname, sizeof(tag->name));
466 tag->tag = new_tagid;
469 if (p != NULL) /* insert new entry before p */
470 TAILQ_INSERT_BEFORE(p, tag, entries);
471 else /* either list empty or no free slot in between */
472 TAILQ_INSERT_TAIL(head, tag, entries);
478 tag_unref(struct pf_tags *head, u_int16_t tag)
480 struct pf_tagname *p, *next;
484 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
485 next = TAILQ_NEXT(p, entries);
488 TAILQ_REMOVE(head, p, entries);
497 pf_tagname2tag(char *tagname)
499 return (tagname2tag(&V_pf_tags, tagname));
504 pf_qname2qid(char *qname)
506 return ((u_int32_t)tagname2tag(&V_pf_qids, qname));
510 pf_qid_unref(u_int32_t qid)
512 tag_unref(&V_pf_qids, (u_int16_t)qid);
516 pf_begin_altq(u_int32_t *ticket)
518 struct pf_altq *altq;
523 /* Purge the old altq list */
524 while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
525 TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
526 if (altq->qname[0] == 0 &&
527 (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
528 /* detach and destroy the discipline */
529 error = altq_remove(altq);
531 pf_qid_unref(altq->qid);
532 free(altq, M_PFALTQ);
536 *ticket = ++V_ticket_altqs_inactive;
537 V_altqs_inactive_open = 1;
542 pf_rollback_altq(u_int32_t ticket)
544 struct pf_altq *altq;
549 if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
551 /* Purge the old altq list */
552 while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
553 TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
554 if (altq->qname[0] == 0 &&
555 (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
556 /* detach and destroy the discipline */
557 error = altq_remove(altq);
559 pf_qid_unref(altq->qid);
560 free(altq, M_PFALTQ);
562 V_altqs_inactive_open = 0;
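/*
 * Swap the inactive altq list into place, attach the new disciplines and
 * tear down the ones being replaced.
 */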
567 pf_commit_altq(u_int32_t ticket)
569 struct pf_altqqueue *old_altqs;
570 struct pf_altq *altq;
575 if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
578 /* swap altqs, keep the old. */
579 old_altqs = V_pf_altqs_active;
580 V_pf_altqs_active = V_pf_altqs_inactive;
581 V_pf_altqs_inactive = old_altqs;
582 V_ticket_altqs_active = V_ticket_altqs_inactive;
584 /* Attach new disciplines */
585 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
586 if (altq->qname[0] == 0 &&
587 (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
588 /* attach the discipline */
589 error = altq_pfattach(altq);
590 if (error == 0 && V_pf_altq_running)
591 error = pf_enable_altq(altq);
597 /* Purge the old altq list */
598 while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
599 TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
600 if (altq->qname[0] == 0 &&
601 (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
602 /* detach and destroy the discipline */
603 if (V_pf_altq_running)
604 error = pf_disable_altq(altq);
605 err = altq_pfdetach(altq);
606 if (err != 0 && error == 0)
608 err = altq_remove(altq);
609 if (err != 0 && error == 0)
612 pf_qid_unref(altq->qid);
613 free(altq, M_PFALTQ);
616 V_altqs_inactive_open = 0;
621 pf_enable_altq(struct pf_altq *altq)
624 struct tb_profile tb;
627 if ((ifp = ifunit(altq->ifname)) == NULL)
630 if (ifp->if_snd.altq_type != ALTQT_NONE)
631 error = altq_enable(&ifp->if_snd);
633 /* set tokenbucket regulator */
634 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
635 tb.rate = altq->ifbandwidth;
636 tb.depth = altq->tbrsize;
637 error = tbr_set(&ifp->if_snd, &tb);
644 pf_disable_altq(struct pf_altq *altq)
647 struct tb_profile tb;
650 if ((ifp = ifunit(altq->ifname)) == NULL)
654 * if the discipline is no longer referenced by the interface, it has
655 * been overridden by a new one; in that case just return.
657 if (altq->altq_disc != ifp->if_snd.altq_disc)
660 error = altq_disable(&ifp->if_snd);
663 /* clear tokenbucket regulator */
665 error = tbr_set(&ifp->if_snd, &tb);
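/*
 * Rebuild the altq configuration when an interface arrives or departs,
 * marking queues whose interface is gone with PFALTQ_FLAG_IF_REMOVED.
 */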
672 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
675 struct pf_altq *a1, *a2, *a3;
679 /* Interrupt userland queue modifications */
680 if (V_altqs_inactive_open)
681 pf_rollback_altq(V_ticket_altqs_inactive);
683 /* Start new altq ruleset */
684 if (pf_begin_altq(&ticket))
687 /* Copy the current active set */
688 TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
689 a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
694 bcopy(a1, a2, sizeof(struct pf_altq));
696 if (a2->qname[0] != 0) {
697 if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
702 a2->altq_disc = NULL;
703 TAILQ_FOREACH(a3, V_pf_altqs_inactive, entries) {
704 if (strncmp(a3->ifname, a2->ifname,
705 IFNAMSIZ) == 0 && a3->qname[0] == 0) {
706 a2->altq_disc = a3->altq_disc;
711 /* Deactivate the interface in question */
712 a2->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
713 if ((ifp1 = ifunit(a2->ifname)) == NULL ||
714 (remove && ifp1 == ifp)) {
715 a2->local_flags |= PFALTQ_FLAG_IF_REMOVED;
717 error = altq_add(a2);
719 if (ticket != V_ticket_altqs_inactive)
728 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
732 pf_rollback_altq(ticket);
734 pf_commit_altq(ticket);
739 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
741 struct pf_ruleset *rs;
742 struct pf_rule *rule;
746 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
748 rs = pf_find_or_create_ruleset(anchor);
751 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
752 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
753 rs->rules[rs_num].inactive.rcount--;
755 *ticket = ++rs->rules[rs_num].inactive.ticket;
756 rs->rules[rs_num].inactive.open = 1;
761 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
763 struct pf_ruleset *rs;
764 struct pf_rule *rule;
768 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
770 rs = pf_find_ruleset(anchor);
771 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
772 rs->rules[rs_num].inactive.ticket != ticket)
774 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
775 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
776 rs->rules[rs_num].inactive.rcount--;
778 rs->rules[rs_num].inactive.open = 0;
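/*
 * Helpers that feed rule fields into the MD5 ruleset checksum used for
 * pfsync ruleset matching.
 */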
782 #define PF_MD5_UPD(st, elm) \
783 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
785 #define PF_MD5_UPD_STR(st, elm) \
786 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
788 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \
789 (stor) = htonl((st)->elm); \
790 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
793 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \
794 (stor) = htons((st)->elm); \
795 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
799 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
801 PF_MD5_UPD(pfr, addr.type);
802 switch (pfr->addr.type) {
803 case PF_ADDR_DYNIFTL:
804 PF_MD5_UPD(pfr, addr.v.ifname);
805 PF_MD5_UPD(pfr, addr.iflags);
808 PF_MD5_UPD(pfr, addr.v.tblname);
810 case PF_ADDR_ADDRMASK:
812 PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
813 PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
817 PF_MD5_UPD(pfr, port[0]);
818 PF_MD5_UPD(pfr, port[1]);
819 PF_MD5_UPD(pfr, neg);
820 PF_MD5_UPD(pfr, port_op);
824 pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
829 pf_hash_rule_addr(ctx, &rule->src);
830 pf_hash_rule_addr(ctx, &rule->dst);
831 PF_MD5_UPD_STR(rule, label);
832 PF_MD5_UPD_STR(rule, ifname);
833 PF_MD5_UPD_STR(rule, match_tagname);
834 PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
835 PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
836 PF_MD5_UPD_HTONL(rule, prob, y);
837 PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
838 PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
839 PF_MD5_UPD(rule, uid.op);
840 PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
841 PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
842 PF_MD5_UPD(rule, gid.op);
843 PF_MD5_UPD_HTONL(rule, rule_flag, y);
844 PF_MD5_UPD(rule, action);
845 PF_MD5_UPD(rule, direction);
846 PF_MD5_UPD(rule, af);
847 PF_MD5_UPD(rule, quick);
848 PF_MD5_UPD(rule, ifnot);
849 PF_MD5_UPD(rule, match_tag_not);
850 PF_MD5_UPD(rule, natpass);
851 PF_MD5_UPD(rule, keep_state);
852 PF_MD5_UPD(rule, proto);
853 PF_MD5_UPD(rule, type);
854 PF_MD5_UPD(rule, code);
855 PF_MD5_UPD(rule, flags);
856 PF_MD5_UPD(rule, flagset);
857 PF_MD5_UPD(rule, allow_opts);
858 PF_MD5_UPD(rule, rt);
859 PF_MD5_UPD(rule, tos);
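/*
 * Swap the inactive ruleset into the active slot and purge the rules it
 * replaces.
 */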
863 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
865 struct pf_ruleset *rs;
866 struct pf_rule *rule, **old_array;
867 struct pf_rulequeue *old_rules;
869 u_int32_t old_rcount;
873 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
875 rs = pf_find_ruleset(anchor);
876 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
877 ticket != rs->rules[rs_num].inactive.ticket)
880 /* Calculate checksum for the main ruleset */
881 if (rs == &pf_main_ruleset) {
882 error = pf_setup_pfsync_matching(rs);
887 /* Swap rules, keep the old. */
888 old_rules = rs->rules[rs_num].active.ptr;
889 old_rcount = rs->rules[rs_num].active.rcount;
890 old_array = rs->rules[rs_num].active.ptr_array;
892 rs->rules[rs_num].active.ptr =
893 rs->rules[rs_num].inactive.ptr;
894 rs->rules[rs_num].active.ptr_array =
895 rs->rules[rs_num].inactive.ptr_array;
896 rs->rules[rs_num].active.rcount =
897 rs->rules[rs_num].inactive.rcount;
898 rs->rules[rs_num].inactive.ptr = old_rules;
899 rs->rules[rs_num].inactive.ptr_array = old_array;
900 rs->rules[rs_num].inactive.rcount = old_rcount;
902 rs->rules[rs_num].active.ticket =
903 rs->rules[rs_num].inactive.ticket;
904 pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
907 /* Purge the old rule list. */
908 while ((rule = TAILQ_FIRST(old_rules)) != NULL)
909 pf_unlink_rule(old_rules, rule);
910 if (rs->rules[rs_num].inactive.ptr_array)
911 free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
912 rs->rules[rs_num].inactive.ptr_array = NULL;
913 rs->rules[rs_num].inactive.rcount = 0;
914 rs->rules[rs_num].inactive.open = 0;
915 pf_remove_if_empty_ruleset(rs);
921 pf_setup_pfsync_matching(struct pf_ruleset *rs)
924 struct pf_rule *rule;
926 u_int8_t digest[PF_MD5_DIGEST_LENGTH];
929 for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
930 /* XXX PF_RULESET_SCRUB as well? */
931 if (rs_cnt == PF_RULESET_SCRUB)
934 if (rs->rules[rs_cnt].inactive.ptr_array)
935 free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
936 rs->rules[rs_cnt].inactive.ptr_array = NULL;
938 if (rs->rules[rs_cnt].inactive.rcount) {
939 rs->rules[rs_cnt].inactive.ptr_array =
940 malloc(sizeof(caddr_t) *
941 rs->rules[rs_cnt].inactive.rcount,
944 if (!rs->rules[rs_cnt].inactive.ptr_array)
948 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
950 pf_hash_rule(&ctx, rule);
951 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
955 MD5Final(digest, &ctx);
956 memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
961 pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
966 switch (addr->type) {
968 addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
969 if (addr->p.tbl == NULL)
972 case PF_ADDR_DYNIFTL:
973 error = pfi_dynaddr_setup(addr, af);
981 pf_addr_copyout(struct pf_addr_wrap *addr)
984 switch (addr->type) {
985 case PF_ADDR_DYNIFTL:
986 pfi_dynaddr_copyout(addr);
989 pf_tbladdr_copyout(addr);
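/*
 * ioctl entry point for /dev/pf: enforce securelevel and FWRITE
 * restrictions first, then dispatch on the DIOC* command.
 */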
995 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
999 /* XXX keep in sync with switch() below */
1000 if (securelevel_gt(td->td_ucred, 2))
1007 case DIOCSETSTATUSIF:
1013 case DIOCGETTIMEOUT:
1014 case DIOCCLRRULECTRS:
1019 case DIOCGETRULESETS:
1020 case DIOCGETRULESET:
1021 case DIOCRGETTABLES:
1022 case DIOCRGETTSTATS:
1023 case DIOCRCLRTSTATS:
1029 case DIOCRGETASTATS:
1030 case DIOCRCLRASTATS:
1033 case DIOCGETSRCNODES:
1034 case DIOCCLRSRCNODES:
1035 case DIOCIGETIFACES:
1040 case DIOCRCLRTABLES:
1041 case DIOCRADDTABLES:
1042 case DIOCRDELTABLES:
1043 case DIOCRSETTFLAGS:
1044 if (((struct pfioc_table *)addr)->pfrio_flags &
1046 break; /* dummy operation ok */
1052 if (!(flags & FWRITE))
1060 case DIOCGETTIMEOUT:
1065 case DIOCGETRULESETS:
1066 case DIOCGETRULESET:
1068 case DIOCRGETTABLES:
1069 case DIOCRGETTSTATS:
1071 case DIOCRGETASTATS:
1074 case DIOCGETSRCNODES:
1075 case DIOCIGETIFACES:
1078 case DIOCRCLRTABLES:
1079 case DIOCRADDTABLES:
1080 case DIOCRDELTABLES:
1081 case DIOCRCLRTSTATS:
1086 case DIOCRSETTFLAGS:
1087 if (((struct pfioc_table *)addr)->pfrio_flags &
1089 flags |= FWRITE; /* need write lock for dummy */
1090 break; /* dummy operation ok */
1094 if (((struct pfioc_rule *)addr)->action ==
1102 CURVNET_SET(TD_TO_VNET(td));
1106 sx_xlock(&pf_ioctl_lock);
1107 if (V_pf_status.running)
1114 DPFPRINTF(PF_DEBUG_MISC,
1115 ("pf: pfil registration failed\n"));
1118 V_pf_status.running = 1;
1119 V_pf_status.since = time_second;
1122 V_pf_stateid[cpu] = time_second;
1124 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1129 sx_xlock(&pf_ioctl_lock);
1130 if (!V_pf_status.running)
1133 V_pf_status.running = 0;
1134 error = dehook_pf();
1136 V_pf_status.running = 1;
1137 DPFPRINTF(PF_DEBUG_MISC,
1138 ("pf: pfil unregistration failed\n"));
1140 V_pf_status.since = time_second;
1141 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1146 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1147 struct pf_ruleset *ruleset;
1148 struct pf_rule *rule, *tail;
1149 struct pf_pooladdr *pa;
1150 struct pfi_kif *kif = NULL;
1153 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1158 if (pr->rule.af == AF_INET) {
1159 error = EAFNOSUPPORT;
1164 if (pr->rule.af == AF_INET6) {
1165 error = EAFNOSUPPORT;
1170 rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
1171 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1172 if (rule->ifname[0])
1173 kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
1174 rule->states_cur = counter_u64_alloc(M_WAITOK);
1175 rule->states_tot = counter_u64_alloc(M_WAITOK);
1176 rule->src_nodes = counter_u64_alloc(M_WAITOK);
1177 rule->cuid = td->td_ucred->cr_ruid;
1178 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
1179 TAILQ_INIT(&rule->rpool.list);
1181 #define ERROUT(x) { error = (x); goto DIOCADDRULE_error; }
1184 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1185 ruleset = pf_find_ruleset(pr->anchor);
1186 if (ruleset == NULL)
1188 rs_num = pf_get_ruleset_number(pr->rule.action);
1189 if (rs_num >= PF_RULESET_MAX)
1191 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1192 DPFPRINTF(PF_DEBUG_MISC,
1193 ("ticket: %d != [%d]%d\n", pr->ticket, rs_num,
1194 ruleset->rules[rs_num].inactive.ticket));
1197 if (pr->pool_ticket != V_ticket_pabuf) {
1198 DPFPRINTF(PF_DEBUG_MISC,
1199 ("pool_ticket: %d != %d\n", pr->pool_ticket,
1204 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1207 rule->nr = tail->nr + 1;
1210 if (rule->ifname[0]) {
1211 rule->kif = pfi_kif_attach(kif, rule->ifname);
1212 pfi_kif_ref(rule->kif);
1216 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
1221 if (rule->qname[0] != 0) {
1222 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1224 else if (rule->pqname[0] != 0) {
1226 pf_qname2qid(rule->pqname)) == 0)
1229 rule->pqid = rule->qid;
1232 if (rule->tagname[0])
1233 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1235 if (rule->match_tagname[0])
1236 if ((rule->match_tag =
1237 pf_tagname2tag(rule->match_tagname)) == 0)
1239 if (rule->rt && !rule->direction)
1243 if (rule->logif >= PFLOGIFS_MAX)
1245 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
1247 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
1249 if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1251 if (rule->scrub_flags & PFSTATE_SETPRIO &&
1252 (rule->set_prio[0] > PF_PRIO_MAX ||
1253 rule->set_prio[1] > PF_PRIO_MAX))
1255 TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
1256 if (pa->addr.type == PF_ADDR_TABLE) {
1257 pa->addr.p.tbl = pfr_attach_table(ruleset,
1258 pa->addr.v.tblname);
1259 if (pa->addr.p.tbl == NULL)
1263 rule->overload_tbl = NULL;
1264 if (rule->overload_tblname[0]) {
1265 if ((rule->overload_tbl = pfr_attach_table(ruleset,
1266 rule->overload_tblname)) == NULL)
1269 rule->overload_tbl->pfrkt_flags |=
1273 pf_mv_pool(&V_pf_pabuf, &rule->rpool.list);
1274 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1275 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1276 (rule->rt > PF_NOPFROUTE)) &&
1277 (TAILQ_FIRST(&rule->rpool.list) == NULL))
1286 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1287 rule->evaluations = rule->packets[0] = rule->packets[1] =
1288 rule->bytes[0] = rule->bytes[1] = 0;
1289 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1291 ruleset->rules[rs_num].inactive.rcount++;
1298 counter_u64_free(rule->states_cur);
1299 counter_u64_free(rule->states_tot);
1300 counter_u64_free(rule->src_nodes);
1301 free(rule, M_PFRULE);
1303 free(kif, PFI_MTYPE);
1307 case DIOCGETRULES: {
1308 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1309 struct pf_ruleset *ruleset;
1310 struct pf_rule *tail;
1314 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1315 ruleset = pf_find_ruleset(pr->anchor);
1316 if (ruleset == NULL) {
1321 rs_num = pf_get_ruleset_number(pr->rule.action);
1322 if (rs_num >= PF_RULESET_MAX) {
1327 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1330 pr->nr = tail->nr + 1;
1333 pr->ticket = ruleset->rules[rs_num].active.ticket;
1339 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1340 struct pf_ruleset *ruleset;
1341 struct pf_rule *rule;
1345 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1346 ruleset = pf_find_ruleset(pr->anchor);
1347 if (ruleset == NULL) {
1352 rs_num = pf_get_ruleset_number(pr->rule.action);
1353 if (rs_num >= PF_RULESET_MAX) {
1358 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1363 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1364 while ((rule != NULL) && (rule->nr != pr->nr))
1365 rule = TAILQ_NEXT(rule, entries);
1371 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1372 pr->rule.u_states_cur = counter_u64_fetch(rule->states_cur);
1373 pr->rule.u_states_tot = counter_u64_fetch(rule->states_tot);
1374 pr->rule.u_src_nodes = counter_u64_fetch(rule->src_nodes);
1375 if (pf_anchor_copyout(ruleset, rule, pr)) {
1380 pf_addr_copyout(&pr->rule.src.addr);
1381 pf_addr_copyout(&pr->rule.dst.addr);
1382 for (i = 0; i < PF_SKIP_COUNT; ++i)
1383 if (rule->skip[i].ptr == NULL)
1384 pr->rule.skip[i].nr = -1;
1386 pr->rule.skip[i].nr =
1387 rule->skip[i].ptr->nr;
1389 if (pr->action == PF_GET_CLR_CNTR) {
1390 rule->evaluations = 0;
1391 rule->packets[0] = rule->packets[1] = 0;
1392 rule->bytes[0] = rule->bytes[1] = 0;
1393 counter_u64_zero(rule->states_tot);
1399 case DIOCCHANGERULE: {
1400 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1401 struct pf_ruleset *ruleset;
1402 struct pf_rule *oldrule = NULL, *newrule = NULL;
1403 struct pfi_kif *kif = NULL;
1404 struct pf_pooladdr *pa;
1408 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1409 pcr->action > PF_CHANGE_GET_TICKET) {
1413 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1418 if (pcr->action != PF_CHANGE_REMOVE) {
1420 if (pcr->rule.af == AF_INET) {
1421 error = EAFNOSUPPORT;
1426 if (pcr->rule.af == AF_INET6) {
1427 error = EAFNOSUPPORT;
1431 newrule = malloc(sizeof(*newrule), M_PFRULE, M_WAITOK);
1432 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1433 if (newrule->ifname[0])
1434 kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
1435 newrule->states_cur = counter_u64_alloc(M_WAITOK);
1436 newrule->states_tot = counter_u64_alloc(M_WAITOK);
1437 newrule->src_nodes = counter_u64_alloc(M_WAITOK);
1438 newrule->cuid = td->td_ucred->cr_ruid;
1439 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
1440 TAILQ_INIT(&newrule->rpool.list);
1443 #define ERROUT(x) { error = (x); goto DIOCCHANGERULE_error; }
1446 if (!(pcr->action == PF_CHANGE_REMOVE ||
1447 pcr->action == PF_CHANGE_GET_TICKET) &&
1448 pcr->pool_ticket != V_ticket_pabuf)
1451 ruleset = pf_find_ruleset(pcr->anchor);
1452 if (ruleset == NULL)
1455 rs_num = pf_get_ruleset_number(pcr->rule.action);
1456 if (rs_num >= PF_RULESET_MAX)
1459 if (pcr->action == PF_CHANGE_GET_TICKET) {
1460 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1462 } else if (pcr->ticket !=
1463 ruleset->rules[rs_num].active.ticket)
1466 if (pcr->action != PF_CHANGE_REMOVE) {
1467 if (newrule->ifname[0]) {
1468 newrule->kif = pfi_kif_attach(kif,
1470 pfi_kif_ref(newrule->kif);
1472 newrule->kif = NULL;
1474 if (newrule->rtableid > 0 &&
1475 newrule->rtableid >= rt_numfibs)
1480 if (newrule->qname[0] != 0) {
1482 pf_qname2qid(newrule->qname)) == 0)
1484 else if (newrule->pqname[0] != 0) {
1485 if ((newrule->pqid =
1486 pf_qname2qid(newrule->pqname)) == 0)
1489 newrule->pqid = newrule->qid;
1492 if (newrule->tagname[0])
1494 pf_tagname2tag(newrule->tagname)) == 0)
1496 if (newrule->match_tagname[0])
1497 if ((newrule->match_tag = pf_tagname2tag(
1498 newrule->match_tagname)) == 0)
1500 if (newrule->rt && !newrule->direction)
1504 if (newrule->logif >= PFLOGIFS_MAX)
1506 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
1508 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
1510 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1512 TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
1513 if (pa->addr.type == PF_ADDR_TABLE) {
1515 pfr_attach_table(ruleset,
1516 pa->addr.v.tblname);
1517 if (pa->addr.p.tbl == NULL)
1521 newrule->overload_tbl = NULL;
1522 if (newrule->overload_tblname[0]) {
1523 if ((newrule->overload_tbl = pfr_attach_table(
1524 ruleset, newrule->overload_tblname)) ==
1528 newrule->overload_tbl->pfrkt_flags |=
1532 pf_mv_pool(&V_pf_pabuf, &newrule->rpool.list);
1533 if (((((newrule->action == PF_NAT) ||
1534 (newrule->action == PF_RDR) ||
1535 (newrule->action == PF_BINAT) ||
1536 (newrule->rt > PF_NOPFROUTE)) &&
1537 !newrule->anchor)) &&
1538 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1542 pf_free_rule(newrule);
1547 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1548 newrule->evaluations = 0;
1549 newrule->packets[0] = newrule->packets[1] = 0;
1550 newrule->bytes[0] = newrule->bytes[1] = 0;
1552 pf_empty_pool(&V_pf_pabuf);
1554 if (pcr->action == PF_CHANGE_ADD_HEAD)
1555 oldrule = TAILQ_FIRST(
1556 ruleset->rules[rs_num].active.ptr);
1557 else if (pcr->action == PF_CHANGE_ADD_TAIL)
1558 oldrule = TAILQ_LAST(
1559 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1561 oldrule = TAILQ_FIRST(
1562 ruleset->rules[rs_num].active.ptr);
1563 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1564 oldrule = TAILQ_NEXT(oldrule, entries);
1565 if (oldrule == NULL) {
1566 if (newrule != NULL)
1567 pf_free_rule(newrule);
1574 if (pcr->action == PF_CHANGE_REMOVE) {
1575 pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
1577 ruleset->rules[rs_num].active.rcount--;
1579 if (oldrule == NULL)
1581 ruleset->rules[rs_num].active.ptr,
1583 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1584 pcr->action == PF_CHANGE_ADD_BEFORE)
1585 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1588 ruleset->rules[rs_num].active.ptr,
1589 oldrule, newrule, entries);
1590 ruleset->rules[rs_num].active.rcount++;
1594 TAILQ_FOREACH(oldrule,
1595 ruleset->rules[rs_num].active.ptr, entries)
1598 ruleset->rules[rs_num].active.ticket++;
1600 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1601 pf_remove_if_empty_ruleset(ruleset);
1607 DIOCCHANGERULE_error:
1609 if (newrule != NULL) {
1610 counter_u64_free(newrule->states_cur);
1611 counter_u64_free(newrule->states_tot);
1612 counter_u64_free(newrule->src_nodes);
1613 free(newrule, M_PFRULE);
1616 free(kif, PFI_MTYPE);
1620 case DIOCCLRSTATES: {
1622 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1623 u_int i, killed = 0;
1625 for (i = 0; i <= pf_hashmask; i++) {
1626 struct pf_idhash *ih = &V_pf_idhash[i];
1628 relock_DIOCCLRSTATES:
1629 PF_HASHROW_LOCK(ih);
1630 LIST_FOREACH(s, &ih->states, entry)
1631 if (!psk->psk_ifname[0] ||
1632 !strcmp(psk->psk_ifname,
1633 s->kif->pfik_name)) {
1635 * Don't send out individual delete messages.
1638 s->state_flags |= PFSTATE_NOSYNC;
1639 pf_unlink_state(s, PF_ENTER_LOCKED);
1641 goto relock_DIOCCLRSTATES;
1643 PF_HASHROW_UNLOCK(ih);
1645 psk->psk_killed = killed;
1646 if (pfsync_clear_states_ptr != NULL)
1647 pfsync_clear_states_ptr(V_pf_status.hostid, psk->psk_ifname);
1651 case DIOCKILLSTATES: {
1653 struct pf_state_key *sk;
1654 struct pf_addr *srcaddr, *dstaddr;
1655 u_int16_t srcport, dstport;
1656 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1657 u_int i, killed = 0;
1659 if (psk->psk_pfcmp.id) {
1660 if (psk->psk_pfcmp.creatorid == 0)
1661 psk->psk_pfcmp.creatorid = V_pf_status.hostid;
1662 if ((s = pf_find_state_byid(psk->psk_pfcmp.id,
1663 psk->psk_pfcmp.creatorid))) {
1664 pf_unlink_state(s, PF_ENTER_LOCKED);
1665 psk->psk_killed = 1;
1670 for (i = 0; i <= pf_hashmask; i++) {
1671 struct pf_idhash *ih = &V_pf_idhash[i];
1673 relock_DIOCKILLSTATES:
1674 PF_HASHROW_LOCK(ih);
1675 LIST_FOREACH(s, &ih->states, entry) {
1676 sk = s->key[PF_SK_WIRE];
1677 if (s->direction == PF_OUT) {
1678 srcaddr = &sk->addr[1];
1679 dstaddr = &sk->addr[0];
1680 srcport = sk->port[1];
1681 dstport = sk->port[0];
1683 srcaddr = &sk->addr[0];
1684 dstaddr = &sk->addr[1];
1685 srcport = sk->port[0];
1686 dstport = sk->port[1];
1689 if ((!psk->psk_af || sk->af == psk->psk_af)
1690 && (!psk->psk_proto || psk->psk_proto ==
1692 PF_MATCHA(psk->psk_src.neg,
1693 &psk->psk_src.addr.v.a.addr,
1694 &psk->psk_src.addr.v.a.mask,
1696 PF_MATCHA(psk->psk_dst.neg,
1697 &psk->psk_dst.addr.v.a.addr,
1698 &psk->psk_dst.addr.v.a.mask,
1700 (psk->psk_src.port_op == 0 ||
1701 pf_match_port(psk->psk_src.port_op,
1702 psk->psk_src.port[0], psk->psk_src.port[1],
1704 (psk->psk_dst.port_op == 0 ||
1705 pf_match_port(psk->psk_dst.port_op,
1706 psk->psk_dst.port[0], psk->psk_dst.port[1],
1708 (!psk->psk_label[0] ||
1709 (s->rule.ptr->label[0] &&
1710 !strcmp(psk->psk_label,
1711 s->rule.ptr->label))) &&
1712 (!psk->psk_ifname[0] ||
1713 !strcmp(psk->psk_ifname,
1714 s->kif->pfik_name))) {
1715 pf_unlink_state(s, PF_ENTER_LOCKED);
1717 goto relock_DIOCKILLSTATES;
1720 PF_HASHROW_UNLOCK(ih);
1722 psk->psk_killed = killed;
1726 case DIOCADDSTATE: {
1727 struct pfioc_state *ps = (struct pfioc_state *)addr;
1728 struct pfsync_state *sp = &ps->state;
1730 if (sp->timeout >= PFTM_MAX) {
1734 if (pfsync_state_import_ptr != NULL) {
1736 error = pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL);
1743 case DIOCGETSTATE: {
1744 struct pfioc_state *ps = (struct pfioc_state *)addr;
1747 s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
1753 pfsync_state_export(&ps->state, s);
1758 case DIOCGETSTATES: {
1759 struct pfioc_states *ps = (struct pfioc_states *)addr;
1761 struct pfsync_state *pstore, *p;
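/* A zero-length request just reports the buffer size needed for all states. */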
1764 if (ps->ps_len == 0) {
1765 nr = uma_zone_get_cur(V_pf_state_z);
1766 ps->ps_len = sizeof(struct pfsync_state) * nr;
1770 p = pstore = malloc(ps->ps_len, M_TEMP, M_WAITOK);
1773 for (i = 0; i <= pf_hashmask; i++) {
1774 struct pf_idhash *ih = &V_pf_idhash[i];
1776 PF_HASHROW_LOCK(ih);
1777 LIST_FOREACH(s, &ih->states, entry) {
1779 if (s->timeout == PFTM_UNLINKED)
1782 if ((nr+1) * sizeof(*p) > ps->ps_len) {
1783 PF_HASHROW_UNLOCK(ih);
1784 goto DIOCGETSTATES_full;
1786 pfsync_state_export(p, s);
1790 PF_HASHROW_UNLOCK(ih);
1793 error = copyout(pstore, ps->ps_states,
1794 sizeof(struct pfsync_state) * nr);
1796 free(pstore, M_TEMP);
1799 ps->ps_len = sizeof(struct pfsync_state) * nr;
1800 free(pstore, M_TEMP);
1805 case DIOCGETSTATUS: {
1806 struct pf_status *s = (struct pf_status *)addr;
1809 s->running = V_pf_status.running;
1810 s->since = V_pf_status.since;
1811 s->debug = V_pf_status.debug;
1812 s->hostid = V_pf_status.hostid;
1813 s->states = V_pf_status.states;
1814 s->src_nodes = V_pf_status.src_nodes;
1816 for (int i = 0; i < PFRES_MAX; i++)
1818 counter_u64_fetch(V_pf_status.counters[i]);
1819 for (int i = 0; i < LCNT_MAX; i++)
1821 counter_u64_fetch(V_pf_status.lcounters[i]);
1822 for (int i = 0; i < FCNT_MAX; i++)
1824 counter_u64_fetch(V_pf_status.fcounters[i]);
1825 for (int i = 0; i < SCNT_MAX; i++)
1827 counter_u64_fetch(V_pf_status.scounters[i]);
1829 bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ);
1830 bcopy(V_pf_status.pf_chksum, s->pf_chksum,
1831 PF_MD5_DIGEST_LENGTH);
1833 pfi_update_status(s->ifname, s);
1838 case DIOCSETSTATUSIF: {
1839 struct pfioc_if *pi = (struct pfioc_if *)addr;
1841 if (pi->ifname[0] == 0) {
1842 bzero(V_pf_status.ifname, IFNAMSIZ);
1846 strlcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
1851 case DIOCCLRSTATUS: {
1853 for (int i = 0; i < PFRES_MAX; i++)
1854 counter_u64_zero(V_pf_status.counters[i]);
1855 for (int i = 0; i < FCNT_MAX; i++)
1856 counter_u64_zero(V_pf_status.fcounters[i]);
1857 for (int i = 0; i < SCNT_MAX; i++)
1858 counter_u64_zero(V_pf_status.scounters[i]);
1859 for (int i = 0; i < LCNT_MAX; i++)
1860 counter_u64_zero(V_pf_status.lcounters[i]);
1861 V_pf_status.since = time_second;
1862 if (*V_pf_status.ifname)
1863 pfi_update_status(V_pf_status.ifname, NULL);
1869 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
1870 struct pf_state_key *sk;
1871 struct pf_state *state;
1872 struct pf_state_key_cmp key;
1873 int m = 0, direction = pnl->direction;
1876 /* NATLOOK src and dst are reversed, so reverse sidx/didx */
1877 sidx = (direction == PF_IN) ? 1 : 0;
1878 didx = (direction == PF_IN) ? 0 : 1;
1881 PF_AZERO(&pnl->saddr, pnl->af) ||
1882 PF_AZERO(&pnl->daddr, pnl->af) ||
1883 ((pnl->proto == IPPROTO_TCP ||
1884 pnl->proto == IPPROTO_UDP) &&
1885 (!pnl->dport || !pnl->sport)))
1888 bzero(&key, sizeof(key));
1890 key.proto = pnl->proto;
1891 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
1892 key.port[sidx] = pnl->sport;
1893 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
1894 key.port[didx] = pnl->dport;
1896 state = pf_find_state_all(&key, direction, &m);
1899 error = E2BIG; /* more than one state */
1900 else if (state != NULL) {
1901 /* XXXGL: not locked read */
1902 sk = state->key[sidx];
1903 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
1904 pnl->rsport = sk->port[sidx];
1905 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
1906 pnl->rdport = sk->port[didx];
1913 case DIOCSETTIMEOUT: {
1914 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1917 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1923 old = V_pf_default_rule.timeout[pt->timeout];
1924 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
1926 V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
1927 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
1928 wakeup(pf_purge_thread);
1934 case DIOCGETTIMEOUT: {
1935 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1937 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1942 pt->seconds = V_pf_default_rule.timeout[pt->timeout];
1947 case DIOCGETLIMIT: {
1948 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1950 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1955 pl->limit = V_pf_limits[pl->index].limit;
1960 case DIOCSETLIMIT: {
1961 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1965 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1966 V_pf_limits[pl->index].zone == NULL) {
1971 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
1972 old_limit = V_pf_limits[pl->index].limit;
1973 V_pf_limits[pl->index].limit = pl->limit;
1974 pl->limit = old_limit;
1979 case DIOCSETDEBUG: {
1980 u_int32_t *level = (u_int32_t *)addr;
1983 V_pf_status.debug = *level;
1988 case DIOCCLRRULECTRS: {
1989 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1990 struct pf_ruleset *ruleset = &pf_main_ruleset;
1991 struct pf_rule *rule;
1995 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
1996 rule->evaluations = 0;
1997 rule->packets[0] = rule->packets[1] = 0;
1998 rule->bytes[0] = rule->bytes[1] = 0;
2004 case DIOCGIFSPEED: {
2005 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr;
2006 struct pf_ifspeed ps;
2009 if (psp->ifname[0] != 0) {
2010 /* Can we completely trust user-land? */
2011 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
2012 ifp = ifunit(ps.ifname);
2014 psp->baudrate = ifp->if_baudrate;
2023 case DIOCSTARTALTQ: {
2024 struct pf_altq *altq;
2027 /* enable all altq interfaces on active list */
2028 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
2029 if (altq->qname[0] == 0 && (altq->local_flags &
2030 PFALTQ_FLAG_IF_REMOVED) == 0) {
2031 error = pf_enable_altq(altq);
2037 V_pf_altq_running = 1;
2039 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2043 case DIOCSTOPALTQ: {
2044 struct pf_altq *altq;
2047 /* disable all altq interfaces on active list */
2048 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
2049 if (altq->qname[0] == 0 && (altq->local_flags &
2050 PFALTQ_FLAG_IF_REMOVED) == 0) {
2051 error = pf_disable_altq(altq);
2057 V_pf_altq_running = 0;
2059 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2064 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2065 struct pf_altq *altq, *a;
2068 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK);
2069 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
2070 altq->local_flags = 0;
2073 if (pa->ticket != V_ticket_altqs_inactive) {
2075 free(altq, M_PFALTQ);
2081 * if this is for a queue, find the discipline and
2082 * copy the necessary fields
2084 if (altq->qname[0] != 0) {
2085 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2088 free(altq, M_PFALTQ);
2091 altq->altq_disc = NULL;
2092 TAILQ_FOREACH(a, V_pf_altqs_inactive, entries) {
2093 if (strncmp(a->ifname, altq->ifname,
2094 IFNAMSIZ) == 0 && a->qname[0] == 0) {
2095 altq->altq_disc = a->altq_disc;
2101 if ((ifp = ifunit(altq->ifname)) == NULL)
2102 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
2104 error = altq_add(altq);
2108 free(altq, M_PFALTQ);
2112 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
2113 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2118 case DIOCGETALTQS: {
2119 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2120 struct pf_altq *altq;
2124 TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
2126 pa->ticket = V_ticket_altqs_active;
2132 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2133 struct pf_altq *altq;
2137 if (pa->ticket != V_ticket_altqs_active) {
2143 altq = TAILQ_FIRST(V_pf_altqs_active);
2144 while ((altq != NULL) && (nr < pa->nr)) {
2145 altq = TAILQ_NEXT(altq, entries);
2153 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2158 case DIOCCHANGEALTQ:
2159 /* CHANGEALTQ not supported yet! */
2163 case DIOCGETQSTATS: {
2164 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
2165 struct pf_altq *altq;
2170 if (pq->ticket != V_ticket_altqs_active) {
2175 nbytes = pq->nbytes;
2177 altq = TAILQ_FIRST(V_pf_altqs_active);
2178 while ((altq != NULL) && (nr < pq->nr)) {
2179 altq = TAILQ_NEXT(altq, entries);
2188 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
2194 error = altq_getqstats(altq, pq->buf, &nbytes);
2196 pq->scheduler = altq->scheduler;
2197 pq->nbytes = nbytes;
2203 case DIOCBEGINADDRS: {
2204 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2207 pf_empty_pool(&V_pf_pabuf);
2208 pp->ticket = ++V_ticket_pabuf;
2214 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2215 struct pf_pooladdr *pa;
2216 struct pfi_kif *kif = NULL;
2219 if (pp->af == AF_INET) {
2220 error = EAFNOSUPPORT;
2225 if (pp->af == AF_INET6) {
2226 error = EAFNOSUPPORT;
2230 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2231 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2232 pp->addr.addr.type != PF_ADDR_TABLE) {
2236 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
2237 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2239 kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
2241 if (pp->ticket != V_ticket_pabuf) {
2244 free(kif, PFI_MTYPE);
2249 if (pa->ifname[0]) {
2250 pa->kif = pfi_kif_attach(kif, pa->ifname);
2251 pfi_kif_ref(pa->kif);
2254 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
2255 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
2257 pfi_kif_unref(pa->kif);
2262 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
2267 case DIOCGETADDRS: {
2268 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2269 struct pf_pool *pool;
2270 struct pf_pooladdr *pa;
2274 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2275 pp->r_num, 0, 1, 0);
2281 TAILQ_FOREACH(pa, &pool->list, entries)
2288 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2289 struct pf_pool *pool;
2290 struct pf_pooladdr *pa;
2294 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2295 pp->r_num, 0, 1, 1);
2301 pa = TAILQ_FIRST(&pool->list);
2302 while ((pa != NULL) && (nr < pp->nr)) {
2303 pa = TAILQ_NEXT(pa, entries);
2311 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2312 pf_addr_copyout(&pp->addr.addr);
2317 case DIOCCHANGEADDR: {
2318 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2319 struct pf_pool *pool;
2320 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
2321 struct pf_ruleset *ruleset;
2322 struct pfi_kif *kif = NULL;
2324 if (pca->action < PF_CHANGE_ADD_HEAD ||
2325 pca->action > PF_CHANGE_REMOVE) {
2329 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2330 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2331 pca->addr.addr.type != PF_ADDR_TABLE) {
2336 if (pca->action != PF_CHANGE_REMOVE) {
2338 if (pca->af == AF_INET) {
2339 error = EAFNOSUPPORT;
2344 if (pca->af == AF_INET6) {
2345 error = EAFNOSUPPORT;
2349 newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
2350 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2351 if (newpa->ifname[0])
2352 kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
2356 #define ERROUT(x) { error = (x); goto DIOCCHANGEADDR_error; }
2358 ruleset = pf_find_ruleset(pca->anchor);
2359 if (ruleset == NULL)
2362 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2363 pca->r_num, pca->r_last, 1, 1);
2367 if (pca->action != PF_CHANGE_REMOVE) {
2368 if (newpa->ifname[0]) {
2369 newpa->kif = pfi_kif_attach(kif, newpa->ifname);
2370 pfi_kif_ref(newpa->kif);
2374 switch (newpa->addr.type) {
2375 case PF_ADDR_DYNIFTL:
2376 error = pfi_dynaddr_setup(&newpa->addr,
2380 newpa->addr.p.tbl = pfr_attach_table(ruleset,
2381 newpa->addr.v.tblname);
2382 if (newpa->addr.p.tbl == NULL)
2387 goto DIOCCHANGEADDR_error;
2390 switch (pca->action) {
2391 case PF_CHANGE_ADD_HEAD:
2392 oldpa = TAILQ_FIRST(&pool->list);
2394 case PF_CHANGE_ADD_TAIL:
2395 oldpa = TAILQ_LAST(&pool->list, pf_palist);
2398 oldpa = TAILQ_FIRST(&pool->list);
2399 for (int i = 0; oldpa && i < pca->nr; i++)
2400 oldpa = TAILQ_NEXT(oldpa, entries);
2406 if (pca->action == PF_CHANGE_REMOVE) {
2407 TAILQ_REMOVE(&pool->list, oldpa, entries);
2408 switch (oldpa->addr.type) {
2409 case PF_ADDR_DYNIFTL:
2410 pfi_dynaddr_remove(oldpa->addr.p.dyn);
2413 pfr_detach_table(oldpa->addr.p.tbl);
2417 pfi_kif_unref(oldpa->kif);
2418 free(oldpa, M_PFRULE);
2421 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2422 else if (pca->action == PF_CHANGE_ADD_HEAD ||
2423 pca->action == PF_CHANGE_ADD_BEFORE)
2424 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2426 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2430 pool->cur = TAILQ_FIRST(&pool->list);
2431 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
2436 DIOCCHANGEADDR_error:
2437 if (newpa != NULL) {
2439 pfi_kif_unref(newpa->kif);
2440 free(newpa, M_PFRULE);
2444 free(kif, PFI_MTYPE);
2448 case DIOCGETRULESETS: {
2449 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2450 struct pf_ruleset *ruleset;
2451 struct pf_anchor *anchor;
2454 pr->path[sizeof(pr->path) - 1] = 0;
2455 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2461 if (ruleset->anchor == NULL) {
2462 /* XXX kludge for pf_main_ruleset */
2463 RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors)
2464 if (anchor->parent == NULL)
2467 RB_FOREACH(anchor, pf_anchor_node,
2468 &ruleset->anchor->children)
2475 case DIOCGETRULESET: {
2476 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2477 struct pf_ruleset *ruleset;
2478 struct pf_anchor *anchor;
2482 pr->path[sizeof(pr->path) - 1] = 0;
2483 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2489 if (ruleset->anchor == NULL) {
2490 /* XXX kludge for pf_main_ruleset */
2491 RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors)
2492 if (anchor->parent == NULL && nr++ == pr->nr) {
2493 strlcpy(pr->name, anchor->name,
2498 RB_FOREACH(anchor, pf_anchor_node,
2499 &ruleset->anchor->children)
2500 if (nr++ == pr->nr) {
2501 strlcpy(pr->name, anchor->name,
2512 case DIOCRCLRTABLES: {
2513 struct pfioc_table *io = (struct pfioc_table *)addr;
2515 if (io->pfrio_esize != 0) {
2520 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2521 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2526 case DIOCRADDTABLES: {
2527 struct pfioc_table *io = (struct pfioc_table *)addr;
2528 struct pfr_table *pfrts;
2531 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2536 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
2537 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
2542 totlen = io->pfrio_size * sizeof(struct pfr_table);
2543 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
2545 error = copyin(io->pfrio_buffer, pfrts, totlen);
2547 free(pfrts, M_TEMP);
2551 error = pfr_add_tables(pfrts, io->pfrio_size,
2552 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2554 free(pfrts, M_TEMP);
2558 case DIOCRDELTABLES: {
2559 struct pfioc_table *io = (struct pfioc_table *)addr;
2560 struct pfr_table *pfrts;
2563 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2568 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
2569 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
2574 totlen = io->pfrio_size * sizeof(struct pfr_table);
2575 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
2577 error = copyin(io->pfrio_buffer, pfrts, totlen);
2579 free(pfrts, M_TEMP);
2583 error = pfr_del_tables(pfrts, io->pfrio_size,
2584 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2586 free(pfrts, M_TEMP);
2590 case DIOCRGETTABLES: {
2591 struct pfioc_table *io = (struct pfioc_table *)addr;
2592 struct pfr_table *pfrts;
2595 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2600 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
2601 io->pfrio_size = min(io->pfrio_size, n);
2603 totlen = io->pfrio_size * sizeof(struct pfr_table);
2605 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
2607 if (pfrts == NULL) {
2612 error = pfr_get_tables(&io->pfrio_table, pfrts,
2613 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2616 error = copyout(pfrts, io->pfrio_buffer, totlen);
2617 free(pfrts, M_TEMP);
2621 case DIOCRGETTSTATS: {
2622 struct pfioc_table *io = (struct pfioc_table *)addr;
2623 struct pfr_tstats *pfrtstats;
2626 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2631 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
2632 io->pfrio_size = min(io->pfrio_size, n);
2634 totlen = io->pfrio_size * sizeof(struct pfr_tstats);
2635 pfrtstats = mallocarray(io->pfrio_size,
2636 sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT);
2637 if (pfrtstats == NULL) {
2642 error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
2643 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2646 error = copyout(pfrtstats, io->pfrio_buffer, totlen);
2647 free(pfrtstats, M_TEMP);
2651 case DIOCRCLRTSTATS: {
2652 struct pfioc_table *io = (struct pfioc_table *)addr;
2653 struct pfr_table *pfrts;
2656 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2662 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
2663 io->pfrio_size = min(io->pfrio_size, n);
2665 totlen = io->pfrio_size * sizeof(struct pfr_table);
2666 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
2668 if (pfrts == NULL) {
2673 error = copyin(io->pfrio_buffer, pfrts, totlen);
2675 free(pfrts, M_TEMP);
2679 error = pfr_clr_tstats(pfrts, io->pfrio_size,
2680 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2682 free(pfrts, M_TEMP);
2686 case DIOCRSETTFLAGS: {
2687 struct pfioc_table *io = (struct pfioc_table *)addr;
2688 struct pfr_table *pfrts;
2691 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2697 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
2698 io->pfrio_size = min(io->pfrio_size, n);
2700 totlen = io->pfrio_size * sizeof(struct pfr_table);
2701 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
2703 if (pfrts == NULL) {
2708 error = copyin(io->pfrio_buffer, pfrts, totlen);
2710 free(pfrts, M_TEMP);
2714 error = pfr_set_tflags(pfrts, io->pfrio_size,
2715 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2716 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2718 free(pfrts, M_TEMP);
2722 case DIOCRCLRADDRS: {
2723 struct pfioc_table *io = (struct pfioc_table *)addr;
2725 if (io->pfrio_esize != 0) {
2730 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2731 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2736 case DIOCRADDADDRS: {
2737 struct pfioc_table *io = (struct pfioc_table *)addr;
2738 struct pfr_addr *pfras;
2741 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2745 if (io->pfrio_size < 0 ||
2746 io->pfrio_size > pf_ioctl_maxcount ||
2747 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
2751 totlen = io->pfrio_size * sizeof(struct pfr_addr);
2752 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
2758 error = copyin(io->pfrio_buffer, pfras, totlen);
2760 free(pfras, M_TEMP);
2764 error = pfr_add_addrs(&io->pfrio_table, pfras,
2765 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2766 PFR_FLAG_USERIOCTL);
2768 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
2769 error = copyout(pfras, io->pfrio_buffer, totlen);
2770 free(pfras, M_TEMP);
2774 case DIOCRDELADDRS: {
2775 struct pfioc_table *io = (struct pfioc_table *)addr;
2776 struct pfr_addr *pfras;
2779 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2783 if (io->pfrio_size < 0 ||
2784 io->pfrio_size > pf_ioctl_maxcount ||
2785 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
2789 totlen = io->pfrio_size * sizeof(struct pfr_addr);
2790 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
2796 error = copyin(io->pfrio_buffer, pfras, totlen);
2798 free(pfras, M_TEMP);
2802 error = pfr_del_addrs(&io->pfrio_table, pfras,
2803 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2804 PFR_FLAG_USERIOCTL);
2806 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
2807 error = copyout(pfras, io->pfrio_buffer, totlen);
2808 free(pfras, M_TEMP);
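/*
 * DIOCRSETADDRS replaces the address list of a table.  The temporary
 * buffer is sized for max(pfrio_size, pfrio_size2) because
 * pfr_set_addrs() may report back more entries than were passed in.
 */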
2812 case DIOCRSETADDRS: {
2813 struct pfioc_table *io = (struct pfioc_table *)addr;
2814 struct pfr_addr *pfras;
2815 size_t totlen, count;
2817 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2821 if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
2825 count = max(io->pfrio_size, io->pfrio_size2);
2826 if (count > pf_ioctl_maxcount ||
2827 WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
2831 totlen = count * sizeof(struct pfr_addr);
2832 pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
2838 error = copyin(io->pfrio_buffer, pfras, totlen);
2840 free(pfras, M_TEMP);
2844 error = pfr_set_addrs(&io->pfrio_table, pfras,
2845 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2846 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2847 PFR_FLAG_USERIOCTL, 0);
2849 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
2850 error = copyout(pfras, io->pfrio_buffer, totlen);
2851 free(pfras, M_TEMP);
2855 case DIOCRGETADDRS: {
2856 struct pfioc_table *io = (struct pfioc_table *)addr;
2857 struct pfr_addr *pfras;
2860 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2864 if (io->pfrio_size < 0 ||
2865 io->pfrio_size > pf_ioctl_maxcount ||
2866 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
2870 totlen = io->pfrio_size * sizeof(struct pfr_addr);
2871 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
2878 error = pfr_get_addrs(&io->pfrio_table, pfras,
2879 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2882 error = copyout(pfras, io->pfrio_buffer, totlen);
2883 free(pfras, M_TEMP);
2887 case DIOCRGETASTATS: {
2888 struct pfioc_table *io = (struct pfioc_table *)addr;
2889 struct pfr_astats *pfrastats;
2892 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2896 if (io->pfrio_size < 0 ||
2897 io->pfrio_size > pf_ioctl_maxcount ||
2898 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
2902 totlen = io->pfrio_size * sizeof(struct pfr_astats);
2903 pfrastats = mallocarray(io->pfrio_size,
2904 sizeof(struct pfr_astats), M_TEMP, M_NOWAIT);
2910 error = pfr_get_astats(&io->pfrio_table, pfrastats,
2911 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2914 error = copyout(pfrastats, io->pfrio_buffer, totlen);
2915 free(pfrastats, M_TEMP);
2919 case DIOCRCLRASTATS: {
2920 struct pfioc_table *io = (struct pfioc_table *)addr;
2921 struct pfr_addr *pfras;
2924 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2928 if (io->pfrio_size < 0 ||
2929 io->pfrio_size > pf_ioctl_maxcount ||
2930 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
2934 totlen = io->pfrio_size * sizeof(struct pfr_addr);
2935 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
2941 error = copyin(io->pfrio_buffer, pfras, totlen);
2943 free(pfras, M_TEMP);
2947 error = pfr_clr_astats(&io->pfrio_table, pfras,
2948 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2949 PFR_FLAG_USERIOCTL);
2951 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
2952 error = copyout(pfras, io->pfrio_buffer, totlen);
2953 free(pfras, M_TEMP);
2957 case DIOCRTSTADDRS: {
2958 struct pfioc_table *io = (struct pfioc_table *)addr;
2959 struct pfr_addr *pfras;
2962 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2966 if (io->pfrio_size < 0 ||
2967 io->pfrio_size > pf_ioctl_maxcount ||
2968 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
2972 totlen = io->pfrio_size * sizeof(struct pfr_addr);
2973 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
2979 error = copyin(io->pfrio_buffer, pfras, totlen);
2981 free(pfras, M_TEMP);
2985 error = pfr_tst_addrs(&io->pfrio_table, pfras,
2986 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2987 PFR_FLAG_USERIOCTL);
2990 error = copyout(pfras, io->pfrio_buffer, totlen);
2991 free(pfras, M_TEMP);
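/*
 * DIOCRINADEFINE loads an address list into the inactive copy of a
 * table as part of a ruleset transaction identified by pfrio_ticket.
 */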
2995 case DIOCRINADEFINE: {
2996 struct pfioc_table *io = (struct pfioc_table *)addr;
2997 struct pfr_addr *pfras;
3000 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3004 if (io->pfrio_size < 0 ||
3005 io->pfrio_size > pf_ioctl_maxcount ||
3006 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
3010 totlen = io->pfrio_size * sizeof(struct pfr_addr);
3011 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
3017 error = copyin(io->pfrio_buffer, pfras, totlen);
3019 free(pfras, M_TEMP);
3023 error = pfr_ina_define(&io->pfrio_table, pfras,
3024 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
3025 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3027 free(pfras, M_TEMP);
3032 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
3034 error = pf_osfp_add(io);
3040 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
3042 error = pf_osfp_get(io);
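/*
 * DIOCXBEGIN, DIOCXROLLBACK and DIOCXCOMMIT implement atomic ruleset
 * transactions: each element of the copied-in array names a ruleset
 * (or the ALTQ or table set) within an anchor and carries the ticket
 * handed out by the begin step.
 */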
3048 struct pfioc_trans *io = (struct pfioc_trans *)addr;
3049 struct pfioc_trans_e *ioes, *ioe;
3053 if (io->esize != sizeof(*ioe)) {
3058 io->size > pf_ioctl_maxcount ||
3059 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
3063 totlen = sizeof(struct pfioc_trans_e) * io->size;
3064 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
3070 error = copyin(io->array, ioes, totlen);
3076 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
3077 switch (ioe->rs_num) {
3079 case PF_RULESET_ALTQ:
3080 if (ioe->anchor[0]) {
3086 if ((error = pf_begin_altq(&ioe->ticket))) {
3093 case PF_RULESET_TABLE:
3095 struct pfr_table table;
3097 bzero(&table, sizeof(table));
3098 strlcpy(table.pfrt_anchor, ioe->anchor,
3099 sizeof(table.pfrt_anchor));
3100 if ((error = pfr_ina_begin(&table,
3101 &ioe->ticket, NULL, 0))) {
3109 if ((error = pf_begin_rules(&ioe->ticket,
3110 ioe->rs_num, ioe->anchor))) {
3119 error = copyout(ioes, io->array, totlen);
3124 case DIOCXROLLBACK: {
3125 struct pfioc_trans *io = (struct pfioc_trans *)addr;
3126 struct pfioc_trans_e *ioe, *ioes;
3130 if (io->esize != sizeof(*ioe)) {
3135 io->size > pf_ioctl_maxcount ||
3136 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
3140 totlen = sizeof(struct pfioc_trans_e) * io->size;
3141 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
3147 error = copyin(io->array, ioes, totlen);
3153 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
3154 switch (ioe->rs_num) {
3156 case PF_RULESET_ALTQ:
3157 if (ioe->anchor[0]) {
3163 if ((error = pf_rollback_altq(ioe->ticket))) {
3166 goto fail; /* really bad */
3170 case PF_RULESET_TABLE:
3172 struct pfr_table table;
3174 bzero(&table, sizeof(table));
3175 strlcpy(table.pfrt_anchor, ioe->anchor,
3176 sizeof(table.pfrt_anchor));
3177 if ((error = pfr_ina_rollback(&table,
3178 ioe->ticket, NULL, 0))) {
3181 goto fail; /* really bad */
3186 if ((error = pf_rollback_rules(ioe->ticket,
3187 ioe->rs_num, ioe->anchor))) {
3190 goto fail; /* really bad */
3201 struct pfioc_trans *io = (struct pfioc_trans *)addr;
3202 struct pfioc_trans_e *ioe, *ioes;
3203 struct pf_ruleset *rs;
3207 if (io->esize != sizeof(*ioe)) {
3213 io->size > pf_ioctl_maxcount ||
3214 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
3219 totlen = sizeof(struct pfioc_trans_e) * io->size;
3220 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
3226 error = copyin(io->array, ioes, totlen);
3232 /* First make sure everything will succeed. */
3233 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
3234 switch (ioe->rs_num) {
3236 case PF_RULESET_ALTQ:
3237 if (ioe->anchor[0]) {
3243 if (!V_altqs_inactive_open || ioe->ticket !=
3244 V_ticket_altqs_inactive) {
3252 case PF_RULESET_TABLE:
3253 rs = pf_find_ruleset(ioe->anchor);
3254 if (rs == NULL || !rs->topen || ioe->ticket !=
3263 if (ioe->rs_num < 0 || ioe->rs_num >=
3270 rs = pf_find_ruleset(ioe->anchor);
3272 !rs->rules[ioe->rs_num].inactive.open ||
3273 rs->rules[ioe->rs_num].inactive.ticket !=
3283 /* Now do the commit - no errors should happen here. */
3284 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
3285 switch (ioe->rs_num) {
3287 case PF_RULESET_ALTQ:
3288 if ((error = pf_commit_altq(ioe->ticket))) {
3291 goto fail; /* really bad */
3295 case PF_RULESET_TABLE:
3297 struct pfr_table table;
3299 bzero(&table, sizeof(table));
3300 strlcpy(table.pfrt_anchor, ioe->anchor,
3301 sizeof(table.pfrt_anchor));
3302 if ((error = pfr_ina_commit(&table,
3303 ioe->ticket, NULL, NULL, 0))) {
3306 goto fail; /* really bad */
3311 if ((error = pf_commit_rules(ioe->ticket,
3312 ioe->rs_num, ioe->anchor))) {
3315 goto fail; /* really bad */
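/*
 * DIOCGETSRCNODES: with psn_len == 0 only the required buffer size is
 * reported; otherwise the source node hash is walked and each node is
 * copied out with its timestamps converted to values relative to now
 * and its connection rate estimate adjusted.
 */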
3325 case DIOCGETSRCNODES: {
3326 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
3327 struct pf_srchash *sh;
3328 struct pf_src_node *n, *p, *pstore;
3331 if (psn->psn_len == 0) {
3332 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
3334 PF_HASHROW_LOCK(sh);
3335 LIST_FOREACH(n, &sh->nodes, entry)
3337 PF_HASHROW_UNLOCK(sh);
3339 psn->psn_len = sizeof(struct pf_src_node) * nr;
3343 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK);
3344 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
3346 PF_HASHROW_LOCK(sh);
3347 LIST_FOREACH(n, &sh->nodes, entry) {
3348 int secs = time_uptime, diff;
3350 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
3353 bcopy(n, p, sizeof(struct pf_src_node));
3354 if (n->rule.ptr != NULL)
3355 p->rule.nr = n->rule.ptr->nr;
3356 p->creation = secs - p->creation;
3357 if (p->expire > secs)
3362 /* Adjust the connection rate estimate. */
3363 diff = secs - n->conn_rate.last;
3364 if (diff >= n->conn_rate.seconds)
3365 p->conn_rate.count = 0;
3367 p->conn_rate.count -=
3368 n->conn_rate.count * diff /
3369 n->conn_rate.seconds;
3373 PF_HASHROW_UNLOCK(sh);
3375 error = copyout(pstore, psn->psn_src_nodes,
3376 sizeof(struct pf_src_node) * nr);
3378 free(pstore, M_TEMP);
3381 psn->psn_len = sizeof(struct pf_src_node) * nr;
3382 free(pstore, M_TEMP);
3386 case DIOCCLRSRCNODES: {
3388 pf_clear_srcnodes(NULL);
3389 pf_purge_expired_src_nodes();
3393 case DIOCKILLSRCNODES:
3394 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
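/*
 * DIOCSETHOSTID: a host ID of 0 requests a randomly generated one.
 */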
3397 case DIOCSETHOSTID: {
3398 u_int32_t *hostid = (u_int32_t *)addr;
3402 V_pf_status.hostid = arc4random();
3404 V_pf_status.hostid = *hostid;
3415 case DIOCIGETIFACES: {
3416 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3417 struct pfi_kif *ifstore;
3420 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
3425 if (io->pfiio_size < 0 ||
3426 io->pfiio_size > pf_ioctl_maxcount ||
3427 WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
3432 bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
3433 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
3435 if (ifstore == NULL) {
3441 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
3443 error = copyout(ifstore, io->pfiio_buffer, bufsiz);
3444 free(ifstore, M_TEMP);
3448 case DIOCSETIFFLAG: {
3449 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3452 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
3457 case DIOCCLRIFFLAG: {
3458 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3461 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
3471 if (sx_xlocked(&pf_ioctl_lock))
3472 sx_xunlock(&pf_ioctl_lock);
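/*
 * Export an in-kernel state entry into the wire format used by pfsync,
 * converting multi-byte fields to network byte order.
 */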
3479 pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
3481 bzero(sp, sizeof(struct pfsync_state));
3483 /* copy from state key */
3484 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
3485 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
3486 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
3487 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
3488 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
3489 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
3490 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
3491 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
3492 sp->proto = st->key[PF_SK_WIRE]->proto;
3493 sp->af = st->key[PF_SK_WIRE]->af;
3495 /* copy from state */
3496 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
3497 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
3498 sp->creation = htonl(time_uptime - st->creation);
3499 sp->expire = pf_state_expires(st);
3500 if (sp->expire <= time_uptime)
3501 sp->expire = htonl(0);
3503 sp->expire = htonl(sp->expire - time_uptime);
3505 sp->direction = st->direction;
3507 sp->timeout = st->timeout;
3508 sp->state_flags = st->state_flags;
3510 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
3511 if (st->nat_src_node)
3512 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
3515 sp->creatorid = st->creatorid;
3516 pf_state_peer_hton(&st->src, &sp->src);
3517 pf_state_peer_hton(&st->dst, &sp->dst);
3519 if (st->rule.ptr == NULL)
3520 sp->rule = htonl(-1);
3522 sp->rule = htonl(st->rule.ptr->nr);
3523 if (st->anchor.ptr == NULL)
3524 sp->anchor = htonl(-1);
3526 sp->anchor = htonl(st->anchor.ptr->nr);
3527 if (st->nat_rule.ptr == NULL)
3528 sp->nat_rule = htonl(-1);
3530 sp->nat_rule = htonl(st->nat_rule.ptr->nr);
3532 pf_state_counter_hton(st->packets[0], sp->packets[0]);
3533 pf_state_counter_hton(st->packets[1], sp->packets[1]);
3534 pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
3535 pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
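/*
 * Report the address count of a table-backed address wrap to userland,
 * falling back to the root table when the attached table is not active.
 */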
3540 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
3542 struct pfr_ktable *kt;
3544 KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
3547 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
3548 kt = kt->pfrkt_root;
3550 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
3555 * XXX - Check for version mismatch!!!
3558 pf_clear_states(void)
3563 for (i = 0; i <= pf_hashmask; i++) {
3564 struct pf_idhash *ih = &V_pf_idhash[i];
3566 PF_HASHROW_LOCK(ih);
3567 LIST_FOREACH(s, &ih->states, entry) {
3568 s->timeout = PFTM_PURGE;
3569 /* Don't send out individual delete messages. */
3570 s->state_flags |= PFSTATE_NOSYNC;
3571 pf_unlink_state(s, PF_ENTER_LOCKED);
3574 PF_HASHROW_UNLOCK(ih);
3579 pf_clear_tables(void)
3581 struct pfioc_table io;
3584 bzero(&io, sizeof(io));
3586 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
3593 pf_clear_srcnodes(struct pf_src_node *n)
3598 for (i = 0; i <= pf_hashmask; i++) {
3599 struct pf_idhash *ih = &V_pf_idhash[i];
3601 PF_HASHROW_LOCK(ih);
3602 LIST_FOREACH(s, &ih->states, entry) {
3603 if (n == NULL || n == s->src_node)
3605 if (n == NULL || n == s->nat_src_node)
3606 s->nat_src_node = NULL;
3608 PF_HASHROW_UNLOCK(ih);
3612 struct pf_srchash *sh;
3614 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
3616 PF_HASHROW_LOCK(sh);
3617 LIST_FOREACH(n, &sh->nodes, entry) {
3621 PF_HASHROW_UNLOCK(sh);
3624 /* XXX: hash slot should already be locked here. */
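/*
 * Collect source nodes matching the given source/destination masks on
 * a kill list, clear any state pointers that still reference them,
 * then free the collected nodes and report the count to the caller.
 */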
3631 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
3633 struct pf_src_node_list kill;
3636 for (int i = 0; i <= pf_srchashmask; i++) {
3637 struct pf_srchash *sh = &V_pf_srchash[i];
3638 struct pf_src_node *sn, *tmp;
3640 PF_HASHROW_LOCK(sh);
3641 LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
3642 if (PF_MATCHA(psnk->psnk_src.neg,
3643 &psnk->psnk_src.addr.v.a.addr,
3644 &psnk->psnk_src.addr.v.a.mask,
3645 &sn->addr, sn->af) &&
3646 PF_MATCHA(psnk->psnk_dst.neg,
3647 &psnk->psnk_dst.addr.v.a.addr,
3648 &psnk->psnk_dst.addr.v.a.mask,
3649 &sn->raddr, sn->af)) {
3650 pf_unlink_src_node(sn);
3651 LIST_INSERT_HEAD(&kill, sn, entry);
3654 PF_HASHROW_UNLOCK(sh);
3657 for (int i = 0; i <= pf_hashmask; i++) {
3658 struct pf_idhash *ih = &V_pf_idhash[i];
3661 PF_HASHROW_LOCK(ih);
3662 LIST_FOREACH(s, &ih->states, entry) {
3663 if (s->src_node && s->src_node->expire == 1)
3665 if (s->nat_src_node && s->nat_src_node->expire == 1)
3666 s->nat_src_node = NULL;
3668 PF_HASHROW_UNLOCK(ih);
3671 psnk->psnk_killed = pf_free_src_nodes(&kill);
3675 * XXX - Check for version mismatch!!!
3679 * Duplicate pfctl -Fa operation to get rid of as much as we can.
3689 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
3691 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
3694 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
3696 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
3697 break; /* XXX: rollback? */
3699 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
3701 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
3702 break; /* XXX: rollback? */
3704 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
3706 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
3707 break; /* XXX: rollback? */
3709 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
3711 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
3712 break; /* XXX: rollback? */
3715 /* XXX: these should always succeed here */
3716 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
3717 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
3718 pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
3719 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
3720 pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
3722 if ((error = pf_clear_tables()) != 0)
3726 if ((error = pf_begin_altq(&t[0])) != 0) {
3727 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
3730 pf_commit_altq(t[0]);
3735 pf_clear_srcnodes(NULL);
3737 /* status does not use malloc'ed memory, so no cleanup is needed */
3738 /* fingerprints and interfaces have their own cleanup code */
3740 /* Free counters last as we updated them during shutdown. */
3741 counter_u64_free(V_pf_default_rule.states_cur);
3742 counter_u64_free(V_pf_default_rule.states_tot);
3743 counter_u64_free(V_pf_default_rule.src_nodes);
3745 for (int i = 0; i < PFRES_MAX; i++)
3746 counter_u64_free(V_pf_status.counters[i]);
3747 for (int i = 0; i < LCNT_MAX; i++)
3748 counter_u64_free(V_pf_status.lcounters[i]);
3749 for (int i = 0; i < FCNT_MAX; i++)
3750 counter_u64_free(V_pf_status.fcounters[i]);
3751 for (int i = 0; i < SCNT_MAX; i++)
3752 counter_u64_free(V_pf_status.scounters[i]);
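/*
 * pfil(9) hooks: dispatch inbound and outbound IPv4 and IPv6 packets
 * to pf_test() and pf_test6() respectively.
 */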
3760 pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
3765 chk = pf_test(PF_IN, flags, ifp, m, inp);
3777 pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
3782 chk = pf_test(PF_OUT, flags, ifp, m, inp);
3796 pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
3802 * In case of loopback traffic IPv6 uses the real interface in
3803 * order to support scoped addresses. To support stateful
3804 * filtering we have to change this to lo0, as is done in IPv4.
3806 CURVNET_SET(ifp->if_vnet);
3807 chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp);
3819 pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
3824 CURVNET_SET(ifp->if_vnet);
3825 chk = pf_test6(PF_OUT, flags, ifp, m, inp);
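/*
 * hook_pf() attaches the pf_check* wrappers to the AF_INET and
 * AF_INET6 pfil heads; dehook_pf() removes them again.
 */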
3841 struct pfil_head *pfh_inet;
3844 struct pfil_head *pfh_inet6;
3847 if (V_pf_pfil_hooked)
3851 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3852 if (pfh_inet == NULL)
3853 return (ESRCH); /* XXX */
3854 pfil_add_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
3855 pfil_add_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
3858 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3859 if (pfh_inet6 == NULL) {
3861 pfil_remove_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
3863 pfil_remove_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
3866 return (ESRCH); /* XXX */
3868 pfil_add_hook_flags(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
3869 pfil_add_hook_flags(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
3872 V_pf_pfil_hooked = 1;
3880 struct pfil_head *pfh_inet;
3883 struct pfil_head *pfh_inet6;
3886 if (V_pf_pfil_hooked == 0)
3890 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3891 if (pfh_inet == NULL)
3892 return (ESRCH); /* XXX */
3893 pfil_remove_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
3895 pfil_remove_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
3899 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3900 if (pfh_inet6 == NULL)
3901 return (ESRCH); /* XXX */
3902 pfil_remove_hook_flags(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
3904 pfil_remove_hook_flags(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
3908 V_pf_pfil_hooked = 0;
3915 TAILQ_INIT(&V_pf_tags);
3916 TAILQ_INIT(&V_pf_qids);
3919 V_pf_vnet_active = 1;
3927 rw_init(&pf_rules_lock, "pf rulesets");
3928 sx_init(&pf_ioctl_lock, "pf ioctl");
3929 sx_init(&pf_end_lock, "pf end thread");
3931 pf_mtag_initialize();
3933 pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
3938 error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
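/*
 * Per-vnet teardown: stop pf, remove the software interrupt handler,
 * unhook from pfil and purge the remaining per-vnet state.
 */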
3948 pf_unload_vnet(void)
3952 V_pf_vnet_active = 0;
3953 V_pf_status.running = 0;
3954 swi_remove(V_pf_swi_cookie);
3955 error = dehook_pf();
3958 * Should not happen!
3959 * XXX Due to error code ESRCH, kldunload will show
3960 * a message like 'No such process'.
3962 printf("%s : pfil unregisteration fail\n", __FUNCTION__);
3970 pf_unload_vnet_purge();
3972 pf_normalize_cleanup();
3979 if (IS_DEFAULT_VNET(curvnet))
3987 sx_xlock(&pf_end_lock);
3989 while (pf_end_threads < 2) {
3990 wakeup_one(pf_purge_thread);
3991 sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
3993 sx_xunlock(&pf_end_lock);
3996 destroy_dev(pf_dev);
4000 rw_destroy(&pf_rules_lock);
4001 sx_destroy(&pf_ioctl_lock);
4002 sx_destroy(&pf_end_lock);
4006 vnet_pf_init(void *unused __unused)
4011 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
4012 vnet_pf_init, NULL);
4015 vnet_pf_uninit(const void *unused __unused)
4020 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
4021 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
4022 vnet_pf_uninit, NULL);
4026 pf_modevent(module_t mod, int type, void *data)
4035 /* Handled in SYSUNINIT(pf_unload) to ensure it runs after
4036 * the vnet_pf_uninit() calls. */
4046 static moduledata_t pf_mod = {
4052 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
4053 MODULE_VERSION(pf, PF_MODVER);