/*-
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/ucred.h>

#include <net/if_var.h>
#include <net/route.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip6.h>

#include <net/altq/altq.h>

static struct pf_pool	*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
			    u_int8_t, u_int8_t, u_int8_t);

static void		 pf_mv_pool(struct pf_palist *, struct pf_palist *);
static void		 pf_empty_pool(struct pf_palist *);
static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
			    struct thread *);
static int		 pf_begin_altq(u_int32_t *);
static int		 pf_rollback_altq(u_int32_t);
static int		 pf_commit_altq(u_int32_t);
static int		 pf_enable_altq(struct pf_altq *);
static int		 pf_disable_altq(struct pf_altq *);
static u_int32_t	 pf_qname2qid(char *);
static void		 pf_qid_unref(u_int32_t);
static int		 pf_begin_rules(u_int32_t *, int, const char *);
static int		 pf_rollback_rules(u_int32_t, int, char *);
static int		 pf_setup_pfsync_matching(struct pf_ruleset *);
static void		 pf_hash_rule(MD5_CTX *, struct pf_rule *);
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int		 pf_commit_rules(u_int32_t, int, char *);
static int		 pf_addr_setup(struct pf_ruleset *,
			    struct pf_addr_wrap *, sa_family_t);
static void		 pf_addr_copyout(struct pf_addr_wrap *);

VNET_DEFINE(struct pf_rule,	pf_default_rule);

static VNET_DEFINE(int,		pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)

#define	TAGID_MAX	50000
struct pf_tagname {
	TAILQ_ENTRY(pf_tagname)	entries;
	char			name[PF_TAG_NAME_SIZE];
	u_int16_t		tag;
	int			ref;
};

TAILQ_HEAD(pf_tags, pf_tagname);
#define	V_pf_tags	VNET(pf_tags)
VNET_DEFINE(struct pf_tags, pf_tags);
#define	V_pf_qids	VNET(pf_qids)
VNET_DEFINE(struct pf_tags, pf_qids);
static MALLOC_DEFINE(M_PFTAG, "pf_tag", "pf(4) tag names");
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

static u_int16_t	 tagname2tag(struct pf_tags *, char *);
static u_int16_t	 pf_tagname2tag(char *);
static void		 tag_unref(struct pf_tags *, u_int16_t);

#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x
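/*
 * DPFPRINTF() takes its printf(9) arguments as a single parenthesized
 * argument, so callers use double parentheses, e.g.:
 *
 *	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
 */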
/*
 * XXX - These are new and need to be checked when moving to a new version.
 */
static void		 pf_clear_states(void);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(struct pf_src_node *);
static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);

/*
 * Wrapper functions for pfil(9) hooks
 */
static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, int flags, struct inpcb *inp);
static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, int flags, struct inpcb *inp);
#ifdef INET6
static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, int flags, struct inpcb *inp);
static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, int flags, struct inpcb *inp);
#endif

static int		 hook_pf(void);
static int		 dehook_pf(void);
static int		 shutdown_pf(void);
static int		 pf_load(void);
static int		 pf_unload(void);

static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

static volatile VNET_DEFINE(int, pf_pfil_hooked);
#define V_pf_pfil_hooked	VNET(pf_pfil_hooked)

/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid". We primarily need this to control (global)
 * external events, e.g., eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define V_pf_vnet_active	VNET(pf_vnet_active)

struct rmlock		pf_rules_lock;
struct sx		pf_ioctl_lock;

/* pfsync */
VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
/* pflog */
pflog_packet_t *pflog_packet_ptr = NULL;

extern u_long pf_ioctl_maxcount;

static void
pfattach_vnet(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;

	pfi_initialize_vnet();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
	V_pf_default_rule.action = PF_DROP;
#else
	V_pf_default_rule.action = PF_PASS;
#endif
	V_pf_default_rule.nr = -1;
	V_pf_default_rule.rtableid = -1;

	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	bzero(&V_pf_status, sizeof(V_pf_status));
	V_pf_status.debug = PF_DEBUG_URGENT;

	V_pf_pfil_hooked = 0;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	for (int i = 0; i < PFRES_MAX; i++)
		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < LCNT_MAX; i++)
		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < FCNT_MAX; i++)
		V_pf_status.fcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < SCNT_MAX; i++)
		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

	if (swi_add(NULL, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
		/* XXXGL: leaked all above. */
		return;
}

static struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

static void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr *mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

static void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr *pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			/* XXX: this could be unfinished pooladdr on pabuf */
			if (pa->addr.p.tbl != NULL)
				pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}

static void
pf_unlink_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{

	PF_RULES_WASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	PF_UNLNKDRULES_LOCK();
	rule->rule_flag |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
	PF_UNLNKDRULES_UNLOCK();
}

static void
pf_free_rule(struct pf_rule *rule)
{

	PF_RULES_WASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kif_unref(rule->kif);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	counter_u64_free(rule->states_cur);
	counter_u64_free(rule->states_tot);
	counter_u64_free(rule->src_nodes);
	free(rule, M_PFRULE);
}
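/*
 * Tag and queue names are mapped to numeric ids by the allocator below.
 * tagname2tag() keeps one refcounted pf_tagname entry per name on a list
 * sorted by id and hands out the first free id, so the id space stays
 * compact; tag_unref() frees an entry once its last reference is gone.
 */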
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	PF_RULES_WASSERT();

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = malloc(sizeof(*tag), M_PFTAG, M_NOWAIT|M_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref = 1;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

static void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	PF_RULES_WASSERT();

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				free(p, M_PFTAG);
			}
			break;
		}
	}
}

static u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&V_pf_tags, tagname));
}

#ifdef ALTQ
static u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&V_pf_qids, qname));
}

static void
pf_qid_unref(u_int32_t qid)
{
	tag_unref(&V_pf_qids, (u_int16_t)qid);
}
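/*
 * Queue ids share the tag allocator above: a qid is simply a tag drawn
 * from V_pf_qids, widened to u_int32_t at the interface and truncated
 * back to u_int16_t internally.
 */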
static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}

static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = V_pf_altqs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}

	V_altqs_inactive_open = 0;
	return (error);
}
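/*
 * The three functions above implement the ticket-based transaction model
 * used throughout this file: pf_begin_altq() purges the inactive list and
 * hands userland a ticket, staged entries accumulate on the inactive list
 * under that ticket, and pf_commit_altq() atomically swaps the inactive
 * and active lists, while pf_rollback_altq() discards the staged list.
 */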
static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * When the discipline is no longer referenced, it was overridden
	 * by a new one. If so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct ifnet	*ifp1;
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if (a2->qname[0] != 0) {
			if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
				error = EBUSY;
				free(a2, M_PFALTQ);
				break;
			}
			a2->altq_disc = NULL;
			TAILQ_FOREACH(a3, V_pf_altqs_inactive, entries) {
				if (strncmp(a3->ifname, a2->ifname,
				    IFNAMSIZ) == 0 && a3->qname[0] == 0) {
					a2->altq_disc = a3->altq_disc;
					break;
				}
			}
		}
		/* Deactivate the interface in question */
		a2->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
		if ((ifp1 = ifunit(a2->ifname)) == NULL ||
		    (remove && ifp1 == ifp)) {
			a2->local_flags |= PFALTQ_FLAG_IF_REMOVED;
		} else {
			error = altq_add(a2);

			if (ticket != V_ticket_altqs_inactive)
				error = EBUSY;

			if (error) {
				free(a2, M_PFALTQ);
				break;
			}
		}

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
#endif /* ALTQ */

static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

#define PF_MD5_UPD(st, elm)						\
	MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
	MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
	(stor) = htonl((st)->elm);					\
	MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));	\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
	(stor) = htons((st)->elm);					\
	MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));	\
} while (0)
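/*
 * pf_hash_rule_addr() and pf_hash_rule() below feed each rule's fields
 * through MD5 using the macros above; the htonl()/htons() staging keeps
 * the digest byte-order independent, so peers on different architectures
 * compute the same ruleset checksum.
 */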
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 error;
	u_int32_t		 old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);

	return (0);
}
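/*
 * pf_setup_pfsync_matching() below builds the flat per-ruleset rule
 * pointer arrays and hashes every rule into a single MD5 digest, stored
 * in V_pf_status.pf_chksum; pfsync peers compare this checksum to decide
 * whether they are running the same ruleset.
 */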
static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}

static int
pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}

static void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}
static int
pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error = 0;
	PF_RULES_RLOCK_TRACKER;

	/* XXX keep in sync with switch() below */
	if (securelevel_gt(td->td_ucred, 2))
		switch (cmd) {
		case DIOCSETSTATUSIF:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		default:
			return (EPERM);
		}

	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETTIMEOUT:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETASTATS:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return (EACCES);
		case DIOCGETRULE:
			if (((struct pfioc_rule *)addr)->action ==
			    PF_GET_CLR_CNTR)
				return (EACCES);
			break;
		default:
			return (EACCES);
		}

	CURVNET_SET(TD_TO_VNET(td));

	switch (cmd) {
	case DIOCSTART:
		sx_xlock(&pf_ioctl_lock);
		if (V_pf_status.running)
			error = EEXIST;
		else {
			int cpu;

			error = hook_pf();
			if (error) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: pfil registration failed\n"));
				break;
			}
			V_pf_status.running = 1;
			V_pf_status.since = time_second;

			CPU_FOREACH(cpu)
				V_pf_stateid[cpu] = time_second;

			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
		}
		break;

	case DIOCSTOP:
		sx_xlock(&pf_ioctl_lock);
		if (!V_pf_status.running)
			error = ENOENT;
		else {
			V_pf_status.running = 0;
			error = dehook_pf();
			if (error) {
				V_pf_status.running = 1;
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: pfil unregistration failed\n"));
			}
			V_pf_status.since = time_second;
			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
		}
		break;
	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail;
		struct pf_pooladdr	*pa;
		struct pfi_kif		*kif = NULL;
		int			 rs_num;

		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}
#ifndef INET
		if (pr->rule.af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (pr->rule.af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */

		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
		bcopy(&pr->rule, rule, sizeof(struct pf_rule));
		if (rule->ifname[0])
			kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
		rule->states_cur = counter_u64_alloc(M_WAITOK);
		rule->states_tot = counter_u64_alloc(M_WAITOK);
		rule->src_nodes = counter_u64_alloc(M_WAITOK);
		rule->cuid = td->td_ucred->cr_ruid;
		rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
		TAILQ_INIT(&rule->rpool.list);

#define	ERROUT(x)	{ error = (x); goto DIOCADDRULE_error; }

		PF_RULES_WLOCK();
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL)
			ERROUT(EINVAL);
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX)
			ERROUT(EINVAL);
		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
			DPFPRINTF(PF_DEBUG_MISC,
			    ("ticket: %d != [%d]%d\n", pr->ticket, rs_num,
			    ruleset->rules[rs_num].inactive.ticket));
			ERROUT(EBUSY);
		}
		if (pr->pool_ticket != V_ticket_pabuf) {
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pool_ticket: %d != %d\n", pr->pool_ticket,
			    V_ticket_pabuf));
			ERROUT(EBUSY);
		}

		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;
		if (rule->ifname[0]) {
			rule->kif = pfi_kif_attach(kif, rule->ifname);
			pfi_kif_ref(rule->kif);
		} else
			rule->kif = NULL;

		if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
			error = EBUSY;

#ifdef ALTQ
		/* set queue IDs */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
			else if (rule->pqname[0] != 0) {
				if ((rule->pqid =
				    pf_qname2qid(rule->pqname)) == 0)
					error = EBUSY;
			} else
				rule->pqid = rule->qid;
		}
#endif
		if (rule->tagname[0])
			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
				error = EBUSY;
		if (rule->match_tagname[0])
			if ((rule->match_tag =
			    pf_tagname2tag(rule->match_tagname)) == 0)
				error = EBUSY;
		if (rule->rt && !rule->direction)
			error = EINVAL;
		if (!rule->log)
			rule->logif = 0;
		if (rule->logif >= PFLOGIFS_MAX)
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
			error = ENOMEM;
		if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
			error = ENOMEM;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		if (rule->scrub_flags & PFSTATE_SETPRIO &&
		    (rule->set_prio[0] > PF_PRIO_MAX ||
		    rule->set_prio[1] > PF_PRIO_MAX))
			error = EINVAL;
		TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
			if (pa->addr.type == PF_ADDR_TABLE) {
				pa->addr.p.tbl = pfr_attach_table(ruleset,
				    pa->addr.v.tblname);
				if (pa->addr.p.tbl == NULL)
					error = ENOMEM;
			}

		rule->overload_tbl = NULL;
		if (rule->overload_tblname[0]) {
			if ((rule->overload_tbl = pfr_attach_table(ruleset,
			    rule->overload_tblname)) == NULL)
				error = EINVAL;
			else
				rule->overload_tbl->pfrkt_flags |=
				    PFR_TFLAG_ACTIVE;
		}

		pf_mv_pool(&V_pf_pabuf, &rule->rpool.list);
		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
		    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
		    (rule->rt > PF_FASTROUTE)) &&
		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
			error = EINVAL;

		if (error) {
			pf_free_rule(rule);
			PF_RULES_WUNLOCK();
			break;
		}

		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
		rule->evaluations = rule->packets[0] = rule->packets[1] =
		    rule->bytes[0] = rule->bytes[1] = 0;
		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
		    rule, entries);
		ruleset->rules[rs_num].inactive.rcount++;
		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCADDRULE_error:
		PF_RULES_WUNLOCK();
		counter_u64_free(rule->states_cur);
		counter_u64_free(rule->states_tot);
		counter_u64_free(rule->src_nodes);
		free(rule, M_PFRULE);
		if (kif)
			free(kif, PFI_MTYPE);
		break;
	}
	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;
		int			 rs_num;

		PF_RULES_WLOCK();
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 rs_num, i;

		PF_RULES_WLOCK();
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}
		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
		pr->rule.u_states_cur = counter_u64_fetch(rule->states_cur);
		pr->rule.u_states_tot = counter_u64_fetch(rule->states_tot);
		pr->rule.u_src_nodes = counter_u64_fetch(rule->src_nodes);
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}
		pf_addr_copyout(&pr->rule.src.addr);
		pf_addr_copyout(&pr->rule.dst.addr);
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = -1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
			counter_u64_zero(rule->states_tot);
		}
		PF_RULES_WUNLOCK();
		break;
	}
	case DIOCCHANGERULE: {
		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*oldrule = NULL, *newrule = NULL;
		struct pfi_kif		*kif = NULL;
		struct pf_pooladdr	*pa;
		u_int32_t		 nr = 0;
		int			 rs_num;

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}
		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
#ifndef INET
			if (pcr->rule.af == AF_INET) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (pcr->rule.af == AF_INET6) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			newrule = malloc(sizeof(*newrule), M_PFRULE, M_WAITOK);
			bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
			if (newrule->ifname[0])
				kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
			newrule->states_cur = counter_u64_alloc(M_WAITOK);
			newrule->states_tot = counter_u64_alloc(M_WAITOK);
			newrule->src_nodes = counter_u64_alloc(M_WAITOK);
			newrule->cuid = td->td_ucred->cr_ruid;
			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
			TAILQ_INIT(&newrule->rpool.list);
		}

#define	ERROUT(x)	{ error = (x); goto DIOCCHANGERULE_error; }

		PF_RULES_WLOCK();
		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != V_ticket_pabuf)
			ERROUT(EBUSY);

		ruleset = pf_find_ruleset(pcr->anchor);
		if (ruleset == NULL)
			ERROUT(EINVAL);

		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX)
			ERROUT(EINVAL);

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			ERROUT(0);
		} else if (pcr->ticket !=
		    ruleset->rules[rs_num].active.ticket)
			ERROUT(EINVAL);

		if (pcr->action != PF_CHANGE_REMOVE) {
			if (newrule->ifname[0]) {
				newrule->kif = pfi_kif_attach(kif,
				    newrule->ifname);
				pfi_kif_ref(newrule->kif);
			} else
				newrule->kif = NULL;

			if (newrule->rtableid > 0 &&
			    newrule->rtableid >= rt_numfibs)
				error = EBUSY;

#ifdef ALTQ
			/* set queue IDs */
			if (newrule->qname[0] != 0) {
				if ((newrule->qid =
				    pf_qname2qid(newrule->qname)) == 0)
					error = EBUSY;
				else if (newrule->pqname[0] != 0) {
					if ((newrule->pqid =
					    pf_qname2qid(newrule->pqname)) == 0)
						error = EBUSY;
				} else
					newrule->pqid = newrule->qid;
			}
#endif /* ALTQ */
			if (newrule->tagname[0])
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0)
					error = EBUSY;
			if (newrule->match_tagname[0])
				if ((newrule->match_tag = pf_tagname2tag(
				    newrule->match_tagname)) == 0)
					error = EBUSY;
			if (newrule->rt && !newrule->direction)
				error = EINVAL;
			if (!newrule->log)
				newrule->logif = 0;
			if (newrule->logif >= PFLOGIFS_MAX)
				error = EINVAL;
			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
				error = ENOMEM;
			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
				error = ENOMEM;
			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
				error = EINVAL;
			TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
				if (pa->addr.type == PF_ADDR_TABLE) {
					pa->addr.p.tbl =
					    pfr_attach_table(ruleset,
					    pa->addr.v.tblname);
					if (pa->addr.p.tbl == NULL)
						error = ENOMEM;
				}

			newrule->overload_tbl = NULL;
			if (newrule->overload_tblname[0]) {
				if ((newrule->overload_tbl = pfr_attach_table(
				    ruleset, newrule->overload_tblname)) ==
				    NULL)
					error = EINVAL;
				else
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
			}

			pf_mv_pool(&V_pf_pabuf, &newrule->rpool.list);
			if (((((newrule->action == PF_NAT) ||
			    (newrule->action == PF_RDR) ||
			    (newrule->action == PF_BINAT) ||
			    (newrule->rt > PF_FASTROUTE)) &&
			    !newrule->anchor)) &&
			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
				error = EINVAL;

			if (error) {
				pf_free_rule(newrule);
				PF_RULES_WUNLOCK();
				break;
			}

			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
			newrule->evaluations = 0;
			newrule->packets[0] = newrule->packets[1] = 0;
			newrule->bytes[0] = newrule->bytes[1] = 0;
		}
		pf_empty_pool(&V_pf_pabuf);

		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(
			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
		else {
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				if (newrule != NULL)
					pf_free_rule(newrule);
				PF_RULES_WUNLOCK();
				error = EINVAL;
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
			    oldrule);
			ruleset->rules[rs_num].active.rcount--;
		} else {
			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(
				    ruleset->rules[rs_num].active.ptr,
				    newrule, entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(
				    ruleset->rules[rs_num].active.ptr,
				    oldrule, newrule, entries);
			ruleset->rules[rs_num].active.rcount++;
		}

		nr = 0;
		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
			oldrule->nr = nr++;

		ruleset->rules[rs_num].active.ticket++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_ruleset(ruleset);

		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCCHANGERULE_error:
		PF_RULES_WUNLOCK();
		if (newrule != NULL) {
			counter_u64_free(newrule->states_cur);
			counter_u64_free(newrule->states_tot);
			counter_u64_free(newrule->src_nodes);
			free(newrule, M_PFRULE);
		}
		if (kif != NULL)
			free(kif, PFI_MTYPE);
		break;
	}
	case DIOCCLRSTATES: {
		struct pf_state		*s;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		u_int			 i, killed = 0;

		for (i = 0; i <= pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];

relock_DIOCCLRSTATES:
			PF_HASHROW_LOCK(ih);
			LIST_FOREACH(s, &ih->states, entry)
				if (!psk->psk_ifname[0] ||
				    !strcmp(psk->psk_ifname,
				    s->kif->pfik_name)) {
					/*
					 * Don't send out individual
					 * delete messages.
					 */
					s->state_flags |= PFSTATE_NOSYNC;
					pf_unlink_state(s, PF_ENTER_LOCKED);
					killed++;
					goto relock_DIOCCLRSTATES;
				}
			PF_HASHROW_UNLOCK(ih);
		}
		psk->psk_killed = killed;
		if (V_pfsync_clear_states_ptr != NULL)
			V_pfsync_clear_states_ptr(V_pf_status.hostid, psk->psk_ifname);
		break;
	}
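	/*
	 * Note the relock dance above: pf_unlink_state() with
	 * PF_ENTER_LOCKED consumes the hash row lock, so after each kill
	 * the row is re-locked and rescanned from the head instead of
	 * continuing a LIST_FOREACH() whose cursor may be stale.
	 */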
	case DIOCKILLSTATES: {
		struct pf_state		*s;
		struct pf_state_key	*sk;
		struct pf_addr		*srcaddr, *dstaddr;
		u_int16_t		 srcport, dstport;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		u_int			 i, killed = 0;

		if (psk->psk_pfcmp.id) {
			if (psk->psk_pfcmp.creatorid == 0)
				psk->psk_pfcmp.creatorid = V_pf_status.hostid;
			if ((s = pf_find_state_byid(psk->psk_pfcmp.id,
			    psk->psk_pfcmp.creatorid))) {
				pf_unlink_state(s, PF_ENTER_LOCKED);
				psk->psk_killed = 1;
			}
			break;
		}

		for (i = 0; i <= pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];

relock_DIOCKILLSTATES:
			PF_HASHROW_LOCK(ih);
			LIST_FOREACH(s, &ih->states, entry) {
				sk = s->key[PF_SK_WIRE];
				if (s->direction == PF_OUT) {
					srcaddr = &sk->addr[1];
					dstaddr = &sk->addr[0];
					srcport = sk->port[1];
					dstport = sk->port[0];
				} else {
					srcaddr = &sk->addr[0];
					dstaddr = &sk->addr[1];
					srcport = sk->port[0];
					dstport = sk->port[1];
				}

				if ((!psk->psk_af || sk->af == psk->psk_af)
				    && (!psk->psk_proto || psk->psk_proto ==
				    sk->proto) &&
				    PF_MATCHA(psk->psk_src.neg,
				    &psk->psk_src.addr.v.a.addr,
				    &psk->psk_src.addr.v.a.mask,
				    srcaddr, sk->af) &&
				    PF_MATCHA(psk->psk_dst.neg,
				    &psk->psk_dst.addr.v.a.addr,
				    &psk->psk_dst.addr.v.a.mask,
				    dstaddr, sk->af) &&
				    (psk->psk_src.port_op == 0 ||
				    pf_match_port(psk->psk_src.port_op,
				    psk->psk_src.port[0], psk->psk_src.port[1],
				    srcport)) &&
				    (psk->psk_dst.port_op == 0 ||
				    pf_match_port(psk->psk_dst.port_op,
				    psk->psk_dst.port[0], psk->psk_dst.port[1],
				    dstport)) &&
				    (!psk->psk_label[0] ||
				    (s->rule.ptr->label[0] &&
				    !strcmp(psk->psk_label,
				    s->rule.ptr->label))) &&
				    (!psk->psk_ifname[0] ||
				    !strcmp(psk->psk_ifname,
				    s->kif->pfik_name))) {
					pf_unlink_state(s, PF_ENTER_LOCKED);
					killed++;
					goto relock_DIOCKILLSTATES;
				}
			}
			PF_HASHROW_UNLOCK(ih);
		}
		psk->psk_killed = killed;
		break;
	}
	case DIOCADDSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pfsync_state	*sp = &ps->state;

		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		if (V_pfsync_state_import_ptr != NULL) {
			PF_RULES_RLOCK();
			error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL);
			PF_RULES_RUNLOCK();
		} else
			error = EOPNOTSUPP;
		break;
	}

	case DIOCGETSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*s;

		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pfsync_state_export(&ps->state, s);
		PF_STATE_UNLOCK(s);
		break;
	}

	case DIOCGETSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pf_state		*s;
		struct pfsync_state	*pstore, *p;
		int			 i, nr;

		if (ps->ps_len == 0) {
			nr = uma_zone_get_cur(V_pf_state_z);
			ps->ps_len = sizeof(struct pfsync_state) * nr;
			break;
		}

		p = pstore = malloc(ps->ps_len, M_TEMP, M_WAITOK);
		nr = 0;

		for (i = 0; i <= pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];

			PF_HASHROW_LOCK(ih);
			LIST_FOREACH(s, &ih->states, entry) {

				if (s->timeout == PFTM_UNLINKED)
					continue;

				if ((nr+1) * sizeof(*p) > ps->ps_len) {
					PF_HASHROW_UNLOCK(ih);
					goto DIOCGETSTATES_full;
				}
				pfsync_state_export(p, s);
				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(ih);
		}
DIOCGETSTATES_full:
		error = copyout(pstore, ps->ps_states,
		    sizeof(struct pfsync_state) * nr);
		if (error) {
			free(pstore, M_TEMP);
			break;
		}
		ps->ps_len = sizeof(struct pfsync_state) * nr;
		free(pstore, M_TEMP);

		break;
	}
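	/*
	 * DIOCGETSTATES above follows the usual two-pass sizing convention:
	 * a call with ps_len == 0 only reports the space required, and the
	 * caller retries with a buffer of at least that size.
	 */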
	case DIOCGETSTATUS: {
		struct pf_status *s = (struct pf_status *)addr;

		PF_RULES_RLOCK();
		s->running = V_pf_status.running;
		s->since = V_pf_status.since;
		s->debug = V_pf_status.debug;
		s->hostid = V_pf_status.hostid;
		s->states = V_pf_status.states;
		s->src_nodes = V_pf_status.src_nodes;

		for (int i = 0; i < PFRES_MAX; i++)
			s->counters[i] =
			    counter_u64_fetch(V_pf_status.counters[i]);
		for (int i = 0; i < LCNT_MAX; i++)
			s->lcounters[i] =
			    counter_u64_fetch(V_pf_status.lcounters[i]);
		for (int i = 0; i < FCNT_MAX; i++)
			s->fcounters[i] =
			    counter_u64_fetch(V_pf_status.fcounters[i]);
		for (int i = 0; i < SCNT_MAX; i++)
			s->scounters[i] =
			    counter_u64_fetch(V_pf_status.scounters[i]);

		bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ);
		bcopy(V_pf_status.pf_chksum, s->pf_chksum,
		    PF_MD5_DIGEST_LENGTH);

		pfi_update_status(s->ifname, s);
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCSETSTATUSIF: {
		struct pfioc_if	*pi = (struct pfioc_if *)addr;

		if (pi->ifname[0] == 0) {
			bzero(V_pf_status.ifname, IFNAMSIZ);
			break;
		}
		PF_RULES_WLOCK();
		strlcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCCLRSTATUS: {
		PF_RULES_WLOCK();
		for (int i = 0; i < PFRES_MAX; i++)
			counter_u64_zero(V_pf_status.counters[i]);
		for (int i = 0; i < FCNT_MAX; i++)
			counter_u64_zero(V_pf_status.fcounters[i]);
		for (int i = 0; i < SCNT_MAX; i++)
			counter_u64_zero(V_pf_status.scounters[i]);
		V_pf_status.since = time_second;
		if (*V_pf_status.ifname)
			pfi_update_status(V_pf_status.ifname, NULL);
		PF_RULES_WUNLOCK();
		break;
	}
	case DIOCNATLOOK: {
		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
		struct pf_state_key	*sk;
		struct pf_state		*state;
		struct pf_state_key_cmp	 key;
		int			 m = 0, direction = pnl->direction;
		int			 sidx, didx;

		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
		sidx = (direction == PF_IN) ? 1 : 0;
		didx = (direction == PF_IN) ? 0 : 1;

		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dport || !pnl->sport)))
			error = EINVAL;
		else {
			bzero(&key, sizeof(key));
			key.af = pnl->af;
			key.proto = pnl->proto;
			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
			key.port[sidx] = pnl->sport;
			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
			key.port[didx] = pnl->dport;

			state = pf_find_state_all(&key, direction, &m);

			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				/* XXXGL: not locked read */
				sk = state->key[sidx];
				PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
				pnl->rsport = sk->port[sidx];
				PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
				pnl->rdport = sk->port[didx];
			} else
				error = ENOENT;
		}
		break;
	}

	case DIOCSETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
		int		 old;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			break;
		}
		PF_RULES_WLOCK();
		old = V_pf_default_rule.timeout[pt->timeout];
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
			wakeup(pf_purge_thread);
		pt->seconds = old;
		PF_RULES_WUNLOCK();
		break;
	}
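	/*
	 * Shortening PFTM_INTERVAL wakes the purge thread immediately so
	 * the new, shorter purge period takes effect without waiting out
	 * a sleep based on the old interval.
	 */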
	case DIOCGETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		PF_RULES_RLOCK();
		pt->seconds = V_pf_default_rule.timeout[pt->timeout];
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			break;
		}
		PF_RULES_RLOCK();
		pl->limit = V_pf_limits[pl->index].limit;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCSETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
		int			 old_limit;

		PF_RULES_WLOCK();
		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    V_pf_limits[pl->index].zone == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
		old_limit = V_pf_limits[pl->index].limit;
		V_pf_limits[pl->index].limit = pl->limit;
		pl->limit = old_limit;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCSETDEBUG: {
		u_int32_t	*level = (u_int32_t *)addr;

		PF_RULES_WLOCK();
		V_pf_status.debug = *level;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_ruleset	*ruleset = &pf_main_ruleset;
		struct pf_rule		*rule;

		PF_RULES_WLOCK();
		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		PF_RULES_WUNLOCK();
		break;
	}
	case DIOCGIFSPEED: {
		struct pf_ifspeed	*psp = (struct pf_ifspeed *)addr;
		struct pf_ifspeed	 ps;
		struct ifnet		*ifp;

		if (psp->ifname[0] != 0) {
			/* Can we completely trust user-land? */
			strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
			ifp = ifunit(ps.ifname);
			if (ifp != NULL)
				psp->baudrate = ifp->if_baudrate;
			else
				error = EINVAL;
		} else
			error = EINVAL;
		break;
	}

#ifdef ALTQ
	case DIOCSTARTALTQ: {
		struct pf_altq		*altq;

		PF_RULES_WLOCK();
		/* enable all altq interfaces on active list */
		TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
			if (altq->qname[0] == 0 && (altq->local_flags &
			    PFALTQ_FLAG_IF_REMOVED) == 0) {
				error = pf_enable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			V_pf_altq_running = 1;
		PF_RULES_WUNLOCK();
		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
		break;
	}

	case DIOCSTOPALTQ: {
		struct pf_altq		*altq;

		PF_RULES_WLOCK();
		/* disable all altq interfaces on active list */
		TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
			if (altq->qname[0] == 0 && (altq->local_flags &
			    PFALTQ_FLAG_IF_REMOVED) == 0) {
				error = pf_disable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			V_pf_altq_running = 0;
		PF_RULES_WUNLOCK();
		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
		break;
	}

	case DIOCADDALTQ: {
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq, *a;
		struct ifnet		*ifp;

		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK);
		bcopy(&pa->altq, altq, sizeof(struct pf_altq));
		altq->local_flags = 0;

		PF_RULES_WLOCK();
		if (pa->ticket != V_ticket_altqs_inactive) {
			PF_RULES_WUNLOCK();
			free(altq, M_PFALTQ);
			error = EBUSY;
			break;
		}

		/*
		 * if this is for a queue, find the discipline and
		 * copy the necessary fields
		 */
		if (altq->qname[0] != 0) {
			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
				PF_RULES_WUNLOCK();
				error = EBUSY;
				free(altq, M_PFALTQ);
				break;
			}
			altq->altq_disc = NULL;
			TAILQ_FOREACH(a, V_pf_altqs_inactive, entries) {
				if (strncmp(a->ifname, altq->ifname,
				    IFNAMSIZ) == 0 && a->qname[0] == 0) {
					altq->altq_disc = a->altq_disc;
					break;
				}
			}
		}

		if ((ifp = ifunit(altq->ifname)) == NULL)
			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
		else
			error = altq_add(altq);

		if (error) {
			PF_RULES_WUNLOCK();
			free(altq, M_PFALTQ);
			break;
		}

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGETALTQS: {
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq;

		PF_RULES_RLOCK();
		pa->nr = 0;
		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
			pa->nr++;
		pa->ticket = V_ticket_altqs_active;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETALTQ: {
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq;
		u_int32_t		 nr;

		PF_RULES_RLOCK();
		if (pa->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		nr = 0;
		altq = TAILQ_FIRST(V_pf_altqs_active);
		while ((altq != NULL) && (nr < pa->nr)) {
			altq = TAILQ_NEXT(altq, entries);
			nr++;
		}
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCCHANGEALTQ:
		/* CHANGEALTQ not supported yet! */
		error = ENODEV;
		break;

	case DIOCGETQSTATS: {
		struct pfioc_qstats	*pq = (struct pfioc_qstats *)addr;
		struct pf_altq		*altq;
		u_int32_t		 nr;
		int			 nbytes;

		PF_RULES_RLOCK();
		if (pq->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		nbytes = pq->nbytes;
		nr = 0;
		altq = TAILQ_FIRST(V_pf_altqs_active);
		while ((altq != NULL) && (nr < pq->nr)) {
			altq = TAILQ_NEXT(altq, entries);
			nr++;
		}
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}

		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
			PF_RULES_RUNLOCK();
			error = ENXIO;
			break;
		}
		PF_RULES_RUNLOCK();
		error = altq_getqstats(altq, pq->buf, &nbytes);
		if (error == 0) {
			pq->scheduler = altq->scheduler;
			pq->nbytes = nbytes;
		}
		break;
	}
#endif /* ALTQ */

	case DIOCBEGINADDRS: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;

		PF_RULES_WLOCK();
		pf_empty_pool(&V_pf_pabuf);
		pp->ticket = ++V_ticket_pabuf;
		PF_RULES_WUNLOCK();
		break;
	}
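	/*
	 * DIOCBEGINADDRS resets V_pf_pabuf, the staging list for pool
	 * addresses: DIOCADDADDR appends to it under the ticket issued
	 * here, and a later DIOCADDRULE/DIOCCHANGERULE moves the staged
	 * list into the rule's pool via pf_mv_pool().
	 */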
	case DIOCADDADDR: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
		struct pf_pooladdr	*pa;
		struct pfi_kif		*kif = NULL;

#ifndef INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (pp->af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
		bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
		if (pa->ifname[0])
			kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
		PF_RULES_WLOCK();
		if (pp->ticket != V_ticket_pabuf) {
			PF_RULES_WUNLOCK();
			if (pa->ifname[0])
				free(kif, PFI_MTYPE);
			free(pa, M_PFRULE);
			error = EBUSY;
			break;
		}
		if (pa->ifname[0]) {
			pa->kif = pfi_kif_attach(kif, pa->ifname);
			pfi_kif_ref(pa->kif);
		} else
			pa->kif = NULL;
		if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
		    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
			if (pa->ifname[0])
				pfi_kif_unref(pa->kif);
			PF_RULES_WUNLOCK();
			free(pa, M_PFRULE);
			break;
		}
		TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGETADDRS: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
		struct pf_pool		*pool;
		struct pf_pooladdr	*pa;

		PF_RULES_RLOCK();
		pp->nr = 0;
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
			pp->nr++;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETADDR: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
		struct pf_pool		*pool;
		struct pf_pooladdr	*pa;
		u_int32_t		 nr = 0;

		PF_RULES_RLOCK();
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
		pf_addr_copyout(&pp->addr.addr);
		PF_RULES_RUNLOCK();
		break;
	}
	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
		struct pf_pool		*pool;
		struct pf_pooladdr	*oldpa = NULL, *newpa = NULL;
		struct pf_ruleset	*ruleset;
		struct pfi_kif		*kif = NULL;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}

		if (pca->action != PF_CHANGE_REMOVE) {
#ifndef INET
			if (pca->af == AF_INET) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (pca->af == AF_INET6) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
			if (newpa->ifname[0])
				kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
			newpa->kif = NULL;
		}

#define	ERROUT(x)	{ error = (x); goto DIOCCHANGEADDR_error; }
		PF_RULES_WLOCK();
		ruleset = pf_find_ruleset(pca->anchor);
		if (ruleset == NULL)
			ERROUT(EBUSY);

		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL)
			ERROUT(EBUSY);

		if (pca->action != PF_CHANGE_REMOVE) {
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kif_attach(kif, newpa->ifname);
				pfi_kif_ref(newpa->kif);
				kif = NULL;
			}

			switch (newpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				error = pfi_dynaddr_setup(&newpa->addr,
				    pca->af);
				break;
			case PF_ADDR_TABLE:
				newpa->addr.p.tbl = pfr_attach_table(ruleset,
				    newpa->addr.v.tblname);
				if (newpa->addr.p.tbl == NULL)
					error = ENOMEM;
				break;
			}
			if (error)
				goto DIOCCHANGEADDR_error;
		}

		switch (pca->action) {
		case PF_CHANGE_ADD_HEAD:
			oldpa = TAILQ_FIRST(&pool->list);
			break;
		case PF_CHANGE_ADD_TAIL:
			oldpa = TAILQ_LAST(&pool->list, pf_palist);
			break;
		default:
			oldpa = TAILQ_FIRST(&pool->list);
			for (int i = 0; oldpa && i < pca->nr; i++)
				oldpa = TAILQ_NEXT(oldpa, entries);

			if (oldpa == NULL)
				ERROUT(EINVAL);
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			switch (oldpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				pfi_dynaddr_remove(oldpa->addr.p.dyn);
				break;
			case PF_ADDR_TABLE:
				pfr_detach_table(oldpa->addr.p.tbl);
				break;
			}
			if (oldpa->kif)
				pfi_kif_unref(oldpa->kif);
			free(oldpa, M_PFRULE);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCCHANGEADDR_error:
		if (newpa != NULL) {
			if (newpa->kif)
				pfi_kif_unref(newpa->kif);
			free(newpa, M_PFRULE);
		}
		PF_RULES_WUNLOCK();
		if (kif != NULL)
			free(kif, PFI_MTYPE);
		break;
	}
	case DIOCGETRULESETS: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;

		PF_RULES_RLOCK();
		pr->path[sizeof(pr->path) - 1] = 0;
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			PF_RULES_RUNLOCK();
			error = ENOENT;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETRULESET: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;
		u_int32_t		 nr = 0;

		PF_RULES_RLOCK();
		pr->path[sizeof(pr->path) - 1] = 0;
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			PF_RULES_RUNLOCK();
			error = ENOENT;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		if (!pr->name[0])
			error = EBUSY;
		PF_RULES_RUNLOCK();
		break;
	}
	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_add_tables(pfrts, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_del_tables(pfrts, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_table);

		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_NOWAIT);
		if (pfrts == NULL) {
			error = ENOMEM;
			PF_RULES_RUNLOCK();
			break;
		}
		error = pfr_get_tables(&io->pfrio_table, pfrts,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfrts, io->pfrio_buffer, totlen);
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_tstats *pfrtstats;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		PF_RULES_WLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
		pfrtstats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT);
		if (pfrtstats == NULL) {
			error = ENOMEM;
			PF_RULES_WUNLOCK();
			break;
		}
		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0)
			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
		free(pfrtstats, M_TEMP);
		break;
	}

	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		PF_RULES_WLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_NOWAIT);
		if (pfrts == NULL) {
			error = ENOMEM;
			PF_RULES_WUNLOCK();
			break;
		}
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			PF_RULES_WUNLOCK();
			break;
		}
		error = pfr_clr_tstats(pfrts, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		io->pfrio_size = min(io->pfrio_size, n);
		PF_RULES_RUNLOCK();

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_set_tflags(pfrts, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_NOWAIT);
		if (pfras == NULL) {
			error = ENOMEM;
			break;
		}
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_add_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
2765 case DIOCRDELADDRS: {
2766 struct pfioc_table *io = (struct pfioc_table *)addr;
2767 struct pfr_addr *pfras;
2770 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2774 if (io->pfrio_size < 0 ||
2775 io->pfrio_size > pf_ioctl_maxcount ||
2776 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
2780 totlen = io->pfrio_size * sizeof(struct pfr_addr);
2781 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
2787 error = copyin(io->pfrio_buffer, pfras, totlen);
2789 free(pfras, M_TEMP);
2793 error = pfr_del_addrs(&io->pfrio_table, pfras,
2794 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2795 PFR_FLAG_USERIOCTL);
2797 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
2798 error = copyout(pfras, io->pfrio_buffer, totlen);
2799 free(pfras, M_TEMP);
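/*
 * DIOCRSETADDRS replaces a table's address list wholesale: pfrio_size
 * counts the new list supplied by userland, while pfrio_size2 sizes the
 * feedback buffer, which may need to be larger because feedback is also
 * generated for addresses being deleted.  The allocation below therefore
 * uses the larger of the two counts.
 */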
2803 case DIOCRSETADDRS: {
2804 struct pfioc_table *io = (struct pfioc_table *)addr;
2805 struct pfr_addr *pfras;
2806 size_t totlen, count;
2808 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2812 if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
2816 count = max(io->pfrio_size, io->pfrio_size2);
2817 if (count > pf_ioctl_maxcount ||
2818 WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
2822 totlen = count * sizeof(struct pfr_addr);
2823 pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
2829 error = copyin(io->pfrio_buffer, pfras, totlen);
2831 free(pfras, M_TEMP);
2835 error = pfr_set_addrs(&io->pfrio_table, pfras,
2836 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2837 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2838 PFR_FLAG_USERIOCTL, 0);
2840 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
2841 error = copyout(pfras, io->pfrio_buffer, totlen);
2842 free(pfras, M_TEMP);
2846 case DIOCRGETADDRS: {
2847 struct pfioc_table *io = (struct pfioc_table *)addr;
2848 struct pfr_addr *pfras;
2851 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2855 if (io->pfrio_size < 0 ||
2856 io->pfrio_size > pf_ioctl_maxcount ||
2857 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
2861 totlen = io->pfrio_size * sizeof(struct pfr_addr);
2862 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
2869 error = pfr_get_addrs(&io->pfrio_table, pfras,
2870 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2873 error = copyout(pfras, io->pfrio_buffer, totlen);
2874 free(pfras, M_TEMP);
2878 case DIOCRGETASTATS: {
2879 struct pfioc_table *io = (struct pfioc_table *)addr;
2880 struct pfr_astats *pfrastats;
2883 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2887 if (io->pfrio_size < 0 ||
2888 io->pfrio_size > pf_ioctl_maxcount ||
2889 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
2893 totlen = io->pfrio_size * sizeof(struct pfr_astats);
2894 pfrastats = mallocarray(io->pfrio_size,
2895 sizeof(struct pfr_astats), M_TEMP, M_NOWAIT);
2901 error = pfr_get_astats(&io->pfrio_table, pfrastats,
2902 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2905 error = copyout(pfrastats, io->pfrio_buffer, totlen);
2906 free(pfrastats, M_TEMP);
2910 case DIOCRCLRASTATS: {
2911 struct pfioc_table *io = (struct pfioc_table *)addr;
2912 struct pfr_addr *pfras;
2915 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2919 if (io->pfrio_size < 0 ||
2920 io->pfrio_size > pf_ioctl_maxcount ||
2921 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
2925 totlen = io->pfrio_size * sizeof(struct pfr_addr);
2926 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
2932 error = copyin(io->pfrio_buffer, pfras, totlen);
2934 free(pfras, M_TEMP);
2938 error = pfr_clr_astats(&io->pfrio_table, pfras,
2939 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2940 PFR_FLAG_USERIOCTL);
2942 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
2943 error = copyout(pfras, io->pfrio_buffer, totlen);
2944 free(pfras, M_TEMP);
2948 case DIOCRTSTADDRS: {
2949 struct pfioc_table *io = (struct pfioc_table *)addr;
2950 struct pfr_addr *pfras;
2953 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2957 if (io->pfrio_size < 0 ||
2958 io->pfrio_size > pf_ioctl_maxcount ||
2959 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
2963 totlen = io->pfrio_size * sizeof(struct pfr_addr);
2964 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
2970 error = copyin(io->pfrio_buffer, pfras, totlen);
2972 free(pfras, M_TEMP);
2976 error = pfr_tst_addrs(&io->pfrio_table, pfras,
2977 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2978 PFR_FLAG_USERIOCTL);
2981 error = copyout(pfras, io->pfrio_buffer, totlen);
2982 free(pfras, M_TEMP);
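/*
 * DIOCRINADEFINE loads a replacement address list into the inactive
 * copy of a table as part of a pending transaction, identified by
 * pfrio_ticket; the new contents only become visible once the enclosing
 * DIOCXCOMMIT succeeds.
 */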
2986 case DIOCRINADEFINE: {
2987 struct pfioc_table *io = (struct pfioc_table *)addr;
2988 struct pfr_addr *pfras;
2991 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2995 if (io->pfrio_size < 0 ||
2996 io->pfrio_size > pf_ioctl_maxcount ||
2997 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
3001 totlen = io->pfrio_size * sizeof(struct pfr_addr);
3002 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
3008 error = copyin(io->pfrio_buffer, pfras, totlen);
3010 free(pfras, M_TEMP);
3014 error = pfr_ina_define(&io->pfrio_table, pfras,
3015 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
3016 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3018 free(pfras, M_TEMP);
3023 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
3025 error = pf_osfp_add(io);
3031 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
3033 error = pf_osfp_get(io);
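/*
 * DIOCXBEGIN/DIOCXROLLBACK/DIOCXCOMMIT implement atomic ruleset updates.
 * Userland passes an array of pfioc_trans_e elements, one per ruleset or
 * table anchor to replace; DIOCXBEGIN opens an inactive copy of each and
 * hands back one ticket per element, and the whole set is later swapped
 * in (or thrown away) at once.  A minimal sketch, from userland, with
 * the descriptor name illustrative only:
 *
 *	struct pfioc_trans_e es[1] = { { .rs_num = PF_RULESET_FILTER } };
 *	struct pfioc_trans t = { .size = 1, .esize = sizeof(es[0]),
 *	    .array = es };
 *	ioctl(fd, DIOCXBEGIN, &t);	// es[0].ticket now valid
 *	// ... load rules against es[0].ticket ...
 *	ioctl(fd, DIOCXCOMMIT, &t);	// or DIOCXROLLBACK on failure
 */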
3039 struct pfioc_trans *io = (struct pfioc_trans *)addr;
3040 struct pfioc_trans_e *ioes, *ioe;
3044 if (io->esize != sizeof(*ioe)) {
3049 io->size > pf_ioctl_maxcount ||
3050 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
3054 totlen = sizeof(struct pfioc_trans_e) * io->size;
3055 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
3061 error = copyin(io->array, ioes, totlen);
3067 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
3068 switch (ioe->rs_num) {
3070 case PF_RULESET_ALTQ:
3071 if (ioe->anchor[0]) {
3077 if ((error = pf_begin_altq(&ioe->ticket))) {
3084 case PF_RULESET_TABLE:
3086 struct pfr_table table;
3088 bzero(&table, sizeof(table));
3089 strlcpy(table.pfrt_anchor, ioe->anchor,
3090 sizeof(table.pfrt_anchor));
3091 if ((error = pfr_ina_begin(&table,
3092 &ioe->ticket, NULL, 0))) {
3100 if ((error = pf_begin_rules(&ioe->ticket,
3101 ioe->rs_num, ioe->anchor))) {
3110 error = copyout(ioes, io->array, totlen);
3115 case DIOCXROLLBACK: {
3116 struct pfioc_trans *io = (struct pfioc_trans *)addr;
3117 struct pfioc_trans_e *ioe, *ioes;
3121 if (io->esize != sizeof(*ioe)) {
3126 io->size > pf_ioctl_maxcount ||
3127 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
3131 totlen = sizeof(struct pfioc_trans_e) * io->size;
3132 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
3138 error = copyin(io->array, ioes, totlen);
3144 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
3145 switch (ioe->rs_num) {
3147 case PF_RULESET_ALTQ:
3148 if (ioe->anchor[0]) {
3154 if ((error = pf_rollback_altq(ioe->ticket))) {
3157 goto fail; /* really bad */
3161 case PF_RULESET_TABLE:
3163 struct pfr_table table;
3165 bzero(&table, sizeof(table));
3166 strlcpy(table.pfrt_anchor, ioe->anchor,
3167 sizeof(table.pfrt_anchor));
3168 if ((error = pfr_ina_rollback(&table,
3169 ioe->ticket, NULL, 0))) {
3172 goto fail; /* really bad */
3177 if ((error = pf_rollback_rules(ioe->ticket,
3178 ioe->rs_num, ioe->anchor))) {
3181 goto fail; /* really bad */
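/*
 * DIOCXCOMMIT is deliberately two-pass: the loop below first re-checks
 * every ticket while nothing has been modified, so a stale ticket fails
 * the whole transaction cleanly; only once all elements validate does
 * the second loop swap the inactive rulesets in, where an error would
 * leave things half-committed.
 */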
3192 struct pfioc_trans *io = (struct pfioc_trans *)addr;
3193 struct pfioc_trans_e *ioe, *ioes;
3194 struct pf_ruleset *rs;
3198 if (io->esize != sizeof(*ioe)) {
3204 io->size > pf_ioctl_maxcount ||
3205 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
3210 totlen = sizeof(struct pfioc_trans_e) * io->size;
3211 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
3217 error = copyin(io->array, ioes, totlen);
3223 /* First, make sure everything will succeed. */
3224 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
3225 switch (ioe->rs_num) {
3227 case PF_RULESET_ALTQ:
3228 if (ioe->anchor[0]) {
3234 if (!V_altqs_inactive_open || ioe->ticket !=
3235 V_ticket_altqs_inactive) {
3243 case PF_RULESET_TABLE:
3244 rs = pf_find_ruleset(ioe->anchor);
3245 if (rs == NULL || !rs->topen || ioe->ticket !=
3254 if (ioe->rs_num < 0 || ioe->rs_num >=
3261 rs = pf_find_ruleset(ioe->anchor);
3263 !rs->rules[ioe->rs_num].inactive.open ||
3264 rs->rules[ioe->rs_num].inactive.ticket !=
3274 /* Now do the commit - no errors should happen here. */
3275 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
3276 switch (ioe->rs_num) {
3278 case PF_RULESET_ALTQ:
3279 if ((error = pf_commit_altq(ioe->ticket))) {
3282 goto fail; /* really bad */
3286 case PF_RULESET_TABLE:
3288 struct pfr_table table;
3290 bzero(&table, sizeof(table));
3291 strlcpy(table.pfrt_anchor, ioe->anchor,
3292 sizeof(table.pfrt_anchor));
3293 if ((error = pfr_ina_commit(&table,
3294 ioe->ticket, NULL, NULL, 0))) {
3297 goto fail; /* really bad */
3302 if ((error = pf_commit_rules(ioe->ticket,
3303 ioe->rs_num, ioe->anchor))) {
3306 goto fail; /* really bad */
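/*
 * DIOCGETSRCNODES negotiates its buffer size much like the table
 * ioctls: a first pass counts source nodes under the per-row hash
 * locks, and if userland passed psn_len == 0 only the required length
 * is returned so the caller can allocate and retry.  Roughly, from
 * userland (the descriptor name is illustrative only):
 *
 *	struct pfioc_src_nodes psn;
 *	bzero(&psn, sizeof(psn));
 *	ioctl(fd, DIOCGETSRCNODES, &psn);	// psn_len = bytes needed
 *	psn.psn_buf = malloc(psn.psn_len);
 *	ioctl(fd, DIOCGETSRCNODES, &psn);	// nodes copied out
 */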
3316 case DIOCGETSRCNODES: {
3317 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
3318 struct pf_srchash *sh;
3319 struct pf_src_node *n, *p, *pstore;
3322 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
3324 PF_HASHROW_LOCK(sh);
3325 LIST_FOREACH(n, &sh->nodes, entry)
3327 PF_HASHROW_UNLOCK(sh);
3330 psn->psn_len = min(psn->psn_len,
3331 sizeof(struct pf_src_node) * nr);
3333 if (psn->psn_len == 0) {
3334 psn->psn_len = sizeof(struct pf_src_node) * nr;
3340 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK);
3341 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
3343 PF_HASHROW_LOCK(sh);
3344 LIST_FOREACH(n, &sh->nodes, entry) {
3345 int secs = time_uptime, diff;
3347 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
3350 bcopy(n, p, sizeof(struct pf_src_node));
3351 if (n->rule.ptr != NULL)
3352 p->rule.nr = n->rule.ptr->nr;
3353 p->creation = secs - p->creation;
3354 if (p->expire > secs)
3359 /* Adjust the connection rate estimate. */
3360 diff = secs - n->conn_rate.last;
3361 if (diff >= n->conn_rate.seconds)
3362 p->conn_rate.count = 0;
3364 p->conn_rate.count -=
3365 n->conn_rate.count * diff /
3366 n->conn_rate.seconds;
3370 PF_HASHROW_UNLOCK(sh);
3372 error = copyout(pstore, psn->psn_src_nodes,
3373 sizeof(struct pf_src_node) * nr);
3375 free(pstore, M_TEMP);
3378 psn->psn_len = sizeof(struct pf_src_node) * nr;
3379 free(pstore, M_TEMP);
3383 case DIOCCLRSRCNODES: {
3385 pf_clear_srcnodes(NULL);
3386 pf_purge_expired_src_nodes();
3390 case DIOCKILLSRCNODES:
3391 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
3394 case DIOCSETHOSTID: {
3395 u_int32_t *hostid = (u_int32_t *)addr;
3399 V_pf_status.hostid = arc4random();
3401 V_pf_status.hostid = *hostid;
3412 case DIOCIGETIFACES: {
3413 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3414 struct pfi_kif *ifstore;
3417 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
3422 if (io->pfiio_size < 0 ||
3423 io->pfiio_size > pf_ioctl_maxcount ||
3424 WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
3429 bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
3430 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
3432 if (ifstore == NULL) {
3438 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
3440 error = copyout(ifstore, io->pfiio_buffer, bufsiz);
3441 free(ifstore, M_TEMP);
3445 case DIOCSETIFFLAG: {
3446 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3449 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
3454 case DIOCCLRIFFLAG: {
3455 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3458 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
3468 if (sx_xlocked(&pf_ioctl_lock))
3469 sx_xunlock(&pf_ioctl_lock);
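/*
 * pfsync_state_export() flattens a live pf_state into the fixed export
 * format shared by pfsync and the state-listing ioctls: both state keys
 * (wire and stack side) are copied verbatim, times are rewritten
 * relative to the current uptime, and multi-byte fields are converted
 * to network byte order.
 */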
3476 pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
3478 bzero(sp, sizeof(struct pfsync_state));
3480 /* copy from state key */
3481 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
3482 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
3483 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
3484 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
3485 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
3486 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
3487 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
3488 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
3489 sp->proto = st->key[PF_SK_WIRE]->proto;
3490 sp->af = st->key[PF_SK_WIRE]->af;
3492 /* copy from state */
3493 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
3494 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
3495 sp->creation = htonl(time_uptime - st->creation);
3496 sp->expire = pf_state_expires(st);
3497 if (sp->expire <= time_uptime)
3498 sp->expire = htonl(0);
3500 sp->expire = htonl(sp->expire - time_uptime);
3502 sp->direction = st->direction;
3504 sp->timeout = st->timeout;
3505 sp->state_flags = st->state_flags;
3507 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
3508 if (st->nat_src_node)
3509 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
3512 sp->creatorid = st->creatorid;
3513 pf_state_peer_hton(&st->src, &sp->src);
3514 pf_state_peer_hton(&st->dst, &sp->dst);
3516 if (st->rule.ptr == NULL)
3517 sp->rule = htonl(-1);
3519 sp->rule = htonl(st->rule.ptr->nr);
3520 if (st->anchor.ptr == NULL)
3521 sp->anchor = htonl(-1);
3523 sp->anchor = htonl(st->anchor.ptr->nr);
3524 if (st->nat_rule.ptr == NULL)
3525 sp->nat_rule = htonl(-1);
3527 sp->nat_rule = htonl(st->nat_rule.ptr->nr);
3529 pf_state_counter_hton(st->packets[0], sp->packets[0]);
3530 pf_state_counter_hton(st->packets[1], sp->packets[1]);
3531 pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
3532 pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
3537 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
3539 struct pfr_ktable *kt;
3541 KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
3544 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
3545 kt = kt->pfrkt_root;
3547 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
3552 * XXX - Check for version mismatch!!!
3555 pf_clear_states(void)
3560 for (i = 0; i <= pf_hashmask; i++) {
3561 struct pf_idhash *ih = &V_pf_idhash[i];
3563 PF_HASHROW_LOCK(ih);
3564 LIST_FOREACH(s, &ih->states, entry) {
3565 s->timeout = PFTM_PURGE;
3566 /* Don't send out individual delete messages. */
3567 s->state_flags |= PFSTATE_NOSYNC;
3568 pf_unlink_state(s, PF_ENTER_LOCKED);
3571 PF_HASHROW_UNLOCK(ih);
3576 pf_clear_tables(void)
3578 struct pfioc_table io;
3581 bzero(&io, sizeof(io));
3583 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
3590 pf_clear_srcnodes(struct pf_src_node *n)
3595 for (i = 0; i <= pf_hashmask; i++) {
3596 struct pf_idhash *ih = &V_pf_idhash[i];
3598 PF_HASHROW_LOCK(ih);
3599 LIST_FOREACH(s, &ih->states, entry) {
3600 if (n == NULL || n == s->src_node)
3602 if (n == NULL || n == s->nat_src_node)
3603 s->nat_src_node = NULL;
3605 PF_HASHROW_UNLOCK(ih);
3609 struct pf_srchash *sh;
3611 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
3613 PF_HASHROW_LOCK(sh);
3614 LIST_FOREACH(n, &sh->nodes, entry) {
3618 PF_HASHROW_UNLOCK(sh);
3621 /* XXX: hash slot should already be locked here. */
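/*
 * pf_kill_srcnodes() works in two sweeps: matching nodes are unlinked
 * (pf_unlink_src_node() leaves expire set to 1 as a marker) and
 * collected on a local list, then every state is walked so pointers to
 * the doomed nodes are cleared before the nodes themselves are freed.
 */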
3628 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
3630 struct pf_src_node_list kill;
3633 for (int i = 0; i <= pf_srchashmask; i++) {
3634 struct pf_srchash *sh = &V_pf_srchash[i];
3635 struct pf_src_node *sn, *tmp;
3637 PF_HASHROW_LOCK(sh);
3638 LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
3639 if (PF_MATCHA(psnk->psnk_src.neg,
3640 &psnk->psnk_src.addr.v.a.addr,
3641 &psnk->psnk_src.addr.v.a.mask,
3642 &sn->addr, sn->af) &&
3643 PF_MATCHA(psnk->psnk_dst.neg,
3644 &psnk->psnk_dst.addr.v.a.addr,
3645 &psnk->psnk_dst.addr.v.a.mask,
3646 &sn->raddr, sn->af)) {
3647 pf_unlink_src_node(sn);
3648 LIST_INSERT_HEAD(&kill, sn, entry);
3651 PF_HASHROW_UNLOCK(sh);
3654 for (int i = 0; i <= pf_hashmask; i++) {
3655 struct pf_idhash *ih = &V_pf_idhash[i];
3658 PF_HASHROW_LOCK(ih);
3659 LIST_FOREACH(s, &ih->states, entry) {
3660 if (s->src_node && s->src_node->expire == 1)
3662 if (s->nat_src_node && s->nat_src_node->expire == 1)
3663 s->nat_src_node = NULL;
3665 PF_HASHROW_UNLOCK(ih);
3668 psnk->psnk_killed = pf_free_src_nodes(&kill);
3672 * XXX - Check for version mismatch!!!
3676 * Duplicate the effect of pfctl -Fa, flushing as much as we can.
3686 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
3688 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
3691 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
3693 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
3694 break; /* XXX: rollback? */
3696 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
3698 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
3699 break; /* XXX: rollback? */
3701 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
3703 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
3704 break; /* XXX: rollback? */
3706 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
3708 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
3709 break; /* XXX: rollback? */
3712 /* XXX: these should always succeed here */
3713 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
3714 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
3715 pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
3716 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
3717 pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
3719 if ((error = pf_clear_tables()) != 0)
3723 if ((error = pf_begin_altq(&t[0])) != 0) {
3724 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
3727 pf_commit_altq(t[0]);
3732 pf_clear_srcnodes(NULL);
3734 /* status uses no malloc'ed memory, so there is nothing to clean up */
3735 /* fingerprints and interfaces have their own cleanup code */
3743 pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
3748 chk = pf_test(PF_IN, flags, ifp, m, inp);
3760 pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
3765 chk = pf_test(PF_OUT, flags, ifp, m, inp);
3779 pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
3785 * For loopback traffic, IPv6 uses the real interface in order to
3786 * support scoped addresses. To support stateful filtering we have to
3787 * change this to lo0, as is the case with IPv4.
3789 CURVNET_SET(ifp->if_vnet);
3790 chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp);
3802 pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
3807 CURVNET_SET(ifp->if_vnet);
3808 chk = pf_test6(PF_OUT, flags, ifp, m, inp);
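/*
 * hook_pf() and dehook_pf() below attach and detach the four checkers
 * above to the pfil(9) heads for AF_INET and AF_INET6, so every inbound
 * and outbound packet on those families is run through pf_test() or
 * pf_test6().
 */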
3824 struct pfil_head *pfh_inet;
3827 struct pfil_head *pfh_inet6;
3830 if (V_pf_pfil_hooked)
3834 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3835 if (pfh_inet == NULL)
3836 return (ESRCH); /* XXX */
3837 pfil_add_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
3838 pfil_add_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
3841 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3842 if (pfh_inet6 == NULL) {
3844 pfil_remove_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
3846 pfil_remove_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
3849 return (ESRCH); /* XXX */
3851 pfil_add_hook_flags(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
3852 pfil_add_hook_flags(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
3855 V_pf_pfil_hooked = 1;
3863 struct pfil_head *pfh_inet;
3866 struct pfil_head *pfh_inet6;
3869 if (V_pf_pfil_hooked == 0)
3873 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3874 if (pfh_inet == NULL)
3875 return (ESRCH); /* XXX */
3876 pfil_remove_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
3878 pfil_remove_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
3882 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3883 if (pfh_inet6 == NULL)
3884 return (ESRCH); /* XXX */
3885 pfil_remove_hook_flags(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
3887 pfil_remove_hook_flags(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
3891 V_pf_pfil_hooked = 0;
3898 VNET_ITERATOR_DECL(vnet_iter);
3901 VNET_FOREACH(vnet_iter) {
3902 CURVNET_SET(vnet_iter);
3903 V_pf_pfil_hooked = 0;
3904 TAILQ_INIT(&V_pf_tags);
3905 TAILQ_INIT(&V_pf_qids);
3908 VNET_LIST_RUNLOCK();
3911 V_pf_vnet_active = 1;
3919 rm_init(&pf_rules_lock, "pf rulesets");
3920 sx_init(&pf_ioctl_lock, "pf ioctl");
3922 pf_mtag_initialize();
3924 pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
3929 error = kproc_create(pf_purge_thread, NULL, NULL, 0, 0, "pf purge");
3939 pf_unload_vnet(void)
3943 V_pf_vnet_active = 0;
3944 V_pf_status.running = 0;
3945 error = dehook_pf();
3948 * Should not happen!
3949 * XXX Due to error code ESRCH, kldunload will show
3950 * a message like 'No such process'.
3952 printf("%s: pfil unregistration failed\n", __FUNCTION__);
3956 pf_unload_vnet_purge();
3962 swi_remove(V_pf_swi_cookie);
3964 pf_normalize_cleanup();
3971 if (IS_DEFAULT_VNET(curvnet))
3974 /* Free counters last as we updated them during shutdown. */
3975 counter_u64_free(V_pf_default_rule.states_cur);
3976 counter_u64_free(V_pf_default_rule.states_tot);
3977 counter_u64_free(V_pf_default_rule.src_nodes);
3979 for (int i = 0; i < PFRES_MAX; i++)
3980 counter_u64_free(V_pf_status.counters[i]);
3981 for (int i = 0; i < LCNT_MAX; i++)
3982 counter_u64_free(V_pf_status.lcounters[i]);
3983 for (int i = 0; i < FCNT_MAX; i++)
3984 counter_u64_free(V_pf_status.fcounters[i]);
3985 for (int i = 0; i < SCNT_MAX; i++)
3986 counter_u64_free(V_pf_status.scounters[i]);
3995 while (pf_end_threads < 2) {
3996 wakeup_one(pf_purge_thread);
3997 rm_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftmo", 0);
4001 destroy_dev(pf_dev);
4005 rm_destroy(&pf_rules_lock);
4006 sx_destroy(&pf_ioctl_lock);
4012 vnet_pf_init(void *unused __unused)
4017 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
4018 vnet_pf_init, NULL);
4021 vnet_pf_uninit(const void *unused __unused)
4026 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
4027 vnet_pf_uninit, NULL);
4031 pf_modevent(module_t mod, int type, void *data)
4041 * The module should not be unloaded: unloading races with the running filter.
4046 error = pf_unload();
4056 static moduledata_t pf_mod = {
4062 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
4063 MODULE_VERSION(pf, PF_MODVER);