/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 * $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/ucred.h>

#include <net/if_var.h>
#include <net/route.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>

#include <netinet/ip6.h>

#include <net/altq/altq.h>
static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
    u_int8_t, u_int8_t, u_int8_t);
static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
static void pf_empty_pool(struct pf_palist *);
static int pfioctl(struct cdev *, u_long, caddr_t, int,
    struct thread *);
static int pf_begin_altq(u_int32_t *);
static int pf_rollback_altq(u_int32_t);
static int pf_commit_altq(u_int32_t);
static int pf_enable_altq(struct pf_altq *);
static int pf_disable_altq(struct pf_altq *);
static u_int32_t pf_qname2qid(char *);
static void pf_qid_unref(u_int32_t);
static int pf_begin_rules(u_int32_t *, int, const char *);
static int pf_rollback_rules(u_int32_t, int, char *);
static int pf_setup_pfsync_matching(struct pf_ruleset *);
static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int pf_commit_rules(u_int32_t, int, char *);
static int pf_addr_setup(struct pf_ruleset *,
    struct pf_addr_wrap *, sa_family_t);
static void pf_addr_copyout(struct pf_addr_wrap *);
VNET_DEFINE(struct pf_rule, pf_default_rule);

static VNET_DEFINE(int, pf_altq_running);
#define V_pf_altq_running VNET(pf_altq_running)

#define TAGID_MAX 50000
    TAILQ_ENTRY(pf_tagname) entries;
    char name[PF_TAG_NAME_SIZE];
TAILQ_HEAD(pf_tags, pf_tagname);
#define V_pf_tags VNET(pf_tags)
VNET_DEFINE(struct pf_tags, pf_tags);
#define V_pf_qids VNET(pf_qids)
VNET_DEFINE(struct pf_tags, pf_qids);
static MALLOC_DEFINE(M_PFTAG, "pf_tag", "pf(4) tag names");
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

static u_int16_t tagname2tag(struct pf_tags *, char *);
static u_int16_t pf_tagname2tag(char *);
static void tag_unref(struct pf_tags *, u_int16_t);

#define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
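
/*
 * Usage sketch (illustrative, not part of the original file): DPFPRINTF
 * gates debug printfs on the current debug level, e.g.
 *
 *	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
 *
 * prints only when V_pf_status.debug >= PF_DEBUG_MISC, as in the
 * DIOCSTART/DIOCSTOP handlers further down.
 */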
/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void pf_clear_states(void);
static int pf_clear_tables(void);
static void pf_clear_srcnodes(struct pf_src_node *);
static void pf_kill_srcnodes(struct pfioc_src_node_kill *);
static void pf_tbladdr_copyout(struct pf_addr_wrap *);
/*
 * Wrapper functions for pfil(9) hooks
 */
static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, struct inpcb *inp);
static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, struct inpcb *inp);
static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, struct inpcb *inp);
static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, struct inpcb *inp);

static int hook_pf(void);
static int dehook_pf(void);
static int shutdown_pf(void);
static int pf_load(void);
static int pf_unload(void);
static struct cdevsw pf_cdevsw = {
    .d_version = D_VERSION,

static volatile VNET_DEFINE(int, pf_pfil_hooked);
#define V_pf_pfil_hooked VNET(pf_pfil_hooked)

/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid". We primarily need this to control (global)
 * external events, e.g., eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define V_pf_vnet_active VNET(pf_vnet_active)

struct rwlock pf_rules_lock;
struct sx pf_ioctl_lock;

pfsync_state_import_t *pfsync_state_import_ptr = NULL;
pfsync_insert_state_t *pfsync_insert_state_ptr = NULL;
pfsync_update_state_t *pfsync_update_state_ptr = NULL;
pfsync_delete_state_t *pfsync_delete_state_ptr = NULL;
pfsync_clear_states_t *pfsync_clear_states_ptr = NULL;
pfsync_defer_t *pfsync_defer_ptr = NULL;

pflog_packet_t *pflog_packet_ptr = NULL;
    u_int32_t *my_timeout = V_pf_default_rule.timeout;

    pfi_initialize_vnet();

    V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
    V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

    RB_INIT(&V_pf_anchors);
    pf_init_ruleset(&pf_main_ruleset);

    /* default rule should never be garbage collected */
    V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
    V_pf_default_rule.action = PF_DROP;
#else
    V_pf_default_rule.action = PF_PASS;
#endif
    V_pf_default_rule.nr = -1;
    V_pf_default_rule.rtableid = -1;

    V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
    V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
    V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

    /* initialize default timeouts */
    my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
    my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
    my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
    my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
    my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
    my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
    my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
    my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
    my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
    my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
    my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
    my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
    my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
    my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
    my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
    my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
    my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
    my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
    my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
    my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

    bzero(&V_pf_status, sizeof(V_pf_status));
    V_pf_status.debug = PF_DEBUG_URGENT;

    V_pf_pfil_hooked = 0;

    /* XXX do our best to avoid a conflict */
    V_pf_status.hostid = arc4random();

    for (int i = 0; i < PFRES_MAX; i++)
        V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
    for (int i = 0; i < LCNT_MAX; i++)
        V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
    for (int i = 0; i < FCNT_MAX; i++)
        V_pf_status.fcounters[i] = counter_u64_alloc(M_WAITOK);
    for (int i = 0; i < SCNT_MAX; i++)
        V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

    if (swi_add(NULL, "pf send", pf_intr, curvnet, SWI_NET,
        INTR_MPSAFE, &V_pf_swi_cookie) != 0)
        /* XXXGL: leaked all above. */
static struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
    struct pf_ruleset *ruleset;
    struct pf_rule *rule;

    ruleset = pf_find_ruleset(anchor);
    rs_num = pf_get_ruleset_number(rule_action);
    if (rs_num >= PF_RULESET_MAX)
        if (check_ticket && ticket !=
            ruleset->rules[rs_num].active.ticket)
            rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
            rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
        if (check_ticket && ticket !=
            ruleset->rules[rs_num].inactive.ticket)
            rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
            rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
    while ((rule != NULL) && (rule->nr != rule_number))
        rule = TAILQ_NEXT(rule, entries);

    return (&rule->rpool);
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
    struct pf_pooladdr *mv_pool_pa;

    while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
        TAILQ_REMOVE(poola, mv_pool_pa, entries);
        TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);

pf_empty_pool(struct pf_palist *poola)
    struct pf_pooladdr *pa;

    while ((pa = TAILQ_FIRST(poola)) != NULL) {
        switch (pa->addr.type) {
        case PF_ADDR_DYNIFTL:
            pfi_dynaddr_remove(pa->addr.p.dyn);
            /* XXX: this could be an unfinished pooladdr on pabuf */
            if (pa->addr.p.tbl != NULL)
                pfr_detach_table(pa->addr.p.tbl);
        pfi_kif_unref(pa->kif);
        TAILQ_REMOVE(poola, pa, entries);
pf_unlink_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)

    TAILQ_REMOVE(rulequeue, rule, entries);

    PF_UNLNKDRULES_LOCK();
    rule->rule_flag |= PFRULE_REFS;
    TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
    PF_UNLNKDRULES_UNLOCK();

pf_free_rule(struct pf_rule *rule)

        tag_unref(&V_pf_tags, rule->tag);
        tag_unref(&V_pf_tags, rule->match_tag);
    if (rule->pqid != rule->qid)
        pf_qid_unref(rule->pqid);
    pf_qid_unref(rule->qid);
    switch (rule->src.addr.type) {
    case PF_ADDR_DYNIFTL:
        pfi_dynaddr_remove(rule->src.addr.p.dyn);
        pfr_detach_table(rule->src.addr.p.tbl);
    switch (rule->dst.addr.type) {
    case PF_ADDR_DYNIFTL:
        pfi_dynaddr_remove(rule->dst.addr.p.dyn);
        pfr_detach_table(rule->dst.addr.p.tbl);
    if (rule->overload_tbl)
        pfr_detach_table(rule->overload_tbl);
    pfi_kif_unref(rule->kif);
    pf_anchor_remove(rule);
    pf_empty_pool(&rule->rpool.list);
    counter_u64_free(rule->states_cur);
    counter_u64_free(rule->states_tot);
    counter_u64_free(rule->src_nodes);
    free(rule, M_PFRULE);
tagname2tag(struct pf_tags *head, char *tagname)
    struct pf_tagname *tag, *p = NULL;
    u_int16_t new_tagid = 1;

    TAILQ_FOREACH(tag, head, entries)
        if (strcmp(tagname, tag->name) == 0) {

    /*
     * to avoid fragmentation, we do a linear search from the beginning
     * and take the first free slot we find. if there is none or the list
     * is empty, append a new entry at the end.
     */

    if (!TAILQ_EMPTY(head))
        for (p = TAILQ_FIRST(head); p != NULL &&
            p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
            new_tagid = p->tag + 1;

    if (new_tagid > TAGID_MAX)

    /* allocate and fill new struct pf_tagname */
    tag = malloc(sizeof(*tag), M_PFTAG, M_NOWAIT|M_ZERO);
    strlcpy(tag->name, tagname, sizeof(tag->name));
    tag->tag = new_tagid;

    if (p != NULL)  /* insert new entry before p */
        TAILQ_INSERT_BEFORE(p, tag, entries);
    else            /* either list empty or no free slot in between */
        TAILQ_INSERT_TAIL(head, tag, entries);
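
    /*
     * Worked example (mine, for illustration): with tags {1, 2, 4} already
     * allocated, the scan above advances new_tagid to 3 and stops at the
     * entry carrying tag 4 (p->tag != new_tagid), so the new name is
     * inserted before it with tag 3, keeping the tag space compact.
     */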
tag_unref(struct pf_tags *head, u_int16_t tag)
    struct pf_tagname *p, *next;

    for (p = TAILQ_FIRST(head); p != NULL; p = next) {
        next = TAILQ_NEXT(p, entries);
            TAILQ_REMOVE(head, p, entries);

pf_tagname2tag(char *tagname)
    return (tagname2tag(&V_pf_tags, tagname));

pf_qname2qid(char *qname)
    return ((u_int32_t)tagname2tag(&V_pf_qids, qname));

pf_qid_unref(u_int32_t qid)
    tag_unref(&V_pf_qids, (u_int16_t)qid);
pf_begin_altq(u_int32_t *ticket)
    struct pf_altq *altq;

    /* Purge the old altq list */
    while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
        TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
        if (altq->qname[0] == 0 &&
            (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
            /* detach and destroy the discipline */
            error = altq_remove(altq);
            pf_qid_unref(altq->qid);
        free(altq, M_PFALTQ);
    *ticket = ++V_ticket_altqs_inactive;
    V_altqs_inactive_open = 1;

pf_rollback_altq(u_int32_t ticket)
    struct pf_altq *altq;

    if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
    /* Purge the old altq list */
    while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
        TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
        if (altq->qname[0] == 0 &&
            (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
            /* detach and destroy the discipline */
            error = altq_remove(altq);
            pf_qid_unref(altq->qid);
        free(altq, M_PFALTQ);
    V_altqs_inactive_open = 0;
pf_commit_altq(u_int32_t ticket)
    struct pf_altqqueue *old_altqs;
    struct pf_altq *altq;

    if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)

    /* swap altqs, keep the old. */
    old_altqs = V_pf_altqs_active;
    V_pf_altqs_active = V_pf_altqs_inactive;
    V_pf_altqs_inactive = old_altqs;
    V_ticket_altqs_active = V_ticket_altqs_inactive;

    /* Attach new disciplines */
    TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
        if (altq->qname[0] == 0 &&
            (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
            /* attach the discipline */
            error = altq_pfattach(altq);
            if (error == 0 && V_pf_altq_running)
                error = pf_enable_altq(altq);

    /* Purge the old altq list */
    while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
        TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
        if (altq->qname[0] == 0 &&
            (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
            /* detach and destroy the discipline */
            if (V_pf_altq_running)
                error = pf_disable_altq(altq);
            err = altq_pfdetach(altq);
            if (err != 0 && error == 0)
            err = altq_remove(altq);
            if (err != 0 && error == 0)
            pf_qid_unref(altq->qid);
        free(altq, M_PFALTQ);
    V_altqs_inactive_open = 0;
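
/*
 * Note on the ticket pattern shared by pf_begin_altq(), pf_rollback_altq()
 * and pf_commit_altq() above (my summary): pf_begin_altq() purges the
 * inactive list and hands out a fresh ticket, and rollback/commit only act
 * when presented with that same ticket while the inactive list is open, so
 * a stale transaction cannot clobber a newer one.
 */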
pf_enable_altq(struct pf_altq *altq)
    struct tb_profile tb;

    if ((ifp = ifunit(altq->ifname)) == NULL)

    if (ifp->if_snd.altq_type != ALTQT_NONE)
        error = altq_enable(&ifp->if_snd);

    /* set tokenbucket regulator */
    if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
        tb.rate = altq->ifbandwidth;
        tb.depth = altq->tbrsize;
        error = tbr_set(&ifp->if_snd, &tb);

pf_disable_altq(struct pf_altq *altq)
    struct tb_profile tb;

    if ((ifp = ifunit(altq->ifname)) == NULL)

    /*
     * If the discipline is no longer referenced, it was overridden
     * by a new one; in that case, just return.
     */
    if (altq->altq_disc != ifp->if_snd.altq_disc)

    error = altq_disable(&ifp->if_snd);

    /* clear tokenbucket regulator */
        error = tbr_set(&ifp->if_snd, &tb);
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
    struct pf_altq *a1, *a2, *a3;

    /* Interrupt userland queue modifications */
    if (V_altqs_inactive_open)
        pf_rollback_altq(V_ticket_altqs_inactive);

    /* Start new altq ruleset */
    if (pf_begin_altq(&ticket))

    /* Copy the current active set */
    TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
        a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
        bcopy(a1, a2, sizeof(struct pf_altq));

        if (a2->qname[0] != 0) {
            if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
            a2->altq_disc = NULL;
            TAILQ_FOREACH(a3, V_pf_altqs_inactive, entries) {
                if (strncmp(a3->ifname, a2->ifname,
                    IFNAMSIZ) == 0 && a3->qname[0] == 0) {
                    a2->altq_disc = a3->altq_disc;

            /* Deactivate the interface in question */
            a2->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
            if ((ifp1 = ifunit(a2->ifname)) == NULL ||
                (remove && ifp1 == ifp)) {
                a2->local_flags |= PFALTQ_FLAG_IF_REMOVED;
                error = altq_add(a2);

                if (ticket != V_ticket_altqs_inactive)

        TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);

        pf_rollback_altq(ticket);
    pf_commit_altq(ticket);
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
    struct pf_ruleset *rs;
    struct pf_rule *rule;

    if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
    rs = pf_find_or_create_ruleset(anchor);
    while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
        pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
        rs->rules[rs_num].inactive.rcount--;
    *ticket = ++rs->rules[rs_num].inactive.ticket;
    rs->rules[rs_num].inactive.open = 1;
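
/*
 * Illustrative userland flow (a sketch; anything outside this excerpt is
 * an assumption): rules are loaded transactionally against the ticket
 * handed out above, e.g.
 *
 *	struct pfioc_rule pr;
 *	memset(&pr, 0, sizeof(pr));
 *	... obtain pr.ticket for the inactive ruleset ...
 *	ioctl(dev, DIOCADDRULE, &pr);	// rejected unless pr.ticket matches
 *	... commit, which ends up in pf_commit_rules() ...
 *
 * DIOCADDRULE below checks pr->ticket against
 * ruleset->rules[rs_num].inactive.ticket before accepting the rule.
 */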
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
    struct pf_ruleset *rs;
    struct pf_rule *rule;

    if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
    rs = pf_find_ruleset(anchor);
    if (rs == NULL || !rs->rules[rs_num].inactive.open ||
        rs->rules[rs_num].inactive.ticket != ticket)
    while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
        pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
        rs->rules[rs_num].inactive.rcount--;
    rs->rules[rs_num].inactive.open = 0;
#define PF_MD5_UPD(st, elm) \
    MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm) \
    MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do { \
    (stor) = htonl((st)->elm); \
    MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t)); \
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do { \
    (stor) = htons((st)->elm); \
    MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t)); \
} while (0)
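
/*
 * These helpers feed rule fields into an MD5 context in a byte-order
 * independent way: multi-byte values are converted to network order into
 * caller-provided storage first, e.g.
 *
 *	u_int32_t y;
 *	PF_MD5_UPD_HTONL(rule, prob, y);
 *
 * so the resulting ruleset checksum is comparable between hosts (see
 * pf_hash_rule() and pf_setup_pfsync_matching() below).
 */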
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
    PF_MD5_UPD(pfr, addr.type);
    switch (pfr->addr.type) {
    case PF_ADDR_DYNIFTL:
        PF_MD5_UPD(pfr, addr.v.ifname);
        PF_MD5_UPD(pfr, addr.iflags);
        PF_MD5_UPD(pfr, addr.v.tblname);
    case PF_ADDR_ADDRMASK:
        PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
        PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
    PF_MD5_UPD(pfr, port[0]);
    PF_MD5_UPD(pfr, port[1]);
    PF_MD5_UPD(pfr, neg);
    PF_MD5_UPD(pfr, port_op);
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
    pf_hash_rule_addr(ctx, &rule->src);
    pf_hash_rule_addr(ctx, &rule->dst);
    PF_MD5_UPD_STR(rule, label);
    PF_MD5_UPD_STR(rule, ifname);
    PF_MD5_UPD_STR(rule, match_tagname);
    PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
    PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
    PF_MD5_UPD_HTONL(rule, prob, y);
    PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
    PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
    PF_MD5_UPD(rule, uid.op);
    PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
    PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
    PF_MD5_UPD(rule, gid.op);
    PF_MD5_UPD_HTONL(rule, rule_flag, y);
    PF_MD5_UPD(rule, action);
    PF_MD5_UPD(rule, direction);
    PF_MD5_UPD(rule, af);
    PF_MD5_UPD(rule, quick);
    PF_MD5_UPD(rule, ifnot);
    PF_MD5_UPD(rule, match_tag_not);
    PF_MD5_UPD(rule, natpass);
    PF_MD5_UPD(rule, keep_state);
    PF_MD5_UPD(rule, proto);
    PF_MD5_UPD(rule, type);
    PF_MD5_UPD(rule, code);
    PF_MD5_UPD(rule, flags);
    PF_MD5_UPD(rule, flagset);
    PF_MD5_UPD(rule, allow_opts);
    PF_MD5_UPD(rule, rt);
    PF_MD5_UPD(rule, tos);
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
    struct pf_ruleset *rs;
    struct pf_rule *rule, **old_array;
    struct pf_rulequeue *old_rules;
    u_int32_t old_rcount;

    if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
    rs = pf_find_ruleset(anchor);
    if (rs == NULL || !rs->rules[rs_num].inactive.open ||
        ticket != rs->rules[rs_num].inactive.ticket)

    /* Calculate checksum for the main ruleset */
    if (rs == &pf_main_ruleset) {
        error = pf_setup_pfsync_matching(rs);

    /* Swap rules, keep the old. */
    old_rules = rs->rules[rs_num].active.ptr;
    old_rcount = rs->rules[rs_num].active.rcount;
    old_array = rs->rules[rs_num].active.ptr_array;

    rs->rules[rs_num].active.ptr =
        rs->rules[rs_num].inactive.ptr;
    rs->rules[rs_num].active.ptr_array =
        rs->rules[rs_num].inactive.ptr_array;
    rs->rules[rs_num].active.rcount =
        rs->rules[rs_num].inactive.rcount;
    rs->rules[rs_num].inactive.ptr = old_rules;
    rs->rules[rs_num].inactive.ptr_array = old_array;
    rs->rules[rs_num].inactive.rcount = old_rcount;

    rs->rules[rs_num].active.ticket =
        rs->rules[rs_num].inactive.ticket;
    pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

    /* Purge the old rule list. */
    while ((rule = TAILQ_FIRST(old_rules)) != NULL)
        pf_unlink_rule(old_rules, rule);
    if (rs->rules[rs_num].inactive.ptr_array)
        free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
    rs->rules[rs_num].inactive.ptr_array = NULL;
    rs->rules[rs_num].inactive.rcount = 0;
    rs->rules[rs_num].inactive.open = 0;
    pf_remove_if_empty_ruleset(rs);
pf_setup_pfsync_matching(struct pf_ruleset *rs)
    struct pf_rule *rule;
    u_int8_t digest[PF_MD5_DIGEST_LENGTH];

    for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
        /* XXX PF_RULESET_SCRUB as well? */
        if (rs_cnt == PF_RULESET_SCRUB)

        if (rs->rules[rs_cnt].inactive.ptr_array)
            free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
        rs->rules[rs_cnt].inactive.ptr_array = NULL;

        if (rs->rules[rs_cnt].inactive.rcount) {
            rs->rules[rs_cnt].inactive.ptr_array =
                malloc(sizeof(caddr_t) *
                rs->rules[rs_cnt].inactive.rcount,
            if (!rs->rules[rs_cnt].inactive.ptr_array)

        TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
            pf_hash_rule(&ctx, rule);
            (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;

    MD5Final(digest, &ctx);
    memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
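
/*
 * The digest computed above is published in V_pf_status.pf_chksum; as I
 * read the surrounding code, pfsync(4) peers can compare this checksum to
 * notice diverging rulesets (an interpretation, not stated in this file).
 */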
pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
    switch (addr->type) {
        addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
        if (addr->p.tbl == NULL)
    case PF_ADDR_DYNIFTL:
        error = pfi_dynaddr_setup(addr, af);

pf_addr_copyout(struct pf_addr_wrap *addr)
    switch (addr->type) {
    case PF_ADDR_DYNIFTL:
        pfi_dynaddr_copyout(addr);
        pf_tbladdr_copyout(addr);
pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)

    /* XXX keep in sync with switch() below */
    if (securelevel_gt(td->td_ucred, 2))
        case DIOCSETSTATUSIF:
        case DIOCGETTIMEOUT:
        case DIOCCLRRULECTRS:
        case DIOCGETRULESETS:
        case DIOCGETRULESET:
        case DIOCRGETTABLES:
        case DIOCRGETTSTATS:
        case DIOCRCLRTSTATS:
        case DIOCRGETASTATS:
        case DIOCRCLRASTATS:
        case DIOCGETSRCNODES:
        case DIOCCLRSRCNODES:
        case DIOCIGETIFACES:
        case DIOCRCLRTABLES:
        case DIOCRADDTABLES:
        case DIOCRDELTABLES:
        case DIOCRSETTFLAGS:
            if (((struct pfioc_table *)addr)->pfrio_flags &
                break; /* dummy operation ok */

    if (!(flags & FWRITE))
        case DIOCGETTIMEOUT:
        case DIOCGETRULESETS:
        case DIOCGETRULESET:
        case DIOCRGETTABLES:
        case DIOCRGETTSTATS:
        case DIOCRGETASTATS:
        case DIOCGETSRCNODES:
        case DIOCIGETIFACES:
        case DIOCRCLRTABLES:
        case DIOCRADDTABLES:
        case DIOCRDELTABLES:
        case DIOCRCLRTSTATS:
        case DIOCRSETTFLAGS:
            if (((struct pfioc_table *)addr)->pfrio_flags &
                flags |= FWRITE; /* need write lock for dummy */
            break; /* dummy operation ok */
            if (((struct pfioc_rule *)addr)->action ==
    CURVNET_SET(TD_TO_VNET(td));

        sx_xlock(&pf_ioctl_lock);
        if (V_pf_status.running)
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: pfil registration failed\n"));
            V_pf_status.running = 1;
            V_pf_status.since = time_second;
                V_pf_stateid[cpu] = time_second;
            DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));

        sx_xlock(&pf_ioctl_lock);
        if (!V_pf_status.running)
            V_pf_status.running = 0;
            error = dehook_pf();
                V_pf_status.running = 1;
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: pfil unregistration failed\n"));
            V_pf_status.since = time_second;
            DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
        struct pfioc_rule *pr = (struct pfioc_rule *)addr;
        struct pf_ruleset *ruleset;
        struct pf_rule *rule, *tail;
        struct pf_pooladdr *pa;
        struct pfi_kif *kif = NULL;

        if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
        if (pr->rule.af == AF_INET) {
            error = EAFNOSUPPORT;
        if (pr->rule.af == AF_INET6) {
            error = EAFNOSUPPORT;

        rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
        bcopy(&pr->rule, rule, sizeof(struct pf_rule));
        if (rule->ifname[0])
            kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
        rule->states_cur = counter_u64_alloc(M_WAITOK);
        rule->states_tot = counter_u64_alloc(M_WAITOK);
        rule->src_nodes = counter_u64_alloc(M_WAITOK);
        rule->cuid = td->td_ucred->cr_ruid;
        rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
        TAILQ_INIT(&rule->rpool.list);

#define ERROUT(x) { error = (x); goto DIOCADDRULE_error; }

        pr->anchor[sizeof(pr->anchor) - 1] = 0;
        ruleset = pf_find_ruleset(pr->anchor);
        if (ruleset == NULL)
        rs_num = pf_get_ruleset_number(pr->rule.action);
        if (rs_num >= PF_RULESET_MAX)
        if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
            DPFPRINTF(PF_DEBUG_MISC,
                ("ticket: %d != [%d]%d\n", pr->ticket, rs_num,
                ruleset->rules[rs_num].inactive.ticket));
        if (pr->pool_ticket != V_ticket_pabuf) {
            DPFPRINTF(PF_DEBUG_MISC,
                ("pool_ticket: %d != %d\n", pr->pool_ticket,

        tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
            rule->nr = tail->nr + 1;

        if (rule->ifname[0]) {
            rule->kif = pfi_kif_attach(kif, rule->ifname);
            pfi_kif_ref(rule->kif);

        if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)

        if (rule->qname[0] != 0) {
            if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
            else if (rule->pqname[0] != 0) {
                    pf_qname2qid(rule->pqname)) == 0)
                rule->pqid = rule->qid;

        if (rule->tagname[0])
            if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
        if (rule->match_tagname[0])
            if ((rule->match_tag =
                pf_tagname2tag(rule->match_tagname)) == 0)
        if (rule->rt && !rule->direction)
        if (rule->logif >= PFLOGIFS_MAX)
        if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
        if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
        if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
        if (rule->scrub_flags & PFSTATE_SETPRIO &&
            (rule->set_prio[0] > PF_PRIO_MAX ||
            rule->set_prio[1] > PF_PRIO_MAX))
        TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
            if (pa->addr.type == PF_ADDR_TABLE) {
                pa->addr.p.tbl = pfr_attach_table(ruleset,
                    pa->addr.v.tblname);
                if (pa->addr.p.tbl == NULL)

        rule->overload_tbl = NULL;
        if (rule->overload_tblname[0]) {
            if ((rule->overload_tbl = pfr_attach_table(ruleset,
                rule->overload_tblname)) == NULL)
                rule->overload_tbl->pfrkt_flags |=

        pf_mv_pool(&V_pf_pabuf, &rule->rpool.list);
        if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
            (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
            (rule->rt > PF_FASTROUTE)) &&
            (TAILQ_FIRST(&rule->rpool.list) == NULL))

        rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
        rule->evaluations = rule->packets[0] = rule->packets[1] =
            rule->bytes[0] = rule->bytes[1] = 0;
        TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
        ruleset->rules[rs_num].inactive.rcount++;

        counter_u64_free(rule->states_cur);
        counter_u64_free(rule->states_tot);
        counter_u64_free(rule->src_nodes);
        free(rule, M_PFRULE);
            free(kif, PFI_MTYPE);
    case DIOCGETRULES: {
        struct pfioc_rule *pr = (struct pfioc_rule *)addr;
        struct pf_ruleset *ruleset;
        struct pf_rule *tail;

        pr->anchor[sizeof(pr->anchor) - 1] = 0;
        ruleset = pf_find_ruleset(pr->anchor);
        if (ruleset == NULL) {
        rs_num = pf_get_ruleset_number(pr->rule.action);
        if (rs_num >= PF_RULESET_MAX) {
        tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
            pr->nr = tail->nr + 1;
        pr->ticket = ruleset->rules[rs_num].active.ticket;
        struct pfioc_rule *pr = (struct pfioc_rule *)addr;
        struct pf_ruleset *ruleset;
        struct pf_rule *rule;

        pr->anchor[sizeof(pr->anchor) - 1] = 0;
        ruleset = pf_find_ruleset(pr->anchor);
        if (ruleset == NULL) {
        rs_num = pf_get_ruleset_number(pr->rule.action);
        if (rs_num >= PF_RULESET_MAX) {
        if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
        rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
        while ((rule != NULL) && (rule->nr != pr->nr))
            rule = TAILQ_NEXT(rule, entries);
        bcopy(rule, &pr->rule, sizeof(struct pf_rule));
        pr->rule.u_states_cur = counter_u64_fetch(rule->states_cur);
        pr->rule.u_states_tot = counter_u64_fetch(rule->states_tot);
        pr->rule.u_src_nodes = counter_u64_fetch(rule->src_nodes);
        if (pf_anchor_copyout(ruleset, rule, pr)) {
        pf_addr_copyout(&pr->rule.src.addr);
        pf_addr_copyout(&pr->rule.dst.addr);
        for (i = 0; i < PF_SKIP_COUNT; ++i)
            if (rule->skip[i].ptr == NULL)
                pr->rule.skip[i].nr = -1;
                pr->rule.skip[i].nr =
                    rule->skip[i].ptr->nr;

        if (pr->action == PF_GET_CLR_CNTR) {
            rule->evaluations = 0;
            rule->packets[0] = rule->packets[1] = 0;
            rule->bytes[0] = rule->bytes[1] = 0;
            counter_u64_zero(rule->states_tot);
    case DIOCCHANGERULE: {
        struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
        struct pf_ruleset *ruleset;
        struct pf_rule *oldrule = NULL, *newrule = NULL;
        struct pfi_kif *kif = NULL;
        struct pf_pooladdr *pa;

        if (pcr->action < PF_CHANGE_ADD_HEAD ||
            pcr->action > PF_CHANGE_GET_TICKET) {
        if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {

        if (pcr->action != PF_CHANGE_REMOVE) {
            if (pcr->rule.af == AF_INET) {
                error = EAFNOSUPPORT;
            if (pcr->rule.af == AF_INET6) {
                error = EAFNOSUPPORT;
            newrule = malloc(sizeof(*newrule), M_PFRULE, M_WAITOK);
            bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
            if (newrule->ifname[0])
                kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
            newrule->states_cur = counter_u64_alloc(M_WAITOK);
            newrule->states_tot = counter_u64_alloc(M_WAITOK);
            newrule->src_nodes = counter_u64_alloc(M_WAITOK);
            newrule->cuid = td->td_ucred->cr_ruid;
            newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
            TAILQ_INIT(&newrule->rpool.list);

#define ERROUT(x) { error = (x); goto DIOCCHANGERULE_error; }

        if (!(pcr->action == PF_CHANGE_REMOVE ||
            pcr->action == PF_CHANGE_GET_TICKET) &&
            pcr->pool_ticket != V_ticket_pabuf)

        ruleset = pf_find_ruleset(pcr->anchor);
        if (ruleset == NULL)
        rs_num = pf_get_ruleset_number(pcr->rule.action);
        if (rs_num >= PF_RULESET_MAX)

        if (pcr->action == PF_CHANGE_GET_TICKET) {
            pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
        } else if (pcr->ticket !=
            ruleset->rules[rs_num].active.ticket)

        if (pcr->action != PF_CHANGE_REMOVE) {
            if (newrule->ifname[0]) {
                newrule->kif = pfi_kif_attach(kif,
                pfi_kif_ref(newrule->kif);
                newrule->kif = NULL;

            if (newrule->rtableid > 0 &&
                newrule->rtableid >= rt_numfibs)

            if (newrule->qname[0] != 0) {
                    pf_qname2qid(newrule->qname)) == 0)
                else if (newrule->pqname[0] != 0) {
                    if ((newrule->pqid =
                        pf_qname2qid(newrule->pqname)) == 0)
                    newrule->pqid = newrule->qid;
            if (newrule->tagname[0])
                    pf_tagname2tag(newrule->tagname)) == 0)
            if (newrule->match_tagname[0])
                if ((newrule->match_tag = pf_tagname2tag(
                    newrule->match_tagname)) == 0)
            if (newrule->rt && !newrule->direction)
            if (newrule->logif >= PFLOGIFS_MAX)
            if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
            if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
            if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
            TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
                if (pa->addr.type == PF_ADDR_TABLE) {
                        pfr_attach_table(ruleset,
                        pa->addr.v.tblname);
                    if (pa->addr.p.tbl == NULL)

            newrule->overload_tbl = NULL;
            if (newrule->overload_tblname[0]) {
                if ((newrule->overload_tbl = pfr_attach_table(
                    ruleset, newrule->overload_tblname)) ==
                    newrule->overload_tbl->pfrkt_flags |=

            pf_mv_pool(&V_pf_pabuf, &newrule->rpool.list);
            if (((((newrule->action == PF_NAT) ||
                (newrule->action == PF_RDR) ||
                (newrule->action == PF_BINAT) ||
                (newrule->rt > PF_FASTROUTE)) &&
                !newrule->anchor)) &&
                (TAILQ_FIRST(&newrule->rpool.list) == NULL))
                pf_free_rule(newrule);

            newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
            newrule->evaluations = 0;
            newrule->packets[0] = newrule->packets[1] = 0;
            newrule->bytes[0] = newrule->bytes[1] = 0;
        pf_empty_pool(&V_pf_pabuf);

        if (pcr->action == PF_CHANGE_ADD_HEAD)
            oldrule = TAILQ_FIRST(
                ruleset->rules[rs_num].active.ptr);
        else if (pcr->action == PF_CHANGE_ADD_TAIL)
            oldrule = TAILQ_LAST(
                ruleset->rules[rs_num].active.ptr, pf_rulequeue);
            oldrule = TAILQ_FIRST(
                ruleset->rules[rs_num].active.ptr);
            while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
                oldrule = TAILQ_NEXT(oldrule, entries);
            if (oldrule == NULL) {
                if (newrule != NULL)
                    pf_free_rule(newrule);

        if (pcr->action == PF_CHANGE_REMOVE) {
            pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
            ruleset->rules[rs_num].active.rcount--;
            if (oldrule == NULL)
                    ruleset->rules[rs_num].active.ptr,
            else if (pcr->action == PF_CHANGE_ADD_HEAD ||
                pcr->action == PF_CHANGE_ADD_BEFORE)
                TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
                    ruleset->rules[rs_num].active.ptr,
                    oldrule, newrule, entries);
            ruleset->rules[rs_num].active.rcount++;

            TAILQ_FOREACH(oldrule,
                ruleset->rules[rs_num].active.ptr, entries)

        ruleset->rules[rs_num].active.ticket++;

        pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
        pf_remove_if_empty_ruleset(ruleset);

DIOCCHANGERULE_error:
        if (newrule != NULL) {
            counter_u64_free(newrule->states_cur);
            counter_u64_free(newrule->states_tot);
            counter_u64_free(newrule->src_nodes);
            free(newrule, M_PFRULE);
            free(kif, PFI_MTYPE);
    case DIOCCLRSTATES: {
        struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
        u_int i, killed = 0;

        for (i = 0; i <= pf_hashmask; i++) {
            struct pf_idhash *ih = &V_pf_idhash[i];

relock_DIOCCLRSTATES:
            PF_HASHROW_LOCK(ih);
            LIST_FOREACH(s, &ih->states, entry)
                if (!psk->psk_ifname[0] ||
                    !strcmp(psk->psk_ifname,
                    s->kif->pfik_name)) {
                    /*
                     * Don't send out individual
                     * delete messages.
                     */
                    s->state_flags |= PFSTATE_NOSYNC;
                    pf_unlink_state(s, PF_ENTER_LOCKED);
                    goto relock_DIOCCLRSTATES;
            PF_HASHROW_UNLOCK(ih);

        psk->psk_killed = killed;
        if (pfsync_clear_states_ptr != NULL)
            pfsync_clear_states_ptr(V_pf_status.hostid, psk->psk_ifname);
    case DIOCKILLSTATES: {
        struct pf_state_key *sk;
        struct pf_addr *srcaddr, *dstaddr;
        u_int16_t srcport, dstport;
        struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
        u_int i, killed = 0;

        if (psk->psk_pfcmp.id) {
            if (psk->psk_pfcmp.creatorid == 0)
                psk->psk_pfcmp.creatorid = V_pf_status.hostid;
            if ((s = pf_find_state_byid(psk->psk_pfcmp.id,
                psk->psk_pfcmp.creatorid))) {
                pf_unlink_state(s, PF_ENTER_LOCKED);
                psk->psk_killed = 1;

        for (i = 0; i <= pf_hashmask; i++) {
            struct pf_idhash *ih = &V_pf_idhash[i];

relock_DIOCKILLSTATES:
            PF_HASHROW_LOCK(ih);
            LIST_FOREACH(s, &ih->states, entry) {
                sk = s->key[PF_SK_WIRE];
                if (s->direction == PF_OUT) {
                    srcaddr = &sk->addr[1];
                    dstaddr = &sk->addr[0];
                    srcport = sk->port[1];
                    dstport = sk->port[0];
                    srcaddr = &sk->addr[0];
                    dstaddr = &sk->addr[1];
                    srcport = sk->port[0];
                    dstport = sk->port[1];

                if ((!psk->psk_af || sk->af == psk->psk_af)
                    && (!psk->psk_proto || psk->psk_proto ==
                    PF_MATCHA(psk->psk_src.neg,
                    &psk->psk_src.addr.v.a.addr,
                    &psk->psk_src.addr.v.a.mask,
                    PF_MATCHA(psk->psk_dst.neg,
                    &psk->psk_dst.addr.v.a.addr,
                    &psk->psk_dst.addr.v.a.mask,
                    (psk->psk_src.port_op == 0 ||
                    pf_match_port(psk->psk_src.port_op,
                    psk->psk_src.port[0], psk->psk_src.port[1],
                    (psk->psk_dst.port_op == 0 ||
                    pf_match_port(psk->psk_dst.port_op,
                    psk->psk_dst.port[0], psk->psk_dst.port[1],
                    (!psk->psk_label[0] ||
                    (s->rule.ptr->label[0] &&
                    !strcmp(psk->psk_label,
                    s->rule.ptr->label))) &&
                    (!psk->psk_ifname[0] ||
                    !strcmp(psk->psk_ifname,
                    s->kif->pfik_name))) {
                    pf_unlink_state(s, PF_ENTER_LOCKED);
                    goto relock_DIOCKILLSTATES;
            PF_HASHROW_UNLOCK(ih);

        psk->psk_killed = killed;
    case DIOCADDSTATE: {
        struct pfioc_state *ps = (struct pfioc_state *)addr;
        struct pfsync_state *sp = &ps->state;

        if (sp->timeout >= PFTM_MAX) {
        if (pfsync_state_import_ptr != NULL) {
            error = pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL);

    case DIOCGETSTATE: {
        struct pfioc_state *ps = (struct pfioc_state *)addr;

        s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
        pfsync_state_export(&ps->state, s);

    case DIOCGETSTATES: {
        struct pfioc_states *ps = (struct pfioc_states *)addr;
        struct pfsync_state *pstore, *p;

        if (ps->ps_len == 0) {
            nr = uma_zone_get_cur(V_pf_state_z);
            ps->ps_len = sizeof(struct pfsync_state) * nr;

        p = pstore = malloc(ps->ps_len, M_TEMP, M_WAITOK);

        for (i = 0; i <= pf_hashmask; i++) {
            struct pf_idhash *ih = &V_pf_idhash[i];

            PF_HASHROW_LOCK(ih);
            LIST_FOREACH(s, &ih->states, entry) {
                if (s->timeout == PFTM_UNLINKED)
                if ((nr+1) * sizeof(*p) > ps->ps_len) {
                    PF_HASHROW_UNLOCK(ih);
                    goto DIOCGETSTATES_full;
                pfsync_state_export(p, s);
            PF_HASHROW_UNLOCK(ih);
        error = copyout(pstore, ps->ps_states,
            sizeof(struct pfsync_state) * nr);
            free(pstore, M_TEMP);
DIOCGETSTATES_full:
        ps->ps_len = sizeof(struct pfsync_state) * nr;
        free(pstore, M_TEMP);
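
        /*
         * Illustrative two-call usage from userland (a sketch, not from
         * this file):
         *
         *	struct pfioc_states ps;
         *	memset(&ps, 0, sizeof(ps));
         *	ioctl(dev, DIOCGETSTATES, &ps);		// ps_len == 0: size only
         *	ps.ps_states = malloc(ps.ps_len);
         *	ioctl(dev, DIOCGETSTATES, &ps);		// copies states out
         *
         * The ps_len == 0 branch at the top of this case reports the
         * required buffer size without copying anything.
         */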
    case DIOCGETSTATUS: {
        struct pf_status *s = (struct pf_status *)addr;

        s->running = V_pf_status.running;
        s->since = V_pf_status.since;
        s->debug = V_pf_status.debug;
        s->hostid = V_pf_status.hostid;
        s->states = V_pf_status.states;
        s->src_nodes = V_pf_status.src_nodes;

        for (int i = 0; i < PFRES_MAX; i++)
            s->counters[i] =
                counter_u64_fetch(V_pf_status.counters[i]);
        for (int i = 0; i < LCNT_MAX; i++)
            s->lcounters[i] =
                counter_u64_fetch(V_pf_status.lcounters[i]);
        for (int i = 0; i < FCNT_MAX; i++)
            s->fcounters[i] =
                counter_u64_fetch(V_pf_status.fcounters[i]);
        for (int i = 0; i < SCNT_MAX; i++)
            s->scounters[i] =
                counter_u64_fetch(V_pf_status.scounters[i]);

        bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ);
        bcopy(V_pf_status.pf_chksum, s->pf_chksum,
            PF_MD5_DIGEST_LENGTH);

        pfi_update_status(s->ifname, s);

    case DIOCSETSTATUSIF: {
        struct pfioc_if *pi = (struct pfioc_if *)addr;

        if (pi->ifname[0] == 0) {
            bzero(V_pf_status.ifname, IFNAMSIZ);

        strlcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);

    case DIOCCLRSTATUS: {
        for (int i = 0; i < PFRES_MAX; i++)
            counter_u64_zero(V_pf_status.counters[i]);
        for (int i = 0; i < FCNT_MAX; i++)
            counter_u64_zero(V_pf_status.fcounters[i]);
        for (int i = 0; i < SCNT_MAX; i++)
            counter_u64_zero(V_pf_status.scounters[i]);
        V_pf_status.since = time_second;
        if (*V_pf_status.ifname)
            pfi_update_status(V_pf_status.ifname, NULL);
        struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
        struct pf_state_key *sk;
        struct pf_state *state;
        struct pf_state_key_cmp key;
        int m = 0, direction = pnl->direction;

        /* NATLOOK src and dst are reversed, so reverse sidx/didx */
        sidx = (direction == PF_IN) ? 1 : 0;
        didx = (direction == PF_IN) ? 0 : 1;

            PF_AZERO(&pnl->saddr, pnl->af) ||
            PF_AZERO(&pnl->daddr, pnl->af) ||
            ((pnl->proto == IPPROTO_TCP ||
            pnl->proto == IPPROTO_UDP) &&
            (!pnl->dport || !pnl->sport)))

        bzero(&key, sizeof(key));
        key.proto = pnl->proto;
        PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
        key.port[sidx] = pnl->sport;
        PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
        key.port[didx] = pnl->dport;

        state = pf_find_state_all(&key, direction, &m);
            error = E2BIG;  /* more than one state */
        else if (state != NULL) {
            /* XXXGL: not locked read */
            sk = state->key[sidx];
            PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
            pnl->rsport = sk->port[sidx];
            PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
            pnl->rdport = sk->port[didx];
    case DIOCSETTIMEOUT: {
        struct pfioc_tm *pt = (struct pfioc_tm *)addr;

        if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
        old = V_pf_default_rule.timeout[pt->timeout];
        if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
        V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
        if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
            wakeup(pf_purge_thread);
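
        /*
         * Rationale (my reading of the two checks above): shortening
         * PFTM_INTERVAL wakes the purge thread so the shorter purge
         * period takes effect immediately rather than after the old
         * interval expires.
         */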
    case DIOCGETTIMEOUT: {
        struct pfioc_tm *pt = (struct pfioc_tm *)addr;

        if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
        pt->seconds = V_pf_default_rule.timeout[pt->timeout];

    case DIOCGETLIMIT: {
        struct pfioc_limit *pl = (struct pfioc_limit *)addr;

        if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
        pl->limit = V_pf_limits[pl->index].limit;

    case DIOCSETLIMIT: {
        struct pfioc_limit *pl = (struct pfioc_limit *)addr;

        if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
            V_pf_limits[pl->index].zone == NULL) {
        uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
        old_limit = V_pf_limits[pl->index].limit;
        V_pf_limits[pl->index].limit = pl->limit;
        pl->limit = old_limit;

    case DIOCSETDEBUG: {
        u_int32_t *level = (u_int32_t *)addr;

        V_pf_status.debug = *level;

    case DIOCCLRRULECTRS: {
        /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
        struct pf_ruleset *ruleset = &pf_main_ruleset;
        struct pf_rule *rule;

            ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
            rule->evaluations = 0;
            rule->packets[0] = rule->packets[1] = 0;
            rule->bytes[0] = rule->bytes[1] = 0;
    case DIOCGIFSPEED: {
        struct pf_ifspeed *psp = (struct pf_ifspeed *)addr;
        struct pf_ifspeed ps;

        if (psp->ifname[0] != 0) {
            /* Can we completely trust user-land? */
            strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
            ifp = ifunit(ps.ifname);
                psp->baudrate = ifp->if_baudrate;

    case DIOCSTARTALTQ: {
        struct pf_altq *altq;

        /* enable all altq interfaces on active list */
        TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
            if (altq->qname[0] == 0 && (altq->local_flags &
                PFALTQ_FLAG_IF_REMOVED) == 0) {
                error = pf_enable_altq(altq);
        V_pf_altq_running = 1;
        DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));

    case DIOCSTOPALTQ: {
        struct pf_altq *altq;

        /* disable all altq interfaces on active list */
        TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
            if (altq->qname[0] == 0 && (altq->local_flags &
                PFALTQ_FLAG_IF_REMOVED) == 0) {
                error = pf_disable_altq(altq);
        V_pf_altq_running = 0;
        DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
        struct pfioc_altq *pa = (struct pfioc_altq *)addr;
        struct pf_altq *altq, *a;

        altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK);
        bcopy(&pa->altq, altq, sizeof(struct pf_altq));
        altq->local_flags = 0;

        if (pa->ticket != V_ticket_altqs_inactive) {
            free(altq, M_PFALTQ);

        /*
         * if this is for a queue, find the discipline and
         * copy the necessary fields
         */
        if (altq->qname[0] != 0) {
            if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
                free(altq, M_PFALTQ);
            altq->altq_disc = NULL;
            TAILQ_FOREACH(a, V_pf_altqs_inactive, entries) {
                if (strncmp(a->ifname, altq->ifname,
                    IFNAMSIZ) == 0 && a->qname[0] == 0) {
                    altq->altq_disc = a->altq_disc;

        if ((ifp = ifunit(altq->ifname)) == NULL)
            altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
            error = altq_add(altq);

            free(altq, M_PFALTQ);

        TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
        bcopy(altq, &pa->altq, sizeof(struct pf_altq));

    case DIOCGETALTQS: {
        struct pfioc_altq *pa = (struct pfioc_altq *)addr;
        struct pf_altq *altq;

        TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
        pa->ticket = V_ticket_altqs_active;

        struct pfioc_altq *pa = (struct pfioc_altq *)addr;
        struct pf_altq *altq;

        if (pa->ticket != V_ticket_altqs_active) {
        altq = TAILQ_FIRST(V_pf_altqs_active);
        while ((altq != NULL) && (nr < pa->nr)) {
            altq = TAILQ_NEXT(altq, entries);
        bcopy(altq, &pa->altq, sizeof(struct pf_altq));

    case DIOCCHANGEALTQ:
        /* CHANGEALTQ not supported yet! */

    case DIOCGETQSTATS: {
        struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
        struct pf_altq *altq;

        if (pq->ticket != V_ticket_altqs_active) {
        nbytes = pq->nbytes;
        altq = TAILQ_FIRST(V_pf_altqs_active);
        while ((altq != NULL) && (nr < pq->nr)) {
            altq = TAILQ_NEXT(altq, entries);
        if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
        error = altq_getqstats(altq, pq->buf, &nbytes);
            pq->scheduler = altq->scheduler;
            pq->nbytes = nbytes;
    case DIOCBEGINADDRS: {
        struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

        pf_empty_pool(&V_pf_pabuf);
        pp->ticket = ++V_ticket_pabuf;
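
        /*
         * Note: V_pf_pabuf is a staging area for pool addresses. A caller
         * begins with DIOCBEGINADDRS (which empties the buffer and
         * returns a fresh ticket), appends entries with DIOCADDADDR under
         * that ticket, and the buffer is finally consumed by pf_mv_pool()
         * when the owning rule is added (see DIOCADDRULE above).
         */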
        struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
        struct pf_pooladdr *pa;
        struct pfi_kif *kif = NULL;

        if (pp->af == AF_INET) {
            error = EAFNOSUPPORT;
        if (pp->af == AF_INET6) {
            error = EAFNOSUPPORT;
        if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
            pp->addr.addr.type != PF_ADDR_DYNIFTL &&
            pp->addr.addr.type != PF_ADDR_TABLE) {
        pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
        bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
            kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
        if (pp->ticket != V_ticket_pabuf) {
            free(kif, PFI_MTYPE);
        if (pa->ifname[0]) {
            pa->kif = pfi_kif_attach(kif, pa->ifname);
            pfi_kif_ref(pa->kif);
        if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
            pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
            pfi_kif_unref(pa->kif);
        TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);

    case DIOCGETADDRS: {
        struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
        struct pf_pool *pool;
        struct pf_pooladdr *pa;

        pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
            pp->r_num, 0, 1, 0);
        TAILQ_FOREACH(pa, &pool->list, entries)

        struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
        struct pf_pool *pool;
        struct pf_pooladdr *pa;

        pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
            pp->r_num, 0, 1, 1);
        pa = TAILQ_FIRST(&pool->list);
        while ((pa != NULL) && (nr < pp->nr)) {
            pa = TAILQ_NEXT(pa, entries);
        bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
        pf_addr_copyout(&pp->addr.addr);
    case DIOCCHANGEADDR: {
        struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
        struct pf_pool *pool;
        struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
        struct pf_ruleset *ruleset;
        struct pfi_kif *kif = NULL;

        if (pca->action < PF_CHANGE_ADD_HEAD ||
            pca->action > PF_CHANGE_REMOVE) {
        if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
            pca->addr.addr.type != PF_ADDR_DYNIFTL &&
            pca->addr.addr.type != PF_ADDR_TABLE) {

        if (pca->action != PF_CHANGE_REMOVE) {
            if (pca->af == AF_INET) {
                error = EAFNOSUPPORT;
            if (pca->af == AF_INET6) {
                error = EAFNOSUPPORT;
            newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
            bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
            if (newpa->ifname[0])
                kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);

#define ERROUT(x) { error = (x); goto DIOCCHANGEADDR_error; }

        ruleset = pf_find_ruleset(pca->anchor);
        if (ruleset == NULL)
        pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
            pca->r_num, pca->r_last, 1, 1);
        if (pca->action != PF_CHANGE_REMOVE) {
            if (newpa->ifname[0]) {
                newpa->kif = pfi_kif_attach(kif, newpa->ifname);
                pfi_kif_ref(newpa->kif);
            switch (newpa->addr.type) {
            case PF_ADDR_DYNIFTL:
                error = pfi_dynaddr_setup(&newpa->addr,
                newpa->addr.p.tbl = pfr_attach_table(ruleset,
                    newpa->addr.v.tblname);
                if (newpa->addr.p.tbl == NULL)
                goto DIOCCHANGEADDR_error;

        switch (pca->action) {
        case PF_CHANGE_ADD_HEAD:
            oldpa = TAILQ_FIRST(&pool->list);
        case PF_CHANGE_ADD_TAIL:
            oldpa = TAILQ_LAST(&pool->list, pf_palist);
            oldpa = TAILQ_FIRST(&pool->list);
            for (int i = 0; oldpa && i < pca->nr; i++)
                oldpa = TAILQ_NEXT(oldpa, entries);

        if (pca->action == PF_CHANGE_REMOVE) {
            TAILQ_REMOVE(&pool->list, oldpa, entries);
            switch (oldpa->addr.type) {
            case PF_ADDR_DYNIFTL:
                pfi_dynaddr_remove(oldpa->addr.p.dyn);
                pfr_detach_table(oldpa->addr.p.tbl);
            pfi_kif_unref(oldpa->kif);
            free(oldpa, M_PFRULE);
                TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
            else if (pca->action == PF_CHANGE_ADD_HEAD ||
                pca->action == PF_CHANGE_ADD_BEFORE)
                TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
                TAILQ_INSERT_AFTER(&pool->list, oldpa,

        pool->cur = TAILQ_FIRST(&pool->list);
        PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);

DIOCCHANGEADDR_error:
            pfi_kif_unref(newpa->kif);
            free(newpa, M_PFRULE);
        free(kif, PFI_MTYPE);
    case DIOCGETRULESETS: {
        struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
        struct pf_ruleset *ruleset;
        struct pf_anchor *anchor;

        pr->path[sizeof(pr->path) - 1] = 0;
        if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
        if (ruleset->anchor == NULL) {
            /* XXX kludge for pf_main_ruleset */
            RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors)
                if (anchor->parent == NULL)
            RB_FOREACH(anchor, pf_anchor_node,
                &ruleset->anchor->children)

    case DIOCGETRULESET: {
        struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
        struct pf_ruleset *ruleset;
        struct pf_anchor *anchor;

        pr->path[sizeof(pr->path) - 1] = 0;
        if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
        if (ruleset->anchor == NULL) {
            /* XXX kludge for pf_main_ruleset */
            RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors)
                if (anchor->parent == NULL && nr++ == pr->nr) {
                    strlcpy(pr->name, anchor->name,
            RB_FOREACH(anchor, pf_anchor_node,
                &ruleset->anchor->children)
                if (nr++ == pr->nr) {
                    strlcpy(pr->name, anchor->name,
    case DIOCRCLRTABLES: {
        struct pfioc_table *io = (struct pfioc_table *)addr;

        if (io->pfrio_esize != 0) {
        error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
            io->pfrio_flags | PFR_FLAG_USERIOCTL);

    case DIOCRADDTABLES: {
        struct pfioc_table *io = (struct pfioc_table *)addr;
        struct pfr_table *pfrts;

        if (io->pfrio_esize != sizeof(struct pfr_table)) {
        totlen = io->pfrio_size * sizeof(struct pfr_table);
        pfrts = malloc(totlen, M_TEMP, M_WAITOK);
        error = copyin(io->pfrio_buffer, pfrts, totlen);
            free(pfrts, M_TEMP);
        error = pfr_add_tables(pfrts, io->pfrio_size,
            &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
        free(pfrts, M_TEMP);
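
    /*
     * The DIOCR* table and address cases below all follow the same shape:
     * validate pfrio_esize against the element type, copyin() pfrio_size
     * elements, call the pfr_*() table backend with PFR_FLAG_USERIOCTL,
     * and copyout() feedback when the caller asked for it.
     */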
    case DIOCRDELTABLES: {
        struct pfioc_table *io = (struct pfioc_table *)addr;
        struct pfr_table *pfrts;

        if (io->pfrio_esize != sizeof(struct pfr_table)) {
        totlen = io->pfrio_size * sizeof(struct pfr_table);
        pfrts = malloc(totlen, M_TEMP, M_WAITOK);
        error = copyin(io->pfrio_buffer, pfrts, totlen);
            free(pfrts, M_TEMP);
        error = pfr_del_tables(pfrts, io->pfrio_size,
            &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
        free(pfrts, M_TEMP);

    case DIOCRGETTABLES: {
        struct pfioc_table *io = (struct pfioc_table *)addr;
        struct pfr_table *pfrts;

        if (io->pfrio_esize != sizeof(struct pfr_table)) {
        totlen = io->pfrio_size * sizeof(struct pfr_table);
        pfrts = malloc(totlen, M_TEMP, M_WAITOK);
        error = pfr_get_tables(&io->pfrio_table, pfrts,
            &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
            error = copyout(pfrts, io->pfrio_buffer, totlen);
        free(pfrts, M_TEMP);

    case DIOCRGETTSTATS: {
        struct pfioc_table *io = (struct pfioc_table *)addr;
        struct pfr_tstats *pfrtstats;

        if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
        totlen = io->pfrio_size * sizeof(struct pfr_tstats);
        pfrtstats = malloc(totlen, M_TEMP, M_WAITOK);
        error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
            &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
            error = copyout(pfrtstats, io->pfrio_buffer, totlen);
        free(pfrtstats, M_TEMP);

    case DIOCRCLRTSTATS: {
        struct pfioc_table *io = (struct pfioc_table *)addr;
        struct pfr_table *pfrts;

        if (io->pfrio_esize != sizeof(struct pfr_table)) {
        totlen = io->pfrio_size * sizeof(struct pfr_table);
        pfrts = malloc(totlen, M_TEMP, M_WAITOK);
        error = copyin(io->pfrio_buffer, pfrts, totlen);
            free(pfrts, M_TEMP);
        error = pfr_clr_tstats(pfrts, io->pfrio_size,
            &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
        free(pfrts, M_TEMP);

    case DIOCRSETTFLAGS: {
        struct pfioc_table *io = (struct pfioc_table *)addr;
        struct pfr_table *pfrts;

        if (io->pfrio_esize != sizeof(struct pfr_table)) {
        totlen = io->pfrio_size * sizeof(struct pfr_table);
        pfrts = malloc(totlen, M_TEMP, M_WAITOK);
        error = copyin(io->pfrio_buffer, pfrts, totlen);
            free(pfrts, M_TEMP);
        error = pfr_set_tflags(pfrts, io->pfrio_size,
            io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
            &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
        free(pfrts, M_TEMP);

    case DIOCRCLRADDRS: {
        struct pfioc_table *io = (struct pfioc_table *)addr;

        if (io->pfrio_esize != 0) {
        error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
            io->pfrio_flags | PFR_FLAG_USERIOCTL);

    case DIOCRADDADDRS: {
        struct pfioc_table *io = (struct pfioc_table *)addr;
        struct pfr_addr *pfras;

        if (io->pfrio_esize != sizeof(struct pfr_addr)) {
        totlen = io->pfrio_size * sizeof(struct pfr_addr);
        pfras = malloc(totlen, M_TEMP, M_WAITOK);
        error = copyin(io->pfrio_buffer, pfras, totlen);
            free(pfras, M_TEMP);
        error = pfr_add_addrs(&io->pfrio_table, pfras,
            io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
            PFR_FLAG_USERIOCTL);
        if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
            error = copyout(pfras, io->pfrio_buffer, totlen);
        free(pfras, M_TEMP);

    case DIOCRDELADDRS: {
        struct pfioc_table *io = (struct pfioc_table *)addr;
        struct pfr_addr *pfras;

        if (io->pfrio_esize != sizeof(struct pfr_addr)) {
        totlen = io->pfrio_size * sizeof(struct pfr_addr);
        pfras = malloc(totlen, M_TEMP, M_WAITOK);
        error = copyin(io->pfrio_buffer, pfras, totlen);
            free(pfras, M_TEMP);
        error = pfr_del_addrs(&io->pfrio_table, pfras,
            io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
            PFR_FLAG_USERIOCTL);
        if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
            error = copyout(pfras, io->pfrio_buffer, totlen);
        free(pfras, M_TEMP);
	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen, count;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		count = max(io->pfrio_size, io->pfrio_size2);
		totlen = count * sizeof(struct pfr_addr);
		pfras = malloc(totlen, M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_set_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
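	/*
	 * DIOCRSETADDRS replaces the whole address list of a table, so its
	 * buffer is two-way: the first pfrio_size entries carry the new
	 * list in, and up to pfrio_size2 entries of feedback (deleted or
	 * changed addresses) come back out.  Sizing the kernel copy with
	 * max(pfrio_size, pfrio_size2) covers both directions.
	 */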
	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = malloc(totlen, M_TEMP, M_WAITOK);
		PF_RULES_RLOCK();
		error = pfr_get_addrs(&io->pfrio_table, pfras,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_astats *pfrastats;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_astats);
		pfrastats = malloc(totlen, M_TEMP, M_WAITOK);
		PF_RULES_RLOCK();
		error = pfr_get_astats(&io->pfrio_table, pfrastats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfrastats, io->pfrio_buffer, totlen);
		free(pfrastats, M_TEMP);
		break;
	}
	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = malloc(totlen, M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_astats(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = malloc(totlen, M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_RLOCK();
		error = pfr_tst_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = malloc(totlen, M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_ina_define(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfras, M_TEMP);
		break;
	}
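	/*
	 * DIOCRINADEFINE does not touch the active table: it defines the
	 * contents of an *inactive* table instance tied to the transaction
	 * ticket obtained from DIOCXBEGIN, and that instance only becomes
	 * visible when the enclosing transaction is committed.
	 */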
	case DIOCOSFPADD: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		PF_RULES_WLOCK();
		error = pf_osfp_add(io);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCOSFPGET: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		PF_RULES_RLOCK();
		error = pf_osfp_get(io);
		PF_RULES_RUNLOCK();
		break;
	}
	case DIOCXBEGIN: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioes, *ioe;
		size_t totlen;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = malloc(totlen, M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_begin(&table,
				    &ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			    }
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		error = copyout(ioes, io->array, totlen);
		free(ioes, M_TEMP);
		break;
	}
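	/*
	 * DIOCXBEGIN/DIOCXROLLBACK/DIOCXCOMMIT implement pfctl's
	 * all-or-nothing ruleset load: begin opens an inactive copy of
	 * each requested ruleset and hands back one ticket per element,
	 * and the same tickets must be presented again to roll back or
	 * commit.  Stale tickets are how concurrent loaders are detected.
	 */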
	case DIOCXROLLBACK: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe, *ioes;
		size_t totlen;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = malloc(totlen, M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_rollback_altq(ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_rollback(&table,
				    ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			    }
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		free(ioes, M_TEMP);
		break;
	}
	case DIOCXCOMMIT: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe, *ioes;
		struct pf_ruleset *rs;
		size_t totlen;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = malloc(totlen, M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		/* First, make sure everything will succeed. */
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if (!V_altqs_inactive_open || ioe->ticket !=
				    V_ticket_altqs_inactive) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* Now do the commit - no errors should happen here. */
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if ((error = pf_commit_altq(ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_commit(&table,
				    ioe->ticket, NULL, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			    }
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		free(ioes, M_TEMP);
		break;
	}
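	/*
	 * The commit above runs in two passes on purpose: the first pass
	 * only validates tickets while nothing has been swapped in yet, so
	 * a stale ticket can still fail the whole transaction cleanly.
	 * Once the second pass starts swapping inactive rulesets into
	 * place, an error can no longer be undone, hence the "really bad"
	 * goto fail paths.
	 */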
	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
		struct pf_srchash *sh;
		struct pf_src_node *n, *p, *pstore;
		u_int i, nr = 0;

		if (psn->psn_len == 0) {
			for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
			    i++, sh++) {
				PF_HASHROW_LOCK(sh);
				LIST_FOREACH(n, &sh->nodes, entry)
					nr++;
				PF_HASHROW_UNLOCK(sh);
			}
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			break;
		}

		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK);
		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry) {
				int secs = time_uptime, diff;

				if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
					break;

				bcopy(n, p, sizeof(struct pf_src_node));
				if (n->rule.ptr != NULL)
					p->rule.nr = n->rule.ptr->nr;
				p->creation = secs - p->creation;
				if (p->expire > secs)
					p->expire -= secs;
				else
					p->expire = 0;

				/* Adjust the connection rate estimate. */
				diff = secs - n->conn_rate.last;
				if (diff >= n->conn_rate.seconds)
					p->conn_rate.count = 0;
				else
					p->conn_rate.count -=
					    n->conn_rate.count * diff /
					    n->conn_rate.seconds;

				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(sh);
		}
		error = copyout(pstore, psn->psn_src_nodes,
		    sizeof(struct pf_src_node) * nr);
		if (error) {
			free(pstore, M_TEMP);
			break;
		}
		psn->psn_len = sizeof(struct pf_src_node) * nr;
		free(pstore, M_TEMP);
		break;
	}
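	/*
	 * DIOCGETSRCNODES uses the usual two-call sizing protocol: a first
	 * call with psn_len == 0 only counts the nodes and reports the
	 * required buffer size, and a second call with psn_len set copies
	 * the snapshot out.  Roughly (a sketch, not taken from pfctl):
	 *
	 *	struct pfioc_src_nodes psn;
	 *
	 *	bzero(&psn, sizeof(psn));
	 *	if (ioctl(dev, DIOCGETSRCNODES, &psn) == -1)
	 *		err(1, "DIOCGETSRCNODES");
	 *	psn.psn_buf = malloc(psn.psn_len);
	 *	if (ioctl(dev, DIOCGETSRCNODES, &psn) == -1)
	 *		err(1, "DIOCGETSRCNODES");
	 */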
	case DIOCCLRSRCNODES: {
		pf_clear_srcnodes(NULL);
		pf_purge_expired_src_nodes();
		break;
	}

	case DIOCKILLSRCNODES:
		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
		break;
	case DIOCSETHOSTID: {
		u_int32_t *hostid = (u_int32_t *)addr;

		PF_RULES_WLOCK();
		if (*hostid == 0)
			V_pf_status.hostid = arc4random();
		else
			V_pf_status.hostid = *hostid;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCOSFPFLUSH:
		PF_RULES_WLOCK();
		pf_osfp_flush();
		PF_RULES_WUNLOCK();
		break;
	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;
		struct pfi_kif *ifstore;
		size_t bufsiz;

		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
			error = ENODEV;
			break;
		}

		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
		ifstore = malloc(bufsiz, M_TEMP, M_WAITOK);
		PF_RULES_RLOCK();
		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
		PF_RULES_RUNLOCK();
		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
		free(ifstore, M_TEMP);
		break;
	}
	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		PF_RULES_WLOCK();
		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		PF_RULES_WLOCK();
		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		PF_RULES_WUNLOCK();
		break;
	}
	default:
		error = ENODEV;
		break;
	}
fail:
	if (sx_xlocked(&pf_ioctl_lock))
		sx_xunlock(&pf_ioctl_lock);
	CURVNET_RESTORE();

	return (error);
}
void
pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
{

	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_uptime - st->creation);
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_uptime)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_uptime);

	sp->direction = st->direction;
	sp->log = st->log;
	sp->timeout = st->timeout;
	sp->state_flags = st->state_flags;
	if (st->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	sp->id = st->id;
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	pf_state_counter_hton(st->packets[0], sp->packets[0]);
	pf_state_counter_hton(st->packets[1], sp->packets[1]);
	pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
	pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
}
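/*
 * Everything pfsync_state_export() writes into struct pfsync_state is
 * either byte-order independent or explicitly converted to network byte
 * order (htonl(), pf_state_peer_hton(), pf_state_counter_hton()), since
 * the same structure is put on the wire by pfsync(4) between peers of
 * possibly different endianness.
 */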
static void
pf_tbladdr_copyout(struct pf_addr_wrap *aw)
{
	struct pfr_ktable *kt;

	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
	kt = aw->p.tbl;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	aw->p.tbl = NULL;
	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
	    kt->pfrkt_cnt : -1;
}
/*
 * XXX - Check for version mismatch!!!
 */
static void
pf_clear_states(void)
{
	struct pf_state	*s;
	u_int i;

	for (i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			s->timeout = PFTM_PURGE;
			/* Don't send out individual delete messages. */
			s->state_flags |= PFSTATE_NOSYNC;
			pf_unlink_state(s, PF_ENTER_LOCKED);
			goto relock;
		}
		PF_HASHROW_UNLOCK(ih);
	}
}
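/*
 * The relock dance above is needed because pf_unlink_state() called with
 * PF_ENTER_LOCKED returns with the hash row lock dropped: the
 * LIST_FOREACH cannot safely continue after an unlink, so the row is
 * re-taken from the top until it no longer contains any states.
 */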
static int
pf_clear_tables(void)
{
	struct pfioc_table io;
	int error;

	bzero(&io, sizeof(io));

	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
	    io.pfrio_flags);

	return (error);
}
static void
pf_clear_srcnodes(struct pf_src_node *n)
{
	struct pf_state *s;
	int i;

	for (i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (n == NULL || n == s->src_node)
				s->src_node = NULL;
			if (n == NULL || n == s->nat_src_node)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	if (n == NULL) {
		struct pf_srchash *sh;

		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry) {
				n->expire = 1;
				n->states = 0;
			}
			PF_HASHROW_UNLOCK(sh);
		}
	} else {
		/* XXX: hash slot should already be locked here. */
		n->expire = 1;
		n->states = 0;
	}
}
static void
pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
{
	struct pf_src_node_list kill;

	LIST_INIT(&kill);
	for (int i = 0; i <= pf_srchashmask; i++) {
		struct pf_srchash *sh = &V_pf_srchash[i];
		struct pf_src_node *sn, *tmp;

		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
			if (PF_MATCHA(psnk->psnk_src.neg,
			      &psnk->psnk_src.addr.v.a.addr,
			      &psnk->psnk_src.addr.v.a.mask,
			      &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			      &psnk->psnk_dst.addr.v.a.addr,
			      &psnk->psnk_dst.addr.v.a.mask,
			      &sn->raddr, sn->af)) {
				pf_unlink_src_node(sn);
				LIST_INSERT_HEAD(&kill, sn, entry);
				sn->expire = 1;
			}
		PF_HASHROW_UNLOCK(sh);
	}

	for (int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_state *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (s->src_node && s->src_node->expire == 1)
				s->src_node = NULL;
			if (s->nat_src_node && s->nat_src_node->expire == 1)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	psnk->psnk_killed = pf_free_src_nodes(&kill);
}
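/*
 * pf_kill_srcnodes() relies on sn->expire == 1 as a transient "doomed"
 * marker: matching nodes are unlinked and collected on a local kill list
 * first, then a second sweep over the state table clears any pointers to
 * them before pf_free_src_nodes() actually frees the memory.
 */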
/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Duplicate pfctl -Fa operation to get rid of as much as we can.
 */
static int
shutdown_pf(void)
{
	int error = 0;
	u_int32_t t[5];
	char nn = '\0';

	do {
		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
			break;
		}
		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
			break;		/* XXX: rollback? */
		}

		/* XXX: these should always succeed here */
		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
		pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
		pf_commit_rules(t[4], PF_RULESET_RDR, &nn);

		if ((error = pf_clear_tables()) != 0)
			break;

#ifdef ALTQ
		if ((error = pf_begin_altq(&t[0])) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
			break;
		}
		pf_commit_altq(t[0]);
#endif

		pf_clear_states();

		pf_clear_srcnodes(NULL);

		/* status does not use malloc'ed memory, so no cleanup is needed */
		/* fingerprints and interfaces have their own cleanup code */

		/* Free counters last as we updated them during shutdown. */
		counter_u64_free(V_pf_default_rule.states_cur);
		counter_u64_free(V_pf_default_rule.states_tot);
		counter_u64_free(V_pf_default_rule.src_nodes);

		for (int i = 0; i < PFRES_MAX; i++)
			counter_u64_free(V_pf_status.counters[i]);
		for (int i = 0; i < LCNT_MAX; i++)
			counter_u64_free(V_pf_status.lcounters[i]);
		for (int i = 0; i < FCNT_MAX; i++)
			counter_u64_free(V_pf_status.fcounters[i]);
		for (int i = 0; i < SCNT_MAX; i++)
			counter_u64_free(V_pf_status.scounters[i]);
	} while (0);

	return (error);
}
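/*
 * shutdown_pf() mirrors what "pfctl -Fa" does from userland: every
 * ruleset type is rolled over to a fresh empty instance through the
 * normal begin/commit path, then tables, ALTQ queues, states and source
 * nodes are flushed, and finally the per-vnet counters are released.
 */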
#ifdef INET
static int
pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_IN, ifp, m, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}

	return (chk);
}

static int
pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_OUT, ifp, m, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}

	return (chk);
}
#endif /* INET */

#ifdef INET6
static int
pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	int chk;

	/*
	 * In case of loopback traffic IPv6 uses the real interface in
	 * order to support scoped addresses. In order to support stateful
	 * filtering we change this to lo0, as is the case with IPv4.
	 */
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_IN, (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp);
	CURVNET_RESTORE();
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}

	return (chk);
}

static int
pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	int chk;

	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_OUT, ifp, m, inp);
	CURVNET_RESTORE();
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}

	return (chk);
}
#endif /* INET6 */
static int
hook_pf(void)
{
#ifdef INET
	struct pfil_head *pfh_inet;
#endif
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	if (V_pf_pfil_hooked)
		return (0);

#ifdef INET
	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ESRCH); /* XXX */
	pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
	pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
#endif
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL) {
#ifdef INET
		pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
		    pfh_inet);
		pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
		    pfh_inet);
#endif
		return (ESRCH); /* XXX */
	}
	pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
	pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
#endif

	V_pf_pfil_hooked = 1;

	return (0);
}
static int
dehook_pf(void)
{
#ifdef INET
	struct pfil_head *pfh_inet;
#endif
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	if (V_pf_pfil_hooked == 0)
		return (0);

#ifdef INET
	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ESRCH); /* XXX */
	pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
	    pfh_inet);
	pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
	    pfh_inet);
#endif
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL)
		return (ESRCH); /* XXX */
	pfil_remove_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
	    pfh_inet6);
	pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
	    pfh_inet6);
#endif

	V_pf_pfil_hooked = 0;

	return (0);
}
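/*
 * hook_pf()/dehook_pf() must stay symmetric: every pfil_add_hook() in
 * the one has a matching pfil_remove_hook() in the other, keyed by the
 * same function pointer and PFIL_IN/PFIL_OUT direction, so that a failed
 * IPv6 attach can back out the already-installed IPv4 hooks.
 */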
static void
pf_load_vnet(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		V_pf_pfil_hooked = 0;
		TAILQ_INIT(&V_pf_tags);
		TAILQ_INIT(&V_pf_qids);
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();

	pfattach_vnet();
	V_pf_vnet_active = 1;
}

static int
pf_load(void)
{
	int error;

	rw_init(&pf_rules_lock, "pf rulesets");
	sx_init(&pf_ioctl_lock, "pf ioctl");

	pf_mtag_initialize();

	pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
	if (pf_dev == NULL)
		return (ENOMEM);

	pf_end_threads = 0;
	error = kproc_create(pf_purge_thread, NULL, NULL, 0, 0, "pf purge");
	if (error != 0)
		return (error);

	pfi_initialize();

	return (0);
}
static void
pf_unload_vnet(void)
{
	int error;

	V_pf_vnet_active = 0;
	V_pf_status.running = 0;
	swi_remove(V_pf_swi_cookie);
	error = dehook_pf();
	if (error) {
		/*
		 * Should not happen!
		 * XXX Due to error code ESRCH, kldunload will show
		 * a message like 'No such process'.
		 */
		printf("%s: pfil unregistration failed\n", __FUNCTION__);
		return;
	}

	pf_unload_vnet_purge();

	PF_RULES_WLOCK();
	shutdown_pf();
	PF_RULES_WUNLOCK();

	pf_normalize_cleanup();
	PF_RULES_WLOCK();
	pfi_cleanup_vnet();
	PF_RULES_WUNLOCK();
	pfr_cleanup();
	pf_osfp_flush();
	pf_cleanup();
	if (IS_DEFAULT_VNET(curvnet))
		pf_mtag_cleanup();
}
static int
pf_unload(void)
{
	int error = 0;

	PF_RULES_WLOCK();
	pf_end_threads = 1;
	while (pf_end_threads < 2) {
		wakeup_one(pf_purge_thread);
		rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftmo", 0);
	}
	PF_RULES_WUNLOCK();

	destroy_dev(pf_dev);

	pfi_cleanup();

	rw_destroy(&pf_rules_lock);
	sx_destroy(&pf_ioctl_lock);

	return (error);
}
static void
vnet_pf_init(void *unused __unused)
{

	pf_load_vnet();
}
VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_init, NULL);

static void
vnet_pf_uninit(const void *unused __unused)
{

	pf_unload_vnet();
}
VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_uninit, NULL);
static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch(type) {
	case MOD_LOAD:
		error = pf_load();
		break;
	case MOD_QUIESCE:
		/*
		 * Module should not be unloaded due to race conditions.
		 */
		error = EBUSY;
		break;
	case MOD_UNLOAD:
		error = pf_unload();
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
MODULE_VERSION(pf, PF_MODVER);