/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/ucred.h>

#include <net/if_var.h>
#include <net/route.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netpfil/pf/pf_nv.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#ifdef ALTQ
#include <net/altq/altq.h>
#endif
static struct pf_kpool	*pf_get_kpool(char *, u_int32_t, u_int8_t, u_int32_t,
			    u_int8_t, u_int8_t, u_int8_t);

static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
static void		 pf_empty_kpool(struct pf_kpalist *);
static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
			    struct thread *);
#ifdef ALTQ
static int		 pf_begin_altq(u_int32_t *);
static int		 pf_rollback_altq(u_int32_t);
static int		 pf_commit_altq(u_int32_t);
static int		 pf_enable_altq(struct pf_altq *);
static int		 pf_disable_altq(struct pf_altq *);
static u_int32_t	 pf_qname2qid(char *);
static void		 pf_qid_unref(u_int32_t);
#endif /* ALTQ */
static int		 pf_begin_rules(u_int32_t *, int, const char *);
static int		 pf_rollback_rules(u_int32_t, int, char *);
static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
static void		 pf_hash_rule(MD5_CTX *, struct pf_krule *);
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int		 pf_commit_rules(u_int32_t, int, char *);
static int		 pf_addr_setup(struct pf_kruleset *,
			    struct pf_addr_wrap *, sa_family_t);
static void		 pf_addr_copyout(struct pf_addr_wrap *);
static void		 pf_src_node_copy(const struct pf_ksrc_node *,
			    struct pf_src_node *);
#ifdef ALTQ
static int		 pf_export_kaltq(struct pf_altq *,
			    struct pfioc_altq_v1 *, size_t);
static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
			    struct pf_altq *, size_t);
#endif /* ALTQ */
VNET_DEFINE(struct pf_krule,	pf_default_rule);
#define	V_pf_default_rule	VNET(pf_default_rule)

#ifdef ALTQ
VNET_DEFINE_STATIC(int,		pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)
#endif

#define	TAGID_MAX	50000
struct pf_tagname {
	TAILQ_ENTRY(pf_tagname)	namehash_entries;
	TAILQ_ENTRY(pf_tagname)	taghash_entries;
	char			name[PF_TAG_NAME_SIZE];
	uint16_t		tag;
	int			ref;
};

struct pf_tagset {
	TAILQ_HEAD(, pf_tagname)	*namehash;
	TAILQ_HEAD(, pf_tagname)	*taghash;
	unsigned int			 mask;
	uint32_t			 seed;
	BITSET_DEFINE(, TAGID_MAX)	 avail;
};

VNET_DEFINE(struct pf_tagset, pf_tags);
#define	V_pf_tags	VNET(pf_tags)
static unsigned int pf_rule_tag_hashsize;
#define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
    &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) rule tag hashtable");

#ifdef ALTQ
VNET_DEFINE(struct pf_tagset, pf_qids);
#define	V_pf_qids	VNET(pf_qids)
static unsigned int pf_queue_tag_hashsize;
#define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
    &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) queue tag hashtable");
#endif

VNET_DEFINE(uma_zone_t,	 pf_tag_z);
#define	V_pf_tag_z	VNET(pf_tag_z)
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
			    unsigned int);
static void		 pf_cleanup_tagset(struct pf_tagset *);
static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
static u_int16_t	 tagname2tag(struct pf_tagset *, char *);
static u_int16_t	 pf_tagname2tag(char *);
static void		 tag_unref(struct pf_tagset *, u_int16_t);

#define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_states(void);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(struct pf_ksrc_node *);
static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
static int		 pf_keepcounters(struct pfioc_nv *);
static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);
/*
 * Wrapper functions for pfil(9) hooks
 */
static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, int flags, struct inpcb *inp);
static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, int flags, struct inpcb *inp);
#ifdef INET6
static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, int flags, struct inpcb *inp);
static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, int flags, struct inpcb *inp);
#endif /* INET6 */

static int		 hook_pf(void);
static int		 dehook_pf(void);
static int		 shutdown_pf(void);
static int		 pf_load(void);
static void		 pf_unload(void);
static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

volatile VNET_DEFINE_STATIC(int, pf_pfil_hooked);
#define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid". We primarily need this to control (global)
 * external events, e.g., eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define V_pf_vnet_active	VNET(pf_vnet_active)
struct proc *pf_purge_proc;

struct rmlock			pf_rules_lock;
struct sx			pf_ioctl_lock;
struct sx			pf_end_lock;

/* pfsync */
VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;

/* pflog */
pflog_packet_t			*pflog_packet_ptr = NULL;

extern u_long	pf_ioctl_maxcount;
static void
pfattach_vnet(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;

	pf_initialize();
	pfr_initialize();
	pfi_initialize_vnet();
	pf_normalize_init();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_kruleset(&pf_main_ruleset);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
	V_pf_default_rule.action = PF_DROP;
#else
	V_pf_default_rule.action = PF_PASS;
#endif
	V_pf_default_rule.nr = -1;
	V_pf_default_rule.rtableid = -1;

	V_pf_default_rule.evaluations = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < 2; i++) {
		V_pf_default_rule.packets[i] = counter_u64_alloc(M_WAITOK);
		V_pf_default_rule.bytes[i] = counter_u64_alloc(M_WAITOK);
	}
	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	bzero(&V_pf_status, sizeof(V_pf_status));
	V_pf_status.debug = PF_DEBUG_URGENT;

	V_pf_pfil_hooked = 0;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	for (int i = 0; i < PFRES_MAX; i++)
		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < LCNT_MAX; i++)
		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < FCNT_MAX; i++)
		V_pf_status.fcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < SCNT_MAX; i++)
		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
		/* XXXGL: leaked all above. */
		return;
}
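/*
 * Look up a rule by anchor path, ruleset ticket, action and rule number in
 * either the active or the inactive ruleset and return a pointer to its
 * redirection pool, or NULL if any lookup step fails.
 */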
static struct pf_kpool *
pf_get_kpool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_kruleset	*ruleset;
	struct pf_krule		*rule;
	int			 rs_num;

	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}
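/* Move every pool address from poola to the tail of poolb. */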
static void
pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
{
	struct pf_kpooladdr *mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}
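/*
 * Release all pool addresses on the list: detach dynamic addresses and
 * tables, drop the interface reference and free each entry.
 */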
static void
pf_empty_kpool(struct pf_kpalist *poola)
{
	struct pf_kpooladdr *pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			/* XXX: this could be unfinished pooladdr on pabuf */
			if (pa->addr.p.tbl != NULL)
				pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kkif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}
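/*
 * Unlink a rule from its queue and defer freeing it to the purge thread;
 * states may still hold a reference to the rule at this point.
 */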
static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	PF_UNLNKDRULES_LOCK();
	rule->rule_ref |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
	PF_UNLNKDRULES_UNLOCK();
}
static void
pf_free_rule(struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);
	pf_kanchor_remove(rule);
	pf_empty_kpool(&rule->rpool.list);

	pf_krule_free(rule);
}
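/*
 * Initialize a tag set: size both hash tables from the tunable (forced to a
 * power of two), seed the name hash and mark every tag ID as available.
 */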
static void
pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
    unsigned int default_size)
{
	unsigned int i;
	unsigned int hashsize;

	if (*tunable_size == 0 || !powerof2(*tunable_size))
		*tunable_size = default_size;

	hashsize = *tunable_size;
	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
	    M_WAITOK);
	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
	    M_WAITOK);
	ts->mask = hashsize - 1;
	ts->seed = arc4random();
	for (i = 0; i < hashsize; i++) {
		TAILQ_INIT(&ts->namehash[i]);
		TAILQ_INIT(&ts->taghash[i]);
	}
	BIT_FILL(TAGID_MAX, &ts->avail);
}
static void
pf_cleanup_tagset(struct pf_tagset *ts)
{
	unsigned int i;
	unsigned int hashsize;
	struct pf_tagname *t, *tmp;

	/*
	 * Only need to clean up one of the hashes as each tag is hashed
	 * into each table at the same time.
	 */
	hashsize = ts->mask + 1;
	for (i = 0; i < hashsize; i++)
		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
			uma_zfree(V_pf_tag_z, t);

	free(ts->namehash, M_PFHASH);
	free(ts->taghash, M_PFHASH);
}
static uint16_t
tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
{
	size_t len;

	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
}

static uint16_t
tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
{

	return (tag & ts->mask);
}
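/*
 * Return the tag mapped to an existing name, or allocate the lowest free
 * tag ID and enter the name into both hash tables.  Returns 0 if no tag ID
 * is available.
 */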
static u_int16_t
tagname2tag(struct pf_tagset *ts, char *tagname)
{
	struct pf_tagname	*tag;
	u_int32_t		 index;
	u_int16_t		 new_tagid;

	PF_RULES_WASSERT();

	index = tagname2hashindex(ts, tagname);
	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * new entry
	 *
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find.
	 */
	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
	/*
	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
	 * set.  It may also return a bit number greater than TAGID_MAX due
	 * to rounding of the number of bits in the vector up to a multiple
	 * of the vector word size at declaration/allocation time.
	 */
	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
		return (0);

	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);

	/* allocate and fill new struct pf_tagname */
	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref = 1;

	/* Insert into namehash */
	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);

	/* Insert into taghash */
	index = tag2hashindex(ts, new_tagid);
	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);

	return (tag->tag);
}
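/*
 * Drop a reference on a tag.  On the last reference the entry is removed
 * from both hash tables, its ID is returned to the available set and the
 * structure is freed.
 */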
static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
	struct pf_tagname	*t;
	uint16_t		 index;

	PF_RULES_WASSERT();

	index = tag2hashindex(ts, tag);
	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
		if (tag == t->tag) {
			if (--t->ref == 0) {
				TAILQ_REMOVE(&ts->taghash[index], t,
				    taghash_entries);
				index = tagname2hashindex(ts, t->name);
				TAILQ_REMOVE(&ts->namehash[index], t,
				    namehash_entries);
				/* Bits are 0-based for BIT_SET() */
				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
				uma_zfree(V_pf_tag_z, t);
			}
			break;
		}
}

static u_int16_t
pf_tagname2tag(char *tagname)
{

	return (tagname2tag(&V_pf_tags, tagname));
}
#ifdef ALTQ
static u_int32_t
pf_qname2qid(char *qname)
{

	return ((u_int32_t)tagname2tag(&V_pf_qids, qname));
}

static void
pf_qid_unref(u_int32_t qid)
{

	tag_unref(&V_pf_qids, (u_int16_t)qid);
}
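/*
 * ALTQ configuration is replaced transactionally: pf_begin_altq() opens a
 * fresh inactive list under a new ticket, pf_rollback_altq() throws it away
 * and pf_commit_altq() swaps it with the active list and attaches the new
 * queueing disciplines.
 */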
static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}
static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	V_altqs_inactive_open = 0;
	return (error);
}
static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
	struct pf_altq		*altq, *tmp;
	int			 err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = V_pf_altqs_active;
	old_altq_ifs = V_pf_altq_ifs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_pf_altq_ifs_inactive = old_altq_ifs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);

	V_altqs_inactive_open = 0;
	return (error);
}
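/*
 * Enable or disable the queueing discipline on an altq's interface,
 * setting up or clearing the token bucket regulator as appropriate.
 */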
static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}
static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);
	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one.  if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}
static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
	struct ifnet	*ifp1;
	int		 error = 0;

	/* Deactivate the interface in question */
	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
	    (remove && ifp1 == ifp)) {
		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
	} else {
		error = altq_add(ifp1, altq);

		if (ticket != V_ticket_altqs_inactive)
			error = EBUSY;

		if (error)
			free(altq, M_PFALTQ);
	}

	return (error);
}
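/*
 * Rebuild the ALTQ configuration when an interface is attached or detached:
 * abort any open userland transaction, copy the active queues into a new
 * inactive set, re-evaluate each entry against the interface and commit.
 */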
void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/*
	 * No need to re-evaluate the configuration for events on interfaces
	 * that do not support ALTQ, as it's not possible for such
	 * interfaces to be part of the configuration.
	 */
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
	}
	if (error)
		goto out;
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
			error = EBUSY;
			free(a2, M_PFALTQ);
			break;
		}
		a2->altq_disc = NULL;
		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
			if (strncmp(a3->ifname, a2->ifname,
			    IFNAMSIZ) == 0) {
				a2->altq_disc = a3->altq_disc;
				break;
			}
		}
		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

out:
	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
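/*
 * Rulesets follow the same begin/rollback/commit pattern: pf_begin_rules()
 * empties the inactive ruleset and hands out a ticket that must accompany
 * every subsequent change until the transaction is committed or rolled back.
 */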
static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_kruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}
static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}
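/*
 * Helpers that feed rule fields into an MD5 context.  Multi-byte fields are
 * converted to network byte order first, so the resulting ruleset checksum
 * is identical across platforms and can be compared by pfsync peers.
 */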
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{

	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_krule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
		PF_MD5_UPD_STR(rule, label[i]);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
static bool
pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
{
	MD5_CTX		ctx[2];
	u_int8_t	digest[2][PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx[0]);
	MD5Init(&ctx[1]);
	pf_hash_rule(&ctx[0], a);
	pf_hash_rule(&ctx[1], b);
	MD5Final(digest[0], &ctx[0]);
	MD5Final(digest[1], &ctx[1]);

	return (memcmp(digest[0], digest[1], PF_MD5_DIGEST_LENGTH) == 0);
}
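/*
 * Swap the inactive ruleset into place, optionally carrying the counters of
 * matching old rules over to their successors, then unlink and purge the
 * previous rules.
 */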
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule, **old_array, *tail;
	struct pf_krulequeue	*old_rules;
	int			 error;
	u_int32_t		 old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;

	/* Attempt to preserve counter information. */
	if (V_pf_status.keep_counters) {
		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
		    entries) {
			tail = TAILQ_FIRST(old_rules);
			while ((tail != NULL) && ! pf_krule_compare(tail, rule))
				tail = TAILQ_NEXT(tail, entries);
			if (tail != NULL) {
				counter_u64_add(rule->evaluations,
				    counter_u64_fetch(tail->evaluations));
				counter_u64_add(rule->packets[0],
				    counter_u64_fetch(tail->packets[0]));
				counter_u64_add(rule->packets[1],
				    counter_u64_fetch(tail->packets[1]));
				counter_u64_add(rule->bytes[0],
				    counter_u64_fetch(tail->bytes[0]));
				counter_u64_add(rule->bytes[1],
				    counter_u64_fetch(tail->bytes[1]));
			}
		}
	}

	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_kruleset(rs);

	return (0);
}
static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_krule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}
static int
pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}

static void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}
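/*
 * Export a kernel source node to its userland representation, flattening
 * counters and converting absolute times to the relative values userland
 * expects.
 */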
static void
pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
{
	int	secs = time_uptime, diff;

	bzero(out, sizeof(struct pf_src_node));

	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));

	if (in->rule.ptr != NULL)
		out->rule.nr = in->rule.ptr->nr;

	for (int i = 0; i < 2; i++) {
		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
		out->packets[i] = counter_u64_fetch(in->packets[i]);
	}

	out->states = in->states;
	out->conn = in->conn;
	out->af = in->af;
	out->ruletype = in->ruletype;

	out->creation = secs - in->creation;
	if (out->expire > secs)
		out->expire -= secs;
	else
		out->expire = 0;

	/* Adjust the connection rate estimate. */
	diff = secs - in->conn_rate.last;
	if (diff >= in->conn_rate.seconds)
		out->conn_rate.count = 0;
	else
		out->conn_rate.count -=
		    in->conn_rate.count * diff /
		    in->conn_rate.seconds;
}
/*
 * Handle export of struct pf_kaltq to user binaries that may be using any
 * version of struct pf_altq.
 */
static int
pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) exported_q->x = q->x
#define COPY(x) \
	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
#define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
#define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)

	switch (version) {
	case 0: {
		struct pf_altq_v0 *exported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		exported_q->tbrsize = SATU16(q->tbrsize);
		exported_q->ifbandwidth = SATU32(q->ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		exported_q->bandwidth = SATU32(q->bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
#define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
	    SATU32(q->pq_u.hfsc_opts.x)
			ASSIGN_OPT_SATU32(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT_SATU32(rtsc_m2);

			ASSIGN_OPT_SATU32(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT_SATU32(lssc_m2);

			ASSIGN_OPT_SATU32(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT_SATU32(ulsc_m2);

			ASSIGN_OPT(flags);
#undef ASSIGN_OPT
#undef ASSIGN_OPT_SATU32
		}
		break;
	}
	case 1: {
		struct pf_altq_v1 *exported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY
#undef SATU16
#undef SATU32

	return (0);
}
/*
 * Handle import to struct pf_kaltq of struct pf_altq from user binaries
 * that may be using any version of it.
 */
static int
pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) q->x = imported_q->x
#define COPY(x) \
	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))

	switch (version) {
	case 0: {
		struct pf_altq_v0 *imported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (imported_q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x

			/*
			 * The m1 and m2 parameters are being copied from
			 * 32-bit to 64-bit.
			 */
			ASSIGN_OPT(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT(rtsc_m2);

			ASSIGN_OPT(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT(lssc_m2);

			ASSIGN_OPT(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT(ulsc_m2);

			ASSIGN_OPT(flags);
#undef ASSIGN_OPT
		}
		break;
	}
	case 1: {
		struct pf_altq_v1 *imported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY

	return (0);
}
static struct pf_altq *
pf_altq_get_nth_active(u_int32_t n)
{
	struct pf_altq		*altq;
	u_int32_t		 nr;

	nr = 0;
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if (nr == n)
			return (altq);
		nr++;
	}

	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
		if (nr == n)
			return (altq);
		nr++;
	}

	return (NULL);
}
#endif /* ALTQ */
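/* Free a rule and the per-rule counters hanging off it. */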
static void
pf_krule_free(struct pf_krule *rule)
{
	if (rule == NULL)
		return;

	counter_u64_free(rule->evaluations);
	for (int i = 0; i < 2; i++) {
		counter_u64_free(rule->packets[i]);
		counter_u64_free(rule->bytes[i]);
	}
	counter_u64_free(rule->states_cur);
	counter_u64_free(rule->states_tot);
	counter_u64_free(rule->src_nodes);
	free(rule, M_PFRULE);
}
static void
pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
    struct pf_pooladdr *pool)
{

	bzero(pool, sizeof(*pool));
	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
}

static void
pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
    struct pf_kpooladdr *kpool)
{

	bzero(kpool, sizeof(*kpool));
	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
	strlcpy(kpool->ifname, pool->ifname, sizeof(kpool->ifname));
}
static void
pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool)
{
	bzero(pool, sizeof(*pool));

	bcopy(&kpool->key, &pool->key, sizeof(pool->key));
	bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter));

	pool->tblidx = kpool->tblidx;
	pool->proxy_port[0] = kpool->proxy_port[0];
	pool->proxy_port[1] = kpool->proxy_port[1];
	pool->opts = kpool->opts;
}

static int
pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
{
	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");

	bzero(kpool, sizeof(*kpool));

	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));

	kpool->tblidx = pool->tblidx;
	kpool->proxy_port[0] = pool->proxy_port[0];
	kpool->proxy_port[1] = pool->proxy_port[1];
	kpool->opts = pool->opts;

	return (0);
}
static void
pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
{

	bzero(rule, sizeof(*rule));

	bcopy(&krule->src, &rule->src, sizeof(rule->src));
	bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));

	for (int i = 0; i < PF_SKIP_COUNT; ++i) {
		if (krule->skip[i].ptr == NULL)
			rule->skip[i].nr = -1;
		else
			rule->skip[i].nr = krule->skip[i].ptr->nr;
	}

	strlcpy(rule->label, krule->label[0], sizeof(rule->label));
	strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
	strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
	strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
	strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
	strlcpy(rule->match_tagname, krule->match_tagname,
	    sizeof(rule->match_tagname));
	strlcpy(rule->overload_tblname, krule->overload_tblname,
	    sizeof(rule->overload_tblname));

	pf_kpool_to_pool(&krule->rpool, &rule->rpool);

	rule->evaluations = counter_u64_fetch(krule->evaluations);
	for (int i = 0; i < 2; i++) {
		rule->packets[i] = counter_u64_fetch(krule->packets[i]);
		rule->bytes[i] = counter_u64_fetch(krule->bytes[i]);
	}

	/* kif, anchor, overload_tbl are not copied over. */

	rule->os_fingerprint = krule->os_fingerprint;

	rule->rtableid = krule->rtableid;
	bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout));
	rule->max_states = krule->max_states;
	rule->max_src_nodes = krule->max_src_nodes;
	rule->max_src_states = krule->max_src_states;
	rule->max_src_conn = krule->max_src_conn;
	rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit;
	rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds;
	rule->qid = krule->qid;
	rule->pqid = krule->pqid;
	rule->nr = krule->nr;
	rule->prob = krule->prob;
	rule->cuid = krule->cuid;
	rule->cpid = krule->cpid;

	rule->return_icmp = krule->return_icmp;
	rule->return_icmp6 = krule->return_icmp6;
	rule->max_mss = krule->max_mss;
	rule->tag = krule->tag;
	rule->match_tag = krule->match_tag;
	rule->scrub_flags = krule->scrub_flags;

	bcopy(&krule->uid, &rule->uid, sizeof(krule->uid));
	bcopy(&krule->gid, &rule->gid, sizeof(krule->gid));

	rule->rule_flag = krule->rule_flag;
	rule->action = krule->action;
	rule->direction = krule->direction;
	rule->log = krule->log;
	rule->logif = krule->logif;
	rule->quick = krule->quick;
	rule->ifnot = krule->ifnot;
	rule->match_tag_not = krule->match_tag_not;
	rule->natpass = krule->natpass;

	rule->keep_state = krule->keep_state;
	rule->af = krule->af;
	rule->proto = krule->proto;
	rule->type = krule->type;
	rule->code = krule->code;
	rule->flags = krule->flags;
	rule->flagset = krule->flagset;
	rule->min_ttl = krule->min_ttl;
	rule->allow_opts = krule->allow_opts;
	rule->rt = krule->rt;
	rule->return_ttl = krule->return_ttl;
	rule->tos = krule->tos;
	rule->set_tos = krule->set_tos;
	rule->anchor_relative = krule->anchor_relative;
	rule->anchor_wildcard = krule->anchor_wildcard;

	rule->flush = krule->flush;
	rule->prio = krule->prio;
	rule->set_prio[0] = krule->set_prio[0];
	rule->set_prio[1] = krule->set_prio[1];

	bcopy(&krule->divert, &rule->divert, sizeof(krule->divert));

	rule->u_states_cur = counter_u64_fetch(krule->states_cur);
	rule->u_states_tot = counter_u64_fetch(krule->states_tot);
	rule->u_src_nodes = counter_u64_fetch(krule->src_nodes);
}
static int
pf_check_rule_addr(const struct pf_rule_addr *addr)
{

	switch (addr->addr.type) {
	case PF_ADDR_ADDRMASK:
	case PF_ADDR_NOROUTE:
	case PF_ADDR_DYNIFTL:
	case PF_ADDR_TABLE:
	case PF_ADDR_URPFFAILED:
	case PF_ADDR_RANGE:
		break;
	default:
		return (EINVAL);
	}

	if (addr->addr.p.dyn != NULL) {
		return (EINVAL);
	}

	return (0);
}
static int
pf_nvaddr_to_addr(const nvlist_t *nvl, struct pf_addr *paddr)
{
	return (pf_nvbinary(nvl, "addr", paddr, sizeof(*paddr)));
}

static nvlist_t *
pf_addr_to_nvaddr(const struct pf_addr *paddr)
{
	nvlist_t *nvl;

	nvl = nvlist_create(0);
	if (nvl == NULL)
		return (NULL);

	nvlist_add_binary(nvl, "addr", paddr, sizeof(*paddr));

	return (nvl);
}
static int
pf_nvmape_to_mape(const nvlist_t *nvl, struct pf_mape_portset *mape)
{
	int error = 0;

	bzero(mape, sizeof(*mape));
	PFNV_CHK(pf_nvuint8(nvl, "offset", &mape->offset));
	PFNV_CHK(pf_nvuint8(nvl, "psidlen", &mape->psidlen));
	PFNV_CHK(pf_nvuint16(nvl, "psid", &mape->psid));

errout:
	return (error);
}

static nvlist_t *
pf_mape_to_nvmape(const struct pf_mape_portset *mape)
{
	nvlist_t *nvl;

	nvl = nvlist_create(0);
	if (nvl == NULL)
		return (NULL);

	nvlist_add_number(nvl, "offset", mape->offset);
	nvlist_add_number(nvl, "psidlen", mape->psidlen);
	nvlist_add_number(nvl, "psid", mape->psid);

	return (nvl);
}
static int
pf_nvpool_to_pool(const nvlist_t *nvl, struct pf_kpool *kpool)
{
	int error = 0;

	bzero(kpool, sizeof(*kpool));

	PFNV_CHK(pf_nvbinary(nvl, "key", &kpool->key, sizeof(kpool->key)));

	if (nvlist_exists_nvlist(nvl, "counter")) {
		PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "counter"),
		    &kpool->counter));
	}

	PFNV_CHK(pf_nvint(nvl, "tblidx", &kpool->tblidx));
	PFNV_CHK(pf_nvuint16_array(nvl, "proxy_port", kpool->proxy_port, 2,
	    NULL));
	PFNV_CHK(pf_nvuint8(nvl, "opts", &kpool->opts));

	if (nvlist_exists_nvlist(nvl, "mape")) {
		PFNV_CHK(pf_nvmape_to_mape(nvlist_get_nvlist(nvl, "mape"),
		    &kpool->mape));
	}

errout:
	return (error);
}

static nvlist_t *
pf_pool_to_nvpool(const struct pf_kpool *pool)
{
	nvlist_t *nvl;
	nvlist_t *tmp;

	nvl = nvlist_create(0);
	if (nvl == NULL)
		return (NULL);

	nvlist_add_binary(nvl, "key", &pool->key, sizeof(pool->key));
	tmp = pf_addr_to_nvaddr(&pool->counter);
	if (tmp == NULL)
		goto error;
	nvlist_add_nvlist(nvl, "counter", tmp);

	nvlist_add_number(nvl, "tblidx", pool->tblidx);
	pf_uint16_array_nv(nvl, "proxy_port", pool->proxy_port, 2);
	nvlist_add_number(nvl, "opts", pool->opts);

	tmp = pf_mape_to_nvmape(&pool->mape);
	if (tmp == NULL)
		goto error;
	nvlist_add_nvlist(nvl, "mape", tmp);

	return (nvl);

error:
	nvlist_destroy(nvl);
	return (NULL);
}
static int
pf_nvaddr_wrap_to_addr_wrap(const nvlist_t *nvl, struct pf_addr_wrap *addr)
{
	int error = 0;

	bzero(addr, sizeof(*addr));

	PFNV_CHK(pf_nvuint8(nvl, "type", &addr->type));
	PFNV_CHK(pf_nvuint8(nvl, "iflags", &addr->iflags));
	if (addr->type == PF_ADDR_DYNIFTL)
		PFNV_CHK(pf_nvstring(nvl, "ifname", addr->v.ifname,
		    sizeof(addr->v.ifname)));
	if (addr->type == PF_ADDR_TABLE)
		PFNV_CHK(pf_nvstring(nvl, "tblname", addr->v.tblname,
		    sizeof(addr->v.tblname)));

	if (! nvlist_exists_nvlist(nvl, "addr"))
		return (EINVAL);
	PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "addr"),
	    &addr->v.a.addr));

	if (! nvlist_exists_nvlist(nvl, "mask"))
		return (EINVAL);
	PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "mask"),
	    &addr->v.a.mask));

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
	case PF_ADDR_TABLE:
	case PF_ADDR_RANGE:
	case PF_ADDR_ADDRMASK:
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		break;
	default:
		error = EINVAL;
		break;
	}

errout:
	return (error);
}

static nvlist_t *
pf_addr_wrap_to_nvaddr_wrap(const struct pf_addr_wrap *addr)
{
	nvlist_t *nvl;
	nvlist_t *tmp;

	nvl = nvlist_create(0);
	if (nvl == NULL)
		return (NULL);

	nvlist_add_number(nvl, "type", addr->type);
	nvlist_add_number(nvl, "iflags", addr->iflags);
	if (addr->type == PF_ADDR_DYNIFTL)
		nvlist_add_string(nvl, "ifname", addr->v.ifname);
	if (addr->type == PF_ADDR_TABLE)
		nvlist_add_string(nvl, "tblname", addr->v.tblname);

	tmp = pf_addr_to_nvaddr(&addr->v.a.addr);
	if (tmp == NULL)
		goto error;
	nvlist_add_nvlist(nvl, "addr", tmp);
	tmp = pf_addr_to_nvaddr(&addr->v.a.mask);
	if (tmp == NULL)
		goto error;
	nvlist_add_nvlist(nvl, "mask", tmp);

	return (nvl);

error:
	nvlist_destroy(nvl);
	return (NULL);
}
static int
pf_validate_op(uint8_t op)
{
	switch (op) {
	case PF_OP_NONE:
	case PF_OP_IRG:
	case PF_OP_EQ:
	case PF_OP_NE:
	case PF_OP_LT:
	case PF_OP_LE:
	case PF_OP_GT:
	case PF_OP_GE:
	case PF_OP_XRG:
	case PF_OP_RRG:
		break;
	default:
		return (EINVAL);
	}

	return (0);
}
static int
pf_nvrule_addr_to_rule_addr(const nvlist_t *nvl, struct pf_rule_addr *addr)
{
	int error = 0;

	if (! nvlist_exists_nvlist(nvl, "addr"))
		return (EINVAL);

	PFNV_CHK(pf_nvaddr_wrap_to_addr_wrap(nvlist_get_nvlist(nvl, "addr"),
	    &addr->addr));
	PFNV_CHK(pf_nvuint16_array(nvl, "port", addr->port, 2, NULL));
	PFNV_CHK(pf_nvuint8(nvl, "neg", &addr->neg));
	PFNV_CHK(pf_nvuint8(nvl, "port_op", &addr->port_op));

	PFNV_CHK(pf_validate_op(addr->port_op));

errout:
	return (error);
}

static nvlist_t *
pf_rule_addr_to_nvrule_addr(const struct pf_rule_addr *addr)
{
	nvlist_t *nvl;
	nvlist_t *tmp;

	nvl = nvlist_create(0);
	if (nvl == NULL)
		return (NULL);

	tmp = pf_addr_wrap_to_nvaddr_wrap(&addr->addr);
	if (tmp == NULL)
		goto error;
	nvlist_add_nvlist(nvl, "addr", tmp);
	pf_uint16_array_nv(nvl, "port", addr->port, 2);
	nvlist_add_number(nvl, "neg", addr->neg);
	nvlist_add_number(nvl, "port_op", addr->port_op);

	return (nvl);

error:
	nvlist_destroy(nvl);
	return (NULL);
}
static int
pf_nvrule_uid_to_rule_uid(const nvlist_t *nvl, struct pf_rule_uid *uid)
{
	int error = 0;

	bzero(uid, sizeof(*uid));

	PFNV_CHK(pf_nvuint32_array(nvl, "uid", uid->uid, 2, NULL));
	PFNV_CHK(pf_nvuint8(nvl, "op", &uid->op));

	PFNV_CHK(pf_validate_op(uid->op));

errout:
	return (error);
}

static nvlist_t *
pf_rule_uid_to_nvrule_uid(const struct pf_rule_uid *uid)
{
	nvlist_t *nvl;

	nvl = nvlist_create(0);
	if (nvl == NULL)
		return (NULL);

	pf_uint32_array_nv(nvl, "uid", uid->uid, 2);
	nvlist_add_number(nvl, "op", uid->op);

	return (nvl);
}

static int
pf_nvrule_gid_to_rule_gid(const nvlist_t *nvl, struct pf_rule_gid *gid)
{
	/*
	 * Cheat a little. These structs are the same, other than the name of
	 * the first field.
	 */
	return (pf_nvrule_uid_to_rule_uid(nvl, (struct pf_rule_uid *)gid));
}
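/*
 * Translate a rule supplied as an nvlist into a freshly allocated
 * struct pf_krule, validating each field as it is extracted; PFNV_CHK()
 * branches to errout on the first conversion error.
 */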
static int
pf_nvrule_to_krule(const nvlist_t *nvl, struct pf_krule **prule)
{
	struct pf_krule *rule;
	int error = 0;

	rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK | M_ZERO);

	PFNV_CHK(pf_nvuint32(nvl, "nr", &rule->nr));

	if (! nvlist_exists_nvlist(nvl, "src")) {
		error = EINVAL;
		goto errout;
	}
	error = pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "src"),
	    &rule->src);
	if (error != 0)
		goto errout;

	if (! nvlist_exists_nvlist(nvl, "dst")) {
		error = EINVAL;
		goto errout;
	}
	PFNV_CHK(pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "dst"),
	    &rule->dst));

	if (nvlist_exists_string(nvl, "label")) {
		PFNV_CHK(pf_nvstring(nvl, "label", rule->label[0],
		    sizeof(rule->label[0])));
	} else if (nvlist_exists_string_array(nvl, "labels")) {
		const char *const *strs;
		size_t items;
		size_t ret;

		strs = nvlist_get_string_array(nvl, "labels", &items);
		if (items > PF_RULE_MAX_LABEL_COUNT) {
			error = E2BIG;
			goto errout;
		}

		for (size_t i = 0; i < items; i++) {
			ret = strlcpy(rule->label[i], strs[i],
			    sizeof(rule->label[0]));
			if (ret >= sizeof(rule->label[0])) {
				error = E2BIG;
				goto errout;
			}
		}
	}

	PFNV_CHK(pf_nvstring(nvl, "ifname", rule->ifname,
	    sizeof(rule->ifname)));
	PFNV_CHK(pf_nvstring(nvl, "qname", rule->qname, sizeof(rule->qname)));
	PFNV_CHK(pf_nvstring(nvl, "pqname", rule->pqname,
	    sizeof(rule->pqname)));
	PFNV_CHK(pf_nvstring(nvl, "tagname", rule->tagname,
	    sizeof(rule->tagname)));
	PFNV_CHK(pf_nvstring(nvl, "match_tagname", rule->match_tagname,
	    sizeof(rule->match_tagname)));
	PFNV_CHK(pf_nvstring(nvl, "overload_tblname", rule->overload_tblname,
	    sizeof(rule->overload_tblname)));

	if (! nvlist_exists_nvlist(nvl, "rpool")) {
		error = EINVAL;
		goto errout;
	}
	PFNV_CHK(pf_nvpool_to_pool(nvlist_get_nvlist(nvl, "rpool"),
	    &rule->rpool));

	PFNV_CHK(pf_nvuint32(nvl, "os_fingerprint", &rule->os_fingerprint));

	PFNV_CHK(pf_nvint(nvl, "rtableid", &rule->rtableid));
	PFNV_CHK(pf_nvuint32_array(nvl, "timeout", rule->timeout, PFTM_MAX, NULL));
	PFNV_CHK(pf_nvuint32(nvl, "max_states", &rule->max_states));
	PFNV_CHK(pf_nvuint32(nvl, "max_src_nodes", &rule->max_src_nodes));
	PFNV_CHK(pf_nvuint32(nvl, "max_src_states", &rule->max_src_states));
	PFNV_CHK(pf_nvuint32(nvl, "max_src_conn", &rule->max_src_conn));
	PFNV_CHK(pf_nvuint32(nvl, "max_src_conn_rate.limit",
	    &rule->max_src_conn_rate.limit));
	PFNV_CHK(pf_nvuint32(nvl, "max_src_conn_rate.seconds",
	    &rule->max_src_conn_rate.seconds));
	PFNV_CHK(pf_nvuint32(nvl, "prob", &rule->prob));
	PFNV_CHK(pf_nvuint32(nvl, "cuid", &rule->cuid));
	PFNV_CHK(pf_nvuint32(nvl, "cpid", &rule->cpid));

	PFNV_CHK(pf_nvuint16(nvl, "return_icmp", &rule->return_icmp));
	PFNV_CHK(pf_nvuint16(nvl, "return_icmp6", &rule->return_icmp6));

	PFNV_CHK(pf_nvuint16(nvl, "max_mss", &rule->max_mss));
	PFNV_CHK(pf_nvuint16(nvl, "scrub_flags", &rule->scrub_flags));

	if (! nvlist_exists_nvlist(nvl, "uid")) {
		error = EINVAL;
		goto errout;
	}
	PFNV_CHK(pf_nvrule_uid_to_rule_uid(nvlist_get_nvlist(nvl, "uid"),
	    &rule->uid));

	if (! nvlist_exists_nvlist(nvl, "gid")) {
		error = EINVAL;
		goto errout;
	}
	PFNV_CHK(pf_nvrule_gid_to_rule_gid(nvlist_get_nvlist(nvl, "gid"),
	    &rule->gid));

	PFNV_CHK(pf_nvuint32(nvl, "rule_flag", &rule->rule_flag));
	PFNV_CHK(pf_nvuint8(nvl, "action", &rule->action));
	PFNV_CHK(pf_nvuint8(nvl, "direction", &rule->direction));
	PFNV_CHK(pf_nvuint8(nvl, "log", &rule->log));
	PFNV_CHK(pf_nvuint8(nvl, "logif", &rule->logif));
	PFNV_CHK(pf_nvuint8(nvl, "quick", &rule->quick));
	PFNV_CHK(pf_nvuint8(nvl, "ifnot", &rule->ifnot));
	PFNV_CHK(pf_nvuint8(nvl, "match_tag_not", &rule->match_tag_not));
	PFNV_CHK(pf_nvuint8(nvl, "natpass", &rule->natpass));

	PFNV_CHK(pf_nvuint8(nvl, "keep_state", &rule->keep_state));
	PFNV_CHK(pf_nvuint8(nvl, "af", &rule->af));
	PFNV_CHK(pf_nvuint8(nvl, "proto", &rule->proto));
	PFNV_CHK(pf_nvuint8(nvl, "type", &rule->type));
	PFNV_CHK(pf_nvuint8(nvl, "code", &rule->code));
	PFNV_CHK(pf_nvuint8(nvl, "flags", &rule->flags));
	PFNV_CHK(pf_nvuint8(nvl, "flagset", &rule->flagset));
	PFNV_CHK(pf_nvuint8(nvl, "min_ttl", &rule->min_ttl));
	PFNV_CHK(pf_nvuint8(nvl, "allow_opts", &rule->allow_opts));
	PFNV_CHK(pf_nvuint8(nvl, "rt", &rule->rt));
	PFNV_CHK(pf_nvuint8(nvl, "return_ttl", &rule->return_ttl));
	PFNV_CHK(pf_nvuint8(nvl, "tos", &rule->tos));
	PFNV_CHK(pf_nvuint8(nvl, "set_tos", &rule->set_tos));
	PFNV_CHK(pf_nvuint8(nvl, "anchor_relative", &rule->anchor_relative));
	PFNV_CHK(pf_nvuint8(nvl, "anchor_wildcard", &rule->anchor_wildcard));

	PFNV_CHK(pf_nvuint8(nvl, "flush", &rule->flush));
	PFNV_CHK(pf_nvuint8(nvl, "prio", &rule->prio));

	PFNV_CHK(pf_nvuint8_array(nvl, "set_prio", rule->set_prio, 2, NULL));

	if (nvlist_exists_nvlist(nvl, "divert")) {
		const nvlist_t *nvldivert = nvlist_get_nvlist(nvl, "divert");

		if (! nvlist_exists_nvlist(nvldivert, "addr")) {
			error = EINVAL;
			goto errout;
		}
		PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvldivert, "addr"),
		    &rule->divert.addr));
		PFNV_CHK(pf_nvuint16(nvldivert, "port", &rule->divert.port));
	}

	/* Validation */
#ifndef INET
	if (rule->af == AF_INET) {
		error = EAFNOSUPPORT;
		goto errout;
	}
#endif /* INET */
#ifndef INET6
	if (rule->af == AF_INET6) {
		error = EAFNOSUPPORT;
		goto errout;
	}
#endif /* INET6 */

	PFNV_CHK(pf_check_rule_addr(&rule->src));
	PFNV_CHK(pf_check_rule_addr(&rule->dst));

	*prule = rule;

	return (0);

errout:
	pf_krule_free(rule);
	return (error);
}
static nvlist_t *
pf_divert_to_nvdivert(const struct pf_krule *rule)
{
	nvlist_t *nvl;
	nvlist_t *tmp;

	nvl = nvlist_create(0);
	if (nvl == NULL)
		return (NULL);

	tmp = pf_addr_to_nvaddr(&rule->divert.addr);
	if (tmp == NULL)
		goto error;
	nvlist_add_nvlist(nvl, "addr", tmp);
	nvlist_add_number(nvl, "port", rule->divert.port);

	return (nvl);

error:
	nvlist_destroy(nvl);
	return (NULL);
}
static nvlist_t *
pf_krule_to_nvrule(const struct pf_krule *rule)
{
	nvlist_t *nvl, *tmp;

	nvl = nvlist_create(0);
	if (nvl == NULL)
		return (nvl);

	nvlist_add_number(nvl, "nr", rule->nr);
	tmp = pf_rule_addr_to_nvrule_addr(&rule->src);
	if (tmp == NULL)
		goto error;
	nvlist_add_nvlist(nvl, "src", tmp);
	tmp = pf_rule_addr_to_nvrule_addr(&rule->dst);
	if (tmp == NULL)
		goto error;
	nvlist_add_nvlist(nvl, "dst", tmp);

	for (int i = 0; i < PF_SKIP_COUNT; i++) {
		nvlist_append_number_array(nvl, "skip",
		    rule->skip[i].ptr ? rule->skip[i].ptr->nr : -1);
	}

	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++) {
		nvlist_append_string_array(nvl, "labels", rule->label[i]);
	}
	nvlist_add_string(nvl, "label", rule->label[0]);
	nvlist_add_string(nvl, "ifname", rule->ifname);
	nvlist_add_string(nvl, "qname", rule->qname);
	nvlist_add_string(nvl, "pqname", rule->pqname);
	nvlist_add_string(nvl, "tagname", rule->tagname);
	nvlist_add_string(nvl, "match_tagname", rule->match_tagname);
	nvlist_add_string(nvl, "overload_tblname", rule->overload_tblname);

	tmp = pf_pool_to_nvpool(&rule->rpool);
	if (tmp == NULL)
		goto error;
	nvlist_add_nvlist(nvl, "rpool", tmp);

	nvlist_add_number(nvl, "evaluations",
	    counter_u64_fetch(rule->evaluations));
	for (int i = 0; i < 2; i++) {
		nvlist_append_number_array(nvl, "packets",
		    counter_u64_fetch(rule->packets[i]));
		nvlist_append_number_array(nvl, "bytes",
		    counter_u64_fetch(rule->bytes[i]));
	}

	nvlist_add_number(nvl, "os_fingerprint", rule->os_fingerprint);

	nvlist_add_number(nvl, "rtableid", rule->rtableid);
	pf_uint32_array_nv(nvl, "timeout", rule->timeout, PFTM_MAX);
	nvlist_add_number(nvl, "max_states", rule->max_states);
	nvlist_add_number(nvl, "max_src_nodes", rule->max_src_nodes);
	nvlist_add_number(nvl, "max_src_states", rule->max_src_states);
	nvlist_add_number(nvl, "max_src_conn", rule->max_src_conn);
	nvlist_add_number(nvl, "max_src_conn_rate.limit",
	    rule->max_src_conn_rate.limit);
	nvlist_add_number(nvl, "max_src_conn_rate.seconds",
	    rule->max_src_conn_rate.seconds);
	nvlist_add_number(nvl, "qid", rule->qid);
	nvlist_add_number(nvl, "pqid", rule->pqid);
	nvlist_add_number(nvl, "prob", rule->prob);
	nvlist_add_number(nvl, "cuid", rule->cuid);
	nvlist_add_number(nvl, "cpid", rule->cpid);

	nvlist_add_number(nvl, "states_cur",
	    counter_u64_fetch(rule->states_cur));
	nvlist_add_number(nvl, "states_tot",
	    counter_u64_fetch(rule->states_tot));
	nvlist_add_number(nvl, "src_nodes",
	    counter_u64_fetch(rule->src_nodes));

	nvlist_add_number(nvl, "return_icmp", rule->return_icmp);
	nvlist_add_number(nvl, "return_icmp6", rule->return_icmp6);

	nvlist_add_number(nvl, "max_mss", rule->max_mss);
	nvlist_add_number(nvl, "scrub_flags", rule->scrub_flags);

	tmp = pf_rule_uid_to_nvrule_uid(&rule->uid);
	if (tmp == NULL)
		goto error;
	nvlist_add_nvlist(nvl, "uid", tmp);
	tmp = pf_rule_uid_to_nvrule_uid((const struct pf_rule_uid *)&rule->gid);
	if (tmp == NULL)
		goto error;
	nvlist_add_nvlist(nvl, "gid", tmp);

	nvlist_add_number(nvl, "rule_flag", rule->rule_flag);
	nvlist_add_number(nvl, "action", rule->action);
	nvlist_add_number(nvl, "direction", rule->direction);
	nvlist_add_number(nvl, "log", rule->log);
	nvlist_add_number(nvl, "logif", rule->logif);
	nvlist_add_number(nvl, "quick", rule->quick);
	nvlist_add_number(nvl, "ifnot", rule->ifnot);
	nvlist_add_number(nvl, "match_tag_not", rule->match_tag_not);
	nvlist_add_number(nvl, "natpass", rule->natpass);

	nvlist_add_number(nvl, "keep_state", rule->keep_state);
	nvlist_add_number(nvl, "af", rule->af);
	nvlist_add_number(nvl, "proto", rule->proto);
	nvlist_add_number(nvl, "type", rule->type);
	nvlist_add_number(nvl, "code", rule->code);
	nvlist_add_number(nvl, "flags", rule->flags);
	nvlist_add_number(nvl, "flagset", rule->flagset);
	nvlist_add_number(nvl, "min_ttl", rule->min_ttl);
	nvlist_add_number(nvl, "allow_opts", rule->allow_opts);
	nvlist_add_number(nvl, "rt", rule->rt);
	nvlist_add_number(nvl, "return_ttl", rule->return_ttl);
	nvlist_add_number(nvl, "tos", rule->tos);
	nvlist_add_number(nvl, "set_tos", rule->set_tos);
	nvlist_add_number(nvl, "anchor_relative", rule->anchor_relative);
	nvlist_add_number(nvl, "anchor_wildcard", rule->anchor_wildcard);

	nvlist_add_number(nvl, "flush", rule->flush);
	nvlist_add_number(nvl, "prio", rule->prio);

	pf_uint8_array_nv(nvl, "set_prio", rule->set_prio, 2);

	tmp = pf_divert_to_nvdivert(rule);
	if (tmp == NULL)
		goto error;
	nvlist_add_nvlist(nvl, "divert", tmp);

	return (nvl);

error:
	nvlist_destroy(nvl);
	return (NULL);
}
static int
pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
{
	int ret;

#ifndef INET
	if (rule->af == AF_INET) {
		return (EAFNOSUPPORT);
	}
#endif /* INET */
#ifndef INET6
	if (rule->af == AF_INET6) {
		return (EAFNOSUPPORT);
	}
#endif /* INET6 */

	ret = pf_check_rule_addr(&rule->src);
	if (ret != 0)
		return (ret);
	ret = pf_check_rule_addr(&rule->dst);
	if (ret != 0)
		return (ret);

	bzero(krule, sizeof(*krule));

	bcopy(&rule->src, &krule->src, sizeof(rule->src));
	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));

	strlcpy(krule->label[0], rule->label, sizeof(rule->label));
	strlcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
	strlcpy(krule->qname, rule->qname, sizeof(rule->qname));
	strlcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
	strlcpy(krule->tagname, rule->tagname, sizeof(rule->tagname));
	strlcpy(krule->match_tagname, rule->match_tagname,
	    sizeof(rule->match_tagname));
	strlcpy(krule->overload_tblname, rule->overload_tblname,
	    sizeof(rule->overload_tblname));

	ret = pf_pool_to_kpool(&rule->rpool, &krule->rpool);
	if (ret != 0)
		return (ret);

	/* Don't allow userspace to set evaluations, packets or bytes. */
	/* kif, anchor, overload_tbl are not copied over. */

	krule->os_fingerprint = rule->os_fingerprint;

	krule->rtableid = rule->rtableid;
	bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout));
	krule->max_states = rule->max_states;
	krule->max_src_nodes = rule->max_src_nodes;
	krule->max_src_states = rule->max_src_states;
	krule->max_src_conn = rule->max_src_conn;
	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
	krule->qid = rule->qid;
	krule->pqid = rule->pqid;
	krule->nr = rule->nr;
	krule->prob = rule->prob;
	krule->cuid = rule->cuid;
	krule->cpid = rule->cpid;

	krule->return_icmp = rule->return_icmp;
	krule->return_icmp6 = rule->return_icmp6;
	krule->max_mss = rule->max_mss;
	krule->tag = rule->tag;
	krule->match_tag = rule->match_tag;
	krule->scrub_flags = rule->scrub_flags;

	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));

	krule->rule_flag = rule->rule_flag;
	krule->action = rule->action;
	krule->direction = rule->direction;
	krule->log = rule->log;
	krule->logif = rule->logif;
	krule->quick = rule->quick;
	krule->ifnot = rule->ifnot;
	krule->match_tag_not = rule->match_tag_not;
	krule->natpass = rule->natpass;

	krule->keep_state = rule->keep_state;
	krule->af = rule->af;
	krule->proto = rule->proto;
	krule->type = rule->type;
	krule->code = rule->code;
	krule->flags = rule->flags;
	krule->flagset = rule->flagset;
	krule->min_ttl = rule->min_ttl;
	krule->allow_opts = rule->allow_opts;
	krule->rt = rule->rt;
	krule->return_ttl = rule->return_ttl;
	krule->tos = rule->tos;
	krule->set_tos = rule->set_tos;
	krule->anchor_relative = rule->anchor_relative;
	krule->anchor_wildcard = rule->anchor_wildcard;

	krule->flush = rule->flush;
	krule->prio = rule->prio;
	krule->set_prio[0] = rule->set_prio[0];
	krule->set_prio[1] = rule->set_prio[1];

	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));

	return (0);
}
static bool
pf_label_match(const struct pf_krule *rule, const char *label)
{
	int i = 0;

	while (*rule->label[i]) {
		if (strcmp(rule->label[i], label) == 0)
			return (true);
		i++;
	}

	return (false);
}
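/*
 * Kill all states in one hash row that match the criteria of a
 * DIOCKILLSTATES request.  The row is relocked and rescanned after every
 * removal, since unlinking a state drops the row lock.
 */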
2414 pf_killstates_row(struct pfioc_state_kill *psk, struct pf_idhash *ih)
2417 struct pf_state_key *sk;
2418 struct pf_addr *srcaddr, *dstaddr;
2420 u_int16_t srcport, dstport;
2422 relock_DIOCKILLSTATES:
2423 PF_HASHROW_LOCK(ih);
2424 LIST_FOREACH(s, &ih->states, entry) {
2425 sk = s->key[PF_SK_WIRE];
2426 if (s->direction == PF_OUT) {
2427 srcaddr = &sk->addr[1];
2428 dstaddr = &sk->addr[0];
2429 srcport = sk->port[1];
2430 dstport = sk->port[0];
2432 srcaddr = &sk->addr[0];
2433 dstaddr = &sk->addr[1];
2434 srcport = sk->port[0];
2435 dstport = sk->port[1];
2438 if (psk->psk_af && sk->af != psk->psk_af)
2441 if (psk->psk_proto && psk->psk_proto != sk->proto)
2444 if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
2445 &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
2448 if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
2449 &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
2452 if (psk->psk_src.port_op != 0 &&
2453 ! pf_match_port(psk->psk_src.port_op,
2454 psk->psk_src.port[0], psk->psk_src.port[1], srcport))
2457 if (psk->psk_dst.port_op != 0 &&
2458 ! pf_match_port(psk->psk_dst.port_op,
2459 psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
2462 if (psk->psk_label[0] &&
2463 ! pf_label_match(s->rule.ptr, psk->psk_label))
2466 if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
2470 pf_unlink_state(s, PF_ENTER_LOCKED);
2472 goto relock_DIOCKILLSTATES;
2474 PF_HASHROW_UNLOCK(ih);
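/*
 * Common backend for DIOCADDRULE and DIOCADDRULENV: allocate the
 * rule's counters, validate tickets and references (interface,
 * queues, tags, tables, anchors) and append the rule to the
 * inactive ruleset identified by `ticket'. On error the rule is
 * freed, so callers must not touch it afterwards.
 */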
2480 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
2481 uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2484 struct pf_kruleset *ruleset;
2485 struct pf_krule *tail;
2486 struct pf_kpooladdr *pa;
2487 struct pfi_kkif *kif = NULL;
2491 if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
2493 goto errout_unlocked;
2496 #define ERROUT(x) { error = (x); goto errout; }
2498 if (rule->ifname[0])
2499 kif = pf_kkif_create(M_WAITOK);
2500 rule->evaluations = counter_u64_alloc(M_WAITOK);
2501 for (int i = 0; i < 2; i++) {
2502 rule->packets[i] = counter_u64_alloc(M_WAITOK);
2503 rule->bytes[i] = counter_u64_alloc(M_WAITOK);
2505 rule->states_cur = counter_u64_alloc(M_WAITOK);
2506 rule->states_tot = counter_u64_alloc(M_WAITOK);
2507 rule->src_nodes = counter_u64_alloc(M_WAITOK);
2508 rule->cuid = td->td_ucred->cr_ruid;
2509 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
2510 TAILQ_INIT(&rule->rpool.list);
2513 ruleset = pf_find_kruleset(anchor);
2514 if (ruleset == NULL)
2516 rs_num = pf_get_ruleset_number(rule->action);
2517 if (rs_num >= PF_RULESET_MAX)
2519 if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2520 DPFPRINTF(PF_DEBUG_MISC,
2521 ("ticket: %d != [%d]%d\n", ticket, rs_num,
2522 ruleset->rules[rs_num].inactive.ticket));
2525 if (pool_ticket != V_ticket_pabuf) {
2526 DPFPRINTF(PF_DEBUG_MISC,
2527 ("pool_ticket: %d != %d\n", pool_ticket,
2532 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2535 rule->nr = tail->nr + 1;
2538 if (rule->ifname[0]) {
2539 rule->kif = pfi_kkif_attach(kif, rule->ifname);
2541 pfi_kkif_ref(rule->kif);
2545 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2550 if (rule->qname[0] != 0) {
2551 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2553 else if (rule->pqname[0] != 0) {
2555 pf_qname2qid(rule->pqname)) == 0)
2558 rule->pqid = rule->qid;
2561 if (rule->tagname[0])
2562 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2564 if (rule->match_tagname[0])
2565 if ((rule->match_tag =
2566 pf_tagname2tag(rule->match_tagname)) == 0)
2568 if (rule->rt && !rule->direction)
2572 if (rule->logif >= PFLOGIFS_MAX)
2574 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2576 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2578 if (pf_kanchor_setup(rule, ruleset, anchor_call))
2580 if (rule->scrub_flags & PFSTATE_SETPRIO &&
2581 (rule->set_prio[0] > PF_PRIO_MAX ||
2582 rule->set_prio[1] > PF_PRIO_MAX))
2584 TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
2585 if (pa->addr.type == PF_ADDR_TABLE) {
2586 pa->addr.p.tbl = pfr_attach_table(ruleset,
2587 pa->addr.v.tblname);
2588 if (pa->addr.p.tbl == NULL)
2592 rule->overload_tbl = NULL;
2593 if (rule->overload_tblname[0]) {
2594 if ((rule->overload_tbl = pfr_attach_table(ruleset,
2595 rule->overload_tblname)) == NULL)
2598 rule->overload_tbl->pfrkt_flags |=
2602 pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list);
2603 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2604 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
2605 (rule->rt > PF_NOPFROUTE)) &&
2606 (TAILQ_FIRST(&rule->rpool.list) == NULL))
2615 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
2616 counter_u64_zero(rule->evaluations);
2617 for (int i = 0; i < 2; i++) {
2618 counter_u64_zero(rule->packets[i]);
2619 counter_u64_zero(rule->bytes[i]);
2621 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2623 ruleset->rules[rs_num].inactive.rcount++;
2633 pf_krule_free(rule);
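/*
 * Main character-device ioctl entry point. The first switch below
 * whitelists the commands still permitted once the securelevel has
 * been raised; the second whitelists the commands permitted on a
 * read-only (no FWRITE) descriptor. Everything else is handled in
 * the large command switch that follows.
 */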
2638 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2641 PF_RULES_RLOCK_TRACKER;
2643 /* XXX keep in sync with switch() below */
2644 if (securelevel_gt(td->td_ucred, 2))
2652 case DIOCSETSTATUSIF:
2658 case DIOCGETTIMEOUT:
2659 case DIOCCLRRULECTRS:
2661 case DIOCGETALTQSV0:
2662 case DIOCGETALTQSV1:
2665 case DIOCGETQSTATSV0:
2666 case DIOCGETQSTATSV1:
2667 case DIOCGETRULESETS:
2668 case DIOCGETRULESET:
2669 case DIOCRGETTABLES:
2670 case DIOCRGETTSTATS:
2671 case DIOCRCLRTSTATS:
2677 case DIOCRGETASTATS:
2678 case DIOCRCLRASTATS:
2681 case DIOCGETSRCNODES:
2682 case DIOCCLRSRCNODES:
2683 case DIOCIGETIFACES:
2684 case DIOCGIFSPEEDV0:
2685 case DIOCGIFSPEEDV1:
2689 case DIOCRCLRTABLES:
2690 case DIOCRADDTABLES:
2691 case DIOCRDELTABLES:
2692 case DIOCRSETTFLAGS:
2693 if (((struct pfioc_table *)addr)->pfrio_flags &
2695 break; /* dummy operation ok */
2701 if (!(flags & FWRITE))
2709 case DIOCGETTIMEOUT:
2711 case DIOCGETALTQSV0:
2712 case DIOCGETALTQSV1:
2715 case DIOCGETQSTATSV0:
2716 case DIOCGETQSTATSV1:
2717 case DIOCGETRULESETS:
2718 case DIOCGETRULESET:
2720 case DIOCRGETTABLES:
2721 case DIOCRGETTSTATS:
2723 case DIOCRGETASTATS:
2726 case DIOCGETSRCNODES:
2727 case DIOCIGETIFACES:
2728 case DIOCGIFSPEEDV1:
2729 case DIOCGIFSPEEDV0:
2732 case DIOCRCLRTABLES:
2733 case DIOCRADDTABLES:
2734 case DIOCRDELTABLES:
2735 case DIOCRCLRTSTATS:
2740 case DIOCRSETTFLAGS:
2741 if (((struct pfioc_table *)addr)->pfrio_flags &
2743 flags |= FWRITE; /* need write lock for dummy */
2744 break; /* dummy operation ok */
2748 if (((struct pfioc_rule *)addr)->action ==
2756 CURVNET_SET(TD_TO_VNET(td));
2760 sx_xlock(&pf_ioctl_lock);
2761 if (V_pf_status.running)
2768 DPFPRINTF(PF_DEBUG_MISC,
2769 ("pf: pfil registration failed\n"));
2772 V_pf_status.running = 1;
2773 V_pf_status.since = time_second;
2776 V_pf_stateid[cpu] = time_second;
2778 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
2783 sx_xlock(&pf_ioctl_lock);
2784 if (!V_pf_status.running)
2787 V_pf_status.running = 0;
2788 error = dehook_pf();
2790 V_pf_status.running = 1;
2791 DPFPRINTF(PF_DEBUG_MISC,
2792 ("pf: pfil unregistration failed\n"));
2794 V_pf_status.since = time_second;
2795 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
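/*
 * Illustrative userspace sketch for DIOCADDRULENV (an assumption,
 * not part of this file): the caller packs an nvlist holding the
 * numbers "ticket" and "pool_ticket", the nvlist "rule" and the
 * optional strings "anchor"/"anchor_call", then hands the packed
 * buffer over in a struct pfioc_nv:
 *
 *	nvlist_t *nvl = nvlist_create(0);
 *	nvlist_add_number(nvl, "ticket", ticket);
 *	nvlist_add_number(nvl, "pool_ticket", pool_ticket);
 *	nvlist_add_nvlist(nvl, "rule", nvrule);
 *	nv.data = nvlist_pack(nvl, &nv.len);
 *	ioctl(dev, DIOCADDRULENV, &nv);
 */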
2799 case DIOCADDRULENV: {
2800 struct pfioc_nv *nv = (struct pfioc_nv *)addr;
2801 nvlist_t *nvl = NULL;
2802 void *nvlpacked = NULL;
2803 struct pf_krule *rule = NULL;
2804 const char *anchor = "", *anchor_call = "";
2805 uint32_t ticket = 0, pool_ticket = 0;
2807 #define ERROUT(x) do { error = (x); goto DIOCADDRULENV_error; } while (0)
2809 if (nv->len > pf_ioctl_maxcount)
2812 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
2813 error = copyin(nv->data, nvlpacked, nv->len);
2817 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2821 if (! nvlist_exists_number(nvl, "ticket"))
2823 ticket = nvlist_get_number(nvl, "ticket");
2825 if (! nvlist_exists_number(nvl, "pool_ticket"))
2827 pool_ticket = nvlist_get_number(nvl, "pool_ticket");
2829 if (! nvlist_exists_nvlist(nvl, "rule"))
2832 error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
2837 if (nvlist_exists_string(nvl, "anchor"))
2838 anchor = nvlist_get_string(nvl, "anchor");
2839 if (nvlist_exists_string(nvl, "anchor_call"))
2840 anchor_call = nvlist_get_string(nvl, "anchor_call");
2842 if ((error = nvlist_error(nvl)))
2845 /* Frees rule on error */
2846 error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
2849 nvlist_destroy(nvl);
2850 free(nvlpacked, M_TEMP);
2853 DIOCADDRULENV_error:
2854 pf_krule_free(rule);
2855 nvlist_destroy(nvl);
2856 free(nvlpacked, M_TEMP);
2861 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
2862 struct pf_krule *rule;
2864 rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
2865 error = pf_rule_to_krule(&pr->rule, rule);
2867 free(rule, M_PFRULE);
2871 pr->anchor[sizeof(pr->anchor) - 1] = 0;
2873 /* Frees rule on error */
2874 error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
2875 pr->anchor, pr->anchor_call, td);
2879 case DIOCGETRULES: {
2880 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
2881 struct pf_kruleset *ruleset;
2882 struct pf_krule *tail;
2886 pr->anchor[sizeof(pr->anchor) - 1] = 0;
2887 ruleset = pf_find_kruleset(pr->anchor);
2888 if (ruleset == NULL) {
2893 rs_num = pf_get_ruleset_number(pr->rule.action);
2894 if (rs_num >= PF_RULESET_MAX) {
2899 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2902 pr->nr = tail->nr + 1;
2905 pr->ticket = ruleset->rules[rs_num].active.ticket;
2911 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
2912 struct pf_kruleset *ruleset;
2913 struct pf_krule *rule;
2917 pr->anchor[sizeof(pr->anchor) - 1] = 0;
2918 ruleset = pf_find_kruleset(pr->anchor);
2919 if (ruleset == NULL) {
2924 rs_num = pf_get_ruleset_number(pr->rule.action);
2925 if (rs_num >= PF_RULESET_MAX) {
2930 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
2935 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2936 while ((rule != NULL) && (rule->nr != pr->nr))
2937 rule = TAILQ_NEXT(rule, entries);
2944 pf_krule_to_rule(rule, &pr->rule);
2946 if (pf_kanchor_copyout(ruleset, rule, pr)) {
2951 pf_addr_copyout(&pr->rule.src.addr);
2952 pf_addr_copyout(&pr->rule.dst.addr);
2954 if (pr->action == PF_GET_CLR_CNTR) {
2955 counter_u64_zero(rule->evaluations);
2956 for (int i = 0; i < 2; i++) {
2957 counter_u64_zero(rule->packets[i]);
2958 counter_u64_zero(rule->bytes[i]);
2960 counter_u64_zero(rule->states_tot);
2966 case DIOCGETRULENV: {
2967 struct pfioc_nv *nv = (struct pfioc_nv *)addr;
2968 nvlist_t *nvrule = NULL;
2969 nvlist_t *nvl = NULL;
2970 struct pf_kruleset *ruleset;
2971 struct pf_krule *rule;
2972 void *nvlpacked = NULL;
2974 bool clear_counter = false;
2976 #define ERROUT(x) do { error = (x); goto DIOCGETRULENV_error; } while (0)
2978 if (nv->len > pf_ioctl_maxcount)
2981 /* Copy the request in */
2982 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
2983 if (nvlpacked == NULL)
2986 error = copyin(nv->data, nvlpacked, nv->len);
2990 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2994 if (! nvlist_exists_string(nvl, "anchor"))
2996 if (! nvlist_exists_number(nvl, "ruleset"))
2998 if (! nvlist_exists_number(nvl, "ticket"))
3000 if (! nvlist_exists_number(nvl, "nr"))
3003 if (nvlist_exists_bool(nvl, "clear_counter"))
3004 clear_counter = nvlist_get_bool(nvl, "clear_counter");
3006 if (clear_counter && !(flags & FWRITE))
3009 nr = nvlist_get_number(nvl, "nr");
3012 ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
3013 if (ruleset == NULL) {
3018 rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
3019 if (rs_num >= PF_RULESET_MAX) {
3024 if (nvlist_get_number(nvl, "ticket") !=
3025 ruleset->rules[rs_num].active.ticket) {
3031 if ((error = nvlist_error(nvl))) {
3036 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3037 while ((rule != NULL) && (rule->nr != nr))
3038 rule = TAILQ_NEXT(rule, entries);
3045 nvrule = pf_krule_to_nvrule(rule);
3047 nvlist_destroy(nvl);
3048 nvl = nvlist_create(0);
3053 nvlist_add_number(nvl, "nr", nr);
3054 nvlist_add_nvlist(nvl, "rule", nvrule);
3056 if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
3061 free(nvlpacked, M_TEMP);
3062 nvlpacked = nvlist_pack(nvl, &nv->len);
3063 if (nvlpacked == NULL) {
3068 if (nv->size == 0) {
3072 else if (nv->size < nv->len) {
3077 error = copyout(nvlpacked, nv->data, nv->len);
3079 if (clear_counter) {
3080 counter_u64_zero(rule->evaluations);
3081 for (int i = 0; i < 2; i++) {
3082 counter_u64_zero(rule->packets[i]);
3083 counter_u64_zero(rule->bytes[i]);
3085 counter_u64_zero(rule->states_tot);
3090 DIOCGETRULENV_error:
3091 free(nvlpacked, M_TEMP);
3092 nvlist_destroy(nvrule);
3093 nvlist_destroy(nvl);
3098 case DIOCCHANGERULE: {
3099 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
3100 struct pf_kruleset *ruleset;
3101 struct pf_krule *oldrule = NULL, *newrule = NULL;
3102 struct pfi_kkif *kif = NULL;
3103 struct pf_kpooladdr *pa;
3107 if (pcr->action < PF_CHANGE_ADD_HEAD ||
3108 pcr->action > PF_CHANGE_GET_TICKET) {
3112 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3117 if (pcr->action != PF_CHANGE_REMOVE) {
3118 newrule = malloc(sizeof(*newrule), M_PFRULE, M_WAITOK);
3119 error = pf_rule_to_krule(&pcr->rule, newrule);
3121 free(newrule, M_PFRULE);
3125 if (newrule->ifname[0])
3126 kif = pf_kkif_create(M_WAITOK);
3127 newrule->evaluations = counter_u64_alloc(M_WAITOK);
3128 for (int i = 0; i < 2; i++) {
3129 newrule->packets[i] =
3130 counter_u64_alloc(M_WAITOK);
3132 counter_u64_alloc(M_WAITOK);
3134 newrule->states_cur = counter_u64_alloc(M_WAITOK);
3135 newrule->states_tot = counter_u64_alloc(M_WAITOK);
3136 newrule->src_nodes = counter_u64_alloc(M_WAITOK);
3137 newrule->cuid = td->td_ucred->cr_ruid;
3138 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
3139 TAILQ_INIT(&newrule->rpool.list);
3142 #define ERROUT(x) { error = (x); goto DIOCCHANGERULE_error; }
3145 if (!(pcr->action == PF_CHANGE_REMOVE ||
3146 pcr->action == PF_CHANGE_GET_TICKET) &&
3147 pcr->pool_ticket != V_ticket_pabuf)
3150 ruleset = pf_find_kruleset(pcr->anchor);
3151 if (ruleset == NULL)
3154 rs_num = pf_get_ruleset_number(pcr->rule.action);
3155 if (rs_num >= PF_RULESET_MAX)
3158 if (pcr->action == PF_CHANGE_GET_TICKET) {
3159 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3161 } else if (pcr->ticket !=
3162 ruleset->rules[rs_num].active.ticket)
3165 if (pcr->action != PF_CHANGE_REMOVE) {
3166 if (newrule->ifname[0]) {
3167 newrule->kif = pfi_kkif_attach(kif,
3170 pfi_kkif_ref(newrule->kif);
3172 newrule->kif = NULL;
3174 if (newrule->rtableid > 0 &&
3175 newrule->rtableid >= rt_numfibs)
3180 if (newrule->qname[0] != 0) {
3182 pf_qname2qid(newrule->qname)) == 0)
3184 else if (newrule->pqname[0] != 0) {
3185 if ((newrule->pqid =
3186 pf_qname2qid(newrule->pqname)) == 0)
3189 newrule->pqid = newrule->qid;
3192 if (newrule->tagname[0])
3194 pf_tagname2tag(newrule->tagname)) == 0)
3196 if (newrule->match_tagname[0])
3197 if ((newrule->match_tag = pf_tagname2tag(
3198 newrule->match_tagname)) == 0)
3200 if (newrule->rt && !newrule->direction)
3204 if (newrule->logif >= PFLOGIFS_MAX)
3206 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3208 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3210 if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3212 TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
3213 if (pa->addr.type == PF_ADDR_TABLE) {
3215 pfr_attach_table(ruleset,
3216 pa->addr.v.tblname);
3217 if (pa->addr.p.tbl == NULL)
3221 newrule->overload_tbl = NULL;
3222 if (newrule->overload_tblname[0]) {
3223 if ((newrule->overload_tbl = pfr_attach_table(
3224 ruleset, newrule->overload_tblname)) ==
3228 newrule->overload_tbl->pfrkt_flags |=
3232 pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list);
3233 if (((((newrule->action == PF_NAT) ||
3234 (newrule->action == PF_RDR) ||
3235 (newrule->action == PF_BINAT) ||
3236 (newrule->rt > PF_NOPFROUTE)) &&
3237 !newrule->anchor)) &&
3238 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
3242 pf_free_rule(newrule);
3247 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3249 pf_empty_kpool(&V_pf_pabuf);
3251 if (pcr->action == PF_CHANGE_ADD_HEAD)
3252 oldrule = TAILQ_FIRST(
3253 ruleset->rules[rs_num].active.ptr);
3254 else if (pcr->action == PF_CHANGE_ADD_TAIL)
3255 oldrule = TAILQ_LAST(
3256 ruleset->rules[rs_num].active.ptr, pf_krulequeue);
3258 oldrule = TAILQ_FIRST(
3259 ruleset->rules[rs_num].active.ptr);
3260 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3261 oldrule = TAILQ_NEXT(oldrule, entries);
3262 if (oldrule == NULL) {
3263 if (newrule != NULL)
3264 pf_free_rule(newrule);
3271 if (pcr->action == PF_CHANGE_REMOVE) {
3272 pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
3274 ruleset->rules[rs_num].active.rcount--;
3276 if (oldrule == NULL)
3278 ruleset->rules[rs_num].active.ptr,
3280 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3281 pcr->action == PF_CHANGE_ADD_BEFORE)
3282 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3285 ruleset->rules[rs_num].active.ptr,
3286 oldrule, newrule, entries);
3287 ruleset->rules[rs_num].active.rcount++;
3291 TAILQ_FOREACH(oldrule,
3292 ruleset->rules[rs_num].active.ptr, entries)
3295 ruleset->rules[rs_num].active.ticket++;
3297 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3298 pf_remove_if_empty_kruleset(ruleset);
3304 DIOCCHANGERULE_error:
3306 pf_krule_free(newrule);
3311 case DIOCCLRSTATES: {
3313 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
3314 u_int i, killed = 0;
3316 for (i = 0; i <= pf_hashmask; i++) {
3317 struct pf_idhash *ih = &V_pf_idhash[i];
3319 relock_DIOCCLRSTATES:
3320 PF_HASHROW_LOCK(ih);
3321 LIST_FOREACH(s, &ih->states, entry)
3322 if (!psk->psk_ifname[0] ||
3323 !strcmp(psk->psk_ifname,
3324 s->kif->pfik_name)) {
3326 * Don't send out individual delete messages.
3329 s->state_flags |= PFSTATE_NOSYNC;
3330 pf_unlink_state(s, PF_ENTER_LOCKED);
3332 goto relock_DIOCCLRSTATES;
3334 PF_HASHROW_UNLOCK(ih);
3336 psk->psk_killed = killed;
3337 if (V_pfsync_clear_states_ptr != NULL)
3338 V_pfsync_clear_states_ptr(V_pf_status.hostid, psk->psk_ifname);
3342 case DIOCKILLSTATES: {
3344 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
3345 u_int i, killed = 0;
3347 if (psk->psk_pfcmp.id) {
3348 if (psk->psk_pfcmp.creatorid == 0)
3349 psk->psk_pfcmp.creatorid = V_pf_status.hostid;
3350 if ((s = pf_find_state_byid(psk->psk_pfcmp.id,
3351 psk->psk_pfcmp.creatorid))) {
3352 pf_unlink_state(s, PF_ENTER_LOCKED);
3353 psk->psk_killed = 1;
3358 for (i = 0; i <= pf_hashmask; i++)
3359 killed += pf_killstates_row(psk, &V_pf_idhash[i]);
3361 psk->psk_killed = killed;
3365 case DIOCADDSTATE: {
3366 struct pfioc_state *ps = (struct pfioc_state *)addr;
3367 struct pfsync_state *sp = &ps->state;
3369 if (sp->timeout >= PFTM_MAX) {
3373 if (V_pfsync_state_import_ptr != NULL) {
3375 error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL);
3382 case DIOCGETSTATE: {
3383 struct pfioc_state *ps = (struct pfioc_state *)addr;
3386 s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
3392 pfsync_state_export(&ps->state, s);
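/*
 * DIOCGETSTATES uses the usual two-step sizing protocol: a request
 * with ps_len == 0 only reports the space required; a second request
 * with a buffer of that size receives the exported states. A rough
 * userspace sketch (an assumption, for illustration only):
 *
 *	struct pfioc_states ps = { 0 };
 *	ioctl(dev, DIOCGETSTATES, &ps);	// ps.ps_len = bytes needed
 *	ps.ps_states = malloc(ps.ps_len);
 *	ioctl(dev, DIOCGETSTATES, &ps);	// fills ps.ps_states
 */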
3397 case DIOCGETSTATES: {
3398 struct pfioc_states *ps = (struct pfioc_states *)addr;
3400 struct pfsync_state *pstore, *p;
3403 if (ps->ps_len <= 0) {
3404 nr = uma_zone_get_cur(V_pf_state_z);
3405 ps->ps_len = sizeof(struct pfsync_state) * nr;
3409 p = pstore = malloc(ps->ps_len, M_TEMP, M_WAITOK | M_ZERO);
3412 for (i = 0; i <= pf_hashmask; i++) {
3413 struct pf_idhash *ih = &V_pf_idhash[i];
3415 PF_HASHROW_LOCK(ih);
3416 LIST_FOREACH(s, &ih->states, entry) {
3418 if (s->timeout == PFTM_UNLINKED)
3421 if ((nr+1) * sizeof(*p) > ps->ps_len) {
3422 PF_HASHROW_UNLOCK(ih);
3423 goto DIOCGETSTATES_full;
3425 pfsync_state_export(p, s);
3429 PF_HASHROW_UNLOCK(ih);
3432 error = copyout(pstore, ps->ps_states,
3433 sizeof(struct pfsync_state) * nr);
3435 free(pstore, M_TEMP);
3438 ps->ps_len = sizeof(struct pfsync_state) * nr;
3439 free(pstore, M_TEMP);
3444 case DIOCGETSTATUS: {
3445 struct pf_status *s = (struct pf_status *)addr;
3448 s->running = V_pf_status.running;
3449 s->since = V_pf_status.since;
3450 s->debug = V_pf_status.debug;
3451 s->hostid = V_pf_status.hostid;
3452 s->states = V_pf_status.states;
3453 s->src_nodes = V_pf_status.src_nodes;
3455 for (int i = 0; i < PFRES_MAX; i++)
3457 counter_u64_fetch(V_pf_status.counters[i]);
3458 for (int i = 0; i < LCNT_MAX; i++)
3460 counter_u64_fetch(V_pf_status.lcounters[i]);
3461 for (int i = 0; i < FCNT_MAX; i++)
3463 counter_u64_fetch(V_pf_status.fcounters[i]);
3464 for (int i = 0; i < SCNT_MAX; i++)
3466 counter_u64_fetch(V_pf_status.scounters[i]);
3468 bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ);
3469 bcopy(V_pf_status.pf_chksum, s->pf_chksum,
3470 PF_MD5_DIGEST_LENGTH);
3472 pfi_update_status(s->ifname, s);
3477 case DIOCSETSTATUSIF: {
3478 struct pfioc_if *pi = (struct pfioc_if *)addr;
3480 if (pi->ifname[0] == 0) {
3481 bzero(V_pf_status.ifname, IFNAMSIZ);
3485 strlcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
3490 case DIOCCLRSTATUS: {
3492 for (int i = 0; i < PFRES_MAX; i++)
3493 counter_u64_zero(V_pf_status.counters[i]);
3494 for (int i = 0; i < FCNT_MAX; i++)
3495 counter_u64_zero(V_pf_status.fcounters[i]);
3496 for (int i = 0; i < SCNT_MAX; i++)
3497 counter_u64_zero(V_pf_status.scounters[i]);
3498 for (int i = 0; i < LCNT_MAX; i++)
3499 counter_u64_zero(V_pf_status.lcounters[i]);
3500 V_pf_status.since = time_second;
3501 if (*V_pf_status.ifname)
3502 pfi_update_status(V_pf_status.ifname, NULL);
3508 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
3509 struct pf_state_key *sk;
3510 struct pf_state *state;
3511 struct pf_state_key_cmp key;
3512 int m = 0, direction = pnl->direction;
3515 /* NATLOOK src and dst are reversed, so reverse sidx/didx */
3516 sidx = (direction == PF_IN) ? 1 : 0;
3517 didx = (direction == PF_IN) ? 0 : 1;
3520 PF_AZERO(&pnl->saddr, pnl->af) ||
3521 PF_AZERO(&pnl->daddr, pnl->af) ||
3522 ((pnl->proto == IPPROTO_TCP ||
3523 pnl->proto == IPPROTO_UDP) &&
3524 (!pnl->dport || !pnl->sport)))
3527 bzero(&key, sizeof(key));
3529 key.proto = pnl->proto;
3530 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
3531 key.port[sidx] = pnl->sport;
3532 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
3533 key.port[didx] = pnl->dport;
3535 state = pf_find_state_all(&key, direction, &m);
3538 error = E2BIG; /* more than one state */
3539 else if (state != NULL) {
3540 /* XXXGL: unlocked read */
3541 sk = state->key[sidx];
3542 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
3543 pnl->rsport = sk->port[sidx];
3544 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
3545 pnl->rdport = sk->port[didx];
3552 case DIOCSETTIMEOUT: {
3553 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
3556 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3562 old = V_pf_default_rule.timeout[pt->timeout];
3563 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
3565 V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
3566 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
3567 wakeup(pf_purge_thread);
3573 case DIOCGETTIMEOUT: {
3574 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
3576 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3581 pt->seconds = V_pf_default_rule.timeout[pt->timeout];
3586 case DIOCGETLIMIT: {
3587 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
3589 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3594 pl->limit = V_pf_limits[pl->index].limit;
3599 case DIOCSETLIMIT: {
3600 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
3604 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3605 V_pf_limits[pl->index].zone == NULL) {
3610 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
3611 old_limit = V_pf_limits[pl->index].limit;
3612 V_pf_limits[pl->index].limit = pl->limit;
3613 pl->limit = old_limit;
3618 case DIOCSETDEBUG: {
3619 u_int32_t *level = (u_int32_t *)addr;
3622 V_pf_status.debug = *level;
3627 case DIOCCLRRULECTRS: {
3628 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
3629 struct pf_kruleset *ruleset = &pf_main_ruleset;
3630 struct pf_krule *rule;
3634 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
3635 counter_u64_zero(rule->evaluations);
3636 for (int i = 0; i < 2; i++) {
3637 counter_u64_zero(rule->packets[i]);
3638 counter_u64_zero(rule->bytes[i]);
3645 case DIOCGIFSPEEDV0:
3646 case DIOCGIFSPEEDV1: {
3647 struct pf_ifspeed_v1 *psp = (struct pf_ifspeed_v1 *)addr;
3648 struct pf_ifspeed_v1 ps;
3651 if (psp->ifname[0] != 0) {
3652 /* Can we completely trust user-land? */
3653 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
3654 ifp = ifunit(ps.ifname);
3657 (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
3658 if (cmd == DIOCGIFSPEEDV1)
3659 psp->baudrate = ifp->if_baudrate;
3668 case DIOCSTARTALTQ: {
3669 struct pf_altq *altq;
3672 /* enable all altq interfaces on active list */
3673 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
3674 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
3675 error = pf_enable_altq(altq);
3681 V_pf_altq_running = 1;
3683 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
3687 case DIOCSTOPALTQ: {
3688 struct pf_altq *altq;
3691 /* disable all altq interfaces on active list */
3692 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
3693 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
3694 error = pf_disable_altq(altq);
3700 V_pf_altq_running = 0;
3702 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
3707 case DIOCADDALTQV1: {
3708 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
3709 struct pf_altq *altq, *a;
3712 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
3713 error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
3716 altq->local_flags = 0;
3719 if (pa->ticket != V_ticket_altqs_inactive) {
3721 free(altq, M_PFALTQ);
3727 * If this is for a queue, find the discipline and
3728 * copy the necessary fields.
3730 if (altq->qname[0] != 0) {
3731 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
3734 free(altq, M_PFALTQ);
3737 altq->altq_disc = NULL;
3738 TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
3739 if (strncmp(a->ifname, altq->ifname,
3741 altq->altq_disc = a->altq_disc;
3747 if ((ifp = ifunit(altq->ifname)) == NULL)
3748 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
3750 error = altq_add(ifp, altq);
3754 free(altq, M_PFALTQ);
3758 if (altq->qname[0] != 0)
3759 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
3761 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
3762 /* version error check done on import above */
3763 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
3768 case DIOCGETALTQSV0:
3769 case DIOCGETALTQSV1: {
3770 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
3771 struct pf_altq *altq;
3775 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
3777 TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
3779 pa->ticket = V_ticket_altqs_active;
3785 case DIOCGETALTQV1: {
3786 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
3787 struct pf_altq *altq;
3790 if (pa->ticket != V_ticket_altqs_active) {
3795 altq = pf_altq_get_nth_active(pa->nr);
3801 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
3806 case DIOCCHANGEALTQV0:
3807 case DIOCCHANGEALTQV1:
3808 /* CHANGEALTQ not supported yet! */
3812 case DIOCGETQSTATSV0:
3813 case DIOCGETQSTATSV1: {
3814 struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr;
3815 struct pf_altq *altq;
3820 if (pq->ticket != V_ticket_altqs_active) {
3825 nbytes = pq->nbytes;
3826 altq = pf_altq_get_nth_active(pq->nr);
3833 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
3839 if (cmd == DIOCGETQSTATSV0)
3840 version = 0; /* DIOCGETQSTATSV0 means stats struct v0 */
3842 version = pq->version;
3843 error = altq_getqstats(altq, pq->buf, &nbytes, version);
3845 pq->scheduler = altq->scheduler;
3846 pq->nbytes = nbytes;
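/*
 * The pool-address ioctls form a small transaction of their own:
 * DIOCBEGINADDRS empties the per-vnet pabuf list and issues a fresh
 * ticket, DIOCADDADDR appends entries under that ticket, and the
 * buffered list is later consumed (via pf_mv_kpool) when a rule is
 * committed with a matching pool_ticket.
 */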
3852 case DIOCBEGINADDRS: {
3853 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
3856 pf_empty_kpool(&V_pf_pabuf);
3857 pp->ticket = ++V_ticket_pabuf;
3863 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
3864 struct pf_kpooladdr *pa;
3865 struct pfi_kkif *kif = NULL;
3868 if (pp->af == AF_INET) {
3869 error = EAFNOSUPPORT;
3874 if (pp->af == AF_INET6) {
3875 error = EAFNOSUPPORT;
3879 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
3880 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
3881 pp->addr.addr.type != PF_ADDR_TABLE) {
3885 if (pp->addr.addr.p.dyn != NULL) {
3889 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
3890 pf_pooladdr_to_kpooladdr(&pp->addr, pa);
3892 kif = pf_kkif_create(M_WAITOK);
3894 if (pp->ticket != V_ticket_pabuf) {
3902 if (pa->ifname[0]) {
3903 pa->kif = pfi_kkif_attach(kif, pa->ifname);
3905 pfi_kkif_ref(pa->kif);
3908 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
3909 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
3911 pfi_kkif_unref(pa->kif);
3916 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
3921 case DIOCGETADDRS: {
3922 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
3923 struct pf_kpool *pool;
3924 struct pf_kpooladdr *pa;
3928 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
3929 pp->r_num, 0, 1, 0);
3935 TAILQ_FOREACH(pa, &pool->list, entries)
3942 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
3943 struct pf_kpool *pool;
3944 struct pf_kpooladdr *pa;
3948 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
3949 pp->r_num, 0, 1, 1);
3955 pa = TAILQ_FIRST(&pool->list);
3956 while ((pa != NULL) && (nr < pp->nr)) {
3957 pa = TAILQ_NEXT(pa, entries);
3965 pf_kpooladdr_to_pooladdr(pa, &pp->addr);
3966 pf_addr_copyout(&pp->addr.addr);
3971 case DIOCCHANGEADDR: {
3972 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
3973 struct pf_kpool *pool;
3974 struct pf_kpooladdr *oldpa = NULL, *newpa = NULL;
3975 struct pf_kruleset *ruleset;
3976 struct pfi_kkif *kif = NULL;
3978 if (pca->action < PF_CHANGE_ADD_HEAD ||
3979 pca->action > PF_CHANGE_REMOVE) {
3983 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
3984 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
3985 pca->addr.addr.type != PF_ADDR_TABLE) {
3989 if (pca->addr.addr.p.dyn != NULL) {
3994 if (pca->action != PF_CHANGE_REMOVE) {
3996 if (pca->af == AF_INET) {
3997 error = EAFNOSUPPORT;
4002 if (pca->af == AF_INET6) {
4003 error = EAFNOSUPPORT;
4007 newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
4008 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
4009 if (newpa->ifname[0])
4010 kif = pf_kkif_create(M_WAITOK);
4014 #define ERROUT(x) { error = (x); goto DIOCCHANGEADDR_error; }
4016 ruleset = pf_find_kruleset(pca->anchor);
4017 if (ruleset == NULL)
4020 pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
4021 pca->r_num, pca->r_last, 1, 1);
4025 if (pca->action != PF_CHANGE_REMOVE) {
4026 if (newpa->ifname[0]) {
4027 newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
4028 pfi_kkif_ref(newpa->kif);
4032 switch (newpa->addr.type) {
4033 case PF_ADDR_DYNIFTL:
4034 error = pfi_dynaddr_setup(&newpa->addr,
4038 newpa->addr.p.tbl = pfr_attach_table(ruleset,
4039 newpa->addr.v.tblname);
4040 if (newpa->addr.p.tbl == NULL)
4045 goto DIOCCHANGEADDR_error;
4048 switch (pca->action) {
4049 case PF_CHANGE_ADD_HEAD:
4050 oldpa = TAILQ_FIRST(&pool->list);
4052 case PF_CHANGE_ADD_TAIL:
4053 oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
4056 oldpa = TAILQ_FIRST(&pool->list);
4057 for (int i = 0; oldpa && i < pca->nr; i++)
4058 oldpa = TAILQ_NEXT(oldpa, entries);
4064 if (pca->action == PF_CHANGE_REMOVE) {
4065 TAILQ_REMOVE(&pool->list, oldpa, entries);
4066 switch (oldpa->addr.type) {
4067 case PF_ADDR_DYNIFTL:
4068 pfi_dynaddr_remove(oldpa->addr.p.dyn);
4071 pfr_detach_table(oldpa->addr.p.tbl);
4075 pfi_kkif_unref(oldpa->kif);
4076 free(oldpa, M_PFRULE);
4079 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4080 else if (pca->action == PF_CHANGE_ADD_HEAD ||
4081 pca->action == PF_CHANGE_ADD_BEFORE)
4082 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4084 TAILQ_INSERT_AFTER(&pool->list, oldpa,
4088 pool->cur = TAILQ_FIRST(&pool->list);
4089 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
4094 DIOCCHANGEADDR_error:
4095 if (newpa != NULL) {
4097 pfi_kkif_unref(newpa->kif);
4098 free(newpa, M_PFRULE);
4105 case DIOCGETRULESETS: {
4106 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
4107 struct pf_kruleset *ruleset;
4108 struct pf_kanchor *anchor;
4111 pr->path[sizeof(pr->path) - 1] = 0;
4112 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4118 if (ruleset->anchor == NULL) {
4119 /* XXX kludge for pf_main_ruleset */
4120 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4121 if (anchor->parent == NULL)
4124 RB_FOREACH(anchor, pf_kanchor_node,
4125 &ruleset->anchor->children)
4132 case DIOCGETRULESET: {
4133 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
4134 struct pf_kruleset *ruleset;
4135 struct pf_kanchor *anchor;
4139 pr->path[sizeof(pr->path) - 1] = 0;
4140 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4146 if (ruleset->anchor == NULL) {
4147 /* XXX kludge for pf_main_ruleset */
4148 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4149 if (anchor->parent == NULL && nr++ == pr->nr) {
4150 strlcpy(pr->name, anchor->name,
4155 RB_FOREACH(anchor, pf_kanchor_node,
4156 &ruleset->anchor->children)
4157 if (nr++ == pr->nr) {
4158 strlcpy(pr->name, anchor->name,
4169 case DIOCRCLRTABLES: {
4170 struct pfioc_table *io = (struct pfioc_table *)addr;
4172 if (io->pfrio_esize != 0) {
4177 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
4178 io->pfrio_flags | PFR_FLAG_USERIOCTL);
4183 case DIOCRADDTABLES: {
4184 struct pfioc_table *io = (struct pfioc_table *)addr;
4185 struct pfr_table *pfrts;
4188 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4193 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4194 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4199 totlen = io->pfrio_size * sizeof(struct pfr_table);
4200 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4202 error = copyin(io->pfrio_buffer, pfrts, totlen);
4204 free(pfrts, M_TEMP);
4208 error = pfr_add_tables(pfrts, io->pfrio_size,
4209 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4211 free(pfrts, M_TEMP);
4215 case DIOCRDELTABLES: {
4216 struct pfioc_table *io = (struct pfioc_table *)addr;
4217 struct pfr_table *pfrts;
4220 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4225 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4226 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4231 totlen = io->pfrio_size * sizeof(struct pfr_table);
4232 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4234 error = copyin(io->pfrio_buffer, pfrts, totlen);
4236 free(pfrts, M_TEMP);
4240 error = pfr_del_tables(pfrts, io->pfrio_size,
4241 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4243 free(pfrts, M_TEMP);
4247 case DIOCRGETTABLES: {
4248 struct pfioc_table *io = (struct pfioc_table *)addr;
4249 struct pfr_table *pfrts;
4253 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4258 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4264 io->pfrio_size = min(io->pfrio_size, n);
4266 totlen = io->pfrio_size * sizeof(struct pfr_table);
4268 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4270 if (pfrts == NULL) {
4275 error = pfr_get_tables(&io->pfrio_table, pfrts,
4276 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4279 error = copyout(pfrts, io->pfrio_buffer, totlen);
4280 free(pfrts, M_TEMP);
4284 case DIOCRGETTSTATS: {
4285 struct pfioc_table *io = (struct pfioc_table *)addr;
4286 struct pfr_tstats *pfrtstats;
4290 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
4295 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4301 io->pfrio_size = min(io->pfrio_size, n);
4303 totlen = io->pfrio_size * sizeof(struct pfr_tstats);
4304 pfrtstats = mallocarray(io->pfrio_size,
4305 sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT);
4306 if (pfrtstats == NULL) {
4311 error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
4312 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4315 error = copyout(pfrtstats, io->pfrio_buffer, totlen);
4316 free(pfrtstats, M_TEMP);
4320 case DIOCRCLRTSTATS: {
4321 struct pfioc_table *io = (struct pfioc_table *)addr;
4322 struct pfr_table *pfrts;
4325 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4330 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4331 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4332 /* We used to count tables and use the minimum required
4333 * size, so we didn't fail on overly large requests.
4335 io->pfrio_size = pf_ioctl_maxcount;
4339 totlen = io->pfrio_size * sizeof(struct pfr_table);
4340 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4342 if (pfrts == NULL) {
4346 error = copyin(io->pfrio_buffer, pfrts, totlen);
4348 free(pfrts, M_TEMP);
4353 error = pfr_clr_tstats(pfrts, io->pfrio_size,
4354 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4356 free(pfrts, M_TEMP);
4360 case DIOCRSETTFLAGS: {
4361 struct pfioc_table *io = (struct pfioc_table *)addr;
4362 struct pfr_table *pfrts;
4366 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4372 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4379 io->pfrio_size = min(io->pfrio_size, n);
4382 totlen = io->pfrio_size * sizeof(struct pfr_table);
4383 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4385 error = copyin(io->pfrio_buffer, pfrts, totlen);
4387 free(pfrts, M_TEMP);
4391 error = pfr_set_tflags(pfrts, io->pfrio_size,
4392 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
4393 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4395 free(pfrts, M_TEMP);
4399 case DIOCRCLRADDRS: {
4400 struct pfioc_table *io = (struct pfioc_table *)addr;
4402 if (io->pfrio_esize != 0) {
4407 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
4408 io->pfrio_flags | PFR_FLAG_USERIOCTL);
4413 case DIOCRADDADDRS: {
4414 struct pfioc_table *io = (struct pfioc_table *)addr;
4415 struct pfr_addr *pfras;
4418 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4422 if (io->pfrio_size < 0 ||
4423 io->pfrio_size > pf_ioctl_maxcount ||
4424 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4428 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4429 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4435 error = copyin(io->pfrio_buffer, pfras, totlen);
4437 free(pfras, M_TEMP);
4441 error = pfr_add_addrs(&io->pfrio_table, pfras,
4442 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
4443 PFR_FLAG_USERIOCTL);
4445 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4446 error = copyout(pfras, io->pfrio_buffer, totlen);
4447 free(pfras, M_TEMP);
4451 case DIOCRDELADDRS: {
4452 struct pfioc_table *io = (struct pfioc_table *)addr;
4453 struct pfr_addr *pfras;
4456 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4460 if (io->pfrio_size < 0 ||
4461 io->pfrio_size > pf_ioctl_maxcount ||
4462 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4466 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4467 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4473 error = copyin(io->pfrio_buffer, pfras, totlen);
4475 free(pfras, M_TEMP);
4479 error = pfr_del_addrs(&io->pfrio_table, pfras,
4480 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
4481 PFR_FLAG_USERIOCTL);
4483 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4484 error = copyout(pfras, io->pfrio_buffer, totlen);
4485 free(pfras, M_TEMP);
4489 case DIOCRSETADDRS: {
4490 struct pfioc_table *io = (struct pfioc_table *)addr;
4491 struct pfr_addr *pfras;
4492 size_t totlen, count;
4494 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4498 if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
4502 count = max(io->pfrio_size, io->pfrio_size2);
4503 if (count > pf_ioctl_maxcount ||
4504 WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
4508 totlen = count * sizeof(struct pfr_addr);
4509 pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
4515 error = copyin(io->pfrio_buffer, pfras, totlen);
4517 free(pfras, M_TEMP);
4521 error = pfr_set_addrs(&io->pfrio_table, pfras,
4522 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
4523 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
4524 PFR_FLAG_USERIOCTL, 0);
4526 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4527 error = copyout(pfras, io->pfrio_buffer, totlen);
4528 free(pfras, M_TEMP);
4532 case DIOCRGETADDRS: {
4533 struct pfioc_table *io = (struct pfioc_table *)addr;
4534 struct pfr_addr *pfras;
4537 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4541 if (io->pfrio_size < 0 ||
4542 io->pfrio_size > pf_ioctl_maxcount ||
4543 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4547 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4548 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4555 error = pfr_get_addrs(&io->pfrio_table, pfras,
4556 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4559 error = copyout(pfras, io->pfrio_buffer, totlen);
4560 free(pfras, M_TEMP);
4564 case DIOCRGETASTATS: {
4565 struct pfioc_table *io = (struct pfioc_table *)addr;
4566 struct pfr_astats *pfrastats;
4569 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
4573 if (io->pfrio_size < 0 ||
4574 io->pfrio_size > pf_ioctl_maxcount ||
4575 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
4579 totlen = io->pfrio_size * sizeof(struct pfr_astats);
4580 pfrastats = mallocarray(io->pfrio_size,
4581 sizeof(struct pfr_astats), M_TEMP, M_NOWAIT);
4587 error = pfr_get_astats(&io->pfrio_table, pfrastats,
4588 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4591 error = copyout(pfrastats, io->pfrio_buffer, totlen);
4592 free(pfrastats, M_TEMP);
4596 case DIOCRCLRASTATS: {
4597 struct pfioc_table *io = (struct pfioc_table *)addr;
4598 struct pfr_addr *pfras;
4601 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4605 if (io->pfrio_size < 0 ||
4606 io->pfrio_size > pf_ioctl_maxcount ||
4607 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4611 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4612 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4618 error = copyin(io->pfrio_buffer, pfras, totlen);
4620 free(pfras, M_TEMP);
4624 error = pfr_clr_astats(&io->pfrio_table, pfras,
4625 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
4626 PFR_FLAG_USERIOCTL);
4628 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4629 error = copyout(pfras, io->pfrio_buffer, totlen);
4630 free(pfras, M_TEMP);
4634 case DIOCRTSTADDRS: {
4635 struct pfioc_table *io = (struct pfioc_table *)addr;
4636 struct pfr_addr *pfras;
4639 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4643 if (io->pfrio_size < 0 ||
4644 io->pfrio_size > pf_ioctl_maxcount ||
4645 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4649 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4650 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4656 error = copyin(io->pfrio_buffer, pfras, totlen);
4658 free(pfras, M_TEMP);
4662 error = pfr_tst_addrs(&io->pfrio_table, pfras,
4663 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
4664 PFR_FLAG_USERIOCTL);
4667 error = copyout(pfras, io->pfrio_buffer, totlen);
4668 free(pfras, M_TEMP);
4672 case DIOCRINADEFINE: {
4673 struct pfioc_table *io = (struct pfioc_table *)addr;
4674 struct pfr_addr *pfras;
4677 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4681 if (io->pfrio_size < 0 ||
4682 io->pfrio_size > pf_ioctl_maxcount ||
4683 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4687 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4688 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4694 error = copyin(io->pfrio_buffer, pfras, totlen);
4696 free(pfras, M_TEMP);
4700 error = pfr_ina_define(&io->pfrio_table, pfras,
4701 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
4702 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4704 free(pfras, M_TEMP);
4709 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
4711 error = pf_osfp_add(io);
4717 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
4719 error = pf_osfp_get(io);
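/*
 * DIOCXBEGIN, DIOCXROLLBACK and DIOCXCOMMIT implement the atomic
 * commit used by pfctl: every element of the passed pfioc_trans
 * array opens (or rolls back / commits) one inactive ruleset, table
 * or ALTQ transaction, identified by its ticket.
 */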
4725 struct pfioc_trans *io = (struct pfioc_trans *)addr;
4726 struct pfioc_trans_e *ioes, *ioe;
4730 if (io->esize != sizeof(*ioe)) {
4735 io->size > pf_ioctl_maxcount ||
4736 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
4740 totlen = sizeof(struct pfioc_trans_e) * io->size;
4741 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
4747 error = copyin(io->array, ioes, totlen);
4753 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
4754 switch (ioe->rs_num) {
4756 case PF_RULESET_ALTQ:
4757 if (ioe->anchor[0]) {
4763 if ((error = pf_begin_altq(&ioe->ticket))) {
4770 case PF_RULESET_TABLE:
4772 struct pfr_table table;
4774 bzero(&table, sizeof(table));
4775 strlcpy(table.pfrt_anchor, ioe->anchor,
4776 sizeof(table.pfrt_anchor));
4777 if ((error = pfr_ina_begin(&table,
4778 &ioe->ticket, NULL, 0))) {
4786 if ((error = pf_begin_rules(&ioe->ticket,
4787 ioe->rs_num, ioe->anchor))) {
4796 error = copyout(ioes, io->array, totlen);
4801 case DIOCXROLLBACK: {
4802 struct pfioc_trans *io = (struct pfioc_trans *)addr;
4803 struct pfioc_trans_e *ioe, *ioes;
4807 if (io->esize != sizeof(*ioe)) {
4812 io->size > pf_ioctl_maxcount ||
4813 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
4817 totlen = sizeof(struct pfioc_trans_e) * io->size;
4818 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
4824 error = copyin(io->array, ioes, totlen);
4830 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
4831 switch (ioe->rs_num) {
4833 case PF_RULESET_ALTQ:
4834 if (ioe->anchor[0]) {
4840 if ((error = pf_rollback_altq(ioe->ticket))) {
4843 goto fail; /* really bad */
4847 case PF_RULESET_TABLE:
4849 struct pfr_table table;
4851 bzero(&table, sizeof(table));
4852 strlcpy(table.pfrt_anchor, ioe->anchor,
4853 sizeof(table.pfrt_anchor));
4854 if ((error = pfr_ina_rollback(&table,
4855 ioe->ticket, NULL, 0))) {
4858 goto fail; /* really bad */
4863 if ((error = pf_rollback_rules(ioe->ticket,
4864 ioe->rs_num, ioe->anchor))) {
4867 goto fail; /* really bad */
4878 struct pfioc_trans *io = (struct pfioc_trans *)addr;
4879 struct pfioc_trans_e *ioe, *ioes;
4880 struct pf_kruleset *rs;
4884 if (io->esize != sizeof(*ioe)) {
4890 io->size > pf_ioctl_maxcount ||
4891 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
4896 totlen = sizeof(struct pfioc_trans_e) * io->size;
4897 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
4903 error = copyin(io->array, ioes, totlen);
4909 /* First, make sure everything will succeed. */
4910 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
4911 switch (ioe->rs_num) {
4913 case PF_RULESET_ALTQ:
4914 if (ioe->anchor[0]) {
4920 if (!V_altqs_inactive_open || ioe->ticket !=
4921 V_ticket_altqs_inactive) {
4929 case PF_RULESET_TABLE:
4930 rs = pf_find_kruleset(ioe->anchor);
4931 if (rs == NULL || !rs->topen || ioe->ticket !=
4940 if (ioe->rs_num < 0 || ioe->rs_num >=
4947 rs = pf_find_kruleset(ioe->anchor);
4949 !rs->rules[ioe->rs_num].inactive.open ||
4950 rs->rules[ioe->rs_num].inactive.ticket !=
4960 /* Now do the commit - no errors should happen here. */
4961 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
4962 switch (ioe->rs_num) {
4964 case PF_RULESET_ALTQ:
4965 if ((error = pf_commit_altq(ioe->ticket))) {
4968 goto fail; /* really bad */
4972 case PF_RULESET_TABLE:
4974 struct pfr_table table;
4976 bzero(&table, sizeof(table));
4977 strlcpy(table.pfrt_anchor, ioe->anchor,
4978 sizeof(table.pfrt_anchor));
4979 if ((error = pfr_ina_commit(&table,
4980 ioe->ticket, NULL, NULL, 0))) {
4983 goto fail; /* really bad */
4988 if ((error = pf_commit_rules(ioe->ticket,
4989 ioe->rs_num, ioe->anchor))) {
4992 goto fail; /* really bad */
5002 case DIOCGETSRCNODES: {
5003 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
5004 struct pf_srchash *sh;
5005 struct pf_ksrc_node *n;
5006 struct pf_src_node *p, *pstore;
5009 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5011 PF_HASHROW_LOCK(sh);
5012 LIST_FOREACH(n, &sh->nodes, entry)
5014 PF_HASHROW_UNLOCK(sh);
5017 psn->psn_len = min(psn->psn_len,
5018 sizeof(struct pf_src_node) * nr);
5020 if (psn->psn_len == 0) {
5021 psn->psn_len = sizeof(struct pf_src_node) * nr;
5027 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
5028 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5030 PF_HASHROW_LOCK(sh);
5031 LIST_FOREACH(n, &sh->nodes, entry) {
5033 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
5036 pf_src_node_copy(n, p);
5041 PF_HASHROW_UNLOCK(sh);
5043 error = copyout(pstore, psn->psn_src_nodes,
5044 sizeof(struct pf_src_node) * nr);
5046 free(pstore, M_TEMP);
5049 psn->psn_len = sizeof(struct pf_src_node) * nr;
5050 free(pstore, M_TEMP);
5054 case DIOCCLRSRCNODES: {
5056 pf_clear_srcnodes(NULL);
5057 pf_purge_expired_src_nodes();
5061 case DIOCKILLSRCNODES:
5062 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5065 case DIOCKEEPCOUNTERS:
5066 error = pf_keepcounters((struct pfioc_nv *)addr);
5069 case DIOCSETHOSTID: {
5070 u_int32_t *hostid = (u_int32_t *)addr;
5074 V_pf_status.hostid = arc4random();
5076 V_pf_status.hostid = *hostid;
5087 case DIOCIGETIFACES: {
5088 struct pfioc_iface *io = (struct pfioc_iface *)addr;
5089 struct pfi_kif *ifstore;
5092 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
5097 if (io->pfiio_size < 0 ||
5098 io->pfiio_size > pf_ioctl_maxcount ||
5099 WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
5104 bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
5105 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
5107 if (ifstore == NULL) {
5113 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
5115 error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5116 free(ifstore, M_TEMP);
5120 case DIOCSETIFFLAG: {
5121 struct pfioc_iface *io = (struct pfioc_iface *)addr;
5124 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5129 case DIOCCLRIFFLAG: {
5130 struct pfioc_iface *io = (struct pfioc_iface *)addr;
5133 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5143 if (sx_xlocked(&pf_ioctl_lock))
5144 sx_xunlock(&pf_ioctl_lock);
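/*
 * Export a kernel state into the wire/pfsync representation:
 * addresses and ports are copied from both state keys, timestamps
 * are converted to remaining seconds, and multi-byte fields are
 * stored in network byte order for the peer.
 */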
5151 pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
5153 bzero(sp, sizeof(struct pfsync_state));
5155 /* copy from state key */
5156 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5157 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5158 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5159 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5160 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5161 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5162 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5163 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5164 sp->proto = st->key[PF_SK_WIRE]->proto;
5165 sp->af = st->key[PF_SK_WIRE]->af;
5167 /* copy from state */
5168 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5169 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
5170 sp->creation = htonl(time_uptime - st->creation);
5171 sp->expire = pf_state_expires(st);
5172 if (sp->expire <= time_uptime)
5173 sp->expire = htonl(0);
5175 sp->expire = htonl(sp->expire - time_uptime);
5177 sp->direction = st->direction;
5179 sp->timeout = st->timeout;
5180 sp->state_flags = st->state_flags;
5182 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5183 if (st->nat_src_node)
5184 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5187 sp->creatorid = st->creatorid;
5188 pf_state_peer_hton(&st->src, &sp->src);
5189 pf_state_peer_hton(&st->dst, &sp->dst);
5191 if (st->rule.ptr == NULL)
5192 sp->rule = htonl(-1);
5194 sp->rule = htonl(st->rule.ptr->nr);
5195 if (st->anchor.ptr == NULL)
5196 sp->anchor = htonl(-1);
5198 sp->anchor = htonl(st->anchor.ptr->nr);
5199 if (st->nat_rule.ptr == NULL)
5200 sp->nat_rule = htonl(-1);
5202 sp->nat_rule = htonl(st->nat_rule.ptr->nr);
5204 pf_state_counter_hton(counter_u64_fetch(st->packets[0]),
5206 pf_state_counter_hton(counter_u64_fetch(st->packets[1]),
5208 pf_state_counter_hton(counter_u64_fetch(st->bytes[0]), sp->bytes[0]);
5209 pf_state_counter_hton(counter_u64_fetch(st->bytes[1]), sp->bytes[1]);
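/*
 * Report the size of a table referenced by an address wrap back to
 * userspace: if the attached table is inactive, fall back to its
 * root table, and report a count of -1 when no active table is
 * found.
 */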
5214 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
5216 struct pfr_ktable *kt;
5218 KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
5221 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
5222 kt = kt->pfrkt_root;
5224 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
5229 * XXX - Check for version mismatch!!!
5232 pf_clear_states(void)
5237 for (i = 0; i <= pf_hashmask; i++) {
5238 struct pf_idhash *ih = &V_pf_idhash[i];
5240 PF_HASHROW_LOCK(ih);
5241 LIST_FOREACH(s, &ih->states, entry) {
5242 s->timeout = PFTM_PURGE;
5243 /* Don't send out individual delete messages. */
5244 s->state_flags |= PFSTATE_NOSYNC;
5245 pf_unlink_state(s, PF_ENTER_LOCKED);
5248 PF_HASHROW_UNLOCK(ih);
5253 pf_clear_tables(void)
5255 struct pfioc_table io;
5258 bzero(&io, sizeof(io));
5260 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
5267 pf_clear_srcnodes(struct pf_ksrc_node *n)
5272 for (i = 0; i <= pf_hashmask; i++) {
5273 struct pf_idhash *ih = &V_pf_idhash[i];
5275 PF_HASHROW_LOCK(ih);
5276 LIST_FOREACH(s, &ih->states, entry) {
5277 if (n == NULL || n == s->src_node)
5279 if (n == NULL || n == s->nat_src_node)
5280 s->nat_src_node = NULL;
5282 PF_HASHROW_UNLOCK(ih);
5286 struct pf_srchash *sh;
5288 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5290 PF_HASHROW_LOCK(sh);
5291 LIST_FOREACH(n, &sh->nodes, entry) {
5295 PF_HASHROW_UNLOCK(sh);
5298 /* XXX: hash slot should already be locked here. */
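/*
 * DIOCKILLSRCNODES backend: collect all source nodes matching the
 * request's source/destination masks on a kill list, detach any
 * states still pointing at them, and finally free the list,
 * reporting the number of killed nodes.
 */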
5305 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
5307 struct pf_ksrc_node_list kill;
5310 for (int i = 0; i <= pf_srchashmask; i++) {
5311 struct pf_srchash *sh = &V_pf_srchash[i];
5312 struct pf_ksrc_node *sn, *tmp;
5314 PF_HASHROW_LOCK(sh);
5315 LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
5316 if (PF_MATCHA(psnk->psnk_src.neg,
5317 &psnk->psnk_src.addr.v.a.addr,
5318 &psnk->psnk_src.addr.v.a.mask,
5319 &sn->addr, sn->af) &&
5320 PF_MATCHA(psnk->psnk_dst.neg,
5321 &psnk->psnk_dst.addr.v.a.addr,
5322 &psnk->psnk_dst.addr.v.a.mask,
5323 &sn->raddr, sn->af)) {
5324 pf_unlink_src_node(sn);
5325 LIST_INSERT_HEAD(&kill, sn, entry);
5328 PF_HASHROW_UNLOCK(sh);
5331 for (int i = 0; i <= pf_hashmask; i++) {
5332 struct pf_idhash *ih = &V_pf_idhash[i];
5335 PF_HASHROW_LOCK(ih);
5336 LIST_FOREACH(s, &ih->states, entry) {
5337 if (s->src_node && s->src_node->expire == 1)
5339 if (s->nat_src_node && s->nat_src_node->expire == 1)
5340 s->nat_src_node = NULL;
5342 PF_HASHROW_UNLOCK(ih);
5345 psnk->psnk_killed = pf_free_src_nodes(&kill);
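/*
 * DIOCKEEPCOUNTERS backend: unpack the caller's nvlist and copy its
 * "keep_counters" boolean into the per-vnet status, so rule counters
 * survive a ruleset reload.
 */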
5349 pf_keepcounters(struct pfioc_nv *nv)
5351 nvlist_t *nvl = NULL;
5352 void *nvlpacked = NULL;
5355 #define ERROUT(x) do { error = (x); goto on_error; } while (0)
5357 if (nv->len > pf_ioctl_maxcount)
5360 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
5361 if (nvlpacked == NULL)
5364 error = copyin(nv->data, nvlpacked, nv->len);
5368 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
5372 if (! nvlist_exists_bool(nvl, "keep_counters"))
5375 V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");
5378 nvlist_destroy(nvl);
5379 free(nvlpacked, M_TEMP);
5384 * XXX - Check for version mismatch!!!
5388 * Duplicate pfctl -Fa operation to get rid of as much as we can.
5398 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
5400 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
5403 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
5405 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
5406 break; /* XXX: rollback? */
5408 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
5410 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
5411 break; /* XXX: rollback? */
5413 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
5415 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
5416 break; /* XXX: rollback? */
5418 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
5420 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
5421 break; /* XXX: rollback? */
5424 /* XXX: these should always succeed here */
5425 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
5426 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
5427 pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
5428 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
5429 pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
5431 if ((error = pf_clear_tables()) != 0)
5435 if ((error = pf_begin_altq(&t[0])) != 0) {
5436 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
5439 pf_commit_altq(t[0]);
5444 pf_clear_srcnodes(NULL);
5446 /* status does not use malloc'ed mem so no need to clean up */
5447 /* fingerprints and interfaces have their own cleanup code */
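/*
 * pfil(9) hook wrappers: each one funnels the packet into pf_test()
 * or pf_test6() for the given direction and hands the verdict back
 * to the pfil framework.
 */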
5455 pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
5460 chk = pf_test(PF_IN, flags, ifp, m, inp);
5472 pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
5477 chk = pf_test(PF_OUT, flags, ifp, m, inp);
5491 pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
5497 * In the case of loopback traffic IPv6 uses the real interface in
5498 * order to support scoped addresses. To support stateful
5499 * filtering we have to change this to lo0, as is the case in IPv4.
5501 CURVNET_SET(ifp->if_vnet);
5502 chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp);
5514 pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, int flags,
5519 CURVNET_SET(ifp->if_vnet);
5520 chk = pf_test6(PF_OUT, flags, ifp, m, inp);
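/*
 * Shape of a pfil(9) input hook, for illustration only (the name and
 * the drop policy below are made up).  A hook returns 0 to let the
 * packet continue; to drop, it must also free the mbuf and NULL the
 * caller's pointer, exactly as pf_test() does on behalf of the hooks
 * above.
 */
#if 0
static int
example_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    int flags, struct inpcb *inp)
{
	/* Drop packets too short to carry an IPv4 header. */
	if ((*m)->m_pkthdr.len < (int)sizeof(struct ip)) {
		m_freem(*m);
		*m = NULL;
		return (EINVAL);
	}
	return (0);
}
#endif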
5536 struct pfil_head *pfh_inet;
5539 struct pfil_head *pfh_inet6;
5542 if (V_pf_pfil_hooked)
5546 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
5547 if (pfh_inet == NULL)
5548 return (ESRCH); /* XXX */
5549 pfil_add_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
5550 pfil_add_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
5553 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
5554 if (pfh_inet6 == NULL) {
5556 pfil_remove_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
5558 pfil_remove_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
5561 return (ESRCH); /* XXX */
5563 pfil_add_hook_flags(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
5564 pfil_add_hook_flags(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
5567 V_pf_pfil_hooked = 1;
5575 struct pfil_head *pfh_inet;
5578 struct pfil_head *pfh_inet6;
5581 if (V_pf_pfil_hooked == 0)
5585 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
5586 if (pfh_inet == NULL)
5587 return (ESRCH); /* XXX */
5588 pfil_remove_hook_flags(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
5590 pfil_remove_hook_flags(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
5594 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
5595 if (pfh_inet6 == NULL)
5596 return (ESRCH); /* XXX */
5597 pfil_remove_hook_flags(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
5599 pfil_remove_hook_flags(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
5603 V_pf_pfil_hooked = 0;
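/*
 * Illustrative pairing sketch (disabled): every pfil_add_hook_flags()
 * in hook_pf() must be undone by a pfil_remove_hook_flags() with the
 * same (func, arg, flags) triple, as dehook_pf() does above.
 * example_check_in is the hypothetical hook from the earlier sketch.
 */
#if 0
static int
example_hook_pair(void)
{
	struct pfil_head *head;

	if ((head = pfil_head_get(PFIL_TYPE_AF, AF_INET)) == NULL)
		return (ESRCH);
	pfil_add_hook_flags(example_check_in, NULL,
	    PFIL_IN | PFIL_WAITOK, head);
	/* ... later, on teardown, with identical arguments ... */
	pfil_remove_hook_flags(example_check_in, NULL,
	    PFIL_IN | PFIL_WAITOK, head);
	return (0);
}
#endif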
5610 V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
5611 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
5613 pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
5614 PF_RULE_TAG_HASH_SIZE_DEFAULT);
5616 pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
5617 PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
5621 V_pf_vnet_active = 1;
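/*
 * Illustrative allocation cycle for the tag zone created above
 * (disabled sketch; example_tag_cycle is not a real function).  The
 * zone is created with no ctor/dtor/init/fini, so the lifecycle is
 * just uma_zalloc() and uma_zfree().
 */
#if 0
static void
example_tag_cycle(void)
{
	struct pf_tagname *t;

	t = uma_zalloc(V_pf_tag_z, M_NOWAIT);	/* may fail under pressure */
	if (t == NULL)
		return;
	/* ... fill in the tag name and value ... */
	uma_zfree(V_pf_tag_z, t);
}
#endif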
5629 rm_init(&pf_rules_lock, "pf rulesets");
5630 sx_init(&pf_ioctl_lock, "pf ioctl");
5631 sx_init(&pf_end_lock, "pf end thread");
5633 pf_mtag_initialize();
5635 pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
5640 error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
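/*
 * Shape of the kernel process started above (illustrative; the names
 * are made up).  kproc_create() points a new process at the function,
 * and the function must terminate itself with kproc_exit().
 */
#if 0
static volatile int example_exit_requested;

static void
example_worker(void *arg __unused)
{
	while (example_exit_requested == 0)
		pause("xmplw", hz);	/* periodic work would go here */
	kproc_exit(0);
}

/* Started like the purge thread above: */
/* error = kproc_create(example_worker, NULL, &example_proc, 0, 0,
 *     "example worker"); */
#endif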
5650 pf_unload_vnet(void)
5654 V_pf_vnet_active = 0;
5655 V_pf_status.running = 0;
5656 error = dehook_pf();
5659 * Should not happen!
5660 * XXX Due to error code ESRCH, kldunload will show
5661 * a message like 'No such process'.
5663 printf("%s: pfil unregistration failed\n", __FUNCTION__);
5671 ret = swi_remove(V_pf_swi_cookie);
5673 ret = intr_event_destroy(V_pf_swi_ie);
5676 pf_unload_vnet_purge();
5678 pf_normalize_cleanup();
5685 if (IS_DEFAULT_VNET(curvnet))
5688 pf_cleanup_tagset(&V_pf_tags);
5690 pf_cleanup_tagset(&V_pf_qids);
5692 uma_zdestroy(V_pf_tag_z);
5694 /* Free counters last as we updated them during shutdown. */
5695 counter_u64_free(V_pf_default_rule.evaluations);
5696 for (int i = 0; i < 2; i++) {
5697 counter_u64_free(V_pf_default_rule.packets[i]);
5698 counter_u64_free(V_pf_default_rule.bytes[i]);
5700 counter_u64_free(V_pf_default_rule.states_cur);
5701 counter_u64_free(V_pf_default_rule.states_tot);
5702 counter_u64_free(V_pf_default_rule.src_nodes);
5704 for (int i = 0; i < PFRES_MAX; i++)
5705 counter_u64_free(V_pf_status.counters[i]);
5706 for (int i = 0; i < LCNT_MAX; i++)
5707 counter_u64_free(V_pf_status.lcounters[i]);
5708 for (int i = 0; i < FCNT_MAX; i++)
5709 counter_u64_free(V_pf_status.fcounters[i]);
5710 for (int i = 0; i < SCNT_MAX; i++)
5711 counter_u64_free(V_pf_status.scounters[i]);
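/*
 * Illustrative counter(9) lifecycle (disabled sketch): each per-CPU
 * counter freed above was allocated with counter_u64_alloc() and
 * updated with counter_u64_add(); counter_u64_fetch() sums the
 * per-CPU parts on demand.
 */
#if 0
static void
example_counter_cycle(void)
{
	counter_u64_t c;

	c = counter_u64_alloc(M_WAITOK);	/* cannot fail with M_WAITOK */
	counter_u64_add(c, 1);			/* cheap per-CPU increment */
	printf("count: %ju\n", (uintmax_t)counter_u64_fetch(c));
	counter_u64_free(c);
}
#endif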
5718 sx_xlock(&pf_end_lock);
5720 while (pf_end_threads < 2) {
5721 wakeup_one(pf_purge_thread);
5722 sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
5724 sx_xunlock(&pf_end_lock);
5727 destroy_dev(pf_dev);
5731 rm_destroy(&pf_rules_lock);
5732 sx_destroy(&pf_ioctl_lock);
5733 sx_destroy(&pf_end_lock);
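/*
 * Worker-side half of the shutdown handshake above (illustrative
 * sketch, not the real purge-thread code): the exiting thread bumps
 * pf_end_threads under pf_end_lock, and the wakeup() inside
 * kproc_exit() is what releases the sx_sleep() in pf_unload().
 */
#if 0
static void
example_purge_exit(void)
{
	sx_xlock(&pf_end_lock);
	pf_end_threads++;	/* lets the pf_end_threads < 2 loop advance */
	sx_xunlock(&pf_end_lock);
	kproc_exit(0);		/* wakes sleepers on the exiting proc */
}
#endif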
5737 vnet_pf_init(void *unused __unused)
5742 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
5743 vnet_pf_init, NULL);
5746 vnet_pf_uninit(const void *unused __unused)
5751 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
5752 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
5753 vnet_pf_uninit, NULL);
5757 pf_modevent(module_t mod, int type, void *data)
5766 /* Handled in SYSUNINIT(pf_unload) to ensure it's done after
5767 * the vnet_pf_uninit()s */
5777 static moduledata_t pf_mod = {
5783 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
5784 MODULE_VERSION(pf, PF_MODVER);
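/*
 * Minimal shape of the module glue above (illustrative; "example" is a
 * made-up module name).  DECLARE_MODULE() ties the event handler to a
 * SYSINIT ordering point, and MODULE_VERSION() lets other modules
 * declare a dependency on it.
 */
#if 0
static int
example_modevent(module_t mod, int type, void *data)
{
	switch (type) {
	case MOD_LOAD:
	case MOD_UNLOAD:
		return (0);
	default:
		return (EINVAL);
	}
}

static moduledata_t example_mod = {
	"example",
	example_modevent,
	NULL
};

DECLARE_MODULE(example, example_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY);
MODULE_VERSION(example, 1);
#endif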