1 /* $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $ */
4 * Copyright (c) 2001 Daniel Hartmeier
5 * Copyright (c) 2002,2003 Henning Brauer
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * - Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * - Redistributions in binary form must reproduce the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer in the documentation and/or other materials provided
17 * with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
32 * Effort sponsored in part by the Defense Advanced Research Projects
33 * Agency (DARPA) and Air Force Research Laboratory, Air Force
34 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
43 #include "opt_inet6.h"
50 #define NPFLOG DEV_PFLOG
55 #else /* !__FreeBSD__ */
58 #endif /* __FreeBSD__ */
60 #include <sys/param.h>
61 #include <sys/systm.h>
63 #include <sys/filio.h>
64 #include <sys/fcntl.h>
65 #include <sys/socket.h>
66 #include <sys/socketvar.h>
67 #include <sys/kernel.h>
70 #include <sys/ucred.h>
72 #include <sys/module.h>
75 #include <sys/sysctl.h>
77 #include <sys/timeout.h>
81 #include <sys/malloc.h>
82 #include <sys/kthread.h>
84 #include <sys/rwlock.h>
85 #include <uvm/uvm_extern.h>
89 #include <net/if_types.h>
93 #include <net/route.h>
95 #include <netinet/in.h>
96 #include <netinet/in_var.h>
97 #include <netinet/in_systm.h>
98 #include <netinet/ip.h>
99 #include <netinet/ip_var.h>
100 #include <netinet/ip_icmp.h>
105 #include <dev/rndvar.h>
106 #include <crypto/md5.h>
108 #include <net/pfvar.h>
110 #include <net/if_pfsync.h>
113 #include <net/if_pflog.h>
114 #endif /* NPFLOG > 0 */
117 #include <netinet/ip6.h>
118 #include <netinet/in_pcb.h>
122 #include <altq/altq.h>
126 #include <sys/limits.h>
127 #include <sys/lock.h>
128 #include <sys/mutex.h>
129 #include <net/pfil.h>
130 #endif /* __FreeBSD__ */
133 void init_zone_var(void);
134 void cleanup_pf_zone(void);
138 void pf_thread_create(void *);
139 int pfopen(dev_t, int, int, struct proc *);
140 int pfclose(dev_t, int, int, struct proc *);
142 struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
143 u_int8_t, u_int8_t, u_int8_t);
145 void pf_mv_pool(struct pf_palist *, struct pf_palist *);
146 void pf_empty_pool(struct pf_palist *);
148 int pfioctl(struct cdev *, u_long, caddr_t, int, struct thread *);
150 int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
153 int pf_begin_altq(u_int32_t *);
154 int pf_rollback_altq(u_int32_t);
155 int pf_commit_altq(u_int32_t);
156 int pf_enable_altq(struct pf_altq *);
157 int pf_disable_altq(struct pf_altq *);
159 int pf_begin_rules(u_int32_t *, int, const char *);
160 int pf_rollback_rules(u_int32_t, int, char *);
161 int pf_setup_pfsync_matching(struct pf_ruleset *);
162 void pf_hash_rule(MD5_CTX *, struct pf_rule *);
163 void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
164 int pf_commit_rules(u_int32_t, int, char *);
165 int pf_addr_setup(struct pf_ruleset *,
166 struct pf_addr_wrap *, sa_family_t);
167 void pf_addr_copyout(struct pf_addr_wrap *);
169 #define TAGID_MAX 50000
172 VNET_DEFINE(struct pf_rule, pf_default_rule);
173 VNET_DEFINE(struct sx, pf_consistency_lock);
176 static VNET_DEFINE(int, pf_altq_running);
177 #define V_pf_altq_running VNET(pf_altq_running)
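/*
 * A note on the VNET pattern used throughout this port: under the
 * FreeBSD VIMAGE framework, VNET_DEFINE() creates one instance of a
 * variable per virtual network stack and the conventional V_-prefixed
 * macro selects the current vnet's instance, so V_pf_altq_running
 * behaves like a plain global when VIMAGE is compiled out.
 */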
180 TAILQ_HEAD(pf_tags, pf_tagname);
182 #define V_pf_tags VNET(pf_tags)
183 VNET_DEFINE(struct pf_tags, pf_tags);
184 #define V_pf_qids VNET(pf_qids)
185 VNET_DEFINE(struct pf_tags, pf_qids);
187 #else /* !__FreeBSD__ */
188 struct pf_rule pf_default_rule;
189 struct rwlock pf_consistency_lock = RWLOCK_INITIALIZER("pfcnslk");
191 static int pf_altq_running;
194 TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
195 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
196 #endif /* __FreeBSD__ */
198 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
199 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
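/*
 * Queue names are stored in the same tag-name tables as packet tags
 * (pf_qids above is a struct pf_tags), which is why the two name
 * sizes must match.
 */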
202 u_int16_t tagname2tag(struct pf_tags *, char *);
203 void tag2tagname(struct pf_tags *, u_int16_t, char *);
204 void tag_unref(struct pf_tags *, u_int16_t);
205 int pf_rtlabel_add(struct pf_addr_wrap *);
206 void pf_rtlabel_remove(struct pf_addr_wrap *);
207 void pf_rtlabel_copyout(struct pf_addr_wrap *);
210 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
212 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
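/*
 * DPFPRINTF takes the whole printf() argument list as one
 * parenthesized macro argument, so callers use double parentheses,
 * e.g. DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
 */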
219 * XXX - These are new and need to be checked when moving to a new version
221 static void pf_clear_states(void);
222 static int pf_clear_tables(void);
223 static void pf_clear_srcnodes(void);
225 * XXX - These are new and need to be checked when moving to a new version
229 * Wrapper functions for pfil(9) hooks
232 static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
233 int dir, struct inpcb *inp);
234 static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
235 int dir, struct inpcb *inp);
238 static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
239 int dir, struct inpcb *inp);
240 static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
241 int dir, struct inpcb *inp);
244 static int hook_pf(void);
245 static int dehook_pf(void);
246 static int shutdown_pf(void);
247 static int pf_load(void);
248 static int pf_unload(void);
250 static struct cdevsw pf_cdevsw = {
253 .d_version = D_VERSION,
256 static volatile VNET_DEFINE(int, pf_pfil_hooked);
257 #define V_pf_pfil_hooked VNET(pf_pfil_hooked)
258 VNET_DEFINE(int, pf_end_threads);
259 struct mtx pf_task_mtx;
262 pfsync_state_import_t *pfsync_state_import_ptr = NULL;
263 pfsync_insert_state_t *pfsync_insert_state_ptr = NULL;
264 pfsync_update_state_t *pfsync_update_state_ptr = NULL;
265 pfsync_delete_state_t *pfsync_delete_state_ptr = NULL;
266 pfsync_clear_states_t *pfsync_clear_states_ptr = NULL;
267 pfsync_state_in_use_t *pfsync_state_in_use_ptr = NULL;
268 pfsync_defer_t *pfsync_defer_ptr = NULL;
269 pfsync_up_t *pfsync_up_ptr = NULL;
271 export_pflow_t *export_pflow_ptr = NULL;
273 pflog_packet_t *pflog_packet_ptr = NULL;
275 VNET_DEFINE(int, debug_pfugidhack);
276 SYSCTL_VNET_INT(_debug, OID_AUTO, pfugidhack, CTLFLAG_RW,
277 &VNET_NAME(debug_pfugidhack), 0,
278 "Enable/disable pf user/group rules mpsafe hack");
284 mtx_init(&pf_task_mtx, "pf task mtx", NULL, MTX_DEF);
288 destroy_pf_mutex(void)
291 mtx_destroy(&pf_task_mtx);
296 V_pf_src_tree_pl = V_pf_rule_pl = NULL;
297 V_pf_state_pl = V_pf_state_key_pl = V_pf_state_item_pl = NULL;
298 V_pf_altq_pl = V_pf_pooladdr_pl = NULL;
299 V_pf_frent_pl = V_pf_frag_pl = V_pf_cache_pl = V_pf_cent_pl = NULL;
300 V_pf_state_scrub_pl = NULL;
301 V_pfr_ktable_pl = V_pfr_kentry_pl = V_pfr_kcounters_pl = NULL;
305 cleanup_pf_zone(void)
307 UMA_DESTROY(V_pf_src_tree_pl);
308 UMA_DESTROY(V_pf_rule_pl);
309 UMA_DESTROY(V_pf_state_pl);
310 UMA_DESTROY(V_pf_state_key_pl);
311 UMA_DESTROY(V_pf_state_item_pl);
312 UMA_DESTROY(V_pf_altq_pl);
313 UMA_DESTROY(V_pf_pooladdr_pl);
314 UMA_DESTROY(V_pf_frent_pl);
315 UMA_DESTROY(V_pf_frag_pl);
316 UMA_DESTROY(V_pf_cache_pl);
317 UMA_DESTROY(V_pf_cent_pl);
318 UMA_DESTROY(V_pfr_ktable_pl);
319 UMA_DESTROY(V_pfr_kentry_pl);
320 UMA_DESTROY(V_pfr_kcounters_pl);
321 UMA_DESTROY(V_pf_state_scrub_pl);
322 UMA_DESTROY(V_pfi_addr_pl);
328 u_int32_t *my_timeout = V_pf_default_rule.timeout;
332 UMA_CREATE(V_pf_src_tree_pl, struct pf_src_node, "pfsrctrpl");
333 UMA_CREATE(V_pf_rule_pl, struct pf_rule, "pfrulepl");
334 UMA_CREATE(V_pf_state_pl, struct pf_state, "pfstatepl");
335 UMA_CREATE(V_pf_state_key_pl, struct pf_state, "pfstatekeypl");
336 UMA_CREATE(V_pf_state_item_pl, struct pf_state, "pfstateitempl");
337 UMA_CREATE(V_pf_altq_pl, struct pf_altq, "pfaltqpl");
338 UMA_CREATE(V_pf_pooladdr_pl, struct pf_pooladdr, "pfpooladdrpl");
339 UMA_CREATE(V_pfr_ktable_pl, struct pfr_ktable, "pfrktable");
340 UMA_CREATE(V_pfr_kentry_pl, struct pfr_kentry, "pfrkentry");
341 UMA_CREATE(V_pfr_kcounters_pl, struct pfr_kcounters, "pfrkcounters");
342 UMA_CREATE(V_pf_frent_pl, struct pf_frent, "pffrent");
343 UMA_CREATE(V_pf_frag_pl, struct pf_fragment, "pffrag");
344 UMA_CREATE(V_pf_cache_pl, struct pf_fragment, "pffrcache");
345 UMA_CREATE(V_pf_cent_pl, struct pf_frcache, "pffrcent");
346 UMA_CREATE(V_pf_state_scrub_pl, struct pf_state_scrub,
348 UMA_CREATE(V_pfi_addr_pl, struct pfi_dynaddr, "pfiaddrpl");
357 if ((error = pf_osfp_initialize()) != 0) {
363 V_pf_pool_limits[PF_LIMIT_STATES].pp = V_pf_state_pl;
364 V_pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
365 V_pf_pool_limits[PF_LIMIT_SRC_NODES].pp = V_pf_src_tree_pl;
366 V_pf_pool_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
367 V_pf_pool_limits[PF_LIMIT_FRAGS].pp = V_pf_frent_pl;
368 V_pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
369 V_pf_pool_limits[PF_LIMIT_TABLES].pp = V_pfr_ktable_pl;
370 V_pf_pool_limits[PF_LIMIT_TABLES].limit = PFR_KTABLE_HIWAT;
371 V_pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].pp = V_pfr_kentry_pl;
372 V_pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
373 uma_zone_set_max(V_pf_pool_limits[PF_LIMIT_STATES].pp,
374 V_pf_pool_limits[PF_LIMIT_STATES].limit);
376 RB_INIT(&V_tree_src_tracking);
377 RB_INIT(&V_pf_anchors);
378 pf_init_ruleset(&pf_main_ruleset);
380 TAILQ_INIT(&V_pf_altqs[0]);
381 TAILQ_INIT(&V_pf_altqs[1]);
382 TAILQ_INIT(&V_pf_pabuf);
383 V_pf_altqs_active = &V_pf_altqs[0];
384 V_pf_altqs_inactive = &V_pf_altqs[1];
385 TAILQ_INIT(&V_state_list);
387 /* default rule should never be garbage collected */
388 V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
389 V_pf_default_rule.action = PF_PASS;
390 V_pf_default_rule.nr = -1;
391 V_pf_default_rule.rtableid = -1;
393 /* initialize default timeouts */
394 my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
395 my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
396 my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
397 my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
398 my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
399 my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
400 my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
401 my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
402 my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
403 my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
404 my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
405 my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
406 my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
407 my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
408 my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
409 my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
410 my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
411 my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
412 my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
413 my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
417 bzero(&V_pf_status, sizeof(V_pf_status));
418 V_pf_status.debug = PF_DEBUG_URGENT;
420 V_pf_pfil_hooked = 0;
422 /* XXX do our best to avoid a conflict */
423 V_pf_status.hostid = arc4random();
425 if (kproc_create(pf_purge_thread, curvnet, NULL, 0, 0, "pfpurge"))
428 m_addr_chg_pf_p = pf_pkt_addr_changed;
432 #else /* !__FreeBSD__ */
437 u_int32_t *timeout = pf_default_rule.timeout;
439 pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
440 &pool_allocator_nointr);
441 pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
443 pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
445 pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
446 "pfstatekeypl", NULL);
447 pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0, 0, 0,
448 "pfstateitempl", NULL);
449 pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
450 &pool_allocator_nointr);
451 pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
452 "pfpooladdrpl", &pool_allocator_nointr);
455 pf_osfp_initialize();
457 pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
458 pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
460 if (physmem <= atop(100*1024*1024))
461 pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
462 PFR_KENTRY_HIWAT_SMALL;
464 RB_INIT(&tree_src_tracking);
465 RB_INIT(&pf_anchors);
466 pf_init_ruleset(&pf_main_ruleset);
467 TAILQ_INIT(&pf_altqs[0]);
468 TAILQ_INIT(&pf_altqs[1]);
469 TAILQ_INIT(&pf_pabuf);
470 pf_altqs_active = &pf_altqs[0];
471 pf_altqs_inactive = &pf_altqs[1];
472 TAILQ_INIT(&state_list);
474 /* default rule should never be garbage collected */
475 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
476 pf_default_rule.action = PF_PASS;
477 pf_default_rule.nr = -1;
478 pf_default_rule.rtableid = -1;
480 /* initialize default timeouts */
481 timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
482 timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
483 timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
484 timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
485 timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
486 timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
487 timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
488 timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
489 timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
490 timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
491 timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
492 timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
493 timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
494 timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
495 timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
496 timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
497 timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
498 timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
499 timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
500 timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
503 bzero(&pf_status, sizeof(pf_status));
504 pf_status.debug = PF_DEBUG_URGENT;
506 /* XXX do our best to avoid a conflict */
507 pf_status.hostid = arc4random();
509 /* require process context to purge states, so perform in a thread */
510 kthread_create_deferred(pf_thread_create, NULL);
514 pf_thread_create(void *v)
516 if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
517 panic("pfpurge thread");
521 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
529 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
538 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
539 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
540 u_int8_t check_ticket)
542 struct pf_ruleset *ruleset;
543 struct pf_rule *rule;
546 ruleset = pf_find_ruleset(anchor);
549 rs_num = pf_get_ruleset_number(rule_action);
550 if (rs_num >= PF_RULESET_MAX)
553 if (check_ticket && ticket !=
554 ruleset->rules[rs_num].active.ticket)
557 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
560 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
562 if (check_ticket && ticket !=
563 ruleset->rules[rs_num].inactive.ticket)
566 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
569 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
572 while ((rule != NULL) && (rule->nr != rule_number))
573 rule = TAILQ_NEXT(rule, entries);
578 return (&rule->rpool);
582 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
584 struct pf_pooladdr *mv_pool_pa;
586 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
587 TAILQ_REMOVE(poola, mv_pool_pa, entries);
588 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
593 pf_empty_pool(struct pf_palist *poola)
595 struct pf_pooladdr *empty_pool_pa;
597 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
598 pfi_dynaddr_remove(&empty_pool_pa->addr);
599 pf_tbladdr_remove(&empty_pool_pa->addr);
600 pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
601 TAILQ_REMOVE(poola, empty_pool_pa, entries);
603 pool_put(&V_pf_pooladdr_pl, empty_pool_pa);
605 pool_put(&pf_pooladdr_pl, empty_pool_pa);
611 pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
613 if (rulequeue != NULL) {
614 if (rule->states_cur <= 0) {
616 * XXX - we need to remove the table *before* detaching
617 * the rule to make sure the table code does not delete
618 * the anchor under our feet.
620 pf_tbladdr_remove(&rule->src.addr);
621 pf_tbladdr_remove(&rule->dst.addr);
622 if (rule->overload_tbl)
623 pfr_detach_table(rule->overload_tbl);
625 TAILQ_REMOVE(rulequeue, rule, entries);
626 rule->entries.tqe_prev = NULL;
630 if (rule->states_cur > 0 || rule->src_nodes > 0 ||
631 rule->entries.tqe_prev != NULL)
633 pf_tag_unref(rule->tag);
634 pf_tag_unref(rule->match_tag);
636 if (rule->pqid != rule->qid)
637 pf_qid_unref(rule->pqid);
638 pf_qid_unref(rule->qid);
640 pf_rtlabel_remove(&rule->src.addr);
641 pf_rtlabel_remove(&rule->dst.addr);
642 pfi_dynaddr_remove(&rule->src.addr);
643 pfi_dynaddr_remove(&rule->dst.addr);
644 if (rulequeue == NULL) {
645 pf_tbladdr_remove(&rule->src.addr);
646 pf_tbladdr_remove(&rule->dst.addr);
647 if (rule->overload_tbl)
648 pfr_detach_table(rule->overload_tbl);
650 pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
651 pf_anchor_remove(rule);
652 pf_empty_pool(&rule->rpool.list);
654 pool_put(&V_pf_rule_pl, rule);
656 pool_put(&pf_rule_pl, rule);
661 tagname2tag(struct pf_tags *head, char *tagname)
663 struct pf_tagname *tag, *p = NULL;
664 u_int16_t new_tagid = 1;
666 TAILQ_FOREACH(tag, head, entries)
667 if (strcmp(tagname, tag->name) == 0) {
673 * to avoid fragmentation, we do a linear search from the beginning
674 * and take the first free slot we find. if there is none or the list
675 * is empty, append a new entry at the end.
679 if (!TAILQ_EMPTY(head))
680 for (p = TAILQ_FIRST(head); p != NULL &&
681 p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
682 new_tagid = p->tag + 1;
684 if (new_tagid > TAGID_MAX)
687 /* allocate and fill new struct pf_tagname */
688 tag = malloc(sizeof(*tag), M_TEMP, M_NOWAIT|M_ZERO);
691 strlcpy(tag->name, tagname, sizeof(tag->name));
692 tag->tag = new_tagid;
695 if (p != NULL) /* insert new entry before p */
696 TAILQ_INSERT_BEFORE(p, tag, entries);
697 else /* either list empty or no free slot in between */
698 TAILQ_INSERT_TAIL(head, tag, entries);
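/*
 * Example of the policy above: with tags 1, 2 and 4 allocated, the
 * scan stops at the entry holding 4 with new_tagid == 3, and the new
 * entry is inserted before it, keeping the list sorted and the tag
 * space dense.
 */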
704 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
706 struct pf_tagname *tag;
708 TAILQ_FOREACH(tag, head, entries)
709 if (tag->tag == tagid) {
710 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
716 tag_unref(struct pf_tags *head, u_int16_t tag)
718 struct pf_tagname *p, *next;
723 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
724 next = TAILQ_NEXT(p, entries);
727 TAILQ_REMOVE(head, p, entries);
736 pf_tagname2tag(char *tagname)
739 return (tagname2tag(&V_pf_tags, tagname));
741 return (tagname2tag(&pf_tags, tagname));
746 pf_tag2tagname(u_int16_t tagid, char *p)
749 tag2tagname(&V_pf_tags, tagid, p);
751 tag2tagname(&pf_tags, tagid, p);
756 pf_tag_ref(u_int16_t tag)
758 struct pf_tagname *t;
761 TAILQ_FOREACH(t, &V_pf_tags, entries)
763 TAILQ_FOREACH(t, &pf_tags, entries)
772 pf_tag_unref(u_int16_t tag)
775 tag_unref(&V_pf_tags, tag);
777 tag_unref(&pf_tags, tag);
782 pf_rtlabel_add(struct pf_addr_wrap *a)
785 /* XXX_IMPORT: later */
788 if (a->type == PF_ADDR_RTLABEL &&
789 (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
796 pf_rtlabel_remove(struct pf_addr_wrap *a)
799 /* XXX_IMPORT: later */
801 if (a->type == PF_ADDR_RTLABEL)
802 rtlabel_unref(a->v.rtlabel);
807 pf_rtlabel_copyout(struct pf_addr_wrap *a)
810 /* XXX_IMPORT: later */
811 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel)
812 strlcpy(a->v.rtlabelname, "?", sizeof(a->v.rtlabelname));
816 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
817 if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
818 strlcpy(a->v.rtlabelname, "?",
819 sizeof(a->v.rtlabelname));
821 strlcpy(a->v.rtlabelname, name,
822 sizeof(a->v.rtlabelname));
829 pf_qname2qid(char *qname)
832 return ((u_int32_t)tagname2tag(&V_pf_qids, qname));
834 return ((u_int32_t)tagname2tag(&pf_qids, qname));
839 pf_qid2qname(u_int32_t qid, char *p)
842 tag2tagname(&V_pf_qids, (u_int16_t)qid, p);
844 tag2tagname(&pf_qids, (u_int16_t)qid, p);
849 pf_qid_unref(u_int32_t qid)
852 tag_unref(&V_pf_qids, (u_int16_t)qid);
854 tag_unref(&pf_qids, (u_int16_t)qid);
859 pf_begin_altq(u_int32_t *ticket)
861 struct pf_altq *altq;
864 /* Purge the old altq list */
866 while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
867 TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
868 if (altq->qname[0] == 0 &&
869 (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
871 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
872 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
873 if (altq->qname[0] == 0) {
875 /* detach and destroy the discipline */
876 error = altq_remove(altq);
878 pf_qid_unref(altq->qid);
880 pool_put(&V_pf_altq_pl, altq);
882 pool_put(&pf_altq_pl, altq);
888 *ticket = ++V_ticket_altqs_inactive;
889 V_altqs_inactive_open = 1;
891 *ticket = ++ticket_altqs_inactive;
892 altqs_inactive_open = 1;
898 pf_rollback_altq(u_int32_t ticket)
900 struct pf_altq *altq;
904 if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
906 /* Purge the old altq list */
907 while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
908 TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
909 if (altq->qname[0] == 0 &&
910 (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
912 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
914 /* Purge the old altq list */
915 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
916 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
917 if (altq->qname[0] == 0) {
919 /* detach and destroy the discipline */
920 error = altq_remove(altq);
922 pf_qid_unref(altq->qid);
924 pool_put(&V_pf_altq_pl, altq);
926 pool_put(&pf_altq_pl, altq);
930 V_altqs_inactive_open = 0;
932 altqs_inactive_open = 0;
938 pf_commit_altq(u_int32_t ticket)
940 struct pf_altqqueue *old_altqs;
941 struct pf_altq *altq;
942 int s, err, error = 0;
945 if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
947 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
951 /* swap altqs, keep the old. */
954 old_altqs = V_pf_altqs_active;
955 V_pf_altqs_active = V_pf_altqs_inactive;
956 V_pf_altqs_inactive = old_altqs;
957 V_ticket_altqs_active = V_ticket_altqs_inactive;
959 old_altqs = pf_altqs_active;
960 pf_altqs_active = pf_altqs_inactive;
961 pf_altqs_inactive = old_altqs;
962 ticket_altqs_active = ticket_altqs_inactive;
965 /* Attach new disciplines */
967 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
968 if (altq->qname[0] == 0 &&
969 (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
971 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
972 if (altq->qname[0] == 0) {
974 /* attach the discipline */
975 error = altq_pfattach(altq);
977 if (error == 0 && V_pf_altq_running)
979 if (error == 0 && pf_altq_running)
981 error = pf_enable_altq(altq);
989 /* Purge the old altq list */
991 while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
992 TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
993 if (altq->qname[0] == 0 &&
994 (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
996 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
997 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
998 if (altq->qname[0] == 0) {
1000 /* detach and destroy the discipline */
1002 if (V_pf_altq_running)
1004 if (pf_altq_running)
1006 error = pf_disable_altq(altq);
1007 err = altq_pfdetach(altq);
1008 if (err != 0 && error == 0)
1010 err = altq_remove(altq);
1011 if (err != 0 && error == 0)
1014 pf_qid_unref(altq->qid);
1016 pool_put(&V_pf_altq_pl, altq);
1018 pool_put(&pf_altq_pl, altq);
1024 V_altqs_inactive_open = 0;
1026 altqs_inactive_open = 0;
1032 pf_enable_altq(struct pf_altq *altq)
1035 struct tb_profile tb;
1038 if ((ifp = ifunit(altq->ifname)) == NULL)
1041 if (ifp->if_snd.altq_type != ALTQT_NONE)
1042 error = altq_enable(&ifp->if_snd);
1044 /* set tokenbucket regulator */
1045 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1046 tb.rate = altq->ifbandwidth;
1047 tb.depth = altq->tbrsize;
1052 error = tbr_set(&ifp->if_snd, &tb);
1063 pf_disable_altq(struct pf_altq *altq)
1066 struct tb_profile tb;
1069 if ((ifp = ifunit(altq->ifname)) == NULL)
1073 * if the discipline is no longer referenced, it was overridden
1074 * by a new one; in that case there is nothing to do, so just return.
1076 if (altq->altq_disc != ifp->if_snd.altq_disc)
1079 error = altq_disable(&ifp->if_snd);
1082 /* clear tokenbucket regulator */
1088 error = tbr_set(&ifp->if_snd, &tb);
1100 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
1103 struct pf_altq *a1, *a2, *a3;
1107 /* Interrupt userland queue modifications */
1109 if (V_altqs_inactive_open)
1110 pf_rollback_altq(V_ticket_altqs_inactive);
1112 if (altqs_inactive_open)
1113 pf_rollback_altq(ticket_altqs_inactive);
1116 /* Start new altq ruleset */
1117 if (pf_begin_altq(&ticket))
1120 /* Copy the current active set */
1122 TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
1123 a2 = pool_get(&V_pf_altq_pl, PR_NOWAIT);
1125 TAILQ_FOREACH(a1, pf_altqs_active, entries) {
1126 a2 = pool_get(&pf_altq_pl, PR_NOWAIT);
1132 bcopy(a1, a2, sizeof(struct pf_altq));
1134 if (a2->qname[0] != 0) {
1135 if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
1138 pool_put(&V_pf_altq_pl, a2);
1140 pool_put(&pf_altq_pl, a2);
1144 a2->altq_disc = NULL;
1146 TAILQ_FOREACH(a3, V_pf_altqs_inactive, entries) {
1148 TAILQ_FOREACH(a3, pf_altqs_inactive, entries) {
1150 if (strncmp(a3->ifname, a2->ifname,
1151 IFNAMSIZ) == 0 && a3->qname[0] == 0) {
1152 a2->altq_disc = a3->altq_disc;
1157 /* Deactivate the interface in question */
1158 a2->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
1159 if ((ifp1 = ifunit(a2->ifname)) == NULL ||
1160 (remove && ifp1 == ifp)) {
1161 a2->local_flags |= PFALTQ_FLAG_IF_REMOVED;
1164 error = altq_add(a2);
1168 if (ticket != V_ticket_altqs_inactive)
1170 if (ticket != ticket_altqs_inactive)
1176 pool_put(&V_pf_altq_pl, a2);
1178 pool_put(&pf_altq_pl, a2);
1185 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
1187 TAILQ_INSERT_TAIL(pf_altqs_inactive, a2, entries);
1192 pf_rollback_altq(ticket);
1194 pf_commit_altq(ticket);
1200 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1202 struct pf_ruleset *rs;
1203 struct pf_rule *rule;
1205 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1207 rs = pf_find_or_create_ruleset(anchor);
1210 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1211 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1212 rs->rules[rs_num].inactive.rcount--;
1214 *ticket = ++rs->rules[rs_num].inactive.ticket;
1215 rs->rules[rs_num].inactive.open = 1;
1220 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1222 struct pf_ruleset *rs;
1223 struct pf_rule *rule;
1225 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1227 rs = pf_find_ruleset(anchor);
1228 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1229 rs->rules[rs_num].inactive.ticket != ticket)
1231 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1232 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1233 rs->rules[rs_num].inactive.rcount--;
1235 rs->rules[rs_num].inactive.open = 0;
1239 #define PF_MD5_UPD(st, elm) \
1240 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
1242 #define PF_MD5_UPD_STR(st, elm) \
1243 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
1245 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \
1246 (stor) = htonl((st)->elm); \
1247 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
1250 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \
1251 (stor) = htons((st)->elm); \
1252 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
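/*
 * The HTONL/HTONS variants hash multi-byte fields in network byte
 * order, so the resulting ruleset checksum comes out identical on
 * hosts of different endianness when it is compared between pfsync
 * peers.
 */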
1256 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
1258 PF_MD5_UPD(pfr, addr.type);
1259 switch (pfr->addr.type) {
1260 case PF_ADDR_DYNIFTL:
1261 PF_MD5_UPD(pfr, addr.v.ifname);
1262 PF_MD5_UPD(pfr, addr.iflags);
1265 PF_MD5_UPD(pfr, addr.v.tblname);
1267 case PF_ADDR_ADDRMASK:
1268 /* XXX ignore af? */
1269 PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
1270 PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
1272 case PF_ADDR_RTLABEL:
1273 PF_MD5_UPD(pfr, addr.v.rtlabelname);
1277 PF_MD5_UPD(pfr, port[0]);
1278 PF_MD5_UPD(pfr, port[1]);
1279 PF_MD5_UPD(pfr, neg);
1280 PF_MD5_UPD(pfr, port_op);
1284 pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
1289 pf_hash_rule_addr(ctx, &rule->src);
1290 pf_hash_rule_addr(ctx, &rule->dst);
1291 PF_MD5_UPD_STR(rule, label);
1292 PF_MD5_UPD_STR(rule, ifname);
1293 PF_MD5_UPD_STR(rule, match_tagname);
1294 PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
1295 PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
1296 PF_MD5_UPD_HTONL(rule, prob, y);
1297 PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
1298 PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
1299 PF_MD5_UPD(rule, uid.op);
1300 PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
1301 PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
1302 PF_MD5_UPD(rule, gid.op);
1303 PF_MD5_UPD_HTONL(rule, rule_flag, y);
1304 PF_MD5_UPD(rule, action);
1305 PF_MD5_UPD(rule, direction);
1306 PF_MD5_UPD(rule, af);
1307 PF_MD5_UPD(rule, quick);
1308 PF_MD5_UPD(rule, ifnot);
1309 PF_MD5_UPD(rule, match_tag_not);
1310 PF_MD5_UPD(rule, natpass);
1311 PF_MD5_UPD(rule, keep_state);
1312 PF_MD5_UPD(rule, proto);
1313 PF_MD5_UPD(rule, type);
1314 PF_MD5_UPD(rule, code);
1315 PF_MD5_UPD(rule, flags);
1316 PF_MD5_UPD(rule, flagset);
1317 PF_MD5_UPD(rule, allow_opts);
1318 PF_MD5_UPD(rule, rt);
1319 PF_MD5_UPD(rule, tos);
1323 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1325 struct pf_ruleset *rs;
1326 struct pf_rule *rule, **old_array;
1327 struct pf_rulequeue *old_rules;
1329 u_int32_t old_rcount;
1331 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1333 rs = pf_find_ruleset(anchor);
1334 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1335 ticket != rs->rules[rs_num].inactive.ticket)
1338 /* Calculate checksum for the main ruleset */
1339 if (rs == &pf_main_ruleset) {
1340 error = pf_setup_pfsync_matching(rs);
1345 /* Swap rules, keep the old. */
1347 old_rules = rs->rules[rs_num].active.ptr;
1348 old_rcount = rs->rules[rs_num].active.rcount;
1349 old_array = rs->rules[rs_num].active.ptr_array;
1351 rs->rules[rs_num].active.ptr =
1352 rs->rules[rs_num].inactive.ptr;
1353 rs->rules[rs_num].active.ptr_array =
1354 rs->rules[rs_num].inactive.ptr_array;
1355 rs->rules[rs_num].active.rcount =
1356 rs->rules[rs_num].inactive.rcount;
1357 rs->rules[rs_num].inactive.ptr = old_rules;
1358 rs->rules[rs_num].inactive.ptr_array = old_array;
1359 rs->rules[rs_num].inactive.rcount = old_rcount;
1361 rs->rules[rs_num].active.ticket =
1362 rs->rules[rs_num].inactive.ticket;
1363 pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1366 /* Purge the old rule list. */
1367 while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1368 pf_rm_rule(old_rules, rule);
1369 if (rs->rules[rs_num].inactive.ptr_array)
1370 free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
1371 rs->rules[rs_num].inactive.ptr_array = NULL;
1372 rs->rules[rs_num].inactive.rcount = 0;
1373 rs->rules[rs_num].inactive.open = 0;
1374 pf_remove_if_empty_ruleset(rs);
1380 pf_setup_pfsync_matching(struct pf_ruleset *rs)
1383 struct pf_rule *rule;
1385 u_int8_t digest[PF_MD5_DIGEST_LENGTH];
1388 for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1389 /* XXX PF_RULESET_SCRUB as well? */
1390 if (rs_cnt == PF_RULESET_SCRUB)
1393 if (rs->rules[rs_cnt].inactive.ptr_array)
1394 free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
1395 rs->rules[rs_cnt].inactive.ptr_array = NULL;
1397 if (rs->rules[rs_cnt].inactive.rcount) {
1398 rs->rules[rs_cnt].inactive.ptr_array =
1399 malloc(sizeof(caddr_t) *
1400 rs->rules[rs_cnt].inactive.rcount,
1403 if (!rs->rules[rs_cnt].inactive.ptr_array)
1407 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1409 pf_hash_rule(&ctx, rule);
1410 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
1414 MD5Final(digest, &ctx);
1416 memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
1418 memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
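/*
 * The digest stored in pf_status.pf_chksum above is what lets pfsync
 * peers verify that they are running matching rulesets.
 */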
1424 pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
1427 if (pfi_dynaddr_setup(addr, af) ||
1428 pf_tbladdr_setup(ruleset, addr))
1435 pf_addr_copyout(struct pf_addr_wrap *addr)
1437 pfi_dynaddr_copyout(addr);
1438 pf_tbladdr_copyout(addr);
1439 pf_rtlabel_copyout(addr);
1444 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
1446 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
1449 struct pf_pooladdr *pa = NULL;
1450 struct pf_pool *pool = NULL;
1456 CURVNET_SET(TD_TO_VNET(td));
1458 /* XXX keep in sync with switch() below */
1460 if (securelevel_gt(td->td_ucred, 2))
1462 if (securelevel > 1)
1470 case DIOCSETSTATUSIF:
1476 case DIOCGETTIMEOUT:
1477 case DIOCCLRRULECTRS:
1482 case DIOCGETRULESETS:
1483 case DIOCGETRULESET:
1484 case DIOCRGETTABLES:
1485 case DIOCRGETTSTATS:
1486 case DIOCRCLRTSTATS:
1492 case DIOCRGETASTATS:
1493 case DIOCRCLRASTATS:
1496 case DIOCGETSRCNODES:
1497 case DIOCCLRSRCNODES:
1498 case DIOCIGETIFACES:
1505 case DIOCRCLRTABLES:
1506 case DIOCRADDTABLES:
1507 case DIOCRDELTABLES:
1508 case DIOCRSETTFLAGS:
1509 if (((struct pfioc_table *)addr)->pfrio_flags &
1511 break; /* dummy operation ok */
1517 if (!(flags & FWRITE))
1525 case DIOCGETTIMEOUT:
1530 case DIOCGETRULESETS:
1531 case DIOCGETRULESET:
1533 case DIOCRGETTABLES:
1534 case DIOCRGETTSTATS:
1536 case DIOCRGETASTATS:
1539 case DIOCGETSRCNODES:
1540 case DIOCIGETIFACES:
1545 case DIOCRCLRTABLES:
1546 case DIOCRADDTABLES:
1547 case DIOCRDELTABLES:
1548 case DIOCRCLRTSTATS:
1553 case DIOCRSETTFLAGS:
1554 if (((struct pfioc_table *)addr)->pfrio_flags &
1556 flags |= FWRITE; /* need write lock for dummy */
1557 break; /* dummy operation ok */
1561 if (((struct pfioc_rule *)addr)->action ==
1571 sx_xlock(&V_pf_consistency_lock);
1573 sx_slock(&V_pf_consistency_lock);
1575 rw_enter_write(&pf_consistency_lock);
1577 rw_enter_read(&pf_consistency_lock);
1589 if (V_pf_status.running)
1591 if (pf_status.running)
1600 DPFPRINTF(PF_DEBUG_MISC,
1601 ("pf: pfil registeration fail\n"));
1604 V_pf_status.running = 1;
1605 V_pf_status.since = time_second;
1607 if (V_pf_status.stateid == 0) {
1608 V_pf_status.stateid = time_second;
1609 V_pf_status.stateid = V_pf_status.stateid << 32;
1612 pf_status.running = 1;
1613 pf_status.since = time_second;
1615 if (pf_status.stateid == 0) {
1616 pf_status.stateid = time_second;
1617 pf_status.stateid = pf_status.stateid << 32;
1620 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1626 if (!V_pf_status.running)
1629 V_pf_status.running = 0;
1631 error = dehook_pf();
1634 V_pf_status.running = 1;
1635 DPFPRINTF(PF_DEBUG_MISC,
1636 ("pf: pfil unregisteration failed\n"));
1638 V_pf_status.since = time_second;
1640 if (!pf_status.running)
1643 pf_status.running = 0;
1644 pf_status.since = time_second;
1646 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
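/*
 * Illustrative userland sketch (not part of this file) of driving
 * DIOCSTART/DIOCSTOP through the pf device, as pfctl -e/-d does;
 * note DIOCSTART fails with EEXIST when pf is already running:
 *
 *	int dev = open("/dev/pf", O_RDWR);
 *	if (dev == -1)
 *		err(1, "open(/dev/pf)");
 *	if (ioctl(dev, DIOCSTART) == -1 && errno != EEXIST)
 *		err(1, "DIOCSTART");
 */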
1651 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1652 struct pf_ruleset *ruleset;
1653 struct pf_rule *rule, *tail;
1654 struct pf_pooladdr *pa;
1657 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1658 ruleset = pf_find_ruleset(pr->anchor);
1659 if (ruleset == NULL) {
1663 rs_num = pf_get_ruleset_number(pr->rule.action);
1664 if (rs_num >= PF_RULESET_MAX) {
1668 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1672 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1674 DPFPRINTF(PF_DEBUG_MISC,
1675 ("ticket: %d != [%d]%d\n", pr->ticket, rs_num,
1676 ruleset->rules[rs_num].inactive.ticket));
1682 if (pr->pool_ticket != V_ticket_pabuf) {
1683 DPFPRINTF(PF_DEBUG_MISC,
1684 ("pool_ticket: %d != %d\n", pr->pool_ticket,
1687 if (pr->pool_ticket != ticket_pabuf) {
1693 rule = pool_get(&V_pf_rule_pl, PR_NOWAIT);
1695 rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL);
1701 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1703 rule->cuid = td->td_ucred->cr_ruid;
1704 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
1706 rule->cuid = p->p_cred->p_ruid;
1707 rule->cpid = p->p_pid;
1709 rule->anchor = NULL;
1711 TAILQ_INIT(&rule->rpool.list);
1712 /* initialize refcounting */
1713 rule->states_cur = 0;
1714 rule->src_nodes = 0;
1715 rule->entries.tqe_prev = NULL;
1717 if (rule->af == AF_INET) {
1719 pool_put(&V_pf_rule_pl, rule);
1721 pool_put(&pf_rule_pl, rule);
1723 error = EAFNOSUPPORT;
1728 if (rule->af == AF_INET6) {
1730 pool_put(&V_pf_rule_pl, rule);
1732 pool_put(&pf_rule_pl, rule);
1734 error = EAFNOSUPPORT;
1738 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1741 rule->nr = tail->nr + 1;
1744 if (rule->ifname[0]) {
1745 rule->kif = pfi_kif_get(rule->ifname);
1746 if (rule->kif == NULL) {
1748 pool_put(&V_pf_rule_pl, rule);
1750 pool_put(&pf_rule_pl, rule);
1755 pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
1758 #ifdef __FreeBSD__ /* ROUTING */
1759 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
1761 if (rule->rtableid > 0 && !rtable_exists(rule->rtableid))
1767 if (rule->qname[0] != 0) {
1768 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1770 else if (rule->pqname[0] != 0) {
1772 pf_qname2qid(rule->pqname)) == 0)
1775 rule->pqid = rule->qid;
1778 if (rule->tagname[0])
1779 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1781 if (rule->match_tagname[0])
1782 if ((rule->match_tag =
1783 pf_tagname2tag(rule->match_tagname)) == 0)
1785 if (rule->rt && !rule->direction)
1790 if (rule->logif >= PFLOGIFS_MAX)
1793 if (pf_rtlabel_add(&rule->src.addr) ||
1794 pf_rtlabel_add(&rule->dst.addr))
1796 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
1798 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
1800 if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1803 TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
1805 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1807 if (pf_tbladdr_setup(ruleset, &pa->addr))
1810 if (rule->overload_tblname[0]) {
1811 if ((rule->overload_tbl = pfr_attach_table(ruleset,
1812 rule->overload_tblname, 0)) == NULL)
1815 rule->overload_tbl->pfrkt_flags |=
1820 pf_mv_pool(&V_pf_pabuf, &rule->rpool.list);
1822 pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1824 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1825 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1826 (rule->rt > PF_FASTROUTE)) &&
1827 (TAILQ_FIRST(&rule->rpool.list) == NULL))
1831 pf_rm_rule(NULL, rule);
1836 if (!V_debug_pfugidhack && (rule->uid.op || rule->gid.op ||
1837 rule->log & PF_LOG_SOCKET_LOOKUP)) {
1838 DPFPRINTF(PF_DEBUG_MISC,
1839 ("pf: debug.pfugidhack enabled\n"));
1840 V_debug_pfugidhack = 1;
1843 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1844 rule->evaluations = rule->packets[0] = rule->packets[1] =
1845 rule->bytes[0] = rule->bytes[1] = 0;
1846 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1848 ruleset->rules[rs_num].inactive.rcount++;
1852 case DIOCGETRULES: {
1853 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1854 struct pf_ruleset *ruleset;
1855 struct pf_rule *tail;
1858 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1859 ruleset = pf_find_ruleset(pr->anchor);
1860 if (ruleset == NULL) {
1864 rs_num = pf_get_ruleset_number(pr->rule.action);
1865 if (rs_num >= PF_RULESET_MAX) {
1869 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1872 pr->nr = tail->nr + 1;
1875 pr->ticket = ruleset->rules[rs_num].active.ticket;
1880 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1881 struct pf_ruleset *ruleset;
1882 struct pf_rule *rule;
1885 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1886 ruleset = pf_find_ruleset(pr->anchor);
1887 if (ruleset == NULL) {
1891 rs_num = pf_get_ruleset_number(pr->rule.action);
1892 if (rs_num >= PF_RULESET_MAX) {
1896 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1900 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1901 while ((rule != NULL) && (rule->nr != pr->nr))
1902 rule = TAILQ_NEXT(rule, entries);
1907 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1908 if (pf_anchor_copyout(ruleset, rule, pr)) {
1912 pf_addr_copyout(&pr->rule.src.addr);
1913 pf_addr_copyout(&pr->rule.dst.addr);
1914 for (i = 0; i < PF_SKIP_COUNT; ++i)
1915 if (rule->skip[i].ptr == NULL)
1916 pr->rule.skip[i].nr = -1;
1918 pr->rule.skip[i].nr =
1919 rule->skip[i].ptr->nr;
1921 if (pr->action == PF_GET_CLR_CNTR) {
1922 rule->evaluations = 0;
1923 rule->packets[0] = rule->packets[1] = 0;
1924 rule->bytes[0] = rule->bytes[1] = 0;
1925 rule->states_tot = 0;
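/*
 * Illustrative userland sketch (not part of this file): rules are
 * walked by taking a ticket with DIOCGETRULES and then fetching each
 * rule by number under that ticket:
 *
 *	struct pfioc_rule pr;
 *	u_int32_t i;
 *
 *	memset(&pr, 0, sizeof(pr));
 *	pr.rule.action = PF_PASS;
 *	if (ioctl(dev, DIOCGETRULES, &pr) == -1)
 *		err(1, "DIOCGETRULES");
 *	for (i = 0; i < pr.nr; i++) {
 *		pr.nr = i;
 *		if (ioctl(dev, DIOCGETRULE, &pr) == -1)
 *			err(1, "DIOCGETRULE");
 *	}
 */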
1930 case DIOCCHANGERULE: {
1931 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1932 struct pf_ruleset *ruleset;
1933 struct pf_rule *oldrule = NULL, *newrule = NULL;
1937 if (!(pcr->action == PF_CHANGE_REMOVE ||
1938 pcr->action == PF_CHANGE_GET_TICKET) &&
1940 pcr->pool_ticket != V_ticket_pabuf) {
1942 pcr->pool_ticket != ticket_pabuf) {
1948 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1949 pcr->action > PF_CHANGE_GET_TICKET) {
1953 ruleset = pf_find_ruleset(pcr->anchor);
1954 if (ruleset == NULL) {
1958 rs_num = pf_get_ruleset_number(pcr->rule.action);
1959 if (rs_num >= PF_RULESET_MAX) {
1964 if (pcr->action == PF_CHANGE_GET_TICKET) {
1965 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1969 ruleset->rules[rs_num].active.ticket) {
1973 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1979 if (pcr->action != PF_CHANGE_REMOVE) {
1981 newrule = pool_get(&V_pf_rule_pl, PR_NOWAIT);
1983 newrule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL);
1985 if (newrule == NULL) {
1989 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1991 newrule->cuid = td->td_ucred->cr_ruid;
1992 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
1994 newrule->cuid = p->p_cred->p_ruid;
1995 newrule->cpid = p->p_pid;
1997 TAILQ_INIT(&newrule->rpool.list);
1998 /* initialize refcounting */
1999 newrule->states_cur = 0;
2000 newrule->entries.tqe_prev = NULL;
2002 if (newrule->af == AF_INET) {
2004 pool_put(&V_pf_rule_pl, newrule);
2006 pool_put(&pf_rule_pl, newrule);
2008 error = EAFNOSUPPORT;
2013 if (newrule->af == AF_INET6) {
2015 pool_put(&V_pf_rule_pl, newrule);
2017 pool_put(&pf_rule_pl, newrule);
2019 error = EAFNOSUPPORT;
2023 if (newrule->ifname[0]) {
2024 newrule->kif = pfi_kif_get(newrule->ifname);
2025 if (newrule->kif == NULL) {
2027 pool_put(&V_pf_rule_pl, newrule);
2029 pool_put(&pf_rule_pl, newrule);
2034 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
2036 newrule->kif = NULL;
2038 if (newrule->rtableid > 0 &&
2039 #ifdef __FreeBSD__ /* ROUTING */
2040 newrule->rtableid >= rt_numfibs)
2042 !rtable_exists(newrule->rtableid))
2048 if (newrule->qname[0] != 0) {
2050 pf_qname2qid(newrule->qname)) == 0)
2052 else if (newrule->pqname[0] != 0) {
2053 if ((newrule->pqid =
2054 pf_qname2qid(newrule->pqname)) == 0)
2057 newrule->pqid = newrule->qid;
2060 if (newrule->tagname[0])
2062 pf_tagname2tag(newrule->tagname)) == 0)
2064 if (newrule->match_tagname[0])
2065 if ((newrule->match_tag = pf_tagname2tag(
2066 newrule->match_tagname)) == 0)
2068 if (newrule->rt && !newrule->direction)
2073 if (newrule->logif >= PFLOGIFS_MAX)
2076 if (pf_rtlabel_add(&newrule->src.addr) ||
2077 pf_rtlabel_add(&newrule->dst.addr))
2079 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
2081 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
2083 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
2086 TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
2088 TAILQ_FOREACH(pa, &pf_pabuf, entries)
2090 if (pf_tbladdr_setup(ruleset, &pa->addr))
2093 if (newrule->overload_tblname[0]) {
2094 if ((newrule->overload_tbl = pfr_attach_table(
2095 ruleset, newrule->overload_tblname, 0)) ==
2099 newrule->overload_tbl->pfrkt_flags |=
2104 pf_mv_pool(&V_pf_pabuf, &newrule->rpool.list);
2106 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
2108 if (((((newrule->action == PF_NAT) ||
2109 (newrule->action == PF_RDR) ||
2110 (newrule->action == PF_BINAT) ||
2111 (newrule->rt > PF_FASTROUTE)) &&
2112 !newrule->anchor)) &&
2113 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
2117 pf_rm_rule(NULL, newrule);
2122 if (!V_debug_pfugidhack && (newrule->uid.op ||
2124 newrule->log & PF_LOG_SOCKET_LOOKUP)) {
2125 DPFPRINTF(PF_DEBUG_MISC,
2126 ("pf: debug.pfugidhack enabled\n"));
2127 V_debug_pfugidhack = 1;
2131 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
2132 newrule->evaluations = 0;
2133 newrule->packets[0] = newrule->packets[1] = 0;
2134 newrule->bytes[0] = newrule->bytes[1] = 0;
2137 pf_empty_pool(&V_pf_pabuf);
2139 pf_empty_pool(&pf_pabuf);
2142 if (pcr->action == PF_CHANGE_ADD_HEAD)
2143 oldrule = TAILQ_FIRST(
2144 ruleset->rules[rs_num].active.ptr);
2145 else if (pcr->action == PF_CHANGE_ADD_TAIL)
2146 oldrule = TAILQ_LAST(
2147 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
2149 oldrule = TAILQ_FIRST(
2150 ruleset->rules[rs_num].active.ptr);
2151 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
2152 oldrule = TAILQ_NEXT(oldrule, entries);
2153 if (oldrule == NULL) {
2154 if (newrule != NULL)
2155 pf_rm_rule(NULL, newrule);
2161 if (pcr->action == PF_CHANGE_REMOVE) {
2162 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
2163 ruleset->rules[rs_num].active.rcount--;
2165 if (oldrule == NULL)
2167 ruleset->rules[rs_num].active.ptr,
2169 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
2170 pcr->action == PF_CHANGE_ADD_BEFORE)
2171 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
2174 ruleset->rules[rs_num].active.ptr,
2175 oldrule, newrule, entries);
2176 ruleset->rules[rs_num].active.rcount++;
2180 TAILQ_FOREACH(oldrule,
2181 ruleset->rules[rs_num].active.ptr, entries)
2184 ruleset->rules[rs_num].active.ticket++;
2186 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
2187 pf_remove_if_empty_ruleset(ruleset);
2192 case DIOCCLRSTATES: {
2193 struct pf_state *s, *nexts;
2194 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
2198 for (s = RB_MIN(pf_state_tree_id, &V_tree_id); s; s = nexts) {
2199 nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, s);
2201 for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
2202 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
2205 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
2206 s->kif->pfik_name)) {
2208 /* don't send out individual delete messages */
2209 SET(s->state_flags, PFSTATE_NOSYNC);
2215 psk->psk_killed = killed;
2218 if (pfsync_clear_states_ptr != NULL)
2219 pfsync_clear_states_ptr(V_pf_status.hostid, psk->psk_ifname);
2221 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
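/*
 * Illustrative userland sketch (not part of this file): flushing all
 * states on one interface ("em0" is a placeholder), as pfctl -i em0
 * -Fs does; psk_killed reports how many states were removed:
 *
 *	struct pfioc_state_kill psk;
 *
 *	memset(&psk, 0, sizeof(psk));
 *	strlcpy(psk.psk_ifname, "em0", sizeof(psk.psk_ifname));
 *	if (ioctl(dev, DIOCCLRSTATES, &psk) == -1)
 *		err(1, "DIOCCLRSTATES");
 *	printf("%u states cleared\n", psk.psk_killed);
 */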
2227 case DIOCKILLSTATES: {
2228 struct pf_state *s, *nexts;
2229 struct pf_state_key *sk;
2230 struct pf_addr *srcaddr, *dstaddr;
2231 u_int16_t srcport, dstport;
2232 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
2235 if (psk->psk_pfcmp.id) {
2236 if (psk->psk_pfcmp.creatorid == 0)
2238 psk->psk_pfcmp.creatorid = V_pf_status.hostid;
2240 psk->psk_pfcmp.creatorid = pf_status.hostid;
2242 if ((s = pf_find_state_byid(&psk->psk_pfcmp))) {
2244 psk->psk_killed = 1;
2250 for (s = RB_MIN(pf_state_tree_id, &V_tree_id); s;
2252 nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, s);
2254 for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
2256 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
2258 sk = s->key[PF_SK_WIRE];
2260 if (s->direction == PF_OUT) {
2261 srcaddr = &sk->addr[1];
2262 dstaddr = &sk->addr[0];
2263 srcport = sk->port[1];
2264 dstport = sk->port[0];
2266 srcaddr = &sk->addr[0];
2267 dstaddr = &sk->addr[1];
2268 srcport = sk->port[0];
2269 dstport = sk->port[1];
2271 if ((!psk->psk_af || sk->af == psk->psk_af)
2272 && (!psk->psk_proto || psk->psk_proto ==
2274 PF_MATCHA(psk->psk_src.neg,
2275 &psk->psk_src.addr.v.a.addr,
2276 &psk->psk_src.addr.v.a.mask,
2278 PF_MATCHA(psk->psk_dst.neg,
2279 &psk->psk_dst.addr.v.a.addr,
2280 &psk->psk_dst.addr.v.a.mask,
2282 (psk->psk_src.port_op == 0 ||
2283 pf_match_port(psk->psk_src.port_op,
2284 psk->psk_src.port[0], psk->psk_src.port[1],
2286 (psk->psk_dst.port_op == 0 ||
2287 pf_match_port(psk->psk_dst.port_op,
2288 psk->psk_dst.port[0], psk->psk_dst.port[1],
2290 (!psk->psk_label[0] || (s->rule.ptr->label[0] &&
2291 !strcmp(psk->psk_label, s->rule.ptr->label))) &&
2292 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
2293 s->kif->pfik_name))) {
2298 psk->psk_killed = killed;
2302 case DIOCADDSTATE: {
2303 struct pfioc_state *ps = (struct pfioc_state *)addr;
2304 struct pfsync_state *sp = &ps->state;
2306 if (sp->timeout >= PFTM_MAX &&
2307 sp->timeout != PFTM_UNTIL_PACKET) {
2312 if (pfsync_state_import_ptr != NULL)
2313 error = pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL);
2315 error = pfsync_state_import(sp, PFSYNC_SI_IOCTL);
2320 case DIOCGETSTATE: {
2321 struct pfioc_state *ps = (struct pfioc_state *)addr;
2323 struct pf_state_cmp id_key;
2325 bcopy(ps->state.id, &id_key.id, sizeof(id_key.id));
2326 id_key.creatorid = ps->state.creatorid;
2328 s = pf_find_state_byid(&id_key);
2334 pfsync_state_export(&ps->state, s);
2338 case DIOCGETSTATES: {
2339 struct pfioc_states *ps = (struct pfioc_states *)addr;
2340 struct pf_state *state;
2341 struct pfsync_state *p, *pstore;
2344 if (ps->ps_len == 0) {
2346 nr = V_pf_status.states;
2348 nr = pf_status.states;
2350 ps->ps_len = sizeof(struct pfsync_state) * nr;
2357 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
2365 state = TAILQ_FIRST(&V_state_list);
2367 state = TAILQ_FIRST(&state_list);
2370 if (state->timeout != PFTM_UNLINKED) {
2371 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
2373 pfsync_state_export(pstore, state);
2375 PF_COPYOUT(pstore, p, sizeof(*p), error);
2377 error = copyout(pstore, p, sizeof(*p));
2380 free(pstore, M_TEMP);
2386 state = TAILQ_NEXT(state, entry_list);
2389 ps->ps_len = sizeof(struct pfsync_state) * nr;
2391 free(pstore, M_TEMP);
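/*
 * Illustrative userland sketch (not part of this file): the
 * ps_len == 0 probe above supports the usual two-pass pattern, where
 * the first call only reports the space needed and the second call
 * fills the buffer:
 *
 *	struct pfioc_states ps;
 *
 *	memset(&ps, 0, sizeof(ps));
 *	if (ioctl(dev, DIOCGETSTATES, &ps) == -1)
 *		err(1, "DIOCGETSTATES");
 *	if ((ps.ps_buf = malloc(ps.ps_len)) == NULL)
 *		err(1, "malloc");
 *	if (ioctl(dev, DIOCGETSTATES, &ps) == -1)
 *		err(1, "DIOCGETSTATES");
 */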
2395 case DIOCGETSTATUS: {
2396 struct pf_status *s = (struct pf_status *)addr;
2398 bcopy(&V_pf_status, s, sizeof(struct pf_status));
2400 bcopy(&pf_status, s, sizeof(struct pf_status));
2402 pfi_update_status(s->ifname, s);
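/*
 * Illustrative userland sketch (not part of this file):
 *
 *	struct pf_status st;
 *
 *	if (ioctl(dev, DIOCGETSTATUS, &st) == -1)
 *		err(1, "DIOCGETSTATUS");
 *	printf("pf is %s\n", st.running ? "enabled" : "disabled");
 */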
2406 case DIOCSETSTATUSIF: {
2407 struct pfioc_if *pi = (struct pfioc_if *)addr;
2409 if (pi->ifname[0] == 0) {
2411 bzero(V_pf_status.ifname, IFNAMSIZ);
2413 bzero(pf_status.ifname, IFNAMSIZ);
2418 strlcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
2420 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
2425 case DIOCCLRSTATUS: {
2427 bzero(V_pf_status.counters, sizeof(V_pf_status.counters));
2428 bzero(V_pf_status.fcounters, sizeof(V_pf_status.fcounters));
2429 bzero(V_pf_status.scounters, sizeof(V_pf_status.scounters));
2430 V_pf_status.since = time_second;
2431 if (*V_pf_status.ifname)
2432 pfi_update_status(V_pf_status.ifname, NULL);
2434 bzero(pf_status.counters, sizeof(pf_status.counters));
2435 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
2436 bzero(pf_status.scounters, sizeof(pf_status.scounters));
2437 pf_status.since = time_second;
2438 if (*pf_status.ifname)
2439 pfi_update_status(pf_status.ifname, NULL);
2445 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
2446 struct pf_state_key *sk;
2447 struct pf_state *state;
2448 struct pf_state_key_cmp key;
2449 int m = 0, direction = pnl->direction;
2452 /* NATLOOK src and dst are reversed, so reverse sidx/didx */
2453 sidx = (direction == PF_IN) ? 1 : 0;
2454 didx = (direction == PF_IN) ? 0 : 1;
2457 PF_AZERO(&pnl->saddr, pnl->af) ||
2458 PF_AZERO(&pnl->daddr, pnl->af) ||
2459 ((pnl->proto == IPPROTO_TCP ||
2460 pnl->proto == IPPROTO_UDP) &&
2461 (!pnl->dport || !pnl->sport)))
2465 key.proto = pnl->proto;
2466 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
2467 key.port[sidx] = pnl->sport;
2468 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
2469 key.port[didx] = pnl->dport;
2471 state = pf_find_state_all(&key, direction, &m);
2474 error = E2BIG; /* more than one state */
2475 else if (state != NULL) {
2476 sk = state->key[sidx];
2477 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
2478 pnl->rsport = sk->port[sidx];
2479 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
2480 pnl->rdport = sk->port[didx];
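/*
 * Illustrative userland sketch (not part of this file): a transparent
 * proxy can recover the pre-rdr destination of an intercepted
 * connection the way ftp-proxy does; client_sin/proxy_sin are
 * placeholders for the accepted socket's peer and local addresses,
 * and the answer comes back in rdaddr/rdport:
 *
 *	struct pfioc_natlook pnl;
 *
 *	memset(&pnl, 0, sizeof(pnl));
 *	pnl.af = AF_INET;
 *	pnl.proto = IPPROTO_TCP;
 *	pnl.direction = PF_OUT;
 *	pnl.saddr.v4 = client_sin.sin_addr;
 *	pnl.sport = client_sin.sin_port;
 *	pnl.daddr.v4 = proxy_sin.sin_addr;
 *	pnl.dport = proxy_sin.sin_port;
 *	if (ioctl(dev, DIOCNATLOOK, &pnl) == -1)
 *		err(1, "DIOCNATLOOK");
 */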
2487 case DIOCSETTIMEOUT: {
2488 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
2491 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
2497 old = V_pf_default_rule.timeout[pt->timeout];
2499 old = pf_default_rule.timeout[pt->timeout];
2501 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
2504 V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
2506 pf_default_rule.timeout[pt->timeout] = pt->seconds;
2508 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
2509 wakeup(pf_purge_thread);
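/*
 * Illustrative userland sketch (not part of this file): shortening
 * the purge interval takes effect immediately because of the
 * wakeup() above:
 *
 *	struct pfioc_tm pt;
 *
 *	memset(&pt, 0, sizeof(pt));
 *	pt.timeout = PFTM_INTERVAL;
 *	pt.seconds = 5;
 *	if (ioctl(dev, DIOCSETTIMEOUT, &pt) == -1)
 *		err(1, "DIOCSETTIMEOUT");
 */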
2514 case DIOCGETTIMEOUT: {
2515 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
2517 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
2522 pt->seconds = V_pf_default_rule.timeout[pt->timeout];
2524 pt->seconds = pf_default_rule.timeout[pt->timeout];
2529 case DIOCGETLIMIT: {
2530 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2532 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
2537 pl->limit = V_pf_pool_limits[pl->index].limit;
2539 pl->limit = pf_pool_limits[pl->index].limit;
2544 case DIOCSETLIMIT: {
2545 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2548 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
2550 V_pf_pool_limits[pl->index].pp == NULL) {
2552 pf_pool_limits[pl->index].pp == NULL) {
2558 uma_zone_set_max(V_pf_pool_limits[pl->index].pp, pl->limit);
2559 old_limit = V_pf_pool_limits[pl->index].limit;
2560 V_pf_pool_limits[pl->index].limit = pl->limit;
2561 pl->limit = old_limit;
2563 if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
2564 pl->limit, NULL, 0) != 0) {
2568 old_limit = pf_pool_limits[pl->index].limit;
2569 pf_pool_limits[pl->index].limit = pl->limit;
2570 pl->limit = old_limit;
2575 case DIOCSETDEBUG: {
2576 u_int32_t *level = (u_int32_t *)addr;
2579 V_pf_status.debug = *level;
2581 pf_status.debug = *level;
2586 case DIOCCLRRULECTRS: {
2587 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
2588 struct pf_ruleset *ruleset = &pf_main_ruleset;
2589 struct pf_rule *rule;
2592 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
2593 rule->evaluations = 0;
2594 rule->packets[0] = rule->packets[1] = 0;
2595 rule->bytes[0] = rule->bytes[1] = 0;
2601 case DIOCGIFSPEED: {
2602 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr;
2603 struct pf_ifspeed ps;
2606 if (psp->ifname[0] != 0) {
2607 /* Can we completely trust user-land? */
2608 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
2609 ifp = ifunit(ps.ifname);
2611 psp->baudrate = ifp->if_baudrate;
2618 #endif /* __FreeBSD__ */
2621 case DIOCSTARTALTQ: {
2622 struct pf_altq *altq;
2624 /* enable all altq interfaces on active list */
2626 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
2627 if (altq->qname[0] == 0 && (altq->local_flags &
2628 PFALTQ_FLAG_IF_REMOVED) == 0) {
2630 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2631 if (altq->qname[0] == 0) {
2633 error = pf_enable_altq(altq);
2640 V_pf_altq_running = 1;
2642 pf_altq_running = 1;
2644 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2648 case DIOCSTOPALTQ: {
2649 struct pf_altq *altq;
2651 /* disable all altq interfaces on active list */
2653 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
2654 if (altq->qname[0] == 0 && (altq->local_flags &
2655 PFALTQ_FLAG_IF_REMOVED) == 0) {
2657 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2658 if (altq->qname[0] == 0) {
2660 error = pf_disable_altq(altq);
2667 V_pf_altq_running = 0;
2669 pf_altq_running = 0;
2671 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2676 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2677 struct pf_altq *altq, *a;
2680 if (pa->ticket != V_ticket_altqs_inactive) {
2682 if (pa->ticket != ticket_altqs_inactive) {
2688 altq = pool_get(&V_pf_altq_pl, PR_NOWAIT);
2690 altq = pool_get(&pf_altq_pl, PR_WAITOK|PR_LIMITFAIL);
2696 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
2698 altq->local_flags = 0;
2702 * if this is for a queue, find the discipline and
2703 * copy the necessary fields
2705 if (altq->qname[0] != 0) {
2706 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2709 pool_put(&V_pf_altq_pl, altq);
2711 pool_put(&pf_altq_pl, altq);
2715 altq->altq_disc = NULL;
2717 TAILQ_FOREACH(a, V_pf_altqs_inactive, entries) {
2719 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
2721 if (strncmp(a->ifname, altq->ifname,
2722 IFNAMSIZ) == 0 && a->qname[0] == 0) {
2723 altq->altq_disc = a->altq_disc;
2732 if ((ifp = ifunit(altq->ifname)) == NULL) {
2733 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
2737 error = altq_add(altq);
2744 pool_put(&V_pf_altq_pl, altq);
2746 pool_put(&pf_altq_pl, altq);
2752 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
2754 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2756 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
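/*
 * Note: the altq added above sits on the inactive list under
 * pa->ticket; it only becomes effective once the enclosing
 * transaction is committed (DIOCXCOMMIT -> pf_commit_altq()).
 */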
2760 case DIOCGETALTQS: {
2761 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2762 struct pf_altq *altq;
2766 TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
2768 pa->ticket = V_ticket_altqs_active;
2770 TAILQ_FOREACH(altq, pf_altqs_active, entries)
2772 pa->ticket = ticket_altqs_active;
2778 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2779 struct pf_altq *altq;
2783 if (pa->ticket != V_ticket_altqs_active) {
2785 if (pa->ticket != ticket_altqs_active) {
2792 altq = TAILQ_FIRST(V_pf_altqs_active);
2794 altq = TAILQ_FIRST(pf_altqs_active);
2796 while ((altq != NULL) && (nr < pa->nr)) {
2797 altq = TAILQ_NEXT(altq, entries);
2804 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2808 case DIOCCHANGEALTQ:
2809 /* CHANGEALTQ not supported yet! */
2813 case DIOCGETQSTATS: {
2814 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
2815 struct pf_altq *altq;
2820 if (pq->ticket != V_ticket_altqs_active) {
2822 if (pq->ticket != ticket_altqs_active) {
2827 nbytes = pq->nbytes;
2830 altq = TAILQ_FIRST(V_pf_altqs_active);
2832 altq = TAILQ_FIRST(pf_altqs_active);
2834 while ((altq != NULL) && (nr < pq->nr)) {
2835 altq = TAILQ_NEXT(altq, entries);
2844 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
2850 error = altq_getqstats(altq, pq->buf, &nbytes);
2855 pq->scheduler = altq->scheduler;
2856 pq->nbytes = nbytes;
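/*
 * Userland sketch (hypothetical): statistics are fetched per queue
 * index, with the ticket obtained from a prior DIOCGETALTQS:
 *
 *	struct pfioc_qstats pq;
 *	u_char buf[512];
 *	pq.ticket = pa.ticket;		(ticket from DIOCGETALTQS)
 *	pq.nr = 0;			(queue index)
 *	pq.buf = buf;
 *	pq.nbytes = sizeof(buf);
 *	ioctl(dev, DIOCGETQSTATS, &pq);
 */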
2862 case DIOCBEGINADDRS: {
2863 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2866 pf_empty_pool(&V_pf_pabuf);
2867 pp->ticket = ++V_ticket_pabuf;
2869 pf_empty_pool(&pf_pabuf);
2870 pp->ticket = ++ticket_pabuf;
2876 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2879 if (pp->ticket != V_ticket_pabuf) {
2881 if (pp->ticket != ticket_pabuf) {
2887 if (pp->af == AF_INET) {
2888 error = EAFNOSUPPORT;
2893 if (pp->af == AF_INET6) {
2894 error = EAFNOSUPPORT;
2898 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2899 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2900 pp->addr.addr.type != PF_ADDR_TABLE) {
2905 pa = pool_get(&V_pf_pooladdr_pl, PR_NOWAIT);
2907 pa = pool_get(&pf_pooladdr_pl, PR_WAITOK|PR_LIMITFAIL);
2913 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2914 if (pa->ifname[0]) {
2915 pa->kif = pfi_kif_get(pa->ifname);
2916 if (pa->kif == NULL) {
2918 pool_put(&V_pf_pooladdr_pl, pa);
2920 pool_put(&pf_pooladdr_pl, pa);
2925 pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
2927 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2928 pfi_dynaddr_remove(&pa->addr);
2929 pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
2931 pool_put(&V_pf_pooladdr_pl, pa);
2933 pool_put(&pf_pooladdr_pl, pa);
2939 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
2941 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2946 case DIOCGETADDRS: {
2947 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2950 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2951 pp->r_num, 0, 1, 0);
2956 TAILQ_FOREACH(pa, &pool->list, entries)
2962 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2965 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2966 pp->r_num, 0, 1, 1);
2971 pa = TAILQ_FIRST(&pool->list);
2972 while ((pa != NULL) && (nr < pp->nr)) {
2973 pa = TAILQ_NEXT(pa, entries);
2980 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2981 pf_addr_copyout(&pp->addr.addr);
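/*
 * Userland sketch (hypothetical): DIOCGETADDRS returns the address
 * count in pp.nr, then one DIOCGETADDR call per index retrieves the
 * entries:
 *
 *	ioctl(dev, DIOCGETADDRS, &pp);
 *	n = pp.nr;
 *	for (i = 0; i < n; i++) {
 *		pp.nr = i;
 *		ioctl(dev, DIOCGETADDR, &pp);
 *	}
 */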
2985 case DIOCCHANGEADDR: {
2986 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2987 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
2988 struct pf_ruleset *ruleset;
2990 if (pca->action < PF_CHANGE_ADD_HEAD ||
2991 pca->action > PF_CHANGE_REMOVE) {
2995 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2996 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2997 pca->addr.addr.type != PF_ADDR_TABLE) {
3002 ruleset = pf_find_ruleset(pca->anchor);
3003 if (ruleset == NULL) {
3007 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
3008 pca->r_num, pca->r_last, 1, 1);
3013 if (pca->action != PF_CHANGE_REMOVE) {
3015 newpa = pool_get(&V_pf_pooladdr_pl,
3018 newpa = pool_get(&pf_pooladdr_pl,
3019 PR_WAITOK|PR_LIMITFAIL);
3021 if (newpa == NULL) {
3025 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
3027 if (pca->af == AF_INET) {
3029 pool_put(&V_pf_pooladdr_pl, newpa);
3031 pool_put(&pf_pooladdr_pl, newpa);
3033 error = EAFNOSUPPORT;
3038 if (pca->af == AF_INET6) {
3040 pool_put(&V_pf_pooladdr_pl, newpa);
3042 pool_put(&pf_pooladdr_pl, newpa);
3044 error = EAFNOSUPPORT;
3048 if (newpa->ifname[0]) {
3049 newpa->kif = pfi_kif_get(newpa->ifname);
3050 if (newpa->kif == NULL) {
3052 pool_put(&V_pf_pooladdr_pl, newpa);
3054 pool_put(&pf_pooladdr_pl, newpa);
3059 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
3062 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
3063 pf_tbladdr_setup(ruleset, &newpa->addr)) {
3064 pfi_dynaddr_remove(&newpa->addr);
3065 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
3067 pool_put(&V_pf_pooladdr_pl, newpa);
3069 pool_put(&pf_pooladdr_pl, newpa);
3076 if (pca->action == PF_CHANGE_ADD_HEAD)
3077 oldpa = TAILQ_FIRST(&pool->list);
3078 else if (pca->action == PF_CHANGE_ADD_TAIL)
3079 oldpa = TAILQ_LAST(&pool->list, pf_palist);
3083 oldpa = TAILQ_FIRST(&pool->list);
3084 while ((oldpa != NULL) && (i < pca->nr)) {
3085 oldpa = TAILQ_NEXT(oldpa, entries);
3088 if (oldpa == NULL) {
3094 if (pca->action == PF_CHANGE_REMOVE) {
3095 TAILQ_REMOVE(&pool->list, oldpa, entries);
3096 pfi_dynaddr_remove(&oldpa->addr);
3097 pf_tbladdr_remove(&oldpa->addr);
3098 pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
3100 pool_put(&V_pf_pooladdr_pl, oldpa);
3102 pool_put(&pf_pooladdr_pl, oldpa);
3106 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
3107 else if (pca->action == PF_CHANGE_ADD_HEAD ||
3108 pca->action == PF_CHANGE_ADD_BEFORE)
3109 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
3111 TAILQ_INSERT_AFTER(&pool->list, oldpa,
3115 pool->cur = TAILQ_FIRST(&pool->list);
3116 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
3121 case DIOCGETRULESETS: {
3122 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
3123 struct pf_ruleset *ruleset;
3124 struct pf_anchor *anchor;
3126 pr->path[sizeof(pr->path) - 1] = 0;
3127 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
3132 if (ruleset->anchor == NULL) {
3133 /* XXX kludge for pf_main_ruleset */
3135 RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors)
3137 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
3139 if (anchor->parent == NULL)
3142 RB_FOREACH(anchor, pf_anchor_node,
3143 &ruleset->anchor->children)
3149 case DIOCGETRULESET: {
3150 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
3151 struct pf_ruleset *ruleset;
3152 struct pf_anchor *anchor;
3155 pr->path[sizeof(pr->path) - 1] = 0;
3156 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
3161 if (ruleset->anchor == NULL) {
3162 /* XXX kludge for pf_main_ruleset */
3164 RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors)
3166 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
3168 if (anchor->parent == NULL && nr++ == pr->nr) {
3169 strlcpy(pr->name, anchor->name,
3174 RB_FOREACH(anchor, pf_anchor_node,
3175 &ruleset->anchor->children)
3176 if (nr++ == pr->nr) {
3177 strlcpy(pr->name, anchor->name,
3187 case DIOCRCLRTABLES: {
3188 struct pfioc_table *io = (struct pfioc_table *)addr;
3190 if (io->pfrio_esize != 0) {
3194 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
3195 io->pfrio_flags | PFR_FLAG_USERIOCTL);
3199 case DIOCRADDTABLES: {
3200 struct pfioc_table *io = (struct pfioc_table *)addr;
3202 if (io->pfrio_esize != sizeof(struct pfr_table)) {
3206 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
3207 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
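/*
 * Userland sketch (hypothetical): the table ioctls all take an array
 * of fixed-size records, and pfrio_esize must match the kernel's idea
 * of the record size, as checked above. Creating one table:
 *
 *	struct pfioc_table io;
 *	struct pfr_table tbl;
 *	bzero(&io, sizeof(io));
 *	bzero(&tbl, sizeof(tbl));
 *	strlcpy(tbl.pfrt_name, "spammers", sizeof(tbl.pfrt_name));
 *	tbl.pfrt_flags = PFR_TFLAG_PERSIST;
 *	io.pfrio_buffer = &tbl;
 *	io.pfrio_esize = sizeof(tbl);
 *	io.pfrio_size = 1;
 *	ioctl(dev, DIOCRADDTABLES, &io);
 */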
3211 case DIOCRDELTABLES: {
3212 struct pfioc_table *io = (struct pfioc_table *)addr;
3214 if (io->pfrio_esize != sizeof(struct pfr_table)) {
3218 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
3219 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3223 case DIOCRGETTABLES: {
3224 struct pfioc_table *io = (struct pfioc_table *)addr;
3226 if (io->pfrio_esize != sizeof(struct pfr_table)) {
3230 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
3231 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3235 case DIOCRGETTSTATS: {
3236 struct pfioc_table *io = (struct pfioc_table *)addr;
3238 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
3242 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
3243 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3247 case DIOCRCLRTSTATS: {
3248 struct pfioc_table *io = (struct pfioc_table *)addr;
3250 if (io->pfrio_esize != sizeof(struct pfr_table)) {
3254 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
3255 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3259 case DIOCRSETTFLAGS: {
3260 struct pfioc_table *io = (struct pfioc_table *)addr;
3262 if (io->pfrio_esize != sizeof(struct pfr_table)) {
3266 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
3267 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
3268 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3272 case DIOCRCLRADDRS: {
3273 struct pfioc_table *io = (struct pfioc_table *)addr;
3275 if (io->pfrio_esize != 0) {
3279 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
3280 io->pfrio_flags | PFR_FLAG_USERIOCTL);
3284 case DIOCRADDADDRS: {
3285 struct pfioc_table *io = (struct pfioc_table *)addr;
3287 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3291 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
3292 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
3293 PFR_FLAG_USERIOCTL);
3297 case DIOCRDELADDRS: {
3298 struct pfioc_table *io = (struct pfioc_table *)addr;
3300 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3304 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
3305 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
3306 PFR_FLAG_USERIOCTL);
3310 case DIOCRSETADDRS: {
3311 struct pfioc_table *io = (struct pfioc_table *)addr;
3313 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3317 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
3318 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
3319 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
3320 PFR_FLAG_USERIOCTL, 0);
3324 case DIOCRGETADDRS: {
3325 struct pfioc_table *io = (struct pfioc_table *)addr;
3327 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3331 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
3332 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3336 case DIOCRGETASTATS: {
3337 struct pfioc_table *io = (struct pfioc_table *)addr;
3339 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
3343 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
3344 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3348 case DIOCRCLRASTATS: {
3349 struct pfioc_table *io = (struct pfioc_table *)addr;
3351 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3355 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
3356 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
3357 PFR_FLAG_USERIOCTL);
3361 case DIOCRTSTADDRS: {
3362 struct pfioc_table *io = (struct pfioc_table *)addr;
3364 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3368 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
3369 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
3370 PFR_FLAG_USERIOCTL);
3374 case DIOCRINADEFINE: {
3375 struct pfioc_table *io = (struct pfioc_table *)addr;
3377 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
3381 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
3382 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
3383 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
3388 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
3389 error = pf_osfp_add(io);
3394 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
3395 error = pf_osfp_get(io);
3400 struct pfioc_trans *io = (struct pfioc_trans *)addr;
3401 struct pfioc_trans_e *ioe;
3402 struct pfr_table *table;
3405 if (io->esize != sizeof(*ioe)) {
3412 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
3413 table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
3417 for (i = 0; i < io->size; i++) {
3419 PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error);
3422 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
3424 free(table, M_TEMP);
3429 switch (ioe->rs_num) {
3431 case PF_RULESET_ALTQ:
3432 if (ioe->anchor[0]) {
3433 free(table, M_TEMP);
3438 if ((error = pf_begin_altq(&ioe->ticket))) {
3439 free(table, M_TEMP);
3445 case PF_RULESET_TABLE:
3446 bzero(table, sizeof(*table));
3447 strlcpy(table->pfrt_anchor, ioe->anchor,
3448 sizeof(table->pfrt_anchor));
3449 if ((error = pfr_ina_begin(table,
3450 &ioe->ticket, NULL, 0))) {
3451 free(table, M_TEMP);
3457 if ((error = pf_begin_rules(&ioe->ticket,
3458 ioe->rs_num, ioe->anchor))) {
3459 free(table, M_TEMP);
3466 PF_COPYOUT(ioe, io->array+i, sizeof(io->array[i]),
3470 if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
3472 free(table, M_TEMP);
3478 free(table, M_TEMP);
3483 case DIOCXROLLBACK: {
3484 struct pfioc_trans *io = (struct pfioc_trans *)addr;
3485 struct pfioc_trans_e *ioe;
3486 struct pfr_table *table;
3489 if (io->esize != sizeof(*ioe)) {
3496 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
3497 table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
3501 for (i = 0; i < io->size; i++) {
3503 PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error);
3506 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
3508 free(table, M_TEMP);
3513 switch (ioe->rs_num) {
3515 case PF_RULESET_ALTQ:
3516 if (ioe->anchor[0]) {
3517 free(table, M_TEMP);
3522 if ((error = pf_rollback_altq(ioe->ticket))) {
3523 free(table, M_TEMP);
3525 goto fail; /* really bad */
3529 case PF_RULESET_TABLE:
3530 bzero(table, sizeof(*table));
3531 strlcpy(table->pfrt_anchor, ioe->anchor,
3532 sizeof(table->pfrt_anchor));
3533 if ((error = pfr_ina_rollback(table,
3534 ioe->ticket, NULL, 0))) {
3535 free(table, M_TEMP);
3537 goto fail; /* really bad */
3541 if ((error = pf_rollback_rules(ioe->ticket,
3542 ioe->rs_num, ioe->anchor))) {
3543 free(table, M_TEMP);
3545 goto fail; /* really bad */
3550 free(table, M_TEMP);
3556 struct pfioc_trans *io = (struct pfioc_trans *)addr;
3557 struct pfioc_trans_e *ioe;
3558 struct pfr_table *table;
3559 struct pf_ruleset *rs;
3562 if (io->esize != sizeof(*ioe)) {
3569 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
3570 table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
3574 /* first make sure everything will succeed */
3575 for (i = 0; i < io->size; i++) {
3577 PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error);
3580 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
3582 free(table, M_TEMP);
3587 switch (ioe->rs_num) {
3589 case PF_RULESET_ALTQ:
3590 if (ioe->anchor[0]) {
3591 free(table, M_TEMP);
3597 if (!V_altqs_inactive_open || ioe->ticket !=
3598 V_ticket_altqs_inactive) {
3600 if (!altqs_inactive_open || ioe->ticket !=
3601 ticket_altqs_inactive) {
3603 free(table, M_TEMP);
3610 case PF_RULESET_TABLE:
3611 rs = pf_find_ruleset(ioe->anchor);
3612 if (rs == NULL || !rs->topen || ioe->ticket !=
3614 free(table, M_TEMP);
3621 if (ioe->rs_num < 0 || ioe->rs_num >=
3623 free(table, M_TEMP);
3628 rs = pf_find_ruleset(ioe->anchor);
3630 !rs->rules[ioe->rs_num].inactive.open ||
3631 rs->rules[ioe->rs_num].inactive.ticket !=
3633 free(table, M_TEMP);
3641 /* now do the commit - no errors should happen here */
3642 for (i = 0; i < io->size; i++) {
3644 PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error);
3647 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
3649 free(table, M_TEMP);
3654 switch (ioe->rs_num) {
3656 case PF_RULESET_ALTQ:
3657 if ((error = pf_commit_altq(ioe->ticket))) {
3658 free(table, M_TEMP);
3660 goto fail; /* really bad */
3664 case PF_RULESET_TABLE:
3665 bzero(table, sizeof(*table));
3666 strlcpy(table->pfrt_anchor, ioe->anchor,
3667 sizeof(table->pfrt_anchor));
3668 if ((error = pfr_ina_commit(table, ioe->ticket,
3670 free(table, M_TEMP);
3672 goto fail; /* really bad */
3676 if ((error = pf_commit_rules(ioe->ticket,
3677 ioe->rs_num, ioe->anchor))) {
3678 free(table, M_TEMP);
3680 goto fail; /* really bad */
3685 free(table, M_TEMP);
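/*
 * Transaction recap: DIOCXBEGIN hands out one ticket per
 * pfioc_trans_e element, rulesets/tables/altqs are then loaded
 * against those tickets, and DIOCXCOMMIT swaps all inactive sets
 * active in one pass; a stale ticket aborts the commit with EBUSY
 * before anything has been swapped. DIOCXROLLBACK discards the
 * inactive sets instead.
 */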
3690 case DIOCGETSRCNODES: {
3691 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
3692 struct pf_src_node *n, *p, *pstore;
3694 int space = psn->psn_len;
3698 RB_FOREACH(n, pf_src_tree, &V_tree_src_tracking)
3700 RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
3703 psn->psn_len = sizeof(struct pf_src_node) * nr;
3710 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
3714 p = psn->psn_src_nodes;
3716 RB_FOREACH(n, pf_src_tree, &V_tree_src_tracking) {
3718 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
3720 int secs = time_second, diff;
3722 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
3725 bcopy(n, pstore, sizeof(*pstore));
3726 if (n->rule.ptr != NULL)
3727 pstore->rule.nr = n->rule.ptr->nr;
3728 pstore->creation = secs - pstore->creation;
3729 if (pstore->expire > secs)
3730 pstore->expire -= secs;
3734 /* adjust the connection rate estimate */
3735 diff = secs - n->conn_rate.last;
3736 if (diff >= n->conn_rate.seconds)
3737 pstore->conn_rate.count = 0;
3739 pstore->conn_rate.count -=
3740 n->conn_rate.count * diff /
3741 n->conn_rate.seconds;
3744 PF_COPYOUT(pstore, p, sizeof(*p), error);
3746 error = copyout(pstore, p, sizeof(*p));
3749 free(pstore, M_TEMP);
3755 psn->psn_len = sizeof(struct pf_src_node) * nr;
3757 free(pstore, M_TEMP);
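/*
 * Userland sketch (hypothetical): the usual two-pass pattern, first
 * calling with psn_len = 0 to learn the required buffer size, then
 * again with an allocated buffer:
 *
 *	struct pfioc_src_nodes psn;
 *	bzero(&psn, sizeof(psn));
 *	ioctl(dev, DIOCGETSRCNODES, &psn);
 *	psn.psn_buf = malloc(psn.psn_len);
 *	ioctl(dev, DIOCGETSRCNODES, &psn);
 */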
3761 case DIOCCLRSRCNODES: {
3762 struct pf_src_node *n;
3763 struct pf_state *state;
3766 RB_FOREACH(state, pf_state_tree_id, &V_tree_id) {
3768 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
3770 state->src_node = NULL;
3771 state->nat_src_node = NULL;
3774 RB_FOREACH(n, pf_src_tree, &V_tree_src_tracking) {
3776 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
3781 pf_purge_expired_src_nodes(1);
3783 V_pf_status.src_nodes = 0;
3785 pf_status.src_nodes = 0;
3790 case DIOCKILLSRCNODES: {
3791 struct pf_src_node *sn;
3793 struct pfioc_src_node_kill *psnk =
3794 (struct pfioc_src_node_kill *)addr;
3798 RB_FOREACH(sn, pf_src_tree, &V_tree_src_tracking) {
3800 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
3802 if (PF_MATCHA(psnk->psnk_src.neg,
3803 &psnk->psnk_src.addr.v.a.addr,
3804 &psnk->psnk_src.addr.v.a.mask,
3805 &sn->addr, sn->af) &&
3806 PF_MATCHA(psnk->psnk_dst.neg,
3807 &psnk->psnk_dst.addr.v.a.addr,
3808 &psnk->psnk_dst.addr.v.a.mask,
3809 &sn->raddr, sn->af)) {
3810 /* Handle state to src_node linkage */
3811 if (sn->states != 0) {
3812 RB_FOREACH(s, pf_state_tree_id,
3818 if (s->src_node == sn)
3820 if (s->nat_src_node == sn)
3821 s->nat_src_node = NULL;
3831 pf_purge_expired_src_nodes(1);
3833 psnk->psnk_killed = killed;
3837 case DIOCSETHOSTID: {
3838 u_int32_t *hostid = (u_int32_t *)addr;
3842 V_pf_status.hostid = arc4random();
3844 V_pf_status.hostid = *hostid;
3847 pf_status.hostid = arc4random();
3849 pf_status.hostid = *hostid;
3858 case DIOCIGETIFACES: {
3859 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3861 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
3865 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
3870 case DIOCSETIFFLAG: {
3871 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3873 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
3877 case DIOCCLRIFFLAG: {
3878 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3880 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
3893 sx_xunlock(&V_pf_consistency_lock);
3895 sx_sunlock(&V_pf_consistency_lock);
3899 rw_exit_write(&pf_consistency_lock);
3901 rw_exit_read(&pf_consistency_lock);
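/*
 * pfsync_state_export() below converts an in-kernel pf_state into
 * the wire format used by pfsync: both state keys, the peer data
 * and the counters are copied out with multi-byte fields converted
 * to network byte order.
 */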
3911 pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
3913 bzero(sp, sizeof(struct pfsync_state));
3915 /* copy from state key */
3916 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
3917 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
3918 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
3919 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
3920 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
3921 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
3922 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
3923 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
3924 sp->proto = st->key[PF_SK_WIRE]->proto;
3925 sp->af = st->key[PF_SK_WIRE]->af;
3927 /* copy from state */
3928 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
3929 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
3930 sp->creation = htonl(time_second - st->creation);
3931 sp->expire = pf_state_expires(st);
3932 if (sp->expire <= time_second)
3933 sp->expire = htonl(0);
3935 sp->expire = htonl(sp->expire - time_second);
3937 sp->direction = st->direction;
3939 sp->timeout = st->timeout;
3940 sp->state_flags = st->state_flags;
3942 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
3943 if (st->nat_src_node)
3944 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
3946 bcopy(&st->id, &sp->id, sizeof(sp->id));
3947 sp->creatorid = st->creatorid;
3948 pf_state_peer_hton(&st->src, &sp->src);
3949 pf_state_peer_hton(&st->dst, &sp->dst);
3951 if (st->rule.ptr == NULL)
3952 sp->rule = htonl(-1);
3954 sp->rule = htonl(st->rule.ptr->nr);
3955 if (st->anchor.ptr == NULL)
3956 sp->anchor = htonl(-1);
3958 sp->anchor = htonl(st->anchor.ptr->nr);
3959 if (st->nat_rule.ptr == NULL)
3960 sp->nat_rule = htonl(-1);
3962 sp->nat_rule = htonl(st->nat_rule.ptr->nr);
3964 pf_state_counter_hton(st->packets[0], sp->packets[0]);
3965 pf_state_counter_hton(st->packets[1], sp->packets[1]);
3966 pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
3967 pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
3972 * XXX - Check for version mismatch!!!
3975 pf_clear_states(void)
3977 struct pf_state *state;
3980 RB_FOREACH(state, pf_state_tree_id, &V_tree_id) {
3982 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
3984 state->timeout = PFTM_PURGE;
3986 /* don't send out individual delete messages */
3987 state->sync_state = PFSTATE_NOSYNC;
3989 pf_unlink_state(state);
3994 * XXX This is called on module unload; we do not want to sync that over? */
3996 pfsync_clear_states(V_pf_status.hostid, psk->psk_ifname);
4001 pf_clear_tables(void)
4003 struct pfioc_table io;
4006 bzero(&io, sizeof(io));
4008 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
4015 pf_clear_srcnodes(void)
4017 struct pf_src_node *n;
4018 struct pf_state *state;
4021 RB_FOREACH(state, pf_state_tree_id, &V_tree_id) {
4023 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
4025 state->src_node = NULL;
4026 state->nat_src_node = NULL;
4029 RB_FOREACH(n, pf_src_tree, &V_tree_src_tracking) {
4031 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
4038 * XXX - Check for version mismatch!!!
4042 * Duplicate pfctl -Fa operation to get rid of as much as we can.
4051 V_pf_status.running = 0;
4053 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
4055 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
4058 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
4060 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
4061 break; /* XXX: rollback? */
4063 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
4065 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
4066 break; /* XXX: rollback? */
4068 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
4070 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
4071 break; /* XXX: rollback? */
4073 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
4075 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
4076 break; /* XXX: rollback? */
4079 /* XXX: these should always succeed here */
4080 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
4081 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
4082 pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
4083 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
4084 pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
4086 if ((error = pf_clear_tables()) != 0)
4090 if ((error = pf_begin_altq(&t[0])) != 0) {
4091 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
4094 pf_commit_altq(t[0]);
4099 pf_clear_srcnodes();
4101 /* status does not use malloc'ed memory, so no cleanup is needed */
4102 /* fingerprints and interfaces have their own cleanup code */
4110 pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
4114 * XXX Wed Jul 9 22:03:16 2003 UTC
4115 * OpenBSD has changed its byte ordering convention for ip_len/ip_off
4116 * in the network stack. OpenBSD's network stack used to convert
4117 * ip_len/ip_off to host byte order first, as FreeBSD does.
4118 * This is no longer the case, so we have to convert back to network
4121 struct ip *h = NULL;
4124 if ((*m)->m_pkthdr.len >= (int)sizeof(struct ip)) {
4125 /* if m_pkthdr.len is less than the IP header, pf will handle it. */
4126 h = mtod(*m, struct ip *);
4130 CURVNET_SET(ifp->if_vnet);
4131 chk = pf_test(PF_IN, ifp, m, NULL, inp);
4138 /* pf_test can change ip header location */
4139 h = mtod(*m, struct ip *);
4147 pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
4151 * XXX Wed Jul 9 22:03:16 2003 UTC
4152 * OpenBSD has changed its byte ordering convention for ip_len/ip_off
4153 * in the network stack. OpenBSD's network stack used to convert
4154 * ip_len/ip_off to host byte order first, as FreeBSD does.
4155 * This is no longer the case, so we have to convert back to network
4158 struct ip *h = NULL;
4161 /* We need a proper CSUM before we start (s. OpenBSD ip_output) */
4162 if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
4163 in_delayed_cksum(*m);
4164 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
4166 if ((*m)->m_pkthdr.len >= (int)sizeof(*h)) {
4167 /* if m_pkthdr.len is less than the IP header, pf will handle it. */
4168 h = mtod(*m, struct ip *);
4172 CURVNET_SET(ifp->if_vnet);
4173 chk = pf_test(PF_OUT, ifp, m, NULL, inp);
4180 /* pf_test can change ip header location */
4181 h = mtod(*m, struct ip *);
4191 pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
4196 * IPv6 is not affected by ip_len/ip_off byte order changes.
4201 * In case of loopback traffic, IPv6 uses the real interface in
4202 * order to support scoped addresses. In order to support stateful
4203 * filtering we have changed this to lo0, as is the case in IPv4.
4205 CURVNET_SET(ifp->if_vnet);
4206 chk = pf_test6(PF_IN, (*m)->m_flags & M_LOOP ? V_loif : ifp, m,
4217 pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
4221 * IPv6 is not affected by ip_len/ip_off byte order changes.
4225 /* We need a proper CSUM before we start (s. OpenBSD ip_output) */
4226 if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
4228 /* XXX-BZ copy&paste error from r126261? */
4229 in_delayed_cksum(*m);
4231 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
4233 CURVNET_SET(ifp->if_vnet);
4234 chk = pf_test6(PF_OUT, ifp, m, NULL, inp);
4248 struct pfil_head *pfh_inet;
4251 struct pfil_head *pfh_inet6;
4256 if (V_pf_pfil_hooked)
4260 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
4261 if (pfh_inet == NULL)
4262 return (ESRCH); /* XXX */
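/* Hook pf into the inbound and outbound IPv4 pfil(9) chains. */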
4263 pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
4264 pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
4267 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
4268 if (pfh_inet6 == NULL) {
4270 pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
4272 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
4275 return (ESRCH); /* XXX */
4277 pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
4278 pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
4281 V_pf_pfil_hooked = 1;
4289 struct pfil_head *pfh_inet;
4292 struct pfil_head *pfh_inet6;
4297 if (V_pf_pfil_hooked == 0)
4301 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
4302 if (pfh_inet == NULL)
4303 return (ESRCH); /* XXX */
4304 pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
4306 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
4310 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
4311 if (pfh_inet6 == NULL)
4312 return (ESRCH); /* XXX */
4313 pfil_remove_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
4315 pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
4319 V_pf_pfil_hooked = 0;
4326 VNET_ITERATOR_DECL(vnet_iter);
4329 VNET_FOREACH(vnet_iter) {
4330 CURVNET_SET(vnet_iter);
4331 V_pf_pfil_hooked = 0;
4332 V_pf_end_threads = 0;
4333 V_debug_pfugidhack = 0;
4334 TAILQ_INIT(&V_pf_tags);
4335 TAILQ_INIT(&V_pf_qids);
4338 VNET_LIST_RUNLOCK();
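/* Create the /dev/pf control device and the consistency lock. */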
4341 pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
4343 sx_init(&V_pf_consistency_lock, "pf_statetbl_lock");
4356 V_pf_status.running = 0;
4358 m_addr_chg_pf_p = NULL;
4359 error = dehook_pf();
4362 * Should not happen!
4363 * XXX Due to error code ESRCH, kldunload will show
4364 * a message like 'No such process'.
4366 printf("%s: pfil unregistration failed\n", __FUNCTION__);
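/*
 * Tell the purge thread to exit and wait for it; the thread bumps
 * V_pf_end_threads past 1 on its way out.
 */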
4371 V_pf_end_threads = 1;
4372 while (V_pf_end_threads < 2) {
4373 wakeup_one(pf_purge_thread);
4374 msleep(pf_purge_thread, &pf_task_mtx, 0, "pftmo", hz);
4381 destroy_dev(pf_dev);
4383 sx_destroy(&V_pf_consistency_lock);
4388 pf_modevent(module_t mod, int type, void *data)
4398 * Module should not be unloaded due to race conditions.
4403 error = pf_unload();
4412 static moduledata_t pf_mod = {
4418 DECLARE_MODULE(pf, pf_mod, SI_SUB_PSEUDO, SI_ORDER_FIRST);
4419 MODULE_VERSION(pf, PF_MODVER);
4420 #endif /* __FreeBSD__ */