1 /* $OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $ */
4 * Copyright (c) 2001 Daniel Hartmeier
5 * Copyright (c) 2002 - 2008 Henning Brauer
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * - Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * - Redistributions in binary form must reproduce the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer in the documentation and/or other materials provided
17 * with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
32 * Effort sponsored in part by the Defense Advanced Research Projects
33 * Agency (DARPA) and Air Force Research Laboratory, Air Force
34 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
40 #include "opt_inet6.h"
42 #include <sys/cdefs.h>
43 __FBSDID("$FreeBSD$");
53 #define NPFLOW DEV_PFLOW
65 #include <sys/param.h>
66 #include <sys/systm.h>
68 #include <sys/filio.h>
69 #include <sys/socket.h>
70 #include <sys/socketvar.h>
71 #include <sys/kernel.h>
74 #include <sys/random.h>
75 #include <sys/sysctl.h>
76 #include <sys/endian.h>
77 #define betoh64 be64toh
83 #include <sys/kthread.h>
87 #include <sys/rwlock.h>
93 #include <crypto/md5.h>
97 #include <net/if_types.h>
99 #include <net/route.h>
102 #include <net/radix_mpath.h>
105 #include <net/radix_mpath.h>
108 #include <netinet/in.h>
109 #include <netinet/in_var.h>
110 #include <netinet/in_systm.h>
111 #include <netinet/ip.h>
112 #include <netinet/ip_var.h>
113 #include <netinet/tcp.h>
114 #include <netinet/tcp_seq.h>
115 #include <netinet/udp.h>
116 #include <netinet/ip_icmp.h>
117 #include <netinet/in_pcb.h>
118 #include <netinet/tcp_timer.h>
119 #include <netinet/tcp_var.h>
120 #include <netinet/udp_var.h>
121 #include <netinet/icmp_var.h>
122 #include <netinet/if_ether.h>
124 #include <netinet/ip_fw.h>
125 #include <netpfil/ipfw/ip_fw_private.h> /* XXX: only for DIR_IN/DIR_OUT */
129 #include <dev/rndvar.h>
131 #include <net/pfvar.h>
132 #include <net/if_pflog.h>
133 #include <net/if_pflow.h>
134 #include <net/if_pfsync.h>
137 #include <netinet/ip6.h>
138 #include <netinet/in_pcb.h>
139 #include <netinet/icmp6.h>
140 #include <netinet6/nd6.h>
142 #include <netinet6/ip6_var.h>
143 #include <netinet6/in6_pcb.h>
148 #include <machine/in_cksum.h>
149 #include <sys/limits.h>
150 #include <sys/ucred.h>
151 #include <security/mac/mac_framework.h>
153 extern int ip_optcopy(struct ip *, struct ip *);
157 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
159 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
168 VNET_DEFINE(struct pf_state_tree, pf_statetbl);
170 VNET_DEFINE(struct pf_altqqueue, pf_altqs[2]);
171 VNET_DEFINE(struct pf_palist, pf_pabuf);
172 VNET_DEFINE(struct pf_altqqueue *, pf_altqs_active);
173 VNET_DEFINE(struct pf_altqqueue *, pf_altqs_inactive);
174 VNET_DEFINE(struct pf_status, pf_status);
176 VNET_DEFINE(u_int32_t, ticket_altqs_active);
177 VNET_DEFINE(u_int32_t, ticket_altqs_inactive);
178 VNET_DEFINE(int, altqs_inactive_open);
179 VNET_DEFINE(u_int32_t, ticket_pabuf);
181 VNET_DEFINE(MD5_CTX, pf_tcp_secret_ctx);
182 #define V_pf_tcp_secret_ctx VNET(pf_tcp_secret_ctx)
183 VNET_DEFINE(u_char, pf_tcp_secret[16]);
184 #define V_pf_tcp_secret VNET(pf_tcp_secret)
185 VNET_DEFINE(int, pf_tcp_secret_init);
186 #define V_pf_tcp_secret_init VNET(pf_tcp_secret_init)
187 VNET_DEFINE(int, pf_tcp_iss_off);
188 #define V_pf_tcp_iss_off VNET(pf_tcp_iss_off)
190 struct pf_anchor_stackframe {
191 struct pf_ruleset *rs;
193 struct pf_anchor_node *parent;
194 struct pf_anchor *child;
196 VNET_DEFINE(struct pf_anchor_stackframe, pf_anchor_stack[64]);
197 #define V_pf_anchor_stack VNET(pf_anchor_stack)
199 VNET_DEFINE(uma_zone_t, pf_src_tree_pl);
200 VNET_DEFINE(uma_zone_t, pf_rule_pl);
201 VNET_DEFINE(uma_zone_t, pf_pooladdr_pl);
202 VNET_DEFINE(uma_zone_t, pf_state_pl);
203 VNET_DEFINE(uma_zone_t, pf_state_key_pl);
204 VNET_DEFINE(uma_zone_t, pf_state_item_pl);
205 VNET_DEFINE(uma_zone_t, pf_altq_pl);
207 struct pf_state_tree pf_statetbl;
209 struct pf_altqqueue pf_altqs[2];
210 struct pf_palist pf_pabuf;
211 struct pf_altqqueue *pf_altqs_active;
212 struct pf_altqqueue *pf_altqs_inactive;
213 struct pf_status pf_status;
215 u_int32_t ticket_altqs_active;
216 u_int32_t ticket_altqs_inactive;
217 int altqs_inactive_open;
218 u_int32_t ticket_pabuf;
220 MD5_CTX pf_tcp_secret_ctx;
221 u_char pf_tcp_secret[16];
222 int pf_tcp_secret_init;
225 struct pf_anchor_stackframe {
226 struct pf_ruleset *rs;
228 struct pf_anchor_node *parent;
229 struct pf_anchor *child;
230 } pf_anchor_stack[64];
232 struct pool pf_src_tree_pl, pf_rule_pl, pf_pooladdr_pl;
233 struct pool pf_state_pl, pf_state_key_pl, pf_state_item_pl;
234 struct pool pf_altq_pl;
237 void pf_init_threshold(struct pf_threshold *, u_int32_t,
239 void pf_add_threshold(struct pf_threshold *);
240 int pf_check_threshold(struct pf_threshold *);
242 void pf_change_ap(struct mbuf *, struct pf_addr *, u_int16_t *,
243 u_int16_t *, u_int16_t *, struct pf_addr *,
244 u_int16_t, u_int8_t, sa_family_t);
245 int pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
246 struct tcphdr *, struct pf_state_peer *);
248 void pf_change_a6(struct pf_addr *, u_int16_t *,
249 struct pf_addr *, u_int8_t);
251 void pf_change_icmp(struct pf_addr *, u_int16_t *,
252 struct pf_addr *, struct pf_addr *, u_int16_t,
253 u_int16_t *, u_int16_t *, u_int16_t *,
254 u_int16_t *, u_int8_t, sa_family_t);
256 void pf_send_tcp(struct mbuf *,
257 const struct pf_rule *, sa_family_t,
259 void pf_send_tcp(const struct pf_rule *, sa_family_t,
261 const struct pf_addr *, const struct pf_addr *,
262 u_int16_t, u_int16_t, u_int32_t, u_int32_t,
263 u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
264 u_int16_t, struct ether_header *, struct ifnet *);
265 static void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
266 sa_family_t, struct pf_rule *);
267 void pf_detach_state(struct pf_state *);
268 void pf_state_key_detach(struct pf_state *, int);
269 u_int32_t pf_tcp_iss(struct pf_pdesc *);
270 int pf_test_rule(struct pf_rule **, struct pf_state **,
271 int, struct pfi_kif *, struct mbuf *, int,
272 void *, struct pf_pdesc *, struct pf_rule **,
274 struct pf_ruleset **, struct ifqueue *,
277 struct pf_ruleset **, struct ifqueue *);
279 static __inline int pf_create_state(struct pf_rule *, struct pf_rule *,
280 struct pf_rule *, struct pf_pdesc *,
281 struct pf_src_node *, struct pf_state_key *,
282 struct pf_state_key *, struct pf_state_key *,
283 struct pf_state_key *, struct mbuf *, int,
284 u_int16_t, u_int16_t, int *, struct pfi_kif *,
285 struct pf_state **, int, u_int16_t, u_int16_t,
287 int pf_test_fragment(struct pf_rule **, int,
288 struct pfi_kif *, struct mbuf *, void *,
289 struct pf_pdesc *, struct pf_rule **,
290 struct pf_ruleset **);
291 int pf_tcp_track_full(struct pf_state_peer *,
292 struct pf_state_peer *, struct pf_state **,
293 struct pfi_kif *, struct mbuf *, int,
294 struct pf_pdesc *, u_short *, int *);
295 int pf_tcp_track_sloppy(struct pf_state_peer *,
296 struct pf_state_peer *, struct pf_state **,
297 struct pf_pdesc *, u_short *);
298 int pf_test_state_tcp(struct pf_state **, int,
299 struct pfi_kif *, struct mbuf *, int,
300 void *, struct pf_pdesc *, u_short *);
301 int pf_test_state_udp(struct pf_state **, int,
302 struct pfi_kif *, struct mbuf *, int,
303 void *, struct pf_pdesc *);
304 int pf_test_state_icmp(struct pf_state **, int,
305 struct pfi_kif *, struct mbuf *, int,
306 void *, struct pf_pdesc *, u_short *);
307 int pf_test_state_other(struct pf_state **, int,
308 struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
309 void pf_route(struct mbuf **, struct pf_rule *, int,
310 struct ifnet *, struct pf_state *,
312 void pf_route6(struct mbuf **, struct pf_rule *, int,
313 struct ifnet *, struct pf_state *,
316 int pf_socket_lookup(int, struct pf_pdesc *);
318 u_int8_t pf_get_wscale(struct mbuf *, int, u_int16_t,
320 u_int16_t pf_get_mss(struct mbuf *, int, u_int16_t,
322 u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t,
324 void pf_set_rt_ifp(struct pf_state *,
326 int pf_check_proto_cksum(struct mbuf *, int, int,
327 u_int8_t, sa_family_t);
329 struct pf_divert *pf_get_divert(struct mbuf *);
331 void pf_print_state_parts(struct pf_state *,
332 struct pf_state_key *, struct pf_state_key *);
333 int pf_addr_wrap_neq(struct pf_addr_wrap *,
334 struct pf_addr_wrap *);
335 int pf_compare_state_keys(struct pf_state_key *,
336 struct pf_state_key *, struct pfi_kif *, u_int);
338 struct pf_state *pf_find_state(struct pfi_kif *,
339 struct pf_state_key_cmp *, u_int, struct mbuf *,
342 struct pf_state *pf_find_state(struct pfi_kif *,
343 struct pf_state_key_cmp *, u_int, struct mbuf *);
345 int pf_src_connlimit(struct pf_state **);
346 int pf_check_congestion(struct ifqueue *);
349 int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);
351 VNET_DECLARE(int, pf_end_threads);
353 VNET_DEFINE(struct pf_pool_limit, pf_pool_limits[PF_LIMIT_MAX]);
355 extern struct pool pfr_ktable_pl;
356 extern struct pool pfr_kentry_pl;
358 struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
359 { &pf_state_pl, PFSTATE_HIWAT },
360 { &pf_src_tree_pl, PFSNODE_HIWAT },
361 { &pf_frent_pl, PFFRAG_FRENT_HIWAT },
362 { &pfr_ktable_pl, PFR_KTABLE_HIWAT },
363 { &pfr_kentry_pl, PFR_KENTRY_HIWAT }
368 #define PPACKET_LOOPED() \
369 (pd->pf_mtag->flags & PF_PACKET_LOOPED)
371 #define PACKET_LOOPED() \
372 (pd.pf_mtag->flags & PF_PACKET_LOOPED)
374 #define STATE_LOOKUP(i, k, d, s, m, pt) \
376 s = pf_find_state(i, k, d, m, pt); \
377 if (s == NULL || (s)->timeout == PFTM_PURGE) \
379 if (PPACKET_LOOPED()) \
382 (((s)->rule.ptr->rt == PF_ROUTETO && \
383 (s)->rule.ptr->direction == PF_OUT) || \
384 ((s)->rule.ptr->rt == PF_REPLYTO && \
385 (s)->rule.ptr->direction == PF_IN)) && \
386 (s)->rt_kif != NULL && \
391 #define STATE_LOOKUP(i, k, d, s, m) \
393 s = pf_find_state(i, k, d, m); \
394 if (s == NULL || (s)->timeout == PFTM_PURGE) \
397 (((s)->rule.ptr->rt == PF_ROUTETO && \
398 (s)->rule.ptr->direction == PF_OUT) || \
399 ((s)->rule.ptr->rt == PF_REPLYTO && \
400 (s)->rule.ptr->direction == PF_IN)) && \
401 (s)->rt_kif != NULL && \
408 #define BOUND_IFACE(r, k) \
409 ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all
411 #define BOUND_IFACE(r, k) \
412 ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all
415 #define STATE_INC_COUNTERS(s) \
417 s->rule.ptr->states_cur++; \
418 s->rule.ptr->states_tot++; \
419 if (s->anchor.ptr != NULL) { \
420 s->anchor.ptr->states_cur++; \
421 s->anchor.ptr->states_tot++; \
423 if (s->nat_rule.ptr != NULL) { \
424 s->nat_rule.ptr->states_cur++; \
425 s->nat_rule.ptr->states_tot++; \
429 #define STATE_DEC_COUNTERS(s) \
431 if (s->nat_rule.ptr != NULL) \
432 s->nat_rule.ptr->states_cur--; \
433 if (s->anchor.ptr != NULL) \
434 s->anchor.ptr->states_cur--; \
435 s->rule.ptr->states_cur--; \
438 static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
439 static __inline int pf_state_compare_key(struct pf_state_key *,
440 struct pf_state_key *);
441 static __inline int pf_state_compare_id(struct pf_state *,
445 VNET_DEFINE(struct pf_src_tree, tree_src_tracking);
447 VNET_DEFINE(struct pf_state_tree_id, tree_id);
448 VNET_DEFINE(struct pf_state_queue, state_list);
450 struct pf_src_tree tree_src_tracking;
452 struct pf_state_tree_id tree_id;
453 struct pf_state_queue state_list;
456 RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
457 RB_GENERATE(pf_state_tree, pf_state_key, entry, pf_state_compare_key);
458 RB_GENERATE(pf_state_tree_id, pf_state,
459 entry_id, pf_state_compare_id);
462 pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
466 if (a->rule.ptr > b->rule.ptr)
468 if (a->rule.ptr < b->rule.ptr)
470 if ((diff = a->af - b->af) != 0)
475 if (a->addr.addr32[0] > b->addr.addr32[0])
477 if (a->addr.addr32[0] < b->addr.addr32[0])
483 if (a->addr.addr32[3] > b->addr.addr32[3])
485 if (a->addr.addr32[3] < b->addr.addr32[3])
487 if (a->addr.addr32[2] > b->addr.addr32[2])
489 if (a->addr.addr32[2] < b->addr.addr32[2])
491 if (a->addr.addr32[1] > b->addr.addr32[1])
493 if (a->addr.addr32[1] < b->addr.addr32[1])
495 if (a->addr.addr32[0] > b->addr.addr32[0])
497 if (a->addr.addr32[0] < b->addr.addr32[0])
507 pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
512 dst->addr32[0] = src->addr32[0];
516 dst->addr32[0] = src->addr32[0];
517 dst->addr32[1] = src->addr32[1];
518 dst->addr32[2] = src->addr32[2];
519 dst->addr32[3] = src->addr32[3];
526 pf_init_threshold(struct pf_threshold *threshold,
527 u_int32_t limit, u_int32_t seconds)
529 threshold->limit = limit * PF_THRESHOLD_MULT;
530 threshold->seconds = seconds;
531 threshold->count = 0;
532 threshold->last = time_second;
536 pf_add_threshold(struct pf_threshold *threshold)
538 u_int32_t t = time_second, diff = t - threshold->last;
540 if (diff >= threshold->seconds)
541 threshold->count = 0;
543 threshold->count -= threshold->count * diff /
545 threshold->count += PF_THRESHOLD_MULT;
550 pf_check_threshold(struct pf_threshold *threshold)
552 return (threshold->count > threshold->limit);
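/*
 * Illustrative sketch (not part of pf): the threshold above is a
 * fixed-point leaky counter.  Every event adds PF_THRESHOLD_MULT
 * "points" and the count decays linearly over the configured window,
 * so a rule option like "max-src-conn-rate 3/10" becomes
 * limit = 3 * PF_THRESHOLD_MULT with a 10 second decay.  A minimal
 * userland model of the same arithmetic, assuming PF_THRESHOLD_MULT
 * is 1000 as defined in pfvar.h:
 */
#if 0
#include <stdio.h>

#define THRESHOLD_MULT	1000		/* stands in for PF_THRESHOLD_MULT */

struct threshold { unsigned limit, seconds, count, last; };

static void
threshold_add(struct threshold *t, unsigned now)
{
	unsigned diff = now - t->last;

	if (diff >= t->seconds)
		t->count = 0;			/* window expired, start over */
	else if (t->count > 0)
		t->count -= t->count * diff / t->seconds; /* linear decay */
	t->last = now;
	t->count += THRESHOLD_MULT;		/* one event = one full unit */
}

int
main(void)
{
	/* model "max-src-conn-rate 3/10": 3 connections per 10 seconds */
	struct threshold t = { 3 * THRESHOLD_MULT, 10, 0, 0 };
	unsigned now;

	for (now = 0; now < 5; now++) {
		threshold_add(&t, now);
		printf("t=%u count=%u over=%d\n", now, t.count,
		    t.count > t.limit);
	}
	return (0);
}
#endif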
556 pf_src_connlimit(struct pf_state **state)
560 (*state)->src_node->conn++;
561 (*state)->src.tcp_est = 1;
562 pf_add_threshold(&(*state)->src_node->conn_rate);
564 if ((*state)->rule.ptr->max_src_conn &&
565 (*state)->rule.ptr->max_src_conn <
566 (*state)->src_node->conn) {
568 V_pf_status.lcounters[LCNT_SRCCONN]++;
570 pf_status.lcounters[LCNT_SRCCONN]++;
575 if ((*state)->rule.ptr->max_src_conn_rate.limit &&
576 pf_check_threshold(&(*state)->src_node->conn_rate)) {
578 V_pf_status.lcounters[LCNT_SRCCONNRATE]++;
580 pf_status.lcounters[LCNT_SRCCONNRATE]++;
588 if ((*state)->rule.ptr->overload_tbl) {
590 u_int32_t killed = 0;
593 V_pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
594 if (V_pf_status.debug >= PF_DEBUG_MISC) {
596 pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
597 if (pf_status.debug >= PF_DEBUG_MISC) {
599 printf("pf_src_connlimit: blocking address ");
600 pf_print_host(&(*state)->src_node->addr, 0,
601 (*state)->key[PF_SK_WIRE]->af);
604 bzero(&p, sizeof(p));
605 p.pfra_af = (*state)->key[PF_SK_WIRE]->af;
606 switch ((*state)->key[PF_SK_WIRE]->af) {
610 p.pfra_ip4addr = (*state)->src_node->addr.v4;
616 p.pfra_ip6addr = (*state)->src_node->addr.v6;
621 pfr_insert_kentry((*state)->rule.ptr->overload_tbl,
624 /* kill existing states if that's required. */
625 if ((*state)->rule.ptr->flush) {
626 struct pf_state_key *sk;
630 V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
631 RB_FOREACH(st, pf_state_tree_id, &V_tree_id) {
633 pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
634 RB_FOREACH(st, pf_state_tree_id, &tree_id) {
636 sk = st->key[PF_SK_WIRE];
638 * Kill states from this source. (Only those
639 * from the same rule if PF_FLUSH_GLOBAL is not
643 (*state)->key[PF_SK_WIRE]->af &&
644 (((*state)->direction == PF_OUT &&
645 PF_AEQ(&(*state)->src_node->addr,
646 &sk->addr[1], sk->af)) ||
647 ((*state)->direction == PF_IN &&
648 PF_AEQ(&(*state)->src_node->addr,
649 &sk->addr[0], sk->af))) &&
650 ((*state)->rule.ptr->flush &
652 (*state)->rule.ptr == st->rule.ptr)) {
653 st->timeout = PFTM_PURGE;
654 st->src.state = st->dst.state =
660 if (V_pf_status.debug >= PF_DEBUG_MISC)
662 if (pf_status.debug >= PF_DEBUG_MISC)
664 printf(", %u states killed", killed);
667 if (V_pf_status.debug >= PF_DEBUG_MISC)
669 if (pf_status.debug >= PF_DEBUG_MISC)
674 /* kill this state */
675 (*state)->timeout = PFTM_PURGE;
676 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
681 pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
682 struct pf_addr *src, sa_family_t af)
684 struct pf_src_node k;
688 PF_ACPY(&k.addr, src, af);
689 if (rule->rule_flag & PFRULE_RULESRCTRACK ||
690 rule->rpool.opts & PF_POOL_STICKYADDR)
695 V_pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
696 *sn = RB_FIND(pf_src_tree, &V_tree_src_tracking, &k);
698 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
699 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
703 if (!rule->max_src_nodes ||
704 rule->src_nodes < rule->max_src_nodes)
706 (*sn) = pool_get(&V_pf_src_tree_pl, PR_NOWAIT | PR_ZERO);
708 (*sn) = pool_get(&pf_src_tree_pl, PR_NOWAIT | PR_ZERO);
712 V_pf_status.lcounters[LCNT_SRCNODES]++;
714 pf_status.lcounters[LCNT_SRCNODES]++;
719 pf_init_threshold(&(*sn)->conn_rate,
720 rule->max_src_conn_rate.limit,
721 rule->max_src_conn_rate.seconds);
724 if (rule->rule_flag & PFRULE_RULESRCTRACK ||
725 rule->rpool.opts & PF_POOL_STICKYADDR)
726 (*sn)->rule.ptr = rule;
728 (*sn)->rule.ptr = NULL;
729 PF_ACPY(&(*sn)->addr, src, af);
730 if (RB_INSERT(pf_src_tree,
732 &V_tree_src_tracking, *sn) != NULL) {
733 if (V_pf_status.debug >= PF_DEBUG_MISC) {
735 &tree_src_tracking, *sn) != NULL) {
736 if (pf_status.debug >= PF_DEBUG_MISC) {
738 printf("pf: src_tree insert failed: ");
739 pf_print_host(&(*sn)->addr, 0, af);
743 pool_put(&V_pf_src_tree_pl, *sn);
745 pool_put(&pf_src_tree_pl, *sn);
749 (*sn)->creation = time_second;
750 (*sn)->ruletype = rule->action;
751 if ((*sn)->rule.ptr != NULL)
752 (*sn)->rule.ptr->src_nodes++;
754 V_pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
755 V_pf_status.src_nodes++;
757 pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
758 pf_status.src_nodes++;
761 if (rule->max_src_states &&
762 (*sn)->states >= rule->max_src_states) {
764 V_pf_status.lcounters[LCNT_SRCSTATES]++;
766 pf_status.lcounters[LCNT_SRCSTATES]++;
774 /* state table stuff */
777 pf_state_compare_key(struct pf_state_key *a, struct pf_state_key *b)
781 if ((diff = a->proto - b->proto) != 0)
783 if ((diff = a->af - b->af) != 0)
788 if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
790 if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
792 if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
794 if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
800 if (a->addr[0].addr32[3] > b->addr[0].addr32[3])
802 if (a->addr[0].addr32[3] < b->addr[0].addr32[3])
804 if (a->addr[1].addr32[3] > b->addr[1].addr32[3])
806 if (a->addr[1].addr32[3] < b->addr[1].addr32[3])
808 if (a->addr[0].addr32[2] > b->addr[0].addr32[2])
810 if (a->addr[0].addr32[2] < b->addr[0].addr32[2])
812 if (a->addr[1].addr32[2] > b->addr[1].addr32[2])
814 if (a->addr[1].addr32[2] < b->addr[1].addr32[2])
816 if (a->addr[0].addr32[1] > b->addr[0].addr32[1])
818 if (a->addr[0].addr32[1] < b->addr[0].addr32[1])
820 if (a->addr[1].addr32[1] > b->addr[1].addr32[1])
822 if (a->addr[1].addr32[1] < b->addr[1].addr32[1])
824 if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
826 if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
828 if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
830 if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
836 if ((diff = a->port[0] - b->port[0]) != 0)
838 if ((diff = a->port[1] - b->port[1]) != 0)
845 pf_state_compare_id(struct pf_state *a, struct pf_state *b)
851 if (a->creatorid > b->creatorid)
853 if (a->creatorid < b->creatorid)
860 pf_state_key_attach(struct pf_state_key *sk, struct pf_state *s, int idx)
862 struct pf_state_item *si;
863 struct pf_state_key *cur;
864 struct pf_state *olds = NULL;
867 KASSERT(s->key[idx] == NULL, ("%s: key is null!", __FUNCTION__));
869 KASSERT(s->key[idx] == NULL); /* XXX handle this? */
873 if ((cur = RB_INSERT(pf_state_tree, &V_pf_statetbl, sk)) != NULL) {
875 if ((cur = RB_INSERT(pf_state_tree, &pf_statetbl, sk)) != NULL) {
877 /* key exists. check for same kif, if none, add to key */
878 TAILQ_FOREACH(si, &cur->states, entry)
879 if (si->s->kif == s->kif &&
880 si->s->direction == s->direction) {
881 if (sk->proto == IPPROTO_TCP &&
882 si->s->src.state >= TCPS_FIN_WAIT_2 &&
883 si->s->dst.state >= TCPS_FIN_WAIT_2) {
884 si->s->src.state = si->s->dst.state =
886 /* unlink late or sks can go away */
890 if (V_pf_status.debug >= PF_DEBUG_MISC) {
892 if (pf_status.debug >= PF_DEBUG_MISC) {
894 printf("pf: %s key attach "
896 (idx == PF_SK_WIRE) ?
899 pf_print_state_parts(s,
900 (idx == PF_SK_WIRE) ?
902 (idx == PF_SK_STACK) ?
904 printf(", existing: ");
905 pf_print_state_parts(si->s,
906 (idx == PF_SK_WIRE) ?
908 (idx == PF_SK_STACK) ?
913 pool_put(&V_pf_state_key_pl, sk);
915 pool_put(&pf_state_key_pl, sk);
917 return (-1); /* collision! */
921 pool_put(&V_pf_state_key_pl, sk);
923 pool_put(&pf_state_key_pl, sk);
930 if ((si = pool_get(&V_pf_state_item_pl, PR_NOWAIT)) == NULL) {
932 if ((si = pool_get(&pf_state_item_pl, PR_NOWAIT)) == NULL) {
934 pf_state_key_detach(s, idx);
939 /* list is sorted, if-bound states before floating */
941 if (s->kif == V_pfi_all)
943 if (s->kif == pfi_all)
945 TAILQ_INSERT_TAIL(&s->key[idx]->states, si, entry);
947 TAILQ_INSERT_HEAD(&s->key[idx]->states, si, entry);
950 pf_unlink_state(olds);
956 pf_detach_state(struct pf_state *s)
958 if (s->key[PF_SK_WIRE] == s->key[PF_SK_STACK])
959 s->key[PF_SK_WIRE] = NULL;
961 if (s->key[PF_SK_STACK] != NULL)
962 pf_state_key_detach(s, PF_SK_STACK);
964 if (s->key[PF_SK_WIRE] != NULL)
965 pf_state_key_detach(s, PF_SK_WIRE);
969 pf_state_key_detach(struct pf_state *s, int idx)
971 struct pf_state_item *si;
973 si = TAILQ_FIRST(&s->key[idx]->states);
974 while (si && si->s != s)
975 si = TAILQ_NEXT(si, entry);
978 TAILQ_REMOVE(&s->key[idx]->states, si, entry);
980 pool_put(&V_pf_state_item_pl, si);
982 pool_put(&pf_state_item_pl, si);
986 if (TAILQ_EMPTY(&s->key[idx]->states)) {
988 RB_REMOVE(pf_state_tree, &V_pf_statetbl, s->key[idx]);
990 RB_REMOVE(pf_state_tree, &pf_statetbl, s->key[idx]);
992 if (s->key[idx]->reverse)
993 s->key[idx]->reverse->reverse = NULL;
995 /* XXX: implement this */
997 if (s->key[idx]->inp)
998 s->key[idx]->inp->inp_pf_sk = NULL;
1001 pool_put(&V_pf_state_key_pl, s->key[idx]);
1003 pool_put(&pf_state_key_pl, s->key[idx]);
1009 struct pf_state_key *
1010 pf_alloc_state_key(int pool_flags)
1012 struct pf_state_key *sk;
1015 if ((sk = pool_get(&V_pf_state_key_pl, pool_flags)) == NULL)
1017 if ((sk = pool_get(&pf_state_key_pl, pool_flags)) == NULL)
1020 TAILQ_INIT(&sk->states);
1026 pf_state_key_setup(struct pf_pdesc *pd, struct pf_rule *nr,
1027 struct pf_state_key **skw, struct pf_state_key **sks,
1028 struct pf_state_key **skp, struct pf_state_key **nkp,
1029 struct pf_addr *saddr, struct pf_addr *daddr,
1030 u_int16_t sport, u_int16_t dport)
1033 KASSERT((*skp == NULL && *nkp == NULL),
1034 ("%s: skp == NULL && nkp == NULL", __FUNCTION__));
1036 KASSERT((*skp == NULL && *nkp == NULL));
1039 if ((*skp = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL)
1042 PF_ACPY(&(*skp)->addr[pd->sidx], saddr, pd->af);
1043 PF_ACPY(&(*skp)->addr[pd->didx], daddr, pd->af);
1044 (*skp)->port[pd->sidx] = sport;
1045 (*skp)->port[pd->didx] = dport;
1046 (*skp)->proto = pd->proto;
1047 (*skp)->af = pd->af;
1050 if ((*nkp = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL)
1051 return (ENOMEM); /* caller must handle cleanup */
1053 /* XXX maybe just bcopy and TAILQ_INIT(&(*nkp)->states) */
1054 PF_ACPY(&(*nkp)->addr[0], &(*skp)->addr[0], pd->af);
1055 PF_ACPY(&(*nkp)->addr[1], &(*skp)->addr[1], pd->af);
1056 (*nkp)->port[0] = (*skp)->port[0];
1057 (*nkp)->port[1] = (*skp)->port[1];
1058 (*nkp)->proto = pd->proto;
1059 (*nkp)->af = pd->af;
1063 if (pd->dir == PF_IN) {
1075 pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
1076 struct pf_state_key *sks, struct pf_state *s)
1079 splassert(IPL_SOFTNET);
1085 if (pf_state_key_attach(skw, s, PF_SK_WIRE))
1087 s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
1089 if (pf_state_key_attach(skw, s, PF_SK_WIRE)) {
1091 pool_put(&V_pf_state_key_pl, sks);
1093 pool_put(&pf_state_key_pl, sks);
1097 if (pf_state_key_attach(sks, s, PF_SK_STACK)) {
1098 pf_state_key_detach(s, PF_SK_WIRE);
1103 if (s->id == 0 && s->creatorid == 0) {
1105 s->id = htobe64(V_pf_status.stateid++);
1106 s->creatorid = V_pf_status.hostid;
1108 s->id = htobe64(pf_status.stateid++);
1109 s->creatorid = pf_status.hostid;
1113 if (RB_INSERT(pf_state_tree_id, &V_tree_id, s) != NULL) {
1114 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1116 if (RB_INSERT(pf_state_tree_id, &tree_id, s) != NULL) {
1117 if (pf_status.debug >= PF_DEBUG_MISC) {
1119 printf("pf: state insert failed: "
1120 "id: %016llx creatorid: %08x",
1122 (unsigned long long)betoh64(s->id), ntohl(s->creatorid));
1124 betoh64(s->id), ntohl(s->creatorid));
1132 TAILQ_INSERT_TAIL(&V_state_list, s, entry_list);
1133 V_pf_status.fcounters[FCNT_STATE_INSERT]++;
1134 V_pf_status.states++;
1136 TAILQ_INSERT_TAIL(&state_list, s, entry_list);
1137 pf_status.fcounters[FCNT_STATE_INSERT]++;
1140 pfi_kif_ref(kif, PFI_KIF_REF_STATE);
1143 if (pfsync_insert_state_ptr != NULL)
1144 pfsync_insert_state_ptr(s);
1146 pfsync_insert_state(s);
1153 pf_find_state_byid(struct pf_state_cmp *key)
1156 V_pf_status.fcounters[FCNT_STATE_SEARCH]++;
1158 return (RB_FIND(pf_state_tree_id, &V_tree_id, (struct pf_state *)key));
1160 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1162 return (RB_FIND(pf_state_tree_id, &tree_id, (struct pf_state *)key));
1166 /* XXX debug function, intended to be removed one day */
1168 pf_compare_state_keys(struct pf_state_key *a, struct pf_state_key *b,
1169 struct pfi_kif *kif, u_int dir)
1171 /* a (from hdr) and b (new) must be exact opposites of each other */
1172 if (a->af == b->af && a->proto == b->proto &&
1173 PF_AEQ(&a->addr[0], &b->addr[1], a->af) &&
1174 PF_AEQ(&a->addr[1], &b->addr[0], a->af) &&
1175 a->port[0] == b->port[1] &&
1176 a->port[1] == b->port[0])
1179 /* mismatch. must not happen. */
1180 printf("pf: state key linking mismatch! dir=%s, "
1181 "if=%s, stored af=%u, a0: ",
1182 dir == PF_OUT ? "OUT" : "IN", kif->pfik_name, a->af);
1183 pf_print_host(&a->addr[0], a->port[0], a->af);
1185 pf_print_host(&a->addr[1], a->port[1], a->af);
1186 printf(", proto=%u", a->proto);
1187 printf(", found af=%u, a0: ", b->af);
1188 pf_print_host(&b->addr[0], b->port[0], b->af);
1190 pf_print_host(&b->addr[1], b->port[1], b->af);
1191 printf(", proto=%u", b->proto);
1199 pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir,
1200 struct mbuf *m, struct pf_mtag *pftag)
1202 pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir,
1206 struct pf_state_key *sk;
1207 struct pf_state_item *si;
1210 V_pf_status.fcounters[FCNT_STATE_SEARCH]++;
1212 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1216 if (dir == PF_OUT && pftag->statekey &&
1217 ((struct pf_state_key *)pftag->statekey)->reverse)
1218 sk = ((struct pf_state_key *)pftag->statekey)->reverse;
1221 if ((sk = RB_FIND(pf_state_tree, &V_pf_statetbl,
1223 if ((sk = RB_FIND(pf_state_tree, &pf_statetbl,
1225 (struct pf_state_key *)key)) == NULL)
1227 if (dir == PF_OUT && pftag->statekey &&
1228 pf_compare_state_keys(pftag->statekey, sk,
1230 ((struct pf_state_key *)
1231 pftag->statekey)->reverse = sk;
1232 sk->reverse = pftag->statekey;
1236 if (dir == PF_OUT && m->m_pkthdr.pf.statekey &&
1237 ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse)
1238 sk = ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse;
1241 if ((sk = RB_FIND(pf_state_tree, &V_pf_statetbl,
1243 if ((sk = RB_FIND(pf_state_tree, &pf_statetbl,
1245 (struct pf_state_key *)key)) == NULL)
1247 if (dir == PF_OUT && m->m_pkthdr.pf.statekey &&
1248 pf_compare_state_keys(m->m_pkthdr.pf.statekey, sk,
1250 ((struct pf_state_key *)
1251 m->m_pkthdr.pf.statekey)->reverse = sk;
1252 sk->reverse = m->m_pkthdr.pf.statekey;
1259 pftag->statekey = NULL;
1261 m->m_pkthdr.pf.statekey = NULL;
1264 /* list is sorted, if-bound states before floating ones */
1265 TAILQ_FOREACH(si, &sk->states, entry)
1267 if ((si->s->kif == V_pfi_all || si->s->kif == kif) &&
1269 if ((si->s->kif == pfi_all || si->s->kif == kif) &&
1271 sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
1272 si->s->key[PF_SK_STACK]))
1279 pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
1281 struct pf_state_key *sk;
1282 struct pf_state_item *si, *ret = NULL;
1285 V_pf_status.fcounters[FCNT_STATE_SEARCH]++;
1287 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1291 sk = RB_FIND(pf_state_tree, &V_pf_statetbl, (struct pf_state_key *)key);
1293 sk = RB_FIND(pf_state_tree, &pf_statetbl, (struct pf_state_key *)key);
1296 TAILQ_FOREACH(si, &sk->states, entry)
1297 if (dir == PF_INOUT ||
1298 (sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
1299 si->s->key[PF_SK_STACK]))) {
1309 return (ret ? ret->s : NULL);
1312 /* END state table stuff */
1316 pf_purge_thread(void *v)
1323 CURVNET_SET((struct vnet *)v);
1326 tsleep(pf_purge_thread, PWAIT, "pftm", 1 * hz);
1329 sx_slock(&V_pf_consistency_lock);
1333 if (V_pf_end_threads) {
1335 sx_sunlock(&V_pf_consistency_lock);
1336 sx_xlock(&V_pf_consistency_lock);
1339 pf_purge_expired_states(V_pf_status.states, 1);
1340 pf_purge_expired_fragments();
1341 pf_purge_expired_src_nodes(1);
1344 sx_xunlock(&V_pf_consistency_lock);
1346 wakeup(pf_purge_thread);
1352 /* process a fraction of the state table every second */
1354 if (!pf_purge_expired_states(1 + (V_pf_status.states /
1355 V_pf_default_rule.timeout[PFTM_INTERVAL]), 0)) {
1357 sx_sunlock(&V_pf_consistency_lock);
1358 sx_xlock(&V_pf_consistency_lock);
1362 pf_purge_expired_states(1 + (V_pf_status.states /
1363 V_pf_default_rule.timeout[PFTM_INTERVAL]), 1);
1366 pf_purge_expired_states(1 + (pf_status.states
1367 / pf_default_rule.timeout[PFTM_INTERVAL]));
1370 /* purge other expired types every PFTM_INTERVAL seconds */
1372 if (++nloops >= V_pf_default_rule.timeout[PFTM_INTERVAL]) {
1374 if (++nloops >= pf_default_rule.timeout[PFTM_INTERVAL]) {
1376 pf_purge_expired_fragments();
1377 pf_purge_expired_src_nodes(0);
1385 sx_xunlock(&V_pf_consistency_lock);
1387 sx_sunlock(&V_pf_consistency_lock);
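/*
 * Illustrative note (not part of pf): with the default
 * "set timeout interval 10", each one-second wakeup of the purge
 * thread checks 1 + states/10 entries, so the whole state list is
 * swept roughly once per interval; expired fragments and source
 * nodes are only purged on every tenth pass (when nloops reaches
 * the interval).
 */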
1394 pf_state_expires(const struct pf_state *state)
1401 /* handle all PFTM_* > PFTM_MAX here */
1402 if (state->timeout == PFTM_PURGE)
1403 return (time_second);
1404 if (state->timeout == PFTM_UNTIL_PACKET)
1407 KASSERT(state->timeout != PFTM_UNLINKED,
1408 ("pf_state_expires: timeout == PFTM_UNLINKED"));
1409 KASSERT((state->timeout < PFTM_MAX),
1410 ("pf_state_expires: timeout > PFTM_MAX"));
1412 KASSERT(state->timeout != PFTM_UNLINKED);
1413 KASSERT(state->timeout < PFTM_MAX);
1415 timeout = state->rule.ptr->timeout[state->timeout];
1418 timeout = V_pf_default_rule.timeout[state->timeout];
1420 timeout = pf_default_rule.timeout[state->timeout];
1422 start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
1424 end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
1425 states = state->rule.ptr->states_cur;
1428 start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
1429 end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
1430 states = V_pf_status.states;
1432 start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
1433 end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
1434 states = pf_status.states;
1437 if (end && states > start && start < end) {
1439 return (state->expire + timeout * (end - states) /
1442 return (time_second);
1444 return (state->expire + timeout);
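/*
 * Illustrative example (not part of pf): with adaptive timeouts the
 * remaining lifetime computed above shrinks linearly as the state
 * table fills.  Assuming the defaults (adaptive.start 6000,
 * adaptive.end 12000) and a base timeout for the state's current
 * stage:
 *
 *	states <= 6000  -> full timeout (scaling not active yet)
 *	states  = 9000  -> timeout * (12000 - 9000) / (12000 - 6000)
 *	                 = timeout / 2
 *	states >= 12000 -> the state is treated as already expired
 */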
1449 pf_purge_expired_src_nodes(int waslocked)
1452 pf_purge_expired_src_nodes(int waslocked)
1455 struct pf_src_node *cur, *next;
1456 int locked = waslocked;
1459 for (cur = RB_MIN(pf_src_tree, &V_tree_src_tracking); cur; cur = next) {
1460 next = RB_NEXT(pf_src_tree, &V_tree_src_tracking, cur);
1462 for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
1463 next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);
1466 if (cur->states <= 0 && cur->expire <= time_second) {
1469 if (!sx_try_upgrade(&V_pf_consistency_lock))
1472 rw_enter_write(&pf_consistency_lock);
1474 next = RB_NEXT(pf_src_tree,
1476 &V_tree_src_tracking, cur);
1478 &tree_src_tracking, cur);
1482 if (cur->rule.ptr != NULL) {
1483 cur->rule.ptr->src_nodes--;
1484 if (cur->rule.ptr->states_cur <= 0 &&
1485 cur->rule.ptr->max_src_nodes <= 0)
1486 pf_rm_rule(NULL, cur->rule.ptr);
1489 RB_REMOVE(pf_src_tree, &V_tree_src_tracking, cur);
1490 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
1491 V_pf_status.src_nodes--;
1492 pool_put(&V_pf_src_tree_pl, cur);
1494 RB_REMOVE(pf_src_tree, &tree_src_tracking, cur);
1495 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
1496 pf_status.src_nodes--;
1497 pool_put(&pf_src_tree_pl, cur);
1502 if (locked && !waslocked)
1505 sx_downgrade(&V_pf_consistency_lock);
1509 rw_exit_write(&pf_consistency_lock);
1514 pf_src_tree_remove_state(struct pf_state *s)
1518 if (s->src_node != NULL) {
1520 --s->src_node->conn;
1521 if (--s->src_node->states <= 0) {
1522 timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
1526 V_pf_default_rule.timeout[PFTM_SRC_NODE];
1528 pf_default_rule.timeout[PFTM_SRC_NODE];
1530 s->src_node->expire = time_second + timeout;
1533 if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
1534 if (--s->nat_src_node->states <= 0) {
1535 timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
1539 V_pf_default_rule.timeout[PFTM_SRC_NODE];
1541 pf_default_rule.timeout[PFTM_SRC_NODE];
1543 s->nat_src_node->expire = time_second + timeout;
1546 s->src_node = s->nat_src_node = NULL;
1549 /* callers should be at splsoftnet */
1551 pf_unlink_state(struct pf_state *cur)
1554 if (cur->local_flags & PFSTATE_EXPIRING)
1556 cur->local_flags |= PFSTATE_EXPIRING;
1558 splassert(IPL_SOFTNET);
1561 if (cur->src.state == PF_TCPS_PROXY_DST) {
1562 /* XXX wire key the right one? */
1564 pf_send_tcp(NULL, cur->rule.ptr, cur->key[PF_SK_WIRE]->af,
1566 pf_send_tcp(cur->rule.ptr, cur->key[PF_SK_WIRE]->af,
1568 &cur->key[PF_SK_WIRE]->addr[1],
1569 &cur->key[PF_SK_WIRE]->addr[0],
1570 cur->key[PF_SK_WIRE]->port[1],
1571 cur->key[PF_SK_WIRE]->port[0],
1572 cur->src.seqhi, cur->src.seqlo + 1,
1573 TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
1576 RB_REMOVE(pf_state_tree_id, &V_tree_id, cur);
1578 RB_REMOVE(pf_state_tree_id, &tree_id, cur);
1581 if (cur->state_flags & PFSTATE_PFLOW)
1583 if (export_pflow_ptr != NULL)
1584 export_pflow_ptr(cur);
1591 if (pfsync_delete_state_ptr != NULL)
1592 pfsync_delete_state_ptr(cur);
1594 pfsync_delete_state(cur);
1597 cur->timeout = PFTM_UNLINKED;
1598 pf_src_tree_remove_state(cur);
1599 pf_detach_state(cur);
1602 /* callers should be at splsoftnet and hold the
1603 * write_lock on pf_consistency_lock */
1605 pf_free_state(struct pf_state *cur)
1608 splassert(IPL_SOFTNET);
1613 if (pfsync_state_in_use_ptr != NULL &&
1614 pfsync_state_in_use_ptr(cur))
1616 if (pfsync_state_in_use(cur))
1621 KASSERT(cur->timeout == PFTM_UNLINKED,
1622 ("pf_free_state: cur->timeout != PFTM_UNLINKED"));
1624 KASSERT(cur->timeout == PFTM_UNLINKED);
1626 if (--cur->rule.ptr->states_cur <= 0 &&
1627 cur->rule.ptr->src_nodes <= 0)
1628 pf_rm_rule(NULL, cur->rule.ptr);
1629 if (cur->nat_rule.ptr != NULL)
1630 if (--cur->nat_rule.ptr->states_cur <= 0 &&
1631 cur->nat_rule.ptr->src_nodes <= 0)
1632 pf_rm_rule(NULL, cur->nat_rule.ptr);
1633 if (cur->anchor.ptr != NULL)
1634 if (--cur->anchor.ptr->states_cur <= 0)
1635 pf_rm_rule(NULL, cur->anchor.ptr);
1636 pf_normalize_tcp_cleanup(cur);
1637 pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE);
1639 TAILQ_REMOVE(&V_state_list, cur, entry_list);
1641 TAILQ_REMOVE(&state_list, cur, entry_list);
1644 pf_tag_unref(cur->tag);
1646 pool_put(&V_pf_state_pl, cur);
1647 V_pf_status.fcounters[FCNT_STATE_REMOVALS]++;
1648 V_pf_status.states--;
1650 pool_put(&pf_state_pl, cur);
1651 pf_status.fcounters[FCNT_STATE_REMOVALS]++;
1658 pf_purge_expired_states(u_int32_t maxcheck, int waslocked)
1661 pf_purge_expired_states(u_int32_t maxcheck)
1664 static struct pf_state *cur = NULL;
1665 struct pf_state *next;
1667 int locked = waslocked;
1672 while (maxcheck--) {
1673 /* wrap to start of list when we hit the end */
1676 cur = TAILQ_FIRST(&V_state_list);
1678 cur = TAILQ_FIRST(&state_list);
1681 break; /* list empty */
1684 /* get next state, as cur may get deleted */
1685 next = TAILQ_NEXT(cur, entry_list);
1687 if (cur->timeout == PFTM_UNLINKED) {
1688 /* free unlinked state */
1691 if (!sx_try_upgrade(&V_pf_consistency_lock))
1694 rw_enter_write(&pf_consistency_lock);
1699 } else if (pf_state_expires(cur) <= time_second) {
1700 /* unlink and free expired state */
1701 pf_unlink_state(cur);
1704 if (!sx_try_upgrade(&V_pf_consistency_lock))
1707 rw_enter_write(&pf_consistency_lock);
1717 if (!waslocked && locked)
1718 sx_downgrade(&V_pf_consistency_lock);
1723 rw_exit_write(&pf_consistency_lock);
1728 pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
1730 if (aw->type != PF_ADDR_TABLE)
1732 if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname, 1)) == NULL)
1738 pf_tbladdr_remove(struct pf_addr_wrap *aw)
1740 if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
1742 pfr_detach_table(aw->p.tbl);
1747 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
1749 struct pfr_ktable *kt = aw->p.tbl;
1751 if (aw->type != PF_ADDR_TABLE || kt == NULL)
1753 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1754 kt = kt->pfrkt_root;
1756 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
1761 pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
1766 u_int32_t a = ntohl(addr->addr32[0]);
1767 printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
1779 u_int8_t i, curstart, curend, maxstart, maxend;
1780 curstart = curend = maxstart = maxend = 255;
1781 for (i = 0; i < 8; i++) {
1782 if (!addr->addr16[i]) {
1783 if (curstart == 255)
1787 if ((curend - curstart) >
1788 (maxend - maxstart)) {
1789 maxstart = curstart;
1792 curstart = curend = 255;
1795 if ((curend - curstart) >
1796 (maxend - maxstart)) {
1797 maxstart = curstart;
1800 for (i = 0; i < 8; i++) {
1801 if (i >= maxstart && i <= maxend) {
1807 b = ntohs(addr->addr16[i]);
1824 pf_print_state(struct pf_state *s)
1826 pf_print_state_parts(s, NULL, NULL);
1830 pf_print_state_parts(struct pf_state *s,
1831 struct pf_state_key *skwp, struct pf_state_key *sksp)
1833 struct pf_state_key *skw, *sks;
1834 u_int8_t proto, dir;
1836 /* Do our best to fill these, but they're skipped if NULL */
1837 skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
1838 sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
1839 proto = skw ? skw->proto : (sks ? sks->proto : 0);
1840 dir = s ? s->direction : 0;
1858 case IPPROTO_ICMPV6:
1862 printf("%u", skw->proto);
1875 pf_print_host(&skw->addr[0], skw->port[0], skw->af);
1877 pf_print_host(&skw->addr[1], skw->port[1], skw->af);
1882 pf_print_host(&sks->addr[0], sks->port[0], sks->af);
1884 pf_print_host(&sks->addr[1], sks->port[1], sks->af);
1889 if (proto == IPPROTO_TCP) {
1890 printf(" [lo=%u high=%u win=%u modulator=%u",
1891 s->src.seqlo, s->src.seqhi,
1892 s->src.max_win, s->src.seqdiff);
1893 if (s->src.wscale && s->dst.wscale)
1894 printf(" wscale=%u",
1895 s->src.wscale & PF_WSCALE_MASK);
1897 printf(" [lo=%u high=%u win=%u modulator=%u",
1898 s->dst.seqlo, s->dst.seqhi,
1899 s->dst.max_win, s->dst.seqdiff);
1900 if (s->src.wscale && s->dst.wscale)
1901 printf(" wscale=%u",
1902 s->dst.wscale & PF_WSCALE_MASK);
1905 printf(" %u:%u", s->src.state, s->dst.state);
1910 pf_print_flags(u_int8_t f)
1932 #define PF_SET_SKIP_STEPS(i) \
1934 while (head[i] != cur) { \
1935 head[i]->skip[i].ptr = cur; \
1936 head[i] = TAILQ_NEXT(head[i], entries); \
1941 pf_calc_skip_steps(struct pf_rulequeue *rules)
1943 struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
1946 cur = TAILQ_FIRST(rules);
1948 for (i = 0; i < PF_SKIP_COUNT; ++i)
1950 while (cur != NULL) {
1952 if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
1953 PF_SET_SKIP_STEPS(PF_SKIP_IFP);
1954 if (cur->direction != prev->direction)
1955 PF_SET_SKIP_STEPS(PF_SKIP_DIR);
1956 if (cur->af != prev->af)
1957 PF_SET_SKIP_STEPS(PF_SKIP_AF);
1958 if (cur->proto != prev->proto)
1959 PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
1960 if (cur->src.neg != prev->src.neg ||
1961 pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
1962 PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
1963 if (cur->src.port[0] != prev->src.port[0] ||
1964 cur->src.port[1] != prev->src.port[1] ||
1965 cur->src.port_op != prev->src.port_op)
1966 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
1967 if (cur->dst.neg != prev->dst.neg ||
1968 pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
1969 PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
1970 if (cur->dst.port[0] != prev->dst.port[0] ||
1971 cur->dst.port[1] != prev->dst.port[1] ||
1972 cur->dst.port_op != prev->dst.port_op)
1973 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
1976 cur = TAILQ_NEXT(cur, entries);
1978 for (i = 0; i < PF_SKIP_COUNT; ++i)
1979 PF_SET_SKIP_STEPS(i);
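/*
 * Illustrative note (not part of pf): the skip steps computed above
 * let the rule evaluation loop jump over runs of rules that share a
 * field value.  For example, if rules 1-50 are all "on em0" and the
 * packet arrived on em1, rule 1's skip[PF_SKIP_IFP] points straight
 * at rule 51, so rules 2-50 are never compared against the packet.
 */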
1983 pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
1985 if (aw1->type != aw2->type)
1987 switch (aw1->type) {
1988 case PF_ADDR_ADDRMASK:
1990 if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
1992 if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
1995 case PF_ADDR_DYNIFTL:
1996 return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
1997 case PF_ADDR_NOROUTE:
1998 case PF_ADDR_URPFFAILED:
2001 return (aw1->p.tbl != aw2->p.tbl);
2002 case PF_ADDR_RTLABEL:
2003 return (aw1->v.rtlabel != aw2->v.rtlabel);
2005 printf("invalid address type: %d\n", aw1->type);
2011 * Checksum updates are a little complicated because the checksum in the TCP/UDP
2012 * header isn't always a full checksum. In some cases (i.e. output) it's a
2013 * pseudo-header checksum, which is a partial checksum over src/dst IP
2014 * addresses, protocol number and length.
2016 * That means we have the following cases:
2017 * * Input or forwarding: we don't have TSO, the checksum fields are full
2018 * checksums, we need to update the checksum whenever we change anything.
2019 * * Output (i.e. the checksum is a pseudo-header checksum):
2020 * x The field being updated is src/dst address or affects the length of
2021 * the packet. We need to update the pseudo-header checksum (note that this
2022 * checksum is not ones' complement).
2023 * x Some other field is being modified (e.g. src/dst port numbers): We
2024 * don't have to update anything.
2027 pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
2033 l = cksum + old - new;
2034 l = (l >> 16) + (l & 65535);
2042 pf_proto_cksum_fixup(struct mbuf *m, u_int16_t cksum, u_int16_t old,
2043 u_int16_t new, u_int8_t udp)
2045 if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
2048 return (pf_cksum_fixup(cksum, old, new, udp));
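/*
 * Illustrative sketch (not part of pf): pf_cksum_fixup() is the
 * classic incremental Internet-checksum update (in the spirit of
 * RFC 1624): when one 16-bit word of the covered data changes from
 * 'old' to 'new', the stored checksum is repaired without summing
 * the whole packet again.  A minimal userland model of the same
 * arithmetic:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint16_t
cksum_fixup(uint16_t cksum, uint16_t old, uint16_t new, uint8_t udp)
{
	uint32_t l;

	l = cksum + old - new;
	l = (l >> 16) + (l & 65535);	/* fold the carry back in */
	l = l & 65535;
	if (udp && l == 0)
		l = 65535;		/* UDP: zero means "no checksum" */
	return (l);
}

int
main(void)
{
	/* e.g. rewriting a TCP port from 1234 to 4321 under NAT */
	uint16_t sum = 0xab12;

	printf("fixed checksum: 0x%04x\n", cksum_fixup(sum, 1234, 4321, 0));
	return (0);
}
#endif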
2052 pf_change_ap(struct mbuf *m, struct pf_addr *a, u_int16_t *p, u_int16_t *ic,
2053 u_int16_t *pc, struct pf_addr *an, u_int16_t pn, u_int8_t u,
2059 PF_ACPY(&ao, a, af);
2062 if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
2070 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2071 ao.addr16[0], an->addr16[0], 0),
2072 ao.addr16[1], an->addr16[1], 0);
2075 *pc = pf_cksum_fixup(pf_cksum_fixup(*pc,
2076 ao.addr16[0], an->addr16[0], u),
2077 ao.addr16[1], an->addr16[1], u);
2079 *pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
2084 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2085 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2086 pf_cksum_fixup(pf_cksum_fixup(*pc,
2087 ao.addr16[0], an->addr16[0], u),
2088 ao.addr16[1], an->addr16[1], u),
2089 ao.addr16[2], an->addr16[2], u),
2090 ao.addr16[3], an->addr16[3], u),
2091 ao.addr16[4], an->addr16[4], u),
2092 ao.addr16[5], an->addr16[5], u),
2093 ao.addr16[6], an->addr16[6], u),
2094 ao.addr16[7], an->addr16[7], u);
2096 *pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
2101 if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA |
2102 CSUM_DELAY_DATA_IPV6)) {
2109 /* Changes a u_int32_t. Uses a void * so there are no align restrictions */
2111 pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
2115 memcpy(&ao, a, sizeof(ao));
2116 memcpy(a, &an, sizeof(u_int32_t));
2117 *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
2118 ao % 65536, an % 65536, u);
2122 pf_change_proto_a(struct mbuf *m, void *a, u_int16_t *c, u_int32_t an, u_int8_t udp)
2126 memcpy(&ao, a, sizeof(ao));
2127 memcpy(a, &an, sizeof(u_int32_t));
2129 *c = pf_proto_cksum_fixup(m,
2130 pf_proto_cksum_fixup(m, *c, ao / 65536, an / 65536, udp),
2131 ao % 65536, an % 65536, udp);
2136 pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
2140 PF_ACPY(&ao, a, AF_INET6);
2141 PF_ACPY(a, an, AF_INET6);
2143 *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2144 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2145 pf_cksum_fixup(pf_cksum_fixup(*c,
2146 ao.addr16[0], an->addr16[0], u),
2147 ao.addr16[1], an->addr16[1], u),
2148 ao.addr16[2], an->addr16[2], u),
2149 ao.addr16[3], an->addr16[3], u),
2150 ao.addr16[4], an->addr16[4], u),
2151 ao.addr16[5], an->addr16[5], u),
2152 ao.addr16[6], an->addr16[6], u),
2153 ao.addr16[7], an->addr16[7], u);
2158 pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
2159 struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
2160 u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
2162 struct pf_addr oia, ooa;
2164 PF_ACPY(&oia, ia, af);
2166 PF_ACPY(&ooa, oa, af);
2168 /* Change inner protocol port, fix inner protocol checksum. */
2170 u_int16_t oip = *ip;
2177 *pc = pf_cksum_fixup(*pc, oip, *ip, u);
2178 *ic = pf_cksum_fixup(*ic, oip, *ip, 0);
2180 *ic = pf_cksum_fixup(*ic, opc, *pc, 0);
2182 /* Change inner ip address, fix inner ip and icmp checksums. */
2183 PF_ACPY(ia, na, af);
2187 u_int32_t oh2c = *h2c;
2189 *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
2190 oia.addr16[0], ia->addr16[0], 0),
2191 oia.addr16[1], ia->addr16[1], 0);
2192 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2193 oia.addr16[0], ia->addr16[0], 0),
2194 oia.addr16[1], ia->addr16[1], 0);
2195 *ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
2201 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2202 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2203 pf_cksum_fixup(pf_cksum_fixup(*ic,
2204 oia.addr16[0], ia->addr16[0], u),
2205 oia.addr16[1], ia->addr16[1], u),
2206 oia.addr16[2], ia->addr16[2], u),
2207 oia.addr16[3], ia->addr16[3], u),
2208 oia.addr16[4], ia->addr16[4], u),
2209 oia.addr16[5], ia->addr16[5], u),
2210 oia.addr16[6], ia->addr16[6], u),
2211 oia.addr16[7], ia->addr16[7], u);
2215 /* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
2217 PF_ACPY(oa, na, af);
2221 *hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
2222 ooa.addr16[0], oa->addr16[0], 0),
2223 ooa.addr16[1], oa->addr16[1], 0);
2228 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2229 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2230 pf_cksum_fixup(pf_cksum_fixup(*ic,
2231 ooa.addr16[0], oa->addr16[0], u),
2232 ooa.addr16[1], oa->addr16[1], u),
2233 ooa.addr16[2], oa->addr16[2], u),
2234 ooa.addr16[3], oa->addr16[3], u),
2235 ooa.addr16[4], oa->addr16[4], u),
2236 ooa.addr16[5], oa->addr16[5], u),
2237 ooa.addr16[6], oa->addr16[6], u),
2238 ooa.addr16[7], oa->addr16[7], u);
2247 * Need to modulate the sequence numbers in the TCP SACK option
2248 * (credits to Krzysztof Pfaff for report and patch)
2251 pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
2252 struct tcphdr *th, struct pf_state_peer *dst)
2254 int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
2256 u_int8_t opts[TCP_MAXOLEN], *opt = opts;
2258 u_int8_t opts[MAX_TCPOPTLEN], *opt = opts;
2260 int copyback = 0, i, olen;
2261 struct sackblk sack;
2263 #define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2)
2264 if (hlen < TCPOLEN_SACKLEN ||
2265 !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
2268 while (hlen >= TCPOLEN_SACKLEN) {
2271 case TCPOPT_EOL: /* FALLTHROUGH */
2279 if (olen >= TCPOLEN_SACKLEN) {
2280 for (i = 2; i + TCPOLEN_SACK <= olen;
2281 i += TCPOLEN_SACK) {
2282 memcpy(&sack, &opt[i], sizeof(sack));
2283 pf_change_proto_a(m, &sack.start, &th->th_sum,
2284 htonl(ntohl(sack.start) - dst->seqdiff), 0);
2285 pf_change_proto_a(m, &sack.end, &th->th_sum,
2286 htonl(ntohl(sack.end) - dst->seqdiff), 0);
2287 memcpy(&opt[i], &sack, sizeof(sack));
2302 m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts);
2304 m_copyback(m, off + sizeof(*th), thoptlen, opts);
2311 pf_send_tcp(struct mbuf *replyto, const struct pf_rule *r, sa_family_t af,
2313 pf_send_tcp(const struct pf_rule *r, sa_family_t af,
2315 const struct pf_addr *saddr, const struct pf_addr *daddr,
2316 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
2317 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
2318 u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp)
2331 struct pf_mtag *pf_mtag;
2345 , ("Unsupported AF %d", af));
2354 #endif /* __FreeBSD__ */
2356 /* maximum segment size tcp option */
2357 tlen = sizeof(struct tcphdr);
2364 len = sizeof(struct ip) + tlen;
2369 len = sizeof(struct ip6_hdr) + tlen;
2374 /* create outgoing mbuf */
2375 m = m_gethdr(M_DONTWAIT, MT_HEADER);
2380 mac_netinet_firewall_send(m);
2382 if ((pf_mtag = pf_get_mtag(m)) == NULL) {
2389 m->m_flags |= M_SKIP_FIREWALL;
2390 pf_mtag->tag = rtag;
2392 m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
2393 m->m_pkthdr.pf.tag = rtag;
2396 if (r != NULL && r->rtableid >= 0)
2399 M_SETFIB(m, r->rtableid);
2400 pf_mtag->rtableid = r->rtableid;
2402 m->m_pkthdr.pf.rtableid = r->rtableid;
2409 if (r != NULL && r->qid) {
2411 pf_mtag->qid = r->qid;
2413 /* add hints for ecn */
2414 pf_mtag->hdr = mtod(m, struct ip *);
2416 m->m_pkthdr.pf.qid = r->qid;
2417 /* add hints for ecn */
2418 m->m_pkthdr.pf.hdr = mtod(m, struct ip *);
2422 m->m_data += max_linkhdr;
2423 m->m_pkthdr.len = m->m_len = len;
2424 m->m_pkthdr.rcvif = NULL;
2425 bzero(m->m_data, len);
2429 h = mtod(m, struct ip *);
2431 /* IP header fields included in the TCP checksum */
2432 h->ip_p = IPPROTO_TCP;
2433 h->ip_len = htons(tlen);
2434 h->ip_src.s_addr = saddr->v4.s_addr;
2435 h->ip_dst.s_addr = daddr->v4.s_addr;
2437 th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
2442 h6 = mtod(m, struct ip6_hdr *);
2444 /* IP header fields included in the TCP checksum */
2445 h6->ip6_nxt = IPPROTO_TCP;
2446 h6->ip6_plen = htons(tlen);
2447 memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
2448 memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));
2450 th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
2456 th->th_sport = sport;
2457 th->th_dport = dport;
2458 th->th_seq = htonl(seq);
2459 th->th_ack = htonl(ack);
2460 th->th_off = tlen >> 2;
2461 th->th_flags = flags;
2462 th->th_win = htons(win);
2465 opt = (char *)(th + 1);
2466 opt[0] = TCPOPT_MAXSEG;
2469 bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
2476 th->th_sum = in_cksum(m, len);
2478 /* Finish the IP header */
2480 h->ip_hl = sizeof(*h) >> 2;
2481 h->ip_tos = IPTOS_LOWDELAY;
2483 h->ip_off = V_path_mtu_discovery ? IP_DF : 0;
2485 h->ip_ttl = ttl ? ttl : V_ip_defttl;
2487 h->ip_len = htons(len);
2488 h->ip_off = htons(ip_mtudisc ? IP_DF : 0);
2489 h->ip_ttl = ttl ? ttl : ip_defttl;
2495 ip_output(m, (void *)NULL, (void *)NULL, 0,
2496 (void *)NULL, (void *)NULL);
2498 #else /* ! __FreeBSD__ */
2499 ip_output(m, (void *)NULL, (void *)NULL, 0,
2500 (void *)NULL, (void *)NULL);
2505 struct ether_header *e = (void *)ro.ro_dst.sa_data;
2513 ro.ro_dst.sa_len = sizeof(ro.ro_dst);
2514 ro.ro_dst.sa_family = pseudo_AF_HDRCMPLT;
2515 bcopy(eh->ether_dhost, e->ether_shost, ETHER_ADDR_LEN);
2516 bcopy(eh->ether_shost, e->ether_dhost, ETHER_ADDR_LEN);
2517 e->ether_type = eh->ether_type;
2520 /* XXX_IMPORT: later */
2521 ip_output(m, (void *)NULL, &ro, 0,
2522 (void *)NULL, (void *)NULL);
2524 #else /* ! __FreeBSD__ */
2525 ip_output(m, (void *)NULL, &ro, IP_ROUTETOETHER,
2526 (void *)NULL, (void *)NULL);
2534 th->th_sum = in6_cksum(m, IPPROTO_TCP,
2535 sizeof(struct ip6_hdr), tlen);
2537 h6->ip6_vfc |= IPV6_VERSION;
2538 h6->ip6_hlim = IPV6_DEFHLIM;
2542 ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
2545 ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
2553 pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
2561 struct pf_mtag *pf_mtag;
2565 m0 = m_copypacket(m, M_DONTWAIT);
2569 if ((m0 = m_copy(m, 0, M_COPYALL)) == NULL)
2574 if ((pf_mtag = pf_get_mtag(m0)) == NULL)
2577 m0->m_flags |= M_SKIP_FIREWALL;
2579 m0->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
2582 if (r->rtableid >= 0)
2585 M_SETFIB(m0, r->rtableid);
2586 pf_mtag->rtableid = r->rtableid;
2588 m0->m_pkthdr.pf.rtableid = r->rtableid;
2597 pf_mtag->qid = r->qid;
2598 /* add hints for ecn */
2599 pf_mtag->hdr = mtod(m0, struct ip *);
2601 m0->m_pkthdr.pf.qid = r->qid;
2602 /* add hints for ecn */
2603 m0->m_pkthdr.pf.hdr = mtod(m0, struct ip *);
2612 /* icmp_error() expects host byte ordering */
2613 ip = mtod(m0, struct ip *);
2617 icmp_error(m0, type, code, 0, 0);
2620 icmp_error(m0, type, code, 0, 0);
2629 icmp6_error(m0, type, code, 0);
2639 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
2640 * If n is 0, they match if they are equal. If n is != 0, they match if they
2644 pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
2645 struct pf_addr *b, sa_family_t af)
2652 if ((a->addr32[0] & m->addr32[0]) ==
2653 (b->addr32[0] & m->addr32[0]))
2659 if (((a->addr32[0] & m->addr32[0]) ==
2660 (b->addr32[0] & m->addr32[0])) &&
2661 ((a->addr32[1] & m->addr32[1]) ==
2662 (b->addr32[1] & m->addr32[1])) &&
2663 ((a->addr32[2] & m->addr32[2]) ==
2664 (b->addr32[2] & m->addr32[2])) &&
2665 ((a->addr32[3] & m->addr32[3]) ==
2666 (b->addr32[3] & m->addr32[3])))
2685 * Return 1 if b <= a <= e, otherwise return 0.
2688 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
2689 struct pf_addr *a, sa_family_t af)
2694 if ((a->addr32[0] < b->addr32[0]) ||
2695 (a->addr32[0] > e->addr32[0]))
2704 for (i = 0; i < 4; ++i)
2705 if (a->addr32[i] > b->addr32[i])
2707 else if (a->addr32[i] < b->addr32[i])
2710 for (i = 0; i < 4; ++i)
2711 if (a->addr32[i] < e->addr32[i])
2713 else if (a->addr32[i] > e->addr32[i])
2723 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
2727 return ((p > a1) && (p < a2));
2729 return ((p < a1) || (p > a2));
2731 return ((p >= a1) && (p <= a2));
2745 return (0); /* never reached */
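/*
 * Illustrative example (not part of pf): pf_match() implements the
 * pf.conf range operators used for ports, uids and gids, e.g.
 *
 *	"port 1000 >< 2000"  (PF_OP_IRG, exclusive):
 *		pf_match(PF_OP_IRG, 1000, 2000, 1500) == 1
 *		pf_match(PF_OP_IRG, 1000, 2000, 2000) == 0
 *	"port 1000:2000"     (PF_OP_RRG, inclusive):
 *		pf_match(PF_OP_RRG, 1000, 2000, 2000) == 1
 */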
2749 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
2754 return (pf_match(op, a1, a2, p));
2758 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
2760 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2762 return (pf_match(op, a1, a2, u));
2766 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
2768 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2770 return (pf_match(op, a1, a2, g));
2775 pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag,
2776 struct pf_mtag *pf_mtag)
2778 pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag)
2783 *tag = pf_mtag->tag;
2785 *tag = m->m_pkthdr.pf.tag;
2788 return ((!r->match_tag_not && r->match_tag == *tag) ||
2789 (r->match_tag_not && r->match_tag != *tag));
2794 pf_tag_packet(struct mbuf *m, int tag, int rtableid,
2795 struct pf_mtag *pf_mtag)
2797 pf_tag_packet(struct mbuf *m, int tag, int rtableid)
2800 if (tag <= 0 && rtableid < 0)
2807 m->m_pkthdr.pf.tag = tag;
2812 M_SETFIB(m, rtableid);
2815 m->m_pkthdr.pf.rtableid = rtableid;
2822 pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n,
2823 struct pf_rule **r, struct pf_rule **a, int *match)
2825 struct pf_anchor_stackframe *f;
2827 (*r)->anchor->match = 0;
2831 if (*depth >= sizeof(V_pf_anchor_stack) /
2832 sizeof(V_pf_anchor_stack[0])) {
2834 if (*depth >= sizeof(pf_anchor_stack) /
2835 sizeof(pf_anchor_stack[0])) {
2837 printf("pf_step_into_anchor: stack overflow\n");
2838 *r = TAILQ_NEXT(*r, entries);
2840 } else if (*depth == 0 && a != NULL)
2843 f = V_pf_anchor_stack + (*depth)++;
2845 f = pf_anchor_stack + (*depth)++;
2849 if ((*r)->anchor_wildcard) {
2850 f->parent = &(*r)->anchor->children;
2851 if ((f->child = RB_MIN(pf_anchor_node, f->parent)) ==
2856 *rs = &f->child->ruleset;
2860 *rs = &(*r)->anchor->ruleset;
2862 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2866 pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n,
2867 struct pf_rule **r, struct pf_rule **a, int *match)
2869 struct pf_anchor_stackframe *f;
2876 f = V_pf_anchor_stack + *depth - 1;
2878 f = pf_anchor_stack + *depth - 1;
2880 if (f->parent != NULL && f->child != NULL) {
2881 if (f->child->match ||
2882 (match != NULL && *match)) {
2883 f->r->anchor->match = 1;
2886 f->child = RB_NEXT(pf_anchor_node, f->parent, f->child);
2887 if (f->child != NULL) {
2888 *rs = &f->child->ruleset;
2889 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2897 if (*depth == 0 && a != NULL)
2900 if (f->r->anchor->match || (match != NULL && *match))
2901 quick = f->r->quick;
2902 *r = TAILQ_NEXT(f->r, entries);
2903 } while (*r == NULL);
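/*
 * Anchor traversal in short: pf_step_into_anchor() pushes a stack frame
 * for the calling rule and, for wildcard anchors, an RB-tree iterator
 * over the anchor's children, then restarts evaluation at the first
 * rule of the child ruleset.  pf_step_out_of_anchor() pops frames,
 * moving on to the next wildcard child if one exists and otherwise
 * resuming at TAILQ_NEXT() of the calling rule, honoring "quick" if
 * anything inside the anchor matched.
 */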
2910 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
2911 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
2916 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2917 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2921 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2922 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2923 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
2924 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
2925 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
2926 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
2927 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
2928 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
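/*
 * The expressions above keep the pool's network bits and the original
 * host bits: naddr = (raddr & rmask) | (saddr & ~rmask).  E.g. with
 * raddr 10.0.0.0, rmask 255.255.255.0 and saddr 192.168.1.42 the
 * result is 10.0.0.42.
 */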
2934 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
2939 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
2943 if (addr->addr32[3] == 0xffffffff) {
2944 addr->addr32[3] = 0;
2945 if (addr->addr32[2] == 0xffffffff) {
2946 addr->addr32[2] = 0;
2947 if (addr->addr32[1] == 0xffffffff) {
2948 addr->addr32[1] = 0;
2950 htonl(ntohl(addr->addr32[0]) + 1);
2953 htonl(ntohl(addr->addr32[1]) + 1);
2956 htonl(ntohl(addr->addr32[2]) + 1);
2959 htonl(ntohl(addr->addr32[3]) + 1);
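/*
 * For IPv6 the address is treated as a 128-bit big-endian counter:
 * each 32-bit word is converted to host order before incrementing and
 * the carry ripples from addr32[3] (least significant) up to addr32[0].
 */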
2967 pf_socket_lookup(int direction, struct pf_pdesc *pd, struct inpcb *inp_arg)
2969 pf_socket_lookup(int direction, struct pf_pdesc *pd)
2972 struct pf_addr *saddr, *daddr;
2973 u_int16_t sport, dport;
2975 struct inpcbinfo *pi;
2977 struct inpcbtable *tb;
2983 pd->lookup.uid = UID_MAX;
2984 pd->lookup.gid = GID_MAX;
2985 pd->lookup.pid = NO_PID;
2988 if (inp_arg != NULL) {
2989 INP_LOCK_ASSERT(inp_arg);
2990 pd->lookup.uid = inp_arg->inp_cred->cr_uid;
2991 pd->lookup.gid = inp_arg->inp_cred->cr_groups[0];
2996 switch (pd->proto) {
2998 if (pd->hdr.tcp == NULL)
3000 sport = pd->hdr.tcp->th_sport;
3001 dport = pd->hdr.tcp->th_dport;
3009 if (pd->hdr.udp == NULL)
3011 sport = pd->hdr.udp->uh_sport;
3012 dport = pd->hdr.udp->uh_dport;
3022 if (direction == PF_IN) {
3039 * XXXRW: would be nice if we had an mbuf here so that we
3040 * could use in_pcblookup_mbuf().
3042 inp = in_pcblookup(pi, saddr->v4, sport, daddr->v4,
3043 dport, INPLOOKUP_RLOCKPCB, NULL);
3045 inp = in_pcblookup(pi, saddr->v4, sport,
3046 daddr->v4, dport, INPLOOKUP_WILDCARD |
3047 INPLOOKUP_RLOCKPCB, NULL);
3052 inp = in_pcbhashlookup(tb, saddr->v4, sport, daddr->v4, dport);
3054 inp = in_pcblookup_listen(tb, daddr->v4, dport, 0,
3066 * XXXRW: would be nice if we had an mbuf here so that we
3067 * could use in6_pcblookup_mbuf().
3069 inp = in6_pcblookup(pi, &saddr->v6, sport,
3070 &daddr->v6, dport, INPLOOKUP_RLOCKPCB, NULL);
3072 inp = in6_pcblookup(pi, &saddr->v6, sport,
3073 &daddr->v6, dport, INPLOOKUP_WILDCARD |
3074 INPLOOKUP_RLOCKPCB, NULL);
3079 inp = in6_pcbhashlookup(tb, &saddr->v6, sport, &daddr->v6,
3082 inp = in6_pcblookup_listen(tb, &daddr->v6, dport, 0,
3095 INP_RLOCK_ASSERT(inp);
3096 pd->lookup.uid = inp->inp_cred->cr_uid;
3097 pd->lookup.gid = inp->inp_cred->cr_groups[0];
3100 pd->lookup.uid = inp->inp_socket->so_euid;
3101 pd->lookup.gid = inp->inp_socket->so_egid;
3102 pd->lookup.pid = inp->inp_socket->so_cpid;
3108 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3112 u_int8_t *opt, optlen;
3113 u_int8_t wscale = 0;
3115 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3116 if (hlen <= sizeof(struct tcphdr))
3118 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3120 opt = hdr + sizeof(struct tcphdr);
3121 hlen -= sizeof(struct tcphdr);
3131 if (wscale > TCP_MAX_WINSHIFT)
3132 wscale = TCP_MAX_WINSHIFT;
3133 wscale |= PF_WSCALE_FLAG;
3148 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3152 u_int8_t *opt, optlen;
3154 u_int16_t mss = V_tcp_mssdflt;
3156 u_int16_t mss = tcp_mssdflt;
3159 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3160 if (hlen <= sizeof(struct tcphdr))
3162 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3164 opt = hdr + sizeof(struct tcphdr);
3165 hlen -= sizeof(struct tcphdr);
3166 while (hlen >= TCPOLEN_MAXSEG) {
3174 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
3190 pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
3193 struct sockaddr_in *dst;
3197 struct sockaddr_in6 *dst6;
3198 struct route_in6 ro6;
3200 struct rtentry *rt = NULL;
3203 u_int16_t mss = V_tcp_mssdflt;
3206 u_int16_t mss = tcp_mssdflt;
3212 hlen = sizeof(struct ip);
3213 bzero(&ro, sizeof(ro));
3214 dst = (struct sockaddr_in *)&ro.ro_dst;
3215 dst->sin_family = AF_INET;
3216 dst->sin_len = sizeof(*dst);
3217 dst->sin_addr = addr->v4;
3219 in_rtalloc_ign(&ro, 0, rtableid);
3220 #else /* ! __FreeBSD__ */
3221 rtalloc_noclone(&ro, NO_CLONING);
3228 hlen = sizeof(struct ip6_hdr);
3229 bzero(&ro6, sizeof(ro6));
3230 dst6 = (struct sockaddr_in6 *)&ro6.ro_dst;
3231 dst6->sin6_family = AF_INET6;
3232 dst6->sin6_len = sizeof(*dst6);
3233 dst6->sin6_addr = addr->v6;
3235 in6_rtalloc_ign(&ro6, 0, rtableid);
3236 #else /* ! __FreeBSD__ */
3237 rtalloc_noclone((struct route *)&ro6, NO_CLONING);
3244 if (rt && rt->rt_ifp) {
3245 mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr);
3247 mss = max(V_tcp_mssdflt, mss);
3249 mss = max(tcp_mssdflt, mss);
3253 mss = min(mss, offer);
3254 mss = max(mss, 64); /* sanity - at least max opt space */
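/*
 * The MSS returned here is derived from the MTU of the route towards
 * the peer: if_mtu minus the IP header (20 bytes for IPv4, 40 for
 * IPv6) minus the TCP header, then clamped to the peer's own offer and
 * to a floor of 64.  E.g. a 1500 byte Ethernet MTU yields
 * 1500 - 20 - 20 = 1460 for IPv4.
 */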
3259 pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr)
3261 struct pf_rule *r = s->rule.ptr;
3262 struct pf_src_node *sn = NULL;
3265 if (!r->rt || r->rt == PF_FASTROUTE)
3267 switch (s->key[PF_SK_WIRE]->af) {
3270 pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL, &sn);
3271 s->rt_kif = r->rpool.cur->kif;
3276 pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL, &sn);
3277 s->rt_kif = r->rpool.cur->kif;
3284 pf_tcp_iss(struct pf_pdesc *pd)
3287 u_int32_t digest[4];
3290 if (V_pf_tcp_secret_init == 0) {
3291 read_random(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret));
3292 MD5Init(&V_pf_tcp_secret_ctx);
3293 MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret,
3294 sizeof(V_pf_tcp_secret));
3295 V_pf_tcp_secret_init = 1;
3298 ctx = V_pf_tcp_secret_ctx;
3300 if (pf_tcp_secret_init == 0) {
3301 arc4random_buf(pf_tcp_secret, sizeof(pf_tcp_secret));
3302 MD5Init(&pf_tcp_secret_ctx);
3303 MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret,
3304 sizeof(pf_tcp_secret));
3305 pf_tcp_secret_init = 1;
3308 ctx = pf_tcp_secret_ctx;
3311 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short));
3312 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short));
3313 if (pd->af == AF_INET6) {
3314 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
3315 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
3317 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
3318 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
3320 MD5Final((u_char *)digest, &ctx);
3322 V_pf_tcp_iss_off += 4096;
3323 #define ISN_RANDOM_INCREMENT (4096 - 1)
3324 return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) +
3326 #undef ISN_RANDOM_INCREMENT
3328 pf_tcp_iss_off += 4096;
3329 return (digest[0] + tcp_iss + pf_tcp_iss_off);
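/*
 * The modulated ISS is essentially an RFC 1948 style value: MD5 over a
 * lazily initialized random secret and the connection 4-tuple yields a
 * per-connection offset an observer cannot predict, while the global
 * iss offset (bumped by 4096 on every call) keeps successive ISNs for
 * the same tuple increasing.
 */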
3334 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
3335 struct pfi_kif *kif, struct mbuf *m, int off, void *h,
3336 struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm,
3338 struct ifqueue *ifq, struct inpcb *inp)
3340 struct ifqueue *ifq)
3343 struct pf_rule *nr = NULL;
3344 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
3345 sa_family_t af = pd->af;
3346 struct pf_rule *r, *a = NULL;
3347 struct pf_ruleset *ruleset = NULL;
3348 struct pf_src_node *nsn = NULL;
3349 struct tcphdr *th = pd->hdr.tcp;
3350 struct pf_state_key *skw = NULL, *sks = NULL;
3351 struct pf_state_key *sk = NULL, *nk = NULL;
3353 int rewrite = 0, hdrlen = 0;
3354 int tag = -1, rtableid = -1;
3359 u_int16_t sport = 0, dport = 0;
3360 u_int16_t bproto_sum = 0, bip_sum = 0;
3362 u_int16_t sport, dport;
3363 u_int16_t bproto_sum = 0, bip_sum;
3365 u_int8_t icmptype = 0, icmpcode = 0;
3368 if (direction == PF_IN && pf_check_congestion(ifq)) {
3369 REASON_SET(&reason, PFRES_CONGEST);
3375 pd->lookup.done = pf_socket_lookup(direction, pd, inp);
3376 else if (V_debug_pfugidhack) {
3378 DPFPRINTF(PF_DEBUG_MISC, ("pf: unlocked lookup\n"));
3379 pd->lookup.done = pf_socket_lookup(direction, pd, inp);
3384 switch (pd->proto) {
3386 sport = th->th_sport;
3387 dport = th->th_dport;
3388 hdrlen = sizeof(*th);
3391 sport = pd->hdr.udp->uh_sport;
3392 dport = pd->hdr.udp->uh_dport;
3393 hdrlen = sizeof(*pd->hdr.udp);
3397 if (pd->af != AF_INET)
3399 sport = dport = pd->hdr.icmp->icmp_id;
3400 hdrlen = sizeof(*pd->hdr.icmp);
3401 icmptype = pd->hdr.icmp->icmp_type;
3402 icmpcode = pd->hdr.icmp->icmp_code;
3404 if (icmptype == ICMP_UNREACH ||
3405 icmptype == ICMP_SOURCEQUENCH ||
3406 icmptype == ICMP_REDIRECT ||
3407 icmptype == ICMP_TIMXCEED ||
3408 icmptype == ICMP_PARAMPROB)
3413 case IPPROTO_ICMPV6:
3416 sport = dport = pd->hdr.icmp6->icmp6_id;
3417 hdrlen = sizeof(*pd->hdr.icmp6);
3418 icmptype = pd->hdr.icmp6->icmp6_type;
3419 icmpcode = pd->hdr.icmp6->icmp6_code;
3421 if (icmptype == ICMP6_DST_UNREACH ||
3422 icmptype == ICMP6_PACKET_TOO_BIG ||
3423 icmptype == ICMP6_TIME_EXCEEDED ||
3424 icmptype == ICMP6_PARAM_PROB)
3429 sport = dport = hdrlen = 0;
3433 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3435 /* check packet for BINAT/NAT/RDR */
3436 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn,
3437 &skw, &sks, &sk, &nk, saddr, daddr, sport, dport)) != NULL) {
3438 if (nk == NULL || sk == NULL) {
3439 REASON_SET(&reason, PFRES_MEMORY);
3444 bip_sum = *pd->ip_sum;
3446 switch (pd->proto) {
3448 bproto_sum = th->th_sum;
3449 pd->proto_sum = &th->th_sum;
3451 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3452 nk->port[pd->sidx] != sport) {
3453 pf_change_ap(m, saddr, &th->th_sport, pd->ip_sum,
3454 &th->th_sum, &nk->addr[pd->sidx],
3455 nk->port[pd->sidx], 0, af);
3456 pd->sport = &th->th_sport;
3457 sport = th->th_sport;
3460 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3461 nk->port[pd->didx] != dport) {
3462 pf_change_ap(m, daddr, &th->th_dport, pd->ip_sum,
3463 &th->th_sum, &nk->addr[pd->didx],
3464 nk->port[pd->didx], 0, af);
3465 dport = th->th_dport;
3466 pd->dport = &th->th_dport;
3471 bproto_sum = pd->hdr.udp->uh_sum;
3472 pd->proto_sum = &pd->hdr.udp->uh_sum;
3474 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3475 nk->port[pd->sidx] != sport) {
3476 pf_change_ap(m, saddr, &pd->hdr.udp->uh_sport,
3477 pd->ip_sum, &pd->hdr.udp->uh_sum,
3478 &nk->addr[pd->sidx],
3479 nk->port[pd->sidx], 1, af);
3480 sport = pd->hdr.udp->uh_sport;
3481 pd->sport = &pd->hdr.udp->uh_sport;
3484 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3485 nk->port[pd->didx] != dport) {
3486 pf_change_ap(m, daddr, &pd->hdr.udp->uh_dport,
3487 pd->ip_sum, &pd->hdr.udp->uh_sum,
3488 &nk->addr[pd->didx],
3489 nk->port[pd->didx], 1, af);
3490 dport = pd->hdr.udp->uh_dport;
3491 pd->dport = &pd->hdr.udp->uh_dport;
3497 nk->port[0] = nk->port[1];
3498 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
3499 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3500 nk->addr[pd->sidx].v4.s_addr, 0);
3502 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
3503 pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
3504 nk->addr[pd->didx].v4.s_addr, 0);
3506 if (nk->port[1] != pd->hdr.icmp->icmp_id) {
3507 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
3508 pd->hdr.icmp->icmp_cksum, sport,
3510 pd->hdr.icmp->icmp_id = nk->port[1];
3511 pd->sport = &pd->hdr.icmp->icmp_id;
3513 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
3517 case IPPROTO_ICMPV6:
3518 nk->port[0] = nk->port[1];
3519 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3520 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3521 &nk->addr[pd->sidx], 0);
3523 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3524 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3525 &nk->addr[pd->didx], 0);
3534 &nk->addr[pd->sidx], AF_INET))
3535 pf_change_a(&saddr->v4.s_addr,
3537 nk->addr[pd->sidx].v4.s_addr, 0);
3540 &nk->addr[pd->didx], AF_INET))
3541 pf_change_a(&daddr->v4.s_addr,
3543 nk->addr[pd->didx].v4.s_addr, 0);
3549 &nk->addr[pd->sidx], AF_INET6))
3550 PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3553 &nk->addr[pd->didx], AF_INET6))
3554 PF_ACPY(daddr, &nk->addr[pd->didx], af);
3567 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3568 r = r->skip[PF_SKIP_IFP].ptr;
3569 else if (r->direction && r->direction != direction)
3570 r = r->skip[PF_SKIP_DIR].ptr;
3571 else if (r->af && r->af != af)
3572 r = r->skip[PF_SKIP_AF].ptr;
3573 else if (r->proto && r->proto != pd->proto)
3574 r = r->skip[PF_SKIP_PROTO].ptr;
3575 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3576 r->src.neg, kif, M_GETFIB(m)))
3577 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3578 /* tcp/udp only. port_op always 0 in other cases */
3579 else if (r->src.port_op && !pf_match_port(r->src.port_op,
3580 r->src.port[0], r->src.port[1], sport))
3581 r = r->skip[PF_SKIP_SRC_PORT].ptr;
3582 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3583 r->dst.neg, NULL, M_GETFIB(m)))
3584 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3585 /* tcp/udp only. port_op always 0 in other cases */
3586 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3587 r->dst.port[0], r->dst.port[1], dport))
3588 r = r->skip[PF_SKIP_DST_PORT].ptr;
3589 /* icmp only. type always 0 in other cases */
3590 else if (r->type && r->type != icmptype + 1)
3591 r = TAILQ_NEXT(r, entries);
3592 /* icmp only. code always 0 in other cases */
3593 else if (r->code && r->code != icmpcode + 1)
3594 r = TAILQ_NEXT(r, entries);
3595 else if (r->tos && !(r->tos == pd->tos))
3596 r = TAILQ_NEXT(r, entries);
3597 else if (r->rule_flag & PFRULE_FRAGMENT)
3598 r = TAILQ_NEXT(r, entries);
3599 else if (pd->proto == IPPROTO_TCP &&
3600 (r->flagset & th->th_flags) != r->flags)
3601 r = TAILQ_NEXT(r, entries);
3602 /* tcp/udp only. uid.op always 0 in other cases */
3603 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3605 pf_socket_lookup(direction, pd, inp), 1)) &&
3607 pf_socket_lookup(direction, pd), 1)) &&
3609 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3611 r = TAILQ_NEXT(r, entries);
3612 /* tcp/udp only. gid.op always 0 in other cases */
3613 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
3615 pf_socket_lookup(direction, pd, inp), 1)) &&
3617 pf_socket_lookup(direction, pd), 1)) &&
3619 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
3621 r = TAILQ_NEXT(r, entries);
3624 r->prob <= arc4random())
3626 r->prob <= arc4random_uniform(UINT_MAX - 1) + 1)
3628 r = TAILQ_NEXT(r, entries);
3630 else if (r->match_tag && !pf_match_tag(m, r, &tag, pd->pf_mtag))
3632 else if (r->match_tag && !pf_match_tag(m, r, &tag))
3634 r = TAILQ_NEXT(r, entries);
3635 else if (r->os_fingerprint != PF_OSFP_ANY &&
3636 (pd->proto != IPPROTO_TCP || !pf_osfp_match(
3637 pf_osfp_fingerprint(pd, m, off, th),
3638 r->os_fingerprint)))
3639 r = TAILQ_NEXT(r, entries);
3643 if (r->rtableid >= 0)
3644 rtableid = r->rtableid;
3645 if (r->anchor == NULL) {
3652 r = TAILQ_NEXT(r, entries);
3654 pf_step_into_anchor(&asd, &ruleset,
3655 PF_RULESET_FILTER, &r, &a, &match);
3657 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
3658 PF_RULESET_FILTER, &r, &a, &match))
3665 REASON_SET(&reason, PFRES_MATCH);
3667 if (r->log || (nr != NULL && nr->log)) {
3669 m_copyback(m, off, hdrlen, pd->hdr.any);
3670 PFLOG_PACKET(kif, h, m, af, direction, reason, r->log ? r : nr,
3674 if ((r->action == PF_DROP) &&
3675 ((r->rule_flag & PFRULE_RETURNRST) ||
3676 (r->rule_flag & PFRULE_RETURNICMP) ||
3677 (r->rule_flag & PFRULE_RETURN))) {
3678 /* undo NAT changes, if they have taken place */
3680 PF_ACPY(saddr, &sk->addr[pd->sidx], af);
3681 PF_ACPY(daddr, &sk->addr[pd->didx], af);
3683 *pd->sport = sk->port[pd->sidx];
3685 *pd->dport = sk->port[pd->didx];
3687 *pd->proto_sum = bproto_sum;
3689 *pd->ip_sum = bip_sum;
3690 m_copyback(m, off, hdrlen, pd->hdr.any);
3692 if (pd->proto == IPPROTO_TCP &&
3693 ((r->rule_flag & PFRULE_RETURNRST) ||
3694 (r->rule_flag & PFRULE_RETURN)) &&
3695 !(th->th_flags & TH_RST)) {
3696 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
3708 h4 = mtod(m, struct ip *);
3709 len = ntohs(h4->ip_len) - off;
3714 h6 = mtod(m, struct ip6_hdr *);
3715 len = ntohs(h6->ip6_plen) - (off - sizeof(*h6));
3720 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
3721 REASON_SET(&reason, PFRES_PROTCKSUM);
3723 if (th->th_flags & TH_SYN)
3725 if (th->th_flags & TH_FIN)
3728 pf_send_tcp(m, r, af, pd->dst,
3730 pf_send_tcp(r, af, pd->dst,
3732 pd->src, th->th_dport, th->th_sport,
3733 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
3734 r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp);
3736 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
3738 pf_send_icmp(m, r->return_icmp >> 8,
3739 r->return_icmp & 255, af, r);
3740 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
3742 pf_send_icmp(m, r->return_icmp6 >> 8,
3743 r->return_icmp6 & 255, af, r);
3746 if (r->action == PF_DROP)
3750 if (pf_tag_packet(m, tag, rtableid, pd->pf_mtag)) {
3752 if (pf_tag_packet(m, tag, rtableid)) {
3754 REASON_SET(&reason, PFRES_MEMORY);
3758 if (!state_icmp && (r->keep_state || nr != NULL ||
3759 (pd->flags & PFDESC_TCP_NORM))) {
3761 action = pf_create_state(r, nr, a, pd, nsn, skw, sks, nk, sk, m,
3762 off, sport, dport, &rewrite, kif, sm, tag, bproto_sum,
3764 if (action != PF_PASS)
3769 pool_put(&V_pf_state_key_pl, sk);
3771 pool_put(&V_pf_state_key_pl, nk);
3774 pool_put(&pf_state_key_pl, sk);
3776 pool_put(&pf_state_key_pl, nk);
3780 /* copy back packet headers if we performed NAT operations */
3782 m_copyback(m, off, hdrlen, pd->hdr.any);
3785 if (*sm != NULL && !ISSET((*sm)->state_flags, PFSTATE_NOSYNC) &&
3787 direction == PF_OUT && pfsync_up_ptr != NULL && pfsync_up_ptr()) {
3789 direction == PF_OUT && pfsync_up()) {
3792 * We want the state created, but we don't
3793 * want to send this in case a partner
3794 * firewall has to know about it to allow
3795 * replies through it.
3798 if (pfsync_defer_ptr != NULL &&
3799 pfsync_defer_ptr(*sm, m))
3801 if (pfsync_defer(*sm, m))
3812 pool_put(&V_pf_state_key_pl, sk);
3814 pool_put(&V_pf_state_key_pl, nk);
3817 pool_put(&pf_state_key_pl, sk);
3819 pool_put(&pf_state_key_pl, nk);
3825 pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
3826 struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *skw,
3827 struct pf_state_key *sks, struct pf_state_key *nk, struct pf_state_key *sk,
3828 struct mbuf *m, int off, u_int16_t sport, u_int16_t dport, int *rewrite,
3829 struct pfi_kif *kif, struct pf_state **sm, int tag, u_int16_t bproto_sum,
3830 u_int16_t bip_sum, int hdrlen)
3832 struct pf_state *s = NULL;
3833 struct pf_src_node *sn = NULL;
3834 struct tcphdr *th = pd->hdr.tcp;
3836 u_int16_t mss = V_tcp_mssdflt;
3838 u_int16_t mss = tcp_mssdflt;
3842 /* check maximums */
3843 if (r->max_states && (r->states_cur >= r->max_states)) {
3845 V_pf_status.lcounters[LCNT_STATES]++;
3847 pf_status.lcounters[LCNT_STATES]++;
3849 REASON_SET(&reason, PFRES_MAXSTATES);
3852 /* src node for filter rule */
3853 if ((r->rule_flag & PFRULE_SRCTRACK ||
3854 r->rpool.opts & PF_POOL_STICKYADDR) &&
3855 pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
3856 REASON_SET(&reason, PFRES_SRCLIMIT);
3859 /* src node for translation rule */
3860 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3861 pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
3862 REASON_SET(&reason, PFRES_SRCLIMIT);
3866 s = pool_get(&V_pf_state_pl, PR_NOWAIT | PR_ZERO);
3868 s = pool_get(&pf_state_pl, PR_NOWAIT | PR_ZERO);
3871 REASON_SET(&reason, PFRES_MEMORY);
3875 s->nat_rule.ptr = nr;
3877 STATE_INC_COUNTERS(s);
3879 s->state_flags |= PFSTATE_ALLOWOPTS;
3880 if (r->rule_flag & PFRULE_STATESLOPPY)
3881 s->state_flags |= PFSTATE_SLOPPY;
3882 if (r->rule_flag & PFRULE_PFLOW)
3883 s->state_flags |= PFSTATE_PFLOW;
3884 s->log = r->log & PF_LOG_ALL;
3885 s->sync_state = PFSYNC_S_NONE;
3887 s->log |= nr->log & PF_LOG_ALL;
3888 switch (pd->proto) {
3890 s->src.seqlo = ntohl(th->th_seq);
3891 s->src.seqhi = s->src.seqlo + pd->p_len + 1;
3892 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
3893 r->keep_state == PF_STATE_MODULATE) {
3894 /* Generate sequence number modulator */
3895 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
3898 pf_change_proto_a(m, &th->th_seq, &th->th_sum,
3899 htonl(s->src.seqlo + s->src.seqdiff), 0);
3903 if (th->th_flags & TH_SYN) {
3905 s->src.wscale = pf_get_wscale(m, off,
3906 th->th_off, pd->af);
3908 s->src.max_win = MAX(ntohs(th->th_win), 1);
3909 if (s->src.wscale & PF_WSCALE_MASK) {
3910 /* Remove scale factor from initial window */
3911 int win = s->src.max_win;
3912 win += 1 << (s->src.wscale & PF_WSCALE_MASK);
3913 s->src.max_win = (win - 1) >>
3914 (s->src.wscale & PF_WSCALE_MASK);
3916 if (th->th_flags & TH_FIN)
3920 s->src.state = TCPS_SYN_SENT;
3921 s->dst.state = TCPS_CLOSED;
3922 s->timeout = PFTM_TCP_FIRST_PACKET;
3925 s->src.state = PFUDPS_SINGLE;
3926 s->dst.state = PFUDPS_NO_TRAFFIC;
3927 s->timeout = PFTM_UDP_FIRST_PACKET;
3931 case IPPROTO_ICMPV6:
3933 s->timeout = PFTM_ICMP_FIRST_PACKET;
3936 s->src.state = PFOTHERS_SINGLE;
3937 s->dst.state = PFOTHERS_NO_TRAFFIC;
3938 s->timeout = PFTM_OTHER_FIRST_PACKET;
3941 s->creation = time_second;
3942 s->expire = time_second;
3946 s->src_node->states++;
3949 /* XXX We only modify one side for now. */
3950 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
3951 s->nat_src_node = nsn;
3952 s->nat_src_node->states++;
3954 if (pd->proto == IPPROTO_TCP) {
3955 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
3956 off, pd, th, &s->src, &s->dst)) {
3957 REASON_SET(&reason, PFRES_MEMORY);
3958 pf_src_tree_remove_state(s);
3959 STATE_DEC_COUNTERS(s);
3961 pool_put(&V_pf_state_pl, s);
3963 pool_put(&pf_state_pl, s);
3967 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
3968 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
3969 &s->src, &s->dst, rewrite)) {
3970 /* This really shouldn't happen!!! */
3971 DPFPRINTF(PF_DEBUG_URGENT,
3972 ("pf_normalize_tcp_stateful failed on first pkt"));
3973 pf_normalize_tcp_cleanup(s);
3974 pf_src_tree_remove_state(s);
3975 STATE_DEC_COUNTERS(s);
3977 pool_put(&V_pf_state_pl, s);
3979 pool_put(&pf_state_pl, s);
3984 s->direction = pd->dir;
3986 if (sk == NULL && pf_state_key_setup(pd, nr, &skw, &sks, &sk, &nk,
3987 pd->src, pd->dst, sport, dport))
3990 if (pf_state_insert(BOUND_IFACE(r, kif), skw, sks, s)) {
3991 if (pd->proto == IPPROTO_TCP)
3992 pf_normalize_tcp_cleanup(s);
3993 REASON_SET(&reason, PFRES_STATEINS);
3994 pf_src_tree_remove_state(s);
3995 STATE_DEC_COUNTERS(s);
3997 pool_put(&V_pf_state_pl, s);
3999 pool_put(&pf_state_pl, s);
4005 pf_set_rt_ifp(s, pd->src); /* needs s->state_key set */
4010 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
4011 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
4012 s->src.state = PF_TCPS_PROXY_SRC;
4013 /* undo NAT changes, if they have taken place */
4015 struct pf_state_key *skt = s->key[PF_SK_WIRE];
4016 if (pd->dir == PF_OUT)
4017 skt = s->key[PF_SK_STACK];
4018 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
4019 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
4021 *pd->sport = skt->port[pd->sidx];
4023 *pd->dport = skt->port[pd->didx];
4025 *pd->proto_sum = bproto_sum;
4027 *pd->ip_sum = bip_sum;
4028 m_copyback(m, off, hdrlen, pd->hdr.any);
4030 s->src.seqhi = htonl(arc4random());
4031 /* Find mss option */
4032 int rtid = M_GETFIB(m);
4033 mss = pf_get_mss(m, off, th->th_off, pd->af);
4034 mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
4035 mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
4038 pf_send_tcp(NULL, r, pd->af, pd->dst, pd->src, th->th_dport,
4040 pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
4042 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
4043 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL);
4044 REASON_SET(&reason, PFRES_SYNPROXY);
4045 return (PF_SYNPROXY_DROP);
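/*
 * Synproxy step one: pf answers the client's SYN itself with a SYN|ACK
 * carrying a random sequence number (s->src.seqhi) and an MSS clamped
 * to the client's offer and the route MTUs, and the original SYN is
 * dropped (PF_SYNPROXY_DROP).  The connection to the real destination
 * is only opened later, from pf_test_state_tcp(), once the client has
 * completed its side of the handshake.
 */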
4053 pool_put(&V_pf_state_key_pl, sk);
4055 pool_put(&V_pf_state_key_pl, nk);
4058 pool_put(&pf_state_key_pl, sk);
4060 pool_put(&pf_state_key_pl, nk);
4063 if (sn != NULL && sn->states == 0 && sn->expire == 0) {
4065 RB_REMOVE(pf_src_tree, &V_tree_src_tracking, sn);
4066 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
4067 V_pf_status.src_nodes--;
4068 pool_put(&V_pf_src_tree_pl, sn);
4070 RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
4071 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
4072 pf_status.src_nodes--;
4073 pool_put(&pf_src_tree_pl, sn);
4076 if (nsn != sn && nsn != NULL && nsn->states == 0 && nsn->expire == 0) {
4078 RB_REMOVE(pf_src_tree, &V_tree_src_tracking, nsn);
4079 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
4080 V_pf_status.src_nodes--;
4081 pool_put(&V_pf_src_tree_pl, nsn);
4083 RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
4084 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
4085 pf_status.src_nodes--;
4086 pool_put(&pf_src_tree_pl, nsn);
4093 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
4094 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
4095 struct pf_ruleset **rsm)
4097 struct pf_rule *r, *a = NULL;
4098 struct pf_ruleset *ruleset = NULL;
4099 sa_family_t af = pd->af;
4105 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
4108 if (pfi_kif_match(r->kif, kif) == r->ifnot)
4109 r = r->skip[PF_SKIP_IFP].ptr;
4110 else if (r->direction && r->direction != direction)
4111 r = r->skip[PF_SKIP_DIR].ptr;
4112 else if (r->af && r->af != af)
4113 r = r->skip[PF_SKIP_AF].ptr;
4114 else if (r->proto && r->proto != pd->proto)
4115 r = r->skip[PF_SKIP_PROTO].ptr;
4116 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
4117 r->src.neg, kif, M_GETFIB(m)))
4118 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
4119 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
4120 r->dst.neg, NULL, M_GETFIB(m)))
4121 r = r->skip[PF_SKIP_DST_ADDR].ptr;
4122 else if (r->tos && !(r->tos == pd->tos))
4123 r = TAILQ_NEXT(r, entries);
4124 else if (r->os_fingerprint != PF_OSFP_ANY)
4125 r = TAILQ_NEXT(r, entries);
4126 else if (pd->proto == IPPROTO_UDP &&
4127 (r->src.port_op || r->dst.port_op))
4128 r = TAILQ_NEXT(r, entries);
4129 else if (pd->proto == IPPROTO_TCP &&
4130 (r->src.port_op || r->dst.port_op || r->flagset))
4131 r = TAILQ_NEXT(r, entries);
4132 else if ((pd->proto == IPPROTO_ICMP ||
4133 pd->proto == IPPROTO_ICMPV6) &&
4134 (r->type || r->code))
4135 r = TAILQ_NEXT(r, entries);
4136 else if (r->prob && r->prob <=
4137 (arc4random() % (UINT_MAX - 1) + 1))
4138 r = TAILQ_NEXT(r, entries);
4140 else if (r->match_tag && !pf_match_tag(m, r, &tag, pd->pf_mtag))
4142 else if (r->match_tag && !pf_match_tag(m, r, &tag))
4144 r = TAILQ_NEXT(r, entries);
4146 if (r->anchor == NULL) {
4153 r = TAILQ_NEXT(r, entries);
4155 pf_step_into_anchor(&asd, &ruleset,
4156 PF_RULESET_FILTER, &r, &a, &match);
4158 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
4159 PF_RULESET_FILTER, &r, &a, &match))
4166 REASON_SET(&reason, PFRES_MATCH);
4169 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset,
4172 if (r->action != PF_PASS)
4176 if (pf_tag_packet(m, tag, -1, pd->pf_mtag)) {
4178 if (pf_tag_packet(m, tag, -1)) {
4180 REASON_SET(&reason, PFRES_MEMORY);
4188 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
4189 struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off,
4190 struct pf_pdesc *pd, u_short *reason, int *copyback)
4192 struct tcphdr *th = pd->hdr.tcp;
4193 u_int16_t win = ntohs(th->th_win);
4194 u_int32_t ack, end, seq, orig_seq;
4198 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
4199 sws = src->wscale & PF_WSCALE_MASK;
4200 dws = dst->wscale & PF_WSCALE_MASK;
4205 * Sequence tracking algorithm from Guido van Rooij's paper:
4206 * http://www.madison-gurkha.com/publications/tcp_filtering/
4210 orig_seq = seq = ntohl(th->th_seq);
4211 if (src->seqlo == 0) {
4212 /* First packet from this end. Set its state */
4214 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
4215 src->scrub == NULL) {
4216 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
4217 REASON_SET(reason, PFRES_MEMORY);
4222 /* Deferred generation of sequence number modulator */
4223 if (dst->seqdiff && !src->seqdiff) {
4224 /* use random iss for the TCP server */
4225 while ((src->seqdiff = arc4random() - seq) == 0)
4227 ack = ntohl(th->th_ack) - dst->seqdiff;
4228 pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
4230 pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
4233 ack = ntohl(th->th_ack);
4236 end = seq + pd->p_len;
4237 if (th->th_flags & TH_SYN) {
4239 if (dst->wscale & PF_WSCALE_FLAG) {
4240 src->wscale = pf_get_wscale(m, off, th->th_off,
4242 if (src->wscale & PF_WSCALE_FLAG) {
4243 /* Remove scale factor from initial window */
4245 sws = src->wscale & PF_WSCALE_MASK;
4246 win = ((u_int32_t)win + (1 << sws) - 1)
4248 dws = dst->wscale & PF_WSCALE_MASK;
4250 /* fixup other window */
4251 dst->max_win <<= dst->wscale &
4253 /* in case of a retrans SYN|ACK */
4258 if (th->th_flags & TH_FIN)
4262 if (src->state < TCPS_SYN_SENT)
4263 src->state = TCPS_SYN_SENT;
4266 * May need to slide the window (seqhi may have been set by
4267 * the crappy stack check or if we picked up the connection
4268 * after establishment)
4270 if (src->seqhi == 1 ||
4271 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
4272 src->seqhi = end + MAX(1, dst->max_win << dws);
4273 if (win > src->max_win)
4277 ack = ntohl(th->th_ack) - dst->seqdiff;
4279 /* Modulate sequence numbers */
4280 pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
4282 pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
4285 end = seq + pd->p_len;
4286 if (th->th_flags & TH_SYN)
4288 if (th->th_flags & TH_FIN)
4292 if ((th->th_flags & TH_ACK) == 0) {
4293 /* Let it pass through the ack skew check */
4295 } else if ((ack == 0 &&
4296 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
4297 /* broken tcp stacks do not set ack */
4298 (dst->state < TCPS_SYN_SENT)) {
4300 * Many stacks (ours included) will set the ACK number in an
4301 * FIN|ACK if the SYN times out -- no sequence to ACK.
4307 /* Ease sequencing restrictions on no data packets */
4312 ackskew = dst->seqlo - ack;
4316 * Need to demodulate the sequence numbers in any TCP SACK options
4317 * (Selective ACK). We could optionally validate the SACK values
4318 * against the current ACK window, either forwards or backwards, but
4319 * I'm not confident that SACK has been implemented properly
4320 * everywhere. It wouldn't surprise me if several stacks accidentally
4321 * SACK too far backwards of previously ACKed data. There really aren't
4322 * any security implications of bad SACKing unless the target stack
4323 * doesn't validate the option length correctly. Someone trying to
4324 * spoof into a TCP connection won't bother blindly sending SACK options anyway.
4327 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
4328 if (pf_modulate_sack(m, off, pd, th, dst))
4333 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
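/*
 * The tests below correspond to the numbered failure codes in the
 * "State failure" diagnostic printed further down: (1) the segment must
 * end at or below the highest sequence number the sender may use
 * (src->seqhi), (2) it must not start more than one full window before
 * data already seen from this side, (3)/(4) its ACK may lag or lead the
 * peer's data by at most MAXACKWINDOW, scaled by the window shift, and
 * a RST must hit src->seqlo exactly or off by one, a requirement only
 * enforced for packets that went through IP reassembly (PFDESC_IP_REAS).
 */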
4334 if (SEQ_GEQ(src->seqhi, end) &&
4335 /* Last octet inside other's window space */
4336 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
4337 /* Retrans: not more than one window back */
4338 (ackskew >= -MAXACKWINDOW) &&
4339 /* Acking not more than one reassembled fragment backwards */
4340 (ackskew <= (MAXACKWINDOW << sws)) &&
4341 /* Acking not more than one window forward */
4342 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
4343 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
4344 (pd->flags & PFDESC_IP_REAS) == 0)) {
4345 /* Require an exact/+1 sequence match on resets when possible */
4347 if (dst->scrub || src->scrub) {
4348 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4349 *state, src, dst, copyback))
4353 /* update max window */
4354 if (src->max_win < win)
4356 /* synchronize sequencing */
4357 if (SEQ_GT(end, src->seqlo))
4359 /* slide the window of what the other end can send */
4360 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4361 dst->seqhi = ack + MAX((win << sws), 1);
4365 if (th->th_flags & TH_SYN)
4366 if (src->state < TCPS_SYN_SENT)
4367 src->state = TCPS_SYN_SENT;
4368 if (th->th_flags & TH_FIN)
4369 if (src->state < TCPS_CLOSING)
4370 src->state = TCPS_CLOSING;
4371 if (th->th_flags & TH_ACK) {
4372 if (dst->state == TCPS_SYN_SENT) {
4373 dst->state = TCPS_ESTABLISHED;
4374 if (src->state == TCPS_ESTABLISHED &&
4375 (*state)->src_node != NULL &&
4376 pf_src_connlimit(state)) {
4377 REASON_SET(reason, PFRES_SRCLIMIT);
4380 } else if (dst->state == TCPS_CLOSING)
4381 dst->state = TCPS_FIN_WAIT_2;
4383 if (th->th_flags & TH_RST)
4384 src->state = dst->state = TCPS_TIME_WAIT;
4386 /* update expire time */
4387 (*state)->expire = time_second;
4388 if (src->state >= TCPS_FIN_WAIT_2 &&
4389 dst->state >= TCPS_FIN_WAIT_2)
4390 (*state)->timeout = PFTM_TCP_CLOSED;
4391 else if (src->state >= TCPS_CLOSING &&
4392 dst->state >= TCPS_CLOSING)
4393 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4394 else if (src->state < TCPS_ESTABLISHED ||
4395 dst->state < TCPS_ESTABLISHED)
4396 (*state)->timeout = PFTM_TCP_OPENING;
4397 else if (src->state >= TCPS_CLOSING ||
4398 dst->state >= TCPS_CLOSING)
4399 (*state)->timeout = PFTM_TCP_CLOSING;
4401 (*state)->timeout = PFTM_TCP_ESTABLISHED;
4403 /* Fall through to PASS packet */
4405 } else if ((dst->state < TCPS_SYN_SENT ||
4406 dst->state >= TCPS_FIN_WAIT_2 ||
4407 src->state >= TCPS_FIN_WAIT_2) &&
4408 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
4409 /* Within a window forward of the originating packet */
4410 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
4411 /* Within a window backward of the originating packet */
4414 * This currently handles three situations:
4415 * 1) Stupid stacks will shotgun SYNs before their peer replies.
4417 * 2) When PF catches an already established stream (the
4418 * firewall rebooted, the state table was flushed, routes changed...)
4420 * 3) Packets get funky immediately after the connection
4421 * closes (this should catch Solaris spurious ACK|FINs
4422 * that web servers like to spew after a close)
4424 * This must be a little more careful than the above code
4425 * since packet floods will also be caught here. We don't
4426 * update the TTL here to mitigate the damage of a packet
4427 * flood and so the same code can handle awkward establishment
4428 * and a loosened connection close.
4429 * In the establishment case, a correct peer response will
4430 * validate the connection, go through the normal state code
4431 * and keep updating the state TTL.
4435 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4437 if (pf_status.debug >= PF_DEBUG_MISC) {
4439 printf("pf: loose state match: ");
4440 pf_print_state(*state);
4441 pf_print_flags(th->th_flags);
4442 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4443 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
4445 pd->p_len, ackskew, (unsigned long long)(*state)->packets[0],
4446 (unsigned long long)(*state)->packets[1],
4448 pd->p_len, ackskew, (*state)->packets[0],
4449 (*state)->packets[1],
4451 pd->dir == PF_IN ? "in" : "out",
4452 pd->dir == (*state)->direction ? "fwd" : "rev");
4455 if (dst->scrub || src->scrub) {
4456 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4457 *state, src, dst, copyback))
4461 /* update max window */
4462 if (src->max_win < win)
4464 /* synchronize sequencing */
4465 if (SEQ_GT(end, src->seqlo))
4467 /* slide the window of what the other end can send */
4468 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4469 dst->seqhi = ack + MAX((win << sws), 1);
4472 * Cannot set dst->seqhi here since this could be a shotgunned
4473 * SYN and not an already established connection.
4476 if (th->th_flags & TH_FIN)
4477 if (src->state < TCPS_CLOSING)
4478 src->state = TCPS_CLOSING;
4479 if (th->th_flags & TH_RST)
4480 src->state = dst->state = TCPS_TIME_WAIT;
4482 /* Fall through to PASS packet */
4485 if ((*state)->dst.state == TCPS_SYN_SENT &&
4486 (*state)->src.state == TCPS_SYN_SENT) {
4487 /* Send RST for state mismatches during handshake */
4488 if (!(th->th_flags & TH_RST))
4490 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4492 pf_send_tcp((*state)->rule.ptr, pd->af,
4494 pd->dst, pd->src, th->th_dport,
4495 th->th_sport, ntohl(th->th_ack), 0,
4497 (*state)->rule.ptr->return_ttl, 1, 0,
4498 pd->eh, kif->pfik_ifp);
4503 } else if (V_pf_status.debug >= PF_DEBUG_MISC) {
4505 } else if (pf_status.debug >= PF_DEBUG_MISC) {
4507 printf("pf: BAD state: ");
4508 pf_print_state(*state);
4509 pf_print_flags(th->th_flags);
4510 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4511 "pkts=%llu:%llu dir=%s,%s\n",
4512 seq, orig_seq, ack, pd->p_len, ackskew,
4514 (unsigned long long)(*state)->packets[0],
4515 (unsigned long long)(*state)->packets[1],
4517 (*state)->packets[0], (*state)->packets[1],
4519 pd->dir == PF_IN ? "in" : "out",
4520 pd->dir == (*state)->direction ? "fwd" : "rev");
4521 printf("pf: State failure on: %c %c %c %c | %c %c\n",
4522 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4523 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4525 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4526 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4527 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4528 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
4530 REASON_SET(reason, PFRES_BADSTATE);
4538 pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst,
4539 struct pf_state **state, struct pf_pdesc *pd, u_short *reason)
4541 struct tcphdr *th = pd->hdr.tcp;
4543 if (th->th_flags & TH_SYN)
4544 if (src->state < TCPS_SYN_SENT)
4545 src->state = TCPS_SYN_SENT;
4546 if (th->th_flags & TH_FIN)
4547 if (src->state < TCPS_CLOSING)
4548 src->state = TCPS_CLOSING;
4549 if (th->th_flags & TH_ACK) {
4550 if (dst->state == TCPS_SYN_SENT) {
4551 dst->state = TCPS_ESTABLISHED;
4552 if (src->state == TCPS_ESTABLISHED &&
4553 (*state)->src_node != NULL &&
4554 pf_src_connlimit(state)) {
4555 REASON_SET(reason, PFRES_SRCLIMIT);
4558 } else if (dst->state == TCPS_CLOSING) {
4559 dst->state = TCPS_FIN_WAIT_2;
4560 } else if (src->state == TCPS_SYN_SENT &&
4561 dst->state < TCPS_SYN_SENT) {
4563 * Handle a special sloppy case where we only see one
4564 * half of the connection. If there is an ACK after
4565 * the initial SYN without ever seeing a packet from
4566 * the destination, set the connection to established.
4568 dst->state = src->state = TCPS_ESTABLISHED;
4569 if ((*state)->src_node != NULL &&
4570 pf_src_connlimit(state)) {
4571 REASON_SET(reason, PFRES_SRCLIMIT);
4574 } else if (src->state == TCPS_CLOSING &&
4575 dst->state == TCPS_ESTABLISHED &&
4578 * Handle the closing of half connections where we
4579 * don't see the full bidirectional FIN/ACK+ACK chatter.
4582 dst->state = TCPS_CLOSING;
4585 if (th->th_flags & TH_RST)
4586 src->state = dst->state = TCPS_TIME_WAIT;
4588 /* update expire time */
4589 (*state)->expire = time_second;
4590 if (src->state >= TCPS_FIN_WAIT_2 &&
4591 dst->state >= TCPS_FIN_WAIT_2)
4592 (*state)->timeout = PFTM_TCP_CLOSED;
4593 else if (src->state >= TCPS_CLOSING &&
4594 dst->state >= TCPS_CLOSING)
4595 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4596 else if (src->state < TCPS_ESTABLISHED ||
4597 dst->state < TCPS_ESTABLISHED)
4598 (*state)->timeout = PFTM_TCP_OPENING;
4599 else if (src->state >= TCPS_CLOSING ||
4600 dst->state >= TCPS_CLOSING)
4601 (*state)->timeout = PFTM_TCP_CLOSING;
4603 (*state)->timeout = PFTM_TCP_ESTABLISHED;
4609 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
4610 struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
4613 struct pf_state_key_cmp key;
4614 struct tcphdr *th = pd->hdr.tcp;
4616 struct pf_state_peer *src, *dst;
4617 struct pf_state_key *sk;
4620 key.proto = IPPROTO_TCP;
4621 if (direction == PF_IN) { /* wire side, straight */
4622 PF_ACPY(&key.addr[0], pd->src, key.af);
4623 PF_ACPY(&key.addr[1], pd->dst, key.af);
4624 key.port[0] = th->th_sport;
4625 key.port[1] = th->th_dport;
4626 } else { /* stack side, reverse */
4627 PF_ACPY(&key.addr[1], pd->src, key.af);
4628 PF_ACPY(&key.addr[0], pd->dst, key.af);
4629 key.port[1] = th->th_sport;
4630 key.port[0] = th->th_dport;
4634 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
4636 STATE_LOOKUP(kif, &key, direction, *state, m);
4639 if (direction == (*state)->direction) {
4640 src = &(*state)->src;
4641 dst = &(*state)->dst;
4643 src = &(*state)->dst;
4644 dst = &(*state)->src;
4647 sk = (*state)->key[pd->didx];
4649 if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
4650 if (direction != (*state)->direction) {
4651 REASON_SET(reason, PFRES_SYNPROXY);
4652 return (PF_SYNPROXY_DROP);
4654 if (th->th_flags & TH_SYN) {
4655 if (ntohl(th->th_seq) != (*state)->src.seqlo) {
4656 REASON_SET(reason, PFRES_SYNPROXY);
4660 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4662 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
4664 pd->src, th->th_dport, th->th_sport,
4665 (*state)->src.seqhi, ntohl(th->th_seq) + 1,
4666 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1,
4668 REASON_SET(reason, PFRES_SYNPROXY);
4669 return (PF_SYNPROXY_DROP);
4670 } else if (!(th->th_flags & TH_ACK) ||
4671 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4672 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4673 REASON_SET(reason, PFRES_SYNPROXY);
4675 } else if ((*state)->src_node != NULL &&
4676 pf_src_connlimit(state)) {
4677 REASON_SET(reason, PFRES_SRCLIMIT);
4680 (*state)->src.state = PF_TCPS_PROXY_DST;
4682 if ((*state)->src.state == PF_TCPS_PROXY_DST) {
4683 if (direction == (*state)->direction) {
4684 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
4685 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4686 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4687 REASON_SET(reason, PFRES_SYNPROXY);
4690 (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
4691 if ((*state)->dst.seqhi == 1)
4692 (*state)->dst.seqhi = htonl(arc4random());
4694 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4696 pf_send_tcp((*state)->rule.ptr, pd->af,
4698 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4699 sk->port[pd->sidx], sk->port[pd->didx],
4700 (*state)->dst.seqhi, 0, TH_SYN, 0,
4701 (*state)->src.mss, 0, 0, (*state)->tag, NULL, NULL);
4702 REASON_SET(reason, PFRES_SYNPROXY);
4703 return (PF_SYNPROXY_DROP);
4704 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
4706 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
4707 REASON_SET(reason, PFRES_SYNPROXY);
4710 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
4711 (*state)->dst.seqlo = ntohl(th->th_seq);
4713 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4715 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
4717 pd->src, th->th_dport, th->th_sport,
4718 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
4719 TH_ACK, (*state)->src.max_win, 0, 0, 0,
4720 (*state)->tag, NULL, NULL);
4722 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4724 pf_send_tcp((*state)->rule.ptr, pd->af,
4726 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4727 sk->port[pd->sidx], sk->port[pd->didx],
4728 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
4729 TH_ACK, (*state)->dst.max_win, 0, 0, 1,
4731 (*state)->src.seqdiff = (*state)->dst.seqhi -
4732 (*state)->src.seqlo;
4733 (*state)->dst.seqdiff = (*state)->src.seqhi -
4734 (*state)->dst.seqlo;
4735 (*state)->src.seqhi = (*state)->src.seqlo +
4736 (*state)->dst.max_win;
4737 (*state)->dst.seqhi = (*state)->dst.seqlo +
4738 (*state)->src.max_win;
4739 (*state)->src.wscale = (*state)->dst.wscale = 0;
4740 (*state)->src.state = (*state)->dst.state =
4742 REASON_SET(reason, PFRES_SYNPROXY);
4743 return (PF_SYNPROXY_DROP);
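/*
 * Synproxy completion: once the destination has answered our proxied
 * SYN with a SYN|ACK, pf ACKs both sides and records seqdiff values so
 * the two independently chosen sequence spaces are translated into one
 * another for the rest of the connection, which is then tracked like
 * any other established TCP session.
 */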
4747 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
4748 dst->state >= TCPS_FIN_WAIT_2 &&
4749 src->state >= TCPS_FIN_WAIT_2) {
4751 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4753 if (pf_status.debug >= PF_DEBUG_MISC) {
4755 printf("pf: state reuse ");
4756 pf_print_state(*state);
4757 pf_print_flags(th->th_flags);
4760 /* XXX make sure it's the same direction ?? */
4761 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
4762 pf_unlink_state(*state);
4767 if ((*state)->state_flags & PFSTATE_SLOPPY) {
4768 if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP)
4771 if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason,
4772 &copyback) == PF_DROP)
4776 /* translate source/destination address, if necessary */
4777 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4778 struct pf_state_key *nk = (*state)->key[pd->didx];
4780 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4781 nk->port[pd->sidx] != th->th_sport)
4782 pf_change_ap(m, pd->src, &th->th_sport,
4783 pd->ip_sum, &th->th_sum, &nk->addr[pd->sidx],
4784 nk->port[pd->sidx], 0, pd->af);
4786 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4787 nk->port[pd->didx] != th->th_dport)
4788 pf_change_ap(m, pd->dst, &th->th_dport,
4789 pd->ip_sum, &th->th_sum, &nk->addr[pd->didx],
4790 nk->port[pd->didx], 0, pd->af);
4794 /* Copyback sequence modulation or stateful scrub changes if needed */
4797 m_copyback(m, off, sizeof(*th), (caddr_t)th);
4799 m_copyback(m, off, sizeof(*th), th);
4806 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
4807 struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4809 struct pf_state_peer *src, *dst;
4810 struct pf_state_key_cmp key;
4811 struct udphdr *uh = pd->hdr.udp;
4814 key.proto = IPPROTO_UDP;
4815 if (direction == PF_IN) { /* wire side, straight */
4816 PF_ACPY(&key.addr[0], pd->src, key.af);
4817 PF_ACPY(&key.addr[1], pd->dst, key.af);
4818 key.port[0] = uh->uh_sport;
4819 key.port[1] = uh->uh_dport;
4820 } else { /* stack side, reverse */
4821 PF_ACPY(&key.addr[1], pd->src, key.af);
4822 PF_ACPY(&key.addr[0], pd->dst, key.af);
4823 key.port[1] = uh->uh_sport;
4824 key.port[0] = uh->uh_dport;
4828 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
4830 STATE_LOOKUP(kif, &key, direction, *state, m);
4833 if (direction == (*state)->direction) {
4834 src = &(*state)->src;
4835 dst = &(*state)->dst;
4837 src = &(*state)->dst;
4838 dst = &(*state)->src;
4842 if (src->state < PFUDPS_SINGLE)
4843 src->state = PFUDPS_SINGLE;
4844 if (dst->state == PFUDPS_SINGLE)
4845 dst->state = PFUDPS_MULTIPLE;
4847 /* update expire time */
4848 (*state)->expire = time_second;
4849 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
4850 (*state)->timeout = PFTM_UDP_MULTIPLE;
4852 (*state)->timeout = PFTM_UDP_SINGLE;
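/*
 * UDP state machine: a peer is PFUDPS_SINGLE once it has sent its
 * first datagram and is promoted to PFUDPS_MULTIPLE when the other
 * side answers it.  Only when both peers have reached MULTIPLE (true
 * bidirectional traffic) does the state get the longer
 * PFTM_UDP_MULTIPLE timeout.
 */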
4854 /* translate source/destination address, if necessary */
4855 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4856 struct pf_state_key *nk = (*state)->key[pd->didx];
4858 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4859 nk->port[pd->sidx] != uh->uh_sport)
4860 pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum,
4861 &uh->uh_sum, &nk->addr[pd->sidx],
4862 nk->port[pd->sidx], 1, pd->af);
4864 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4865 nk->port[pd->didx] != uh->uh_dport)
4866 pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum,
4867 &uh->uh_sum, &nk->addr[pd->didx],
4868 nk->port[pd->didx], 1, pd->af);
4870 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
4872 m_copyback(m, off, sizeof(*uh), uh);
4880 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
4881 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
4883 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
4885 u_int16_t icmpid = 0, *icmpsum;
4887 u_int16_t icmpid, *icmpsum;
4891 struct pf_state_key_cmp key;
4893 switch (pd->proto) {
4896 icmptype = pd->hdr.icmp->icmp_type;
4897 icmpid = pd->hdr.icmp->icmp_id;
4898 icmpsum = &pd->hdr.icmp->icmp_cksum;
4900 if (icmptype == ICMP_UNREACH ||
4901 icmptype == ICMP_SOURCEQUENCH ||
4902 icmptype == ICMP_REDIRECT ||
4903 icmptype == ICMP_TIMXCEED ||
4904 icmptype == ICMP_PARAMPROB)
4909 case IPPROTO_ICMPV6:
4910 icmptype = pd->hdr.icmp6->icmp6_type;
4911 icmpid = pd->hdr.icmp6->icmp6_id;
4912 icmpsum = &pd->hdr.icmp6->icmp6_cksum;
4914 if (icmptype == ICMP6_DST_UNREACH ||
4915 icmptype == ICMP6_PACKET_TOO_BIG ||
4916 icmptype == ICMP6_TIME_EXCEEDED ||
4917 icmptype == ICMP6_PARAM_PROB)
4926 * ICMP query/reply message not related to a TCP/UDP packet.
4927 * Search for an ICMP state.
4930 key.proto = pd->proto;
4931 key.port[0] = key.port[1] = icmpid;
4932 if (direction == PF_IN) { /* wire side, straight */
4933 PF_ACPY(&key.addr[0], pd->src, key.af);
4934 PF_ACPY(&key.addr[1], pd->dst, key.af);
4935 } else { /* stack side, reverse */
4936 PF_ACPY(&key.addr[1], pd->src, key.af);
4937 PF_ACPY(&key.addr[0], pd->dst, key.af);
4941 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
4943 STATE_LOOKUP(kif, &key, direction, *state, m);
4946 (*state)->expire = time_second;
4947 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
4949 /* translate source/destination address, if necessary */
4950 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4951 struct pf_state_key *nk = (*state)->key[pd->didx];
4956 if (PF_ANEQ(pd->src,
4957 &nk->addr[pd->sidx], AF_INET))
4958 pf_change_a(&saddr->v4.s_addr,
4960 nk->addr[pd->sidx].v4.s_addr, 0);
4962 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
4964 pf_change_a(&daddr->v4.s_addr,
4966 nk->addr[pd->didx].v4.s_addr, 0);
4969 pd->hdr.icmp->icmp_id) {
4970 pd->hdr.icmp->icmp_cksum =
4972 pd->hdr.icmp->icmp_cksum, icmpid,
4973 nk->port[pd->sidx], 0);
4974 pd->hdr.icmp->icmp_id =
4978 m_copyback(m, off, ICMP_MINLEN,
4987 if (PF_ANEQ(pd->src,
4988 &nk->addr[pd->sidx], AF_INET6))
4990 &pd->hdr.icmp6->icmp6_cksum,
4991 &nk->addr[pd->sidx], 0);
4993 if (PF_ANEQ(pd->dst,
4994 &nk->addr[pd->didx], AF_INET6))
4996 &pd->hdr.icmp6->icmp6_cksum,
4997 &nk->addr[pd->didx], 0);
5000 sizeof(struct icmp6_hdr),
5013 * ICMP error message in response to a TCP/UDP packet.
5014 * Extract the inner TCP/UDP header and search for that state.
5017 struct pf_pdesc pd2;
5019 bzero(&pd2, sizeof pd2);
5025 struct ip6_hdr h2_6;
5037 /* Payload packet is from the opposite direction. */
5038 pd2.sidx = (direction == PF_IN) ? 1 : 0;
5039 pd2.didx = (direction == PF_IN) ? 0 : 1;
5043 /* offset of h2 in mbuf chain */
5044 ipoff2 = off + ICMP_MINLEN;
5046 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
5047 NULL, reason, pd2.af)) {
5048 DPFPRINTF(PF_DEBUG_MISC,
5049 ("pf: ICMP error message too short "
5054 * ICMP error messages don't refer to non-first fragments.
5057 if (h2.ip_off & htons(IP_OFFMASK)) {
5058 REASON_SET(reason, PFRES_FRAG);
5062 /* offset of protocol header that follows h2 */
5063 off2 = ipoff2 + (h2.ip_hl << 2);
5065 pd2.proto = h2.ip_p;
5066 pd2.src = (struct pf_addr *)&h2.ip_src;
5067 pd2.dst = (struct pf_addr *)&h2.ip_dst;
5068 pd2.ip_sum = &h2.ip_sum;
5073 ipoff2 = off + sizeof(struct icmp6_hdr);
5075 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
5076 NULL, reason, pd2.af)) {
5077 DPFPRINTF(PF_DEBUG_MISC,
5078 ("pf: ICMP error message too short "
5082 pd2.proto = h2_6.ip6_nxt;
5083 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
5084 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
5086 off2 = ipoff2 + sizeof(h2_6);
5088 switch (pd2.proto) {
5089 case IPPROTO_FRAGMENT:
5091 * ICMPv6 error messages for
5092 * non-first fragments
5094 REASON_SET(reason, PFRES_FRAG);
5097 case IPPROTO_HOPOPTS:
5098 case IPPROTO_ROUTING:
5099 case IPPROTO_DSTOPTS: {
5100 /* get next header and header length */
5101 struct ip6_ext opt6;
5103 if (!pf_pull_hdr(m, off2, &opt6,
5104 sizeof(opt6), NULL, reason,
5106 DPFPRINTF(PF_DEBUG_MISC,
5107 ("pf: ICMPv6 short opt\n"));
5110 if (pd2.proto == IPPROTO_AH)
5111 off2 += (opt6.ip6e_len + 2) * 4;
5113 off2 += (opt6.ip6e_len + 1) * 8;
5114 pd2.proto = opt6.ip6e_nxt;
5115 /* goto the next header */
5122 } while (!terminal);
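/*
 * For ICMPv6 errors the quoted packet may carry extension headers, so
 * the loop above walks the header chain (hop-by-hop, routing and
 * destination options; AH advances in 4-byte units, the others in
 * 8-byte units) until it reaches the transport protocol.  A quoted
 * fragment header causes the error to be dropped with PFRES_FRAG, as
 * with the IPv4 non-first-fragment check earlier.
 */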
5127 switch (pd2.proto) {
5131 struct pf_state_peer *src, *dst;
5136 * Only the first 8 bytes of the TCP header can be
5137 * expected. Don't access any TCP header fields after
5138 * th_seq; an ackskew test is not possible.
5140 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
5142 DPFPRINTF(PF_DEBUG_MISC,
5143 ("pf: ICMP error message too short "
5149 key.proto = IPPROTO_TCP;
5150 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5151 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5152 key.port[pd2.sidx] = th.th_sport;
5153 key.port[pd2.didx] = th.th_dport;
5156 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
5158 STATE_LOOKUP(kif, &key, direction, *state, m);
5161 if (direction == (*state)->direction) {
5162 src = &(*state)->dst;
5163 dst = &(*state)->src;
5165 src = &(*state)->src;
5166 dst = &(*state)->dst;
5169 if (src->wscale && dst->wscale)
5170 dws = dst->wscale & PF_WSCALE_MASK;
5174 /* Demodulate sequence number */
5175 seq = ntohl(th.th_seq) - src->seqdiff;
5177 pf_change_a(&th.th_seq, icmpsum,
5182 if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
5183 (!SEQ_GEQ(src->seqhi, seq) ||
5184 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
5186 if (V_pf_status.debug >= PF_DEBUG_MISC) {
5188 if (pf_status.debug >= PF_DEBUG_MISC) {
5190 printf("pf: BAD ICMP %d:%d ",
5191 icmptype, pd->hdr.icmp->icmp_code);
5192 pf_print_host(pd->src, 0, pd->af);
5194 pf_print_host(pd->dst, 0, pd->af);
5196 pf_print_state(*state);
5197 printf(" seq=%u\n", seq);
5199 REASON_SET(reason, PFRES_BADSTATE);
5203 if (V_pf_status.debug >= PF_DEBUG_MISC) {
5205 if (pf_status.debug >= PF_DEBUG_MISC) {
5207 printf("pf: OK ICMP %d:%d ",
5208 icmptype, pd->hdr.icmp->icmp_code);
5209 pf_print_host(pd->src, 0, pd->af);
5211 pf_print_host(pd->dst, 0, pd->af);
5213 pf_print_state(*state);
5214 printf(" seq=%u\n", seq);
5218 /* translate source/destination address, if necessary */
5219 if ((*state)->key[PF_SK_WIRE] !=
5220 (*state)->key[PF_SK_STACK]) {
5221 struct pf_state_key *nk =
5222 (*state)->key[pd->didx];
5224 if (PF_ANEQ(pd2.src,
5225 &nk->addr[pd2.sidx], pd2.af) ||
5226 nk->port[pd2.sidx] != th.th_sport)
5227 pf_change_icmp(pd2.src, &th.th_sport,
5228 daddr, &nk->addr[pd2.sidx],
5229 nk->port[pd2.sidx], NULL,
5230 pd2.ip_sum, icmpsum,
5231 pd->ip_sum, 0, pd2.af);
5233 if (PF_ANEQ(pd2.dst,
5234 &nk->addr[pd2.didx], pd2.af) ||
5235 nk->port[pd2.didx] != th.th_dport)
5236 pf_change_icmp(pd2.dst, &th.th_dport,
5237 NULL, /* XXX Inbound NAT? */
5238 &nk->addr[pd2.didx],
5239 nk->port[pd2.didx], NULL,
5240 pd2.ip_sum, icmpsum,
5241 pd->ip_sum, 0, pd2.af);
5249 m_copyback(m, off, ICMP_MINLEN,
5254 m_copyback(m, ipoff2, sizeof(h2),
5264 sizeof(struct icmp6_hdr),
5269 m_copyback(m, ipoff2, sizeof(h2_6),
5278 m_copyback(m, off2, 8, (caddr_t)&th);
5280 m_copyback(m, off2, 8, &th);
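/*
 * When the quoted TCP header was rewritten, everything touched is
 * written back in layers: the outer ICMP(v6) header whose checksum
 * pf_change_icmp() adjusted, the quoted inner IP/IPv6 header, and the
 * first 8 bytes of the quoted TCP header.
 */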
5290 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
5291 NULL, reason, pd2.af)) {
5292 DPFPRINTF(PF_DEBUG_MISC,
5293 ("pf: ICMP error message too short "
5299 key.proto = IPPROTO_UDP;
5300 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5301 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5302 key.port[pd2.sidx] = uh.uh_sport;
5303 key.port[pd2.didx] = uh.uh_dport;
5306 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
5308 STATE_LOOKUP(kif, &key, direction, *state, m);
5311 /* translate source/destination address, if necessary */
5312 if ((*state)->key[PF_SK_WIRE] !=
5313 (*state)->key[PF_SK_STACK]) {
5314 struct pf_state_key *nk =
5315 (*state)->key[pd->didx];
5317 if (PF_ANEQ(pd2.src,
5318 &nk->addr[pd2.sidx], pd2.af) ||
5319 nk->port[pd2.sidx] != uh.uh_sport)
5320 pf_change_icmp(pd2.src, &uh.uh_sport,
5321 daddr, &nk->addr[pd2.sidx],
5322 nk->port[pd2.sidx], &uh.uh_sum,
5323 pd2.ip_sum, icmpsum,
5324 pd->ip_sum, 1, pd2.af);
5326 if (PF_ANEQ(pd2.dst,
5327 &nk->addr[pd2.didx], pd2.af) ||
5328 nk->port[pd2.didx] != uh.uh_dport)
5329 pf_change_icmp(pd2.dst, &uh.uh_dport,
5330 NULL, /* XXX Inbound NAT? */
5331 &nk->addr[pd2.didx],
5332 nk->port[pd2.didx], &uh.uh_sum,
5333 pd2.ip_sum, icmpsum,
5334 pd->ip_sum, 1, pd2.af);
5339 m_copyback(m, off, ICMP_MINLEN,
5345 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5347 m_copyback(m, ipoff2, sizeof(h2), &h2);
5354 sizeof(struct icmp6_hdr),
5359 m_copyback(m, ipoff2, sizeof(h2_6),
5368 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
5370 m_copyback(m, off2, sizeof(uh), &uh);
5377 case IPPROTO_ICMP: {
5380 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
5381 NULL, reason, pd2.af)) {
5382 DPFPRINTF(PF_DEBUG_MISC,
5383 ("pf: ICMP error message too short i"
5389 key.proto = IPPROTO_ICMP;
5390 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5391 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5392 key.port[0] = key.port[1] = iih.icmp_id;
5395 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
5397 STATE_LOOKUP(kif, &key, direction, *state, m);
5400 /* translate source/destination address, if necessary */
5401 if ((*state)->key[PF_SK_WIRE] !=
5402 (*state)->key[PF_SK_STACK]) {
5403 struct pf_state_key *nk =
5404 (*state)->key[pd->didx];
5406 if (PF_ANEQ(pd2.src,
5407 &nk->addr[pd2.sidx], pd2.af) ||
5408 nk->port[pd2.sidx] != iih.icmp_id)
5409 pf_change_icmp(pd2.src, &iih.icmp_id,
5410 daddr, &nk->addr[pd2.sidx],
5411 nk->port[pd2.sidx], NULL,
5412 pd2.ip_sum, icmpsum,
5413 pd->ip_sum, 0, AF_INET);
5415 if (PF_ANEQ(pd2.dst,
5416 &nk->addr[pd2.didx], pd2.af) ||
5417 nk->port[pd2.didx] != iih.icmp_id)
5418 pf_change_icmp(pd2.dst, &iih.icmp_id,
5419 NULL, /* XXX Inbound NAT? */
5420 &nk->addr[pd2.didx],
5421 nk->port[pd2.didx], NULL,
5422 pd2.ip_sum, icmpsum,
5423 pd->ip_sum, 0, AF_INET);
5426 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
5427 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5428 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
5430 m_copyback(m, off, ICMP_MINLEN, pd->hdr.icmp);
5431 m_copyback(m, ipoff2, sizeof(h2), &h2);
5432 m_copyback(m, off2, ICMP_MINLEN, &iih);
5440 case IPPROTO_ICMPV6: {
5441 struct icmp6_hdr iih;
5443 if (!pf_pull_hdr(m, off2, &iih,
5444 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
5445 DPFPRINTF(PF_DEBUG_MISC,
5446 ("pf: ICMP error message too short "
5452 key.proto = IPPROTO_ICMPV6;
5453 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5454 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5455 key.port[0] = key.port[1] = iih.icmp6_id;
5458 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
5460 STATE_LOOKUP(kif, &key, direction, *state, m);
5463 /* translate source/destination address, if necessary */
5464 if ((*state)->key[PF_SK_WIRE] !=
5465 (*state)->key[PF_SK_STACK]) {
5466 struct pf_state_key *nk =
5467 (*state)->key[pd->didx];
5469 if (PF_ANEQ(pd2.src,
5470 &nk->addr[pd2.sidx], pd2.af) ||
5471 nk->port[pd2.sidx] != iih.icmp6_id)
5472 pf_change_icmp(pd2.src, &iih.icmp6_id,
5473 daddr, &nk->addr[pd2.sidx],
5474 nk->port[pd2.sidx], NULL,
5475 pd2.ip_sum, icmpsum,
5476 pd->ip_sum, 0, AF_INET6);
5478 if (PF_ANEQ(pd2.dst,
5479 &nk->addr[pd2.didx], pd2.af) ||
5480 nk->port[pd2.didx] != iih.icmp6_id)
5481 pf_change_icmp(pd2.dst, &iih.icmp6_id,
5482 NULL, /* XXX Inbound NAT? */
5483 &nk->addr[pd2.didx],
5484 nk->port[pd2.didx], NULL,
5485 pd2.ip_sum, icmpsum,
5486 pd->ip_sum, 0, AF_INET6);
5489 m_copyback(m, off, sizeof(struct icmp6_hdr),
5490 (caddr_t)pd->hdr.icmp6);
5491 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
5492 m_copyback(m, off2, sizeof(struct icmp6_hdr),
5495 m_copyback(m, off, sizeof(struct icmp6_hdr),
5497 m_copyback(m, ipoff2, sizeof(h2_6), &h2_6);
5498 m_copyback(m, off2, sizeof(struct icmp6_hdr),
5508 key.proto = pd2.proto;
5509 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5510 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5511 key.port[0] = key.port[1] = 0;
5514 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
5516 STATE_LOOKUP(kif, &key, direction, *state, m);
5519 /* translate source/destination address, if necessary */
5520 if ((*state)->key[PF_SK_WIRE] !=
5521 (*state)->key[PF_SK_STACK]) {
5522 struct pf_state_key *nk =
5523 (*state)->key[pd->didx];
5525 if (PF_ANEQ(pd2.src,
5526 &nk->addr[pd2.sidx], pd2.af))
5527 pf_change_icmp(pd2.src, NULL, daddr,
5528 &nk->addr[pd2.sidx], 0, NULL,
5529 pd2.ip_sum, icmpsum,
5530 pd->ip_sum, 0, pd2.af);
5532 if (PF_ANEQ(pd2.dst,
5533 &nk->addr[pd2.didx], pd2.af))
5534 pf_change_icmp(pd2.dst, NULL,
5535 NULL, /* XXX Inbound NAT? */
5536 &nk->addr[pd2.didx], 0, NULL,
5537 pd2.ip_sum, icmpsum,
5538 pd->ip_sum, 0, pd2.af);
5544 m_copyback(m, off, ICMP_MINLEN,
5545 (caddr_t)pd->hdr.icmp);
5546 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5548 m_copyback(m, off, ICMP_MINLEN,
5550 m_copyback(m, ipoff2, sizeof(h2), &h2);
5557 sizeof(struct icmp6_hdr),
5562 m_copyback(m, ipoff2, sizeof(h2_6),
5579 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
5580 struct mbuf *m, struct pf_pdesc *pd)
5582 struct pf_state_peer *src, *dst;
5583 struct pf_state_key_cmp key;
5586 key.proto = pd->proto;
5587 if (direction == PF_IN) {
5588 PF_ACPY(&key.addr[0], pd->src, key.af);
5589 PF_ACPY(&key.addr[1], pd->dst, key.af);
5590 key.port[0] = key.port[1] = 0;
5592 PF_ACPY(&key.addr[1], pd->src, key.af);
5593 PF_ACPY(&key.addr[0], pd->dst, key.af);
5594 key.port[1] = key.port[0] = 0;
5598 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
5600 STATE_LOOKUP(kif, &key, direction, *state, m);
5603 if (direction == (*state)->direction) {
5604 src = &(*state)->src;
5605 dst = &(*state)->dst;
5607 src = &(*state)->dst;
5608 dst = &(*state)->src;
5612 if (src->state < PFOTHERS_SINGLE)
5613 src->state = PFOTHERS_SINGLE;
5614 if (dst->state == PFOTHERS_SINGLE)
5615 dst->state = PFOTHERS_MULTIPLE;
5617 /* update expire time */
5618 (*state)->expire = time_second;
5619 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
5620 (*state)->timeout = PFTM_OTHER_MULTIPLE;
5622 (*state)->timeout = PFTM_OTHER_SINGLE;
5624 /* translate source/destination address, if necessary */
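/*
 * The wire-side and stack-side keys differ only when a translation rule
 * rewrote the packet when this state was created.  In that case nk,
 * selected via pd->didx, holds the addresses the packet has to carry from
 * here on, and they are written into the header in place below.
 */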
5625 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5626 struct pf_state_key *nk = (*state)->key[pd->didx];
5629 KASSERT(nk, ("%s: nk is null", __FUNCTION__));
5630 KASSERT(pd, ("%s: pd is null", __FUNCTION__));
5631 KASSERT(pd->src, ("%s: pd->src is null", __FUNCTION__));
5632 KASSERT(pd->dst, ("%s: pd->dst is null", __FUNCTION__));
5642 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5643 pf_change_a(&pd->src->v4.s_addr,
5645 nk->addr[pd->sidx].v4.s_addr,
5649 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5650 pf_change_a(&pd->dst->v4.s_addr,
5652 nk->addr[pd->didx].v4.s_addr,
5659 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6))
5660 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
5662 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6))
5663 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
5671 * ipoff and off are measured from the start of the mbuf chain.
5672 * h must be at "ipoff" on the mbuf chain.
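/*
 * Typical use, as in pf_test() further down: pull a private copy of the
 * transport header and let pf_pull_hdr() set action/reason on failure.
 *
 *	struct tcphdr th;
 *
 *	if (!pf_pull_hdr(m, off, &th, sizeof(th),
 *	    &action, &reason, AF_INET)) {
 *		log = action != PF_PASS;
 *		...
 *	}
 */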
5675 pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
5676 u_short *actionp, u_short *reasonp, sa_family_t af)
5681 struct ip *h = mtod(m, struct ip *);
5682 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
5686 ACTION_SET(actionp, PF_PASS);
5688 ACTION_SET(actionp, PF_DROP);
5689 REASON_SET(reasonp, PFRES_FRAG);
5693 if (m->m_pkthdr.len < off + len ||
5694 ntohs(h->ip_len) < off + len) {
5695 ACTION_SET(actionp, PF_DROP);
5696 REASON_SET(reasonp, PFRES_SHORT);
5704 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
5706 if (m->m_pkthdr.len < off + len ||
5707 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
5708 (unsigned)(off + len)) {
5709 ACTION_SET(actionp, PF_DROP);
5710 REASON_SET(reasonp, PFRES_SHORT);
5717 m_copydata(m, off, len, p);
5722 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif,
5727 struct radix_node_head *rnh;
5730 struct sockaddr_in *dst;
5734 extern int ipmultipath;
5738 extern int ip6_multipath;
5740 struct sockaddr_in6 *dst6;
5741 struct route_in6 ro;
5745 struct radix_node *rn;
5752 /* XXX: stick to table 0 for now */
5753 rnh = rt_tables_get_rnh(0, af);
5754 if (rnh != NULL && rn_mpath_capable(rnh))
5758 bzero(&ro, sizeof(ro));
5761 dst = satosin(&ro.ro_dst);
5762 dst->sin_family = AF_INET;
5763 dst->sin_len = sizeof(*dst);
5764 dst->sin_addr = addr->v4;
5773 * Skip check for addresses with embedded interface scope,
5774 * as they would always match anyway.
5776 if (IN6_IS_SCOPE_EMBED(&addr->v6))
5778 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
5779 dst6->sin6_family = AF_INET6;
5780 dst6->sin6_len = sizeof(*dst6);
5781 dst6->sin6_addr = addr->v6;
5792 /* Skip checks for ipsec interfaces */
5793 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
5800 in6_rtalloc_ign(&ro, 0, rtableid);
5805 in_rtalloc_ign((struct route *)&ro, 0, rtableid);
5809 rtalloc_ign((struct route *)&ro, 0); /* No/default FIB. */
5812 #else /* ! __FreeBSD__ */
5813 rtalloc_noclone((struct route *)&ro, NO_CLONING);
5816 if (ro.ro_rt != NULL) {
5817 /* No interface given, this is a no-route check */
5821 if (kif->pfik_ifp == NULL) {
5826 /* Perform uRPF check if passed input interface */
5828 rn = (struct radix_node *)ro.ro_rt;
5830 rt = (struct rtentry *)rn;
5831 #ifndef __FreeBSD__ /* CARPDEV */
5832 if (rt->rt_ifp->if_type == IFT_CARP)
5833 ifp = rt->rt_ifp->if_carpdev;
5838 if (kif->pfik_ifp == ifp)
5842 rn = rn_mpath_next(rn);
5845 rn = rn_mpath_next(rn, 0);
5847 } while (check_mpath == 1 && rn != NULL && ret == 0);
5851 if (ro.ro_rt != NULL)
5857 pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw,
5860 struct sockaddr_in *dst;
5862 struct sockaddr_in6 *dst6;
5863 struct route_in6 ro;
5869 bzero(&ro, sizeof(ro));
5872 dst = satosin(&ro.ro_dst);
5873 dst->sin_family = AF_INET;
5874 dst->sin_len = sizeof(*dst);
5875 dst->sin_addr = addr->v4;
5879 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
5880 dst6->sin6_family = AF_INET6;
5881 dst6->sin6_len = sizeof(*dst6);
5882 dst6->sin6_addr = addr->v6;
5893 in6_rtalloc_ign(&ro, 0, rtableid);
5898 in_rtalloc_ign((struct route *)&ro, 0, rtableid);
5902 rtalloc_ign((struct route *)&ro, 0);
5905 #else /* ! __FreeBSD__ */
5906 rtalloc_noclone((struct route *)&ro, NO_CLONING);
5909 if (ro.ro_rt != NULL) {
5911 /* XXX_IMPORT: later */
5913 if (ro.ro_rt->rt_labelid == aw->v.rtlabel)
5924 pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5925 struct pf_state *s, struct pf_pdesc *pd)
5927 struct mbuf *m0, *m1;
5928 struct route iproute;
5929 struct route *ro = NULL;
5930 struct sockaddr_in *dst;
5932 struct ifnet *ifp = NULL;
5933 struct pf_addr naddr;
5934 struct pf_src_node *sn = NULL;
5943 if (m == NULL || *m == NULL || r == NULL ||
5944 (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
5945 panic("pf_route: invalid parameters");
5948 if (pd->pf_mtag->routed++ > 3) {
5950 if ((*m)->m_pkthdr.pf.routed++ > 3) {
5957 if (r->rt == PF_DUPTO) {
5959 if ((m0 = m_dup(*m, M_DONTWAIT)) == NULL)
5961 if ((m0 = m_copym2(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)
5965 if ((r->rt == PF_REPLYTO) == (r->direction == dir))
5970 if (m0->m_len < sizeof(struct ip)) {
5971 DPFPRINTF(PF_DEBUG_URGENT,
5972 ("pf_route: m0->m_len < sizeof(struct ip)\n"));
5976 ip = mtod(m0, struct ip *);
5979 bzero((caddr_t)ro, sizeof(*ro));
5980 dst = satosin(&ro->ro_dst);
5981 dst->sin_family = AF_INET;
5982 dst->sin_len = sizeof(*dst);
5983 dst->sin_addr = ip->ip_dst;
5985 if (r->rt == PF_FASTROUTE) {
5987 in_rtalloc_ign(ro, 0, M_GETFIB(m0));
5991 if (ro->ro_rt == 0) {
5993 KMOD_IPSTAT_INC(ips_noroute);
5995 ipstat.ips_noroute++;
6000 ifp = ro->ro_rt->rt_ifp;
6001 ro->ro_rt->rt_use++;
6003 if (ro->ro_rt->rt_flags & RTF_GATEWAY)
6004 dst = satosin(ro->ro_rt->rt_gateway);
6006 if (TAILQ_EMPTY(&r->rpool.list)) {
6007 DPFPRINTF(PF_DEBUG_URGENT,
6008 ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n"));
6012 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
6014 if (!PF_AZERO(&naddr, AF_INET))
6015 dst->sin_addr.s_addr = naddr.v4.s_addr;
6016 ifp = r->rpool.cur->kif ?
6017 r->rpool.cur->kif->pfik_ifp : NULL;
6019 if (!PF_AZERO(&s->rt_addr, AF_INET))
6020 dst->sin_addr.s_addr =
6021 s->rt_addr.v4.s_addr;
6022 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
6031 if (pf_test(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) {
6034 } else if (m0 == NULL) {
6040 if (pf_test(PF_OUT, ifp, &m0, NULL) != PF_PASS)
6042 else if (m0 == NULL)
6045 if (m0->m_len < sizeof(struct ip)) {
6046 DPFPRINTF(PF_DEBUG_URGENT,
6047 ("pf_route: m0->m_len < sizeof(struct ip)\n"));
6050 ip = mtod(m0, struct ip *);
6054 /* Copied from FreeBSD 5.1-CURRENT ip_output. */
6055 m0->m_pkthdr.csum_flags |= CSUM_IP;
6056 sw_csum = m0->m_pkthdr.csum_flags & ~ifp->if_hwassist;
6057 if (sw_csum & CSUM_DELAY_DATA) {
6059 * XXX: in_delayed_cksum assumes HBO for ip->ip_len (at least)
6062 NTOHS(ip->ip_off); /* XXX: needed? */
6063 in_delayed_cksum(m0);
6066 sw_csum &= ~CSUM_DELAY_DATA;
6068 m0->m_pkthdr.csum_flags &= ifp->if_hwassist;
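/*
 * At this point the delayed data checksum has been computed in software
 * whenever the interface cannot offload it, csum_flags keeps only work
 * the hardware can finish, and any CSUM_DELAY_IP left in sw_csum is
 * taken care of below.
 */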
6070 if (ntohs(ip->ip_len) <= ifp->if_mtu ||
6071 (ifp->if_hwassist & CSUM_FRAGMENT &&
6072 ((ip->ip_off & htons(IP_DF)) == 0))) {
6074 * ip->ip_len = htons(ip->ip_len);
6075 * ip->ip_off = htons(ip->ip_off);
6078 if (sw_csum & CSUM_DELAY_IP) {
6080 if (ip->ip_v == IPVERSION &&
6081 (ip->ip_hl << 2) == sizeof(*ip)) {
6082 ip->ip_sum = in_cksum_hdr(ip);
6084 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
6088 error = (*ifp->if_output)(ifp, m0, sintosa(dst), ro);
6093 /* Copied from ip_output. */
6096 * If deferred crypto processing is needed, check that the
6097 * interface supports it.
6099 if ((mtag = m_tag_find(m0, PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED, NULL))
6100 != NULL && (ifp->if_capabilities & IFCAP_IPSEC) == 0) {
6101 /* Notify IPsec to do its own crypto. */
6102 ipsp_skipcrypto_unmark((struct tdb_ident *)(mtag + 1));
6107 /* Catch routing changes wrt. hardware checksumming for TCP or UDP. */
6108 if (m0->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT) {
6109 if (!(ifp->if_capabilities & IFCAP_CSUM_TCPv4) ||
6110 ifp->if_bridge != NULL) {
6111 in_delayed_cksum(m0);
6112 m0->m_pkthdr.csum_flags &= ~M_TCPV4_CSUM_OUT; /* Clr */
6114 } else if (m0->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT) {
6115 if (!(ifp->if_capabilities & IFCAP_CSUM_UDPv4) ||
6116 ifp->if_bridge != NULL) {
6117 in_delayed_cksum(m0);
6118 m0->m_pkthdr.csum_flags &= ~M_UDPV4_CSUM_OUT; /* Clr */
6122 if (ntohs(ip->ip_len) <= ifp->if_mtu) {
6124 if ((ifp->if_capabilities & IFCAP_CSUM_IPv4) &&
6125 ifp->if_bridge == NULL) {
6126 m0->m_pkthdr.csum_flags |= M_IPV4_CSUM_OUT;
6128 KMOD_IPSTAT_INC(ips_outhwcsum);
6130 ipstat.ips_outhwcsum++;
6133 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
6134 /* Update relevant hardware checksum stats for TCP/UDP */
6135 if (m0->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
6136 KMOD_TCPSTAT_INC(tcps_outhwcsum);
6137 else if (m0->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
6138 KMOD_UDPSTAT_INC(udps_outhwcsum);
6139 error = (*ifp->if_output)(ifp, m0, sintosa(dst), NULL);
6145 * Too large for interface; fragment if possible.
6146 * Must be able to put at least 8 bytes per fragment.
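/*
 * When the sender set IP_DF we must not fragment either; unless this is
 * a dup-to copy, an ICMP "fragmentation needed" error carrying the
 * interface MTU is generated so path MTU discovery still works.
 */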
6148 if (ip->ip_off & htons(IP_DF)) {
6150 KMOD_IPSTAT_INC(ips_cantfrag);
6152 ipstat.ips_cantfrag++;
6154 if (r->rt != PF_DUPTO) {
6156 /* icmp_error() expects host byte ordering */
6160 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
6164 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
6175 * XXX: reusing ip_fragment() is cheaper + less error prone than our own function
6179 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist, sw_csum);
6181 error = ip_fragment(m0, ifp, ifp->if_mtu);
6184 #ifndef __FreeBSD__ /* ip_fragment does not do m_freem() on FreeBSD */
6190 for (m0 = m1; m0; m0 = m1) {
6196 error = (*ifp->if_output)(ifp, m0, sintosa(dst),
6202 error = (*ifp->if_output)(ifp, m0, sintosa(dst),
6211 KMOD_IPSTAT_INC(ips_fragmented);
6213 ipstat.ips_fragmented++;
6217 if (r->rt != PF_DUPTO)
6219 if (ro == &iproute && ro->ro_rt)
6231 pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
6232 struct pf_state *s, struct pf_pdesc *pd)
6235 struct route_in6 ip6route;
6236 struct route_in6 *ro;
6237 struct sockaddr_in6 *dst;
6238 struct ip6_hdr *ip6;
6239 struct ifnet *ifp = NULL;
6240 struct pf_addr naddr;
6241 struct pf_src_node *sn = NULL;
6243 if (m == NULL || *m == NULL || r == NULL ||
6244 (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
6245 panic("pf_route6: invalid parameters");
6248 if (pd->pf_mtag->routed++ > 3) {
6250 if ((*m)->m_pkthdr.pf.routed++ > 3) {
6257 if (r->rt == PF_DUPTO) {
6259 if ((m0 = m_dup(*m, M_DONTWAIT)) == NULL)
6261 if ((m0 = m_copym2(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)
6265 if ((r->rt == PF_REPLYTO) == (r->direction == dir))
6270 if (m0->m_len < sizeof(struct ip6_hdr)) {
6271 DPFPRINTF(PF_DEBUG_URGENT,
6272 ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n"));
6275 ip6 = mtod(m0, struct ip6_hdr *);
6278 bzero((caddr_t)ro, sizeof(*ro));
6279 dst = (struct sockaddr_in6 *)&ro->ro_dst;
6280 dst->sin6_family = AF_INET6;
6281 dst->sin6_len = sizeof(*dst);
6282 dst->sin6_addr = ip6->ip6_dst;
6284 /* Cheat. XXX why only in the v6 case??? */
6285 if (r->rt == PF_FASTROUTE) {
6287 m0->m_flags |= M_SKIP_FIREWALL;
6289 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
6291 m0->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
6292 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
6297 if (TAILQ_EMPTY(&r->rpool.list)) {
6298 DPFPRINTF(PF_DEBUG_URGENT,
6299 ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n"));
6303 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
6305 if (!PF_AZERO(&naddr, AF_INET6))
6306 PF_ACPY((struct pf_addr *)&dst->sin6_addr,
6308 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
6310 if (!PF_AZERO(&s->rt_addr, AF_INET6))
6311 PF_ACPY((struct pf_addr *)&dst->sin6_addr,
6312 &s->rt_addr, AF_INET6);
6313 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
6321 if (pf_test6(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) {
6324 } else if (m0 == NULL) {
6330 if (pf_test6(PF_OUT, ifp, &m0, NULL) != PF_PASS)
6332 else if (m0 == NULL)
6335 if (m0->m_len < sizeof(struct ip6_hdr)) {
6336 DPFPRINTF(PF_DEBUG_URGENT,
6337 ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n"));
6340 ip6 = mtod(m0, struct ip6_hdr *);
6343 if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA_IPV6 &
6344 ~ifp->if_hwassist) {
6345 uint32_t plen = m0->m_pkthdr.len - sizeof(*ip6);
6346 in6_delayed_cksum(m0, plen, sizeof(struct ip6_hdr));
6347 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA_IPV6;
6351 * If the packet is too large for the outgoing interface,
6352 * send back an icmp6 error.
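/*
 * IPv6 never fragments in transit: packets that fit go out via
 * nd6_output(), oversized ones are bounced with ICMP6_PACKET_TOO_BIG and
 * the link MTU (again skipped for dup-to copies), leaving fragmentation
 * to the sender.
 */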
6354 if (IN6_IS_SCOPE_EMBED(&dst->sin6_addr))
6355 dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
6356 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) {
6360 nd6_output(ifp, ifp, m0, dst, NULL);
6365 in6_ifstat_inc(ifp, ifs6_in_toobig);
6367 if (r->rt != PF_DUPTO) {
6369 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
6373 if (r->rt != PF_DUPTO)
6374 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
6381 if (r->rt != PF_DUPTO)
6393 * FreeBSD supports cksum offloads for the following drivers:
6394 * em(4), fxp(4), ixgb(4), lge(4), ndis(4), nge(4), re(4),
6395 * ti(4), txp(4), xl(4)
6397 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR :
6398 * network driver performed cksum including pseudo header, need to verify
6401 * network driver performed cksum, needs additional pseudo header
6402 * cksum computation with partial csum_data (i.e. lack of H/W support for
6403 * pseudo header, for instance hme(4), sk(4) and possibly gem(4))
6405 * After validating the cksum of the packet, set both flags CSUM_DATA_VALID and
6406 * CSUM_PSEUDO_HDR in order to avoid recomputation of the cksum in upper
6408 * Also, set csum_data to 0xffff so the upper layer treats the cksum as already verified.
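/*
 * Example for the CSUM_DATA_VALID-only case below (TCP over IPv4): the
 * driver left the one's complement sum of the segment in csum_data, and
 * the pseudo header still has to be folded in:
 *
 *	sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
 *	    htonl((u_short)len + m->m_pkthdr.csum_data + IPPROTO_TCP));
 *
 * A sum whose complement is zero (sum == 0xffff) means the checksum
 * verified; anything else is counted as a bad checksum.
 */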
6411 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af)
6417 if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
6419 if (m->m_pkthdr.len < off + len)
6424 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
6425 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
6426 sum = m->m_pkthdr.csum_data;
6428 ip = mtod(m, struct ip *);
6429 sum = in_pseudo(ip->ip_src.s_addr,
6430 ip->ip_dst.s_addr, htonl((u_short)len +
6431 m->m_pkthdr.csum_data + IPPROTO_TCP));
6438 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
6439 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
6440 sum = m->m_pkthdr.csum_data;
6442 ip = mtod(m, struct ip *);
6443 sum = in_pseudo(ip->ip_src.s_addr,
6444 ip->ip_dst.s_addr, htonl((u_short)len +
6445 m->m_pkthdr.csum_data + IPPROTO_UDP));
6453 case IPPROTO_ICMPV6:
6463 if (p == IPPROTO_ICMP) {
6468 sum = in_cksum(m, len);
6472 if (m->m_len < sizeof(struct ip))
6474 sum = in4_cksum(m, p, off, len);
6479 if (m->m_len < sizeof(struct ip6_hdr))
6481 sum = in6_cksum(m, p, off, len);
6492 KMOD_TCPSTAT_INC(tcps_rcvbadsum);
6497 KMOD_UDPSTAT_INC(udps_badsum);
6503 KMOD_ICMPSTAT_INC(icps_checksum);
6508 case IPPROTO_ICMPV6:
6510 KMOD_ICMP6STAT_INC(icp6s_checksum);
6517 if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
6518 m->m_pkthdr.csum_flags |=
6519 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
6520 m->m_pkthdr.csum_data = 0xffff;
6525 #else /* !__FreeBSD__ */
6528 * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag
6529 * off is the offset where the protocol header starts
6530 * len is the total length of protocol header plus payload
6531 * returns 0 when the checksum is valid, otherwise returns 1.
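/*
 * The verdict is cached in the mbuf: flag_ok/flag_bad below record a good
 * or a bad checksum so a packet that passes through pf more than once
 * (e.g. bridged or looped back) is not checksummed again.
 */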
6534 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p,
6537 u_int16_t flag_ok, flag_bad;
6542 flag_ok = M_TCP_CSUM_IN_OK;
6543 flag_bad = M_TCP_CSUM_IN_BAD;
6546 flag_ok = M_UDP_CSUM_IN_OK;
6547 flag_bad = M_UDP_CSUM_IN_BAD;
6551 case IPPROTO_ICMPV6:
6553 flag_ok = flag_bad = 0;
6558 if (m->m_pkthdr.csum_flags & flag_ok)
6560 if (m->m_pkthdr.csum_flags & flag_bad)
6562 if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
6564 if (m->m_pkthdr.len < off + len)
6569 if (p == IPPROTO_ICMP) {
6574 sum = in_cksum(m, len);
6578 if (m->m_len < sizeof(struct ip))
6580 sum = in4_cksum(m, p, off, len);
6586 if (m->m_len < sizeof(struct ip6_hdr))
6588 sum = in6_cksum(m, p, off, len);
6595 m->m_pkthdr.csum_flags |= flag_bad;
6598 KMOD_TCPSTAT_INC(tcps_rcvbadsum);
6601 KMOD_UDPSTAT_INC(udps_badsum);
6605 KMOD_ICMPSTAT_INC(icps_checksum);
6609 case IPPROTO_ICMPV6:
6610 KMOD_ICMP6STAT_INC(icp6s_checksum);
6616 m->m_pkthdr.csum_flags |= flag_ok;
6623 pf_find_divert(struct mbuf *m)
6627 if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL)
6630 return ((struct pf_divert *)(mtag + 1));
6634 pf_get_divert(struct mbuf *m)
6638 if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) {
6639 mtag = m_tag_get(PACKET_TAG_PF_DIVERT, sizeof(struct pf_divert),
6643 bzero(mtag + 1, sizeof(struct pf_divert));
6644 m_tag_prepend(m, mtag);
6647 return ((struct pf_divert *)(mtag + 1));
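/*
 * pf_find_divert() merely reports an existing divert tag (NULL if the
 * packet carries none), while pf_get_divert() allocates and prepends a
 * zeroed tag on demand so a matching divert rule in pf_test()/pf_test6()
 * can fill in the target port and address.
 */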
6654 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
6655 struct ether_header *eh, struct inpcb *inp)
6657 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
6658 struct ether_header *eh)
6661 struct pfi_kif *kif;
6662 u_short action, reason = 0, log = 0;
6663 struct mbuf *m = *m0;
6665 struct ip *h = NULL;
6666 struct m_tag *ipfwtag;
6667 struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
6670 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
6672 struct pf_state *s = NULL;
6673 struct pf_ruleset *ruleset = NULL;
6675 int off, dirndx, pqid = 0;
6679 if (!V_pf_status.running)
6685 if (!pf_status.running)
6689 memset(&pd, 0, sizeof(pd));
6691 if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
6693 DPFPRINTF(PF_DEBUG_URGENT,
6694 ("pf_test: pf_get_mtag returned NULL\n"));
6699 if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
6700 kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif;
6703 kif = (struct pfi_kif *)ifp->if_pf_kif;
6709 DPFPRINTF(PF_DEBUG_URGENT,
6710 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
6713 if (kif->pfik_flags & PFI_IFLAG_SKIP)
6727 if ((m->m_flags & M_PKTHDR) == 0)
6728 panic("non-M_PKTHDR is passed to pf_test");
6729 #endif /* DIAGNOSTIC */
6732 if (m->m_pkthdr.len < (int)sizeof(*h)) {
6734 REASON_SET(&reason, PFRES_SHORT);
6740 if (m->m_flags & M_SKIP_FIREWALL) {
6745 if (m->m_pkthdr.pf.flags & PF_TAG_GENERATED)
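/*
 * Packets re-injected by a divert(4) socket still carry the ipfw rule tag
 * they were diverted with; flag them as looped and strip the tag so they
 * are filtered again here but not diverted a second time.
 */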
6750 if (ip_divert_ptr != NULL &&
6751 ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
6752 struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1);
6753 if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) {
6754 pd.pf_mtag->flags |= PF_PACKET_LOOPED;
6755 m_tag_delete(m, ipfwtag);
6757 if (pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) {
6758 m->m_flags |= M_FASTFWD_OURS;
6759 pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
6763 /* We do IP header normalization and packet reassembly here */
6764 if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
6768 m = *m0; /* pf_normalize messes with m0 */
6769 h = mtod(m, struct ip *);
6771 off = h->ip_hl << 2;
6772 if (off < (int)sizeof(*h)) {
6774 REASON_SET(&reason, PFRES_SHORT);
6779 pd.src = (struct pf_addr *)&h->ip_src;
6780 pd.dst = (struct pf_addr *)&h->ip_dst;
6781 pd.sport = pd.dport = NULL;
6782 pd.ip_sum = &h->ip_sum;
6783 pd.proto_sum = NULL;
6786 pd.sidx = (dir == PF_IN) ? 0 : 1;
6787 pd.didx = (dir == PF_IN) ? 1 : 0;
6790 pd.tot_len = ntohs(h->ip_len);
6793 /* handle fragments that didn't get reassembled by normalization */
6794 if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
6795 action = pf_test_fragment(&r, dir, kif, m, h,
6806 if (!pf_pull_hdr(m, off, &th, sizeof(th),
6807 &action, &reason, AF_INET)) {
6808 log = action != PF_PASS;
6811 pd.p_len = pd.tot_len - off - (th.th_off << 2);
6812 if ((th.th_flags & TH_ACK) && pd.p_len == 0)
6814 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6815 if (action == PF_DROP)
6817 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6819 if (action == PF_PASS) {
6822 if (pfsync_update_state_ptr != NULL)
6823 pfsync_update_state_ptr(s);
6825 pfsync_update_state(s);
6827 #endif /* NPFSYNC */
6831 } else if (s == NULL)
6833 action = pf_test_rule(&r, &s, dir, kif,
6834 m, off, h, &pd, &a, &ruleset, NULL, inp);
6836 action = pf_test_rule(&r, &s, dir, kif,
6837 m, off, h, &pd, &a, &ruleset, &ipintrq);
6846 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
6847 &action, &reason, AF_INET)) {
6848 log = action != PF_PASS;
6851 if (uh.uh_dport == 0 ||
6852 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
6853 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
6855 REASON_SET(&reason, PFRES_SHORT);
6858 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6859 if (action == PF_PASS) {
6862 if (pfsync_update_state_ptr != NULL)
6863 pfsync_update_state_ptr(s);
6865 pfsync_update_state(s);
6867 #endif /* NPFSYNC */
6871 } else if (s == NULL)
6873 action = pf_test_rule(&r, &s, dir, kif,
6874 m, off, h, &pd, &a, &ruleset, NULL, inp);
6876 action = pf_test_rule(&r, &s, dir, kif,
6877 m, off, h, &pd, &a, &ruleset, &ipintrq);
6882 case IPPROTO_ICMP: {
6886 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
6887 &action, &reason, AF_INET)) {
6888 log = action != PF_PASS;
6891 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
6893 if (action == PF_PASS) {
6896 if (pfsync_update_state_ptr != NULL)
6897 pfsync_update_state_ptr(s);
6899 pfsync_update_state(s);
6901 #endif /* NPFSYNC */
6905 } else if (s == NULL)
6907 action = pf_test_rule(&r, &s, dir, kif,
6908 m, off, h, &pd, &a, &ruleset, NULL, inp);
6910 action = pf_test_rule(&r, &s, dir, kif,
6911 m, off, h, &pd, &a, &ruleset, &ipintrq);
6917 case IPPROTO_ICMPV6: {
6919 DPFPRINTF(PF_DEBUG_MISC,
6920 ("pf: dropping IPv4 packet with ICMPv6 payload\n"));
6926 action = pf_test_state_other(&s, dir, kif, m, &pd);
6927 if (action == PF_PASS) {
6930 if (pfsync_update_state_ptr != NULL)
6931 pfsync_update_state_ptr(s);
6933 pfsync_update_state(s);
6935 #endif /* NPFSYNC */
6939 } else if (s == NULL)
6941 action = pf_test_rule(&r, &s, dir, kif, m, off, h,
6942 &pd, &a, &ruleset, NULL, inp);
6944 action = pf_test_rule(&r, &s, dir, kif, m, off, h,
6945 &pd, &a, &ruleset, &ipintrq);
6951 if (action == PF_PASS && h->ip_hl > 5 &&
6952 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6954 REASON_SET(&reason, PFRES_IPOPTIONS);
6956 DPFPRINTF(PF_DEBUG_MISC,
6957 ("pf: dropping packet with ip options\n"));
6960 if ((s && s->tag) || r->rtableid >= 0)
6962 pf_tag_packet(m, s ? s->tag : 0, r->rtableid, pd.pf_mtag);
6964 pf_tag_packet(m, s ? s->tag : 0, r->rtableid);
6967 if (dir == PF_IN && s && s->key[PF_SK_STACK])
6969 pd.pf_mtag->statekey = s->key[PF_SK_STACK];
6971 m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
6975 if (action == PF_PASS && r->qid) {
6977 if (pqid || (pd.tos & IPTOS_LOWDELAY))
6978 pd.pf_mtag->qid = r->pqid;
6980 pd.pf_mtag->qid = r->qid;
6981 /* add hints for ecn */
6982 pd.pf_mtag->hdr = h;
6985 if (pqid || (pd.tos & IPTOS_LOWDELAY))
6986 m->m_pkthdr.pf.qid = r->pqid;
6988 m->m_pkthdr.pf.qid = r->qid;
6989 /* add hints for ecn */
6990 m->m_pkthdr.pf.hdr = h;
6996 * connections redirected to loopback should not match sockets
6997 * bound specifically to loopback due to security implications,
6998 * see tcp_input() and in_pcblookup_listen().
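/*
 * An illustrative (hypothetical) rule for this case:
 *
 *	rdr on $ext_if proto tcp from any to any port 80 -> 127.0.0.1 port 8000
 *
 * The destination below was rewritten to loopback by such a rule, so the
 * packet is marked here to let the socket lookup distinguish it from
 * traffic genuinely addressed to 127.0.0.1.
 */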
7000 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
7001 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
7002 (s->nat_rule.ptr->action == PF_RDR ||
7003 s->nat_rule.ptr->action == PF_BINAT) &&
7004 (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
7006 m->m_flags |= M_SKIP_FIREWALL;
7008 m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;
7012 if (action == PF_PASS && r->divert.port &&
7013 ip_divert_ptr != NULL && !PACKET_LOOPED()) {
7015 ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0,
7016 sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
7017 if (ipfwtag != NULL) {
7018 ((struct ipfw_rule_ref *)(ipfwtag+1))->info =
7019 ntohs(r->divert.port);
7020 ((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir;
7022 m_tag_prepend(m, ipfwtag);
7026 if (m->m_flags & M_FASTFWD_OURS) {
7027 pd.pf_mtag->flags |= PF_FASTFWD_OURS_PRESENT;
7028 m->m_flags &= ~M_FASTFWD_OURS;
7032 dir == PF_IN ? DIR_IN : DIR_OUT);
7036 /* XXX: ipfw has the same behaviour! */
7038 REASON_SET(&reason, PFRES_MEMORY);
7040 DPFPRINTF(PF_DEBUG_MISC,
7041 ("pf: failed to allocate divert tag\n"));
7045 if (dir == PF_IN && action == PF_PASS && r->divert.port) {
7046 struct pf_divert *divert;
7048 if ((divert = pf_get_divert(m))) {
7049 m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
7050 divert->port = r->divert.port;
7051 divert->addr.ipv4 = r->divert.addr.v4;
7059 if (s != NULL && s->nat_rule.ptr != NULL &&
7060 s->nat_rule.ptr->log & PF_LOG_ALL)
7061 lr = s->nat_rule.ptr;
7064 PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a, ruleset,
7068 kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
7069 kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
7071 if (action == PF_PASS || r->action == PF_DROP) {
7072 dirndx = (dir == PF_OUT);
7073 r->packets[dirndx]++;
7074 r->bytes[dirndx] += pd.tot_len;
7076 a->packets[dirndx]++;
7077 a->bytes[dirndx] += pd.tot_len;
7080 if (s->nat_rule.ptr != NULL) {
7081 s->nat_rule.ptr->packets[dirndx]++;
7082 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
7084 if (s->src_node != NULL) {
7085 s->src_node->packets[dirndx]++;
7086 s->src_node->bytes[dirndx] += pd.tot_len;
7088 if (s->nat_src_node != NULL) {
7089 s->nat_src_node->packets[dirndx]++;
7090 s->nat_src_node->bytes[dirndx] += pd.tot_len;
7092 dirndx = (dir == s->direction) ? 0 : 1;
7093 s->packets[dirndx]++;
7094 s->bytes[dirndx] += pd.tot_len;
7097 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
7099 if (nr != NULL && r == &V_pf_default_rule)
7101 if (nr != NULL && r == &pf_default_rule)
7104 if (tr->src.addr.type == PF_ADDR_TABLE)
7105 pfr_update_stats(tr->src.addr.p.tbl,
7106 (s == NULL) ? pd.src :
7107 &s->key[(s->direction == PF_IN)]->
7108 addr[(s->direction == PF_OUT)],
7109 pd.af, pd.tot_len, dir == PF_OUT,
7110 r->action == PF_PASS, tr->src.neg);
7111 if (tr->dst.addr.type == PF_ADDR_TABLE)
7112 pfr_update_stats(tr->dst.addr.p.tbl,
7113 (s == NULL) ? pd.dst :
7114 &s->key[(s->direction == PF_IN)]->
7115 addr[(s->direction == PF_IN)],
7116 pd.af, pd.tot_len, dir == PF_OUT,
7117 r->action == PF_PASS, tr->dst.neg);
7121 case PF_SYNPROXY_DROP:
7128 /* pf_route can free the mbuf causing *m0 to become NULL */
7130 pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);
7143 pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
7144 struct ether_header *eh, struct inpcb *inp)
7146 pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
7147 struct ether_header *eh)
7150 struct pfi_kif *kif;
7151 u_short action, reason = 0, log = 0;
7152 struct mbuf *m = *m0, *n = NULL;
7154 struct ip6_hdr *h = NULL;
7155 struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
7158 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
7160 struct pf_state *s = NULL;
7161 struct pf_ruleset *ruleset = NULL;
7163 int off, terminal = 0, dirndx, rh_cnt = 0;
7167 if (!V_pf_status.running) {
7172 if (!pf_status.running)
7176 memset(&pd, 0, sizeof(pd));
7178 if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
7180 DPFPRINTF(PF_DEBUG_URGENT,
7181 ("pf_test: pf_get_mtag returned NULL\n"));
7186 if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
7187 kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif;
7190 kif = (struct pfi_kif *)ifp->if_pf_kif;
7196 DPFPRINTF(PF_DEBUG_URGENT,
7197 ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
7200 if (kif->pfik_flags & PFI_IFLAG_SKIP)
7214 if ((m->m_flags & M_PKTHDR) == 0)
7215 panic("non-M_PKTHDR is passed to pf_test6");
7216 #endif /* DIAGNOSTIC */
7219 if (m->m_pkthdr.len < (int)sizeof(*h)) {
7221 REASON_SET(&reason, PFRES_SHORT);
7227 if (pd.pf_mtag->flags & PF_TAG_GENERATED) {
7230 if (m->m_pkthdr.pf.flags & PF_TAG_GENERATED)
7237 /* We do IP header normalization and packet reassembly here */
7238 if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
7242 m = *m0; /* pf_normalize messes with m0 */
7243 h = mtod(m, struct ip6_hdr *);
7247 * we do not support jumbograms yet.  if we keep going, a zero ip6_plen
7248 * will do something bad, so drop the packet for now.
7250 if (htons(h->ip6_plen) == 0) {
7252 REASON_SET(&reason, PFRES_NORM); /*XXX*/
7257 pd.src = (struct pf_addr *)&h->ip6_src;
7258 pd.dst = (struct pf_addr *)&h->ip6_dst;
7259 pd.sport = pd.dport = NULL;
7261 pd.proto_sum = NULL;
7263 pd.sidx = (dir == PF_IN) ? 0 : 1;
7264 pd.didx = (dir == PF_IN) ? 1 : 0;
7267 pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
7270 off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
7271 pd.proto = h->ip6_nxt;
7274 case IPPROTO_FRAGMENT:
7275 action = pf_test_fragment(&r, dir, kif, m, h,
7277 if (action == PF_DROP)
7278 REASON_SET(&reason, PFRES_FRAG);
7280 case IPPROTO_ROUTING: {
7281 struct ip6_rthdr rthdr;
7284 DPFPRINTF(PF_DEBUG_MISC,
7285 ("pf: IPv6 more than one rthdr\n"));
7287 REASON_SET(&reason, PFRES_IPOPTIONS);
7291 if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
7293 DPFPRINTF(PF_DEBUG_MISC,
7294 ("pf: IPv6 short rthdr\n"));
7296 REASON_SET(&reason, PFRES_SHORT);
7300 if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
7301 DPFPRINTF(PF_DEBUG_MISC,
7302 ("pf: IPv6 rthdr0\n"));
7304 REASON_SET(&reason, PFRES_IPOPTIONS);
7311 case IPPROTO_HOPOPTS:
7312 case IPPROTO_DSTOPTS: {
7313 /* get next header and header length */
7314 struct ip6_ext opt6;
7316 if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
7317 NULL, &reason, pd.af)) {
7318 DPFPRINTF(PF_DEBUG_MISC,
7319 ("pf: IPv6 short opt\n"));
7324 if (pd.proto == IPPROTO_AH)
7325 off += (opt6.ip6e_len + 2) * 4;
7327 off += (opt6.ip6e_len + 1) * 8;
7328 pd.proto = opt6.ip6e_nxt;
7329 /* go to the next header */
7336 } while (!terminal);
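/*
 * The loop above walked the IPv6 extension header chain: fragments are
 * handed to pf_test_fragment(), a type 0 routing header is rejected
 * outright, and the remaining option headers are skipped until pd.proto
 * and off point at the transport header examined below.
 */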
7338 /* if there's no routing header, use unmodified mbuf for checksumming */
7348 if (!pf_pull_hdr(m, off, &th, sizeof(th),
7349 &action, &reason, AF_INET6)) {
7350 log = action != PF_PASS;
7353 pd.p_len = pd.tot_len - off - (th.th_off << 2);
7354 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
7355 if (action == PF_DROP)
7357 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
7359 if (action == PF_PASS) {
7362 if (pfsync_update_state_ptr != NULL)
7363 pfsync_update_state_ptr(s);
7365 pfsync_update_state(s);
7367 #endif /* NPFSYNC */
7371 } else if (s == NULL)
7373 action = pf_test_rule(&r, &s, dir, kif,
7374 m, off, h, &pd, &a, &ruleset, NULL, inp);
7376 action = pf_test_rule(&r, &s, dir, kif,
7377 m, off, h, &pd, &a, &ruleset, &ip6intrq);
7386 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
7387 &action, &reason, AF_INET6)) {
7388 log = action != PF_PASS;
7391 if (uh.uh_dport == 0 ||
7392 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
7393 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
7395 REASON_SET(&reason, PFRES_SHORT);
7398 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
7399 if (action == PF_PASS) {
7402 if (pfsync_update_state_ptr != NULL)
7403 pfsync_update_state_ptr(s);
7405 pfsync_update_state(s);
7407 #endif /* NPFSYNC */
7411 } else if (s == NULL)
7413 action = pf_test_rule(&r, &s, dir, kif,
7414 m, off, h, &pd, &a, &ruleset, NULL, inp);
7416 action = pf_test_rule(&r, &s, dir, kif,
7417 m, off, h, &pd, &a, &ruleset, &ip6intrq);
7422 case IPPROTO_ICMP: {
7424 DPFPRINTF(PF_DEBUG_MISC,
7425 ("pf: dropping IPv6 packet with ICMPv4 payload\n"));
7429 case IPPROTO_ICMPV6: {
7430 struct icmp6_hdr ih;
7433 if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
7434 &action, &reason, AF_INET6)) {
7435 log = action != PF_PASS;
7438 action = pf_test_state_icmp(&s, dir, kif,
7439 m, off, h, &pd, &reason);
7440 if (action == PF_PASS) {
7443 if (pfsync_update_state_ptr != NULL)
7444 pfsync_update_state_ptr(s);
7446 pfsync_update_state(s);
7448 #endif /* NPFSYNC */
7452 } else if (s == NULL)
7454 action = pf_test_rule(&r, &s, dir, kif,
7455 m, off, h, &pd, &a, &ruleset, NULL, inp);
7457 action = pf_test_rule(&r, &s, dir, kif,
7458 m, off, h, &pd, &a, &ruleset, &ip6intrq);
7464 action = pf_test_state_other(&s, dir, kif, m, &pd);
7465 if (action == PF_PASS) {
7468 if (pfsync_update_state_ptr != NULL)
7469 pfsync_update_state_ptr(s);
7471 pfsync_update_state(s);
7473 #endif /* NPFSYNC */
7477 } else if (s == NULL)
7479 action = pf_test_rule(&r, &s, dir, kif, m, off, h,
7480 &pd, &a, &ruleset, NULL, inp);
7482 action = pf_test_rule(&r, &s, dir, kif, m, off, h,
7483 &pd, &a, &ruleset, &ip6intrq);
7494 /* handle dangerous IPv6 extension headers. */
7495 if (action == PF_PASS && rh_cnt &&
7496 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
7498 REASON_SET(&reason, PFRES_IPOPTIONS);
7500 DPFPRINTF(PF_DEBUG_MISC,
7501 ("pf: dropping packet with dangerous v6 headers\n"));
7504 if ((s && s->tag) || r->rtableid >= 0)
7506 pf_tag_packet(m, s ? s->tag : 0, r->rtableid, pd.pf_mtag);
7508 pf_tag_packet(m, s ? s->tag : 0, r->rtableid);
7511 if (dir == PF_IN && s && s->key[PF_SK_STACK])
7513 pd.pf_mtag->statekey = s->key[PF_SK_STACK];
7515 m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
7519 if (action == PF_PASS && r->qid) {
7521 if (pd.tos & IPTOS_LOWDELAY)
7522 pd.pf_mtag->qid = r->pqid;
7524 pd.pf_mtag->qid = r->qid;
7525 /* add hints for ecn */
7526 pd.pf_mtag->hdr = h;
7528 if (pd.tos & IPTOS_LOWDELAY)
7529 m->m_pkthdr.pf.qid = r->pqid;
7531 m->m_pkthdr.pf.qid = r->qid;
7532 /* add hints for ecn */
7533 m->m_pkthdr.pf.hdr = h;
7538 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
7539 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
7540 (s->nat_rule.ptr->action == PF_RDR ||
7541 s->nat_rule.ptr->action == PF_BINAT) &&
7542 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
7544 m->m_flags |= M_SKIP_FIREWALL;
7546 m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;
7550 /* XXX: Anybody working on it?! */
7552 printf("pf: divert(9) is not supported for IPv6\n");
7554 if (dir == PF_IN && action == PF_PASS && r->divert.port) {
7555 struct pf_divert *divert;
7557 if ((divert = pf_get_divert(m))) {
7558 m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
7559 divert->port = r->divert.port;
7560 divert->addr.ipv6 = r->divert.addr.v6;
7568 if (s != NULL && s->nat_rule.ptr != NULL &&
7569 s->nat_rule.ptr->log & PF_LOG_ALL)
7570 lr = s->nat_rule.ptr;
7573 PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, lr, a, ruleset,
7577 kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
7578 kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
7580 if (action == PF_PASS || r->action == PF_DROP) {
7581 dirndx = (dir == PF_OUT);
7582 r->packets[dirndx]++;
7583 r->bytes[dirndx] += pd.tot_len;
7585 a->packets[dirndx]++;
7586 a->bytes[dirndx] += pd.tot_len;
7589 if (s->nat_rule.ptr != NULL) {
7590 s->nat_rule.ptr->packets[dirndx]++;
7591 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
7593 if (s->src_node != NULL) {
7594 s->src_node->packets[dirndx]++;
7595 s->src_node->bytes[dirndx] += pd.tot_len;
7597 if (s->nat_src_node != NULL) {
7598 s->nat_src_node->packets[dirndx]++;
7599 s->nat_src_node->bytes[dirndx] += pd.tot_len;
7601 dirndx = (dir == s->direction) ? 0 : 1;
7602 s->packets[dirndx]++;
7603 s->bytes[dirndx] += pd.tot_len;
7606 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
7608 if (nr != NULL && r == &V_pf_default_rule)
7610 if (nr != NULL && r == &pf_default_rule)
7613 if (tr->src.addr.type == PF_ADDR_TABLE)
7614 pfr_update_stats(tr->src.addr.p.tbl,
7615 (s == NULL) ? pd.src :
7616 &s->key[(s->direction == PF_IN)]->addr[0],
7617 pd.af, pd.tot_len, dir == PF_OUT,
7618 r->action == PF_PASS, tr->src.neg);
7619 if (tr->dst.addr.type == PF_ADDR_TABLE)
7620 pfr_update_stats(tr->dst.addr.p.tbl,
7621 (s == NULL) ? pd.dst :
7622 &s->key[(s->direction == PF_IN)]->addr[1],
7623 pd.af, pd.tot_len, dir == PF_OUT,
7624 r->action == PF_PASS, tr->dst.neg);
7628 case PF_SYNPROXY_DROP:
7635 /* pf_route6 can free the mbuf causing *m0 to become NULL */
7637 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
7649 pf_check_congestion(struct ifqueue *ifq)
7652 /* XXX_IMPORT: later */
7655 if (ifq->ifq_congestion)
7663 * must be called whenever any addressing information such as
7664 * the address, port or protocol has changed
7667 pf_pkt_addr_changed(struct mbuf *m)
7670 struct pf_mtag *pf_tag;
7672 if ((pf_tag = pf_find_mtag(m)) != NULL)
7673 pf_tag->statekey = NULL;
7675 m->m_pkthdr.pf.statekey = NULL;
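/*
 * Clearing the cached state-key pointer keeps later consumers from acting
 * on a key that no longer matches the rewritten headers; the next lookup
 * has to start from scratch.
 */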