1 /* $OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $ */
4 * Copyright (c) 2001 Daniel Hartmeier
5 * Copyright (c) 2002 - 2008 Henning Brauer
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * - Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * - Redistributions in binary form must reproduce the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer in the documentation and/or other materials provided
17 * with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
32 * Effort sponsored in part by the Defense Advanced Research Projects
33 * Agency (DARPA) and Air Force Research Laboratory, Air Force
34 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
40 #include "opt_inet6.h"
42 #include <sys/cdefs.h>
43 __FBSDID("$FreeBSD$");
53 #define NPFLOW DEV_PFLOW
65 #include <sys/param.h>
66 #include <sys/systm.h>
68 #include <sys/filio.h>
69 #include <sys/socket.h>
70 #include <sys/socketvar.h>
71 #include <sys/kernel.h>
74 #include <sys/random.h>
75 #include <sys/sysctl.h>
76 #include <sys/endian.h>
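/* OpenBSD spells this conversion betoh64(); FreeBSD provides be64toh(). */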
77 #define betoh64 be64toh
83 #include <sys/kthread.h>
87 #include <sys/rwlock.h>
93 #include <crypto/md5.h>
97 #include <net/if_types.h>
99 #include <net/route.h>
102 #include <net/radix_mpath.h>
105 #include <net/radix_mpath.h>
108 #include <netinet/in.h>
109 #include <netinet/in_var.h>
110 #include <netinet/in_systm.h>
111 #include <netinet/ip.h>
112 #include <netinet/ip_var.h>
113 #include <netinet/tcp.h>
114 #include <netinet/tcp_seq.h>
115 #include <netinet/udp.h>
116 #include <netinet/ip_icmp.h>
117 #include <netinet/in_pcb.h>
118 #include <netinet/tcp_timer.h>
119 #include <netinet/tcp_var.h>
120 #include <netinet/udp_var.h>
121 #include <netinet/icmp_var.h>
122 #include <netinet/if_ether.h>
124 #include <netinet/ip_fw.h>
125 #include <netpfil/ipfw/ip_fw_private.h> /* XXX: only for DIR_IN/DIR_OUT */
129 #include <dev/rndvar.h>
131 #include <net/pfvar.h>
132 #include <net/if_pflog.h>
133 #include <net/if_pflow.h>
134 #include <net/if_pfsync.h>
137 #include <netinet/ip6.h>
138 #include <netinet/in_pcb.h>
139 #include <netinet/icmp6.h>
140 #include <netinet6/nd6.h>
142 #include <netinet6/ip6_var.h>
143 #include <netinet6/in6_pcb.h>
148 #include <machine/in_cksum.h>
149 #include <sys/limits.h>
150 #include <sys/ucred.h>
151 #include <security/mac/mac_framework.h>
153 extern int ip_optcopy(struct ip *, struct ip *);
157 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
159 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
168 VNET_DEFINE(struct pf_state_tree, pf_statetbl);
170 VNET_DEFINE(struct pf_altqqueue, pf_altqs[2]);
171 VNET_DEFINE(struct pf_palist, pf_pabuf);
172 VNET_DEFINE(struct pf_altqqueue *, pf_altqs_active);
173 VNET_DEFINE(struct pf_altqqueue *, pf_altqs_inactive);
174 VNET_DEFINE(struct pf_status, pf_status);
176 VNET_DEFINE(u_int32_t, ticket_altqs_active);
177 VNET_DEFINE(u_int32_t, ticket_altqs_inactive);
178 VNET_DEFINE(int, altqs_inactive_open);
179 VNET_DEFINE(u_int32_t, ticket_pabuf);
181 VNET_DEFINE(MD5_CTX, pf_tcp_secret_ctx);
182 #define V_pf_tcp_secret_ctx VNET(pf_tcp_secret_ctx)
183 VNET_DEFINE(u_char, pf_tcp_secret[16]);
184 #define V_pf_tcp_secret VNET(pf_tcp_secret)
185 VNET_DEFINE(int, pf_tcp_secret_init);
186 #define V_pf_tcp_secret_init VNET(pf_tcp_secret_init)
187 VNET_DEFINE(int, pf_tcp_iss_off);
188 #define V_pf_tcp_iss_off VNET(pf_tcp_iss_off)
190 struct pf_anchor_stackframe {
191 struct pf_ruleset *rs;
193 struct pf_anchor_node *parent;
194 struct pf_anchor *child;
196 VNET_DEFINE(struct pf_anchor_stackframe, pf_anchor_stack[64]);
197 #define V_pf_anchor_stack VNET(pf_anchor_stack)
199 VNET_DEFINE(uma_zone_t, pf_src_tree_pl);
200 VNET_DEFINE(uma_zone_t, pf_rule_pl);
201 VNET_DEFINE(uma_zone_t, pf_pooladdr_pl);
202 VNET_DEFINE(uma_zone_t, pf_state_pl);
203 VNET_DEFINE(uma_zone_t, pf_state_key_pl);
204 VNET_DEFINE(uma_zone_t, pf_state_item_pl);
205 VNET_DEFINE(uma_zone_t, pf_altq_pl);
207 struct pf_state_tree pf_statetbl;
209 struct pf_altqqueue pf_altqs[2];
210 struct pf_palist pf_pabuf;
211 struct pf_altqqueue *pf_altqs_active;
212 struct pf_altqqueue *pf_altqs_inactive;
213 struct pf_status pf_status;
215 u_int32_t ticket_altqs_active;
216 u_int32_t ticket_altqs_inactive;
217 int altqs_inactive_open;
218 u_int32_t ticket_pabuf;
220 MD5_CTX pf_tcp_secret_ctx;
221 u_char pf_tcp_secret[16];
222 int pf_tcp_secret_init;
225 struct pf_anchor_stackframe {
226 struct pf_ruleset *rs;
228 struct pf_anchor_node *parent;
229 struct pf_anchor *child;
230 } pf_anchor_stack[64];
232 struct pool pf_src_tree_pl, pf_rule_pl, pf_pooladdr_pl;
233 struct pool pf_state_pl, pf_state_key_pl, pf_state_item_pl;
234 struct pool pf_altq_pl;
237 void pf_init_threshold(struct pf_threshold *, u_int32_t,
239 void pf_add_threshold(struct pf_threshold *);
240 int pf_check_threshold(struct pf_threshold *);
242 void pf_change_ap(struct pf_addr *, u_int16_t *,
243 u_int16_t *, u_int16_t *, struct pf_addr *,
244 u_int16_t, u_int8_t, sa_family_t);
245 int pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
246 struct tcphdr *, struct pf_state_peer *);
248 void pf_change_a6(struct pf_addr *, u_int16_t *,
249 struct pf_addr *, u_int8_t);
251 void pf_change_icmp(struct pf_addr *, u_int16_t *,
252 struct pf_addr *, struct pf_addr *, u_int16_t,
253 u_int16_t *, u_int16_t *, u_int16_t *,
254 u_int16_t *, u_int8_t, sa_family_t);
256 void pf_send_tcp(struct mbuf *,
257 const struct pf_rule *, sa_family_t,
259 void pf_send_tcp(const struct pf_rule *, sa_family_t,
261 const struct pf_addr *, const struct pf_addr *,
262 u_int16_t, u_int16_t, u_int32_t, u_int32_t,
263 u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
264 u_int16_t, struct ether_header *, struct ifnet *);
265 static void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
266 sa_family_t, struct pf_rule *);
267 void pf_detach_state(struct pf_state *);
268 void pf_state_key_detach(struct pf_state *, int);
269 u_int32_t pf_tcp_iss(struct pf_pdesc *);
270 int pf_test_rule(struct pf_rule **, struct pf_state **,
271 int, struct pfi_kif *, struct mbuf *, int,
272 void *, struct pf_pdesc *, struct pf_rule **,
274 struct pf_ruleset **, struct ifqueue *,
277 struct pf_ruleset **, struct ifqueue *);
279 static __inline int pf_create_state(struct pf_rule *, struct pf_rule *,
280 struct pf_rule *, struct pf_pdesc *,
281 struct pf_src_node *, struct pf_state_key *,
282 struct pf_state_key *, struct pf_state_key *,
283 struct pf_state_key *, struct mbuf *, int,
284 u_int16_t, u_int16_t, int *, struct pfi_kif *,
285 struct pf_state **, int, u_int16_t, u_int16_t,
287 int pf_test_fragment(struct pf_rule **, int,
288 struct pfi_kif *, struct mbuf *, void *,
289 struct pf_pdesc *, struct pf_rule **,
290 struct pf_ruleset **);
291 int pf_tcp_track_full(struct pf_state_peer *,
292 struct pf_state_peer *, struct pf_state **,
293 struct pfi_kif *, struct mbuf *, int,
294 struct pf_pdesc *, u_short *, int *);
295 int pf_tcp_track_sloppy(struct pf_state_peer *,
296 struct pf_state_peer *, struct pf_state **,
297 struct pf_pdesc *, u_short *);
298 int pf_test_state_tcp(struct pf_state **, int,
299 struct pfi_kif *, struct mbuf *, int,
300 void *, struct pf_pdesc *, u_short *);
301 int pf_test_state_udp(struct pf_state **, int,
302 struct pfi_kif *, struct mbuf *, int,
303 void *, struct pf_pdesc *);
304 int pf_test_state_icmp(struct pf_state **, int,
305 struct pfi_kif *, struct mbuf *, int,
306 void *, struct pf_pdesc *, u_short *);
307 int pf_test_state_other(struct pf_state **, int,
308 struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
309 void pf_route(struct mbuf **, struct pf_rule *, int,
310 struct ifnet *, struct pf_state *,
312 void pf_route6(struct mbuf **, struct pf_rule *, int,
313 struct ifnet *, struct pf_state *,
316 int pf_socket_lookup(int, struct pf_pdesc *);
318 u_int8_t pf_get_wscale(struct mbuf *, int, u_int16_t,
320 u_int16_t pf_get_mss(struct mbuf *, int, u_int16_t,
322 u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t,
324 void pf_set_rt_ifp(struct pf_state *,
326 int pf_check_proto_cksum(struct mbuf *, int, int,
327 u_int8_t, sa_family_t);
329 struct pf_divert *pf_get_divert(struct mbuf *);
331 void pf_print_state_parts(struct pf_state *,
332 struct pf_state_key *, struct pf_state_key *);
333 int pf_addr_wrap_neq(struct pf_addr_wrap *,
334 struct pf_addr_wrap *);
335 int pf_compare_state_keys(struct pf_state_key *,
336 struct pf_state_key *, struct pfi_kif *, u_int);
338 struct pf_state *pf_find_state(struct pfi_kif *,
339 struct pf_state_key_cmp *, u_int, struct mbuf *,
342 struct pf_state *pf_find_state(struct pfi_kif *,
343 struct pf_state_key_cmp *, u_int, struct mbuf *);
345 int pf_src_connlimit(struct pf_state **);
346 int pf_check_congestion(struct ifqueue *);
349 int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);
351 VNET_DECLARE(int, pf_end_threads);
353 VNET_DEFINE(struct pf_pool_limit, pf_pool_limits[PF_LIMIT_MAX]);
355 extern struct pool pfr_ktable_pl;
356 extern struct pool pfr_kentry_pl;
358 struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
359 { &pf_state_pl, PFSTATE_HIWAT },
360 { &pf_src_tree_pl, PFSNODE_HIWAT },
361 { &pf_frent_pl, PFFRAG_FRENT_HIWAT },
362 { &pfr_ktable_pl, PFR_KTABLE_HIWAT },
363 { &pfr_kentry_pl, PFR_KENTRY_HIWAT }
368 #define PPACKET_LOOPED() \
369 (pd->pf_mtag->flags & PF_PACKET_LOOPED)
371 #define PACKET_LOOPED() \
372 (pd.pf_mtag->flags & PF_PACKET_LOOPED)
374 #define STATE_LOOKUP(i, k, d, s, m, pt) \
376 s = pf_find_state(i, k, d, m, pt); \
377 if (s == NULL || (s)->timeout == PFTM_PURGE) \
379 if (PPACKET_LOOPED()) \
382 (((s)->rule.ptr->rt == PF_ROUTETO && \
383 (s)->rule.ptr->direction == PF_OUT) || \
384 ((s)->rule.ptr->rt == PF_REPLYTO && \
385 (s)->rule.ptr->direction == PF_IN)) && \
386 (s)->rt_kif != NULL && \
391 #define STATE_LOOKUP(i, k, d, s, m) \
393 s = pf_find_state(i, k, d, m); \
394 if (s == NULL || (s)->timeout == PFTM_PURGE) \
397 (((s)->rule.ptr->rt == PF_ROUTETO && \
398 (s)->rule.ptr->direction == PF_OUT) || \
399 ((s)->rule.ptr->rt == PF_REPLYTO && \
400 (s)->rule.ptr->direction == PF_IN)) && \
401 (s)->rt_kif != NULL && \
408 #define BOUND_IFACE(r, k) \
409 ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all
411 #define BOUND_IFACE(r, k) \
412 ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all
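/*
 * Keep the per-rule state counters (current and total) in sync whenever a
 * state referencing the rule, its anchor or its NAT rule is created or torn
 * down.
 */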
415 #define STATE_INC_COUNTERS(s) \
417 s->rule.ptr->states_cur++; \
418 s->rule.ptr->states_tot++; \
419 if (s->anchor.ptr != NULL) { \
420 s->anchor.ptr->states_cur++; \
421 s->anchor.ptr->states_tot++; \
423 if (s->nat_rule.ptr != NULL) { \
424 s->nat_rule.ptr->states_cur++; \
425 s->nat_rule.ptr->states_tot++; \
429 #define STATE_DEC_COUNTERS(s) \
431 if (s->nat_rule.ptr != NULL) \
432 s->nat_rule.ptr->states_cur--; \
433 if (s->anchor.ptr != NULL) \
434 s->anchor.ptr->states_cur--; \
435 s->rule.ptr->states_cur--; \
438 static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
439 static __inline int pf_state_compare_key(struct pf_state_key *,
440 struct pf_state_key *);
441 static __inline int pf_state_compare_id(struct pf_state *,
445 VNET_DEFINE(struct pf_src_tree, tree_src_tracking);
447 VNET_DEFINE(struct pf_state_tree_id, tree_id);
448 VNET_DEFINE(struct pf_state_queue, state_list);
450 struct pf_src_tree tree_src_tracking;
452 struct pf_state_tree_id tree_id;
453 struct pf_state_queue state_list;
456 RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
457 RB_GENERATE(pf_state_tree, pf_state_key, entry, pf_state_compare_key);
458 RB_GENERATE(pf_state_tree_id, pf_state,
459 entry_id, pf_state_compare_id);
462 pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
466 if (a->rule.ptr > b->rule.ptr)
468 if (a->rule.ptr < b->rule.ptr)
470 if ((diff = a->af - b->af) != 0)
475 if (a->addr.addr32[0] > b->addr.addr32[0])
477 if (a->addr.addr32[0] < b->addr.addr32[0])
483 if (a->addr.addr32[3] > b->addr.addr32[3])
485 if (a->addr.addr32[3] < b->addr.addr32[3])
487 if (a->addr.addr32[2] > b->addr.addr32[2])
489 if (a->addr.addr32[2] < b->addr.addr32[2])
491 if (a->addr.addr32[1] > b->addr.addr32[1])
493 if (a->addr.addr32[1] < b->addr.addr32[1])
495 if (a->addr.addr32[0] > b->addr.addr32[0])
497 if (a->addr.addr32[0] < b->addr.addr32[0])
507 pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
512 dst->addr32[0] = src->addr32[0];
516 dst->addr32[0] = src->addr32[0];
517 dst->addr32[1] = src->addr32[1];
518 dst->addr32[2] = src->addr32[2];
519 dst->addr32[3] = src->addr32[3];
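/*
 * max-src-conn-rate bookkeeping: the counter is kept in fixed-point units of
 * PF_THRESHOLD_MULT and decays linearly over the configured interval, so
 * pf_check_threshold() trips, roughly, once more than `limit' connections
 * arrive within `seconds'.
 */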
526 pf_init_threshold(struct pf_threshold *threshold,
527 u_int32_t limit, u_int32_t seconds)
529 threshold->limit = limit * PF_THRESHOLD_MULT;
530 threshold->seconds = seconds;
531 threshold->count = 0;
532 threshold->last = time_second;
536 pf_add_threshold(struct pf_threshold *threshold)
538 u_int32_t t = time_second, diff = t - threshold->last;
540 if (diff >= threshold->seconds)
541 threshold->count = 0;
543 threshold->count -= threshold->count * diff /
545 threshold->count += PF_THRESHOLD_MULT;
550 pf_check_threshold(struct pf_threshold *threshold)
552 return (threshold->count > threshold->limit);
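/*
 * Enforce max-src-conn and max-src-conn-rate for the state's source node.
 * On overflow the offending address can be added to the overload table and,
 * if the rule asks for a flush, existing states from that source are killed;
 * the triggering state itself is scheduled for purging.
 */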
556 pf_src_connlimit(struct pf_state **state)
560 (*state)->src_node->conn++;
561 (*state)->src.tcp_est = 1;
562 pf_add_threshold(&(*state)->src_node->conn_rate);
564 if ((*state)->rule.ptr->max_src_conn &&
565 (*state)->rule.ptr->max_src_conn <
566 (*state)->src_node->conn) {
568 V_pf_status.lcounters[LCNT_SRCCONN]++;
570 pf_status.lcounters[LCNT_SRCCONN]++;
575 if ((*state)->rule.ptr->max_src_conn_rate.limit &&
576 pf_check_threshold(&(*state)->src_node->conn_rate)) {
578 V_pf_status.lcounters[LCNT_SRCCONNRATE]++;
580 pf_status.lcounters[LCNT_SRCCONNRATE]++;
588 if ((*state)->rule.ptr->overload_tbl) {
590 u_int32_t killed = 0;
593 V_pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
594 if (V_pf_status.debug >= PF_DEBUG_MISC) {
596 pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
597 if (pf_status.debug >= PF_DEBUG_MISC) {
599 printf("pf_src_connlimit: blocking address ");
600 pf_print_host(&(*state)->src_node->addr, 0,
601 (*state)->key[PF_SK_WIRE]->af);
604 bzero(&p, sizeof(p));
605 p.pfra_af = (*state)->key[PF_SK_WIRE]->af;
606 switch ((*state)->key[PF_SK_WIRE]->af) {
610 p.pfra_ip4addr = (*state)->src_node->addr.v4;
616 p.pfra_ip6addr = (*state)->src_node->addr.v6;
621 pfr_insert_kentry((*state)->rule.ptr->overload_tbl,
624 /* kill existing states if that's required. */
625 if ((*state)->rule.ptr->flush) {
626 struct pf_state_key *sk;
630 V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
631 RB_FOREACH(st, pf_state_tree_id, &V_tree_id) {
633 pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
634 RB_FOREACH(st, pf_state_tree_id, &tree_id) {
636 sk = st->key[PF_SK_WIRE];
638 * Kill states from this source. (Only those
639 * from the same rule if PF_FLUSH_GLOBAL is not
643 (*state)->key[PF_SK_WIRE]->af &&
644 (((*state)->direction == PF_OUT &&
645 PF_AEQ(&(*state)->src_node->addr,
646 &sk->addr[1], sk->af)) ||
647 ((*state)->direction == PF_IN &&
648 PF_AEQ(&(*state)->src_node->addr,
649 &sk->addr[0], sk->af))) &&
650 ((*state)->rule.ptr->flush &
652 (*state)->rule.ptr == st->rule.ptr)) {
653 st->timeout = PFTM_PURGE;
654 st->src.state = st->dst.state =
660 if (V_pf_status.debug >= PF_DEBUG_MISC)
662 if (pf_status.debug >= PF_DEBUG_MISC)
664 printf(", %u states killed", killed);
667 if (V_pf_status.debug >= PF_DEBUG_MISC)
669 if (pf_status.debug >= PF_DEBUG_MISC)
674 /* kill this state */
675 (*state)->timeout = PFTM_PURGE;
676 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
681 pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
682 struct pf_addr *src, sa_family_t af)
684 struct pf_src_node k;
688 PF_ACPY(&k.addr, src, af);
689 if (rule->rule_flag & PFRULE_RULESRCTRACK ||
690 rule->rpool.opts & PF_POOL_STICKYADDR)
695 V_pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
696 *sn = RB_FIND(pf_src_tree, &V_tree_src_tracking, &k);
698 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
699 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
703 if (!rule->max_src_nodes ||
704 rule->src_nodes < rule->max_src_nodes)
706 (*sn) = pool_get(&V_pf_src_tree_pl, PR_NOWAIT | PR_ZERO);
708 (*sn) = pool_get(&pf_src_tree_pl, PR_NOWAIT | PR_ZERO);
712 V_pf_status.lcounters[LCNT_SRCNODES]++;
714 pf_status.lcounters[LCNT_SRCNODES]++;
719 pf_init_threshold(&(*sn)->conn_rate,
720 rule->max_src_conn_rate.limit,
721 rule->max_src_conn_rate.seconds);
724 if (rule->rule_flag & PFRULE_RULESRCTRACK ||
725 rule->rpool.opts & PF_POOL_STICKYADDR)
726 (*sn)->rule.ptr = rule;
728 (*sn)->rule.ptr = NULL;
729 PF_ACPY(&(*sn)->addr, src, af);
730 if (RB_INSERT(pf_src_tree,
732 &V_tree_src_tracking, *sn) != NULL) {
733 if (V_pf_status.debug >= PF_DEBUG_MISC) {
735 &tree_src_tracking, *sn) != NULL) {
736 if (pf_status.debug >= PF_DEBUG_MISC) {
738 printf("pf: src_tree insert failed: ");
739 pf_print_host(&(*sn)->addr, 0, af);
743 pool_put(&V_pf_src_tree_pl, *sn);
745 pool_put(&pf_src_tree_pl, *sn);
749 (*sn)->creation = time_second;
750 (*sn)->ruletype = rule->action;
751 if ((*sn)->rule.ptr != NULL)
752 (*sn)->rule.ptr->src_nodes++;
754 V_pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
755 V_pf_status.src_nodes++;
757 pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
758 pf_status.src_nodes++;
761 if (rule->max_src_states &&
762 (*sn)->states >= rule->max_src_states) {
764 V_pf_status.lcounters[LCNT_SRCSTATES]++;
766 pf_status.lcounters[LCNT_SRCSTATES]++;
774 /* state table stuff */
777 pf_state_compare_key(struct pf_state_key *a, struct pf_state_key *b)
781 if ((diff = a->proto - b->proto) != 0)
783 if ((diff = a->af - b->af) != 0)
788 if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
790 if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
792 if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
794 if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
800 if (a->addr[0].addr32[3] > b->addr[0].addr32[3])
802 if (a->addr[0].addr32[3] < b->addr[0].addr32[3])
804 if (a->addr[1].addr32[3] > b->addr[1].addr32[3])
806 if (a->addr[1].addr32[3] < b->addr[1].addr32[3])
808 if (a->addr[0].addr32[2] > b->addr[0].addr32[2])
810 if (a->addr[0].addr32[2] < b->addr[0].addr32[2])
812 if (a->addr[1].addr32[2] > b->addr[1].addr32[2])
814 if (a->addr[1].addr32[2] < b->addr[1].addr32[2])
816 if (a->addr[0].addr32[1] > b->addr[0].addr32[1])
818 if (a->addr[0].addr32[1] < b->addr[0].addr32[1])
820 if (a->addr[1].addr32[1] > b->addr[1].addr32[1])
822 if (a->addr[1].addr32[1] < b->addr[1].addr32[1])
824 if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
826 if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
828 if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
830 if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
836 if ((diff = a->port[0] - b->port[0]) != 0)
838 if ((diff = a->port[1] - b->port[1]) != 0)
845 pf_state_compare_id(struct pf_state *a, struct pf_state *b)
851 if (a->creatorid > b->creatorid)
853 if (a->creatorid < b->creatorid)
860 pf_state_key_attach(struct pf_state_key *sk, struct pf_state *s, int idx)
862 struct pf_state_item *si;
863 struct pf_state_key *cur;
864 struct pf_state *olds = NULL;
867 KASSERT(s->key[idx] == NULL, ("%s: key is null!", __FUNCTION__));
869 KASSERT(s->key[idx] == NULL); /* XXX handle this? */
873 if ((cur = RB_INSERT(pf_state_tree, &V_pf_statetbl, sk)) != NULL) {
875 if ((cur = RB_INSERT(pf_state_tree, &pf_statetbl, sk)) != NULL) {
877 /* key exists. check for same kif, if none, add to key */
878 TAILQ_FOREACH(si, &cur->states, entry)
879 if (si->s->kif == s->kif &&
880 si->s->direction == s->direction) {
881 if (sk->proto == IPPROTO_TCP &&
882 si->s->src.state >= TCPS_FIN_WAIT_2 &&
883 si->s->dst.state >= TCPS_FIN_WAIT_2) {
884 si->s->src.state = si->s->dst.state =
886 /* unlink later, or the state keys (sks) can go away under us */
890 if (V_pf_status.debug >= PF_DEBUG_MISC) {
892 if (pf_status.debug >= PF_DEBUG_MISC) {
894 printf("pf: %s key attach "
896 (idx == PF_SK_WIRE) ?
899 pf_print_state_parts(s,
900 (idx == PF_SK_WIRE) ?
902 (idx == PF_SK_STACK) ?
904 printf(", existing: ");
905 pf_print_state_parts(si->s,
906 (idx == PF_SK_WIRE) ?
908 (idx == PF_SK_STACK) ?
913 pool_put(&V_pf_state_key_pl, sk);
915 pool_put(&pf_state_key_pl, sk);
917 return (-1); /* collision! */
921 pool_put(&V_pf_state_key_pl, sk);
923 pool_put(&pf_state_key_pl, sk);
930 if ((si = pool_get(&V_pf_state_item_pl, PR_NOWAIT)) == NULL) {
932 if ((si = pool_get(&pf_state_item_pl, PR_NOWAIT)) == NULL) {
934 pf_state_key_detach(s, idx);
939 /* list is sorted, if-bound states before floating */
941 if (s->kif == V_pfi_all)
943 if (s->kif == pfi_all)
945 TAILQ_INSERT_TAIL(&s->key[idx]->states, si, entry);
947 TAILQ_INSERT_HEAD(&s->key[idx]->states, si, entry);
950 pf_unlink_state(olds);
956 pf_detach_state(struct pf_state *s)
958 if (s->key[PF_SK_WIRE] == s->key[PF_SK_STACK])
959 s->key[PF_SK_WIRE] = NULL;
961 if (s->key[PF_SK_STACK] != NULL)
962 pf_state_key_detach(s, PF_SK_STACK);
964 if (s->key[PF_SK_WIRE] != NULL)
965 pf_state_key_detach(s, PF_SK_WIRE);
969 pf_state_key_detach(struct pf_state *s, int idx)
971 struct pf_state_item *si;
973 si = TAILQ_FIRST(&s->key[idx]->states);
974 while (si && si->s != s)
975 si = TAILQ_NEXT(si, entry);
978 TAILQ_REMOVE(&s->key[idx]->states, si, entry);
980 pool_put(&V_pf_state_item_pl, si);
982 pool_put(&pf_state_item_pl, si);
986 if (TAILQ_EMPTY(&s->key[idx]->states)) {
988 RB_REMOVE(pf_state_tree, &V_pf_statetbl, s->key[idx]);
990 RB_REMOVE(pf_state_tree, &pf_statetbl, s->key[idx]);
992 if (s->key[idx]->reverse)
993 s->key[idx]->reverse->reverse = NULL;
995 /* XXX: implement this */
997 if (s->key[idx]->inp)
998 s->key[idx]->inp->inp_pf_sk = NULL;
1001 pool_put(&V_pf_state_key_pl, s->key[idx]);
1003 pool_put(&pf_state_key_pl, s->key[idx]);
1009 struct pf_state_key *
1010 pf_alloc_state_key(int pool_flags)
1012 struct pf_state_key *sk;
1015 if ((sk = pool_get(&V_pf_state_key_pl, pool_flags)) == NULL)
1017 if ((sk = pool_get(&pf_state_key_pl, pool_flags)) == NULL)
1020 TAILQ_INIT(&sk->states);
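/*
 * Build the wire-side state key from the packet headers; when a translation
 * rule applies, a second key is allocated as a copy and later rewritten with
 * the translated addresses and ports, otherwise both pointers refer to the
 * same key.
 */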
1026 pf_state_key_setup(struct pf_pdesc *pd, struct pf_rule *nr,
1027 struct pf_state_key **skw, struct pf_state_key **sks,
1028 struct pf_state_key **skp, struct pf_state_key **nkp,
1029 struct pf_addr *saddr, struct pf_addr *daddr,
1030 u_int16_t sport, u_int16_t dport)
1033 KASSERT((*skp == NULL && *nkp == NULL),
1034 ("%s: skp == NULL && nkp == NULL", __FUNCTION__));
1036 KASSERT((*skp == NULL && *nkp == NULL));
1039 if ((*skp = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL)
1042 PF_ACPY(&(*skp)->addr[pd->sidx], saddr, pd->af);
1043 PF_ACPY(&(*skp)->addr[pd->didx], daddr, pd->af);
1044 (*skp)->port[pd->sidx] = sport;
1045 (*skp)->port[pd->didx] = dport;
1046 (*skp)->proto = pd->proto;
1047 (*skp)->af = pd->af;
1050 if ((*nkp = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL)
1051 return (ENOMEM); /* caller must handle cleanup */
1053 /* XXX maybe just bcopy and TAILQ_INIT(&(*nkp)->states) */
1054 PF_ACPY(&(*nkp)->addr[0], &(*skp)->addr[0], pd->af);
1055 PF_ACPY(&(*nkp)->addr[1], &(*skp)->addr[1], pd->af);
1056 (*nkp)->port[0] = (*skp)->port[0];
1057 (*nkp)->port[1] = (*skp)->port[1];
1058 (*nkp)->proto = pd->proto;
1059 (*nkp)->af = pd->af;
1063 if (pd->dir == PF_IN) {
1075 pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
1076 struct pf_state_key *sks, struct pf_state *s)
1079 splassert(IPL_SOFTNET);
1085 if (pf_state_key_attach(skw, s, PF_SK_WIRE))
1087 s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
1089 if (pf_state_key_attach(skw, s, PF_SK_WIRE)) {
1091 pool_put(&V_pf_state_key_pl, sks);
1093 pool_put(&pf_state_key_pl, sks);
1097 if (pf_state_key_attach(sks, s, PF_SK_STACK)) {
1098 pf_state_key_detach(s, PF_SK_WIRE);
1103 if (s->id == 0 && s->creatorid == 0) {
1105 s->id = htobe64(V_pf_status.stateid++);
1106 s->creatorid = V_pf_status.hostid;
1108 s->id = htobe64(pf_status.stateid++);
1109 s->creatorid = pf_status.hostid;
1113 if (RB_INSERT(pf_state_tree_id, &V_tree_id, s) != NULL) {
1114 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1116 if (RB_INSERT(pf_state_tree_id, &tree_id, s) != NULL) {
1117 if (pf_status.debug >= PF_DEBUG_MISC) {
1119 printf("pf: state insert failed: "
1120 "id: %016llx creatorid: %08x",
1122 (unsigned long long)betoh64(s->id), ntohl(s->creatorid));
1124 betoh64(s->id), ntohl(s->creatorid));
1132 TAILQ_INSERT_TAIL(&V_state_list, s, entry_list);
1133 V_pf_status.fcounters[FCNT_STATE_INSERT]++;
1134 V_pf_status.states++;
1136 TAILQ_INSERT_TAIL(&state_list, s, entry_list);
1137 pf_status.fcounters[FCNT_STATE_INSERT]++;
1140 pfi_kif_ref(kif, PFI_KIF_REF_STATE);
1143 if (pfsync_insert_state_ptr != NULL)
1144 pfsync_insert_state_ptr(s);
1146 pfsync_insert_state(s);
1153 pf_find_state_byid(struct pf_state_cmp *key)
1156 V_pf_status.fcounters[FCNT_STATE_SEARCH]++;
1158 return (RB_FIND(pf_state_tree_id, &V_tree_id, (struct pf_state *)key));
1160 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1162 return (RB_FIND(pf_state_tree_id, &tree_id, (struct pf_state *)key));
1166 /* XXX debug function, intended to be removed one day */
1168 pf_compare_state_keys(struct pf_state_key *a, struct pf_state_key *b,
1169 struct pfi_kif *kif, u_int dir)
1171 /* a (from hdr) and b (new) must be exact opposites of each other */
1172 if (a->af == b->af && a->proto == b->proto &&
1173 PF_AEQ(&a->addr[0], &b->addr[1], a->af) &&
1174 PF_AEQ(&a->addr[1], &b->addr[0], a->af) &&
1175 a->port[0] == b->port[1] &&
1176 a->port[1] == b->port[0])
1179 /* mismatch. must not happen. */
1180 printf("pf: state key linking mismatch! dir=%s, "
1181 "if=%s, stored af=%u, a0: ",
1182 dir == PF_OUT ? "OUT" : "IN", kif->pfik_name, a->af);
1183 pf_print_host(&a->addr[0], a->port[0], a->af);
1185 pf_print_host(&a->addr[1], a->port[1], a->af);
1186 printf(", proto=%u", a->proto);
1187 printf(", found af=%u, a0: ", b->af);
1188 pf_print_host(&b->addr[0], b->port[0], b->af);
1190 pf_print_host(&b->addr[1], b->port[1], b->af);
1191 printf(", proto=%u", b->proto);
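/*
 * State lookup for the main tests.  On outbound packets the state key found
 * on the inbound side is remembered in the mbuf/pf_mtag; its `reverse'
 * pointer caches the matching key for the other direction so the tree lookup
 * can be skipped on subsequent packets.
 */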
1199 pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir,
1200 struct mbuf *m, struct pf_mtag *pftag)
1202 pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir,
1206 struct pf_state_key *sk;
1207 struct pf_state_item *si;
1210 V_pf_status.fcounters[FCNT_STATE_SEARCH]++;
1212 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1216 if (dir == PF_OUT && pftag->statekey &&
1217 ((struct pf_state_key *)pftag->statekey)->reverse)
1218 sk = ((struct pf_state_key *)pftag->statekey)->reverse;
1221 if ((sk = RB_FIND(pf_state_tree, &V_pf_statetbl,
1223 if ((sk = RB_FIND(pf_state_tree, &pf_statetbl,
1225 (struct pf_state_key *)key)) == NULL)
1227 if (dir == PF_OUT && pftag->statekey &&
1228 pf_compare_state_keys(pftag->statekey, sk,
1230 ((struct pf_state_key *)
1231 pftag->statekey)->reverse = sk;
1232 sk->reverse = pftag->statekey;
1236 if (dir == PF_OUT && m->m_pkthdr.pf.statekey &&
1237 ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse)
1238 sk = ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse;
1241 if ((sk = RB_FIND(pf_state_tree, &V_pf_statetbl,
1243 if ((sk = RB_FIND(pf_state_tree, &pf_statetbl,
1245 (struct pf_state_key *)key)) == NULL)
1247 if (dir == PF_OUT && m->m_pkthdr.pf.statekey &&
1248 pf_compare_state_keys(m->m_pkthdr.pf.statekey, sk,
1250 ((struct pf_state_key *)
1251 m->m_pkthdr.pf.statekey)->reverse = sk;
1252 sk->reverse = m->m_pkthdr.pf.statekey;
1259 pftag->statekey = NULL;
1261 m->m_pkthdr.pf.statekey = NULL;
1264 /* list is sorted, if-bound states before floating ones */
1265 TAILQ_FOREACH(si, &sk->states, entry)
1267 if ((si->s->kif == V_pfi_all || si->s->kif == kif) &&
1269 if ((si->s->kif == pfi_all || si->s->kif == kif) &&
1271 sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
1272 si->s->key[PF_SK_STACK]))
1279 pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
1281 struct pf_state_key *sk;
1282 struct pf_state_item *si, *ret = NULL;
1285 V_pf_status.fcounters[FCNT_STATE_SEARCH]++;
1287 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1291 sk = RB_FIND(pf_state_tree, &V_pf_statetbl, (struct pf_state_key *)key);
1293 sk = RB_FIND(pf_state_tree, &pf_statetbl, (struct pf_state_key *)key);
1296 TAILQ_FOREACH(si, &sk->states, entry)
1297 if (dir == PF_INOUT ||
1298 (sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
1299 si->s->key[PF_SK_STACK]))) {
1309 return (ret ? ret->s : NULL);
1312 /* END state table stuff */
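/*
 * Purge kthread: wakes up once a second, expires a slice of the state table
 * on every pass so a full sweep completes once per PFTM_INTERVAL, and purges
 * fragments and source nodes on that longer interval as well.
 */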
1316 pf_purge_thread(void *v)
1323 CURVNET_SET((struct vnet *)v);
1326 tsleep(pf_purge_thread, PWAIT, "pftm", 1 * hz);
1329 sx_slock(&V_pf_consistency_lock);
1333 if (V_pf_end_threads) {
1335 sx_sunlock(&V_pf_consistency_lock);
1336 sx_xlock(&V_pf_consistency_lock);
1339 pf_purge_expired_states(V_pf_status.states, 1);
1340 pf_purge_expired_fragments();
1341 pf_purge_expired_src_nodes(1);
1344 sx_xunlock(&V_pf_consistency_lock);
1346 wakeup(pf_purge_thread);
1352 /* process a fraction of the state table every second */
1354 if (!pf_purge_expired_states(1 + (V_pf_status.states /
1355 V_pf_default_rule.timeout[PFTM_INTERVAL]), 0)) {
1357 sx_sunlock(&V_pf_consistency_lock);
1358 sx_xlock(&V_pf_consistency_lock);
1362 pf_purge_expired_states(1 + (V_pf_status.states /
1363 V_pf_default_rule.timeout[PFTM_INTERVAL]), 1);
1366 pf_purge_expired_states(1 + (pf_status.states
1367 / pf_default_rule.timeout[PFTM_INTERVAL]));
1370 /* purge other expired types every PFTM_INTERVAL seconds */
1372 if (++nloops >= V_pf_default_rule.timeout[PFTM_INTERVAL]) {
1374 if (++nloops >= pf_default_rule.timeout[PFTM_INTERVAL]) {
1376 pf_purge_expired_fragments();
1377 pf_purge_expired_src_nodes(0);
1385 sx_xunlock(&V_pf_consistency_lock);
1387 sx_sunlock(&V_pf_consistency_lock);
1394 pf_state_expires(const struct pf_state *state)
1401 /* handle all PFTM_* > PFTM_MAX here */
1402 if (state->timeout == PFTM_PURGE)
1403 return (time_second);
1404 if (state->timeout == PFTM_UNTIL_PACKET)
1407 KASSERT(state->timeout != PFTM_UNLINKED,
1408 ("pf_state_expires: timeout == PFTM_UNLINKED"));
1409 KASSERT((state->timeout < PFTM_MAX),
1410 ("pf_state_expires: timeout > PFTM_MAX"));
1412 KASSERT(state->timeout != PFTM_UNLINKED);
1413 KASSERT(state->timeout < PFTM_MAX);
1415 timeout = state->rule.ptr->timeout[state->timeout];
1418 timeout = V_pf_default_rule.timeout[state->timeout];
1420 timeout = pf_default_rule.timeout[state->timeout];
1422 start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
1424 end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
1425 states = state->rule.ptr->states_cur;
1428 start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
1429 end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
1430 states = V_pf_status.states;
1432 start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
1433 end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
1434 states = pf_status.states;
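/*
 * Adaptive timeouts: above adaptive.start the remaining timeout shrinks
 * linearly with the state count and reaches zero at adaptive.end.
 */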
1437 if (end && states > start && start < end) {
1439 return (state->expire + timeout * (end - states) /
1442 return (time_second);
1444 return (state->expire + timeout);
1449 pf_purge_expired_src_nodes(int waslocked)
1452 pf_purge_expired_src_nodes(int waslocked)
1455 struct pf_src_node *cur, *next;
1456 int locked = waslocked;
1459 for (cur = RB_MIN(pf_src_tree, &V_tree_src_tracking); cur; cur = next) {
1460 next = RB_NEXT(pf_src_tree, &V_tree_src_tracking, cur);
1462 for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
1463 next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);
1466 if (cur->states <= 0 && cur->expire <= time_second) {
1469 if (!sx_try_upgrade(&V_pf_consistency_lock))
1472 rw_enter_write(&pf_consistency_lock);
1474 next = RB_NEXT(pf_src_tree,
1476 &V_tree_src_tracking, cur);
1478 &tree_src_tracking, cur);
1482 if (cur->rule.ptr != NULL) {
1483 cur->rule.ptr->src_nodes--;
1484 if (cur->rule.ptr->states_cur <= 0 &&
1485 cur->rule.ptr->max_src_nodes <= 0)
1486 pf_rm_rule(NULL, cur->rule.ptr);
1489 RB_REMOVE(pf_src_tree, &V_tree_src_tracking, cur);
1490 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
1491 V_pf_status.src_nodes--;
1492 pool_put(&V_pf_src_tree_pl, cur);
1494 RB_REMOVE(pf_src_tree, &tree_src_tracking, cur);
1495 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
1496 pf_status.src_nodes--;
1497 pool_put(&pf_src_tree_pl, cur);
1502 if (locked && !waslocked)
1505 sx_downgrade(&V_pf_consistency_lock);
1509 rw_exit_write(&pf_consistency_lock);
1514 pf_src_tree_remove_state(struct pf_state *s)
1518 if (s->src_node != NULL) {
1520 --s->src_node->conn;
1521 if (--s->src_node->states <= 0) {
1522 timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
1526 V_pf_default_rule.timeout[PFTM_SRC_NODE];
1528 pf_default_rule.timeout[PFTM_SRC_NODE];
1530 s->src_node->expire = time_second + timeout;
1533 if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
1534 if (--s->nat_src_node->states <= 0) {
1535 timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
1539 V_pf_default_rule.timeout[PFTM_SRC_NODE];
1541 pf_default_rule.timeout[PFTM_SRC_NODE];
1543 s->nat_src_node->expire = time_second + timeout;
1546 s->src_node = s->nat_src_node = NULL;
1549 /* callers should be at splsoftnet */
1551 pf_unlink_state(struct pf_state *cur)
1554 if (cur->local_flags & PFSTATE_EXPIRING)
1556 cur->local_flags |= PFSTATE_EXPIRING;
1558 splassert(IPL_SOFTNET);
1561 if (cur->src.state == PF_TCPS_PROXY_DST) {
1562 /* XXX wire key the right one? */
1564 pf_send_tcp(NULL, cur->rule.ptr, cur->key[PF_SK_WIRE]->af,
1566 pf_send_tcp(cur->rule.ptr, cur->key[PF_SK_WIRE]->af,
1568 &cur->key[PF_SK_WIRE]->addr[1],
1569 &cur->key[PF_SK_WIRE]->addr[0],
1570 cur->key[PF_SK_WIRE]->port[1],
1571 cur->key[PF_SK_WIRE]->port[0],
1572 cur->src.seqhi, cur->src.seqlo + 1,
1573 TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
1576 RB_REMOVE(pf_state_tree_id, &V_tree_id, cur);
1578 RB_REMOVE(pf_state_tree_id, &tree_id, cur);
1581 if (cur->state_flags & PFSTATE_PFLOW)
1583 if (export_pflow_ptr != NULL)
1584 export_pflow_ptr(cur);
1591 if (pfsync_delete_state_ptr != NULL)
1592 pfsync_delete_state_ptr(cur);
1594 pfsync_delete_state(cur);
1597 cur->timeout = PFTM_UNLINKED;
1598 pf_src_tree_remove_state(cur);
1599 pf_detach_state(cur);
1602 /* callers should be at splsoftnet and hold the
1603 * write_lock on pf_consistency_lock */
1605 pf_free_state(struct pf_state *cur)
1608 splassert(IPL_SOFTNET);
1613 if (pfsync_state_in_use_ptr != NULL &&
1614 pfsync_state_in_use_ptr(cur))
1616 if (pfsync_state_in_use(cur))
1621 KASSERT(cur->timeout == PFTM_UNLINKED,
1622 ("pf_free_state: cur->timeout != PFTM_UNLINKED"));
1624 KASSERT(cur->timeout == PFTM_UNLINKED);
1626 if (--cur->rule.ptr->states_cur <= 0 &&
1627 cur->rule.ptr->src_nodes <= 0)
1628 pf_rm_rule(NULL, cur->rule.ptr);
1629 if (cur->nat_rule.ptr != NULL)
1630 if (--cur->nat_rule.ptr->states_cur <= 0 &&
1631 cur->nat_rule.ptr->src_nodes <= 0)
1632 pf_rm_rule(NULL, cur->nat_rule.ptr);
1633 if (cur->anchor.ptr != NULL)
1634 if (--cur->anchor.ptr->states_cur <= 0)
1635 pf_rm_rule(NULL, cur->anchor.ptr);
1636 pf_normalize_tcp_cleanup(cur);
1637 pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE);
1639 TAILQ_REMOVE(&V_state_list, cur, entry_list);
1641 TAILQ_REMOVE(&state_list, cur, entry_list);
1644 pf_tag_unref(cur->tag);
1646 pool_put(&V_pf_state_pl, cur);
1647 V_pf_status.fcounters[FCNT_STATE_REMOVALS]++;
1648 V_pf_status.states--;
1650 pool_put(&pf_state_pl, cur);
1651 pf_status.fcounters[FCNT_STATE_REMOVALS]++;
1658 pf_purge_expired_states(u_int32_t maxcheck, int waslocked)
1661 pf_purge_expired_states(u_int32_t maxcheck)
1664 static struct pf_state *cur = NULL;
1665 struct pf_state *next;
1667 int locked = waslocked;
1672 while (maxcheck--) {
1673 /* wrap to start of list when we hit the end */
1676 cur = TAILQ_FIRST(&V_state_list);
1678 cur = TAILQ_FIRST(&state_list);
1681 break; /* list empty */
1684 /* get next state, as cur may get deleted */
1685 next = TAILQ_NEXT(cur, entry_list);
1687 if (cur->timeout == PFTM_UNLINKED) {
1688 /* free unlinked state */
1691 if (!sx_try_upgrade(&V_pf_consistency_lock))
1694 rw_enter_write(&pf_consistency_lock);
1699 } else if (pf_state_expires(cur) <= time_second) {
1700 /* unlink and free expired state */
1701 pf_unlink_state(cur);
1704 if (!sx_try_upgrade(&V_pf_consistency_lock))
1707 rw_enter_write(&pf_consistency_lock);
1717 if (!waslocked && locked)
1718 sx_downgrade(&V_pf_consistency_lock);
1723 rw_exit_write(&pf_consistency_lock);
1728 pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
1730 if (aw->type != PF_ADDR_TABLE)
1732 if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname, 1)) == NULL)
1738 pf_tbladdr_remove(struct pf_addr_wrap *aw)
1740 if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
1742 pfr_detach_table(aw->p.tbl);
1747 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
1749 struct pfr_ktable *kt = aw->p.tbl;
1751 if (aw->type != PF_ADDR_TABLE || kt == NULL)
1753 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1754 kt = kt->pfrkt_root;
1756 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
1761 pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
1766 u_int32_t a = ntohl(addr->addr32[0]);
1767 printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
1779 u_int8_t i, curstart, curend, maxstart, maxend;
1780 curstart = curend = maxstart = maxend = 255;
1781 for (i = 0; i < 8; i++) {
1782 if (!addr->addr16[i]) {
1783 if (curstart == 255)
1787 if ((curend - curstart) >
1788 (maxend - maxstart)) {
1789 maxstart = curstart;
1792 curstart = curend = 255;
1795 if ((curend - curstart) >
1796 (maxend - maxstart)) {
1797 maxstart = curstart;
1800 for (i = 0; i < 8; i++) {
1801 if (i >= maxstart && i <= maxend) {
1807 b = ntohs(addr->addr16[i]);
1824 pf_print_state(struct pf_state *s)
1826 pf_print_state_parts(s, NULL, NULL);
1830 pf_print_state_parts(struct pf_state *s,
1831 struct pf_state_key *skwp, struct pf_state_key *sksp)
1833 struct pf_state_key *skw, *sks;
1834 u_int8_t proto, dir;
1836 /* Do our best to fill these, but they're skipped if NULL */
1837 skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
1838 sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
1839 proto = skw ? skw->proto : (sks ? sks->proto : 0);
1840 dir = s ? s->direction : 0;
1858 case IPPROTO_ICMPV6:
1862 printf("%u", skw->proto);
1875 pf_print_host(&skw->addr[0], skw->port[0], skw->af);
1877 pf_print_host(&skw->addr[1], skw->port[1], skw->af);
1882 pf_print_host(&sks->addr[0], sks->port[0], sks->af);
1884 pf_print_host(&sks->addr[1], sks->port[1], sks->af);
1889 if (proto == IPPROTO_TCP) {
1890 printf(" [lo=%u high=%u win=%u modulator=%u",
1891 s->src.seqlo, s->src.seqhi,
1892 s->src.max_win, s->src.seqdiff);
1893 if (s->src.wscale && s->dst.wscale)
1894 printf(" wscale=%u",
1895 s->src.wscale & PF_WSCALE_MASK);
1897 printf(" [lo=%u high=%u win=%u modulator=%u",
1898 s->dst.seqlo, s->dst.seqhi,
1899 s->dst.max_win, s->dst.seqdiff);
1900 if (s->src.wscale && s->dst.wscale)
1901 printf(" wscale=%u",
1902 s->dst.wscale & PF_WSCALE_MASK);
1905 printf(" %u:%u", s->src.state, s->dst.state);
1910 pf_print_flags(u_int8_t f)
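/*
 * Skip steps: for each rule and each PF_SKIP_* criterion, record the next
 * rule with a different value for that criterion.  When a packet fails a
 * test, evaluation jumps straight past every rule that would fail it for
 * the same reason.
 */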
1932 #define PF_SET_SKIP_STEPS(i) \
1934 while (head[i] != cur) { \
1935 head[i]->skip[i].ptr = cur; \
1936 head[i] = TAILQ_NEXT(head[i], entries); \
1941 pf_calc_skip_steps(struct pf_rulequeue *rules)
1943 struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
1946 cur = TAILQ_FIRST(rules);
1948 for (i = 0; i < PF_SKIP_COUNT; ++i)
1950 while (cur != NULL) {
1952 if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
1953 PF_SET_SKIP_STEPS(PF_SKIP_IFP);
1954 if (cur->direction != prev->direction)
1955 PF_SET_SKIP_STEPS(PF_SKIP_DIR);
1956 if (cur->af != prev->af)
1957 PF_SET_SKIP_STEPS(PF_SKIP_AF);
1958 if (cur->proto != prev->proto)
1959 PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
1960 if (cur->src.neg != prev->src.neg ||
1961 pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
1962 PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
1963 if (cur->src.port[0] != prev->src.port[0] ||
1964 cur->src.port[1] != prev->src.port[1] ||
1965 cur->src.port_op != prev->src.port_op)
1966 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
1967 if (cur->dst.neg != prev->dst.neg ||
1968 pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
1969 PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
1970 if (cur->dst.port[0] != prev->dst.port[0] ||
1971 cur->dst.port[1] != prev->dst.port[1] ||
1972 cur->dst.port_op != prev->dst.port_op)
1973 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
1976 cur = TAILQ_NEXT(cur, entries);
1978 for (i = 0; i < PF_SKIP_COUNT; ++i)
1979 PF_SET_SKIP_STEPS(i);
1983 pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
1985 if (aw1->type != aw2->type)
1987 switch (aw1->type) {
1988 case PF_ADDR_ADDRMASK:
1990 if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
1992 if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
1995 case PF_ADDR_DYNIFTL:
1996 return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
1997 case PF_ADDR_NOROUTE:
1998 case PF_ADDR_URPFFAILED:
2001 return (aw1->p.tbl != aw2->p.tbl);
2002 case PF_ADDR_RTLABEL:
2003 return (aw1->v.rtlabel != aw2->v.rtlabel);
2005 printf("invalid address type: %d\n", aw1->type);
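/*
 * Incremental Internet checksum update (RFC 1624): fold the difference
 * between the old and new 16-bit word into the existing checksum instead of
 * recomputing it.  The `udp' flag preserves the special meaning of a zero
 * UDP checksum ("none") and re-encodes a computed zero as 0xffff.
 */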
2011 pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
2017 l = cksum + old - new;
2018 l = (l >> 16) + (l & 65535);
2026 pf_change_ap(struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc,
2027 struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af)
2032 PF_ACPY(&ao, a, af);
2040 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2041 ao.addr16[0], an->addr16[0], 0),
2042 ao.addr16[1], an->addr16[1], 0);
2044 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
2045 ao.addr16[0], an->addr16[0], u),
2046 ao.addr16[1], an->addr16[1], u),
2052 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2053 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2054 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
2055 ao.addr16[0], an->addr16[0], u),
2056 ao.addr16[1], an->addr16[1], u),
2057 ao.addr16[2], an->addr16[2], u),
2058 ao.addr16[3], an->addr16[3], u),
2059 ao.addr16[4], an->addr16[4], u),
2060 ao.addr16[5], an->addr16[5], u),
2061 ao.addr16[6], an->addr16[6], u),
2062 ao.addr16[7], an->addr16[7], u),
2070 /* Changes a u_int32_t. Uses a void * so there are no align restrictions */
2072 pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
2076 memcpy(&ao, a, sizeof(ao));
2077 memcpy(a, &an, sizeof(u_int32_t));
2078 *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
2079 ao % 65536, an % 65536, u);
2084 pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
2088 PF_ACPY(&ao, a, AF_INET6);
2089 PF_ACPY(a, an, AF_INET6);
2091 *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2092 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2093 pf_cksum_fixup(pf_cksum_fixup(*c,
2094 ao.addr16[0], an->addr16[0], u),
2095 ao.addr16[1], an->addr16[1], u),
2096 ao.addr16[2], an->addr16[2], u),
2097 ao.addr16[3], an->addr16[3], u),
2098 ao.addr16[4], an->addr16[4], u),
2099 ao.addr16[5], an->addr16[5], u),
2100 ao.addr16[6], an->addr16[6], u),
2101 ao.addr16[7], an->addr16[7], u);
2106 pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
2107 struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
2108 u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
2110 struct pf_addr oia, ooa;
2112 PF_ACPY(&oia, ia, af);
2114 PF_ACPY(&ooa, oa, af);
2116 /* Change inner protocol port, fix inner protocol checksum. */
2118 u_int16_t oip = *ip;
2125 *pc = pf_cksum_fixup(*pc, oip, *ip, u);
2126 *ic = pf_cksum_fixup(*ic, oip, *ip, 0);
2128 *ic = pf_cksum_fixup(*ic, opc, *pc, 0);
2130 /* Change inner ip address, fix inner ip and icmp checksums. */
2131 PF_ACPY(ia, na, af);
2135 u_int32_t oh2c = *h2c;
2137 *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
2138 oia.addr16[0], ia->addr16[0], 0),
2139 oia.addr16[1], ia->addr16[1], 0);
2140 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2141 oia.addr16[0], ia->addr16[0], 0),
2142 oia.addr16[1], ia->addr16[1], 0);
2143 *ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
2149 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2150 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2151 pf_cksum_fixup(pf_cksum_fixup(*ic,
2152 oia.addr16[0], ia->addr16[0], u),
2153 oia.addr16[1], ia->addr16[1], u),
2154 oia.addr16[2], ia->addr16[2], u),
2155 oia.addr16[3], ia->addr16[3], u),
2156 oia.addr16[4], ia->addr16[4], u),
2157 oia.addr16[5], ia->addr16[5], u),
2158 oia.addr16[6], ia->addr16[6], u),
2159 oia.addr16[7], ia->addr16[7], u);
2163 /* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
2165 PF_ACPY(oa, na, af);
2169 *hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
2170 ooa.addr16[0], oa->addr16[0], 0),
2171 ooa.addr16[1], oa->addr16[1], 0);
2176 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2177 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2178 pf_cksum_fixup(pf_cksum_fixup(*ic,
2179 ooa.addr16[0], oa->addr16[0], u),
2180 ooa.addr16[1], oa->addr16[1], u),
2181 ooa.addr16[2], oa->addr16[2], u),
2182 ooa.addr16[3], oa->addr16[3], u),
2183 ooa.addr16[4], oa->addr16[4], u),
2184 ooa.addr16[5], oa->addr16[5], u),
2185 ooa.addr16[6], oa->addr16[6], u),
2186 ooa.addr16[7], oa->addr16[7], u);
2195 * Need to modulate the sequence numbers in the TCP SACK option
2196 * (credits to Krzysztof Pfaff for report and patch)
2199 pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
2200 struct tcphdr *th, struct pf_state_peer *dst)
2202 int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
2204 u_int8_t opts[TCP_MAXOLEN], *opt = opts;
2206 u_int8_t opts[MAX_TCPOPTLEN], *opt = opts;
2208 int copyback = 0, i, olen;
2209 struct sackblk sack;
2211 #define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2)
2212 if (hlen < TCPOLEN_SACKLEN ||
2213 !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
2216 while (hlen >= TCPOLEN_SACKLEN) {
2219 case TCPOPT_EOL: /* FALLTHROUGH */
2227 if (olen >= TCPOLEN_SACKLEN) {
2228 for (i = 2; i + TCPOLEN_SACK <= olen;
2229 i += TCPOLEN_SACK) {
2230 memcpy(&sack, &opt[i], sizeof(sack));
2231 pf_change_a(&sack.start, &th->th_sum,
2232 htonl(ntohl(sack.start) -
2234 pf_change_a(&sack.end, &th->th_sum,
2235 htonl(ntohl(sack.end) -
2237 memcpy(&opt[i], &sack, sizeof(sack));
2252 m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts);
2254 m_copyback(m, off + sizeof(*th), thoptlen, opts);
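/*
 * Craft and transmit a bare TCP segment (no payload, optional MSS option)
 * that bypasses the filter; used for return-rst and the synproxy handshake.
 */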
2261 pf_send_tcp(struct mbuf *replyto, const struct pf_rule *r, sa_family_t af,
2263 pf_send_tcp(const struct pf_rule *r, sa_family_t af,
2265 const struct pf_addr *saddr, const struct pf_addr *daddr,
2266 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
2267 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
2268 u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp)
2281 struct pf_mtag *pf_mtag;
2295 , ("Unsupported AF %d", af));
2304 #endif /* __FreeBSD__ */
2306 /* maximum segment size tcp option */
2307 tlen = sizeof(struct tcphdr);
2314 len = sizeof(struct ip) + tlen;
2319 len = sizeof(struct ip6_hdr) + tlen;
2324 /* create outgoing mbuf */
2325 m = m_gethdr(M_DONTWAIT, MT_HEADER);
2330 mac_netinet_firewall_send(m);
2332 if ((pf_mtag = pf_get_mtag(m)) == NULL) {
2339 m->m_flags |= M_SKIP_FIREWALL;
2340 pf_mtag->tag = rtag;
2342 m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
2343 m->m_pkthdr.pf.tag = rtag;
2346 if (r != NULL && r->rtableid >= 0)
2349 M_SETFIB(m, r->rtableid);
2350 pf_mtag->rtableid = r->rtableid;
2352 m->m_pkthdr.pf.rtableid = r->rtableid;
2359 if (r != NULL && r->qid) {
2361 pf_mtag->qid = r->qid;
2363 /* add hints for ecn */
2364 pf_mtag->hdr = mtod(m, struct ip *);
2366 m->m_pkthdr.pf.qid = r->qid;
2367 /* add hints for ecn */
2368 m->m_pkthdr.pf.hdr = mtod(m, struct ip *);
2372 m->m_data += max_linkhdr;
2373 m->m_pkthdr.len = m->m_len = len;
2374 m->m_pkthdr.rcvif = NULL;
2375 bzero(m->m_data, len);
2379 h = mtod(m, struct ip *);
2381 /* IP header fields included in the TCP checksum */
2382 h->ip_p = IPPROTO_TCP;
2383 h->ip_len = htons(tlen);
2384 h->ip_src.s_addr = saddr->v4.s_addr;
2385 h->ip_dst.s_addr = daddr->v4.s_addr;
2387 th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
2392 h6 = mtod(m, struct ip6_hdr *);
2394 /* IP header fields included in the TCP checksum */
2395 h6->ip6_nxt = IPPROTO_TCP;
2396 h6->ip6_plen = htons(tlen);
2397 memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
2398 memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));
2400 th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
2406 th->th_sport = sport;
2407 th->th_dport = dport;
2408 th->th_seq = htonl(seq);
2409 th->th_ack = htonl(ack);
2410 th->th_off = tlen >> 2;
2411 th->th_flags = flags;
2412 th->th_win = htons(win);
2415 opt = (char *)(th + 1);
2416 opt[0] = TCPOPT_MAXSEG;
2419 bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
2426 th->th_sum = in_cksum(m, len);
2428 /* Finish the IP header */
2430 h->ip_hl = sizeof(*h) >> 2;
2431 h->ip_tos = IPTOS_LOWDELAY;
2433 h->ip_off = V_path_mtu_discovery ? IP_DF : 0;
2435 h->ip_ttl = ttl ? ttl : V_ip_defttl;
2437 h->ip_len = htons(len);
2438 h->ip_off = htons(ip_mtudisc ? IP_DF : 0);
2439 h->ip_ttl = ttl ? ttl : ip_defttl;
2445 ip_output(m, (void *)NULL, (void *)NULL, 0,
2446 (void *)NULL, (void *)NULL);
2448 #else /* ! __FreeBSD__ */
2449 ip_output(m, (void *)NULL, (void *)NULL, 0,
2450 (void *)NULL, (void *)NULL);
2455 struct ether_header *e = (void *)ro.ro_dst.sa_data;
2463 ro.ro_dst.sa_len = sizeof(ro.ro_dst);
2464 ro.ro_dst.sa_family = pseudo_AF_HDRCMPLT;
2465 bcopy(eh->ether_dhost, e->ether_shost, ETHER_ADDR_LEN);
2466 bcopy(eh->ether_shost, e->ether_dhost, ETHER_ADDR_LEN);
2467 e->ether_type = eh->ether_type;
2470 /* XXX_IMPORT: later */
2471 ip_output(m, (void *)NULL, &ro, 0,
2472 (void *)NULL, (void *)NULL);
2474 #else /* ! __FreeBSD__ */
2475 ip_output(m, (void *)NULL, &ro, IP_ROUTETOETHER,
2476 (void *)NULL, (void *)NULL);
2484 th->th_sum = in6_cksum(m, IPPROTO_TCP,
2485 sizeof(struct ip6_hdr), tlen);
2487 h6->ip6_vfc |= IPV6_VERSION;
2488 h6->ip6_hlim = IPV6_DEFHLIM;
2492 ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
2495 ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
2503 pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
2511 struct pf_mtag *pf_mtag;
2515 m0 = m_copypacket(m, M_DONTWAIT);
2519 if ((m0 = m_copy(m, 0, M_COPYALL)) == NULL)
2524 if ((pf_mtag = pf_get_mtag(m0)) == NULL)
2527 m0->m_flags |= M_SKIP_FIREWALL;
2529 m0->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
2532 if (r->rtableid >= 0)
2535 M_SETFIB(m0, r->rtableid);
2536 pf_mtag->rtableid = r->rtableid;
2538 m0->m_pkthdr.pf.rtableid = r->rtableid;
2547 pf_mtag->qid = r->qid;
2548 /* add hints for ecn */
2549 pf_mtag->hdr = mtod(m0, struct ip *);
2551 m0->m_pkthdr.pf.qid = r->qid;
2552 /* add hints for ecn */
2553 m0->m_pkthdr.pf.hdr = mtod(m0, struct ip *);
2562 /* icmp_error() expects host byte ordering */
2563 ip = mtod(m0, struct ip *);
2567 icmp_error(m0, type, code, 0, 0);
2570 icmp_error(m0, type, code, 0, 0);
2579 icmp6_error(m0, type, code, 0);
2589 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
2590 * If n is 0, they match if they are equal. If n is != 0, they match if they
2594 pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
2595 struct pf_addr *b, sa_family_t af)
2602 if ((a->addr32[0] & m->addr32[0]) ==
2603 (b->addr32[0] & m->addr32[0]))
2609 if (((a->addr32[0] & m->addr32[0]) ==
2610 (b->addr32[0] & m->addr32[0])) &&
2611 ((a->addr32[1] & m->addr32[1]) ==
2612 (b->addr32[1] & m->addr32[1])) &&
2613 ((a->addr32[2] & m->addr32[2]) ==
2614 (b->addr32[2] & m->addr32[2])) &&
2615 ((a->addr32[3] & m->addr32[3]) ==
2616 (b->addr32[3] & m->addr32[3])))
2635 * Return 1 if b <= a <= e, otherwise return 0.
2638 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
2639 struct pf_addr *a, sa_family_t af)
2644 if ((a->addr32[0] < b->addr32[0]) ||
2645 (a->addr32[0] > e->addr32[0]))
2654 for (i = 0; i < 4; ++i)
2655 if (a->addr32[i] > b->addr32[i])
2657 else if (a->addr32[i] < b->addr32[i])
2660 for (i = 0; i < 4; ++i)
2661 if (a->addr32[i] < e->addr32[i])
2663 else if (a->addr32[i] > e->addr32[i])
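/*
 * Generic operator match (ranges, =, !=, <, <=, >, >=) shared by the port,
 * uid and gid comparisons below.
 */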
2673 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
2677 return ((p > a1) && (p < a2));
2679 return ((p < a1) || (p > a2));
2681 return ((p >= a1) && (p <= a2));
2695 return (0); /* never reached */
2699 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
2704 return (pf_match(op, a1, a2, p));
2708 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
2710 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2712 return (pf_match(op, a1, a2, u));
2716 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
2718 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2720 return (pf_match(op, a1, a2, g));
2725 pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag,
2726 struct pf_mtag *pf_mtag)
2728 pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag)
2733 *tag = pf_mtag->tag;
2735 *tag = m->m_pkthdr.pf.tag;
2738 return ((!r->match_tag_not && r->match_tag == *tag) ||
2739 (r->match_tag_not && r->match_tag != *tag));
2744 pf_tag_packet(struct mbuf *m, int tag, int rtableid,
2745 struct pf_mtag *pf_mtag)
2747 pf_tag_packet(struct mbuf *m, int tag, int rtableid)
2750 if (tag <= 0 && rtableid < 0)
2757 m->m_pkthdr.pf.tag = tag;
2762 M_SETFIB(m, rtableid);
2765 m->m_pkthdr.pf.rtableid = rtableid;
2772 pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n,
2773 struct pf_rule **r, struct pf_rule **a, int *match)
2775 struct pf_anchor_stackframe *f;
2777 (*r)->anchor->match = 0;
2781 if (*depth >= sizeof(V_pf_anchor_stack) /
2782 sizeof(V_pf_anchor_stack[0])) {
2784 if (*depth >= sizeof(pf_anchor_stack) /
2785 sizeof(pf_anchor_stack[0])) {
2787 printf("pf_step_into_anchor: stack overflow\n");
2788 *r = TAILQ_NEXT(*r, entries);
2790 } else if (*depth == 0 && a != NULL)
2793 f = V_pf_anchor_stack + (*depth)++;
2795 f = pf_anchor_stack + (*depth)++;
2799 if ((*r)->anchor_wildcard) {
2800 f->parent = &(*r)->anchor->children;
2801 if ((f->child = RB_MIN(pf_anchor_node, f->parent)) ==
2806 *rs = &f->child->ruleset;
2810 *rs = &(*r)->anchor->ruleset;
2812 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2816 pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n,
2817 struct pf_rule **r, struct pf_rule **a, int *match)
2819 struct pf_anchor_stackframe *f;
2826 f = V_pf_anchor_stack + *depth - 1;
2828 f = pf_anchor_stack + *depth - 1;
2830 if (f->parent != NULL && f->child != NULL) {
2831 if (f->child->match ||
2832 (match != NULL && *match)) {
2833 f->r->anchor->match = 1;
2836 f->child = RB_NEXT(pf_anchor_node, f->parent, f->child);
2837 if (f->child != NULL) {
2838 *rs = &f->child->ruleset;
2839 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2847 if (*depth == 0 && a != NULL)
2850 if (f->r->anchor->match || (match != NULL && *match))
2851 quick = f->r->quick;
2852 *r = TAILQ_NEXT(f->r, entries);
2853 } while (*r == NULL);
2860 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
2861 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
2866 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2867 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2871 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2872 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2873 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
2874 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
2875 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
2876 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
2877 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
2878 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
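/* Advance an address by one, carrying through all 128 bits for IPv6. */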
2884 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
2889 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
2893 if (addr->addr32[3] == 0xffffffff) {
2894 addr->addr32[3] = 0;
2895 if (addr->addr32[2] == 0xffffffff) {
2896 addr->addr32[2] = 0;
2897 if (addr->addr32[1] == 0xffffffff) {
2898 addr->addr32[1] = 0;
2900 htonl(ntohl(addr->addr32[0]) + 1);
2903 htonl(ntohl(addr->addr32[1]) + 1);
2906 htonl(ntohl(addr->addr32[2]) + 1);
2909 htonl(ntohl(addr->addr32[3]) + 1);
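/*
 * Map the packet's addresses and ports to a local PCB so that rules with
 * `user' or `group' criteria can match on the socket owner's uid/gid.
 */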
2917 pf_socket_lookup(int direction, struct pf_pdesc *pd, struct inpcb *inp_arg)
2919 pf_socket_lookup(int direction, struct pf_pdesc *pd)
2922 struct pf_addr *saddr, *daddr;
2923 u_int16_t sport, dport;
2925 struct inpcbinfo *pi;
2927 struct inpcbtable *tb;
2933 pd->lookup.uid = UID_MAX;
2934 pd->lookup.gid = GID_MAX;
2935 pd->lookup.pid = NO_PID;
2938 if (inp_arg != NULL) {
2939 INP_LOCK_ASSERT(inp_arg);
2940 pd->lookup.uid = inp_arg->inp_cred->cr_uid;
2941 pd->lookup.gid = inp_arg->inp_cred->cr_groups[0];
2946 switch (pd->proto) {
2948 if (pd->hdr.tcp == NULL)
2950 sport = pd->hdr.tcp->th_sport;
2951 dport = pd->hdr.tcp->th_dport;
2959 if (pd->hdr.udp == NULL)
2961 sport = pd->hdr.udp->uh_sport;
2962 dport = pd->hdr.udp->uh_dport;
2972 if (direction == PF_IN) {
2989 * XXXRW: would be nice if we had an mbuf here so that we
2990 * could use in_pcblookup_mbuf().
2992 inp = in_pcblookup(pi, saddr->v4, sport, daddr->v4,
2993 dport, INPLOOKUP_RLOCKPCB, NULL);
2995 inp = in_pcblookup(pi, saddr->v4, sport,
2996 daddr->v4, dport, INPLOOKUP_WILDCARD |
2997 INPLOOKUP_RLOCKPCB, NULL);
3002 inp = in_pcbhashlookup(tb, saddr->v4, sport, daddr->v4, dport);
3004 inp = in_pcblookup_listen(tb, daddr->v4, dport, 0,
3016 * XXXRW: would be nice if we had an mbuf here so that we
3017 * could use in6_pcblookup_mbuf().
3019 inp = in6_pcblookup(pi, &saddr->v6, sport,
3020 &daddr->v6, dport, INPLOOKUP_RLOCKPCB, NULL);
3022 inp = in6_pcblookup(pi, &saddr->v6, sport,
3023 &daddr->v6, dport, INPLOOKUP_WILDCARD |
3024 INPLOOKUP_RLOCKPCB, NULL);
3029 inp = in6_pcbhashlookup(tb, &saddr->v6, sport, &daddr->v6,
3032 inp = in6_pcblookup_listen(tb, &daddr->v6, dport, 0,
3045 INP_RLOCK_ASSERT(inp);
3046 pd->lookup.uid = inp->inp_cred->cr_uid;
3047 pd->lookup.gid = inp->inp_cred->cr_groups[0];
3050 pd->lookup.uid = inp->inp_socket->so_euid;
3051 pd->lookup.gid = inp->inp_socket->so_egid;
3052 pd->lookup.pid = inp->inp_socket->so_cpid;
3058 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3062 u_int8_t *opt, optlen;
3063 u_int8_t wscale = 0;
3065 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3066 if (hlen <= sizeof(struct tcphdr))
3068 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3070 opt = hdr + sizeof(struct tcphdr);
3071 hlen -= sizeof(struct tcphdr);
3081 if (wscale > TCP_MAX_WINSHIFT)
3082 wscale = TCP_MAX_WINSHIFT;
3083 wscale |= PF_WSCALE_FLAG;
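			/*
			 * PF_WSCALE_FLAG records that the peer actually sent
			 * a window scale option; the shift count itself is
			 * kept in the low bits (PF_WSCALE_MASK) and is capped
			 * at TCP_MAX_WINSHIFT above.
			 */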
3098 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3102 u_int8_t *opt, optlen;
3104 u_int16_t mss = V_tcp_mssdflt;
3106 u_int16_t mss = tcp_mssdflt;
3109 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3110 if (hlen <= sizeof(struct tcphdr))
3112 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3114 opt = hdr + sizeof(struct tcphdr);
3115 hlen -= sizeof(struct tcphdr);
3116 while (hlen >= TCPOLEN_MAXSEG) {
3124 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
3140 pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
3143 struct sockaddr_in *dst;
3147 struct sockaddr_in6 *dst6;
3148 struct route_in6 ro6;
3150 struct rtentry *rt = NULL;
3153 u_int16_t mss = V_tcp_mssdflt;
3156 u_int16_t mss = tcp_mssdflt;
3162 hlen = sizeof(struct ip);
3163 bzero(&ro, sizeof(ro));
3164 dst = (struct sockaddr_in *)&ro.ro_dst;
3165 dst->sin_family = AF_INET;
3166 dst->sin_len = sizeof(*dst);
3167 dst->sin_addr = addr->v4;
3169 in_rtalloc_ign(&ro, 0, rtableid);
3170 #else /* ! __FreeBSD__ */
3171 rtalloc_noclone(&ro, NO_CLONING);
3178 hlen = sizeof(struct ip6_hdr);
3179 bzero(&ro6, sizeof(ro6));
3180 dst6 = (struct sockaddr_in6 *)&ro6.ro_dst;
3181 dst6->sin6_family = AF_INET6;
3182 dst6->sin6_len = sizeof(*dst6);
3183 dst6->sin6_addr = addr->v6;
3185 in6_rtalloc_ign(&ro6, 0, rtableid);
3186 #else /* ! __FreeBSD__ */
3187 rtalloc_noclone((struct route *)&ro6, NO_CLONING);
3194 if (rt && rt->rt_ifp) {
3195 mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr);
3197 mss = max(V_tcp_mssdflt, mss);
3199 mss = max(tcp_mssdflt, mss);
3203 mss = min(mss, offer);
3204 mss = max(mss, 64); /* sanity - at least max opt space */
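	/*
	 * The resulting MSS is the route's interface MTU minus the IP and
	 * TCP header sizes, clamped to the peer's offer and to a 64-byte
	 * floor; e.g. an IPv4 route over a 1500-byte MTU interface gives
	 * 1500 - 20 - 20 = 1460 unless the peer offered less.
	 */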
3209 pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr)
3211 struct pf_rule *r = s->rule.ptr;
3212 struct pf_src_node *sn = NULL;
3215 if (!r->rt || r->rt == PF_FASTROUTE)
3217 switch (s->key[PF_SK_WIRE]->af) {
3220 pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL, &sn);
3221 s->rt_kif = r->rpool.cur->kif;
3226 pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL, &sn);
3227 s->rt_kif = r->rpool.cur->kif;
3234 pf_tcp_iss(struct pf_pdesc *pd)
3237 u_int32_t digest[4];
3240 if (V_pf_tcp_secret_init == 0) {
3241 read_random(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret));
3242 MD5Init(&V_pf_tcp_secret_ctx);
3243 MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret,
3244 sizeof(V_pf_tcp_secret));
3245 V_pf_tcp_secret_init = 1;
3248 ctx = V_pf_tcp_secret_ctx;
3250 if (pf_tcp_secret_init == 0) {
3251 arc4random_buf(pf_tcp_secret, sizeof(pf_tcp_secret));
3252 MD5Init(&pf_tcp_secret_ctx);
3253 MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret,
3254 sizeof(pf_tcp_secret));
3255 pf_tcp_secret_init = 1;
3258 ctx = pf_tcp_secret_ctx;
3261 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short));
3262 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short));
3263 if (pd->af == AF_INET6) {
3264 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
3265 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
3267 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
3268 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
3270 MD5Final((u_char *)digest, &ctx);
3272 V_pf_tcp_iss_off += 4096;
3273 #define ISN_RANDOM_INCREMENT (4096 - 1)
3274 return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) +
3276 #undef ISN_RANDOM_INCREMENT
3278 pf_tcp_iss_off += 4096;
3279 return (digest[0] + tcp_iss + pf_tcp_iss_off);
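	/*
	 * The ISN returned above is an MD5 digest of a random secret
	 * (initialized on first use) and the connection's addresses and
	 * ports, plus an offset that grows by 4096 on every call (and, in
	 * the FreeBSD path, a small random increment), so modulated
	 * sequence numbers are hard to predict yet unlikely to collide.
	 */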
3284 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
3285 struct pfi_kif *kif, struct mbuf *m, int off, void *h,
3286 struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm,
3288 struct ifqueue *ifq, struct inpcb *inp)
3290 struct ifqueue *ifq)
3293 struct pf_rule *nr = NULL;
3294 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
3295 sa_family_t af = pd->af;
3296 struct pf_rule *r, *a = NULL;
3297 struct pf_ruleset *ruleset = NULL;
3298 struct pf_src_node *nsn = NULL;
3299 struct tcphdr *th = pd->hdr.tcp;
3300 struct pf_state_key *skw = NULL, *sks = NULL;
3301 struct pf_state_key *sk = NULL, *nk = NULL;
3303 int rewrite = 0, hdrlen = 0;
3304 int tag = -1, rtableid = -1;
3309 u_int16_t sport = 0, dport = 0;
3310 u_int16_t bproto_sum = 0, bip_sum = 0;
3312 u_int16_t sport, dport;
3313 u_int16_t bproto_sum = 0, bip_sum;
3315 u_int8_t icmptype = 0, icmpcode = 0;
3318 if (direction == PF_IN && pf_check_congestion(ifq)) {
3319 REASON_SET(&reason, PFRES_CONGEST);
3325 pd->lookup.done = pf_socket_lookup(direction, pd, inp);
3326 else if (V_debug_pfugidhack) {
3328 DPFPRINTF(PF_DEBUG_MISC, ("pf: unlocked lookup\n"));
3329 pd->lookup.done = pf_socket_lookup(direction, pd, inp);
3334 switch (pd->proto) {
3336 sport = th->th_sport;
3337 dport = th->th_dport;
3338 hdrlen = sizeof(*th);
3341 sport = pd->hdr.udp->uh_sport;
3342 dport = pd->hdr.udp->uh_dport;
3343 hdrlen = sizeof(*pd->hdr.udp);
3347 if (pd->af != AF_INET)
3349 sport = dport = pd->hdr.icmp->icmp_id;
3350 hdrlen = sizeof(*pd->hdr.icmp);
3351 icmptype = pd->hdr.icmp->icmp_type;
3352 icmpcode = pd->hdr.icmp->icmp_code;
3354 if (icmptype == ICMP_UNREACH ||
3355 icmptype == ICMP_SOURCEQUENCH ||
3356 icmptype == ICMP_REDIRECT ||
3357 icmptype == ICMP_TIMXCEED ||
3358 icmptype == ICMP_PARAMPROB)
3363 case IPPROTO_ICMPV6:
3366 sport = dport = pd->hdr.icmp6->icmp6_id;
3367 hdrlen = sizeof(*pd->hdr.icmp6);
3368 icmptype = pd->hdr.icmp6->icmp6_type;
3369 icmpcode = pd->hdr.icmp6->icmp6_code;
3371 if (icmptype == ICMP6_DST_UNREACH ||
3372 icmptype == ICMP6_PACKET_TOO_BIG ||
3373 icmptype == ICMP6_TIME_EXCEEDED ||
3374 icmptype == ICMP6_PARAM_PROB)
3379 sport = dport = hdrlen = 0;
3383 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3385 /* check packet for BINAT/NAT/RDR */
3386 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn,
3387 &skw, &sks, &sk, &nk, saddr, daddr, sport, dport)) != NULL) {
3388 if (nk == NULL || sk == NULL) {
3389 REASON_SET(&reason, PFRES_MEMORY);
3394 bip_sum = *pd->ip_sum;
3396 switch (pd->proto) {
3398 bproto_sum = th->th_sum;
3399 pd->proto_sum = &th->th_sum;
3401 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3402 nk->port[pd->sidx] != sport) {
3403 pf_change_ap(saddr, &th->th_sport, pd->ip_sum,
3404 &th->th_sum, &nk->addr[pd->sidx],
3405 nk->port[pd->sidx], 0, af);
3406 pd->sport = &th->th_sport;
3407 sport = th->th_sport;
3410 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3411 nk->port[pd->didx] != dport) {
3412 pf_change_ap(daddr, &th->th_dport, pd->ip_sum,
3413 &th->th_sum, &nk->addr[pd->didx],
3414 nk->port[pd->didx], 0, af);
3415 dport = th->th_dport;
3416 pd->dport = &th->th_dport;
3421 bproto_sum = pd->hdr.udp->uh_sum;
3422 pd->proto_sum = &pd->hdr.udp->uh_sum;
3424 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3425 nk->port[pd->sidx] != sport) {
3426 pf_change_ap(saddr, &pd->hdr.udp->uh_sport,
3427 pd->ip_sum, &pd->hdr.udp->uh_sum,
3428 &nk->addr[pd->sidx],
3429 nk->port[pd->sidx], 1, af);
3430 sport = pd->hdr.udp->uh_sport;
3431 pd->sport = &pd->hdr.udp->uh_sport;
3434 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3435 nk->port[pd->didx] != dport) {
3436 pf_change_ap(daddr, &pd->hdr.udp->uh_dport,
3437 pd->ip_sum, &pd->hdr.udp->uh_sum,
3438 &nk->addr[pd->didx],
3439 nk->port[pd->didx], 1, af);
3440 dport = pd->hdr.udp->uh_dport;
3441 pd->dport = &pd->hdr.udp->uh_dport;
3447 nk->port[0] = nk->port[1];
3448 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
3449 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3450 nk->addr[pd->sidx].v4.s_addr, 0);
3452 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
3453 pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
3454 nk->addr[pd->didx].v4.s_addr, 0);
3456 if (nk->port[1] != pd->hdr.icmp->icmp_id) {
3457 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
3458 pd->hdr.icmp->icmp_cksum, sport,
3460 pd->hdr.icmp->icmp_id = nk->port[1];
3461 pd->sport = &pd->hdr.icmp->icmp_id;
3463 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
3467 case IPPROTO_ICMPV6:
3468 nk->port[0] = nk->port[1];
3469 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3470 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3471 &nk->addr[pd->sidx], 0);
3473 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3474 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3475 &nk->addr[pd->didx], 0);
3484 &nk->addr[pd->sidx], AF_INET))
3485 pf_change_a(&saddr->v4.s_addr,
3487 nk->addr[pd->sidx].v4.s_addr, 0);
3490 &nk->addr[pd->didx], AF_INET))
3491 pf_change_a(&daddr->v4.s_addr,
3493 nk->addr[pd->didx].v4.s_addr, 0);
3499 &nk->addr[pd->sidx], AF_INET6))
3500 PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3503 &nk->addr[pd->didx], AF_INET6))
3504 PF_ACPY(saddr, &nk->addr[pd->didx], af);
3517 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3518 r = r->skip[PF_SKIP_IFP].ptr;
3519 else if (r->direction && r->direction != direction)
3520 r = r->skip[PF_SKIP_DIR].ptr;
3521 else if (r->af && r->af != af)
3522 r = r->skip[PF_SKIP_AF].ptr;
3523 else if (r->proto && r->proto != pd->proto)
3524 r = r->skip[PF_SKIP_PROTO].ptr;
3525 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3526 r->src.neg, kif, M_GETFIB(m)))
3527 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3528 /* tcp/udp only. port_op always 0 in other cases */
3529 else if (r->src.port_op && !pf_match_port(r->src.port_op,
3530 r->src.port[0], r->src.port[1], sport))
3531 r = r->skip[PF_SKIP_SRC_PORT].ptr;
3532 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3533 r->dst.neg, NULL, M_GETFIB(m)))
3534 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3535 /* tcp/udp only. port_op always 0 in other cases */
3536 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3537 r->dst.port[0], r->dst.port[1], dport))
3538 r = r->skip[PF_SKIP_DST_PORT].ptr;
3539 /* icmp only. type always 0 in other cases */
3540 else if (r->type && r->type != icmptype + 1)
3541 r = TAILQ_NEXT(r, entries);
3542 /* icmp only. type always 0 in other cases */
3543 else if (r->code && r->code != icmpcode + 1)
3544 r = TAILQ_NEXT(r, entries);
3545 else if (r->tos && !(r->tos == pd->tos))
3546 r = TAILQ_NEXT(r, entries);
3547 else if (r->rule_flag & PFRULE_FRAGMENT)
3548 r = TAILQ_NEXT(r, entries);
3549 else if (pd->proto == IPPROTO_TCP &&
3550 (r->flagset & th->th_flags) != r->flags)
3551 r = TAILQ_NEXT(r, entries);
3552 /* tcp/udp only. uid.op always 0 in other cases */
3553 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3555 pf_socket_lookup(direction, pd, inp), 1)) &&
3557 pf_socket_lookup(direction, pd), 1)) &&
3559 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3561 r = TAILQ_NEXT(r, entries);
3562 /* tcp/udp only. gid.op always 0 in other cases */
3563 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
3565 pf_socket_lookup(direction, pd, inp), 1)) &&
3567 pf_socket_lookup(direction, pd), 1)) &&
3569 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
3571 r = TAILQ_NEXT(r, entries);
3574 r->prob <= arc4random())
3576 r->prob <= arc4random_uniform(UINT_MAX - 1) + 1)
3578 r = TAILQ_NEXT(r, entries);
3580 else if (r->match_tag && !pf_match_tag(m, r, &tag, pd->pf_mtag))
3582 else if (r->match_tag && !pf_match_tag(m, r, &tag))
3584 r = TAILQ_NEXT(r, entries);
3585 else if (r->os_fingerprint != PF_OSFP_ANY &&
3586 (pd->proto != IPPROTO_TCP || !pf_osfp_match(
3587 pf_osfp_fingerprint(pd, m, off, th),
3588 r->os_fingerprint)))
3589 r = TAILQ_NEXT(r, entries);
3593 if (r->rtableid >= 0)
3594 rtableid = r->rtableid;
3595 if (r->anchor == NULL) {
3602 r = TAILQ_NEXT(r, entries);
3604 pf_step_into_anchor(&asd, &ruleset,
3605 PF_RULESET_FILTER, &r, &a, &match);
3607 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
3608 PF_RULESET_FILTER, &r, &a, &match))
3615 REASON_SET(&reason, PFRES_MATCH);
3617 if (r->log || (nr != NULL && nr->log)) {
3619 m_copyback(m, off, hdrlen, pd->hdr.any);
3620 PFLOG_PACKET(kif, h, m, af, direction, reason, r->log ? r : nr,
3624 if ((r->action == PF_DROP) &&
3625 ((r->rule_flag & PFRULE_RETURNRST) ||
3626 (r->rule_flag & PFRULE_RETURNICMP) ||
3627 (r->rule_flag & PFRULE_RETURN))) {
3628 /* undo NAT changes, if they have taken place */
3630 PF_ACPY(saddr, &sk->addr[pd->sidx], af);
3631 PF_ACPY(daddr, &sk->addr[pd->didx], af);
3633 *pd->sport = sk->port[pd->sidx];
3635 *pd->dport = sk->port[pd->didx];
3637 *pd->proto_sum = bproto_sum;
3639 *pd->ip_sum = bip_sum;
3640 m_copyback(m, off, hdrlen, pd->hdr.any);
3642 if (pd->proto == IPPROTO_TCP &&
3643 ((r->rule_flag & PFRULE_RETURNRST) ||
3644 (r->rule_flag & PFRULE_RETURN)) &&
3645 !(th->th_flags & TH_RST)) {
3646 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
3658 h4 = mtod(m, struct ip *);
3659 len = ntohs(h4->ip_len) - off;
3664 h6 = mtod(m, struct ip6_hdr *);
3665 len = ntohs(h6->ip6_plen) - (off - sizeof(*h6));
3670 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
3671 REASON_SET(&reason, PFRES_PROTCKSUM);
3673 if (th->th_flags & TH_SYN)
3675 if (th->th_flags & TH_FIN)
3678 pf_send_tcp(m, r, af, pd->dst,
3680 pf_send_tcp(r, af, pd->dst,
3682 pd->src, th->th_dport, th->th_sport,
3683 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
3684 r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp);
3686 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
3688 pf_send_icmp(m, r->return_icmp >> 8,
3689 r->return_icmp & 255, af, r);
3690 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
3692 pf_send_icmp(m, r->return_icmp6 >> 8,
3693 r->return_icmp6 & 255, af, r);
3696 if (r->action == PF_DROP)
3700 if (pf_tag_packet(m, tag, rtableid, pd->pf_mtag)) {
3702 if (pf_tag_packet(m, tag, rtableid)) {
3704 REASON_SET(&reason, PFRES_MEMORY);
3708 if (!state_icmp && (r->keep_state || nr != NULL ||
3709 (pd->flags & PFDESC_TCP_NORM))) {
3711 action = pf_create_state(r, nr, a, pd, nsn, skw, sks, nk, sk, m,
3712 off, sport, dport, &rewrite, kif, sm, tag, bproto_sum,
3714 if (action != PF_PASS)
3719 pool_put(&V_pf_state_key_pl, sk);
3721 pool_put(&V_pf_state_key_pl, nk);
3724 pool_put(&pf_state_key_pl, sk);
3726 pool_put(&pf_state_key_pl, nk);
3730 /* copy back packet headers if we performed NAT operations */
3732 m_copyback(m, off, hdrlen, pd->hdr.any);
3735 if (*sm != NULL && !ISSET((*sm)->state_flags, PFSTATE_NOSYNC) &&
3737 direction == PF_OUT && pfsync_up_ptr != NULL && pfsync_up_ptr()) {
3739 direction == PF_OUT && pfsync_up()) {
3742 * We want the state created, but we don't
3743 * want to send this in case a partner
3744 * firewall has to know about it to allow
3745 * replies through it.
3748 if (pfsync_defer_ptr != NULL &&
3749 pfsync_defer_ptr(*sm, m))
3751 if (pfsync_defer(*sm, m))
3762 pool_put(&V_pf_state_key_pl, sk);
3764 pool_put(&V_pf_state_key_pl, nk);
3767 pool_put(&pf_state_key_pl, sk);
3769 pool_put(&pf_state_key_pl, nk);
3775 pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
3776 struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *skw,
3777 struct pf_state_key *sks, struct pf_state_key *nk, struct pf_state_key *sk,
3778 struct mbuf *m, int off, u_int16_t sport, u_int16_t dport, int *rewrite,
3779 struct pfi_kif *kif, struct pf_state **sm, int tag, u_int16_t bproto_sum,
3780 u_int16_t bip_sum, int hdrlen)
3782 struct pf_state *s = NULL;
3783 struct pf_src_node *sn = NULL;
3784 struct tcphdr *th = pd->hdr.tcp;
3786 u_int16_t mss = V_tcp_mssdflt;
3788 u_int16_t mss = tcp_mssdflt;
3792 /* check maximums */
3793 if (r->max_states && (r->states_cur >= r->max_states)) {
3795 V_pf_status.lcounters[LCNT_STATES]++;
3797 pf_status.lcounters[LCNT_STATES]++;
3799 REASON_SET(&reason, PFRES_MAXSTATES);
3802 /* src node for filter rule */
3803 if ((r->rule_flag & PFRULE_SRCTRACK ||
3804 r->rpool.opts & PF_POOL_STICKYADDR) &&
3805 pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
3806 REASON_SET(&reason, PFRES_SRCLIMIT);
3809 /* src node for translation rule */
3810 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3811 pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
3812 REASON_SET(&reason, PFRES_SRCLIMIT);
3816 s = pool_get(&V_pf_state_pl, PR_NOWAIT | PR_ZERO);
3818 s = pool_get(&pf_state_pl, PR_NOWAIT | PR_ZERO);
3821 REASON_SET(&reason, PFRES_MEMORY);
3825 s->nat_rule.ptr = nr;
3827 STATE_INC_COUNTERS(s);
3829 s->state_flags |= PFSTATE_ALLOWOPTS;
3830 if (r->rule_flag & PFRULE_STATESLOPPY)
3831 s->state_flags |= PFSTATE_SLOPPY;
3832 if (r->rule_flag & PFRULE_PFLOW)
3833 s->state_flags |= PFSTATE_PFLOW;
3834 s->log = r->log & PF_LOG_ALL;
3835 s->sync_state = PFSYNC_S_NONE;
3837 s->log |= nr->log & PF_LOG_ALL;
3838 switch (pd->proto) {
3840 s->src.seqlo = ntohl(th->th_seq);
3841 s->src.seqhi = s->src.seqlo + pd->p_len + 1;
3842 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
3843 r->keep_state == PF_STATE_MODULATE) {
3844 /* Generate sequence number modulator */
3845 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
3848 pf_change_a(&th->th_seq, &th->th_sum,
3849 htonl(s->src.seqlo + s->src.seqdiff), 0);
3853 if (th->th_flags & TH_SYN) {
3855 s->src.wscale = pf_get_wscale(m, off,
3856 th->th_off, pd->af);
3858 s->src.max_win = MAX(ntohs(th->th_win), 1);
3859 if (s->src.wscale & PF_WSCALE_MASK) {
3860 /* Remove scale factor from initial window */
3861 int win = s->src.max_win;
3862 win += 1 << (s->src.wscale & PF_WSCALE_MASK);
3863 s->src.max_win = (win - 1) >>
3864 (s->src.wscale & PF_WSCALE_MASK);
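			/*
			 * The window field of a SYN is never scaled on the
			 * wire, so the value is divided by 2^wscale, rounding
			 * up, before being stored; e.g. win 1000 with
			 * wscale 7 becomes (1000 + 128 - 1) >> 7 = 8.
			 */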
3866 if (th->th_flags & TH_FIN)
3870 s->src.state = TCPS_SYN_SENT;
3871 s->dst.state = TCPS_CLOSED;
3872 s->timeout = PFTM_TCP_FIRST_PACKET;
3875 s->src.state = PFUDPS_SINGLE;
3876 s->dst.state = PFUDPS_NO_TRAFFIC;
3877 s->timeout = PFTM_UDP_FIRST_PACKET;
3881 case IPPROTO_ICMPV6:
3883 s->timeout = PFTM_ICMP_FIRST_PACKET;
3886 s->src.state = PFOTHERS_SINGLE;
3887 s->dst.state = PFOTHERS_NO_TRAFFIC;
3888 s->timeout = PFTM_OTHER_FIRST_PACKET;
3891 s->creation = time_second;
3892 s->expire = time_second;
3896 s->src_node->states++;
3899 /* XXX We only modify one side for now. */
3900 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
3901 s->nat_src_node = nsn;
3902 s->nat_src_node->states++;
3904 if (pd->proto == IPPROTO_TCP) {
3905 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
3906 off, pd, th, &s->src, &s->dst)) {
3907 REASON_SET(&reason, PFRES_MEMORY);
3908 pf_src_tree_remove_state(s);
3909 STATE_DEC_COUNTERS(s);
3911 pool_put(&V_pf_state_pl, s);
3913 pool_put(&pf_state_pl, s);
3917 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
3918 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
3919 &s->src, &s->dst, rewrite)) {
3920 /* This really shouldn't happen!!! */
3921 DPFPRINTF(PF_DEBUG_URGENT,
3922 ("pf_normalize_tcp_stateful failed on first pkt"));
3923 pf_normalize_tcp_cleanup(s);
3924 pf_src_tree_remove_state(s);
3925 STATE_DEC_COUNTERS(s);
3927 pool_put(&V_pf_state_pl, s);
3929 pool_put(&pf_state_pl, s);
3934 s->direction = pd->dir;
3936 if (sk == NULL && pf_state_key_setup(pd, nr, &skw, &sks, &sk, &nk,
3937 pd->src, pd->dst, sport, dport))
3940 if (pf_state_insert(BOUND_IFACE(r, kif), skw, sks, s)) {
3941 if (pd->proto == IPPROTO_TCP)
3942 pf_normalize_tcp_cleanup(s);
3943 REASON_SET(&reason, PFRES_STATEINS);
3944 pf_src_tree_remove_state(s);
3945 STATE_DEC_COUNTERS(s);
3947 pool_put(&V_pf_state_pl, s);
3949 pool_put(&pf_state_pl, s);
3955 pf_set_rt_ifp(s, pd->src); /* needs s->state_key set */
3960 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
3961 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
3962 s->src.state = PF_TCPS_PROXY_SRC;
3963 /* undo NAT changes, if they have taken place */
3965 struct pf_state_key *skt = s->key[PF_SK_WIRE];
3966 if (pd->dir == PF_OUT)
3967 skt = s->key[PF_SK_STACK];
3968 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
3969 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
3971 *pd->sport = skt->port[pd->sidx];
3973 *pd->dport = skt->port[pd->didx];
3975 *pd->proto_sum = bproto_sum;
3977 *pd->ip_sum = bip_sum;
3978 m_copyback(m, off, hdrlen, pd->hdr.any);
3980 s->src.seqhi = htonl(arc4random());
3981 /* Find mss option */
3982 int rtid = M_GETFIB(m);
3983 mss = pf_get_mss(m, off, th->th_off, pd->af);
3984 mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
3985 mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
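		/*
		 * Chaining pf_calc_mss() over both directions takes the
		 * minimum of the peer's offered MSS and the MSS allowed by
		 * the routes towards source and destination, which is what
		 * the proxy advertises in its SYN+ACK below.
		 */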
3988 pf_send_tcp(NULL, r, pd->af, pd->dst, pd->src, th->th_dport,
3990 pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
3992 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
3993 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL);
3994 REASON_SET(&reason, PFRES_SYNPROXY);
3995 return (PF_SYNPROXY_DROP);
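		/*
		 * PF_SYNPROXY_DROP swallows the client's SYN: pf has already
		 * answered it with the SYN+ACK above and will only contact
		 * the real destination (in pf_test_state_tcp()) once the
		 * client completes its side of the handshake.
		 */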
4003 pool_put(&V_pf_state_key_pl, sk);
4005 pool_put(&V_pf_state_key_pl, nk);
4008 pool_put(&pf_state_key_pl, sk);
4010 pool_put(&pf_state_key_pl, nk);
4013 if (sn != NULL && sn->states == 0 && sn->expire == 0) {
4015 RB_REMOVE(pf_src_tree, &V_tree_src_tracking, sn);
4016 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
4017 V_pf_status.src_nodes--;
4018 pool_put(&V_pf_src_tree_pl, sn);
4020 RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
4021 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
4022 pf_status.src_nodes--;
4023 pool_put(&pf_src_tree_pl, sn);
4026 if (nsn != sn && nsn != NULL && nsn->states == 0 && nsn->expire == 0) {
4028 RB_REMOVE(pf_src_tree, &V_tree_src_tracking, nsn);
4029 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
4030 V_pf_status.src_nodes--;
4031 pool_put(&V_pf_src_tree_pl, nsn);
4033 RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
4034 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
4035 pf_status.src_nodes--;
4036 pool_put(&pf_src_tree_pl, nsn);
4043 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
4044 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
4045 struct pf_ruleset **rsm)
4047 struct pf_rule *r, *a = NULL;
4048 struct pf_ruleset *ruleset = NULL;
4049 sa_family_t af = pd->af;
4055 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
4058 if (pfi_kif_match(r->kif, kif) == r->ifnot)
4059 r = r->skip[PF_SKIP_IFP].ptr;
4060 else if (r->direction && r->direction != direction)
4061 r = r->skip[PF_SKIP_DIR].ptr;
4062 else if (r->af && r->af != af)
4063 r = r->skip[PF_SKIP_AF].ptr;
4064 else if (r->proto && r->proto != pd->proto)
4065 r = r->skip[PF_SKIP_PROTO].ptr;
4066 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
4067 r->src.neg, kif, M_GETFIB(m)))
4068 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
4069 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
4070 r->dst.neg, NULL, M_GETFIB(m)))
4071 r = r->skip[PF_SKIP_DST_ADDR].ptr;
4072 else if (r->tos && !(r->tos == pd->tos))
4073 r = TAILQ_NEXT(r, entries);
4074 else if (r->os_fingerprint != PF_OSFP_ANY)
4075 r = TAILQ_NEXT(r, entries);
4076 else if (pd->proto == IPPROTO_UDP &&
4077 (r->src.port_op || r->dst.port_op))
4078 r = TAILQ_NEXT(r, entries);
4079 else if (pd->proto == IPPROTO_TCP &&
4080 (r->src.port_op || r->dst.port_op || r->flagset))
4081 r = TAILQ_NEXT(r, entries);
4082 else if ((pd->proto == IPPROTO_ICMP ||
4083 pd->proto == IPPROTO_ICMPV6) &&
4084 (r->type || r->code))
4085 r = TAILQ_NEXT(r, entries);
4086 else if (r->prob && r->prob <=
4087 (arc4random() % (UINT_MAX - 1) + 1))
4088 r = TAILQ_NEXT(r, entries);
4090 else if (r->match_tag && !pf_match_tag(m, r, &tag, pd->pf_mtag))
4092 else if (r->match_tag && !pf_match_tag(m, r, &tag))
4094 r = TAILQ_NEXT(r, entries);
4096 if (r->anchor == NULL) {
4103 r = TAILQ_NEXT(r, entries);
4105 pf_step_into_anchor(&asd, &ruleset,
4106 PF_RULESET_FILTER, &r, &a, &match);
4108 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
4109 PF_RULESET_FILTER, &r, &a, &match))
4116 REASON_SET(&reason, PFRES_MATCH);
4119 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset,
4122 if (r->action != PF_PASS)
4126 if (pf_tag_packet(m, tag, -1, pd->pf_mtag)) {
4128 if (pf_tag_packet(m, tag, -1)) {
4130 REASON_SET(&reason, PFRES_MEMORY);
4138 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
4139 struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off,
4140 struct pf_pdesc *pd, u_short *reason, int *copyback)
4142 struct tcphdr *th = pd->hdr.tcp;
4143 u_int16_t win = ntohs(th->th_win);
4144 u_int32_t ack, end, seq, orig_seq;
4148 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
4149 sws = src->wscale & PF_WSCALE_MASK;
4150 dws = dst->wscale & PF_WSCALE_MASK;
4155 * Sequence tracking algorithm from Guido van Rooij's paper:
4156 * http://www.madison-gurkha.com/publications/tcp_filtering/
4160 orig_seq = seq = ntohl(th->th_seq);
4161 if (src->seqlo == 0) {
4162 /* First packet from this end. Set its state */
4164 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
4165 src->scrub == NULL) {
4166 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
4167 REASON_SET(reason, PFRES_MEMORY);
4172 /* Deferred generation of sequence number modulator */
4173 if (dst->seqdiff && !src->seqdiff) {
4174 /* use random iss for the TCP server */
4175 while ((src->seqdiff = arc4random() - seq) == 0)
4177 ack = ntohl(th->th_ack) - dst->seqdiff;
4178 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
4180 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
4183 ack = ntohl(th->th_ack);
4186 end = seq + pd->p_len;
4187 if (th->th_flags & TH_SYN) {
4189 if (dst->wscale & PF_WSCALE_FLAG) {
4190 src->wscale = pf_get_wscale(m, off, th->th_off,
4192 if (src->wscale & PF_WSCALE_FLAG) {
4193 /* Remove scale factor from initial window */
4195 sws = src->wscale & PF_WSCALE_MASK;
4196 win = ((u_int32_t)win + (1 << sws) - 1)
4198 dws = dst->wscale & PF_WSCALE_MASK;
4200 /* fixup other window */
4201 dst->max_win <<= dst->wscale &
4203 /* in case of a retrans SYN|ACK */
4208 if (th->th_flags & TH_FIN)
4212 if (src->state < TCPS_SYN_SENT)
4213 src->state = TCPS_SYN_SENT;
4216 * May need to slide the window (seqhi may have been set by
4217 * the crappy stack check or if we picked up the connection
4218 * after establishment)
4220 if (src->seqhi == 1 ||
4221 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
4222 src->seqhi = end + MAX(1, dst->max_win << dws);
4223 if (win > src->max_win)
4227 ack = ntohl(th->th_ack) - dst->seqdiff;
4229 /* Modulate sequence numbers */
4230 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
4232 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
4235 end = seq + pd->p_len;
4236 if (th->th_flags & TH_SYN)
4238 if (th->th_flags & TH_FIN)
4242 if ((th->th_flags & TH_ACK) == 0) {
4243 /* Let it pass through the ack skew check */
4245 } else if ((ack == 0 &&
4246 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
4247 /* broken tcp stacks do not set ack */
4248 (dst->state < TCPS_SYN_SENT)) {
4250 * Many stacks (ours included) will set the ACK number in an
4251 * FIN|ACK if the SYN times out -- no sequence to ACK.
4257 /* Ease sequencing restrictions on no data packets */
4262 ackskew = dst->seqlo - ack;
4266 * Need to demodulate the sequence numbers in any TCP SACK options
4267 * (Selective ACK). We could optionally validate the SACK values
4268 * against the current ACK window, either forwards or backwards, but
4269 * I'm not confident that SACK has been implemented properly
4270 * everywhere. It wouldn't surprise me if several stacks accidentally
4271 * SACK too far backwards of previously ACKed data. There really aren't
4272 * any security implications of bad SACKing unless the target stack
4273 * doesn't validate the option length correctly. Someone trying to
4274 * spoof into a TCP connection won't bother blindly sending SACK
4277 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
4278 if (pf_modulate_sack(m, off, pd, th, dst))
4283 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
4284 if (SEQ_GEQ(src->seqhi, end) &&
4285 /* Last octet inside other's window space */
4286 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
4287 /* Retrans: not more than one window back */
4288 (ackskew >= -MAXACKWINDOW) &&
4289 /* Acking not more than one reassembled fragment backwards */
4290 (ackskew <= (MAXACKWINDOW << sws)) &&
4291 /* Acking not more than one window forward */
4292 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
4293 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
4294 (pd->flags & PFDESC_IP_REAS) == 0)) {
4295 /* Require an exact/+1 sequence match on resets when possible */
4297 if (dst->scrub || src->scrub) {
4298 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4299 *state, src, dst, copyback))
4303 /* update max window */
4304 if (src->max_win < win)
4306 /* synchronize sequencing */
4307 if (SEQ_GT(end, src->seqlo))
4309 /* slide the window of what the other end can send */
4310 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4311 dst->seqhi = ack + MAX((win << sws), 1);
4315 if (th->th_flags & TH_SYN)
4316 if (src->state < TCPS_SYN_SENT)
4317 src->state = TCPS_SYN_SENT;
4318 if (th->th_flags & TH_FIN)
4319 if (src->state < TCPS_CLOSING)
4320 src->state = TCPS_CLOSING;
4321 if (th->th_flags & TH_ACK) {
4322 if (dst->state == TCPS_SYN_SENT) {
4323 dst->state = TCPS_ESTABLISHED;
4324 if (src->state == TCPS_ESTABLISHED &&
4325 (*state)->src_node != NULL &&
4326 pf_src_connlimit(state)) {
4327 REASON_SET(reason, PFRES_SRCLIMIT);
4330 } else if (dst->state == TCPS_CLOSING)
4331 dst->state = TCPS_FIN_WAIT_2;
4333 if (th->th_flags & TH_RST)
4334 src->state = dst->state = TCPS_TIME_WAIT;
4336 /* update expire time */
4337 (*state)->expire = time_second;
4338 if (src->state >= TCPS_FIN_WAIT_2 &&
4339 dst->state >= TCPS_FIN_WAIT_2)
4340 (*state)->timeout = PFTM_TCP_CLOSED;
4341 else if (src->state >= TCPS_CLOSING &&
4342 dst->state >= TCPS_CLOSING)
4343 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4344 else if (src->state < TCPS_ESTABLISHED ||
4345 dst->state < TCPS_ESTABLISHED)
4346 (*state)->timeout = PFTM_TCP_OPENING;
4347 else if (src->state >= TCPS_CLOSING ||
4348 dst->state >= TCPS_CLOSING)
4349 (*state)->timeout = PFTM_TCP_CLOSING;
4351 (*state)->timeout = PFTM_TCP_ESTABLISHED;
4353 /* Fall through to PASS packet */
4355 } else if ((dst->state < TCPS_SYN_SENT ||
4356 dst->state >= TCPS_FIN_WAIT_2 ||
4357 src->state >= TCPS_FIN_WAIT_2) &&
4358 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
4359 /* Within a window forward of the originating packet */
4360 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
4361 /* Within a window backward of the originating packet */
4364 * This currently handles three situations:
4365 * 1) Stupid stacks will shotgun SYNs before their peer replies.
4367 * 2) When PF catches an already established stream (the
4368 * firewall rebooted, the state table was flushed, routes changed...)
4370 * 3) Packets get funky immediately after the connection
4371 * closes (this should catch Solaris spurious ACK|FINs
4372 * that web servers like to spew after a close)
4374 * This must be a little more careful than the above code
4375 * since packet floods will also be caught here. We don't
4376 * update the TTL here to mitigate the damage of a packet
4377 * flood and so the same code can handle awkward establishment
4378 * and a loosened connection close.
4379 * In the establishment case, a correct peer response will
4380 * validate the connection, go through the normal state code
4381 * and keep updating the state TTL.
4385 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4387 if (pf_status.debug >= PF_DEBUG_MISC) {
4389 printf("pf: loose state match: ");
4390 pf_print_state(*state);
4391 pf_print_flags(th->th_flags);
4392 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4393 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
4395 pd->p_len, ackskew, (unsigned long long)(*state)->packets[0],
4396 (unsigned long long)(*state)->packets[1],
4398 pd->p_len, ackskew, (*state)->packets[0],
4399 (*state)->packets[1],
4401 pd->dir == PF_IN ? "in" : "out",
4402 pd->dir == (*state)->direction ? "fwd" : "rev");
4405 if (dst->scrub || src->scrub) {
4406 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4407 *state, src, dst, copyback))
4411 /* update max window */
4412 if (src->max_win < win)
4414 /* synchronize sequencing */
4415 if (SEQ_GT(end, src->seqlo))
4417 /* slide the window of what the other end can send */
4418 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4419 dst->seqhi = ack + MAX((win << sws), 1);
4422 * Cannot set dst->seqhi here since this could be a shotgunned
4423 * SYN and not an already established connection.
4426 if (th->th_flags & TH_FIN)
4427 if (src->state < TCPS_CLOSING)
4428 src->state = TCPS_CLOSING;
4429 if (th->th_flags & TH_RST)
4430 src->state = dst->state = TCPS_TIME_WAIT;
4432 /* Fall through to PASS packet */
4435 if ((*state)->dst.state == TCPS_SYN_SENT &&
4436 (*state)->src.state == TCPS_SYN_SENT) {
4437 /* Send RST for state mismatches during handshake */
4438 if (!(th->th_flags & TH_RST))
4440 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4442 pf_send_tcp((*state)->rule.ptr, pd->af,
4444 pd->dst, pd->src, th->th_dport,
4445 th->th_sport, ntohl(th->th_ack), 0,
4447 (*state)->rule.ptr->return_ttl, 1, 0,
4448 pd->eh, kif->pfik_ifp);
4453 } else if (V_pf_status.debug >= PF_DEBUG_MISC) {
4455 } else if (pf_status.debug >= PF_DEBUG_MISC) {
4457 printf("pf: BAD state: ");
4458 pf_print_state(*state);
4459 pf_print_flags(th->th_flags);
4460 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4461 "pkts=%llu:%llu dir=%s,%s\n",
4462 seq, orig_seq, ack, pd->p_len, ackskew,
4464 (unsigned long long)(*state)->packets[0],
4465 (unsigned long long)(*state)->packets[1],
4467 (*state)->packets[0], (*state)->packets[1],
4469 pd->dir == PF_IN ? "in" : "out",
4470 pd->dir == (*state)->direction ? "fwd" : "rev");
4471 printf("pf: State failure on: %c %c %c %c | %c %c\n",
4472 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4473 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4475 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4476 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4477 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4478 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
4480 REASON_SET(reason, PFRES_BADSTATE);
4488 pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst,
4489 struct pf_state **state, struct pf_pdesc *pd, u_short *reason)
4491 struct tcphdr *th = pd->hdr.tcp;
4493 if (th->th_flags & TH_SYN)
4494 if (src->state < TCPS_SYN_SENT)
4495 src->state = TCPS_SYN_SENT;
4496 if (th->th_flags & TH_FIN)
4497 if (src->state < TCPS_CLOSING)
4498 src->state = TCPS_CLOSING;
4499 if (th->th_flags & TH_ACK) {
4500 if (dst->state == TCPS_SYN_SENT) {
4501 dst->state = TCPS_ESTABLISHED;
4502 if (src->state == TCPS_ESTABLISHED &&
4503 (*state)->src_node != NULL &&
4504 pf_src_connlimit(state)) {
4505 REASON_SET(reason, PFRES_SRCLIMIT);
4508 } else if (dst->state == TCPS_CLOSING) {
4509 dst->state = TCPS_FIN_WAIT_2;
4510 } else if (src->state == TCPS_SYN_SENT &&
4511 dst->state < TCPS_SYN_SENT) {
4513 * Handle a special sloppy case where we only see one
4514 * half of the connection. If there is an ACK after
4515 * the initial SYN without ever seeing a packet from
4516 * the destination, set the connection to established.
4518 dst->state = src->state = TCPS_ESTABLISHED;
4519 if ((*state)->src_node != NULL &&
4520 pf_src_connlimit(state)) {
4521 REASON_SET(reason, PFRES_SRCLIMIT);
4524 } else if (src->state == TCPS_CLOSING &&
4525 dst->state == TCPS_ESTABLISHED &&
4528 * Handle the closing of half connections where we
4529 * don't see the full bidirectional FIN/ACK+ACK handshake.
4532 dst->state = TCPS_CLOSING;
4535 if (th->th_flags & TH_RST)
4536 src->state = dst->state = TCPS_TIME_WAIT;
4538 /* update expire time */
4539 (*state)->expire = time_second;
4540 if (src->state >= TCPS_FIN_WAIT_2 &&
4541 dst->state >= TCPS_FIN_WAIT_2)
4542 (*state)->timeout = PFTM_TCP_CLOSED;
4543 else if (src->state >= TCPS_CLOSING &&
4544 dst->state >= TCPS_CLOSING)
4545 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4546 else if (src->state < TCPS_ESTABLISHED ||
4547 dst->state < TCPS_ESTABLISHED)
4548 (*state)->timeout = PFTM_TCP_OPENING;
4549 else if (src->state >= TCPS_CLOSING ||
4550 dst->state >= TCPS_CLOSING)
4551 (*state)->timeout = PFTM_TCP_CLOSING;
4553 (*state)->timeout = PFTM_TCP_ESTABLISHED;
4559 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
4560 struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
4563 struct pf_state_key_cmp key;
4564 struct tcphdr *th = pd->hdr.tcp;
4566 struct pf_state_peer *src, *dst;
4567 struct pf_state_key *sk;
4570 key.proto = IPPROTO_TCP;
4571 if (direction == PF_IN) { /* wire side, straight */
4572 PF_ACPY(&key.addr[0], pd->src, key.af);
4573 PF_ACPY(&key.addr[1], pd->dst, key.af);
4574 key.port[0] = th->th_sport;
4575 key.port[1] = th->th_dport;
4576 } else { /* stack side, reverse */
4577 PF_ACPY(&key.addr[1], pd->src, key.af);
4578 PF_ACPY(&key.addr[0], pd->dst, key.af);
4579 key.port[1] = th->th_sport;
4580 key.port[0] = th->th_dport;
4584 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
4586 STATE_LOOKUP(kif, &key, direction, *state, m);
4589 if (direction == (*state)->direction) {
4590 src = &(*state)->src;
4591 dst = &(*state)->dst;
4593 src = &(*state)->dst;
4594 dst = &(*state)->src;
4597 sk = (*state)->key[pd->didx];
4599 if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
4600 if (direction != (*state)->direction) {
4601 REASON_SET(reason, PFRES_SYNPROXY);
4602 return (PF_SYNPROXY_DROP);
4604 if (th->th_flags & TH_SYN) {
4605 if (ntohl(th->th_seq) != (*state)->src.seqlo) {
4606 REASON_SET(reason, PFRES_SYNPROXY);
4610 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4612 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
4614 pd->src, th->th_dport, th->th_sport,
4615 (*state)->src.seqhi, ntohl(th->th_seq) + 1,
4616 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1,
4618 REASON_SET(reason, PFRES_SYNPROXY);
4619 return (PF_SYNPROXY_DROP);
4620 } else if (!(th->th_flags & TH_ACK) ||
4621 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4622 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4623 REASON_SET(reason, PFRES_SYNPROXY);
4625 } else if ((*state)->src_node != NULL &&
4626 pf_src_connlimit(state)) {
4627 REASON_SET(reason, PFRES_SRCLIMIT);
4630 (*state)->src.state = PF_TCPS_PROXY_DST;
4632 if ((*state)->src.state == PF_TCPS_PROXY_DST) {
4633 if (direction == (*state)->direction) {
4634 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
4635 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4636 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4637 REASON_SET(reason, PFRES_SYNPROXY);
4640 (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
4641 if ((*state)->dst.seqhi == 1)
4642 (*state)->dst.seqhi = htonl(arc4random());
4644 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4646 pf_send_tcp((*state)->rule.ptr, pd->af,
4648 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4649 sk->port[pd->sidx], sk->port[pd->didx],
4650 (*state)->dst.seqhi, 0, TH_SYN, 0,
4651 (*state)->src.mss, 0, 0, (*state)->tag, NULL, NULL);
4652 REASON_SET(reason, PFRES_SYNPROXY);
4653 return (PF_SYNPROXY_DROP);
4654 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
4656 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
4657 REASON_SET(reason, PFRES_SYNPROXY);
4660 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
4661 (*state)->dst.seqlo = ntohl(th->th_seq);
4663 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4665 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
4667 pd->src, th->th_dport, th->th_sport,
4668 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
4669 TH_ACK, (*state)->src.max_win, 0, 0, 0,
4670 (*state)->tag, NULL, NULL);
4672 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4674 pf_send_tcp((*state)->rule.ptr, pd->af,
4676 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4677 sk->port[pd->sidx], sk->port[pd->didx],
4678 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
4679 TH_ACK, (*state)->dst.max_win, 0, 0, 1,
4681 (*state)->src.seqdiff = (*state)->dst.seqhi -
4682 (*state)->src.seqlo;
4683 (*state)->dst.seqdiff = (*state)->src.seqhi -
4684 (*state)->dst.seqlo;
4685 (*state)->src.seqhi = (*state)->src.seqlo +
4686 (*state)->dst.max_win;
4687 (*state)->dst.seqhi = (*state)->dst.seqlo +
4688 (*state)->src.max_win;
4689 (*state)->src.wscale = (*state)->dst.wscale = 0;
4690 (*state)->src.state = (*state)->dst.state =
4692 REASON_SET(reason, PFRES_SYNPROXY);
4693 return (PF_SYNPROXY_DROP);
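			/*
			 * Both half-connections are now established; the
			 * seqdiff values recorded above let
			 * pf_tcp_track_full() translate sequence and ack
			 * numbers between the two ISNs the proxy used, so
			 * the endpoints can talk directly from here on.
			 */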
4697 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
4698 dst->state >= TCPS_FIN_WAIT_2 &&
4699 src->state >= TCPS_FIN_WAIT_2) {
4701 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4703 if (pf_status.debug >= PF_DEBUG_MISC) {
4705 printf("pf: state reuse ");
4706 pf_print_state(*state);
4707 pf_print_flags(th->th_flags);
4710 /* XXX make sure it's the same direction ?? */
4711 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
4712 pf_unlink_state(*state);
4717 if ((*state)->state_flags & PFSTATE_SLOPPY) {
4718 if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP)
4721 if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason,
4722 &copyback) == PF_DROP)
4726 /* translate source/destination address, if necessary */
4727 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4728 struct pf_state_key *nk = (*state)->key[pd->didx];
4730 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4731 nk->port[pd->sidx] != th->th_sport)
4732 pf_change_ap(pd->src, &th->th_sport, pd->ip_sum,
4733 &th->th_sum, &nk->addr[pd->sidx],
4734 nk->port[pd->sidx], 0, pd->af);
4736 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4737 nk->port[pd->didx] != th->th_dport)
4738 pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum,
4739 &th->th_sum, &nk->addr[pd->didx],
4740 nk->port[pd->didx], 0, pd->af);
4744 /* Copyback sequence modulation or stateful scrub changes if needed */
4747 m_copyback(m, off, sizeof(*th), (caddr_t)th);
4749 m_copyback(m, off, sizeof(*th), th);
4756 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
4757 struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4759 struct pf_state_peer *src, *dst;
4760 struct pf_state_key_cmp key;
4761 struct udphdr *uh = pd->hdr.udp;
4764 key.proto = IPPROTO_UDP;
4765 if (direction == PF_IN) { /* wire side, straight */
4766 PF_ACPY(&key.addr[0], pd->src, key.af);
4767 PF_ACPY(&key.addr[1], pd->dst, key.af);
4768 key.port[0] = uh->uh_sport;
4769 key.port[1] = uh->uh_dport;
4770 } else { /* stack side, reverse */
4771 PF_ACPY(&key.addr[1], pd->src, key.af);
4772 PF_ACPY(&key.addr[0], pd->dst, key.af);
4773 key.port[1] = uh->uh_sport;
4774 key.port[0] = uh->uh_dport;
4778 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
4780 STATE_LOOKUP(kif, &key, direction, *state, m);
4783 if (direction == (*state)->direction) {
4784 src = &(*state)->src;
4785 dst = &(*state)->dst;
4787 src = &(*state)->dst;
4788 dst = &(*state)->src;
4792 if (src->state < PFUDPS_SINGLE)
4793 src->state = PFUDPS_SINGLE;
4794 if (dst->state == PFUDPS_SINGLE)
4795 dst->state = PFUDPS_MULTIPLE;
4797 /* update expire time */
4798 (*state)->expire = time_second;
4799 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
4800 (*state)->timeout = PFTM_UDP_MULTIPLE;
4802 (*state)->timeout = PFTM_UDP_SINGLE;
4804 /* translate source/destination address, if necessary */
4805 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4806 struct pf_state_key *nk = (*state)->key[pd->didx];
4808 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4809 nk->port[pd->sidx] != uh->uh_sport)
4810 pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum,
4811 &uh->uh_sum, &nk->addr[pd->sidx],
4812 nk->port[pd->sidx], 1, pd->af);
4814 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4815 nk->port[pd->didx] != uh->uh_dport)
4816 pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum,
4817 &uh->uh_sum, &nk->addr[pd->didx],
4818 nk->port[pd->didx], 1, pd->af);
4820 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
4822 m_copyback(m, off, sizeof(*uh), uh);
4830 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
4831 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
4833 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
4835 u_int16_t icmpid = 0, *icmpsum;
4837 u_int16_t icmpid, *icmpsum;
4841 struct pf_state_key_cmp key;
4843 switch (pd->proto) {
4846 icmptype = pd->hdr.icmp->icmp_type;
4847 icmpid = pd->hdr.icmp->icmp_id;
4848 icmpsum = &pd->hdr.icmp->icmp_cksum;
4850 if (icmptype == ICMP_UNREACH ||
4851 icmptype == ICMP_SOURCEQUENCH ||
4852 icmptype == ICMP_REDIRECT ||
4853 icmptype == ICMP_TIMXCEED ||
4854 icmptype == ICMP_PARAMPROB)
4859 case IPPROTO_ICMPV6:
4860 icmptype = pd->hdr.icmp6->icmp6_type;
4861 icmpid = pd->hdr.icmp6->icmp6_id;
4862 icmpsum = &pd->hdr.icmp6->icmp6_cksum;
4864 if (icmptype == ICMP6_DST_UNREACH ||
4865 icmptype == ICMP6_PACKET_TOO_BIG ||
4866 icmptype == ICMP6_TIME_EXCEEDED ||
4867 icmptype == ICMP6_PARAM_PROB)
4876 * ICMP query/reply message not related to a TCP/UDP packet.
4877 * Search for an ICMP state.
4880 key.proto = pd->proto;
4881 key.port[0] = key.port[1] = icmpid;
4882 if (direction == PF_IN) { /* wire side, straight */
4883 PF_ACPY(&key.addr[0], pd->src, key.af);
4884 PF_ACPY(&key.addr[1], pd->dst, key.af);
4885 } else { /* stack side, reverse */
4886 PF_ACPY(&key.addr[1], pd->src, key.af);
4887 PF_ACPY(&key.addr[0], pd->dst, key.af);
4891 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
4893 STATE_LOOKUP(kif, &key, direction, *state, m);
4896 (*state)->expire = time_second;
4897 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
4899 /* translate source/destination address, if necessary */
4900 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4901 struct pf_state_key *nk = (*state)->key[pd->didx];
4906 if (PF_ANEQ(pd->src,
4907 &nk->addr[pd->sidx], AF_INET))
4908 pf_change_a(&saddr->v4.s_addr,
4910 nk->addr[pd->sidx].v4.s_addr, 0);
4912 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
4914 pf_change_a(&daddr->v4.s_addr,
4916 nk->addr[pd->didx].v4.s_addr, 0);
4919 pd->hdr.icmp->icmp_id) {
4920 pd->hdr.icmp->icmp_cksum =
4922 pd->hdr.icmp->icmp_cksum, icmpid,
4923 nk->port[pd->sidx], 0);
4924 pd->hdr.icmp->icmp_id =
4928 m_copyback(m, off, ICMP_MINLEN,
4937 if (PF_ANEQ(pd->src,
4938 &nk->addr[pd->sidx], AF_INET6))
4940 &pd->hdr.icmp6->icmp6_cksum,
4941 &nk->addr[pd->sidx], 0);
4943 if (PF_ANEQ(pd->dst,
4944 &nk->addr[pd->didx], AF_INET6))
4946 &pd->hdr.icmp6->icmp6_cksum,
4947 &nk->addr[pd->didx], 0);
4950 sizeof(struct icmp6_hdr),
4963 * ICMP error message in response to a TCP/UDP packet.
4964 * Extract the inner TCP/UDP header and search for that state.
4967 struct pf_pdesc pd2;
4969 bzero(&pd2, sizeof pd2);
4975 struct ip6_hdr h2_6;
4987 /* Payload packet is from the opposite direction. */
4988 pd2.sidx = (direction == PF_IN) ? 1 : 0;
4989 pd2.didx = (direction == PF_IN) ? 0 : 1;
4993 /* offset of h2 in mbuf chain */
4994 ipoff2 = off + ICMP_MINLEN;
4996 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
4997 NULL, reason, pd2.af)) {
4998 DPFPRINTF(PF_DEBUG_MISC,
4999 ("pf: ICMP error message too short "
5004 * ICMP error messages don't refer to non-first fragments.
5007 if (h2.ip_off & htons(IP_OFFMASK)) {
5008 REASON_SET(reason, PFRES_FRAG);
5012 /* offset of protocol header that follows h2 */
5013 off2 = ipoff2 + (h2.ip_hl << 2);
5015 pd2.proto = h2.ip_p;
5016 pd2.src = (struct pf_addr *)&h2.ip_src;
5017 pd2.dst = (struct pf_addr *)&h2.ip_dst;
5018 pd2.ip_sum = &h2.ip_sum;
5023 ipoff2 = off + sizeof(struct icmp6_hdr);
5025 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
5026 NULL, reason, pd2.af)) {
5027 DPFPRINTF(PF_DEBUG_MISC,
5028 ("pf: ICMP error message too short "
5032 pd2.proto = h2_6.ip6_nxt;
5033 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
5034 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
5036 off2 = ipoff2 + sizeof(h2_6);
5038 switch (pd2.proto) {
5039 case IPPROTO_FRAGMENT:
5041 * ICMPv6 error messages for
5042 * non-first fragments
5044 REASON_SET(reason, PFRES_FRAG);
5047 case IPPROTO_HOPOPTS:
5048 case IPPROTO_ROUTING:
5049 case IPPROTO_DSTOPTS: {
5050 /* get next header and header length */
5051 struct ip6_ext opt6;
5053 if (!pf_pull_hdr(m, off2, &opt6,
5054 sizeof(opt6), NULL, reason,
5056 DPFPRINTF(PF_DEBUG_MISC,
5057 ("pf: ICMPv6 short opt\n"));
5060 if (pd2.proto == IPPROTO_AH)
5061 off2 += (opt6.ip6e_len + 2) * 4;
5063 off2 += (opt6.ip6e_len + 1) * 8;
5064 pd2.proto = opt6.ip6e_nxt;
5065 /* goto the next header */
5072 } while (!terminal);
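		/*
		 * At this point pd2 describes the embedded (offending)
		 * packet and off2 is the offset of its transport header, so
		 * the switch below can pull the inner TCP/UDP/ICMP header
		 * and look up the state it belongs to.
		 */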
5077 switch (pd2.proto) {
5081 struct pf_state_peer *src, *dst;
5086 * Only the first 8 bytes of the TCP header can be
5087 * expected. Don't access any TCP header fields after
5088 * th_seq; an ackskew test is not possible.
5090 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
5092 DPFPRINTF(PF_DEBUG_MISC,
5093 ("pf: ICMP error message too short "
5099 key.proto = IPPROTO_TCP;
5100 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5101 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5102 key.port[pd2.sidx] = th.th_sport;
5103 key.port[pd2.didx] = th.th_dport;
5106 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
5108 STATE_LOOKUP(kif, &key, direction, *state, m);
5111 if (direction == (*state)->direction) {
5112 src = &(*state)->dst;
5113 dst = &(*state)->src;
5115 src = &(*state)->src;
5116 dst = &(*state)->dst;
5119 if (src->wscale && dst->wscale)
5120 dws = dst->wscale & PF_WSCALE_MASK;
5124 /* Demodulate sequence number */
5125 seq = ntohl(th.th_seq) - src->seqdiff;
5127 pf_change_a(&th.th_seq, icmpsum,
5132 if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
5133 (!SEQ_GEQ(src->seqhi, seq) ||
5134 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
5136 if (V_pf_status.debug >= PF_DEBUG_MISC) {
5138 if (pf_status.debug >= PF_DEBUG_MISC) {
5140 printf("pf: BAD ICMP %d:%d ",
5141 icmptype, pd->hdr.icmp->icmp_code);
5142 pf_print_host(pd->src, 0, pd->af);
5144 pf_print_host(pd->dst, 0, pd->af);
5146 pf_print_state(*state);
5147 printf(" seq=%u\n", seq);
5149 REASON_SET(reason, PFRES_BADSTATE);
5153 if (V_pf_status.debug >= PF_DEBUG_MISC) {
5155 if (pf_status.debug >= PF_DEBUG_MISC) {
5157 printf("pf: OK ICMP %d:%d ",
5158 icmptype, pd->hdr.icmp->icmp_code);
5159 pf_print_host(pd->src, 0, pd->af);
5161 pf_print_host(pd->dst, 0, pd->af);
5163 pf_print_state(*state);
5164 printf(" seq=%u\n", seq);
5168 /* translate source/destination address, if necessary */
5169 if ((*state)->key[PF_SK_WIRE] !=
5170 (*state)->key[PF_SK_STACK]) {
5171 struct pf_state_key *nk =
5172 (*state)->key[pd->didx];
5174 if (PF_ANEQ(pd2.src,
5175 &nk->addr[pd2.sidx], pd2.af) ||
5176 nk->port[pd2.sidx] != th.th_sport)
5177 pf_change_icmp(pd2.src, &th.th_sport,
5178 daddr, &nk->addr[pd2.sidx],
5179 nk->port[pd2.sidx], NULL,
5180 pd2.ip_sum, icmpsum,
5181 pd->ip_sum, 0, pd2.af);
5183 if (PF_ANEQ(pd2.dst,
5184 &nk->addr[pd2.didx], pd2.af) ||
5185 nk->port[pd2.didx] != th.th_dport)
5186 pf_change_icmp(pd2.dst, &th.th_dport,
5187 NULL, /* XXX Inbound NAT? */
5188 &nk->addr[pd2.didx],
5189 nk->port[pd2.didx], NULL,
5190 pd2.ip_sum, icmpsum,
5191 pd->ip_sum, 0, pd2.af);
5199 m_copyback(m, off, ICMP_MINLEN,
5204 m_copyback(m, ipoff2, sizeof(h2),
5214 sizeof(struct icmp6_hdr),
5219 m_copyback(m, ipoff2, sizeof(h2_6),
5228 m_copyback(m, off2, 8, (caddr_t)&th);
5230 m_copyback(m, off2, 8, &th);
5240 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
5241 NULL, reason, pd2.af)) {
5242 DPFPRINTF(PF_DEBUG_MISC,
5243 ("pf: ICMP error message too short "
5249 key.proto = IPPROTO_UDP;
5250 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5251 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5252 key.port[pd2.sidx] = uh.uh_sport;
5253 key.port[pd2.didx] = uh.uh_dport;
5256 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
5258 STATE_LOOKUP(kif, &key, direction, *state, m);
5261 /* translate source/destination address, if necessary */
5262 if ((*state)->key[PF_SK_WIRE] !=
5263 (*state)->key[PF_SK_STACK]) {
5264 struct pf_state_key *nk =
5265 (*state)->key[pd->didx];
5267 if (PF_ANEQ(pd2.src,
5268 &nk->addr[pd2.sidx], pd2.af) ||
5269 nk->port[pd2.sidx] != uh.uh_sport)
5270 pf_change_icmp(pd2.src, &uh.uh_sport,
5271 daddr, &nk->addr[pd2.sidx],
5272 nk->port[pd2.sidx], &uh.uh_sum,
5273 pd2.ip_sum, icmpsum,
5274 pd->ip_sum, 1, pd2.af);
5276 if (PF_ANEQ(pd2.dst,
5277 &nk->addr[pd2.didx], pd2.af) ||
5278 nk->port[pd2.didx] != uh.uh_dport)
5279 pf_change_icmp(pd2.dst, &uh.uh_dport,
5280 NULL, /* XXX Inbound NAT? */
5281 &nk->addr[pd2.didx],
5282 nk->port[pd2.didx], &uh.uh_sum,
5283 pd2.ip_sum, icmpsum,
5284 pd->ip_sum, 1, pd2.af);
5289 m_copyback(m, off, ICMP_MINLEN,
5295 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5297 m_copyback(m, ipoff2, sizeof(h2), &h2);
5304 sizeof(struct icmp6_hdr),
5309 m_copyback(m, ipoff2, sizeof(h2_6),
5318 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
5320 m_copyback(m, off2, sizeof(uh), &uh);
5327 case IPPROTO_ICMP: {
5330 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
5331 NULL, reason, pd2.af)) {
5332 DPFPRINTF(PF_DEBUG_MISC,
5333 ("pf: ICMP error message too short i"
5339 key.proto = IPPROTO_ICMP;
5340 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5341 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5342 key.port[0] = key.port[1] = iih.icmp_id;
5345 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
5347 STATE_LOOKUP(kif, &key, direction, *state, m);
5350 /* translate source/destination address, if necessary */
5351 if ((*state)->key[PF_SK_WIRE] !=
5352 (*state)->key[PF_SK_STACK]) {
5353 struct pf_state_key *nk =
5354 (*state)->key[pd->didx];
5356 if (PF_ANEQ(pd2.src,
5357 &nk->addr[pd2.sidx], pd2.af) ||
5358 nk->port[pd2.sidx] != iih.icmp_id)
5359 pf_change_icmp(pd2.src, &iih.icmp_id,
5360 daddr, &nk->addr[pd2.sidx],
5361 nk->port[pd2.sidx], NULL,
5362 pd2.ip_sum, icmpsum,
5363 pd->ip_sum, 0, AF_INET);
5365 if (PF_ANEQ(pd2.dst,
5366 &nk->addr[pd2.didx], pd2.af) ||
5367 nk->port[pd2.didx] != iih.icmp_id)
5368 pf_change_icmp(pd2.dst, &iih.icmp_id,
5369 NULL, /* XXX Inbound NAT? */
5370 &nk->addr[pd2.didx],
5371 nk->port[pd2.didx], NULL,
5372 pd2.ip_sum, icmpsum,
5373 pd->ip_sum, 0, AF_INET);
5376 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
5377 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5378 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
5380 m_copyback(m, off, ICMP_MINLEN, pd->hdr.icmp);
5381 m_copyback(m, ipoff2, sizeof(h2), &h2);
5382 m_copyback(m, off2, ICMP_MINLEN, &iih);
5390 case IPPROTO_ICMPV6: {
5391 struct icmp6_hdr iih;
5393 if (!pf_pull_hdr(m, off2, &iih,
5394 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
5395 DPFPRINTF(PF_DEBUG_MISC,
5396 ("pf: ICMP error message too short "
5402 key.proto = IPPROTO_ICMPV6;
5403 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5404 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5405 key.port[0] = key.port[1] = iih.icmp6_id;
5408 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
5410 STATE_LOOKUP(kif, &key, direction, *state, m);
5413 /* translate source/destination address, if necessary */
5414 if ((*state)->key[PF_SK_WIRE] !=
5415 (*state)->key[PF_SK_STACK]) {
5416 struct pf_state_key *nk =
5417 (*state)->key[pd->didx];
5419 if (PF_ANEQ(pd2.src,
5420 &nk->addr[pd2.sidx], pd2.af) ||
5421 nk->port[pd2.sidx] != iih.icmp6_id)
5422 pf_change_icmp(pd2.src, &iih.icmp6_id,
5423 daddr, &nk->addr[pd2.sidx],
5424 nk->port[pd2.sidx], NULL,
5425 pd2.ip_sum, icmpsum,
5426 pd->ip_sum, 0, AF_INET6);
5428 if (PF_ANEQ(pd2.dst,
5429 &nk->addr[pd2.didx], pd2.af) ||
5430 nk->port[pd2.didx] != iih.icmp6_id)
5431 pf_change_icmp(pd2.dst, &iih.icmp6_id,
5432 NULL, /* XXX Inbound NAT? */
5433 &nk->addr[pd2.didx],
5434 nk->port[pd2.didx], NULL,
5435 pd2.ip_sum, icmpsum,
5436 pd->ip_sum, 0, AF_INET6);
5439 m_copyback(m, off, sizeof(struct icmp6_hdr),
5440 (caddr_t)pd->hdr.icmp6);
5441 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
5442 m_copyback(m, off2, sizeof(struct icmp6_hdr),
5445 m_copyback(m, off, sizeof(struct icmp6_hdr),
5447 m_copyback(m, ipoff2, sizeof(h2_6), &h2_6);
5448 m_copyback(m, off2, sizeof(struct icmp6_hdr),
5458 key.proto = pd2.proto;
5459 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5460 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5461 key.port[0] = key.port[1] = 0;
5464 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
5466 STATE_LOOKUP(kif, &key, direction, *state, m);
5469 /* translate source/destination address, if necessary */
5470 if ((*state)->key[PF_SK_WIRE] !=
5471 (*state)->key[PF_SK_STACK]) {
5472 struct pf_state_key *nk =
5473 (*state)->key[pd->didx];
5475 if (PF_ANEQ(pd2.src,
5476 &nk->addr[pd2.sidx], pd2.af))
5477 pf_change_icmp(pd2.src, NULL, daddr,
5478 &nk->addr[pd2.sidx], 0, NULL,
5479 pd2.ip_sum, icmpsum,
5480 pd->ip_sum, 0, pd2.af);
5482 if (PF_ANEQ(pd2.dst,
5483 &nk->addr[pd2.didx], pd2.af))
5484 pf_change_icmp(pd2.dst, NULL,
5485 NULL, /* XXX Inbound NAT? */
5486 &nk->addr[pd2.didx], 0, NULL,
5487 pd2.ip_sum, icmpsum,
5488 pd->ip_sum, 0, pd2.af);
5494 m_copyback(m, off, ICMP_MINLEN,
5495 (caddr_t)pd->hdr.icmp);
5496 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5498 m_copyback(m, off, ICMP_MINLEN,
5500 m_copyback(m, ipoff2, sizeof(h2), &h2);
5507 sizeof(struct icmp6_hdr),
5512 m_copyback(m, ipoff2, sizeof(h2_6),
5529 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
5530 struct mbuf *m, struct pf_pdesc *pd)
5532 struct pf_state_peer *src, *dst;
5533 struct pf_state_key_cmp key;
5536 key.proto = pd->proto;
5537 if (direction == PF_IN) {
5538 PF_ACPY(&key.addr[0], pd->src, key.af);
5539 PF_ACPY(&key.addr[1], pd->dst, key.af);
5540 key.port[0] = key.port[1] = 0;
5542 PF_ACPY(&key.addr[1], pd->src, key.af);
5543 PF_ACPY(&key.addr[0], pd->dst, key.af);
5544 key.port[1] = key.port[0] = 0;
5548 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
5550 STATE_LOOKUP(kif, &key, direction, *state, m);
5553 if (direction == (*state)->direction) {
5554 src = &(*state)->src;
5555 dst = &(*state)->dst;
5557 src = &(*state)->dst;
5558 dst = &(*state)->src;
5562 if (src->state < PFOTHERS_SINGLE)
5563 src->state = PFOTHERS_SINGLE;
5564 if (dst->state == PFOTHERS_SINGLE)
5565 dst->state = PFOTHERS_MULTIPLE;
5567 /* update expire time */
5568 (*state)->expire = time_second;
5569 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
5570 (*state)->timeout = PFTM_OTHER_MULTIPLE;
5572 (*state)->timeout = PFTM_OTHER_SINGLE;
5574 /* translate source/destination address, if necessary */
5575 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5576 struct pf_state_key *nk = (*state)->key[pd->didx];
5579 KASSERT(nk, ("%s: nk is null", __FUNCTION__));
5580 KASSERT(pd, ("%s: pd is null", __FUNCTION__));
5581 KASSERT(pd->src, ("%s: pd->src is null", __FUNCTION__));
5582 KASSERT(pd->dst, ("%s: pd->dst is null", __FUNCTION__));
5592 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5593 pf_change_a(&pd->src->v4.s_addr,
5595 nk->addr[pd->sidx].v4.s_addr,
5599 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5600 pf_change_a(&pd->dst->v4.s_addr,
5602 nk->addr[pd->didx].v4.s_addr,
5609 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6))
5610 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
5612 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6))
5613 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
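/*
 * Editor's note: the state-key handling above (and in the other
 * pf_test_state_*() functions) stores the packet's source address in
 * slot 0 and its destination in slot 1 for inbound packets, and swaps
 * the two slots for outbound packets, so both directions of a
 * connection resolve to the same state entry.  The following is a
 * minimal, self-contained sketch of that idea; the struct and function
 * names are illustrative only and are not part of pf.
 */
#include <stdint.h>
#include <string.h>

struct example_key {
	uint32_t	addr[2];	/* [0]/[1] as in struct pf_state_key_cmp */
	uint16_t	port[2];
};

#define	EXAMPLE_IN	0
#define	EXAMPLE_OUT	1

static void
example_fill_key(struct example_key *k, int dir,
    uint32_t src, uint16_t sport, uint32_t dst, uint16_t dport)
{
	memset(k, 0, sizeof(*k));
	if (dir == EXAMPLE_IN) {
		k->addr[0] = src; k->port[0] = sport;
		k->addr[1] = dst; k->port[1] = dport;
	} else {
		k->addr[1] = src; k->port[1] = sport;
		k->addr[0] = dst; k->port[0] = dport;
	}
}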
5621 * ipoff and off are measured from the start of the mbuf chain.
5622 * h must be at "ipoff" on the mbuf chain.
5625 pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
5626 u_short *actionp, u_short *reasonp, sa_family_t af)
5631 struct ip *h = mtod(m, struct ip *);
5632 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
5636 ACTION_SET(actionp, PF_PASS);
5638 ACTION_SET(actionp, PF_DROP);
5639 REASON_SET(reasonp, PFRES_FRAG);
5643 if (m->m_pkthdr.len < off + len ||
5644 ntohs(h->ip_len) < off + len) {
5645 ACTION_SET(actionp, PF_DROP);
5646 REASON_SET(reasonp, PFRES_SHORT);
5654 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
5656 if (m->m_pkthdr.len < off + len ||
5657 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
5658 (unsigned)(off + len)) {
5659 ACTION_SET(actionp, PF_DROP);
5660 REASON_SET(reasonp, PFRES_SHORT);
5667 m_copydata(m, off, len, p);
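/*
 * Editor's note: pf_pull_hdr() above refuses to pull a header that lies
 * beyond the first fragment: the IPv4 fragment offset field counts
 * 8-byte units, so it is masked with IP_OFFMASK and shifted left by 3
 * to obtain a byte offset.  A small self-contained sketch of that
 * arithmetic follows; it is illustrative only and not part of pf.
 */
#include <stdint.h>

#define	EXAMPLE_IP_OFFMASK	0x1fff	/* same value as IP_OFFMASK */

static uint16_t
example_frag_byte_offset(uint16_t ip_off_host_order)
{
	/* 8-byte fragment units -> byte offset, as in pf_pull_hdr(). */
	return ((ip_off_host_order & EXAMPLE_IP_OFFMASK) << 3);
}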
5672 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif,
5677 struct radix_node_head *rnh;
5680 struct sockaddr_in *dst;
5684 extern int ipmultipath;
5688 extern int ip6_multipath;
5690 struct sockaddr_in6 *dst6;
5691 struct route_in6 ro;
5695 struct radix_node *rn;
5702 /* XXX: stick to table 0 for now */
5703 rnh = rt_tables_get_rnh(0, af);
5704 if (rnh != NULL && rn_mpath_capable(rnh))
5708 bzero(&ro, sizeof(ro));
5711 dst = satosin(&ro.ro_dst);
5712 dst->sin_family = AF_INET;
5713 dst->sin_len = sizeof(*dst);
5714 dst->sin_addr = addr->v4;
5723 * Skip check for addresses with embedded interface scope,
5724 * as they would always match anyway.
5726 if (IN6_IS_SCOPE_EMBED(&addr->v6))
5728 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
5729 dst6->sin6_family = AF_INET6;
5730 dst6->sin6_len = sizeof(*dst6);
5731 dst6->sin6_addr = addr->v6;
5742 /* Skip checks for ipsec interfaces */
5743 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
5750 in6_rtalloc_ign(&ro, 0, rtableid);
5755 in_rtalloc_ign((struct route *)&ro, 0, rtableid);
5759 rtalloc_ign((struct route *)&ro, 0); /* No/default FIB. */
5762 #else /* ! __FreeBSD__ */
5763 rtalloc_noclone((struct route *)&ro, NO_CLONING);
5766 if (ro.ro_rt != NULL) {
5767 /* No interface given, this is a no-route check */
5771 if (kif->pfik_ifp == NULL) {
5776 /* Perform uRPF check if passed input interface */
5778 rn = (struct radix_node *)ro.ro_rt;
5780 rt = (struct rtentry *)rn;
5781 #ifndef __FreeBSD__ /* CARPDEV */
5782 if (rt->rt_ifp->if_type == IFT_CARP)
5783 ifp = rt->rt_ifp->if_carpdev;
5788 if (kif->pfik_ifp == ifp)
5792 rn = rn_mpath_next(rn);
5795 rn = rn_mpath_next(rn, 0);
5797 } while (check_mpath == 1 && rn != NULL && ret == 0);
5801 if (ro.ro_rt != NULL)
5807 pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw,
5810 struct sockaddr_in *dst;
5812 struct sockaddr_in6 *dst6;
5813 struct route_in6 ro;
5819 bzero(&ro, sizeof(ro));
5822 dst = satosin(&ro.ro_dst);
5823 dst->sin_family = AF_INET;
5824 dst->sin_len = sizeof(*dst);
5825 dst->sin_addr = addr->v4;
5829 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
5830 dst6->sin6_family = AF_INET6;
5831 dst6->sin6_len = sizeof(*dst6);
5832 dst6->sin6_addr = addr->v6;
5843 in6_rtalloc_ign(&ro, 0, rtableid);
5848 in_rtalloc_ign((struct route *)&ro, 0, rtableid);
5852 rtalloc_ign((struct route *)&ro, 0);
5855 #else /* ! __FreeBSD__ */
5856 rtalloc_noclone((struct route *)&ro, NO_CLONING);
5859 if (ro.ro_rt != NULL) {
5861 /* XXX_IMPORT: later */
5863 if (ro.ro_rt->rt_labelid == aw->v.rtlabel)
5874 pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5875 struct pf_state *s, struct pf_pdesc *pd)
5877 struct mbuf *m0, *m1;
5878 struct route iproute;
5879 struct route *ro = NULL;
5880 struct sockaddr_in *dst;
5882 struct ifnet *ifp = NULL;
5883 struct pf_addr naddr;
5884 struct pf_src_node *sn = NULL;
5893 if (m == NULL || *m == NULL || r == NULL ||
5894 (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
5895 panic("pf_route: invalid parameters");
5898 if (pd->pf_mtag->routed++ > 3) {
5900 if ((*m)->m_pkthdr.pf.routed++ > 3) {
5907 if (r->rt == PF_DUPTO) {
5909 if ((m0 = m_dup(*m, M_DONTWAIT)) == NULL)
5911 if ((m0 = m_copym2(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)
5915 if ((r->rt == PF_REPLYTO) == (r->direction == dir))
5920 if (m0->m_len < sizeof(struct ip)) {
5921 DPFPRINTF(PF_DEBUG_URGENT,
5922 ("pf_route: m0->m_len < sizeof(struct ip)\n"));
5926 ip = mtod(m0, struct ip *);
5929 bzero((caddr_t)ro, sizeof(*ro));
5930 dst = satosin(&ro->ro_dst);
5931 dst->sin_family = AF_INET;
5932 dst->sin_len = sizeof(*dst);
5933 dst->sin_addr = ip->ip_dst;
5935 if (r->rt == PF_FASTROUTE) {
5937 in_rtalloc_ign(ro, 0, M_GETFIB(m0));
5941 if (ro->ro_rt == 0) {
5943 KMOD_IPSTAT_INC(ips_noroute);
5945 ipstat.ips_noroute++;
5950 ifp = ro->ro_rt->rt_ifp;
5951 ro->ro_rt->rt_use++;
5953 if (ro->ro_rt->rt_flags & RTF_GATEWAY)
5954 dst = satosin(ro->ro_rt->rt_gateway);
5956 if (TAILQ_EMPTY(&r->rpool.list)) {
5957 DPFPRINTF(PF_DEBUG_URGENT,
5958 ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n"));
5962 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
5964 if (!PF_AZERO(&naddr, AF_INET))
5965 dst->sin_addr.s_addr = naddr.v4.s_addr;
5966 ifp = r->rpool.cur->kif ?
5967 r->rpool.cur->kif->pfik_ifp : NULL;
5969 if (!PF_AZERO(&s->rt_addr, AF_INET))
5970 dst->sin_addr.s_addr =
5971 s->rt_addr.v4.s_addr;
5972 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5981 if (pf_test(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) {
5984 } else if (m0 == NULL) {
5990 if (pf_test(PF_OUT, ifp, &m0, NULL) != PF_PASS)
5992 else if (m0 == NULL)
5995 if (m0->m_len < sizeof(struct ip)) {
5996 DPFPRINTF(PF_DEBUG_URGENT,
5997 ("pf_route: m0->m_len < sizeof(struct ip)\n"));
6000 ip = mtod(m0, struct ip *);
6004 /* Copied from FreeBSD 5.1-CURRENT ip_output. */
6005 m0->m_pkthdr.csum_flags |= CSUM_IP;
6006 sw_csum = m0->m_pkthdr.csum_flags & ~ifp->if_hwassist;
6007 if (sw_csum & CSUM_DELAY_DATA) {
6009 * XXX: in_delayed_cksum assumes HBO for ip->ip_len (at least)
6012 NTOHS(ip->ip_off); /* XXX: needed? */
6013 in_delayed_cksum(m0);
6016 sw_csum &= ~CSUM_DELAY_DATA;
6018 m0->m_pkthdr.csum_flags &= ifp->if_hwassist;
6020 if (ntohs(ip->ip_len) <= ifp->if_mtu ||
6021 (ifp->if_hwassist & CSUM_FRAGMENT &&
6022 ((ip->ip_off & htons(IP_DF)) == 0))) {
6024 * ip->ip_len = htons(ip->ip_len);
6025 * ip->ip_off = htons(ip->ip_off);
6028 if (sw_csum & CSUM_DELAY_IP) {
6030 if (ip->ip_v == IPVERSION &&
6031 (ip->ip_hl << 2) == sizeof(*ip)) {
6032 ip->ip_sum = in_cksum_hdr(ip);
6034 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
6038 error = (*ifp->if_output)(ifp, m0, sintosa(dst), ro);
6043 /* Copied from ip_output. */
6046 * If deferred crypto processing is needed, check that the
6047 * interface supports it.
6049 if ((mtag = m_tag_find(m0, PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED, NULL))
6050 != NULL && (ifp->if_capabilities & IFCAP_IPSEC) == 0) {
6051 /* Notify IPsec to do its own crypto. */
6052 ipsp_skipcrypto_unmark((struct tdb_ident *)(mtag + 1));
6057 /* Catch routing changes wrt. hardware checksumming for TCP or UDP. */
6058 if (m0->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT) {
6059 if (!(ifp->if_capabilities & IFCAP_CSUM_TCPv4) ||
6060 ifp->if_bridge != NULL) {
6061 in_delayed_cksum(m0);
6062 m0->m_pkthdr.csum_flags &= ~M_TCPV4_CSUM_OUT; /* Clr */
6064 } else if (m0->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT) {
6065 if (!(ifp->if_capabilities & IFCAP_CSUM_UDPv4) ||
6066 ifp->if_bridge != NULL) {
6067 in_delayed_cksum(m0);
6068 m0->m_pkthdr.csum_flags &= ~M_UDPV4_CSUM_OUT; /* Clr */
6072 if (ntohs(ip->ip_len) <= ifp->if_mtu) {
6074 if ((ifp->if_capabilities & IFCAP_CSUM_IPv4) &&
6075 ifp->if_bridge == NULL) {
6076 m0->m_pkthdr.csum_flags |= M_IPV4_CSUM_OUT;
6078 KMOD_IPSTAT_INC(ips_outhwcsum);
6080 ipstat.ips_outhwcsum++;
6083 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
6084 /* Update relevant hardware checksum stats for TCP/UDP */
6085 if (m0->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
6086 KMOD_TCPSTAT_INC(tcps_outhwcsum);
6087 else if (m0->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
6088 KMOD_UDPSTAT_INC(udps_outhwcsum);
6089 error = (*ifp->if_output)(ifp, m0, sintosa(dst), NULL);
6095 * Too large for interface; fragment if possible.
6096 * Must be able to put at least 8 bytes per fragment.
6098 if (ip->ip_off & htons(IP_DF)) {
6100 KMOD_IPSTAT_INC(ips_cantfrag);
6102 ipstat.ips_cantfrag++;
6104 if (r->rt != PF_DUPTO) {
6106 /* icmp_error() expects host byte ordering */
6110 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
6114 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
6125 * XXX: ip_fragment() is cheaper + less error prone than rolling our own
6129 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist, sw_csum);
6131 error = ip_fragment(m0, ifp, ifp->if_mtu);
6134 #ifndef __FreeBSD__ /* ip_fragment does not do m_freem() on FreeBSD */
6140 for (m0 = m1; m0; m0 = m1) {
6146 error = (*ifp->if_output)(ifp, m0, sintosa(dst),
6152 error = (*ifp->if_output)(ifp, m0, sintosa(dst),
6161 KMOD_IPSTAT_INC(ips_fragmented);
6163 ipstat.ips_fragmented++;
6167 if (r->rt != PF_DUPTO)
6169 if (ro == &iproute && ro->ro_rt)
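/*
 * Editor's note: the fragmentation path above relies on the fact that
 * the IPv4 fragment offset is expressed in 8-byte units, so every
 * fragment except the last must carry a payload that is a multiple of
 * 8 bytes ("must be able to put at least 8 bytes per fragment").  A
 * minimal sketch of the per-fragment payload computation; the real
 * work is done by ip_fragment(), this is illustrative only.
 */
#include <stddef.h>

static size_t
example_frag_payload(size_t mtu, size_t iphdr_len)
{
	if (mtu < iphdr_len + 8)
		return (0);	/* cannot carry even one 8-byte unit */
	/* Round the available payload down to a multiple of 8 bytes. */
	return ((mtu - iphdr_len) & ~(size_t)7);
}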
6181 pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
6182 struct pf_state *s, struct pf_pdesc *pd)
6185 struct route_in6 ip6route;
6186 struct route_in6 *ro;
6187 struct sockaddr_in6 *dst;
6188 struct ip6_hdr *ip6;
6189 struct ifnet *ifp = NULL;
6190 struct pf_addr naddr;
6191 struct pf_src_node *sn = NULL;
6193 if (m == NULL || *m == NULL || r == NULL ||
6194 (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
6195 panic("pf_route6: invalid parameters");
6198 if (pd->pf_mtag->routed++ > 3) {
6200 if ((*m)->m_pkthdr.pf.routed++ > 3) {
6207 if (r->rt == PF_DUPTO) {
6209 if ((m0 = m_dup(*m, M_DONTWAIT)) == NULL)
6211 if ((m0 = m_copym2(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)
6215 if ((r->rt == PF_REPLYTO) == (r->direction == dir))
6220 if (m0->m_len < sizeof(struct ip6_hdr)) {
6221 DPFPRINTF(PF_DEBUG_URGENT,
6222 ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n"));
6225 ip6 = mtod(m0, struct ip6_hdr *);
6228 bzero((caddr_t)ro, sizeof(*ro));
6229 dst = (struct sockaddr_in6 *)&ro->ro_dst;
6230 dst->sin6_family = AF_INET6;
6231 dst->sin6_len = sizeof(*dst);
6232 dst->sin6_addr = ip6->ip6_dst;
6234 /* Cheat. XXX why only in the v6 case??? */
6235 if (r->rt == PF_FASTROUTE) {
6237 m0->m_flags |= M_SKIP_FIREWALL;
6239 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
6241 m0->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
6242 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
6247 if (TAILQ_EMPTY(&r->rpool.list)) {
6248 DPFPRINTF(PF_DEBUG_URGENT,
6249 ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n"));
6253 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
6255 if (!PF_AZERO(&naddr, AF_INET6))
6256 PF_ACPY((struct pf_addr *)&dst->sin6_addr,
6258 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
6260 if (!PF_AZERO(&s->rt_addr, AF_INET6))
6261 PF_ACPY((struct pf_addr *)&dst->sin6_addr,
6262 &s->rt_addr, AF_INET6);
6263 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
6271 if (pf_test6(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) {
6274 } else if (m0 == NULL) {
6280 if (pf_test6(PF_OUT, ifp, &m0, NULL) != PF_PASS)
6282 else if (m0 == NULL)
6285 if (m0->m_len < sizeof(struct ip6_hdr)) {
6286 DPFPRINTF(PF_DEBUG_URGENT,
6287 ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n"));
6290 ip6 = mtod(m0, struct ip6_hdr *);
6294 * If the packet is too large for the outgoing interface,
6295 * send back an icmp6 error.
6297 if (IN6_IS_SCOPE_EMBED(&dst->sin6_addr))
6298 dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
6299 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) {
6303 nd6_output(ifp, ifp, m0, dst, NULL);
6308 in6_ifstat_inc(ifp, ifs6_in_toobig);
6310 if (r->rt != PF_DUPTO) {
6312 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
6316 if (r->rt != PF_DUPTO)
6317 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
6324 if (r->rt != PF_DUPTO)
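/*
 * Editor's note: pf_route6() above writes the outgoing interface index
 * into the second 16-bit word of scoped destinations
 * (dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index)).  This is the
 * KAME "embedded scope" convention: inside the kernel the otherwise
 * unused bytes 2-3 of a link-local or similarly scoped IPv6 address
 * carry the zone (interface) the address belongs to.  A minimal,
 * self-contained sketch of that embedding; illustrative only and not
 * part of pf.
 */
#include <stdint.h>
#include <netinet/in.h>

static void
example_embed_scope(struct in6_addr *a, uint16_t ifindex)
{
	/* Store the interface index in bytes 2-3, network byte order. */
	a->s6_addr[2] = (uint8_t)(ifindex >> 8);
	a->s6_addr[3] = (uint8_t)(ifindex & 0xff);
}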
6336 * FreeBSD supports cksum offloads for the following drivers:
6337 * em(4), fxp(4), ixgb(4), lge(4), ndis(4), nge(4), re(4),
6338 * ti(4), txp(4), xl(4)
6340 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR :
6341 * network driver performed cksum including pseudo header, need to verify
6344 * network driver performed cksum; an additional pseudo header cksum
6345 * computation with the partial csum_data is needed (i.e. lack of H/W
6346 * support for the pseudo header, for instance hme(4), sk(4) and possibly gem(4))
6348 * After validating the cksum of the packet, set both CSUM_DATA_VALID and
6349 * CSUM_PSEUDO_HDR in order to avoid recomputation of the cksum in upper layers.
6351 * Also, set csum_data to 0xffff to force cksum validation.
6354 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af)
6360 if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
6362 if (m->m_pkthdr.len < off + len)
6367 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
6368 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
6369 sum = m->m_pkthdr.csum_data;
6371 ip = mtod(m, struct ip *);
6372 sum = in_pseudo(ip->ip_src.s_addr,
6373 ip->ip_dst.s_addr, htonl((u_short)len +
6374 m->m_pkthdr.csum_data + IPPROTO_TCP));
6381 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
6382 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
6383 sum = m->m_pkthdr.csum_data;
6385 ip = mtod(m, struct ip *);
6386 sum = in_pseudo(ip->ip_src.s_addr,
6387 ip->ip_dst.s_addr, htonl((u_short)len +
6388 m->m_pkthdr.csum_data + IPPROTO_UDP));
6396 case IPPROTO_ICMPV6:
6406 if (p == IPPROTO_ICMP) {
6411 sum = in_cksum(m, len);
6415 if (m->m_len < sizeof(struct ip))
6417 sum = in4_cksum(m, p, off, len);
6422 if (m->m_len < sizeof(struct ip6_hdr))
6424 sum = in6_cksum(m, p, off, len);
6435 KMOD_TCPSTAT_INC(tcps_rcvbadsum);
6440 KMOD_UDPSTAT_INC(udps_badsum);
6446 KMOD_ICMPSTAT_INC(icps_checksum);
6451 case IPPROTO_ICMPV6:
6453 KMOD_ICMP6STAT_INC(icp6s_checksum);
6460 if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
6461 m->m_pkthdr.csum_flags |=
6462 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
6463 m->m_pkthdr.csum_data = 0xffff;
6468 #else /* !__FreeBSD__ */
6471 * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag
6472 * off is the offset where the protocol header starts
6473 * len is the total length of protocol header plus payload
6474 * returns 0 when the checksum is valid, otherwise returns 1.
6477 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p,
6480 u_int16_t flag_ok, flag_bad;
6485 flag_ok = M_TCP_CSUM_IN_OK;
6486 flag_bad = M_TCP_CSUM_IN_BAD;
6489 flag_ok = M_UDP_CSUM_IN_OK;
6490 flag_bad = M_UDP_CSUM_IN_BAD;
6494 case IPPROTO_ICMPV6:
6496 flag_ok = flag_bad = 0;
6501 if (m->m_pkthdr.csum_flags & flag_ok)
6503 if (m->m_pkthdr.csum_flags & flag_bad)
6505 if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
6507 if (m->m_pkthdr.len < off + len)
6512 if (p == IPPROTO_ICMP) {
6517 sum = in_cksum(m, len);
6521 if (m->m_len < sizeof(struct ip))
6523 sum = in4_cksum(m, p, off, len);
6529 if (m->m_len < sizeof(struct ip6_hdr))
6531 sum = in6_cksum(m, p, off, len);
6538 m->m_pkthdr.csum_flags |= flag_bad;
6541 KMOD_TCPSTAT_INC(tcps_rcvbadsum);
6544 KMOD_UDPSTAT_INC(udps_badsum);
6548 KMOD_ICMPSTAT_INC(icps_checksum);
6552 case IPPROTO_ICMPV6:
6553 KMOD_ICMP6STAT_INC(icp6s_checksum);
6559 m->m_pkthdr.csum_flags |= flag_ok;
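/*
 * Editor's note: the in_cksum()/in4_cksum()/in6_cksum() calls in
 * pf_check_proto_cksum() compute the standard 16-bit one's-complement
 * Internet checksum (RFC 1071); in_pseudo() in the FreeBSD variant
 * above pre-sums the pseudo header so that only the driver-provided
 * partial csum_data needs to be folded in.  The following is a
 * minimal, self-contained userland sketch of the basic algorithm; it
 * is illustrative only and not part of pf.
 */
#include <stddef.h>
#include <stdint.h>

static uint16_t
example_in_cksum(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t sum = 0;

	/* Sum the data as a sequence of big-endian 16-bit words. */
	while (len > 1) {
		sum += (uint32_t)((p[0] << 8) | p[1]);
		p += 2;
		len -= 2;
	}
	/* A trailing odd byte is padded with zero. */
	if (len == 1)
		sum += (uint32_t)(p[0] << 8);

	/* Fold the carries back into the low 16 bits. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	return ((uint16_t)~sum);
}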
6566 pf_find_divert(struct mbuf *m)
6570 if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL)
6573 return ((struct pf_divert *)(mtag + 1));
6577 pf_get_divert(struct mbuf *m)
6581 if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) {
6582 mtag = m_tag_get(PACKET_TAG_PF_DIVERT, sizeof(struct pf_divert),
6586 bzero(mtag + 1, sizeof(struct pf_divert));
6587 m_tag_prepend(m, mtag);
6590 return ((struct pf_divert *)(mtag + 1));
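/*
 * Editor's note: pf_find_divert() and pf_get_divert() use the common
 * mbuf-tag idiom of placing the payload directly behind the fixed-size
 * tag header and reaching it via "(struct pf_divert *)(mtag + 1)".  A
 * tiny self-contained analogue of that layout, using plain malloc()
 * instead of m_tag_get(); the names are illustrative only and not part
 * of pf.
 */
#include <stdlib.h>
#include <string.h>

struct example_tag {			/* stands in for struct m_tag */
	int	type;
};

struct example_divert {			/* stands in for struct pf_divert */
	unsigned short	port;
};

static struct example_divert *
example_tag_data(struct example_tag *tag)
{
	/* The payload starts immediately after the tag header. */
	return ((struct example_divert *)(tag + 1));
}

static struct example_tag *
example_tag_alloc(int type)
{
	struct example_tag *tag;

	tag = malloc(sizeof(*tag) + sizeof(struct example_divert));
	if (tag == NULL)
		return (NULL);
	tag->type = type;
	memset(example_tag_data(tag), 0, sizeof(struct example_divert));
	return (tag);
}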
6597 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
6598 struct ether_header *eh, struct inpcb *inp)
6600 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
6601 struct ether_header *eh)
6604 struct pfi_kif *kif;
6605 u_short action, reason = 0, log = 0;
6606 struct mbuf *m = *m0;
6608 struct ip *h = NULL;
6609 struct m_tag *ipfwtag;
6610 struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
6613 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
6615 struct pf_state *s = NULL;
6616 struct pf_ruleset *ruleset = NULL;
6618 int off, dirndx, pqid = 0;
6622 if (!V_pf_status.running)
6628 if (!pf_status.running)
6632 memset(&pd, 0, sizeof(pd));
6634 if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
6636 DPFPRINTF(PF_DEBUG_URGENT,
6637 ("pf_test: pf_get_mtag returned NULL\n"));
6642 if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
6643 kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif;
6646 kif = (struct pfi_kif *)ifp->if_pf_kif;
6652 DPFPRINTF(PF_DEBUG_URGENT,
6653 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
6656 if (kif->pfik_flags & PFI_IFLAG_SKIP)
6670 if ((m->m_flags & M_PKTHDR) == 0)
6671 panic("non-M_PKTHDR is passed to pf_test");
6672 #endif /* DIAGNOSTIC */
6675 if (m->m_pkthdr.len < (int)sizeof(*h)) {
6677 REASON_SET(&reason, PFRES_SHORT);
6683 if (m->m_flags & M_SKIP_FIREWALL) {
6688 if (m->m_pkthdr.pf.flags & PF_TAG_GENERATED)
6693 if (ip_divert_ptr != NULL &&
6694 ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
6695 struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1);
6696 if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) {
6697 pd.pf_mtag->flags |= PF_PACKET_LOOPED;
6698 m_tag_delete(m, ipfwtag);
6700 if (pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) {
6701 m->m_flags |= M_FASTFWD_OURS;
6702 pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
6706 /* We do IP header normalization and packet reassembly here */
6707 if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
6711 m = *m0; /* pf_normalize messes with m0 */
6712 h = mtod(m, struct ip *);
6714 off = h->ip_hl << 2;
6715 if (off < (int)sizeof(*h)) {
6717 REASON_SET(&reason, PFRES_SHORT);
6722 pd.src = (struct pf_addr *)&h->ip_src;
6723 pd.dst = (struct pf_addr *)&h->ip_dst;
6724 pd.sport = pd.dport = NULL;
6725 pd.ip_sum = &h->ip_sum;
6726 pd.proto_sum = NULL;
6729 pd.sidx = (dir == PF_IN) ? 0 : 1;
6730 pd.didx = (dir == PF_IN) ? 1 : 0;
6733 pd.tot_len = ntohs(h->ip_len);
6736 /* handle fragments that didn't get reassembled by normalization */
6737 if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
6738 action = pf_test_fragment(&r, dir, kif, m, h,
6749 if (!pf_pull_hdr(m, off, &th, sizeof(th),
6750 &action, &reason, AF_INET)) {
6751 log = action != PF_PASS;
6754 pd.p_len = pd.tot_len - off - (th.th_off << 2);
6755 if ((th.th_flags & TH_ACK) && pd.p_len == 0)
6757 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6758 if (action == PF_DROP)
6760 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6762 if (action == PF_PASS) {
6765 if (pfsync_update_state_ptr != NULL)
6766 pfsync_update_state_ptr(s);
6768 pfsync_update_state(s);
6770 #endif /* NPFSYNC */
6774 } else if (s == NULL)
6776 action = pf_test_rule(&r, &s, dir, kif,
6777 m, off, h, &pd, &a, &ruleset, NULL, inp);
6779 action = pf_test_rule(&r, &s, dir, kif,
6780 m, off, h, &pd, &a, &ruleset, &ipintrq);
6789 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
6790 &action, &reason, AF_INET)) {
6791 log = action != PF_PASS;
6794 if (uh.uh_dport == 0 ||
6795 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
6796 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
6798 REASON_SET(&reason, PFRES_SHORT);
6801 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6802 if (action == PF_PASS) {
6805 if (pfsync_update_state_ptr != NULL)
6806 pfsync_update_state_ptr(s);
6808 pfsync_update_state(s);
6810 #endif /* NPFSYNC */
6814 } else if (s == NULL)
6816 action = pf_test_rule(&r, &s, dir, kif,
6817 m, off, h, &pd, &a, &ruleset, NULL, inp);
6819 action = pf_test_rule(&r, &s, dir, kif,
6820 m, off, h, &pd, &a, &ruleset, &ipintrq);
6825 case IPPROTO_ICMP: {
6829 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
6830 &action, &reason, AF_INET)) {
6831 log = action != PF_PASS;
6834 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
6836 if (action == PF_PASS) {
6839 if (pfsync_update_state_ptr != NULL)
6840 pfsync_update_state_ptr(s);
6842 pfsync_update_state(s);
6844 #endif /* NPFSYNC */
6848 } else if (s == NULL)
6850 action = pf_test_rule(&r, &s, dir, kif,
6851 m, off, h, &pd, &a, &ruleset, NULL, inp);
6853 action = pf_test_rule(&r, &s, dir, kif,
6854 m, off, h, &pd, &a, &ruleset, &ipintrq);
6860 case IPPROTO_ICMPV6: {
6862 DPFPRINTF(PF_DEBUG_MISC,
6863 ("pf: dropping IPv4 packet with ICMPv6 payload\n"));
6869 action = pf_test_state_other(&s, dir, kif, m, &pd);
6870 if (action == PF_PASS) {
6873 if (pfsync_update_state_ptr != NULL)
6874 pfsync_update_state_ptr(s);
6876 pfsync_update_state(s);
6878 #endif /* NPFSYNC */
6882 } else if (s == NULL)
6884 action = pf_test_rule(&r, &s, dir, kif, m, off, h,
6885 &pd, &a, &ruleset, NULL, inp);
6887 action = pf_test_rule(&r, &s, dir, kif, m, off, h,
6888 &pd, &a, &ruleset, &ipintrq);
6894 if (action == PF_PASS && h->ip_hl > 5 &&
6895 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6897 REASON_SET(&reason, PFRES_IPOPTIONS);
6899 DPFPRINTF(PF_DEBUG_MISC,
6900 ("pf: dropping packet with ip options\n"));
6903 if ((s && s->tag) || r->rtableid >= 0)
6905 pf_tag_packet(m, s ? s->tag : 0, r->rtableid, pd.pf_mtag);
6907 pf_tag_packet(m, s ? s->tag : 0, r->rtableid);
6910 if (dir == PF_IN && s && s->key[PF_SK_STACK])
6912 pd.pf_mtag->statekey = s->key[PF_SK_STACK];
6914 m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
6918 if (action == PF_PASS && r->qid) {
6920 if (pqid || (pd.tos & IPTOS_LOWDELAY))
6921 pd.pf_mtag->qid = r->pqid;
6923 pd.pf_mtag->qid = r->qid;
6924 /* add hints for ecn */
6925 pd.pf_mtag->hdr = h;
6928 if (pqid || (pd.tos & IPTOS_LOWDELAY))
6929 m->m_pkthdr.pf.qid = r->pqid;
6931 m->m_pkthdr.pf.qid = r->qid;
6932 /* add hints for ecn */
6933 m->m_pkthdr.pf.hdr = h;
6939 * connections redirected to loopback should not match sockets
6940 * bound specifically to loopback due to security implications,
6941 * see tcp_input() and in_pcblookup_listen().
6943 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6944 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6945 (s->nat_rule.ptr->action == PF_RDR ||
6946 s->nat_rule.ptr->action == PF_BINAT) &&
6947 (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
6949 m->m_flags |= M_SKIP_FIREWALL;
6951 m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;
6955 if (action == PF_PASS && r->divert.port &&
6956 ip_divert_ptr != NULL && !PACKET_LOOPED()) {
6958 ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0,
6959 sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
6960 if (ipfwtag != NULL) {
6961 ((struct ipfw_rule_ref *)(ipfwtag+1))->info =
6962 ntohs(r->divert.port);
6963 ((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir;
6965 m_tag_prepend(m, ipfwtag);
6969 if (m->m_flags & M_FASTFWD_OURS) {
6970 pd.pf_mtag->flags |= PF_FASTFWD_OURS_PRESENT;
6971 m->m_flags &= ~M_FASTFWD_OURS;
6975 dir == PF_IN ? DIR_IN : DIR_OUT);
6979 /* XXX: ipfw has the same behaviour! */
6981 REASON_SET(&reason, PFRES_MEMORY);
6983 DPFPRINTF(PF_DEBUG_MISC,
6984 ("pf: failed to allocate divert tag\n"));
6988 if (dir == PF_IN && action == PF_PASS && r->divert.port) {
6989 struct pf_divert *divert;
6991 if ((divert = pf_get_divert(m))) {
6992 m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
6993 divert->port = r->divert.port;
6994 divert->addr.ipv4 = r->divert.addr.v4;
7002 if (s != NULL && s->nat_rule.ptr != NULL &&
7003 s->nat_rule.ptr->log & PF_LOG_ALL)
7004 lr = s->nat_rule.ptr;
7007 PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a, ruleset,
7011 kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
7012 kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
7014 if (action == PF_PASS || r->action == PF_DROP) {
7015 dirndx = (dir == PF_OUT);
7016 r->packets[dirndx]++;
7017 r->bytes[dirndx] += pd.tot_len;
7019 a->packets[dirndx]++;
7020 a->bytes[dirndx] += pd.tot_len;
7023 if (s->nat_rule.ptr != NULL) {
7024 s->nat_rule.ptr->packets[dirndx]++;
7025 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
7027 if (s->src_node != NULL) {
7028 s->src_node->packets[dirndx]++;
7029 s->src_node->bytes[dirndx] += pd.tot_len;
7031 if (s->nat_src_node != NULL) {
7032 s->nat_src_node->packets[dirndx]++;
7033 s->nat_src_node->bytes[dirndx] += pd.tot_len;
7035 dirndx = (dir == s->direction) ? 0 : 1;
7036 s->packets[dirndx]++;
7037 s->bytes[dirndx] += pd.tot_len;
7040 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
7042 if (nr != NULL && r == &V_pf_default_rule)
7044 if (nr != NULL && r == &pf_default_rule)
7047 if (tr->src.addr.type == PF_ADDR_TABLE)
7048 pfr_update_stats(tr->src.addr.p.tbl,
7049 (s == NULL) ? pd.src :
7050 &s->key[(s->direction == PF_IN)]->
7051 addr[(s->direction == PF_OUT)],
7052 pd.af, pd.tot_len, dir == PF_OUT,
7053 r->action == PF_PASS, tr->src.neg);
7054 if (tr->dst.addr.type == PF_ADDR_TABLE)
7055 pfr_update_stats(tr->dst.addr.p.tbl,
7056 (s == NULL) ? pd.dst :
7057 &s->key[(s->direction == PF_IN)]->
7058 addr[(s->direction == PF_IN)],
7059 pd.af, pd.tot_len, dir == PF_OUT,
7060 r->action == PF_PASS, tr->dst.neg);
7064 case PF_SYNPROXY_DROP:
7071 /* pf_route can free the mbuf causing *m0 to become NULL */
7073 pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);
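/*
 * Editor's note: the redirect-to-loopback test in pf_test() above,
 * (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET,
 * simply asks whether the translated destination lies in 127.0.0.0/8
 * (IN_CLASSA_NSHIFT is 24, IN_LOOPBACKNET is 127).  A standalone
 * sketch of the same check; illustrative only and not part of pf.
 */
#include <stdint.h>
#include <arpa/inet.h>

static int
example_is_loopback_net(uint32_t addr_network_order)
{
	/* Compare the leading octet of the address against 127. */
	return ((ntohl(addr_network_order) >> 24) == 127);
}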
7086 pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
7087 struct ether_header *eh, struct inpcb *inp)
7089 pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
7090 struct ether_header *eh)
7093 struct pfi_kif *kif;
7094 u_short action, reason = 0, log = 0;
7095 struct mbuf *m = *m0, *n = NULL;
7097 struct ip6_hdr *h = NULL;
7098 struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
7101 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
7103 struct pf_state *s = NULL;
7104 struct pf_ruleset *ruleset = NULL;
7106 int off, terminal = 0, dirndx, rh_cnt = 0;
7110 if (!V_pf_status.running) {
7115 if (!pf_status.running)
7119 memset(&pd, 0, sizeof(pd));
7121 if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
7123 DPFPRINTF(PF_DEBUG_URGENT,
7124 ("pf_test: pf_get_mtag returned NULL\n"));
7129 if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
7130 kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif;
7133 kif = (struct pfi_kif *)ifp->if_pf_kif;
7139 DPFPRINTF(PF_DEBUG_URGENT,
7140 ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
7143 if (kif->pfik_flags & PFI_IFLAG_SKIP)
7157 if ((m->m_flags & M_PKTHDR) == 0)
7158 panic("non-M_PKTHDR is passed to pf_test6");
7159 #endif /* DIAGNOSTIC */
7162 if (m->m_pkthdr.len < (int)sizeof(*h)) {
7164 REASON_SET(&reason, PFRES_SHORT);
7170 if (pd.pf_mtag->flags & PF_TAG_GENERATED) {
7173 if (m->m_pkthdr.pf.flags & PF_TAG_GENERATED)
7180 /* We do IP header normalization and packet reassembly here */
7181 if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
7185 m = *m0; /* pf_normalize messes with m0 */
7186 h = mtod(m, struct ip6_hdr *);
7190 * we do not support jumbograms yet. If we keep going, a zero ip6_plen
7191 * will do something bad, so drop the packet for now.
7193 if (htons(h->ip6_plen) == 0) {
7195 REASON_SET(&reason, PFRES_NORM); /*XXX*/
7200 pd.src = (struct pf_addr *)&h->ip6_src;
7201 pd.dst = (struct pf_addr *)&h->ip6_dst;
7202 pd.sport = pd.dport = NULL;
7204 pd.proto_sum = NULL;
7206 pd.sidx = (dir == PF_IN) ? 0 : 1;
7207 pd.didx = (dir == PF_IN) ? 1 : 0;
7210 pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
7213 off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
7214 pd.proto = h->ip6_nxt;
7217 case IPPROTO_FRAGMENT:
7218 action = pf_test_fragment(&r, dir, kif, m, h,
7220 if (action == PF_DROP)
7221 REASON_SET(&reason, PFRES_FRAG);
7223 case IPPROTO_ROUTING: {
7224 struct ip6_rthdr rthdr;
7227 DPFPRINTF(PF_DEBUG_MISC,
7228 ("pf: IPv6 more than one rthdr\n"));
7230 REASON_SET(&reason, PFRES_IPOPTIONS);
7234 if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
7236 DPFPRINTF(PF_DEBUG_MISC,
7237 ("pf: IPv6 short rthdr\n"));
7239 REASON_SET(&reason, PFRES_SHORT);
7243 if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
7244 DPFPRINTF(PF_DEBUG_MISC,
7245 ("pf: IPv6 rthdr0\n"));
7247 REASON_SET(&reason, PFRES_IPOPTIONS);
7254 case IPPROTO_HOPOPTS:
7255 case IPPROTO_DSTOPTS: {
7256 /* get next header and header length */
7257 struct ip6_ext opt6;
7259 if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
7260 NULL, &reason, pd.af)) {
7261 DPFPRINTF(PF_DEBUG_MISC,
7262 ("pf: IPv6 short opt\n"));
7267 if (pd.proto == IPPROTO_AH)
7268 off += (opt6.ip6e_len + 2) * 4;
7270 off += (opt6.ip6e_len + 1) * 8;
7271 pd.proto = opt6.ip6e_nxt;
7272 /* go to the next header */
7279 } while (!terminal);
7281 /* if there's no routing header, use unmodified mbuf for checksumming */
7291 if (!pf_pull_hdr(m, off, &th, sizeof(th),
7292 &action, &reason, AF_INET6)) {
7293 log = action != PF_PASS;
7296 pd.p_len = pd.tot_len - off - (th.th_off << 2);
7297 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
7298 if (action == PF_DROP)
7300 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
7302 if (action == PF_PASS) {
7305 if (pfsync_update_state_ptr != NULL)
7306 pfsync_update_state_ptr(s);
7308 pfsync_update_state(s);
7310 #endif /* NPFSYNC */
7314 } else if (s == NULL)
7316 action = pf_test_rule(&r, &s, dir, kif,
7317 m, off, h, &pd, &a, &ruleset, NULL, inp);
7319 action = pf_test_rule(&r, &s, dir, kif,
7320 m, off, h, &pd, &a, &ruleset, &ip6intrq);
7329 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
7330 &action, &reason, AF_INET6)) {
7331 log = action != PF_PASS;
7334 if (uh.uh_dport == 0 ||
7335 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
7336 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
7338 REASON_SET(&reason, PFRES_SHORT);
7341 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
7342 if (action == PF_PASS) {
7345 if (pfsync_update_state_ptr != NULL)
7346 pfsync_update_state_ptr(s);
7348 pfsync_update_state(s);
7350 #endif /* NPFSYNC */
7354 } else if (s == NULL)
7356 action = pf_test_rule(&r, &s, dir, kif,
7357 m, off, h, &pd, &a, &ruleset, NULL, inp);
7359 action = pf_test_rule(&r, &s, dir, kif,
7360 m, off, h, &pd, &a, &ruleset, &ip6intrq);
7365 case IPPROTO_ICMP: {
7367 DPFPRINTF(PF_DEBUG_MISC,
7368 ("pf: dropping IPv6 packet with ICMPv4 payload\n"));
7372 case IPPROTO_ICMPV6: {
7373 struct icmp6_hdr ih;
7376 if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
7377 &action, &reason, AF_INET6)) {
7378 log = action != PF_PASS;
7381 action = pf_test_state_icmp(&s, dir, kif,
7382 m, off, h, &pd, &reason);
7383 if (action == PF_PASS) {
7386 if (pfsync_update_state_ptr != NULL)
7387 pfsync_update_state_ptr(s);
7389 pfsync_update_state(s);
7391 #endif /* NPFSYNC */
7395 } else if (s == NULL)
7397 action = pf_test_rule(&r, &s, dir, kif,
7398 m, off, h, &pd, &a, &ruleset, NULL, inp);
7400 action = pf_test_rule(&r, &s, dir, kif,
7401 m, off, h, &pd, &a, &ruleset, &ip6intrq);
7407 action = pf_test_state_other(&s, dir, kif, m, &pd);
7408 if (action == PF_PASS) {
7411 if (pfsync_update_state_ptr != NULL)
7412 pfsync_update_state_ptr(s);
7414 pfsync_update_state(s);
7416 #endif /* NPFSYNC */
7420 } else if (s == NULL)
7422 action = pf_test_rule(&r, &s, dir, kif, m, off, h,
7423 &pd, &a, &ruleset, NULL, inp);
7425 action = pf_test_rule(&r, &s, dir, kif, m, off, h,
7426 &pd, &a, &ruleset, &ip6intrq);
7437 /* handle dangerous IPv6 extension headers. */
7438 if (action == PF_PASS && rh_cnt &&
7439 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
7441 REASON_SET(&reason, PFRES_IPOPTIONS);
7443 DPFPRINTF(PF_DEBUG_MISC,
7444 ("pf: dropping packet with dangerous v6 headers\n"));
7447 if ((s && s->tag) || r->rtableid >= 0)
7449 pf_tag_packet(m, s ? s->tag : 0, r->rtableid, pd.pf_mtag);
7451 pf_tag_packet(m, s ? s->tag : 0, r->rtableid);
7454 if (dir == PF_IN && s && s->key[PF_SK_STACK])
7456 pd.pf_mtag->statekey = s->key[PF_SK_STACK];
7458 m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
7462 if (action == PF_PASS && r->qid) {
7464 if (pd.tos & IPTOS_LOWDELAY)
7465 pd.pf_mtag->qid = r->pqid;
7467 pd.pf_mtag->qid = r->qid;
7468 /* add hints for ecn */
7469 pd.pf_mtag->hdr = h;
7471 if (pd.tos & IPTOS_LOWDELAY)
7472 m->m_pkthdr.pf.qid = r->pqid;
7474 m->m_pkthdr.pf.qid = r->qid;
7475 /* add hints for ecn */
7476 m->m_pkthdr.pf.hdr = h;
7481 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
7482 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
7483 (s->nat_rule.ptr->action == PF_RDR ||
7484 s->nat_rule.ptr->action == PF_BINAT) &&
7485 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
7487 m->m_flags |= M_SKIP_FIREWALL;
7489 m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;
7493 /* XXX: Anybody working on it?! */
7495 printf("pf: divert(9) is not supported for IPv6\n");
7497 if (dir == PF_IN && action == PF_PASS && r->divert.port) {
7498 struct pf_divert *divert;
7500 if ((divert = pf_get_divert(m))) {
7501 m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
7502 divert->port = r->divert.port;
7503 divert->addr.ipv6 = r->divert.addr.v6;
7511 if (s != NULL && s->nat_rule.ptr != NULL &&
7512 s->nat_rule.ptr->log & PF_LOG_ALL)
7513 lr = s->nat_rule.ptr;
7516 PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, lr, a, ruleset,
7520 kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
7521 kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
7523 if (action == PF_PASS || r->action == PF_DROP) {
7524 dirndx = (dir == PF_OUT);
7525 r->packets[dirndx]++;
7526 r->bytes[dirndx] += pd.tot_len;
7528 a->packets[dirndx]++;
7529 a->bytes[dirndx] += pd.tot_len;
7532 if (s->nat_rule.ptr != NULL) {
7533 s->nat_rule.ptr->packets[dirndx]++;
7534 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
7536 if (s->src_node != NULL) {
7537 s->src_node->packets[dirndx]++;
7538 s->src_node->bytes[dirndx] += pd.tot_len;
7540 if (s->nat_src_node != NULL) {
7541 s->nat_src_node->packets[dirndx]++;
7542 s->nat_src_node->bytes[dirndx] += pd.tot_len;
7544 dirndx = (dir == s->direction) ? 0 : 1;
7545 s->packets[dirndx]++;
7546 s->bytes[dirndx] += pd.tot_len;
7549 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
7551 if (nr != NULL && r == &V_pf_default_rule)
7553 if (nr != NULL && r == &pf_default_rule)
7556 if (tr->src.addr.type == PF_ADDR_TABLE)
7557 pfr_update_stats(tr->src.addr.p.tbl,
7558 (s == NULL) ? pd.src :
7559 &s->key[(s->direction == PF_IN)]->addr[0],
7560 pd.af, pd.tot_len, dir == PF_OUT,
7561 r->action == PF_PASS, tr->src.neg);
7562 if (tr->dst.addr.type == PF_ADDR_TABLE)
7563 pfr_update_stats(tr->dst.addr.p.tbl,
7564 (s == NULL) ? pd.dst :
7565 &s->key[(s->direction == PF_IN)]->addr[1],
7566 pd.af, pd.tot_len, dir == PF_OUT,
7567 r->action == PF_PASS, tr->dst.neg);
7571 case PF_SYNPROXY_DROP:
7578 /* pf_route6 can free the mbuf causing *m0 to become NULL */
7580 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
7592 pf_check_congestion(struct ifqueue *ifq)
7595 /* XXX_IMPORT: later */
7598 if (ifq->ifq_congestion)
7606 * must be called whenever any addressing information such as
7607 * address, port or protocol has changed
7610 pf_pkt_addr_changed(struct mbuf *m)
7613 struct pf_mtag *pf_tag;
7615 if ((pf_tag = pf_find_mtag(m)) != NULL)
7616 pf_tag->statekey = NULL;
7618 m->m_pkthdr.pf.statekey = NULL;