1 /* $OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $ */
4 * Copyright (c) 2001 Daniel Hartmeier
5 * Copyright (c) 2002 - 2008 Henning Brauer
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * - Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * - Redistributions in binary form must reproduce the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer in the documentation and/or other materials provided
17 * with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
32 * Effort sponsored in part by the Defense Advanced Research Projects
33 * Agency (DARPA) and Air Force Research Laboratory, Air Force
34 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
40 #include "opt_inet6.h"
42 #include <sys/cdefs.h>
43 __FBSDID("$FreeBSD$");
51 #define NBPFILTER DEV_BPF
57 #define NPFLOG DEV_PFLOG
63 #define NPFSYNC DEV_PFSYNC
69 #define NPFLOW DEV_PFLOW
81 #include <sys/param.h>
82 #include <sys/systm.h>
84 #include <sys/filio.h>
85 #include <sys/socket.h>
86 #include <sys/socketvar.h>
87 #include <sys/kernel.h>
90 #include <sys/random.h>
91 #include <sys/sysctl.h>
92 #include <sys/endian.h>
93 #define betoh64 be64toh
99 #include <sys/kthread.h>
100 #include <sys/lock.h>
103 #include <sys/rwlock.h>
109 #include <crypto/md5.h>
113 #include <net/if_types.h>
115 #include <net/route.h>
118 #include <net/radix_mpath.h>
121 #include <net/radix_mpath.h>
124 #include <netinet/in.h>
125 #include <netinet/in_var.h>
126 #include <netinet/in_systm.h>
127 #include <netinet/ip.h>
128 #include <netinet/ip_var.h>
129 #include <netinet/tcp.h>
130 #include <netinet/tcp_seq.h>
131 #include <netinet/udp.h>
132 #include <netinet/ip_icmp.h>
133 #include <netinet/in_pcb.h>
134 #include <netinet/tcp_timer.h>
135 #include <netinet/tcp_var.h>
136 #include <netinet/udp_var.h>
137 #include <netinet/icmp_var.h>
138 #include <netinet/if_ether.h>
140 #include <netinet/ip_fw.h>
141 #include <netinet/ipfw/ip_fw_private.h> /* XXX: only for DIR_IN/DIR_OUT */
145 #include <dev/rndvar.h>
147 #include <net/pfvar.h>
148 #include <net/if_pflog.h>
149 #include <net/if_pflow.h>
150 #include <net/if_pfsync.h>
153 #include <netinet/ip6.h>
154 #include <netinet/in_pcb.h>
155 #include <netinet/icmp6.h>
156 #include <netinet6/nd6.h>
158 #include <netinet6/ip6_var.h>
159 #include <netinet6/in6_pcb.h>
164 #include <machine/in_cksum.h>
165 #include <sys/limits.h>
166 #include <sys/ucred.h>
167 #include <security/mac/mac_framework.h>
169 extern int ip_optcopy(struct ip *, struct ip *);
173 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
175 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
184 VNET_DEFINE(struct pf_state_tree, pf_statetbl);
186 VNET_DEFINE(struct pf_altqqueue, pf_altqs[2]);
187 VNET_DEFINE(struct pf_palist, pf_pabuf);
188 VNET_DEFINE(struct pf_altqqueue *, pf_altqs_active);
189 VNET_DEFINE(struct pf_altqqueue *, pf_altqs_inactive);
190 VNET_DEFINE(struct pf_status, pf_status);
192 VNET_DEFINE(u_int32_t, ticket_altqs_active);
193 VNET_DEFINE(u_int32_t, ticket_altqs_inactive);
194 VNET_DEFINE(int, altqs_inactive_open);
195 VNET_DEFINE(u_int32_t, ticket_pabuf);
197 VNET_DEFINE(MD5_CTX, pf_tcp_secret_ctx);
198 #define V_pf_tcp_secret_ctx VNET(pf_tcp_secret_ctx)
199 VNET_DEFINE(u_char, pf_tcp_secret[16]);
200 #define V_pf_tcp_secret VNET(pf_tcp_secret)
201 VNET_DEFINE(int, pf_tcp_secret_init);
202 #define V_pf_tcp_secret_init VNET(pf_tcp_secret_init)
203 VNET_DEFINE(int, pf_tcp_iss_off);
204 #define V_pf_tcp_iss_off VNET(pf_tcp_iss_off)
206 struct pf_anchor_stackframe {
207 struct pf_ruleset *rs;
209 struct pf_anchor_node *parent;
210 struct pf_anchor *child;
212 VNET_DEFINE(struct pf_anchor_stackframe, pf_anchor_stack[64]);
213 #define V_pf_anchor_stack VNET(pf_anchor_stack)
215 VNET_DEFINE(uma_zone_t, pf_src_tree_pl);
216 VNET_DEFINE(uma_zone_t, pf_rule_pl);
217 VNET_DEFINE(uma_zone_t, pf_pooladdr_pl);
218 VNET_DEFINE(uma_zone_t, pf_state_pl);
219 VNET_DEFINE(uma_zone_t, pf_state_key_pl);
220 VNET_DEFINE(uma_zone_t, pf_state_item_pl);
221 VNET_DEFINE(uma_zone_t, pf_altq_pl);
223 struct pf_state_tree pf_statetbl;
225 struct pf_altqqueue pf_altqs[2];
226 struct pf_palist pf_pabuf;
227 struct pf_altqqueue *pf_altqs_active;
228 struct pf_altqqueue *pf_altqs_inactive;
229 struct pf_status pf_status;
231 u_int32_t ticket_altqs_active;
232 u_int32_t ticket_altqs_inactive;
233 int altqs_inactive_open;
234 u_int32_t ticket_pabuf;
236 MD5_CTX pf_tcp_secret_ctx;
237 u_char pf_tcp_secret[16];
238 int pf_tcp_secret_init;
241 struct pf_anchor_stackframe {
242 struct pf_ruleset *rs;
244 struct pf_anchor_node *parent;
245 struct pf_anchor *child;
246 } pf_anchor_stack[64];
248 struct pool pf_src_tree_pl, pf_rule_pl, pf_pooladdr_pl;
249 struct pool pf_state_pl, pf_state_key_pl, pf_state_item_pl;
250 struct pool pf_altq_pl;
253 void pf_init_threshold(struct pf_threshold *, u_int32_t,
255 void pf_add_threshold(struct pf_threshold *);
256 int pf_check_threshold(struct pf_threshold *);
258 void pf_change_ap(struct pf_addr *, u_int16_t *,
259 u_int16_t *, u_int16_t *, struct pf_addr *,
260 u_int16_t, u_int8_t, sa_family_t);
261 int pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
262 struct tcphdr *, struct pf_state_peer *);
264 void pf_change_a6(struct pf_addr *, u_int16_t *,
265 struct pf_addr *, u_int8_t);
267 void pf_change_icmp(struct pf_addr *, u_int16_t *,
268 struct pf_addr *, struct pf_addr *, u_int16_t,
269 u_int16_t *, u_int16_t *, u_int16_t *,
270 u_int16_t *, u_int8_t, sa_family_t);
272 void pf_send_tcp(struct mbuf *,
273 const struct pf_rule *, sa_family_t,
275 void pf_send_tcp(const struct pf_rule *, sa_family_t,
277 const struct pf_addr *, const struct pf_addr *,
278 u_int16_t, u_int16_t, u_int32_t, u_int32_t,
279 u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
280 u_int16_t, struct ether_header *, struct ifnet *);
281 static void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
282 sa_family_t, struct pf_rule *);
283 void pf_detach_state(struct pf_state *);
284 void pf_state_key_detach(struct pf_state *, int);
285 u_int32_t pf_tcp_iss(struct pf_pdesc *);
286 int pf_test_rule(struct pf_rule **, struct pf_state **,
287 int, struct pfi_kif *, struct mbuf *, int,
288 void *, struct pf_pdesc *, struct pf_rule **,
290 struct pf_ruleset **, struct ifqueue *,
293 struct pf_ruleset **, struct ifqueue *);
295 static __inline int pf_create_state(struct pf_rule *, struct pf_rule *,
296 struct pf_rule *, struct pf_pdesc *,
297 struct pf_src_node *, struct pf_state_key *,
298 struct pf_state_key *, struct pf_state_key *,
299 struct pf_state_key *, struct mbuf *, int,
300 u_int16_t, u_int16_t, int *, struct pfi_kif *,
301 struct pf_state **, int, u_int16_t, u_int16_t,
303 int pf_test_fragment(struct pf_rule **, int,
304 struct pfi_kif *, struct mbuf *, void *,
305 struct pf_pdesc *, struct pf_rule **,
306 struct pf_ruleset **);
307 int pf_tcp_track_full(struct pf_state_peer *,
308 struct pf_state_peer *, struct pf_state **,
309 struct pfi_kif *, struct mbuf *, int,
310 struct pf_pdesc *, u_short *, int *);
311 int pf_tcp_track_sloppy(struct pf_state_peer *,
312 struct pf_state_peer *, struct pf_state **,
313 struct pf_pdesc *, u_short *);
314 int pf_test_state_tcp(struct pf_state **, int,
315 struct pfi_kif *, struct mbuf *, int,
316 void *, struct pf_pdesc *, u_short *);
317 int pf_test_state_udp(struct pf_state **, int,
318 struct pfi_kif *, struct mbuf *, int,
319 void *, struct pf_pdesc *);
320 int pf_test_state_icmp(struct pf_state **, int,
321 struct pfi_kif *, struct mbuf *, int,
322 void *, struct pf_pdesc *, u_short *);
323 int pf_test_state_other(struct pf_state **, int,
324 struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
325 void pf_route(struct mbuf **, struct pf_rule *, int,
326 struct ifnet *, struct pf_state *,
328 void pf_route6(struct mbuf **, struct pf_rule *, int,
329 struct ifnet *, struct pf_state *,
332 int pf_socket_lookup(int, struct pf_pdesc *);
334 u_int8_t pf_get_wscale(struct mbuf *, int, u_int16_t,
336 u_int16_t pf_get_mss(struct mbuf *, int, u_int16_t,
338 u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t,
340 void pf_set_rt_ifp(struct pf_state *,
342 int pf_check_proto_cksum(struct mbuf *, int, int,
343 u_int8_t, sa_family_t);
345 struct pf_divert *pf_get_divert(struct mbuf *);
347 void pf_print_state_parts(struct pf_state *,
348 struct pf_state_key *, struct pf_state_key *);
349 int pf_addr_wrap_neq(struct pf_addr_wrap *,
350 struct pf_addr_wrap *);
351 int pf_compare_state_keys(struct pf_state_key *,
352 struct pf_state_key *, struct pfi_kif *, u_int);
354 struct pf_state *pf_find_state(struct pfi_kif *,
355 struct pf_state_key_cmp *, u_int, struct mbuf *,
358 struct pf_state *pf_find_state(struct pfi_kif *,
359 struct pf_state_key_cmp *, u_int, struct mbuf *);
361 int pf_src_connlimit(struct pf_state **);
362 int pf_check_congestion(struct ifqueue *);
365 int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);
367 VNET_DECLARE(int, pf_end_threads);
369 VNET_DEFINE(struct pf_pool_limit, pf_pool_limits[PF_LIMIT_MAX]);
371 extern struct pool pfr_ktable_pl;
372 extern struct pool pfr_kentry_pl;
374 struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
375 { &pf_state_pl, PFSTATE_HIWAT },
376 { &pf_src_tree_pl, PFSNODE_HIWAT },
377 { &pf_frent_pl, PFFRAG_FRENT_HIWAT },
378 { &pfr_ktable_pl, PFR_KTABLE_HIWAT },
379 { &pfr_kentry_pl, PFR_KENTRY_HIWAT }
384 #define PPACKET_LOOPED() \
385 (pd->pf_mtag->flags & PF_PACKET_LOOPED)
387 #define PACKET_LOOPED() \
388 (pd.pf_mtag->flags & PF_PACKET_LOOPED)
390 #define STATE_LOOKUP(i, k, d, s, m, pt) \
392 s = pf_find_state(i, k, d, m, pt); \
393 if (s == NULL || (s)->timeout == PFTM_PURGE) \
395 if (PPACKET_LOOPED()) \
398 (((s)->rule.ptr->rt == PF_ROUTETO && \
399 (s)->rule.ptr->direction == PF_OUT) || \
400 ((s)->rule.ptr->rt == PF_REPLYTO && \
401 (s)->rule.ptr->direction == PF_IN)) && \
402 (s)->rt_kif != NULL && \
407 #define STATE_LOOKUP(i, k, d, s, m) \
409 s = pf_find_state(i, k, d, m); \
410 if (s == NULL || (s)->timeout == PFTM_PURGE) \
413 (((s)->rule.ptr->rt == PF_ROUTETO && \
414 (s)->rule.ptr->direction == PF_OUT) || \
415 ((s)->rule.ptr->rt == PF_REPLYTO && \
416 (s)->rule.ptr->direction == PF_IN)) && \
417 (s)->rt_kif != NULL && \
424 #define BOUND_IFACE(r, k) \
425 ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all
427 #define BOUND_IFACE(r, k) \
428 ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all
431 #define STATE_INC_COUNTERS(s) \
433 s->rule.ptr->states_cur++; \
434 s->rule.ptr->states_tot++; \
435 if (s->anchor.ptr != NULL) { \
436 s->anchor.ptr->states_cur++; \
437 s->anchor.ptr->states_tot++; \
439 if (s->nat_rule.ptr != NULL) { \
440 s->nat_rule.ptr->states_cur++; \
441 s->nat_rule.ptr->states_tot++; \
445 #define STATE_DEC_COUNTERS(s) \
447 if (s->nat_rule.ptr != NULL) \
448 s->nat_rule.ptr->states_cur--; \
449 if (s->anchor.ptr != NULL) \
450 s->anchor.ptr->states_cur--; \
451 s->rule.ptr->states_cur--; \
454 static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
455 static __inline int pf_state_compare_key(struct pf_state_key *,
456 struct pf_state_key *);
457 static __inline int pf_state_compare_id(struct pf_state *,
461 VNET_DEFINE(struct pf_src_tree, tree_src_tracking);
463 VNET_DEFINE(struct pf_state_tree_id, tree_id);
464 VNET_DEFINE(struct pf_state_queue, state_list);
466 struct pf_src_tree tree_src_tracking;
468 struct pf_state_tree_id tree_id;
469 struct pf_state_queue state_list;
472 RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
473 RB_GENERATE(pf_state_tree, pf_state_key, entry, pf_state_compare_key);
474 RB_GENERATE(pf_state_tree_id, pf_state,
475 entry_id, pf_state_compare_id);
478 pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
482 if (a->rule.ptr > b->rule.ptr)
484 if (a->rule.ptr < b->rule.ptr)
486 if ((diff = a->af - b->af) != 0)
491 if (a->addr.addr32[0] > b->addr.addr32[0])
493 if (a->addr.addr32[0] < b->addr.addr32[0])
499 if (a->addr.addr32[3] > b->addr.addr32[3])
501 if (a->addr.addr32[3] < b->addr.addr32[3])
503 if (a->addr.addr32[2] > b->addr.addr32[2])
505 if (a->addr.addr32[2] < b->addr.addr32[2])
507 if (a->addr.addr32[1] > b->addr.addr32[1])
509 if (a->addr.addr32[1] < b->addr.addr32[1])
511 if (a->addr.addr32[0] > b->addr.addr32[0])
513 if (a->addr.addr32[0] < b->addr.addr32[0])
523 pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
528 dst->addr32[0] = src->addr32[0];
532 dst->addr32[0] = src->addr32[0];
533 dst->addr32[1] = src->addr32[1];
534 dst->addr32[2] = src->addr32[2];
535 dst->addr32[3] = src->addr32[3];
542 pf_init_threshold(struct pf_threshold *threshold,
543 u_int32_t limit, u_int32_t seconds)
545 threshold->limit = limit * PF_THRESHOLD_MULT;
546 threshold->seconds = seconds;
547 threshold->count = 0;
548 threshold->last = time_second;
552 pf_add_threshold(struct pf_threshold *threshold)
554 u_int32_t t = time_second, diff = t - threshold->last;
556 if (diff >= threshold->seconds)
557 threshold->count = 0;
559 threshold->count -= threshold->count * diff /
561 threshold->count += PF_THRESHOLD_MULT;
566 pf_check_threshold(struct pf_threshold *threshold)
568 return (threshold->count > threshold->limit);
572 pf_src_connlimit(struct pf_state **state)
576 (*state)->src_node->conn++;
577 (*state)->src.tcp_est = 1;
578 pf_add_threshold(&(*state)->src_node->conn_rate);
580 if ((*state)->rule.ptr->max_src_conn &&
581 (*state)->rule.ptr->max_src_conn <
582 (*state)->src_node->conn) {
584 V_pf_status.lcounters[LCNT_SRCCONN]++;
586 pf_status.lcounters[LCNT_SRCCONN]++;
591 if ((*state)->rule.ptr->max_src_conn_rate.limit &&
592 pf_check_threshold(&(*state)->src_node->conn_rate)) {
594 V_pf_status.lcounters[LCNT_SRCCONNRATE]++;
596 pf_status.lcounters[LCNT_SRCCONNRATE]++;
604 if ((*state)->rule.ptr->overload_tbl) {
606 u_int32_t killed = 0;
609 V_pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
610 if (V_pf_status.debug >= PF_DEBUG_MISC) {
612 pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
613 if (pf_status.debug >= PF_DEBUG_MISC) {
615 printf("pf_src_connlimit: blocking address ");
616 pf_print_host(&(*state)->src_node->addr, 0,
617 (*state)->key[PF_SK_WIRE]->af);
620 bzero(&p, sizeof(p));
621 p.pfra_af = (*state)->key[PF_SK_WIRE]->af;
622 switch ((*state)->key[PF_SK_WIRE]->af) {
626 p.pfra_ip4addr = (*state)->src_node->addr.v4;
632 p.pfra_ip6addr = (*state)->src_node->addr.v6;
637 pfr_insert_kentry((*state)->rule.ptr->overload_tbl,
640 /* kill existing states if that's required. */
641 if ((*state)->rule.ptr->flush) {
642 struct pf_state_key *sk;
646 V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
647 RB_FOREACH(st, pf_state_tree_id, &V_tree_id) {
649 pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
650 RB_FOREACH(st, pf_state_tree_id, &tree_id) {
652 sk = st->key[PF_SK_WIRE];
654 * Kill states from this source. (Only those
655 * from the same rule if PF_FLUSH_GLOBAL is not
659 (*state)->key[PF_SK_WIRE]->af &&
660 (((*state)->direction == PF_OUT &&
661 PF_AEQ(&(*state)->src_node->addr,
662 &sk->addr[0], sk->af)) ||
663 ((*state)->direction == PF_IN &&
664 PF_AEQ(&(*state)->src_node->addr,
665 &sk->addr[1], sk->af))) &&
666 ((*state)->rule.ptr->flush &
668 (*state)->rule.ptr == st->rule.ptr)) {
669 st->timeout = PFTM_PURGE;
670 st->src.state = st->dst.state =
676 if (V_pf_status.debug >= PF_DEBUG_MISC)
678 if (pf_status.debug >= PF_DEBUG_MISC)
680 printf(", %u states killed", killed);
683 if (V_pf_status.debug >= PF_DEBUG_MISC)
685 if (pf_status.debug >= PF_DEBUG_MISC)
690 /* kill this state */
691 (*state)->timeout = PFTM_PURGE;
692 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
697 pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
698 struct pf_addr *src, sa_family_t af)
700 struct pf_src_node k;
704 PF_ACPY(&k.addr, src, af);
705 if (rule->rule_flag & PFRULE_RULESRCTRACK ||
706 rule->rpool.opts & PF_POOL_STICKYADDR)
711 V_pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
712 *sn = RB_FIND(pf_src_tree, &V_tree_src_tracking, &k);
714 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
715 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
719 if (!rule->max_src_nodes ||
720 rule->src_nodes < rule->max_src_nodes)
722 (*sn) = pool_get(&V_pf_src_tree_pl, PR_NOWAIT | PR_ZERO);
724 (*sn) = pool_get(&pf_src_tree_pl, PR_NOWAIT | PR_ZERO);
728 V_pf_status.lcounters[LCNT_SRCNODES]++;
730 pf_status.lcounters[LCNT_SRCNODES]++;
735 pf_init_threshold(&(*sn)->conn_rate,
736 rule->max_src_conn_rate.limit,
737 rule->max_src_conn_rate.seconds);
740 if (rule->rule_flag & PFRULE_RULESRCTRACK ||
741 rule->rpool.opts & PF_POOL_STICKYADDR)
742 (*sn)->rule.ptr = rule;
744 (*sn)->rule.ptr = NULL;
745 PF_ACPY(&(*sn)->addr, src, af);
746 if (RB_INSERT(pf_src_tree,
748 &V_tree_src_tracking, *sn) != NULL) {
749 if (V_pf_status.debug >= PF_DEBUG_MISC) {
751 &tree_src_tracking, *sn) != NULL) {
752 if (pf_status.debug >= PF_DEBUG_MISC) {
754 printf("pf: src_tree insert failed: ");
755 pf_print_host(&(*sn)->addr, 0, af);
759 pool_put(&V_pf_src_tree_pl, *sn);
761 pool_put(&pf_src_tree_pl, *sn);
765 (*sn)->creation = time_second;
766 (*sn)->ruletype = rule->action;
767 if ((*sn)->rule.ptr != NULL)
768 (*sn)->rule.ptr->src_nodes++;
770 V_pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
771 V_pf_status.src_nodes++;
773 pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
774 pf_status.src_nodes++;
777 if (rule->max_src_states &&
778 (*sn)->states >= rule->max_src_states) {
780 V_pf_status.lcounters[LCNT_SRCSTATES]++;
782 pf_status.lcounters[LCNT_SRCSTATES]++;
790 /* state table stuff */
793 pf_state_compare_key(struct pf_state_key *a, struct pf_state_key *b)
797 if ((diff = a->proto - b->proto) != 0)
799 if ((diff = a->af - b->af) != 0)
804 if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
806 if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
808 if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
810 if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
816 if (a->addr[0].addr32[3] > b->addr[0].addr32[3])
818 if (a->addr[0].addr32[3] < b->addr[0].addr32[3])
820 if (a->addr[1].addr32[3] > b->addr[1].addr32[3])
822 if (a->addr[1].addr32[3] < b->addr[1].addr32[3])
824 if (a->addr[0].addr32[2] > b->addr[0].addr32[2])
826 if (a->addr[0].addr32[2] < b->addr[0].addr32[2])
828 if (a->addr[1].addr32[2] > b->addr[1].addr32[2])
830 if (a->addr[1].addr32[2] < b->addr[1].addr32[2])
832 if (a->addr[0].addr32[1] > b->addr[0].addr32[1])
834 if (a->addr[0].addr32[1] < b->addr[0].addr32[1])
836 if (a->addr[1].addr32[1] > b->addr[1].addr32[1])
838 if (a->addr[1].addr32[1] < b->addr[1].addr32[1])
840 if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
842 if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
844 if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
846 if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
852 if ((diff = a->port[0] - b->port[0]) != 0)
854 if ((diff = a->port[1] - b->port[1]) != 0)
861 pf_state_compare_id(struct pf_state *a, struct pf_state *b)
867 if (a->creatorid > b->creatorid)
869 if (a->creatorid < b->creatorid)
876 pf_state_key_attach(struct pf_state_key *sk, struct pf_state *s, int idx)
878 struct pf_state_item *si;
879 struct pf_state_key *cur;
880 struct pf_state *olds = NULL;
883 KASSERT(s->key[idx] == NULL, ("%s: key is null!", __FUNCTION__));
885 KASSERT(s->key[idx] == NULL); /* XXX handle this? */
889 if ((cur = RB_INSERT(pf_state_tree, &V_pf_statetbl, sk)) != NULL) {
891 if ((cur = RB_INSERT(pf_state_tree, &pf_statetbl, sk)) != NULL) {
893 /* key exists. check for same kif, if none, add to key */
894 TAILQ_FOREACH(si, &cur->states, entry)
895 if (si->s->kif == s->kif &&
896 si->s->direction == s->direction) {
897 if (sk->proto == IPPROTO_TCP &&
898 si->s->src.state >= TCPS_FIN_WAIT_2 &&
899 si->s->dst.state >= TCPS_FIN_WAIT_2) {
900 si->s->src.state = si->s->dst.state =
902 /* unlink late or sks can go away */
906 if (V_pf_status.debug >= PF_DEBUG_MISC) {
908 if (pf_status.debug >= PF_DEBUG_MISC) {
910 printf("pf: %s key attach "
912 (idx == PF_SK_WIRE) ?
915 pf_print_state_parts(s,
916 (idx == PF_SK_WIRE) ?
918 (idx == PF_SK_STACK) ?
920 printf(", existing: ");
921 pf_print_state_parts(si->s,
922 (idx == PF_SK_WIRE) ?
924 (idx == PF_SK_STACK) ?
929 pool_put(&V_pf_state_key_pl, sk);
931 pool_put(&pf_state_key_pl, sk);
933 return (-1); /* collision! */
937 pool_put(&V_pf_state_key_pl, sk);
939 pool_put(&pf_state_key_pl, sk);
946 if ((si = pool_get(&V_pf_state_item_pl, PR_NOWAIT)) == NULL) {
948 if ((si = pool_get(&pf_state_item_pl, PR_NOWAIT)) == NULL) {
950 pf_state_key_detach(s, idx);
955 /* list is sorted, if-bound states before floating */
957 if (s->kif == V_pfi_all)
959 if (s->kif == pfi_all)
961 TAILQ_INSERT_TAIL(&s->key[idx]->states, si, entry);
963 TAILQ_INSERT_HEAD(&s->key[idx]->states, si, entry);
966 pf_unlink_state(olds);
972 pf_detach_state(struct pf_state *s)
974 if (s->key[PF_SK_WIRE] == s->key[PF_SK_STACK])
975 s->key[PF_SK_WIRE] = NULL;
977 if (s->key[PF_SK_STACK] != NULL)
978 pf_state_key_detach(s, PF_SK_STACK);
980 if (s->key[PF_SK_WIRE] != NULL)
981 pf_state_key_detach(s, PF_SK_WIRE);
985 pf_state_key_detach(struct pf_state *s, int idx)
987 struct pf_state_item *si;
989 si = TAILQ_FIRST(&s->key[idx]->states);
990 while (si && si->s != s)
991 si = TAILQ_NEXT(si, entry);
994 TAILQ_REMOVE(&s->key[idx]->states, si, entry);
996 pool_put(&V_pf_state_item_pl, si);
998 pool_put(&pf_state_item_pl, si);
1002 if (TAILQ_EMPTY(&s->key[idx]->states)) {
1004 RB_REMOVE(pf_state_tree, &V_pf_statetbl, s->key[idx]);
1006 RB_REMOVE(pf_state_tree, &pf_statetbl, s->key[idx]);
1008 if (s->key[idx]->reverse)
1009 s->key[idx]->reverse->reverse = NULL;
1011 /* XXX: implement this */
1013 if (s->key[idx]->inp)
1014 s->key[idx]->inp->inp_pf_sk = NULL;
1017 pool_put(&V_pf_state_key_pl, s->key[idx]);
1019 pool_put(&pf_state_key_pl, s->key[idx]);
1025 struct pf_state_key *
1026 pf_alloc_state_key(int pool_flags)
1028 struct pf_state_key *sk;
1031 if ((sk = pool_get(&V_pf_state_key_pl, pool_flags)) == NULL)
1033 if ((sk = pool_get(&pf_state_key_pl, pool_flags)) == NULL)
1036 TAILQ_INIT(&sk->states);
1042 pf_state_key_setup(struct pf_pdesc *pd, struct pf_rule *nr,
1043 struct pf_state_key **skw, struct pf_state_key **sks,
1044 struct pf_state_key **skp, struct pf_state_key **nkp,
1045 struct pf_addr *saddr, struct pf_addr *daddr,
1046 u_int16_t sport, u_int16_t dport)
1049 KASSERT((*skp == NULL && *nkp == NULL),
1050 ("%s: skp == NULL && nkp == NULL", __FUNCTION__));
1052 KASSERT((*skp == NULL && *nkp == NULL));
1055 if ((*skp = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL)
1058 PF_ACPY(&(*skp)->addr[pd->sidx], saddr, pd->af);
1059 PF_ACPY(&(*skp)->addr[pd->didx], daddr, pd->af);
1060 (*skp)->port[pd->sidx] = sport;
1061 (*skp)->port[pd->didx] = dport;
1062 (*skp)->proto = pd->proto;
1063 (*skp)->af = pd->af;
1066 if ((*nkp = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL)
1067 return (ENOMEM); /* caller must handle cleanup */
1069 /* XXX maybe just bcopy and TAILQ_INIT(&(*nkp)->states) */
1070 PF_ACPY(&(*nkp)->addr[0], &(*skp)->addr[0], pd->af);
1071 PF_ACPY(&(*nkp)->addr[1], &(*skp)->addr[1], pd->af);
1072 (*nkp)->port[0] = (*skp)->port[0];
1073 (*nkp)->port[1] = (*skp)->port[1];
1074 (*nkp)->proto = pd->proto;
1075 (*nkp)->af = pd->af;
1079 if (pd->dir == PF_IN) {
1091 pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
1092 struct pf_state_key *sks, struct pf_state *s)
1095 splassert(IPL_SOFTNET);
1101 if (pf_state_key_attach(skw, s, PF_SK_WIRE))
1103 s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
1105 if (pf_state_key_attach(skw, s, PF_SK_WIRE)) {
1107 pool_put(&V_pf_state_key_pl, sks);
1109 pool_put(&pf_state_key_pl, sks);
1113 if (pf_state_key_attach(sks, s, PF_SK_STACK)) {
1114 pf_state_key_detach(s, PF_SK_WIRE);
1119 if (s->id == 0 && s->creatorid == 0) {
1121 s->id = htobe64(V_pf_status.stateid++);
1122 s->creatorid = V_pf_status.hostid;
1124 s->id = htobe64(pf_status.stateid++);
1125 s->creatorid = pf_status.hostid;
1129 if (RB_INSERT(pf_state_tree_id, &V_tree_id, s) != NULL) {
1130 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1132 if (RB_INSERT(pf_state_tree_id, &tree_id, s) != NULL) {
1133 if (pf_status.debug >= PF_DEBUG_MISC) {
1135 printf("pf: state insert failed: "
1136 "id: %016llx creatorid: %08x",
1138 (unsigned long long)betoh64(s->id), ntohl(s->creatorid));
1140 betoh64(s->id), ntohl(s->creatorid));
1148 TAILQ_INSERT_TAIL(&V_state_list, s, entry_list);
1149 V_pf_status.fcounters[FCNT_STATE_INSERT]++;
1150 V_pf_status.states++;
1152 TAILQ_INSERT_TAIL(&state_list, s, entry_list);
1153 pf_status.fcounters[FCNT_STATE_INSERT]++;
1156 pfi_kif_ref(kif, PFI_KIF_REF_STATE);
1159 if (pfsync_insert_state_ptr != NULL)
1160 pfsync_insert_state_ptr(s);
1162 pfsync_insert_state(s);
1169 pf_find_state_byid(struct pf_state_cmp *key)
1172 V_pf_status.fcounters[FCNT_STATE_SEARCH]++;
1174 return (RB_FIND(pf_state_tree_id, &V_tree_id, (struct pf_state *)key));
1176 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1178 return (RB_FIND(pf_state_tree_id, &tree_id, (struct pf_state *)key));
1182 /* XXX debug function, intended to be removed one day */
1184 pf_compare_state_keys(struct pf_state_key *a, struct pf_state_key *b,
1185 struct pfi_kif *kif, u_int dir)
1187 /* a (from hdr) and b (new) must be exact opposites of each other */
1188 if (a->af == b->af && a->proto == b->proto &&
1189 PF_AEQ(&a->addr[0], &b->addr[1], a->af) &&
1190 PF_AEQ(&a->addr[1], &b->addr[0], a->af) &&
1191 a->port[0] == b->port[1] &&
1192 a->port[1] == b->port[0])
1195 /* mismatch. must not happen. */
1196 printf("pf: state key linking mismatch! dir=%s, "
1197 "if=%s, stored af=%u, a0: ",
1198 dir == PF_OUT ? "OUT" : "IN", kif->pfik_name, a->af);
1199 pf_print_host(&a->addr[0], a->port[0], a->af);
1201 pf_print_host(&a->addr[1], a->port[1], a->af);
1202 printf(", proto=%u", a->proto);
1203 printf(", found af=%u, a0: ", b->af);
1204 pf_print_host(&b->addr[0], b->port[0], b->af);
1206 pf_print_host(&b->addr[1], b->port[1], b->af);
1207 printf(", proto=%u", b->proto);
1215 pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir,
1216 struct mbuf *m, struct pf_mtag *pftag)
1218 pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir,
1222 struct pf_state_key *sk;
1223 struct pf_state_item *si;
1226 V_pf_status.fcounters[FCNT_STATE_SEARCH]++;
1228 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1232 if (dir == PF_OUT && pftag->statekey &&
1233 ((struct pf_state_key *)pftag->statekey)->reverse)
1234 sk = ((struct pf_state_key *)pftag->statekey)->reverse;
1237 if ((sk = RB_FIND(pf_state_tree, &V_pf_statetbl,
1239 if ((sk = RB_FIND(pf_state_tree, &pf_statetbl,
1241 (struct pf_state_key *)key)) == NULL)
1243 if (dir == PF_OUT && pftag->statekey &&
1244 pf_compare_state_keys(pftag->statekey, sk,
1246 ((struct pf_state_key *)
1247 pftag->statekey)->reverse = sk;
1248 sk->reverse = pftag->statekey;
1252 if (dir == PF_OUT && m->m_pkthdr.pf.statekey &&
1253 ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse)
1254 sk = ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse;
1257 if ((sk = RB_FIND(pf_state_tree, &V_pf_statetbl,
1259 if ((sk = RB_FIND(pf_state_tree, &pf_statetbl,
1261 (struct pf_state_key *)key)) == NULL)
1263 if (dir == PF_OUT && m->m_pkthdr.pf.statekey &&
1264 pf_compare_state_keys(m->m_pkthdr.pf.statekey, sk,
1266 ((struct pf_state_key *)
1267 m->m_pkthdr.pf.statekey)->reverse = sk;
1268 sk->reverse = m->m_pkthdr.pf.statekey;
1275 pftag->statekey = NULL;
1277 m->m_pkthdr.pf.statekey = NULL;
1280 /* list is sorted, if-bound states before floating ones */
1281 TAILQ_FOREACH(si, &sk->states, entry)
1283 if ((si->s->kif == V_pfi_all || si->s->kif == kif) &&
1285 if ((si->s->kif == pfi_all || si->s->kif == kif) &&
1287 sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
1288 si->s->key[PF_SK_STACK]))
1295 pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
1297 struct pf_state_key *sk;
1298 struct pf_state_item *si, *ret = NULL;
1301 V_pf_status.fcounters[FCNT_STATE_SEARCH]++;
1303 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1307 sk = RB_FIND(pf_state_tree, &V_pf_statetbl, (struct pf_state_key *)key);
1309 sk = RB_FIND(pf_state_tree, &pf_statetbl, (struct pf_state_key *)key);
1312 TAILQ_FOREACH(si, &sk->states, entry)
1313 if (dir == PF_INOUT ||
1314 (sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
1315 si->s->key[PF_SK_STACK]))) {
1325 return (ret ? ret->s : NULL);
1328 /* END state table stuff */
1332 pf_purge_thread(void *v)
1339 CURVNET_SET((struct vnet *)v);
1342 tsleep(pf_purge_thread, PWAIT, "pftm", 1 * hz);
1345 sx_slock(&V_pf_consistency_lock);
1349 if (V_pf_end_threads) {
1351 sx_sunlock(&V_pf_consistency_lock);
1352 sx_xlock(&V_pf_consistency_lock);
1355 pf_purge_expired_states(V_pf_status.states, 1);
1356 pf_purge_expired_fragments();
1357 pf_purge_expired_src_nodes(1);
1360 sx_xunlock(&V_pf_consistency_lock);
1362 wakeup(pf_purge_thread);
1368 /* process a fraction of the state table every second */
1370 if (!pf_purge_expired_states(1 + (V_pf_status.states /
1371 V_pf_default_rule.timeout[PFTM_INTERVAL]), 0)) {
1373 sx_sunlock(&V_pf_consistency_lock);
1374 sx_xlock(&V_pf_consistency_lock);
1378 pf_purge_expired_states(1 + (V_pf_status.states /
1379 V_pf_default_rule.timeout[PFTM_INTERVAL]), 1);
1382 pf_purge_expired_states(1 + (pf_status.states
1383 / pf_default_rule.timeout[PFTM_INTERVAL]));
1386 /* purge other expired types every PFTM_INTERVAL seconds */
1388 if (++nloops >= V_pf_default_rule.timeout[PFTM_INTERVAL]) {
1390 if (++nloops >= pf_default_rule.timeout[PFTM_INTERVAL]) {
1392 pf_purge_expired_fragments();
1393 pf_purge_expired_src_nodes(0);
1401 sx_xunlock(&V_pf_consistency_lock);
1403 sx_sunlock(&V_pf_consistency_lock);
1410 pf_state_expires(const struct pf_state *state)
1417 /* handle all PFTM_* > PFTM_MAX here */
1418 if (state->timeout == PFTM_PURGE)
1419 return (time_second);
1420 if (state->timeout == PFTM_UNTIL_PACKET)
1423 KASSERT(state->timeout != PFTM_UNLINKED,
1424 ("pf_state_expires: timeout == PFTM_UNLINKED"));
1425 KASSERT((state->timeout < PFTM_MAX),
1426 ("pf_state_expires: timeout > PFTM_MAX"));
1428 KASSERT(state->timeout != PFTM_UNLINKED);
1429 KASSERT(state->timeout < PFTM_MAX);
1431 timeout = state->rule.ptr->timeout[state->timeout];
1434 timeout = V_pf_default_rule.timeout[state->timeout];
1436 timeout = pf_default_rule.timeout[state->timeout];
1438 start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
1440 end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
1441 states = state->rule.ptr->states_cur;
1444 start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
1445 end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
1446 states = V_pf_status.states;
1448 start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
1449 end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
1450 states = pf_status.states;
1453 if (end && states > start && start < end) {
1455 return (state->expire + timeout * (end - states) /
1458 return (time_second);
1460 return (state->expire + timeout);
1465 pf_purge_expired_src_nodes(int waslocked)
1468 pf_purge_expired_src_nodes(int waslocked)
1471 struct pf_src_node *cur, *next;
1472 int locked = waslocked;
1475 for (cur = RB_MIN(pf_src_tree, &V_tree_src_tracking); cur; cur = next) {
1476 next = RB_NEXT(pf_src_tree, &V_tree_src_tracking, cur);
1478 for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
1479 next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);
1482 if (cur->states <= 0 && cur->expire <= time_second) {
1485 if (!sx_try_upgrade(&V_pf_consistency_lock))
1488 rw_enter_write(&pf_consistency_lock);
1490 next = RB_NEXT(pf_src_tree,
1492 &V_tree_src_tracking, cur);
1494 &tree_src_tracking, cur);
1498 if (cur->rule.ptr != NULL) {
1499 cur->rule.ptr->src_nodes--;
1500 if (cur->rule.ptr->states_cur <= 0 &&
1501 cur->rule.ptr->max_src_nodes <= 0)
1502 pf_rm_rule(NULL, cur->rule.ptr);
1505 RB_REMOVE(pf_src_tree, &V_tree_src_tracking, cur);
1506 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
1507 V_pf_status.src_nodes--;
1508 pool_put(&V_pf_src_tree_pl, cur);
1510 RB_REMOVE(pf_src_tree, &tree_src_tracking, cur);
1511 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
1512 pf_status.src_nodes--;
1513 pool_put(&pf_src_tree_pl, cur);
1518 if (locked && !waslocked)
1521 sx_downgrade(&V_pf_consistency_lock);
1525 rw_exit_write(&pf_consistency_lock);
1530 pf_src_tree_remove_state(struct pf_state *s)
1534 if (s->src_node != NULL) {
1536 --s->src_node->conn;
1537 if (--s->src_node->states <= 0) {
1538 timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
1542 V_pf_default_rule.timeout[PFTM_SRC_NODE];
1544 pf_default_rule.timeout[PFTM_SRC_NODE];
1546 s->src_node->expire = time_second + timeout;
1549 if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
1550 if (--s->nat_src_node->states <= 0) {
1551 timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
1555 V_pf_default_rule.timeout[PFTM_SRC_NODE];
1557 pf_default_rule.timeout[PFTM_SRC_NODE];
1559 s->nat_src_node->expire = time_second + timeout;
1562 s->src_node = s->nat_src_node = NULL;
1565 /* callers should be at splsoftnet */
1567 pf_unlink_state(struct pf_state *cur)
1570 if (cur->local_flags & PFSTATE_EXPIRING)
1572 cur->local_flags |= PFSTATE_EXPIRING;
1574 splassert(IPL_SOFTNET);
1577 if (cur->src.state == PF_TCPS_PROXY_DST) {
1578 /* XXX wire key the right one? */
1580 pf_send_tcp(NULL, cur->rule.ptr, cur->key[PF_SK_WIRE]->af,
1582 pf_send_tcp(cur->rule.ptr, cur->key[PF_SK_WIRE]->af,
1584 &cur->key[PF_SK_WIRE]->addr[1],
1585 &cur->key[PF_SK_WIRE]->addr[0],
1586 cur->key[PF_SK_WIRE]->port[1],
1587 cur->key[PF_SK_WIRE]->port[0],
1588 cur->src.seqhi, cur->src.seqlo + 1,
1589 TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
1592 RB_REMOVE(pf_state_tree_id, &V_tree_id, cur);
1594 RB_REMOVE(pf_state_tree_id, &tree_id, cur);
1597 if (cur->state_flags & PFSTATE_PFLOW)
1599 if (export_pflow_ptr != NULL)
1600 export_pflow_ptr(cur);
1607 if (pfsync_delete_state_ptr != NULL)
1608 pfsync_delete_state_ptr(cur);
1610 pfsync_delete_state(cur);
1613 cur->timeout = PFTM_UNLINKED;
1614 pf_src_tree_remove_state(cur);
1615 pf_detach_state(cur);
1618 /* callers should be at splsoftnet and hold the
1619 * write_lock on pf_consistency_lock */
1621 pf_free_state(struct pf_state *cur)
1624 splassert(IPL_SOFTNET);
1629 if (pfsync_state_in_use_ptr != NULL &&
1630 pfsync_state_in_use_ptr(cur))
1632 if (pfsync_state_in_use(cur))
1637 KASSERT(cur->timeout == PFTM_UNLINKED,
1638 ("pf_free_state: cur->timeout != PFTM_UNLINKED"));
1640 KASSERT(cur->timeout == PFTM_UNLINKED);
1642 if (--cur->rule.ptr->states_cur <= 0 &&
1643 cur->rule.ptr->src_nodes <= 0)
1644 pf_rm_rule(NULL, cur->rule.ptr);
1645 if (cur->nat_rule.ptr != NULL)
1646 if (--cur->nat_rule.ptr->states_cur <= 0 &&
1647 cur->nat_rule.ptr->src_nodes <= 0)
1648 pf_rm_rule(NULL, cur->nat_rule.ptr);
1649 if (cur->anchor.ptr != NULL)
1650 if (--cur->anchor.ptr->states_cur <= 0)
1651 pf_rm_rule(NULL, cur->anchor.ptr);
1652 pf_normalize_tcp_cleanup(cur);
1653 pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE);
1655 TAILQ_REMOVE(&V_state_list, cur, entry_list);
1657 TAILQ_REMOVE(&state_list, cur, entry_list);
1660 pf_tag_unref(cur->tag);
1662 pool_put(&V_pf_state_pl, cur);
1663 V_pf_status.fcounters[FCNT_STATE_REMOVALS]++;
1664 V_pf_status.states--;
1666 pool_put(&pf_state_pl, cur);
1667 pf_status.fcounters[FCNT_STATE_REMOVALS]++;
1674 pf_purge_expired_states(u_int32_t maxcheck, int waslocked)
1677 pf_purge_expired_states(u_int32_t maxcheck)
1680 static struct pf_state *cur = NULL;
1681 struct pf_state *next;
1683 int locked = waslocked;
1688 while (maxcheck--) {
1689 /* wrap to start of list when we hit the end */
1692 cur = TAILQ_FIRST(&V_state_list);
1694 cur = TAILQ_FIRST(&state_list);
1697 break; /* list empty */
1700 /* get next state, as cur may get deleted */
1701 next = TAILQ_NEXT(cur, entry_list);
1703 if (cur->timeout == PFTM_UNLINKED) {
1704 /* free unlinked state */
1707 if (!sx_try_upgrade(&V_pf_consistency_lock))
1710 rw_enter_write(&pf_consistency_lock);
1715 } else if (pf_state_expires(cur) <= time_second) {
1716 /* unlink and free expired state */
1717 pf_unlink_state(cur);
1720 if (!sx_try_upgrade(&V_pf_consistency_lock))
1723 rw_enter_write(&pf_consistency_lock);
1733 if (!waslocked && locked)
1734 sx_downgrade(&V_pf_consistency_lock);
1739 rw_exit_write(&pf_consistency_lock);
1744 pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
1746 if (aw->type != PF_ADDR_TABLE)
1748 if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname, 1)) == NULL)
1754 pf_tbladdr_remove(struct pf_addr_wrap *aw)
1756 if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
1758 pfr_detach_table(aw->p.tbl);
1763 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
1765 struct pfr_ktable *kt = aw->p.tbl;
1767 if (aw->type != PF_ADDR_TABLE || kt == NULL)
1769 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1770 kt = kt->pfrkt_root;
1772 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
1777 pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
1782 u_int32_t a = ntohl(addr->addr32[0]);
1783 printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
1795 u_int8_t i, curstart, curend, maxstart, maxend;
1796 curstart = curend = maxstart = maxend = 255;
1797 for (i = 0; i < 8; i++) {
1798 if (!addr->addr16[i]) {
1799 if (curstart == 255)
1803 if ((curend - curstart) >
1804 (maxend - maxstart)) {
1805 maxstart = curstart;
1808 curstart = curend = 255;
1811 if ((curend - curstart) >
1812 (maxend - maxstart)) {
1813 maxstart = curstart;
1816 for (i = 0; i < 8; i++) {
1817 if (i >= maxstart && i <= maxend) {
1823 b = ntohs(addr->addr16[i]);
1840 pf_print_state(struct pf_state *s)
1842 pf_print_state_parts(s, NULL, NULL);
1846 pf_print_state_parts(struct pf_state *s,
1847 struct pf_state_key *skwp, struct pf_state_key *sksp)
1849 struct pf_state_key *skw, *sks;
1850 u_int8_t proto, dir;
1852 /* Do our best to fill these, but they're skipped if NULL */
1853 skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
1854 sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
1855 proto = skw ? skw->proto : (sks ? sks->proto : 0);
1856 dir = s ? s->direction : 0;
1874 case IPPROTO_ICMPV6:
1878 printf("%u", skw->proto);
1891 pf_print_host(&skw->addr[0], skw->port[0], skw->af);
1893 pf_print_host(&skw->addr[1], skw->port[1], skw->af);
1898 pf_print_host(&sks->addr[0], sks->port[0], sks->af);
1900 pf_print_host(&sks->addr[1], sks->port[1], sks->af);
1905 if (proto == IPPROTO_TCP) {
1906 printf(" [lo=%u high=%u win=%u modulator=%u",
1907 s->src.seqlo, s->src.seqhi,
1908 s->src.max_win, s->src.seqdiff);
1909 if (s->src.wscale && s->dst.wscale)
1910 printf(" wscale=%u",
1911 s->src.wscale & PF_WSCALE_MASK);
1913 printf(" [lo=%u high=%u win=%u modulator=%u",
1914 s->dst.seqlo, s->dst.seqhi,
1915 s->dst.max_win, s->dst.seqdiff);
1916 if (s->src.wscale && s->dst.wscale)
1917 printf(" wscale=%u",
1918 s->dst.wscale & PF_WSCALE_MASK);
1921 printf(" %u:%u", s->src.state, s->dst.state);
1926 pf_print_flags(u_int8_t f)
1948 #define PF_SET_SKIP_STEPS(i) \
1950 while (head[i] != cur) { \
1951 head[i]->skip[i].ptr = cur; \
1952 head[i] = TAILQ_NEXT(head[i], entries); \
1957 pf_calc_skip_steps(struct pf_rulequeue *rules)
1959 struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
1962 cur = TAILQ_FIRST(rules);
1964 for (i = 0; i < PF_SKIP_COUNT; ++i)
1966 while (cur != NULL) {
1968 if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
1969 PF_SET_SKIP_STEPS(PF_SKIP_IFP);
1970 if (cur->direction != prev->direction)
1971 PF_SET_SKIP_STEPS(PF_SKIP_DIR);
1972 if (cur->af != prev->af)
1973 PF_SET_SKIP_STEPS(PF_SKIP_AF);
1974 if (cur->proto != prev->proto)
1975 PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
1976 if (cur->src.neg != prev->src.neg ||
1977 pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
1978 PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
1979 if (cur->src.port[0] != prev->src.port[0] ||
1980 cur->src.port[1] != prev->src.port[1] ||
1981 cur->src.port_op != prev->src.port_op)
1982 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
1983 if (cur->dst.neg != prev->dst.neg ||
1984 pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
1985 PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
1986 if (cur->dst.port[0] != prev->dst.port[0] ||
1987 cur->dst.port[1] != prev->dst.port[1] ||
1988 cur->dst.port_op != prev->dst.port_op)
1989 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
1992 cur = TAILQ_NEXT(cur, entries);
1994 for (i = 0; i < PF_SKIP_COUNT; ++i)
1995 PF_SET_SKIP_STEPS(i);
1999 pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
2001 if (aw1->type != aw2->type)
2003 switch (aw1->type) {
2004 case PF_ADDR_ADDRMASK:
2006 if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
2008 if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
2011 case PF_ADDR_DYNIFTL:
2012 return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
2013 case PF_ADDR_NOROUTE:
2014 case PF_ADDR_URPFFAILED:
2017 return (aw1->p.tbl != aw2->p.tbl);
2018 case PF_ADDR_RTLABEL:
2019 return (aw1->v.rtlabel != aw2->v.rtlabel);
2021 printf("invalid address type: %d\n", aw1->type);
2027 pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
2033 l = cksum + old - new;
2034 l = (l >> 16) + (l & 65535);
2042 pf_change_ap(struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc,
2043 struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af)
2048 PF_ACPY(&ao, a, af);
2056 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2057 ao.addr16[0], an->addr16[0], 0),
2058 ao.addr16[1], an->addr16[1], 0);
2060 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
2061 ao.addr16[0], an->addr16[0], u),
2062 ao.addr16[1], an->addr16[1], u),
2068 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2069 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2070 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
2071 ao.addr16[0], an->addr16[0], u),
2072 ao.addr16[1], an->addr16[1], u),
2073 ao.addr16[2], an->addr16[2], u),
2074 ao.addr16[3], an->addr16[3], u),
2075 ao.addr16[4], an->addr16[4], u),
2076 ao.addr16[5], an->addr16[5], u),
2077 ao.addr16[6], an->addr16[6], u),
2078 ao.addr16[7], an->addr16[7], u),
2086 /* Changes a u_int32_t. Uses a void * so there are no align restrictions */
2088 pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
2092 memcpy(&ao, a, sizeof(ao));
2093 memcpy(a, &an, sizeof(u_int32_t));
2094 *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
2095 ao % 65536, an % 65536, u);
2100 pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
2104 PF_ACPY(&ao, a, AF_INET6);
2105 PF_ACPY(a, an, AF_INET6);
2107 *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2108 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2109 pf_cksum_fixup(pf_cksum_fixup(*c,
2110 ao.addr16[0], an->addr16[0], u),
2111 ao.addr16[1], an->addr16[1], u),
2112 ao.addr16[2], an->addr16[2], u),
2113 ao.addr16[3], an->addr16[3], u),
2114 ao.addr16[4], an->addr16[4], u),
2115 ao.addr16[5], an->addr16[5], u),
2116 ao.addr16[6], an->addr16[6], u),
2117 ao.addr16[7], an->addr16[7], u);
2122 pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
2123 struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
2124 u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
2126 struct pf_addr oia, ooa;
2128 PF_ACPY(&oia, ia, af);
2130 PF_ACPY(&ooa, oa, af);
2132 /* Change inner protocol port, fix inner protocol checksum. */
2134 u_int16_t oip = *ip;
2141 *pc = pf_cksum_fixup(*pc, oip, *ip, u);
2142 *ic = pf_cksum_fixup(*ic, oip, *ip, 0);
2144 *ic = pf_cksum_fixup(*ic, opc, *pc, 0);
2146 /* Change inner ip address, fix inner ip and icmp checksums. */
2147 PF_ACPY(ia, na, af);
2151 u_int32_t oh2c = *h2c;
2153 *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
2154 oia.addr16[0], ia->addr16[0], 0),
2155 oia.addr16[1], ia->addr16[1], 0);
2156 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2157 oia.addr16[0], ia->addr16[0], 0),
2158 oia.addr16[1], ia->addr16[1], 0);
2159 *ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
2165 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2166 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2167 pf_cksum_fixup(pf_cksum_fixup(*ic,
2168 oia.addr16[0], ia->addr16[0], u),
2169 oia.addr16[1], ia->addr16[1], u),
2170 oia.addr16[2], ia->addr16[2], u),
2171 oia.addr16[3], ia->addr16[3], u),
2172 oia.addr16[4], ia->addr16[4], u),
2173 oia.addr16[5], ia->addr16[5], u),
2174 oia.addr16[6], ia->addr16[6], u),
2175 oia.addr16[7], ia->addr16[7], u);
2179 /* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
2181 PF_ACPY(oa, na, af);
2185 *hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
2186 ooa.addr16[0], oa->addr16[0], 0),
2187 ooa.addr16[1], oa->addr16[1], 0);
2192 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2193 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2194 pf_cksum_fixup(pf_cksum_fixup(*ic,
2195 ooa.addr16[0], oa->addr16[0], u),
2196 ooa.addr16[1], oa->addr16[1], u),
2197 ooa.addr16[2], oa->addr16[2], u),
2198 ooa.addr16[3], oa->addr16[3], u),
2199 ooa.addr16[4], oa->addr16[4], u),
2200 ooa.addr16[5], oa->addr16[5], u),
2201 ooa.addr16[6], oa->addr16[6], u),
2202 ooa.addr16[7], oa->addr16[7], u);
2211 * Need to modulate the sequence numbers in the TCP SACK option
2212 * (credits to Krzysztof Pfaff for report and patch)
2215 pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
2216 struct tcphdr *th, struct pf_state_peer *dst)
2218 int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
2220 u_int8_t opts[TCP_MAXOLEN], *opt = opts;
2222 u_int8_t opts[MAX_TCPOPTLEN], *opt = opts;
2224 int copyback = 0, i, olen;
2225 struct sackblk sack;
2227 #define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2)
2228 if (hlen < TCPOLEN_SACKLEN ||
2229 !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
2232 while (hlen >= TCPOLEN_SACKLEN) {
2235 case TCPOPT_EOL: /* FALLTHROUGH */
2243 if (olen >= TCPOLEN_SACKLEN) {
2244 for (i = 2; i + TCPOLEN_SACK <= olen;
2245 i += TCPOLEN_SACK) {
2246 memcpy(&sack, &opt[i], sizeof(sack));
2247 pf_change_a(&sack.start, &th->th_sum,
2248 htonl(ntohl(sack.start) -
2250 pf_change_a(&sack.end, &th->th_sum,
2251 htonl(ntohl(sack.end) -
2253 memcpy(&opt[i], &sack, sizeof(sack));
2268 m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts);
2270 m_copyback(m, off + sizeof(*th), thoptlen, opts);
2277 pf_send_tcp(struct mbuf *replyto, const struct pf_rule *r, sa_family_t af,
2279 pf_send_tcp(const struct pf_rule *r, sa_family_t af,
2281 const struct pf_addr *saddr, const struct pf_addr *daddr,
2282 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
2283 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
2284 u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp)
2297 struct pf_mtag *pf_mtag;
2311 , ("Unsupported AF %d", af));
2320 #endif /* __FreeBSD__ */
2322 /* maximum segment size tcp option */
2323 tlen = sizeof(struct tcphdr);
2330 len = sizeof(struct ip) + tlen;
2335 len = sizeof(struct ip6_hdr) + tlen;
2340 /* create outgoing mbuf */
2341 m = m_gethdr(M_DONTWAIT, MT_HEADER);
2346 mac_netinet_firewall_send(m);
2348 if ((pf_mtag = pf_get_mtag(m)) == NULL) {
2355 m->m_flags |= M_SKIP_FIREWALL;
2356 pf_mtag->tag = rtag;
2358 m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
2359 m->m_pkthdr.pf.tag = rtag;
2362 if (r != NULL && r->rtableid >= 0)
2365 M_SETFIB(m, r->rtableid);
2366 pf_mtag->rtableid = r->rtableid;
2368 m->m_pkthdr.pf.rtableid = r->rtableid;
2375 if (r != NULL && r->qid) {
2377 pf_mtag->qid = r->qid;
2379 /* add hints for ecn */
2380 pf_mtag->hdr = mtod(m, struct ip *);
2382 m->m_pkthdr.pf.qid = r->qid;
2383 /* add hints for ecn */
2384 m->m_pkthdr.pf.hdr = mtod(m, struct ip *);
2388 m->m_data += max_linkhdr;
2389 m->m_pkthdr.len = m->m_len = len;
2390 m->m_pkthdr.rcvif = NULL;
2391 bzero(m->m_data, len);
2395 h = mtod(m, struct ip *);
2397 /* IP header fields included in the TCP checksum */
2398 h->ip_p = IPPROTO_TCP;
2399 h->ip_len = htons(tlen);
2400 h->ip_src.s_addr = saddr->v4.s_addr;
2401 h->ip_dst.s_addr = daddr->v4.s_addr;
2403 th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
2408 h6 = mtod(m, struct ip6_hdr *);
2410 /* IP header fields included in the TCP checksum */
2411 h6->ip6_nxt = IPPROTO_TCP;
2412 h6->ip6_plen = htons(tlen);
2413 memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
2414 memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));
2416 th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
2422 th->th_sport = sport;
2423 th->th_dport = dport;
2424 th->th_seq = htonl(seq);
2425 th->th_ack = htonl(ack);
2426 th->th_off = tlen >> 2;
2427 th->th_flags = flags;
2428 th->th_win = htons(win);
2431 opt = (char *)(th + 1);
2432 opt[0] = TCPOPT_MAXSEG;
2435 bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
2442 th->th_sum = in_cksum(m, len);
2444 /* Finish the IP header */
2446 h->ip_hl = sizeof(*h) >> 2;
2447 h->ip_tos = IPTOS_LOWDELAY;
2449 h->ip_off = V_path_mtu_discovery ? IP_DF : 0;
2451 h->ip_ttl = ttl ? ttl : V_ip_defttl;
2453 h->ip_len = htons(len);
2454 h->ip_off = htons(ip_mtudisc ? IP_DF : 0);
2455 h->ip_ttl = ttl ? ttl : ip_defttl;
2461 ip_output(m, (void *)NULL, (void *)NULL, 0,
2462 (void *)NULL, (void *)NULL);
2464 #else /* ! __FreeBSD__ */
2465 ip_output(m, (void *)NULL, (void *)NULL, 0,
2466 (void *)NULL, (void *)NULL);
2471 struct ether_header *e = (void *)ro.ro_dst.sa_data;
2479 ro.ro_dst.sa_len = sizeof(ro.ro_dst);
2480 ro.ro_dst.sa_family = pseudo_AF_HDRCMPLT;
2481 bcopy(eh->ether_dhost, e->ether_shost, ETHER_ADDR_LEN);
2482 bcopy(eh->ether_shost, e->ether_dhost, ETHER_ADDR_LEN);
2483 e->ether_type = eh->ether_type;
2486 /* XXX_IMPORT: later */
2487 ip_output(m, (void *)NULL, &ro, 0,
2488 (void *)NULL, (void *)NULL);
2490 #else /* ! __FreeBSD__ */
2491 ip_output(m, (void *)NULL, &ro, IP_ROUTETOETHER,
2492 (void *)NULL, (void *)NULL);
2500 th->th_sum = in6_cksum(m, IPPROTO_TCP,
2501 sizeof(struct ip6_hdr), tlen);
2503 h6->ip6_vfc |= IPV6_VERSION;
2504 h6->ip6_hlim = IPV6_DEFHLIM;
2508 ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
2511 ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
2519 pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
2527 struct pf_mtag *pf_mtag;
2531 m0 = m_copypacket(m, M_DONTWAIT);
2535 if ((m0 = m_copy(m, 0, M_COPYALL)) == NULL)
2540 if ((pf_mtag = pf_get_mtag(m0)) == NULL)
2543 m0->m_flags |= M_SKIP_FIREWALL;
2545 m0->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
2548 if (r->rtableid >= 0)
2551 M_SETFIB(m0, r->rtableid);
2552 pf_mtag->rtableid = r->rtableid;
2554 m0->m_pkthdr.pf.rtableid = r->rtableid;
2563 pf_mtag->qid = r->qid;
2564 /* add hints for ecn */
2565 pf_mtag->hdr = mtod(m0, struct ip *);
2567 m0->m_pkthdr.pf.qid = r->qid;
2568 /* add hints for ecn */
2569 m0->m_pkthdr.pf.hdr = mtod(m0, struct ip *);
2578 /* icmp_error() expects host byte ordering */
2579 ip = mtod(m0, struct ip *);
2583 icmp_error(m0, type, code, 0, 0);
2586 icmp_error(m0, type, code, 0, 0);
2595 icmp6_error(m0, type, code, 0);
2605 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
2606 * If n is 0, they match if they are equal. If n is != 0, they match if they
2610 pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
2611 struct pf_addr *b, sa_family_t af)
2618 if ((a->addr32[0] & m->addr32[0]) ==
2619 (b->addr32[0] & m->addr32[0]))
2625 if (((a->addr32[0] & m->addr32[0]) ==
2626 (b->addr32[0] & m->addr32[0])) &&
2627 ((a->addr32[1] & m->addr32[1]) ==
2628 (b->addr32[1] & m->addr32[1])) &&
2629 ((a->addr32[2] & m->addr32[2]) ==
2630 (b->addr32[2] & m->addr32[2])) &&
2631 ((a->addr32[3] & m->addr32[3]) ==
2632 (b->addr32[3] & m->addr32[3])))
2651 * Return 1 if b <= a <= e, otherwise return 0.
2654 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
2655 struct pf_addr *a, sa_family_t af)
2660 if ((a->addr32[0] < b->addr32[0]) ||
2661 (a->addr32[0] > e->addr32[0]))
2670 for (i = 0; i < 4; ++i)
2671 if (a->addr32[i] > b->addr32[i])
2673 else if (a->addr32[i] < b->addr32[i])
2676 for (i = 0; i < 4; ++i)
2677 if (a->addr32[i] < e->addr32[i])
2679 else if (a->addr32[i] > e->addr32[i])
2689 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
2693 return ((p > a1) && (p < a2));
2695 return ((p < a1) || (p > a2));
2697 return ((p >= a1) && (p <= a2));
2711 return (0); /* never reached */
2715 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
2720 return (pf_match(op, a1, a2, p));
2724 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
2726 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2728 return (pf_match(op, a1, a2, u));
2732 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
2734 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2736 return (pf_match(op, a1, a2, g));
2741 pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag,
2742 struct pf_mtag *pf_mtag)
2744 pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag)
2749 *tag = pf_mtag->tag;
2751 *tag = m->m_pkthdr.pf.tag;
2754 return ((!r->match_tag_not && r->match_tag == *tag) ||
2755 (r->match_tag_not && r->match_tag != *tag));
2760 pf_tag_packet(struct mbuf *m, int tag, int rtableid,
2761 struct pf_mtag *pf_mtag)
2763 pf_tag_packet(struct mbuf *m, int tag, int rtableid)
2766 if (tag <= 0 && rtableid < 0)
2773 m->m_pkthdr.pf.tag = tag;
2778 M_SETFIB(m, rtableid);
2781 m->m_pkthdr.pf.rtableid = rtableid;
2788 pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n,
2789 struct pf_rule **r, struct pf_rule **a, int *match)
2791 struct pf_anchor_stackframe *f;
2793 (*r)->anchor->match = 0;
2797 if (*depth >= sizeof(V_pf_anchor_stack) /
2798 sizeof(V_pf_anchor_stack[0])) {
2800 if (*depth >= sizeof(pf_anchor_stack) /
2801 sizeof(pf_anchor_stack[0])) {
2803 printf("pf_step_into_anchor: stack overflow\n");
2804 *r = TAILQ_NEXT(*r, entries);
2806 } else if (*depth == 0 && a != NULL)
2809 f = V_pf_anchor_stack + (*depth)++;
2811 f = pf_anchor_stack + (*depth)++;
2815 if ((*r)->anchor_wildcard) {
2816 f->parent = &(*r)->anchor->children;
2817 if ((f->child = RB_MIN(pf_anchor_node, f->parent)) ==
2822 *rs = &f->child->ruleset;
2826 *rs = &(*r)->anchor->ruleset;
2828 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2832 pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n,
2833 struct pf_rule **r, struct pf_rule **a, int *match)
2835 struct pf_anchor_stackframe *f;
2842 f = V_pf_anchor_stack + *depth - 1;
2844 f = pf_anchor_stack + *depth - 1;
2846 if (f->parent != NULL && f->child != NULL) {
2847 if (f->child->match ||
2848 (match != NULL && *match)) {
2849 f->r->anchor->match = 1;
2852 f->child = RB_NEXT(pf_anchor_node, f->parent, f->child);
2853 if (f->child != NULL) {
2854 *rs = &f->child->ruleset;
2855 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2863 if (*depth == 0 && a != NULL)
2866 if (f->r->anchor->match || (match != NULL && *match))
2867 quick = f->r->quick;
2868 *r = TAILQ_NEXT(f->r, entries);
2869 } while (*r == NULL);
2876 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
2877 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
2882 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2883 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2887 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2888 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2889 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
2890 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
2891 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
2892 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
2893 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
2894 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
2900 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
2905 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
2909 if (addr->addr32[3] == 0xffffffff) {
2910 addr->addr32[3] = 0;
2911 if (addr->addr32[2] == 0xffffffff) {
2912 addr->addr32[2] = 0;
2913 if (addr->addr32[1] == 0xffffffff) {
2914 addr->addr32[1] = 0;
2916 htonl(ntohl(addr->addr32[0]) + 1);
2919 htonl(ntohl(addr->addr32[1]) + 1);
2922 htonl(ntohl(addr->addr32[2]) + 1);
2925 htonl(ntohl(addr->addr32[3]) + 1);
2933 pf_socket_lookup(int direction, struct pf_pdesc *pd, struct inpcb *inp_arg)
2935 pf_socket_lookup(int direction, struct pf_pdesc *pd)
2938 struct pf_addr *saddr, *daddr;
2939 u_int16_t sport, dport;
2941 struct inpcbinfo *pi;
2943 struct inpcbtable *tb;
2949 pd->lookup.uid = UID_MAX;
2950 pd->lookup.gid = GID_MAX;
2951 pd->lookup.pid = NO_PID;
2954 if (inp_arg != NULL) {
2955 INP_LOCK_ASSERT(inp_arg);
2956 pd->lookup.uid = inp_arg->inp_cred->cr_uid;
2957 pd->lookup.gid = inp_arg->inp_cred->cr_groups[0];
2962 switch (pd->proto) {
2964 if (pd->hdr.tcp == NULL)
2966 sport = pd->hdr.tcp->th_sport;
2967 dport = pd->hdr.tcp->th_dport;
2975 if (pd->hdr.udp == NULL)
2977 sport = pd->hdr.udp->uh_sport;
2978 dport = pd->hdr.udp->uh_dport;
2988 if (direction == PF_IN) {
3005 * XXXRW: would be nice if we had an mbuf here so that we
3006 * could use in_pcblookup_mbuf().
3008 inp = in_pcblookup(pi, saddr->v4, sport, daddr->v4,
3009 dport, INPLOOKUP_RLOCKPCB, NULL);
3011 inp = in_pcblookup(pi, saddr->v4, sport,
3012 daddr->v4, dport, INPLOOKUP_WILDCARD |
3013 INPLOOKUP_RLOCKPCB, NULL);
3018 inp = in_pcbhashlookup(tb, saddr->v4, sport, daddr->v4, dport);
3020 inp = in_pcblookup_listen(tb, daddr->v4, dport, 0,
3032 * XXXRW: would be nice if we had an mbuf here so that we
3033 * could use in6_pcblookup_mbuf().
3035 inp = in6_pcblookup(pi, &saddr->v6, sport,
3036 &daddr->v6, dport, INPLOOKUP_RLOCKPCB, NULL);
3038 inp = in6_pcblookup(pi, &saddr->v6, sport,
3039 &daddr->v6, dport, INPLOOKUP_WILDCARD |
3040 INPLOOKUP_RLOCKPCB, NULL);
3045 inp = in6_pcbhashlookup(tb, &saddr->v6, sport, &daddr->v6,
3048 inp = in6_pcblookup_listen(tb, &daddr->v6, dport, 0,
3061 INP_RLOCK_ASSERT(inp);
3062 pd->lookup.uid = inp->inp_cred->cr_uid;
3063 pd->lookup.gid = inp->inp_cred->cr_groups[0];
3066 pd->lookup.uid = inp->inp_socket->so_euid;
3067 pd->lookup.gid = inp->inp_socket->so_egid;
3068 pd->lookup.pid = inp->inp_socket->so_cpid;
3074 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3078 u_int8_t *opt, optlen;
3079 u_int8_t wscale = 0;
3081 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3082 if (hlen <= sizeof(struct tcphdr))
3084 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3086 opt = hdr + sizeof(struct tcphdr);
3087 hlen -= sizeof(struct tcphdr);
3097 if (wscale > TCP_MAX_WINSHIFT)
3098 wscale = TCP_MAX_WINSHIFT;
3099 wscale |= PF_WSCALE_FLAG;
3114 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3118 u_int8_t *opt, optlen;
3120 u_int16_t mss = V_tcp_mssdflt;
3122 u_int16_t mss = tcp_mssdflt;
3125 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3126 if (hlen <= sizeof(struct tcphdr))
3128 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3130 opt = hdr + sizeof(struct tcphdr);
3131 hlen -= sizeof(struct tcphdr);
3132 while (hlen >= TCPOLEN_MAXSEG) {
3140 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
3156 pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer)
3159 struct sockaddr_in *dst;
3163 struct sockaddr_in6 *dst6;
3164 struct route_in6 ro6;
3166 struct rtentry *rt = NULL;
3169 u_int16_t mss = V_tcp_mssdflt;
3172 u_int16_t mss = tcp_mssdflt;
3178 hlen = sizeof(struct ip);
3179 bzero(&ro, sizeof(ro));
3180 dst = (struct sockaddr_in *)&ro.ro_dst;
3181 dst->sin_family = AF_INET;
3182 dst->sin_len = sizeof(*dst);
3183 dst->sin_addr = addr->v4;
3185 #ifdef RTF_PRCLONING
3186 rtalloc_ign(&ro, (RTF_CLONING | RTF_PRCLONING));
3187 #else /* !RTF_PRCLONING */
3188 in_rtalloc_ign(&ro, 0, 0);
3190 #else /* ! __FreeBSD__ */
3191 rtalloc_noclone(&ro, NO_CLONING);
3198 hlen = sizeof(struct ip6_hdr);
3199 bzero(&ro6, sizeof(ro6));
3200 dst6 = (struct sockaddr_in6 *)&ro6.ro_dst;
3201 dst6->sin6_family = AF_INET6;
3202 dst6->sin6_len = sizeof(*dst6);
3203 dst6->sin6_addr = addr->v6;
3205 #ifdef RTF_PRCLONING
3206 rtalloc_ign((struct route *)&ro6,
3207 (RTF_CLONING | RTF_PRCLONING));
3208 #else /* !RTF_PRCLONING */
3209 rtalloc_ign((struct route *)&ro6, 0);
3211 #else /* ! __FreeBSD__ */
3212 rtalloc_noclone((struct route *)&ro6, NO_CLONING);
3219 if (rt && rt->rt_ifp) {
3220 mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr);
3222 mss = max(V_tcp_mssdflt, mss);
3224 mss = max(tcp_mssdflt, mss);
3228 mss = min(mss, offer);
3229 mss = max(mss, 64); /* sanity - at least max opt space */
3234 pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr)
3236 struct pf_rule *r = s->rule.ptr;
3237 struct pf_src_node *sn = NULL;
3240 if (!r->rt || r->rt == PF_FASTROUTE)
3242 switch (s->key[PF_SK_WIRE]->af) {
3245 pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL, &sn);
3246 s->rt_kif = r->rpool.cur->kif;
3251 pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL, &sn);
3252 s->rt_kif = r->rpool.cur->kif;
3259 pf_tcp_iss(struct pf_pdesc *pd)
3262 u_int32_t digest[4];
3265 if (V_pf_tcp_secret_init == 0) {
3266 read_random(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret));
3267 MD5Init(&V_pf_tcp_secret_ctx);
3268 MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret,
3269 sizeof(V_pf_tcp_secret));
3270 V_pf_tcp_secret_init = 1;
3273 ctx = V_pf_tcp_secret_ctx;
3275 if (pf_tcp_secret_init == 0) {
3276 arc4random_buf(pf_tcp_secret, sizeof(pf_tcp_secret));
3277 MD5Init(&pf_tcp_secret_ctx);
3278 MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret,
3279 sizeof(pf_tcp_secret));
3280 pf_tcp_secret_init = 1;
3283 ctx = pf_tcp_secret_ctx;
3286 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short));
3287 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short));
3288 if (pd->af == AF_INET6) {
3289 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
3290 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
3292 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
3293 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
3295 MD5Final((u_char *)digest, &ctx);
3297 V_pf_tcp_iss_off += 4096;
3298 #define ISN_RANDOM_INCREMENT (4096 - 1)
3299 return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) +
3301 #undef ISN_RANDOM_INCREMENT
3303 pf_tcp_iss_off += 4096;
3304 return (digest[0] + tcp_iss + pf_tcp_iss_off);
3309 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
3310 struct pfi_kif *kif, struct mbuf *m, int off, void *h,
3311 struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm,
3313 struct ifqueue *ifq, struct inpcb *inp)
3315 struct ifqueue *ifq)
3318 struct pf_rule *nr = NULL;
3319 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
3320 sa_family_t af = pd->af;
3321 struct pf_rule *r, *a = NULL;
3322 struct pf_ruleset *ruleset = NULL;
3323 struct pf_src_node *nsn = NULL;
3324 struct tcphdr *th = pd->hdr.tcp;
3325 struct pf_state_key *skw = NULL, *sks = NULL;
3326 struct pf_state_key *sk = NULL, *nk = NULL;
3328 int rewrite = 0, hdrlen = 0;
3329 int tag = -1, rtableid = -1;
3334 u_int16_t sport = 0, dport = 0;
3335 u_int16_t bproto_sum = 0, bip_sum = 0;
3337 u_int16_t sport, dport;
3338 u_int16_t bproto_sum = 0, bip_sum;
3340 u_int8_t icmptype = 0, icmpcode = 0;
3343 if (direction == PF_IN && pf_check_congestion(ifq)) {
3344 REASON_SET(&reason, PFRES_CONGEST);
3350 pd->lookup.done = pf_socket_lookup(direction, pd, inp);
3351 else if (V_debug_pfugidhack) {
3353 DPFPRINTF(PF_DEBUG_MISC, ("pf: unlocked lookup\n"));
3354 pd->lookup.done = pf_socket_lookup(direction, pd, inp);
3359 switch (pd->proto) {
3361 sport = th->th_sport;
3362 dport = th->th_dport;
3363 hdrlen = sizeof(*th);
3366 sport = pd->hdr.udp->uh_sport;
3367 dport = pd->hdr.udp->uh_dport;
3368 hdrlen = sizeof(*pd->hdr.udp);
3372 if (pd->af != AF_INET)
3374 sport = dport = pd->hdr.icmp->icmp_id;
3375 hdrlen = sizeof(*pd->hdr.icmp);
3376 icmptype = pd->hdr.icmp->icmp_type;
3377 icmpcode = pd->hdr.icmp->icmp_code;
3379 if (icmptype == ICMP_UNREACH ||
3380 icmptype == ICMP_SOURCEQUENCH ||
3381 icmptype == ICMP_REDIRECT ||
3382 icmptype == ICMP_TIMXCEED ||
3383 icmptype == ICMP_PARAMPROB)
3388 case IPPROTO_ICMPV6:
3391 sport = dport = pd->hdr.icmp6->icmp6_id;
3392 hdrlen = sizeof(*pd->hdr.icmp6);
3393 icmptype = pd->hdr.icmp6->icmp6_type;
3394 icmpcode = pd->hdr.icmp6->icmp6_code;
3396 if (icmptype == ICMP6_DST_UNREACH ||
3397 icmptype == ICMP6_PACKET_TOO_BIG ||
3398 icmptype == ICMP6_TIME_EXCEEDED ||
3399 icmptype == ICMP6_PARAM_PROB)
3404 sport = dport = hdrlen = 0;
3408 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3410 /* check packet for BINAT/NAT/RDR */
3411 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn,
3412 &skw, &sks, &sk, &nk, saddr, daddr, sport, dport)) != NULL) {
3413 if (nk == NULL || sk == NULL) {
3414 REASON_SET(&reason, PFRES_MEMORY);
3419 bip_sum = *pd->ip_sum;
3421 switch (pd->proto) {
3423 bproto_sum = th->th_sum;
3424 pd->proto_sum = &th->th_sum;
3426 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3427 nk->port[pd->sidx] != sport) {
3428 pf_change_ap(saddr, &th->th_sport, pd->ip_sum,
3429 &th->th_sum, &nk->addr[pd->sidx],
3430 nk->port[pd->sidx], 0, af);
3431 pd->sport = &th->th_sport;
3432 sport = th->th_sport;
3435 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3436 nk->port[pd->didx] != dport) {
3437 pf_change_ap(daddr, &th->th_dport, pd->ip_sum,
3438 &th->th_sum, &nk->addr[pd->didx],
3439 nk->port[pd->didx], 0, af);
3440 dport = th->th_dport;
3441 pd->dport = &th->th_dport;
3446 bproto_sum = pd->hdr.udp->uh_sum;
3447 pd->proto_sum = &pd->hdr.udp->uh_sum;
3449 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3450 nk->port[pd->sidx] != sport) {
3451 pf_change_ap(saddr, &pd->hdr.udp->uh_sport,
3452 pd->ip_sum, &pd->hdr.udp->uh_sum,
3453 &nk->addr[pd->sidx],
3454 nk->port[pd->sidx], 1, af);
3455 sport = pd->hdr.udp->uh_sport;
3456 pd->sport = &pd->hdr.udp->uh_sport;
3459 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3460 nk->port[pd->didx] != dport) {
3461 pf_change_ap(daddr, &pd->hdr.udp->uh_dport,
3462 pd->ip_sum, &pd->hdr.udp->uh_sum,
3463 &nk->addr[pd->didx],
3464 nk->port[pd->didx], 1, af);
3465 dport = pd->hdr.udp->uh_dport;
3466 pd->dport = &pd->hdr.udp->uh_dport;
3472 nk->port[0] = nk->port[1];
3473 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
3474 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3475 nk->addr[pd->sidx].v4.s_addr, 0);
3477 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
3478 pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
3479 nk->addr[pd->didx].v4.s_addr, 0);
3481 if (nk->port[1] != pd->hdr.icmp->icmp_id) {
3482 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
3483 pd->hdr.icmp->icmp_cksum, sport,
3485 pd->hdr.icmp->icmp_id = nk->port[1];
3486 pd->sport = &pd->hdr.icmp->icmp_id;
3488 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
3492 case IPPROTO_ICMPV6:
3493 nk->port[0] = nk->port[1];
3494 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3495 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3496 &nk->addr[pd->sidx], 0);
3498 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3499 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3500 &nk->addr[pd->didx], 0);
3509 &nk->addr[pd->sidx], AF_INET))
3510 pf_change_a(&saddr->v4.s_addr,
3512 nk->addr[pd->sidx].v4.s_addr, 0);
3515 &nk->addr[pd->didx], AF_INET))
3516 pf_change_a(&daddr->v4.s_addr,
3518 nk->addr[pd->didx].v4.s_addr, 0);
3524 &nk->addr[pd->sidx], AF_INET6))
3525 PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3528 &nk->addr[pd->didx], AF_INET6))
3529 PF_ACPY(saddr, &nk->addr[pd->didx], af);
3542 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3543 r = r->skip[PF_SKIP_IFP].ptr;
3544 else if (r->direction && r->direction != direction)
3545 r = r->skip[PF_SKIP_DIR].ptr;
3546 else if (r->af && r->af != af)
3547 r = r->skip[PF_SKIP_AF].ptr;
3548 else if (r->proto && r->proto != pd->proto)
3549 r = r->skip[PF_SKIP_PROTO].ptr;
3550 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3552 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3553 /* tcp/udp only. port_op always 0 in other cases */
3554 else if (r->src.port_op && !pf_match_port(r->src.port_op,
3555 r->src.port[0], r->src.port[1], sport))
3556 r = r->skip[PF_SKIP_SRC_PORT].ptr;
3557 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3559 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3560 /* tcp/udp only. port_op always 0 in other cases */
3561 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3562 r->dst.port[0], r->dst.port[1], dport))
3563 r = r->skip[PF_SKIP_DST_PORT].ptr;
3564 /* icmp only. type always 0 in other cases */
3565 else if (r->type && r->type != icmptype + 1)
3566 r = TAILQ_NEXT(r, entries);
3567 /* icmp only. type always 0 in other cases */
3568 else if (r->code && r->code != icmpcode + 1)
3569 r = TAILQ_NEXT(r, entries);
3570 else if (r->tos && !(r->tos == pd->tos))
3571 r = TAILQ_NEXT(r, entries);
3572 else if (r->rule_flag & PFRULE_FRAGMENT)
3573 r = TAILQ_NEXT(r, entries);
3574 else if (pd->proto == IPPROTO_TCP &&
3575 (r->flagset & th->th_flags) != r->flags)
3576 r = TAILQ_NEXT(r, entries);
3577 /* tcp/udp only. uid.op always 0 in other cases */
3578 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3580 pf_socket_lookup(direction, pd, inp), 1)) &&
3582 pf_socket_lookup(direction, pd), 1)) &&
3584 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3586 r = TAILQ_NEXT(r, entries);
3587 /* tcp/udp only. gid.op always 0 in other cases */
3588 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
3590 pf_socket_lookup(direction, pd, inp), 1)) &&
3592 pf_socket_lookup(direction, pd), 1)) &&
3594 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
3596 r = TAILQ_NEXT(r, entries);
3599 r->prob <= arc4random())
3601 r->prob <= arc4random_uniform(UINT_MAX - 1) + 1)
3603 r = TAILQ_NEXT(r, entries);
3605 else if (r->match_tag && !pf_match_tag(m, r, &tag, pd->pf_mtag))
3607 else if (r->match_tag && !pf_match_tag(m, r, &tag))
3609 r = TAILQ_NEXT(r, entries);
3610 else if (r->os_fingerprint != PF_OSFP_ANY &&
3611 (pd->proto != IPPROTO_TCP || !pf_osfp_match(
3612 pf_osfp_fingerprint(pd, m, off, th),
3613 r->os_fingerprint)))
3614 r = TAILQ_NEXT(r, entries);
3618 if (r->rtableid >= 0)
3619 rtableid = r->rtableid;
3620 if (r->anchor == NULL) {
3627 r = TAILQ_NEXT(r, entries);
3629 pf_step_into_anchor(&asd, &ruleset,
3630 PF_RULESET_FILTER, &r, &a, &match);
3632 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
3633 PF_RULESET_FILTER, &r, &a, &match))
3640 REASON_SET(&reason, PFRES_MATCH);
3642 if (r->log || (nr != NULL && nr->log)) {
3644 m_copyback(m, off, hdrlen, pd->hdr.any);
3645 PFLOG_PACKET(kif, h, m, af, direction, reason, r->log ? r : nr,
3649 if ((r->action == PF_DROP) &&
3650 ((r->rule_flag & PFRULE_RETURNRST) ||
3651 (r->rule_flag & PFRULE_RETURNICMP) ||
3652 (r->rule_flag & PFRULE_RETURN))) {
3653 /* undo NAT changes, if they have taken place */
3655 PF_ACPY(saddr, &sk->addr[pd->sidx], af);
3656 PF_ACPY(daddr, &sk->addr[pd->didx], af);
3658 *pd->sport = sk->port[pd->sidx];
3660 *pd->dport = sk->port[pd->didx];
3662 *pd->proto_sum = bproto_sum;
3664 *pd->ip_sum = bip_sum;
3665 m_copyback(m, off, hdrlen, pd->hdr.any);
3667 if (pd->proto == IPPROTO_TCP &&
3668 ((r->rule_flag & PFRULE_RETURNRST) ||
3669 (r->rule_flag & PFRULE_RETURN)) &&
3670 !(th->th_flags & TH_RST)) {
3671 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
3683 h4 = mtod(m, struct ip *);
3684 len = ntohs(h4->ip_len) - off;
3689 h6 = mtod(m, struct ip6_hdr *);
3690 len = ntohs(h6->ip6_plen) - (off - sizeof(*h6));
3695 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
3696 REASON_SET(&reason, PFRES_PROTCKSUM);
3698 if (th->th_flags & TH_SYN)
3700 if (th->th_flags & TH_FIN)
3703 pf_send_tcp(m, r, af, pd->dst,
3705 pf_send_tcp(r, af, pd->dst,
3707 pd->src, th->th_dport, th->th_sport,
3708 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
3709 r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp);
3711 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
3713 pf_send_icmp(m, r->return_icmp >> 8,
3714 r->return_icmp & 255, af, r);
3715 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
3717 pf_send_icmp(m, r->return_icmp6 >> 8,
3718 r->return_icmp6 & 255, af, r);
3721 if (r->action == PF_DROP)
3725 if (pf_tag_packet(m, tag, rtableid, pd->pf_mtag)) {
3727 if (pf_tag_packet(m, tag, rtableid)) {
3729 REASON_SET(&reason, PFRES_MEMORY);
3733 if (!state_icmp && (r->keep_state || nr != NULL ||
3734 (pd->flags & PFDESC_TCP_NORM))) {
3736 action = pf_create_state(r, nr, a, pd, nsn, skw, sks, nk, sk, m,
3737 off, sport, dport, &rewrite, kif, sm, tag, bproto_sum,
3739 if (action != PF_PASS)
3744 pool_put(&V_pf_state_key_pl, sk);
3746 pool_put(&V_pf_state_key_pl, nk);
3749 pool_put(&pf_state_key_pl, sk);
3751 pool_put(&pf_state_key_pl, nk);
3755 /* copy back packet headers if we performed NAT operations */
3757 m_copyback(m, off, hdrlen, pd->hdr.any);
3760 if (*sm != NULL && !ISSET((*sm)->state_flags, PFSTATE_NOSYNC) &&
3762 direction == PF_OUT && pfsync_up_ptr != NULL && pfsync_up_ptr()) {
3764 direction == PF_OUT && pfsync_up()) {
3767 * We want the state created, but we dont
3768 * want to send this in case a partner
3769 * firewall has to know about it to allow
3770 * replies through it.
3773 if (pfsync_defer_ptr != NULL)
3774 pfsync_defer_ptr(*sm, m);
3776 if (pfsync_defer(*sm, m))
3787 pool_put(&V_pf_state_key_pl, sk);
3789 pool_put(&V_pf_state_key_pl, nk);
3792 pool_put(&pf_state_key_pl, sk);
3794 pool_put(&pf_state_key_pl, nk);
3800 pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
3801 struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *skw,
3802 struct pf_state_key *sks, struct pf_state_key *nk, struct pf_state_key *sk,
3803 struct mbuf *m, int off, u_int16_t sport, u_int16_t dport, int *rewrite,
3804 struct pfi_kif *kif, struct pf_state **sm, int tag, u_int16_t bproto_sum,
3805 u_int16_t bip_sum, int hdrlen)
3807 struct pf_state *s = NULL;
3808 struct pf_src_node *sn = NULL;
3809 struct tcphdr *th = pd->hdr.tcp;
3811 u_int16_t mss = V_tcp_mssdflt;
3813 u_int16_t mss = tcp_mssdflt;
3817 /* check maximums */
3818 if (r->max_states && (r->states_cur >= r->max_states)) {
3820 V_pf_status.lcounters[LCNT_STATES]++;
3822 pf_status.lcounters[LCNT_STATES]++;
3824 REASON_SET(&reason, PFRES_MAXSTATES);
3827 /* src node for filter rule */
3828 if ((r->rule_flag & PFRULE_SRCTRACK ||
3829 r->rpool.opts & PF_POOL_STICKYADDR) &&
3830 pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
3831 REASON_SET(&reason, PFRES_SRCLIMIT);
3834 /* src node for translation rule */
3835 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3836 pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
3837 REASON_SET(&reason, PFRES_SRCLIMIT);
3841 s = pool_get(&V_pf_state_pl, PR_NOWAIT | PR_ZERO);
3843 s = pool_get(&pf_state_pl, PR_NOWAIT | PR_ZERO);
3846 REASON_SET(&reason, PFRES_MEMORY);
3850 s->nat_rule.ptr = nr;
3852 STATE_INC_COUNTERS(s);
3854 s->state_flags |= PFSTATE_ALLOWOPTS;
3855 if (r->rule_flag & PFRULE_STATESLOPPY)
3856 s->state_flags |= PFSTATE_SLOPPY;
3857 if (r->rule_flag & PFRULE_PFLOW)
3858 s->state_flags |= PFSTATE_PFLOW;
3859 s->log = r->log & PF_LOG_ALL;
3860 s->sync_state = PFSYNC_S_NONE;
3862 s->log |= nr->log & PF_LOG_ALL;
3863 switch (pd->proto) {
3865 s->src.seqlo = ntohl(th->th_seq);
3866 s->src.seqhi = s->src.seqlo + pd->p_len + 1;
3867 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
3868 r->keep_state == PF_STATE_MODULATE) {
3869 /* Generate sequence number modulator */
3870 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
3873 pf_change_a(&th->th_seq, &th->th_sum,
3874 htonl(s->src.seqlo + s->src.seqdiff), 0);
3878 if (th->th_flags & TH_SYN) {
3880 s->src.wscale = pf_get_wscale(m, off,
3881 th->th_off, pd->af);
3883 s->src.max_win = MAX(ntohs(th->th_win), 1);
3884 if (s->src.wscale & PF_WSCALE_MASK) {
3885 /* Remove scale factor from initial window */
3886 int win = s->src.max_win;
3887 win += 1 << (s->src.wscale & PF_WSCALE_MASK);
3888 s->src.max_win = (win - 1) >>
3889 (s->src.wscale & PF_WSCALE_MASK);
3891 if (th->th_flags & TH_FIN)
3895 s->src.state = TCPS_SYN_SENT;
3896 s->dst.state = TCPS_CLOSED;
3897 s->timeout = PFTM_TCP_FIRST_PACKET;
3900 s->src.state = PFUDPS_SINGLE;
3901 s->dst.state = PFUDPS_NO_TRAFFIC;
3902 s->timeout = PFTM_UDP_FIRST_PACKET;
3906 case IPPROTO_ICMPV6:
3908 s->timeout = PFTM_ICMP_FIRST_PACKET;
3911 s->src.state = PFOTHERS_SINGLE;
3912 s->dst.state = PFOTHERS_NO_TRAFFIC;
3913 s->timeout = PFTM_OTHER_FIRST_PACKET;
3916 s->creation = time_second;
3917 s->expire = time_second;
3921 s->src_node->states++;
3924 /* XXX We only modify one side for now. */
3925 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
3926 s->nat_src_node = nsn;
3927 s->nat_src_node->states++;
3929 if (pd->proto == IPPROTO_TCP) {
3930 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
3931 off, pd, th, &s->src, &s->dst)) {
3932 REASON_SET(&reason, PFRES_MEMORY);
3933 pf_src_tree_remove_state(s);
3934 STATE_DEC_COUNTERS(s);
3936 pool_put(&V_pf_state_pl, s);
3938 pool_put(&pf_state_pl, s);
3942 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
3943 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
3944 &s->src, &s->dst, rewrite)) {
3945 /* This really shouldn't happen!!! */
3946 DPFPRINTF(PF_DEBUG_URGENT,
3947 ("pf_normalize_tcp_stateful failed on first pkt"));
3948 pf_normalize_tcp_cleanup(s);
3949 pf_src_tree_remove_state(s);
3950 STATE_DEC_COUNTERS(s);
3952 pool_put(&V_pf_state_pl, s);
3954 pool_put(&pf_state_pl, s);
3959 s->direction = pd->dir;
3961 if (sk == NULL && pf_state_key_setup(pd, nr, &skw, &sks, &sk, &nk,
3962 pd->src, pd->dst, sport, dport))
3965 if (pf_state_insert(BOUND_IFACE(r, kif), skw, sks, s)) {
3966 if (pd->proto == IPPROTO_TCP)
3967 pf_normalize_tcp_cleanup(s);
3968 REASON_SET(&reason, PFRES_STATEINS);
3969 pf_src_tree_remove_state(s);
3970 STATE_DEC_COUNTERS(s);
3972 pool_put(&V_pf_state_pl, s);
3974 pool_put(&pf_state_pl, s);
3980 pf_set_rt_ifp(s, pd->src); /* needs s->state_key set */
3985 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
3986 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
3987 s->src.state = PF_TCPS_PROXY_SRC;
3988 /* undo NAT changes, if they have taken place */
3990 struct pf_state_key *skt = s->key[PF_SK_WIRE];
3991 if (pd->dir == PF_OUT)
3992 skt = s->key[PF_SK_STACK];
3993 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
3994 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
3996 *pd->sport = skt->port[pd->sidx];
3998 *pd->dport = skt->port[pd->didx];
4000 *pd->proto_sum = bproto_sum;
4002 *pd->ip_sum = bip_sum;
4003 m_copyback(m, off, hdrlen, pd->hdr.any);
4005 s->src.seqhi = htonl(arc4random());
4006 /* Find mss option */
4007 mss = pf_get_mss(m, off, th->th_off, pd->af);
4008 mss = pf_calc_mss(pd->src, pd->af, mss);
4009 mss = pf_calc_mss(pd->dst, pd->af, mss);
4012 pf_send_tcp(NULL, r, pd->af, pd->dst, pd->src, th->th_dport,
4014 pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
4016 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
4017 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL);
4018 REASON_SET(&reason, PFRES_SYNPROXY);
4019 return (PF_SYNPROXY_DROP);
4027 pool_put(&V_pf_state_key_pl, sk);
4029 pool_put(&V_pf_state_key_pl, nk);
4032 pool_put(&pf_state_key_pl, sk);
4034 pool_put(&pf_state_key_pl, nk);
4037 if (sn != NULL && sn->states == 0 && sn->expire == 0) {
4039 RB_REMOVE(pf_src_tree, &V_tree_src_tracking, sn);
4040 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
4041 V_pf_status.src_nodes--;
4042 pool_put(&V_pf_src_tree_pl, sn);
4044 RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
4045 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
4046 pf_status.src_nodes--;
4047 pool_put(&pf_src_tree_pl, sn);
4050 if (nsn != sn && nsn != NULL && nsn->states == 0 && nsn->expire == 0) {
4052 RB_REMOVE(pf_src_tree, &V_tree_src_tracking, nsn);
4053 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
4054 V_pf_status.src_nodes--;
4055 pool_put(&V_pf_src_tree_pl, nsn);
4057 RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
4058 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
4059 pf_status.src_nodes--;
4060 pool_put(&pf_src_tree_pl, nsn);
4067 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
4068 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
4069 struct pf_ruleset **rsm)
4071 struct pf_rule *r, *a = NULL;
4072 struct pf_ruleset *ruleset = NULL;
4073 sa_family_t af = pd->af;
4079 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
4082 if (pfi_kif_match(r->kif, kif) == r->ifnot)
4083 r = r->skip[PF_SKIP_IFP].ptr;
4084 else if (r->direction && r->direction != direction)
4085 r = r->skip[PF_SKIP_DIR].ptr;
4086 else if (r->af && r->af != af)
4087 r = r->skip[PF_SKIP_AF].ptr;
4088 else if (r->proto && r->proto != pd->proto)
4089 r = r->skip[PF_SKIP_PROTO].ptr;
4090 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
4092 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
4093 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
4095 r = r->skip[PF_SKIP_DST_ADDR].ptr;
4096 else if (r->tos && !(r->tos == pd->tos))
4097 r = TAILQ_NEXT(r, entries);
4098 else if (r->os_fingerprint != PF_OSFP_ANY)
4099 r = TAILQ_NEXT(r, entries);
4100 else if (pd->proto == IPPROTO_UDP &&
4101 (r->src.port_op || r->dst.port_op))
4102 r = TAILQ_NEXT(r, entries);
4103 else if (pd->proto == IPPROTO_TCP &&
4104 (r->src.port_op || r->dst.port_op || r->flagset))
4105 r = TAILQ_NEXT(r, entries);
4106 else if ((pd->proto == IPPROTO_ICMP ||
4107 pd->proto == IPPROTO_ICMPV6) &&
4108 (r->type || r->code))
4109 r = TAILQ_NEXT(r, entries);
4110 else if (r->prob && r->prob <=
4111 (arc4random() % (UINT_MAX - 1) + 1))
4112 r = TAILQ_NEXT(r, entries);
4114 else if (r->match_tag && !pf_match_tag(m, r, &tag, pd->pf_mtag))
4116 else if (r->match_tag && !pf_match_tag(m, r, &tag))
4118 r = TAILQ_NEXT(r, entries);
4120 if (r->anchor == NULL) {
4127 r = TAILQ_NEXT(r, entries);
4129 pf_step_into_anchor(&asd, &ruleset,
4130 PF_RULESET_FILTER, &r, &a, &match);
4132 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
4133 PF_RULESET_FILTER, &r, &a, &match))
4140 REASON_SET(&reason, PFRES_MATCH);
4143 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset,
4146 if (r->action != PF_PASS)
4150 if (pf_tag_packet(m, tag, -1, pd->pf_mtag)) {
4152 if (pf_tag_packet(m, tag, -1)) {
4154 REASON_SET(&reason, PFRES_MEMORY);
4162 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
4163 struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off,
4164 struct pf_pdesc *pd, u_short *reason, int *copyback)
4166 struct tcphdr *th = pd->hdr.tcp;
4167 u_int16_t win = ntohs(th->th_win);
4168 u_int32_t ack, end, seq, orig_seq;
4172 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
4173 sws = src->wscale & PF_WSCALE_MASK;
4174 dws = dst->wscale & PF_WSCALE_MASK;
4179 * Sequence tracking algorithm from Guido van Rooij's paper:
4180 * http://www.madison-gurkha.com/publications/tcp_filtering/
4184 orig_seq = seq = ntohl(th->th_seq);
4185 if (src->seqlo == 0) {
4186 /* First packet from this end. Set its state */
4188 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
4189 src->scrub == NULL) {
4190 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
4191 REASON_SET(reason, PFRES_MEMORY);
4196 /* Deferred generation of sequence number modulator */
4197 if (dst->seqdiff && !src->seqdiff) {
4198 /* use random iss for the TCP server */
4199 while ((src->seqdiff = arc4random() - seq) == 0)
4201 ack = ntohl(th->th_ack) - dst->seqdiff;
4202 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
4204 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
4207 ack = ntohl(th->th_ack);
4210 end = seq + pd->p_len;
4211 if (th->th_flags & TH_SYN) {
4213 if (dst->wscale & PF_WSCALE_FLAG) {
4214 src->wscale = pf_get_wscale(m, off, th->th_off,
4216 if (src->wscale & PF_WSCALE_FLAG) {
4217 /* Remove scale factor from initial
4219 sws = src->wscale & PF_WSCALE_MASK;
4220 win = ((u_int32_t)win + (1 << sws) - 1)
4222 dws = dst->wscale & PF_WSCALE_MASK;
4224 /* fixup other window */
4225 dst->max_win <<= dst->wscale &
4227 /* in case of a retrans SYN|ACK */
4232 if (th->th_flags & TH_FIN)
4236 if (src->state < TCPS_SYN_SENT)
4237 src->state = TCPS_SYN_SENT;
4240 * May need to slide the window (seqhi may have been set by
4241 * the crappy stack check or if we picked up the connection
4242 * after establishment)
4244 if (src->seqhi == 1 ||
4245 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
4246 src->seqhi = end + MAX(1, dst->max_win << dws);
4247 if (win > src->max_win)
4251 ack = ntohl(th->th_ack) - dst->seqdiff;
4253 /* Modulate sequence numbers */
4254 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
4256 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
4259 end = seq + pd->p_len;
4260 if (th->th_flags & TH_SYN)
4262 if (th->th_flags & TH_FIN)
4266 if ((th->th_flags & TH_ACK) == 0) {
4267 /* Let it pass through the ack skew check */
4269 } else if ((ack == 0 &&
4270 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
4271 /* broken tcp stacks do not set ack */
4272 (dst->state < TCPS_SYN_SENT)) {
4274 * Many stacks (ours included) will set the ACK number in an
4275 * FIN|ACK if the SYN times out -- no sequence to ACK.
4281 /* Ease sequencing restrictions on no data packets */
4286 ackskew = dst->seqlo - ack;
4290 * Need to demodulate the sequence numbers in any TCP SACK options
4291 * (Selective ACK). We could optionally validate the SACK values
4292 * against the current ACK window, either forwards or backwards, but
4293 * I'm not confident that SACK has been implemented properly
4294 * everywhere. It wouldn't surprise me if several stacks accidently
4295 * SACK too far backwards of previously ACKed data. There really aren't
4296 * any security implications of bad SACKing unless the target stack
4297 * doesn't validate the option length correctly. Someone trying to
4298 * spoof into a TCP connection won't bother blindly sending SACK
4301 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
4302 if (pf_modulate_sack(m, off, pd, th, dst))
4307 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
4308 if (SEQ_GEQ(src->seqhi, end) &&
4309 /* Last octet inside other's window space */
4310 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
4311 /* Retrans: not more than one window back */
4312 (ackskew >= -MAXACKWINDOW) &&
4313 /* Acking not more than one reassembled fragment backwards */
4314 (ackskew <= (MAXACKWINDOW << sws)) &&
4315 /* Acking not more than one window forward */
4316 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
4317 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
4318 (pd->flags & PFDESC_IP_REAS) == 0)) {
4319 /* Require an exact/+1 sequence match on resets when possible */
4321 if (dst->scrub || src->scrub) {
4322 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4323 *state, src, dst, copyback))
4327 /* update max window */
4328 if (src->max_win < win)
4330 /* synchronize sequencing */
4331 if (SEQ_GT(end, src->seqlo))
4333 /* slide the window of what the other end can send */
4334 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4335 dst->seqhi = ack + MAX((win << sws), 1);
4339 if (th->th_flags & TH_SYN)
4340 if (src->state < TCPS_SYN_SENT)
4341 src->state = TCPS_SYN_SENT;
4342 if (th->th_flags & TH_FIN)
4343 if (src->state < TCPS_CLOSING)
4344 src->state = TCPS_CLOSING;
4345 if (th->th_flags & TH_ACK) {
4346 if (dst->state == TCPS_SYN_SENT) {
4347 dst->state = TCPS_ESTABLISHED;
4348 if (src->state == TCPS_ESTABLISHED &&
4349 (*state)->src_node != NULL &&
4350 pf_src_connlimit(state)) {
4351 REASON_SET(reason, PFRES_SRCLIMIT);
4354 } else if (dst->state == TCPS_CLOSING)
4355 dst->state = TCPS_FIN_WAIT_2;
4357 if (th->th_flags & TH_RST)
4358 src->state = dst->state = TCPS_TIME_WAIT;
4360 /* update expire time */
4361 (*state)->expire = time_second;
4362 if (src->state >= TCPS_FIN_WAIT_2 &&
4363 dst->state >= TCPS_FIN_WAIT_2)
4364 (*state)->timeout = PFTM_TCP_CLOSED;
4365 else if (src->state >= TCPS_CLOSING &&
4366 dst->state >= TCPS_CLOSING)
4367 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4368 else if (src->state < TCPS_ESTABLISHED ||
4369 dst->state < TCPS_ESTABLISHED)
4370 (*state)->timeout = PFTM_TCP_OPENING;
4371 else if (src->state >= TCPS_CLOSING ||
4372 dst->state >= TCPS_CLOSING)
4373 (*state)->timeout = PFTM_TCP_CLOSING;
4375 (*state)->timeout = PFTM_TCP_ESTABLISHED;
4377 /* Fall through to PASS packet */
4379 } else if ((dst->state < TCPS_SYN_SENT ||
4380 dst->state >= TCPS_FIN_WAIT_2 ||
4381 src->state >= TCPS_FIN_WAIT_2) &&
4382 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
4383 /* Within a window forward of the originating packet */
4384 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
4385 /* Within a window backward of the originating packet */
4388 * This currently handles three situations:
4389 * 1) Stupid stacks will shotgun SYNs before their peer
4391 * 2) When PF catches an already established stream (the
4392 * firewall rebooted, the state table was flushed, routes
4394 * 3) Packets get funky immediately after the connection
4395 * closes (this should catch Solaris spurious ACK|FINs
4396 * that web servers like to spew after a close)
4398 * This must be a little more careful than the above code
4399 * since packet floods will also be caught here. We don't
4400 * update the TTL here to mitigate the damage of a packet
4401 * flood and so the same code can handle awkward establishment
4402 * and a loosened connection close.
4403 * In the establishment case, a correct peer response will
4404 * validate the connection, go through the normal state code
4405 * and keep updating the state TTL.
4409 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4411 if (pf_status.debug >= PF_DEBUG_MISC) {
4413 printf("pf: loose state match: ");
4414 pf_print_state(*state);
4415 pf_print_flags(th->th_flags);
4416 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4417 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
4419 pd->p_len, ackskew, (unsigned long long)(*state)->packets[0],
4420 (unsigned long long)(*state)->packets[1],
4422 pd->p_len, ackskew, (*state)->packets[0],
4423 (*state)->packets[1],
4425 pd->dir == PF_IN ? "in" : "out",
4426 pd->dir == (*state)->direction ? "fwd" : "rev");
4429 if (dst->scrub || src->scrub) {
4430 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4431 *state, src, dst, copyback))
4435 /* update max window */
4436 if (src->max_win < win)
4438 /* synchronize sequencing */
4439 if (SEQ_GT(end, src->seqlo))
4441 /* slide the window of what the other end can send */
4442 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4443 dst->seqhi = ack + MAX((win << sws), 1);
4446 * Cannot set dst->seqhi here since this could be a shotgunned
4447 * SYN and not an already established connection.
4450 if (th->th_flags & TH_FIN)
4451 if (src->state < TCPS_CLOSING)
4452 src->state = TCPS_CLOSING;
4453 if (th->th_flags & TH_RST)
4454 src->state = dst->state = TCPS_TIME_WAIT;
4456 /* Fall through to PASS packet */
4459 if ((*state)->dst.state == TCPS_SYN_SENT &&
4460 (*state)->src.state == TCPS_SYN_SENT) {
4461 /* Send RST for state mismatches during handshake */
4462 if (!(th->th_flags & TH_RST))
4464 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4466 pf_send_tcp((*state)->rule.ptr, pd->af,
4468 pd->dst, pd->src, th->th_dport,
4469 th->th_sport, ntohl(th->th_ack), 0,
4471 (*state)->rule.ptr->return_ttl, 1, 0,
4472 pd->eh, kif->pfik_ifp);
4477 } else if (V_pf_status.debug >= PF_DEBUG_MISC) {
4479 } else if (pf_status.debug >= PF_DEBUG_MISC) {
4481 printf("pf: BAD state: ");
4482 pf_print_state(*state);
4483 pf_print_flags(th->th_flags);
4484 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4485 "pkts=%llu:%llu dir=%s,%s\n",
4486 seq, orig_seq, ack, pd->p_len, ackskew,
4488 (unsigned long long)(*state)->packets[0],
4489 (unsigned long long)(*state)->packets[1],
4491 (*state)->packets[0], (*state)->packets[1],
4493 pd->dir == PF_IN ? "in" : "out",
4494 pd->dir == (*state)->direction ? "fwd" : "rev");
4495 printf("pf: State failure on: %c %c %c %c | %c %c\n",
4496 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4497 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4499 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4500 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4501 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4502 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
4504 REASON_SET(reason, PFRES_BADSTATE);
4512 pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst,
4513 struct pf_state **state, struct pf_pdesc *pd, u_short *reason)
4515 struct tcphdr *th = pd->hdr.tcp;
4517 if (th->th_flags & TH_SYN)
4518 if (src->state < TCPS_SYN_SENT)
4519 src->state = TCPS_SYN_SENT;
4520 if (th->th_flags & TH_FIN)
4521 if (src->state < TCPS_CLOSING)
4522 src->state = TCPS_CLOSING;
4523 if (th->th_flags & TH_ACK) {
4524 if (dst->state == TCPS_SYN_SENT) {
4525 dst->state = TCPS_ESTABLISHED;
4526 if (src->state == TCPS_ESTABLISHED &&
4527 (*state)->src_node != NULL &&
4528 pf_src_connlimit(state)) {
4529 REASON_SET(reason, PFRES_SRCLIMIT);
4532 } else if (dst->state == TCPS_CLOSING) {
4533 dst->state = TCPS_FIN_WAIT_2;
4534 } else if (src->state == TCPS_SYN_SENT &&
4535 dst->state < TCPS_SYN_SENT) {
4537 * Handle a special sloppy case where we only see one
4538 * half of the connection. If there is a ACK after
4539 * the initial SYN without ever seeing a packet from
4540 * the destination, set the connection to established.
4542 dst->state = src->state = TCPS_ESTABLISHED;
4543 if ((*state)->src_node != NULL &&
4544 pf_src_connlimit(state)) {
4545 REASON_SET(reason, PFRES_SRCLIMIT);
4548 } else if (src->state == TCPS_CLOSING &&
4549 dst->state == TCPS_ESTABLISHED &&
4552 * Handle the closing of half connections where we
4553 * don't see the full bidirectional FIN/ACK+ACK
4556 dst->state = TCPS_CLOSING;
4559 if (th->th_flags & TH_RST)
4560 src->state = dst->state = TCPS_TIME_WAIT;
4562 /* update expire time */
4563 (*state)->expire = time_second;
4564 if (src->state >= TCPS_FIN_WAIT_2 &&
4565 dst->state >= TCPS_FIN_WAIT_2)
4566 (*state)->timeout = PFTM_TCP_CLOSED;
4567 else if (src->state >= TCPS_CLOSING &&
4568 dst->state >= TCPS_CLOSING)
4569 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4570 else if (src->state < TCPS_ESTABLISHED ||
4571 dst->state < TCPS_ESTABLISHED)
4572 (*state)->timeout = PFTM_TCP_OPENING;
4573 else if (src->state >= TCPS_CLOSING ||
4574 dst->state >= TCPS_CLOSING)
4575 (*state)->timeout = PFTM_TCP_CLOSING;
4577 (*state)->timeout = PFTM_TCP_ESTABLISHED;
4583 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
4584 struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
4587 struct pf_state_key_cmp key;
4588 struct tcphdr *th = pd->hdr.tcp;
4590 struct pf_state_peer *src, *dst;
4591 struct pf_state_key *sk;
4594 key.proto = IPPROTO_TCP;
4595 if (direction == PF_IN) { /* wire side, straight */
4596 PF_ACPY(&key.addr[0], pd->src, key.af);
4597 PF_ACPY(&key.addr[1], pd->dst, key.af);
4598 key.port[0] = th->th_sport;
4599 key.port[1] = th->th_dport;
4600 } else { /* stack side, reverse */
4601 PF_ACPY(&key.addr[1], pd->src, key.af);
4602 PF_ACPY(&key.addr[0], pd->dst, key.af);
4603 key.port[1] = th->th_sport;
4604 key.port[0] = th->th_dport;
4608 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
4610 STATE_LOOKUP(kif, &key, direction, *state, m);
4613 if (direction == (*state)->direction) {
4614 src = &(*state)->src;
4615 dst = &(*state)->dst;
4617 src = &(*state)->dst;
4618 dst = &(*state)->src;
4621 sk = (*state)->key[pd->didx];
4623 if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
4624 if (direction != (*state)->direction) {
4625 REASON_SET(reason, PFRES_SYNPROXY);
4626 return (PF_SYNPROXY_DROP);
4628 if (th->th_flags & TH_SYN) {
4629 if (ntohl(th->th_seq) != (*state)->src.seqlo) {
4630 REASON_SET(reason, PFRES_SYNPROXY);
4634 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4636 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
4638 pd->src, th->th_dport, th->th_sport,
4639 (*state)->src.seqhi, ntohl(th->th_seq) + 1,
4640 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1,
4642 REASON_SET(reason, PFRES_SYNPROXY);
4643 return (PF_SYNPROXY_DROP);
4644 } else if (!(th->th_flags & TH_ACK) ||
4645 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4646 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4647 REASON_SET(reason, PFRES_SYNPROXY);
4649 } else if ((*state)->src_node != NULL &&
4650 pf_src_connlimit(state)) {
4651 REASON_SET(reason, PFRES_SRCLIMIT);
4654 (*state)->src.state = PF_TCPS_PROXY_DST;
4656 if ((*state)->src.state == PF_TCPS_PROXY_DST) {
4657 if (direction == (*state)->direction) {
4658 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
4659 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4660 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4661 REASON_SET(reason, PFRES_SYNPROXY);
4664 (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
4665 if ((*state)->dst.seqhi == 1)
4666 (*state)->dst.seqhi = htonl(arc4random());
4668 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4670 pf_send_tcp((*state)->rule.ptr, pd->af,
4672 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4673 sk->port[pd->sidx], sk->port[pd->didx],
4674 (*state)->dst.seqhi, 0, TH_SYN, 0,
4675 (*state)->src.mss, 0, 0, (*state)->tag, NULL, NULL);
4676 REASON_SET(reason, PFRES_SYNPROXY);
4677 return (PF_SYNPROXY_DROP);
4678 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
4680 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
4681 REASON_SET(reason, PFRES_SYNPROXY);
4684 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
4685 (*state)->dst.seqlo = ntohl(th->th_seq);
4687 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4689 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
4691 pd->src, th->th_dport, th->th_sport,
4692 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
4693 TH_ACK, (*state)->src.max_win, 0, 0, 0,
4694 (*state)->tag, NULL, NULL);
4696 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4698 pf_send_tcp((*state)->rule.ptr, pd->af,
4700 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4701 sk->port[pd->sidx], sk->port[pd->didx],
4702 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
4703 TH_ACK, (*state)->dst.max_win, 0, 0, 1,
4705 (*state)->src.seqdiff = (*state)->dst.seqhi -
4706 (*state)->src.seqlo;
4707 (*state)->dst.seqdiff = (*state)->src.seqhi -
4708 (*state)->dst.seqlo;
4709 (*state)->src.seqhi = (*state)->src.seqlo +
4710 (*state)->dst.max_win;
4711 (*state)->dst.seqhi = (*state)->dst.seqlo +
4712 (*state)->src.max_win;
4713 (*state)->src.wscale = (*state)->dst.wscale = 0;
4714 (*state)->src.state = (*state)->dst.state =
4716 REASON_SET(reason, PFRES_SYNPROXY);
4717 return (PF_SYNPROXY_DROP);
4721 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
4722 dst->state >= TCPS_FIN_WAIT_2 &&
4723 src->state >= TCPS_FIN_WAIT_2) {
4725 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4727 if (pf_status.debug >= PF_DEBUG_MISC) {
4729 printf("pf: state reuse ");
4730 pf_print_state(*state);
4731 pf_print_flags(th->th_flags);
4734 /* XXX make sure it's the same direction ?? */
4735 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
4736 pf_unlink_state(*state);
4741 if ((*state)->state_flags & PFSTATE_SLOPPY) {
4742 if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP)
4745 if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason,
4746 ©back) == PF_DROP)
4750 /* translate source/destination address, if necessary */
4751 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4752 struct pf_state_key *nk = (*state)->key[pd->didx];
4754 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4755 nk->port[pd->sidx] != th->th_sport)
4756 pf_change_ap(pd->src, &th->th_sport, pd->ip_sum,
4757 &th->th_sum, &nk->addr[pd->sidx],
4758 nk->port[pd->sidx], 0, pd->af);
4760 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4761 nk->port[pd->didx] != th->th_dport)
4762 pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum,
4763 &th->th_sum, &nk->addr[pd->didx],
4764 nk->port[pd->didx], 0, pd->af);
4768 /* Copyback sequence modulation or stateful scrub changes if needed */
4771 m_copyback(m, off, sizeof(*th), (caddr_t)th);
4773 m_copyback(m, off, sizeof(*th), th);
4780 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
4781 struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4783 struct pf_state_peer *src, *dst;
4784 struct pf_state_key_cmp key;
4785 struct udphdr *uh = pd->hdr.udp;
4788 key.proto = IPPROTO_UDP;
4789 if (direction == PF_IN) { /* wire side, straight */
4790 PF_ACPY(&key.addr[0], pd->src, key.af);
4791 PF_ACPY(&key.addr[1], pd->dst, key.af);
4792 key.port[0] = uh->uh_sport;
4793 key.port[1] = uh->uh_dport;
4794 } else { /* stack side, reverse */
4795 PF_ACPY(&key.addr[1], pd->src, key.af);
4796 PF_ACPY(&key.addr[0], pd->dst, key.af);
4797 key.port[1] = uh->uh_sport;
4798 key.port[0] = uh->uh_dport;
4802 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
4804 STATE_LOOKUP(kif, &key, direction, *state, m);
4807 if (direction == (*state)->direction) {
4808 src = &(*state)->src;
4809 dst = &(*state)->dst;
4811 src = &(*state)->dst;
4812 dst = &(*state)->src;
4816 if (src->state < PFUDPS_SINGLE)
4817 src->state = PFUDPS_SINGLE;
4818 if (dst->state == PFUDPS_SINGLE)
4819 dst->state = PFUDPS_MULTIPLE;
4821 /* update expire time */
4822 (*state)->expire = time_second;
4823 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
4824 (*state)->timeout = PFTM_UDP_MULTIPLE;
4826 (*state)->timeout = PFTM_UDP_SINGLE;
4828 /* translate source/destination address, if necessary */
4829 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4830 struct pf_state_key *nk = (*state)->key[pd->didx];
4832 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4833 nk->port[pd->sidx] != uh->uh_sport)
4834 pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum,
4835 &uh->uh_sum, &nk->addr[pd->sidx],
4836 nk->port[pd->sidx], 1, pd->af);
4838 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4839 nk->port[pd->didx] != uh->uh_dport)
4840 pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum,
4841 &uh->uh_sum, &nk->addr[pd->didx],
4842 nk->port[pd->didx], 1, pd->af);
4844 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
4846 m_copyback(m, off, sizeof(*uh), uh);
4854 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
4855 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
4857 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
4859 u_int16_t icmpid = 0, *icmpsum;
4861 u_int16_t icmpid, *icmpsum;
4865 struct pf_state_key_cmp key;
4867 switch (pd->proto) {
4870 icmptype = pd->hdr.icmp->icmp_type;
4871 icmpid = pd->hdr.icmp->icmp_id;
4872 icmpsum = &pd->hdr.icmp->icmp_cksum;
4874 if (icmptype == ICMP_UNREACH ||
4875 icmptype == ICMP_SOURCEQUENCH ||
4876 icmptype == ICMP_REDIRECT ||
4877 icmptype == ICMP_TIMXCEED ||
4878 icmptype == ICMP_PARAMPROB)
4883 case IPPROTO_ICMPV6:
4884 icmptype = pd->hdr.icmp6->icmp6_type;
4885 icmpid = pd->hdr.icmp6->icmp6_id;
4886 icmpsum = &pd->hdr.icmp6->icmp6_cksum;
4888 if (icmptype == ICMP6_DST_UNREACH ||
4889 icmptype == ICMP6_PACKET_TOO_BIG ||
4890 icmptype == ICMP6_TIME_EXCEEDED ||
4891 icmptype == ICMP6_PARAM_PROB)
4900 * ICMP query/reply message not related to a TCP/UDP packet.
4901 * Search for an ICMP state.
4904 key.proto = pd->proto;
4905 key.port[0] = key.port[1] = icmpid;
4906 if (direction == PF_IN) { /* wire side, straight */
4907 PF_ACPY(&key.addr[0], pd->src, key.af);
4908 PF_ACPY(&key.addr[1], pd->dst, key.af);
4909 } else { /* stack side, reverse */
4910 PF_ACPY(&key.addr[1], pd->src, key.af);
4911 PF_ACPY(&key.addr[0], pd->dst, key.af);
4915 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
4917 STATE_LOOKUP(kif, &key, direction, *state, m);
4920 (*state)->expire = time_second;
4921 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
4923 /* translate source/destination address, if necessary */
4924 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4925 struct pf_state_key *nk = (*state)->key[pd->didx];
4930 if (PF_ANEQ(pd->src,
4931 &nk->addr[pd->sidx], AF_INET))
4932 pf_change_a(&saddr->v4.s_addr,
4934 nk->addr[pd->sidx].v4.s_addr, 0);
4936 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
4938 pf_change_a(&daddr->v4.s_addr,
4940 nk->addr[pd->didx].v4.s_addr, 0);
4943 pd->hdr.icmp->icmp_id) {
4944 pd->hdr.icmp->icmp_cksum =
4946 pd->hdr.icmp->icmp_cksum, icmpid,
4947 nk->port[pd->sidx], 0);
4948 pd->hdr.icmp->icmp_id =
4952 m_copyback(m, off, ICMP_MINLEN,
4961 if (PF_ANEQ(pd->src,
4962 &nk->addr[pd->sidx], AF_INET6))
4964 &pd->hdr.icmp6->icmp6_cksum,
4965 &nk->addr[pd->sidx], 0);
4967 if (PF_ANEQ(pd->dst,
4968 &nk->addr[pd->didx], AF_INET6))
4970 &pd->hdr.icmp6->icmp6_cksum,
4971 &nk->addr[pd->didx], 0);
4974 sizeof(struct icmp6_hdr),
4987 * ICMP error message in response to a TCP/UDP packet.
4988 * Extract the inner TCP/UDP header and search for that state.
4991 struct pf_pdesc pd2;
4993 bzero(&pd2, sizeof pd2);
4999 struct ip6_hdr h2_6;
5011 /* Payload packet is from the opposite direction. */
5012 pd2.sidx = (direction == PF_IN) ? 1 : 0;
5013 pd2.didx = (direction == PF_IN) ? 0 : 1;
5017 /* offset of h2 in mbuf chain */
5018 ipoff2 = off + ICMP_MINLEN;
5020 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
5021 NULL, reason, pd2.af)) {
5022 DPFPRINTF(PF_DEBUG_MISC,
5023 ("pf: ICMP error message too short "
5028 * ICMP error messages don't refer to non-first
5031 if (h2.ip_off & htons(IP_OFFMASK)) {
5032 REASON_SET(reason, PFRES_FRAG);
5036 /* offset of protocol header that follows h2 */
5037 off2 = ipoff2 + (h2.ip_hl << 2);
5039 pd2.proto = h2.ip_p;
5040 pd2.src = (struct pf_addr *)&h2.ip_src;
5041 pd2.dst = (struct pf_addr *)&h2.ip_dst;
5042 pd2.ip_sum = &h2.ip_sum;
5047 ipoff2 = off + sizeof(struct icmp6_hdr);
5049 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
5050 NULL, reason, pd2.af)) {
5051 DPFPRINTF(PF_DEBUG_MISC,
5052 ("pf: ICMP error message too short "
5056 pd2.proto = h2_6.ip6_nxt;
5057 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
5058 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
5060 off2 = ipoff2 + sizeof(h2_6);
5062 switch (pd2.proto) {
5063 case IPPROTO_FRAGMENT:
5065 * ICMPv6 error messages for
5066 * non-first fragments
5068 REASON_SET(reason, PFRES_FRAG);
5071 case IPPROTO_HOPOPTS:
5072 case IPPROTO_ROUTING:
5073 case IPPROTO_DSTOPTS: {
5074 /* get next header and header length */
5075 struct ip6_ext opt6;
5077 if (!pf_pull_hdr(m, off2, &opt6,
5078 sizeof(opt6), NULL, reason,
5080 DPFPRINTF(PF_DEBUG_MISC,
5081 ("pf: ICMPv6 short opt\n"));
5084 if (pd2.proto == IPPROTO_AH)
5085 off2 += (opt6.ip6e_len + 2) * 4;
5087 off2 += (opt6.ip6e_len + 1) * 8;
5088 pd2.proto = opt6.ip6e_nxt;
5089 /* goto the next header */
5096 } while (!terminal);
5101 switch (pd2.proto) {
5105 struct pf_state_peer *src, *dst;
5110 * Only the first 8 bytes of the TCP header can be
5111 * expected. Don't access any TCP header fields after
5112 * th_seq, an ackskew test is not possible.
5114 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
5116 DPFPRINTF(PF_DEBUG_MISC,
5117 ("pf: ICMP error message too short "
5123 key.proto = IPPROTO_TCP;
5124 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5125 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5126 key.port[pd2.sidx] = th.th_sport;
5127 key.port[pd2.didx] = th.th_dport;
5130 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
5132 STATE_LOOKUP(kif, &key, direction, *state, m);
5135 if (direction == (*state)->direction) {
5136 src = &(*state)->dst;
5137 dst = &(*state)->src;
5139 src = &(*state)->src;
5140 dst = &(*state)->dst;
5143 if (src->wscale && dst->wscale)
5144 dws = dst->wscale & PF_WSCALE_MASK;
5148 /* Demodulate sequence number */
5149 seq = ntohl(th.th_seq) - src->seqdiff;
5151 pf_change_a(&th.th_seq, icmpsum,
5156 if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
5157 (!SEQ_GEQ(src->seqhi, seq) ||
5158 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
5160 if (V_pf_status.debug >= PF_DEBUG_MISC) {
5162 if (pf_status.debug >= PF_DEBUG_MISC) {
5164 printf("pf: BAD ICMP %d:%d ",
5165 icmptype, pd->hdr.icmp->icmp_code);
5166 pf_print_host(pd->src, 0, pd->af);
5168 pf_print_host(pd->dst, 0, pd->af);
5170 pf_print_state(*state);
5171 printf(" seq=%u\n", seq);
5173 REASON_SET(reason, PFRES_BADSTATE);
5177 if (V_pf_status.debug >= PF_DEBUG_MISC) {
5179 if (pf_status.debug >= PF_DEBUG_MISC) {
5181 printf("pf: OK ICMP %d:%d ",
5182 icmptype, pd->hdr.icmp->icmp_code);
5183 pf_print_host(pd->src, 0, pd->af);
5185 pf_print_host(pd->dst, 0, pd->af);
5187 pf_print_state(*state);
5188 printf(" seq=%u\n", seq);
5192 /* translate source/destination address, if necessary */
5193 if ((*state)->key[PF_SK_WIRE] !=
5194 (*state)->key[PF_SK_STACK]) {
5195 struct pf_state_key *nk =
5196 (*state)->key[pd->didx];
5198 if (PF_ANEQ(pd2.src,
5199 &nk->addr[pd2.sidx], pd2.af) ||
5200 nk->port[pd2.sidx] != th.th_sport)
5201 pf_change_icmp(pd2.src, &th.th_sport,
5202 daddr, &nk->addr[pd2.sidx],
5203 nk->port[pd2.sidx], NULL,
5204 pd2.ip_sum, icmpsum,
5205 pd->ip_sum, 0, pd2.af);
5207 if (PF_ANEQ(pd2.dst,
5208 &nk->addr[pd2.didx], pd2.af) ||
5209 nk->port[pd2.didx] != th.th_dport)
5210 pf_change_icmp(pd2.dst, &th.th_dport,
5211 NULL, /* XXX Inbound NAT? */
5212 &nk->addr[pd2.didx],
5213 nk->port[pd2.didx], NULL,
5214 pd2.ip_sum, icmpsum,
5215 pd->ip_sum, 0, pd2.af);
5223 m_copyback(m, off, ICMP_MINLEN,
5228 m_copyback(m, ipoff2, sizeof(h2),
5238 sizeof(struct icmp6_hdr),
5243 m_copyback(m, ipoff2, sizeof(h2_6),
5252 m_copyback(m, off2, 8, (caddr_t)&th);
5254 m_copyback(m, off2, 8, &th);
5264 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
5265 NULL, reason, pd2.af)) {
5266 DPFPRINTF(PF_DEBUG_MISC,
5267 ("pf: ICMP error message too short "
5273 key.proto = IPPROTO_UDP;
5274 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5275 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5276 key.port[pd2.sidx] = uh.uh_sport;
5277 key.port[pd2.didx] = uh.uh_dport;
5280 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
5282 STATE_LOOKUP(kif, &key, direction, *state, m);
5285 /* translate source/destination address, if necessary */
5286 if ((*state)->key[PF_SK_WIRE] !=
5287 (*state)->key[PF_SK_STACK]) {
5288 struct pf_state_key *nk =
5289 (*state)->key[pd->didx];
5291 if (PF_ANEQ(pd2.src,
5292 &nk->addr[pd2.sidx], pd2.af) ||
5293 nk->port[pd2.sidx] != uh.uh_sport)
5294 pf_change_icmp(pd2.src, &uh.uh_sport,
5295 daddr, &nk->addr[pd2.sidx],
5296 nk->port[pd2.sidx], &uh.uh_sum,
5297 pd2.ip_sum, icmpsum,
5298 pd->ip_sum, 1, pd2.af);
5300 if (PF_ANEQ(pd2.dst,
5301 &nk->addr[pd2.didx], pd2.af) ||
5302 nk->port[pd2.didx] != uh.uh_dport)
5303 pf_change_icmp(pd2.dst, &uh.uh_dport,
5304 NULL, /* XXX Inbound NAT? */
5305 &nk->addr[pd2.didx],
5306 nk->port[pd2.didx], &uh.uh_sum,
5307 pd2.ip_sum, icmpsum,
5308 pd->ip_sum, 1, pd2.af);
5313 m_copyback(m, off, ICMP_MINLEN,
5319 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5321 m_copyback(m, ipoff2, sizeof(h2), &h2);
5328 sizeof(struct icmp6_hdr),
5333 m_copyback(m, ipoff2, sizeof(h2_6),
5342 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
5344 m_copyback(m, off2, sizeof(uh), &uh);
5351 case IPPROTO_ICMP: {
5354 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
5355 NULL, reason, pd2.af)) {
5356 DPFPRINTF(PF_DEBUG_MISC,
5357 ("pf: ICMP error message too short i"
5363 key.proto = IPPROTO_ICMP;
5364 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5365 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5366 key.port[0] = key.port[1] = iih.icmp_id;
5369 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
5371 STATE_LOOKUP(kif, &key, direction, *state, m);
5374 /* translate source/destination address, if necessary */
5375 if ((*state)->key[PF_SK_WIRE] !=
5376 (*state)->key[PF_SK_STACK]) {
5377 struct pf_state_key *nk =
5378 (*state)->key[pd->didx];
5380 if (PF_ANEQ(pd2.src,
5381 &nk->addr[pd2.sidx], pd2.af) ||
5382 nk->port[pd2.sidx] != iih.icmp_id)
5383 pf_change_icmp(pd2.src, &iih.icmp_id,
5384 daddr, &nk->addr[pd2.sidx],
5385 nk->port[pd2.sidx], NULL,
5386 pd2.ip_sum, icmpsum,
5387 pd->ip_sum, 0, AF_INET);
5389 if (PF_ANEQ(pd2.dst,
5390 &nk->addr[pd2.didx], pd2.af) ||
5391 nk->port[pd2.didx] != iih.icmp_id)
5392 pf_change_icmp(pd2.dst, &iih.icmp_id,
5393 NULL, /* XXX Inbound NAT? */
5394 &nk->addr[pd2.didx],
5395 nk->port[pd2.didx], NULL,
5396 pd2.ip_sum, icmpsum,
5397 pd->ip_sum, 0, AF_INET);
5400 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
5401 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5402 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
5404 m_copyback(m, off, ICMP_MINLEN, pd->hdr.icmp);
5405 m_copyback(m, ipoff2, sizeof(h2), &h2);
5406 m_copyback(m, off2, ICMP_MINLEN, &iih);
5414 case IPPROTO_ICMPV6: {
5415 struct icmp6_hdr iih;
5417 if (!pf_pull_hdr(m, off2, &iih,
5418 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
5419 DPFPRINTF(PF_DEBUG_MISC,
5420 ("pf: ICMP error message too short "
5426 key.proto = IPPROTO_ICMPV6;
5427 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5428 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5429 key.port[0] = key.port[1] = iih.icmp6_id;
5432 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
5434 STATE_LOOKUP(kif, &key, direction, *state, m);
5437 /* translate source/destination address, if necessary */
5438 if ((*state)->key[PF_SK_WIRE] !=
5439 (*state)->key[PF_SK_STACK]) {
5440 struct pf_state_key *nk =
5441 (*state)->key[pd->didx];
5443 if (PF_ANEQ(pd2.src,
5444 &nk->addr[pd2.sidx], pd2.af) ||
5445 nk->port[pd2.sidx] != iih.icmp6_id)
5446 pf_change_icmp(pd2.src, &iih.icmp6_id,
5447 daddr, &nk->addr[pd2.sidx],
5448 nk->port[pd2.sidx], NULL,
5449 pd2.ip_sum, icmpsum,
5450 pd->ip_sum, 0, AF_INET6);
5452 if (PF_ANEQ(pd2.dst,
5453 &nk->addr[pd2.didx], pd2.af) ||
5454 nk->port[pd2.didx] != iih.icmp6_id)
5455 pf_change_icmp(pd2.dst, &iih.icmp6_id,
5456 NULL, /* XXX Inbound NAT? */
5457 &nk->addr[pd2.didx],
5458 nk->port[pd2.didx], NULL,
5459 pd2.ip_sum, icmpsum,
5460 pd->ip_sum, 0, AF_INET6);
5463 m_copyback(m, off, sizeof(struct icmp6_hdr),
5464 (caddr_t)pd->hdr.icmp6);
5465 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
5466 m_copyback(m, off2, sizeof(struct icmp6_hdr),
5469 m_copyback(m, off, sizeof(struct icmp6_hdr),
5471 m_copyback(m, ipoff2, sizeof(h2_6), &h2_6);
5472 m_copyback(m, off2, sizeof(struct icmp6_hdr),
5482 key.proto = pd2.proto;
5483 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5484 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5485 key.port[0] = key.port[1] = 0;
5488 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
5490 STATE_LOOKUP(kif, &key, direction, *state, m);
5493 /* translate source/destination address, if necessary */
5494 if ((*state)->key[PF_SK_WIRE] !=
5495 (*state)->key[PF_SK_STACK]) {
5496 struct pf_state_key *nk =
5497 (*state)->key[pd->didx];
5499 if (PF_ANEQ(pd2.src,
5500 &nk->addr[pd2.sidx], pd2.af))
5501 pf_change_icmp(pd2.src, NULL, daddr,
5502 &nk->addr[pd2.sidx], 0, NULL,
5503 pd2.ip_sum, icmpsum,
5504 pd->ip_sum, 0, pd2.af);
5506 if (PF_ANEQ(pd2.dst,
5507 &nk->addr[pd2.didx], pd2.af))
5508 pf_change_icmp(pd2.src, NULL,
5509 NULL, /* XXX Inbound NAT? */
5510 &nk->addr[pd2.didx], 0, NULL,
5511 pd2.ip_sum, icmpsum,
5512 pd->ip_sum, 0, pd2.af);
5518 m_copyback(m, off, ICMP_MINLEN,
5519 (caddr_t)pd->hdr.icmp);
5520 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5522 m_copyback(m, off, ICMP_MINLEN,
5524 m_copyback(m, ipoff2, sizeof(h2), &h2);
5531 sizeof(struct icmp6_hdr),
5536 m_copyback(m, ipoff2, sizeof(h2_6),
5553 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
5554 struct mbuf *m, struct pf_pdesc *pd)
5556 struct pf_state_peer *src, *dst;
5557 struct pf_state_key_cmp key;
5560 key.proto = pd->proto;
5561 if (direction == PF_IN) {
5562 PF_ACPY(&key.addr[0], pd->src, key.af);
5563 PF_ACPY(&key.addr[1], pd->dst, key.af);
5564 key.port[0] = key.port[1] = 0;
5566 PF_ACPY(&key.addr[1], pd->src, key.af);
5567 PF_ACPY(&key.addr[0], pd->dst, key.af);
5568 key.port[1] = key.port[0] = 0;
5572 STATE_LOOKUP(kif, &key, direction, *state, m, pd->pf_mtag);
5574 STATE_LOOKUP(kif, &key, direction, *state, m);
5577 if (direction == (*state)->direction) {
5578 src = &(*state)->src;
5579 dst = &(*state)->dst;
5581 src = &(*state)->dst;
5582 dst = &(*state)->src;
5586 if (src->state < PFOTHERS_SINGLE)
5587 src->state = PFOTHERS_SINGLE;
5588 if (dst->state == PFOTHERS_SINGLE)
5589 dst->state = PFOTHERS_MULTIPLE;
5591 /* update expire time */
5592 (*state)->expire = time_second;
5593 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
5594 (*state)->timeout = PFTM_OTHER_MULTIPLE;
5596 (*state)->timeout = PFTM_OTHER_SINGLE;
5598 /* translate source/destination address, if necessary */
5599 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5600 struct pf_state_key *nk = (*state)->key[pd->didx];
5603 KASSERT(nk, ("%s: nk is null", __FUNCTION__));
5604 KASSERT(pd, ("%s: pd is null", __FUNCTION__));
5605 KASSERT(pd->src, ("%s: pd->src is null", __FUNCTION__));
5606 KASSERT(pd->dst, ("%s: pd->dst is null", __FUNCTION__));
5616 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5617 pf_change_a(&pd->src->v4.s_addr,
5619 nk->addr[pd->sidx].v4.s_addr,
5623 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5624 pf_change_a(&pd->dst->v4.s_addr,
5626 nk->addr[pd->didx].v4.s_addr,
5633 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5634 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
5636 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5637 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
5645 * ipoff and off are measured from the start of the mbuf chain.
5646 * h must be at "ipoff" on the mbuf chain.
5649 pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
5650 u_short *actionp, u_short *reasonp, sa_family_t af)
5655 struct ip *h = mtod(m, struct ip *);
5656 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
5660 ACTION_SET(actionp, PF_PASS);
5662 ACTION_SET(actionp, PF_DROP);
5663 REASON_SET(reasonp, PFRES_FRAG);
5667 if (m->m_pkthdr.len < off + len ||
5668 ntohs(h->ip_len) < off + len) {
5669 ACTION_SET(actionp, PF_DROP);
5670 REASON_SET(reasonp, PFRES_SHORT);
5678 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
5680 if (m->m_pkthdr.len < off + len ||
5681 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
5682 (unsigned)(off + len)) {
5683 ACTION_SET(actionp, PF_DROP);
5684 REASON_SET(reasonp, PFRES_SHORT);
5691 m_copydata(m, off, len, p);
5696 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif)
5700 struct radix_node_head *rnh;
5703 struct sockaddr_in *dst;
5707 extern int ipmultipath;
5711 extern int ip6_multipath;
5713 struct sockaddr_in6 *dst6;
5714 struct route_in6 ro;
5718 struct radix_node *rn;
5725 /* XXX: stick to table 0 for now */
5726 rnh = rt_tables_get_rnh(0, af);
5727 if (rnh != NULL && rn_mpath_capable(rnh))
5731 bzero(&ro, sizeof(ro));
5734 dst = satosin(&ro.ro_dst);
5735 dst->sin_family = AF_INET;
5736 dst->sin_len = sizeof(*dst);
5737 dst->sin_addr = addr->v4;
5746 * Skip check for addresses with embedded interface scope,
5747 * as they would always match anyway.
5749 if (IN6_IS_SCOPE_EMBED(&addr->v6))
5751 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
5752 dst6->sin6_family = AF_INET6;
5753 dst6->sin6_len = sizeof(*dst6);
5754 dst6->sin6_addr = addr->v6;
5765 /* Skip checks for ipsec interfaces */
5766 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
5770 /* XXX MRT not always INET */ /* stick with table 0 though */
5773 in_rtalloc_ign((struct route *)&ro, 0, 0);
5776 rtalloc_ign((struct route *)&ro, 0);
5777 #else /* ! __FreeBSD__ */
5778 rtalloc_noclone((struct route *)&ro, NO_CLONING);
5781 if (ro.ro_rt != NULL) {
5782 /* No interface given, this is a no-route check */
5786 if (kif->pfik_ifp == NULL) {
5791 /* Perform uRPF check if passed input interface */
5793 rn = (struct radix_node *)ro.ro_rt;
5795 rt = (struct rtentry *)rn;
5796 #ifndef __FreeBSD__ /* CARPDEV */
5797 if (rt->rt_ifp->if_type == IFT_CARP)
5798 ifp = rt->rt_ifp->if_carpdev;
5803 if (kif->pfik_ifp == ifp)
5807 rn = rn_mpath_next(rn);
5810 rn = rn_mpath_next(rn, 0);
5812 } while (check_mpath == 1 && rn != NULL && ret == 0);
5816 if (ro.ro_rt != NULL)
5822 pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw)
5824 struct sockaddr_in *dst;
5826 struct sockaddr_in6 *dst6;
5827 struct route_in6 ro;
5833 bzero(&ro, sizeof(ro));
5836 dst = satosin(&ro.ro_dst);
5837 dst->sin_family = AF_INET;
5838 dst->sin_len = sizeof(*dst);
5839 dst->sin_addr = addr->v4;
5843 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
5844 dst6->sin6_family = AF_INET6;
5845 dst6->sin6_len = sizeof(*dst6);
5846 dst6->sin6_addr = addr->v6;
5854 # ifdef RTF_PRCLONING
5855 rtalloc_ign((struct route *)&ro, (RTF_CLONING|RTF_PRCLONING));
5856 # else /* !RTF_PRCLONING */
5859 in_rtalloc_ign((struct route *)&ro, 0, 0);
5862 rtalloc_ign((struct route *)&ro, 0);
5864 #else /* ! __FreeBSD__ */
5865 rtalloc_noclone((struct route *)&ro, NO_CLONING);
5868 if (ro.ro_rt != NULL) {
5870 /* XXX_IMPORT: later */
5872 if (ro.ro_rt->rt_labelid == aw->v.rtlabel)
5883 pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5884 struct pf_state *s, struct pf_pdesc *pd)
5886 struct mbuf *m0, *m1;
5887 struct route iproute;
5888 struct route *ro = NULL;
5889 struct sockaddr_in *dst;
5891 struct ifnet *ifp = NULL;
5892 struct pf_addr naddr;
5893 struct pf_src_node *sn = NULL;
5902 if (m == NULL || *m == NULL || r == NULL ||
5903 (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
5904 panic("pf_route: invalid parameters");
5907 if (pd->pf_mtag->routed++ > 3) {
5909 if ((*m)->m_pkthdr.pf.routed++ > 3) {
5916 if (r->rt == PF_DUPTO) {
5918 if ((m0 = m_dup(*m, M_DONTWAIT)) == NULL)
5920 if ((m0 = m_copym2(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)
5924 if ((r->rt == PF_REPLYTO) == (r->direction == dir))
5929 if (m0->m_len < sizeof(struct ip)) {
5930 DPFPRINTF(PF_DEBUG_URGENT,
5931 ("pf_route: m0->m_len < sizeof(struct ip)\n"));
5935 ip = mtod(m0, struct ip *);
5938 bzero((caddr_t)ro, sizeof(*ro));
5939 dst = satosin(&ro->ro_dst);
5940 dst->sin_family = AF_INET;
5941 dst->sin_len = sizeof(*dst);
5942 dst->sin_addr = ip->ip_dst;
5944 if (r->rt == PF_FASTROUTE) {
5950 if (ro->ro_rt == 0) {
5952 KMOD_IPSTAT_INC(ips_noroute);
5954 ipstat.ips_noroute++;
5959 ifp = ro->ro_rt->rt_ifp;
5960 ro->ro_rt->rt_use++;
5962 if (ro->ro_rt->rt_flags & RTF_GATEWAY)
5963 dst = satosin(ro->ro_rt->rt_gateway);
5965 if (TAILQ_EMPTY(&r->rpool.list)) {
5966 DPFPRINTF(PF_DEBUG_URGENT,
5967 ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n"));
5971 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
5973 if (!PF_AZERO(&naddr, AF_INET))
5974 dst->sin_addr.s_addr = naddr.v4.s_addr;
5975 ifp = r->rpool.cur->kif ?
5976 r->rpool.cur->kif->pfik_ifp : NULL;
5978 if (!PF_AZERO(&s->rt_addr, AF_INET))
5979 dst->sin_addr.s_addr =
5980 s->rt_addr.v4.s_addr;
5981 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5990 if (pf_test(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) {
5993 } else if (m0 == NULL) {
5999 if (pf_test(PF_OUT, ifp, &m0, NULL) != PF_PASS)
6001 else if (m0 == NULL)
6004 if (m0->m_len < sizeof(struct ip)) {
6005 DPFPRINTF(PF_DEBUG_URGENT,
6006 ("pf_route: m0->m_len < sizeof(struct ip)\n"));
6009 ip = mtod(m0, struct ip *);
6013 /* Copied from FreeBSD 5.1-CURRENT ip_output. */
6014 m0->m_pkthdr.csum_flags |= CSUM_IP;
6015 sw_csum = m0->m_pkthdr.csum_flags & ~ifp->if_hwassist;
6016 if (sw_csum & CSUM_DELAY_DATA) {
6018 * XXX: in_delayed_cksum assumes HBO for ip->ip_len (at least)
6021 NTOHS(ip->ip_off); /* XXX: needed? */
6022 in_delayed_cksum(m0);
6025 sw_csum &= ~CSUM_DELAY_DATA;
6027 m0->m_pkthdr.csum_flags &= ifp->if_hwassist;
6029 if (ntohs(ip->ip_len) <= ifp->if_mtu ||
6030 (ifp->if_hwassist & CSUM_FRAGMENT &&
6031 ((ip->ip_off & htons(IP_DF)) == 0))) {
6033 * ip->ip_len = htons(ip->ip_len);
6034 * ip->ip_off = htons(ip->ip_off);
6037 if (sw_csum & CSUM_DELAY_IP) {
6039 if (ip->ip_v == IPVERSION &&
6040 (ip->ip_hl << 2) == sizeof(*ip)) {
6041 ip->ip_sum = in_cksum_hdr(ip);
6043 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
6047 error = (*ifp->if_output)(ifp, m0, sintosa(dst), ro);
6052 /* Copied from ip_output. */
6055 * If deferred crypto processing is needed, check that the
6056 * interface supports it.
6058 if ((mtag = m_tag_find(m0, PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED, NULL))
6059 != NULL && (ifp->if_capabilities & IFCAP_IPSEC) == 0) {
6060 /* Notify IPsec to do its own crypto. */
6061 ipsp_skipcrypto_unmark((struct tdb_ident *)(mtag + 1));
6066 /* Catch routing changes wrt. hardware checksumming for TCP or UDP. */
6067 if (m0->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT) {
6068 if (!(ifp->if_capabilities & IFCAP_CSUM_TCPv4) ||
6069 ifp->if_bridge != NULL) {
6070 in_delayed_cksum(m0);
6071 m0->m_pkthdr.csum_flags &= ~M_TCPV4_CSUM_OUT; /* Clr */
6073 } else if (m0->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT) {
6074 if (!(ifp->if_capabilities & IFCAP_CSUM_UDPv4) ||
6075 ifp->if_bridge != NULL) {
6076 in_delayed_cksum(m0);
6077 m0->m_pkthdr.csum_flags &= ~M_UDPV4_CSUM_OUT; /* Clr */
6081 if (ntohs(ip->ip_len) <= ifp->if_mtu) {
6083 if ((ifp->if_capabilities & IFCAP_CSUM_IPv4) &&
6084 ifp->if_bridge == NULL) {
6085 m0->m_pkthdr.csum_flags |= M_IPV4_CSUM_OUT;
6087 KMOD_IPSTAT_INC(ips_outhwcsum);
6089 ipstat.ips_outhwcsum++;
6092 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
6093 /* Update relevant hardware checksum stats for TCP/UDP */
6094 if (m0->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
6095 KMOD_TCPSTAT_INC(tcps_outhwcsum);
6096 else if (m0->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
6097 KMOD_UDPSTAT_INC(udps_outhwcsum);
6098 error = (*ifp->if_output)(ifp, m0, sintosa(dst), NULL);
6104 * Too large for interface; fragment if possible.
6105 * Must be able to put at least 8 bytes per fragment.
6107 if (ip->ip_off & htons(IP_DF)) {
6109 KMOD_IPSTAT_INC(ips_cantfrag);
6111 ipstat.ips_cantfrag++;
6113 if (r->rt != PF_DUPTO) {
6115 /* icmp_error() expects host byte ordering */
6119 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
6123 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
6134 * XXX: is cheaper + less error prone than own function
6138 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist, sw_csum);
6140 error = ip_fragment(m0, ifp, ifp->if_mtu);
6143 #ifndef __FreeBSD__ /* ip_fragment does not do m_freem() on FreeBSD */
6149 for (m0 = m1; m0; m0 = m1) {
6155 error = (*ifp->if_output)(ifp, m0, sintosa(dst),
6161 error = (*ifp->if_output)(ifp, m0, sintosa(dst),
6170 KMOD_IPSTAT_INC(ips_fragmented);
6172 ipstat.ips_fragmented++;
6176 if (r->rt != PF_DUPTO)
6178 if (ro == &iproute && ro->ro_rt)
6190 pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
6191 struct pf_state *s, struct pf_pdesc *pd)
6194 struct route_in6 ip6route;
6195 struct route_in6 *ro;
6196 struct sockaddr_in6 *dst;
6197 struct ip6_hdr *ip6;
6198 struct ifnet *ifp = NULL;
6199 struct pf_addr naddr;
6200 struct pf_src_node *sn = NULL;
6202 if (m == NULL || *m == NULL || r == NULL ||
6203 (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
6204 panic("pf_route6: invalid parameters");
6207 if (pd->pf_mtag->routed++ > 3) {
6209 if ((*m)->m_pkthdr.pf.routed++ > 3) {
6216 if (r->rt == PF_DUPTO) {
6218 if ((m0 = m_dup(*m, M_DONTWAIT)) == NULL)
6220 if ((m0 = m_copym2(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)
6224 if ((r->rt == PF_REPLYTO) == (r->direction == dir))
6229 if (m0->m_len < sizeof(struct ip6_hdr)) {
6230 DPFPRINTF(PF_DEBUG_URGENT,
6231 ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n"));
6234 ip6 = mtod(m0, struct ip6_hdr *);
6237 bzero((caddr_t)ro, sizeof(*ro));
6238 dst = (struct sockaddr_in6 *)&ro->ro_dst;
6239 dst->sin6_family = AF_INET6;
6240 dst->sin6_len = sizeof(*dst);
6241 dst->sin6_addr = ip6->ip6_dst;
6243 /* Cheat. XXX why only in the v6 case??? */
6244 if (r->rt == PF_FASTROUTE) {
6246 m0->m_flags |= M_SKIP_FIREWALL;
6248 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
6250 m0->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
6251 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
6256 if (TAILQ_EMPTY(&r->rpool.list)) {
6257 DPFPRINTF(PF_DEBUG_URGENT,
6258 ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n"));
6262 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
6264 if (!PF_AZERO(&naddr, AF_INET6))
6265 PF_ACPY((struct pf_addr *)&dst->sin6_addr,
6267 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
6269 if (!PF_AZERO(&s->rt_addr, AF_INET6))
6270 PF_ACPY((struct pf_addr *)&dst->sin6_addr,
6271 &s->rt_addr, AF_INET6);
6272 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
6280 if (pf_test6(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) {
6283 } else if (m0 == NULL) {
6289 if (pf_test6(PF_OUT, ifp, &m0, NULL) != PF_PASS)
6291 else if (m0 == NULL)
6294 if (m0->m_len < sizeof(struct ip6_hdr)) {
6295 DPFPRINTF(PF_DEBUG_URGENT,
6296 ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n"));
6299 ip6 = mtod(m0, struct ip6_hdr *);
6303 * If the packet is too large for the outgoing interface,
6304 * send back an icmp6 error.
6306 if (IN6_IS_SCOPE_EMBED(&dst->sin6_addr))
6307 dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
6308 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) {
6312 nd6_output(ifp, ifp, m0, dst, NULL);
6317 in6_ifstat_inc(ifp, ifs6_in_toobig);
6319 if (r->rt != PF_DUPTO) {
6321 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
6325 if (r->rt != PF_DUPTO)
6326 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
6333 if (r->rt != PF_DUPTO)
6345 * FreeBSD supports cksum offloads for the following drivers.
6346 * em(4), fxp(4), ixgb(4), lge(4), ndis(4), nge(4), re(4),
6347 * ti(4), txp(4), xl(4)
6349 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR :
6350 * network driver performed cksum including pseudo header, need to verify
6353 * network driver performed cksum, needs to additional pseudo header
6354 * cksum computation with partial csum_data(i.e. lack of H/W support for
6355 * pseudo header, for instance hme(4), sk(4) and possibly gem(4))
6357 * After validating the cksum of packet, set both flag CSUM_DATA_VALID and
6358 * CSUM_PSEUDO_HDR in order to avoid recomputation of the cksum in upper
6360 * Also, set csum_data to 0xffff to force cksum validation.
6363 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af)
6369 if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
6371 if (m->m_pkthdr.len < off + len)
6376 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
6377 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
6378 sum = m->m_pkthdr.csum_data;
6380 ip = mtod(m, struct ip *);
6381 sum = in_pseudo(ip->ip_src.s_addr,
6382 ip->ip_dst.s_addr, htonl((u_short)len +
6383 m->m_pkthdr.csum_data + IPPROTO_TCP));
6390 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
6391 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
6392 sum = m->m_pkthdr.csum_data;
6394 ip = mtod(m, struct ip *);
6395 sum = in_pseudo(ip->ip_src.s_addr,
6396 ip->ip_dst.s_addr, htonl((u_short)len +
6397 m->m_pkthdr.csum_data + IPPROTO_UDP));
6405 case IPPROTO_ICMPV6:
6415 if (p == IPPROTO_ICMP) {
6420 sum = in_cksum(m, len);
6424 if (m->m_len < sizeof(struct ip))
6426 sum = in4_cksum(m, p, off, len);
6431 if (m->m_len < sizeof(struct ip6_hdr))
6433 sum = in6_cksum(m, p, off, len);
6444 KMOD_TCPSTAT_INC(tcps_rcvbadsum);
6449 KMOD_UDPSTAT_INC(udps_badsum);
6455 KMOD_ICMPSTAT_INC(icps_checksum);
6460 case IPPROTO_ICMPV6:
6462 KMOD_ICMP6STAT_INC(icp6s_checksum);
6469 if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
6470 m->m_pkthdr.csum_flags |=
6471 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
6472 m->m_pkthdr.csum_data = 0xffff;
6477 #else /* !__FreeBSD__ */
6480 * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag
6481 * off is the offset where the protocol header starts
6482 * len is the total length of protocol header plus payload
6483 * returns 0 when the checksum is valid, otherwise returns 1.
6486 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p,
6489 u_int16_t flag_ok, flag_bad;
6494 flag_ok = M_TCP_CSUM_IN_OK;
6495 flag_bad = M_TCP_CSUM_IN_BAD;
6498 flag_ok = M_UDP_CSUM_IN_OK;
6499 flag_bad = M_UDP_CSUM_IN_BAD;
6503 case IPPROTO_ICMPV6:
6505 flag_ok = flag_bad = 0;
6510 if (m->m_pkthdr.csum_flags & flag_ok)
6512 if (m->m_pkthdr.csum_flags & flag_bad)
6514 if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
6516 if (m->m_pkthdr.len < off + len)
6521 if (p == IPPROTO_ICMP) {
6526 sum = in_cksum(m, len);
6530 if (m->m_len < sizeof(struct ip))
6532 sum = in4_cksum(m, p, off, len);
6538 if (m->m_len < sizeof(struct ip6_hdr))
6540 sum = in6_cksum(m, p, off, len);
6547 m->m_pkthdr.csum_flags |= flag_bad;
6550 KMOD_TCPSTAT_INC(tcps_rcvbadsum);
6553 KMOD_UDPSTAT_INC(udps_badsum);
6557 KMOD_ICMPSTAT_INC(icps_checksum);
6561 case IPPROTO_ICMPV6:
6562 KMOD_ICMP6STAT_INC(icp6s_checksum);
6568 m->m_pkthdr.csum_flags |= flag_ok;
6575 pf_find_divert(struct mbuf *m)
6579 if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL)
6582 return ((struct pf_divert *)(mtag + 1));
6586 pf_get_divert(struct mbuf *m)
6590 if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) {
6591 mtag = m_tag_get(PACKET_TAG_PF_DIVERT, sizeof(struct pf_divert),
6595 bzero(mtag + 1, sizeof(struct pf_divert));
6596 m_tag_prepend(m, mtag);
6599 return ((struct pf_divert *)(mtag + 1));
6606 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
6607 struct ether_header *eh, struct inpcb *inp)
6609 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
6610 struct ether_header *eh)
6613 struct pfi_kif *kif;
6614 u_short action, reason = 0, log = 0;
6615 struct mbuf *m = *m0;
6617 struct ip *h = NULL;
6618 struct m_tag *ipfwtag;
6619 struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
6622 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
6624 struct pf_state *s = NULL;
6625 struct pf_ruleset *ruleset = NULL;
6627 int off, dirndx, pqid = 0;
6631 if (!V_pf_status.running)
6637 if (!pf_status.running)
6641 memset(&pd, 0, sizeof(pd));
6643 if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
6645 DPFPRINTF(PF_DEBUG_URGENT,
6646 ("pf_test: pf_get_mtag returned NULL\n"));
6651 if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
6652 kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif;
6655 kif = (struct pfi_kif *)ifp->if_pf_kif;
6661 DPFPRINTF(PF_DEBUG_URGENT,
6662 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
6665 if (kif->pfik_flags & PFI_IFLAG_SKIP)
6679 if ((m->m_flags & M_PKTHDR) == 0)
6680 panic("non-M_PKTHDR is passed to pf_test");
6681 #endif /* DIAGNOSTIC */
6684 if (m->m_pkthdr.len < (int)sizeof(*h)) {
6686 REASON_SET(&reason, PFRES_SHORT);
6692 if (m->m_flags & M_SKIP_FIREWALL) {
6697 if (m->m_pkthdr.pf.flags & PF_TAG_GENERATED)
6702 if (ip_divert_ptr != NULL &&
6703 ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
6704 struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1);
6705 if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) {
6706 pd.pf_mtag->flags |= PF_PACKET_LOOPED;
6707 m_tag_delete(m, ipfwtag);
6709 if (pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) {
6710 m->m_flags |= M_FASTFWD_OURS;
6711 pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
6715 /* We do IP header normalization and packet reassembly here */
6716 if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
6720 m = *m0; /* pf_normalize messes with m0 */
6721 h = mtod(m, struct ip *);
6723 off = h->ip_hl << 2;
6724 if (off < (int)sizeof(*h)) {
6726 REASON_SET(&reason, PFRES_SHORT);
6731 pd.src = (struct pf_addr *)&h->ip_src;
6732 pd.dst = (struct pf_addr *)&h->ip_dst;
6733 pd.sport = pd.dport = NULL;
6734 pd.ip_sum = &h->ip_sum;
6735 pd.proto_sum = NULL;
6738 pd.sidx = (dir == PF_IN) ? 0 : 1;
6739 pd.didx = (dir == PF_IN) ? 1 : 0;
6742 pd.tot_len = ntohs(h->ip_len);
6745 /* handle fragments that didn't get reassembled by normalization */
6746 if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
6747 action = pf_test_fragment(&r, dir, kif, m, h,
6758 if (!pf_pull_hdr(m, off, &th, sizeof(th),
6759 &action, &reason, AF_INET)) {
6760 log = action != PF_PASS;
6763 pd.p_len = pd.tot_len - off - (th.th_off << 2);
6764 if ((th.th_flags & TH_ACK) && pd.p_len == 0)
6766 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6767 if (action == PF_DROP)
6769 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6771 if (action == PF_PASS) {
6774 if (pfsync_update_state_ptr != NULL)
6775 pfsync_update_state_ptr(s);
6777 pfsync_update_state(s);
6779 #endif /* NPFSYNC */
6783 } else if (s == NULL)
6785 action = pf_test_rule(&r, &s, dir, kif,
6786 m, off, h, &pd, &a, &ruleset, NULL, inp);
6788 action = pf_test_rule(&r, &s, dir, kif,
6789 m, off, h, &pd, &a, &ruleset, &ipintrq);
6798 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
6799 &action, &reason, AF_INET)) {
6800 log = action != PF_PASS;
6803 if (uh.uh_dport == 0 ||
6804 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
6805 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
6807 REASON_SET(&reason, PFRES_SHORT);
6810 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6811 if (action == PF_PASS) {
6814 if (pfsync_update_state_ptr != NULL)
6815 pfsync_update_state_ptr(s);
6817 pfsync_update_state(s);
6819 #endif /* NPFSYNC */
6823 } else if (s == NULL)
6825 action = pf_test_rule(&r, &s, dir, kif,
6826 m, off, h, &pd, &a, &ruleset, NULL, inp);
6828 action = pf_test_rule(&r, &s, dir, kif,
6829 m, off, h, &pd, &a, &ruleset, &ipintrq);
6834 case IPPROTO_ICMP: {
6838 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
6839 &action, &reason, AF_INET)) {
6840 log = action != PF_PASS;
6843 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
6845 if (action == PF_PASS) {
6848 if (pfsync_update_state_ptr != NULL)
6849 pfsync_update_state_ptr(s);
6851 pfsync_update_state(s);
6853 #endif /* NPFSYNC */
6857 } else if (s == NULL)
6859 action = pf_test_rule(&r, &s, dir, kif,
6860 m, off, h, &pd, &a, &ruleset, NULL, inp);
6862 action = pf_test_rule(&r, &s, dir, kif,
6863 m, off, h, &pd, &a, &ruleset, &ipintrq);
6869 case IPPROTO_ICMPV6: {
6871 DPFPRINTF(PF_DEBUG_MISC,
6872 ("pf: dropping IPv4 packet with ICMPv6 payload\n"));
6878 action = pf_test_state_other(&s, dir, kif, m, &pd);
6879 if (action == PF_PASS) {
6882 if (pfsync_update_state_ptr != NULL)
6883 pfsync_update_state_ptr(s);
6885 pfsync_update_state(s);
6887 #endif /* NPFSYNC */
6891 } else if (s == NULL)
6893 action = pf_test_rule(&r, &s, dir, kif, m, off, h,
6894 &pd, &a, &ruleset, NULL, inp);
6896 action = pf_test_rule(&r, &s, dir, kif, m, off, h,
6897 &pd, &a, &ruleset, &ipintrq);
6903 if (action == PF_PASS && h->ip_hl > 5 &&
6904 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6906 REASON_SET(&reason, PFRES_IPOPTIONS);
6908 DPFPRINTF(PF_DEBUG_MISC,
6909 ("pf: dropping packet with ip options\n"));
6912 if ((s && s->tag) || r->rtableid)
6914 pf_tag_packet(m, s ? s->tag : 0, r->rtableid, pd.pf_mtag);
6916 pf_tag_packet(m, s ? s->tag : 0, r->rtableid);
6919 if (dir == PF_IN && s && s->key[PF_SK_STACK])
6921 pd.pf_mtag->statekey = s->key[PF_SK_STACK];
6923 m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
6927 if (action == PF_PASS && r->qid) {
6929 if (pqid || (pd.tos & IPTOS_LOWDELAY))
6930 pd.pf_mtag->qid = r->pqid;
6932 pd.pf_mtag->qid = r->qid;
6933 /* add hints for ecn */
6934 pd.pf_mtag->hdr = h;
6937 if (pqid || (pd.tos & IPTOS_LOWDELAY))
6938 m->m_pkthdr.pf.qid = r->pqid;
6940 m->m_pkthdr.pf.qid = r->qid;
6941 /* add hints for ecn */
6942 m->m_pkthdr.pf.hdr = h;
6948 * connections redirected to loopback should not match sockets
6949 * bound specifically to loopback due to security implications,
6950 * see tcp_input() and in_pcblookup_listen().
6952 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6953 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6954 (s->nat_rule.ptr->action == PF_RDR ||
6955 s->nat_rule.ptr->action == PF_BINAT) &&
6956 (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
6958 m->m_flags |= M_SKIP_FIREWALL;
6960 m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;
6964 if (action == PF_PASS && r->divert.port &&
6965 ip_divert_ptr != NULL && !PACKET_LOOPED()) {
6967 ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0,
6968 sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
6969 if (ipfwtag != NULL) {
6970 ((struct ipfw_rule_ref *)(ipfwtag+1))->info =
6971 ntohs(r->divert.port);
6972 ((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir;
6974 m_tag_prepend(m, ipfwtag);
6978 if (m->m_flags & M_FASTFWD_OURS) {
6979 pd.pf_mtag->flags |= PF_FASTFWD_OURS_PRESENT;
6980 m->m_flags &= ~M_FASTFWD_OURS;
6984 dir == PF_IN ? DIR_IN : DIR_OUT);
6988 /* XXX: ipfw has the same behaviour! */
6990 REASON_SET(&reason, PFRES_MEMORY);
6992 DPFPRINTF(PF_DEBUG_MISC,
6993 ("pf: failed to allocate divert tag\n"));
6997 if (dir == PF_IN && action == PF_PASS && r->divert.port) {
6998 struct pf_divert *divert;
7000 if ((divert = pf_get_divert(m))) {
7001 m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
7002 divert->port = r->divert.port;
7003 divert->addr.ipv4 = r->divert.addr.v4;
7011 if (s != NULL && s->nat_rule.ptr != NULL &&
7012 s->nat_rule.ptr->log & PF_LOG_ALL)
7013 lr = s->nat_rule.ptr;
7016 PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a, ruleset,
7020 kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
7021 kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
7023 if (action == PF_PASS || r->action == PF_DROP) {
7024 dirndx = (dir == PF_OUT);
7025 r->packets[dirndx]++;
7026 r->bytes[dirndx] += pd.tot_len;
7028 a->packets[dirndx]++;
7029 a->bytes[dirndx] += pd.tot_len;
7032 if (s->nat_rule.ptr != NULL) {
7033 s->nat_rule.ptr->packets[dirndx]++;
7034 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
7036 if (s->src_node != NULL) {
7037 s->src_node->packets[dirndx]++;
7038 s->src_node->bytes[dirndx] += pd.tot_len;
7040 if (s->nat_src_node != NULL) {
7041 s->nat_src_node->packets[dirndx]++;
7042 s->nat_src_node->bytes[dirndx] += pd.tot_len;
7044 dirndx = (dir == s->direction) ? 0 : 1;
7045 s->packets[dirndx]++;
7046 s->bytes[dirndx] += pd.tot_len;
7049 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
7051 if (nr != NULL && r == &V_pf_default_rule)
7053 if (nr != NULL && r == &pf_default_rule)
7056 if (tr->src.addr.type == PF_ADDR_TABLE)
7057 pfr_update_stats(tr->src.addr.p.tbl,
7058 (s == NULL) ? pd.src :
7059 &s->key[(s->direction == PF_IN)]->
7060 addr[(s->direction == PF_OUT)],
7061 pd.af, pd.tot_len, dir == PF_OUT,
7062 r->action == PF_PASS, tr->src.neg);
7063 if (tr->dst.addr.type == PF_ADDR_TABLE)
7064 pfr_update_stats(tr->dst.addr.p.tbl,
7065 (s == NULL) ? pd.dst :
7066 &s->key[(s->direction == PF_IN)]->
7067 addr[(s->direction == PF_IN)],
7068 pd.af, pd.tot_len, dir == PF_OUT,
7069 r->action == PF_PASS, tr->dst.neg);
7073 case PF_SYNPROXY_DROP:
7080 /* pf_route can free the mbuf causing *m0 to become NULL */
7082 pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);
7095 pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
7096 struct ether_header *eh, struct inpcb *inp)
7098 pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
7099 struct ether_header *eh)
7102 struct pfi_kif *kif;
7103 u_short action, reason = 0, log = 0;
7104 struct mbuf *m = *m0, *n = NULL;
7106 struct ip6_hdr *h = NULL;
7107 struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
7110 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
7112 struct pf_state *s = NULL;
7113 struct pf_ruleset *ruleset = NULL;
7115 int off, terminal = 0, dirndx, rh_cnt = 0;
7119 if (!V_pf_status.running) {
7124 if (!pf_status.running)
7128 memset(&pd, 0, sizeof(pd));
7130 if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
7132 DPFPRINTF(PF_DEBUG_URGENT,
7133 ("pf_test: pf_get_mtag returned NULL\n"));
7138 if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
7139 kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif;
7142 kif = (struct pfi_kif *)ifp->if_pf_kif;
7148 DPFPRINTF(PF_DEBUG_URGENT,
7149 ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
7152 if (kif->pfik_flags & PFI_IFLAG_SKIP)
7166 if ((m->m_flags & M_PKTHDR) == 0)
7167 panic("non-M_PKTHDR is passed to pf_test6");
7168 #endif /* DIAGNOSTIC */
7171 if (m->m_pkthdr.len < (int)sizeof(*h)) {
7173 REASON_SET(&reason, PFRES_SHORT);
7179 if (pd.pf_mtag->flags & PF_TAG_GENERATED) {
7182 if (m->m_pkthdr.pf.flags & PF_TAG_GENERATED)
7189 /* We do IP header normalization and packet reassembly here */
7190 if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
7194 m = *m0; /* pf_normalize messes with m0 */
7195 h = mtod(m, struct ip6_hdr *);
7199 * we do not support jumbogram yet. if we keep going, zero ip6_plen
7200 * will do something bad, so drop the packet for now.
7202 if (htons(h->ip6_plen) == 0) {
7204 REASON_SET(&reason, PFRES_NORM); /*XXX*/
7209 pd.src = (struct pf_addr *)&h->ip6_src;
7210 pd.dst = (struct pf_addr *)&h->ip6_dst;
7211 pd.sport = pd.dport = NULL;
7213 pd.proto_sum = NULL;
7215 pd.sidx = (dir == PF_IN) ? 0 : 1;
7216 pd.didx = (dir == PF_IN) ? 1 : 0;
7219 pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
7222 off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
7223 pd.proto = h->ip6_nxt;
7226 case IPPROTO_FRAGMENT:
7227 action = pf_test_fragment(&r, dir, kif, m, h,
7229 if (action == PF_DROP)
7230 REASON_SET(&reason, PFRES_FRAG);
7232 case IPPROTO_ROUTING: {
7233 struct ip6_rthdr rthdr;
7236 DPFPRINTF(PF_DEBUG_MISC,
7237 ("pf: IPv6 more than one rthdr\n"));
7239 REASON_SET(&reason, PFRES_IPOPTIONS);
7243 if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
7245 DPFPRINTF(PF_DEBUG_MISC,
7246 ("pf: IPv6 short rthdr\n"));
7248 REASON_SET(&reason, PFRES_SHORT);
7252 if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
7253 DPFPRINTF(PF_DEBUG_MISC,
7254 ("pf: IPv6 rthdr0\n"));
7256 REASON_SET(&reason, PFRES_IPOPTIONS);
7263 case IPPROTO_HOPOPTS:
7264 case IPPROTO_DSTOPTS: {
7265 /* get next header and header length */
7266 struct ip6_ext opt6;
7268 if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
7269 NULL, &reason, pd.af)) {
7270 DPFPRINTF(PF_DEBUG_MISC,
7271 ("pf: IPv6 short opt\n"));
7276 if (pd.proto == IPPROTO_AH)
7277 off += (opt6.ip6e_len + 2) * 4;
7279 off += (opt6.ip6e_len + 1) * 8;
7280 pd.proto = opt6.ip6e_nxt;
7281 /* goto the next header */
7288 } while (!terminal);
7290 /* if there's no routing header, use unmodified mbuf for checksumming */
7300 if (!pf_pull_hdr(m, off, &th, sizeof(th),
7301 &action, &reason, AF_INET6)) {
7302 log = action != PF_PASS;
7305 pd.p_len = pd.tot_len - off - (th.th_off << 2);
7306 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
7307 if (action == PF_DROP)
7309 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
7311 if (action == PF_PASS) {
7314 if (pfsync_update_state_ptr != NULL)
7315 pfsync_update_state_ptr(s);
7317 pfsync_update_state(s);
7319 #endif /* NPFSYNC */
7323 } else if (s == NULL)
7325 action = pf_test_rule(&r, &s, dir, kif,
7326 m, off, h, &pd, &a, &ruleset, NULL, inp);
7328 action = pf_test_rule(&r, &s, dir, kif,
7329 m, off, h, &pd, &a, &ruleset, &ip6intrq);
7338 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
7339 &action, &reason, AF_INET6)) {
7340 log = action != PF_PASS;
7343 if (uh.uh_dport == 0 ||
7344 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
7345 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
7347 REASON_SET(&reason, PFRES_SHORT);
7350 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
7351 if (action == PF_PASS) {
7354 if (pfsync_update_state_ptr != NULL)
7355 pfsync_update_state_ptr(s);
7357 pfsync_update_state(s);
7359 #endif /* NPFSYNC */
7363 } else if (s == NULL)
7365 action = pf_test_rule(&r, &s, dir, kif,
7366 m, off, h, &pd, &a, &ruleset, NULL, inp);
7368 action = pf_test_rule(&r, &s, dir, kif,
7369 m, off, h, &pd, &a, &ruleset, &ip6intrq);
7374 case IPPROTO_ICMP: {
7376 DPFPRINTF(PF_DEBUG_MISC,
7377 ("pf: dropping IPv6 packet with ICMPv4 payload\n"));
7381 case IPPROTO_ICMPV6: {
7382 struct icmp6_hdr ih;
7385 if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
7386 &action, &reason, AF_INET6)) {
7387 log = action != PF_PASS;
7390 action = pf_test_state_icmp(&s, dir, kif,
7391 m, off, h, &pd, &reason);
7392 if (action == PF_PASS) {
7395 if (pfsync_update_state_ptr != NULL)
7396 pfsync_update_state_ptr(s);
7398 pfsync_update_state(s);
7400 #endif /* NPFSYNC */
7404 } else if (s == NULL)
7406 action = pf_test_rule(&r, &s, dir, kif,
7407 m, off, h, &pd, &a, &ruleset, NULL, inp);
7409 action = pf_test_rule(&r, &s, dir, kif,
7410 m, off, h, &pd, &a, &ruleset, &ip6intrq);
7416 action = pf_test_state_other(&s, dir, kif, m, &pd);
7417 if (action == PF_PASS) {
7420 if (pfsync_update_state_ptr != NULL)
7421 pfsync_update_state_ptr(s);
7423 pfsync_update_state(s);
7425 #endif /* NPFSYNC */
7429 } else if (s == NULL)
7431 action = pf_test_rule(&r, &s, dir, kif, m, off, h,
7432 &pd, &a, &ruleset, NULL, inp);
7434 action = pf_test_rule(&r, &s, dir, kif, m, off, h,
7435 &pd, &a, &ruleset, &ip6intrq);
7446 /* handle dangerous IPv6 extension headers. */
7447 if (action == PF_PASS && rh_cnt &&
7448 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
7450 REASON_SET(&reason, PFRES_IPOPTIONS);
7452 DPFPRINTF(PF_DEBUG_MISC,
7453 ("pf: dropping packet with dangerous v6 headers\n"));
7456 if ((s && s->tag) || r->rtableid)
7458 pf_tag_packet(m, s ? s->tag : 0, r->rtableid, pd.pf_mtag);
7460 pf_tag_packet(m, s ? s->tag : 0, r->rtableid);
7463 if (dir == PF_IN && s && s->key[PF_SK_STACK])
7465 pd.pf_mtag->statekey = s->key[PF_SK_STACK];
7467 m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
7471 if (action == PF_PASS && r->qid) {
7473 if (pd.tos & IPTOS_LOWDELAY)
7474 pd.pf_mtag->qid = r->pqid;
7476 pd.pf_mtag->qid = r->qid;
7477 /* add hints for ecn */
7478 pd.pf_mtag->hdr = h;
7480 if (pd.tos & IPTOS_LOWDELAY)
7481 m->m_pkthdr.pf.qid = r->pqid;
7483 m->m_pkthdr.pf.qid = r->qid;
7484 /* add hints for ecn */
7485 m->m_pkthdr.pf.hdr = h;
7490 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
7491 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
7492 (s->nat_rule.ptr->action == PF_RDR ||
7493 s->nat_rule.ptr->action == PF_BINAT) &&
7494 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
7496 m->m_flags |= M_SKIP_FIREWALL;
7498 m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;
7502 /* XXX: Anybody working on it?! */
7504 printf("pf: divert(9) is not supported for IPv6\n");
7506 if (dir == PF_IN && action == PF_PASS && r->divert.port) {
7507 struct pf_divert *divert;
7509 if ((divert = pf_get_divert(m))) {
7510 m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
7511 divert->port = r->divert.port;
7512 divert->addr.ipv6 = r->divert.addr.v6;
7520 if (s != NULL && s->nat_rule.ptr != NULL &&
7521 s->nat_rule.ptr->log & PF_LOG_ALL)
7522 lr = s->nat_rule.ptr;
7525 PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, lr, a, ruleset,
7529 kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
7530 kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
7532 if (action == PF_PASS || r->action == PF_DROP) {
7533 dirndx = (dir == PF_OUT);
7534 r->packets[dirndx]++;
7535 r->bytes[dirndx] += pd.tot_len;
7537 a->packets[dirndx]++;
7538 a->bytes[dirndx] += pd.tot_len;
7541 if (s->nat_rule.ptr != NULL) {
7542 s->nat_rule.ptr->packets[dirndx]++;
7543 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
7545 if (s->src_node != NULL) {
7546 s->src_node->packets[dirndx]++;
7547 s->src_node->bytes[dirndx] += pd.tot_len;
7549 if (s->nat_src_node != NULL) {
7550 s->nat_src_node->packets[dirndx]++;
7551 s->nat_src_node->bytes[dirndx] += pd.tot_len;
7553 dirndx = (dir == s->direction) ? 0 : 1;
7554 s->packets[dirndx]++;
7555 s->bytes[dirndx] += pd.tot_len;
7558 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
7560 if (nr != NULL && r == &V_pf_default_rule)
7562 if (nr != NULL && r == &pf_default_rule)
7565 if (tr->src.addr.type == PF_ADDR_TABLE)
7566 pfr_update_stats(tr->src.addr.p.tbl,
7567 (s == NULL) ? pd.src :
7568 &s->key[(s->direction == PF_IN)]->addr[0],
7569 pd.af, pd.tot_len, dir == PF_OUT,
7570 r->action == PF_PASS, tr->src.neg);
7571 if (tr->dst.addr.type == PF_ADDR_TABLE)
7572 pfr_update_stats(tr->dst.addr.p.tbl,
7573 (s == NULL) ? pd.dst :
7574 &s->key[(s->direction == PF_IN)]->addr[1],
7575 pd.af, pd.tot_len, dir == PF_OUT,
7576 r->action == PF_PASS, tr->dst.neg);
7580 case PF_SYNPROXY_DROP:
7587 /* pf_route6 can free the mbuf causing *m0 to become NULL */
7589 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
7601 pf_check_congestion(struct ifqueue *ifq)
7604 /* XXX_IMPORT: later */
7607 if (ifq->ifq_congestion)
7615 * must be called whenever any addressing information such as
7616 * address, port, protocol has changed
7619 pf_pkt_addr_changed(struct mbuf *m)
7622 struct pf_mtag *pf_tag;
7624 if ((pf_tag = pf_find_mtag(m)) != NULL)
7625 pf_tag->statekey = NULL;
7627 m->m_pkthdr.pf.statekey = NULL;