/*-
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2008 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/random.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/ucred.h>

#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/route.h>
#include <net/radix_mpath.h>

#include <net/pfvar.h>
#include <net/if_pflog.h>
#include <net/if_pfsync.h>

#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/in_fib.h>
#include <netinet/ip.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <netpfil/ipfw/ip_fw_private.h> /* XXX: only for DIR_IN/DIR_OUT */

#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_fib.h>
#include <netinet6/scope6_var.h>

#include <machine/in_cksum.h>
#include <security/mac/mac_framework.h>

#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x
VNET_DEFINE(struct pf_altqqueue, pf_altqs[2]);
VNET_DEFINE(struct pf_palist, pf_pabuf);
VNET_DEFINE(struct pf_altqqueue *, pf_altqs_active);
VNET_DEFINE(struct pf_altqqueue *, pf_altqs_inactive);
VNET_DEFINE(struct pf_kstatus, pf_status);

VNET_DEFINE(u_int32_t, ticket_altqs_active);
VNET_DEFINE(u_int32_t, ticket_altqs_inactive);
VNET_DEFINE(int, altqs_inactive_open);
VNET_DEFINE(u_int32_t, ticket_pabuf);

VNET_DEFINE(MD5_CTX, pf_tcp_secret_ctx);
#define	V_pf_tcp_secret_ctx	VNET(pf_tcp_secret_ctx)
VNET_DEFINE(u_char, pf_tcp_secret[16]);
#define	V_pf_tcp_secret		VNET(pf_tcp_secret)
VNET_DEFINE(int, pf_tcp_secret_init);
#define	V_pf_tcp_secret_init	VNET(pf_tcp_secret_init)
VNET_DEFINE(int, pf_tcp_iss_off);
#define	V_pf_tcp_iss_off	VNET(pf_tcp_iss_off)
VNET_DECLARE(int, pf_vnet_active);
#define	V_pf_vnet_active	VNET(pf_vnet_active)

/*
 * Queue for pf_intr() sends.
 */
static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations");
struct pf_send_entry {
	STAILQ_ENTRY(pf_send_entry)	pfse_next;
	/* ... */
};

STAILQ_HEAD(pf_send_head, pf_send_entry);
static VNET_DEFINE(struct pf_send_head, pf_sendqueue);
#define	V_pf_sendqueue	VNET(pf_sendqueue)

static struct mtx pf_sendqueue_mtx;
MTX_SYSINIT(pf_sendqueue_mtx, &pf_sendqueue_mtx, "pf send queue", MTX_DEF);
#define	PF_SENDQ_LOCK()		mtx_lock(&pf_sendqueue_mtx)
#define	PF_SENDQ_UNLOCK()	mtx_unlock(&pf_sendqueue_mtx)

/*
 * Queue for pf_overload_task() tasks.
 */
struct pf_overload_entry {
	SLIST_ENTRY(pf_overload_entry)	next;
	/* ... */
	struct pf_rule			*rule;
};

SLIST_HEAD(pf_overload_head, pf_overload_entry);
static VNET_DEFINE(struct pf_overload_head, pf_overloadqueue);
#define	V_pf_overloadqueue	VNET(pf_overloadqueue)
static VNET_DEFINE(struct task, pf_overloadtask);
#define	V_pf_overloadtask	VNET(pf_overloadtask)

static struct mtx pf_overloadqueue_mtx;
MTX_SYSINIT(pf_overloadqueue_mtx, &pf_overloadqueue_mtx,
    "pf overload/flush queue", MTX_DEF);
#define	PF_OVERLOADQ_LOCK()	mtx_lock(&pf_overloadqueue_mtx)
#define	PF_OVERLOADQ_UNLOCK()	mtx_unlock(&pf_overloadqueue_mtx)

VNET_DEFINE(struct pf_rulequeue, pf_unlinked_rules);
struct mtx pf_unlnkdrules_mtx;
MTX_SYSINIT(pf_unlnkdrules_mtx, &pf_unlnkdrules_mtx, "pf unlinked rules",
    MTX_DEF);

static VNET_DEFINE(uma_zone_t, pf_sources_z);
#define	V_pf_sources_z	VNET(pf_sources_z)
uma_zone_t		pf_mtag_z;
VNET_DEFINE(uma_zone_t, pf_state_z);
VNET_DEFINE(uma_zone_t, pf_state_key_z);

VNET_DEFINE(uint64_t, pf_stateid[MAXCPU]);
#define	PFID_CPUBITS	8
#define	PFID_CPUSHIFT	(sizeof(uint64_t) * NBBY - PFID_CPUBITS)
#define	PFID_CPUMASK	((uint64_t)((1 << PFID_CPUBITS) - 1) << PFID_CPUSHIFT)
#define	PFID_MAXID	(~PFID_CPUMASK)
CTASSERT((1 << PFID_CPUBITS) >= MAXCPU);
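
/*
 * Illustration (not part of the original source): a state ID keeps a
 * per-CPU counter in its low 56 bits and the CPU number in the top
 * PFID_CPUBITS (8) bits, so IDs can be generated without locking.
 * For example, the 5th state allocated on CPU 3 gets
 *
 *	id = 5 | ((uint64_t)3 << PFID_CPUSHIFT) = 0x0300000000000005
 *
 * PFID_MAXID (~PFID_CPUMASK == 0x00ffffffffffffff) is the point where
 * a per-CPU counter wraps back to 1; see pf_state_insert() below.
 */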
static void		 pf_src_tree_remove_state(struct pf_state *);
static void		 pf_init_threshold(struct pf_threshold *, u_int32_t,
			    u_int32_t);
static void		 pf_add_threshold(struct pf_threshold *);
static int		 pf_check_threshold(struct pf_threshold *);

static void		 pf_change_ap(struct mbuf *, struct pf_addr *, u_int16_t *,
			    u_int16_t *, u_int16_t *, struct pf_addr *,
			    u_int16_t, u_int8_t, sa_family_t);
static int		 pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
			    struct tcphdr *, struct pf_state_peer *);
static void		 pf_change_icmp(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, struct pf_addr *, u_int16_t,
			    u_int16_t *, u_int16_t *, u_int16_t *,
			    u_int16_t *, u_int8_t, sa_family_t);
static void		 pf_send_tcp(struct mbuf *,
			    const struct pf_rule *, sa_family_t,
			    const struct pf_addr *, const struct pf_addr *,
			    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
			    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
			    u_int16_t, struct ifnet *);
static void		 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
			    sa_family_t, struct pf_rule *);
static void		 pf_detach_state(struct pf_state *);
static int		 pf_state_key_attach(struct pf_state_key *,
			    struct pf_state_key *, struct pf_state *);
static void		 pf_state_key_detach(struct pf_state *, int);
static int		 pf_state_key_ctor(void *, int, void *, int);
static u_int32_t	 pf_tcp_iss(struct pf_pdesc *);
static int		 pf_test_rule(struct pf_rule **, struct pf_state **,
			    int, struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **, struct inpcb *);
static int		 pf_create_state(struct pf_rule *, struct pf_rule *,
			    struct pf_rule *, struct pf_pdesc *,
			    struct pf_src_node *, struct pf_state_key *,
			    struct pf_state_key *, struct mbuf *, int,
			    u_int16_t, u_int16_t, int *, struct pfi_kif *,
			    struct pf_state **, int, u_int16_t, u_int16_t,
			    int);
static int		 pf_test_fragment(struct pf_rule **, int,
			    struct pfi_kif *, struct mbuf *, void *,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **);
static int		 pf_tcp_track_full(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, u_short *, int *);
static int		 pf_tcp_track_sloppy(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pf_pdesc *, u_short *);
static int		 pf_test_state_tcp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_udp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *);
static int		 pf_test_state_icmp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_other(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
static u_int8_t		 pf_get_wscale(struct mbuf *, int, u_int16_t,
			    sa_family_t);
static u_int16_t	 pf_get_mss(struct mbuf *, int, u_int16_t,
			    sa_family_t);
static u_int16_t	 pf_calc_mss(struct pf_addr *, sa_family_t,
			    int, u_int16_t);
static int		 pf_check_proto_cksum(struct mbuf *, int, int,
			    u_int8_t, sa_family_t);
static void		 pf_print_state_parts(struct pf_state *,
			    struct pf_state_key *, struct pf_state_key *);
static int		 pf_addr_wrap_neq(struct pf_addr_wrap *,
			    struct pf_addr_wrap *);
static struct pf_state	*pf_find_state(struct pfi_kif *,
			    struct pf_state_key_cmp *, u_int);
static int		 pf_src_connlimit(struct pf_state **);
static void		 pf_overload_task(void *v, int pending);
static int		 pf_insert_src_node(struct pf_src_node **,
			    struct pf_rule *, struct pf_addr *, sa_family_t);
static u_int		 pf_purge_expired_states(u_int, int);
static void		 pf_purge_unlinked_rules(void);
static int		 pf_mtag_uminit(void *, int, int);
static void		 pf_mtag_free(struct m_tag *);
#ifdef INET
static void		 pf_route(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
#endif /* INET */
#ifdef INET6
static void		 pf_change_a6(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, u_int8_t);
static void		 pf_route6(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
#endif /* INET6 */

int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);

extern int pf_end_threads;

VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);

#define	PACKET_LOOPED(pd)	((pd)->pf_mtag &&			\
				 (pd)->pf_mtag->flags & PF_PACKET_LOOPED)
#define	STATE_LOOKUP(i, k, d, s, pd)					\
	do {								\
		(s) = pf_find_state((i), (k), (d));			\
		if ((s) == NULL)					\
			return (PF_DROP);				\
		if (PACKET_LOOPED(pd))					\
			return (PF_PASS);				\
		if ((d) == PF_OUT &&					\
		    (((s)->rule.ptr->rt == PF_ROUTETO &&		\
		    (s)->rule.ptr->direction == PF_OUT) ||		\
		    ((s)->rule.ptr->rt == PF_REPLYTO &&			\
		    (s)->rule.ptr->direction == PF_IN)) &&		\
		    (s)->rt_kif != NULL &&				\
		    (s)->rt_kif != (i))					\
			return (PF_PASS);				\
	} while (0)

#define	BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all

#define	STATE_INC_COUNTERS(s)						\
	do {								\
		counter_u64_add(s->rule.ptr->states_cur, 1);		\
		counter_u64_add(s->rule.ptr->states_tot, 1);		\
		if (s->anchor.ptr != NULL) {				\
			counter_u64_add(s->anchor.ptr->states_cur, 1);	\
			counter_u64_add(s->anchor.ptr->states_tot, 1);	\
		}							\
		if (s->nat_rule.ptr != NULL) {				\
			counter_u64_add(s->nat_rule.ptr->states_cur, 1);\
			counter_u64_add(s->nat_rule.ptr->states_tot, 1);\
		}							\
	} while (0)

#define	STATE_DEC_COUNTERS(s)						\
	do {								\
		if (s->nat_rule.ptr != NULL)				\
			counter_u64_add(s->nat_rule.ptr->states_cur, -1);\
		if (s->anchor.ptr != NULL)				\
			counter_u64_add(s->anchor.ptr->states_cur, -1);	\
		counter_u64_add(s->rule.ptr->states_cur, -1);		\
	} while (0)
static MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
VNET_DEFINE(struct pf_idhash *, pf_idhash);
VNET_DEFINE(struct pf_srchash *, pf_srchash);

SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW, 0, "pf(4)");

u_long		pf_hashmask;
u_long		pf_srchashmask;
static u_long	pf_hashsize;
static u_long	pf_srchashsize;

SYSCTL_ULONG(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN,
    &pf_hashsize, 0, "Size of pf(4) states hashtable");
SYSCTL_ULONG(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN,
    &pf_srchashsize, 0, "Size of pf(4) source nodes hashtable");

VNET_DEFINE(void *, pf_swi_cookie);

VNET_DEFINE(uint32_t, pf_hashseed);
#define	V_pf_hashseed	VNET(pf_hashseed)
static __inline int
pf_addr_cmp(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
{

	switch (af) {
#ifdef INET
	case AF_INET:
		if (a->addr32[0] > b->addr32[0])
			return (1);
		if (a->addr32[0] < b->addr32[0])
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (a->addr32[3] > b->addr32[3])
			return (1);
		if (a->addr32[3] < b->addr32[3])
			return (-1);
		if (a->addr32[2] > b->addr32[2])
			return (1);
		if (a->addr32[2] < b->addr32[2])
			return (-1);
		if (a->addr32[1] > b->addr32[1])
			return (1);
		if (a->addr32[1] < b->addr32[1])
			return (-1);
		if (a->addr32[0] > b->addr32[0])
			return (1);
		if (a->addr32[0] < b->addr32[0])
			return (-1);
		break;
#endif /* INET6 */
	default:
		panic("%s: unknown address family %u", __func__, af);
	}

	return (0);
}
static __inline uint32_t
pf_hashkey(struct pf_state_key *sk)
{
	uint32_t h;

	h = murmur3_32_hash32((uint32_t *)sk,
	    sizeof(struct pf_state_key_cmp)/sizeof(uint32_t),
	    V_pf_hashseed);

	return (h & pf_hashmask);
}
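
/*
 * Note on the mask above (illustrative, not in the original source):
 * pf_initialize() forces pf_hashsize to a power of two, so
 * pf_hashmask == pf_hashsize - 1 is an all-ones bit mask and
 * "h & pf_hashmask" reduces the 32-bit Murmur3 hash to a valid row
 * index.  E.g. pf_hashsize = 1024 gives pf_hashmask = 0x3ff, so only
 * the low 10 bits of h select the bucket.
 */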
static __inline uint32_t
pf_hashsrc(struct pf_addr *addr, sa_family_t af)
{
	uint32_t h;

	switch (af) {
	case AF_INET:
		h = murmur3_32_hash32((uint32_t *)&addr->v4,
		    sizeof(addr->v4)/sizeof(uint32_t), V_pf_hashseed);
		break;
	case AF_INET6:
		h = murmur3_32_hash32((uint32_t *)&addr->v6,
		    sizeof(addr->v6)/sizeof(uint32_t), V_pf_hashseed);
		break;
	default:
		panic("%s: unknown address family %u", __func__, af);
	}

	return (h & pf_srchashmask);
}
static u_int32_t
pf_state_hash(struct pf_state *s)
{
	u_int32_t hv = (intptr_t)s / sizeof(*s);

	hv ^= crc32(&s->src, sizeof(s->src));
	hv ^= crc32(&s->dst, sizeof(s->dst));
	if (hv == 0)
		hv = 1;

	return (hv);
}
void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{

	switch (af) {
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
	case AF_INET6:
		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];
		break;
	}
}
static void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = time_uptime;
}

static void
pf_add_threshold(struct pf_threshold *threshold)
{
	u_int32_t t = time_uptime, diff = t - threshold->last;

	if (diff >= threshold->seconds)
		threshold->count = 0;
	else
		threshold->count -= threshold->count * diff /
		    threshold->seconds;
	threshold->count += PF_THRESHOLD_MULT;
	threshold->last = t;
}

static int
pf_check_threshold(struct pf_threshold *threshold)
{
	return (threshold->count > threshold->limit);
}
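
/*
 * Worked example (illustrative only, assuming PF_THRESHOLD_MULT is
 * 1000 as defined in pfvar.h): "max-src-conn-rate 10/5" yields
 * limit = 10 * 1000 = 10000 and seconds = 5.  Each call to
 * pf_add_threshold() first decays the count linearly over the window
 * and then adds 1000.  With count == 8000 and diff == 2 seconds since
 * the last event:
 *
 *	count -= 8000 * 2 / 5;		count is now 4800
 *	count += 1000;			count is now 5800
 *
 * pf_check_threshold() trips once count exceeds 10000, i.e. once more
 * than 10 connections land inside the decayed 5 second window.
 */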
static int
pf_src_connlimit(struct pf_state **state)
{
	struct pf_overload_entry *pfoe;
	int bad = 0;

	PF_STATE_LOCK_ASSERT(*state);

	(*state)->src_node->conn++;
	(*state)->src.tcp_est = 1;
	pf_add_threshold(&(*state)->src_node->conn_rate);

	if ((*state)->rule.ptr->max_src_conn &&
	    (*state)->rule.ptr->max_src_conn <
	    (*state)->src_node->conn) {
		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONN], 1);
		bad++;
	}

	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONNRATE], 1);
		bad++;
	}

	if (!bad)
		return (0);

	/* Kill this state. */
	(*state)->timeout = PFTM_PURGE;
	(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;

	if ((*state)->rule.ptr->overload_tbl == NULL)
		return (1);

	/* Schedule overloading and flushing task. */
	pfoe = malloc(sizeof(*pfoe), M_PFTEMP, M_NOWAIT);
	if (pfoe == NULL)
		return (1);	/* too bad :( */

	bcopy(&(*state)->src_node->addr, &pfoe->addr, sizeof(pfoe->addr));
	pfoe->af = (*state)->key[PF_SK_WIRE]->af;
	pfoe->rule = (*state)->rule.ptr;
	pfoe->dir = (*state)->direction;
	PF_OVERLOADQ_LOCK();
	SLIST_INSERT_HEAD(&V_pf_overloadqueue, pfoe, next);
	PF_OVERLOADQ_UNLOCK();
	taskqueue_enqueue(taskqueue_swi, &V_pf_overloadtask);

	return (1);
}
static void
pf_overload_task(void *v, int pending)
{
	struct pf_overload_head queue;
	struct pfr_addr p;
	struct pf_overload_entry *pfoe, *pfoe1;
	uint32_t killed = 0;

	CURVNET_SET((struct vnet *)v);

	PF_OVERLOADQ_LOCK();
	queue = V_pf_overloadqueue;
	SLIST_INIT(&V_pf_overloadqueue);
	PF_OVERLOADQ_UNLOCK();

	bzero(&p, sizeof(p));
	SLIST_FOREACH(pfoe, &queue, next) {
		counter_u64_add(V_pf_status.lcounters[LCNT_OVERLOAD_TABLE], 1);
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("%s: blocking address ", __func__);
			pf_print_host(&pfoe->addr, 0, pfoe->af);
			printf("\n");
		}

		p.pfra_af = pfoe->af;
		switch (pfoe->af) {
#ifdef INET
		case AF_INET:
			p.pfra_net = 32;
			p.pfra_ip4addr = pfoe->addr.v4;
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			p.pfra_net = 128;
			p.pfra_ip6addr = pfoe->addr.v6;
			break;
#endif /* INET6 */
		}

		pfr_insert_kentry(pfoe->rule->overload_tbl, &p, time_second);
	}

	/*
	 * Remove the entries that don't need flushing.
	 */
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		if (pfoe->rule->flush == 0) {
			SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next);
			free(pfoe, M_PFTEMP);
		} else
			counter_u64_add(
			    V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH], 1);

	/* If nothing to flush, return. */
	if (SLIST_EMPTY(&queue)) {
		CURVNET_RESTORE();
		return;
	}

	for (int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_state_key *sk;
		struct pf_state *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			sk = s->key[PF_SK_WIRE];
			SLIST_FOREACH(pfoe, &queue, next)
				if (sk->af == pfoe->af &&
				    ((pfoe->rule->flush & PF_FLUSH_GLOBAL) ||
				    pfoe->rule == s->rule.ptr) &&
				    ((pfoe->dir == PF_OUT &&
				    PF_AEQ(&pfoe->addr, &sk->addr[1], sk->af)) ||
				    (pfoe->dir == PF_IN &&
				    PF_AEQ(&pfoe->addr, &sk->addr[0], sk->af)))) {
					s->timeout = PFTM_PURGE;
					s->src.state = s->dst.state = TCPS_CLOSED;
					killed++;
				}
		}
		PF_HASHROW_UNLOCK(ih);
	}
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		free(pfoe, M_PFTEMP);
	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("%s: %u states killed", __func__, killed);

	CURVNET_RESTORE();
}
/*
 * On failure this can return with the hash row locked, so that the
 * caller can consistently allocate and insert a new source node.
 */
static struct pf_src_node *
pf_find_src_node(struct pf_addr *src, struct pf_rule *rule, sa_family_t af,
    int returnlocked)
{
	struct pf_srchash *sh;
	struct pf_src_node *n;

	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_SEARCH], 1);

	sh = &V_pf_srchash[pf_hashsrc(src, af)];
	PF_HASHROW_LOCK(sh);
	LIST_FOREACH(n, &sh->nodes, entry)
		if (n->rule.ptr == rule && n->af == af &&
		    ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) ||
		    (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0)))
			break;
	if (n != NULL) {
		n->states++;
		PF_HASHROW_UNLOCK(sh);
	} else if (returnlocked == 0)
		PF_HASHROW_UNLOCK(sh);

	return (n);
}

static int
pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
    struct pf_addr *src, sa_family_t af)
{

	KASSERT((rule->rule_flag & PFRULE_RULESRCTRACK ||
	    rule->rpool.opts & PF_POOL_STICKYADDR),
	    ("%s for non-tracking rule %p", __func__, rule));

	if (*sn == NULL)
		*sn = pf_find_src_node(src, rule, af, 1);

	if (*sn == NULL) {
		struct pf_srchash *sh = &V_pf_srchash[pf_hashsrc(src, af)];

		PF_HASHROW_ASSERT(sh);

		if (!rule->max_src_nodes ||
		    counter_u64_fetch(rule->src_nodes) < rule->max_src_nodes)
			(*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO);
		else
			counter_u64_add(V_pf_status.lcounters[LCNT_SRCNODES],
			    1);
		if ((*sn) == NULL) {
			PF_HASHROW_UNLOCK(sh);
			return (-1);
		}

		pf_init_threshold(&(*sn)->conn_rate,
		    rule->max_src_conn_rate.limit,
		    rule->max_src_conn_rate.seconds);

		(*sn)->af = af;
		(*sn)->rule.ptr = rule;
		PF_ACPY(&(*sn)->addr, src, af);
		LIST_INSERT_HEAD(&sh->nodes, *sn, entry);
		(*sn)->creation = time_uptime;
		(*sn)->ruletype = rule->action;
		(*sn)->states = 1;
		if ((*sn)->rule.ptr != NULL)
			counter_u64_add((*sn)->rule.ptr->src_nodes, 1);
		PF_HASHROW_UNLOCK(sh);
		counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_INSERT], 1);
	} else {
		if (rule->max_src_states &&
		    (*sn)->states >= rule->max_src_states) {
			counter_u64_add(V_pf_status.lcounters[LCNT_SRCSTATES],
			    1);
			return (-1);
		}
	}
	return (0);
}

void
pf_unlink_src_node(struct pf_src_node *src)
{

	PF_HASHROW_ASSERT(&V_pf_srchash[pf_hashsrc(&src->addr, src->af)]);
	LIST_REMOVE(src, entry);
	if (src->rule.ptr)
		counter_u64_add(src->rule.ptr->src_nodes, -1);
}

u_int
pf_free_src_nodes(struct pf_src_node_list *head)
{
	struct pf_src_node *sn, *tmp;
	u_int count = 0;

	LIST_FOREACH_SAFE(sn, head, entry, tmp) {
		uma_zfree(V_pf_sources_z, sn);
		count++;
	}

	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], count);

	return (count);
}
static void
pf_mtag_initialize()
{

	pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) +
	    sizeof(struct pf_mtag), NULL, NULL, pf_mtag_uminit, NULL,
	    UMA_ALIGN_PTR, 0);
}

/* Per-vnet data storage structures initialization. */
void
pf_initialize()
{
	struct pf_keyhash	*kh;
	struct pf_idhash	*ih;
	struct pf_srchash	*sh;
	u_int i;

	if (pf_hashsize == 0 || !powerof2(pf_hashsize))
		pf_hashsize = PF_HASHSIZ;
	if (pf_srchashsize == 0 || !powerof2(pf_srchashsize))
		pf_srchashsize = PF_SRCHASHSIZ;

	V_pf_hashseed = arc4random();

	/* States and state keys storage. */
	V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_state),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z;
	uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT);
	uma_zone_set_warning(V_pf_state_z, "PF states limit reached");

	V_pf_state_key_z = uma_zcreate("pf state keys",
	    sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	V_pf_keyhash = mallocarray(pf_hashsize, sizeof(struct pf_keyhash),
	    M_PFHASH, M_NOWAIT | M_ZERO);
	V_pf_idhash = mallocarray(pf_hashsize, sizeof(struct pf_idhash),
	    M_PFHASH, M_NOWAIT | M_ZERO);
	if (V_pf_keyhash == NULL || V_pf_idhash == NULL) {
		printf("pf: Unable to allocate memory for "
		    "state_hashsize %lu.\n", pf_hashsize);

		free(V_pf_keyhash, M_PFHASH);
		free(V_pf_idhash, M_PFHASH);

		pf_hashsize = PF_HASHSIZ;
		V_pf_keyhash = mallocarray(pf_hashsize,
		    sizeof(struct pf_keyhash), M_PFHASH, M_WAITOK | M_ZERO);
		V_pf_idhash = mallocarray(pf_hashsize,
		    sizeof(struct pf_idhash), M_PFHASH, M_WAITOK | M_ZERO);
	}

	pf_hashmask = pf_hashsize - 1;
	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
	    i++, kh++, ih++) {
		mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK);
		mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF);
	}

	/* Source nodes. */
	V_pf_sources_z = uma_zcreate("pf source nodes",
	    sizeof(struct pf_src_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    0);
	V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z;
	uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT);
	uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached");

	V_pf_srchash = mallocarray(pf_srchashsize,
	    sizeof(struct pf_srchash), M_PFHASH, M_NOWAIT | M_ZERO);
	if (V_pf_srchash == NULL) {
		printf("pf: Unable to allocate memory for "
		    "source_hashsize %lu.\n", pf_srchashsize);

		pf_srchashsize = PF_SRCHASHSIZ;
		V_pf_srchash = mallocarray(pf_srchashsize,
		    sizeof(struct pf_srchash), M_PFHASH, M_WAITOK | M_ZERO);
	}

	pf_srchashmask = pf_srchashsize - 1;
	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++)
		mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);

	/* ALTQ */
	TAILQ_INIT(&V_pf_altqs[0]);
	TAILQ_INIT(&V_pf_altqs[1]);
	TAILQ_INIT(&V_pf_pabuf);
	V_pf_altqs_active = &V_pf_altqs[0];
	V_pf_altqs_inactive = &V_pf_altqs[1];

	/* Send & overload+flush queues. */
	STAILQ_INIT(&V_pf_sendqueue);
	SLIST_INIT(&V_pf_overloadqueue);
	TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, curvnet);

	/* Unlinked, but may be referenced rules. */
	TAILQ_INIT(&V_pf_unlinked_rules);
}
static void
pf_mtag_cleanup()
{

	uma_zdestroy(pf_mtag_z);
}

void
pf_cleanup()
{
	struct pf_keyhash	*kh;
	struct pf_idhash	*ih;
	struct pf_srchash	*sh;
	struct pf_send_entry	*pfse, *next;
	u_int i;

	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
	    i++, kh++, ih++) {
		KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
		    __func__));
		KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty",
		    __func__));
		mtx_destroy(&kh->lock);
		mtx_destroy(&ih->lock);
	}
	free(V_pf_keyhash, M_PFHASH);
	free(V_pf_idhash, M_PFHASH);

	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
		KASSERT(LIST_EMPTY(&sh->nodes),
		    ("%s: source node hash not empty", __func__));
		mtx_destroy(&sh->lock);
	}
	free(V_pf_srchash, M_PFHASH);

	STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) {
		m_freem(pfse->pfse_m);
		free(pfse, M_PFTEMP);
	}

	uma_zdestroy(V_pf_sources_z);
	uma_zdestroy(V_pf_state_z);
	uma_zdestroy(V_pf_state_key_z);
}
static int
pf_mtag_uminit(void *mem, int size, int how)
{
	struct m_tag *t;

	t = (struct m_tag *)mem;
	t->m_tag_cookie = MTAG_ABI_COMPAT;
	t->m_tag_id = PACKET_TAG_PF;
	t->m_tag_len = sizeof(struct pf_mtag);
	t->m_tag_free = pf_mtag_free;

	return (0);
}

static void
pf_mtag_free(struct m_tag *t)
{

	uma_zfree(pf_mtag_z, t);
}

struct pf_mtag *
pf_get_mtag(struct mbuf *m)
{
	struct m_tag *mtag;

	if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL)
		return ((struct pf_mtag *)(mtag + 1));

	mtag = uma_zalloc(pf_mtag_z, M_NOWAIT);
	if (mtag == NULL)
		return (NULL);
	bzero(mtag + 1, sizeof(struct pf_mtag));
	m_tag_prepend(m, mtag);

	return ((struct pf_mtag *)(mtag + 1));
}
static int
pf_state_key_attach(struct pf_state_key *skw, struct pf_state_key *sks,
    struct pf_state *s)
{
	struct pf_keyhash	*khs, *khw, *kh;
	struct pf_state_key	*sk, *cur;
	struct pf_state		*si, *olds = NULL;
	int idx;

	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
	KASSERT(s->key[PF_SK_WIRE] == NULL, ("%s: state has key", __func__));
	KASSERT(s->key[PF_SK_STACK] == NULL, ("%s: state has key", __func__));

	/*
	 * We need to lock hash slots of both keys. To avoid deadlock
	 * we always lock the slot with lower address first. Unlock order
	 * isn't important.
	 *
	 * We also need to lock ID hash slot before dropping key
	 * locks. On success we return with ID hash slot locked.
	 */

	if (skw == sks) {
		khs = khw = &V_pf_keyhash[pf_hashkey(skw)];
		PF_HASHROW_LOCK(khs);
	} else {
		khs = &V_pf_keyhash[pf_hashkey(sks)];
		khw = &V_pf_keyhash[pf_hashkey(skw)];
		if (khs == khw) {
			PF_HASHROW_LOCK(khs);
		} else if (khs < khw) {
			PF_HASHROW_LOCK(khs);
			PF_HASHROW_LOCK(khw);
		} else {
			PF_HASHROW_LOCK(khw);
			PF_HASHROW_LOCK(khs);
		}
	}

#define	KEYS_UNLOCK()	do {			\
	if (khs != khw) {			\
		PF_HASHROW_UNLOCK(khs);		\
		PF_HASHROW_UNLOCK(khw);		\
	} else					\
		PF_HASHROW_UNLOCK(khs);		\
} while (0)
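
	/*
	 * Illustrative note (not in the original source): locking the
	 * two key hash rows in address order is the classic deadlock
	 * avoidance trick.  If thread A attaches keys hashing to rows
	 * X and Y while thread B attaches keys hashing to Y and X,
	 * both threads still lock min(X, Y) first, so neither can hold
	 * one row while waiting forever on the other.
	 */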
	/*
	 * First run: start with wire key.
	 */
	sk = skw;
	kh = khw;
	idx = PF_SK_WIRE;

keyattach:
	LIST_FOREACH(cur, &kh->keys, entry)
		if (bcmp(cur, sk, sizeof(struct pf_state_key_cmp)) == 0)
			break;

	if (cur != NULL) {
		/* Key exists. Check for same kif, if none, add to key. */
		TAILQ_FOREACH(si, &cur->states[idx], key_list[idx]) {
			struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(si)];

			PF_HASHROW_LOCK(ih);
			if (si->kif == s->kif &&
			    si->direction == s->direction) {
				if (sk->proto == IPPROTO_TCP &&
				    si->src.state >= TCPS_FIN_WAIT_2 &&
				    si->dst.state >= TCPS_FIN_WAIT_2) {
					/*
					 * New state matches an old >FIN_WAIT_2
					 * state. We can't drop key hash locks,
					 * thus we can't unlink it properly.
					 *
					 * As a workaround we drop it into
					 * TCPS_CLOSED state, schedule purge
					 * ASAP and push it into the very end
					 * of the slot TAILQ, so that it won't
					 * conflict with our new state.
					 */
					si->src.state = si->dst.state =
					    TCPS_CLOSED;
					si->timeout = PFTM_PURGE;
					olds = si;
				} else {
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						printf("pf: %s key attach "
						    "failed on %s: ",
						    (idx == PF_SK_WIRE) ?
						    "wire" : "stack",
						    s->kif->pfik_name);
						pf_print_state_parts(s,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						printf(", existing: ");
						pf_print_state_parts(si,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						printf("\n");
					}
					PF_HASHROW_UNLOCK(ih);
					KEYS_UNLOCK();
					uma_zfree(V_pf_state_key_z, sk);
					if (idx == PF_SK_STACK)
						pf_detach_state(s);
					return (EEXIST); /* collision! */
				}
			}
			PF_HASHROW_UNLOCK(ih);
		}
		uma_zfree(V_pf_state_key_z, sk);
		s->key[idx] = cur;
	} else {
		LIST_INSERT_HEAD(&kh->keys, sk, entry);
		s->key[idx] = sk;
	}

stateattach:
	/* List is sorted, if-bound states before floating. */
	if (s->kif == V_pfi_all)
		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]);
	else
		TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]);

	if (olds) {
		TAILQ_REMOVE(&s->key[idx]->states[idx], olds, key_list[idx]);
		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], olds,
		    key_list[idx]);
		olds = NULL;
	}

	/*
	 * Attach done.  Decide whether (and how) a second key
	 * should be attached.
	 */
	if (sks == skw) {
		s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
		idx = PF_SK_STACK;
		sks = NULL;
		goto stateattach;
	} else if (sks != NULL) {
		/*
		 * Continue attaching with stack key.
		 */
		sk = sks;
		kh = khs;
		idx = PF_SK_STACK;
		sks = NULL;
		goto keyattach;
	}

	PF_STATE_LOCK(s);
	KEYS_UNLOCK();

	KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL,
	    ("%s failure", __func__));

	return (0);
}
static void
pf_detach_state(struct pf_state *s)
{
	struct pf_state_key *sks = s->key[PF_SK_STACK];
	struct pf_keyhash *kh;

	if (sks != NULL) {
		kh = &V_pf_keyhash[pf_hashkey(sks)];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_STACK] != NULL)
			pf_state_key_detach(s, PF_SK_STACK);
		/*
		 * If both point to the same key, then we are done.
		 */
		if (sks == s->key[PF_SK_WIRE]) {
			pf_state_key_detach(s, PF_SK_WIRE);
			PF_HASHROW_UNLOCK(kh);
			return;
		}
		PF_HASHROW_UNLOCK(kh);
	}

	if (s->key[PF_SK_WIRE] != NULL) {
		kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_WIRE] != NULL)
			pf_state_key_detach(s, PF_SK_WIRE);
		PF_HASHROW_UNLOCK(kh);
	}
}

static void
pf_state_key_detach(struct pf_state *s, int idx)
{
	struct pf_state_key *sk = s->key[idx];
#ifdef INVARIANTS
	struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)];

	PF_HASHROW_ASSERT(kh);
#endif
	TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]);
	s->key[idx] = NULL;

	if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) {
		LIST_REMOVE(sk, entry);
		uma_zfree(V_pf_state_key_z, sk);
	}
}

static int
pf_state_key_ctor(void *mem, int size, void *arg, int flags)
{
	struct pf_state_key *sk = mem;

	bzero(sk, sizeof(struct pf_state_key_cmp));
	TAILQ_INIT(&sk->states[PF_SK_WIRE]);
	TAILQ_INIT(&sk->states[PF_SK_STACK]);

	return (0);
}
struct pf_state_key *
pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr,
    struct pf_addr *daddr, u_int16_t sport, u_int16_t dport)
{
	struct pf_state_key *sk;

	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
	if (sk == NULL)
		return (NULL);

	PF_ACPY(&sk->addr[pd->sidx], saddr, pd->af);
	PF_ACPY(&sk->addr[pd->didx], daddr, pd->af);
	sk->port[pd->sidx] = sport;
	sk->port[pd->didx] = dport;
	sk->proto = pd->proto;
	sk->af = pd->af;

	return (sk);
}

struct pf_state_key *
pf_state_key_clone(struct pf_state_key *orig)
{
	struct pf_state_key *sk;

	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
	if (sk == NULL)
		return (NULL);

	bcopy(orig, sk, sizeof(struct pf_state_key_cmp));

	return (sk);
}
int
pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
    struct pf_state_key *sks, struct pf_state *s)
{
	struct pf_idhash *ih;
	struct pf_state *cur;
	int error;

	KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]),
	    ("%s: sks not pristine", __func__));
	KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]),
	    ("%s: skw not pristine", __func__));
	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));

	s->kif = kif;

	if (s->id == 0 && s->creatorid == 0) {
		/* XXX: should be atomic, but probability of collision low */
		if ((s->id = V_pf_stateid[curcpu]++) == PFID_MAXID)
			V_pf_stateid[curcpu] = 1;
		s->id |= (uint64_t)curcpu << PFID_CPUSHIFT;
		s->id = htobe64(s->id);
		s->creatorid = V_pf_status.hostid;
	}

	/* Returns with ID locked on success. */
	if ((error = pf_state_key_attach(skw, sks, s)) != 0)
		return (error);

	ih = &V_pf_idhash[PF_IDHASH(s)];
	PF_HASHROW_ASSERT(ih);
	LIST_FOREACH(cur, &ih->states, entry)
		if (cur->id == s->id && cur->creatorid == s->creatorid)
			break;

	if (cur != NULL) {
		PF_HASHROW_UNLOCK(ih);
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("pf: state ID collision: "
			    "id: %016llx creatorid: %08x\n",
			    (unsigned long long)be64toh(s->id),
			    ntohl(s->creatorid));
		}
		pf_detach_state(s);
		return (EEXIST);
	}

	LIST_INSERT_HEAD(&ih->states, s, entry);
	/* One for keys, one for ID hash. */
	refcount_init(&s->refs, 2);

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_INSERT], 1);
	if (pfsync_insert_state_ptr != NULL)
		pfsync_insert_state_ptr(s);

	/* Returns locked. */
	return (0);
}
/*
 * Find state by ID: returns with locked row on success.
 */
struct pf_state *
pf_find_state_byid(uint64_t id, uint32_t creatorid)
{
	struct pf_idhash *ih;
	struct pf_state *s;

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	ih = &V_pf_idhash[(be64toh(id) % (pf_hashmask + 1))];

	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(s, &ih->states, entry)
		if (s->id == id && s->creatorid == creatorid)
			break;

	if (s == NULL)
		PF_HASHROW_UNLOCK(ih);

	return (s);
}

/*
 * Find state by key.
 * Returns with ID hash slot locked on success.
 */
static struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
{
	struct pf_keyhash	*kh;
	struct pf_state_key	*sk;
	struct pf_state		*s;
	int idx;

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
			break;
	if (sk == NULL) {
		PF_HASHROW_UNLOCK(kh);
		return (NULL);
	}

	idx = (dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK);

	/* List is sorted, if-bound states before floating ones. */
	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx])
		if (s->kif == V_pfi_all || s->kif == kif) {
			PF_STATE_LOCK(s);
			PF_HASHROW_UNLOCK(kh);
			if (s->timeout >= PFTM_MAX) {
				/*
				 * State is either being processed by
				 * pf_unlink_state() in another thread, or
				 * is scheduled for immediate expiry.
				 */
				PF_STATE_UNLOCK(s);
				return (NULL);
			}
			return (s);
		}
	PF_HASHROW_UNLOCK(kh);

	return (NULL);
}
struct pf_state *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
	struct pf_keyhash	*kh;
	struct pf_state_key	*sk;
	struct pf_state		*s, *ret = NULL;
	int			 idx, inout = 0;

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
			break;
	if (sk == NULL) {
		PF_HASHROW_UNLOCK(kh);
		return (NULL);
	}
	switch (dir) {
	case PF_IN:
		idx = PF_SK_WIRE;
		break;
	case PF_OUT:
		idx = PF_SK_STACK;
		break;
	case PF_INOUT:
		idx = PF_SK_WIRE;
		inout = 1;
		break;
	default:
		panic("%s: dir %u", __func__, dir);
	}
second_run:
	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) {
		if (more == NULL) {
			PF_HASHROW_UNLOCK(kh);
			return (s);
		}

		if (ret)
			(*more)++;
		else
			ret = s;
	}
	if (inout == 1) {
		inout = 0;
		idx = PF_SK_STACK;
		goto second_run;
	}
	PF_HASHROW_UNLOCK(kh);

	return (ret);
}

/* END state table stuff */
static void
pf_send(struct pf_send_entry *pfse)
{

	PF_SENDQ_LOCK();
	STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next);
	PF_SENDQ_UNLOCK();
	swi_sched(V_pf_swi_cookie, 0);
}

void
pf_intr(void *v)
{
	struct pf_send_head queue;
	struct pf_send_entry *pfse, *next;

	CURVNET_SET((struct vnet *)v);

	PF_SENDQ_LOCK();
	queue = V_pf_sendqueue;
	STAILQ_INIT(&V_pf_sendqueue);
	PF_SENDQ_UNLOCK();

	STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) {
		switch (pfse->pfse_type) {
#ifdef INET
		case PFSE_IP:
			ip_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL);
			break;
		case PFSE_ICMP:
			icmp_error(pfse->pfse_m, pfse->icmpopts.type,
			    pfse->icmpopts.code, 0, pfse->icmpopts.mtu);
			break;
#endif /* INET */
#ifdef INET6
		case PFSE_IP6:
			ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL,
			    NULL);
			break;
		case PFSE_ICMP6:
			icmp6_error(pfse->pfse_m, pfse->icmpopts.type,
			    pfse->icmpopts.code, pfse->icmpopts.mtu);
			break;
#endif /* INET6 */
		default:
			panic("%s: unknown type", __func__);
		}
		free(pfse, M_PFTEMP);
	}
	CURVNET_RESTORE();
}
void
pf_purge_thread(void *unused __unused)
{
	VNET_ITERATOR_DECL(vnet_iter);
	u_int idx = 0;

	for (;;) {
		PF_RULES_RLOCK();
		rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftm", hz / 10);
		PF_RULES_RUNLOCK();

		VNET_LIST_RLOCK();
		VNET_FOREACH(vnet_iter) {
			CURVNET_SET(vnet_iter);

			if (pf_end_threads) {
				pf_end_threads++;
				wakeup(pf_purge_thread);
				kproc_exit(0);
			}

			/* Wait until V_pf_default_rule.timeout is initialized. */
			if (V_pf_vnet_active == 0) {
				CURVNET_RESTORE();
				continue;
			}

			/* Process 1/interval fraction of the state table every run. */
			idx = pf_purge_expired_states(idx, pf_hashmask /
			    (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));

			/* Purge other expired types every PFTM_INTERVAL seconds. */
			if (idx == 0) {
				/*
				 * Order is important:
				 * - states and src nodes reference rules
				 * - states and rules reference kifs
				 */
				pf_purge_expired_fragments();
				pf_purge_expired_src_nodes();
				pf_purge_unlinked_rules();
				pfi_kif_purge();
			}
			CURVNET_RESTORE();
		}
		VNET_LIST_RUNLOCK();
	}
	/* not reached */
}
void
pf_unload_vnet_purge(void)
{

	/*
	 * To clean up all kifs and rules we need two runs: the first
	 * run clears the reference flags, the following state purge
	 * will not raise them again, and the second run then frees
	 * everything that is left unreferenced.
	 */
	pf_purge_unlinked_rules();
	pfi_kif_purge();

	/*
	 * Now purge everything.
	 */
	pf_purge_expired_states(0, pf_hashmask);
	pf_purge_expired_fragments();
	pf_purge_expired_src_nodes();

	/*
	 * Now all kifs & rules should be unreferenced,
	 * thus should be successfully freed.
	 */
	pf_purge_unlinked_rules();
	pfi_kif_purge();
}
u_int32_t
pf_state_expires(const struct pf_state *state)
{
	u_int32_t	timeout;
	u_int32_t	start;
	u_int32_t	end;
	u_int32_t	states;

	/* handle all PFTM_* > PFTM_MAX here */
	if (state->timeout == PFTM_PURGE)
		return (time_uptime);
	KASSERT(state->timeout != PFTM_UNLINKED,
	    ("pf_state_expires: timeout == PFTM_UNLINKED"));
	KASSERT((state->timeout < PFTM_MAX),
	    ("pf_state_expires: timeout > PFTM_MAX"));
	timeout = state->rule.ptr->timeout[state->timeout];
	if (!timeout)
		timeout = V_pf_default_rule.timeout[state->timeout];
	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
	if (start && state->rule.ptr != &V_pf_default_rule) {
		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
		states = counter_u64_fetch(state->rule.ptr->states_cur);
	} else {
		start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
		end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
		states = V_pf_status.states;
	}
	if (end && states > start && start < end) {
		if (states < end)
			return (state->expire + timeout * (end - states) /
			    (end - start));
		else
			return (time_uptime);
	}
	return (state->expire + timeout);
}
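
/*
 * Worked example for the adaptive scaling above (illustrative only):
 * with adaptive.start = 6000, adaptive.end = 12000, a base timeout of
 * 120 seconds and states = 9000, the remaining lifetime becomes
 *
 *	120 * (12000 - 9000) / (12000 - 6000) = 60 seconds,
 *
 * i.e. the closer the state count gets to adaptive.end, the more
 * aggressively existing states are timed out; at or beyond
 * adaptive.end they expire immediately (time_uptime).
 */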
static void
pf_purge_expired_src_nodes()
{
	struct pf_src_node_list	 freelist;
	struct pf_srchash	*sh;
	struct pf_src_node	*cur, *next;
	int i;

	LIST_INIT(&freelist);
	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next)
			if (cur->states == 0 && cur->expire <= time_uptime) {
				pf_unlink_src_node(cur);
				LIST_INSERT_HEAD(&freelist, cur, entry);
			} else if (cur->rule.ptr != NULL)
				cur->rule.ptr->rule_flag |= PFRULE_REFS;
		PF_HASHROW_UNLOCK(sh);
	}

	pf_free_src_nodes(&freelist);

	V_pf_status.src_nodes = uma_zone_get_cur(V_pf_sources_z);
}

static void
pf_src_tree_remove_state(struct pf_state *s)
{
	struct pf_src_node *sn;
	struct pf_srchash *sh;
	u_int32_t timeout;

	timeout = s->rule.ptr->timeout[PFTM_SRC_NODE] ?
	    s->rule.ptr->timeout[PFTM_SRC_NODE] :
	    V_pf_default_rule.timeout[PFTM_SRC_NODE];

	if (s->src_node != NULL) {
		sn = s->src_node;
		sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
		PF_HASHROW_LOCK(sh);
		if (s->src.tcp_est)
			--sn->conn;
		if (--sn->states == 0)
			sn->expire = time_uptime + timeout;
		PF_HASHROW_UNLOCK(sh);
	}

	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
		sn = s->nat_src_node;
		sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
		PF_HASHROW_LOCK(sh);
		if (--sn->states == 0)
			sn->expire = time_uptime + timeout;
		PF_HASHROW_UNLOCK(sh);
	}

	s->src_node = s->nat_src_node = NULL;
}
/*
 * Unlink and potentially free a state. Function may be
 * called with ID hash row locked, but always returns
 * unlocked, since it needs to go through key hash locking.
 */
int
pf_unlink_state(struct pf_state *s, u_int flags)
{
	struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)];

	if ((flags & PF_ENTER_LOCKED) == 0)
		PF_HASHROW_LOCK(ih);
	else
		PF_HASHROW_ASSERT(ih);

	if (s->timeout == PFTM_UNLINKED) {
		/*
		 * State is being processed
		 * by pf_unlink_state() in
		 * another thread.
		 */
		PF_HASHROW_UNLOCK(ih);
		return (0);	/* XXXGL: undefined actually */
	}

	if (s->src.state == PF_TCPS_PROXY_DST) {
		/* XXX wire key the right one? */
		pf_send_tcp(NULL, s->rule.ptr, s->key[PF_SK_WIRE]->af,
		    &s->key[PF_SK_WIRE]->addr[1],
		    &s->key[PF_SK_WIRE]->addr[0],
		    s->key[PF_SK_WIRE]->port[1],
		    s->key[PF_SK_WIRE]->port[0],
		    s->src.seqhi, s->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, s->tag, NULL);
	}

	LIST_REMOVE(s, entry);
	pf_src_tree_remove_state(s);

	if (pfsync_delete_state_ptr != NULL)
		pfsync_delete_state_ptr(s);

	STATE_DEC_COUNTERS(s);

	s->timeout = PFTM_UNLINKED;

	PF_HASHROW_UNLOCK(ih);

	pf_detach_state(s);
	refcount_release(&s->refs);

	return (pf_release_state(s));
}

void
pf_free_state(struct pf_state *cur)
{

	KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
	KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
	    cur->timeout));

	pf_normalize_tcp_cleanup(cur);
	uma_zfree(V_pf_state_z, cur);
	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_REMOVALS], 1);
}
/*
 * Called only from pf_purge_thread(), thus serialized.
 */
static u_int
pf_purge_expired_states(u_int i, int maxcheck)
{
	struct pf_idhash *ih;
	struct pf_state *s;

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	/*
	 * Go through hash and unlink states that expire now.
	 */
	while (maxcheck > 0) {

		ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (pf_state_expires(s) <= time_uptime) {
				V_pf_status.states -=
				    pf_unlink_state(s, PF_ENTER_LOCKED);
				goto relock;
			}
			s->rule.ptr->rule_flag |= PFRULE_REFS;
			if (s->nat_rule.ptr != NULL)
				s->nat_rule.ptr->rule_flag |= PFRULE_REFS;
			if (s->anchor.ptr != NULL)
				s->anchor.ptr->rule_flag |= PFRULE_REFS;
			s->kif->pfik_flags |= PFI_IFLAG_REFS;
			if (s->rt_kif != NULL)
				s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
		}
		PF_HASHROW_UNLOCK(ih);

		/* Return when we hit end of hash. */
		if (++i > pf_hashmask) {
			V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
			return (0);
		}

		maxcheck--;
	}

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	return (i);
}

static void
pf_purge_unlinked_rules()
{
	struct pf_rulequeue tmpq;
	struct pf_rule *r, *r1;

	/*
	 * If we have an overload task pending, then we'd better skip
	 * purging this time. There is a tiny probability that the
	 * overload task references an already unlinked rule.
	 */
	PF_OVERLOADQ_LOCK();
	if (!SLIST_EMPTY(&V_pf_overloadqueue)) {
		PF_OVERLOADQ_UNLOCK();
		return;
	}
	PF_OVERLOADQ_UNLOCK();

	/*
	 * Do naive mark-and-sweep garbage collecting of old rules.
	 * Reference flag is raised by pf_purge_expired_states()
	 * and pf_purge_expired_src_nodes().
	 *
	 * To avoid LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK,
	 * use a temporary queue.
	 */
	TAILQ_INIT(&tmpq);
	PF_UNLNKDRULES_LOCK();
	TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) {
		if (!(r->rule_flag & PFRULE_REFS)) {
			TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries);
			TAILQ_INSERT_TAIL(&tmpq, r, entries);
		} else
			r->rule_flag &= ~PFRULE_REFS;
	}
	PF_UNLNKDRULES_UNLOCK();

	if (!TAILQ_EMPTY(&tmpq)) {
		PF_RULES_WLOCK();
		TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) {
			TAILQ_REMOVE(&tmpq, r, entries);
			pf_free_rule(r);
		}
		PF_RULES_WUNLOCK();
	}
}
void
pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t a = ntohl(addr->addr32[0]);
		printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
		    (a>>8)&255, a&255);
		if (p) {
			p = ntohs(p);
			printf(":%u", p);
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		u_int16_t b;
		u_int8_t i, curstart, curend, maxstart, maxend;
		curstart = curend = maxstart = maxend = 255;
		for (i = 0; i < 8; i++) {
			if (!addr->addr16[i]) {
				if (curstart == 255)
					curstart = i;
				curend = i;
			} else {
				if ((curend - curstart) >
				    (maxend - maxstart)) {
					maxstart = curstart;
					maxend = curend;
				}
				curstart = curend = 255;
			}
		}
		if ((curend - curstart) >
		    (maxend - maxstart)) {
			maxstart = curstart;
			maxend = curend;
		}
		for (i = 0; i < 8; i++) {
			if (i >= maxstart && i <= maxend) {
				if (i == 0)
					printf(":");
				if (i == maxend)
					printf(":");
			} else {
				b = ntohs(addr->addr16[i]);
				printf("%x", b);
				if (i < 7)
					printf(":");
			}
		}
		if (p) {
			p = ntohs(p);
			printf("[%u]", p);
		}
		break;
	}
#endif /* INET6 */
	}
}
void
pf_print_state(struct pf_state *s)
{
	pf_print_state_parts(s, NULL, NULL);
}

static void
pf_print_state_parts(struct pf_state *s,
    struct pf_state_key *skwp, struct pf_state_key *sksp)
{
	struct pf_state_key *skw, *sks;
	u_int8_t proto, dir;

	/* Do our best to fill these, but they're skipped if NULL */
	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
	proto = skw ? skw->proto : (sks ? sks->proto : 0);
	dir = s ? s->direction : 0;

	switch (proto) {
	case IPPROTO_TCP:
		printf("TCP ");
		break;
	case IPPROTO_UDP:
		printf("UDP ");
		break;
	case IPPROTO_ICMP:
		printf("ICMP ");
		break;
	case IPPROTO_ICMPV6:
		printf("ICMPv6 ");
		break;
	default:
		printf("%u", proto);
		break;
	}

	if (skw != NULL) {
		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
		printf(" ");
		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
	}
	if (sks != NULL) {
		pf_print_host(&sks->addr[0], sks->port[0], sks->af);
		printf(" ");
		pf_print_host(&sks->addr[1], sks->port[1], sks->af);
	}

	if (s) {
		if (proto == IPPROTO_TCP) {
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->src.seqlo, s->src.seqhi,
			    s->src.max_win, s->src.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->src.wscale & PF_WSCALE_MASK);
			printf("]");
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->dst.seqlo, s->dst.seqhi,
			    s->dst.max_win, s->dst.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->dst.wscale & PF_WSCALE_MASK);
			printf("]");
		}
		printf(" %u:%u", s->src.state, s->dst.state);
	}
}
void
pf_print_flags(u_int8_t f)
{
	if (f)
		printf(" ");
	if (f & TH_FIN)
		printf("F");
	if (f & TH_SYN)
		printf("S");
	if (f & TH_RST)
		printf("R");
	if (f & TH_PUSH)
		printf("P");
	if (f & TH_ACK)
		printf("A");
	if (f & TH_URG)
		printf("U");
	if (f & TH_ECE)
		printf("E");
	if (f & TH_CWR)
		printf("W");
}

#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {

		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
		if (cur->af != prev->af)
			PF_SET_SKIP_STEPS(PF_SKIP_AF);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
		if (cur->src.neg != prev->src.neg ||
		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
		if (cur->src.port[0] != prev->src.port[0] ||
		    cur->src.port[1] != prev->src.port[1] ||
		    cur->src.port_op != prev->src.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
		if (cur->dst.neg != prev->dst.neg ||
		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
		if (cur->dst.port[0] != prev->dst.port[0] ||
		    cur->dst.port[1] != prev->dst.port[1] ||
		    cur->dst.port_op != prev->dst.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}
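
/*
 * Illustrative note (not in the original source): skip steps let the
 * rule evaluator jump over runs of rules that share a field.  For
 * example, if rules 10 through 40 all say "on em0", rule 10's
 * skip[PF_SKIP_IFP] points at the first rule after 40 that names a
 * different interface, so a packet arriving on fxp0 pays for the
 * interface comparison once instead of 31 times.
 */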
static int
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
{
	if (aw1->type != aw2->type)
		return (1);
	switch (aw1->type) {
	case PF_ADDR_ADDRMASK:
	case PF_ADDR_RANGE:
		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, AF_INET6))
			return (1);
		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, AF_INET6))
			return (1);
		return (0);
	case PF_ADDR_DYNIFTL:
		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		return (0);
	case PF_ADDR_TABLE:
		return (aw1->p.tbl != aw2->p.tbl);
	default:
		printf("invalid address type: %d\n", aw1->type);
		return (1);
	}
}
/*
 * Checksum updates are a little complicated because the checksum in the TCP/UDP
 * header isn't always a full checksum. In some cases (i.e. output) it's a
 * pseudo-header checksum, which is a partial checksum over src/dst IP
 * addresses, protocol number and length.
 *
 * That means we have the following cases:
 *  * Input or forwarding: we don't have TSO, the checksum fields are full
 *    checksums, we need to update the checksum whenever we change anything.
 *  * Output (i.e. the checksum is a pseudo-header checksum):
 *    x The field being updated is src/dst address or affects the length of
 *      the packet. We need to update the pseudo-header checksum (note that this
 *      checksum is not ones' complement).
 *    x Some other field is being modified (e.g. src/dst port numbers): We
 *      don't have to update anything.
 */
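
/*
 * Worked example for pf_cksum_fixup() below (illustrative only):
 * replacing the 16-bit word 0x1234 with 0x1235 in a packet whose
 * current checksum is 0xaabb gives
 *
 *	l = 0xaabb + 0x1234 - 0x1235 = 0xaaba
 *
 * and the fold step leaves 0xaaba, so only the difference of the
 * changed word is applied (an RFC 1624 style incremental update)
 * instead of summing the whole packet again.
 */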
static u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
	u_int32_t l;

	if (udp && !cksum)
		return (0x0000);
	l = cksum + old - new;
	l = (l >> 16) + (l & 65535);
	l = l & 65535;
	if (udp && !l)
		return (0xFFFF);
	return (l);
}

static u_int16_t
pf_proto_cksum_fixup(struct mbuf *m, u_int16_t cksum, u_int16_t old,
    u_int16_t new, u_int8_t udp)
{
	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
		return (cksum);

	return (pf_cksum_fixup(cksum, old, new, udp));
}
static void
pf_change_ap(struct mbuf *m, struct pf_addr *a, u_int16_t *p, u_int16_t *ic,
    u_int16_t *pc, struct pf_addr *an, u_int16_t pn, u_int8_t u,
    sa_family_t af)
{
	struct pf_addr	ao;
	u_int16_t	po = *p;

	PF_ACPY(&ao, a, af);
	PF_ACPY(a, an, af);

	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
		*pc = ~*pc;

	*p = pn;

	switch (af) {
#ifdef INET
	case AF_INET:
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    ao.addr16[0], an->addr16[0], 0),
		    ao.addr16[1], an->addr16[1], 0);
		*pc = pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u);

		*pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    ao.addr16[2], an->addr16[2], u),
		    ao.addr16[3], an->addr16[3], u),
		    ao.addr16[4], an->addr16[4], u),
		    ao.addr16[5], an->addr16[5], u),
		    ao.addr16[6], an->addr16[6], u),
		    ao.addr16[7], an->addr16[7], u);

		*pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
		break;
#endif /* INET6 */
	}

	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA |
	    CSUM_DELAY_DATA_IPV6)) {
		*pc = ~*pc;
		if (! *pc)
			*pc = 0xffff;
	}
}
/* Changes a u_int32_t.  Uses a void * so there are no align restrictions */
void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
	u_int32_t	ao;

	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));
	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
	    ao % 65536, an % 65536, u);
}

void
pf_change_proto_a(struct mbuf *m, void *a, u_int16_t *c, u_int32_t an, u_int8_t udp)
{
	u_int32_t	ao;

	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));

	*c = pf_proto_cksum_fixup(m,
	    pf_proto_cksum_fixup(m, *c, ao / 65536, an / 65536, udp),
	    ao % 65536, an % 65536, udp);
}

#ifdef INET6
static void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
	struct pf_addr	ao;

	PF_ACPY(&ao, a, AF_INET6);
	PF_ACPY(a, an, AF_INET6);

	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(*c,
	    ao.addr16[0], an->addr16[0], u),
	    ao.addr16[1], an->addr16[1], u),
	    ao.addr16[2], an->addr16[2], u),
	    ao.addr16[3], an->addr16[3], u),
	    ao.addr16[4], an->addr16[4], u),
	    ao.addr16[5], an->addr16[5], u),
	    ao.addr16[6], an->addr16[6], u),
	    ao.addr16[7], an->addr16[7], u);
}
#endif /* INET6 */
static void
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
{
	struct pf_addr	oia, ooa;

	PF_ACPY(&oia, ia, af);
	if (oa)
		PF_ACPY(&ooa, oa, af);

	/* Change inner protocol port, fix inner protocol checksum. */
	if (ip != NULL) {
		u_int16_t	oip = *ip;
		u_int32_t	opc;

		if (pc != NULL)
			opc = *pc;
		*ip = np;
		if (pc != NULL)
			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
		if (pc != NULL)
			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
	}
	/* Change inner ip address, fix inner ip and icmp checksums. */
	PF_ACPY(ia, na, af);
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t	 oh2c = *h2c;

		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], u),
		    oia.addr16[1], ia->addr16[1], u),
		    oia.addr16[2], ia->addr16[2], u),
		    oia.addr16[3], ia->addr16[3], u),
		    oia.addr16[4], ia->addr16[4], u),
		    oia.addr16[5], ia->addr16[5], u),
		    oia.addr16[6], ia->addr16[6], u),
		    oia.addr16[7], ia->addr16[7], u);
		break;
#endif /* INET6 */
	}
	/* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
	if (oa) {
		PF_ACPY(oa, na, af);
		switch (af) {
#ifdef INET
		case AF_INET:
			*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
			    ooa.addr16[0], oa->addr16[0], 0),
			    ooa.addr16[1], oa->addr16[1], 0);
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(*ic,
			    ooa.addr16[0], oa->addr16[0], u),
			    ooa.addr16[1], oa->addr16[1], u),
			    ooa.addr16[2], oa->addr16[2], u),
			    ooa.addr16[3], oa->addr16[3], u),
			    ooa.addr16[4], oa->addr16[4], u),
			    ooa.addr16[5], oa->addr16[5], u),
			    ooa.addr16[6], oa->addr16[6], u),
			    ooa.addr16[7], oa->addr16[7], u);
			break;
#endif /* INET6 */
		}
	}
}
/*
 * Need to modulate the sequence numbers in the TCP SACK option
 * (credits to Krzysztof Pfaff for report and patch)
 */
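
/*
 * Illustrative example (not in the original source): when a connection
 * is modulated, th_seq/th_ack are shifted by the peer's seqdiff, and
 * the SACK blocks must be shifted the same way.  If dst->seqdiff is
 * 1000, an incoming SACK block {start 46000, end 47460} is rewritten
 * to {45000, 46460} below, with th_sum patched incrementally via
 * pf_change_proto_a() instead of recomputing the whole checksum.
 */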
static int
pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *dst)
{
	int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
	u_int8_t opts[TCP_MAXOLEN], *opt = opts;
	int copyback = 0, i, olen;
	struct sackblk sack;

#define	TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
	if (hlen < TCPOLEN_SACKLEN ||
	    !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
		return 0;

	while (hlen >= TCPOLEN_SACKLEN) {
		olen = opt[1];
		switch (*opt) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
		case TCPOPT_NOP:
			opt++;
			hlen--;
			break;
		case TCPOPT_SACK:
			if (olen > hlen)
				olen = hlen;
			if (olen >= TCPOLEN_SACKLEN) {
				for (i = 2; i + TCPOLEN_SACK <= olen;
				    i += TCPOLEN_SACK) {
					memcpy(&sack, &opt[i], sizeof(sack));
					pf_change_proto_a(m, &sack.start, &th->th_sum,
					    htonl(ntohl(sack.start) - dst->seqdiff), 0);
					pf_change_proto_a(m, &sack.end, &th->th_sum,
					    htonl(ntohl(sack.end) - dst->seqdiff), 0);
					memcpy(&opt[i], &sack, sizeof(sack));
				}
				copyback = 1;
			}
			/* FALLTHROUGH */
		default:
			if (olen < 2)
				olen = 2;
			hlen -= olen;
			opt += olen;
		}
	}

	if (copyback)
		m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts);
	return (copyback);
}
static void
pf_send_tcp(struct mbuf *replyto, const struct pf_rule *r, sa_family_t af,
    const struct pf_addr *saddr, const struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
    u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
    u_int16_t rtag, struct ifnet *ifp)
{
	struct pf_send_entry *pfse;
	struct mbuf	*m;
	int		 len, tlen;
#ifdef INET
	struct ip	*h = NULL;
#endif /* INET */
#ifdef INET6
	struct ip6_hdr	*h6 = NULL;
#endif /* INET6 */
	struct tcphdr	*th;
	char		*opt;
	struct pf_mtag  *pf_mtag;

	len = 0;

	/* maximum segment size tcp option */
	tlen = sizeof(struct tcphdr);
	if (mss)
		tlen += 4;

	switch (af) {
#ifdef INET
	case AF_INET:
		len = sizeof(struct ip) + tlen;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		len = sizeof(struct ip6_hdr) + tlen;
		break;
#endif /* INET6 */
	default:
		panic("%s: unsupported af %d", __func__, af);
	}

	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
	if (pfse == NULL)
		return;
	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(pfse, M_PFTEMP);
		return;
	}
#ifdef MAC
	mac_netinet_firewall_send(m);
#endif
	if ((pf_mtag = pf_get_mtag(m)) == NULL) {
		free(pfse, M_PFTEMP);
		m_freem(m);
		return;
	}
	if (tag)
		m->m_flags |= M_SKIP_FIREWALL;
	pf_mtag->tag = rtag;

	if (r != NULL && r->rtableid >= 0)
		M_SETFIB(m, r->rtableid);

#ifdef ALTQ
	if (r != NULL && r->qid) {
		pf_mtag->qid = r->qid;

		/* add hints for ecn */
		pf_mtag->hdr = mtod(m, struct ip *);
	}
#endif /* ALTQ */
	m->m_data += max_linkhdr;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	bzero(m->m_data, len);
	switch (af) {
#ifdef INET
	case AF_INET:
		h = mtod(m, struct ip *);

		/* IP header fields included in the TCP checksum */
		h->ip_p = IPPROTO_TCP;
		h->ip_len = htons(tlen);
		h->ip_src.s_addr = saddr->v4.s_addr;
		h->ip_dst.s_addr = daddr->v4.s_addr;

		th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		h6 = mtod(m, struct ip6_hdr *);

		/* IP header fields included in the TCP checksum */
		h6->ip6_nxt = IPPROTO_TCP;
		h6->ip6_plen = htons(tlen);
		memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
		memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));

		th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
		break;
#endif /* INET6 */
	}

	/* TCP header */
	th->th_sport = sport;
	th->th_dport = dport;
	th->th_seq = htonl(seq);
	th->th_ack = htonl(ack);
	th->th_off = tlen >> 2;
	th->th_flags = flags;
	th->th_win = htons(win);

	if (mss) {
		opt = (char *)(th + 1);
		opt[0] = TCPOPT_MAXSEG;
		opt[1] = 4;
		HTONS(mss);
		bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
	}

	switch (af) {
#ifdef INET
	case AF_INET:
		/* TCP checksum */
		th->th_sum = in_cksum(m, len);

		/* Finish the IP header */
		h->ip_v = 4;
		h->ip_hl = sizeof(*h) >> 2;
		h->ip_tos = IPTOS_LOWDELAY;
		h->ip_off = htons(V_path_mtu_discovery ? IP_DF : 0);
		h->ip_len = htons(len);
		h->ip_ttl = ttl ? ttl : V_ip_defttl;
		h->ip_sum = 0;

		pfse->pfse_type = PFSE_IP;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		/* TCP checksum */
		th->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr), tlen);

		h6->ip6_vfc |= IPV6_VERSION;
		h6->ip6_hlim = IPV6_DEFHLIM;

		pfse->pfse_type = PFSE_IP6;
		break;
#endif /* INET6 */
	}
	pfse->pfse_m = m;
	pf_send(pfse);
}
static void
pf_ieee8021q_setpcp(struct mbuf *m, u_int8_t prio)
{
	struct m_tag *mtag;

	KASSERT(prio <= PF_PRIO_MAX,
	    ("%s with invalid pcp", __func__));

	mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_OUT, NULL);
	if (mtag == NULL) {
		mtag = m_tag_alloc(MTAG_8021Q, MTAG_8021Q_PCP_OUT,
		    sizeof(uint8_t), M_NOWAIT);
		if (mtag == NULL)
			return;
		m_tag_prepend(m, mtag);
	}

	*(uint8_t *)(mtag + 1) = prio;
}

static int
pf_match_ieee8021q_pcp(u_int8_t prio, struct mbuf *m)
{
	struct m_tag *mtag;
	u_int8_t mpcp;

	mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL);
	if (mtag == NULL)
		return (0);

	if (prio == PF_PRIO_ZERO)
		prio = 0;

	mpcp = *(uint8_t *)(mtag + 1);

	return (mpcp == prio);
}
static void
pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
    struct pf_rule *r)
{
	struct pf_send_entry *pfse;
	struct mbuf *m0;
	struct pf_mtag *pf_mtag;

	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
	if (pfse == NULL)
		return;

	if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) {
		free(pfse, M_PFTEMP);
		return;
	}

	if ((pf_mtag = pf_get_mtag(m0)) == NULL) {
		free(pfse, M_PFTEMP);
		return;
	}

	m0->m_flags |= M_SKIP_FIREWALL;

	if (r->rtableid >= 0)
		M_SETFIB(m0, r->rtableid);

#ifdef ALTQ
	if (r->qid) {
		pf_mtag->qid = r->qid;
		/* add hints for ecn */
		pf_mtag->hdr = mtod(m0, struct ip *);
	}
#endif /* ALTQ */

	switch (af) {
#ifdef INET
	case AF_INET:
		pfse->pfse_type = PFSE_ICMP;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		pfse->pfse_type = PFSE_ICMP6;
		break;
#endif /* INET6 */
	}
	pfse->pfse_m = m0;
	pfse->icmpopts.type = type;
	pfse->icmpopts.code = code;
	pf_send(pfse);
}
2582 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
2583 * If n is 0, they match if they are equal. If n is != 0, they match if they
2587 pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
2588 struct pf_addr *b, sa_family_t af)
2595 if ((a->addr32[0] & m->addr32[0]) ==
2596 (b->addr32[0] & m->addr32[0]))
2602 if (((a->addr32[0] & m->addr32[0]) ==
2603 (b->addr32[0] & m->addr32[0])) &&
2604 ((a->addr32[1] & m->addr32[1]) ==
2605 (b->addr32[1] & m->addr32[1])) &&
2606 ((a->addr32[2] & m->addr32[2]) ==
2607 (b->addr32[2] & m->addr32[2])) &&
2608 ((a->addr32[3] & m->addr32[3]) ==
2609 (b->addr32[3] & m->addr32[3])))
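/*
 * Example (illustrative only): with n == 0, af == AF_INET,
 * a = 192.0.2.17, m = 255.255.255.0 and b = 192.0.2.1, the masked
 * compare above is
 *
 *	(192.0.2.17 & 255.255.255.0) == (192.0.2.1 & 255.255.255.0)
 *	          192.0.2.0          ==          192.0.2.0
 *
 * so the addresses match; a nonzero n ("! address" in a rule) inverts
 * the result.
 */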
2628 * Return 1 if b <= a <= e, otherwise return 0.
2631 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
2632 struct pf_addr *a, sa_family_t af)
2637 if ((ntohl(a->addr32[0]) < ntohl(b->addr32[0])) ||
2638 (ntohl(a->addr32[0]) > ntohl(e->addr32[0])))
2647 for (i = 0; i < 4; ++i)
2648 if (ntohl(a->addr32[i]) > ntohl(b->addr32[i]))
2650 else if (ntohl(a->addr32[i]) < ntohl(b->addr32[i]))
2653 for (i = 0; i < 4; ++i)
2654 if (ntohl(a->addr32[i]) < ntohl(e->addr32[i]))
2656 else if (ntohl(a->addr32[i]) > ntohl(e->addr32[i]))
2666 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
2670 return ((p > a1) && (p < a2));
2672 return ((p < a1) || (p > a2));
2674 return ((p >= a1) && (p <= a2));
2688 return (0); /* never reached */
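/*
 * Illustration (not part of the original source): the range operators
 * use both bounds, the unary ones only a1.  A rule such as
 * "port 1000 >< 2000" loads op = PF_OP_IRG, a1 = 1000, a2 = 2000, and
 * a packet with p = 1500 matches via the first return above:
 *
 *	(1500 > 1000) && (1500 < 2000)	-> 1
 *
 * PF_OP_XRG ("<>") is its complement and PF_OP_RRG (":") includes the
 * endpoints.
 */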
2692 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
2697 return (pf_match(op, a1, a2, p));
2701 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
2703 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2705 return (pf_match(op, a1, a2, u));
2709 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
2711 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2713 return (pf_match(op, a1, a2, g));
2717 pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag, int mtag)
2722 return ((!r->match_tag_not && r->match_tag == *tag) ||
2723 (r->match_tag_not && r->match_tag != *tag));
2727 pf_tag_packet(struct mbuf *m, struct pf_pdesc *pd, int tag)
2730 KASSERT(tag > 0, ("%s: tag %d", __func__, tag));
2732 if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(m)) == NULL))
2735 pd->pf_mtag->tag = tag;
2740 #define PF_ANCHOR_STACKSIZE 32
2741 struct pf_anchor_stackframe {
2742 struct pf_ruleset *rs;
2743 struct pf_rule *r; /* XXX: + match bit */
2744 struct pf_anchor *child;
2748 * XXX: We rely on malloc(9) returning pointer aligned addresses.
2750 #define PF_ANCHORSTACK_MATCH 0x00000001
2751 #define PF_ANCHORSTACK_MASK (PF_ANCHORSTACK_MATCH)
2753 #define PF_ANCHOR_MATCH(f) ((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH)
2754 #define PF_ANCHOR_RULE(f) (struct pf_rule *) \
2755 ((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK)
2756 #define PF_ANCHOR_SET_MATCH(f) do { (f)->r = (void *) \
2757 ((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH); \
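/*
 * Illustration (not part of the original source): because malloc(9)
 * storage is pointer-aligned, the low bit of f->r is always clear and
 * can carry the per-frame match flag without growing the struct:
 *
 *	f->r = (void *)((uintptr_t)r | PF_ANCHORSTACK_MATCH);
 *	int matched = (uintptr_t)f->r & PF_ANCHORSTACK_MATCH;
 *	struct pf_rule *rule = (struct pf_rule *)
 *	    ((uintptr_t)f->r & ~PF_ANCHORSTACK_MASK);
 *
 * This is exactly what the PF_ANCHOR_* macros above encapsulate.
 */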
2761 pf_step_into_anchor(struct pf_anchor_stackframe *stack, int *depth,
2762 struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
2765 struct pf_anchor_stackframe *f;
2771 if (*depth >= PF_ANCHOR_STACKSIZE) {
2772 printf("%s: anchor stack overflow on %s\n",
2773 __func__, (*r)->anchor->name);
2774 *r = TAILQ_NEXT(*r, entries);
2776 } else if (*depth == 0 && a != NULL)
2778 f = stack + (*depth)++;
2781 if ((*r)->anchor_wildcard) {
2782 struct pf_anchor_node *parent = &(*r)->anchor->children;
2784 if ((f->child = RB_MIN(pf_anchor_node, parent)) == NULL) {
2788 *rs = &f->child->ruleset;
2791 *rs = &(*r)->anchor->ruleset;
2793 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2797 pf_step_out_of_anchor(struct pf_anchor_stackframe *stack, int *depth,
2798 struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
2801 struct pf_anchor_stackframe *f;
2810 f = stack + *depth - 1;
2811 fr = PF_ANCHOR_RULE(f);
2812 if (f->child != NULL) {
2813 struct pf_anchor_node *parent;
2816 * This block traverses
2817 * a wildcard anchor.
2819 parent = &fr->anchor->children;
2820 if (match != NULL && *match) {
2822 * If any of "*" matched, then
2823 * "foo/ *" matched, mark frame
2826 PF_ANCHOR_SET_MATCH(f);
2829 f->child = RB_NEXT(pf_anchor_node, parent, f->child);
2830 if (f->child != NULL) {
2831 *rs = &f->child->ruleset;
2832 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2840 if (*depth == 0 && a != NULL)
2843 if (PF_ANCHOR_MATCH(f) || (match != NULL && *match))
2845 *r = TAILQ_NEXT(fr, entries);
2846 } while (*r == NULL);
2853 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
2854 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
2859 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2860 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2864 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2865 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2866 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
2867 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
2868 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
2869 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
2870 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
2871 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
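/*
 * Example (illustrative only): pf_poolmask() takes the network bits
 * from the pool address and the host bits from the original address.
 * With raddr = 10.0.0.0, rmask = 255.255.255.0, saddr = 192.0.2.77:
 *
 *	naddr = (10.0.0.0 & 255.255.255.0)
 *	      | (~255.255.255.0 & 192.0.2.77)
 *	      =  10.0.0.0 | 0.0.0.77 = 10.0.0.77
 *
 * which is how bitmask pools preserve the low bits of the source.
 */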
2877 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
2882 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
2886 if (addr->addr32[3] == 0xffffffff) {
2887 addr->addr32[3] = 0;
2888 if (addr->addr32[2] == 0xffffffff) {
2889 addr->addr32[2] = 0;
2890 if (addr->addr32[1] == 0xffffffff) {
2891 addr->addr32[1] = 0;
2893 htonl(ntohl(addr->addr32[0]) + 1);
2896 htonl(ntohl(addr->addr32[1]) + 1);
2899 htonl(ntohl(addr->addr32[2]) + 1);
2902 htonl(ntohl(addr->addr32[3]) + 1);
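/*
 * Illustration (not part of the original source): addresses stay in
 * network byte order, so each 32-bit word goes through ntohl() before
 * the increment and back through htonl(), and a word that wraps to
 * zero carries into the next more significant word.  Incrementing
 * 2001:db8::ffff:ffff:
 *
 *	addr32[3]: 0xffffffff -> 0x00000000	(wraps, carry)
 *	addr32[2]: 0x00000000 -> 0x00000001	(carry absorbed)
 *
 * giving 2001:db8::1:0:0.
 */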
2909 pf_socket_lookup(int direction, struct pf_pdesc *pd, struct mbuf *m)
2911 struct pf_addr *saddr, *daddr;
2912 u_int16_t sport, dport;
2913 struct inpcbinfo *pi;
2916 pd->lookup.uid = UID_MAX;
2917 pd->lookup.gid = GID_MAX;
2919 switch (pd->proto) {
2921 if (pd->hdr.tcp == NULL)
2923 sport = pd->hdr.tcp->th_sport;
2924 dport = pd->hdr.tcp->th_dport;
2928 if (pd->hdr.udp == NULL)
2930 sport = pd->hdr.udp->uh_sport;
2931 dport = pd->hdr.udp->uh_dport;
2937 if (direction == PF_IN) {
2952 inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4,
2953 dport, INPLOOKUP_RLOCKPCB, NULL, m);
2955 inp = in_pcblookup_mbuf(pi, saddr->v4, sport,
2956 daddr->v4, dport, INPLOOKUP_WILDCARD |
2957 INPLOOKUP_RLOCKPCB, NULL, m);
2965 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6,
2966 dport, INPLOOKUP_RLOCKPCB, NULL, m);
2968 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport,
2969 &daddr->v6, dport, INPLOOKUP_WILDCARD |
2970 INPLOOKUP_RLOCKPCB, NULL, m);
2980 INP_RLOCK_ASSERT(inp);
2981 pd->lookup.uid = inp->inp_cred->cr_uid;
2982 pd->lookup.gid = inp->inp_cred->cr_groups[0];
2989 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
2993 u_int8_t *opt, optlen;
2994 u_int8_t wscale = 0;
2996 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
2997 if (hlen <= sizeof(struct tcphdr))
2999 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3001 opt = hdr + sizeof(struct tcphdr);
3002 hlen -= sizeof(struct tcphdr);
3012 if (wscale > TCP_MAX_WINSHIFT)
3013 wscale = TCP_MAX_WINSHIFT;
3014 wscale |= PF_WSCALE_FLAG;
3029 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3033 u_int8_t *opt, optlen;
3034 u_int16_t mss = V_tcp_mssdflt;
3036 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3037 if (hlen <= sizeof(struct tcphdr))
3039 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3041 opt = hdr + sizeof(struct tcphdr);
3042 hlen -= sizeof(struct tcphdr);
3043 while (hlen >= TCPOLEN_MAXSEG) {
3051 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
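/*
 * Sketch (not part of the original source) of the option walk that
 * pf_get_wscale() and pf_get_mss() perform above: TCP options are
 * (kind, length, data) triples with two one-byte specials.
 *
 *	while (hlen >= 2) {
 *		if (opt[0] == TCPOPT_EOL)	// 0: end of list
 *			break;
 *		if (opt[0] == TCPOPT_NOP) {	// 1: one-byte pad
 *			opt++; hlen--;
 *			continue;
 *		}
 *		uint8_t optlen = opt[1];	// kind, len, payload
 *		if (optlen < 2 || optlen > hlen)
 *			break;			// malformed, stop
 *		// interesting kinds read their payload here
 *		opt += optlen; hlen -= optlen;
 *	}
 */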
3067 pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
3070 struct nhop4_basic nh4;
3073 struct nhop6_basic nh6;
3074 struct in6_addr dst6;
3083 hlen = sizeof(struct ip);
3084 if (fib4_lookup_nh_basic(rtableid, addr->v4, 0, 0, &nh4) == 0)
3085 mss = nh4.nh_mtu - hlen - sizeof(struct tcphdr);
3090 hlen = sizeof(struct ip6_hdr);
3091 in6_splitscope(&addr->v6, &dst6, &scopeid);
3092 if (fib6_lookup_nh_basic(rtableid, &dst6, scopeid, 0, 0, &nh6) == 0)
3093 mss = nh6.nh_mtu - hlen - sizeof(struct tcphdr);
3098 mss = max(V_tcp_mssdflt, mss);
3099 mss = min(mss, offer);
3100 mss = max(mss, 64); /* sanity - at least max opt space */
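/*
 * Worked example (illustrative only): IPv4 route with a 1500-byte MTU
 * and a peer that offered MSS 1460:
 *
 *	mss = 1500 - 20 (ip) - 20 (tcphdr) = 1460
 *	mss = max(V_tcp_mssdflt, 1460)     = 1460
 *	mss = min(1460, offer)             = 1460
 *	mss = max(1460, 64)                = 1460
 *
 * A 1400-byte tunnel MTU on the route would clamp the result to 1360
 * no matter what the peer offered.
 */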
3105 pf_tcp_iss(struct pf_pdesc *pd)
3108 u_int32_t digest[4];
3110 if (V_pf_tcp_secret_init == 0) {
3111 read_random(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret));
3112 MD5Init(&V_pf_tcp_secret_ctx);
3113 MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret,
3114 sizeof(V_pf_tcp_secret));
3115 V_pf_tcp_secret_init = 1;
3118 ctx = V_pf_tcp_secret_ctx;
3120 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short));
3121 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short));
3122 if (pd->af == AF_INET6) {
3123 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
3124 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
3126 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
3127 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
3129 MD5Final((u_char *)digest, &ctx);
3130 V_pf_tcp_iss_off += 4096;
3131 #define ISN_RANDOM_INCREMENT (4096 - 1)
3132 return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) +
3134 #undef ISN_RANDOM_INCREMENT
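/*
 * Sketch (not part of the original source): the returned ISN is
 * roughly
 *
 *	MD5(secret || sport || dport || saddr || daddr)[0]
 *	    + (arc4random() & 4095) + V_pf_tcp_iss_off
 *
 * with V_pf_tcp_iss_off growing by 4096 per call, so sequence numbers
 * are unpredictable off-path yet advance monotonically, in the spirit
 * of RFC 1948 ISN generation.
 */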
3138 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
3139 struct pfi_kif *kif, struct mbuf *m, int off, struct pf_pdesc *pd,
3140 struct pf_rule **am, struct pf_ruleset **rsm, struct inpcb *inp)
3142 struct pf_rule *nr = NULL;
3143 struct pf_addr * const saddr = pd->src;
3144 struct pf_addr * const daddr = pd->dst;
3145 sa_family_t af = pd->af;
3146 struct pf_rule *r, *a = NULL;
3147 struct pf_ruleset *ruleset = NULL;
3148 struct pf_src_node *nsn = NULL;
3149 struct tcphdr *th = pd->hdr.tcp;
3150 struct pf_state_key *sk = NULL, *nk = NULL;
3152 int rewrite = 0, hdrlen = 0;
3153 int tag = -1, rtableid = -1;
3157 u_int16_t sport = 0, dport = 0;
3158 u_int16_t bproto_sum = 0, bip_sum = 0;
3159 u_int8_t icmptype = 0, icmpcode = 0;
3160 struct pf_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
3165 INP_LOCK_ASSERT(inp);
3166 pd->lookup.uid = inp->inp_cred->cr_uid;
3167 pd->lookup.gid = inp->inp_cred->cr_groups[0];
3168 pd->lookup.done = 1;
3171 switch (pd->proto) {
3173 sport = th->th_sport;
3174 dport = th->th_dport;
3175 hdrlen = sizeof(*th);
3178 sport = pd->hdr.udp->uh_sport;
3179 dport = pd->hdr.udp->uh_dport;
3180 hdrlen = sizeof(*pd->hdr.udp);
3184 if (pd->af != AF_INET)
3186 sport = dport = pd->hdr.icmp->icmp_id;
3187 hdrlen = sizeof(*pd->hdr.icmp);
3188 icmptype = pd->hdr.icmp->icmp_type;
3189 icmpcode = pd->hdr.icmp->icmp_code;
3191 if (icmptype == ICMP_UNREACH ||
3192 icmptype == ICMP_SOURCEQUENCH ||
3193 icmptype == ICMP_REDIRECT ||
3194 icmptype == ICMP_TIMXCEED ||
3195 icmptype == ICMP_PARAMPROB)
3200 case IPPROTO_ICMPV6:
3203 sport = dport = pd->hdr.icmp6->icmp6_id;
3204 hdrlen = sizeof(*pd->hdr.icmp6);
3205 icmptype = pd->hdr.icmp6->icmp6_type;
3206 icmpcode = pd->hdr.icmp6->icmp6_code;
3208 if (icmptype == ICMP6_DST_UNREACH ||
3209 icmptype == ICMP6_PACKET_TOO_BIG ||
3210 icmptype == ICMP6_TIME_EXCEEDED ||
3211 icmptype == ICMP6_PARAM_PROB)
3216 sport = dport = hdrlen = 0;
3220 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3222 /* check packet for BINAT/NAT/RDR */
3223 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, &sk,
3224 &nk, saddr, daddr, sport, dport, anchor_stack)) != NULL) {
3225 KASSERT(sk != NULL, ("%s: null sk", __func__));
3226 KASSERT(nk != NULL, ("%s: null nk", __func__));
3229 bip_sum = *pd->ip_sum;
3231 switch (pd->proto) {
3233 bproto_sum = th->th_sum;
3234 pd->proto_sum = &th->th_sum;
3236 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3237 nk->port[pd->sidx] != sport) {
3238 pf_change_ap(m, saddr, &th->th_sport, pd->ip_sum,
3239 &th->th_sum, &nk->addr[pd->sidx],
3240 nk->port[pd->sidx], 0, af);
3241 pd->sport = &th->th_sport;
3242 sport = th->th_sport;
3245 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3246 nk->port[pd->didx] != dport) {
3247 pf_change_ap(m, daddr, &th->th_dport, pd->ip_sum,
3248 &th->th_sum, &nk->addr[pd->didx],
3249 nk->port[pd->didx], 0, af);
3250 dport = th->th_dport;
3251 pd->dport = &th->th_dport;
3256 bproto_sum = pd->hdr.udp->uh_sum;
3257 pd->proto_sum = &pd->hdr.udp->uh_sum;
3259 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3260 nk->port[pd->sidx] != sport) {
3261 pf_change_ap(m, saddr, &pd->hdr.udp->uh_sport,
3262 pd->ip_sum, &pd->hdr.udp->uh_sum,
3263 &nk->addr[pd->sidx],
3264 nk->port[pd->sidx], 1, af);
3265 sport = pd->hdr.udp->uh_sport;
3266 pd->sport = &pd->hdr.udp->uh_sport;
3269 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3270 nk->port[pd->didx] != dport) {
3271 pf_change_ap(m, daddr, &pd->hdr.udp->uh_dport,
3272 pd->ip_sum, &pd->hdr.udp->uh_sum,
3273 &nk->addr[pd->didx],
3274 nk->port[pd->didx], 1, af);
3275 dport = pd->hdr.udp->uh_dport;
3276 pd->dport = &pd->hdr.udp->uh_dport;
3282 nk->port[0] = nk->port[1];
3283 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
3284 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3285 nk->addr[pd->sidx].v4.s_addr, 0);
3287 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
3288 pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
3289 nk->addr[pd->didx].v4.s_addr, 0);
3291 if (nk->port[1] != pd->hdr.icmp->icmp_id) {
3292 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
3293 pd->hdr.icmp->icmp_cksum, sport,
3295 pd->hdr.icmp->icmp_id = nk->port[1];
3296 pd->sport = &pd->hdr.icmp->icmp_id;
3298 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
3302 case IPPROTO_ICMPV6:
3303 nk->port[0] = nk->port[1];
3304 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3305 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3306 &nk->addr[pd->sidx], 0);
3308 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3309 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3310 &nk->addr[pd->didx], 0);
3319 &nk->addr[pd->sidx], AF_INET))
3320 pf_change_a(&saddr->v4.s_addr,
3322 nk->addr[pd->sidx].v4.s_addr, 0);
3325 &nk->addr[pd->didx], AF_INET))
3326 pf_change_a(&daddr->v4.s_addr,
3328 nk->addr[pd->didx].v4.s_addr, 0);
3334 &nk->addr[pd->sidx], AF_INET6))
3335 PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3338 &nk->addr[pd->didx], AF_INET6))
3339 PF_ACPY(daddr, &nk->addr[pd->didx], af);
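/*
 * Illustration (not part of the original source): none of the NAT
 * rewrites in the switch above recompute an Internet checksum from
 * scratch.  pf_cksum_fixup() and friends apply the incremental update
 * of RFC 1624: for a 16-bit word changing from old to new,
 *
 *	HC' = ~(~HC + ~old + new)	(one's-complement sum)
 *
 * so replacing, say, icmp_id 0x1234 with 0x5678 folds only the
 * difference of those two words into the existing icmp_cksum, which
 * keeps per-packet NAT cost constant regardless of payload size.
 */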
3352 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3353 r = r->skip[PF_SKIP_IFP].ptr;
3354 else if (r->direction && r->direction != direction)
3355 r = r->skip[PF_SKIP_DIR].ptr;
3356 else if (r->af && r->af != af)
3357 r = r->skip[PF_SKIP_AF].ptr;
3358 else if (r->proto && r->proto != pd->proto)
3359 r = r->skip[PF_SKIP_PROTO].ptr;
3360 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3361 r->src.neg, kif, M_GETFIB(m)))
3362 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3363 /* tcp/udp only. port_op always 0 in other cases */
3364 else if (r->src.port_op && !pf_match_port(r->src.port_op,
3365 r->src.port[0], r->src.port[1], sport))
3366 r = r->skip[PF_SKIP_SRC_PORT].ptr;
3367 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3368 r->dst.neg, NULL, M_GETFIB(m)))
3369 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3370 /* tcp/udp only. port_op always 0 in other cases */
3371 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3372 r->dst.port[0], r->dst.port[1], dport))
3373 r = r->skip[PF_SKIP_DST_PORT].ptr;
3374 /* icmp only. type always 0 in other cases */
3375 else if (r->type && r->type != icmptype + 1)
3376 r = TAILQ_NEXT(r, entries);
3377 /* icmp only. type always 0 in other cases */
3378 else if (r->code && r->code != icmpcode + 1)
3379 r = TAILQ_NEXT(r, entries);
3380 else if (r->tos && !(r->tos == pd->tos))
3381 r = TAILQ_NEXT(r, entries);
3382 else if (r->rule_flag & PFRULE_FRAGMENT)
3383 r = TAILQ_NEXT(r, entries);
3384 else if (pd->proto == IPPROTO_TCP &&
3385 (r->flagset & th->th_flags) != r->flags)
3386 r = TAILQ_NEXT(r, entries);
3387 /* tcp/udp only. uid.op always 0 in other cases */
3388 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3389 pf_socket_lookup(direction, pd, m), 1)) &&
3390 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3392 r = TAILQ_NEXT(r, entries);
3393 /* tcp/udp only. gid.op always 0 in other cases */
3394 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
3395 pf_socket_lookup(direction, pd, m), 1)) &&
3396 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
3398 r = TAILQ_NEXT(r, entries);
3400 !pf_match_ieee8021q_pcp(r->prio, m))
3401 r = TAILQ_NEXT(r, entries);
3403 r->prob <= arc4random())
3404 r = TAILQ_NEXT(r, entries);
3405 else if (r->match_tag && !pf_match_tag(m, r, &tag,
3406 pd->pf_mtag ? pd->pf_mtag->tag : 0))
3407 r = TAILQ_NEXT(r, entries);
3408 else if (r->os_fingerprint != PF_OSFP_ANY &&
3409 (pd->proto != IPPROTO_TCP || !pf_osfp_match(
3410 pf_osfp_fingerprint(pd, m, off, th),
3411 r->os_fingerprint)))
3412 r = TAILQ_NEXT(r, entries);
3416 if (r->rtableid >= 0)
3417 rtableid = r->rtableid;
3418 if (r->anchor == NULL) {
3425 r = TAILQ_NEXT(r, entries);
3427 pf_step_into_anchor(anchor_stack, &asd,
3428 &ruleset, PF_RULESET_FILTER, &r, &a,
3431 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3432 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3439 REASON_SET(&reason, PFRES_MATCH);
3441 if (r->log || (nr != NULL && nr->log)) {
3443 m_copyback(m, off, hdrlen, pd->hdr.any);
3444 PFLOG_PACKET(kif, m, af, direction, reason, r->log ? r : nr, a,
3448 if ((r->action == PF_DROP) &&
3449 ((r->rule_flag & PFRULE_RETURNRST) ||
3450 (r->rule_flag & PFRULE_RETURNICMP) ||
3451 (r->rule_flag & PFRULE_RETURN))) {
3452 /* undo NAT changes, if they have taken place */
3454 PF_ACPY(saddr, &sk->addr[pd->sidx], af);
3455 PF_ACPY(daddr, &sk->addr[pd->didx], af);
3457 *pd->sport = sk->port[pd->sidx];
3459 *pd->dport = sk->port[pd->didx];
3461 *pd->proto_sum = bproto_sum;
3463 *pd->ip_sum = bip_sum;
3464 m_copyback(m, off, hdrlen, pd->hdr.any);
3466 if (pd->proto == IPPROTO_TCP &&
3467 ((r->rule_flag & PFRULE_RETURNRST) ||
3468 (r->rule_flag & PFRULE_RETURN)) &&
3469 !(th->th_flags & TH_RST)) {
3470 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
3482 h4 = mtod(m, struct ip *);
3483 len = ntohs(h4->ip_len) - off;
3488 h6 = mtod(m, struct ip6_hdr *);
3489 len = ntohs(h6->ip6_plen) - (off - sizeof(*h6));
3494 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
3495 REASON_SET(&reason, PFRES_PROTCKSUM);
3497 if (th->th_flags & TH_SYN)
3499 if (th->th_flags & TH_FIN)
3501 pf_send_tcp(m, r, af, pd->dst,
3502 pd->src, th->th_dport, th->th_sport,
3503 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
3504 r->return_ttl, 1, 0, kif->pfik_ifp);
3506 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
3508 pf_send_icmp(m, r->return_icmp >> 8,
3509 r->return_icmp & 255, af, r);
3510 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
3512 pf_send_icmp(m, r->return_icmp6 >> 8,
3513 r->return_icmp6 & 255, af, r);
3516 if (r->action == PF_DROP)
3519 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3520 REASON_SET(&reason, PFRES_MEMORY);
3524 M_SETFIB(m, rtableid);
3526 if (!state_icmp && (r->keep_state || nr != NULL ||
3527 (pd->flags & PFDESC_TCP_NORM))) {
3529 action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off,
3530 sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum,
3532 if (action != PF_PASS)
3536 uma_zfree(V_pf_state_key_z, sk);
3538 uma_zfree(V_pf_state_key_z, nk);
3541 /* copy back packet headers if we performed NAT operations */
3543 m_copyback(m, off, hdrlen, pd->hdr.any);
3545 if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) &&
3546 direction == PF_OUT &&
3547 pfsync_defer_ptr != NULL && pfsync_defer_ptr(*sm, m))
3549 * We want the state created, but we don't
3550 * want to send this in case a partner
3551 * firewall has to know about it to allow
3552 * replies through it.
3560 uma_zfree(V_pf_state_key_z, sk);
3562 uma_zfree(V_pf_state_key_z, nk);
3567 pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
3568 struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *nk,
3569 struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport,
3570 u_int16_t dport, int *rewrite, struct pfi_kif *kif, struct pf_state **sm,
3571 int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen)
3573 struct pf_state *s = NULL;
3574 struct pf_src_node *sn = NULL;
3575 struct tcphdr *th = pd->hdr.tcp;
3576 u_int16_t mss = V_tcp_mssdflt;
3579 /* check maximums */
3580 if (r->max_states &&
3581 (counter_u64_fetch(r->states_cur) >= r->max_states)) {
3582 counter_u64_add(V_pf_status.lcounters[LCNT_STATES], 1);
3583 REASON_SET(&reason, PFRES_MAXSTATES);
3586 /* src node for filter rule */
3587 if ((r->rule_flag & PFRULE_SRCTRACK ||
3588 r->rpool.opts & PF_POOL_STICKYADDR) &&
3589 pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
3590 REASON_SET(&reason, PFRES_SRCLIMIT);
3593 /* src node for translation rule */
3594 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3595 pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
3596 REASON_SET(&reason, PFRES_SRCLIMIT);
3599 s = uma_zalloc(V_pf_state_z, M_NOWAIT | M_ZERO);
3601 REASON_SET(&reason, PFRES_MEMORY);
3605 s->nat_rule.ptr = nr;
3607 STATE_INC_COUNTERS(s);
3609 s->state_flags |= PFSTATE_ALLOWOPTS;
3610 if (r->rule_flag & PFRULE_STATESLOPPY)
3611 s->state_flags |= PFSTATE_SLOPPY;
3612 s->log = r->log & PF_LOG_ALL;
3613 s->sync_state = PFSYNC_S_NONE;
3615 s->log |= nr->log & PF_LOG_ALL;
3616 switch (pd->proto) {
3618 s->src.seqlo = ntohl(th->th_seq);
3619 s->src.seqhi = s->src.seqlo + pd->p_len + 1;
3620 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
3621 r->keep_state == PF_STATE_MODULATE) {
3622 /* Generate sequence number modulator */
3623 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
3626 pf_change_proto_a(m, &th->th_seq, &th->th_sum,
3627 htonl(s->src.seqlo + s->src.seqdiff), 0);
3631 if (th->th_flags & TH_SYN) {
3633 s->src.wscale = pf_get_wscale(m, off,
3634 th->th_off, pd->af);
3636 s->src.max_win = MAX(ntohs(th->th_win), 1);
3637 if (s->src.wscale & PF_WSCALE_MASK) {
3638 /* Remove scale factor from initial window */
3639 int win = s->src.max_win;
3640 win += 1 << (s->src.wscale & PF_WSCALE_MASK);
3641 s->src.max_win = (win - 1) >>
3642 (s->src.wscale & PF_WSCALE_MASK);
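/*
 * Worked example (illustrative only): the two lines above compute
 * max_win = ceil(win / 2^wscale), i.e. the SYN's unscaled window
 * rounded up to whole scale units.  With win = 1000, wscale = 2:
 *
 *	win     = 1000 + (1 << 2) = 1004
 *	max_win = (1004 - 1) >> 2 = 250
 *
 * so the stored value still covers the full 1000 bytes once scaled.
 */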
3644 if (th->th_flags & TH_FIN)
3648 s->src.state = TCPS_SYN_SENT;
3649 s->dst.state = TCPS_CLOSED;
3650 s->timeout = PFTM_TCP_FIRST_PACKET;
3653 s->src.state = PFUDPS_SINGLE;
3654 s->dst.state = PFUDPS_NO_TRAFFIC;
3655 s->timeout = PFTM_UDP_FIRST_PACKET;
3659 case IPPROTO_ICMPV6:
3661 s->timeout = PFTM_ICMP_FIRST_PACKET;
3664 s->src.state = PFOTHERS_SINGLE;
3665 s->dst.state = PFOTHERS_NO_TRAFFIC;
3666 s->timeout = PFTM_OTHER_FIRST_PACKET;
3669 if (r->rt && r->rt != PF_FASTROUTE) {
3670 if (pf_map_addr(pd->af, r, pd->src, &s->rt_addr, NULL, &sn)) {
3671 REASON_SET(&reason, PFRES_MAPFAILED);
3672 pf_src_tree_remove_state(s);
3673 STATE_DEC_COUNTERS(s);
3674 uma_zfree(V_pf_state_z, s);
3677 s->rt_kif = r->rpool.cur->kif;
3680 s->creation = time_uptime;
3681 s->expire = time_uptime;
3686 /* XXX We only modify one side for now. */
3687 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
3688 s->nat_src_node = nsn;
3690 if (pd->proto == IPPROTO_TCP) {
3691 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
3692 off, pd, th, &s->src, &s->dst)) {
3693 REASON_SET(&reason, PFRES_MEMORY);
3694 pf_src_tree_remove_state(s);
3695 STATE_DEC_COUNTERS(s);
3696 uma_zfree(V_pf_state_z, s);
3699 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
3700 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
3701 &s->src, &s->dst, rewrite)) {
3702 /* This really shouldn't happen!!! */
3703 DPFPRINTF(PF_DEBUG_URGENT,
3704 ("pf_normalize_tcp_stateful failed on first pkt"));
3705 pf_normalize_tcp_cleanup(s);
3706 pf_src_tree_remove_state(s);
3707 STATE_DEC_COUNTERS(s);
3708 uma_zfree(V_pf_state_z, s);
3712 s->direction = pd->dir;
3715 * sk/nk could already have been set up by pf_get_translation().
3718 KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p",
3719 __func__, nr, sk, nk));
3720 sk = pf_state_key_setup(pd, pd->src, pd->dst, sport, dport);
3725 KASSERT((sk != NULL && nk != NULL), ("%s: nr %p sk %p, nk %p",
3726 __func__, nr, sk, nk));
3728 /* Swap sk/nk for PF_OUT. */
3729 if (pf_state_insert(BOUND_IFACE(r, kif),
3730 (pd->dir == PF_IN) ? sk : nk,
3731 (pd->dir == PF_IN) ? nk : sk, s)) {
3732 if (pd->proto == IPPROTO_TCP)
3733 pf_normalize_tcp_cleanup(s);
3734 REASON_SET(&reason, PFRES_STATEINS);
3735 pf_src_tree_remove_state(s);
3736 STATE_DEC_COUNTERS(s);
3737 uma_zfree(V_pf_state_z, s);
3744 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
3745 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
3746 s->src.state = PF_TCPS_PROXY_SRC;
3747 /* undo NAT changes, if they have taken place */
3749 struct pf_state_key *skt = s->key[PF_SK_WIRE];
3750 if (pd->dir == PF_OUT)
3751 skt = s->key[PF_SK_STACK];
3752 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
3753 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
3755 *pd->sport = skt->port[pd->sidx];
3757 *pd->dport = skt->port[pd->didx];
3759 *pd->proto_sum = bproto_sum;
3761 *pd->ip_sum = bip_sum;
3762 m_copyback(m, off, hdrlen, pd->hdr.any);
3764 s->src.seqhi = htonl(arc4random());
3765 /* Find mss option */
3766 int rtid = M_GETFIB(m);
3767 mss = pf_get_mss(m, off, th->th_off, pd->af);
3768 mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
3769 mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
3771 pf_send_tcp(NULL, r, pd->af, pd->dst, pd->src, th->th_dport,
3772 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
3773 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL);
3774 REASON_SET(&reason, PFRES_SYNPROXY);
3775 return (PF_SYNPROXY_DROP);
3782 uma_zfree(V_pf_state_key_z, sk);
3784 uma_zfree(V_pf_state_key_z, nk);
3787 struct pf_srchash *sh;
3789 sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
3790 PF_HASHROW_LOCK(sh);
3791 if (--sn->states == 0 && sn->expire == 0) {
3792 pf_unlink_src_node(sn);
3793 uma_zfree(V_pf_sources_z, sn);
3795 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
3797 PF_HASHROW_UNLOCK(sh);
3800 if (nsn != sn && nsn != NULL) {
3801 struct pf_srchash *sh;
3803 sh = &V_pf_srchash[pf_hashsrc(&nsn->addr, nsn->af)];
3804 PF_HASHROW_LOCK(sh);
3805 if (--nsn->states == 0 && nsn->expire == 0) {
3806 pf_unlink_src_node(nsn);
3807 uma_zfree(V_pf_sources_z, nsn);
3809 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
3811 PF_HASHROW_UNLOCK(sh);
3818 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
3819 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
3820 struct pf_ruleset **rsm)
3822 struct pf_rule *r, *a = NULL;
3823 struct pf_ruleset *ruleset = NULL;
3824 sa_family_t af = pd->af;
3829 struct pf_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
3833 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3836 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3837 r = r->skip[PF_SKIP_IFP].ptr;
3838 else if (r->direction && r->direction != direction)
3839 r = r->skip[PF_SKIP_DIR].ptr;
3840 else if (r->af && r->af != af)
3841 r = r->skip[PF_SKIP_AF].ptr;
3842 else if (r->proto && r->proto != pd->proto)
3843 r = r->skip[PF_SKIP_PROTO].ptr;
3844 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
3845 r->src.neg, kif, M_GETFIB(m)))
3846 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3847 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
3848 r->dst.neg, NULL, M_GETFIB(m)))
3849 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3850 else if (r->tos && !(r->tos == pd->tos))
3851 r = TAILQ_NEXT(r, entries);
3852 else if (r->os_fingerprint != PF_OSFP_ANY)
3853 r = TAILQ_NEXT(r, entries);
3854 else if (pd->proto == IPPROTO_UDP &&
3855 (r->src.port_op || r->dst.port_op))
3856 r = TAILQ_NEXT(r, entries);
3857 else if (pd->proto == IPPROTO_TCP &&
3858 (r->src.port_op || r->dst.port_op || r->flagset))
3859 r = TAILQ_NEXT(r, entries);
3860 else if ((pd->proto == IPPROTO_ICMP ||
3861 pd->proto == IPPROTO_ICMPV6) &&
3862 (r->type || r->code))
3863 r = TAILQ_NEXT(r, entries);
3865 !pf_match_ieee8021q_pcp(r->prio, m))
3866 r = TAILQ_NEXT(r, entries);
3867 else if (r->prob && r->prob <=
3868 (arc4random() % (UINT_MAX - 1) + 1))
3869 r = TAILQ_NEXT(r, entries);
3870 else if (r->match_tag && !pf_match_tag(m, r, &tag,
3871 pd->pf_mtag ? pd->pf_mtag->tag : 0))
3872 r = TAILQ_NEXT(r, entries);
3874 if (r->anchor == NULL) {
3881 r = TAILQ_NEXT(r, entries);
3883 pf_step_into_anchor(anchor_stack, &asd,
3884 &ruleset, PF_RULESET_FILTER, &r, &a,
3887 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3888 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3895 REASON_SET(&reason, PFRES_MATCH);
3898 PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd,
3901 if (r->action != PF_PASS)
3904 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3905 REASON_SET(&reason, PFRES_MEMORY);
3913 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
3914 struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off,
3915 struct pf_pdesc *pd, u_short *reason, int *copyback)
3917 struct tcphdr *th = pd->hdr.tcp;
3918 u_int16_t win = ntohs(th->th_win);
3919 u_int32_t ack, end, seq, orig_seq;
3923 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
3924 sws = src->wscale & PF_WSCALE_MASK;
3925 dws = dst->wscale & PF_WSCALE_MASK;
3930 * Sequence tracking algorithm from Guido van Rooij's paper:
3931 * http://www.madison-gurkha.com/publications/tcp_filtering/
3935 orig_seq = seq = ntohl(th->th_seq);
3936 if (src->seqlo == 0) {
3937 /* First packet from this end. Set its state */
3939 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
3940 src->scrub == NULL) {
3941 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
3942 REASON_SET(reason, PFRES_MEMORY);
3947 /* Deferred generation of sequence number modulator */
3948 if (dst->seqdiff && !src->seqdiff) {
3949 /* use random iss for the TCP server */
3950 while ((src->seqdiff = arc4random() - seq) == 0)
3952 ack = ntohl(th->th_ack) - dst->seqdiff;
3953 pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
3955 pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
3958 ack = ntohl(th->th_ack);
3961 end = seq + pd->p_len;
3962 if (th->th_flags & TH_SYN) {
3964 if (dst->wscale & PF_WSCALE_FLAG) {
3965 src->wscale = pf_get_wscale(m, off, th->th_off,
3967 if (src->wscale & PF_WSCALE_FLAG) {
3968 /* Remove scale factor from initial window */
3970 sws = src->wscale & PF_WSCALE_MASK;
3971 win = ((u_int32_t)win + (1 << sws) - 1)
3973 dws = dst->wscale & PF_WSCALE_MASK;
3975 /* fixup other window */
3976 dst->max_win <<= dst->wscale &
3978 /* in case of a retrans SYN|ACK */
3983 if (th->th_flags & TH_FIN)
3987 if (src->state < TCPS_SYN_SENT)
3988 src->state = TCPS_SYN_SENT;
3991 * May need to slide the window (seqhi may have been set by
3992 * the crappy stack check or if we picked up the connection
3993 * after establishment)
3995 if (src->seqhi == 1 ||
3996 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
3997 src->seqhi = end + MAX(1, dst->max_win << dws);
3998 if (win > src->max_win)
4002 ack = ntohl(th->th_ack) - dst->seqdiff;
4004 /* Modulate sequence numbers */
4005 pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
4007 pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
4010 end = seq + pd->p_len;
4011 if (th->th_flags & TH_SYN)
4013 if (th->th_flags & TH_FIN)
4017 if ((th->th_flags & TH_ACK) == 0) {
4018 /* Let it pass through the ack skew check */
4020 } else if ((ack == 0 &&
4021 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
4022 /* broken tcp stacks do not set ack */
4023 (dst->state < TCPS_SYN_SENT)) {
4025 * Many stacks (ours included) will set the ACK number in an
4026 * FIN|ACK if the SYN times out -- no sequence to ACK.
4032 /* Ease sequencing restrictions on packets that carry no data */
4037 ackskew = dst->seqlo - ack;
4041 * Need to demodulate the sequence numbers in any TCP SACK options
4042 * (Selective ACK). We could optionally validate the SACK values
4043 * against the current ACK window, either forwards or backwards, but
4044 * I'm not confident that SACK has been implemented properly
4045 * everywhere. It wouldn't surprise me if several stacks accidentally
4046 * SACK too far backwards of previously ACKed data. There really aren't
4047 * any security implications of bad SACKing unless the target stack
4048 * doesn't validate the option length correctly. Someone trying to
4049 * spoof into a TCP connection won't bother blindly sending SACK options anyway.
4052 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
4053 if (pf_modulate_sack(m, off, pd, th, dst))
4058 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
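/*
 * Illustration (not part of the original source): ackskew measures how
 * far the peer's ACK strays from what this side last saw in sequence
 * space.  MAXACKWINDOW allows one maximum unscaled window (0xffff)
 * plus 1500 bytes of slack for a reassembled segment pf has not seen
 * yet, so with sws = 0 an acceptable packet must satisfy
 *
 *	-67035 <= ackskew <= 67035
 *
 * Anything outside fails test 3 or 4 of the big conditional below and
 * is logged as a BAD state rather than passed as a retransmission.
 */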
4059 if (SEQ_GEQ(src->seqhi, end) &&
4060 /* Last octet inside other's window space */
4061 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
4062 /* Retrans: not more than one window back */
4063 (ackskew >= -MAXACKWINDOW) &&
4064 /* Acking not more than one reassembled fragment backwards */
4065 (ackskew <= (MAXACKWINDOW << sws)) &&
4066 /* Acking not more than one window forward */
4067 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
4068 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
4069 (pd->flags & PFDESC_IP_REAS) == 0)) {
4070 /* Require an exact/+1 sequence match on resets when possible */
4072 if (dst->scrub || src->scrub) {
4073 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4074 *state, src, dst, copyback))
4078 /* update max window */
4079 if (src->max_win < win)
4081 /* synchronize sequencing */
4082 if (SEQ_GT(end, src->seqlo))
4084 /* slide the window of what the other end can send */
4085 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4086 dst->seqhi = ack + MAX((win << sws), 1);
4090 if (th->th_flags & TH_SYN)
4091 if (src->state < TCPS_SYN_SENT)
4092 src->state = TCPS_SYN_SENT;
4093 if (th->th_flags & TH_FIN)
4094 if (src->state < TCPS_CLOSING)
4095 src->state = TCPS_CLOSING;
4096 if (th->th_flags & TH_ACK) {
4097 if (dst->state == TCPS_SYN_SENT) {
4098 dst->state = TCPS_ESTABLISHED;
4099 if (src->state == TCPS_ESTABLISHED &&
4100 (*state)->src_node != NULL &&
4101 pf_src_connlimit(state)) {
4102 REASON_SET(reason, PFRES_SRCLIMIT);
4105 } else if (dst->state == TCPS_CLOSING)
4106 dst->state = TCPS_FIN_WAIT_2;
4108 if (th->th_flags & TH_RST)
4109 src->state = dst->state = TCPS_TIME_WAIT;
4111 /* update expire time */
4112 (*state)->expire = time_uptime;
4113 if (src->state >= TCPS_FIN_WAIT_2 &&
4114 dst->state >= TCPS_FIN_WAIT_2)
4115 (*state)->timeout = PFTM_TCP_CLOSED;
4116 else if (src->state >= TCPS_CLOSING &&
4117 dst->state >= TCPS_CLOSING)
4118 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4119 else if (src->state < TCPS_ESTABLISHED ||
4120 dst->state < TCPS_ESTABLISHED)
4121 (*state)->timeout = PFTM_TCP_OPENING;
4122 else if (src->state >= TCPS_CLOSING ||
4123 dst->state >= TCPS_CLOSING)
4124 (*state)->timeout = PFTM_TCP_CLOSING;
4126 (*state)->timeout = PFTM_TCP_ESTABLISHED;
4128 /* Fall through to PASS packet */
4130 } else if ((dst->state < TCPS_SYN_SENT ||
4131 dst->state >= TCPS_FIN_WAIT_2 ||
4132 src->state >= TCPS_FIN_WAIT_2) &&
4133 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
4134 /* Within a window forward of the originating packet */
4135 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
4136 /* Within a window backward of the originating packet */
4139 * This currently handles three situations:
4140 * 1) Stupid stacks will shotgun SYNs before their peer replies.
4142 * 2) When PF catches an already established stream (the
4143 * firewall rebooted, the state table was flushed, routes changed...)
4145 * 3) Packets get funky immediately after the connection
4146 * closes (this should catch Solaris spurious ACK|FINs
4147 * that web servers like to spew after a close)
4149 * This must be a little more careful than the above code
4150 * since packet floods will also be caught here. We don't
4151 * update the TTL here to mitigate the damage of a packet
4152 * flood and so the same code can handle awkward establishment
4153 * and a loosened connection close.
4154 * In the establishment case, a correct peer response will
4155 * validate the connection, go through the normal state code
4156 * and keep updating the state TTL.
4159 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4160 printf("pf: loose state match: ");
4161 pf_print_state(*state);
4162 pf_print_flags(th->th_flags);
4163 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4164 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
4165 pd->p_len, ackskew, (unsigned long long)(*state)->packets[0],
4166 (unsigned long long)(*state)->packets[1],
4167 pd->dir == PF_IN ? "in" : "out",
4168 pd->dir == (*state)->direction ? "fwd" : "rev");
4171 if (dst->scrub || src->scrub) {
4172 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4173 *state, src, dst, copyback))
4177 /* update max window */
4178 if (src->max_win < win)
4180 /* synchronize sequencing */
4181 if (SEQ_GT(end, src->seqlo))
4183 /* slide the window of what the other end can send */
4184 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4185 dst->seqhi = ack + MAX((win << sws), 1);
4188 * Cannot set dst->seqhi here since this could be a shotgunned
4189 * SYN and not an already established connection.
4192 if (th->th_flags & TH_FIN)
4193 if (src->state < TCPS_CLOSING)
4194 src->state = TCPS_CLOSING;
4195 if (th->th_flags & TH_RST)
4196 src->state = dst->state = TCPS_TIME_WAIT;
4198 /* Fall through to PASS packet */
4201 if ((*state)->dst.state == TCPS_SYN_SENT &&
4202 (*state)->src.state == TCPS_SYN_SENT) {
4203 /* Send RST for state mismatches during handshake */
4204 if (!(th->th_flags & TH_RST))
4205 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4206 pd->dst, pd->src, th->th_dport,
4207 th->th_sport, ntohl(th->th_ack), 0,
4209 (*state)->rule.ptr->return_ttl, 1, 0,
4214 } else if (V_pf_status.debug >= PF_DEBUG_MISC) {
4215 printf("pf: BAD state: ");
4216 pf_print_state(*state);
4217 pf_print_flags(th->th_flags);
4218 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4219 "pkts=%llu:%llu dir=%s,%s\n",
4220 seq, orig_seq, ack, pd->p_len, ackskew,
4221 (unsigned long long)(*state)->packets[0],
4222 (unsigned long long)(*state)->packets[1],
4223 pd->dir == PF_IN ? "in" : "out",
4224 pd->dir == (*state)->direction ? "fwd" : "rev");
4225 printf("pf: State failure on: %c %c %c %c | %c %c\n",
4226 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4227 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4229 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4230 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4231 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4232 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
4234 REASON_SET(reason, PFRES_BADSTATE);
4242 pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst,
4243 struct pf_state **state, struct pf_pdesc *pd, u_short *reason)
4245 struct tcphdr *th = pd->hdr.tcp;
4247 if (th->th_flags & TH_SYN)
4248 if (src->state < TCPS_SYN_SENT)
4249 src->state = TCPS_SYN_SENT;
4250 if (th->th_flags & TH_FIN)
4251 if (src->state < TCPS_CLOSING)
4252 src->state = TCPS_CLOSING;
4253 if (th->th_flags & TH_ACK) {
4254 if (dst->state == TCPS_SYN_SENT) {
4255 dst->state = TCPS_ESTABLISHED;
4256 if (src->state == TCPS_ESTABLISHED &&
4257 (*state)->src_node != NULL &&
4258 pf_src_connlimit(state)) {
4259 REASON_SET(reason, PFRES_SRCLIMIT);
4262 } else if (dst->state == TCPS_CLOSING) {
4263 dst->state = TCPS_FIN_WAIT_2;
4264 } else if (src->state == TCPS_SYN_SENT &&
4265 dst->state < TCPS_SYN_SENT) {
4267 * Handle a special sloppy case where we only see one
4268 * half of the connection. If there is an ACK after
4269 * the initial SYN without ever seeing a packet from
4270 * the destination, set the connection to established.
4272 dst->state = src->state = TCPS_ESTABLISHED;
4273 if ((*state)->src_node != NULL &&
4274 pf_src_connlimit(state)) {
4275 REASON_SET(reason, PFRES_SRCLIMIT);
4278 } else if (src->state == TCPS_CLOSING &&
4279 dst->state == TCPS_ESTABLISHED &&
4282 * Handle the closing of half connections where we
4283 * don't see the full bidirectional FIN/ACK+ACK handshake.
4286 dst->state = TCPS_CLOSING;
4289 if (th->th_flags & TH_RST)
4290 src->state = dst->state = TCPS_TIME_WAIT;
4292 /* update expire time */
4293 (*state)->expire = time_uptime;
4294 if (src->state >= TCPS_FIN_WAIT_2 &&
4295 dst->state >= TCPS_FIN_WAIT_2)
4296 (*state)->timeout = PFTM_TCP_CLOSED;
4297 else if (src->state >= TCPS_CLOSING &&
4298 dst->state >= TCPS_CLOSING)
4299 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4300 else if (src->state < TCPS_ESTABLISHED ||
4301 dst->state < TCPS_ESTABLISHED)
4302 (*state)->timeout = PFTM_TCP_OPENING;
4303 else if (src->state >= TCPS_CLOSING ||
4304 dst->state >= TCPS_CLOSING)
4305 (*state)->timeout = PFTM_TCP_CLOSING;
4307 (*state)->timeout = PFTM_TCP_ESTABLISHED;
4313 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
4314 struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
4317 struct pf_state_key_cmp key;
4318 struct tcphdr *th = pd->hdr.tcp;
4320 struct pf_state_peer *src, *dst;
4321 struct pf_state_key *sk;
4323 bzero(&key, sizeof(key));
4325 key.proto = IPPROTO_TCP;
4326 if (direction == PF_IN) { /* wire side, straight */
4327 PF_ACPY(&key.addr[0], pd->src, key.af);
4328 PF_ACPY(&key.addr[1], pd->dst, key.af);
4329 key.port[0] = th->th_sport;
4330 key.port[1] = th->th_dport;
4331 } else { /* stack side, reverse */
4332 PF_ACPY(&key.addr[1], pd->src, key.af);
4333 PF_ACPY(&key.addr[0], pd->dst, key.af);
4334 key.port[1] = th->th_sport;
4335 key.port[0] = th->th_dport;
4338 STATE_LOOKUP(kif, &key, direction, *state, pd);
4340 if (direction == (*state)->direction) {
4341 src = &(*state)->src;
4342 dst = &(*state)->dst;
4344 src = &(*state)->dst;
4345 dst = &(*state)->src;
4348 sk = (*state)->key[pd->didx];
4350 if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
4351 if (direction != (*state)->direction) {
4352 REASON_SET(reason, PFRES_SYNPROXY);
4353 return (PF_SYNPROXY_DROP);
4355 if (th->th_flags & TH_SYN) {
4356 if (ntohl(th->th_seq) != (*state)->src.seqlo) {
4357 REASON_SET(reason, PFRES_SYNPROXY);
4360 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4361 pd->src, th->th_dport, th->th_sport,
4362 (*state)->src.seqhi, ntohl(th->th_seq) + 1,
4363 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0, NULL);
4364 REASON_SET(reason, PFRES_SYNPROXY);
4365 return (PF_SYNPROXY_DROP);
4366 } else if (!(th->th_flags & TH_ACK) ||
4367 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4368 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4369 REASON_SET(reason, PFRES_SYNPROXY);
4371 } else if ((*state)->src_node != NULL &&
4372 pf_src_connlimit(state)) {
4373 REASON_SET(reason, PFRES_SRCLIMIT);
4376 (*state)->src.state = PF_TCPS_PROXY_DST;
4378 if ((*state)->src.state == PF_TCPS_PROXY_DST) {
4379 if (direction == (*state)->direction) {
4380 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
4381 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4382 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4383 REASON_SET(reason, PFRES_SYNPROXY);
4386 (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
4387 if ((*state)->dst.seqhi == 1)
4388 (*state)->dst.seqhi = htonl(arc4random());
4389 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4390 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4391 sk->port[pd->sidx], sk->port[pd->didx],
4392 (*state)->dst.seqhi, 0, TH_SYN, 0,
4393 (*state)->src.mss, 0, 0, (*state)->tag, NULL);
4394 REASON_SET(reason, PFRES_SYNPROXY);
4395 return (PF_SYNPROXY_DROP);
4396 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
4398 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
4399 REASON_SET(reason, PFRES_SYNPROXY);
4402 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
4403 (*state)->dst.seqlo = ntohl(th->th_seq);
4404 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4405 pd->src, th->th_dport, th->th_sport,
4406 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
4407 TH_ACK, (*state)->src.max_win, 0, 0, 0,
4408 (*state)->tag, NULL);
4409 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4410 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4411 sk->port[pd->sidx], sk->port[pd->didx],
4412 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
4413 TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0, NULL);
4414 (*state)->src.seqdiff = (*state)->dst.seqhi -
4415 (*state)->src.seqlo;
4416 (*state)->dst.seqdiff = (*state)->src.seqhi -
4417 (*state)->dst.seqlo;
4418 (*state)->src.seqhi = (*state)->src.seqlo +
4419 (*state)->dst.max_win;
4420 (*state)->dst.seqhi = (*state)->dst.seqlo +
4421 (*state)->src.max_win;
4422 (*state)->src.wscale = (*state)->dst.wscale = 0;
4423 (*state)->src.state = (*state)->dst.state =
4425 REASON_SET(reason, PFRES_SYNPROXY);
4426 return (PF_SYNPROXY_DROP);
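/*
 * Sketch (not part of the original source) of the synproxy splice
 * completed above: pf answered the client's SYN itself (PROXY_SRC),
 * then opened the real server connection (PROXY_DST) and ACKed both
 * ends.  Client and server chose independent ISNs, so each side's
 * later segments are shifted by the stored deltas
 *
 *	src.seqdiff = dst.seqhi - src.seqlo
 *	dst.seqdiff = src.seqhi - dst.seqlo
 *
 * using the same sequence-modulation machinery as "modulate state".
 */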
4430 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
4431 dst->state >= TCPS_FIN_WAIT_2 &&
4432 src->state >= TCPS_FIN_WAIT_2) {
4433 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4434 printf("pf: state reuse ");
4435 pf_print_state(*state);
4436 pf_print_flags(th->th_flags);
4439 /* XXX make sure it's the same direction ?? */
4440 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
4441 pf_unlink_state(*state, PF_ENTER_LOCKED);
4446 if ((*state)->state_flags & PFSTATE_SLOPPY) {
4447 if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP)
4450 if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason,
4451 &copyback) == PF_DROP)
4455 /* translate source/destination address, if necessary */
4456 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4457 struct pf_state_key *nk = (*state)->key[pd->didx];
4459 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4460 nk->port[pd->sidx] != th->th_sport)
4461 pf_change_ap(m, pd->src, &th->th_sport,
4462 pd->ip_sum, &th->th_sum, &nk->addr[pd->sidx],
4463 nk->port[pd->sidx], 0, pd->af);
4465 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4466 nk->port[pd->didx] != th->th_dport)
4467 pf_change_ap(m, pd->dst, &th->th_dport,
4468 pd->ip_sum, &th->th_sum, &nk->addr[pd->didx],
4469 nk->port[pd->didx], 0, pd->af);
4473 /* Copyback sequence modulation or stateful scrub changes if needed */
4475 m_copyback(m, off, sizeof(*th), (caddr_t)th);
4481 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
4482 struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4484 struct pf_state_peer *src, *dst;
4485 struct pf_state_key_cmp key;
4486 struct udphdr *uh = pd->hdr.udp;
4488 bzero(&key, sizeof(key));
4490 key.proto = IPPROTO_UDP;
4491 if (direction == PF_IN) { /* wire side, straight */
4492 PF_ACPY(&key.addr[0], pd->src, key.af);
4493 PF_ACPY(&key.addr[1], pd->dst, key.af);
4494 key.port[0] = uh->uh_sport;
4495 key.port[1] = uh->uh_dport;
4496 } else { /* stack side, reverse */
4497 PF_ACPY(&key.addr[1], pd->src, key.af);
4498 PF_ACPY(&key.addr[0], pd->dst, key.af);
4499 key.port[1] = uh->uh_sport;
4500 key.port[0] = uh->uh_dport;
4503 STATE_LOOKUP(kif, &key, direction, *state, pd);
4505 if (direction == (*state)->direction) {
4506 src = &(*state)->src;
4507 dst = &(*state)->dst;
4509 src = &(*state)->dst;
4510 dst = &(*state)->src;
4514 if (src->state < PFUDPS_SINGLE)
4515 src->state = PFUDPS_SINGLE;
4516 if (dst->state == PFUDPS_SINGLE)
4517 dst->state = PFUDPS_MULTIPLE;
4519 /* update expire time */
4520 (*state)->expire = time_uptime;
4521 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
4522 (*state)->timeout = PFTM_UDP_MULTIPLE;
4524 (*state)->timeout = PFTM_UDP_SINGLE;
4526 /* translate source/destination address, if necessary */
4527 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4528 struct pf_state_key *nk = (*state)->key[pd->didx];
4530 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4531 nk->port[pd->sidx] != uh->uh_sport)
4532 pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum,
4533 &uh->uh_sum, &nk->addr[pd->sidx],
4534 nk->port[pd->sidx], 1, pd->af);
4536 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4537 nk->port[pd->didx] != uh->uh_dport)
4538 pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum,
4539 &uh->uh_sum, &nk->addr[pd->didx],
4540 nk->port[pd->didx], 1, pd->af);
4541 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
4548 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
4549 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
4551 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
4552 u_int16_t icmpid = 0, *icmpsum;
4553 u_int8_t icmptype, icmpcode;
4555 struct pf_state_key_cmp key;
4557 bzero(&key, sizeof(key));
4558 switch (pd->proto) {
4561 icmptype = pd->hdr.icmp->icmp_type;
4562 icmpcode = pd->hdr.icmp->icmp_code;
4563 icmpid = pd->hdr.icmp->icmp_id;
4564 icmpsum = &pd->hdr.icmp->icmp_cksum;
4566 if (icmptype == ICMP_UNREACH ||
4567 icmptype == ICMP_SOURCEQUENCH ||
4568 icmptype == ICMP_REDIRECT ||
4569 icmptype == ICMP_TIMXCEED ||
4570 icmptype == ICMP_PARAMPROB)
4575 case IPPROTO_ICMPV6:
4576 icmptype = pd->hdr.icmp6->icmp6_type;
4577 icmpcode = pd->hdr.icmp6->icmp6_code;
4578 icmpid = pd->hdr.icmp6->icmp6_id;
4579 icmpsum = &pd->hdr.icmp6->icmp6_cksum;
4581 if (icmptype == ICMP6_DST_UNREACH ||
4582 icmptype == ICMP6_PACKET_TOO_BIG ||
4583 icmptype == ICMP6_TIME_EXCEEDED ||
4584 icmptype == ICMP6_PARAM_PROB)
4593 * ICMP query/reply message not related to a TCP/UDP packet.
4594 * Search for an ICMP state.
4597 key.proto = pd->proto;
4598 key.port[0] = key.port[1] = icmpid;
4599 if (direction == PF_IN) { /* wire side, straight */
4600 PF_ACPY(&key.addr[0], pd->src, key.af);
4601 PF_ACPY(&key.addr[1], pd->dst, key.af);
4602 } else { /* stack side, reverse */
4603 PF_ACPY(&key.addr[1], pd->src, key.af);
4604 PF_ACPY(&key.addr[0], pd->dst, key.af);
4607 STATE_LOOKUP(kif, &key, direction, *state, pd);
4609 (*state)->expire = time_uptime;
4610 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
4612 /* translate source/destination address, if necessary */
4613 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4614 struct pf_state_key *nk = (*state)->key[pd->didx];
4619 if (PF_ANEQ(pd->src,
4620 &nk->addr[pd->sidx], AF_INET))
4621 pf_change_a(&saddr->v4.s_addr,
4623 nk->addr[pd->sidx].v4.s_addr, 0);
4625 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
4627 pf_change_a(&daddr->v4.s_addr,
4629 nk->addr[pd->didx].v4.s_addr, 0);
4632 pd->hdr.icmp->icmp_id) {
4633 pd->hdr.icmp->icmp_cksum =
4635 pd->hdr.icmp->icmp_cksum, icmpid,
4636 nk->port[pd->sidx], 0);
4637 pd->hdr.icmp->icmp_id =
4641 m_copyback(m, off, ICMP_MINLEN,
4642 (caddr_t )pd->hdr.icmp);
4647 if (PF_ANEQ(pd->src,
4648 &nk->addr[pd->sidx], AF_INET6))
4650 &pd->hdr.icmp6->icmp6_cksum,
4651 &nk->addr[pd->sidx], 0);
4653 if (PF_ANEQ(pd->dst,
4654 &nk->addr[pd->didx], AF_INET6))
4656 &pd->hdr.icmp6->icmp6_cksum,
4657 &nk->addr[pd->didx], 0);
4659 m_copyback(m, off, sizeof(struct icmp6_hdr),
4660 (caddr_t )pd->hdr.icmp6);
4669 * ICMP error message in response to a TCP/UDP packet.
4670 * Extract the inner TCP/UDP header and search for that state.
4673 struct pf_pdesc pd2;
4674 bzero(&pd2, sizeof pd2);
4679 struct ip6_hdr h2_6;
4686 /* Payload packet is from the opposite direction. */
4687 pd2.sidx = (direction == PF_IN) ? 1 : 0;
4688 pd2.didx = (direction == PF_IN) ? 0 : 1;
4692 /* offset of h2 in mbuf chain */
4693 ipoff2 = off + ICMP_MINLEN;
4695 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
4696 NULL, reason, pd2.af)) {
4697 DPFPRINTF(PF_DEBUG_MISC,
4698 ("pf: ICMP error message too short "
4703 * ICMP error messages don't refer to non-first
4706 if (h2.ip_off & htons(IP_OFFMASK)) {
4707 REASON_SET(reason, PFRES_FRAG);
4711 /* offset of protocol header that follows h2 */
4712 off2 = ipoff2 + (h2.ip_hl << 2);
4714 pd2.proto = h2.ip_p;
4715 pd2.src = (struct pf_addr *)&h2.ip_src;
4716 pd2.dst = (struct pf_addr *)&h2.ip_dst;
4717 pd2.ip_sum = &h2.ip_sum;
4722 ipoff2 = off + sizeof(struct icmp6_hdr);
4724 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
4725 NULL, reason, pd2.af)) {
4726 DPFPRINTF(PF_DEBUG_MISC,
4727 ("pf: ICMP error message too short "
4731 pd2.proto = h2_6.ip6_nxt;
4732 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
4733 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
4735 off2 = ipoff2 + sizeof(h2_6);
4737 switch (pd2.proto) {
4738 case IPPROTO_FRAGMENT:
4740 * ICMPv6 error messages for
4741 * non-first fragments
4743 REASON_SET(reason, PFRES_FRAG);
4746 case IPPROTO_HOPOPTS:
4747 case IPPROTO_ROUTING:
4748 case IPPROTO_DSTOPTS: {
4749 /* get next header and header length */
4750 struct ip6_ext opt6;
4752 if (!pf_pull_hdr(m, off2, &opt6,
4753 sizeof(opt6), NULL, reason,
4755 DPFPRINTF(PF_DEBUG_MISC,
4756 ("pf: ICMPv6 short opt\n"));
4759 if (pd2.proto == IPPROTO_AH)
4760 off2 += (opt6.ip6e_len + 2) * 4;
4762 off2 += (opt6.ip6e_len + 1) * 8;
4763 pd2.proto = opt6.ip6e_nxt;
4764 /* go to the next header */
4771 } while (!terminal);
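/*
 * Worked example (illustrative only): for an IPv4 ICMP error quoting
 * a UDP packet with a 20-byte inner IP header,
 *
 *	ipoff2 = off + ICMP_MINLEN	    = off + 8
 *	off2   = ipoff2 + (h2.ip_hl << 2)   = ipoff2 + 20
 *
 * For ICMPv6 the loop above walks any extension headers instead:
 * AH advances off2 by (ip6e_len + 2) * 4 because its length field
 * counts 32-bit words, all other options by (ip6e_len + 1) * 8
 * (64-bit units), until a terminal protocol such as TCP or UDP is
 * reached.
 */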
4776 if (PF_ANEQ(pd->dst, pd2.src, pd->af)) {
4777 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4778 printf("pf: BAD ICMP %d:%d outer dst: ",
4779 icmptype, icmpcode);
4780 pf_print_host(pd->src, 0, pd->af);
4782 pf_print_host(pd->dst, 0, pd->af);
4783 printf(" inner src: ");
4784 pf_print_host(pd2.src, 0, pd2.af);
4786 pf_print_host(pd2.dst, 0, pd2.af);
4789 REASON_SET(reason, PFRES_BADSTATE);
4793 switch (pd2.proto) {
4797 struct pf_state_peer *src, *dst;
4802 * Only the first 8 bytes of the TCP header can be
4803 * expected. Don't access any TCP header fields after
4804 * th_seq; an ackskew test is not possible.
4806 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
4808 DPFPRINTF(PF_DEBUG_MISC,
4809 ("pf: ICMP error message too short "
4815 key.proto = IPPROTO_TCP;
4816 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4817 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4818 key.port[pd2.sidx] = th.th_sport;
4819 key.port[pd2.didx] = th.th_dport;
4821 STATE_LOOKUP(kif, &key, direction, *state, pd);
4823 if (direction == (*state)->direction) {
4824 src = &(*state)->dst;
4825 dst = &(*state)->src;
4827 src = &(*state)->src;
4828 dst = &(*state)->dst;
4831 if (src->wscale && dst->wscale)
4832 dws = dst->wscale & PF_WSCALE_MASK;
4836 /* Demodulate sequence number */
4837 seq = ntohl(th.th_seq) - src->seqdiff;
4839 pf_change_a(&th.th_seq, icmpsum,
4844 if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
4845 (!SEQ_GEQ(src->seqhi, seq) ||
4846 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
4847 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4848 printf("pf: BAD ICMP %d:%d ",
4849 icmptype, icmpcode);
4850 pf_print_host(pd->src, 0, pd->af);
4852 pf_print_host(pd->dst, 0, pd->af);
4854 pf_print_state(*state);
4855 printf(" seq=%u\n", seq);
4857 REASON_SET(reason, PFRES_BADSTATE);
4860 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4861 printf("pf: OK ICMP %d:%d ",
4862 icmptype, icmpcode);
4863 pf_print_host(pd->src, 0, pd->af);
4865 pf_print_host(pd->dst, 0, pd->af);
4867 pf_print_state(*state);
4868 printf(" seq=%u\n", seq);
4872 /* translate source/destination address, if necessary */
4873 if ((*state)->key[PF_SK_WIRE] !=
4874 (*state)->key[PF_SK_STACK]) {
4875 struct pf_state_key *nk =
4876 (*state)->key[pd->didx];
4878 if (PF_ANEQ(pd2.src,
4879 &nk->addr[pd2.sidx], pd2.af) ||
4880 nk->port[pd2.sidx] != th.th_sport)
4881 pf_change_icmp(pd2.src, &th.th_sport,
4882 daddr, &nk->addr[pd2.sidx],
4883 nk->port[pd2.sidx], NULL,
4884 pd2.ip_sum, icmpsum,
4885 pd->ip_sum, 0, pd2.af);
4887 if (PF_ANEQ(pd2.dst,
4888 &nk->addr[pd2.didx], pd2.af) ||
4889 nk->port[pd2.didx] != th.th_dport)
4890 pf_change_icmp(pd2.dst, &th.th_dport,
4891 saddr, &nk->addr[pd2.didx],
4892 nk->port[pd2.didx], NULL,
4893 pd2.ip_sum, icmpsum,
4894 pd->ip_sum, 0, pd2.af);
4902 m_copyback(m, off, ICMP_MINLEN,
4903 (caddr_t )pd->hdr.icmp);
4904 m_copyback(m, ipoff2, sizeof(h2),
4911 sizeof(struct icmp6_hdr),
4912 (caddr_t )pd->hdr.icmp6);
4913 m_copyback(m, ipoff2, sizeof(h2_6),
4918 m_copyback(m, off2, 8, (caddr_t)&th);
4927 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
4928 NULL, reason, pd2.af)) {
4929 DPFPRINTF(PF_DEBUG_MISC,
4930 ("pf: ICMP error message too short "
4936 key.proto = IPPROTO_UDP;
4937 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4938 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4939 key.port[pd2.sidx] = uh.uh_sport;
4940 key.port[pd2.didx] = uh.uh_dport;
4942 STATE_LOOKUP(kif, &key, direction, *state, pd);
4944 /* translate source/destination address, if necessary */
4945 if ((*state)->key[PF_SK_WIRE] !=
4946 (*state)->key[PF_SK_STACK]) {
4947 struct pf_state_key *nk =
4948 (*state)->key[pd->didx];
4950 if (PF_ANEQ(pd2.src,
4951 &nk->addr[pd2.sidx], pd2.af) ||
4952 nk->port[pd2.sidx] != uh.uh_sport)
4953 pf_change_icmp(pd2.src, &uh.uh_sport,
4954 daddr, &nk->addr[pd2.sidx],
4955 nk->port[pd2.sidx], &uh.uh_sum,
4956 pd2.ip_sum, icmpsum,
4957 pd->ip_sum, 1, pd2.af);
4959 if (PF_ANEQ(pd2.dst,
4960 &nk->addr[pd2.didx], pd2.af) ||
4961 nk->port[pd2.didx] != uh.uh_dport)
4962 pf_change_icmp(pd2.dst, &uh.uh_dport,
4963 saddr, &nk->addr[pd2.didx],
4964 nk->port[pd2.didx], &uh.uh_sum,
4965 pd2.ip_sum, icmpsum,
4966 pd->ip_sum, 1, pd2.af);
4971 m_copyback(m, off, ICMP_MINLEN,
4972 (caddr_t )pd->hdr.icmp);
4973 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4979 sizeof(struct icmp6_hdr),
4980 (caddr_t )pd->hdr.icmp6);
4981 m_copyback(m, ipoff2, sizeof(h2_6),
4986 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
4992 case IPPROTO_ICMP: {
4995 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
4996 NULL, reason, pd2.af)) {
4997 DPFPRINTF(PF_DEBUG_MISC,
4998 ("pf: ICMP error message too short i"
5004 key.proto = IPPROTO_ICMP;
5005 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5006 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5007 key.port[0] = key.port[1] = iih.icmp_id;
5009 STATE_LOOKUP(kif, &key, direction, *state, pd);
5011 /* translate source/destination address, if necessary */
5012 if ((*state)->key[PF_SK_WIRE] !=
5013 (*state)->key[PF_SK_STACK]) {
5014 struct pf_state_key *nk =
5015 (*state)->key[pd->didx];
5017 if (PF_ANEQ(pd2.src,
5018 &nk->addr[pd2.sidx], pd2.af) ||
5019 nk->port[pd2.sidx] != iih.icmp_id)
5020 pf_change_icmp(pd2.src, &iih.icmp_id,
5021 daddr, &nk->addr[pd2.sidx],
5022 nk->port[pd2.sidx], NULL,
5023 pd2.ip_sum, icmpsum,
5024 pd->ip_sum, 0, AF_INET);
5026 if (PF_ANEQ(pd2.dst,
5027 &nk->addr[pd2.didx], pd2.af) ||
5028 nk->port[pd2.didx] != iih.icmp_id)
5029 pf_change_icmp(pd2.dst, &iih.icmp_id,
5030 saddr, &nk->addr[pd2.didx],
5031 nk->port[pd2.didx], NULL,
5032 pd2.ip_sum, icmpsum,
5033 pd->ip_sum, 0, AF_INET);
5035 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
5036 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5037 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
5044 case IPPROTO_ICMPV6: {
5045 struct icmp6_hdr iih;
5047 if (!pf_pull_hdr(m, off2, &iih,
5048 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
5049 DPFPRINTF(PF_DEBUG_MISC,
5050 ("pf: ICMP error message too short "
5056 key.proto = IPPROTO_ICMPV6;
5057 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5058 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5059 key.port[0] = key.port[1] = iih.icmp6_id;
5061 STATE_LOOKUP(kif, &key, direction, *state, pd);
5063 /* translate source/destination address, if necessary */
5064 if ((*state)->key[PF_SK_WIRE] !=
5065 (*state)->key[PF_SK_STACK]) {
5066 struct pf_state_key *nk =
5067 (*state)->key[pd->didx];
5069 if (PF_ANEQ(pd2.src,
5070 &nk->addr[pd2.sidx], pd2.af) ||
5071 nk->port[pd2.sidx] != iih.icmp6_id)
5072 pf_change_icmp(pd2.src, &iih.icmp6_id,
5073 daddr, &nk->addr[pd2.sidx],
5074 nk->port[pd2.sidx], NULL,
5075 pd2.ip_sum, icmpsum,
5076 pd->ip_sum, 0, AF_INET6);
5078 if (PF_ANEQ(pd2.dst,
5079 &nk->addr[pd2.didx], pd2.af) ||
5080 nk->port[pd2.didx] != iih.icmp6_id)
5081 pf_change_icmp(pd2.dst, &iih.icmp6_id,
5082 saddr, &nk->addr[pd2.didx],
5083 nk->port[pd2.didx], NULL,
5084 pd2.ip_sum, icmpsum,
5085 pd->ip_sum, 0, AF_INET6);
5087 m_copyback(m, off, sizeof(struct icmp6_hdr),
5088 (caddr_t)pd->hdr.icmp6);
5089 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
5090 m_copyback(m, off2, sizeof(struct icmp6_hdr),
5099 key.proto = pd2.proto;
5100 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5101 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5102 key.port[0] = key.port[1] = 0;
5104 STATE_LOOKUP(kif, &key, direction, *state, pd);
5106 /* translate source/destination address, if necessary */
5107 if ((*state)->key[PF_SK_WIRE] !=
5108 (*state)->key[PF_SK_STACK]) {
5109 struct pf_state_key *nk =
5110 (*state)->key[pd->didx];
5112 if (PF_ANEQ(pd2.src,
5113 &nk->addr[pd2.sidx], pd2.af))
5114 pf_change_icmp(pd2.src, NULL, daddr,
5115 &nk->addr[pd2.sidx], 0, NULL,
5116 pd2.ip_sum, icmpsum,
5117 pd->ip_sum, 0, pd2.af);
5119 if (PF_ANEQ(pd2.dst,
5120 &nk->addr[pd2.didx], pd2.af))
5121 pf_change_icmp(pd2.dst, NULL, saddr,
5122 &nk->addr[pd2.didx], 0, NULL,
5123 pd2.ip_sum, icmpsum,
5124 pd->ip_sum, 0, pd2.af);
5129 m_copyback(m, off, ICMP_MINLEN,
5130 (caddr_t)pd->hdr.icmp);
5131 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5137 sizeof(struct icmp6_hdr),
5138 (caddr_t)pd->hdr.icmp6);
5139 m_copyback(m, ipoff2, sizeof(h2_6),
5153 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
5154 struct mbuf *m, struct pf_pdesc *pd)
5156 struct pf_state_peer *src, *dst;
5157 struct pf_state_key_cmp key;
5159 bzero(&key, sizeof(key));
5161 key.proto = pd->proto;
5162 if (direction == PF_IN) {
5163 PF_ACPY(&key.addr[0], pd->src, key.af);
5164 PF_ACPY(&key.addr[1], pd->dst, key.af);
5165 key.port[0] = key.port[1] = 0;
5167 PF_ACPY(&key.addr[1], pd->src, key.af);
5168 PF_ACPY(&key.addr[0], pd->dst, key.af);
5169 key.port[1] = key.port[0] = 0;
5172 STATE_LOOKUP(kif, &key, direction, *state, pd);
5174 if (direction == (*state)->direction) {
5175 src = &(*state)->src;
5176 dst = &(*state)->dst;
5178 src = &(*state)->dst;
5179 dst = &(*state)->src;
5183 if (src->state < PFOTHERS_SINGLE)
5184 src->state = PFOTHERS_SINGLE;
5185 if (dst->state == PFOTHERS_SINGLE)
5186 dst->state = PFOTHERS_MULTIPLE;
5188 /* update expire time */
5189 (*state)->expire = time_uptime;
5190 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
5191 (*state)->timeout = PFTM_OTHER_MULTIPLE;
5193 (*state)->timeout = PFTM_OTHER_SINGLE;
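/*
 * Example of the state machine above: a strictly one-way flow (say,
 * outbound GRE that never gets an answer) leaves the peer at
 * PFOTHERS_SINGLE and expires after the shorter PFTM_OTHER_SINGLE
 * interval; once both endpoints have sent traffic, both sides reach
 * PFOTHERS_MULTIPLE and the longer PFTM_OTHER_MULTIPLE timeout applies.
 */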
5195 /* translate source/destination address, if necessary */
5196 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5197 struct pf_state_key *nk = (*state)->key[pd->didx];
5199 KASSERT(nk, ("%s: nk is null", __func__));
5200 KASSERT(pd, ("%s: pd is null", __func__));
5201 KASSERT(pd->src, ("%s: pd->src is null", __func__));
5202 KASSERT(pd->dst, ("%s: pd->dst is null", __func__));
5206 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5207 pf_change_a(&pd->src->v4.s_addr,
5209 nk->addr[pd->sidx].v4.s_addr,
5213 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5214 pf_change_a(&pd->dst->v4.s_addr,
5216 nk->addr[pd->didx].v4.s_addr,
5223 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6))
5224 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
5226 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6))
5227 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
5235 * ipoff and off are measured from the start of the mbuf chain.
5236 * h must be at "ipoff" on the mbuf chain.
5239 pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
5240 u_short *actionp, u_short *reasonp, sa_family_t af)
5245 struct ip *h = mtod(m, struct ip *);
5246 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
5250 ACTION_SET(actionp, PF_PASS);
5252 ACTION_SET(actionp, PF_DROP);
5253 REASON_SET(reasonp, PFRES_FRAG);
5257 if (m->m_pkthdr.len < off + len ||
5258 ntohs(h->ip_len) < off + len) {
5259 ACTION_SET(actionp, PF_DROP);
5260 REASON_SET(reasonp, PFRES_SHORT);
5268 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
5270 if (m->m_pkthdr.len < off + len ||
5271 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
5272 (unsigned)(off + len)) {
5273 ACTION_SET(actionp, PF_DROP);
5274 REASON_SET(reasonp, PFRES_SHORT);
5281 m_copydata(m, off, len, p);
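/*
 * Typical call pattern (illustrative sketch; mirrors the callers in
 * pf_test() below):
 *
 *	struct tcphdr th;
 *
 *	if (!pf_pull_hdr(m, off, &th, sizeof(th),
 *	    &action, &reason, AF_INET)) {
 *		log = action != PF_PASS;
 *		goto done;
 *	}
 */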
5287 pf_routable_oldmpath(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif,
5290 struct radix_node_head *rnh;
5291 struct sockaddr_in *dst;
5295 struct sockaddr_in6 *dst6;
5296 struct route_in6 ro;
5300 struct radix_node *rn;
5305 /* XXX: stick to table 0 for now */
5306 rnh = rt_tables_get_rnh(0, af);
5307 if (rnh != NULL && rn_mpath_capable(rnh))
5309 bzero(&ro, sizeof(ro));
5312 dst = satosin(&ro.ro_dst);
5313 dst->sin_family = AF_INET;
5314 dst->sin_len = sizeof(*dst);
5315 dst->sin_addr = addr->v4;
5320 * Skip check for addresses with embedded interface scope,
5321 * as they would always match anyway.
5323 if (IN6_IS_SCOPE_EMBED(&addr->v6))
5325 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
5326 dst6->sin6_family = AF_INET6;
5327 dst6->sin6_len = sizeof(*dst6);
5328 dst6->sin6_addr = addr->v6;
5335 /* Skip checks for ipsec interfaces */
5336 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
5342 in6_rtalloc_ign(&ro, 0, rtableid);
5347 in_rtalloc_ign((struct route *)&ro, 0, rtableid);
5352 if (ro.ro_rt != NULL) {
5353 /* No interface given, this is a no-route check */
5357 if (kif->pfik_ifp == NULL) {
5362 /* Perform uRPF check if an input interface was passed */
5364 rn = (struct radix_node *)ro.ro_rt;
5366 rt = (struct rtentry *)rn;
5369 if (kif->pfik_ifp == ifp)
5371 rn = rn_mpath_next(rn);
5372 } while (check_mpath == 1 && rn != NULL && ret == 0);
5376 if (ro.ro_rt != NULL)
5383 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif,
5387 struct nhop4_basic nh4;
5390 struct nhop6_basic nh6;
5394 struct radix_node_head *rnh;
5396 /* XXX: stick to table 0 for now */
5397 rnh = rt_tables_get_rnh(0, af);
5398 if (rnh != NULL && rn_mpath_capable(rnh))
5399 return (pf_routable_oldmpath(addr, af, kif, rtableid));
5402 * Skip check for addresses with embedded interface scope,
5403 * as they would always match anyway.
5405 if (af == AF_INET6 && IN6_IS_SCOPE_EMBED(&addr->v6))
5408 if (af != AF_INET && af != AF_INET6)
5411 /* Skip checks for ipsec interfaces */
5412 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
5420 if (fib6_lookup_nh_basic(rtableid, &addr->v6, 0, 0, 0, &nh6) != 0)
5427 if (fib4_lookup_nh_basic(rtableid, addr->v4, 0, 0, &nh4) != 0)
5434 /* No interface given, this is a no-route check */
5438 if (kif->pfik_ifp == NULL)
5441 /* Perform uRPF check if an input interface was passed */
5442 if (kif->pfik_ifp == ifp)
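/*
 * This reverse-path check is what pf.conf's "urpf-failed" address
 * keyword relies on; a sketch of typical ruleset usage (the interface
 * name is hypothetical):
 *
 *	block in quick on em0 from urpf-failed
 */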
5449 pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5450 struct pf_state *s, struct pf_pdesc *pd)
5452 struct mbuf *m0, *m1;
5453 struct sockaddr_in dst;
5455 struct ifnet *ifp = NULL;
5456 struct pf_addr naddr;
5457 struct pf_src_node *sn = NULL;
5459 uint16_t ip_len, ip_off;
5461 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5462 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5465 if ((pd->pf_mtag == NULL &&
5466 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5467 pd->pf_mtag->routed++ > 3) {
5473 if (r->rt == PF_DUPTO) {
5474 if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
5480 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5488 ip = mtod(m0, struct ip *);
5490 bzero(&dst, sizeof(dst));
5491 dst.sin_family = AF_INET;
5492 dst.sin_len = sizeof(dst);
5493 dst.sin_addr = ip->ip_dst;
5495 if (r->rt == PF_FASTROUTE) {
5496 struct nhop4_basic nh4;
5501 if (fib4_lookup_nh_basic(M_GETFIB(m0), ip->ip_dst, 0,
5502 m0->m_pkthdr.flowid, &nh4) != 0) {
5503 KMOD_IPSTAT_INC(ips_noroute);
5504 error = EHOSTUNREACH;
5509 dst.sin_addr = nh4.nh_addr;
5511 if (TAILQ_EMPTY(&r->rpool.list)) {
5512 DPFPRINTF(PF_DEBUG_URGENT,
5513 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5517 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
5519 if (!PF_AZERO(&naddr, AF_INET))
5520 dst.sin_addr.s_addr = naddr.v4.s_addr;
5521 ifp = r->rpool.cur->kif ?
5522 r->rpool.cur->kif->pfik_ifp : NULL;
5524 if (!PF_AZERO(&s->rt_addr, AF_INET))
5525 dst.sin_addr.s_addr =
5526 s->rt_addr.v4.s_addr;
5527 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5535 if (pf_test(PF_OUT, 0, ifp, &m0, NULL) != PF_PASS)
5537 else if (m0 == NULL)
5539 if (m0->m_len < sizeof(struct ip)) {
5540 DPFPRINTF(PF_DEBUG_URGENT,
5541 ("%s: m0->m_len < sizeof(struct ip)\n", __func__));
5544 ip = mtod(m0, struct ip *);
5547 if (ifp->if_flags & IFF_LOOPBACK)
5548 m0->m_flags |= M_SKIP_FIREWALL;
5550 ip_len = ntohs(ip->ip_len);
5551 ip_off = ntohs(ip->ip_off);
5553 /* Copied from FreeBSD 10.0-CURRENT ip_output. */
5554 m0->m_pkthdr.csum_flags |= CSUM_IP;
5555 if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) {
5556 in_delayed_cksum(m0);
5557 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
5560 if (m0->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) {
5561 sctp_delayed_cksum(m0, (uint32_t)(ip->ip_hl << 2));
5562 m0->m_pkthdr.csum_flags &= ~CSUM_SCTP;
5567 * If small enough for interface, or the interface will take
5568 * care of the fragmentation for us, we can just send directly.
5570 if (ip_len <= ifp->if_mtu ||
5571 (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0) {
5573 if (m0->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) {
5574 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
5575 m0->m_pkthdr.csum_flags &= ~CSUM_IP;
5577 m_clrprotoflags(m0); /* Avoid confusing lower layers. */
5578 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5582 /* Balk when the DF bit is set or the interface doesn't support TSO. */
5583 if ((ip_off & IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5585 KMOD_IPSTAT_INC(ips_cantfrag);
5586 if (r->rt != PF_DUPTO) {
5587 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
5594 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist);
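/*
 * ip_fragment() leaves the resulting fragments chained through
 * m_nextpkt; the loop below unlinks and transmits them one at a time.
 */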
5598 for (; m0; m0 = m1) {
5600 m0->m_nextpkt = NULL;
5602 m_clrprotoflags(m0);
5603 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5609 KMOD_IPSTAT_INC(ips_fragmented);
5612 if (r->rt != PF_DUPTO)
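/*
 * The r->rt values handled above correspond to pf.conf's route-to,
 * reply-to, dup-to and fastroute; an illustrative rule (interface and
 * next hop hypothetical):
 *
 *	pass in on em0 route-to (em1 192.0.2.1) from 10.0.0.0/8 to any
 */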
5627 pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5628 struct pf_state *s, struct pf_pdesc *pd)
5631 struct sockaddr_in6 dst;
5632 struct ip6_hdr *ip6;
5633 struct ifnet *ifp = NULL;
5634 struct pf_addr naddr;
5635 struct pf_src_node *sn = NULL;
5637 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5638 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5641 if ((pd->pf_mtag == NULL &&
5642 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5643 pd->pf_mtag->routed++ > 3) {
5649 if (r->rt == PF_DUPTO) {
5650 if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
5656 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5664 ip6 = mtod(m0, struct ip6_hdr *);
5666 bzero(&dst, sizeof(dst));
5667 dst.sin6_family = AF_INET6;
5668 dst.sin6_len = sizeof(dst);
5669 dst.sin6_addr = ip6->ip6_dst;
5671 /* Cheat. XXX why only in the v6 case??? */
5672 if (r->rt == PF_FASTROUTE) {
5675 m0->m_flags |= M_SKIP_FIREWALL;
5676 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
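/*
 * Unlike the v4 path above, which resolves the next hop itself with
 * fib4_lookup_nh_basic(), the v6 fastroute case simply hands the
 * packet to ip6_output() and lets it choose the route.
 */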
5681 if (TAILQ_EMPTY(&r->rpool.list)) {
5682 DPFPRINTF(PF_DEBUG_URGENT,
5683 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5687 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
5689 if (!PF_AZERO(&naddr, AF_INET6))
5690 PF_ACPY((struct pf_addr *)&dst.sin6_addr,
5692 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
5694 if (!PF_AZERO(&s->rt_addr, AF_INET6))
5695 PF_ACPY((struct pf_addr *)&dst.sin6_addr,
5696 &s->rt_addr, AF_INET6);
5697 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5707 if (pf_test6(PF_OUT, PFIL_FWD, ifp, &m0, NULL) != PF_PASS)
5709 else if (m0 == NULL)
5711 if (m0->m_len < sizeof(struct ip6_hdr)) {
5712 DPFPRINTF(PF_DEBUG_URGENT,
5713 ("%s: m0->m_len < sizeof(struct ip6_hdr)\n",
5717 ip6 = mtod(m0, struct ip6_hdr *);
5720 if (ifp->if_flags & IFF_LOOPBACK)
5721 m0->m_flags |= M_SKIP_FIREWALL;
5723 if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA_IPV6 &
5724 ~ifp->if_hwassist) {
5725 uint32_t plen = m0->m_pkthdr.len - sizeof(*ip6);
5726 in6_delayed_cksum(m0, plen, sizeof(struct ip6_hdr));
5727 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA_IPV6;
5731 * If the packet is too large for the outgoing interface,
5732 * send back an icmp6 error.
5734 if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr))
5735 dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
5736 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu)
5737 nd6_output_ifp(ifp, ifp, m0, &dst, NULL);
5739 in6_ifstat_inc(ifp, ifs6_in_toobig);
5740 if (r->rt != PF_DUPTO)
5741 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
5747 if (r->rt != PF_DUPTO)
5761 * FreeBSD supports cksum offloads for the following drivers:
5762 * em(4), fxp(4), ixgb(4), lge(4), ndis(4), nge(4), re(4),
5763 * ti(4), txp(4), xl(4)
5765 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR :
5766 * network driver performed the cksum including the pseudo header; need to verify
5769 * network driver performed the cksum, but an additional pseudo-header cksum
5770 * computation with the partial csum_data is needed (i.e. lack of H/W support
5771 * for the pseudo header, for instance hme(4), sk(4) and possibly gem(4))
5773 * After validating the cksum of the packet, set both the CSUM_DATA_VALID and
5774 * CSUM_PSEUDO_HDR flags in order to avoid recomputation of the cksum in upper
5776 * Also, set csum_data to 0xffff to force cksum validation.
5779 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af)
5785 if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
5787 if (m->m_pkthdr.len < off + len)
5792 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5793 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5794 sum = m->m_pkthdr.csum_data;
5796 ip = mtod(m, struct ip *);
5797 sum = in_pseudo(ip->ip_src.s_addr,
5798 ip->ip_dst.s_addr, htonl((u_short)len +
5799 m->m_pkthdr.csum_data + IPPROTO_TCP));
5806 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5807 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5808 sum = m->m_pkthdr.csum_data;
5810 ip = mtod(m, struct ip *);
5811 sum = in_pseudo(ip->ip_src.s_addr,
5812 ip->ip_dst.s_addr, htonl((u_short)len +
5813 m->m_pkthdr.csum_data + IPPROTO_UDP));
5821 case IPPROTO_ICMPV6:
5831 if (p == IPPROTO_ICMP) {
5836 sum = in_cksum(m, len);
5840 if (m->m_len < sizeof(struct ip))
5842 sum = in4_cksum(m, p, off, len);
5847 if (m->m_len < sizeof(struct ip6_hdr))
5849 sum = in6_cksum(m, p, off, len);
5860 KMOD_TCPSTAT_INC(tcps_rcvbadsum);
5865 KMOD_UDPSTAT_INC(udps_badsum);
5871 KMOD_ICMPSTAT_INC(icps_checksum);
5876 case IPPROTO_ICMPV6:
5878 KMOD_ICMP6STAT_INC(icp6s_checksum);
5885 if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
5886 m->m_pkthdr.csum_flags |=
5887 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
5888 m->m_pkthdr.csum_data = 0xffff;
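/*
 * For comparison (sketch, not from this file): a driver whose hardware
 * fully verified the payload cksum marks the received mbuf the same
 * way the code above does, e.g.
 *
 *	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 *	m->m_pkthdr.csum_data = 0xffff;
 *
 * so that tcp_input()/udp_input() skip recomputing the cksum.
 */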
5897 pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
5899 struct pfi_kif *kif;
5900 u_short action, reason = 0, log = 0;
5901 struct mbuf *m = *m0;
5902 struct ip *h = NULL;
5903 struct m_tag *ipfwtag;
5904 struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
5905 struct pf_state *s = NULL;
5906 struct pf_ruleset *ruleset = NULL;
5908 int off, dirndx, pqid = 0;
5912 if (!V_pf_status.running)
5915 memset(&pd, 0, sizeof(pd));
5917 kif = (struct pfi_kif *)ifp->if_pf_kif;
5920 DPFPRINTF(PF_DEBUG_URGENT,
5921 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
5924 if (kif->pfik_flags & PFI_IFLAG_SKIP)
5927 if (m->m_flags & M_SKIP_FIREWALL)
5930 pd.pf_mtag = pf_find_mtag(m);
5934 if (ip_divert_ptr != NULL &&
5935 ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
5936 struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1);
5937 if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) {
5938 if (pd.pf_mtag == NULL &&
5939 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
5943 pd.pf_mtag->flags |= PF_PACKET_LOOPED;
5944 m_tag_delete(m, ipfwtag);
5946 if (pd.pf_mtag && pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) {
5947 m->m_flags |= M_FASTFWD_OURS;
5948 pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
5950 } else if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
5951 /* We do IP header normalization and packet reassembly here */
5955 m = *m0; /* pf_normalize messes with m0 */
5956 h = mtod(m, struct ip *);
5958 off = h->ip_hl << 2;
5959 if (off < (int)sizeof(struct ip)) {
5961 REASON_SET(&reason, PFRES_SHORT);
5966 pd.src = (struct pf_addr *)&h->ip_src;
5967 pd.dst = (struct pf_addr *)&h->ip_dst;
5968 pd.sport = pd.dport = NULL;
5969 pd.ip_sum = &h->ip_sum;
5970 pd.proto_sum = NULL;
5973 pd.sidx = (dir == PF_IN) ? 0 : 1;
5974 pd.didx = (dir == PF_IN) ? 1 : 0;
5977 pd.tot_len = ntohs(h->ip_len);
5979 /* handle fragments that didn't get reassembled by normalization */
5980 if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
5981 action = pf_test_fragment(&r, dir, kif, m, h,
5992 if (!pf_pull_hdr(m, off, &th, sizeof(th),
5993 &action, &reason, AF_INET)) {
5994 log = action != PF_PASS;
5997 pd.p_len = pd.tot_len - off - (th.th_off << 2);
5998 if ((th.th_flags & TH_ACK) && pd.p_len == 0)
6000 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6001 if (action == PF_DROP)
6003 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6005 if (action == PF_PASS) {
6006 if (pfsync_update_state_ptr != NULL)
6007 pfsync_update_state_ptr(s);
6011 } else if (s == NULL)
6012 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6021 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
6022 &action, &reason, AF_INET)) {
6023 log = action != PF_PASS;
6026 if (uh.uh_dport == 0 ||
6027 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
6028 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
6030 REASON_SET(&reason, PFRES_SHORT);
6033 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6034 if (action == PF_PASS) {
6035 if (pfsync_update_state_ptr != NULL)
6036 pfsync_update_state_ptr(s);
6040 } else if (s == NULL)
6041 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6046 case IPPROTO_ICMP: {
6050 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
6051 &action, &reason, AF_INET)) {
6052 log = action != PF_PASS;
6055 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
6057 if (action == PF_PASS) {
6058 if (pfsync_update_state_ptr != NULL)
6059 pfsync_update_state_ptr(s);
6063 } else if (s == NULL)
6064 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6070 case IPPROTO_ICMPV6: {
6072 DPFPRINTF(PF_DEBUG_MISC,
6073 ("pf: dropping IPv4 packet with ICMPv6 payload\n"));
6079 action = pf_test_state_other(&s, dir, kif, m, &pd);
6080 if (action == PF_PASS) {
6081 if (pfsync_update_state_ptr != NULL)
6082 pfsync_update_state_ptr(s);
6086 } else if (s == NULL)
6087 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6094 if (action == PF_PASS && h->ip_hl > 5 &&
6095 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6097 REASON_SET(&reason, PFRES_IPOPTIONS);
6099 DPFPRINTF(PF_DEBUG_MISC,
6100 ("pf: dropping packet with ip options\n"));
6103 if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
6105 REASON_SET(&reason, PFRES_MEMORY);
6107 if (r->rtableid >= 0)
6108 M_SETFIB(m, r->rtableid);
6110 if (r->scrub_flags & PFSTATE_SETPRIO) {
6111 if (pd.tos & IPTOS_LOWDELAY)
6113 if (pf_ieee8021q_setpcp(m, r->set_prio[pqid])) {
6115 REASON_SET(&reason, PFRES_MEMORY);
6117 DPFPRINTF(PF_DEBUG_MISC,
6118 ("pf: failed to allocate 802.1q mtag\n"));
6123 if (action == PF_PASS && r->qid) {
6124 if (pd.pf_mtag == NULL &&
6125 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6127 REASON_SET(&reason, PFRES_MEMORY);
6130 pd.pf_mtag->qid_hash = pf_state_hash(s);
6131 if (pqid || (pd.tos & IPTOS_LOWDELAY))
6132 pd.pf_mtag->qid = r->pqid;
6134 pd.pf_mtag->qid = r->qid;
6135 /* Add hints for ecn. */
6136 pd.pf_mtag->hdr = h;
6143 * connections redirected to loopback should not match sockets
6144 * bound specifically to loopback due to security implications,
6145 * see tcp_input() and in_pcblookup_listen().
6147 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6148 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6149 (s->nat_rule.ptr->action == PF_RDR ||
6150 s->nat_rule.ptr->action == PF_BINAT) &&
6151 (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
6152 m->m_flags |= M_SKIP_FIREWALL;
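/*
 * Concretely, this is what lets a redirect-to-loopback rule such as
 * the following (illustrative) reach a daemon bound to 127.0.0.1:
 *
 *	rdr on em0 proto tcp from any to any port 80 -> 127.0.0.1 port 8080
 */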
6154 if (action == PF_PASS && r->divert.port && ip_divert_ptr != NULL &&
6155 !PACKET_LOOPED(&pd)) {
6157 ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0,
6158 sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
6159 if (ipfwtag != NULL) {
6160 ((struct ipfw_rule_ref *)(ipfwtag+1))->info =
6161 ntohs(r->divert.port);
6162 ((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir;
6167 m_tag_prepend(m, ipfwtag);
6168 if (m->m_flags & M_FASTFWD_OURS) {
6169 if (pd.pf_mtag == NULL &&
6170 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6172 REASON_SET(&reason, PFRES_MEMORY);
6174 DPFPRINTF(PF_DEBUG_MISC,
6175 ("pf: failed to allocate tag\n"));
6177 pd.pf_mtag->flags |=
6178 PF_FASTFWD_OURS_PRESENT;
6179 m->m_flags &= ~M_FASTFWD_OURS;
6182 ip_divert_ptr(*m0, dir == PF_IN ? DIR_IN : DIR_OUT);
6187 /* XXX: ipfw has the same behaviour! */
6189 REASON_SET(&reason, PFRES_MEMORY);
6191 DPFPRINTF(PF_DEBUG_MISC,
6192 ("pf: failed to allocate divert tag\n"));
6199 if (s != NULL && s->nat_rule.ptr != NULL &&
6200 s->nat_rule.ptr->log & PF_LOG_ALL)
6201 lr = s->nat_rule.ptr;
6204 PFLOG_PACKET(kif, m, AF_INET, dir, reason, lr, a, ruleset, &pd,
6208 kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
6209 kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
6211 if (action == PF_PASS || r->action == PF_DROP) {
6212 dirndx = (dir == PF_OUT);
6213 r->packets[dirndx]++;
6214 r->bytes[dirndx] += pd.tot_len;
6216 a->packets[dirndx]++;
6217 a->bytes[dirndx] += pd.tot_len;
6220 if (s->nat_rule.ptr != NULL) {
6221 s->nat_rule.ptr->packets[dirndx]++;
6222 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
6224 if (s->src_node != NULL) {
6225 s->src_node->packets[dirndx]++;
6226 s->src_node->bytes[dirndx] += pd.tot_len;
6228 if (s->nat_src_node != NULL) {
6229 s->nat_src_node->packets[dirndx]++;
6230 s->nat_src_node->bytes[dirndx] += pd.tot_len;
6232 dirndx = (dir == s->direction) ? 0 : 1;
6233 s->packets[dirndx]++;
6234 s->bytes[dirndx] += pd.tot_len;
6237 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6238 if (nr != NULL && r == &V_pf_default_rule)
6240 if (tr->src.addr.type == PF_ADDR_TABLE)
6241 pfr_update_stats(tr->src.addr.p.tbl,
6242 (s == NULL) ? pd.src :
6243 &s->key[(s->direction == PF_IN)]->
6244 addr[(s->direction == PF_OUT)],
6245 pd.af, pd.tot_len, dir == PF_OUT,
6246 r->action == PF_PASS, tr->src.neg);
6247 if (tr->dst.addr.type == PF_ADDR_TABLE)
6248 pfr_update_stats(tr->dst.addr.p.tbl,
6249 (s == NULL) ? pd.dst :
6250 &s->key[(s->direction == PF_IN)]->
6251 addr[(s->direction == PF_IN)],
6252 pd.af, pd.tot_len, dir == PF_OUT,
6253 r->action == PF_PASS, tr->dst.neg);
6257 case PF_SYNPROXY_DROP:
6268 /* pf_route() returns unlocked. */
6270 pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);
6284 pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
6286 struct pfi_kif *kif;
6287 u_short action, reason = 0, log = 0;
6288 struct mbuf *m = *m0, *n = NULL;
6290 struct ip6_hdr *h = NULL;
6291 struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
6292 struct pf_state *s = NULL;
6293 struct pf_ruleset *ruleset = NULL;
6295 int off, terminal = 0, dirndx, rh_cnt = 0, pqid = 0;
6299 if (!V_pf_status.running)
6302 memset(&pd, 0, sizeof(pd));
6303 pd.pf_mtag = pf_find_mtag(m);
6305 if (pd.pf_mtag && pd.pf_mtag->flags & PF_TAG_GENERATED)
6308 kif = (struct pfi_kif *)ifp->if_pf_kif;
6310 DPFPRINTF(PF_DEBUG_URGENT,
6311 ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
6314 if (kif->pfik_flags & PFI_IFLAG_SKIP)
6317 if (m->m_flags & M_SKIP_FIREWALL)
6322 /* We do IP header normalization and packet reassembly here */
6323 if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
6327 m = *m0; /* pf_normalize messes with m0 */
6328 h = mtod(m, struct ip6_hdr *);
6332 * we do not support jumbograms yet. If we keep going, a zero ip6_plen
6333 * will do something bad, so drop the packet for now.
6335 if (ntohs(h->ip6_plen) == 0) {
6337 REASON_SET(&reason, PFRES_NORM); /* XXX */
6342 pd.src = (struct pf_addr *)&h->ip6_src;
6343 pd.dst = (struct pf_addr *)&h->ip6_dst;
6344 pd.sport = pd.dport = NULL;
6346 pd.proto_sum = NULL;
6348 pd.sidx = (dir == PF_IN) ? 0 : 1;
6349 pd.didx = (dir == PF_IN) ? 1 : 0;
6352 pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
6354 off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
6355 pd.proto = h->ip6_nxt;
6358 case IPPROTO_FRAGMENT:
6359 action = pf_test_fragment(&r, dir, kif, m, h,
6361 if (action == PF_DROP)
6362 REASON_SET(&reason, PFRES_FRAG);
6364 case IPPROTO_ROUTING: {
6365 struct ip6_rthdr rthdr;
6368 DPFPRINTF(PF_DEBUG_MISC,
6369 ("pf: IPv6 more than one rthdr\n"));
6371 REASON_SET(&reason, PFRES_IPOPTIONS);
6375 if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
6377 DPFPRINTF(PF_DEBUG_MISC,
6378 ("pf: IPv6 short rthdr\n"));
6380 REASON_SET(&reason, PFRES_SHORT);
6384 if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
6385 DPFPRINTF(PF_DEBUG_MISC,
6386 ("pf: IPv6 rthdr0\n"));
6388 REASON_SET(&reason, PFRES_IPOPTIONS);
6395 case IPPROTO_HOPOPTS:
6396 case IPPROTO_DSTOPTS: {
6397 /* get next header and header length */
6398 struct ip6_ext opt6;
6400 if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
6401 NULL, &reason, pd.af)) {
6402 DPFPRINTF(PF_DEBUG_MISC,
6403 ("pf: IPv6 short opt\n"));
6408 if (pd.proto == IPPROTO_AH)
6409 off += (opt6.ip6e_len + 2) * 4;
6411 off += (opt6.ip6e_len + 1) * 8;
6412 pd.proto = opt6.ip6e_nxt;
6413 /* go to the next header */
6420 } while (!terminal);
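/*
 * Worked example for the option walk above: AH's ip6e_len counts
 * 32-bit words beyond the first two, so ip6e_len 4 spans
 * (4 + 2) * 4 = 24 bytes; HOPOPTS/DSTOPTS count 8-byte units beyond
 * the first, so ip6e_len 1 spans (1 + 1) * 8 = 16 bytes.
 */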
6422 /* if there's no routing header, use unmodified mbuf for checksumming */
6432 if (!pf_pull_hdr(m, off, &th, sizeof(th),
6433 &action, &reason, AF_INET6)) {
6434 log = action != PF_PASS;
6437 pd.p_len = pd.tot_len - off - (th.th_off << 2);
6438 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6439 if (action == PF_DROP)
6441 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6443 if (action == PF_PASS) {
6444 if (pfsync_update_state_ptr != NULL)
6445 pfsync_update_state_ptr(s);
6449 } else if (s == NULL)
6450 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6459 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
6460 &action, &reason, AF_INET6)) {
6461 log = action != PF_PASS;
6464 if (uh.uh_dport == 0 ||
6465 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
6466 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
6468 REASON_SET(&reason, PFRES_SHORT);
6471 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6472 if (action == PF_PASS) {
6473 if (pfsync_update_state_ptr != NULL)
6474 pfsync_update_state_ptr(s);
6478 } else if (s == NULL)
6479 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6484 case IPPROTO_ICMP: {
6486 DPFPRINTF(PF_DEBUG_MISC,
6487 ("pf: dropping IPv6 packet with ICMPv4 payload\n"));
6491 case IPPROTO_ICMPV6: {
6492 struct icmp6_hdr ih;
6495 if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
6496 &action, &reason, AF_INET6)) {
6497 log = action != PF_PASS;
6500 action = pf_test_state_icmp(&s, dir, kif,
6501 m, off, h, &pd, &reason);
6502 if (action == PF_PASS) {
6503 if (pfsync_update_state_ptr != NULL)
6504 pfsync_update_state_ptr(s);
6508 } else if (s == NULL)
6509 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6515 action = pf_test_state_other(&s, dir, kif, m, &pd);
6516 if (action == PF_PASS) {
6517 if (pfsync_update_state_ptr != NULL)
6518 pfsync_update_state_ptr(s);
6522 } else if (s == NULL)
6523 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6535 /* handle dangerous IPv6 extension headers. */
6536 if (action == PF_PASS && rh_cnt &&
6537 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6539 REASON_SET(&reason, PFRES_IPOPTIONS);
6541 DPFPRINTF(PF_DEBUG_MISC,
6542 ("pf: dropping packet with dangerous v6 headers\n"));
6545 if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
6547 REASON_SET(&reason, PFRES_MEMORY);
6549 if (r->rtableid >= 0)
6550 M_SETFIB(m, r->rtableid);
6552 if (r->scrub_flags & PFSTATE_SETPRIO) {
6553 if (pd.tos & IPTOS_LOWDELAY)
6555 if (pf_ieee8021q_setpcp(m, r->set_prio[pqid])) {
6557 REASON_SET(&reason, PFRES_MEMORY);
6559 DPFPRINTF(PF_DEBUG_MISC,
6560 ("pf: failed to allocate 802.1q mtag\n"));
6565 if (action == PF_PASS && r->qid) {
6566 if (pd.pf_mtag == NULL &&
6567 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6569 REASON_SET(&reason, PFRES_MEMORY);
6572 pd.pf_mtag->qid_hash = pf_state_hash(s);
6573 if (pd.tos & IPTOS_LOWDELAY)
6574 pd.pf_mtag->qid = r->pqid;
6576 pd.pf_mtag->qid = r->qid;
6577 /* Add hints for ecn. */
6578 pd.pf_mtag->hdr = h;
6583 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6584 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6585 (s->nat_rule.ptr->action == PF_RDR ||
6586 s->nat_rule.ptr->action == PF_BINAT) &&
6587 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
6588 m->m_flags |= M_SKIP_FIREWALL;
6590 /* XXX: Anybody working on it?! */
6592 printf("pf: divert(9) is not supported for IPv6\n");
6597 if (s != NULL && s->nat_rule.ptr != NULL &&
6598 s->nat_rule.ptr->log & PF_LOG_ALL)
6599 lr = s->nat_rule.ptr;
6602 PFLOG_PACKET(kif, m, AF_INET6, dir, reason, lr, a, ruleset,
6606 kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
6607 kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
6609 if (action == PF_PASS || r->action == PF_DROP) {
6610 dirndx = (dir == PF_OUT);
6611 r->packets[dirndx]++;
6612 r->bytes[dirndx] += pd.tot_len;
6614 a->packets[dirndx]++;
6615 a->bytes[dirndx] += pd.tot_len;
6618 if (s->nat_rule.ptr != NULL) {
6619 s->nat_rule.ptr->packets[dirndx]++;
6620 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
6622 if (s->src_node != NULL) {
6623 s->src_node->packets[dirndx]++;
6624 s->src_node->bytes[dirndx] += pd.tot_len;
6626 if (s->nat_src_node != NULL) {
6627 s->nat_src_node->packets[dirndx]++;
6628 s->nat_src_node->bytes[dirndx] += pd.tot_len;
6630 dirndx = (dir == s->direction) ? 0 : 1;
6631 s->packets[dirndx]++;
6632 s->bytes[dirndx] += pd.tot_len;
6635 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6636 if (nr != NULL && r == &V_pf_default_rule)
6638 if (tr->src.addr.type == PF_ADDR_TABLE)
6639 pfr_update_stats(tr->src.addr.p.tbl,
6640 (s == NULL) ? pd.src :
6641 &s->key[(s->direction == PF_IN)]->addr[0],
6642 pd.af, pd.tot_len, dir == PF_OUT,
6643 r->action == PF_PASS, tr->src.neg);
6644 if (tr->dst.addr.type == PF_ADDR_TABLE)
6645 pfr_update_stats(tr->dst.addr.p.tbl,
6646 (s == NULL) ? pd.dst :
6647 &s->key[(s->direction == PF_IN)]->addr[1],
6648 pd.af, pd.tot_len, dir == PF_OUT,
6649 r->action == PF_PASS, tr->dst.neg);
6653 case PF_SYNPROXY_DROP:
6664 /* pf_route6() returns unlocked. */
6666 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
6675 /* If reassembled packet passed, create new fragments. */
6676 if (action == PF_PASS && *m0 && (pflags & PFIL_FWD) &&
6677 (mtag = m_tag_find(m, PF_REASSEMBLED, NULL)) != NULL)
6678 action = pf_refragment6(ifp, m0, mtag);