/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2008 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/random.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/ucred.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/route.h>
#include <net/radix_mpath.h>

#include <net/pfvar.h>
#include <net/if_pflog.h>
#include <net/if_pfsync.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/in_fib.h>
#include <netinet/ip.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <netpfil/ipfw/ip_fw_private.h> /* XXX: only for DIR_IN/DIR_OUT */
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_fib.h>
#include <netinet6/scope6_var.h>
#endif /* INET6 */
#include <machine/in_cksum.h>
#include <security/mac/mac_framework.h>

#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x
VNET_DEFINE(struct pf_altqqueue,	 pf_altqs[2]);
VNET_DEFINE(struct pf_palist,		 pf_pabuf);
VNET_DEFINE(struct pf_altqqueue *,	 pf_altqs_active);
VNET_DEFINE(struct pf_altqqueue *,	 pf_altqs_inactive);
VNET_DEFINE(struct pf_kstatus,		 pf_status);

VNET_DEFINE(u_int32_t,			 ticket_altqs_active);
VNET_DEFINE(u_int32_t,			 ticket_altqs_inactive);
VNET_DEFINE(int,			 altqs_inactive_open);
VNET_DEFINE(u_int32_t,			 ticket_pabuf);

VNET_DEFINE(MD5_CTX,			 pf_tcp_secret_ctx);
#define	V_pf_tcp_secret_ctx		 VNET(pf_tcp_secret_ctx)
VNET_DEFINE(u_char,			 pf_tcp_secret[16]);
#define	V_pf_tcp_secret			 VNET(pf_tcp_secret)
VNET_DEFINE(int,			 pf_tcp_secret_init);
#define	V_pf_tcp_secret_init		 VNET(pf_tcp_secret_init)
VNET_DEFINE(int,			 pf_tcp_iss_off);
#define	V_pf_tcp_iss_off		 VNET(pf_tcp_iss_off)
VNET_DECLARE(int,			 pf_vnet_active);
#define	V_pf_vnet_active		 VNET(pf_vnet_active)

static VNET_DEFINE(uint32_t, pf_purge_idx);
#define	V_pf_purge_idx	VNET(pf_purge_idx)
/*
 * Queue for pf_intr() sends.
 */
static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations");
struct pf_send_entry {
	STAILQ_ENTRY(pf_send_entry)	pfse_next;
	struct mbuf			*pfse_m;
	enum {
		PFSE_IP,
		PFSE_IP6,
		PFSE_ICMP,
		PFSE_ICMP6,
	}				pfse_type;
	struct {
		int		type;
		int		code;
		int		mtu;
	} icmpopts;
};

STAILQ_HEAD(pf_send_head, pf_send_entry);
static VNET_DEFINE(struct pf_send_head, pf_sendqueue);
#define	V_pf_sendqueue	VNET(pf_sendqueue)

static struct mtx pf_sendqueue_mtx;
MTX_SYSINIT(pf_sendqueue_mtx, &pf_sendqueue_mtx, "pf send queue", MTX_DEF);
#define	PF_SENDQ_LOCK()		mtx_lock(&pf_sendqueue_mtx)
#define	PF_SENDQ_UNLOCK()	mtx_unlock(&pf_sendqueue_mtx)
/*
 * Queue for pf_overload_task() tasks.
 */
struct pf_overload_entry {
	SLIST_ENTRY(pf_overload_entry)	next;
	struct pf_addr			addr;
	sa_family_t			af;
	uint8_t				dir;
	struct pf_rule			*rule;
};

SLIST_HEAD(pf_overload_head, pf_overload_entry);
static VNET_DEFINE(struct pf_overload_head, pf_overloadqueue);
#define V_pf_overloadqueue	VNET(pf_overloadqueue)
static VNET_DEFINE(struct task, pf_overloadtask);
#define	V_pf_overloadtask	VNET(pf_overloadtask)

static struct mtx pf_overloadqueue_mtx;
MTX_SYSINIT(pf_overloadqueue_mtx, &pf_overloadqueue_mtx,
    "pf overload/flush queue", MTX_DEF);
#define	PF_OVERLOADQ_LOCK()	mtx_lock(&pf_overloadqueue_mtx)
#define	PF_OVERLOADQ_UNLOCK()	mtx_unlock(&pf_overloadqueue_mtx)
VNET_DEFINE(struct pf_rulequeue, pf_unlinked_rules);
struct mtx pf_unlnkdrules_mtx;
MTX_SYSINIT(pf_unlnkdrules_mtx, &pf_unlnkdrules_mtx, "pf unlinked rules",
    MTX_DEF);
static VNET_DEFINE(uma_zone_t,	pf_sources_z);
#define	V_pf_sources_z	VNET(pf_sources_z)
uma_zone_t		pf_mtag_z;
VNET_DEFINE(uma_zone_t,	 pf_state_z);
VNET_DEFINE(uma_zone_t,	 pf_state_key_z);

VNET_DEFINE(uint64_t, pf_stateid[MAXCPU]);
#define	PFID_CPUBITS	8
#define	PFID_CPUSHIFT	(sizeof(uint64_t) * NBBY - PFID_CPUBITS)
#define	PFID_CPUMASK	((uint64_t)((1 << PFID_CPUBITS) - 1) <<	PFID_CPUSHIFT)
#define	PFID_MAXID	(~PFID_CPUMASK)
CTASSERT((1 << PFID_CPUBITS) >= MAXCPU);
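
/*
 * Layout sketch (illustrative, not part of the original source): with
 * PFID_CPUBITS == 8 on a 64-bit id, PFID_CPUSHIFT is 56, so the top byte
 * of a state id holds the CPU number and the low 56 bits hold a per-CPU
 * counter.  E.g. counter value 0x2a allocated on CPU 3 yields the
 * host-order id 0x030000000000002a before it is converted with htobe64()
 * in pf_state_insert() below.
 */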
static void		 pf_src_tree_remove_state(struct pf_state *);
static void		 pf_init_threshold(struct pf_threshold *, u_int32_t,
			    u_int32_t);
static void		 pf_add_threshold(struct pf_threshold *);
static int		 pf_check_threshold(struct pf_threshold *);

static void		 pf_change_ap(struct mbuf *, struct pf_addr *, u_int16_t *,
			    u_int16_t *, u_int16_t *, struct pf_addr *,
			    u_int16_t, u_int8_t, sa_family_t);
static int		 pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
			    struct tcphdr *, struct pf_state_peer *);
static void		 pf_change_icmp(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, struct pf_addr *, u_int16_t,
			    u_int16_t *, u_int16_t *, u_int16_t *,
			    u_int16_t *, u_int8_t, sa_family_t);
static void		 pf_send_tcp(struct mbuf *,
			    const struct pf_rule *, sa_family_t,
			    const struct pf_addr *, const struct pf_addr *,
			    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
			    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
			    u_int16_t, struct ifnet *);
static void		 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
			    sa_family_t, struct pf_rule *);
static void		 pf_detach_state(struct pf_state *);
static int		 pf_state_key_attach(struct pf_state_key *,
			    struct pf_state_key *, struct pf_state *);
static void		 pf_state_key_detach(struct pf_state *, int);
static int		 pf_state_key_ctor(void *, int, void *, int);
static u_int32_t	 pf_tcp_iss(struct pf_pdesc *);
static int		 pf_test_rule(struct pf_rule **, struct pf_state **,
			    int, struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **, struct inpcb *);
static int		 pf_create_state(struct pf_rule *, struct pf_rule *,
			    struct pf_rule *, struct pf_pdesc *,
			    struct pf_src_node *, struct pf_state_key *,
			    struct pf_state_key *, struct mbuf *, int,
			    u_int16_t, u_int16_t, int *, struct pfi_kif *,
			    struct pf_state **, int, u_int16_t, u_int16_t,
			    int);
static int		 pf_test_fragment(struct pf_rule **, int,
			    struct pfi_kif *, struct mbuf *, void *,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **);
static int		 pf_tcp_track_full(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, u_short *, int *);
static int		 pf_tcp_track_sloppy(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pf_pdesc *, u_short *);
static int		 pf_test_state_tcp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_udp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *);
static int		 pf_test_state_icmp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_other(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
static u_int8_t		 pf_get_wscale(struct mbuf *, int, u_int16_t,
			    sa_family_t);
static u_int16_t	 pf_get_mss(struct mbuf *, int, u_int16_t,
			    sa_family_t);
static u_int16_t	 pf_calc_mss(struct pf_addr *, sa_family_t,
			    int, u_int16_t);
static int		 pf_check_proto_cksum(struct mbuf *, int, int,
			    u_int8_t, sa_family_t);
static void		 pf_print_state_parts(struct pf_state *,
			    struct pf_state_key *, struct pf_state_key *);
static int		 pf_addr_wrap_neq(struct pf_addr_wrap *,
			    struct pf_addr_wrap *);
static struct pf_state	*pf_find_state(struct pfi_kif *,
			    struct pf_state_key_cmp *, u_int);
static int		 pf_src_connlimit(struct pf_state **);
static void		 pf_overload_task(void *v, int pending);
static int		 pf_insert_src_node(struct pf_src_node **,
			    struct pf_rule *, struct pf_addr *, sa_family_t);
static u_int		 pf_purge_expired_states(u_int, int);
static void		 pf_purge_unlinked_rules(void);
static int		 pf_mtag_uminit(void *, int, int);
static void		 pf_mtag_free(struct m_tag *);
#ifdef INET
static void		 pf_route(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
#endif /* INET */
#ifdef INET6
static void		 pf_change_a6(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, u_int8_t);
static void		 pf_route6(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
#endif /* INET6 */

int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);

extern int pf_end_threads;
extern struct proc *pf_purge_proc;
VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);

#define	PACKET_LOOPED(pd)	((pd)->pf_mtag &&			\
				 (pd)->pf_mtag->flags & PF_PACKET_LOOPED)
#define	STATE_LOOKUP(i, k, d, s, pd)					\
	do {								\
		(s) = pf_find_state((i), (k), (d));			\
		if ((s) == NULL)					\
			return (PF_DROP);				\
		if (PACKET_LOOPED(pd))					\
			return (PF_PASS);				\
		if ((d) == PF_OUT &&					\
		    (((s)->rule.ptr->rt == PF_ROUTETO &&		\
		    (s)->rule.ptr->direction == PF_OUT) ||		\
		    ((s)->rule.ptr->rt == PF_REPLYTO &&			\
		    (s)->rule.ptr->direction == PF_IN)) &&		\
		    (s)->rt_kif != NULL &&				\
		    (s)->rt_kif != (i))					\
			return (PF_PASS);				\
	} while (0)
#define	BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all
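
/*
 * Illustrative note (not part of the original source): BOUND_IFACE()
 * decides which kif a freshly created state is bound to.  With the
 * default floating state policy the state is attached to V_pfi_all and
 * matches traffic on any interface; only rules carrying PFRULE_IFBOUND
 * (pf.conf "set state-policy if-bound") pin the state to the interface
 * that created it.
 */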
#define	STATE_INC_COUNTERS(s)						\
	do {								\
		counter_u64_add(s->rule.ptr->states_cur, 1);		\
		counter_u64_add(s->rule.ptr->states_tot, 1);		\
		if (s->anchor.ptr != NULL) {				\
			counter_u64_add(s->anchor.ptr->states_cur, 1);	\
			counter_u64_add(s->anchor.ptr->states_tot, 1);	\
		}							\
		if (s->nat_rule.ptr != NULL) {				\
			counter_u64_add(s->nat_rule.ptr->states_cur, 1);\
			counter_u64_add(s->nat_rule.ptr->states_tot, 1);\
		}							\
	} while (0)

#define	STATE_DEC_COUNTERS(s)						\
	do {								\
		if (s->nat_rule.ptr != NULL)				\
			counter_u64_add(s->nat_rule.ptr->states_cur, -1);\
		if (s->anchor.ptr != NULL)				\
			counter_u64_add(s->anchor.ptr->states_cur, -1);	\
		counter_u64_add(s->rule.ptr->states_cur, -1);		\
	} while (0)
static MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
VNET_DEFINE(struct pf_idhash *, pf_idhash);
VNET_DEFINE(struct pf_srchash *, pf_srchash);

SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW, 0, "pf(4)");

u_long	pf_hashmask;
u_long	pf_srchashmask;
static u_long	pf_hashsize;
static u_long	pf_srchashsize;

SYSCTL_ULONG(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN,
    &pf_hashsize, 0, "Size of pf(4) states hashtable");
SYSCTL_ULONG(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN,
    &pf_srchashsize, 0, "Size of pf(4) source nodes hashtable");

VNET_DEFINE(void *, pf_swi_cookie);

VNET_DEFINE(uint32_t, pf_hashseed);
#define	V_pf_hashseed	VNET(pf_hashseed)
static __inline int
pf_addr_cmp(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
{

	switch (af) {
#ifdef INET
	case AF_INET:
		if (a->addr32[0] > b->addr32[0])
			return (1);
		if (a->addr32[0] < b->addr32[0])
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (a->addr32[3] > b->addr32[3])
			return (1);
		if (a->addr32[3] < b->addr32[3])
			return (-1);
		if (a->addr32[2] > b->addr32[2])
			return (1);
		if (a->addr32[2] < b->addr32[2])
			return (-1);
		if (a->addr32[1] > b->addr32[1])
			return (1);
		if (a->addr32[1] < b->addr32[1])
			return (-1);
		if (a->addr32[0] > b->addr32[0])
			return (1);
		if (a->addr32[0] < b->addr32[0])
			return (-1);
		break;
#endif /* INET6 */
	default:
		panic("%s: unknown address family %u", __func__, af);
	}
	return (0);
}
static __inline uint32_t
pf_hashkey(struct pf_state_key *sk)
{
	uint32_t h;

	h = murmur3_32_hash32((uint32_t *)sk,
	    sizeof(struct pf_state_key_cmp)/sizeof(uint32_t),
	    V_pf_hashseed);

	return (h & pf_hashmask);
}
static __inline uint32_t
pf_hashsrc(struct pf_addr *addr, sa_family_t af)
{
	uint32_t h;

	switch (af) {
	case AF_INET:
		h = murmur3_32_hash32((uint32_t *)&addr->v4,
		    sizeof(addr->v4)/sizeof(uint32_t), V_pf_hashseed);
		break;
	case AF_INET6:
		h = murmur3_32_hash32((uint32_t *)&addr->v6,
		    sizeof(addr->v6)/sizeof(uint32_t), V_pf_hashseed);
		break;
	default:
		panic("%s: unknown address family %u", __func__, af);
	}

	return (h & pf_srchashmask);
}
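
/*
 * Illustrative note (not part of the original source): both hashes mix
 * their input with V_pf_hashseed, which pf_initialize() below draws from
 * arc4random(), so the bucket distribution differs per boot and is hard
 * to predict from outside.  Masking with pf_hashmask / pf_srchashmask
 * works because the table sizes are forced to powers of two.
 */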
static int
pf_state_hash(struct pf_state *s)
{
	u_int32_t hv = (intptr_t)s / sizeof(*s);

	hv ^= crc32(&s->src, sizeof(s->src));
	hv ^= crc32(&s->dst, sizeof(s->dst));
	if (hv == 0)
		hv = 1;
	return (hv);
}
void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{

	switch (af) {
#ifdef INET
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
#endif /* INET */
	case AF_INET6:
		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];
		break;
	}
}
static void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = time_uptime;
}
static void
pf_add_threshold(struct pf_threshold *threshold)
{
	u_int32_t t = time_uptime, diff = t - threshold->last;

	if (diff >= threshold->seconds)
		threshold->count = 0;
	else
		threshold->count -= threshold->count * diff /
		    threshold->seconds;
	threshold->count += PF_THRESHOLD_MULT;
	threshold->last = t;
}
static int
pf_check_threshold(struct pf_threshold *threshold)
{
	return (threshold->count > threshold->limit);
}
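
/*
 * Worked example (illustrative, not part of the original source): for
 * "max-src-conn-rate 10/5", pf_init_threshold() stores limit =
 * 10 * PF_THRESHOLD_MULT and seconds = 5.  Each new connection adds
 * PF_THRESHOLD_MULT to count, after first decaying the old count by
 * count * diff / seconds for the diff seconds elapsed — a linear leaky
 * bucket in fixed point.  pf_check_threshold() then trips once more
 * than 10 connections' worth of credit remains inside the window.
 */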
static int
pf_src_connlimit(struct pf_state **state)
{
	struct pf_overload_entry *pfoe;
	int bad = 0;

	PF_STATE_LOCK_ASSERT(*state);

	(*state)->src_node->conn++;
	(*state)->src.tcp_est = 1;
	pf_add_threshold(&(*state)->src_node->conn_rate);

	if ((*state)->rule.ptr->max_src_conn &&
	    (*state)->rule.ptr->max_src_conn <
	    (*state)->src_node->conn) {
		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONN], 1);
		bad++;
	}

	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONNRATE], 1);
		bad++;
	}

	if (!bad)
		return (0);

	/* Kill this state. */
	(*state)->timeout = PFTM_PURGE;
	(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;

	if ((*state)->rule.ptr->overload_tbl == NULL)
		return (1);

	/* Schedule overloading and flushing task. */
	pfoe = malloc(sizeof(*pfoe), M_PFTEMP, M_NOWAIT);
	if (pfoe == NULL)
		return (1);	/* too bad :( */

	bcopy(&(*state)->src_node->addr, &pfoe->addr, sizeof(pfoe->addr));
	pfoe->af = (*state)->key[PF_SK_WIRE]->af;
	pfoe->rule = (*state)->rule.ptr;
	pfoe->dir = (*state)->direction;
	PF_OVERLOADQ_LOCK();
	SLIST_INSERT_HEAD(&V_pf_overloadqueue, pfoe, next);
	PF_OVERLOADQ_UNLOCK();
	taskqueue_enqueue(taskqueue_swi, &V_pf_overloadtask);

	return (1);
}
static void
pf_overload_task(void *v, int pending)
{
	struct pf_overload_head queue;
	struct pfr_addr p;
	struct pf_overload_entry *pfoe, *pfoe1;
	uint32_t killed = 0;

	CURVNET_SET((struct vnet *)v);

	PF_OVERLOADQ_LOCK();
	queue = V_pf_overloadqueue;
	SLIST_INIT(&V_pf_overloadqueue);
	PF_OVERLOADQ_UNLOCK();

	bzero(&p, sizeof(p));
	SLIST_FOREACH(pfoe, &queue, next) {
		counter_u64_add(V_pf_status.lcounters[LCNT_OVERLOAD_TABLE], 1);
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("%s: blocking address ", __func__);
			pf_print_host(&pfoe->addr, 0, pfoe->af);
			printf("\n");
		}

		p.pfra_af = pfoe->af;
		switch (pfoe->af) {
#ifdef INET
		case AF_INET:
			p.pfra_net = 32;
			p.pfra_ip4addr = pfoe->addr.v4;
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			p.pfra_net = 128;
			p.pfra_ip6addr = pfoe->addr.v6;
			break;
#endif /* INET6 */
		}

		pfr_insert_kentry(pfoe->rule->overload_tbl, &p, time_second);
	}

	/*
	 * Remove those entries that don't need flushing.
	 */
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		if (pfoe->rule->flush == 0) {
			SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next);
			free(pfoe, M_PFTEMP);
		} else
			counter_u64_add(
			    V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH], 1);

	/* If nothing to flush, return. */
	if (SLIST_EMPTY(&queue)) {
		CURVNET_RESTORE();
		return;
	}

	for (int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_state_key *sk;
		struct pf_state *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			sk = s->key[PF_SK_WIRE];
			SLIST_FOREACH(pfoe, &queue, next)
				if (sk->af == pfoe->af &&
				    ((pfoe->rule->flush & PF_FLUSH_GLOBAL) ||
				    pfoe->rule == s->rule.ptr) &&
				    ((pfoe->dir == PF_OUT &&
				    PF_AEQ(&pfoe->addr, &sk->addr[1], sk->af)) ||
				    (pfoe->dir == PF_IN &&
				    PF_AEQ(&pfoe->addr, &sk->addr[0], sk->af)))) {
					s->timeout = PFTM_PURGE;
					s->src.state = s->dst.state = TCPS_CLOSED;
					killed++;
				}
		}
		PF_HASHROW_UNLOCK(ih);
	}
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		free(pfoe, M_PFTEMP);
	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("%s: %u states killed", __func__, killed);

	CURVNET_RESTORE();
}
/*
 * Can return locked on failure, so that we can consistently
 * allocate and insert a new one.
 */
struct pf_src_node *
pf_find_src_node(struct pf_addr *src, struct pf_rule *rule, sa_family_t af,
	int returnlocked)
{
	struct pf_srchash *sh;
	struct pf_src_node *n;

	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_SEARCH], 1);

	sh = &V_pf_srchash[pf_hashsrc(src, af)];
	PF_HASHROW_LOCK(sh);
	LIST_FOREACH(n, &sh->nodes, entry)
		if (n->rule.ptr == rule && n->af == af &&
		    ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) ||
		    (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0)))
			break;
	if (n != NULL) {
		n->states++;
		PF_HASHROW_UNLOCK(sh);
	} else if (returnlocked == 0)
		PF_HASHROW_UNLOCK(sh);

	return (n);
}
static int
pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
    struct pf_addr *src, sa_family_t af)
{

	KASSERT((rule->rule_flag & PFRULE_RULESRCTRACK ||
	    rule->rpool.opts & PF_POOL_STICKYADDR),
	    ("%s for non-tracking rule %p", __func__, rule));

	if (*sn == NULL)
		*sn = pf_find_src_node(src, rule, af, 1);

	if (*sn == NULL) {
		struct pf_srchash *sh = &V_pf_srchash[pf_hashsrc(src, af)];

		PF_HASHROW_ASSERT(sh);

		if (!rule->max_src_nodes ||
		    counter_u64_fetch(rule->src_nodes) < rule->max_src_nodes)
			(*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO);
		else
			counter_u64_add(V_pf_status.lcounters[LCNT_SRCNODES],
			    1);
		if ((*sn) == NULL) {
			PF_HASHROW_UNLOCK(sh);
			return (-1);
		}

		pf_init_threshold(&(*sn)->conn_rate,
		    rule->max_src_conn_rate.limit,
		    rule->max_src_conn_rate.seconds);

		(*sn)->af = af;
		(*sn)->rule.ptr = rule;
		PF_ACPY(&(*sn)->addr, src, af);
		LIST_INSERT_HEAD(&sh->nodes, *sn, entry);
		(*sn)->creation = time_uptime;
		(*sn)->ruletype = rule->action;
		(*sn)->states = 1;
		if ((*sn)->rule.ptr != NULL)
			counter_u64_add((*sn)->rule.ptr->src_nodes, 1);
		PF_HASHROW_UNLOCK(sh);
		counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_INSERT], 1);
	} else {
		if (rule->max_src_states &&
		    (*sn)->states >= rule->max_src_states) {
			counter_u64_add(V_pf_status.lcounters[LCNT_SRCSTATES],
			    1);
			return (-1);
		}
	}
	return (0);
}
void
pf_unlink_src_node(struct pf_src_node *src)
{

	PF_HASHROW_ASSERT(&V_pf_srchash[pf_hashsrc(&src->addr, src->af)]);
	LIST_REMOVE(src, entry);
	if (src->rule.ptr)
		counter_u64_add(src->rule.ptr->src_nodes, -1);
}
u_int
pf_free_src_nodes(struct pf_src_node_list *head)
{
	struct pf_src_node *sn, *tmp;
	u_int count = 0;

	LIST_FOREACH_SAFE(sn, head, entry, tmp) {
		uma_zfree(V_pf_sources_z, sn);
		count++;
	}

	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], count);

	return (count);
}
static void
pf_mtag_initialize()
{

	pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) +
	    sizeof(struct pf_mtag), NULL, NULL, pf_mtag_uminit, NULL,
	    UMA_ALIGN_PTR, 0);
}

/* Per-vnet data storage structures initialization. */
void
pf_initialize()
{
	struct pf_keyhash	*kh;
	struct pf_idhash	*ih;
	struct pf_srchash	*sh;
	u_int i;

	if (pf_hashsize == 0 || !powerof2(pf_hashsize))
		pf_hashsize = PF_HASHSIZ;
	if (pf_srchashsize == 0 || !powerof2(pf_srchashsize))
		pf_srchashsize = PF_HASHSIZ / 4;

	V_pf_hashseed = arc4random();
	/* States and state keys storage. */
	V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_state),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z;
	uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT);
	uma_zone_set_warning(V_pf_state_z, "PF states limit reached");

	V_pf_state_key_z = uma_zcreate("pf state keys",
	    sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	V_pf_keyhash = malloc(pf_hashsize * sizeof(struct pf_keyhash),
	    M_PFHASH, M_WAITOK | M_ZERO);
	V_pf_idhash = malloc(pf_hashsize * sizeof(struct pf_idhash),
	    M_PFHASH, M_WAITOK | M_ZERO);
	pf_hashmask = pf_hashsize - 1;
	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
	    i++, kh++, ih++) {
		mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK);
		mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF);
	}

	/* Source nodes. */
	V_pf_sources_z = uma_zcreate("pf source nodes",
	    sizeof(struct pf_src_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    0);
	V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z;
	uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT);
	uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached");
	V_pf_srchash = malloc(pf_srchashsize * sizeof(struct pf_srchash),
	    M_PFHASH, M_WAITOK|M_ZERO);
	pf_srchashmask = pf_srchashsize - 1;
	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++)
		mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);
	/* ALTQ */
	TAILQ_INIT(&V_pf_altqs[0]);
	TAILQ_INIT(&V_pf_altqs[1]);
	TAILQ_INIT(&V_pf_pabuf);
	V_pf_altqs_active = &V_pf_altqs[0];
	V_pf_altqs_inactive = &V_pf_altqs[1];

	/* Send & overload+flush queues. */
	STAILQ_INIT(&V_pf_sendqueue);
	SLIST_INIT(&V_pf_overloadqueue);
	TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, curvnet);

	/* Unlinked, but may be referenced rules. */
	TAILQ_INIT(&V_pf_unlinked_rules);
}
static void
pf_mtag_cleanup()
{

	uma_zdestroy(pf_mtag_z);
}
void
pf_cleanup()
{
	struct pf_keyhash	*kh;
	struct pf_idhash	*ih;
	struct pf_srchash	*sh;
	struct pf_send_entry	*pfse, *next;
	u_int i;

	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
	    i++, kh++, ih++) {
		KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
		    __func__));
		KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty",
		    __func__));
		mtx_destroy(&kh->lock);
		mtx_destroy(&ih->lock);
	}
	free(V_pf_keyhash, M_PFHASH);
	free(V_pf_idhash, M_PFHASH);

	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
		KASSERT(LIST_EMPTY(&sh->nodes),
		    ("%s: source node hash not empty", __func__));
		mtx_destroy(&sh->lock);
	}
	free(V_pf_srchash, M_PFHASH);

	STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) {
		m_freem(pfse->pfse_m);
		free(pfse, M_PFTEMP);
	}

	uma_zdestroy(V_pf_sources_z);
	uma_zdestroy(V_pf_state_z);
	uma_zdestroy(V_pf_state_key_z);
}
static int
pf_mtag_uminit(void *mem, int size, int how)
{
	struct m_tag *t;

	t = (struct m_tag *)mem;
	t->m_tag_cookie = MTAG_ABI_COMPAT;
	t->m_tag_id = PACKET_TAG_PF;
	t->m_tag_len = sizeof(struct pf_mtag);
	t->m_tag_free = pf_mtag_free;

	return (0);
}
static void
pf_mtag_free(struct m_tag *t)
{

	uma_zfree(pf_mtag_z, t);
}
struct pf_mtag *
pf_get_mtag(struct mbuf *m)
{
	struct m_tag *mtag;

	if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL)
		return ((struct pf_mtag *)(mtag + 1));

	mtag = uma_zalloc(pf_mtag_z, M_NOWAIT);
	if (mtag == NULL)
		return (NULL);
	bzero(mtag + 1, sizeof(struct pf_mtag));
	m_tag_prepend(m, mtag);

	return ((struct pf_mtag *)(mtag + 1));
}
static int
pf_state_key_attach(struct pf_state_key *skw, struct pf_state_key *sks,
    struct pf_state *s)
{
	struct pf_keyhash	*khs, *khw, *kh;
	struct pf_state_key	*sk, *cur;
	struct pf_state		*si, *olds = NULL;
	int idx;

	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
	KASSERT(s->key[PF_SK_WIRE] == NULL, ("%s: state has key", __func__));
	KASSERT(s->key[PF_SK_STACK] == NULL, ("%s: state has key", __func__));

	/*
	 * We need to lock hash slots of both keys. To avoid deadlock
	 * we always lock the slot with lower address first. Unlock order
	 * isn't important.
	 *
	 * We also need to lock ID hash slot before dropping key
	 * locks. On success we return with ID hash slot locked.
	 */

	if (skw == sks) {
		khs = khw = &V_pf_keyhash[pf_hashkey(skw)];
		PF_HASHROW_LOCK(khs);
	} else {
		khs = &V_pf_keyhash[pf_hashkey(sks)];
		khw = &V_pf_keyhash[pf_hashkey(skw)];
		if (khs == khw) {
			PF_HASHROW_LOCK(khs);
		} else if (khs < khw) {
			PF_HASHROW_LOCK(khs);
			PF_HASHROW_LOCK(khw);
		} else {
			PF_HASHROW_LOCK(khw);
			PF_HASHROW_LOCK(khs);
		}
	}

#define	KEYS_UNLOCK()	do {			\
	if (khs != khw) {			\
		PF_HASHROW_UNLOCK(khs);		\
		PF_HASHROW_UNLOCK(khw);		\
	} else					\
		PF_HASHROW_UNLOCK(khs);		\
} while (0)

	/*
	 * First run: start with wire key.
	 */
	sk = skw;
	kh = khw;
	idx = PF_SK_WIRE;

keyattach:
	LIST_FOREACH(cur, &kh->keys, entry)
		if (bcmp(cur, sk, sizeof(struct pf_state_key_cmp)) == 0)
			break;

	if (cur != NULL) {
		/* Key exists. Check for same kif, if none, add to key. */
		TAILQ_FOREACH(si, &cur->states[idx], key_list[idx]) {
			struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(si)];

			PF_HASHROW_LOCK(ih);
			if (si->kif == s->kif &&
			    si->direction == s->direction) {
				if (sk->proto == IPPROTO_TCP &&
				    si->src.state >= TCPS_FIN_WAIT_2 &&
				    si->dst.state >= TCPS_FIN_WAIT_2) {
					/*
					 * New state matches an old >FIN_WAIT_2
					 * state. We can't drop key hash locks,
					 * thus we can't unlink it properly.
					 *
					 * As a workaround we drop it into
					 * TCPS_CLOSED state, schedule purge
					 * ASAP and push it into the very end
					 * of the slot TAILQ, so that it won't
					 * conflict with our new state.
					 */
					si->src.state = si->dst.state =
					    TCPS_CLOSED;
					si->timeout = PFTM_PURGE;
					olds = si;
				} else {
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						printf("pf: %s key attach "
						    "failed on %s: ",
						    (idx == PF_SK_WIRE) ?
						    "wire" : "stack",
						    s->kif->pfik_name);
						pf_print_state_parts(s,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						printf(", existing: ");
						pf_print_state_parts(si,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						printf("\n");
					}
					PF_HASHROW_UNLOCK(ih);
					KEYS_UNLOCK();
					uma_zfree(V_pf_state_key_z, sk);
					if (idx == PF_SK_STACK)
						pf_detach_state(s);
					return (EEXIST); /* collision! */
				}
			}
			PF_HASHROW_UNLOCK(ih);
		}
		uma_zfree(V_pf_state_key_z, sk);
		s->key[idx] = cur;
	} else {
		LIST_INSERT_HEAD(&kh->keys, sk, entry);
		s->key[idx] = sk;
	}

stateattach:
	/* List is sorted, if-bound states before floating. */
	if (s->kif == V_pfi_all)
		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]);
	else
		TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]);

	if (olds) {
		TAILQ_REMOVE(&s->key[idx]->states[idx], olds, key_list[idx]);
		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], olds,
		    key_list[idx]);
		olds = NULL;
	}

	/*
	 * Attach done. Decide whether (and how) a second key
	 * should be attached.
	 */
	if (sks == skw) {
		s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
		idx = PF_SK_STACK;
		sks = NULL;
		goto stateattach;
	} else if (sks != NULL) {
		/*
		 * Continue attaching with stack key.
		 */
		sk = sks;
		kh = khs;
		idx = PF_SK_STACK;
		sks = NULL;
		goto keyattach;
	}

	PF_STATE_LOCK(s);
	KEYS_UNLOCK();

	KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL,
	    ("%s failure", __func__));

	return (0);
}
static void
pf_detach_state(struct pf_state *s)
{
	struct pf_state_key *sks = s->key[PF_SK_STACK];
	struct pf_keyhash *kh;

	if (sks != NULL) {
		kh = &V_pf_keyhash[pf_hashkey(sks)];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_STACK] != NULL)
			pf_state_key_detach(s, PF_SK_STACK);
		/*
		 * If both point to same key, then we are done.
		 */
		if (sks == s->key[PF_SK_WIRE]) {
			pf_state_key_detach(s, PF_SK_WIRE);
			PF_HASHROW_UNLOCK(kh);
			return;
		}
		PF_HASHROW_UNLOCK(kh);
	}

	if (s->key[PF_SK_WIRE] != NULL) {
		kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_WIRE] != NULL)
			pf_state_key_detach(s, PF_SK_WIRE);
		PF_HASHROW_UNLOCK(kh);
	}
}
static void
pf_state_key_detach(struct pf_state *s, int idx)
{
	struct pf_state_key *sk = s->key[idx];
#ifdef INVARIANTS
	struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)];

	PF_HASHROW_ASSERT(kh);
#endif
	TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]);
	s->key[idx] = NULL;

	if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) {
		LIST_REMOVE(sk, entry);
		uma_zfree(V_pf_state_key_z, sk);
	}
}
static int
pf_state_key_ctor(void *mem, int size, void *arg, int flags)
{
	struct pf_state_key *sk = mem;

	bzero(sk, sizeof(struct pf_state_key_cmp));
	TAILQ_INIT(&sk->states[PF_SK_WIRE]);
	TAILQ_INIT(&sk->states[PF_SK_STACK]);

	return (0);
}
struct pf_state_key *
pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr,
	struct pf_addr *daddr, u_int16_t sport, u_int16_t dport)
{
	struct pf_state_key *sk;

	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
	if (sk == NULL)
		return (NULL);

	PF_ACPY(&sk->addr[pd->sidx], saddr, pd->af);
	PF_ACPY(&sk->addr[pd->didx], daddr, pd->af);
	sk->port[pd->sidx] = sport;
	sk->port[pd->didx] = dport;
	sk->proto = pd->proto;
	sk->af = pd->af;

	return (sk);
}
struct pf_state_key *
pf_state_key_clone(struct pf_state_key *orig)
{
	struct pf_state_key *sk;

	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
	if (sk == NULL)
		return (NULL);

	bcopy(orig, sk, sizeof(struct pf_state_key_cmp));

	return (sk);
}
int
pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
    struct pf_state_key *sks, struct pf_state *s)
{
	struct pf_idhash *ih;
	struct pf_state *cur;
	int error;

	KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]),
	    ("%s: sks not pristine", __func__));
	KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]),
	    ("%s: skw not pristine", __func__));
	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));

	s->kif = kif;

	if (s->id == 0 && s->creatorid == 0) {
		/* XXX: should be atomic, but probability of collision low */
		if ((s->id = V_pf_stateid[curcpu]++) == PFID_MAXID)
			V_pf_stateid[curcpu] = 1;
		s->id |= (uint64_t )curcpu << PFID_CPUSHIFT;
		s->id = htobe64(s->id);
		s->creatorid = V_pf_status.hostid;
	}

	/* Returns with ID locked on success. */
	if ((error = pf_state_key_attach(skw, sks, s)) != 0)
		return (error);

	ih = &V_pf_idhash[PF_IDHASH(s)];
	PF_HASHROW_ASSERT(ih);
	LIST_FOREACH(cur, &ih->states, entry)
		if (cur->id == s->id && cur->creatorid == s->creatorid)
			break;

	if (cur != NULL) {
		PF_HASHROW_UNLOCK(ih);
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("pf: state ID collision: "
			    "id: %016llx creatorid: %08x\n",
			    (unsigned long long)be64toh(s->id),
			    ntohl(s->creatorid));
		}
		pf_detach_state(s);
		return (EEXIST);
	}

	LIST_INSERT_HEAD(&ih->states, s, entry);
	/* One for keys, one for ID hash. */
	refcount_init(&s->refs, 2);

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_INSERT], 1);
	if (pfsync_insert_state_ptr != NULL)
		pfsync_insert_state_ptr(s);

	/* Returns locked. */
	return (0);
}
/*
 * Find state by ID: returns with locked row on success.
 */
struct pf_state *
pf_find_state_byid(uint64_t id, uint32_t creatorid)
{
	struct pf_idhash *ih;
	struct pf_state *s;

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	ih = &V_pf_idhash[(be64toh(id) % (pf_hashmask + 1))];

	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(s, &ih->states, entry)
		if (s->id == id && s->creatorid == creatorid)
			break;

	if (s == NULL)
		PF_HASHROW_UNLOCK(ih);

	return (s);
}
/*
 * Find state by key.
 * Returns with ID hash slot locked on success.
 */
static struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
{
	struct pf_keyhash	*kh;
	struct pf_state_key	*sk;
	struct pf_state		*s;
	int idx;

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
			break;
	if (sk == NULL) {
		PF_HASHROW_UNLOCK(kh);
		return (NULL);
	}

	idx = (dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK);

	/* List is sorted, if-bound states before floating ones. */
	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx])
		if (s->kif == V_pfi_all || s->kif == kif) {
			PF_STATE_LOCK(s);
			PF_HASHROW_UNLOCK(kh);
			if (s->timeout >= PFTM_MAX) {
				/*
				 * State is either being processed by
				 * pf_unlink_state() in another thread, or
				 * is scheduled for immediate expiry.
				 */
				PF_STATE_UNLOCK(s);
				return (NULL);
			}
			return (s);
		}
	PF_HASHROW_UNLOCK(kh);

	return (NULL);
}
struct pf_state *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
	struct pf_keyhash	*kh;
	struct pf_state_key	*sk;
	struct pf_state		*s, *ret = NULL;
	int			 idx, inout = 0;

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
			break;
	if (sk == NULL) {
		PF_HASHROW_UNLOCK(kh);
		return (NULL);
	}
	switch (dir) {
	case PF_IN:
		idx = PF_SK_WIRE;
		break;
	case PF_OUT:
		idx = PF_SK_STACK;
		break;
	case PF_INOUT:
		idx = PF_SK_WIRE;
		inout = 1;
		break;
	default:
		panic("%s: dir %u", __func__, dir);
	}
second_run:
	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) {
		if (more == NULL) {
			PF_HASHROW_UNLOCK(kh);
			return (s);
		}

		if (ret)
			(*more)++;
		else
			ret = s;
	}
	if (inout == 1) {
		inout = 0;
		idx = PF_SK_STACK;
		goto second_run;
	}
	PF_HASHROW_UNLOCK(kh);

	return (ret);
}
/* END state table stuff */

static void
pf_send(struct pf_send_entry *pfse)
{

	PF_SENDQ_LOCK();
	STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next);
	PF_SENDQ_UNLOCK();
	swi_sched(V_pf_swi_cookie, 0);
}
void
pf_intr(void *v)
{
	struct pf_send_head queue;
	struct pf_send_entry *pfse, *next;

	CURVNET_SET((struct vnet *)v);

	PF_SENDQ_LOCK();
	queue = V_pf_sendqueue;
	STAILQ_INIT(&V_pf_sendqueue);
	PF_SENDQ_UNLOCK();

	STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) {
		switch (pfse->pfse_type) {
#ifdef INET
		case PFSE_IP:
			ip_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL);
			break;
		case PFSE_ICMP:
			icmp_error(pfse->pfse_m, pfse->icmpopts.type,
			    pfse->icmpopts.code, 0, pfse->icmpopts.mtu);
			break;
#endif /* INET */
#ifdef INET6
		case PFSE_IP6:
			ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL,
			    NULL);
			break;
		case PFSE_ICMP6:
			icmp6_error(pfse->pfse_m, pfse->icmpopts.type,
			    pfse->icmpopts.code, pfse->icmpopts.mtu);
			break;
#endif /* INET6 */
		default:
			panic("%s: unknown type", __func__);
		}
		free(pfse, M_PFTEMP);
	}
	CURVNET_RESTORE();
}
void
pf_purge_thread(void *unused __unused)
{
	VNET_ITERATOR_DECL(vnet_iter);

	sx_xlock(&pf_end_lock);
	while (pf_end_threads == 0) {
		sx_sleep(pf_purge_thread, &pf_end_lock, 0, "pftm", hz / 10);

		VNET_LIST_RLOCK();
		VNET_FOREACH(vnet_iter) {
			CURVNET_SET(vnet_iter);

			/* Wait until V_pf_default_rule is initialized. */
			if (V_pf_vnet_active == 0) {
				CURVNET_RESTORE();
				continue;
			}

			/*
			 * Process 1/interval fraction of the state
			 * table every run.
			 */
			V_pf_purge_idx =
			    pf_purge_expired_states(V_pf_purge_idx, pf_hashmask /
			    (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));

			/*
			 * Purge other expired types every
			 * PFTM_INTERVAL seconds.
			 */
			if (V_pf_purge_idx == 0) {
				/*
				 * Order is important:
				 * - states and src nodes reference rules
				 * - states and rules reference kifs
				 */
				pf_purge_expired_fragments();
				pf_purge_expired_src_nodes();
				pf_purge_unlinked_rules();
				pfi_kif_purge();
			}
			CURVNET_RESTORE();
		}
		VNET_LIST_RUNLOCK();
	}

	pf_end_threads++;
	sx_xunlock(&pf_end_lock);
	kproc_exit(0);
}
void
pf_unload_vnet_purge(void)
{

	/*
	 * To clean up all kifs and rules we need two runs: the first
	 * clears the reference flags, so that pf_purge_expired_states()
	 * no longer raises them, and the second run frees.
	 */
	pf_purge_unlinked_rules();
	pfi_kif_purge();

	/*
	 * Now purge everything.
	 */
	pf_purge_expired_states(0, pf_hashmask);
	pf_purge_fragments(UINT_MAX);
	pf_purge_expired_src_nodes();

	/*
	 * Now all kifs & rules should be unreferenced,
	 * and thus can be successfully freed.
	 */
	pf_purge_unlinked_rules();
	pfi_kif_purge();
}
u_int32_t
pf_state_expires(const struct pf_state *state)
{
	u_int32_t	timeout;
	u_int32_t	start;
	u_int32_t	end;
	u_int32_t	states;

	/* handle all PFTM_* > PFTM_MAX here */
	if (state->timeout == PFTM_PURGE)
		return (time_uptime);

	KASSERT(state->timeout != PFTM_UNLINKED,
	    ("pf_state_expires: timeout == PFTM_UNLINKED"));
	KASSERT((state->timeout < PFTM_MAX),
	    ("pf_state_expires: timeout > PFTM_MAX"));

	timeout = state->rule.ptr->timeout[state->timeout];
	if (!timeout)
		timeout = V_pf_default_rule.timeout[state->timeout];

	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
	if (start) {
		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
		states = counter_u64_fetch(state->rule.ptr->states_cur);
	} else {
		start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
		end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
		states = V_pf_status.states;
	}
	if (end && states > start && start < end) {
		if (states < end)
			return (state->expire + timeout * (end - states) /
			    (end - start));
		else
			return (time_uptime);
	}
	return (state->expire + timeout);
}
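
/*
 * Worked example (illustrative, not part of the original source): with
 * adaptive.start 6000, adaptive.end 12000 and a base timeout of 60s,
 * 9000 tracked states scale the remaining lifetime by
 * (12000 - 9000) / (12000 - 6000) = 1/2, i.e. the state expires 30
 * seconds after state->expire instead of 60.  At or above 12000 states
 * the state is treated as already expired.
 */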
static void
pf_purge_expired_src_nodes()
{
	struct pf_src_node_list	 freelist;
	struct pf_srchash	*sh;
	struct pf_src_node	*cur, *next;
	int i;

	LIST_INIT(&freelist);
	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next)
			if (cur->states == 0 && cur->expire <= time_uptime) {
				pf_unlink_src_node(cur);
				LIST_INSERT_HEAD(&freelist, cur, entry);
			} else if (cur->rule.ptr != NULL)
				cur->rule.ptr->rule_flag |= PFRULE_REFS;
		PF_HASHROW_UNLOCK(sh);
	}

	pf_free_src_nodes(&freelist);

	V_pf_status.src_nodes = uma_zone_get_cur(V_pf_sources_z);
}
static void
pf_src_tree_remove_state(struct pf_state *s)
{
	struct pf_src_node *sn;
	struct pf_srchash *sh;
	u_int32_t timeout;

	timeout = s->rule.ptr->timeout[PFTM_SRC_NODE] ?
	    s->rule.ptr->timeout[PFTM_SRC_NODE] :
	    V_pf_default_rule.timeout[PFTM_SRC_NODE];

	if (s->src_node != NULL) {
		sn = s->src_node;
		sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
		PF_HASHROW_LOCK(sh);
		if (s->src.tcp_est)
			--sn->conn;
		if (--sn->states == 0)
			sn->expire = time_uptime + timeout;
		PF_HASHROW_UNLOCK(sh);
	}

	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
		sn = s->nat_src_node;
		sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
		PF_HASHROW_LOCK(sh);
		if (--sn->states == 0)
			sn->expire = time_uptime + timeout;
		PF_HASHROW_UNLOCK(sh);
	}

	s->src_node = s->nat_src_node = NULL;
}
/*
 * Unlink and potentially free a state. May be called with the ID hash
 * row locked, but always returns unlocked, since it needs to go through
 * key hash locking.
 */
int
pf_unlink_state(struct pf_state *s, u_int flags)
{
	struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)];

	if ((flags & PF_ENTER_LOCKED) == 0)
		PF_HASHROW_LOCK(ih);
	else
		PF_HASHROW_ASSERT(ih);

	if (s->timeout == PFTM_UNLINKED) {
		/*
		 * State is being processed
		 * by pf_unlink_state() in
		 * another thread.
		 */
		PF_HASHROW_UNLOCK(ih);
		return (0);	/* XXXGL: undefined actually */
	}

	if (s->src.state == PF_TCPS_PROXY_DST) {
		/* XXX wire key the right one? */
		pf_send_tcp(NULL, s->rule.ptr, s->key[PF_SK_WIRE]->af,
		    &s->key[PF_SK_WIRE]->addr[1],
		    &s->key[PF_SK_WIRE]->addr[0],
		    s->key[PF_SK_WIRE]->port[1],
		    s->key[PF_SK_WIRE]->port[0],
		    s->src.seqhi, s->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, s->tag, NULL);
	}

	LIST_REMOVE(s, entry);
	pf_src_tree_remove_state(s);

	if (pfsync_delete_state_ptr != NULL)
		pfsync_delete_state_ptr(s);

	STATE_DEC_COUNTERS(s);

	s->timeout = PFTM_UNLINKED;

	PF_HASHROW_UNLOCK(ih);

	pf_detach_state(s);
	refcount_release(&s->refs);

	return (pf_release_state(s));
}
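
/*
 * Illustrative note (not part of the original source): pf_state_insert()
 * starts every state with two references, one for the key hash and one
 * for the ID hash.  pf_unlink_state() drops the key-hash reference
 * explicitly with refcount_release() and then pf_release_state() drops
 * the ID-hash one; whichever caller releases the last reference ends up
 * in pf_free_state() below, which asserts refs == 0.
 */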
void
pf_free_state(struct pf_state *cur)
{

	KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
	KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
	    cur->timeout));

	pf_normalize_tcp_cleanup(cur);
	uma_zfree(V_pf_state_z, cur);
	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_REMOVALS], 1);
}
/*
 * Called only from pf_purge_thread(), thus serialized.
 */
static u_int
pf_purge_expired_states(u_int i, int maxcheck)
{
	struct pf_idhash *ih;
	struct pf_state *s;

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	/*
	 * Go through hash and unlink states that expire now.
	 */
	while (maxcheck > 0) {

		ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (pf_state_expires(s) <= time_uptime) {
				V_pf_status.states -=
				    pf_unlink_state(s, PF_ENTER_LOCKED);
				goto relock;
			}
			s->rule.ptr->rule_flag |= PFRULE_REFS;
			if (s->nat_rule.ptr != NULL)
				s->nat_rule.ptr->rule_flag |= PFRULE_REFS;
			if (s->anchor.ptr != NULL)
				s->anchor.ptr->rule_flag |= PFRULE_REFS;
			s->kif->pfik_flags |= PFI_IFLAG_REFS;
			if (s->rt_kif)
				s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
		}
		PF_HASHROW_UNLOCK(ih);

		/* Return when we hit end of hash. */
		if (++i > pf_hashmask) {
			V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
			return (0);
		}

		maxcheck--;
	}

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	return (i);
}
static void
pf_purge_unlinked_rules()
{
	struct pf_rulequeue tmpq;
	struct pf_rule *r, *r1;

	/*
	 * If the overload task is pending, we'd better skip purging this
	 * time: there is a tiny probability that the task still references
	 * an already unlinked rule.
	 */
	PF_OVERLOADQ_LOCK();
	if (!SLIST_EMPTY(&V_pf_overloadqueue)) {
		PF_OVERLOADQ_UNLOCK();
		return;
	}
	PF_OVERLOADQ_UNLOCK();

	/*
	 * Do naive mark-and-sweep garbage collecting of old rules.
	 * Reference flag is raised by pf_purge_expired_states()
	 * and pf_purge_expired_src_nodes().
	 *
	 * To avoid LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK,
	 * use a temporary queue.
	 */
	TAILQ_INIT(&tmpq);
	PF_UNLNKDRULES_LOCK();
	TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) {
		if (!(r->rule_flag & PFRULE_REFS)) {
			TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries);
			TAILQ_INSERT_TAIL(&tmpq, r, entries);
		} else
			r->rule_flag &= ~PFRULE_REFS;
	}
	PF_UNLNKDRULES_UNLOCK();

	if (!TAILQ_EMPTY(&tmpq)) {
		PF_RULES_WLOCK();
		TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) {
			TAILQ_REMOVE(&tmpq, r, entries);
			pf_free_rule(r);
		}
		PF_RULES_WUNLOCK();
	}
}
void
pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t a = ntohl(addr->addr32[0]);
		printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
		    (a>>8)&255, a&255);
		if (p) {
			p = ntohs(p);
			printf(":%u", p);
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		u_int16_t b;
		u_int8_t i, curstart, curend, maxstart, maxend;
		curstart = curend = maxstart = maxend = 255;
		for (i = 0; i < 8; i++) {
			if (!addr->addr16[i]) {
				if (curstart == 255)
					curstart = i;
				curend = i;
			} else {
				if ((curend - curstart) >
				    (maxend - maxstart)) {
					maxstart = curstart;
					maxend = curend;
				}
				curstart = curend = 255;
			}
		}
		if ((curend - curstart) >
		    (maxend - maxstart)) {
			maxstart = curstart;
			maxend = curend;
		}
		for (i = 0; i < 8; i++) {
			if (i >= maxstart && i <= maxend) {
				if (i == 0)
					printf(":");
				if (i == maxend)
					printf(":");
			} else {
				b = ntohs(addr->addr16[i]);
				printf("%x", b);
				if (i < 7)
					printf(":");
			}
		}
		if (p) {
			p = ntohs(p);
			printf("[%u]", p);
		}
		break;
	}
#endif /* INET6 */
	}
}
void
pf_print_state(struct pf_state *s)
{
	pf_print_state_parts(s, NULL, NULL);
}
void
pf_print_state_parts(struct pf_state *s,
    struct pf_state_key *skwp, struct pf_state_key *sksp)
{
	struct pf_state_key *skw, *sks;
	u_int8_t proto, dir;

	/* Do our best to fill these, but they're skipped if NULL */
	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
	proto = skw ? skw->proto : (sks ? sks->proto : 0);
	dir = s ? s->direction : 0;

	switch (proto) {
	case IPPROTO_IPV4:
		printf("IPv4");
		break;
	case IPPROTO_IPV6:
		printf("IPv6");
		break;
	case IPPROTO_TCP:
		printf("TCP");
		break;
	case IPPROTO_UDP:
		printf("UDP");
		break;
	case IPPROTO_ICMP:
		printf("ICMP");
		break;
	case IPPROTO_ICMPV6:
		printf("ICMPv6");
		break;
	default:
		printf("%u", proto);
		break;
	}
	switch (dir) {
	case PF_IN:
		printf(" in");
		break;
	case PF_OUT:
		printf(" out");
		break;
	}
	if (skw) {
		printf(" wire: ");
		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
		printf(" ");
		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
	}
	if (sks) {
		printf(" stack: ");
		if (sks != skw) {
			pf_print_host(&sks->addr[0], sks->port[0], sks->af);
			printf(" ");
			pf_print_host(&sks->addr[1], sks->port[1], sks->af);
		} else
			printf("-");
	}
	if (s) {
		if (proto == IPPROTO_TCP) {
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->src.seqlo, s->src.seqhi,
			    s->src.max_win, s->src.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->src.wscale & PF_WSCALE_MASK);
			printf("]");
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->dst.seqlo, s->dst.seqhi,
			    s->dst.max_win, s->dst.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->dst.wscale & PF_WSCALE_MASK);
			printf("]");
		}
		printf(" %u:%u", s->src.state, s->dst.state);
	}
}
void
pf_print_flags(u_int8_t f)
{
	if (f)
		printf(" ");
	if (f & TH_FIN)
		printf("F");
	if (f & TH_SYN)
		printf("S");
	if (f & TH_RST)
		printf("R");
	if (f & TH_PUSH)
		printf("P");
	if (f & TH_ACK)
		printf("A");
	if (f & TH_URG)
		printf("U");
	if (f & TH_ECE)
		printf("E");
	if (f & TH_CWR)
		printf("W");
}
#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)
void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {

		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
		if (cur->af != prev->af)
			PF_SET_SKIP_STEPS(PF_SKIP_AF);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
		if (cur->src.neg != prev->src.neg ||
		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
		if (cur->src.port[0] != prev->src.port[0] ||
		    cur->src.port[1] != prev->src.port[1] ||
		    cur->src.port_op != prev->src.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
		if (cur->dst.neg != prev->dst.neg ||
		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
		if (cur->dst.port[0] != prev->dst.port[0] ||
		    cur->dst.port[1] != prev->dst.port[1] ||
		    cur->dst.port_op != prev->dst.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}
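
/*
 * Illustrative note (not part of the original source): skip steps let
 * the rule evaluator jump over runs of consecutive rules that share the
 * same attribute.  E.g. if rules 10..50 all say "on em0" and a packet
 * arrives on em1, skip[PF_SKIP_IFP] on rule 10 points directly at rule
 * 51, so the 40 rules in between are never compared against the packet.
 */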
static int
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
{
	if (aw1->type != aw2->type)
		return (1);
	switch (aw1->type) {
	case PF_ADDR_ADDRMASK:
	case PF_ADDR_RANGE:
		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, AF_INET6))
			return (1);
		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, AF_INET6))
			return (1);
		return (0);
	case PF_ADDR_DYNIFTL:
		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		return (0);
	case PF_ADDR_TABLE:
		return (aw1->p.tbl != aw2->p.tbl);
	default:
		printf("invalid address type: %d\n", aw1->type);
		return (1);
	}
}
/*
 * Checksum updates are a little complicated because the checksum in the TCP/UDP
 * header isn't always a full checksum. In some cases (i.e. output) it's a
 * pseudo-header checksum, which is a partial checksum over src/dst IP
 * addresses, protocol number and length.
 *
 * That means we have the following cases:
 *  * Input or forwarding: we don't have TSO, the checksum fields are full
 *    checksums, we need to update the checksum whenever we change anything.
 *  * Output (i.e. the checksum is a pseudo-header checksum):
 *    x The field being updated is src/dst address or affects the length of
 *      the packet. We need to update the pseudo-header checksum (note that this
 *      checksum is not ones' complement).
 *    x Some other field is being modified (e.g. src/dst port numbers): We
 *      don't have to update anything.
 */
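
/*
 * Worked example (illustrative, not part of the original source): a full
 * Internet checksum can be updated incrementally in the style of RFC 1624.
 * If one 16-bit word changes from old to new then, in ones' complement
 * arithmetic, cksum' = cksum + old - new with any carry folded back in.
 * E.g. rewriting a source port 0x0050 to 0x1f90 adds 0x0050 - 0x1f90 to
 * the running sum, which is exactly what pf_cksum_fixup() below computes
 * word by word.
 */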
static u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
	u_int32_t l;

	if (udp && !cksum)
		return (0x0000);
	l = cksum + old - new;
	l = (l >> 16) + (l & 65535);
	l = l & 65535;
	if (udp && !l)
		return (0xFFFF);
	return (l);
}
static u_int16_t
pf_proto_cksum_fixup(struct mbuf *m, u_int16_t cksum, u_int16_t old,
    u_int16_t new, u_int8_t udp)
{
	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
		return (cksum);

	return (pf_cksum_fixup(cksum, old, new, udp));
}
static void
pf_change_ap(struct mbuf *m, struct pf_addr *a, u_int16_t *p, u_int16_t *ic,
    u_int16_t *pc, struct pf_addr *an, u_int16_t pn, u_int8_t u,
    sa_family_t af)
{
	struct pf_addr	ao;
	u_int16_t	po = *p;

	PF_ACPY(&ao, a, af);
	PF_ACPY(a, an, af);

	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
		*pc = ~*pc;

	*p = pn;

	switch (af) {
#ifdef INET
	case AF_INET:
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    ao.addr16[0], an->addr16[0], 0),
		    ao.addr16[1], an->addr16[1], 0);

		*pc = pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u);

		*pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    ao.addr16[2], an->addr16[2], u),
		    ao.addr16[3], an->addr16[3], u),
		    ao.addr16[4], an->addr16[4], u),
		    ao.addr16[5], an->addr16[5], u),
		    ao.addr16[6], an->addr16[6], u),
		    ao.addr16[7], an->addr16[7], u);

		*pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
		break;
#endif /* INET6 */
	}

	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA |
	    CSUM_DELAY_DATA_IPV6)) {
		*pc = ~*pc;
		if (!*pc)
			*pc = 0xffff;
	}
}
/* Changes a u_int32_t. Uses a void * so there are no align restrictions */
void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
	u_int32_t	ao;

	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));
	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
	    ao % 65536, an % 65536, u);
}
void
pf_change_proto_a(struct mbuf *m, void *a, u_int16_t *c, u_int32_t an, u_int8_t udp)
{
	u_int32_t	ao;

	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));

	*c = pf_proto_cksum_fixup(m,
	    pf_proto_cksum_fixup(m, *c, ao / 65536, an / 65536, udp),
	    ao % 65536, an % 65536, udp);
}
#ifdef INET6
static void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
	struct pf_addr	ao;

	PF_ACPY(&ao, a, AF_INET6);
	PF_ACPY(a, an, AF_INET6);

	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(*c,
	    ao.addr16[0], an->addr16[0], u),
	    ao.addr16[1], an->addr16[1], u),
	    ao.addr16[2], an->addr16[2], u),
	    ao.addr16[3], an->addr16[3], u),
	    ao.addr16[4], an->addr16[4], u),
	    ao.addr16[5], an->addr16[5], u),
	    ao.addr16[6], an->addr16[6], u),
	    ao.addr16[7], an->addr16[7], u);
}
#endif /* INET6 */
static void
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
{
	struct pf_addr	oia, ooa;

	PF_ACPY(&oia, ia, af);
	if (oa)
		PF_ACPY(&ooa, oa, af);

	/* Change inner protocol port, fix inner protocol checksum. */
	if (ip != NULL) {
		u_int16_t	oip = *ip;
		u_int32_t	opc;

		if (pc != NULL)
			opc = *pc;
		*ip = np;
		if (pc != NULL)
			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
		if (pc != NULL)
			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
	}
	/* Change inner ip address, fix inner ip and icmp checksums. */
	PF_ACPY(ia, na, af);
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t	 oh2c = *h2c;

		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], u),
		    oia.addr16[1], ia->addr16[1], u),
		    oia.addr16[2], ia->addr16[2], u),
		    oia.addr16[3], ia->addr16[3], u),
		    oia.addr16[4], ia->addr16[4], u),
		    oia.addr16[5], ia->addr16[5], u),
		    oia.addr16[6], ia->addr16[6], u),
		    oia.addr16[7], ia->addr16[7], u);
		break;
#endif /* INET6 */
	}
	/* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
	if (oa) {
		PF_ACPY(oa, na, af);
		switch (af) {
#ifdef INET
		case AF_INET:
			*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
			    ooa.addr16[0], oa->addr16[0], 0),
			    ooa.addr16[1], oa->addr16[1], 0);
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(*ic,
			    ooa.addr16[0], oa->addr16[0], u),
			    ooa.addr16[1], oa->addr16[1], u),
			    ooa.addr16[2], oa->addr16[2], u),
			    ooa.addr16[3], oa->addr16[3], u),
			    ooa.addr16[4], oa->addr16[4], u),
			    ooa.addr16[5], oa->addr16[5], u),
			    ooa.addr16[6], oa->addr16[6], u),
			    ooa.addr16[7], oa->addr16[7], u);
			break;
#endif /* INET6 */
		}
	}
}
/*
 * Need to modulate the sequence numbers in the TCP SACK option
 * (credits to Krzysztof Pfaff for report and patch)
 */
static int
pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *dst)
{
	int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
	u_int8_t opts[TCP_MAXOLEN], *opt = opts;
	int copyback = 0, i, olen;
	struct sackblk sack;

#define	TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
	if (hlen < TCPOLEN_SACKLEN ||
	    !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
		return 0;

	while (hlen >= TCPOLEN_SACKLEN) {
		olen = opt[1];
		switch (*opt) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
		case TCPOPT_NOP:
			opt++;
			hlen--;
			break;
		case TCPOPT_SACK:
			if (olen > hlen)
				olen = hlen;
			if (olen >= TCPOLEN_SACKLEN) {
				for (i = 2; i + TCPOLEN_SACK <= olen;
				    i += TCPOLEN_SACK) {
					memcpy(&sack, &opt[i], sizeof(sack));
					pf_change_proto_a(m, &sack.start, &th->th_sum,
					    htonl(ntohl(sack.start) - dst->seqdiff), 0);
					pf_change_proto_a(m, &sack.end, &th->th_sum,
					    htonl(ntohl(sack.end) - dst->seqdiff), 0);
					memcpy(&opt[i], &sack, sizeof(sack));
				}
				copyback = 1;
			}
			/* FALLTHROUGH */
		default:
			if (olen < 2)
				olen = 2;
			hlen -= olen;
			opt += olen;
		}
	}

	if (copyback)
		m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts);
	return (copyback);
}
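
/*
 * Illustrative note (not part of the original source): when a peer's
 * sequence numbers are modulated by dst->seqdiff, SACK blocks returned
 * by that peer still reference the modulated numbering and must be
 * shifted back by the same offset.  E.g. with dst->seqdiff == 1000, a
 * SACK block covering [5000, 6000) is rewritten to [4000, 5000) so it
 * lines up with the sequence space the protected endpoint actually used.
 */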
static void
pf_send_tcp(struct mbuf *replyto, const struct pf_rule *r, sa_family_t af,
    const struct pf_addr *saddr, const struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
    u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
    u_int16_t rtag, struct ifnet *ifp)
{
	struct pf_send_entry *pfse;
	struct mbuf	*m;
	int		 len, tlen;
#ifdef INET
	struct ip	*h = NULL;
#endif /* INET */
#ifdef INET6
	struct ip6_hdr	*h6 = NULL;
#endif /* INET6 */
	struct tcphdr	*th;
	char		*opt;
	struct pf_mtag  *pf_mtag;

	len = 0;

	/* maximum segment size tcp option */
	tlen = sizeof(struct tcphdr);
	if (mss)
		tlen += 4;

	switch (af) {
#ifdef INET
	case AF_INET:
		len = sizeof(struct ip) + tlen;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		len = sizeof(struct ip6_hdr) + tlen;
		break;
#endif /* INET6 */
	default:
		panic("%s: unsupported af %d", __func__, af);
	}

	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
	if (pfse == NULL)
		return;
	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(pfse, M_PFTEMP);
		return;
	}
#ifdef MAC
	mac_netinet_firewall_send(m);
#endif
	if ((pf_mtag = pf_get_mtag(m)) == NULL) {
		free(pfse, M_PFTEMP);
		m_freem(m);
		return;
	}
	if (tag)
		m->m_flags |= M_SKIP_FIREWALL;
	pf_mtag->tag = rtag;

	if (r != NULL && r->rtableid >= 0)
		M_SETFIB(m, r->rtableid);

#ifdef ALTQ
	if (r != NULL && r->qid) {
		pf_mtag->qid = r->qid;

		/* add hints for ecn */
		pf_mtag->hdr = mtod(m, struct ip *);
	}
#endif /* ALTQ */
	m->m_data += max_linkhdr;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	bzero(m->m_data, len);
	switch (af) {
#ifdef INET
	case AF_INET:
		h = mtod(m, struct ip *);

		/* IP header fields included in the TCP checksum */
		h->ip_p = IPPROTO_TCP;
		h->ip_len = htons(tlen);
		h->ip_src.s_addr = saddr->v4.s_addr;
		h->ip_dst.s_addr = daddr->v4.s_addr;

		th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		h6 = mtod(m, struct ip6_hdr *);

		/* IP header fields included in the TCP checksum */
		h6->ip6_nxt = IPPROTO_TCP;
		h6->ip6_plen = htons(tlen);
		memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
		memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));

		th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
		break;
#endif /* INET6 */
	}

	/* TCP header */
	th->th_sport = sport;
	th->th_dport = dport;
	th->th_seq = htonl(seq);
	th->th_ack = htonl(ack);
	th->th_off = tlen >> 2;
	th->th_flags = flags;
	th->th_win = htons(win);

	if (mss) {
		opt = (char *)(th + 1);
		opt[0] = TCPOPT_MAXSEG;
		opt[1] = 4;
		HTONS(mss);
		bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
	}

	switch (af) {
#ifdef INET
	case AF_INET:
		/* TCP checksum */
		th->th_sum = in_cksum(m, len);

		/* Finish the IP header */
		h->ip_v = 4;
		h->ip_hl = sizeof(*h) >> 2;
		h->ip_tos = IPTOS_LOWDELAY;
		h->ip_off = htons(V_path_mtu_discovery ? IP_DF : 0);
		h->ip_len = htons(len);
		h->ip_ttl = ttl ? ttl : V_ip_defttl;
		h->ip_sum = 0;

		pfse->pfse_type = PFSE_IP;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		/* TCP checksum */
		th->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr), tlen);

		h6->ip6_vfc |= IPV6_VERSION;
		h6->ip6_hlim = IPV6_DEFHLIM;

		pfse->pfse_type = PFSE_IP6;
		break;
#endif /* INET6 */
	}
	pfse->pfse_m = m;
	pf_send(pfse);
}
static void
pf_ieee8021q_setpcp(struct mbuf *m, u_int8_t prio)
{
	struct m_tag *mtag;

	KASSERT(prio <= PF_PRIO_MAX,
	    ("%s with invalid pcp", __func__));

	mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_OUT, NULL);
	if (mtag == NULL) {
		mtag = m_tag_alloc(MTAG_8021Q, MTAG_8021Q_PCP_OUT,
		    sizeof(uint8_t), M_NOWAIT);
		if (mtag == NULL)
			return;
		m_tag_prepend(m, mtag);
	}

	*(uint8_t *)(mtag + 1) = prio;
}
static int
pf_match_ieee8021q_pcp(u_int8_t prio, struct mbuf *m)
{
	struct m_tag *mtag;
	u_int8_t mpcp;

	mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL);
	if (mtag == NULL)
		return (0);

	if (prio == PF_PRIO_ZERO)
		prio = 0;

	mpcp = *(uint8_t *)(mtag + 1);

	return (mpcp == prio);
}
static void
pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
    struct pf_rule *r)
{
	struct pf_send_entry *pfse;
	struct mbuf *m0;
	struct pf_mtag *pf_mtag;

	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
	if (pfse == NULL)
		return;

	if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) {
		free(pfse, M_PFTEMP);
		return;
	}

	if ((pf_mtag = pf_get_mtag(m0)) == NULL) {
		free(pfse, M_PFTEMP);
		return;
	}
	/* XXX: revisit */
	m0->m_flags |= M_SKIP_FIREWALL;

	if (r->rtableid >= 0)
		M_SETFIB(m0, r->rtableid);

#ifdef ALTQ
	if (r->qid) {
		pf_mtag->qid = r->qid;
		/* add hints for ecn */
		pf_mtag->hdr = mtod(m0, struct ip *);
	}
#endif /* ALTQ */

	switch (af) {
#ifdef INET
	case AF_INET:
		pfse->pfse_type = PFSE_ICMP;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		pfse->pfse_type = PFSE_ICMP6;
		break;
#endif /* INET6 */
	}
	pfse->pfse_m = m0;
	pfse->icmpopts.type = type;
	pfse->icmpopts.code = code;
	pf_send(pfse);
}
/*
 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
 * If n is 0, they match if they are equal. If n is != 0, they match if they
 * are different.
 */
int
pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
    struct pf_addr *b, sa_family_t af)
{
	int	match = 0;

	switch (af) {
#ifdef INET
	case AF_INET:
		if ((a->addr32[0] & m->addr32[0]) ==
		    (b->addr32[0] & m->addr32[0]))
			match++;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (((a->addr32[0] & m->addr32[0]) ==
		     (b->addr32[0] & m->addr32[0])) &&
		    ((a->addr32[1] & m->addr32[1]) ==
		     (b->addr32[1] & m->addr32[1])) &&
		    ((a->addr32[2] & m->addr32[2]) ==
		     (b->addr32[2] & m->addr32[2])) &&
		    ((a->addr32[3] & m->addr32[3]) ==
		     (b->addr32[3] & m->addr32[3])))
			match++;
		break;
#endif /* INET6 */
	}
	if (match) {
		if (n)
			return (0);
		else
			return (1);
	} else {
		if (n)
			return (1);
		else
			return (0);
	}
}
/*
 * Return 1 if b <= a <= e, otherwise return 0.
 */
int
pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
    struct pf_addr *a, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		if ((ntohl(a->addr32[0]) < ntohl(b->addr32[0])) ||
		    (ntohl(a->addr32[0]) > ntohl(e->addr32[0])))
			return (0);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		int	i;

		/* check a >= b */
		for (i = 0; i < 4; ++i)
			if (ntohl(a->addr32[i]) > ntohl(b->addr32[i]))
				break;
			else if (ntohl(a->addr32[i]) < ntohl(b->addr32[i]))
				return (0);
		/* check a <= e */
		for (i = 0; i < 4; ++i)
			if (ntohl(a->addr32[i]) < ntohl(e->addr32[i]))
				break;
			else if (ntohl(a->addr32[i]) > ntohl(e->addr32[i]))
				return (0);
		break;
	}
#endif /* INET6 */
	}
	return (1);
}
2649 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
2653 return ((p > a1) && (p < a2));
2655 return ((p < a1) || (p > a2));
2657 return ((p >= a1) && (p <= a2));
2671 return (0); /* never reached */
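/*
 * For reference (a sketch; only the range cases are visible above, the
 * rest are assumed from the pf.conf operator set):
 *
 *	PF_OP_IRG	a1 <  p && p <  a2	exclusive range
 *	PF_OP_XRG	p  < a1 || p >  a2	everything outside the range
 *	PF_OP_RNG	a1 <= p && p <= a2	inclusive range
 *	PF_OP_EQ/NE/LT/LE/GT/GE: the obvious comparison of p against a1
 *
 * With every defined op returning from its own case, the final return
 * is presumably unreachable for well-formed rules, hence the comment.
 */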
2675 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
2680 return (pf_match(op, a1, a2, p));
2684 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
2686 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2688 return (pf_match(op, a1, a2, u));
2692 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
2694 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2696 return (pf_match(op, a1, a2, g));
2700 pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag, int mtag)
2705 return ((!r->match_tag_not && r->match_tag == *tag) ||
2706 (r->match_tag_not && r->match_tag != *tag));
2710 pf_tag_packet(struct mbuf *m, struct pf_pdesc *pd, int tag)
2713 KASSERT(tag > 0, ("%s: tag %d", __func__, tag));
2715 if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(m)) == NULL))
2718 pd->pf_mtag->tag = tag;
2723 #define PF_ANCHOR_STACKSIZE 32
2724 struct pf_anchor_stackframe {
2725 struct pf_ruleset *rs;
2726 struct pf_rule *r; /* XXX: + match bit */
2727 struct pf_anchor *child;
2731 * XXX: We rely on malloc(9) returning pointer-aligned addresses.
2733 #define PF_ANCHORSTACK_MATCH 0x00000001
2734 #define PF_ANCHORSTACK_MASK (PF_ANCHORSTACK_MATCH)
2736 #define PF_ANCHOR_MATCH(f) ((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH)
2737 #define PF_ANCHOR_RULE(f) (struct pf_rule *) \
2738 ((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK)
2739 #define PF_ANCHOR_SET_MATCH(f) do { (f)->r = (void *) \
2740 ((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH); \
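/*
 * Note: the macros above store the per-frame "match" flag in bit 0 of
 * the rule pointer itself: PF_ANCHOR_SET_MATCH() ORs the bit in and
 * PF_ANCHOR_RULE() masks it back out. This only works because malloc(9)
 * returns sufficiently aligned storage (the XXX above), so bit 0 of a
 * valid rule pointer is always clear.
 */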
2744 pf_step_into_anchor(struct pf_anchor_stackframe *stack, int *depth,
2745 struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
2748 struct pf_anchor_stackframe *f;
2754 if (*depth >= PF_ANCHOR_STACKSIZE) {
2755 printf("%s: anchor stack overflow on %s\n",
2756 __func__, (*r)->anchor->name);
2757 *r = TAILQ_NEXT(*r, entries);
2759 } else if (*depth == 0 && a != NULL)
2761 f = stack + (*depth)++;
2764 if ((*r)->anchor_wildcard) {
2765 struct pf_anchor_node *parent = &(*r)->anchor->children;
2767 if ((f->child = RB_MIN(pf_anchor_node, parent)) == NULL) {
2771 *rs = &f->child->ruleset;
2774 *rs = &(*r)->anchor->ruleset;
2776 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2780 pf_step_out_of_anchor(struct pf_anchor_stackframe *stack, int *depth,
2781 struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
2784 struct pf_anchor_stackframe *f;
2793 f = stack + *depth - 1;
2794 fr = PF_ANCHOR_RULE(f);
2795 if (f->child != NULL) {
2796 struct pf_anchor_node *parent;
2799 * This block traverses
2800 * a wildcard anchor.
2802 parent = &fr->anchor->children;
2803 if (match != NULL && *match) {
2805 * If any of "*" matched, then
2806 * "foo/ *" matched, mark frame
2809 PF_ANCHOR_SET_MATCH(f);
2812 f->child = RB_NEXT(pf_anchor_node, parent, f->child);
2813 if (f->child != NULL) {
2814 *rs = &f->child->ruleset;
2815 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2823 if (*depth == 0 && a != NULL)
2826 if (PF_ANCHOR_MATCH(f) || (match != NULL && *match))
2828 *r = TAILQ_NEXT(fr, entries);
2829 } while (*r == NULL);
2836 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
2837 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
2842 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2843 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2847 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2848 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2849 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
2850 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
2851 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
2852 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
2853 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
2854 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
2860 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
2865 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
2869 if (addr->addr32[3] == 0xffffffff) {
2870 addr->addr32[3] = 0;
2871 if (addr->addr32[2] == 0xffffffff) {
2872 addr->addr32[2] = 0;
2873 if (addr->addr32[1] == 0xffffffff) {
2874 addr->addr32[1] = 0;
2876 htonl(ntohl(addr->addr32[0]) + 1);
2879 htonl(ntohl(addr->addr32[1]) + 1);
2882 htonl(ntohl(addr->addr32[2]) + 1);
2885 htonl(ntohl(addr->addr32[3]) + 1);
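/*
 * Worked example (illustrative): incrementing 2001:db8::ffff:ffff clears
 * addr32[3], finds addr32[2] != 0xffffffff, and bumps it, yielding
 * 2001:db8::1:0:0 -- a ripple-carry increment across the four words,
 * done in host byte order and stored back in network byte order.
 */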
2892 pf_socket_lookup(int direction, struct pf_pdesc *pd, struct mbuf *m)
2894 struct pf_addr *saddr, *daddr;
2895 u_int16_t sport, dport;
2896 struct inpcbinfo *pi;
2899 pd->lookup.uid = UID_MAX;
2900 pd->lookup.gid = GID_MAX;
2902 switch (pd->proto) {
2904 if (pd->hdr.tcp == NULL)
2906 sport = pd->hdr.tcp->th_sport;
2907 dport = pd->hdr.tcp->th_dport;
2911 if (pd->hdr.udp == NULL)
2913 sport = pd->hdr.udp->uh_sport;
2914 dport = pd->hdr.udp->uh_dport;
2920 if (direction == PF_IN) {
2935 inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4,
2936 dport, INPLOOKUP_RLOCKPCB, NULL, m);
2938 inp = in_pcblookup_mbuf(pi, saddr->v4, sport,
2939 daddr->v4, dport, INPLOOKUP_WILDCARD |
2940 INPLOOKUP_RLOCKPCB, NULL, m);
2948 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6,
2949 dport, INPLOOKUP_RLOCKPCB, NULL, m);
2951 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport,
2952 &daddr->v6, dport, INPLOOKUP_WILDCARD |
2953 INPLOOKUP_RLOCKPCB, NULL, m);
2963 INP_RLOCK_ASSERT(inp);
2964 pd->lookup.uid = inp->inp_cred->cr_uid;
2965 pd->lookup.gid = inp->inp_cred->cr_groups[0];
2972 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
2976 u_int8_t *opt, optlen;
2977 u_int8_t wscale = 0;
2979 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
2980 if (hlen <= sizeof(struct tcphdr))
2982 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
2984 opt = hdr + sizeof(struct tcphdr);
2985 hlen -= sizeof(struct tcphdr);
2995 if (wscale > TCP_MAX_WINSHIFT)
2996 wscale = TCP_MAX_WINSHIFT;
2997 wscale |= PF_WSCALE_FLAG;
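/*
 * Note: the returned value carries PF_WSCALE_FLAG as a marker that a
 * window-scale option was actually seen; callers shift by
 * (wscale & PF_WSCALE_MASK) and test the flag bit separately (see the
 * TH_SYN handling in pf_create_state() and pf_tcp_track_full() below).
 */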
3012 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3016 u_int8_t *opt, optlen;
3017 u_int16_t mss = V_tcp_mssdflt;
3019 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3020 if (hlen <= sizeof(struct tcphdr))
3022 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3024 opt = hdr + sizeof(struct tcphdr);
3025 hlen -= sizeof(struct tcphdr);
3026 while (hlen >= TCPOLEN_MAXSEG) {
3034 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
3050 pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
3053 struct nhop4_basic nh4;
3056 struct nhop6_basic nh6;
3057 struct in6_addr dst6;
3066 hlen = sizeof(struct ip);
3067 if (fib4_lookup_nh_basic(rtableid, addr->v4, 0, 0, &nh4) == 0)
3068 mss = nh4.nh_mtu - hlen - sizeof(struct tcphdr);
3073 hlen = sizeof(struct ip6_hdr);
3074 in6_splitscope(&addr->v6, &dst6, &scopeid);
3075 if (fib6_lookup_nh_basic(rtableid, &dst6, scopeid, 0,0,&nh6)==0)
3076 mss = nh6.nh_mtu - hlen - sizeof(struct tcphdr);
3081 mss = max(V_tcp_mssdflt, mss);
3082 mss = min(mss, offer);
3083 mss = max(mss, 64); /* sanity - at least max opt space */
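/*
 * Worked example (illustrative): with a route MTU of 1500 on IPv4,
 * mss = 1500 - 20 - 20 = 1460; the clamps then keep the result between
 * V_tcp_mssdflt and the peer's offer, with a 64-byte floor so at least
 * maximal TCP option space remains.
 */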
3088 pf_tcp_iss(struct pf_pdesc *pd)
3091 u_int32_t digest[4];
3093 if (V_pf_tcp_secret_init == 0) {
3094 read_random(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret));
3095 MD5Init(&V_pf_tcp_secret_ctx);
3096 MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret,
3097 sizeof(V_pf_tcp_secret));
3098 V_pf_tcp_secret_init = 1;
3101 ctx = V_pf_tcp_secret_ctx;
3103 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short));
3104 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short));
3105 if (pd->af == AF_INET6) {
3106 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
3107 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
3109 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
3110 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
3112 MD5Final((u_char *)digest, &ctx);
3113 V_pf_tcp_iss_off += 4096;
3114 #define ISN_RANDOM_INCREMENT (4096 - 1)
3115 return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) +
3117 #undef ISN_RANDOM_INCREMENT
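/*
 * Note: this is essentially RFC 1948-style ISN selection: an MD5 hash
 * over a boot-time secret and the connection 4-tuple gives a stable
 * per-connection base, V_pf_tcp_iss_off contributes a monotonically
 * increasing component in 4096-wide steps, and the arc4random() term
 * randomizes within each step so ISNs remain hard to predict.
 */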
3121 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
3122 struct pfi_kif *kif, struct mbuf *m, int off, struct pf_pdesc *pd,
3123 struct pf_rule **am, struct pf_ruleset **rsm, struct inpcb *inp)
3125 struct pf_rule *nr = NULL;
3126 struct pf_addr * const saddr = pd->src;
3127 struct pf_addr * const daddr = pd->dst;
3128 sa_family_t af = pd->af;
3129 struct pf_rule *r, *a = NULL;
3130 struct pf_ruleset *ruleset = NULL;
3131 struct pf_src_node *nsn = NULL;
3132 struct tcphdr *th = pd->hdr.tcp;
3133 struct pf_state_key *sk = NULL, *nk = NULL;
3135 int rewrite = 0, hdrlen = 0;
3136 int tag = -1, rtableid = -1;
3140 u_int16_t sport = 0, dport = 0;
3141 u_int16_t bproto_sum = 0, bip_sum = 0;
3142 u_int8_t icmptype = 0, icmpcode = 0;
3143 struct pf_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
3148 INP_LOCK_ASSERT(inp);
3149 pd->lookup.uid = inp->inp_cred->cr_uid;
3150 pd->lookup.gid = inp->inp_cred->cr_groups[0];
3151 pd->lookup.done = 1;
3154 switch (pd->proto) {
3156 sport = th->th_sport;
3157 dport = th->th_dport;
3158 hdrlen = sizeof(*th);
3161 sport = pd->hdr.udp->uh_sport;
3162 dport = pd->hdr.udp->uh_dport;
3163 hdrlen = sizeof(*pd->hdr.udp);
3167 if (pd->af != AF_INET)
3169 sport = dport = pd->hdr.icmp->icmp_id;
3170 hdrlen = sizeof(*pd->hdr.icmp);
3171 icmptype = pd->hdr.icmp->icmp_type;
3172 icmpcode = pd->hdr.icmp->icmp_code;
3174 if (icmptype == ICMP_UNREACH ||
3175 icmptype == ICMP_SOURCEQUENCH ||
3176 icmptype == ICMP_REDIRECT ||
3177 icmptype == ICMP_TIMXCEED ||
3178 icmptype == ICMP_PARAMPROB)
3183 case IPPROTO_ICMPV6:
3186 sport = dport = pd->hdr.icmp6->icmp6_id;
3187 hdrlen = sizeof(*pd->hdr.icmp6);
3188 icmptype = pd->hdr.icmp6->icmp6_type;
3189 icmpcode = pd->hdr.icmp6->icmp6_code;
3191 if (icmptype == ICMP6_DST_UNREACH ||
3192 icmptype == ICMP6_PACKET_TOO_BIG ||
3193 icmptype == ICMP6_TIME_EXCEEDED ||
3194 icmptype == ICMP6_PARAM_PROB)
3199 sport = dport = hdrlen = 0;
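/*
 * Note: ICMP and ICMPv6 queries carry no ports, so the echo/query id is
 * used for both sport and dport above; the same id later becomes both
 * state-key ports (see pf_test_state_icmp()), which is how replies find
 * their state entry.
 */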
3203 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3205 /* check packet for BINAT/NAT/RDR */
3206 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, &sk,
3207 &nk, saddr, daddr, sport, dport, anchor_stack)) != NULL) {
3208 KASSERT(sk != NULL, ("%s: null sk", __func__));
3209 KASSERT(nk != NULL, ("%s: null nk", __func__));
3212 bip_sum = *pd->ip_sum;
3214 switch (pd->proto) {
3216 bproto_sum = th->th_sum;
3217 pd->proto_sum = &th->th_sum;
3219 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3220 nk->port[pd->sidx] != sport) {
3221 pf_change_ap(m, saddr, &th->th_sport, pd->ip_sum,
3222 &th->th_sum, &nk->addr[pd->sidx],
3223 nk->port[pd->sidx], 0, af);
3224 pd->sport = &th->th_sport;
3225 sport = th->th_sport;
3228 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3229 nk->port[pd->didx] != dport) {
3230 pf_change_ap(m, daddr, &th->th_dport, pd->ip_sum,
3231 &th->th_sum, &nk->addr[pd->didx],
3232 nk->port[pd->didx], 0, af);
3233 dport = th->th_dport;
3234 pd->dport = &th->th_dport;
3239 bproto_sum = pd->hdr.udp->uh_sum;
3240 pd->proto_sum = &pd->hdr.udp->uh_sum;
3242 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3243 nk->port[pd->sidx] != sport) {
3244 pf_change_ap(m, saddr, &pd->hdr.udp->uh_sport,
3245 pd->ip_sum, &pd->hdr.udp->uh_sum,
3246 &nk->addr[pd->sidx],
3247 nk->port[pd->sidx], 1, af);
3248 sport = pd->hdr.udp->uh_sport;
3249 pd->sport = &pd->hdr.udp->uh_sport;
3252 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3253 nk->port[pd->didx] != dport) {
3254 pf_change_ap(m, daddr, &pd->hdr.udp->uh_dport,
3255 pd->ip_sum, &pd->hdr.udp->uh_sum,
3256 &nk->addr[pd->didx],
3257 nk->port[pd->didx], 1, af);
3258 dport = pd->hdr.udp->uh_dport;
3259 pd->dport = &pd->hdr.udp->uh_dport;
3265 nk->port[0] = nk->port[1];
3266 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
3267 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3268 nk->addr[pd->sidx].v4.s_addr, 0);
3270 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
3271 pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
3272 nk->addr[pd->didx].v4.s_addr, 0);
3274 if (nk->port[1] != pd->hdr.icmp->icmp_id) {
3275 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
3276 pd->hdr.icmp->icmp_cksum, sport,
3278 pd->hdr.icmp->icmp_id = nk->port[1];
3279 pd->sport = &pd->hdr.icmp->icmp_id;
3281 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
3285 case IPPROTO_ICMPV6:
3286 nk->port[0] = nk->port[1];
3287 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3288 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3289 &nk->addr[pd->sidx], 0);
3291 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3292 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3293 &nk->addr[pd->didx], 0);
3302 &nk->addr[pd->sidx], AF_INET))
3303 pf_change_a(&saddr->v4.s_addr,
3305 nk->addr[pd->sidx].v4.s_addr, 0);
3308 &nk->addr[pd->didx], AF_INET))
3309 pf_change_a(&daddr->v4.s_addr,
3311 nk->addr[pd->didx].v4.s_addr, 0);
3317 &nk->addr[pd->sidx], AF_INET6))
3318 PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3321 &nk->addr[pd->didx], AF_INET6))
3322 PF_ACPY(daddr, &nk->addr[pd->didx], af);
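/*
 * The matching loop below relies on the precomputed r->skip[] steps:
 * when a field test fails, evaluation jumps past every consecutive rule
 * sharing the same value for that field (interface, direction, af,
 * proto, addresses, ports) instead of stepping rule by rule. The steps
 * are presumably computed when the ruleset is loaded.
 */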
3335 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3336 r = r->skip[PF_SKIP_IFP].ptr;
3337 else if (r->direction && r->direction != direction)
3338 r = r->skip[PF_SKIP_DIR].ptr;
3339 else if (r->af && r->af != af)
3340 r = r->skip[PF_SKIP_AF].ptr;
3341 else if (r->proto && r->proto != pd->proto)
3342 r = r->skip[PF_SKIP_PROTO].ptr;
3343 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3344 r->src.neg, kif, M_GETFIB(m)))
3345 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3346 /* tcp/udp only. port_op always 0 in other cases */
3347 else if (r->src.port_op && !pf_match_port(r->src.port_op,
3348 r->src.port[0], r->src.port[1], sport))
3349 r = r->skip[PF_SKIP_SRC_PORT].ptr;
3350 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3351 r->dst.neg, NULL, M_GETFIB(m)))
3352 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3353 /* tcp/udp only. port_op always 0 in other cases */
3354 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3355 r->dst.port[0], r->dst.port[1], dport))
3356 r = r->skip[PF_SKIP_DST_PORT].ptr;
3357 /* icmp only. type always 0 in other cases */
3358 else if (r->type && r->type != icmptype + 1)
3359 r = TAILQ_NEXT(r, entries);
3360 /* icmp only. code always 0 in other cases */
3361 else if (r->code && r->code != icmpcode + 1)
3362 r = TAILQ_NEXT(r, entries);
3363 else if (r->tos && !(r->tos == pd->tos))
3364 r = TAILQ_NEXT(r, entries);
3365 else if (r->rule_flag & PFRULE_FRAGMENT)
3366 r = TAILQ_NEXT(r, entries);
3367 else if (pd->proto == IPPROTO_TCP &&
3368 (r->flagset & th->th_flags) != r->flags)
3369 r = TAILQ_NEXT(r, entries);
3370 /* tcp/udp only. uid.op always 0 in other cases */
3371 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3372 pf_socket_lookup(direction, pd, m), 1)) &&
3373 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3375 r = TAILQ_NEXT(r, entries);
3376 /* tcp/udp only. gid.op always 0 in other cases */
3377 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
3378 pf_socket_lookup(direction, pd, m), 1)) &&
3379 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
3381 r = TAILQ_NEXT(r, entries);
3383 !pf_match_ieee8021q_pcp(r->prio, m))
3384 r = TAILQ_NEXT(r, entries);
3386 r->prob <= arc4random())
3387 r = TAILQ_NEXT(r, entries);
3388 else if (r->match_tag && !pf_match_tag(m, r, &tag,
3389 pd->pf_mtag ? pd->pf_mtag->tag : 0))
3390 r = TAILQ_NEXT(r, entries);
3391 else if (r->os_fingerprint != PF_OSFP_ANY &&
3392 (pd->proto != IPPROTO_TCP || !pf_osfp_match(
3393 pf_osfp_fingerprint(pd, m, off, th),
3394 r->os_fingerprint)))
3395 r = TAILQ_NEXT(r, entries);
3399 if (r->rtableid >= 0)
3400 rtableid = r->rtableid;
3401 if (r->anchor == NULL) {
3408 r = TAILQ_NEXT(r, entries);
3410 pf_step_into_anchor(anchor_stack, &asd,
3411 &ruleset, PF_RULESET_FILTER, &r, &a,
3414 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3415 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3422 REASON_SET(&reason, PFRES_MATCH);
3424 if (r->log || (nr != NULL && nr->log)) {
3426 m_copyback(m, off, hdrlen, pd->hdr.any);
3427 PFLOG_PACKET(kif, m, af, direction, reason, r->log ? r : nr, a,
3431 if ((r->action == PF_DROP) &&
3432 ((r->rule_flag & PFRULE_RETURNRST) ||
3433 (r->rule_flag & PFRULE_RETURNICMP) ||
3434 (r->rule_flag & PFRULE_RETURN))) {
3435 /* undo NAT changes, if they have taken place */
3437 PF_ACPY(saddr, &sk->addr[pd->sidx], af);
3438 PF_ACPY(daddr, &sk->addr[pd->didx], af);
3440 *pd->sport = sk->port[pd->sidx];
3442 *pd->dport = sk->port[pd->didx];
3444 *pd->proto_sum = bproto_sum;
3446 *pd->ip_sum = bip_sum;
3447 m_copyback(m, off, hdrlen, pd->hdr.any);
3449 if (pd->proto == IPPROTO_TCP &&
3450 ((r->rule_flag & PFRULE_RETURNRST) ||
3451 (r->rule_flag & PFRULE_RETURN)) &&
3452 !(th->th_flags & TH_RST)) {
3453 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
3465 h4 = mtod(m, struct ip *);
3466 len = ntohs(h4->ip_len) - off;
3471 h6 = mtod(m, struct ip6_hdr *);
3472 len = ntohs(h6->ip6_plen) - (off - sizeof(*h6));
3477 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
3478 REASON_SET(&reason, PFRES_PROTCKSUM);
3480 if (th->th_flags & TH_SYN)
3482 if (th->th_flags & TH_FIN)
3484 pf_send_tcp(m, r, af, pd->dst,
3485 pd->src, th->th_dport, th->th_sport,
3486 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
3487 r->return_ttl, 1, 0, kif->pfik_ifp);
3489 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
3491 pf_send_icmp(m, r->return_icmp >> 8,
3492 r->return_icmp & 255, af, r);
3493 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
3495 pf_send_icmp(m, r->return_icmp6 >> 8,
3496 r->return_icmp6 & 255, af, r);
3499 if (r->action == PF_DROP)
3502 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3503 REASON_SET(&reason, PFRES_MEMORY);
3507 M_SETFIB(m, rtableid);
3509 if (!state_icmp && (r->keep_state || nr != NULL ||
3510 (pd->flags & PFDESC_TCP_NORM))) {
3512 action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off,
3513 sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum,
3515 if (action != PF_PASS)
3519 uma_zfree(V_pf_state_key_z, sk);
3521 uma_zfree(V_pf_state_key_z, nk);
3524 /* copy back packet headers if we performed NAT operations */
3526 m_copyback(m, off, hdrlen, pd->hdr.any);
3528 if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) &&
3529 direction == PF_OUT &&
3530 pfsync_defer_ptr != NULL && pfsync_defer_ptr(*sm, m))
3532 * We want the state created, but we don't
3533 * want to send this in case a partner
3534 * firewall has to know about it to allow
3535 * replies through it.
3543 uma_zfree(V_pf_state_key_z, sk);
3545 uma_zfree(V_pf_state_key_z, nk);
3550 pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
3551 struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *nk,
3552 struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport,
3553 u_int16_t dport, int *rewrite, struct pfi_kif *kif, struct pf_state **sm,
3554 int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen)
3556 struct pf_state *s = NULL;
3557 struct pf_src_node *sn = NULL;
3558 struct tcphdr *th = pd->hdr.tcp;
3559 u_int16_t mss = V_tcp_mssdflt;
3562 /* check maximums */
3563 if (r->max_states &&
3564 (counter_u64_fetch(r->states_cur) >= r->max_states)) {
3565 counter_u64_add(V_pf_status.lcounters[LCNT_STATES], 1);
3566 REASON_SET(&reason, PFRES_MAXSTATES);
3569 /* src node for filter rule */
3570 if ((r->rule_flag & PFRULE_SRCTRACK ||
3571 r->rpool.opts & PF_POOL_STICKYADDR) &&
3572 pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
3573 REASON_SET(&reason, PFRES_SRCLIMIT);
3576 /* src node for translation rule */
3577 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3578 pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
3579 REASON_SET(&reason, PFRES_SRCLIMIT);
3582 s = uma_zalloc(V_pf_state_z, M_NOWAIT | M_ZERO);
3584 REASON_SET(&reason, PFRES_MEMORY);
3588 s->nat_rule.ptr = nr;
3590 STATE_INC_COUNTERS(s);
3592 s->state_flags |= PFSTATE_ALLOWOPTS;
3593 if (r->rule_flag & PFRULE_STATESLOPPY)
3594 s->state_flags |= PFSTATE_SLOPPY;
3595 s->log = r->log & PF_LOG_ALL;
3596 s->sync_state = PFSYNC_S_NONE;
3598 s->log |= nr->log & PF_LOG_ALL;
3599 switch (pd->proto) {
3601 s->src.seqlo = ntohl(th->th_seq);
3602 s->src.seqhi = s->src.seqlo + pd->p_len + 1;
3603 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
3604 r->keep_state == PF_STATE_MODULATE) {
3605 /* Generate sequence number modulator */
3606 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
3609 pf_change_proto_a(m, &th->th_seq, &th->th_sum,
3610 htonl(s->src.seqlo + s->src.seqdiff), 0);
3614 if (th->th_flags & TH_SYN) {
3616 s->src.wscale = pf_get_wscale(m, off,
3617 th->th_off, pd->af);
3619 s->src.max_win = MAX(ntohs(th->th_win), 1);
3620 if (s->src.wscale & PF_WSCALE_MASK) {
3621 /* Remove scale factor from initial window */
3622 int win = s->src.max_win;
3623 win += 1 << (s->src.wscale & PF_WSCALE_MASK);
3624 s->src.max_win = (win - 1) >>
3625 (s->src.wscale & PF_WSCALE_MASK);
3627 if (th->th_flags & TH_FIN)
3631 s->src.state = TCPS_SYN_SENT;
3632 s->dst.state = TCPS_CLOSED;
3633 s->timeout = PFTM_TCP_FIRST_PACKET;
3636 s->src.state = PFUDPS_SINGLE;
3637 s->dst.state = PFUDPS_NO_TRAFFIC;
3638 s->timeout = PFTM_UDP_FIRST_PACKET;
3642 case IPPROTO_ICMPV6:
3644 s->timeout = PFTM_ICMP_FIRST_PACKET;
3647 s->src.state = PFOTHERS_SINGLE;
3648 s->dst.state = PFOTHERS_NO_TRAFFIC;
3649 s->timeout = PFTM_OTHER_FIRST_PACKET;
3653 if (pf_map_addr(pd->af, r, pd->src, &s->rt_addr, NULL, &sn)) {
3654 REASON_SET(&reason, PFRES_MAPFAILED);
3655 pf_src_tree_remove_state(s);
3656 STATE_DEC_COUNTERS(s);
3657 uma_zfree(V_pf_state_z, s);
3660 s->rt_kif = r->rpool.cur->kif;
3663 s->creation = time_uptime;
3664 s->expire = time_uptime;
3669 /* XXX We only modify one side for now. */
3670 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
3671 s->nat_src_node = nsn;
3673 if (pd->proto == IPPROTO_TCP) {
3674 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
3675 off, pd, th, &s->src, &s->dst)) {
3676 REASON_SET(&reason, PFRES_MEMORY);
3677 pf_src_tree_remove_state(s);
3678 STATE_DEC_COUNTERS(s);
3679 uma_zfree(V_pf_state_z, s);
3682 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
3683 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
3684 &s->src, &s->dst, rewrite)) {
3685 /* This really shouldn't happen!!! */
3686 DPFPRINTF(PF_DEBUG_URGENT,
3687 ("pf_normalize_tcp_stateful failed on first pkt"));
3688 pf_normalize_tcp_cleanup(s);
3689 pf_src_tree_remove_state(s);
3690 STATE_DEC_COUNTERS(s);
3691 uma_zfree(V_pf_state_z, s);
3695 s->direction = pd->dir;
3698 * sk/nk could already have been set up by pf_get_translation().
3701 KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p",
3702 __func__, nr, sk, nk));
3703 sk = pf_state_key_setup(pd, pd->src, pd->dst, sport, dport);
3708 KASSERT((sk != NULL && nk != NULL), ("%s: nr %p sk %p, nk %p",
3709 __func__, nr, sk, nk));
3711 /* Swap sk/nk for PF_OUT. */
3712 if (pf_state_insert(BOUND_IFACE(r, kif),
3713 (pd->dir == PF_IN) ? sk : nk,
3714 (pd->dir == PF_IN) ? nk : sk, s)) {
3715 if (pd->proto == IPPROTO_TCP)
3716 pf_normalize_tcp_cleanup(s);
3717 REASON_SET(&reason, PFRES_STATEINS);
3718 pf_src_tree_remove_state(s);
3719 STATE_DEC_COUNTERS(s);
3720 uma_zfree(V_pf_state_z, s);
3727 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
3728 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
3729 s->src.state = PF_TCPS_PROXY_SRC;
3730 /* undo NAT changes, if they have taken place */
3732 struct pf_state_key *skt = s->key[PF_SK_WIRE];
3733 if (pd->dir == PF_OUT)
3734 skt = s->key[PF_SK_STACK];
3735 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
3736 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
3738 *pd->sport = skt->port[pd->sidx];
3740 *pd->dport = skt->port[pd->didx];
3742 *pd->proto_sum = bproto_sum;
3744 *pd->ip_sum = bip_sum;
3745 m_copyback(m, off, hdrlen, pd->hdr.any);
3747 s->src.seqhi = htonl(arc4random());
3748 /* Find mss option */
3749 int rtid = M_GETFIB(m);
3750 mss = pf_get_mss(m, off, th->th_off, pd->af);
3751 mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
3752 mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
3754 pf_send_tcp(NULL, r, pd->af, pd->dst, pd->src, th->th_dport,
3755 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
3756 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL);
3757 REASON_SET(&reason, PFRES_SYNPROXY);
3758 return (PF_SYNPROXY_DROP);
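/*
 * Note: at this point the synproxy path has answered the client's SYN
 * itself, using s->src.seqhi as pf's own ISN and an MSS clamped against
 * the routes toward both endpoints; the real server is contacted only
 * once the client completes the handshake (see the PF_TCPS_PROXY_DST
 * handling in pf_test_state_tcp()).
 */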
3765 uma_zfree(V_pf_state_key_z, sk);
3767 uma_zfree(V_pf_state_key_z, nk);
3770 struct pf_srchash *sh;
3772 sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
3773 PF_HASHROW_LOCK(sh);
3774 if (--sn->states == 0 && sn->expire == 0) {
3775 pf_unlink_src_node(sn);
3776 uma_zfree(V_pf_sources_z, sn);
3778 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
3780 PF_HASHROW_UNLOCK(sh);
3783 if (nsn != sn && nsn != NULL) {
3784 struct pf_srchash *sh;
3786 sh = &V_pf_srchash[pf_hashsrc(&nsn->addr, nsn->af)];
3787 PF_HASHROW_LOCK(sh);
3788 if (--nsn->states == 0 && nsn->expire == 0) {
3789 pf_unlink_src_node(nsn);
3790 uma_zfree(V_pf_sources_z, nsn);
3792 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
3794 PF_HASHROW_UNLOCK(sh);
3801 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
3802 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
3803 struct pf_ruleset **rsm)
3805 struct pf_rule *r, *a = NULL;
3806 struct pf_ruleset *ruleset = NULL;
3807 sa_family_t af = pd->af;
3812 struct pf_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
3816 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3819 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3820 r = r->skip[PF_SKIP_IFP].ptr;
3821 else if (r->direction && r->direction != direction)
3822 r = r->skip[PF_SKIP_DIR].ptr;
3823 else if (r->af && r->af != af)
3824 r = r->skip[PF_SKIP_AF].ptr;
3825 else if (r->proto && r->proto != pd->proto)
3826 r = r->skip[PF_SKIP_PROTO].ptr;
3827 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
3828 r->src.neg, kif, M_GETFIB(m)))
3829 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3830 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
3831 r->dst.neg, NULL, M_GETFIB(m)))
3832 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3833 else if (r->tos && !(r->tos == pd->tos))
3834 r = TAILQ_NEXT(r, entries);
3835 else if (r->os_fingerprint != PF_OSFP_ANY)
3836 r = TAILQ_NEXT(r, entries);
3837 else if (pd->proto == IPPROTO_UDP &&
3838 (r->src.port_op || r->dst.port_op))
3839 r = TAILQ_NEXT(r, entries);
3840 else if (pd->proto == IPPROTO_TCP &&
3841 (r->src.port_op || r->dst.port_op || r->flagset))
3842 r = TAILQ_NEXT(r, entries);
3843 else if ((pd->proto == IPPROTO_ICMP ||
3844 pd->proto == IPPROTO_ICMPV6) &&
3845 (r->type || r->code))
3846 r = TAILQ_NEXT(r, entries);
3848 !pf_match_ieee8021q_pcp(r->prio, m))
3849 r = TAILQ_NEXT(r, entries);
3850 else if (r->prob && r->prob <=
3851 (arc4random() % (UINT_MAX - 1) + 1))
3852 r = TAILQ_NEXT(r, entries);
3853 else if (r->match_tag && !pf_match_tag(m, r, &tag,
3854 pd->pf_mtag ? pd->pf_mtag->tag : 0))
3855 r = TAILQ_NEXT(r, entries);
3857 if (r->anchor == NULL) {
3864 r = TAILQ_NEXT(r, entries);
3866 pf_step_into_anchor(anchor_stack, &asd,
3867 &ruleset, PF_RULESET_FILTER, &r, &a,
3870 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3871 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3878 REASON_SET(&reason, PFRES_MATCH);
3881 PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd,
3884 if (r->action != PF_PASS)
3887 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3888 REASON_SET(&reason, PFRES_MEMORY);
3896 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
3897 struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off,
3898 struct pf_pdesc *pd, u_short *reason, int *copyback)
3900 struct tcphdr *th = pd->hdr.tcp;
3901 u_int16_t win = ntohs(th->th_win);
3902 u_int32_t ack, end, seq, orig_seq;
3906 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
3907 sws = src->wscale & PF_WSCALE_MASK;
3908 dws = dst->wscale & PF_WSCALE_MASK;
3913 * Sequence tracking algorithm from Guido van Rooij's paper:
3914 * http://www.madison-gurkha.com/publications/tcp_filtering/
3918 orig_seq = seq = ntohl(th->th_seq);
3919 if (src->seqlo == 0) {
3920 /* First packet from this end. Set its state */
3922 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
3923 src->scrub == NULL) {
3924 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
3925 REASON_SET(reason, PFRES_MEMORY);
3930 /* Deferred generation of sequence number modulator */
3931 if (dst->seqdiff && !src->seqdiff) {
3932 /* use random iss for the TCP server */
3933 while ((src->seqdiff = arc4random() - seq) == 0)
3935 ack = ntohl(th->th_ack) - dst->seqdiff;
3936 pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
3938 pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
3941 ack = ntohl(th->th_ack);
3944 end = seq + pd->p_len;
3945 if (th->th_flags & TH_SYN) {
3947 if (dst->wscale & PF_WSCALE_FLAG) {
3948 src->wscale = pf_get_wscale(m, off, th->th_off,
3950 if (src->wscale & PF_WSCALE_FLAG) {
3951 /* Remove scale factor from initial window */
3953 sws = src->wscale & PF_WSCALE_MASK;
3954 win = ((u_int32_t)win + (1 << sws) - 1)
3956 dws = dst->wscale & PF_WSCALE_MASK;
3958 /* fixup other window */
3959 dst->max_win <<= dst->wscale &
3961 /* in case of a retrans SYN|ACK */
3966 if (th->th_flags & TH_FIN)
3970 if (src->state < TCPS_SYN_SENT)
3971 src->state = TCPS_SYN_SENT;
3974 * May need to slide the window (seqhi may have been set by
3975 * the crappy stack check or if we picked up the connection
3976 * after establishment)
3978 if (src->seqhi == 1 ||
3979 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
3980 src->seqhi = end + MAX(1, dst->max_win << dws);
3981 if (win > src->max_win)
3985 ack = ntohl(th->th_ack) - dst->seqdiff;
3987 /* Modulate sequence numbers */
3988 pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
3990 pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
3993 end = seq + pd->p_len;
3994 if (th->th_flags & TH_SYN)
3996 if (th->th_flags & TH_FIN)
4000 if ((th->th_flags & TH_ACK) == 0) {
4001 /* Let it pass through the ack skew check */
4003 } else if ((ack == 0 &&
4004 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
4005 /* broken tcp stacks do not set ack */
4006 (dst->state < TCPS_SYN_SENT)) {
4008 * Many stacks (ours included) will set the ACK number in a
4009 * FIN|ACK if the SYN times out -- no sequence to ACK.
4015 /* Ease sequencing restrictions on no data packets */
4020 ackskew = dst->seqlo - ack;
4024 * Need to demodulate the sequence numbers in any TCP SACK options
4025 * (Selective ACK). We could optionally validate the SACK values
4026 * against the current ACK window, either forwards or backwards, but
4027 * I'm not confident that SACK has been implemented properly
4028 * everywhere. It wouldn't surprise me if several stacks accidentally
4029 * SACK too far backwards of previously ACKed data. There really aren't
4030 * any security implications of bad SACKing unless the target stack
4031 * doesn't validate the option length correctly. Someone trying to
4032 * spoof into a TCP connection won't bother blindly sending SACK options anyway.
4035 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
4036 if (pf_modulate_sack(m, off, pd, th, dst))
4041 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
4042 if (SEQ_GEQ(src->seqhi, end) &&
4043 /* Last octet inside other's window space */
4044 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
4045 /* Retrans: not more than one window back */
4046 (ackskew >= -MAXACKWINDOW) &&
4047 /* Acking not more than one reassembled fragment backwards */
4048 (ackskew <= (MAXACKWINDOW << sws)) &&
4049 /* Acking not more than one window forward */
4050 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
4051 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
4052 (pd->flags & PFDESC_IP_REAS) == 0)) {
4053 /* Require an exact/+1 sequence match on resets when possible */
4055 if (dst->scrub || src->scrub) {
4056 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4057 *state, src, dst, copyback))
4061 /* update max window */
4062 if (src->max_win < win)
4064 /* synchronize sequencing */
4065 if (SEQ_GT(end, src->seqlo))
4067 /* slide the window of what the other end can send */
4068 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4069 dst->seqhi = ack + MAX((win << sws), 1);
4073 if (th->th_flags & TH_SYN)
4074 if (src->state < TCPS_SYN_SENT)
4075 src->state = TCPS_SYN_SENT;
4076 if (th->th_flags & TH_FIN)
4077 if (src->state < TCPS_CLOSING)
4078 src->state = TCPS_CLOSING;
4079 if (th->th_flags & TH_ACK) {
4080 if (dst->state == TCPS_SYN_SENT) {
4081 dst->state = TCPS_ESTABLISHED;
4082 if (src->state == TCPS_ESTABLISHED &&
4083 (*state)->src_node != NULL &&
4084 pf_src_connlimit(state)) {
4085 REASON_SET(reason, PFRES_SRCLIMIT);
4088 } else if (dst->state == TCPS_CLOSING)
4089 dst->state = TCPS_FIN_WAIT_2;
4091 if (th->th_flags & TH_RST)
4092 src->state = dst->state = TCPS_TIME_WAIT;
4094 /* update expire time */
4095 (*state)->expire = time_uptime;
4096 if (src->state >= TCPS_FIN_WAIT_2 &&
4097 dst->state >= TCPS_FIN_WAIT_2)
4098 (*state)->timeout = PFTM_TCP_CLOSED;
4099 else if (src->state >= TCPS_CLOSING &&
4100 dst->state >= TCPS_CLOSING)
4101 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4102 else if (src->state < TCPS_ESTABLISHED ||
4103 dst->state < TCPS_ESTABLISHED)
4104 (*state)->timeout = PFTM_TCP_OPENING;
4105 else if (src->state >= TCPS_CLOSING ||
4106 dst->state >= TCPS_CLOSING)
4107 (*state)->timeout = PFTM_TCP_CLOSING;
4109 (*state)->timeout = PFTM_TCP_ESTABLISHED;
4111 /* Fall through to PASS packet */
4113 } else if ((dst->state < TCPS_SYN_SENT ||
4114 dst->state >= TCPS_FIN_WAIT_2 ||
4115 src->state >= TCPS_FIN_WAIT_2) &&
4116 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
4117 /* Within a window forward of the originating packet */
4118 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
4119 /* Within a window backward of the originating packet */
4122 * This currently handles three situations:
4123 * 1) Stupid stacks will shotgun SYNs before their peer replies.
4125 * 2) When PF catches an already established stream (the
4126 * firewall rebooted, the state table was flushed, routes changed...)
4128 * 3) Packets get funky immediately after the connection
4129 * closes (this should catch Solaris spurious ACK|FINs
4130 * that web servers like to spew after a close)
4132 * This must be a little more careful than the above code
4133 * since packet floods will also be caught here. We don't
4134 * update the TTL here to mitigate the damage of a packet
4135 * flood and so the same code can handle awkward establishment
4136 * and a loosened connection close.
4137 * In the establishment case, a correct peer response will
4138 * validate the connection, go through the normal state code
4139 * and keep updating the state TTL.
4142 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4143 printf("pf: loose state match: ");
4144 pf_print_state(*state);
4145 pf_print_flags(th->th_flags);
4146 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4147 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
4148 pd->p_len, ackskew, (unsigned long long)(*state)->packets[0],
4149 (unsigned long long)(*state)->packets[1],
4150 pd->dir == PF_IN ? "in" : "out",
4151 pd->dir == (*state)->direction ? "fwd" : "rev");
4154 if (dst->scrub || src->scrub) {
4155 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4156 *state, src, dst, copyback))
4160 /* update max window */
4161 if (src->max_win < win)
4163 /* synchronize sequencing */
4164 if (SEQ_GT(end, src->seqlo))
4166 /* slide the window of what the other end can send */
4167 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4168 dst->seqhi = ack + MAX((win << sws), 1);
4171 * Cannot set dst->seqhi here since this could be a shotgunned
4172 * SYN and not an already established connection.
4175 if (th->th_flags & TH_FIN)
4176 if (src->state < TCPS_CLOSING)
4177 src->state = TCPS_CLOSING;
4178 if (th->th_flags & TH_RST)
4179 src->state = dst->state = TCPS_TIME_WAIT;
4181 /* Fall through to PASS packet */
4184 if ((*state)->dst.state == TCPS_SYN_SENT &&
4185 (*state)->src.state == TCPS_SYN_SENT) {
4186 /* Send RST for state mismatches during handshake */
4187 if (!(th->th_flags & TH_RST))
4188 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4189 pd->dst, pd->src, th->th_dport,
4190 th->th_sport, ntohl(th->th_ack), 0,
4192 (*state)->rule.ptr->return_ttl, 1, 0,
4197 } else if (V_pf_status.debug >= PF_DEBUG_MISC) {
4198 printf("pf: BAD state: ");
4199 pf_print_state(*state);
4200 pf_print_flags(th->th_flags);
4201 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4202 "pkts=%llu:%llu dir=%s,%s\n",
4203 seq, orig_seq, ack, pd->p_len, ackskew,
4204 (unsigned long long)(*state)->packets[0],
4205 (unsigned long long)(*state)->packets[1],
4206 pd->dir == PF_IN ? "in" : "out",
4207 pd->dir == (*state)->direction ? "fwd" : "rev");
4208 printf("pf: State failure on: %c %c %c %c | %c %c\n",
4209 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4210 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4212 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4213 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4214 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4215 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
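/*
 * Note: each printed digit 1-6 flags the corresponding window-check
 * condition that failed, in the order the conditions are tested above;
 * a space means that particular condition held.
 */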
4217 REASON_SET(reason, PFRES_BADSTATE);
4225 pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst,
4226 struct pf_state **state, struct pf_pdesc *pd, u_short *reason)
4228 struct tcphdr *th = pd->hdr.tcp;
4230 if (th->th_flags & TH_SYN)
4231 if (src->state < TCPS_SYN_SENT)
4232 src->state = TCPS_SYN_SENT;
4233 if (th->th_flags & TH_FIN)
4234 if (src->state < TCPS_CLOSING)
4235 src->state = TCPS_CLOSING;
4236 if (th->th_flags & TH_ACK) {
4237 if (dst->state == TCPS_SYN_SENT) {
4238 dst->state = TCPS_ESTABLISHED;
4239 if (src->state == TCPS_ESTABLISHED &&
4240 (*state)->src_node != NULL &&
4241 pf_src_connlimit(state)) {
4242 REASON_SET(reason, PFRES_SRCLIMIT);
4245 } else if (dst->state == TCPS_CLOSING) {
4246 dst->state = TCPS_FIN_WAIT_2;
4247 } else if (src->state == TCPS_SYN_SENT &&
4248 dst->state < TCPS_SYN_SENT) {
4250 * Handle a special sloppy case where we only see one
4251 * half of the connection. If there is an ACK after
4252 * the initial SYN without ever seeing a packet from
4253 * the destination, set the connection to established.
4255 dst->state = src->state = TCPS_ESTABLISHED;
4256 if ((*state)->src_node != NULL &&
4257 pf_src_connlimit(state)) {
4258 REASON_SET(reason, PFRES_SRCLIMIT);
4261 } else if (src->state == TCPS_CLOSING &&
4262 dst->state == TCPS_ESTABLISHED &&
4265 * Handle the closing of half connections where we
4266 * don't see the full bidirectional FIN/ACK+ACK handshake.
4269 dst->state = TCPS_CLOSING;
4272 if (th->th_flags & TH_RST)
4273 src->state = dst->state = TCPS_TIME_WAIT;
4275 /* update expire time */
4276 (*state)->expire = time_uptime;
4277 if (src->state >= TCPS_FIN_WAIT_2 &&
4278 dst->state >= TCPS_FIN_WAIT_2)
4279 (*state)->timeout = PFTM_TCP_CLOSED;
4280 else if (src->state >= TCPS_CLOSING &&
4281 dst->state >= TCPS_CLOSING)
4282 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4283 else if (src->state < TCPS_ESTABLISHED ||
4284 dst->state < TCPS_ESTABLISHED)
4285 (*state)->timeout = PFTM_TCP_OPENING;
4286 else if (src->state >= TCPS_CLOSING ||
4287 dst->state >= TCPS_CLOSING)
4288 (*state)->timeout = PFTM_TCP_CLOSING;
4290 (*state)->timeout = PFTM_TCP_ESTABLISHED;
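/*
 * Note: sloppy tracking deliberately skips the sequence-window checks
 * of pf_tcp_track_full() and only advances the per-peer state machine
 * and timeouts; it is presumably intended for setups, such as
 * asymmetric routing, where only one direction of a connection passes
 * through this firewall.
 */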
4296 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
4297 struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
4300 struct pf_state_key_cmp key;
4301 struct tcphdr *th = pd->hdr.tcp;
4303 struct pf_state_peer *src, *dst;
4304 struct pf_state_key *sk;
4306 bzero(&key, sizeof(key));
4308 key.proto = IPPROTO_TCP;
4309 if (direction == PF_IN) { /* wire side, straight */
4310 PF_ACPY(&key.addr[0], pd->src, key.af);
4311 PF_ACPY(&key.addr[1], pd->dst, key.af);
4312 key.port[0] = th->th_sport;
4313 key.port[1] = th->th_dport;
4314 } else { /* stack side, reverse */
4315 PF_ACPY(&key.addr[1], pd->src, key.af);
4316 PF_ACPY(&key.addr[0], pd->dst, key.af);
4317 key.port[1] = th->th_sport;
4318 key.port[0] = th->th_dport;
4321 STATE_LOOKUP(kif, &key, direction, *state, pd);
4323 if (direction == (*state)->direction) {
4324 src = &(*state)->src;
4325 dst = &(*state)->dst;
4327 src = &(*state)->dst;
4328 dst = &(*state)->src;
4331 sk = (*state)->key[pd->didx];
4333 if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
4334 if (direction != (*state)->direction) {
4335 REASON_SET(reason, PFRES_SYNPROXY);
4336 return (PF_SYNPROXY_DROP);
4338 if (th->th_flags & TH_SYN) {
4339 if (ntohl(th->th_seq) != (*state)->src.seqlo) {
4340 REASON_SET(reason, PFRES_SYNPROXY);
4343 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4344 pd->src, th->th_dport, th->th_sport,
4345 (*state)->src.seqhi, ntohl(th->th_seq) + 1,
4346 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0, NULL);
4347 REASON_SET(reason, PFRES_SYNPROXY);
4348 return (PF_SYNPROXY_DROP);
4349 } else if (!(th->th_flags & TH_ACK) ||
4350 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4351 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4352 REASON_SET(reason, PFRES_SYNPROXY);
4354 } else if ((*state)->src_node != NULL &&
4355 pf_src_connlimit(state)) {
4356 REASON_SET(reason, PFRES_SRCLIMIT);
4359 (*state)->src.state = PF_TCPS_PROXY_DST;
4361 if ((*state)->src.state == PF_TCPS_PROXY_DST) {
4362 if (direction == (*state)->direction) {
4363 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
4364 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4365 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4366 REASON_SET(reason, PFRES_SYNPROXY);
4369 (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
4370 if ((*state)->dst.seqhi == 1)
4371 (*state)->dst.seqhi = htonl(arc4random());
4372 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4373 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4374 sk->port[pd->sidx], sk->port[pd->didx],
4375 (*state)->dst.seqhi, 0, TH_SYN, 0,
4376 (*state)->src.mss, 0, 0, (*state)->tag, NULL);
4377 REASON_SET(reason, PFRES_SYNPROXY);
4378 return (PF_SYNPROXY_DROP);
4379 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
4381 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
4382 REASON_SET(reason, PFRES_SYNPROXY);
4385 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
4386 (*state)->dst.seqlo = ntohl(th->th_seq);
4387 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4388 pd->src, th->th_dport, th->th_sport,
4389 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
4390 TH_ACK, (*state)->src.max_win, 0, 0, 0,
4391 (*state)->tag, NULL);
4392 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4393 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4394 sk->port[pd->sidx], sk->port[pd->didx],
4395 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
4396 TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0, NULL);
4397 (*state)->src.seqdiff = (*state)->dst.seqhi -
4398 (*state)->src.seqlo;
4399 (*state)->dst.seqdiff = (*state)->src.seqhi -
4400 (*state)->dst.seqlo;
4401 (*state)->src.seqhi = (*state)->src.seqlo +
4402 (*state)->dst.max_win;
4403 (*state)->dst.seqhi = (*state)->dst.seqlo +
4404 (*state)->src.max_win;
4405 (*state)->src.wscale = (*state)->dst.wscale = 0;
4406 (*state)->src.state = (*state)->dst.state =
4408 REASON_SET(reason, PFRES_SYNPROXY);
4409 return (PF_SYNPROXY_DROP);
4413 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
4414 dst->state >= TCPS_FIN_WAIT_2 &&
4415 src->state >= TCPS_FIN_WAIT_2) {
4416 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4417 printf("pf: state reuse ");
4418 pf_print_state(*state);
4419 pf_print_flags(th->th_flags);
4422 /* XXX make sure it's the same direction ?? */
4423 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
4424 pf_unlink_state(*state, PF_ENTER_LOCKED);
4429 if ((*state)->state_flags & PFSTATE_SLOPPY) {
4430 if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP)
4433 if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason,
4434 &copyback) == PF_DROP)
4438 /* translate source/destination address, if necessary */
4439 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4440 struct pf_state_key *nk = (*state)->key[pd->didx];
4442 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4443 nk->port[pd->sidx] != th->th_sport)
4444 pf_change_ap(m, pd->src, &th->th_sport,
4445 pd->ip_sum, &th->th_sum, &nk->addr[pd->sidx],
4446 nk->port[pd->sidx], 0, pd->af);
4448 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4449 nk->port[pd->didx] != th->th_dport)
4450 pf_change_ap(m, pd->dst, &th->th_dport,
4451 pd->ip_sum, &th->th_sum, &nk->addr[pd->didx],
4452 nk->port[pd->didx], 0, pd->af);
4456 /* Copyback sequence modulation or stateful scrub changes if needed */
4458 m_copyback(m, off, sizeof(*th), (caddr_t)th);
4464 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
4465 struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4467 struct pf_state_peer *src, *dst;
4468 struct pf_state_key_cmp key;
4469 struct udphdr *uh = pd->hdr.udp;
4471 bzero(&key, sizeof(key));
4473 key.proto = IPPROTO_UDP;
4474 if (direction == PF_IN) { /* wire side, straight */
4475 PF_ACPY(&key.addr[0], pd->src, key.af);
4476 PF_ACPY(&key.addr[1], pd->dst, key.af);
4477 key.port[0] = uh->uh_sport;
4478 key.port[1] = uh->uh_dport;
4479 } else { /* stack side, reverse */
4480 PF_ACPY(&key.addr[1], pd->src, key.af);
4481 PF_ACPY(&key.addr[0], pd->dst, key.af);
4482 key.port[1] = uh->uh_sport;
4483 key.port[0] = uh->uh_dport;
4486 STATE_LOOKUP(kif, &key, direction, *state, pd);
4488 if (direction == (*state)->direction) {
4489 src = &(*state)->src;
4490 dst = &(*state)->dst;
4492 src = &(*state)->dst;
4493 dst = &(*state)->src;
4497 if (src->state < PFUDPS_SINGLE)
4498 src->state = PFUDPS_SINGLE;
4499 if (dst->state == PFUDPS_SINGLE)
4500 dst->state = PFUDPS_MULTIPLE;
4502 /* update expire time */
4503 (*state)->expire = time_uptime;
4504 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
4505 (*state)->timeout = PFTM_UDP_MULTIPLE;
4507 (*state)->timeout = PFTM_UDP_SINGLE;
4509 /* translate source/destination address, if necessary */
4510 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4511 struct pf_state_key *nk = (*state)->key[pd->didx];
4513 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4514 nk->port[pd->sidx] != uh->uh_sport)
4515 pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum,
4516 &uh->uh_sum, &nk->addr[pd->sidx],
4517 nk->port[pd->sidx], 1, pd->af);
4519 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4520 nk->port[pd->didx] != uh->uh_dport)
4521 pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum,
4522 &uh->uh_sum, &nk->addr[pd->didx],
4523 nk->port[pd->didx], 1, pd->af);
4524 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
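/*
 * Note: the final "1" passed to pf_change_ap() in the UDP arms marks
 * the checksum as UDP-style, where zero means "no checksum present"
 * and must not be produced when folding; the TCP calls above pass 0.
 */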
4531 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
4532 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
4534 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
4535 u_int16_t icmpid = 0, *icmpsum;
4538 struct pf_state_key_cmp key;
4540 bzero(&key, sizeof(key));
4541 switch (pd->proto) {
4544 icmptype = pd->hdr.icmp->icmp_type;
4545 icmpid = pd->hdr.icmp->icmp_id;
4546 icmpsum = &pd->hdr.icmp->icmp_cksum;
4548 if (icmptype == ICMP_UNREACH ||
4549 icmptype == ICMP_SOURCEQUENCH ||
4550 icmptype == ICMP_REDIRECT ||
4551 icmptype == ICMP_TIMXCEED ||
4552 icmptype == ICMP_PARAMPROB)
4557 case IPPROTO_ICMPV6:
4558 icmptype = pd->hdr.icmp6->icmp6_type;
4559 icmpid = pd->hdr.icmp6->icmp6_id;
4560 icmpsum = &pd->hdr.icmp6->icmp6_cksum;
4562 if (icmptype == ICMP6_DST_UNREACH ||
4563 icmptype == ICMP6_PACKET_TOO_BIG ||
4564 icmptype == ICMP6_TIME_EXCEEDED ||
4565 icmptype == ICMP6_PARAM_PROB)
4574 * ICMP query/reply message not related to a TCP/UDP packet.
4575 * Search for an ICMP state.
4578 key.proto = pd->proto;
4579 key.port[0] = key.port[1] = icmpid;
4580 if (direction == PF_IN) { /* wire side, straight */
4581 PF_ACPY(&key.addr[0], pd->src, key.af);
4582 PF_ACPY(&key.addr[1], pd->dst, key.af);
4583 } else { /* stack side, reverse */
4584 PF_ACPY(&key.addr[1], pd->src, key.af);
4585 PF_ACPY(&key.addr[0], pd->dst, key.af);
4588 STATE_LOOKUP(kif, &key, direction, *state, pd);
4590 (*state)->expire = time_uptime;
4591 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
4593 /* translate source/destination address, if necessary */
4594 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4595 struct pf_state_key *nk = (*state)->key[pd->didx];
4600 if (PF_ANEQ(pd->src,
4601 &nk->addr[pd->sidx], AF_INET))
4602 pf_change_a(&saddr->v4.s_addr,
4604 nk->addr[pd->sidx].v4.s_addr, 0);
4606 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
4608 pf_change_a(&daddr->v4.s_addr,
4610 nk->addr[pd->didx].v4.s_addr, 0);
4613 pd->hdr.icmp->icmp_id) {
4614 pd->hdr.icmp->icmp_cksum =
4616 pd->hdr.icmp->icmp_cksum, icmpid,
4617 nk->port[pd->sidx], 0);
4618 pd->hdr.icmp->icmp_id =
4622 m_copyback(m, off, ICMP_MINLEN,
4623 (caddr_t )pd->hdr.icmp);
4628 if (PF_ANEQ(pd->src,
4629 &nk->addr[pd->sidx], AF_INET6))
4631 &pd->hdr.icmp6->icmp6_cksum,
4632 &nk->addr[pd->sidx], 0);
4634 if (PF_ANEQ(pd->dst,
4635 &nk->addr[pd->didx], AF_INET6))
4637 &pd->hdr.icmp6->icmp6_cksum,
4638 &nk->addr[pd->didx], 0);
4640 m_copyback(m, off, sizeof(struct icmp6_hdr),
4641 (caddr_t )pd->hdr.icmp6);
4650 * ICMP error message in response to a TCP/UDP packet.
4651 * Extract the inner TCP/UDP header and search for that state.
4654 struct pf_pdesc pd2;
4655 bzero(&pd2, sizeof pd2);
4660 struct ip6_hdr h2_6;
4667 /* Payload packet is from the opposite direction. */
4668 pd2.sidx = (direction == PF_IN) ? 1 : 0;
4669 pd2.didx = (direction == PF_IN) ? 0 : 1;
4673 /* offset of h2 in mbuf chain */
4674 ipoff2 = off + ICMP_MINLEN;
4676 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
4677 NULL, reason, pd2.af)) {
4678 DPFPRINTF(PF_DEBUG_MISC,
4679 ("pf: ICMP error message too short "
4684 * ICMP error messages don't refer to non-first
4687 if (h2.ip_off & htons(IP_OFFMASK)) {
4688 REASON_SET(reason, PFRES_FRAG);
4692 /* offset of protocol header that follows h2 */
4693 off2 = ipoff2 + (h2.ip_hl << 2);
4695 pd2.proto = h2.ip_p;
4696 pd2.src = (struct pf_addr *)&h2.ip_src;
4697 pd2.dst = (struct pf_addr *)&h2.ip_dst;
4698 pd2.ip_sum = &h2.ip_sum;
4703 ipoff2 = off + sizeof(struct icmp6_hdr);
4705 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
4706 NULL, reason, pd2.af)) {
4707 DPFPRINTF(PF_DEBUG_MISC,
4708 ("pf: ICMP error message too short "
4712 pd2.proto = h2_6.ip6_nxt;
4713 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
4714 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
4716 off2 = ipoff2 + sizeof(h2_6);
4718 switch (pd2.proto) {
4719 case IPPROTO_FRAGMENT:
4721 * ICMPv6 error messages for
4722 * non-first fragments
4724 REASON_SET(reason, PFRES_FRAG);
4727 case IPPROTO_HOPOPTS:
4728 case IPPROTO_ROUTING:
4729 case IPPROTO_DSTOPTS: {
4730 /* get next header and header length */
4731 struct ip6_ext opt6;
4733 if (!pf_pull_hdr(m, off2, &opt6,
4734 sizeof(opt6), NULL, reason,
4736 DPFPRINTF(PF_DEBUG_MISC,
4737 ("pf: ICMPv6 short opt\n"));
4740 if (pd2.proto == IPPROTO_AH)
4741 off2 += (opt6.ip6e_len + 2) * 4;
4743 off2 += (opt6.ip6e_len + 1) * 8;
4744 pd2.proto = opt6.ip6e_nxt;
4745 /* go to the next header */
4752 } while (!terminal);
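/*
 * Note: the loop above linearly follows the IPv6 extension-header chain
 * (hop-by-hop, routing, destination options), advancing off2 until a
 * terminal protocol such as TCP, UDP or ICMPv6 is reached; AH counts
 * its length in 32-bit words, hence the separate arithmetic, and
 * fragments are rejected outright since the embedded payload cannot be
 * reassembled here.
 */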
4757 switch (pd2.proto) {
4761 struct pf_state_peer *src, *dst;
4766 * Only the first 8 bytes of the TCP header can be
4767 * expected. Don't access any TCP header fields after
4768 * th_seq; an ackskew test is not possible.
4770 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
4772 DPFPRINTF(PF_DEBUG_MISC,
4773 ("pf: ICMP error message too short "
4779 key.proto = IPPROTO_TCP;
4780 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4781 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4782 key.port[pd2.sidx] = th.th_sport;
4783 key.port[pd2.didx] = th.th_dport;
4785 STATE_LOOKUP(kif, &key, direction, *state, pd);
4787 if (direction == (*state)->direction) {
4788 src = &(*state)->dst;
4789 dst = &(*state)->src;
4791 src = &(*state)->src;
4792 dst = &(*state)->dst;
4795 if (src->wscale && dst->wscale)
4796 dws = dst->wscale & PF_WSCALE_MASK;
4800 /* Demodulate sequence number */
4801 seq = ntohl(th.th_seq) - src->seqdiff;
4803 pf_change_a(&th.th_seq, icmpsum,
4808 if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
4809 (!SEQ_GEQ(src->seqhi, seq) ||
4810 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
4811 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4812 printf("pf: BAD ICMP %d:%d ",
4813 icmptype, pd->hdr.icmp->icmp_code);
4814 pf_print_host(pd->src, 0, pd->af);
4816 pf_print_host(pd->dst, 0, pd->af);
4818 pf_print_state(*state);
4819 printf(" seq=%u\n", seq);
4821 REASON_SET(reason, PFRES_BADSTATE);
4824 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4825 printf("pf: OK ICMP %d:%d ",
4826 icmptype, pd->hdr.icmp->icmp_code);
4827 pf_print_host(pd->src, 0, pd->af);
4829 pf_print_host(pd->dst, 0, pd->af);
4831 pf_print_state(*state);
4832 printf(" seq=%u\n", seq);
4836 /* translate source/destination address, if necessary */
4837 if ((*state)->key[PF_SK_WIRE] !=
4838 (*state)->key[PF_SK_STACK]) {
4839 struct pf_state_key *nk =
4840 (*state)->key[pd->didx];
4842 if (PF_ANEQ(pd2.src,
4843 &nk->addr[pd2.sidx], pd2.af) ||
4844 nk->port[pd2.sidx] != th.th_sport)
4845 pf_change_icmp(pd2.src, &th.th_sport,
4846 daddr, &nk->addr[pd2.sidx],
4847 nk->port[pd2.sidx], NULL,
4848 pd2.ip_sum, icmpsum,
4849 pd->ip_sum, 0, pd2.af);
4851 if (PF_ANEQ(pd2.dst,
4852 &nk->addr[pd2.didx], pd2.af) ||
4853 nk->port[pd2.didx] != th.th_dport)
4854 pf_change_icmp(pd2.dst, &th.th_dport,
4855 saddr, &nk->addr[pd2.didx],
4856 nk->port[pd2.didx], NULL,
4857 pd2.ip_sum, icmpsum,
4858 pd->ip_sum, 0, pd2.af);
4866 m_copyback(m, off, ICMP_MINLEN,
4867 (caddr_t )pd->hdr.icmp);
4868 m_copyback(m, ipoff2, sizeof(h2),
4875 sizeof(struct icmp6_hdr),
4876 (caddr_t )pd->hdr.icmp6);
4877 m_copyback(m, ipoff2, sizeof(h2_6),
4882 m_copyback(m, off2, 8, (caddr_t)&th);
4891 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
4892 NULL, reason, pd2.af)) {
4893 DPFPRINTF(PF_DEBUG_MISC,
4894 ("pf: ICMP error message too short "
4900 key.proto = IPPROTO_UDP;
4901 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4902 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4903 key.port[pd2.sidx] = uh.uh_sport;
4904 key.port[pd2.didx] = uh.uh_dport;
4906 STATE_LOOKUP(kif, &key, direction, *state, pd);
4908 /* translate source/destination address, if necessary */
4909 if ((*state)->key[PF_SK_WIRE] !=
4910 (*state)->key[PF_SK_STACK]) {
4911 struct pf_state_key *nk =
4912 (*state)->key[pd->didx];
4914 if (PF_ANEQ(pd2.src,
4915 &nk->addr[pd2.sidx], pd2.af) ||
4916 nk->port[pd2.sidx] != uh.uh_sport)
4917 pf_change_icmp(pd2.src, &uh.uh_sport,
4918 daddr, &nk->addr[pd2.sidx],
4919 nk->port[pd2.sidx], &uh.uh_sum,
4920 pd2.ip_sum, icmpsum,
4921 pd->ip_sum, 1, pd2.af);
4923 if (PF_ANEQ(pd2.dst,
4924 &nk->addr[pd2.didx], pd2.af) ||
4925 nk->port[pd2.didx] != uh.uh_dport)
4926 pf_change_icmp(pd2.dst, &uh.uh_dport,
4927 saddr, &nk->addr[pd2.didx],
4928 nk->port[pd2.didx], &uh.uh_sum,
4929 pd2.ip_sum, icmpsum,
4930 pd->ip_sum, 1, pd2.af);
4935 m_copyback(m, off, ICMP_MINLEN,
4936 (caddr_t )pd->hdr.icmp);
4937 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4943 sizeof(struct icmp6_hdr),
4944 (caddr_t )pd->hdr.icmp6);
4945 m_copyback(m, ipoff2, sizeof(h2_6),
4950 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
4956 case IPPROTO_ICMP: {
4959 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
4960 NULL, reason, pd2.af)) {
4961 DPFPRINTF(PF_DEBUG_MISC,
4962 ("pf: ICMP error message too short i"
4968 key.proto = IPPROTO_ICMP;
4969 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4970 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4971 key.port[0] = key.port[1] = iih.icmp_id;
4973 STATE_LOOKUP(kif, &key, direction, *state, pd);
4975 /* translate source/destination address, if necessary */
4976 if ((*state)->key[PF_SK_WIRE] !=
4977 (*state)->key[PF_SK_STACK]) {
4978 struct pf_state_key *nk =
4979 (*state)->key[pd->didx];
4981 if (PF_ANEQ(pd2.src,
4982 &nk->addr[pd2.sidx], pd2.af) ||
4983 nk->port[pd2.sidx] != iih.icmp_id)
4984 pf_change_icmp(pd2.src, &iih.icmp_id,
4985 daddr, &nk->addr[pd2.sidx],
4986 nk->port[pd2.sidx], NULL,
4987 pd2.ip_sum, icmpsum,
4988 pd->ip_sum, 0, AF_INET);
4990 if (PF_ANEQ(pd2.dst,
4991 &nk->addr[pd2.didx], pd2.af) ||
4992 nk->port[pd2.didx] != iih.icmp_id)
4993 pf_change_icmp(pd2.dst, &iih.icmp_id,
4994 saddr, &nk->addr[pd2.didx],
4995 nk->port[pd2.didx], NULL,
4996 pd2.ip_sum, icmpsum,
4997 pd->ip_sum, 0, AF_INET);
4999 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
5000 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5001 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
5008 case IPPROTO_ICMPV6: {
5009 struct icmp6_hdr iih;
5011 if (!pf_pull_hdr(m, off2, &iih,
5012 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
5013 DPFPRINTF(PF_DEBUG_MISC,
5014 ("pf: ICMP error message too short "
5020 key.proto = IPPROTO_ICMPV6;
5021 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5022 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5023 key.port[0] = key.port[1] = iih.icmp6_id;
5025 STATE_LOOKUP(kif, &key, direction, *state, pd);
5027 /* translate source/destination address, if necessary */
5028 if ((*state)->key[PF_SK_WIRE] !=
5029 (*state)->key[PF_SK_STACK]) {
5030 struct pf_state_key *nk =
5031 (*state)->key[pd->didx];
5033 if (PF_ANEQ(pd2.src,
5034 &nk->addr[pd2.sidx], pd2.af) ||
5035 nk->port[pd2.sidx] != iih.icmp6_id)
5036 pf_change_icmp(pd2.src, &iih.icmp6_id,
5037 daddr, &nk->addr[pd2.sidx],
5038 nk->port[pd2.sidx], NULL,
5039 pd2.ip_sum, icmpsum,
5040 pd->ip_sum, 0, AF_INET6);
5042 if (PF_ANEQ(pd2.dst,
5043 &nk->addr[pd2.didx], pd2.af) ||
5044 nk->port[pd2.didx] != iih.icmp6_id)
5045 pf_change_icmp(pd2.dst, &iih.icmp6_id,
5046 saddr, &nk->addr[pd2.didx],
5047 nk->port[pd2.didx], NULL,
5048 pd2.ip_sum, icmpsum,
5049 pd->ip_sum, 0, AF_INET6);
5051 m_copyback(m, off, sizeof(struct icmp6_hdr),
5052 (caddr_t)pd->hdr.icmp6);
5053 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
5054 m_copyback(m, off2, sizeof(struct icmp6_hdr),
5063 key.proto = pd2.proto;
5064 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5065 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5066 key.port[0] = key.port[1] = 0;
5068 STATE_LOOKUP(kif, &key, direction, *state, pd);
5070 /* translate source/destination address, if necessary */
5071 if ((*state)->key[PF_SK_WIRE] !=
5072 (*state)->key[PF_SK_STACK]) {
5073 struct pf_state_key *nk =
5074 (*state)->key[pd->didx];
5076 if (PF_ANEQ(pd2.src,
5077 &nk->addr[pd2.sidx], pd2.af))
5078 pf_change_icmp(pd2.src, NULL, daddr,
5079 &nk->addr[pd2.sidx], 0, NULL,
5080 pd2.ip_sum, icmpsum,
5081 pd->ip_sum, 0, pd2.af);
5083 if (PF_ANEQ(pd2.dst,
5084 &nk->addr[pd2.didx], pd2.af))
5085 pf_change_icmp(pd2.dst, NULL, saddr,
5086 &nk->addr[pd2.didx], 0, NULL,
5087 pd2.ip_sum, icmpsum,
5088 pd->ip_sum, 0, pd2.af);
5093 m_copyback(m, off, ICMP_MINLEN,
5094 (caddr_t)pd->hdr.icmp);
5095 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5101 sizeof(struct icmp6_hdr),
5102 (caddr_t)pd->hdr.icmp6);
5103 m_copyback(m, ipoff2, sizeof(h2_6),
5117 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
5118 struct mbuf *m, struct pf_pdesc *pd)
5120 struct pf_state_peer *src, *dst;
5121 struct pf_state_key_cmp key;
5123 bzero(&key, sizeof(key));
5125 key.proto = pd->proto;
5126 if (direction == PF_IN) {
5127 PF_ACPY(&key.addr[0], pd->src, key.af);
5128 PF_ACPY(&key.addr[1], pd->dst, key.af);
5129 key.port[0] = key.port[1] = 0;
5131 PF_ACPY(&key.addr[1], pd->src, key.af);
5132 PF_ACPY(&key.addr[0], pd->dst, key.af);
5133 key.port[1] = key.port[0] = 0;
5136 STATE_LOOKUP(kif, &key, direction, *state, pd);
5138 if (direction == (*state)->direction) {
5139 src = &(*state)->src;
5140 dst = &(*state)->dst;
5142 src = &(*state)->dst;
5143 dst = &(*state)->src;
5147 if (src->state < PFOTHERS_SINGLE)
5148 src->state = PFOTHERS_SINGLE;
5149 if (dst->state == PFOTHERS_SINGLE)
5150 dst->state = PFOTHERS_MULTIPLE;
5152 /* update expire time */
5153 (*state)->expire = time_uptime;
5154 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
5155 (*state)->timeout = PFTM_OTHER_MULTIPLE;
5157 (*state)->timeout = PFTM_OTHER_SINGLE;
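/*
 * This is the pseudo state machine for stateless protocols: once
 * traffic has been seen in both directions, both peers sit at
 * PFOTHERS_MULTIPLE and the longer PFTM_OTHER_MULTIPLE timeout
 * applies; otherwise the state expires after PFTM_OTHER_SINGLE.
 */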
5159 /* translate source/destination address, if necessary */
5160 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5161 struct pf_state_key *nk = (*state)->key[pd->didx];
5163 KASSERT(nk, ("%s: nk is null", __func__));
5164 KASSERT(pd, ("%s: pd is null", __func__));
5165 KASSERT(pd->src, ("%s: pd->src is null", __func__));
5166 KASSERT(pd->dst, ("%s: pd->dst is null", __func__));
5170 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5171 pf_change_a(&pd->src->v4.s_addr,
5173 nk->addr[pd->sidx].v4.s_addr,
5177 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5178 pf_change_a(&pd->dst->v4.s_addr,
5180 nk->addr[pd->didx].v4.s_addr,
5187 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6))
5188 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
5190 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6))
5191 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
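/*
 * A minimal sketch of the two-key scheme used above: every state has
 * a PF_SK_WIRE key (the packet as seen on the wire) and a PF_SK_STACK
 * key (the packet as seen by the local stack).  Without NAT both
 * pointers reference the same key, so pointer inequality is a cheap
 * "was this connection translated?" test:
 *
 *	if (s->key[PF_SK_WIRE] != s->key[PF_SK_STACK]) {
 *		struct pf_state_key *nk = s->key[pd->didx];
 *		(rewrite addresses/ports towards nk)
 *	}
 */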
5199 * ipoff and off are measured from the start of the mbuf chain.
5200 * h must be at "ipoff" on the mbuf chain.
5203 pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
5204 u_short *actionp, u_short *reasonp, sa_family_t af)
5209 struct ip *h = mtod(m, struct ip *);
5210 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
5214 ACTION_SET(actionp, PF_PASS);
5216 ACTION_SET(actionp, PF_DROP);
5217 REASON_SET(reasonp, PFRES_FRAG);
5221 if (m->m_pkthdr.len < off + len ||
5222 ntohs(h->ip_len) < off + len) {
5223 ACTION_SET(actionp, PF_DROP);
5224 REASON_SET(reasonp, PFRES_SHORT);
5232 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
5234 if (m->m_pkthdr.len < off + len ||
5235 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
5236 (unsigned)(off + len)) {
5237 ACTION_SET(actionp, PF_DROP);
5238 REASON_SET(reasonp, PFRES_SHORT);
5245 m_copydata(m, off, len, p);
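/*
 * Typical use, as in the protocol cases of pf_test() further below
 * (a sketch; the exact error path is the caller's own):
 *
 *	struct tcphdr th;
 *
 *	if (!pf_pull_hdr(m, off, &th, sizeof(th),
 *	    &action, &reason, AF_INET)) {
 *		log = action != PF_PASS;
 *		goto done;
 *	}
 *
 * On failure the action and reason out-parameters have already been
 * set, so the caller only decides whether to log.
 */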
5251 pf_routable_oldmpath(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif,
5254 struct radix_node_head *rnh;
5255 struct sockaddr_in *dst;
5259 struct sockaddr_in6 *dst6;
5260 struct route_in6 ro;
5264 struct radix_node *rn;
5269 /* XXX: stick to table 0 for now */
5270 rnh = rt_tables_get_rnh(0, af);
5271 if (rnh != NULL && rn_mpath_capable(rnh))
5273 bzero(&ro, sizeof(ro));
5276 dst = satosin(&ro.ro_dst);
5277 dst->sin_family = AF_INET;
5278 dst->sin_len = sizeof(*dst);
5279 dst->sin_addr = addr->v4;
5284 * Skip check for addresses with embedded interface scope,
5285 * as they would always match anyway.
5287 if (IN6_IS_SCOPE_EMBED(&addr->v6))
5289 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
5290 dst6->sin6_family = AF_INET6;
5291 dst6->sin6_len = sizeof(*dst6);
5292 dst6->sin6_addr = addr->v6;
5299 /* Skip checks for ipsec interfaces */
5300 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
5306 in6_rtalloc_ign(&ro, 0, rtableid);
5311 in_rtalloc_ign((struct route *)&ro, 0, rtableid);
5316 if (ro.ro_rt != NULL) {
5317 /* No interface given, this is a no-route check */
5321 if (kif->pfik_ifp == NULL) {
5326 /* Perform uRPF check if passed input interface */
5328 rn = (struct radix_node *)ro.ro_rt;
5330 rt = (struct rtentry *)rn;
5333 if (kif->pfik_ifp == ifp)
5335 rn = rn_mpath_next(rn);
5336 } while (check_mpath == 1 && rn != NULL && ret == 0);
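/*
 * The do/while above visits every path of a multipath route via
 * rn_mpath_next(), so the uRPF check succeeds if any one of the
 * paths points back out the interface the packet arrived on.
 */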
5340 if (ro.ro_rt != NULL)
5347 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif,
5351 struct nhop4_basic nh4;
5354 struct nhop6_basic nh6;
5358 struct radix_node_head *rnh;
5360 /* XXX: stick to table 0 for now */
5361 rnh = rt_tables_get_rnh(0, af);
5362 if (rnh != NULL && rn_mpath_capable(rnh))
5363 return (pf_routable_oldmpath(addr, af, kif, rtableid));
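/*
 * Multipath-capable FIBs are still handled by the rtentry walk in
 * pf_routable_oldmpath() above; the fib4/fib6 basic nexthop lookups
 * used below return only a single nexthop per prefix.
 */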
5366 * Skip check for addresses with embedded interface scope,
5367 * as they would always match anyway.
5369 if (af == AF_INET6 && IN6_IS_SCOPE_EMBED(&addr->v6))
5372 if (af != AF_INET && af != AF_INET6)
5375 /* Skip checks for ipsec interfaces */
5376 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
5384 if (fib6_lookup_nh_basic(rtableid, &addr->v6, 0, 0, 0, &nh6) != 0)
5391 if (fib4_lookup_nh_basic(rtableid, addr->v4, 0, 0, &nh4) != 0)
5398 /* No interface given, this is a no-route check */
5402 if (kif->pfik_ifp == NULL)
5405 /* Perform uRPF check if passed input interface */
5406 if (kif->pfik_ifp == ifp)
5413 pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5414 struct pf_state *s, struct pf_pdesc *pd)
5416 struct mbuf *m0, *m1;
5417 struct sockaddr_in dst;
5419 struct ifnet *ifp = NULL;
5420 struct pf_addr naddr;
5421 struct pf_src_node *sn = NULL;
5423 uint16_t ip_len, ip_off;
5425 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5426 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5429 if ((pd->pf_mtag == NULL &&
5430 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5431 pd->pf_mtag->routed++ > 3) {
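/*
 * The check above bounds pf_mtag->routed: every pass through
 * pf_route() increments it, so a route-to/reply-to rule set that
 * keeps handing the packet back gives up after a few hops instead
 * of looping the packet forever.
 */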
5437 if (r->rt == PF_DUPTO) {
5438 if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
5444 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5452 ip = mtod(m0, struct ip *);
5454 bzero(&dst, sizeof(dst));
5455 dst.sin_family = AF_INET;
5456 dst.sin_len = sizeof(dst);
5457 dst.sin_addr = ip->ip_dst;
5459 if (TAILQ_EMPTY(&r->rpool.list)) {
5460 DPFPRINTF(PF_DEBUG_URGENT,
5461 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5465 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
5467 if (!PF_AZERO(&naddr, AF_INET))
5468 dst.sin_addr.s_addr = naddr.v4.s_addr;
5469 ifp = r->rpool.cur->kif ?
5470 r->rpool.cur->kif->pfik_ifp : NULL;
5472 if (!PF_AZERO(&s->rt_addr, AF_INET))
5473 dst.sin_addr.s_addr =
5474 s->rt_addr.v4.s_addr;
5475 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5482 if (pf_test(PF_OUT, ifp, &m0, NULL) != PF_PASS)
5484 else if (m0 == NULL)
5486 if (m0->m_len < sizeof(struct ip)) {
5487 DPFPRINTF(PF_DEBUG_URGENT,
5488 ("%s: m0->m_len < sizeof(struct ip)\n", __func__));
5491 ip = mtod(m0, struct ip *);
5494 if (ifp->if_flags & IFF_LOOPBACK)
5495 m0->m_flags |= M_SKIP_FIREWALL;
5497 ip_len = ntohs(ip->ip_len);
5498 ip_off = ntohs(ip->ip_off);
5500 /* Copied from FreeBSD 10.0-CURRENT ip_output. */
5501 m0->m_pkthdr.csum_flags |= CSUM_IP;
5502 if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) {
5503 in_delayed_cksum(m0);
5504 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
5507 if (m0->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) {
5508 sctp_delayed_cksum(m0, (uint32_t)(ip->ip_hl << 2));
5509 m0->m_pkthdr.csum_flags &= ~CSUM_SCTP;
5514 * If small enough for interface, or the interface will take
5515 * care of the fragmentation for us, we can just send directly.
5517 if (ip_len <= ifp->if_mtu ||
5518 (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0) {
5520 if (m0->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) {
5521 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
5522 m0->m_pkthdr.csum_flags &= ~CSUM_IP;
5524 m_clrprotoflags(m0); /* Avoid confusing lower layers. */
5525 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5529 /* Balk when DF bit is set or the interface didn't support TSO. */
5530 if ((ip_off & IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5532 KMOD_IPSTAT_INC(ips_cantfrag);
5533 if (r->rt != PF_DUPTO) {
5534 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
5541 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist);
5545 for (; m0; m0 = m1) {
5547 m0->m_nextpkt = NULL;
5549 m_clrprotoflags(m0);
5550 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5556 KMOD_IPSTAT_INC(ips_fragmented);
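/*
 * ip_fragment() leaves the fragments chained through m_nextpkt; the
 * loop above unlinks and transmits them one at a time, and the
 * ips_fragmented counter is only bumped once the entire chain has
 * gone out without error.
 */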
5559 if (r->rt != PF_DUPTO)
5574 pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5575 struct pf_state *s, struct pf_pdesc *pd)
5578 struct sockaddr_in6 dst;
5579 struct ip6_hdr *ip6;
5580 struct ifnet *ifp = NULL;
5581 struct pf_addr naddr;
5582 struct pf_src_node *sn = NULL;
5584 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5585 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5588 if ((pd->pf_mtag == NULL &&
5589 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5590 pd->pf_mtag->routed++ > 3) {
5596 if (r->rt == PF_DUPTO) {
5597 if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
5603 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5611 ip6 = mtod(m0, struct ip6_hdr *);
5613 bzero(&dst, sizeof(dst));
5614 dst.sin6_family = AF_INET6;
5615 dst.sin6_len = sizeof(dst);
5616 dst.sin6_addr = ip6->ip6_dst;
5618 if (TAILQ_EMPTY(&r->rpool.list)) {
5619 DPFPRINTF(PF_DEBUG_URGENT,
5620 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5624 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
5626 if (!PF_AZERO(&naddr, AF_INET6))
5627 PF_ACPY((struct pf_addr *)&dst.sin6_addr,
5629 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
5631 if (!PF_AZERO(&s->rt_addr, AF_INET6))
5632 PF_ACPY((struct pf_addr *)&dst.sin6_addr,
5633 &s->rt_addr, AF_INET6);
5634 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5644 if (pf_test6(PF_FWD, ifp, &m0, NULL) != PF_PASS)
5646 else if (m0 == NULL)
5648 if (m0->m_len < sizeof(struct ip6_hdr)) {
5649 DPFPRINTF(PF_DEBUG_URGENT,
5650 ("%s: m0->m_len < sizeof(struct ip6_hdr)\n",
5654 ip6 = mtod(m0, struct ip6_hdr *);
5657 if (ifp->if_flags & IFF_LOOPBACK)
5658 m0->m_flags |= M_SKIP_FIREWALL;
5660 if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA_IPV6 &
5661 ~ifp->if_hwassist) {
5662 uint32_t plen = m0->m_pkthdr.len - sizeof(*ip6);
5663 in6_delayed_cksum(m0, plen, sizeof(struct ip6_hdr));
5664 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA_IPV6;
5668 * If the packet is too large for the outgoing interface,
5669 * send back an icmp6 error.
5671 if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr))
5672 dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
5673 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu)
5674 nd6_output_ifp(ifp, ifp, m0, &dst, NULL);
5676 in6_ifstat_inc(ifp, ifs6_in_toobig);
5677 if (r->rt != PF_DUPTO)
5678 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
5684 if (r->rt != PF_DUPTO)
5698 * FreeBSD supports cksum offloads for the following drivers.
5699 * em(4), fxp(4), ixgb(4), lge(4), ndis(4), nge(4), re(4),
5700 * ti(4), txp(4), xl(4)
5702 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR :
5703 * network driver performed cksum including pseudo header; need to verify csum_data.
 * CSUM_DATA_VALID :
5706 * network driver performed cksum, but needs an additional pseudo header
5707 * cksum computation with the partial csum_data (i.e. lack of H/W support
5708 * for the pseudo header, for instance hme(4), sk(4) and possibly gem(4)).
5710 * After validating the cksum of the packet, set both CSUM_DATA_VALID and
5711 * CSUM_PSEUDO_HDR to avoid recomputation of the cksum in the upper network
5713 * stack.  Also, set csum_data to 0xffff to force cksum validation.
 */
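/*
 * In other words (a sketch of the TCP case below): if the driver
 * already folded in the pseudo header, csum_data is the final
 * checksum; otherwise in_pseudo() adds the missing pseudo-header
 * words (addresses, length, protocol) to the partial csum_data:
 *
 *	sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
 *	    htonl((u_short)len + m->m_pkthdr.csum_data + IPPROTO_TCP));
 *
 * Either way a packet that checks out verifies to 0xffff, the
 * fixed point of the one's-complement sum.
 */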
5716 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af)
5722 if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
5724 if (m->m_pkthdr.len < off + len)
5729 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5730 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5731 sum = m->m_pkthdr.csum_data;
5733 ip = mtod(m, struct ip *);
5734 sum = in_pseudo(ip->ip_src.s_addr,
5735 ip->ip_dst.s_addr, htonl((u_short)len +
5736 m->m_pkthdr.csum_data + IPPROTO_TCP));
5743 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5744 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5745 sum = m->m_pkthdr.csum_data;
5747 ip = mtod(m, struct ip *);
5748 sum = in_pseudo(ip->ip_src.s_addr,
5749 ip->ip_dst.s_addr, htonl((u_short)len +
5750 m->m_pkthdr.csum_data + IPPROTO_UDP));
5758 case IPPROTO_ICMPV6:
5768 if (p == IPPROTO_ICMP) {
5773 sum = in_cksum(m, len);
5777 if (m->m_len < sizeof(struct ip))
5779 sum = in4_cksum(m, p, off, len);
5784 if (m->m_len < sizeof(struct ip6_hdr))
5786 sum = in6_cksum(m, p, off, len);
5797 KMOD_TCPSTAT_INC(tcps_rcvbadsum);
5802 KMOD_UDPSTAT_INC(udps_badsum);
5808 KMOD_ICMPSTAT_INC(icps_checksum);
5813 case IPPROTO_ICMPV6:
5815 KMOD_ICMP6STAT_INC(icp6s_checksum);
5822 if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
5823 m->m_pkthdr.csum_flags |=
5824 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
5825 m->m_pkthdr.csum_data = 0xffff;
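/*
 * With CSUM_DATA_VALID | CSUM_PSEUDO_HDR set and csum_data forced to
 * 0xffff, the TCP/UDP input paths treat the checksum as already
 * verified and skip their own software checksum pass.
 */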
5834 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
5836 struct pfi_kif *kif;
5837 u_short action, reason = 0, log = 0;
5838 struct mbuf *m = *m0;
5839 struct ip *h = NULL;
5840 struct m_tag *ipfwtag;
5841 struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
5842 struct pf_state *s = NULL;
5843 struct pf_ruleset *ruleset = NULL;
5845 int off, dirndx, pqid = 0;
5849 if (!V_pf_status.running)
5852 memset(&pd, 0, sizeof(pd));
5854 kif = (struct pfi_kif *)ifp->if_pf_kif;
5857 DPFPRINTF(PF_DEBUG_URGENT,
5858 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
5861 if (kif->pfik_flags & PFI_IFLAG_SKIP)
5864 if (m->m_flags & M_SKIP_FIREWALL)
5867 pd.pf_mtag = pf_find_mtag(m);
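/*
 * The block below cooperates with ipfw divert(4): a packet re-entering
 * pf after a divert carries an MTAG_IPFW_RULE tag, which is consumed
 * here and recorded as PF_PACKET_LOOPED so that the divert logic
 * further down does not divert the same packet a second time.
 */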
5871 if (ip_divert_ptr != NULL &&
5872 ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
5873 struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1);
5874 if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) {
5875 if (pd.pf_mtag == NULL &&
5876 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
5880 pd.pf_mtag->flags |= PF_PACKET_LOOPED;
5881 m_tag_delete(m, ipfwtag);
5883 if (pd.pf_mtag && pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) {
5884 m->m_flags |= M_FASTFWD_OURS;
5885 pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
5887 } else if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
5888 /* We do IP header normalization and packet reassembly here */
5892 m = *m0; /* pf_normalize messes with m0 */
5893 h = mtod(m, struct ip *);
5895 off = h->ip_hl << 2;
5896 if (off < (int)sizeof(struct ip)) {
5898 REASON_SET(&reason, PFRES_SHORT);
5903 pd.src = (struct pf_addr *)&h->ip_src;
5904 pd.dst = (struct pf_addr *)&h->ip_dst;
5905 pd.sport = pd.dport = NULL;
5906 pd.ip_sum = &h->ip_sum;
5907 pd.proto_sum = NULL;
5910 pd.sidx = (dir == PF_IN) ? 0 : 1;
5911 pd.didx = (dir == PF_IN) ? 1 : 0;
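/*
 * sidx/didx are the state-key array slots for this packet's source
 * and destination: on PF_IN the source is slot 0, on PF_OUT the
 * roles are reversed.
 */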
5913 pd.tos = h->ip_tos & ~IPTOS_ECN_MASK;
5914 pd.tot_len = ntohs(h->ip_len);
5916 /* handle fragments that didn't get reassembled by normalization */
5917 if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
5918 action = pf_test_fragment(&r, dir, kif, m, h,
5929 if (!pf_pull_hdr(m, off, &th, sizeof(th),
5930 &action, &reason, AF_INET)) {
5931 log = action != PF_PASS;
5934 pd.p_len = pd.tot_len - off - (th.th_off << 2);
5935 if ((th.th_flags & TH_ACK) && pd.p_len == 0)
5937 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
5938 if (action == PF_DROP)
5940 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
5942 if (action == PF_PASS) {
5943 if (pfsync_update_state_ptr != NULL)
5944 pfsync_update_state_ptr(s);
5948 } else if (s == NULL)
5949 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5958 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
5959 &action, &reason, AF_INET)) {
5960 log = action != PF_PASS;
5963 if (uh.uh_dport == 0 ||
5964 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
5965 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
5967 REASON_SET(&reason, PFRES_SHORT);
5970 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
5971 if (action == PF_PASS) {
5972 if (pfsync_update_state_ptr != NULL)
5973 pfsync_update_state_ptr(s);
5977 } else if (s == NULL)
5978 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5983 case IPPROTO_ICMP: {
5987 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
5988 &action, &reason, AF_INET)) {
5989 log = action != PF_PASS;
5992 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
5994 if (action == PF_PASS) {
5995 if (pfsync_update_state_ptr != NULL)
5996 pfsync_update_state_ptr(s);
6000 } else if (s == NULL)
6001 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6007 case IPPROTO_ICMPV6: {
6009 DPFPRINTF(PF_DEBUG_MISC,
6010 ("pf: dropping IPv4 packet with ICMPv6 payload\n"));
6016 action = pf_test_state_other(&s, dir, kif, m, &pd);
6017 if (action == PF_PASS) {
6018 if (pfsync_update_state_ptr != NULL)
6019 pfsync_update_state_ptr(s);
6023 } else if (s == NULL)
6024 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6031 if (action == PF_PASS && h->ip_hl > 5 &&
6032 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6034 REASON_SET(&reason, PFRES_IPOPTIONS);
6036 DPFPRINTF(PF_DEBUG_MISC,
6037 ("pf: dropping packet with ip options\n"));
6040 if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
6042 REASON_SET(&reason, PFRES_MEMORY);
6044 if (r->rtableid >= 0)
6045 M_SETFIB(m, r->rtableid);
6047 if (r->scrub_flags & PFSTATE_SETPRIO) {
6048 if (pd.tos & IPTOS_LOWDELAY)
6050 if (pf_ieee8021q_setpcp(m, r->set_prio[pqid])) {
6052 REASON_SET(&reason, PFRES_MEMORY);
6054 DPFPRINTF(PF_DEBUG_MISC,
6055 ("pf: failed to allocate 802.1q mtag\n"));
6060 if (action == PF_PASS && r->qid) {
6061 if (pd.pf_mtag == NULL &&
6062 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6064 REASON_SET(&reason, PFRES_MEMORY);
6067 pd.pf_mtag->qid_hash = pf_state_hash(s);
6068 if (pqid || (pd.tos & IPTOS_LOWDELAY))
6069 pd.pf_mtag->qid = r->pqid;
6071 pd.pf_mtag->qid = r->qid;
6072 /* Add hints for ecn. */
6073 pd.pf_mtag->hdr = h;
6080 * connections redirected to loopback should not match sockets
6081 * bound specifically to loopback due to security implications,
6082 * see tcp_input() and in_pcblookup_listen().
6084 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6085 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6086 (s->nat_rule.ptr->action == PF_RDR ||
6087 s->nat_rule.ptr->action == PF_BINAT) &&
6088 (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
6089 m->m_flags |= M_SKIP_FIREWALL;
6091 if (action == PF_PASS && r->divert.port && ip_divert_ptr != NULL &&
6092 !PACKET_LOOPED(&pd)) {
6094 ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0,
6095 sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
6096 if (ipfwtag != NULL) {
6097 ((struct ipfw_rule_ref *)(ipfwtag+1))->info =
6098 ntohs(r->divert.port);
6099 ((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir;
6104 m_tag_prepend(m, ipfwtag);
6105 if (m->m_flags & M_FASTFWD_OURS) {
6106 if (pd.pf_mtag == NULL &&
6107 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6109 REASON_SET(&reason, PFRES_MEMORY);
6111 DPFPRINTF(PF_DEBUG_MISC,
6112 ("pf: failed to allocate tag\n"));
6114 pd.pf_mtag->flags |=
6115 PF_FASTFWD_OURS_PRESENT;
6116 m->m_flags &= ~M_FASTFWD_OURS;
6119 ip_divert_ptr(*m0, dir == PF_IN ? DIR_IN : DIR_OUT);
6124 /* XXX: ipfw has the same behaviour! */
6126 REASON_SET(&reason, PFRES_MEMORY);
6128 DPFPRINTF(PF_DEBUG_MISC,
6129 ("pf: failed to allocate divert tag\n"));
6136 if (s != NULL && s->nat_rule.ptr != NULL &&
6137 s->nat_rule.ptr->log & PF_LOG_ALL)
6138 lr = s->nat_rule.ptr;
6141 PFLOG_PACKET(kif, m, AF_INET, dir, reason, lr, a, ruleset, &pd,
6145 kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
6146 kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
6148 if (action == PF_PASS || r->action == PF_DROP) {
6149 dirndx = (dir == PF_OUT);
6150 r->packets[dirndx]++;
6151 r->bytes[dirndx] += pd.tot_len;
6153 a->packets[dirndx]++;
6154 a->bytes[dirndx] += pd.tot_len;
6157 if (s->nat_rule.ptr != NULL) {
6158 s->nat_rule.ptr->packets[dirndx]++;
6159 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
6161 if (s->src_node != NULL) {
6162 s->src_node->packets[dirndx]++;
6163 s->src_node->bytes[dirndx] += pd.tot_len;
6165 if (s->nat_src_node != NULL) {
6166 s->nat_src_node->packets[dirndx]++;
6167 s->nat_src_node->bytes[dirndx] += pd.tot_len;
6169 dirndx = (dir == s->direction) ? 0 : 1;
6170 s->packets[dirndx]++;
6171 s->bytes[dirndx] += pd.tot_len;
6174 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6175 if (nr != NULL && r == &V_pf_default_rule)
6177 if (tr->src.addr.type == PF_ADDR_TABLE)
6178 pfr_update_stats(tr->src.addr.p.tbl,
6179 (s == NULL) ? pd.src :
6180 &s->key[(s->direction == PF_IN)]->
6181 addr[(s->direction == PF_OUT)],
6182 pd.af, pd.tot_len, dir == PF_OUT,
6183 r->action == PF_PASS, tr->src.neg);
6184 if (tr->dst.addr.type == PF_ADDR_TABLE)
6185 pfr_update_stats(tr->dst.addr.p.tbl,
6186 (s == NULL) ? pd.dst :
6187 &s->key[(s->direction == PF_IN)]->
6188 addr[(s->direction == PF_IN)],
6189 pd.af, pd.tot_len, dir == PF_OUT,
6190 r->action == PF_PASS, tr->dst.neg);
6194 case PF_SYNPROXY_DROP:
6205 /* pf_route() returns unlocked. */
6207 pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);
6221 pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
6223 struct pfi_kif *kif;
6224 u_short action, reason = 0, log = 0;
6225 struct mbuf *m = *m0, *n = NULL;
6227 struct ip6_hdr *h = NULL;
6228 struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
6229 struct pf_state *s = NULL;
6230 struct pf_ruleset *ruleset = NULL;
6232 int off, terminal = 0, dirndx, rh_cnt = 0, pqid = 0;
6237 /* Detect packet forwarding.
6238 * If the input interface is different from the output interface we're forwarding.
6240 * We do need to be careful about bridges. If the
6241 * net.link.bridge.pfil_bridge sysctl is set we can be filtering on a
6242 * bridge, so if the input interface is a bridge member and the output
6243 * interface is its bridge or a member of the same bridge we're not
6244 * actually forwarding but bridging.
6246 if (dir == PF_OUT && m->m_pkthdr.rcvif && ifp != m->m_pkthdr.rcvif &&
6247 (m->m_pkthdr.rcvif->if_bridge == NULL ||
6248 (m->m_pkthdr.rcvif->if_bridge != ifp->if_softc &&
6249 m->m_pkthdr.rcvif->if_bridge != ifp->if_bridge)))
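/*
 * When the test above concludes we really are forwarding, that fact
 * is recorded in fwdir, which the refragmentation check at the bottom
 * of this function consults (fwdir == PF_FWD) before calling
 * pf_refragment6() on packets that normalization reassembled on input.
 */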
6255 if (!V_pf_status.running)
6258 memset(&pd, 0, sizeof(pd));
6259 pd.pf_mtag = pf_find_mtag(m);
6261 if (pd.pf_mtag && pd.pf_mtag->flags & PF_TAG_GENERATED)
6264 kif = (struct pfi_kif *)ifp->if_pf_kif;
6266 DPFPRINTF(PF_DEBUG_URGENT,
6267 ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
6270 if (kif->pfik_flags & PFI_IFLAG_SKIP)
6273 if (m->m_flags & M_SKIP_FIREWALL)
6278 /* We do IP header normalization and packet reassembly here */
6279 if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
6283 m = *m0; /* pf_normalize messes with m0 */
6284 h = mtod(m, struct ip6_hdr *);
6288 * We do not support jumbograms yet.  A zero ip6_plen would give a bogus
6289 * tot_len below, so drop the packet for now.
6291 if (htons(h->ip6_plen) == 0) {
6293 REASON_SET(&reason, PFRES_NORM); /*XXX*/
6298 pd.src = (struct pf_addr *)&h->ip6_src;
6299 pd.dst = (struct pf_addr *)&h->ip6_dst;
6300 pd.sport = pd.dport = NULL;
6302 pd.proto_sum = NULL;
6304 pd.sidx = (dir == PF_IN) ? 0 : 1;
6305 pd.didx = (dir == PF_IN) ? 1 : 0;
6308 pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
6310 off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
6311 pd.proto = h->ip6_nxt;
6314 case IPPROTO_FRAGMENT:
6315 action = pf_test_fragment(&r, dir, kif, m, h,
6317 if (action == PF_DROP)
6318 REASON_SET(&reason, PFRES_FRAG);
6320 case IPPROTO_ROUTING: {
6321 struct ip6_rthdr rthdr;
6324 DPFPRINTF(PF_DEBUG_MISC,
6325 ("pf: IPv6 more than one rthdr\n"));
6327 REASON_SET(&reason, PFRES_IPOPTIONS);
6331 if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
6333 DPFPRINTF(PF_DEBUG_MISC,
6334 ("pf: IPv6 short rthdr\n"));
6336 REASON_SET(&reason, PFRES_SHORT);
6340 if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
6341 DPFPRINTF(PF_DEBUG_MISC,
6342 ("pf: IPv6 rthdr0\n"));
6344 REASON_SET(&reason, PFRES_IPOPTIONS);
6351 case IPPROTO_HOPOPTS:
6352 case IPPROTO_DSTOPTS: {
6353 /* get next header and header length */
6354 struct ip6_ext opt6;
6356 if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
6357 NULL, &reason, pd.af)) {
6358 DPFPRINTF(PF_DEBUG_MISC,
6359 ("pf: IPv6 short opt\n"));
6364 if (pd.proto == IPPROTO_AH)
6365 off += (opt6.ip6e_len + 2) * 4;
6367 off += (opt6.ip6e_len + 1) * 8;
6368 pd.proto = opt6.ip6e_nxt;
6369 /* go to the next header */
6376 } while (!terminal);
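/*
 * Note the asymmetry in the option walk above: AH measures its length
 * in 32-bit words with an implicit offset of 2, while hop-by-hop and
 * destination options use 8-octet units with an offset of 1, hence
 * the two different "off +=" computations.
 */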
6378 /* if there's no routing header, use unmodified mbuf for checksumming */
6388 if (!pf_pull_hdr(m, off, &th, sizeof(th),
6389 &action, &reason, AF_INET6)) {
6390 log = action != PF_PASS;
6393 pd.p_len = pd.tot_len - off - (th.th_off << 2);
6394 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6395 if (action == PF_DROP)
6397 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6399 if (action == PF_PASS) {
6400 if (pfsync_update_state_ptr != NULL)
6401 pfsync_update_state_ptr(s);
6405 } else if (s == NULL)
6406 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6415 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
6416 &action, &reason, AF_INET6)) {
6417 log = action != PF_PASS;
6420 if (uh.uh_dport == 0 ||
6421 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
6422 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
6424 REASON_SET(&reason, PFRES_SHORT);
6427 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6428 if (action == PF_PASS) {
6429 if (pfsync_update_state_ptr != NULL)
6430 pfsync_update_state_ptr(s);
6434 } else if (s == NULL)
6435 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6440 case IPPROTO_ICMP: {
6442 DPFPRINTF(PF_DEBUG_MISC,
6443 ("pf: dropping IPv6 packet with ICMPv4 payload\n"));
6447 case IPPROTO_ICMPV6: {
6448 struct icmp6_hdr ih;
6451 if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
6452 &action, &reason, AF_INET6)) {
6453 log = action != PF_PASS;
6456 action = pf_test_state_icmp(&s, dir, kif,
6457 m, off, h, &pd, &reason);
6458 if (action == PF_PASS) {
6459 if (pfsync_update_state_ptr != NULL)
6460 pfsync_update_state_ptr(s);
6464 } else if (s == NULL)
6465 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6471 action = pf_test_state_other(&s, dir, kif, m, &pd);
6472 if (action == PF_PASS) {
6473 if (pfsync_update_state_ptr != NULL)
6474 pfsync_update_state_ptr(s);
6478 } else if (s == NULL)
6479 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6491 /* handle dangerous IPv6 extension headers. */
6492 if (action == PF_PASS && rh_cnt &&
6493 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6495 REASON_SET(&reason, PFRES_IPOPTIONS);
6497 DPFPRINTF(PF_DEBUG_MISC,
6498 ("pf: dropping packet with dangerous v6 headers\n"));
6501 if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
6503 REASON_SET(&reason, PFRES_MEMORY);
6505 if (r->rtableid >= 0)
6506 M_SETFIB(m, r->rtableid);
6508 if (r->scrub_flags & PFSTATE_SETPRIO) {
6509 if (pd.tos & IPTOS_LOWDELAY)
6511 if (pf_ieee8021q_setpcp(m, r->set_prio[pqid])) {
6513 REASON_SET(&reason, PFRES_MEMORY);
6515 DPFPRINTF(PF_DEBUG_MISC,
6516 ("pf: failed to allocate 802.1q mtag\n"));
6521 if (action == PF_PASS && r->qid) {
6522 if (pd.pf_mtag == NULL &&
6523 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6525 REASON_SET(&reason, PFRES_MEMORY);
6528 pd.pf_mtag->qid_hash = pf_state_hash(s);
6529 if (pd.tos & IPTOS_LOWDELAY)
6530 pd.pf_mtag->qid = r->pqid;
6532 pd.pf_mtag->qid = r->qid;
6533 /* Add hints for ecn. */
6534 pd.pf_mtag->hdr = h;
6539 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6540 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6541 (s->nat_rule.ptr->action == PF_RDR ||
6542 s->nat_rule.ptr->action == PF_BINAT) &&
6543 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
6544 m->m_flags |= M_SKIP_FIREWALL;
6546 /* XXX: Anybody working on it?! */
6548 printf("pf: divert(9) is not supported for IPv6\n");
6553 if (s != NULL && s->nat_rule.ptr != NULL &&
6554 s->nat_rule.ptr->log & PF_LOG_ALL)
6555 lr = s->nat_rule.ptr;
6558 PFLOG_PACKET(kif, m, AF_INET6, dir, reason, lr, a, ruleset,
6562 kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
6563 kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
6565 if (action == PF_PASS || r->action == PF_DROP) {
6566 dirndx = (dir == PF_OUT);
6567 r->packets[dirndx]++;
6568 r->bytes[dirndx] += pd.tot_len;
6570 a->packets[dirndx]++;
6571 a->bytes[dirndx] += pd.tot_len;
6574 if (s->nat_rule.ptr != NULL) {
6575 s->nat_rule.ptr->packets[dirndx]++;
6576 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
6578 if (s->src_node != NULL) {
6579 s->src_node->packets[dirndx]++;
6580 s->src_node->bytes[dirndx] += pd.tot_len;
6582 if (s->nat_src_node != NULL) {
6583 s->nat_src_node->packets[dirndx]++;
6584 s->nat_src_node->bytes[dirndx] += pd.tot_len;
6586 dirndx = (dir == s->direction) ? 0 : 1;
6587 s->packets[dirndx]++;
6588 s->bytes[dirndx] += pd.tot_len;
6591 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6592 if (nr != NULL && r == &V_pf_default_rule)
6594 if (tr->src.addr.type == PF_ADDR_TABLE)
6595 pfr_update_stats(tr->src.addr.p.tbl,
6596 (s == NULL) ? pd.src :
6597 &s->key[(s->direction == PF_IN)]->addr[0],
6598 pd.af, pd.tot_len, dir == PF_OUT,
6599 r->action == PF_PASS, tr->src.neg);
6600 if (tr->dst.addr.type == PF_ADDR_TABLE)
6601 pfr_update_stats(tr->dst.addr.p.tbl,
6602 (s == NULL) ? pd.dst :
6603 &s->key[(s->direction == PF_IN)]->addr[1],
6604 pd.af, pd.tot_len, dir == PF_OUT,
6605 r->action == PF_PASS, tr->dst.neg);
6609 case PF_SYNPROXY_DROP:
6620 /* pf_route6() returns unlocked. */
6622 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
6631 /* If reassembled packet passed, create new fragments. */
6632 if (action == PF_PASS && *m0 && fwdir == PF_FWD &&
6633 (mtag = m_tag_find(m, PF_REASSEMBLED, NULL)) != NULL)
6634 action = pf_refragment6(ifp, m0, mtag);