/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2008 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/random.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/ucred.h>

#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/route.h>
#include <net/radix_mpath.h>

#include <net/pfvar.h>
#include <net/if_pflog.h>
#include <net/if_pfsync.h>

#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/in_fib.h>
#include <netinet/ip.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <netpfil/ipfw/ip_fw_private.h> /* XXX: only for DIR_IN/DIR_OUT */
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_fib.h>
#include <netinet6/scope6_var.h>
#endif /* INET6 */

#if defined(SCTP) || defined(SCTP_SUPPORT)
#include <netinet/sctp_crc32.h>
#endif
#include <machine/in_cksum.h>
#include <security/mac/mac_framework.h>

#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x
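/*
 * Illustrative usage (added commentary, not from the original source):
 * DPFPRINTF() takes its printf() arguments as a single parenthesized
 * list, so a typical call reads:
 *
 *	DPFPRINTF(PF_DEBUG_MISC, ("pf: something notable: %d\n", val));
 *
 * The doubled parentheses let a variable-length argument list travel
 * through the single macro parameter 'x'.
 */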
VNET_DEFINE(struct pf_altqqueue,	pf_altqs[4]);
VNET_DEFINE(struct pf_palist,		pf_pabuf);
VNET_DEFINE(struct pf_altqqueue *,	pf_altqs_active);
VNET_DEFINE(struct pf_altqqueue *,	pf_altq_ifs_active);
VNET_DEFINE(struct pf_altqqueue *,	pf_altqs_inactive);
VNET_DEFINE(struct pf_altqqueue *,	pf_altq_ifs_inactive);
VNET_DEFINE(struct pf_kstatus,		pf_status);

VNET_DEFINE(u_int32_t,			ticket_altqs_active);
VNET_DEFINE(u_int32_t,			ticket_altqs_inactive);
VNET_DEFINE(int,			altqs_inactive_open);
VNET_DEFINE(u_int32_t,			ticket_pabuf);

VNET_DEFINE(MD5_CTX,			pf_tcp_secret_ctx);
#define	V_pf_tcp_secret_ctx		VNET(pf_tcp_secret_ctx)
VNET_DEFINE(u_char,			pf_tcp_secret[16]);
#define	V_pf_tcp_secret			VNET(pf_tcp_secret)
VNET_DEFINE(int,			pf_tcp_secret_init);
#define	V_pf_tcp_secret_init		VNET(pf_tcp_secret_init)
VNET_DEFINE(int,			pf_tcp_iss_off);
#define	V_pf_tcp_iss_off		VNET(pf_tcp_iss_off)
VNET_DECLARE(int,			pf_vnet_active);
#define	V_pf_vnet_active		VNET(pf_vnet_active)

VNET_DEFINE_STATIC(uint32_t, pf_purge_idx);
#define	V_pf_purge_idx	VNET(pf_purge_idx)
/*
 * Queue for pf_intr() sends.
 */
static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations");
struct pf_send_entry {
	STAILQ_ENTRY(pf_send_entry)	pfse_next;
	struct mbuf			*pfse_m;
	enum {
		PFSE_IP,
		PFSE_IP6,
		PFSE_ICMP,
		PFSE_ICMP6,
	}				pfse_type;
	struct {
		int		type;
		int		code;
		int		mtu;
	} icmpopts;
};

STAILQ_HEAD(pf_send_head, pf_send_entry);
VNET_DEFINE_STATIC(struct pf_send_head, pf_sendqueue);
#define	V_pf_sendqueue	VNET(pf_sendqueue)

static struct mtx pf_sendqueue_mtx;
MTX_SYSINIT(pf_sendqueue_mtx, &pf_sendqueue_mtx, "pf send queue", MTX_DEF);
#define	PF_SENDQ_LOCK()		mtx_lock(&pf_sendqueue_mtx)
#define	PF_SENDQ_UNLOCK()	mtx_unlock(&pf_sendqueue_mtx)

/*
 * Queue for pf_overload_task() tasks.
 */
struct pf_overload_entry {
	SLIST_ENTRY(pf_overload_entry)	next;
	struct pf_addr			addr;
	sa_family_t			af;
	uint8_t				dir;
	struct pf_rule			*rule;
};

SLIST_HEAD(pf_overload_head, pf_overload_entry);
VNET_DEFINE_STATIC(struct pf_overload_head, pf_overloadqueue);
#define	V_pf_overloadqueue	VNET(pf_overloadqueue)
VNET_DEFINE_STATIC(struct task, pf_overloadtask);
#define	V_pf_overloadtask	VNET(pf_overloadtask)

static struct mtx pf_overloadqueue_mtx;
MTX_SYSINIT(pf_overloadqueue_mtx, &pf_overloadqueue_mtx,
    "pf overload/flush queue", MTX_DEF);
#define	PF_OVERLOADQ_LOCK()	mtx_lock(&pf_overloadqueue_mtx)
#define	PF_OVERLOADQ_UNLOCK()	mtx_unlock(&pf_overloadqueue_mtx)
VNET_DEFINE(struct pf_rulequeue, pf_unlinked_rules);
struct mtx pf_unlnkdrules_mtx;
MTX_SYSINIT(pf_unlnkdrules_mtx, &pf_unlnkdrules_mtx, "pf unlinked rules",
    MTX_DEF);

VNET_DEFINE_STATIC(uma_zone_t,	pf_sources_z);
#define	V_pf_sources_z	VNET(pf_sources_z)
uma_zone_t		pf_mtag_z;
VNET_DEFINE(uma_zone_t,	pf_state_z);
VNET_DEFINE(uma_zone_t,	pf_state_key_z);

VNET_DEFINE(uint64_t, pf_stateid[MAXCPU]);
#define	PFID_CPUBITS	8
#define	PFID_CPUSHIFT	(sizeof(uint64_t) * NBBY - PFID_CPUBITS)
#define	PFID_CPUMASK	((uint64_t)((1 << PFID_CPUBITS) - 1) << PFID_CPUSHIFT)
#define	PFID_MAXID	(~PFID_CPUMASK)
CTASSERT((1 << PFID_CPUBITS) >= MAXCPU);
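/*
 * Illustrative note (added commentary, not from the original source):
 * state IDs are 64-bit values whose top PFID_CPUBITS (8) bits encode
 * the CPU that allocated the ID, while the low 56 bits come from a
 * per-CPU counter.  With PFID_CPUBITS == 8:
 *
 *	PFID_CPUSHIFT = 64 - 8 = 56
 *	PFID_CPUMASK  = 0xff00000000000000
 *	PFID_MAXID    = 0x00ffffffffffffff
 *
 * so CPU 2 allocating counter value 5 yields the ID
 * (2ULL << 56) | 5 = 0x0200000000000005.  The CTASSERT guarantees the
 * CPU field is wide enough to hold any CPU number up to MAXCPU.
 */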
static void		 pf_src_tree_remove_state(struct pf_state *);
static void		 pf_init_threshold(struct pf_threshold *, u_int32_t,
			    u_int32_t);
static void		 pf_add_threshold(struct pf_threshold *);
static int		 pf_check_threshold(struct pf_threshold *);

static void		 pf_change_ap(struct mbuf *, struct pf_addr *, u_int16_t *,
			    u_int16_t *, u_int16_t *, struct pf_addr *,
			    u_int16_t, u_int8_t, sa_family_t);
static int		 pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
			    struct tcphdr *, struct pf_state_peer *);
static void		 pf_change_icmp(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, struct pf_addr *, u_int16_t,
			    u_int16_t *, u_int16_t *, u_int16_t *,
			    u_int16_t *, u_int8_t, sa_family_t);
static void		 pf_send_tcp(struct mbuf *,
			    const struct pf_rule *, sa_family_t,
			    const struct pf_addr *, const struct pf_addr *,
			    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
			    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
			    u_int16_t, struct ifnet *);
static void		 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
			    sa_family_t, struct pf_rule *);
static void		 pf_detach_state(struct pf_state *);
static int		 pf_state_key_attach(struct pf_state_key *,
			    struct pf_state_key *, struct pf_state *);
static void		 pf_state_key_detach(struct pf_state *, int);
static int		 pf_state_key_ctor(void *, int, void *, int);
static u_int32_t	 pf_tcp_iss(struct pf_pdesc *);
static int		 pf_test_rule(struct pf_rule **, struct pf_state **,
			    int, struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **, struct inpcb *);
static int		 pf_create_state(struct pf_rule *, struct pf_rule *,
			    struct pf_rule *, struct pf_pdesc *,
			    struct pf_ksrc_node *, struct pf_state_key *,
			    struct pf_state_key *, struct mbuf *, int,
			    u_int16_t, u_int16_t, int *, struct pfi_kif *,
			    struct pf_state **, int, u_int16_t, u_int16_t,
			    int);
static int		 pf_test_fragment(struct pf_rule **, int,
			    struct pfi_kif *, struct mbuf *, void *,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **);
static int		 pf_tcp_track_full(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, u_short *, int *);
static int		 pf_tcp_track_sloppy(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pf_pdesc *, u_short *);
static int		 pf_test_state_tcp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_udp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *);
static int		 pf_test_state_icmp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_other(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
static u_int8_t		 pf_get_wscale(struct mbuf *, int, u_int16_t,
			    sa_family_t);
static u_int16_t	 pf_get_mss(struct mbuf *, int, u_int16_t,
			    sa_family_t);
static u_int16_t	 pf_calc_mss(struct pf_addr *, sa_family_t,
			    int, u_int16_t);
static int		 pf_check_proto_cksum(struct mbuf *, int, int,
			    u_int8_t, sa_family_t);
static void		 pf_print_state_parts(struct pf_state *,
			    struct pf_state_key *, struct pf_state_key *);
static int		 pf_addr_wrap_neq(struct pf_addr_wrap *,
			    struct pf_addr_wrap *);
static void		 pf_patch_8(struct mbuf *, u_int16_t *, u_int8_t *, u_int8_t,
			    bool, u_int8_t);
static struct pf_state	*pf_find_state(struct pfi_kif *,
			    struct pf_state_key_cmp *, u_int);
static int		 pf_src_connlimit(struct pf_state **);
static void		 pf_overload_task(void *v, int pending);
static int		 pf_insert_src_node(struct pf_ksrc_node **,
			    struct pf_rule *, struct pf_addr *, sa_family_t);
static u_int		 pf_purge_expired_states(u_int, int);
static void		 pf_purge_unlinked_rules(void);
static int		 pf_mtag_uminit(void *, int, int);
static void		 pf_mtag_free(struct m_tag *);
#ifdef INET
static void		 pf_route(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *, struct inpcb *);
#endif /* INET */
#ifdef INET6
static void		 pf_change_a6(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, u_int8_t);
static void		 pf_route6(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *, struct inpcb *);
#endif /* INET6 */
int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);

extern int pf_end_threads;
extern struct proc *pf_purge_proc;

VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);

#define	PACKET_LOOPED(pd)	((pd)->pf_mtag &&			\
				 (pd)->pf_mtag->flags & PF_PACKET_LOOPED)

#define	STATE_LOOKUP(i, k, d, s, pd)					\
	do {								\
		(s) = pf_find_state((i), (k), (d));			\
		if ((s) == NULL)					\
			return (PF_DROP);				\
		if (PACKET_LOOPED(pd))					\
			return (PF_PASS);				\
		if ((d) == PF_OUT &&					\
		    (((s)->rule.ptr->rt == PF_ROUTETO &&		\
		    (s)->rule.ptr->direction == PF_OUT) ||		\
		    ((s)->rule.ptr->rt == PF_REPLYTO &&			\
		    (s)->rule.ptr->direction == PF_IN)) &&		\
		    (s)->rt_kif != NULL &&				\
		    (s)->rt_kif != (i))					\
			return (PF_PASS);				\
	} while (0)

#define	BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all
#define	STATE_INC_COUNTERS(s)						\
	do {								\
		counter_u64_add(s->rule.ptr->states_cur, 1);		\
		counter_u64_add(s->rule.ptr->states_tot, 1);		\
		if (s->anchor.ptr != NULL) {				\
			counter_u64_add(s->anchor.ptr->states_cur, 1);	\
			counter_u64_add(s->anchor.ptr->states_tot, 1);	\
		}							\
		if (s->nat_rule.ptr != NULL) {				\
			counter_u64_add(s->nat_rule.ptr->states_cur, 1);\
			counter_u64_add(s->nat_rule.ptr->states_tot, 1);\
		}							\
	} while (0)

#define	STATE_DEC_COUNTERS(s)						\
	do {								\
		if (s->nat_rule.ptr != NULL)				\
			counter_u64_add(s->nat_rule.ptr->states_cur, -1);\
		if (s->anchor.ptr != NULL)				\
			counter_u64_add(s->anchor.ptr->states_cur, -1);	\
		counter_u64_add(s->rule.ptr->states_cur, -1);		\
	} while (0)
MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
VNET_DEFINE(struct pf_idhash *, pf_idhash);
VNET_DEFINE(struct pf_srchash *, pf_srchash);

SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW, 0, "pf(4)");

u_long	pf_hashmask;
u_long	pf_srchashmask;
static u_long	pf_hashsize;
static u_long	pf_srchashsize;
u_long	pf_ioctl_maxcount = 65535;

SYSCTL_ULONG(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN,
    &pf_hashsize, 0, "Size of pf(4) states hashtable");
SYSCTL_ULONG(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN,
    &pf_srchashsize, 0, "Size of pf(4) source nodes hashtable");
SYSCTL_ULONG(_net_pf, OID_AUTO, request_maxcount, CTLFLAG_RWTUN,
    &pf_ioctl_maxcount, 0, "Maximum number of tables, addresses, ... in a single ioctl() call");

VNET_DEFINE(void *, pf_swi_cookie);

VNET_DEFINE(uint32_t, pf_hashseed);
#define	V_pf_hashseed	VNET(pf_hashseed)
int
pf_addr_cmp(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
{

	switch (af) {
#ifdef INET
	case AF_INET:
		if (a->addr32[0] > b->addr32[0])
			return (1);
		if (a->addr32[0] < b->addr32[0])
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (a->addr32[3] > b->addr32[3])
			return (1);
		if (a->addr32[3] < b->addr32[3])
			return (-1);
		if (a->addr32[2] > b->addr32[2])
			return (1);
		if (a->addr32[2] < b->addr32[2])
			return (-1);
		if (a->addr32[1] > b->addr32[1])
			return (1);
		if (a->addr32[1] < b->addr32[1])
			return (-1);
		if (a->addr32[0] > b->addr32[0])
			return (1);
		if (a->addr32[0] < b->addr32[0])
			return (-1);
		break;
#endif /* INET6 */
	default:
		panic("%s: unknown address family %u", __func__, af);
	}

	return (0);
}
static __inline uint32_t
pf_hashkey(struct pf_state_key *sk)
{
	uint32_t h;

	h = murmur3_32_hash32((uint32_t *)sk,
	    sizeof(struct pf_state_key_cmp)/sizeof(uint32_t),
	    V_pf_hashseed);

	return (h & pf_hashmask);
}

static __inline uint32_t
pf_hashsrc(struct pf_addr *addr, sa_family_t af)
{
	uint32_t h;

	switch (af) {
	case AF_INET:
		h = murmur3_32_hash32((uint32_t *)&addr->v4,
		    sizeof(addr->v4)/sizeof(uint32_t), V_pf_hashseed);
		break;
	case AF_INET6:
		h = murmur3_32_hash32((uint32_t *)&addr->v6,
		    sizeof(addr->v6)/sizeof(uint32_t), V_pf_hashseed);
		break;
	default:
		panic("%s: unknown address family %u", __func__, af);
	}

	return (h & pf_srchashmask);
}
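/*
 * Illustrative note (added commentary, not from the original source):
 * both hashes run murmur3 over the raw words of the key, so the length
 * argument is a word count, not a byte count: for an IPv4 address
 * sizeof(addr->v4)/sizeof(uint32_t) == 1, while for IPv6 it is 4.
 * Because the table sizes are forced to powers of two in
 * pf_initialize(), masking with pf_hashmask / pf_srchashmask is
 * equivalent to reducing the hash modulo the table size.
 */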
#ifdef ALTQ
static int
pf_state_hash(struct pf_state *s)
{
	u_int32_t hv = (intptr_t)s / sizeof(*s);

	hv ^= crc32(&s->src, sizeof(s->src));
	hv ^= crc32(&s->dst, sizeof(s->dst));
	if (hv == 0)
		hv = 1;

	return (hv);
}
#endif

void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
#endif /* INET */
	case AF_INET6:
		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];
		break;
	}
}
static void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = time_uptime;
}

static void
pf_add_threshold(struct pf_threshold *threshold)
{
	u_int32_t t = time_uptime, diff = t - threshold->last;

	if (diff >= threshold->seconds)
		threshold->count = 0;
	else
		threshold->count -= threshold->count * diff /
		    threshold->seconds;
	threshold->count += PF_THRESHOLD_MULT;
	threshold->last = t;
}

static int
pf_check_threshold(struct pf_threshold *threshold)
{
	return (threshold->count > threshold->limit);
}
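/*
 * Illustrative sketch (added commentary, not from the original source):
 * the threshold is a fixed-point rate estimator.  Counts are scaled by
 * PF_THRESHOLD_MULT so the linear decay in pf_add_threshold() keeps
 * fractional precision in integer math.  For a rule with
 * "max-src-conn-rate 10/5" (limit 10, seconds 5):
 *
 *	threshold->limit = 10 * PF_THRESHOLD_MULT
 *
 * and each new connection first decays the running count by
 * count * diff / seconds (the fraction of the window that has
 * elapsed), then adds one connection's worth (PF_THRESHOLD_MULT).
 * Eleven back-to-back connections inside the window push count past
 * limit and pf_check_threshold() trips.
 */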
static int
pf_src_connlimit(struct pf_state **state)
{
	struct pf_overload_entry *pfoe;
	int bad = 0;

	PF_STATE_LOCK_ASSERT(*state);

	(*state)->src_node->conn++;
	(*state)->src.tcp_est = 1;
	pf_add_threshold(&(*state)->src_node->conn_rate);

	if ((*state)->rule.ptr->max_src_conn &&
	    (*state)->rule.ptr->max_src_conn <
	    (*state)->src_node->conn) {
		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONN], 1);
		bad++;
	}

	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONNRATE], 1);
		bad++;
	}

	if (!bad)
		return (0);

	/* Kill this state. */
	(*state)->timeout = PFTM_PURGE;
	(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;

	if ((*state)->rule.ptr->overload_tbl == NULL)
		return (1);

	/* Schedule overloading and flushing task. */
	pfoe = malloc(sizeof(*pfoe), M_PFTEMP, M_NOWAIT);
	if (pfoe == NULL)
		return (1);	/* too bad :( */

	bcopy(&(*state)->src_node->addr, &pfoe->addr, sizeof(pfoe->addr));
	pfoe->af = (*state)->key[PF_SK_WIRE]->af;
	pfoe->rule = (*state)->rule.ptr;
	pfoe->dir = (*state)->direction;
	PF_OVERLOADQ_LOCK();
	SLIST_INSERT_HEAD(&V_pf_overloadqueue, pfoe, next);
	PF_OVERLOADQ_UNLOCK();
	taskqueue_enqueue(taskqueue_swi, &V_pf_overloadtask);

	return (1);
}
static void
pf_overload_task(void *v, int pending)
{
	struct pf_overload_head queue;
	struct pfr_addr p;
	struct pf_overload_entry *pfoe, *pfoe1;
	uint32_t killed = 0;

	CURVNET_SET((struct vnet *)v);

	PF_OVERLOADQ_LOCK();
	queue = V_pf_overloadqueue;
	SLIST_INIT(&V_pf_overloadqueue);
	PF_OVERLOADQ_UNLOCK();

	bzero(&p, sizeof(p));
	SLIST_FOREACH(pfoe, &queue, next) {
		counter_u64_add(V_pf_status.lcounters[LCNT_OVERLOAD_TABLE], 1);
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("%s: blocking address ", __func__);
			pf_print_host(&pfoe->addr, 0, pfoe->af);
			printf("\n");
		}

		p.pfra_af = pfoe->af;
		switch (pfoe->af) {
#ifdef INET
		case AF_INET:
			p.pfra_net = 32;
			p.pfra_ip4addr = pfoe->addr.v4;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			p.pfra_net = 128;
			p.pfra_ip6addr = pfoe->addr.v6;
			break;
#endif
		}

		pfr_insert_kentry(pfoe->rule->overload_tbl, &p, time_second);
	}

	/*
	 * Remove those entries, that don't need flushing.
	 */
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		if (pfoe->rule->flush == 0) {
			SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next);
			free(pfoe, M_PFTEMP);
		} else
			counter_u64_add(
			    V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH], 1);

	/* If nothing to flush, return. */
	if (SLIST_EMPTY(&queue)) {
		CURVNET_RESTORE();
		return;
	}

	for (int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_state_key *sk;
		struct pf_state *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			sk = s->key[PF_SK_WIRE];
			SLIST_FOREACH(pfoe, &queue, next)
				if (sk->af == pfoe->af &&
				    ((pfoe->rule->flush & PF_FLUSH_GLOBAL) ||
				    pfoe->rule == s->rule.ptr) &&
				    ((pfoe->dir == PF_OUT &&
				    PF_AEQ(&pfoe->addr, &sk->addr[1], sk->af)) ||
				    (pfoe->dir == PF_IN &&
				    PF_AEQ(&pfoe->addr, &sk->addr[0], sk->af)))) {
					s->timeout = PFTM_PURGE;
					s->src.state = s->dst.state = TCPS_CLOSED;
					killed++;
				}
		}
		PF_HASHROW_UNLOCK(ih);
	}
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		free(pfoe, M_PFTEMP);
	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("%s: %u states killed", __func__, killed);

	CURVNET_RESTORE();
}
/*
 * Can return locked on failure, so that we can consistently
 * allocate and insert a new one.
 */
struct pf_ksrc_node *
pf_find_src_node(struct pf_addr *src, struct pf_rule *rule, sa_family_t af,
	int returnlocked)
{
	struct pf_srchash *sh;
	struct pf_ksrc_node *n;

	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_SEARCH], 1);

	sh = &V_pf_srchash[pf_hashsrc(src, af)];
	PF_HASHROW_LOCK(sh);
	LIST_FOREACH(n, &sh->nodes, entry)
		if (n->rule.ptr == rule && n->af == af &&
		    ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) ||
		    (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0)))
			break;
	if (n != NULL) {
		n->states++;
		PF_HASHROW_UNLOCK(sh);
	} else if (returnlocked == 0)
		PF_HASHROW_UNLOCK(sh);

	return (n);
}
static void
pf_free_src_node(struct pf_ksrc_node *sn)
{

	for (int i = 0; i < 2; i++) {
		counter_u64_free(sn->bytes[i]);
		counter_u64_free(sn->packets[i]);
	}
	uma_zfree(V_pf_sources_z, sn);
}

static int
pf_insert_src_node(struct pf_ksrc_node **sn, struct pf_rule *rule,
    struct pf_addr *src, sa_family_t af)
{

	KASSERT((rule->rule_flag & PFRULE_SRCTRACK ||
	    rule->rpool.opts & PF_POOL_STICKYADDR),
	    ("%s for non-tracking rule %p", __func__, rule));

	if (*sn == NULL)
		*sn = pf_find_src_node(src, rule, af, 1);

	if (*sn == NULL) {
		struct pf_srchash *sh = &V_pf_srchash[pf_hashsrc(src, af)];

		PF_HASHROW_ASSERT(sh);

		if (!rule->max_src_nodes ||
		    counter_u64_fetch(rule->src_nodes) < rule->max_src_nodes)
			(*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO);
		else
			counter_u64_add(V_pf_status.lcounters[LCNT_SRCNODES],
			    1);
		if ((*sn) == NULL) {
			PF_HASHROW_UNLOCK(sh);
			return (-1);
		}

		for (int i = 0; i < 2; i++) {
			(*sn)->bytes[i] = counter_u64_alloc(M_NOWAIT);
			(*sn)->packets[i] = counter_u64_alloc(M_NOWAIT);

			if ((*sn)->bytes[i] == NULL || (*sn)->packets[i] == NULL) {
				pf_free_src_node(*sn);
				PF_HASHROW_UNLOCK(sh);
				return (-1);
			}
		}

		pf_init_threshold(&(*sn)->conn_rate,
		    rule->max_src_conn_rate.limit,
		    rule->max_src_conn_rate.seconds);

		(*sn)->af = af;
		(*sn)->rule.ptr = rule;
		PF_ACPY(&(*sn)->addr, src, af);
		LIST_INSERT_HEAD(&sh->nodes, *sn, entry);
		(*sn)->creation = time_uptime;
		(*sn)->ruletype = rule->action;
		(*sn)->states = 1;
		if ((*sn)->rule.ptr != NULL)
			counter_u64_add((*sn)->rule.ptr->src_nodes, 1);
		PF_HASHROW_UNLOCK(sh);
		counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_INSERT], 1);
	} else {
		if (rule->max_src_states &&
		    (*sn)->states >= rule->max_src_states) {
			counter_u64_add(V_pf_status.lcounters[LCNT_SRCSTATES],
			    1);
			return (-1);
		}
	}
	return (0);
}
void
pf_unlink_src_node(struct pf_ksrc_node *src)
{

	PF_HASHROW_ASSERT(&V_pf_srchash[pf_hashsrc(&src->addr, src->af)]);
	LIST_REMOVE(src, entry);
	if (src->rule.ptr)
		counter_u64_add(src->rule.ptr->src_nodes, -1);
}

u_int
pf_free_src_nodes(struct pf_ksrc_node_list *head)
{
	struct pf_ksrc_node *sn, *tmp;
	u_int count = 0;

	LIST_FOREACH_SAFE(sn, head, entry, tmp) {
		pf_free_src_node(sn);
		count++;
	}

	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], count);

	return (count);
}

void
pf_mtag_initialize()
{

	pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) +
	    sizeof(struct pf_mtag), NULL, NULL, pf_mtag_uminit, NULL,
	    UMA_ALIGN_PTR, 0);
}
/* Per-vnet data storage structures initialization. */
void
pf_initialize()
{
	struct pf_keyhash	*kh;
	struct pf_idhash	*ih;
	struct pf_srchash	*sh;
	u_int i;

	if (pf_hashsize == 0 || !powerof2(pf_hashsize))
		pf_hashsize = PF_HASHSIZ;
	if (pf_srchashsize == 0 || !powerof2(pf_srchashsize))
		pf_srchashsize = PF_SRCHASHSIZ;

	V_pf_hashseed = arc4random();

	/* States and state keys storage. */
	V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_state),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z;
	uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT);
	uma_zone_set_warning(V_pf_state_z, "PF states limit reached");

	V_pf_state_key_z = uma_zcreate("pf state keys",
	    sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	V_pf_keyhash = mallocarray(pf_hashsize, sizeof(struct pf_keyhash),
	    M_PFHASH, M_NOWAIT | M_ZERO);
	V_pf_idhash = mallocarray(pf_hashsize, sizeof(struct pf_idhash),
	    M_PFHASH, M_NOWAIT | M_ZERO);
	if (V_pf_keyhash == NULL || V_pf_idhash == NULL) {
		printf("pf: Unable to allocate memory for "
		    "state_hashsize %lu.\n", pf_hashsize);

		free(V_pf_keyhash, M_PFHASH);
		free(V_pf_idhash, M_PFHASH);

		pf_hashsize = PF_HASHSIZ;
		V_pf_keyhash = mallocarray(pf_hashsize,
		    sizeof(struct pf_keyhash), M_PFHASH, M_WAITOK | M_ZERO);
		V_pf_idhash = mallocarray(pf_hashsize,
		    sizeof(struct pf_idhash), M_PFHASH, M_WAITOK | M_ZERO);
	}

	pf_hashmask = pf_hashsize - 1;
	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
	    i++, kh++, ih++) {
		mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK);
		mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF);
	}

	/* Source nodes. */
	V_pf_sources_z = uma_zcreate("pf source nodes",
	    sizeof(struct pf_ksrc_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    0);
	V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z;
	uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT);
	uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached");

	V_pf_srchash = mallocarray(pf_srchashsize,
	    sizeof(struct pf_srchash), M_PFHASH, M_NOWAIT | M_ZERO);
	if (V_pf_srchash == NULL) {
		printf("pf: Unable to allocate memory for "
		    "source_hashsize %lu.\n", pf_srchashsize);

		pf_srchashsize = PF_SRCHASHSIZ;
		V_pf_srchash = mallocarray(pf_srchashsize,
		    sizeof(struct pf_srchash), M_PFHASH, M_WAITOK | M_ZERO);
	}

	pf_srchashmask = pf_srchashsize - 1;
	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++)
		mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);

	/* ALTQ */
	TAILQ_INIT(&V_pf_altqs[0]);
	TAILQ_INIT(&V_pf_altqs[1]);
	TAILQ_INIT(&V_pf_altqs[2]);
	TAILQ_INIT(&V_pf_altqs[3]);
	TAILQ_INIT(&V_pf_pabuf);
	V_pf_altqs_active = &V_pf_altqs[0];
	V_pf_altq_ifs_active = &V_pf_altqs[1];
	V_pf_altqs_inactive = &V_pf_altqs[2];
	V_pf_altq_ifs_inactive = &V_pf_altqs[3];

	/* Send & overload+flush queues. */
	STAILQ_INIT(&V_pf_sendqueue);
	SLIST_INIT(&V_pf_overloadqueue);
	TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, curvnet);

	/* Unlinked, but may be referenced rules. */
	TAILQ_INIT(&V_pf_unlinked_rules);
}
void
pf_mtag_cleanup()
{

	uma_zdestroy(pf_mtag_z);
}

void
pf_cleanup()
{
	struct pf_keyhash	*kh;
	struct pf_idhash	*ih;
	struct pf_srchash	*sh;
	struct pf_send_entry	*pfse, *next;
	u_int i;

	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
	    i++, kh++, ih++) {
		KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
		    __func__));
		KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty",
		    __func__));
		mtx_destroy(&kh->lock);
		mtx_destroy(&ih->lock);
	}
	free(V_pf_keyhash, M_PFHASH);
	free(V_pf_idhash, M_PFHASH);

	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
		KASSERT(LIST_EMPTY(&sh->nodes),
		    ("%s: source node hash not empty", __func__));
		mtx_destroy(&sh->lock);
	}
	free(V_pf_srchash, M_PFHASH);

	STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) {
		m_freem(pfse->pfse_m);
		free(pfse, M_PFTEMP);
	}

	uma_zdestroy(V_pf_sources_z);
	uma_zdestroy(V_pf_state_z);
	uma_zdestroy(V_pf_state_key_z);
}
static int
pf_mtag_uminit(void *mem, int size, int how)
{
	struct m_tag *t;

	t = (struct m_tag *)mem;
	t->m_tag_cookie = MTAG_ABI_COMPAT;
	t->m_tag_id = PACKET_TAG_PF;
	t->m_tag_len = sizeof(struct pf_mtag);
	t->m_tag_free = pf_mtag_free;

	return (0);
}

static void
pf_mtag_free(struct m_tag *t)
{

	uma_zfree(pf_mtag_z, t);
}

struct pf_mtag *
pf_get_mtag(struct mbuf *m)
{
	struct m_tag *mtag;

	if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL)
		return ((struct pf_mtag *)(mtag + 1));

	mtag = uma_zalloc(pf_mtag_z, M_NOWAIT);
	if (mtag == NULL)
		return (NULL);
	bzero(mtag + 1, sizeof(struct pf_mtag));
	m_tag_prepend(m, mtag);

	return ((struct pf_mtag *)(mtag + 1));
}
static int
pf_state_key_attach(struct pf_state_key *skw, struct pf_state_key *sks,
    struct pf_state *s)
{
	struct pf_keyhash	*khs, *khw, *kh;
	struct pf_state_key	*sk, *cur;
	struct pf_state		*si, *olds = NULL;
	int idx;

	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
	KASSERT(s->key[PF_SK_WIRE] == NULL, ("%s: state has key", __func__));
	KASSERT(s->key[PF_SK_STACK] == NULL, ("%s: state has key", __func__));

	/*
	 * We need to lock hash slots of both keys. To avoid deadlock
	 * we always lock the slot with lower address first. Unlock order
	 * isn't important.
	 *
	 * We also need to lock ID hash slot before dropping key
	 * locks. On success we return with ID hash slot locked.
	 */

	if (skw == sks) {
		khs = khw = &V_pf_keyhash[pf_hashkey(skw)];
		PF_HASHROW_LOCK(khs);
	} else {
		khs = &V_pf_keyhash[pf_hashkey(sks)];
		khw = &V_pf_keyhash[pf_hashkey(skw)];
		if (khs == khw) {
			PF_HASHROW_LOCK(khs);
		} else if (khs < khw) {
			PF_HASHROW_LOCK(khs);
			PF_HASHROW_LOCK(khw);
		} else {
			PF_HASHROW_LOCK(khw);
			PF_HASHROW_LOCK(khs);
		}
	}

#define	KEYS_UNLOCK()	do {			\
	if (khs != khw) {			\
		PF_HASHROW_UNLOCK(khs);		\
		PF_HASHROW_UNLOCK(khw);		\
	} else					\
		PF_HASHROW_UNLOCK(khs);		\
} while (0)

	/*
	 * First run: start with wire key.
	 */
	sk = skw;
	kh = khw;
	idx = PF_SK_WIRE;

keyattach:
	LIST_FOREACH(cur, &kh->keys, entry)
		if (bcmp(cur, sk, sizeof(struct pf_state_key_cmp)) == 0)
			break;

	if (cur != NULL) {
		/* Key exists. Check for same kif, if none, add to key. */
		TAILQ_FOREACH(si, &cur->states[idx], key_list[idx]) {
			struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(si)];

			PF_HASHROW_LOCK(ih);
			if (si->kif == s->kif &&
			    si->direction == s->direction) {
				if (sk->proto == IPPROTO_TCP &&
				    si->src.state >= TCPS_FIN_WAIT_2 &&
				    si->dst.state >= TCPS_FIN_WAIT_2) {
					/*
					 * New state matches an old >FIN_WAIT_2
					 * state. We can't drop key hash locks,
					 * thus we can't unlink it properly.
					 *
					 * As a workaround we drop it into
					 * TCPS_CLOSED state, schedule purge
					 * ASAP and push it into the very end
					 * of the slot TAILQ, so that it won't
					 * conflict with our new state.
					 */
					si->src.state = si->dst.state =
					    TCPS_CLOSED;
					si->timeout = PFTM_PURGE;
					olds = si;
				} else {
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						printf("pf: %s key attach "
						    "failed on %s: ",
						    (idx == PF_SK_WIRE) ?
						    "wire" : "stack",
						    s->kif->pfik_name);
						pf_print_state_parts(s,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						printf(", existing: ");
						pf_print_state_parts(si,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						printf("\n");
					}
					PF_HASHROW_UNLOCK(ih);
					KEYS_UNLOCK();
					uma_zfree(V_pf_state_key_z, sk);
					if (idx == PF_SK_STACK)
						pf_detach_state(s);
					return (EEXIST); /* collision! */
				}
			}
			PF_HASHROW_UNLOCK(ih);
		}
		uma_zfree(V_pf_state_key_z, sk);
		s->key[idx] = cur;
	} else {
		LIST_INSERT_HEAD(&kh->keys, sk, entry);
		s->key[idx] = sk;
	}

stateattach:
	/* List is sorted, if-bound states before floating. */
	if (s->kif == V_pfi_all)
		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]);
	else
		TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]);

	if (olds) {
		TAILQ_REMOVE(&s->key[idx]->states[idx], olds, key_list[idx]);
		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], olds,
		    key_list[idx]);
		olds = NULL;
	}

	/*
	 * Attach done. See how should we (or should not?)
	 * attach a second key.
	 */
	if (sks == skw) {
		s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
		idx = PF_SK_STACK;
		sks = NULL;
		goto stateattach;
	} else if (sks != NULL) {
		/*
		 * Continue attaching with stack key.
		 */
		sk = sks;
		kh = khs;
		idx = PF_SK_STACK;
		sks = NULL;
		goto keyattach;
	}

	PF_STATE_LOCK(s);
	KEYS_UNLOCK();

	KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL,
	    ("%s failure", __func__));

	return (0);
}
static void
pf_detach_state(struct pf_state *s)
{
	struct pf_state_key *sks = s->key[PF_SK_STACK];
	struct pf_keyhash *kh;

	if (sks != NULL) {
		kh = &V_pf_keyhash[pf_hashkey(sks)];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_STACK] != NULL)
			pf_state_key_detach(s, PF_SK_STACK);
		/*
		 * If both point to same key, then we are done.
		 */
		if (sks == s->key[PF_SK_WIRE]) {
			pf_state_key_detach(s, PF_SK_WIRE);
			PF_HASHROW_UNLOCK(kh);
			return;
		}
		PF_HASHROW_UNLOCK(kh);
	}

	if (s->key[PF_SK_WIRE] != NULL) {
		kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_WIRE] != NULL)
			pf_state_key_detach(s, PF_SK_WIRE);
		PF_HASHROW_UNLOCK(kh);
	}
}

static void
pf_state_key_detach(struct pf_state *s, int idx)
{
	struct pf_state_key *sk = s->key[idx];
#ifdef INVARIANTS
	struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)];

	PF_HASHROW_ASSERT(kh);
#endif
	TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]);
	s->key[idx] = NULL;

	if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) {
		LIST_REMOVE(sk, entry);
		uma_zfree(V_pf_state_key_z, sk);
	}
}
static int
pf_state_key_ctor(void *mem, int size, void *arg, int flags)
{
	struct pf_state_key *sk = mem;

	bzero(sk, sizeof(struct pf_state_key_cmp));
	TAILQ_INIT(&sk->states[PF_SK_WIRE]);
	TAILQ_INIT(&sk->states[PF_SK_STACK]);

	return (0);
}

struct pf_state_key *
pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr,
	struct pf_addr *daddr, u_int16_t sport, u_int16_t dport)
{
	struct pf_state_key *sk;

	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
	if (sk == NULL)
		return (NULL);

	PF_ACPY(&sk->addr[pd->sidx], saddr, pd->af);
	PF_ACPY(&sk->addr[pd->didx], daddr, pd->af);
	sk->port[pd->sidx] = sport;
	sk->port[pd->didx] = dport;
	sk->proto = pd->proto;
	sk->af = pd->af;

	return (sk);
}

struct pf_state_key *
pf_state_key_clone(struct pf_state_key *orig)
{
	struct pf_state_key *sk;

	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
	if (sk == NULL)
		return (NULL);

	bcopy(orig, sk, sizeof(struct pf_state_key_cmp));

	return (sk);
}
int
pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
    struct pf_state_key *sks, struct pf_state *s)
{
	struct pf_idhash *ih;
	struct pf_state *cur;
	int error;

	KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]),
	    ("%s: sks not pristine", __func__));
	KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]),
	    ("%s: skw not pristine", __func__));
	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));

	s->kif = kif;

	if (s->id == 0 && s->creatorid == 0) {
		/* XXX: should be atomic, but probability of collision low */
		if ((s->id = V_pf_stateid[curcpu]++) == PFID_MAXID)
			V_pf_stateid[curcpu] = 1;
		s->id |= (uint64_t )curcpu << PFID_CPUSHIFT;
		s->id = htobe64(s->id);
		s->creatorid = V_pf_status.hostid;
	}

	/* Returns with ID locked on success. */
	if ((error = pf_state_key_attach(skw, sks, s)) != 0)
		return (error);

	ih = &V_pf_idhash[PF_IDHASH(s)];
	PF_HASHROW_ASSERT(ih);
	LIST_FOREACH(cur, &ih->states, entry)
		if (cur->id == s->id && cur->creatorid == s->creatorid)
			break;

	if (cur != NULL) {
		PF_HASHROW_UNLOCK(ih);
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("pf: state ID collision: "
			    "id: %016llx creatorid: %08x\n",
			    (unsigned long long)be64toh(s->id),
			    ntohl(s->creatorid));
		}
		pf_detach_state(s);
		return (EEXIST);
	}

	LIST_INSERT_HEAD(&ih->states, s, entry);
	/* One for keys, one for ID hash. */
	refcount_init(&s->refs, 2);

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_INSERT], 1);
	if (V_pfsync_insert_state_ptr != NULL)
		V_pfsync_insert_state_ptr(s);

	/* Returns locked. */
	return (0);
}
/*
 * Find state by ID: returns with locked row on success.
 */
struct pf_state *
pf_find_state_byid(uint64_t id, uint32_t creatorid)
{
	struct pf_idhash *ih;
	struct pf_state *s;

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	ih = &V_pf_idhash[(be64toh(id) % (pf_hashmask + 1))];

	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(s, &ih->states, entry)
		if (s->id == id && s->creatorid == creatorid)
			break;

	if (s == NULL)
		PF_HASHROW_UNLOCK(ih);

	return (s);
}

/*
 * Find state by key.
 * Returns with ID hash slot locked on success.
 */
static struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
{
	struct pf_keyhash	*kh;
	struct pf_state_key	*sk;
	struct pf_state		*s;
	int idx;

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
			break;
	if (sk == NULL) {
		PF_HASHROW_UNLOCK(kh);
		return (NULL);
	}

	idx = (dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK);

	/* List is sorted, if-bound states before floating ones. */
	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx])
		if (s->kif == V_pfi_all || s->kif == kif) {
			PF_STATE_LOCK(s);
			PF_HASHROW_UNLOCK(kh);
			if (s->timeout >= PFTM_MAX) {
				/*
				 * State is either being processed by
				 * pf_unlink_state() in another thread, or
				 * is scheduled for immediate expiry.
				 */
				PF_STATE_UNLOCK(s);
				return (NULL);
			}
			return (s);
		}
	PF_HASHROW_UNLOCK(kh);

	return (NULL);
}
struct pf_state *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
	struct pf_keyhash	*kh;
	struct pf_state_key	*sk;
	struct pf_state		*s, *ret = NULL;
	int			 idx, inout = 0;

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
			break;
	if (sk == NULL) {
		PF_HASHROW_UNLOCK(kh);
		return (NULL);
	}
	switch (dir) {
	case PF_IN:
		idx = PF_SK_WIRE;
		break;
	case PF_OUT:
		idx = PF_SK_STACK;
		break;
	case PF_OUT | PF_IN:
		idx = PF_SK_STACK;
		inout = 1;
		break;
	default:
		panic("%s: dir %u", __func__, dir);
	}
second_run:
	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) {
		if (more == NULL) {
			PF_HASHROW_UNLOCK(kh);
			return (s);
		}

		if (ret)
			(*more)++;
		else
			ret = s;
	}
	if (inout == 1) {
		inout = 0;
		idx = PF_SK_WIRE;
		goto second_run;
	}
	PF_HASHROW_UNLOCK(kh);

	return (ret);
}

/* END state table stuff */
static void
pf_send(struct pf_send_entry *pfse)
{

	PF_SENDQ_LOCK();
	STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next);
	PF_SENDQ_UNLOCK();
	swi_sched(V_pf_swi_cookie, 0);
}

void
pf_intr(void *v)
{
	struct pf_send_head queue;
	struct pf_send_entry *pfse, *next;

	CURVNET_SET((struct vnet *)v);

	PF_SENDQ_LOCK();
	queue = V_pf_sendqueue;
	STAILQ_INIT(&V_pf_sendqueue);
	PF_SENDQ_UNLOCK();

	STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) {
		switch (pfse->pfse_type) {
#ifdef INET
		case PFSE_IP:
			ip_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL);
			break;
		case PFSE_ICMP:
			icmp_error(pfse->pfse_m, pfse->icmpopts.type,
			    pfse->icmpopts.code, 0, pfse->icmpopts.mtu);
			break;
#endif /* INET */
#ifdef INET6
		case PFSE_IP6:
			ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL,
			    NULL);
			break;
		case PFSE_ICMP6:
			icmp6_error(pfse->pfse_m, pfse->icmpopts.type,
			    pfse->icmpopts.code, pfse->icmpopts.mtu);
			break;
#endif /* INET6 */
		default:
			panic("%s: unknown type", __func__);
		}
		free(pfse, M_PFTEMP);
	}
	CURVNET_RESTORE();
}
void
pf_purge_thread(void *unused __unused)
{
	VNET_ITERATOR_DECL(vnet_iter);

	sx_xlock(&pf_end_lock);
	while (pf_end_threads == 0) {
		sx_sleep(pf_purge_thread, &pf_end_lock, 0, "pftm", hz / 10);

		VNET_LIST_RLOCK();
		VNET_FOREACH(vnet_iter) {
			CURVNET_SET(vnet_iter);

			/* Wait until V_pf_default_rule is initialized. */
			if (V_pf_vnet_active == 0) {
				CURVNET_RESTORE();
				continue;
			}

			/*
			 * Process 1/interval fraction of the state
			 * table every run.
			 */
			V_pf_purge_idx =
			    pf_purge_expired_states(V_pf_purge_idx, pf_hashmask /
			    (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));

			/*
			 * Purge other expired types every
			 * PFTM_INTERVAL seconds.
			 */
			if (V_pf_purge_idx == 0) {
				/*
				 * Order is important:
				 * - states and src nodes reference rules
				 * - states and rules reference kifs
				 */
				pf_purge_expired_fragments();
				pf_purge_expired_src_nodes();
				pf_purge_unlinked_rules();
				pfi_kif_purge();
			}
			CURVNET_RESTORE();
		}
		VNET_LIST_RUNLOCK();
	}

	pf_end_threads++;
	sx_xunlock(&pf_end_lock);
	kproc_exit(0);
}
void
pf_unload_vnet_purge(void)
{

	/*
	 * To cleanse up all kifs and rules we need
	 * two runs: first one clears reference flags,
	 * then pf_purge_expired_states() doesn't
	 * raise them, and then second run frees.
	 */
	pf_purge_unlinked_rules();
	pfi_kif_purge();

	/*
	 * Now purge everything.
	 */
	pf_purge_expired_states(0, pf_hashmask);
	pf_purge_fragments(UINT_MAX);
	pf_purge_expired_src_nodes();

	/*
	 * Now all kifs & rules should be unreferenced,
	 * thus should be successfully freed.
	 */
	pf_purge_unlinked_rules();
	pfi_kif_purge();
}
u_int32_t
pf_state_expires(const struct pf_state *state)
{
	u_int32_t	timeout;
	u_int32_t	start;
	u_int32_t	end;
	u_int32_t	states;

	/* handle all PFTM_* > PFTM_MAX here */
	if (state->timeout == PFTM_PURGE)
		return (time_uptime);
	KASSERT(state->timeout != PFTM_UNLINKED,
	    ("pf_state_expires: timeout == PFTM_UNLINKED"));
	KASSERT((state->timeout < PFTM_MAX),
	    ("pf_state_expires: timeout > PFTM_MAX"));
	timeout = state->rule.ptr->timeout[state->timeout];
	if (!timeout)
		timeout = V_pf_default_rule.timeout[state->timeout];
	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
	if (start && state->rule.ptr != &V_pf_default_rule) {
		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
		states = counter_u64_fetch(state->rule.ptr->states_cur);
	} else {
		start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
		end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
		states = V_pf_status.states;
	}
	if (end && states > start && start < end) {
		if (states < end) {
			timeout = (u_int64_t)timeout * (end - states) /
			    (end - start);
			return (state->expire + timeout);
		}
		else
			return (time_uptime);
	}
	return (state->expire + timeout);
}
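/*
 * Illustrative example (added commentary, not from the original
 * source): with adaptive.start 6000, adaptive.end 12000 and a base
 * tcp.established timeout of 86400 seconds, a state table holding
 * 9000 entries scales the timeout to
 *
 *	86400 * (12000 - 9000) / (12000 - 6000) = 43200 seconds,
 *
 * i.e. timeouts shrink linearly from 100% at adaptive.start down to
 * 0% at adaptive.end, and at or beyond adaptive.end states expire
 * immediately (time_uptime).
 */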
static void
pf_purge_expired_src_nodes()
{
	struct pf_ksrc_node_list	freelist;
	struct pf_srchash	*sh;
	struct pf_ksrc_node	*cur, *next;
	int i;

	LIST_INIT(&freelist);
	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
	    PF_HASHROW_LOCK(sh);
	    LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next)
		if (cur->states == 0 && cur->expire <= time_uptime) {
			pf_unlink_src_node(cur);
			LIST_INSERT_HEAD(&freelist, cur, entry);
		} else if (cur->rule.ptr != NULL)
			cur->rule.ptr->rule_flag |= PFRULE_REFS;
	    PF_HASHROW_UNLOCK(sh);
	}

	pf_free_src_nodes(&freelist);

	V_pf_status.src_nodes = uma_zone_get_cur(V_pf_sources_z);
}
static void
pf_src_tree_remove_state(struct pf_state *s)
{
	struct pf_ksrc_node *sn;
	struct pf_srchash *sh;
	u_int32_t timeout;

	timeout = s->rule.ptr->timeout[PFTM_SRC_NODE] ?
	    s->rule.ptr->timeout[PFTM_SRC_NODE] :
	    V_pf_default_rule.timeout[PFTM_SRC_NODE];

	if (s->src_node != NULL) {
		sn = s->src_node;
		sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
		PF_HASHROW_LOCK(sh);
		if (s->src.tcp_est)
			--sn->conn;
		if (--sn->states == 0)
			sn->expire = time_uptime + timeout;
		PF_HASHROW_UNLOCK(sh);
	}
	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
		sn = s->nat_src_node;
		sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
		PF_HASHROW_LOCK(sh);
		if (--sn->states == 0)
			sn->expire = time_uptime + timeout;
		PF_HASHROW_UNLOCK(sh);
	}
	s->src_node = s->nat_src_node = NULL;
}
/*
 * Unlink and potentially free a state. Function may be
 * called with ID hash row locked, but always returns
 * unlocked, since it needs to go through key hash locking.
 */
int
pf_unlink_state(struct pf_state *s, u_int flags)
{
	struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)];

	if ((flags & PF_ENTER_LOCKED) == 0)
		PF_HASHROW_LOCK(ih);
	else
		PF_HASHROW_ASSERT(ih);

	if (s->timeout == PFTM_UNLINKED) {
		/*
		 * State is being processed
		 * by pf_unlink_state() in
		 * another thread.
		 */
		PF_HASHROW_UNLOCK(ih);
		return (0);	/* XXXGL: undefined actually */
	}

	if (s->src.state == PF_TCPS_PROXY_DST) {
		/* XXX wire key the right one? */
		pf_send_tcp(NULL, s->rule.ptr, s->key[PF_SK_WIRE]->af,
		    &s->key[PF_SK_WIRE]->addr[1],
		    &s->key[PF_SK_WIRE]->addr[0],
		    s->key[PF_SK_WIRE]->port[1],
		    s->key[PF_SK_WIRE]->port[0],
		    s->src.seqhi, s->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, s->tag, NULL);
	}

	LIST_REMOVE(s, entry);
	pf_src_tree_remove_state(s);

	if (V_pfsync_delete_state_ptr != NULL)
		V_pfsync_delete_state_ptr(s);

	STATE_DEC_COUNTERS(s);

	s->timeout = PFTM_UNLINKED;

	PF_HASHROW_UNLOCK(ih);

	pf_detach_state(s);
	/* pf_state_insert() initialises refs to 2, so we can never release the
	 * last reference here, only in pf_release_state(). */
	(void)refcount_release(&s->refs);

	return (pf_release_state(s));
}
void
pf_free_state(struct pf_state *cur)
{

	KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
	KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
	    cur->timeout));

	for (int i = 0; i < 2; i++) {
		if (cur->bytes[i] != NULL)
			counter_u64_free(cur->bytes[i]);
		if (cur->packets[i] != NULL)
			counter_u64_free(cur->packets[i]);
	}

	pf_normalize_tcp_cleanup(cur);
	uma_zfree(V_pf_state_z, cur);
	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_REMOVALS], 1);
}
/*
 * Called only from pf_purge_thread(), thus serialized.
 */
static u_int
pf_purge_expired_states(u_int i, int maxcheck)
{
	struct pf_idhash *ih;
	struct pf_state *s;

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	/*
	 * Go through hash and unlink states that expire now.
	 */
	while (maxcheck > 0) {
		ih = &V_pf_idhash[i];

		/* only take the lock if we expect to do work */
		if (!LIST_EMPTY(&ih->states)) {
relock:
			PF_HASHROW_LOCK(ih);
			LIST_FOREACH(s, &ih->states, entry) {
				if (pf_state_expires(s) <= time_uptime) {
					V_pf_status.states -=
					    pf_unlink_state(s, PF_ENTER_LOCKED);
					goto relock;
				}
				s->rule.ptr->rule_flag |= PFRULE_REFS;
				if (s->nat_rule.ptr != NULL)
					s->nat_rule.ptr->rule_flag |= PFRULE_REFS;
				if (s->anchor.ptr != NULL)
					s->anchor.ptr->rule_flag |= PFRULE_REFS;
				s->kif->pfik_flags |= PFI_IFLAG_REFS;
				if (s->rt_kif)
					s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
			}
			PF_HASHROW_UNLOCK(ih);
		}

		/* Return when we hit end of hash. */
		if (++i > pf_hashmask) {
			V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
			return (0);
		}

		maxcheck--;
	}

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	return (i);
}
static void
pf_purge_unlinked_rules()
{
	struct pf_rulequeue tmpq;
	struct pf_rule *r, *r1;

	/*
	 * If we have overloading task pending, then we'd
	 * better skip purging this time. There is a tiny
	 * probability that overloading task references
	 * an already unlinked rule.
	 */
	PF_OVERLOADQ_LOCK();
	if (!SLIST_EMPTY(&V_pf_overloadqueue)) {
		PF_OVERLOADQ_UNLOCK();
		return;
	}
	PF_OVERLOADQ_UNLOCK();

	/*
	 * Do naive mark-and-sweep garbage collecting of old rules.
	 * Reference flag is raised by pf_purge_expired_states()
	 * and pf_purge_expired_src_nodes().
	 *
	 * To avoid LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK,
	 * use a temporary queue.
	 */
	TAILQ_INIT(&tmpq);
	PF_UNLNKDRULES_LOCK();
	TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) {
		if (!(r->rule_flag & PFRULE_REFS)) {
			TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries);
			TAILQ_INSERT_TAIL(&tmpq, r, entries);
		} else
			r->rule_flag &= ~PFRULE_REFS;
	}
	PF_UNLNKDRULES_UNLOCK();

	if (!TAILQ_EMPTY(&tmpq)) {
		PF_RULES_WLOCK();
		TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) {
			TAILQ_REMOVE(&tmpq, r, entries);
			pf_free_rule(r);
		}
		PF_RULES_WUNLOCK();
	}
}
void
pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t a = ntohl(addr->addr32[0]);
		printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
		    (a>>8)&255, a&255);
		if (p) {
			p = ntohs(p);
			printf(":%u", p);
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		u_int16_t b;
		u_int8_t i, curstart, curend, maxstart, maxend;
		curstart = curend = maxstart = maxend = 255;
		for (i = 0; i < 8; i++) {
			if (!addr->addr16[i]) {
				if (curstart == 255)
					curstart = i;
				curend = i;
			} else {
				if ((curend - curstart) >
				    (maxend - maxstart)) {
					maxstart = curstart;
					maxend = curend;
				}
				curstart = curend = 255;
			}
		}
		if ((curend - curstart) >
		    (maxend - maxstart)) {
			maxstart = curstart;
			maxend = curend;
		}
		for (i = 0; i < 8; i++) {
			if (i >= maxstart && i <= maxend) {
				if (i == 0)
					printf(":");
				if (i == maxend)
					printf(":");
			} else {
				b = ntohs(addr->addr16[i]);
				printf("%x", b);
				if (i < 7)
					printf(":");
			}
		}
		if (p) {
			p = ntohs(p);
			printf("[%u]", p);
		}
		break;
	}
#endif /* INET6 */
	}
}
void
pf_print_state(struct pf_state *s)
{
	pf_print_state_parts(s, NULL, NULL);
}

static void
pf_print_state_parts(struct pf_state *s,
    struct pf_state_key *skwp, struct pf_state_key *sksp)
{
	struct pf_state_key *skw, *sks;
	u_int8_t proto, dir;

	/* Do our best to fill these, but they're skipped if NULL */
	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
	proto = skw ? skw->proto : (sks ? sks->proto : 0);
	dir = s ? s->direction : 0;

	switch (proto) {
	case IPPROTO_IPV4:
		printf("IPv4");
		break;
	case IPPROTO_IPV6:
		printf("IPv6");
		break;
	case IPPROTO_TCP:
		printf("TCP");
		break;
	case IPPROTO_UDP:
		printf("UDP");
		break;
	case IPPROTO_ICMP:
		printf("ICMP");
		break;
	case IPPROTO_ICMPV6:
		printf("ICMPv6");
		break;
	default:
		printf("%u", proto);
		break;
	}
	switch (dir) {
	case PF_IN:
		printf(" in");
		break;
	case PF_OUT:
		printf(" out");
		break;
	}
	if (skw) {
		printf(" wire: ");
		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
		printf(" ");
		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
	}
	if (sks) {
		printf(" stack: ");
		if (sks != skw) {
			pf_print_host(&sks->addr[0], sks->port[0], sks->af);
			printf(" ");
			pf_print_host(&sks->addr[1], sks->port[1], sks->af);
		} else
			printf("-");
	}
	if (s) {
		if (proto == IPPROTO_TCP) {
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->src.seqlo, s->src.seqhi,
			    s->src.max_win, s->src.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->src.wscale & PF_WSCALE_MASK);
			printf("]");
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->dst.seqlo, s->dst.seqhi,
			    s->dst.max_win, s->dst.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->dst.wscale & PF_WSCALE_MASK);
			printf("]");
		}
		printf(" %u:%u", s->src.state, s->dst.state);
	}
}
void
pf_print_flags(u_int8_t f)
{
	if (f)
		printf(" ");
	if (f & TH_FIN)
		printf("F");
	if (f & TH_SYN)
		printf("S");
	if (f & TH_RST)
		printf("R");
	if (f & TH_PUSH)
		printf("P");
	if (f & TH_ACK)
		printf("A");
	if (f & TH_URG)
		printf("U");
	if (f & TH_ECE)
		printf("E");
	if (f & TH_CWR)
		printf("W");
}
#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
		if (cur->af != prev->af)
			PF_SET_SKIP_STEPS(PF_SKIP_AF);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
		if (cur->src.neg != prev->src.neg ||
		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
		if (cur->src.port[0] != prev->src.port[0] ||
		    cur->src.port[1] != prev->src.port[1] ||
		    cur->src.port_op != prev->src.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
		if (cur->dst.neg != prev->dst.neg ||
		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
		if (cur->dst.port[0] != prev->dst.port[0] ||
		    cur->dst.port[1] != prev->dst.port[1] ||
		    cur->dst.port_op != prev->dst.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}
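/*
 * Illustrative note (added commentary, not from the original source):
 * skip steps are the classic pf ruleset optimization.  For each of the
 * PF_SKIP_* criteria, every rule gets a pointer to the next rule that
 * differs in that criterion.  If, say, rules 1-50 all match on the
 * same interface and a packet arrives on a different one, the
 * evaluation loop can follow skip[PF_SKIP_IFP] from rule 1 directly
 * to rule 51 instead of testing all 50 rules individually.
 */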
static int
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
{
	if (aw1->type != aw2->type)
		return (1);
	switch (aw1->type) {
	case PF_ADDR_ADDRMASK:
	case PF_ADDR_RANGE:
		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, AF_INET6))
			return (1);
		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, AF_INET6))
			return (1);
		return (0);
	case PF_ADDR_DYNIFTL:
		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		return (0);
	case PF_ADDR_TABLE:
		return (aw1->p.tbl != aw2->p.tbl);
	default:
		printf("invalid address type: %d\n", aw1->type);
		return (1);
	}
}
/*
 * Checksum updates are a little complicated because the checksum in the TCP/UDP
 * header isn't always a full checksum. In some cases (i.e. output) it's a
 * pseudo-header checksum, which is a partial checksum over src/dst IP
 * addresses, protocol number and length.
 *
 * That means we have the following cases:
 *  * Input or forwarding: we don't have TSO, the checksum fields are full
 *    checksums, we need to update the checksum whenever we change anything.
 *  * Output (i.e. the checksum is a pseudo-header checksum):
 *    x The field being updated is src/dst address or affects the length of
 *      the packet. We need to update the pseudo-header checksum (note that this
 *      checksum is not ones' complement).
 *    x Some other field is being modified (e.g. src/dst port numbers): We
 *      don't have to update anything.
 */
u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
	u_int32_t x;

	x = cksum + old - new;
	x = (x + (x >> 16)) & 0xffff;

	/* optimise: eliminate a branch when not udp */
	if (udp && cksum == 0x0000)
		return cksum;
	if (udp && x == 0x0000)
		x = 0xffff;

	return (u_int16_t)(x);
}
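/*
 * Worked example (added commentary, not from the original source):
 * this is the standard incremental checksum update, HC' = HC + m - m'
 * with an end-around carry fold (cf. RFC 1141/1624).  Rewriting a
 * 16-bit field from old = 0x1234 to new = 0x1235 with cksum = 0xabcd:
 *
 *	x = 0xabcd + 0x1234 - 0x1235 = 0xabcc
 *
 * and the fold leaves 0xabcc.  The udp special cases exist because a
 * transmitted UDP checksum of 0x0000 means "no checksum": an
 * unchecksummed datagram must stay 0x0000, and a computed 0x0000 must
 * be sent as its one's-complement equivalent 0xffff.
 */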
static void
pf_patch_8(struct mbuf *m, u_int16_t *cksum, u_int8_t *f, u_int8_t v, bool hi,
    u_int8_t udp)
{
	u_int16_t old = htons(hi ? (*f << 8) : *f);
	u_int16_t new = htons(hi ? ( v << 8) :  v);

	if (*f == v)
		return;

	*f = v;

	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
		return;

	*cksum = pf_cksum_fixup(*cksum, old, new, udp);
}

/*
 * Patch a 16- or 32-bit quantity one byte at a time, so the field
 * being rewritten needs no particular alignment; 'hi' tracks whether
 * the current byte falls in the high half of a 16-bit checksum word.
 */
static void
pf_patch_16_unaligned(struct mbuf *m, u_int16_t *cksum, void *f, u_int16_t v,
    bool hi, u_int8_t udp)
{
	u_int8_t *fb = (u_int8_t *)f;
	u_int8_t *vb = (u_int8_t *)&v;

	pf_patch_8(m, cksum, fb++, *vb++, hi, udp);
	pf_patch_8(m, cksum, fb++, *vb++, !hi, udp);
}

static void
pf_patch_32_unaligned(struct mbuf *m, u_int16_t *cksum, void *f, u_int32_t v,
    bool hi, u_int8_t udp)
{
	u_int8_t *fb = (u_int8_t *)f;
	u_int8_t *vb = (u_int8_t *)&v;

	pf_patch_8(m, cksum, fb++, *vb++, hi, udp);
	pf_patch_8(m, cksum, fb++, *vb++, !hi, udp);
	pf_patch_8(m, cksum, fb++, *vb++, hi, udp);
	pf_patch_8(m, cksum, fb++, *vb++, !hi, udp);
}
u_int16_t
pf_proto_cksum_fixup(struct mbuf *m, u_int16_t cksum, u_int16_t old,
        u_int16_t new, u_int8_t udp)
{
	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
		return (cksum);

	return (pf_cksum_fixup(cksum, old, new, udp));
}

static void
pf_change_ap(struct mbuf *m, struct pf_addr *a, u_int16_t *p, u_int16_t *ic,
        u_int16_t *pc, struct pf_addr *an, u_int16_t pn, u_int8_t u,
        sa_family_t af)
{
	struct pf_addr	ao;
	u_int16_t	po = *p;

	PF_ACPY(&ao, a, af);
	PF_ACPY(a, an, af);

	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
		*pc = ~*pc;

	*p = pn;

	switch (af) {
#ifdef INET
	case AF_INET:
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    ao.addr16[0], an->addr16[0], 0),
		    ao.addr16[1], an->addr16[1], 0);

		*pc = pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u);

		*pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    ao.addr16[2], an->addr16[2], u),
		    ao.addr16[3], an->addr16[3], u),
		    ao.addr16[4], an->addr16[4], u),
		    ao.addr16[5], an->addr16[5], u),
		    ao.addr16[6], an->addr16[6], u),
		    ao.addr16[7], an->addr16[7], u);

		*pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
		break;
#endif /* INET6 */
	}

	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA |
	    CSUM_DELAY_DATA_IPV6)) {
		*pc = ~*pc;
		if (! *pc)
			*pc = 0xffff;
	}
}
/* Changes a u_int32_t. Uses a void * so there are no align restrictions */
void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
	u_int32_t	ao;

	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));
	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
	    ao % 65536, an % 65536, u);
}

void
pf_change_proto_a(struct mbuf *m, void *a, u_int16_t *c, u_int32_t an, u_int8_t udp)
{
	u_int32_t	ao;

	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));

	*c = pf_proto_cksum_fixup(m,
	    pf_proto_cksum_fixup(m, *c, ao / 65536, an / 65536, udp),
	    ao % 65536, an % 65536, udp);
}
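/*
 * Illustrative note (added commentary, not from the original source):
 * a 32-bit change is folded into the 16-bit checksum as two 16-bit
 * updates: ao / 65536 and ao % 65536 are simply the high and low
 * halfwords of the old value (likewise for an).  E.g. rewriting
 * 0x0a000001 (10.0.0.1) to 0x0a000002 updates the checksum once for
 * 0x0a00 -> 0x0a00 (a no-op) and once for 0x0001 -> 0x0002.
 */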
#ifdef INET6
static void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
	struct pf_addr	ao;

	PF_ACPY(&ao, a, AF_INET6);
	PF_ACPY(a, an, AF_INET6);

	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(*c,
	    ao.addr16[0], an->addr16[0], u),
	    ao.addr16[1], an->addr16[1], u),
	    ao.addr16[2], an->addr16[2], u),
	    ao.addr16[3], an->addr16[3], u),
	    ao.addr16[4], an->addr16[4], u),
	    ao.addr16[5], an->addr16[5], u),
	    ao.addr16[6], an->addr16[6], u),
	    ao.addr16[7], an->addr16[7], u);
}
#endif /* INET6 */
static void
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
{
	struct pf_addr	oia, ooa;

	PF_ACPY(&oia, ia, af);
	if (oa)
		PF_ACPY(&ooa, oa, af);

	/* Change inner protocol port, fix inner protocol checksum. */
	if (ip != NULL) {
		u_int16_t	oip = *ip;
		u_int32_t	opc;

		if (pc != NULL)
			opc = *pc;
		*ip = np;
		if (pc != NULL)
			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
		if (pc != NULL)
			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
	}
	/* Change inner ip address, fix inner ip and icmp checksums. */
	PF_ACPY(ia, na, af);
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t	 oh2c = *h2c;

		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], u),
		    oia.addr16[1], ia->addr16[1], u),
		    oia.addr16[2], ia->addr16[2], u),
		    oia.addr16[3], ia->addr16[3], u),
		    oia.addr16[4], ia->addr16[4], u),
		    oia.addr16[5], ia->addr16[5], u),
		    oia.addr16[6], ia->addr16[6], u),
		    oia.addr16[7], ia->addr16[7], u);
		break;
#endif /* INET6 */
	}
	/* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
	if (oa) {
		PF_ACPY(oa, na, af);
		switch (af) {
#ifdef INET
		case AF_INET:
			*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
			    ooa.addr16[0], oa->addr16[0], 0),
			    ooa.addr16[1], oa->addr16[1], 0);
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(*ic,
			    ooa.addr16[0], oa->addr16[0], u),
			    ooa.addr16[1], oa->addr16[1], u),
			    ooa.addr16[2], oa->addr16[2], u),
			    ooa.addr16[3], oa->addr16[3], u),
			    ooa.addr16[4], oa->addr16[4], u),
			    ooa.addr16[5], oa->addr16[5], u),
			    ooa.addr16[6], oa->addr16[6], u),
			    ooa.addr16[7], oa->addr16[7], u);
			break;
#endif /* INET6 */
		}
	}
}
/*
 * Need to modulate the sequence numbers in the TCP SACK option
 * (credits to Krzysztof Pfaff for report and patch)
 */
static int
pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *dst)
{
	int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
	u_int8_t opts[TCP_MAXOLEN], *opt = opts;
	int copyback = 0, i, olen;
	struct sackblk sack;

#define	TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
	if (hlen < TCPOLEN_SACKLEN ||
	    !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
		return 0;

	while (hlen >= TCPOLEN_SACKLEN) {
		size_t startoff = opt - opts;
		olen = opt[1];
		switch (*opt) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
		case TCPOPT_NOP:
			opt++;
			hlen--;
			break;
		case TCPOPT_SACK:
			if (olen > hlen)
				olen = hlen;
			if (olen >= TCPOLEN_SACKLEN) {
				for (i = 2; i + TCPOLEN_SACK <= olen;
				    i += TCPOLEN_SACK) {
					memcpy(&sack, &opt[i], sizeof(sack));
					pf_patch_32_unaligned(m,
					    &th->th_sum, &sack.start,
					    htonl(ntohl(sack.start) - dst->seqdiff),
					    PF_ALGNMNT(startoff),
					    0);
					pf_patch_32_unaligned(m, &th->th_sum,
					    &sack.end,
					    htonl(ntohl(sack.end) - dst->seqdiff),
					    PF_ALGNMNT(startoff),
					    0);
					memcpy(&opt[i], &sack, sizeof(sack));
				}
				copyback = 1;
			}
			/* FALLTHROUGH */
		default:
			if (olen < 2)
				olen = 2;
			hlen -= olen;
			opt += olen;
		}
	}

	if (copyback)
		m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts);
	return (copyback);
}
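/*
 * Illustrative note (added commentary, not from the original source):
 * when a state uses a sequence number modulator, pf shifts th_seq and
 * th_ack by a per-state offset (seqdiff).  SACK blocks carry absolute
 * sequence numbers of the same stream, so they must be shifted by the
 * same offset or the receiver would discard, or worse act on, blocks
 * that refer to the unmodulated sequence space.  The unaligned
 * patchers are used because TCP options carry no alignment guarantee
 * inside the option buffer.
 */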
static void
pf_send_tcp(struct mbuf *replyto, const struct pf_rule *r, sa_family_t af,
    const struct pf_addr *saddr, const struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
    u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
    u_int16_t rtag, struct ifnet *ifp)
{
	struct pf_send_entry *pfse;
	struct mbuf	*m;
	int		 len, tlen;
#ifdef INET
	struct ip	*h = NULL;
#endif /* INET */
#ifdef INET6
	struct ip6_hdr	*h6 = NULL;
#endif /* INET6 */
	struct tcphdr	*th;
	char		*opt;
	struct pf_mtag	*pf_mtag;

	/* maximum segment size tcp option */
	tlen = sizeof(struct tcphdr);
	if (mss)
		tlen += 4;

	switch (af) {
#ifdef INET
	case AF_INET:
		len = sizeof(struct ip) + tlen;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		len = sizeof(struct ip6_hdr) + tlen;
		break;
#endif /* INET6 */
	default:
		panic("%s: unsupported af %d", __func__, af);
	}

	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
	if (pfse == NULL)
		return;
	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(pfse, M_PFTEMP);
		return;
	}
#ifdef MAC
	mac_netinet_firewall_send(m);
#endif
	if ((pf_mtag = pf_get_mtag(m)) == NULL) {
		free(pfse, M_PFTEMP);
		m_freem(m);
		return;
	}
	if (tag)
		m->m_flags |= M_SKIP_FIREWALL;
	pf_mtag->tag = rtag;

	if (r != NULL && r->rtableid >= 0)
		M_SETFIB(m, r->rtableid);

#ifdef ALTQ
	if (r != NULL && r->qid) {
		pf_mtag->qid = r->qid;

		/* add hints for ecn */
		pf_mtag->hdr = mtod(m, struct ip *);
	}
#endif /* ALTQ */
	m->m_data += max_linkhdr;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	bzero(m->m_data, len);
	switch (af) {
#ifdef INET
	case AF_INET:
		h = mtod(m, struct ip *);

		/* IP header fields included in the TCP checksum */
		h->ip_p = IPPROTO_TCP;
		h->ip_len = htons(tlen);
		h->ip_src.s_addr = saddr->v4.s_addr;
		h->ip_dst.s_addr = daddr->v4.s_addr;

		th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		h6 = mtod(m, struct ip6_hdr *);

		/* IP header fields included in the TCP checksum */
		h6->ip6_nxt = IPPROTO_TCP;
		h6->ip6_plen = htons(tlen);
		memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
		memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));

		th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
		break;
#endif /* INET6 */
	}

	/* TCP header */
	th->th_sport = sport;
	th->th_dport = dport;
	th->th_seq = htonl(seq);
	th->th_ack = htonl(ack);
	th->th_off = tlen >> 2;
	th->th_flags = flags;
	th->th_win = htons(win);

	if (mss) {
		opt = (char *)(th + 1);
		opt[0] = TCPOPT_MAXSEG;
		opt[1] = 4;
		mss = htons(mss);
		bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
	}

	switch (af) {
#ifdef INET
	case AF_INET:
		/* TCP checksum */
		th->th_sum = in_cksum(m, len);

		/* Finish the IP header */
		h->ip_v = 4;
		h->ip_hl = sizeof(*h) >> 2;
		h->ip_tos = IPTOS_LOWDELAY;
		h->ip_off = htons(V_path_mtu_discovery ? IP_DF : 0);
		h->ip_len = htons(len);
		h->ip_ttl = ttl ? ttl : V_ip_defttl;
		h->ip_sum = 0;

		pfse->pfse_type = PFSE_IP;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		/* TCP checksum */
		th->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr), tlen);

		h6->ip6_vfc |= IPV6_VERSION;
		h6->ip6_hlim = IPV6_DEFHLIM;

		pfse->pfse_type = PFSE_IP6;
		break;
#endif /* INET6 */
	}

	pfse->pfse_m = m;
	pf_send(pfse);
}
2604 pf_return(struct pf_rule *r, struct pf_rule *nr, struct pf_pdesc *pd,
2605 struct pf_state_key *sk, int off, struct mbuf *m, struct tcphdr *th,
2606 struct pfi_kif *kif, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen,
2609 struct pf_addr * const saddr = pd->src;
2610 struct pf_addr * const daddr = pd->dst;
2611 sa_family_t af = pd->af;
2613 /* undo NAT changes, if they have taken place */
2615 PF_ACPY(saddr, &sk->addr[pd->sidx], af);
2616 PF_ACPY(daddr, &sk->addr[pd->didx], af);
2618 *pd->sport = sk->port[pd->sidx];
2620 *pd->dport = sk->port[pd->didx];
2622 *pd->proto_sum = bproto_sum;
2624 *pd->ip_sum = bip_sum;
2625 m_copyback(m, off, hdrlen, pd->hdr.any);
2627 if (pd->proto == IPPROTO_TCP &&
2628 ((r->rule_flag & PFRULE_RETURNRST) ||
2629 (r->rule_flag & PFRULE_RETURN)) &&
2630 !(th->th_flags & TH_RST)) {
2631 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
2643 h4 = mtod(m, struct ip *);
2644 len = ntohs(h4->ip_len) - off;
2649 h6 = mtod(m, struct ip6_hdr *);
2650 len = ntohs(h6->ip6_plen) - (off - sizeof(*h6));
2655 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
2656 REASON_SET(reason, PFRES_PROTCKSUM);
2658 if (th->th_flags & TH_SYN)
2660 if (th->th_flags & TH_FIN)
2662 pf_send_tcp(m, r, af, pd->dst,
2663 pd->src, th->th_dport, th->th_sport,
2664 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
2665 r->return_ttl, 1, 0, kif->pfik_ifp);
2667 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
2669 pf_send_icmp(m, r->return_icmp >> 8,
2670 r->return_icmp & 255, af, r);
2671 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
2673 pf_send_icmp(m, r->return_icmp6 >> 8,
2674 r->return_icmp6 & 255, af, r);
2679 pf_ieee8021q_setpcp(struct mbuf *m, u_int8_t prio)
2683 KASSERT(prio <= PF_PRIO_MAX,
2684 ("%s with invalid pcp", __func__));
2686 mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_OUT, NULL);
2688 mtag = m_tag_alloc(MTAG_8021Q, MTAG_8021Q_PCP_OUT,
2689 sizeof(uint8_t), M_NOWAIT);
2692 m_tag_prepend(m, mtag);
2695 *(uint8_t *)(mtag + 1) = prio;
2700 pf_match_ieee8021q_pcp(u_int8_t prio, struct mbuf *m)
2705 mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL);
2709 if (prio == PF_PRIO_ZERO)
2712 mpcp = *(uint8_t *)(mtag + 1);
2714 return (mpcp == prio);
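/*
 * The pair of functions above passes the 802.1Q priority through mbuf
 * tags: the setter allocates or reuses an MTAG_8021Q_PCP_OUT tag, the
 * matcher reads MTAG_8021Q_PCP_IN.  The read side in isolation, as a
 * sketch (not compiled):
 */
#if 0
static int
example_read_pcp(struct mbuf *m, uint8_t *pcp)
{
	struct m_tag *mtag;

	mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL);
	if (mtag == NULL)
		return (0);
	*pcp = *(uint8_t *)(mtag + 1);	/* payload follows the tag header */
	return (1);
}
#endif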
2718 pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
2721 struct pf_send_entry *pfse;
2723 struct pf_mtag *pf_mtag;
2725 /* Allocate outgoing queue entry, mbuf and mbuf tag. */
2726 pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
2730 if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) {
2731 free(pfse, M_PFTEMP);
2735 if ((pf_mtag = pf_get_mtag(m0)) == NULL) {
2736 free(pfse, M_PFTEMP);
2740 m0->m_flags |= M_SKIP_FIREWALL;
2742 if (r->rtableid >= 0)
2743 M_SETFIB(m0, r->rtableid);
2747 pf_mtag->qid = r->qid;
2748 /* add hints for ECN */
2749 pf_mtag->hdr = mtod(m0, struct ip *);
2756 pfse->pfse_type = PFSE_ICMP;
2761 pfse->pfse_type = PFSE_ICMP6;
2766 pfse->icmpopts.type = type;
2767 pfse->icmpopts.code = code;
2772 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
2773 * If n is 0, they match if they are equal. If n is != 0, they match if they are different.
2777 pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
2778 struct pf_addr *b, sa_family_t af)
2785 if ((a->addr32[0] & m->addr32[0]) ==
2786 (b->addr32[0] & m->addr32[0]))
2792 if (((a->addr32[0] & m->addr32[0]) ==
2793 (b->addr32[0] & m->addr32[0])) &&
2794 ((a->addr32[1] & m->addr32[1]) ==
2795 (b->addr32[1] & m->addr32[1])) &&
2796 ((a->addr32[2] & m->addr32[2]) ==
2797 (b->addr32[2] & m->addr32[2])) &&
2798 ((a->addr32[3] & m->addr32[3]) ==
2799 (b->addr32[3] & m->addr32[3])))
2818 * Return 1 if b <= a <= e, otherwise return 0.
2821 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
2822 struct pf_addr *a, sa_family_t af)
2827 if ((ntohl(a->addr32[0]) < ntohl(b->addr32[0])) ||
2828 (ntohl(a->addr32[0]) > ntohl(e->addr32[0])))
2837 for (i = 0; i < 4; ++i)
2838 if (ntohl(a->addr32[i]) > ntohl(b->addr32[i]))
2840 else if (ntohl(a->addr32[i]) < ntohl(b->addr32[i]))
2843 for (i = 0; i < 4; ++i)
2844 if (ntohl(a->addr32[i]) < ntohl(e->addr32[i]))
2846 else if (ntohl(a->addr32[i]) > ntohl(e->addr32[i]))
2856 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
2860 return ((p > a1) && (p < a2));
2862 return ((p < a1) || (p > a2));
2864 return ((p >= a1) && (p <= a2));
2878 return (0); /* never reached */
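/*
 * For reference, the range cases visible above map onto the pf.conf
 * operators as follows (a worked example for ports):
 *
 *	(p > a1) && (p < a2)	"port 1000 >< 2000"  matches 1001..1999
 *	(p < a1) || (p > a2)	"port 1000 <> 2000"  matches all but 1000..2000
 *	(p >= a1) && (p <= a2)	"port 1000:2000"     matches 1000..2000
 */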
2882 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
2887 return (pf_match(op, a1, a2, p));
2891 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
2893 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2895 return (pf_match(op, a1, a2, u));
2899 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
2901 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2903 return (pf_match(op, a1, a2, g));
2907 pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag, int mtag)
2912 return ((!r->match_tag_not && r->match_tag == *tag) ||
2913 (r->match_tag_not && r->match_tag != *tag));
2917 pf_tag_packet(struct mbuf *m, struct pf_pdesc *pd, int tag)
2920 KASSERT(tag > 0, ("%s: tag %d", __func__, tag));
2922 if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(m)) == NULL))
2925 pd->pf_mtag->tag = tag;
2930 #define PF_ANCHOR_STACKSIZE 32
2931 struct pf_anchor_stackframe {
2932 struct pf_ruleset *rs;
2933 struct pf_rule *r; /* XXX: + match bit */
2934 struct pf_anchor *child;
2938 * XXX: We rely on malloc(9) returning pointer-aligned addresses.
2940 #define PF_ANCHORSTACK_MATCH 0x00000001
2941 #define PF_ANCHORSTACK_MASK (PF_ANCHORSTACK_MATCH)
2943 #define PF_ANCHOR_MATCH(f) ((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH)
2944 #define PF_ANCHOR_RULE(f) (struct pf_rule *) \
2945 ((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK)
2946 #define PF_ANCHOR_SET_MATCH(f) do { (f)->r = (void *) \
2947 ((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH); \
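/*
 * The macros above are a pointer-tagging idiom: because the rule
 * pointers are at least 2-byte aligned, bit 0 is free to carry the
 * per-frame "match" flag.  The same trick in generic form (sketch,
 * not compiled):
 */
#if 0
static inline void *
tag_ptr(void *p)
{
	return ((void *)((uintptr_t)p | 1));		/* set flag bit */
}

static inline void *
untag_ptr(void *p)
{
	return ((void *)((uintptr_t)p & ~(uintptr_t)1));	/* strip flag bit */
}

static inline int
ptr_tagged(void *p)
{
	return (((uintptr_t)p & 1) != 0);
}
#endif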
2951 pf_step_into_anchor(struct pf_anchor_stackframe *stack, int *depth,
2952 struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
2955 struct pf_anchor_stackframe *f;
2961 if (*depth >= PF_ANCHOR_STACKSIZE) {
2962 printf("%s: anchor stack overflow on %s\n",
2963 __func__, (*r)->anchor->name);
2964 *r = TAILQ_NEXT(*r, entries);
2966 } else if (*depth == 0 && a != NULL)
2968 f = stack + (*depth)++;
2971 if ((*r)->anchor_wildcard) {
2972 struct pf_anchor_node *parent = &(*r)->anchor->children;
2974 if ((f->child = RB_MIN(pf_anchor_node, parent)) == NULL) {
2978 *rs = &f->child->ruleset;
2981 *rs = &(*r)->anchor->ruleset;
2983 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2987 pf_step_out_of_anchor(struct pf_anchor_stackframe *stack, int *depth,
2988 struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
2991 struct pf_anchor_stackframe *f;
3000 f = stack + *depth - 1;
3001 fr = PF_ANCHOR_RULE(f);
3002 if (f->child != NULL) {
3003 struct pf_anchor_node *parent;
3006 * This block traverses
3007 * a wildcard anchor.
3009 parent = &fr->anchor->children;
3010 if (match != NULL && *match) {
3012 * If any of "*" matched, then
3013 * "foo/ *" matched, mark frame
3016 PF_ANCHOR_SET_MATCH(f);
3019 f->child = RB_NEXT(pf_anchor_node, parent, f->child);
3020 if (f->child != NULL) {
3021 *rs = &f->child->ruleset;
3022 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
3030 if (*depth == 0 && a != NULL)
3033 if (PF_ANCHOR_MATCH(f) || (match != NULL && *match))
3035 *r = TAILQ_NEXT(fr, entries);
3036 } while (*r == NULL);
3043 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
3044 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
3049 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3050 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
3054 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3055 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
3056 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
3057 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
3058 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
3059 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
3060 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
3061 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
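/*
 * pf_poolmask() keeps the network bits of the pool address and the
 * host bits of the packet's address.  Worked IPv4 example (sketch,
 * not compiled): raddr 10.0.0.0, rmask 255.255.255.0 and saddr
 * 192.168.1.57 yield 10.0.0.57.
 */
#if 0
static u_int32_t
example_poolmask4(u_int32_t raddr, u_int32_t rmask, u_int32_t saddr)
{
	return ((raddr & rmask) | (~rmask & saddr));
}
#endif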
3067 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
3072 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
3076 if (addr->addr32[3] == 0xffffffff) {
3077 addr->addr32[3] = 0;
3078 if (addr->addr32[2] == 0xffffffff) {
3079 addr->addr32[2] = 0;
3080 if (addr->addr32[1] == 0xffffffff) {
3081 addr->addr32[1] = 0;
3083 htonl(ntohl(addr->addr32[0]) + 1);
3086 htonl(ntohl(addr->addr32[1]) + 1);
3089 htonl(ntohl(addr->addr32[2]) + 1);
3092 htonl(ntohl(addr->addr32[3]) + 1);
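/*
 * The unrolled IPv6 case above is a 128-bit big-endian increment: a
 * carry out of addr32[3] ripples toward addr32[0].  Equivalent loop
 * form (sketch, not compiled):
 */
#if 0
static void
example_in6_inc(u_int32_t w[4])		/* w[0] is most significant */
{
	int i;

	for (i = 3; i >= 0; i--) {
		w[i] = htonl(ntohl(w[i]) + 1);
		if (w[i] != 0)		/* no carry out of this word */
			break;
	}
}
#endif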
3099 pf_socket_lookup(int direction, struct pf_pdesc *pd, struct mbuf *m)
3101 struct pf_addr *saddr, *daddr;
3102 u_int16_t sport, dport;
3103 struct inpcbinfo *pi;
3106 pd->lookup.uid = UID_MAX;
3107 pd->lookup.gid = GID_MAX;
3109 switch (pd->proto) {
3111 if (pd->hdr.tcp == NULL)
3113 sport = pd->hdr.tcp->th_sport;
3114 dport = pd->hdr.tcp->th_dport;
3118 if (pd->hdr.udp == NULL)
3120 sport = pd->hdr.udp->uh_sport;
3121 dport = pd->hdr.udp->uh_dport;
3127 if (direction == PF_IN) {
3142 inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4,
3143 dport, INPLOOKUP_RLOCKPCB, NULL, m);
3145 inp = in_pcblookup_mbuf(pi, saddr->v4, sport,
3146 daddr->v4, dport, INPLOOKUP_WILDCARD |
3147 INPLOOKUP_RLOCKPCB, NULL, m);
3155 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6,
3156 dport, INPLOOKUP_RLOCKPCB, NULL, m);
3158 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport,
3159 &daddr->v6, dport, INPLOOKUP_WILDCARD |
3160 INPLOOKUP_RLOCKPCB, NULL, m);
3170 INP_RLOCK_ASSERT(inp);
3171 pd->lookup.uid = inp->inp_cred->cr_uid;
3172 pd->lookup.gid = inp->inp_cred->cr_groups[0];
3179 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3183 u_int8_t *opt, optlen;
3184 u_int8_t wscale = 0;
3186 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3187 if (hlen <= sizeof(struct tcphdr))
3189 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3191 opt = hdr + sizeof(struct tcphdr);
3192 hlen -= sizeof(struct tcphdr);
3202 if (wscale > TCP_MAX_WINSHIFT)
3203 wscale = TCP_MAX_WINSHIFT;
3204 wscale |= PF_WSCALE_FLAG;
3219 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3223 u_int8_t *opt, optlen;
3224 u_int16_t mss = V_tcp_mssdflt;
3226 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3227 if (hlen <= sizeof(struct tcphdr))
3229 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3231 opt = hdr + sizeof(struct tcphdr);
3232 hlen -= sizeof(struct tcphdr);
3233 while (hlen >= TCPOLEN_MAXSEG) {
3241 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
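/*
 * pf_get_wscale() and pf_get_mss() above share one option-scan shape:
 * skip EOL/NOP, validate the length octet, pull the payload of the
 * option of interest.  Generic form as a sketch (not compiled):
 */
#if 0
static int
example_find_tcp_opt(const u_int8_t *opt, int len, u_int8_t kind,
    const u_int8_t **val)
{
	while (len >= 2 && opt[0] != TCPOPT_EOL) {
		if (opt[0] == TCPOPT_NOP) {
			opt++;
			len--;
			continue;
		}
		if (opt[1] < 2 || opt[1] > len)
			break;			/* malformed option */
		if (opt[0] == kind) {
			*val = opt + 2;
			return (opt[1] - 2);	/* payload length */
		}
		len -= opt[1];
		opt += opt[1];
	}
	return (-1);
}
#endif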
3257 pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
3260 struct nhop4_basic nh4;
3263 struct nhop6_basic nh6;
3264 struct in6_addr dst6;
3273 hlen = sizeof(struct ip);
3274 if (fib4_lookup_nh_basic(rtableid, addr->v4, 0, 0, &nh4) == 0)
3275 mss = nh4.nh_mtu - hlen - sizeof(struct tcphdr);
3280 hlen = sizeof(struct ip6_hdr);
3281 in6_splitscope(&addr->v6, &dst6, &scopeid);
3282 if (fib6_lookup_nh_basic(rtableid, &dst6, scopeid, 0, 0, &nh6) == 0)
3283 mss = nh6.nh_mtu - hlen - sizeof(struct tcphdr);
3288 mss = max(V_tcp_mssdflt, mss);
3289 mss = min(mss, offer);
3290 mss = max(mss, 64); /* sanity - at least max opt space */
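/*
 * Worked example of the clamp above: an IPv4 route with a 1500-byte
 * MTU gives mss = 1500 - 20 (IP) - 20 (TCP) = 1460, which is then
 * bounded below by V_tcp_mssdflt and 64 and above by the peer's
 * offer.  Condensed (sketch, not compiled; assumes mtu > hlen + 20):
 */
#if 0
static u_int16_t
example_clamp_mss(u_int16_t mtu, u_int16_t hlen, u_int16_t offer,
    u_int16_t mssdflt)
{
	u_int16_t mss;

	mss = mtu - hlen - sizeof(struct tcphdr);
	mss = max(mssdflt, mss);
	mss = min(mss, offer);
	mss = max(mss, 64);		/* leave room for options */
	return (mss);
}
#endif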
3295 pf_tcp_iss(struct pf_pdesc *pd)
3298 u_int32_t digest[4];
3300 if (V_pf_tcp_secret_init == 0) {
3301 read_random(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret));
3302 MD5Init(&V_pf_tcp_secret_ctx);
3303 MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret,
3304 sizeof(V_pf_tcp_secret));
3305 V_pf_tcp_secret_init = 1;
3308 ctx = V_pf_tcp_secret_ctx;
3310 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short));
3311 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short));
3312 if (pd->af == AF_INET6) {
3313 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
3314 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
3316 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
3317 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
3319 MD5Final((u_char *)digest, &ctx);
3320 V_pf_tcp_iss_off += 4096;
3321 #define ISN_RANDOM_INCREMENT (4096 - 1)
3322 return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) +
3324 #undef ISN_RANDOM_INCREMENT
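/*
 * Shape of the ISN generation above, in brief: a per-boot secret is
 * folded into an MD5 context once; each SYN hashes the connection
 * 4-tuple on top of a copy of that context, and the result is spread
 * by a monotonically growing offset plus a bounded random increment
 * so successive sequence spaces do not collide (sketch, not compiled):
 */
#if 0
static u_int32_t
example_isn(u_int32_t digest0, u_int32_t *iss_off)
{
	*iss_off += 4096;
	return (digest0 + (arc4random() & 4095) + *iss_off);
}
#endif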
3328 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
3329 struct pfi_kif *kif, struct mbuf *m, int off, struct pf_pdesc *pd,
3330 struct pf_rule **am, struct pf_ruleset **rsm, struct inpcb *inp)
3332 struct pf_rule *nr = NULL;
3333 struct pf_addr * const saddr = pd->src;
3334 struct pf_addr * const daddr = pd->dst;
3335 sa_family_t af = pd->af;
3336 struct pf_rule *r, *a = NULL;
3337 struct pf_ruleset *ruleset = NULL;
3338 struct pf_ksrc_node *nsn = NULL;
3339 struct tcphdr *th = pd->hdr.tcp;
3340 struct pf_state_key *sk = NULL, *nk = NULL;
3342 int rewrite = 0, hdrlen = 0;
3343 int tag = -1, rtableid = -1;
3347 u_int16_t sport = 0, dport = 0;
3348 u_int16_t bproto_sum = 0, bip_sum = 0;
3349 u_int8_t icmptype = 0, icmpcode = 0;
3350 struct pf_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
3355 INP_LOCK_ASSERT(inp);
3356 pd->lookup.uid = inp->inp_cred->cr_uid;
3357 pd->lookup.gid = inp->inp_cred->cr_groups[0];
3358 pd->lookup.done = 1;
3361 switch (pd->proto) {
3363 sport = th->th_sport;
3364 dport = th->th_dport;
3365 hdrlen = sizeof(*th);
3368 sport = pd->hdr.udp->uh_sport;
3369 dport = pd->hdr.udp->uh_dport;
3370 hdrlen = sizeof(*pd->hdr.udp);
3374 if (pd->af != AF_INET)
3376 sport = dport = pd->hdr.icmp->icmp_id;
3377 hdrlen = sizeof(*pd->hdr.icmp);
3378 icmptype = pd->hdr.icmp->icmp_type;
3379 icmpcode = pd->hdr.icmp->icmp_code;
3381 if (icmptype == ICMP_UNREACH ||
3382 icmptype == ICMP_SOURCEQUENCH ||
3383 icmptype == ICMP_REDIRECT ||
3384 icmptype == ICMP_TIMXCEED ||
3385 icmptype == ICMP_PARAMPROB)
3390 case IPPROTO_ICMPV6:
3393 sport = dport = pd->hdr.icmp6->icmp6_id;
3394 hdrlen = sizeof(*pd->hdr.icmp6);
3395 icmptype = pd->hdr.icmp6->icmp6_type;
3396 icmpcode = pd->hdr.icmp6->icmp6_code;
3398 if (icmptype == ICMP6_DST_UNREACH ||
3399 icmptype == ICMP6_PACKET_TOO_BIG ||
3400 icmptype == ICMP6_TIME_EXCEEDED ||
3401 icmptype == ICMP6_PARAM_PROB)
3406 sport = dport = hdrlen = 0;
3410 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3412 /* check packet for BINAT/NAT/RDR */
3413 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, &sk,
3414 &nk, saddr, daddr, sport, dport, anchor_stack)) != NULL) {
3415 KASSERT(sk != NULL, ("%s: null sk", __func__));
3416 KASSERT(nk != NULL, ("%s: null nk", __func__));
3419 bip_sum = *pd->ip_sum;
3421 switch (pd->proto) {
3423 bproto_sum = th->th_sum;
3424 pd->proto_sum = &th->th_sum;
3426 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3427 nk->port[pd->sidx] != sport) {
3428 pf_change_ap(m, saddr, &th->th_sport, pd->ip_sum,
3429 &th->th_sum, &nk->addr[pd->sidx],
3430 nk->port[pd->sidx], 0, af);
3431 pd->sport = &th->th_sport;
3432 sport = th->th_sport;
3435 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3436 nk->port[pd->didx] != dport) {
3437 pf_change_ap(m, daddr, &th->th_dport, pd->ip_sum,
3438 &th->th_sum, &nk->addr[pd->didx],
3439 nk->port[pd->didx], 0, af);
3440 dport = th->th_dport;
3441 pd->dport = &th->th_dport;
3446 bproto_sum = pd->hdr.udp->uh_sum;
3447 pd->proto_sum = &pd->hdr.udp->uh_sum;
3449 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3450 nk->port[pd->sidx] != sport) {
3451 pf_change_ap(m, saddr, &pd->hdr.udp->uh_sport,
3452 pd->ip_sum, &pd->hdr.udp->uh_sum,
3453 &nk->addr[pd->sidx],
3454 nk->port[pd->sidx], 1, af);
3455 sport = pd->hdr.udp->uh_sport;
3456 pd->sport = &pd->hdr.udp->uh_sport;
3459 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3460 nk->port[pd->didx] != dport) {
3461 pf_change_ap(m, daddr, &pd->hdr.udp->uh_dport,
3462 pd->ip_sum, &pd->hdr.udp->uh_sum,
3463 &nk->addr[pd->didx],
3464 nk->port[pd->didx], 1, af);
3465 dport = pd->hdr.udp->uh_dport;
3466 pd->dport = &pd->hdr.udp->uh_dport;
3472 nk->port[0] = nk->port[1];
3473 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
3474 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3475 nk->addr[pd->sidx].v4.s_addr, 0);
3477 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
3478 pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
3479 nk->addr[pd->didx].v4.s_addr, 0);
3481 if (nk->port[1] != pd->hdr.icmp->icmp_id) {
3482 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
3483 pd->hdr.icmp->icmp_cksum, sport,
3485 pd->hdr.icmp->icmp_id = nk->port[1];
3486 pd->sport = &pd->hdr.icmp->icmp_id;
3488 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
3492 case IPPROTO_ICMPV6:
3493 nk->port[0] = nk->port[1];
3494 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3495 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3496 &nk->addr[pd->sidx], 0);
3498 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3499 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3500 &nk->addr[pd->didx], 0);
3509 &nk->addr[pd->sidx], AF_INET))
3510 pf_change_a(&saddr->v4.s_addr,
3512 nk->addr[pd->sidx].v4.s_addr, 0);
3515 &nk->addr[pd->didx], AF_INET))
3516 pf_change_a(&daddr->v4.s_addr,
3518 nk->addr[pd->didx].v4.s_addr, 0);
3524 &nk->addr[pd->sidx], AF_INET6))
3525 PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3528 &nk->addr[pd->didx], AF_INET6))
3529 PF_ACPY(daddr, &nk->addr[pd->didx], af);
3542 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3543 r = r->skip[PF_SKIP_IFP].ptr;
3544 else if (r->direction && r->direction != direction)
3545 r = r->skip[PF_SKIP_DIR].ptr;
3546 else if (r->af && r->af != af)
3547 r = r->skip[PF_SKIP_AF].ptr;
3548 else if (r->proto && r->proto != pd->proto)
3549 r = r->skip[PF_SKIP_PROTO].ptr;
3550 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3551 r->src.neg, kif, M_GETFIB(m)))
3552 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3553 /* tcp/udp only. port_op always 0 in other cases */
3554 else if (r->src.port_op && !pf_match_port(r->src.port_op,
3555 r->src.port[0], r->src.port[1], sport))
3556 r = r->skip[PF_SKIP_SRC_PORT].ptr;
3557 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3558 r->dst.neg, NULL, M_GETFIB(m)))
3559 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3560 /* tcp/udp only. port_op always 0 in other cases */
3561 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3562 r->dst.port[0], r->dst.port[1], dport))
3563 r = r->skip[PF_SKIP_DST_PORT].ptr;
3564 /* icmp only. type always 0 in other cases */
3565 else if (r->type && r->type != icmptype + 1)
3566 r = TAILQ_NEXT(r, entries);
3567 /* icmp only. code always 0 in other cases */
3568 else if (r->code && r->code != icmpcode + 1)
3569 r = TAILQ_NEXT(r, entries);
3570 else if (r->tos && !(r->tos == pd->tos))
3571 r = TAILQ_NEXT(r, entries);
3572 else if (r->rule_flag & PFRULE_FRAGMENT)
3573 r = TAILQ_NEXT(r, entries);
3574 else if (pd->proto == IPPROTO_TCP &&
3575 (r->flagset & th->th_flags) != r->flags)
3576 r = TAILQ_NEXT(r, entries);
3577 /* tcp/udp only. uid.op always 0 in other cases */
3578 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3579 pf_socket_lookup(direction, pd, m), 1)) &&
3580 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3582 r = TAILQ_NEXT(r, entries);
3583 /* tcp/udp only. gid.op always 0 in other cases */
3584 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
3585 pf_socket_lookup(direction, pd, m), 1)) &&
3586 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
3588 r = TAILQ_NEXT(r, entries);
3590 !pf_match_ieee8021q_pcp(r->prio, m))
3591 r = TAILQ_NEXT(r, entries);
3593 r->prob <= arc4random())
3594 r = TAILQ_NEXT(r, entries);
3595 else if (r->match_tag && !pf_match_tag(m, r, &tag,
3596 pd->pf_mtag ? pd->pf_mtag->tag : 0))
3597 r = TAILQ_NEXT(r, entries);
3598 else if (r->os_fingerprint != PF_OSFP_ANY &&
3599 (pd->proto != IPPROTO_TCP || !pf_osfp_match(
3600 pf_osfp_fingerprint(pd, m, off, th),
3601 r->os_fingerprint)))
3602 r = TAILQ_NEXT(r, entries);
3606 if (r->rtableid >= 0)
3607 rtableid = r->rtableid;
3608 if (r->anchor == NULL) {
3615 r = TAILQ_NEXT(r, entries);
3617 pf_step_into_anchor(anchor_stack, &asd,
3618 &ruleset, PF_RULESET_FILTER, &r, &a,
3621 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3622 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3629 REASON_SET(&reason, PFRES_MATCH);
3631 if (r->log || (nr != NULL && nr->log)) {
3633 m_copyback(m, off, hdrlen, pd->hdr.any);
3634 PFLOG_PACKET(kif, m, af, direction, reason, r->log ? r : nr, a,
3638 if ((r->action == PF_DROP) &&
3639 ((r->rule_flag & PFRULE_RETURNRST) ||
3640 (r->rule_flag & PFRULE_RETURNICMP) ||
3641 (r->rule_flag & PFRULE_RETURN))) {
3642 pf_return(r, nr, pd, sk, off, m, th, kif, bproto_sum,
3643 bip_sum, hdrlen, &reason);
3646 if (r->action == PF_DROP)
3649 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3650 REASON_SET(&reason, PFRES_MEMORY);
3654 M_SETFIB(m, rtableid);
3656 if (!state_icmp && (r->keep_state || nr != NULL ||
3657 (pd->flags & PFDESC_TCP_NORM))) {
3659 action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off,
3660 sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum,
3662 if (action != PF_PASS) {
3663 if (action == PF_DROP &&
3664 (r->rule_flag & PFRULE_RETURN))
3665 pf_return(r, nr, pd, sk, off, m, th, kif,
3666 bproto_sum, bip_sum, hdrlen, &reason);
3671 uma_zfree(V_pf_state_key_z, sk);
3673 uma_zfree(V_pf_state_key_z, nk);
3676 /* copy back packet headers if we performed NAT operations */
3678 m_copyback(m, off, hdrlen, pd->hdr.any);
3680 if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) &&
3681 direction == PF_OUT &&
3682 V_pfsync_defer_ptr != NULL && V_pfsync_defer_ptr(*sm, m))
3684 * We want the state created, but we don't
3685 * want to send this in case a partner
3686 * firewall has to know about it to allow
3687 * replies through it.
3695 uma_zfree(V_pf_state_key_z, sk);
3697 uma_zfree(V_pf_state_key_z, nk);
3702 pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
3703 struct pf_pdesc *pd, struct pf_ksrc_node *nsn, struct pf_state_key *nk,
3704 struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport,
3705 u_int16_t dport, int *rewrite, struct pfi_kif *kif, struct pf_state **sm,
3706 int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen)
3708 struct pf_state *s = NULL;
3709 struct pf_ksrc_node *sn = NULL;
3710 struct tcphdr *th = pd->hdr.tcp;
3711 u_int16_t mss = V_tcp_mssdflt;
3714 /* check maximums */
3715 if (r->max_states &&
3716 (counter_u64_fetch(r->states_cur) >= r->max_states)) {
3717 counter_u64_add(V_pf_status.lcounters[LCNT_STATES], 1);
3718 REASON_SET(&reason, PFRES_MAXSTATES);
3721 /* src node for filter rule */
3722 if ((r->rule_flag & PFRULE_SRCTRACK ||
3723 r->rpool.opts & PF_POOL_STICKYADDR) &&
3724 pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
3725 REASON_SET(&reason, PFRES_SRCLIMIT);
3728 /* src node for translation rule */
3729 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3730 pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
3731 REASON_SET(&reason, PFRES_SRCLIMIT);
3734 s = uma_zalloc(V_pf_state_z, M_NOWAIT | M_ZERO);
3736 REASON_SET(&reason, PFRES_MEMORY);
3739 for (int i = 0; i < 2; i++) {
3740 s->bytes[i] = counter_u64_alloc(M_NOWAIT);
3741 s->packets[i] = counter_u64_alloc(M_NOWAIT);
3743 if (s->bytes[i] == NULL || s->packets[i] == NULL) {
3745 REASON_SET(&reason, PFRES_MEMORY);
3750 s->nat_rule.ptr = nr;
3752 STATE_INC_COUNTERS(s);
3754 s->state_flags |= PFSTATE_ALLOWOPTS;
3755 if (r->rule_flag & PFRULE_STATESLOPPY)
3756 s->state_flags |= PFSTATE_SLOPPY;
3757 s->log = r->log & PF_LOG_ALL;
3758 s->sync_state = PFSYNC_S_NONE;
3760 s->log |= nr->log & PF_LOG_ALL;
3761 switch (pd->proto) {
3763 s->src.seqlo = ntohl(th->th_seq);
3764 s->src.seqhi = s->src.seqlo + pd->p_len + 1;
3765 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
3766 r->keep_state == PF_STATE_MODULATE) {
3767 /* Generate sequence number modulator */
3768 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
3771 pf_change_proto_a(m, &th->th_seq, &th->th_sum,
3772 htonl(s->src.seqlo + s->src.seqdiff), 0);
3776 if (th->th_flags & TH_SYN) {
3778 s->src.wscale = pf_get_wscale(m, off,
3779 th->th_off, pd->af);
3781 s->src.max_win = MAX(ntohs(th->th_win), 1);
3782 if (s->src.wscale & PF_WSCALE_MASK) {
3783 /* Remove scale factor from initial window */
3784 int win = s->src.max_win;
3785 win += 1 << (s->src.wscale & PF_WSCALE_MASK);
3786 s->src.max_win = (win - 1) >>
3787 (s->src.wscale & PF_WSCALE_MASK);
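		/*
		 * The arithmetic above is a round-up divide: e.g. a raw
		 * window of 65535 with wscale 7 stores
		 * (65535 + (1 << 7) - 1) >> 7 == 512, the pre-scaling
		 * window rounded up to whole scale units.
		 */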
3789 if (th->th_flags & TH_FIN)
3793 s->src.state = TCPS_SYN_SENT;
3794 s->dst.state = TCPS_CLOSED;
3795 s->timeout = PFTM_TCP_FIRST_PACKET;
3798 s->src.state = PFUDPS_SINGLE;
3799 s->dst.state = PFUDPS_NO_TRAFFIC;
3800 s->timeout = PFTM_UDP_FIRST_PACKET;
3804 case IPPROTO_ICMPV6:
3806 s->timeout = PFTM_ICMP_FIRST_PACKET;
3809 s->src.state = PFOTHERS_SINGLE;
3810 s->dst.state = PFOTHERS_NO_TRAFFIC;
3811 s->timeout = PFTM_OTHER_FIRST_PACKET;
3815 if (pf_map_addr(pd->af, r, pd->src, &s->rt_addr, NULL, &sn)) {
3816 REASON_SET(&reason, PFRES_MAPFAILED);
3817 pf_src_tree_remove_state(s);
3818 STATE_DEC_COUNTERS(s);
3819 uma_zfree(V_pf_state_z, s);
3822 s->rt_kif = r->rpool.cur->kif;
3825 s->creation = time_uptime;
3826 s->expire = time_uptime;
3831 /* XXX We only modify one side for now. */
3832 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
3833 s->nat_src_node = nsn;
3835 if (pd->proto == IPPROTO_TCP) {
3836 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
3837 off, pd, th, &s->src, &s->dst)) {
3838 REASON_SET(&reason, PFRES_MEMORY);
3839 pf_src_tree_remove_state(s);
3840 STATE_DEC_COUNTERS(s);
3841 uma_zfree(V_pf_state_z, s);
3844 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
3845 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
3846 &s->src, &s->dst, rewrite)) {
3847 /* This really shouldn't happen!!! */
3848 DPFPRINTF(PF_DEBUG_URGENT,
3849 ("pf_normalize_tcp_stateful failed on first pkt"));
3850 pf_normalize_tcp_cleanup(s);
3851 pf_src_tree_remove_state(s);
3852 STATE_DEC_COUNTERS(s);
3853 uma_zfree(V_pf_state_z, s);
3857 s->direction = pd->dir;
3860 * sk/nk could already have been set up by pf_get_translation().
3863 KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p",
3864 __func__, nr, sk, nk));
3865 sk = pf_state_key_setup(pd, pd->src, pd->dst, sport, dport);
3870 KASSERT((sk != NULL && nk != NULL), ("%s: nr %p sk %p, nk %p",
3871 __func__, nr, sk, nk));
3873 /* Swap sk/nk for PF_OUT. */
3874 if (pf_state_insert(BOUND_IFACE(r, kif),
3875 (pd->dir == PF_IN) ? sk : nk,
3876 (pd->dir == PF_IN) ? nk : sk, s)) {
3877 if (pd->proto == IPPROTO_TCP)
3878 pf_normalize_tcp_cleanup(s);
3879 REASON_SET(&reason, PFRES_STATEINS);
3880 pf_src_tree_remove_state(s);
3881 STATE_DEC_COUNTERS(s);
3882 uma_zfree(V_pf_state_z, s);
3889 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
3890 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
3891 s->src.state = PF_TCPS_PROXY_SRC;
3892 /* undo NAT changes, if they have taken place */
3894 struct pf_state_key *skt = s->key[PF_SK_WIRE];
3895 if (pd->dir == PF_OUT)
3896 skt = s->key[PF_SK_STACK];
3897 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
3898 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
3900 *pd->sport = skt->port[pd->sidx];
3902 *pd->dport = skt->port[pd->didx];
3904 *pd->proto_sum = bproto_sum;
3906 *pd->ip_sum = bip_sum;
3907 m_copyback(m, off, hdrlen, pd->hdr.any);
3909 s->src.seqhi = htonl(arc4random());
3910 /* Find mss option */
3911 int rtid = M_GETFIB(m);
3912 mss = pf_get_mss(m, off, th->th_off, pd->af);
3913 mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
3914 mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
3916 pf_send_tcp(NULL, r, pd->af, pd->dst, pd->src, th->th_dport,
3917 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
3918 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL);
3919 REASON_SET(&reason, PFRES_SYNPROXY);
3920 return (PF_SYNPROXY_DROP);
3927 uma_zfree(V_pf_state_key_z, sk);
3929 uma_zfree(V_pf_state_key_z, nk);
3932 struct pf_srchash *sh;
3934 sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
3935 PF_HASHROW_LOCK(sh);
3936 if (--sn->states == 0 && sn->expire == 0) {
3937 pf_unlink_src_node(sn);
3938 uma_zfree(V_pf_sources_z, sn);
3940 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
3942 PF_HASHROW_UNLOCK(sh);
3945 if (nsn != sn && nsn != NULL) {
3946 struct pf_srchash *sh;
3948 sh = &V_pf_srchash[pf_hashsrc(&nsn->addr, nsn->af)];
3949 PF_HASHROW_LOCK(sh);
3950 if (--nsn->states == 0 && nsn->expire == 0) {
3951 pf_unlink_src_node(nsn);
3952 uma_zfree(V_pf_sources_z, nsn);
3954 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
3956 PF_HASHROW_UNLOCK(sh);
3963 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
3964 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
3965 struct pf_ruleset **rsm)
3967 struct pf_rule *r, *a = NULL;
3968 struct pf_ruleset *ruleset = NULL;
3969 sa_family_t af = pd->af;
3974 struct pf_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
3978 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3981 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3982 r = r->skip[PF_SKIP_IFP].ptr;
3983 else if (r->direction && r->direction != direction)
3984 r = r->skip[PF_SKIP_DIR].ptr;
3985 else if (r->af && r->af != af)
3986 r = r->skip[PF_SKIP_AF].ptr;
3987 else if (r->proto && r->proto != pd->proto)
3988 r = r->skip[PF_SKIP_PROTO].ptr;
3989 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
3990 r->src.neg, kif, M_GETFIB(m)))
3991 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3992 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
3993 r->dst.neg, NULL, M_GETFIB(m)))
3994 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3995 else if (r->tos && !(r->tos == pd->tos))
3996 r = TAILQ_NEXT(r, entries);
3997 else if (r->os_fingerprint != PF_OSFP_ANY)
3998 r = TAILQ_NEXT(r, entries);
3999 else if (pd->proto == IPPROTO_UDP &&
4000 (r->src.port_op || r->dst.port_op))
4001 r = TAILQ_NEXT(r, entries);
4002 else if (pd->proto == IPPROTO_TCP &&
4003 (r->src.port_op || r->dst.port_op || r->flagset))
4004 r = TAILQ_NEXT(r, entries);
4005 else if ((pd->proto == IPPROTO_ICMP ||
4006 pd->proto == IPPROTO_ICMPV6) &&
4007 (r->type || r->code))
4008 r = TAILQ_NEXT(r, entries);
4010 !pf_match_ieee8021q_pcp(r->prio, m))
4011 r = TAILQ_NEXT(r, entries);
4012 else if (r->prob && r->prob <=
4013 (arc4random() % (UINT_MAX - 1) + 1))
4014 r = TAILQ_NEXT(r, entries);
4015 else if (r->match_tag && !pf_match_tag(m, r, &tag,
4016 pd->pf_mtag ? pd->pf_mtag->tag : 0))
4017 r = TAILQ_NEXT(r, entries);
4019 if (r->anchor == NULL) {
4026 r = TAILQ_NEXT(r, entries);
4028 pf_step_into_anchor(anchor_stack, &asd,
4029 &ruleset, PF_RULESET_FILTER, &r, &a,
4032 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
4033 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
4040 REASON_SET(&reason, PFRES_MATCH);
4043 PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd,
4046 if (r->action != PF_PASS)
4049 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
4050 REASON_SET(&reason, PFRES_MEMORY);
4058 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
4059 struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off,
4060 struct pf_pdesc *pd, u_short *reason, int *copyback)
4062 struct tcphdr *th = pd->hdr.tcp;
4063 u_int16_t win = ntohs(th->th_win);
4064 u_int32_t ack, end, seq, orig_seq;
4068 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
4069 sws = src->wscale & PF_WSCALE_MASK;
4070 dws = dst->wscale & PF_WSCALE_MASK;
4075 * Sequence tracking algorithm from Guido van Rooij's paper:
4076 * http://www.madison-gurkha.com/publications/tcp_filtering/
4080 orig_seq = seq = ntohl(th->th_seq);
4081 if (src->seqlo == 0) {
4082 /* First packet from this end. Set its state */
4084 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
4085 src->scrub == NULL) {
4086 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
4087 REASON_SET(reason, PFRES_MEMORY);
4092 /* Deferred generation of sequence number modulator */
4093 if (dst->seqdiff && !src->seqdiff) {
4094 /* use a random ISS for the TCP server */
4095 while ((src->seqdiff = arc4random() - seq) == 0)
4097 ack = ntohl(th->th_ack) - dst->seqdiff;
4098 pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
4100 pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
4103 ack = ntohl(th->th_ack);
4106 end = seq + pd->p_len;
4107 if (th->th_flags & TH_SYN) {
4109 if (dst->wscale & PF_WSCALE_FLAG) {
4110 src->wscale = pf_get_wscale(m, off, th->th_off,
4112 if (src->wscale & PF_WSCALE_FLAG) {
4113 /* Remove scale factor from initial
4115 sws = src->wscale & PF_WSCALE_MASK;
4116 win = ((u_int32_t)win + (1 << sws) - 1)
4118 dws = dst->wscale & PF_WSCALE_MASK;
4120 /* fixup other window */
4121 dst->max_win <<= dst->wscale &
4123 /* in case of a retrans SYN|ACK */
4128 if (th->th_flags & TH_FIN)
4132 if (src->state < TCPS_SYN_SENT)
4133 src->state = TCPS_SYN_SENT;
4136 * May need to slide the window (seqhi may have been set by
4137 * the crappy stack check or if we picked up the connection
4138 * after establishment)
4140 if (src->seqhi == 1 ||
4141 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
4142 src->seqhi = end + MAX(1, dst->max_win << dws);
4143 if (win > src->max_win)
4147 ack = ntohl(th->th_ack) - dst->seqdiff;
4149 /* Modulate sequence numbers */
4150 pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
4152 pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
4155 end = seq + pd->p_len;
4156 if (th->th_flags & TH_SYN)
4158 if (th->th_flags & TH_FIN)
4162 if ((th->th_flags & TH_ACK) == 0) {
4163 /* Let it pass through the ack skew check */
4165 } else if ((ack == 0 &&
4166 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
4167 /* broken tcp stacks do not set ack */
4168 (dst->state < TCPS_SYN_SENT)) {
4170 * Many stacks (ours included) will set the ACK number in an
4171 * FIN|ACK if the SYN times out -- no sequence to ACK.
4177 /* Ease sequencing restrictions on no-data packets */
4182 ackskew = dst->seqlo - ack;
4186 * Need to demodulate the sequence numbers in any TCP SACK options
4187 * (Selective ACK). We could optionally validate the SACK values
4188 * against the current ACK window, either forwards or backwards, but
4189 * I'm not confident that SACK has been implemented properly
4190 * everywhere. It wouldn't surprise me if several stacks accidentally
4191 * SACK too far backwards of previously ACKed data. There really aren't
4192 * any security implications of bad SACKing unless the target stack
4193 * doesn't validate the option length correctly. Someone trying to
4194 * spoof into a TCP connection won't bother blindly sending SACK
4197 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
4198 if (pf_modulate_sack(m, off, pd, th, dst))
4203 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
4204 if (SEQ_GEQ(src->seqhi, end) &&
4205 /* Last octet inside other's window space */
4206 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
4207 /* Retrans: not more than one window back */
4208 (ackskew >= -MAXACKWINDOW) &&
4209 /* Acking not more than one reassembled fragment backwards */
4210 (ackskew <= (MAXACKWINDOW << sws)) &&
4211 /* Acking not more than one window forward */
4212 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
4213 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
4214 (pd->flags & PFDESC_IP_REAS) == 0)) {
4215 /* Require an exact/+1 sequence match on resets when possible */
4217 if (dst->scrub || src->scrub) {
4218 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4219 *state, src, dst, copyback))
4223 /* update max window */
4224 if (src->max_win < win)
4226 /* synchronize sequencing */
4227 if (SEQ_GT(end, src->seqlo))
4229 /* slide the window of what the other end can send */
4230 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4231 dst->seqhi = ack + MAX((win << sws), 1);
4235 if (th->th_flags & TH_SYN)
4236 if (src->state < TCPS_SYN_SENT)
4237 src->state = TCPS_SYN_SENT;
4238 if (th->th_flags & TH_FIN)
4239 if (src->state < TCPS_CLOSING)
4240 src->state = TCPS_CLOSING;
4241 if (th->th_flags & TH_ACK) {
4242 if (dst->state == TCPS_SYN_SENT) {
4243 dst->state = TCPS_ESTABLISHED;
4244 if (src->state == TCPS_ESTABLISHED &&
4245 (*state)->src_node != NULL &&
4246 pf_src_connlimit(state)) {
4247 REASON_SET(reason, PFRES_SRCLIMIT);
4250 } else if (dst->state == TCPS_CLOSING)
4251 dst->state = TCPS_FIN_WAIT_2;
4253 if (th->th_flags & TH_RST)
4254 src->state = dst->state = TCPS_TIME_WAIT;
4256 /* update expire time */
4257 (*state)->expire = time_uptime;
4258 if (src->state >= TCPS_FIN_WAIT_2 &&
4259 dst->state >= TCPS_FIN_WAIT_2)
4260 (*state)->timeout = PFTM_TCP_CLOSED;
4261 else if (src->state >= TCPS_CLOSING &&
4262 dst->state >= TCPS_CLOSING)
4263 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4264 else if (src->state < TCPS_ESTABLISHED ||
4265 dst->state < TCPS_ESTABLISHED)
4266 (*state)->timeout = PFTM_TCP_OPENING;
4267 else if (src->state >= TCPS_CLOSING ||
4268 dst->state >= TCPS_CLOSING)
4269 (*state)->timeout = PFTM_TCP_CLOSING;
4271 (*state)->timeout = PFTM_TCP_ESTABLISHED;
4273 /* Fall through to PASS packet */
4275 } else if ((dst->state < TCPS_SYN_SENT ||
4276 dst->state >= TCPS_FIN_WAIT_2 ||
4277 src->state >= TCPS_FIN_WAIT_2) &&
4278 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
4279 /* Within a window forward of the originating packet */
4280 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
4281 /* Within a window backward of the originating packet */
4284 * This currently handles three situations:
4285 * 1) Stupid stacks will shotgun SYNs before their peer
4287 * 2) When PF catches an already established stream (the
4288 * firewall rebooted, the state table was flushed, routes
4290 * 3) Packets get funky immediately after the connection
4291 * closes (this should catch Solaris spurious ACK|FINs
4292 * that web servers like to spew after a close)
4294 * This must be a little more careful than the above code
4295 * since packet floods will also be caught here. We don't
4296 * update the TTL here to mitigate the damage of a packet
4297 * flood and so the same code can handle awkward establishment
4298 * and a loosened connection close.
4299 * In the establishment case, a correct peer response will
4300 * validate the connection, go through the normal state code
4301 * and keep updating the state TTL.
4304 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4305 printf("pf: loose state match: ");
4306 pf_print_state(*state);
4307 pf_print_flags(th->th_flags);
4308 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4309 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
4311 (unsigned long long)counter_u64_fetch((*state)->packets[0]),
4312 (unsigned long long)counter_u64_fetch((*state)->packets[1]),
4313 pd->dir == PF_IN ? "in" : "out",
4314 pd->dir == (*state)->direction ? "fwd" : "rev");
4317 if (dst->scrub || src->scrub) {
4318 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4319 *state, src, dst, copyback))
4323 /* update max window */
4324 if (src->max_win < win)
4326 /* synchronize sequencing */
4327 if (SEQ_GT(end, src->seqlo))
4329 /* slide the window of what the other end can send */
4330 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4331 dst->seqhi = ack + MAX((win << sws), 1);
4334 * Cannot set dst->seqhi here since this could be a shotgunned
4335 * SYN and not an already established connection.
4338 if (th->th_flags & TH_FIN)
4339 if (src->state < TCPS_CLOSING)
4340 src->state = TCPS_CLOSING;
4341 if (th->th_flags & TH_RST)
4342 src->state = dst->state = TCPS_TIME_WAIT;
4344 /* Fall through to PASS packet */
4347 if ((*state)->dst.state == TCPS_SYN_SENT &&
4348 (*state)->src.state == TCPS_SYN_SENT) {
4349 /* Send RST for state mismatches during handshake */
4350 if (!(th->th_flags & TH_RST))
4351 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4352 pd->dst, pd->src, th->th_dport,
4353 th->th_sport, ntohl(th->th_ack), 0,
4355 (*state)->rule.ptr->return_ttl, 1, 0,
4360 } else if (V_pf_status.debug >= PF_DEBUG_MISC) {
4361 printf("pf: BAD state: ");
4362 pf_print_state(*state);
4363 pf_print_flags(th->th_flags);
4364 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4365 "pkts=%llu:%llu dir=%s,%s\n",
4366 seq, orig_seq, ack, pd->p_len, ackskew,
4367 (unsigned long long)counter_u64_fetch((*state)->packets[0]),
4368 (unsigned long long)counter_u64_fetch((*state)->packets[1]),
4369 pd->dir == PF_IN ? "in" : "out",
4370 pd->dir == (*state)->direction ? "fwd" : "rev");
4371 printf("pf: State failure on: %c %c %c %c | %c %c\n",
4372 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4373 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4375 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4376 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4377 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4378 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
4380 REASON_SET(reason, PFRES_BADSTATE);
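/*
 * The strict acceptance test above, condensed into one predicate for
 * reference (sketch, not compiled; the exact/+1 RST sequence rule is
 * omitted):
 */
#if 0
static int
example_seq_ok(u_int32_t seq, u_int32_t end, u_int32_t ack,
    struct pf_state_peer *src, struct pf_state_peer *dst,
    u_int8_t sws, u_int8_t dws)
{
	int ackskew = dst->seqlo - ack;

	return (SEQ_GEQ(src->seqhi, end) &&		/* inside window */
	    SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
	    ackskew >= -MAXACKWINDOW &&			/* retrans limit */
	    ackskew <= (MAXACKWINDOW << sws));		/* ack skew limit */
}
#endif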
4388 pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst,
4389 struct pf_state **state, struct pf_pdesc *pd, u_short *reason)
4391 struct tcphdr *th = pd->hdr.tcp;
4393 if (th->th_flags & TH_SYN)
4394 if (src->state < TCPS_SYN_SENT)
4395 src->state = TCPS_SYN_SENT;
4396 if (th->th_flags & TH_FIN)
4397 if (src->state < TCPS_CLOSING)
4398 src->state = TCPS_CLOSING;
4399 if (th->th_flags & TH_ACK) {
4400 if (dst->state == TCPS_SYN_SENT) {
4401 dst->state = TCPS_ESTABLISHED;
4402 if (src->state == TCPS_ESTABLISHED &&
4403 (*state)->src_node != NULL &&
4404 pf_src_connlimit(state)) {
4405 REASON_SET(reason, PFRES_SRCLIMIT);
4408 } else if (dst->state == TCPS_CLOSING) {
4409 dst->state = TCPS_FIN_WAIT_2;
4410 } else if (src->state == TCPS_SYN_SENT &&
4411 dst->state < TCPS_SYN_SENT) {
4413 * Handle a special sloppy case where we only see one
4414 * half of the connection. If there is an ACK after
4415 * the initial SYN without ever seeing a packet from
4416 * the destination, set the connection to established.
4418 dst->state = src->state = TCPS_ESTABLISHED;
4419 if ((*state)->src_node != NULL &&
4420 pf_src_connlimit(state)) {
4421 REASON_SET(reason, PFRES_SRCLIMIT);
4424 } else if (src->state == TCPS_CLOSING &&
4425 dst->state == TCPS_ESTABLISHED &&
4428 * Handle the closing of half connections where we
4429 * don't see the full bidirectional FIN/ACK+ACK
4432 dst->state = TCPS_CLOSING;
4435 if (th->th_flags & TH_RST)
4436 src->state = dst->state = TCPS_TIME_WAIT;
4438 /* update expire time */
4439 (*state)->expire = time_uptime;
4440 if (src->state >= TCPS_FIN_WAIT_2 &&
4441 dst->state >= TCPS_FIN_WAIT_2)
4442 (*state)->timeout = PFTM_TCP_CLOSED;
4443 else if (src->state >= TCPS_CLOSING &&
4444 dst->state >= TCPS_CLOSING)
4445 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4446 else if (src->state < TCPS_ESTABLISHED ||
4447 dst->state < TCPS_ESTABLISHED)
4448 (*state)->timeout = PFTM_TCP_OPENING;
4449 else if (src->state >= TCPS_CLOSING ||
4450 dst->state >= TCPS_CLOSING)
4451 (*state)->timeout = PFTM_TCP_CLOSING;
4453 (*state)->timeout = PFTM_TCP_ESTABLISHED;
4459 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
4460 struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
4463 struct pf_state_key_cmp key;
4464 struct tcphdr *th = pd->hdr.tcp;
4466 struct pf_state_peer *src, *dst;
4467 struct pf_state_key *sk;
4469 bzero(&key, sizeof(key));
4471 key.proto = IPPROTO_TCP;
4472 if (direction == PF_IN) { /* wire side, straight */
4473 PF_ACPY(&key.addr[0], pd->src, key.af);
4474 PF_ACPY(&key.addr[1], pd->dst, key.af);
4475 key.port[0] = th->th_sport;
4476 key.port[1] = th->th_dport;
4477 } else { /* stack side, reverse */
4478 PF_ACPY(&key.addr[1], pd->src, key.af);
4479 PF_ACPY(&key.addr[0], pd->dst, key.af);
4480 key.port[1] = th->th_sport;
4481 key.port[0] = th->th_dport;
4484 STATE_LOOKUP(kif, &key, direction, *state, pd);
4486 if (direction == (*state)->direction) {
4487 src = &(*state)->src;
4488 dst = &(*state)->dst;
4490 src = &(*state)->dst;
4491 dst = &(*state)->src;
4494 sk = (*state)->key[pd->didx];
4496 if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
4497 if (direction != (*state)->direction) {
4498 REASON_SET(reason, PFRES_SYNPROXY);
4499 return (PF_SYNPROXY_DROP);
4501 if (th->th_flags & TH_SYN) {
4502 if (ntohl(th->th_seq) != (*state)->src.seqlo) {
4503 REASON_SET(reason, PFRES_SYNPROXY);
4506 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4507 pd->src, th->th_dport, th->th_sport,
4508 (*state)->src.seqhi, ntohl(th->th_seq) + 1,
4509 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0, NULL);
4510 REASON_SET(reason, PFRES_SYNPROXY);
4511 return (PF_SYNPROXY_DROP);
4512 } else if ((th->th_flags & (TH_ACK|TH_RST|TH_FIN)) != TH_ACK ||
4513 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4514 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4515 REASON_SET(reason, PFRES_SYNPROXY);
4517 } else if ((*state)->src_node != NULL &&
4518 pf_src_connlimit(state)) {
4519 REASON_SET(reason, PFRES_SRCLIMIT);
4522 (*state)->src.state = PF_TCPS_PROXY_DST;
4524 if ((*state)->src.state == PF_TCPS_PROXY_DST) {
4525 if (direction == (*state)->direction) {
4526 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
4527 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4528 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4529 REASON_SET(reason, PFRES_SYNPROXY);
4532 (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
4533 if ((*state)->dst.seqhi == 1)
4534 (*state)->dst.seqhi = htonl(arc4random());
4535 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4536 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4537 sk->port[pd->sidx], sk->port[pd->didx],
4538 (*state)->dst.seqhi, 0, TH_SYN, 0,
4539 (*state)->src.mss, 0, 0, (*state)->tag, NULL);
4540 REASON_SET(reason, PFRES_SYNPROXY);
4541 return (PF_SYNPROXY_DROP);
4542 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
4544 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
4545 REASON_SET(reason, PFRES_SYNPROXY);
4548 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
4549 (*state)->dst.seqlo = ntohl(th->th_seq);
4550 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4551 pd->src, th->th_dport, th->th_sport,
4552 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
4553 TH_ACK, (*state)->src.max_win, 0, 0, 0,
4554 (*state)->tag, NULL);
4555 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4556 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4557 sk->port[pd->sidx], sk->port[pd->didx],
4558 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
4559 TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0, NULL);
4560 (*state)->src.seqdiff = (*state)->dst.seqhi -
4561 (*state)->src.seqlo;
4562 (*state)->dst.seqdiff = (*state)->src.seqhi -
4563 (*state)->dst.seqlo;
4564 (*state)->src.seqhi = (*state)->src.seqlo +
4565 (*state)->dst.max_win;
4566 (*state)->dst.seqhi = (*state)->dst.seqlo +
4567 (*state)->src.max_win;
4568 (*state)->src.wscale = (*state)->dst.wscale = 0;
4569 (*state)->src.state = (*state)->dst.state =
4571 REASON_SET(reason, PFRES_SYNPROXY);
4572 return (PF_SYNPROXY_DROP);
4576 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
4577 dst->state >= TCPS_FIN_WAIT_2 &&
4578 src->state >= TCPS_FIN_WAIT_2) {
4579 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4580 printf("pf: state reuse ");
4581 pf_print_state(*state);
4582 pf_print_flags(th->th_flags);
4585 /* XXX make sure it's the same direction ?? */
4586 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
4587 pf_unlink_state(*state, PF_ENTER_LOCKED);
4592 if ((*state)->state_flags & PFSTATE_SLOPPY) {
4593 if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP)
4596 if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason,
4597 &copyback) == PF_DROP)
4601 /* translate source/destination address, if necessary */
4602 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4603 struct pf_state_key *nk = (*state)->key[pd->didx];
4605 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4606 nk->port[pd->sidx] != th->th_sport)
4607 pf_change_ap(m, pd->src, &th->th_sport,
4608 pd->ip_sum, &th->th_sum, &nk->addr[pd->sidx],
4609 nk->port[pd->sidx], 0, pd->af);
4611 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4612 nk->port[pd->didx] != th->th_dport)
4613 pf_change_ap(m, pd->dst, &th->th_dport,
4614 pd->ip_sum, &th->th_sum, &nk->addr[pd->didx],
4615 nk->port[pd->didx], 0, pd->af);
4619 /* Copyback sequence modulation or stateful scrub changes if needed */
4621 m_copyback(m, off, sizeof(*th), (caddr_t)th);
4627 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
4628 struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4630 struct pf_state_peer *src, *dst;
4631 struct pf_state_key_cmp key;
4632 struct udphdr *uh = pd->hdr.udp;
4634 bzero(&key, sizeof(key));
4636 key.proto = IPPROTO_UDP;
4637 if (direction == PF_IN) { /* wire side, straight */
4638 PF_ACPY(&key.addr[0], pd->src, key.af);
4639 PF_ACPY(&key.addr[1], pd->dst, key.af);
4640 key.port[0] = uh->uh_sport;
4641 key.port[1] = uh->uh_dport;
4642 } else { /* stack side, reverse */
4643 PF_ACPY(&key.addr[1], pd->src, key.af);
4644 PF_ACPY(&key.addr[0], pd->dst, key.af);
4645 key.port[1] = uh->uh_sport;
4646 key.port[0] = uh->uh_dport;
4649 STATE_LOOKUP(kif, &key, direction, *state, pd);
4651 if (direction == (*state)->direction) {
4652 src = &(*state)->src;
4653 dst = &(*state)->dst;
4655 src = &(*state)->dst;
4656 dst = &(*state)->src;
4660 if (src->state < PFUDPS_SINGLE)
4661 src->state = PFUDPS_SINGLE;
4662 if (dst->state == PFUDPS_SINGLE)
4663 dst->state = PFUDPS_MULTIPLE;
4665 /* update expire time */
4666 (*state)->expire = time_uptime;
4667 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
4668 (*state)->timeout = PFTM_UDP_MULTIPLE;
4670 (*state)->timeout = PFTM_UDP_SINGLE;
4672 /* translate source/destination address, if necessary */
4673 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4674 struct pf_state_key *nk = (*state)->key[pd->didx];
4676 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4677 nk->port[pd->sidx] != uh->uh_sport)
4678 pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum,
4679 &uh->uh_sum, &nk->addr[pd->sidx],
4680 nk->port[pd->sidx], 1, pd->af);
4682 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4683 nk->port[pd->didx] != uh->uh_dport)
4684 pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum,
4685 &uh->uh_sum, &nk->addr[pd->didx],
4686 nk->port[pd->didx], 1, pd->af);
4687 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
4694 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
4695 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
4697 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
4698 u_int16_t icmpid = 0, *icmpsum;
4699 u_int8_t icmptype, icmpcode;
4701 struct pf_state_key_cmp key;
4703 bzero(&key, sizeof(key));
4704 switch (pd->proto) {
4707 icmptype = pd->hdr.icmp->icmp_type;
4708 icmpcode = pd->hdr.icmp->icmp_code;
4709 icmpid = pd->hdr.icmp->icmp_id;
4710 icmpsum = &pd->hdr.icmp->icmp_cksum;
4712 if (icmptype == ICMP_UNREACH ||
4713 icmptype == ICMP_SOURCEQUENCH ||
4714 icmptype == ICMP_REDIRECT ||
4715 icmptype == ICMP_TIMXCEED ||
4716 icmptype == ICMP_PARAMPROB)
4721 case IPPROTO_ICMPV6:
4722 icmptype = pd->hdr.icmp6->icmp6_type;
4723 icmpcode = pd->hdr.icmp6->icmp6_code;
4724 icmpid = pd->hdr.icmp6->icmp6_id;
4725 icmpsum = &pd->hdr.icmp6->icmp6_cksum;
4727 if (icmptype == ICMP6_DST_UNREACH ||
4728 icmptype == ICMP6_PACKET_TOO_BIG ||
4729 icmptype == ICMP6_TIME_EXCEEDED ||
4730 icmptype == ICMP6_PARAM_PROB)
4739 * ICMP query/reply message not related to a TCP/UDP packet.
4740 * Search for an ICMP state.
4743 key.proto = pd->proto;
4744 key.port[0] = key.port[1] = icmpid;
4745 if (direction == PF_IN) { /* wire side, straight */
4746 PF_ACPY(&key.addr[0], pd->src, key.af);
4747 PF_ACPY(&key.addr[1], pd->dst, key.af);
4748 } else { /* stack side, reverse */
4749 PF_ACPY(&key.addr[1], pd->src, key.af);
4750 PF_ACPY(&key.addr[0], pd->dst, key.af);
4753 STATE_LOOKUP(kif, &key, direction, *state, pd);
4755 (*state)->expire = time_uptime;
4756 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
4758 /* translate source/destination address, if necessary */
4759 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4760 struct pf_state_key *nk = (*state)->key[pd->didx];
4765 if (PF_ANEQ(pd->src,
4766 &nk->addr[pd->sidx], AF_INET))
4767 pf_change_a(&saddr->v4.s_addr,
4769 nk->addr[pd->sidx].v4.s_addr, 0);
4771 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
4773 pf_change_a(&daddr->v4.s_addr,
4775 nk->addr[pd->didx].v4.s_addr, 0);
4778 pd->hdr.icmp->icmp_id) {
4779 pd->hdr.icmp->icmp_cksum =
4781 pd->hdr.icmp->icmp_cksum, icmpid,
4782 nk->port[pd->sidx], 0);
4783 pd->hdr.icmp->icmp_id =
4787 m_copyback(m, off, ICMP_MINLEN,
4788 (caddr_t )pd->hdr.icmp);
4793 if (PF_ANEQ(pd->src,
4794 &nk->addr[pd->sidx], AF_INET6))
4796 &pd->hdr.icmp6->icmp6_cksum,
4797 &nk->addr[pd->sidx], 0);
4799 if (PF_ANEQ(pd->dst,
4800 &nk->addr[pd->didx], AF_INET6))
4802 &pd->hdr.icmp6->icmp6_cksum,
4803 &nk->addr[pd->didx], 0);
4805 m_copyback(m, off, sizeof(struct icmp6_hdr),
4806 (caddr_t )pd->hdr.icmp6);
4815 * ICMP error message in response to a TCP/UDP packet.
4816 * Extract the inner TCP/UDP header and search for that state.
4819 struct pf_pdesc pd2;
4820 bzero(&pd2, sizeof pd2);
4825 struct ip6_hdr h2_6;
4832 /* Payload packet is from the opposite direction. */
4833 pd2.sidx = (direction == PF_IN) ? 1 : 0;
4834 pd2.didx = (direction == PF_IN) ? 0 : 1;
4838 /* offset of h2 in mbuf chain */
4839 ipoff2 = off + ICMP_MINLEN;
4841 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
4842 NULL, reason, pd2.af)) {
4843 DPFPRINTF(PF_DEBUG_MISC,
4844 ("pf: ICMP error message too short "
4849 * ICMP error messages don't refer to non-first
4852 if (h2.ip_off & htons(IP_OFFMASK)) {
4853 REASON_SET(reason, PFRES_FRAG);
4857 /* offset of protocol header that follows h2 */
4858 off2 = ipoff2 + (h2.ip_hl << 2);
4860 pd2.proto = h2.ip_p;
4861 pd2.src = (struct pf_addr *)&h2.ip_src;
4862 pd2.dst = (struct pf_addr *)&h2.ip_dst;
4863 pd2.ip_sum = &h2.ip_sum;
4868 ipoff2 = off + sizeof(struct icmp6_hdr);
4870 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
4871 NULL, reason, pd2.af)) {
4872 DPFPRINTF(PF_DEBUG_MISC,
4873 ("pf: ICMP error message too short "
4877 pd2.proto = h2_6.ip6_nxt;
4878 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
4879 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
4881 off2 = ipoff2 + sizeof(h2_6);
4883 switch (pd2.proto) {
4884 case IPPROTO_FRAGMENT:
4886 * ICMPv6 error messages for
4887 * non-first fragments
4889 REASON_SET(reason, PFRES_FRAG);
4892 case IPPROTO_HOPOPTS:
4893 case IPPROTO_ROUTING:
4894 case IPPROTO_DSTOPTS: {
4895 /* get next header and header length */
4896 struct ip6_ext opt6;
4898 if (!pf_pull_hdr(m, off2, &opt6,
4899 sizeof(opt6), NULL, reason,
4901 DPFPRINTF(PF_DEBUG_MISC,
4902 ("pf: ICMPv6 short opt\n"));
4905 if (pd2.proto == IPPROTO_AH)
4906 off2 += (opt6.ip6e_len + 2) * 4;
4908 off2 += (opt6.ip6e_len + 1) * 8;
4909 pd2.proto = opt6.ip6e_nxt;
4910 /* go to the next header */
4917 } while (!terminal);
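			/*
			 * Length rules used in the walk above: AH counts
			 * 32-bit words minus two, the other chained headers
			 * count 8-byte units minus one.  E.g. ip6e_len 2
			 * means 16 bytes for AH ((2 + 2) * 4) but 24 bytes
			 * for a Routing header ((2 + 1) * 8).
			 */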
4922 if (PF_ANEQ(pd->dst, pd2.src, pd->af)) {
4923 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4924 printf("pf: BAD ICMP %d:%d outer dst: ",
4925 icmptype, icmpcode);
4926 pf_print_host(pd->src, 0, pd->af);
4928 pf_print_host(pd->dst, 0, pd->af);
4929 printf(" inner src: ");
4930 pf_print_host(pd2.src, 0, pd2.af);
4932 pf_print_host(pd2.dst, 0, pd2.af);
4935 REASON_SET(reason, PFRES_BADSTATE);
4939 switch (pd2.proto) {
4943 struct pf_state_peer *src, *dst;
4948 * Only the first 8 bytes of the TCP header can be
4949 * expected. Don't access any TCP header fields after
4950 * th_seq; an ackskew test is not possible.
4952 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
4954 DPFPRINTF(PF_DEBUG_MISC,
4955 ("pf: ICMP error message too short "
4961 key.proto = IPPROTO_TCP;
4962 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4963 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4964 key.port[pd2.sidx] = th.th_sport;
4965 key.port[pd2.didx] = th.th_dport;
4967 STATE_LOOKUP(kif, &key, direction, *state, pd);
4969 if (direction == (*state)->direction) {
4970 src = &(*state)->dst;
4971 dst = &(*state)->src;
4973 src = &(*state)->src;
4974 dst = &(*state)->dst;
4977 if (src->wscale && dst->wscale)
4978 dws = dst->wscale & PF_WSCALE_MASK;
4982 /* Demodulate sequence number */
4983 seq = ntohl(th.th_seq) - src->seqdiff;
4985 pf_change_a(&th.th_seq, icmpsum,
4990 if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
4991 (!SEQ_GEQ(src->seqhi, seq) ||
4992 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
4993 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4994 printf("pf: BAD ICMP %d:%d ",
4995 icmptype, icmpcode);
4996 pf_print_host(pd->src, 0, pd->af);
4998 pf_print_host(pd->dst, 0, pd->af);
5000 pf_print_state(*state);
5001 printf(" seq=%u\n", seq);
5003 REASON_SET(reason, PFRES_BADSTATE);
5006 if (V_pf_status.debug >= PF_DEBUG_MISC) {
5007 printf("pf: OK ICMP %d:%d ",
5008 icmptype, icmpcode);
5009 pf_print_host(pd->src, 0, pd->af);
5011 pf_print_host(pd->dst, 0, pd->af);
5013 pf_print_state(*state);
5014 printf(" seq=%u\n", seq);
5018 /* translate source/destination address, if necessary */
5019 if ((*state)->key[PF_SK_WIRE] !=
5020 (*state)->key[PF_SK_STACK]) {
5021 struct pf_state_key *nk =
5022 (*state)->key[pd->didx];
5024 if (PF_ANEQ(pd2.src,
5025 &nk->addr[pd2.sidx], pd2.af) ||
5026 nk->port[pd2.sidx] != th.th_sport)
5027 pf_change_icmp(pd2.src, &th.th_sport,
5028 daddr, &nk->addr[pd2.sidx],
5029 nk->port[pd2.sidx], NULL,
5030 pd2.ip_sum, icmpsum,
5031 pd->ip_sum, 0, pd2.af);
5033 if (PF_ANEQ(pd2.dst,
5034 &nk->addr[pd2.didx], pd2.af) ||
5035 nk->port[pd2.didx] != th.th_dport)
5036 pf_change_icmp(pd2.dst, &th.th_dport,
5037 saddr, &nk->addr[pd2.didx],
5038 nk->port[pd2.didx], NULL,
5039 pd2.ip_sum, icmpsum,
5040 pd->ip_sum, 0, pd2.af);
5048 m_copyback(m, off, ICMP_MINLEN,
5049 (caddr_t)pd->hdr.icmp);
5050 m_copyback(m, ipoff2, sizeof(h2),
5057 sizeof(struct icmp6_hdr),
5058 (caddr_t)pd->hdr.icmp6);
5059 m_copyback(m, ipoff2, sizeof(h2_6),
5064 m_copyback(m, off2, 8, (caddr_t)&th);
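/*
 * The m_copyback() calls above rewrite the packet from the outside in.
 * For the IPv4 case the offsets line up as:
 *
 *	off             ipoff2           off2
 *	|               |                |
 *	[outer ICMP hdr][quoted IP hdr h2][quoted TCP header, 8 bytes]
 *
 * Only 8 bytes of the quoted TCP header are written back because only
 * 8 bytes were guaranteed to be present (see pf_pull_hdr() above).
 */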
5073 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
5074 NULL, reason, pd2.af)) {
5075 DPFPRINTF(PF_DEBUG_MISC,
5076 ("pf: ICMP error message too short "
5082 key.proto = IPPROTO_UDP;
5083 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5084 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5085 key.port[pd2.sidx] = uh.uh_sport;
5086 key.port[pd2.didx] = uh.uh_dport;
5088 STATE_LOOKUP(kif, &key, direction, *state, pd);
5090 /* translate source/destination address, if necessary */
5091 if ((*state)->key[PF_SK_WIRE] !=
5092 (*state)->key[PF_SK_STACK]) {
5093 struct pf_state_key *nk =
5094 (*state)->key[pd->didx];
5096 if (PF_ANEQ(pd2.src,
5097 &nk->addr[pd2.sidx], pd2.af) ||
5098 nk->port[pd2.sidx] != uh.uh_sport)
5099 pf_change_icmp(pd2.src, &uh.uh_sport,
5100 daddr, &nk->addr[pd2.sidx],
5101 nk->port[pd2.sidx], &uh.uh_sum,
5102 pd2.ip_sum, icmpsum,
5103 pd->ip_sum, 1, pd2.af);
5105 if (PF_ANEQ(pd2.dst,
5106 &nk->addr[pd2.didx], pd2.af) ||
5107 nk->port[pd2.didx] != uh.uh_dport)
5108 pf_change_icmp(pd2.dst, &uh.uh_dport,
5109 saddr, &nk->addr[pd2.didx],
5110 nk->port[pd2.didx], &uh.uh_sum,
5111 pd2.ip_sum, icmpsum,
5112 pd->ip_sum, 1, pd2.af);
5117 m_copyback(m, off, ICMP_MINLEN,
5118 (caddr_t)pd->hdr.icmp);
5119 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5125 sizeof(struct icmp6_hdr),
5126 (caddr_t)pd->hdr.icmp6);
5127 m_copyback(m, ipoff2, sizeof(h2_6),
5132 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
5138 case IPPROTO_ICMP: {
5141 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
5142 NULL, reason, pd2.af)) {
5143 DPFPRINTF(PF_DEBUG_MISC,
5144 ("pf: ICMP error message too short i"
5150 key.proto = IPPROTO_ICMP;
5151 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5152 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5153 key.port[0] = key.port[1] = iih.icmp_id;
5155 STATE_LOOKUP(kif, &key, direction, *state, pd);
5157 /* translate source/destination address, if necessary */
5158 if ((*state)->key[PF_SK_WIRE] !=
5159 (*state)->key[PF_SK_STACK]) {
5160 struct pf_state_key *nk =
5161 (*state)->key[pd->didx];
5163 if (PF_ANEQ(pd2.src,
5164 &nk->addr[pd2.sidx], pd2.af) ||
5165 nk->port[pd2.sidx] != iih.icmp_id)
5166 pf_change_icmp(pd2.src, &iih.icmp_id,
5167 daddr, &nk->addr[pd2.sidx],
5168 nk->port[pd2.sidx], NULL,
5169 pd2.ip_sum, icmpsum,
5170 pd->ip_sum, 0, AF_INET);
5172 if (PF_ANEQ(pd2.dst,
5173 &nk->addr[pd2.didx], pd2.af) ||
5174 nk->port[pd2.didx] != iih.icmp_id)
5175 pf_change_icmp(pd2.dst, &iih.icmp_id,
5176 saddr, &nk->addr[pd2.didx],
5177 nk->port[pd2.didx], NULL,
5178 pd2.ip_sum, icmpsum,
5179 pd->ip_sum, 0, AF_INET);
5181 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
5182 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5183 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
5190 case IPPROTO_ICMPV6: {
5191 struct icmp6_hdr iih;
5193 if (!pf_pull_hdr(m, off2, &iih,
5194 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
5195 DPFPRINTF(PF_DEBUG_MISC,
5196 ("pf: ICMP error message too short "
5202 key.proto = IPPROTO_ICMPV6;
5203 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5204 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5205 key.port[0] = key.port[1] = iih.icmp6_id;
5207 STATE_LOOKUP(kif, &key, direction, *state, pd);
5209 /* translate source/destination address, if necessary */
5210 if ((*state)->key[PF_SK_WIRE] !=
5211 (*state)->key[PF_SK_STACK]) {
5212 struct pf_state_key *nk =
5213 (*state)->key[pd->didx];
5215 if (PF_ANEQ(pd2.src,
5216 &nk->addr[pd2.sidx], pd2.af) ||
5217 nk->port[pd2.sidx] != iih.icmp6_id)
5218 pf_change_icmp(pd2.src, &iih.icmp6_id,
5219 daddr, &nk->addr[pd2.sidx],
5220 nk->port[pd2.sidx], NULL,
5221 pd2.ip_sum, icmpsum,
5222 pd->ip_sum, 0, AF_INET6);
5224 if (PF_ANEQ(pd2.dst,
5225 &nk->addr[pd2.didx], pd2.af) ||
5226 nk->port[pd2.didx] != iih.icmp6_id)
5227 pf_change_icmp(pd2.dst, &iih.icmp6_id,
5228 saddr, &nk->addr[pd2.didx],
5229 nk->port[pd2.didx], NULL,
5230 pd2.ip_sum, icmpsum,
5231 pd->ip_sum, 0, AF_INET6);
5233 m_copyback(m, off, sizeof(struct icmp6_hdr),
5234 (caddr_t)pd->hdr.icmp6);
5235 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
5236 m_copyback(m, off2, sizeof(struct icmp6_hdr),
5245 key.proto = pd2.proto;
5246 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5247 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5248 key.port[0] = key.port[1] = 0;
5250 STATE_LOOKUP(kif, &key, direction, *state, pd);
5252 /* translate source/destination address, if necessary */
5253 if ((*state)->key[PF_SK_WIRE] !=
5254 (*state)->key[PF_SK_STACK]) {
5255 struct pf_state_key *nk =
5256 (*state)->key[pd->didx];
5258 if (PF_ANEQ(pd2.src,
5259 &nk->addr[pd2.sidx], pd2.af))
5260 pf_change_icmp(pd2.src, NULL, daddr,
5261 &nk->addr[pd2.sidx], 0, NULL,
5262 pd2.ip_sum, icmpsum,
5263 pd->ip_sum, 0, pd2.af);
5265 if (PF_ANEQ(pd2.dst,
5266 &nk->addr[pd2.didx], pd2.af))
5267 pf_change_icmp(pd2.dst, NULL, saddr,
5268 &nk->addr[pd2.didx], 0, NULL,
5269 pd2.ip_sum, icmpsum,
5270 pd->ip_sum, 0, pd2.af);
5275 m_copyback(m, off, ICMP_MINLEN,
5276 (caddr_t)pd->hdr.icmp);
5277 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5283 sizeof(struct icmp6_hdr),
5284 (caddr_t)pd->hdr.icmp6);
5285 m_copyback(m, ipoff2, sizeof(h2_6),
5299 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
5300 struct mbuf *m, struct pf_pdesc *pd)
5302 struct pf_state_peer *src, *dst;
5303 struct pf_state_key_cmp key;
5305 bzero(&key, sizeof(key));
5307 key.proto = pd->proto;
5308 if (direction == PF_IN) {
5309 PF_ACPY(&key.addr[0], pd->src, key.af);
5310 PF_ACPY(&key.addr[1], pd->dst, key.af);
5311 key.port[0] = key.port[1] = 0;
5313 PF_ACPY(&key.addr[1], pd->src, key.af);
5314 PF_ACPY(&key.addr[0], pd->dst, key.af);
5315 key.port[1] = key.port[0] = 0;
5318 STATE_LOOKUP(kif, &key, direction, *state, pd);
5320 if (direction == (*state)->direction) {
5321 src = &(*state)->src;
5322 dst = &(*state)->dst;
5324 src = &(*state)->dst;
5325 dst = &(*state)->src;
5329 if (src->state < PFOTHERS_SINGLE)
5330 src->state = PFOTHERS_SINGLE;
5331 if (dst->state == PFOTHERS_SINGLE)
5332 dst->state = PFOTHERS_MULTIPLE;
5334 /* update expire time */
5335 (*state)->expire = time_uptime;
5336 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
5337 (*state)->timeout = PFTM_OTHER_MULTIPLE;
5339 (*state)->timeout = PFTM_OTHER_SINGLE;
5341 /* translate source/destination address, if necessary */
5342 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5343 struct pf_state_key *nk = (*state)->key[pd->didx];
5345 KASSERT(nk, ("%s: nk is null", __func__));
5346 KASSERT(pd, ("%s: pd is null", __func__));
5347 KASSERT(pd->src, ("%s: pd->src is null", __func__));
5348 KASSERT(pd->dst, ("%s: pd->dst is null", __func__));
5352 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5353 pf_change_a(&pd->src->v4.s_addr,
5355 nk->addr[pd->sidx].v4.s_addr,
5359 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5360 pf_change_a(&pd->dst->v4.s_addr,
5362 nk->addr[pd->didx].v4.s_addr,
5369 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6))
5370 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
5372 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6))
5373 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
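/*
 * PF_ANEQ() tests two pf_addr values for inequality in the given
 * address family and PF_ACPY() copies one, so the block above only
 * rewrites addresses that NAT actually changed.  A rough userland
 * equivalent of the copy step, assuming a pf_addr-like union of
 * in_addr/in6_addr (illustrative, not the kernel macro):
 *
 *	static void
 *	addr_copy(struct pf_addr *dst, const struct pf_addr *src,
 *	    sa_family_t af)
 *	{
 *		if (af == AF_INET)
 *			dst->v4 = src->v4;
 *		else
 *			dst->v6 = src->v6;
 *	}
 */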
5381 * ipoff and off are measured from the start of the mbuf chain.
5382 * h must be at "ipoff" on the mbuf chain.
5385 pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
5386 u_short *actionp, u_short *reasonp, sa_family_t af)
5391 struct ip *h = mtod(m, struct ip *);
5392 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
5396 ACTION_SET(actionp, PF_PASS);
5398 ACTION_SET(actionp, PF_DROP);
5399 REASON_SET(reasonp, PFRES_FRAG);
5403 if (m->m_pkthdr.len < off + len ||
5404 ntohs(h->ip_len) < off + len) {
5405 ACTION_SET(actionp, PF_DROP);
5406 REASON_SET(reasonp, PFRES_SHORT);
5414 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
5416 if (m->m_pkthdr.len < off + len ||
5417 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
5418 (unsigned)(off + len)) {
5419 ACTION_SET(actionp, PF_DROP);
5420 REASON_SET(reasonp, PFRES_SHORT);
5427 m_copydata(m, off, len, p);
5433 pf_routable_oldmpath(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif,
5436 struct radix_node_head *rnh;
5437 struct sockaddr_in *dst;
5441 struct sockaddr_in6 *dst6;
5442 struct route_in6 ro;
5446 struct radix_node *rn;
5451 /* XXX: stick to table 0 for now */
5452 rnh = rt_tables_get_rnh(0, af);
5453 if (rnh != NULL && rn_mpath_capable(rnh))
5455 bzero(&ro, sizeof(ro));
5458 dst = satosin(&ro.ro_dst);
5459 dst->sin_family = AF_INET;
5460 dst->sin_len = sizeof(*dst);
5461 dst->sin_addr = addr->v4;
5466 * Skip check for addresses with embedded interface scope,
5467 * as they would always match anyway.
5469 if (IN6_IS_SCOPE_EMBED(&addr->v6))
5471 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
5472 dst6->sin6_family = AF_INET6;
5473 dst6->sin6_len = sizeof(*dst6);
5474 dst6->sin6_addr = addr->v6;
5481 /* Skip checks for ipsec interfaces */
5482 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
5488 in6_rtalloc_ign(&ro, 0, rtableid);
5493 in_rtalloc_ign((struct route *)&ro, 0, rtableid);
5498 if (ro.ro_rt != NULL) {
5499 /* No interface given; this is a no-route check. */
5503 if (kif->pfik_ifp == NULL) {
5508 /* Perform uRPF check if an input interface was given. */
5510 rn = (struct radix_node *)ro.ro_rt;
5512 rt = (struct rtentry *)rn;
5515 if (kif->pfik_ifp == ifp)
5517 rn = rn_mpath_next(rn);
5518 } while (check_mpath == 1 && rn != NULL && ret == 0);
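/*
 * With multipath routing a destination may resolve to a chain of
 * rtentries, so the loop above visits each next hop in turn via
 * rn_mpath_next(): the uRPF check succeeds if any route back to the
 * source leaves through the interface the packet arrived on.
 */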
5522 if (ro.ro_rt != NULL)
5529 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif,
5533 struct nhop4_basic nh4;
5536 struct nhop6_basic nh6;
5540 struct radix_node_head *rnh;
5542 /* XXX: stick to table 0 for now */
5543 rnh = rt_tables_get_rnh(0, af);
5544 if (rnh != NULL && rn_mpath_capable(rnh))
5545 return (pf_routable_oldmpath(addr, af, kif, rtableid));
5548 * Skip check for addresses with embedded interface scope,
5549 * as they would always match anyway.
5551 if (af == AF_INET6 && IN6_IS_SCOPE_EMBED(&addr->v6))
5554 if (af != AF_INET && af != AF_INET6)
5557 /* Skip checks for ipsec interfaces */
5558 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
5566 if (fib6_lookup_nh_basic(rtableid, &addr->v6, 0, 0, 0, &nh6)!=0)
5573 if (fib4_lookup_nh_basic(rtableid, addr->v4, 0, 0, &nh4) != 0)
5580 /* No interface given; this is a no-route check. */
5584 if (kif->pfik_ifp == NULL)
5587 /* Perform uRPF check if an input interface was given. */
5588 if (kif->pfik_ifp == ifp)
5595 pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5596 struct pf_state *s, struct pf_pdesc *pd, struct inpcb *inp)
5598 struct mbuf *m0, *m1;
5599 struct sockaddr_in dst;
5601 struct ifnet *ifp = NULL;
5602 struct pf_addr naddr;
5603 struct pf_ksrc_node *sn = NULL;
5605 uint16_t ip_len, ip_off;
5607 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5608 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5611 if ((pd->pf_mtag == NULL &&
5612 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5613 pd->pf_mtag->routed++ > 3) {
5619 if (r->rt == PF_DUPTO) {
5620 if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
5626 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5634 ip = mtod(m0, struct ip *);
5636 bzero(&dst, sizeof(dst));
5637 dst.sin_family = AF_INET;
5638 dst.sin_len = sizeof(dst);
5639 dst.sin_addr = ip->ip_dst;
5641 bzero(&naddr, sizeof(naddr));
5643 if (TAILQ_EMPTY(&r->rpool.list)) {
5644 DPFPRINTF(PF_DEBUG_URGENT,
5645 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5649 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
5651 if (!PF_AZERO(&naddr, AF_INET))
5652 dst.sin_addr.s_addr = naddr.v4.s_addr;
5653 ifp = r->rpool.cur->kif ?
5654 r->rpool.cur->kif->pfik_ifp : NULL;
5656 if (!PF_AZERO(&s->rt_addr, AF_INET))
5657 dst.sin_addr.s_addr =
5658 s->rt_addr.v4.s_addr;
5659 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
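/*
 * Next-hop selection for route-to: a stateless match picks an address
 * from the rule's pool via pf_map_addr(), while stateful traffic
 * reuses the address and interface recorded in the state (s->rt_addr,
 * s->rt_kif), so every packet of a connection takes the same path.
 */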
5666 if (pf_test(PF_OUT, 0, ifp, &m0, inp) != PF_PASS)
5668 else if (m0 == NULL)
5670 if (m0->m_len < sizeof(struct ip)) {
5671 DPFPRINTF(PF_DEBUG_URGENT,
5672 ("%s: m0->m_len < sizeof(struct ip)\n", __func__));
5675 ip = mtod(m0, struct ip *);
5678 if (ifp->if_flags & IFF_LOOPBACK)
5679 m0->m_flags |= M_SKIP_FIREWALL;
5681 ip_len = ntohs(ip->ip_len);
5682 ip_off = ntohs(ip->ip_off);
5684 /* Copied from FreeBSD 10.0-CURRENT ip_output. */
5685 m0->m_pkthdr.csum_flags |= CSUM_IP;
5686 if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) {
5687 in_delayed_cksum(m0);
5688 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
5690 #if defined(SCTP) || defined(SCTP_SUPPORT)
5691 if (m0->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) {
5692 sctp_delayed_cksum(m0, (uint32_t)(ip->ip_hl << 2));
5693 m0->m_pkthdr.csum_flags &= ~CSUM_SCTP;
5698 * If the packet is small enough for the interface, or if it
5699 * will handle the fragmentation for us, we can send directly.
5701 if (ip_len <= ifp->if_mtu ||
5702 (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0) {
5704 if (m0->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) {
5705 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
5706 m0->m_pkthdr.csum_flags &= ~CSUM_IP;
5708 m_clrprotoflags(m0); /* Avoid confusing lower layers. */
5709 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5713 /* Balk when the DF bit is set or the interface doesn't support TSO. */
5714 if ((ip_off & IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5716 KMOD_IPSTAT_INC(ips_cantfrag);
5717 if (r->rt != PF_DUPTO) {
5718 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
5725 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist);
5729 for (; m0; m0 = m1) {
5731 m0->m_nextpkt = NULL;
5733 m_clrprotoflags(m0);
5734 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5740 KMOD_IPSTAT_INC(ips_fragmented);
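/*
 * ip_fragment() turns m0 into a chain of fragments linked through
 * m_nextpkt; the loop above detaches each fragment and hands it to
 * if_output() individually before the fragmented counter is bumped.
 */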
5743 if (r->rt != PF_DUPTO)
5758 pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5759 struct pf_state *s, struct pf_pdesc *pd, struct inpcb *inp)
5762 struct sockaddr_in6 dst;
5763 struct ip6_hdr *ip6;
5764 struct ifnet *ifp = NULL;
5765 struct pf_addr naddr;
5766 struct pf_ksrc_node *sn = NULL;
5768 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5769 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5772 if ((pd->pf_mtag == NULL &&
5773 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5774 pd->pf_mtag->routed++ > 3) {
5780 if (r->rt == PF_DUPTO) {
5781 if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
5787 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5795 ip6 = mtod(m0, struct ip6_hdr *);
5797 bzero(&dst, sizeof(dst));
5798 dst.sin6_family = AF_INET6;
5799 dst.sin6_len = sizeof(dst);
5800 dst.sin6_addr = ip6->ip6_dst;
5802 bzero(&naddr, sizeof(naddr));
5804 if (TAILQ_EMPTY(&r->rpool.list)) {
5805 DPFPRINTF(PF_DEBUG_URGENT,
5806 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5810 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
5812 if (!PF_AZERO(&naddr, AF_INET6))
5813 PF_ACPY((struct pf_addr *)&dst.sin6_addr,
5815 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
5817 if (!PF_AZERO(&s->rt_addr, AF_INET6))
5818 PF_ACPY((struct pf_addr *)&dst.sin6_addr,
5819 &s->rt_addr, AF_INET6);
5820 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5830 if (pf_test6(PF_OUT, PFIL_FWD, ifp, &m0, inp) != PF_PASS)
5832 else if (m0 == NULL)
5834 if (m0->m_len < sizeof(struct ip6_hdr)) {
5835 DPFPRINTF(PF_DEBUG_URGENT,
5836 ("%s: m0->m_len < sizeof(struct ip6_hdr)\n",
5840 ip6 = mtod(m0, struct ip6_hdr *);
5843 if (ifp->if_flags & IFF_LOOPBACK)
5844 m0->m_flags |= M_SKIP_FIREWALL;
5846 if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA_IPV6 &
5847 ~ifp->if_hwassist) {
5848 uint32_t plen = m0->m_pkthdr.len - sizeof(*ip6);
5849 in6_delayed_cksum(m0, plen, sizeof(struct ip6_hdr));
5850 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA_IPV6;
5854 * If the packet is too large for the outgoing interface,
5855 * send back an icmp6 error.
5857 if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr))
5858 dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
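/*
 * This follows the KAME convention for scoped addresses: for a
 * link-local destination the zone (interface index) is embedded in
 * the second 16-bit word of the address, which is always zero on the
 * wire, before the packet is handed to nd6_output_ifp().
 */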
5859 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu)
5860 nd6_output_ifp(ifp, ifp, m0, &dst, NULL);
5862 in6_ifstat_inc(ifp, ifs6_in_toobig);
5863 if (r->rt != PF_DUPTO)
5864 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
5870 if (r->rt != PF_DUPTO)
5884 * FreeBSD supports cksum offloads for the following drivers:
5885 * em(4), fxp(4), lge(4), ndis(4), nge(4), re(4), ti(4), txp(4), xl(4).
5887 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR : the driver performed the cksum
5888 * including the pseudo header; we only need to verify csum_data.
5891 * CSUM_DATA_VALID alone: the driver performed the cksum, but an extra
5892 * pseudo header cksum computation with the partial csum_data is needed
5893 * (i.e. no H/W support for the pseudo header, e.g. hme(4), sk(4) and
5895 * possibly gem(4)). After validating the cksum of a packet, set both
5896 * CSUM_DATA_VALID and CSUM_PSEUDO_HDR to avoid recomputing the cksum
5898 * in upper layers. Also, set csum_data to 0xffff to force cksum validation.
5901 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af)
5907 if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
5909 if (m->m_pkthdr.len < off + len)
5914 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5915 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5916 sum = m->m_pkthdr.csum_data;
5918 ip = mtod(m, struct ip *);
5919 sum = in_pseudo(ip->ip_src.s_addr,
5920 ip->ip_dst.s_addr, htonl((u_short)len +
5921 m->m_pkthdr.csum_data + IPPROTO_TCP));
5928 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5929 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5930 sum = m->m_pkthdr.csum_data;
5932 ip = mtod(m, struct ip *);
5933 sum = in_pseudo(ip->ip_src.s_addr,
5934 ip->ip_dst.s_addr, htonl((u_short)len +
5935 m->m_pkthdr.csum_data + IPPROTO_UDP));
5943 case IPPROTO_ICMPV6:
5953 if (p == IPPROTO_ICMP) {
5958 sum = in_cksum(m, len);
5962 if (m->m_len < sizeof(struct ip))
5964 sum = in4_cksum(m, p, off, len);
5969 if (m->m_len < sizeof(struct ip6_hdr))
5971 sum = in6_cksum(m, p, off, len);
5982 KMOD_TCPSTAT_INC(tcps_rcvbadsum);
5987 KMOD_UDPSTAT_INC(udps_badsum);
5993 KMOD_ICMPSTAT_INC(icps_checksum);
5998 case IPPROTO_ICMPV6:
6000 KMOD_ICMP6STAT_INC(icp6s_checksum);
6007 if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
6008 m->m_pkthdr.csum_flags |=
6009 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
6010 m->m_pkthdr.csum_data = 0xffff;
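/*
 * The in_cksum()/in4_cksum()/in6_cksum() calls above all compute the
 * standard Internet checksum: the ones'-complement of the
 * ones'-complement sum of the data in 16-bit words (RFC 1071).  A
 * self-contained sketch over an even-sized buffer (illustrative; the
 * kernel routines also handle mbuf chains, odd lengths and pseudo
 * headers):
 *
 *	static u_int16_t
 *	cksum16(const u_int16_t *buf, int nwords)
 *	{
 *		u_int32_t sum;
 *
 *		for (sum = 0; nwords > 0; nwords--)
 *			sum += *buf++;
 *		sum = (sum >> 16) + (sum & 0xffff);
 *		sum += (sum >> 16);
 *		return (~sum & 0xffff);
 *	}
 */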
6019 pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
6021 struct pfi_kif *kif;
6022 u_short action, reason = 0, log = 0;
6023 struct mbuf *m = *m0;
6024 struct ip *h = NULL;
6025 struct m_tag *ipfwtag;
6026 struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
6027 struct pf_state *s = NULL;
6028 struct pf_ruleset *ruleset = NULL;
6030 int off, dirndx, pqid = 0;
6032 PF_RULES_RLOCK_TRACKER;
6036 if (!V_pf_status.running)
6039 memset(&pd, 0, sizeof(pd));
6041 kif = (struct pfi_kif *)ifp->if_pf_kif;
6044 DPFPRINTF(PF_DEBUG_URGENT,
6045 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
6048 if (kif->pfik_flags & PFI_IFLAG_SKIP)
6051 if (m->m_flags & M_SKIP_FIREWALL)
6054 pd.pf_mtag = pf_find_mtag(m);
6058 if (ip_divert_ptr != NULL &&
6059 ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
6060 struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1);
6061 if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) {
6062 if (pd.pf_mtag == NULL &&
6063 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6067 pd.pf_mtag->flags |= PF_PACKET_LOOPED;
6068 m_tag_delete(m, ipfwtag);
6070 if (pd.pf_mtag && pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) {
6071 m->m_flags |= M_FASTFWD_OURS;
6072 pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
6074 } else if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
6075 /* We do IP header normalization and packet reassembly here */
6079 m = *m0; /* pf_normalize messes with m0 */
6080 h = mtod(m, struct ip *);
6082 off = h->ip_hl << 2;
6083 if (off < (int)sizeof(struct ip)) {
6085 REASON_SET(&reason, PFRES_SHORT);
6090 pd.src = (struct pf_addr *)&h->ip_src;
6091 pd.dst = (struct pf_addr *)&h->ip_dst;
6092 pd.sport = pd.dport = NULL;
6093 pd.ip_sum = &h->ip_sum;
6094 pd.proto_sum = NULL;
6097 pd.sidx = (dir == PF_IN) ? 0 : 1;
6098 pd.didx = (dir == PF_IN) ? 1 : 0;
6100 pd.tos = h->ip_tos & ~IPTOS_ECN_MASK;
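/*
 * The low two bits of the TOS byte are the ECN field (IPTOS_ECN_MASK
 * is 0x03), which routers may rewrite in flight, so they are masked
 * out here and rule matching on "tos" ignores them.
 */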
6101 pd.tot_len = ntohs(h->ip_len);
6103 /* handle fragments that didn't get reassembled by normalization */
6104 if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
6105 action = pf_test_fragment(&r, dir, kif, m, h,
6116 if (!pf_pull_hdr(m, off, &th, sizeof(th),
6117 &action, &reason, AF_INET)) {
6118 log = action != PF_PASS;
6121 pd.p_len = pd.tot_len - off - (th.th_off << 2);
6122 if ((th.th_flags & TH_ACK) && pd.p_len == 0)
6124 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6125 if (action == PF_DROP)
6127 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6129 if (action == PF_PASS) {
6130 if (V_pfsync_update_state_ptr != NULL)
6131 V_pfsync_update_state_ptr(s);
6135 } else if (s == NULL)
6136 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6145 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
6146 &action, &reason, AF_INET)) {
6147 log = action != PF_PASS;
6150 if (uh.uh_dport == 0 ||
6151 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
6152 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
6154 REASON_SET(&reason, PFRES_SHORT);
6157 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6158 if (action == PF_PASS) {
6159 if (V_pfsync_update_state_ptr != NULL)
6160 V_pfsync_update_state_ptr(s);
6164 } else if (s == NULL)
6165 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6170 case IPPROTO_ICMP: {
6174 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
6175 &action, &reason, AF_INET)) {
6176 log = action != PF_PASS;
6179 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
6181 if (action == PF_PASS) {
6182 if (V_pfsync_update_state_ptr != NULL)
6183 V_pfsync_update_state_ptr(s);
6187 } else if (s == NULL)
6188 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6194 case IPPROTO_ICMPV6: {
6196 DPFPRINTF(PF_DEBUG_MISC,
6197 ("pf: dropping IPv4 packet with ICMPv6 payload\n"));
6203 action = pf_test_state_other(&s, dir, kif, m, &pd);
6204 if (action == PF_PASS) {
6205 if (V_pfsync_update_state_ptr != NULL)
6206 V_pfsync_update_state_ptr(s);
6210 } else if (s == NULL)
6211 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6218 if (action == PF_PASS && h->ip_hl > 5 &&
6219 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6221 REASON_SET(&reason, PFRES_IPOPTIONS);
6223 DPFPRINTF(PF_DEBUG_MISC,
6224 ("pf: dropping packet with ip options\n"));
6227 if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
6229 REASON_SET(&reason, PFRES_MEMORY);
6231 if (r->rtableid >= 0)
6232 M_SETFIB(m, r->rtableid);
6234 if (r->scrub_flags & PFSTATE_SETPRIO) {
6235 if (pd.tos & IPTOS_LOWDELAY)
6237 if (pf_ieee8021q_setpcp(m, r->set_prio[pqid])) {
6239 REASON_SET(&reason, PFRES_MEMORY);
6241 DPFPRINTF(PF_DEBUG_MISC,
6242 ("pf: failed to allocate 802.1q mtag\n"));
6247 if (action == PF_PASS && r->qid) {
6248 if (pd.pf_mtag == NULL &&
6249 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6251 REASON_SET(&reason, PFRES_MEMORY);
6254 pd.pf_mtag->qid_hash = pf_state_hash(s);
6255 if (pqid || (pd.tos & IPTOS_LOWDELAY))
6256 pd.pf_mtag->qid = r->pqid;
6258 pd.pf_mtag->qid = r->qid;
6259 /* Add hints for ECN. */
6260 pd.pf_mtag->hdr = h;
6267 * Connections redirected to loopback should not match sockets
6268 * bound specifically to loopback, for security reasons; see
6269 * tcp_input() and in_pcblookup_listen().
6271 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6272 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6273 (s->nat_rule.ptr->action == PF_RDR ||
6274 s->nat_rule.ptr->action == PF_BINAT) &&
6275 IN_LOOPBACK(ntohl(pd.dst->v4.s_addr)))
6276 m->m_flags |= M_SKIP_FIREWALL;
6278 if (action == PF_PASS && r->divert.port && ip_divert_ptr != NULL &&
6279 !PACKET_LOOPED(&pd)) {
6281 ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0,
6282 sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
6283 if (ipfwtag != NULL) {
6284 ((struct ipfw_rule_ref *)(ipfwtag+1))->info =
6285 ntohs(r->divert.port);
6286 ((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir;
6291 m_tag_prepend(m, ipfwtag);
6292 if (m->m_flags & M_FASTFWD_OURS) {
6293 if (pd.pf_mtag == NULL &&
6294 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6296 REASON_SET(&reason, PFRES_MEMORY);
6298 DPFPRINTF(PF_DEBUG_MISC,
6299 ("pf: failed to allocate tag\n"));
6301 pd.pf_mtag->flags |=
6302 PF_FASTFWD_OURS_PRESENT;
6303 m->m_flags &= ~M_FASTFWD_OURS;
6306 ip_divert_ptr(*m0, dir == PF_IN ? DIR_IN : DIR_OUT);
6311 /* XXX: ipfw has the same behaviour! */
6313 REASON_SET(&reason, PFRES_MEMORY);
6315 DPFPRINTF(PF_DEBUG_MISC,
6316 ("pf: failed to allocate divert tag\n"));
6323 if (s != NULL && s->nat_rule.ptr != NULL &&
6324 s->nat_rule.ptr->log & PF_LOG_ALL)
6325 lr = s->nat_rule.ptr;
6328 PFLOG_PACKET(kif, m, AF_INET, dir, reason, lr, a, ruleset, &pd,
6332 kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
6333 kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
6335 if (action == PF_PASS || r->action == PF_DROP) {
6336 dirndx = (dir == PF_OUT);
6337 r->packets[dirndx]++;
6338 r->bytes[dirndx] += pd.tot_len;
6340 a->packets[dirndx]++;
6341 a->bytes[dirndx] += pd.tot_len;
6344 if (s->nat_rule.ptr != NULL) {
6345 s->nat_rule.ptr->packets[dirndx]++;
6346 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
6348 if (s->src_node != NULL) {
6349 counter_u64_add(s->src_node->packets[dirndx],
6351 counter_u64_add(s->src_node->bytes[dirndx],
6354 if (s->nat_src_node != NULL) {
6355 counter_u64_add(s->nat_src_node->packets[dirndx],
6357 counter_u64_add(s->nat_src_node->bytes[dirndx],
6360 dirndx = (dir == s->direction) ? 0 : 1;
6361 counter_u64_add(s->packets[dirndx], 1);
6362 counter_u64_add(s->bytes[dirndx], pd.tot_len);
6365 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6366 if (nr != NULL && r == &V_pf_default_rule)
6368 if (tr->src.addr.type == PF_ADDR_TABLE)
6369 pfr_update_stats(tr->src.addr.p.tbl,
6370 (s == NULL) ? pd.src :
6371 &s->key[(s->direction == PF_IN)]->
6372 addr[(s->direction == PF_OUT)],
6373 pd.af, pd.tot_len, dir == PF_OUT,
6374 r->action == PF_PASS, tr->src.neg);
6375 if (tr->dst.addr.type == PF_ADDR_TABLE)
6376 pfr_update_stats(tr->dst.addr.p.tbl,
6377 (s == NULL) ? pd.dst :
6378 &s->key[(s->direction == PF_IN)]->
6379 addr[(s->direction == PF_IN)],
6380 pd.af, pd.tot_len, dir == PF_OUT,
6381 r->action == PF_PASS, tr->dst.neg);
6385 case PF_SYNPROXY_DROP:
6396 /* pf_route() returns unlocked. */
6398 pf_route(m0, r, dir, kif->pfik_ifp, s, &pd, inp);
6412 pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
6414 struct pfi_kif *kif;
6415 u_short action, reason = 0, log = 0;
6416 struct mbuf *m = *m0, *n = NULL;
6418 struct ip6_hdr *h = NULL;
6419 struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
6420 struct pf_state *s = NULL;
6421 struct pf_ruleset *ruleset = NULL;
6423 int off, terminal = 0, dirndx, rh_cnt = 0, pqid = 0;
6425 PF_RULES_RLOCK_TRACKER;
6428 if (!V_pf_status.running)
6431 memset(&pd, 0, sizeof(pd));
6432 pd.pf_mtag = pf_find_mtag(m);
6434 if (pd.pf_mtag && pd.pf_mtag->flags & PF_TAG_GENERATED)
6437 kif = (struct pfi_kif *)ifp->if_pf_kif;
6439 DPFPRINTF(PF_DEBUG_URGENT,
6440 ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
6443 if (kif->pfik_flags & PFI_IFLAG_SKIP)
6446 if (m->m_flags & M_SKIP_FIREWALL)
6451 /* We do IP header normalization and packet reassembly here */
6452 if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
6456 m = *m0; /* pf_normalize messes with m0 */
6457 h = mtod(m, struct ip6_hdr *);
6461 * We do not support jumbograms yet. If we keep going, a zero ip6_plen
6462 * will do something bad, so drop the packet for now.
6464 if (htons(h->ip6_plen) == 0) {
6466 REASON_SET(&reason, PFRES_NORM); /* XXX */
6471 pd.src = (struct pf_addr *)&h->ip6_src;
6472 pd.dst = (struct pf_addr *)&h->ip6_dst;
6473 pd.sport = pd.dport = NULL;
6475 pd.proto_sum = NULL;
6477 pd.sidx = (dir == PF_IN) ? 0 : 1;
6478 pd.didx = (dir == PF_IN) ? 1 : 0;
6481 pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
6483 off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
6484 pd.proto = h->ip6_nxt;
6487 case IPPROTO_FRAGMENT:
6488 action = pf_test_fragment(&r, dir, kif, m, h,
6490 if (action == PF_DROP)
6491 REASON_SET(&reason, PFRES_FRAG);
6493 case IPPROTO_ROUTING: {
6494 struct ip6_rthdr rthdr;
6497 DPFPRINTF(PF_DEBUG_MISC,
6498 ("pf: IPv6 more than one rthdr\n"));
6500 REASON_SET(&reason, PFRES_IPOPTIONS);
6504 if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
6506 DPFPRINTF(PF_DEBUG_MISC,
6507 ("pf: IPv6 short rthdr\n"));
6509 REASON_SET(&reason, PFRES_SHORT);
6513 if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
6514 DPFPRINTF(PF_DEBUG_MISC,
6515 ("pf: IPv6 rthdr0\n"));
6517 REASON_SET(&reason, PFRES_IPOPTIONS);
6524 case IPPROTO_HOPOPTS:
6525 case IPPROTO_DSTOPTS: {
6526 /* get next header and header length */
6527 struct ip6_ext opt6;
6529 if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
6530 NULL, &reason, pd.af)) {
6531 DPFPRINTF(PF_DEBUG_MISC,
6532 ("pf: IPv6 short opt\n"));
6537 if (pd.proto == IPPROTO_AH)
6538 off += (opt6.ip6e_len + 2) * 4;
6539 else
6540 off += (opt6.ip6e_len + 1) * 8;
6541 pd.proto = opt6.ip6e_nxt;
6542 /* go to the next header */
6549 } while (!terminal);
6551 /* If there's no routing header, use the unmodified mbuf for checksumming */
6561 if (!pf_pull_hdr(m, off, &th, sizeof(th),
6562 &action, &reason, AF_INET6)) {
6563 log = action != PF_PASS;
6566 pd.p_len = pd.tot_len - off - (th.th_off << 2);
6567 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6568 if (action == PF_DROP)
6570 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6572 if (action == PF_PASS) {
6573 if (V_pfsync_update_state_ptr != NULL)
6574 V_pfsync_update_state_ptr(s);
6578 } else if (s == NULL)
6579 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6588 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
6589 &action, &reason, AF_INET6)) {
6590 log = action != PF_PASS;
6593 if (uh.uh_dport == 0 ||
6594 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
6595 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
6597 REASON_SET(&reason, PFRES_SHORT);
6600 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6601 if (action == PF_PASS) {
6602 if (V_pfsync_update_state_ptr != NULL)
6603 V_pfsync_update_state_ptr(s);
6607 } else if (s == NULL)
6608 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6613 case IPPROTO_ICMP: {
6615 DPFPRINTF(PF_DEBUG_MISC,
6616 ("pf: dropping IPv6 packet with ICMPv4 payload\n"));
6620 case IPPROTO_ICMPV6: {
6621 struct icmp6_hdr ih;
6624 if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
6625 &action, &reason, AF_INET6)) {
6626 log = action != PF_PASS;
6629 action = pf_test_state_icmp(&s, dir, kif,
6630 m, off, h, &pd, &reason);
6631 if (action == PF_PASS) {
6632 if (V_pfsync_update_state_ptr != NULL)
6633 V_pfsync_update_state_ptr(s);
6637 } else if (s == NULL)
6638 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6644 action = pf_test_state_other(&s, dir, kif, m, &pd);
6645 if (action == PF_PASS) {
6646 if (V_pfsync_update_state_ptr != NULL)
6647 V_pfsync_update_state_ptr(s);
6651 } else if (s == NULL)
6652 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6664 /* handle dangerous IPv6 extension headers. */
6665 if (action == PF_PASS && rh_cnt &&
6666 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6668 REASON_SET(&reason, PFRES_IPOPTIONS);
6670 DPFPRINTF(PF_DEBUG_MISC,
6671 ("pf: dropping packet with dangerous v6 headers\n"));
6674 if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
6676 REASON_SET(&reason, PFRES_MEMORY);
6678 if (r->rtableid >= 0)
6679 M_SETFIB(m, r->rtableid);
6681 if (r->scrub_flags & PFSTATE_SETPRIO) {
6682 if (pd.tos & IPTOS_LOWDELAY)
6684 if (pf_ieee8021q_setpcp(m, r->set_prio[pqid])) {
6686 REASON_SET(&reason, PFRES_MEMORY);
6688 DPFPRINTF(PF_DEBUG_MISC,
6689 ("pf: failed to allocate 802.1q mtag\n"));
6694 if (action == PF_PASS && r->qid) {
6695 if (pd.pf_mtag == NULL &&
6696 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6698 REASON_SET(&reason, PFRES_MEMORY);
6701 pd.pf_mtag->qid_hash = pf_state_hash(s);
6702 if (pd.tos & IPTOS_LOWDELAY)
6703 pd.pf_mtag->qid = r->pqid;
6705 pd.pf_mtag->qid = r->qid;
6706 /* Add hints for ECN. */
6707 pd.pf_mtag->hdr = h;
6712 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6713 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6714 (s->nat_rule.ptr->action == PF_RDR ||
6715 s->nat_rule.ptr->action == PF_BINAT) &&
6716 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
6717 m->m_flags |= M_SKIP_FIREWALL;
6719 /* XXX: Anybody working on it?! */
6721 printf("pf: divert(9) is not supported for IPv6\n");
6726 if (s != NULL && s->nat_rule.ptr != NULL &&
6727 s->nat_rule.ptr->log & PF_LOG_ALL)
6728 lr = s->nat_rule.ptr;
6731 PFLOG_PACKET(kif, m, AF_INET6, dir, reason, lr, a, ruleset,
6735 kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
6736 kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
6738 if (action == PF_PASS || r->action == PF_DROP) {
6739 dirndx = (dir == PF_OUT);
6740 r->packets[dirndx]++;
6741 r->bytes[dirndx] += pd.tot_len;
6743 a->packets[dirndx]++;
6744 a->bytes[dirndx] += pd.tot_len;
6747 if (s->nat_rule.ptr != NULL) {
6748 s->nat_rule.ptr->packets[dirndx]++;
6749 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
6751 if (s->src_node != NULL) {
6752 counter_u64_add(s->src_node->packets[dirndx],
6754 counter_u64_add(s->src_node->bytes[dirndx],
6757 if (s->nat_src_node != NULL) {
6758 counter_u64_add(s->nat_src_node->packets[dirndx],
6760 counter_u64_add(s->nat_src_node->bytes[dirndx],
6763 dirndx = (dir == s->direction) ? 0 : 1;
6764 counter_u64_add(s->packets[dirndx], 1);
6765 counter_u64_add(s->bytes[dirndx], pd.tot_len);
6768 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6769 if (nr != NULL && r == &V_pf_default_rule)
6771 if (tr->src.addr.type == PF_ADDR_TABLE)
6772 pfr_update_stats(tr->src.addr.p.tbl,
6773 (s == NULL) ? pd.src :
6774 &s->key[(s->direction == PF_IN)]->addr[0],
6775 pd.af, pd.tot_len, dir == PF_OUT,
6776 r->action == PF_PASS, tr->src.neg);
6777 if (tr->dst.addr.type == PF_ADDR_TABLE)
6778 pfr_update_stats(tr->dst.addr.p.tbl,
6779 (s == NULL) ? pd.dst :
6780 &s->key[(s->direction == PF_IN)]->addr[1],
6781 pd.af, pd.tot_len, dir == PF_OUT,
6782 r->action == PF_PASS, tr->dst.neg);
6786 case PF_SYNPROXY_DROP:
6797 /* pf_route6() returns unlocked. */
6799 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd, inp);
6808 /* If the reassembled packet passed, create new fragments. */
6809 if (action == PF_PASS && *m0 && (pflags & PFIL_FWD) &&
6810 (mtag = m_tag_find(m, PF_REASSEMBLED, NULL)) != NULL)
6811 action = pf_refragment6(ifp, m0, mtag);
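/*
 * pf reassembles forwarded IPv6 fragments so that rules and states see
 * whole packets; pf_refragment6() then re-creates the fragments on the
 * way out, keeping fragmentation transparent to the communicating
 * hosts.
 */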