/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2008 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/gsb_crc32.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/random.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/ucred.h>

#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/route.h>
#include <net/route/nhop.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>
#include <net/if_pfsync.h>

#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/in_fib.h>
#include <netinet/ip.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_fib.h>
#include <netinet6/scope6_var.h>

#if defined(SCTP) || defined(SCTP_SUPPORT)
#include <netinet/sctp_crc32.h>
#endif

#include <machine/in_cksum.h>
#include <security/mac/mac_framework.h>

#define DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x
SDT_PROVIDER_DEFINE(pf);
SDT_PROBE_DEFINE4(pf, ip, test, done, "int", "int", "struct pf_krule *",
    "struct pf_state *");
SDT_PROBE_DEFINE4(pf, ip, test6, done, "int", "int", "struct pf_krule *",
    "struct pf_state *");
SDT_PROBE_DEFINE5(pf, ip, state, lookup, "struct pfi_kkif *",
    "struct pf_state_key_cmp *", "int", "struct pf_pdesc *",
    "struct pf_state *");

VNET_DEFINE(struct pf_altqqueue, pf_altqs[4]);
VNET_DEFINE(struct pf_kpalist, pf_pabuf);
VNET_DEFINE(struct pf_altqqueue *, pf_altqs_active);
VNET_DEFINE(struct pf_altqqueue *, pf_altq_ifs_active);
VNET_DEFINE(struct pf_altqqueue *, pf_altqs_inactive);
VNET_DEFINE(struct pf_altqqueue *, pf_altq_ifs_inactive);
VNET_DEFINE(struct pf_kstatus, pf_status);

VNET_DEFINE(u_int32_t, ticket_altqs_active);
VNET_DEFINE(u_int32_t, ticket_altqs_inactive);
VNET_DEFINE(int, altqs_inactive_open);
VNET_DEFINE(u_int32_t, ticket_pabuf);

VNET_DEFINE(MD5_CTX, pf_tcp_secret_ctx);
#define	V_pf_tcp_secret_ctx	VNET(pf_tcp_secret_ctx)
VNET_DEFINE(u_char, pf_tcp_secret[16]);
#define	V_pf_tcp_secret		VNET(pf_tcp_secret)
VNET_DEFINE(int, pf_tcp_secret_init);
#define	V_pf_tcp_secret_init	VNET(pf_tcp_secret_init)
VNET_DEFINE(int, pf_tcp_iss_off);
#define	V_pf_tcp_iss_off	VNET(pf_tcp_iss_off)
VNET_DECLARE(int, pf_vnet_active);
#define	V_pf_vnet_active	VNET(pf_vnet_active)

VNET_DEFINE_STATIC(uint32_t, pf_purge_idx);
#define	V_pf_purge_idx	VNET(pf_purge_idx)
/*
 * Queue for pf_intr() sends.
 */
static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations");
struct pf_send_entry {
    STAILQ_ENTRY(pf_send_entry)	pfse_next;
    struct mbuf			*pfse_m;
    enum {
        PFSE_IP,
        PFSE_IP6,
        PFSE_ICMP,
        PFSE_ICMP6,
    }				pfse_type;
    struct {
        int		type;
        int		code;
        int		mtu;
    } icmpopts;
};

STAILQ_HEAD(pf_send_head, pf_send_entry);
VNET_DEFINE_STATIC(struct pf_send_head, pf_sendqueue);
#define	V_pf_sendqueue	VNET(pf_sendqueue)

static struct mtx pf_sendqueue_mtx;
MTX_SYSINIT(pf_sendqueue_mtx, &pf_sendqueue_mtx, "pf send queue", MTX_DEF);
#define	PF_SENDQ_LOCK()		mtx_lock(&pf_sendqueue_mtx)
#define	PF_SENDQ_UNLOCK()	mtx_unlock(&pf_sendqueue_mtx)
/*
 * Queue for pf_overload_task() tasks.
 */
struct pf_overload_entry {
    SLIST_ENTRY(pf_overload_entry)	next;
    struct pf_addr			addr;
    sa_family_t				af;
    uint8_t				dir;
    struct pf_krule			*rule;
};

SLIST_HEAD(pf_overload_head, pf_overload_entry);
VNET_DEFINE_STATIC(struct pf_overload_head, pf_overloadqueue);
#define	V_pf_overloadqueue	VNET(pf_overloadqueue)
VNET_DEFINE_STATIC(struct task, pf_overloadtask);
#define	V_pf_overloadtask	VNET(pf_overloadtask)

static struct mtx pf_overloadqueue_mtx;
MTX_SYSINIT(pf_overloadqueue_mtx, &pf_overloadqueue_mtx,
    "pf overload/flush queue", MTX_DEF);
#define	PF_OVERLOADQ_LOCK()	mtx_lock(&pf_overloadqueue_mtx)
#define	PF_OVERLOADQ_UNLOCK()	mtx_unlock(&pf_overloadqueue_mtx)
VNET_DEFINE(struct pf_krulequeue, pf_unlinked_rules);
struct mtx pf_unlnkdrules_mtx;
MTX_SYSINIT(pf_unlnkdrules_mtx, &pf_unlnkdrules_mtx, "pf unlinked rules",
    MTX_DEF);

VNET_DEFINE_STATIC(uma_zone_t, pf_sources_z);
#define	V_pf_sources_z	VNET(pf_sources_z)
uma_zone_t	pf_mtag_z;
VNET_DEFINE(uma_zone_t, pf_state_z);
VNET_DEFINE(uma_zone_t, pf_state_key_z);

VNET_DEFINE(uint64_t, pf_stateid[MAXCPU]);
#define	PFID_CPUBITS	8
#define	PFID_CPUSHIFT	(sizeof(uint64_t) * NBBY - PFID_CPUBITS)
#define	PFID_CPUMASK	((uint64_t)((1 << PFID_CPUBITS) - 1) << PFID_CPUSHIFT)
#define	PFID_MAXID	(~PFID_CPUMASK)
CTASSERT((1 << PFID_CPUBITS) >= MAXCPU);
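
/*
 * Note on the layout above: a state ID is a 64-bit value whose top
 * PFID_CPUBITS bits hold the CPU that generated it and whose remaining bits
 * hold a per-CPU counter (see pf_state_insert()), so IDs can be generated
 * without a global lock.  The CTASSERT guarantees that any CPU number fits
 * into the reserved field.
 */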
static void		 pf_src_tree_remove_state(struct pf_state *);
static void		 pf_init_threshold(struct pf_threshold *, u_int32_t,
			    u_int32_t);
static void		 pf_add_threshold(struct pf_threshold *);
static int		 pf_check_threshold(struct pf_threshold *);

static void		 pf_change_ap(struct mbuf *, struct pf_addr *, u_int16_t *,
			    u_int16_t *, u_int16_t *, struct pf_addr *,
			    u_int16_t, u_int8_t, sa_family_t);
static int		 pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
			    struct tcphdr *, struct pf_state_peer *);
static void		 pf_change_icmp(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, struct pf_addr *, u_int16_t,
			    u_int16_t *, u_int16_t *, u_int16_t *,
			    u_int16_t *, u_int8_t, sa_family_t);
static void		 pf_send_tcp(struct mbuf *,
			    const struct pf_krule *, sa_family_t,
			    const struct pf_addr *, const struct pf_addr *,
			    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
			    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
			    u_int16_t, struct ifnet *);
static void		 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
			    sa_family_t, struct pf_krule *);
static void		 pf_detach_state(struct pf_state *);
static int		 pf_state_key_attach(struct pf_state_key *,
			    struct pf_state_key *, struct pf_state *);
static void		 pf_state_key_detach(struct pf_state *, int);
static int		 pf_state_key_ctor(void *, int, void *, int);
static u_int32_t	 pf_tcp_iss(struct pf_pdesc *);
static int		 pf_test_rule(struct pf_krule **, struct pf_state **,
			    int, struct pfi_kkif *, struct mbuf *, int,
			    struct pf_pdesc *, struct pf_krule **,
			    struct pf_kruleset **, struct inpcb *);
static int		 pf_create_state(struct pf_krule *, struct pf_krule *,
			    struct pf_krule *, struct pf_pdesc *,
			    struct pf_ksrc_node *, struct pf_state_key *,
			    struct pf_state_key *, struct mbuf *, int,
			    u_int16_t, u_int16_t, int *, struct pfi_kkif *,
			    struct pf_state **, int, u_int16_t, u_int16_t,
			    int);
static int		 pf_test_fragment(struct pf_krule **, int,
			    struct pfi_kkif *, struct mbuf *, void *,
			    struct pf_pdesc *, struct pf_krule **,
			    struct pf_kruleset **);
static int		 pf_tcp_track_full(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pfi_kkif *, struct mbuf *, int,
			    struct pf_pdesc *, u_short *, int *);
static int		 pf_tcp_track_sloppy(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pf_pdesc *, u_short *);
static int		 pf_test_state_tcp(struct pf_state **, int,
			    struct pfi_kkif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_udp(struct pf_state **, int,
			    struct pfi_kkif *, struct mbuf *, int,
			    void *, struct pf_pdesc *);
static int		 pf_test_state_icmp(struct pf_state **, int,
			    struct pfi_kkif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_other(struct pf_state **, int,
			    struct pfi_kkif *, struct mbuf *, struct pf_pdesc *);
static u_int8_t		 pf_get_wscale(struct mbuf *, int, u_int16_t,
			    sa_family_t);
static u_int16_t	 pf_get_mss(struct mbuf *, int, u_int16_t,
			    sa_family_t);
static u_int16_t	 pf_calc_mss(struct pf_addr *, sa_family_t,
			    int, u_int16_t);
static int		 pf_check_proto_cksum(struct mbuf *, int, int,
			    u_int8_t, sa_family_t);
static void		 pf_print_state_parts(struct pf_state *,
			    struct pf_state_key *, struct pf_state_key *);
static int		 pf_addr_wrap_neq(struct pf_addr_wrap *,
			    struct pf_addr_wrap *);
static void		 pf_patch_8(struct mbuf *, u_int16_t *, u_int8_t *, u_int8_t,
			    bool, u_int8_t);
static struct pf_state	*pf_find_state(struct pfi_kkif *,
			    struct pf_state_key_cmp *, u_int);
static int		 pf_src_connlimit(struct pf_state **);
static void		 pf_overload_task(void *v, int pending);
static int		 pf_insert_src_node(struct pf_ksrc_node **,
			    struct pf_krule *, struct pf_addr *, sa_family_t);
static u_int		 pf_purge_expired_states(u_int, int);
static void		 pf_purge_unlinked_rules(void);
static int		 pf_mtag_uminit(void *, int, int);
static void		 pf_mtag_free(struct m_tag *);
#ifdef INET
static void		 pf_route(struct mbuf **, struct pf_krule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *, struct inpcb *);
#endif /* INET */
#ifdef INET6
static void		 pf_change_a6(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, u_int8_t);
static void		 pf_route6(struct mbuf **, struct pf_krule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *, struct inpcb *);
#endif /* INET6 */
int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);

extern int pf_end_threads;
extern struct proc *pf_purge_proc;

VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);

#define	PACKET_LOOPED(pd)	((pd)->pf_mtag &&			\
				 (pd)->pf_mtag->flags & PF_PACKET_LOOPED)
#define	STATE_LOOKUP(i, k, d, s, pd)					\
	do {								\
		(s) = pf_find_state((i), (k), (d));			\
		SDT_PROBE5(pf, ip, state, lookup, i, k, d, pd, (s));	\
		if ((s) == NULL)					\
			return (PF_DROP);				\
		if (PACKET_LOOPED(pd))					\
			return (PF_PASS);				\
		if ((d) == PF_OUT &&					\
		    (s)->rule.ptr->rt == PF_ROUTETO &&			\
		    (s)->rule.ptr->direction == PF_OUT &&		\
		    (s)->rt_kif != NULL &&				\
		    (s)->rt_kif != (i))					\
			return (PF_PASS);				\
	} while (0)

#define	BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all
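
/*
 * A state created by an if-bound rule is attached to its kif and only
 * matches traffic on that interface; otherwise it is attached to V_pfi_all
 * and floats across interfaces (see the sorted key lists in
 * pf_state_key_attach() and pf_find_state() below).
 */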
#define	STATE_INC_COUNTERS(s)						\
	do {								\
		counter_u64_add(s->rule.ptr->states_cur, 1);		\
		counter_u64_add(s->rule.ptr->states_tot, 1);		\
		if (s->anchor.ptr != NULL) {				\
			counter_u64_add(s->anchor.ptr->states_cur, 1);	\
			counter_u64_add(s->anchor.ptr->states_tot, 1);	\
		}							\
		if (s->nat_rule.ptr != NULL) {				\
			counter_u64_add(s->nat_rule.ptr->states_cur, 1);\
			counter_u64_add(s->nat_rule.ptr->states_tot, 1);\
		}							\
	} while (0)

#define	STATE_DEC_COUNTERS(s)						\
	do {								\
		if (s->nat_rule.ptr != NULL)				\
			counter_u64_add(s->nat_rule.ptr->states_cur, -1);\
		if (s->anchor.ptr != NULL)				\
			counter_u64_add(s->anchor.ptr->states_cur, -1);	\
		counter_u64_add(s->rule.ptr->states_cur, -1);		\
	} while (0)
MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
VNET_DEFINE(struct pf_idhash *, pf_idhash);
VNET_DEFINE(struct pf_srchash *, pf_srchash);

SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "pf(4)");

u_long	pf_hashmask;
u_long	pf_srchashmask;
static u_long	pf_hashsize;
static u_long	pf_srchashsize;
u_long	pf_ioctl_maxcount = 65535;

SYSCTL_ULONG(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN,
    &pf_hashsize, 0, "Size of pf(4) states hashtable");
SYSCTL_ULONG(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN,
    &pf_srchashsize, 0, "Size of pf(4) source nodes hashtable");
SYSCTL_ULONG(_net_pf, OID_AUTO, request_maxcount, CTLFLAG_RWTUN,
    &pf_ioctl_maxcount, 0,
    "Maximum number of tables, addresses, ... in a single ioctl() call");

VNET_DEFINE(void *, pf_swi_cookie);
VNET_DEFINE(struct intr_event *, pf_swi_ie);

VNET_DEFINE(uint32_t, pf_hashseed);
#define	V_pf_hashseed	VNET(pf_hashseed)
static __inline int
pf_addr_cmp(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
{
    switch (af) {
    case AF_INET:
        if (a->addr32[0] > b->addr32[0])
            return (1);
        if (a->addr32[0] < b->addr32[0])
            return (-1);
        break;
    case AF_INET6:
        if (a->addr32[3] > b->addr32[3])
            return (1);
        if (a->addr32[3] < b->addr32[3])
            return (-1);
        if (a->addr32[2] > b->addr32[2])
            return (1);
        if (a->addr32[2] < b->addr32[2])
            return (-1);
        if (a->addr32[1] > b->addr32[1])
            return (1);
        if (a->addr32[1] < b->addr32[1])
            return (-1);
        if (a->addr32[0] > b->addr32[0])
            return (1);
        if (a->addr32[0] < b->addr32[0])
            return (-1);
        break;
    default:
        panic("%s: unknown address family %u", __func__, af);
    }

    return (0);
}

static __inline uint32_t
pf_hashkey(struct pf_state_key *sk)
{
    uint32_t h;

    h = murmur3_32_hash32((uint32_t *)sk,
        sizeof(struct pf_state_key_cmp)/sizeof(uint32_t),
        V_pf_hashseed);

    return (h & pf_hashmask);
}

static __inline uint32_t
pf_hashsrc(struct pf_addr *addr, sa_family_t af)
{
    uint32_t h;

    switch (af) {
    case AF_INET:
        h = murmur3_32_hash32((uint32_t *)&addr->v4,
            sizeof(addr->v4)/sizeof(uint32_t), V_pf_hashseed);
        break;
    case AF_INET6:
        h = murmur3_32_hash32((uint32_t *)&addr->v6,
            sizeof(addr->v6)/sizeof(uint32_t), V_pf_hashseed);
        break;
    default:
        panic("%s: unknown address family %u", __func__, af);
    }

    return (h & pf_srchashmask);
}
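
/*
 * Both hash table sizes are forced to powers of two in pf_initialize(), so
 * masking with pf_hashmask/pf_srchashmask above is equivalent to reducing
 * the hash modulo the table size, without a division.
 */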
static u_int32_t
pf_state_hash(struct pf_state *s)
{
    u_int32_t hv = (intptr_t)s / sizeof(*s);

    hv ^= crc32(&s->src, sizeof(s->src));
    hv ^= crc32(&s->dst, sizeof(s->dst));
    if (hv == 0)
        hv = 1;
    return (hv);
}

void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
    switch (af) {
    case AF_INET:
        dst->addr32[0] = src->addr32[0];
        break;
    case AF_INET6:
        dst->addr32[0] = src->addr32[0];
        dst->addr32[1] = src->addr32[1];
        dst->addr32[2] = src->addr32[2];
        dst->addr32[3] = src->addr32[3];
        break;
    }
}

static void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
    threshold->limit = limit * PF_THRESHOLD_MULT;
    threshold->seconds = seconds;
    threshold->count = 0;
    threshold->last = time_uptime;
}

static void
pf_add_threshold(struct pf_threshold *threshold)
{
    u_int32_t t = time_uptime, diff = t - threshold->last;

    if (diff >= threshold->seconds)
        threshold->count = 0;
    else
        threshold->count -= threshold->count * diff /
            threshold->seconds;
    threshold->count += PF_THRESHOLD_MULT;
    threshold->last = t;
}

static int
pf_check_threshold(struct pf_threshold *threshold)
{
    return (threshold->count > threshold->limit);
}
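
/*
 * The threshold counters above are fixed-point: counts are scaled by
 * PF_THRESHOLD_MULT so that the decay step in pf_add_threshold() can
 * subtract a fractional share (count * diff / seconds) using integer
 * arithmetic.  pf_check_threshold() then compares against the limit,
 * which pf_init_threshold() stored pre-scaled by the same factor.
 */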
static int
pf_src_connlimit(struct pf_state **state)
{
    struct pf_overload_entry *pfoe;
    int bad = 0;

    PF_STATE_LOCK_ASSERT(*state);

    (*state)->src_node->conn++;
    (*state)->src.tcp_est = 1;
    pf_add_threshold(&(*state)->src_node->conn_rate);

    if ((*state)->rule.ptr->max_src_conn &&
        (*state)->rule.ptr->max_src_conn <
        (*state)->src_node->conn) {
        counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONN], 1);
        bad++;
    }

    if ((*state)->rule.ptr->max_src_conn_rate.limit &&
        pf_check_threshold(&(*state)->src_node->conn_rate)) {
        counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONNRATE], 1);
        bad++;
    }

    if (!bad)
        return (0);

    /* Kill this state. */
    (*state)->timeout = PFTM_PURGE;
    (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;

    if ((*state)->rule.ptr->overload_tbl == NULL)
        return (1);

    /* Schedule overloading and flushing task. */
    pfoe = malloc(sizeof(*pfoe), M_PFTEMP, M_NOWAIT);
    if (pfoe == NULL)
        return (1);	/* too bad :( */

    bcopy(&(*state)->src_node->addr, &pfoe->addr, sizeof(pfoe->addr));
    pfoe->af = (*state)->key[PF_SK_WIRE]->af;
    pfoe->rule = (*state)->rule.ptr;
    pfoe->dir = (*state)->direction;
    PF_OVERLOADQ_LOCK();
    SLIST_INSERT_HEAD(&V_pf_overloadqueue, pfoe, next);
    PF_OVERLOADQ_UNLOCK();
    taskqueue_enqueue(taskqueue_swi, &V_pf_overloadtask);

    return (1);
}
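
/*
 * Note: the overload-table insertion and any state flushing are deferred to
 * pf_overload_task() via taskqueue_swi above, presumably because
 * pf_src_connlimit() runs in the packet path with the state lock held,
 * where it should not block on the heavier table and state-table work.
 */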
static void
pf_overload_task(void *v, int pending)
{
    struct pf_overload_head queue;
    struct pfr_addr p;
    struct pf_overload_entry *pfoe, *pfoe1;
    uint32_t killed = 0;

    CURVNET_SET((struct vnet *)v);

    PF_OVERLOADQ_LOCK();
    queue = V_pf_overloadqueue;
    SLIST_INIT(&V_pf_overloadqueue);
    PF_OVERLOADQ_UNLOCK();

    bzero(&p, sizeof(p));
    SLIST_FOREACH(pfoe, &queue, next) {
        counter_u64_add(V_pf_status.lcounters[LCNT_OVERLOAD_TABLE], 1);
        if (V_pf_status.debug >= PF_DEBUG_MISC) {
            printf("%s: blocking address ", __func__);
            pf_print_host(&pfoe->addr, 0, pfoe->af);
            printf("\n");
        }

        p.pfra_af = pfoe->af;
        switch (pfoe->af) {
#ifdef INET
        case AF_INET:
            p.pfra_net = 32;
            p.pfra_ip4addr = pfoe->addr.v4;
            break;
#endif /* INET */
#ifdef INET6
        case AF_INET6:
            p.pfra_net = 128;
            p.pfra_ip6addr = pfoe->addr.v6;
            break;
#endif /* INET6 */
        }

        pfr_insert_kentry(pfoe->rule->overload_tbl, &p, time_second);
    }

    /*
     * Remove those entries that don't need flushing.
     */
    SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
        if (pfoe->rule->flush == 0) {
            SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next);
            free(pfoe, M_PFTEMP);
        } else
            counter_u64_add(
                V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH], 1);

    /* If nothing to flush, return. */
    if (SLIST_EMPTY(&queue)) {
        CURVNET_RESTORE();
        return;
    }

    for (int i = 0; i <= pf_hashmask; i++) {
        struct pf_idhash *ih = &V_pf_idhash[i];
        struct pf_state_key *sk;
        struct pf_state *s;

        PF_HASHROW_LOCK(ih);
        LIST_FOREACH(s, &ih->states, entry) {
            sk = s->key[PF_SK_WIRE];
            SLIST_FOREACH(pfoe, &queue, next)
                if (sk->af == pfoe->af &&
                    ((pfoe->rule->flush & PF_FLUSH_GLOBAL) ||
                    pfoe->rule == s->rule.ptr) &&
                    ((pfoe->dir == PF_OUT &&
                    PF_AEQ(&pfoe->addr, &sk->addr[1], sk->af)) ||
                    (pfoe->dir == PF_IN &&
                    PF_AEQ(&pfoe->addr, &sk->addr[0], sk->af)))) {
                    s->timeout = PFTM_PURGE;
                    s->src.state = s->dst.state = TCPS_CLOSED;
                    killed++;
                }
        }
        PF_HASHROW_UNLOCK(ih);
    }
    SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
        free(pfoe, M_PFTEMP);
    if (V_pf_status.debug >= PF_DEBUG_MISC)
        printf("%s: %u states killed", __func__, killed);

    CURVNET_RESTORE();
}
/*
 * Can return locked on failure, so that we can consistently
 * allocate and insert a new one.
 */
struct pf_ksrc_node *
pf_find_src_node(struct pf_addr *src, struct pf_krule *rule, sa_family_t af,
    int returnlocked)
{
    struct pf_srchash *sh;
    struct pf_ksrc_node *n;

    counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_SEARCH], 1);

    sh = &V_pf_srchash[pf_hashsrc(src, af)];
    PF_HASHROW_LOCK(sh);
    LIST_FOREACH(n, &sh->nodes, entry)
        if (n->rule.ptr == rule && n->af == af &&
            ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) ||
            (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0)))
            break;
    if (n != NULL) {
        n->states++;
        PF_HASHROW_UNLOCK(sh);
    } else if (returnlocked == 0)
        PF_HASHROW_UNLOCK(sh);

    return (n);
}
static void
pf_free_src_node(struct pf_ksrc_node *sn)
{

    for (int i = 0; i < 2; i++) {
        counter_u64_free(sn->bytes[i]);
        counter_u64_free(sn->packets[i]);
    }
    uma_zfree(V_pf_sources_z, sn);
}
static int
pf_insert_src_node(struct pf_ksrc_node **sn, struct pf_krule *rule,
    struct pf_addr *src, sa_family_t af)
{

    KASSERT((rule->rule_flag & PFRULE_SRCTRACK ||
        rule->rpool.opts & PF_POOL_STICKYADDR),
        ("%s for non-tracking rule %p", __func__, rule));

    if (*sn == NULL)
        *sn = pf_find_src_node(src, rule, af, 1);

    if (*sn == NULL) {
        struct pf_srchash *sh = &V_pf_srchash[pf_hashsrc(src, af)];

        PF_HASHROW_ASSERT(sh);

        if (!rule->max_src_nodes ||
            counter_u64_fetch(rule->src_nodes) < rule->max_src_nodes)
            (*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO);
        else
            counter_u64_add(V_pf_status.lcounters[LCNT_SRCNODES],
                1);
        if ((*sn) == NULL) {
            PF_HASHROW_UNLOCK(sh);
            return (-1);
        }

        for (int i = 0; i < 2; i++) {
            (*sn)->bytes[i] = counter_u64_alloc(M_NOWAIT);
            (*sn)->packets[i] = counter_u64_alloc(M_NOWAIT);

            if ((*sn)->bytes[i] == NULL || (*sn)->packets[i] == NULL) {
                pf_free_src_node(*sn);
                PF_HASHROW_UNLOCK(sh);
                return (-1);
            }
        }

        pf_init_threshold(&(*sn)->conn_rate,
            rule->max_src_conn_rate.limit,
            rule->max_src_conn_rate.seconds);

        (*sn)->af = af;
        (*sn)->rule.ptr = rule;
        PF_ACPY(&(*sn)->addr, src, af);
        LIST_INSERT_HEAD(&sh->nodes, *sn, entry);
        (*sn)->creation = time_uptime;
        (*sn)->ruletype = rule->action;
        (*sn)->states = 1;
        if ((*sn)->rule.ptr != NULL)
            counter_u64_add((*sn)->rule.ptr->src_nodes, 1);
        PF_HASHROW_UNLOCK(sh);
        counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_INSERT], 1);
    } else {
        if (rule->max_src_states &&
            (*sn)->states >= rule->max_src_states) {
            counter_u64_add(V_pf_status.lcounters[LCNT_SRCSTATES],
                1);
            return (-1);
        }
    }
    return (0);
}
void
pf_unlink_src_node(struct pf_ksrc_node *src)
{

    PF_HASHROW_ASSERT(&V_pf_srchash[pf_hashsrc(&src->addr, src->af)]);
    LIST_REMOVE(src, entry);
    if (src->rule.ptr)
        counter_u64_add(src->rule.ptr->src_nodes, -1);
}

u_int
pf_free_src_nodes(struct pf_ksrc_node_list *head)
{
    struct pf_ksrc_node *sn, *tmp;
    u_int count = 0;

    LIST_FOREACH_SAFE(sn, head, entry, tmp) {
        pf_free_src_node(sn);
        count++;
    }

    counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], count);

    return (count);
}
static void
pf_mtag_initialize()
{

    pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) +
        sizeof(struct pf_mtag), NULL, NULL, pf_mtag_uminit, NULL,
        UMA_ALIGN_PTR, 0);
}

/* Per-vnet data storage structures initialization. */
void
pf_initialize()
{
    struct pf_keyhash	*kh;
    struct pf_idhash	*ih;
    struct pf_srchash	*sh;
    u_long i;

    if (pf_hashsize == 0 || !powerof2(pf_hashsize))
        pf_hashsize = PF_HASHSIZ;
    if (pf_srchashsize == 0 || !powerof2(pf_srchashsize))
        pf_srchashsize = PF_SRCHASHSIZ;

    V_pf_hashseed = arc4random();

    /* States and state keys storage. */
    V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_state),
        NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
    V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z;
    uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT);
    uma_zone_set_warning(V_pf_state_z, "PF states limit reached");

    V_pf_state_key_z = uma_zcreate("pf state keys",
        sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL,
        UMA_ALIGN_PTR, 0);

    V_pf_keyhash = mallocarray(pf_hashsize, sizeof(struct pf_keyhash),
        M_PFHASH, M_NOWAIT | M_ZERO);
    V_pf_idhash = mallocarray(pf_hashsize, sizeof(struct pf_idhash),
        M_PFHASH, M_NOWAIT | M_ZERO);
    if (V_pf_keyhash == NULL || V_pf_idhash == NULL) {
        printf("pf: Unable to allocate memory for "
            "state_hashsize %lu.\n", pf_hashsize);

        free(V_pf_keyhash, M_PFHASH);
        free(V_pf_idhash, M_PFHASH);

        pf_hashsize = PF_HASHSIZ;
        V_pf_keyhash = mallocarray(pf_hashsize,
            sizeof(struct pf_keyhash), M_PFHASH, M_WAITOK | M_ZERO);
        V_pf_idhash = mallocarray(pf_hashsize,
            sizeof(struct pf_idhash), M_PFHASH, M_WAITOK | M_ZERO);
    }

    pf_hashmask = pf_hashsize - 1;
    for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
        i++, kh++, ih++) {
        mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK);
        mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF);
    }

    /* Source nodes. */
    V_pf_sources_z = uma_zcreate("pf source nodes",
        sizeof(struct pf_ksrc_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
        0);
    V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z;
    uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT);
    uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached");

    V_pf_srchash = mallocarray(pf_srchashsize,
        sizeof(struct pf_srchash), M_PFHASH, M_NOWAIT | M_ZERO);
    if (V_pf_srchash == NULL) {
        printf("pf: Unable to allocate memory for "
            "source_hashsize %lu.\n", pf_srchashsize);

        pf_srchashsize = PF_SRCHASHSIZ;
        V_pf_srchash = mallocarray(pf_srchashsize,
            sizeof(struct pf_srchash), M_PFHASH, M_WAITOK | M_ZERO);
    }

    pf_srchashmask = pf_srchashsize - 1;
    for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++)
        mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);

    /* ALTQ */
    TAILQ_INIT(&V_pf_altqs[0]);
    TAILQ_INIT(&V_pf_altqs[1]);
    TAILQ_INIT(&V_pf_altqs[2]);
    TAILQ_INIT(&V_pf_altqs[3]);
    TAILQ_INIT(&V_pf_pabuf);
    V_pf_altqs_active = &V_pf_altqs[0];
    V_pf_altq_ifs_active = &V_pf_altqs[1];
    V_pf_altqs_inactive = &V_pf_altqs[2];
    V_pf_altq_ifs_inactive = &V_pf_altqs[3];

    /* Send & overload+flush queues. */
    STAILQ_INIT(&V_pf_sendqueue);
    SLIST_INIT(&V_pf_overloadqueue);
    TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, curvnet);

    /* Unlinked, but may be referenced rules. */
    TAILQ_INIT(&V_pf_unlinked_rules);
}

static void
pf_mtag_cleanup()
{

    uma_zdestroy(pf_mtag_z);
}
void
pf_cleanup()
{
    struct pf_keyhash	*kh;
    struct pf_idhash	*ih;
    struct pf_srchash	*sh;
    struct pf_send_entry	*pfse, *next;
    u_long i;

    for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
        i++, kh++, ih++) {
        KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
            __func__));
        KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty",
            __func__));
        mtx_destroy(&kh->lock);
        mtx_destroy(&ih->lock);
    }
    free(V_pf_keyhash, M_PFHASH);
    free(V_pf_idhash, M_PFHASH);

    for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
        KASSERT(LIST_EMPTY(&sh->nodes),
            ("%s: source node hash not empty", __func__));
        mtx_destroy(&sh->lock);
    }
    free(V_pf_srchash, M_PFHASH);

    STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) {
        m_freem(pfse->pfse_m);
        free(pfse, M_PFTEMP);
    }

    uma_zdestroy(V_pf_sources_z);
    uma_zdestroy(V_pf_state_z);
    uma_zdestroy(V_pf_state_key_z);
}
static int
pf_mtag_uminit(void *mem, int size, int how)
{
    struct m_tag *t;

    t = (struct m_tag *)mem;
    t->m_tag_cookie = MTAG_ABI_COMPAT;
    t->m_tag_id = PACKET_TAG_PF;
    t->m_tag_len = sizeof(struct pf_mtag);
    t->m_tag_free = pf_mtag_free;

    return (0);
}

static void
pf_mtag_free(struct m_tag *t)
{

    uma_zfree(pf_mtag_z, t);
}

struct pf_mtag *
pf_get_mtag(struct mbuf *m)
{
    struct m_tag *mtag;

    if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL)
        return ((struct pf_mtag *)(mtag + 1));

    mtag = uma_zalloc(pf_mtag_z, M_NOWAIT);
    if (mtag == NULL)
        return (NULL);
    bzero(mtag + 1, sizeof(struct pf_mtag));
    m_tag_prepend(m, mtag);

    return ((struct pf_mtag *)(mtag + 1));
}
static int
pf_state_key_attach(struct pf_state_key *skw, struct pf_state_key *sks,
    struct pf_state *s)
{
    struct pf_keyhash	*khs, *khw, *kh;
    struct pf_state_key	*sk, *cur;
    struct pf_state	*si, *olds = NULL;
    int idx;

    KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
    KASSERT(s->key[PF_SK_WIRE] == NULL, ("%s: state has key", __func__));
    KASSERT(s->key[PF_SK_STACK] == NULL, ("%s: state has key", __func__));

    /*
     * We need to lock hash slots of both keys. To avoid deadlock
     * we always lock the slot with lower address first. Unlock order
     * isn't important.
     *
     * We also need to lock ID hash slot before dropping key
     * locks. On success we return with ID hash slot locked.
     */

    if (skw == sks) {
        khs = khw = &V_pf_keyhash[pf_hashkey(skw)];
        PF_HASHROW_LOCK(khs);
    } else {
        khs = &V_pf_keyhash[pf_hashkey(sks)];
        khw = &V_pf_keyhash[pf_hashkey(skw)];
        if (khs == khw) {
            PF_HASHROW_LOCK(khs);
        } else if (khs < khw) {
            PF_HASHROW_LOCK(khs);
            PF_HASHROW_LOCK(khw);
        } else {
            PF_HASHROW_LOCK(khw);
            PF_HASHROW_LOCK(khs);
        }
    }

#define	KEYS_UNLOCK()	do {			\
	if (khs != khw) {			\
		PF_HASHROW_UNLOCK(khs);		\
		PF_HASHROW_UNLOCK(khw);		\
	} else					\
		PF_HASHROW_UNLOCK(khs);		\
} while (0)

    /*
     * First run: start with wire key.
     */
    sk = skw;
    kh = khw;
    idx = PF_SK_WIRE;

keyattach:
    LIST_FOREACH(cur, &kh->keys, entry)
        if (bcmp(cur, sk, sizeof(struct pf_state_key_cmp)) == 0)
            break;

    if (cur != NULL) {
        /* Key exists. Check for same kif, if none, add to key. */
        TAILQ_FOREACH(si, &cur->states[idx], key_list[idx]) {
            struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(si)];

            PF_HASHROW_LOCK(ih);
            if (si->kif == s->kif &&
                si->direction == s->direction) {
                if (sk->proto == IPPROTO_TCP &&
                    si->src.state >= TCPS_FIN_WAIT_2 &&
                    si->dst.state >= TCPS_FIN_WAIT_2) {
                    /*
                     * New state matches an old >FIN_WAIT_2
                     * state. We can't drop key hash locks,
                     * thus we can't unlink it properly.
                     *
                     * As a workaround we drop it into
                     * TCPS_CLOSED state, schedule purge
                     * ASAP and push it into the very end
                     * of the slot TAILQ, so that it won't
                     * conflict with our new state.
                     */
                    si->src.state = si->dst.state =
                        TCPS_CLOSED;
                    si->timeout = PFTM_PURGE;
                    olds = si;
                } else {
                    if (V_pf_status.debug >= PF_DEBUG_MISC) {
                        printf("pf: %s key attach "
                            "failed on %s: ",
                            (idx == PF_SK_WIRE) ?
                            "wire" : "stack",
                            s->kif->pfik_name);
                        pf_print_state_parts(s,
                            (idx == PF_SK_WIRE) ?
                            sk : NULL,
                            (idx == PF_SK_STACK) ?
                            sk : NULL);
                        printf(", existing: ");
                        pf_print_state_parts(si,
                            (idx == PF_SK_WIRE) ?
                            sk : NULL,
                            (idx == PF_SK_STACK) ?
                            sk : NULL);
                        printf("\n");
                    }
                    PF_HASHROW_UNLOCK(ih);
                    KEYS_UNLOCK();
                    uma_zfree(V_pf_state_key_z, sk);
                    if (idx == PF_SK_STACK)
                        pf_detach_state(s);
                    return (EEXIST); /* collision! */
                }
            }
            PF_HASHROW_UNLOCK(ih);
        }
        uma_zfree(V_pf_state_key_z, sk);
        s->key[idx] = cur;
    } else {
        LIST_INSERT_HEAD(&kh->keys, sk, entry);
        s->key[idx] = sk;
    }

stateattach:
    /* List is sorted, if-bound states before floating. */
    if (s->kif == V_pfi_all)
        TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]);
    else
        TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]);

    if (olds) {
        TAILQ_REMOVE(&s->key[idx]->states[idx], olds, key_list[idx]);
        TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], olds,
            key_list[idx]);
        olds = NULL;
    }

    /*
     * Attach done. See whether (and how) we should
     * attach a second key.
     */
    if (sks == skw) {
        s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
        idx = PF_SK_STACK;
        sks = NULL;
        goto stateattach;
    } else if (sks != NULL) {
        /*
         * Continue attaching with stack key.
         */
        sk = sks;
        kh = khs;
        idx = PF_SK_STACK;
        sks = NULL;
        goto keyattach;
    }

    KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL,
        ("%s failure", __func__));

    PF_HASHROW_LOCK(&V_pf_idhash[PF_IDHASH(s)]);
    KEYS_UNLOCK();
    return (0);
}
static void
pf_detach_state(struct pf_state *s)
{
    struct pf_state_key *sks = s->key[PF_SK_STACK];
    struct pf_keyhash *kh;

    if (sks != NULL) {
        kh = &V_pf_keyhash[pf_hashkey(sks)];
        PF_HASHROW_LOCK(kh);
        if (s->key[PF_SK_STACK] != NULL)
            pf_state_key_detach(s, PF_SK_STACK);
        /*
         * If both point to same key, then we are done.
         */
        if (sks == s->key[PF_SK_WIRE]) {
            pf_state_key_detach(s, PF_SK_WIRE);
            PF_HASHROW_UNLOCK(kh);
            return;
        }
        PF_HASHROW_UNLOCK(kh);
    }

    if (s->key[PF_SK_WIRE] != NULL) {
        kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])];
        PF_HASHROW_LOCK(kh);
        if (s->key[PF_SK_WIRE] != NULL)
            pf_state_key_detach(s, PF_SK_WIRE);
        PF_HASHROW_UNLOCK(kh);
    }
}

static void
pf_state_key_detach(struct pf_state *s, int idx)
{
    struct pf_state_key *sk = s->key[idx];
#ifdef INVARIANTS
    struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)];

    PF_HASHROW_ASSERT(kh);
#endif
    TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]);
    s->key[idx] = NULL;

    if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) {
        LIST_REMOVE(sk, entry);
        uma_zfree(V_pf_state_key_z, sk);
    }
}

static int
pf_state_key_ctor(void *mem, int size, void *arg, int flags)
{
    struct pf_state_key *sk = mem;

    bzero(sk, sizeof(struct pf_state_key_cmp));
    TAILQ_INIT(&sk->states[PF_SK_WIRE]);
    TAILQ_INIT(&sk->states[PF_SK_STACK]);

    return (0);
}
struct pf_state_key *
pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr,
    struct pf_addr *daddr, u_int16_t sport, u_int16_t dport)
{
    struct pf_state_key *sk;

    sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
    if (sk == NULL)
        return (NULL);

    PF_ACPY(&sk->addr[pd->sidx], saddr, pd->af);
    PF_ACPY(&sk->addr[pd->didx], daddr, pd->af);
    sk->port[pd->sidx] = sport;
    sk->port[pd->didx] = dport;
    sk->proto = pd->proto;
    sk->af = pd->af;

    return (sk);
}

struct pf_state_key *
pf_state_key_clone(struct pf_state_key *orig)
{
    struct pf_state_key *sk;

    sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
    if (sk == NULL)
        return (NULL);

    bcopy(orig, sk, sizeof(struct pf_state_key_cmp));

    return (sk);
}
int
pf_state_insert(struct pfi_kkif *kif, struct pf_state_key *skw,
    struct pf_state_key *sks, struct pf_state *s)
{
    struct pf_idhash *ih;
    struct pf_state *cur;
    int error;

    KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]),
        ("%s: sks not pristine", __func__));
    KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]),
        ("%s: skw not pristine", __func__));
    KASSERT(s->refs == 0, ("%s: state not pristine", __func__));

    s->kif = kif;

    if (s->id == 0 && s->creatorid == 0) {
        /* XXX: should be atomic, but probability of collision low */
        if ((s->id = V_pf_stateid[curcpu]++) == PFID_MAXID)
            V_pf_stateid[curcpu] = 1;
        s->id |= (uint64_t )curcpu << PFID_CPUSHIFT;
        s->id = htobe64(s->id);
        s->creatorid = V_pf_status.hostid;
    }

    /* Returns with ID locked on success. */
    if ((error = pf_state_key_attach(skw, sks, s)) != 0)
        return (error);

    ih = &V_pf_idhash[PF_IDHASH(s)];
    PF_HASHROW_ASSERT(ih);
    LIST_FOREACH(cur, &ih->states, entry)
        if (cur->id == s->id && cur->creatorid == s->creatorid)
            break;

    if (cur != NULL) {
        PF_HASHROW_UNLOCK(ih);
        if (V_pf_status.debug >= PF_DEBUG_MISC) {
            printf("pf: state ID collision: "
                "id: %016llx creatorid: %08x\n",
                (unsigned long long)be64toh(s->id),
                ntohl(s->creatorid));
        }
        pf_detach_state(s);
        return (EEXIST);
    }
    LIST_INSERT_HEAD(&ih->states, s, entry);
    /* One for keys, one for ID hash. */
    refcount_init(&s->refs, 2);

    counter_u64_add(V_pf_status.fcounters[FCNT_STATE_INSERT], 1);
    if (V_pfsync_insert_state_ptr != NULL)
        V_pfsync_insert_state_ptr(s);

    /* Returns locked. */
    return (0);
}
/*
 * Find state by ID: returns with locked row on success.
 */
struct pf_state *
pf_find_state_byid(uint64_t id, uint32_t creatorid)
{
    struct pf_idhash *ih;
    struct pf_state *s;

    counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

    ih = &V_pf_idhash[(be64toh(id) % (pf_hashmask + 1))];

    PF_HASHROW_LOCK(ih);
    LIST_FOREACH(s, &ih->states, entry)
        if (s->id == id && s->creatorid == creatorid)
            break;

    if (s == NULL)
        PF_HASHROW_UNLOCK(ih);

    return (s);
}
/*
 * Find state by key.
 * Returns with ID hash slot locked on success.
 */
static struct pf_state *
pf_find_state(struct pfi_kkif *kif, struct pf_state_key_cmp *key, u_int dir)
{
    struct pf_keyhash	*kh;
    struct pf_state_key	*sk;
    struct pf_state	*s;
    int idx;

    counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

    kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

    PF_HASHROW_LOCK(kh);
    LIST_FOREACH(sk, &kh->keys, entry)
        if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
            break;
    if (sk == NULL) {
        PF_HASHROW_UNLOCK(kh);
        return (NULL);
    }

    idx = (dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK);

    /* List is sorted, if-bound states before floating ones. */
    TAILQ_FOREACH(s, &sk->states[idx], key_list[idx])
        if (s->kif == V_pfi_all || s->kif == kif) {
            PF_STATE_LOCK(s);
            PF_HASHROW_UNLOCK(kh);
            if (s->timeout >= PFTM_MAX) {
                /*
                 * State is either being processed by
                 * pf_unlink_state() in another thread, or
                 * is scheduled for immediate expiry.
                 */
                PF_STATE_UNLOCK(s);
                return (NULL);
            }
            return (s);
        }
    PF_HASHROW_UNLOCK(kh);

    return (NULL);
}
struct pf_state *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
    struct pf_keyhash	*kh;
    struct pf_state_key	*sk;
    struct pf_state	*s, *ret = NULL;
    int			 idx, inout = 0;

    counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

    kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

    PF_HASHROW_LOCK(kh);
    LIST_FOREACH(sk, &kh->keys, entry)
        if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
            break;
    if (sk == NULL) {
        PF_HASHROW_UNLOCK(kh);
        return (NULL);
    }
    switch (dir) {
    case PF_IN:
        idx = PF_SK_WIRE;
        break;
    case PF_OUT:
        idx = PF_SK_STACK;
        break;
    case PF_OUT | PF_IN:
        idx = PF_SK_STACK;
        inout = 1;
        break;
    default:
        panic("%s: dir %u", __func__, dir);
    }
second_run:
    TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) {
        if (more == NULL) {
            PF_HASHROW_UNLOCK(kh);
            return (s);
        }

        if (ret)
            (*more)++;
        else
            ret = s;
    }
    if (inout == 1) {
        inout = 0;
        idx = PF_SK_WIRE;
        goto second_run;
    }
    PF_HASHROW_UNLOCK(kh);

    return (ret);
}
/* END state table stuff */

static void
pf_send(struct pf_send_entry *pfse)
{

    PF_SENDQ_LOCK();
    STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next);
    PF_SENDQ_UNLOCK();
    swi_sched(V_pf_swi_cookie, 0);
}

void
pf_intr(void *v)
{
    struct epoch_tracker et;
    struct pf_send_head queue;
    struct pf_send_entry *pfse, *next;

    CURVNET_SET((struct vnet *)v);

    PF_SENDQ_LOCK();
    queue = V_pf_sendqueue;
    STAILQ_INIT(&V_pf_sendqueue);
    PF_SENDQ_UNLOCK();

    NET_EPOCH_ENTER(et);

    STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) {
        switch (pfse->pfse_type) {
#ifdef INET
        case PFSE_IP:
            ip_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL);
            break;
        case PFSE_ICMP:
            icmp_error(pfse->pfse_m, pfse->icmpopts.type,
                pfse->icmpopts.code, 0, pfse->icmpopts.mtu);
            break;
#endif /* INET */
#ifdef INET6
        case PFSE_IP6:
            ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL,
                NULL);
            break;
        case PFSE_ICMP6:
            icmp6_error(pfse->pfse_m, pfse->icmpopts.type,
                pfse->icmpopts.code, pfse->icmpopts.mtu);
            break;
#endif /* INET6 */
        default:
            panic("%s: unknown type", __func__);
        }
        free(pfse, M_PFTEMP);
    }
    NET_EPOCH_EXIT(et);
    CURVNET_RESTORE();
}
void
pf_purge_thread(void *unused __unused)
{
    VNET_ITERATOR_DECL(vnet_iter);

    sx_xlock(&pf_end_lock);
    while (pf_end_threads == 0) {
        sx_sleep(pf_purge_thread, &pf_end_lock, 0, "pftm", hz / 10);

        VNET_LIST_RLOCK();
        VNET_FOREACH(vnet_iter) {
            CURVNET_SET(vnet_iter);

            /* Wait until V_pf_default_rule is initialized. */
            if (V_pf_vnet_active == 0) {
                CURVNET_RESTORE();
                continue;
            }

            /*
             * Process 1/interval fraction of the state
             * table every run.
             */
            V_pf_purge_idx =
                pf_purge_expired_states(V_pf_purge_idx, pf_hashmask /
                (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));

            /*
             * Purge other expired types every
             * PFTM_INTERVAL seconds.
             */
            if (V_pf_purge_idx == 0) {
                /*
                 * Order is important:
                 * - states and src nodes reference rules
                 * - states and rules reference kifs
                 */
                pf_purge_expired_fragments();
                pf_purge_expired_src_nodes();
                pf_purge_unlinked_rules();
            }
            CURVNET_RESTORE();
        }
        VNET_LIST_RUNLOCK();
    }

    pf_end_threads++;
    sx_xunlock(&pf_end_lock);
    kproc_exit(0);
}
void
pf_unload_vnet_purge(void)
{

    /*
     * To clean up all kifs and rules we need
     * two runs: the first one clears reference flags,
     * so that pf_purge_expired_states() doesn't
     * raise them again, and the second run frees.
     */
    pf_purge_unlinked_rules();

    /*
     * Now purge everything.
     */
    pf_purge_expired_states(0, pf_hashmask);
    pf_purge_fragments(UINT_MAX);
    pf_purge_expired_src_nodes();

    /*
     * Now all kifs & rules should be unreferenced,
     * thus should be successfully freed.
     */
    pf_purge_unlinked_rules();
}
u_int32_t
pf_state_expires(const struct pf_state *state)
{
    u_int32_t	timeout;
    u_int32_t	start;
    u_int32_t	end;
    u_int32_t	states;

    /* handle all PFTM_* > PFTM_MAX here */
    if (state->timeout == PFTM_PURGE)
        return (time_uptime);
    KASSERT(state->timeout != PFTM_UNLINKED,
        ("pf_state_expires: timeout == PFTM_UNLINKED"));
    KASSERT((state->timeout < PFTM_MAX),
        ("pf_state_expires: timeout > PFTM_MAX"));
    timeout = state->rule.ptr->timeout[state->timeout];
    if (!timeout)
        timeout = V_pf_default_rule.timeout[state->timeout];
    start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
    if (start && state->rule.ptr != &V_pf_default_rule) {
        end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
        states = counter_u64_fetch(state->rule.ptr->states_cur);
    } else {
        start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
        end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
        states = V_pf_status.states;
    }
    if (end && states > start && start < end) {
        if (states < end) {
            timeout = (u_int64_t)timeout * (end - states) /
                (end - start);
            return (state->expire + timeout);
        }
        else
            return (time_uptime);
    }
    return (state->expire + timeout);
}
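
/*
 * Adaptive timeout scaling, as implemented above: once the state count
 * passes PFTM_ADAPTIVE_START, the effective timeout shrinks linearly with
 * load and reaches zero at PFTM_ADAPTIVE_END.  For example (illustrative
 * numbers), with start=6000, end=12000 and 9000 states, a timeout is
 * halved.  At or beyond the end threshold the state is treated as already
 * expired (time_uptime).
 */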
static void
pf_purge_expired_src_nodes()
{
    struct pf_ksrc_node_list	 freelist;
    struct pf_srchash	*sh;
    struct pf_ksrc_node	*cur, *next;
    u_long i;

    LIST_INIT(&freelist);
    for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
        PF_HASHROW_LOCK(sh);
        LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next)
            if (cur->states == 0 && cur->expire <= time_uptime) {
                pf_unlink_src_node(cur);
                LIST_INSERT_HEAD(&freelist, cur, entry);
            } else if (cur->rule.ptr != NULL)
                cur->rule.ptr->rule_flag |= PFRULE_REFS;
        PF_HASHROW_UNLOCK(sh);
    }

    pf_free_src_nodes(&freelist);

    V_pf_status.src_nodes = uma_zone_get_cur(V_pf_sources_z);
}
static void
pf_src_tree_remove_state(struct pf_state *s)
{
    struct pf_ksrc_node *sn;
    struct pf_srchash *sh;
    u_int32_t timeout;

    timeout = s->rule.ptr->timeout[PFTM_SRC_NODE] ?
        s->rule.ptr->timeout[PFTM_SRC_NODE] :
        V_pf_default_rule.timeout[PFTM_SRC_NODE];

    if (s->src_node != NULL) {
        sn = s->src_node;
        sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
        PF_HASHROW_LOCK(sh);
        if (s->src.tcp_est)
            --sn->conn;
        if (--sn->states == 0)
            sn->expire = time_uptime + timeout;
        PF_HASHROW_UNLOCK(sh);
    }

    if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
        sn = s->nat_src_node;
        sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
        PF_HASHROW_LOCK(sh);
        if (--sn->states == 0)
            sn->expire = time_uptime + timeout;
        PF_HASHROW_UNLOCK(sh);
    }

    s->src_node = s->nat_src_node = NULL;
}
/*
 * Unlink and potentially free a state. Function may be
 * called with ID hash row locked, but always returns
 * unlocked, since it needs to go through key hash locking.
 */
int
pf_unlink_state(struct pf_state *s, u_int flags)
{
    struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)];

    if ((flags & PF_ENTER_LOCKED) == 0)
        PF_HASHROW_LOCK(ih);
    else
        PF_HASHROW_ASSERT(ih);

    if (s->timeout == PFTM_UNLINKED) {
        /*
         * State is being processed
         * by pf_unlink_state() in
         * another thread.
         */
        PF_HASHROW_UNLOCK(ih);
        return (0);	/* XXXGL: undefined actually */
    }

    if (s->src.state == PF_TCPS_PROXY_DST) {
        /* XXX wire key the right one? */
        pf_send_tcp(NULL, s->rule.ptr, s->key[PF_SK_WIRE]->af,
            &s->key[PF_SK_WIRE]->addr[1],
            &s->key[PF_SK_WIRE]->addr[0],
            s->key[PF_SK_WIRE]->port[1],
            s->key[PF_SK_WIRE]->port[0],
            s->src.seqhi, s->src.seqlo + 1,
            TH_RST|TH_ACK, 0, 0, 0, 1, s->tag, NULL);
    }

    LIST_REMOVE(s, entry);
    pf_src_tree_remove_state(s);

    if (V_pfsync_delete_state_ptr != NULL)
        V_pfsync_delete_state_ptr(s);

    STATE_DEC_COUNTERS(s);

    s->timeout = PFTM_UNLINKED;

    PF_HASHROW_UNLOCK(ih);

    pf_detach_state(s);
    /* pf_state_insert() initialises refs to 2, so we can never release the
     * last reference here, only in pf_release_state(). */
    (void)refcount_release(&s->refs);

    return (pf_release_state(s));
}
void
pf_free_state(struct pf_state *cur)
{

    KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
    KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
        cur->timeout));

    for (int i = 0; i < 2; i++) {
        counter_u64_free(cur->bytes[i]);
        counter_u64_free(cur->packets[i]);
    }

    pf_normalize_tcp_cleanup(cur);
    uma_zfree(V_pf_state_z, cur);
    counter_u64_add(V_pf_status.fcounters[FCNT_STATE_REMOVALS], 1);
}
/*
 * Called only from pf_purge_thread(), thus serialized.
 */
static u_int
pf_purge_expired_states(u_int i, int maxcheck)
{
    struct pf_idhash *ih;
    struct pf_state *s;

    V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

    /*
     * Go through hash and unlink states that expire now.
     */
    while (maxcheck > 0) {
        ih = &V_pf_idhash[i];

        /* only take the lock if we expect to do work */
        if (!LIST_EMPTY(&ih->states)) {
relock:
            PF_HASHROW_LOCK(ih);
            LIST_FOREACH(s, &ih->states, entry) {
                if (pf_state_expires(s) <= time_uptime) {
                    V_pf_status.states -=
                        pf_unlink_state(s, PF_ENTER_LOCKED);
                    goto relock;
                }
                s->rule.ptr->rule_flag |= PFRULE_REFS;
                if (s->nat_rule.ptr != NULL)
                    s->nat_rule.ptr->rule_flag |= PFRULE_REFS;
                if (s->anchor.ptr != NULL)
                    s->anchor.ptr->rule_flag |= PFRULE_REFS;
                s->kif->pfik_flags |= PFI_IFLAG_REFS;
                if (s->rt_kif != NULL)
                    s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
            }
            PF_HASHROW_UNLOCK(ih);
        }

        /* Return when we hit end of hash. */
        if (++i > pf_hashmask) {
            V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
            return (0);
        }

        maxcheck--;
    }

    V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

    return (i);
}
static void
pf_purge_unlinked_rules()
{
    struct pf_krulequeue tmpq;
    struct pf_krule *r, *r1;

    /*
     * If we have an overload task pending, then we'd
     * better skip purging this time. There is a tiny
     * probability that the overload task references
     * an already unlinked rule.
     */
    PF_OVERLOADQ_LOCK();
    if (!SLIST_EMPTY(&V_pf_overloadqueue)) {
        PF_OVERLOADQ_UNLOCK();
        return;
    }
    PF_OVERLOADQ_UNLOCK();

    /*
     * Do naive mark-and-sweep garbage collecting of old rules.
     * Reference flag is raised by pf_purge_expired_states()
     * and pf_purge_expired_src_nodes().
     *
     * To avoid LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK,
     * use a temporary queue.
     */
    TAILQ_INIT(&tmpq);
    PF_UNLNKDRULES_LOCK();
    TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) {
        if (!(r->rule_flag & PFRULE_REFS)) {
            TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries);
            TAILQ_INSERT_TAIL(&tmpq, r, entries);
        } else
            r->rule_flag &= ~PFRULE_REFS;
    }
    PF_UNLNKDRULES_UNLOCK();

    if (!TAILQ_EMPTY(&tmpq)) {
        PF_RULES_WLOCK();
        TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) {
            TAILQ_REMOVE(&tmpq, r, entries);
            pf_free_rule(r);
        }
        PF_RULES_WUNLOCK();
    }
}
void
pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
{
    switch (af) {
#ifdef INET
    case AF_INET: {
        u_int32_t a = ntohl(addr->addr32[0]);
        printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
            (a>>8)&255, a&255);
        if (p) {
            p = ntohs(p);
            printf(":%u", p);
        }
        break;
    }
#endif /* INET */
#ifdef INET6
    case AF_INET6: {
        u_int16_t b;
        u_int8_t i, curstart, curend, maxstart, maxend;
        curstart = curend = maxstart = maxend = 255;
        for (i = 0; i < 8; i++) {
            if (!addr->addr16[i]) {
                if (curstart == 255)
                    curstart = i;
                curend = i;
            } else {
                if ((curend - curstart) >
                    (maxend - maxstart)) {
                    maxstart = curstart;
                    maxend = curend;
                }
                curstart = curend = 255;
            }
        }
        if ((curend - curstart) >
            (maxend - maxstart)) {
            maxstart = curstart;
            maxend = curend;
        }
        for (i = 0; i < 8; i++) {
            if (i >= maxstart && i <= maxend) {
                if (i == 0)
                    printf(":");
                if (i == maxend)
                    printf(":");
            } else {
                b = ntohs(addr->addr16[i]);
                printf("%x", b);
                if (i < 7)
                    printf(":");
            }
        }
        if (p) {
            p = ntohs(p);
            printf("[%u]", p);
        }
        break;
    }
#endif /* INET6 */
    }
}
void
pf_print_state(struct pf_state *s)
{
    pf_print_state_parts(s, NULL, NULL);
}

void
pf_print_state_parts(struct pf_state *s,
    struct pf_state_key *skwp, struct pf_state_key *sksp)
{
    struct pf_state_key *skw, *sks;
    u_int8_t proto, dir;

    /* Do our best to fill these, but they're skipped if NULL */
    skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
    sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
    proto = skw ? skw->proto : (sks ? sks->proto : 0);
    dir = s ? s->direction : 0;

    switch (proto) {
    case IPPROTO_ICMPV6:
        printf("ICMPv6");
        break;
    default:
        printf("%u", proto);
        break;
    }

    if (skw) {
        pf_print_host(&skw->addr[0], skw->port[0], skw->af);
        printf(" ");
        pf_print_host(&skw->addr[1], skw->port[1], skw->af);
    }
    if (sks) {
        pf_print_host(&sks->addr[0], sks->port[0], sks->af);
        printf(" ");
        pf_print_host(&sks->addr[1], sks->port[1], sks->af);
    }

    if (s) {
        if (proto == IPPROTO_TCP) {
            printf(" [lo=%u high=%u win=%u modulator=%u",
                s->src.seqlo, s->src.seqhi,
                s->src.max_win, s->src.seqdiff);
            if (s->src.wscale && s->dst.wscale)
                printf(" wscale=%u",
                    s->src.wscale & PF_WSCALE_MASK);
            printf("]");
            printf(" [lo=%u high=%u win=%u modulator=%u",
                s->dst.seqlo, s->dst.seqhi,
                s->dst.max_win, s->dst.seqdiff);
            if (s->src.wscale && s->dst.wscale)
                printf(" wscale=%u",
                    s->dst.wscale & PF_WSCALE_MASK);
            printf("]");
        }
        printf(" %u:%u", s->src.state, s->dst.state);
    }
}
void
pf_print_flags(u_int8_t f)
{
    if (f)
        printf(" ");
    if (f & TH_FIN)
        printf("F");
    if (f & TH_SYN)
        printf("S");
    if (f & TH_RST)
        printf("R");
    if (f & TH_PUSH)
        printf("P");
    if (f & TH_ACK)
        printf("A");
    if (f & TH_URG)
        printf("U");
    if (f & TH_ECE)
        printf("E");
    if (f & TH_CWR)
        printf("W");
}

#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)
void
pf_calc_skip_steps(struct pf_krulequeue *rules)
{
    struct pf_krule *cur, *prev, *head[PF_SKIP_COUNT];
    int i;

    cur = TAILQ_FIRST(rules);
    prev = cur;
    for (i = 0; i < PF_SKIP_COUNT; ++i)
        head[i] = cur;
    while (cur != NULL) {
        if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
            PF_SET_SKIP_STEPS(PF_SKIP_IFP);
        if (cur->direction != prev->direction)
            PF_SET_SKIP_STEPS(PF_SKIP_DIR);
        if (cur->af != prev->af)
            PF_SET_SKIP_STEPS(PF_SKIP_AF);
        if (cur->proto != prev->proto)
            PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
        if (cur->src.neg != prev->src.neg ||
            pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
            PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
        if (cur->src.port[0] != prev->src.port[0] ||
            cur->src.port[1] != prev->src.port[1] ||
            cur->src.port_op != prev->src.port_op)
            PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
        if (cur->dst.neg != prev->dst.neg ||
            pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
            PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
        if (cur->dst.port[0] != prev->dst.port[0] ||
            cur->dst.port[1] != prev->dst.port[1] ||
            cur->dst.port_op != prev->dst.port_op)
            PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);

        prev = cur;
        cur = TAILQ_NEXT(cur, entries);
    }
    for (i = 0; i < PF_SKIP_COUNT; ++i)
        PF_SET_SKIP_STEPS(i);
}
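
/*
 * The skip steps computed above are pf's classic evaluation shortcut: for
 * each field (interface, direction, af, proto, addresses, ports) every rule
 * points at the next rule that differs in that field.  When a field fails
 * to match, evaluation can jump over the whole run of rules sharing that
 * value instead of testing them one by one.
 */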
static int
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
{
    if (aw1->type != aw2->type)
        return (1);
    switch (aw1->type) {
    case PF_ADDR_ADDRMASK:
    case PF_ADDR_RANGE:
        if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, AF_INET6))
            return (1);
        if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, AF_INET6))
            return (1);
        return (0);
    case PF_ADDR_DYNIFTL:
        return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
    case PF_ADDR_NOROUTE:
    case PF_ADDR_URPFFAILED:
        return (0);
    case PF_ADDR_TABLE:
        return (aw1->p.tbl != aw2->p.tbl);
    default:
        printf("invalid address type: %d\n", aw1->type);
        return (1);
    }
}
/*
 * Checksum updates are a little complicated because the checksum in the TCP/UDP
 * header isn't always a full checksum. In some cases (i.e. output) it's a
 * pseudo-header checksum, which is a partial checksum over src/dst IP
 * addresses, protocol number and length.
 *
 * That means we have the following cases:
 *  * Input or forwarding: we don't have TSO, the checksum fields are full
 *    checksums, we need to update the checksum whenever we change anything.
 *  * Output (i.e. the checksum is a pseudo-header checksum):
 *    x The field being updated is src/dst address or affects the length of
 *      the packet. We need to update the pseudo-header checksum (note that this
 *      checksum is not ones' complement).
 *    x Some other field is being modified (e.g. src/dst port numbers): We
 *      don't have to update anything.
 */
u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
    u_int32_t x;

    x = cksum + old - new;
    x = (x + (x >> 16)) & 0xffff;

    /* optimise: eliminate a branch when not udp */
    if (udp && cksum == 0x0000)
        return cksum;
    if (udp && x == 0x0000)
        x = 0xffff;

    return (u_int16_t)(x);
}
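
/*
 * Worked example (illustrative values only): with a stored checksum of
 * 0x1d4f, replacing a 16-bit word 0x1234 with 0xabcd gives
 * x = 0x1d4f + 0x1234 - 0xabcd = 0xffff83b6 (mod 2^32), and folding the
 * carry with x = (x + (x >> 16)) & 0xffff yields the updated checksum
 * 0x83b5.  The UDP special cases exist because 0x0000 means "no checksum"
 * for UDP and must neither be consumed nor produced by the fixup.
 */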
static void
pf_patch_8(struct mbuf *m, u_int16_t *cksum, u_int8_t *f, u_int8_t v, bool hi,
    u_int8_t udp)
{
    u_int16_t old = htons(hi ? (*f << 8) : *f);
    u_int16_t new = htons(hi ? ( v << 8) :  v);

    if (*f == v)
        return;

    *f = v;

    if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
        return;

    *cksum = pf_cksum_fixup(*cksum, old, new, udp);
}

static void
pf_patch_16_unaligned(struct mbuf *m, u_int16_t *cksum, void *f, u_int16_t v,
    bool hi, u_int8_t udp)
{
    u_int8_t *fb = (u_int8_t *)f;
    u_int8_t *vb = (u_int8_t *)&v;

    pf_patch_8(m, cksum, fb++, *vb++, hi, udp);
    pf_patch_8(m, cksum, fb++, *vb++, !hi, udp);
}

static void
pf_patch_32_unaligned(struct mbuf *m, u_int16_t *cksum, void *f, u_int32_t v,
    bool hi, u_int8_t udp)
{
    u_int8_t *fb = (u_int8_t *)f;
    u_int8_t *vb = (u_int8_t *)&v;

    pf_patch_8(m, cksum, fb++, *vb++, hi, udp);
    pf_patch_8(m, cksum, fb++, *vb++, !hi, udp);
    pf_patch_8(m, cksum, fb++, *vb++, hi, udp);
    pf_patch_8(m, cksum, fb++, *vb++, !hi, udp);
}

u_int16_t
pf_proto_cksum_fixup(struct mbuf *m, u_int16_t cksum, u_int16_t old,
    u_int16_t new, u_int8_t udp)
{
    if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
        return (cksum);

    return (pf_cksum_fixup(cksum, old, new, udp));
}
static void
pf_change_ap(struct mbuf *m, struct pf_addr *a, u_int16_t *p, u_int16_t *ic,
    u_int16_t *pc, struct pf_addr *an, u_int16_t pn, u_int8_t u,
    sa_family_t af)
{
    struct pf_addr	ao;
    u_int16_t	po = *p;

    PF_ACPY(&ao, a, af);
    PF_ACPY(a, an, af);

    if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
        *pc = ~*pc;

    *p = pn;

    switch (af) {
#ifdef INET
    case AF_INET:
        *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
            ao.addr16[0], an->addr16[0], 0),
            ao.addr16[1], an->addr16[1], 0);
        *pc = pf_cksum_fixup(pf_cksum_fixup(*pc,
            ao.addr16[0], an->addr16[0], u),
            ao.addr16[1], an->addr16[1], u);
        *pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
        break;
#endif /* INET */
#ifdef INET6
    case AF_INET6:
        *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
            pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
            pf_cksum_fixup(pf_cksum_fixup(*pc,
            ao.addr16[0], an->addr16[0], u),
            ao.addr16[1], an->addr16[1], u),
            ao.addr16[2], an->addr16[2], u),
            ao.addr16[3], an->addr16[3], u),
            ao.addr16[4], an->addr16[4], u),
            ao.addr16[5], an->addr16[5], u),
            ao.addr16[6], an->addr16[6], u),
            ao.addr16[7], an->addr16[7], u);
        *pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
        break;
#endif /* INET6 */
    }

    if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA |
        CSUM_DELAY_DATA_IPV6)) {
        *pc = ~*pc;
        if (! *pc)
            *pc = 0xffff;
    }
}
/* Changes a u_int32_t. Uses a void * so there are no align restrictions */
void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
    u_int32_t	ao;

    memcpy(&ao, a, sizeof(ao));
    memcpy(a, &an, sizeof(u_int32_t));
    *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
        ao % 65536, an % 65536, u);
}
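
/*
 * Note: ao / 65536 and ao % 65536 above are simply the high and low 16-bit
 * halves of the 32-bit value, so the two chained fixups account for one
 * aligned 32-bit field change in the 16-bit ones' complement checksum.
 */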
void
pf_change_proto_a(struct mbuf *m, void *a, u_int16_t *c, u_int32_t an, u_int8_t udp)
{
    u_int32_t	ao;

    memcpy(&ao, a, sizeof(ao));
    memcpy(a, &an, sizeof(u_int32_t));

    *c = pf_proto_cksum_fixup(m,
        pf_proto_cksum_fixup(m, *c, ao / 65536, an / 65536, udp),
        ao % 65536, an % 65536, udp);
}

#ifdef INET6
static void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
    struct pf_addr	ao;

    PF_ACPY(&ao, a, AF_INET6);
    PF_ACPY(a, an, AF_INET6);

    *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
        pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
        pf_cksum_fixup(pf_cksum_fixup(*c,
        ao.addr16[0], an->addr16[0], u),
        ao.addr16[1], an->addr16[1], u),
        ao.addr16[2], an->addr16[2], u),
        ao.addr16[3], an->addr16[3], u),
        ao.addr16[4], an->addr16[4], u),
        ao.addr16[5], an->addr16[5], u),
        ao.addr16[6], an->addr16[6], u),
        ao.addr16[7], an->addr16[7], u);
}
#endif /* INET6 */
static void
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
{
    struct pf_addr	oia, ooa;

    PF_ACPY(&oia, ia, af);
    if (oa)
        PF_ACPY(&ooa, oa, af);

    /* Change inner protocol port, fix inner protocol checksum. */
    if (ip != NULL) {
        u_int16_t	oip = *ip;
        u_int32_t	opc;

        if (pc != NULL)
            opc = *pc;
        *ip = np;
        if (pc != NULL)
            *pc = pf_cksum_fixup(*pc, oip, *ip, u);
        *ic = pf_cksum_fixup(*ic, oip, *ip, 0);
        if (pc != NULL)
            *ic = pf_cksum_fixup(*ic, opc, *pc, 0);
    }
    /* Change inner ip address, fix inner ip and icmp checksums. */
    PF_ACPY(ia, na, af);
    switch (af) {
#ifdef INET
    case AF_INET: {
        u_int32_t oh2c = *h2c;

        *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
            oia.addr16[0], ia->addr16[0], 0),
            oia.addr16[1], ia->addr16[1], 0);
        *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
            oia.addr16[0], ia->addr16[0], 0),
            oia.addr16[1], ia->addr16[1], 0);
        *ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
        break;
    }
#endif /* INET */
#ifdef INET6
    case AF_INET6:
        *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
            pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
            pf_cksum_fixup(pf_cksum_fixup(*ic,
            oia.addr16[0], ia->addr16[0], u),
            oia.addr16[1], ia->addr16[1], u),
            oia.addr16[2], ia->addr16[2], u),
            oia.addr16[3], ia->addr16[3], u),
            oia.addr16[4], ia->addr16[4], u),
            oia.addr16[5], ia->addr16[5], u),
            oia.addr16[6], ia->addr16[6], u),
            oia.addr16[7], ia->addr16[7], u);
        break;
#endif /* INET6 */
    }
    /* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
    if (oa) {
        PF_ACPY(oa, na, af);
        switch (af) {
#ifdef INET
        case AF_INET:
            *hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
                ooa.addr16[0], oa->addr16[0], 0),
                ooa.addr16[1], oa->addr16[1], 0);
            break;
#endif /* INET */
#ifdef INET6
        case AF_INET6:
            *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
                pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
                pf_cksum_fixup(pf_cksum_fixup(*ic,
                ooa.addr16[0], oa->addr16[0], u),
                ooa.addr16[1], oa->addr16[1], u),
                ooa.addr16[2], oa->addr16[2], u),
                ooa.addr16[3], oa->addr16[3], u),
                ooa.addr16[4], oa->addr16[4], u),
                ooa.addr16[5], oa->addr16[5], u),
                ooa.addr16[6], oa->addr16[6], u),
                ooa.addr16[7], oa->addr16[7], u);
            break;
#endif /* INET6 */
        }
    }
}
/*
 * Need to modulate the sequence numbers in the TCP SACK option
 * (credits to Krzysztof Pfaff for report and patch)
 */
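
/*
 * Rationale: when a state uses a sequence number modulator (seqdiff), the
 * SACK blocks a peer echoes back carry the other side's modulated sequence
 * numbers, so they must be shifted by the same offset below, otherwise the
 * receiver would see SACKs for data it never sent.
 */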
static int
pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *dst)
{
    int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
    u_int8_t opts[TCP_MAXOLEN], *opt = opts;
    int copyback = 0, i, olen;
    struct sackblk sack;

#define	TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
    if (hlen < TCPOLEN_SACKLEN ||
        !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
        return 0;

    while (hlen >= TCPOLEN_SACKLEN) {
        size_t startoff = opt - opts;
        olen = opt[1];
        switch (*opt) {
        case TCPOPT_EOL:	/* FALLTHROUGH */
        case TCPOPT_NOP:
            opt++;
            hlen--;
            break;
        case TCPOPT_SACK:
            if (olen > hlen)
                olen = hlen;
            if (olen >= TCPOLEN_SACKLEN) {
                for (i = 2; i + TCPOLEN_SACK <= olen;
                    i += TCPOLEN_SACK) {
                    memcpy(&sack, &opt[i], sizeof(sack));
                    pf_patch_32_unaligned(m,
                        &th->th_sum, &sack.start,
                        htonl(ntohl(sack.start) - dst->seqdiff),
                        PF_ALGNMNT(startoff),
                        0);
                    pf_patch_32_unaligned(m, &th->th_sum,
                        &sack.end,
                        htonl(ntohl(sack.end) - dst->seqdiff),
                        PF_ALGNMNT(startoff),
                        0);
                    memcpy(&opt[i], &sack, sizeof(sack));
                }
                copyback = 1;
            }
            /* FALLTHROUGH */
        default:
            if (olen < 2)
                olen = 2;
            hlen -= olen;
            opt += olen;
        }
    }

    if (copyback)
        m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts);
    return (copyback);
}
2449 pf_send_tcp(struct mbuf *replyto, const struct pf_krule *r, sa_family_t af,
2450 const struct pf_addr *saddr, const struct pf_addr *daddr,
2451 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
2452 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
2453 u_int16_t rtag, struct ifnet *ifp)
2455 struct pf_send_entry *pfse;
2459 struct ip *h = NULL;
2462 struct ip6_hdr *h6 = NULL;
2466 struct pf_mtag *pf_mtag;
2471 /* maximum segment size tcp option */
2472 tlen = sizeof(struct tcphdr);
2479 len = sizeof(struct ip) + tlen;
2484 len = sizeof(struct ip6_hdr) + tlen;
2488 panic("%s: unsupported af %d", __func__, af);
2491 /* Allocate outgoing queue entry, mbuf and mbuf tag. */
2492 pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
2495 m = m_gethdr(M_NOWAIT, MT_DATA);
2497 free(pfse, M_PFTEMP);
2501 mac_netinet_firewall_send(m);
2503 if ((pf_mtag = pf_get_mtag(m)) == NULL) {
2504 free(pfse, M_PFTEMP);
2509 m->m_flags |= M_SKIP_FIREWALL;
2510 pf_mtag->tag = rtag;
2512 if (r != NULL && r->rtableid >= 0)
2513 M_SETFIB(m, r->rtableid);
2516 if (r != NULL && r->qid) {
2517 pf_mtag->qid = r->qid;
2519 /* add hints for ECN */
2520 pf_mtag->hdr = mtod(m, struct ip *);
2523 m->m_data += max_linkhdr;
2524 m->m_pkthdr.len = m->m_len = len;
2525 m->m_pkthdr.rcvif = NULL;
2526 bzero(m->m_data, len);
2530 h = mtod(m, struct ip *);
2532 /* IP header fields included in the TCP checksum */
2533 h->ip_p = IPPROTO_TCP;
2534 h->ip_len = htons(tlen);
2535 h->ip_src.s_addr = saddr->v4.s_addr;
2536 h->ip_dst.s_addr = daddr->v4.s_addr;
2538 th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
2543 h6 = mtod(m, struct ip6_hdr *);
2545 /* IP header fields included in the TCP checksum */
2546 h6->ip6_nxt = IPPROTO_TCP;
2547 h6->ip6_plen = htons(tlen);
2548 memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
2549 memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));
2551 th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
2557 th->th_sport = sport;
2558 th->th_dport = dport;
2559 th->th_seq = htonl(seq);
2560 th->th_ack = htonl(ack);
2561 th->th_off = tlen >> 2;
2562 th->th_flags = flags;
2563 th->th_win = htons(win);
2566 opt = (char *)(th + 1);
2567 opt[0] = TCPOPT_MAXSEG;
2570 bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
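/*
 * The two bytes copied above are the MSS value itself; on the wire
 * the option reads kind 2, length 4, value in network byte order.
 */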
2577 th->th_sum = in_cksum(m, len);
2579 /* Finish the IP header */
2581 h->ip_hl = sizeof(*h) >> 2;
2582 h->ip_tos = IPTOS_LOWDELAY;
2583 h->ip_off = htons(V_path_mtu_discovery ? IP_DF : 0);
2584 h->ip_len = htons(len);
2585 h->ip_ttl = ttl ? ttl : V_ip_defttl;
2588 pfse->pfse_type = PFSE_IP;
2594 th->th_sum = in6_cksum(m, IPPROTO_TCP,
2595 sizeof(struct ip6_hdr), tlen);
2597 h6->ip6_vfc |= IPV6_VERSION;
2598 h6->ip6_hlim = IPV6_DEFHLIM;
2600 pfse->pfse_type = PFSE_IP6;
2609 pf_return(struct pf_krule *r, struct pf_krule *nr, struct pf_pdesc *pd,
2610 struct pf_state_key *sk, int off, struct mbuf *m, struct tcphdr *th,
2611 struct pfi_kkif *kif, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen,
2614 struct pf_addr * const saddr = pd->src;
2615 struct pf_addr * const daddr = pd->dst;
2616 sa_family_t af = pd->af;
2618 /* undo NAT changes, if they have taken place */
2620 PF_ACPY(saddr, &sk->addr[pd->sidx], af);
2621 PF_ACPY(daddr, &sk->addr[pd->didx], af);
2623 *pd->sport = sk->port[pd->sidx];
2625 *pd->dport = sk->port[pd->didx];
2627 *pd->proto_sum = bproto_sum;
2629 *pd->ip_sum = bip_sum;
2630 m_copyback(m, off, hdrlen, pd->hdr.any);
2632 if (pd->proto == IPPROTO_TCP &&
2633 ((r->rule_flag & PFRULE_RETURNRST) ||
2634 (r->rule_flag & PFRULE_RETURN)) &&
2635 !(th->th_flags & TH_RST)) {
2636 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
2648 h4 = mtod(m, struct ip *);
2649 len = ntohs(h4->ip_len) - off;
2654 h6 = mtod(m, struct ip6_hdr *);
2655 len = ntohs(h6->ip6_plen) - (off - sizeof(*h6));
2660 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
2661 REASON_SET(reason, PFRES_PROTCKSUM);
2663 if (th->th_flags & TH_SYN)
2665 if (th->th_flags & TH_FIN)
2667 pf_send_tcp(m, r, af, pd->dst,
2668 pd->src, th->th_dport, th->th_sport,
2669 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
2670 r->return_ttl, 1, 0, kif->pfik_ifp);
2672 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
2674 pf_send_icmp(m, r->return_icmp >> 8,
2675 r->return_icmp & 255, af, r);
2676 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
2678 pf_send_icmp(m, r->return_icmp6 >> 8,
2679 r->return_icmp6 & 255, af, r);
2683 pf_ieee8021q_setpcp(struct mbuf *m, u_int8_t prio)
2687 KASSERT(prio <= PF_PRIO_MAX,
2688 ("%s with invalid pcp", __func__));
2690 mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_OUT, NULL);
2692 mtag = m_tag_alloc(MTAG_8021Q, MTAG_8021Q_PCP_OUT,
2693 sizeof(uint8_t), M_NOWAIT);
2696 m_tag_prepend(m, mtag);
2699 *(uint8_t *)(mtag + 1) = prio;
2704 pf_match_ieee8021q_pcp(u_int8_t prio, struct mbuf *m)
2709 mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL);
2713 if (prio == PF_PRIO_ZERO)
2716 mpcp = *(uint8_t *)(mtag + 1);
2718 return (mpcp == prio);
2722 pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
2725 struct pf_send_entry *pfse;
2727 struct pf_mtag *pf_mtag;
2729 /* Allocate outgoing queue entry, mbuf and mbuf tag. */
2730 pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
2734 if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) {
2735 free(pfse, M_PFTEMP);
2739 if ((pf_mtag = pf_get_mtag(m0)) == NULL) {
2740 free(pfse, M_PFTEMP);
2744 m0->m_flags |= M_SKIP_FIREWALL;
2746 if (r->rtableid >= 0)
2747 M_SETFIB(m0, r->rtableid);
2751 pf_mtag->qid = r->qid;
2752 /* add hints for ECN */
2753 pf_mtag->hdr = mtod(m0, struct ip *);
2760 pfse->pfse_type = PFSE_ICMP;
2765 pfse->pfse_type = PFSE_ICMP6;
2770 pfse->icmpopts.type = type;
2771 pfse->icmpopts.code = code;
2776 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
2777 * If n is 0, they match if they are equal. If n is != 0, they match if they
2778 * are different.
2781 pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
2782 struct pf_addr *b, sa_family_t af)
2789 if ((a->addr32[0] & m->addr32[0]) ==
2790 (b->addr32[0] & m->addr32[0]))
2796 if (((a->addr32[0] & m->addr32[0]) ==
2797 (b->addr32[0] & m->addr32[0])) &&
2798 ((a->addr32[1] & m->addr32[1]) ==
2799 (b->addr32[1] & m->addr32[1])) &&
2800 ((a->addr32[2] & m->addr32[2]) ==
2801 (b->addr32[2] & m->addr32[2])) &&
2802 ((a->addr32[3] & m->addr32[3]) ==
2803 (b->addr32[3] & m->addr32[3])))
2822 * Return 1 if b <= a <= e, otherwise return 0.
2825 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
2826 struct pf_addr *a, sa_family_t af)
2831 if ((ntohl(a->addr32[0]) < ntohl(b->addr32[0])) ||
2832 (ntohl(a->addr32[0]) > ntohl(e->addr32[0])))
2841 for (i = 0; i < 4; ++i)
2842 if (ntohl(a->addr32[i]) > ntohl(b->addr32[i]))
2844 else if (ntohl(a->addr32[i]) < ntohl(b->addr32[i]))
2847 for (i = 0; i < 4; ++i)
2848 if (ntohl(a->addr32[i]) < ntohl(e->addr32[i]))
2850 else if (ntohl(a->addr32[i]) > ntohl(e->addr32[i]))
2860 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
2864 return ((p > a1) && (p < a2));
2866 return ((p < a1) || (p > a2));
2868 return ((p >= a1) && (p <= a2));
2882 return (0); /* never reached */
2886 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
2891 return (pf_match(op, a1, a2, p));
2895 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
2897 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2899 return (pf_match(op, a1, a2, u));
2903 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
2905 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2907 return (pf_match(op, a1, a2, g));
2911 pf_match_tag(struct mbuf *m, struct pf_krule *r, int *tag, int mtag)
2916 return ((!r->match_tag_not && r->match_tag == *tag) ||
2917 (r->match_tag_not && r->match_tag != *tag));
2921 pf_tag_packet(struct mbuf *m, struct pf_pdesc *pd, int tag)
2924 KASSERT(tag > 0, ("%s: tag %d", __func__, tag));
2926 if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(m)) == NULL))
2929 pd->pf_mtag->tag = tag;
2934 #define PF_ANCHOR_STACKSIZE 32
2935 struct pf_kanchor_stackframe {
2936 struct pf_kruleset *rs;
2937 struct pf_krule *r; /* XXX: + match bit */
2938 struct pf_kanchor *child;
2942 * XXX: We rely on malloc(9) returning pointer-aligned addresses.
2944 #define PF_ANCHORSTACK_MATCH 0x00000001
2945 #define PF_ANCHORSTACK_MASK (PF_ANCHORSTACK_MATCH)
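/*
 * The match flag is stored in the low bit of the frame's rule
 * pointer and masked off by the accessors below, which is why the
 * alignment assumption above matters.
 */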
2947 #define PF_ANCHOR_MATCH(f) ((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH)
2948 #define PF_ANCHOR_RULE(f) (struct pf_krule *) \
2949 ((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK)
2950 #define PF_ANCHOR_SET_MATCH(f) do { (f)->r = (void *) \
2951 ((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH); \
2955 pf_step_into_anchor(struct pf_kanchor_stackframe *stack, int *depth,
2956 struct pf_kruleset **rs, int n, struct pf_krule **r, struct pf_krule **a,
2959 struct pf_kanchor_stackframe *f;
2965 if (*depth >= PF_ANCHOR_STACKSIZE) {
2966 printf("%s: anchor stack overflow on %s\n",
2967 __func__, (*r)->anchor->name);
2968 *r = TAILQ_NEXT(*r, entries);
2970 } else if (*depth == 0 && a != NULL)
2972 f = stack + (*depth)++;
2975 if ((*r)->anchor_wildcard) {
2976 struct pf_kanchor_node *parent = &(*r)->anchor->children;
2978 if ((f->child = RB_MIN(pf_kanchor_node, parent)) == NULL) {
2982 *rs = &f->child->ruleset;
2985 *rs = &(*r)->anchor->ruleset;
2987 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2991 pf_step_out_of_anchor(struct pf_kanchor_stackframe *stack, int *depth,
2992 struct pf_kruleset **rs, int n, struct pf_krule **r, struct pf_krule **a,
2995 struct pf_kanchor_stackframe *f;
2996 struct pf_krule *fr;
3004 f = stack + *depth - 1;
3005 fr = PF_ANCHOR_RULE(f);
3006 if (f->child != NULL) {
3007 struct pf_kanchor_node *parent;
3010 * This block traverses through
3011 * a wildcard anchor.
3013 parent = &fr->anchor->children;
3014 if (match != NULL && *match) {
3016 * If any of "*" matched, then
3017 * "foo/ *" matched, mark frame
3020 PF_ANCHOR_SET_MATCH(f);
3023 f->child = RB_NEXT(pf_kanchor_node, parent, f->child);
3024 if (f->child != NULL) {
3025 *rs = &f->child->ruleset;
3026 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
3034 if (*depth == 0 && a != NULL)
3037 if (PF_ANCHOR_MATCH(f) || (match != NULL && *match))
3039 *r = TAILQ_NEXT(fr, entries);
3040 } while (*r == NULL);
3047 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
3048 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
3053 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3054 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
3058 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3059 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
3060 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
3061 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
3062 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
3063 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
3064 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
3065 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
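/*
 * The result combines the pool address's network bits with the
 * source address's host bits, as selected by rmask.
 */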
3071 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
3076 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
3080 if (addr->addr32[3] == 0xffffffff) {
3081 addr->addr32[3] = 0;
3082 if (addr->addr32[2] == 0xffffffff) {
3083 addr->addr32[2] = 0;
3084 if (addr->addr32[1] == 0xffffffff) {
3085 addr->addr32[1] = 0;
3087 htonl(ntohl(addr->addr32[0]) + 1);
3090 htonl(ntohl(addr->addr32[1]) + 1);
3093 htonl(ntohl(addr->addr32[2]) + 1);
3096 htonl(ntohl(addr->addr32[3]) + 1);
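/*
 * The address is incremented as one 128-bit big-endian integer:
 * bump the lowest word and let the 0xffffffff tests above carry
 * into the higher words.
 */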
3103 pf_socket_lookup(int direction, struct pf_pdesc *pd, struct mbuf *m)
3105 struct pf_addr *saddr, *daddr;
3106 u_int16_t sport, dport;
3107 struct inpcbinfo *pi;
3110 pd->lookup.uid = UID_MAX;
3111 pd->lookup.gid = GID_MAX;
3113 switch (pd->proto) {
3115 if (pd->hdr.tcp == NULL)
3117 sport = pd->hdr.tcp->th_sport;
3118 dport = pd->hdr.tcp->th_dport;
3122 if (pd->hdr.udp == NULL)
3124 sport = pd->hdr.udp->uh_sport;
3125 dport = pd->hdr.udp->uh_dport;
3131 if (direction == PF_IN) {
3146 inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4,
3147 dport, INPLOOKUP_RLOCKPCB, NULL, m);
3149 inp = in_pcblookup_mbuf(pi, saddr->v4, sport,
3150 daddr->v4, dport, INPLOOKUP_WILDCARD |
3151 INPLOOKUP_RLOCKPCB, NULL, m);
3159 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6,
3160 dport, INPLOOKUP_RLOCKPCB, NULL, m);
3162 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport,
3163 &daddr->v6, dport, INPLOOKUP_WILDCARD |
3164 INPLOOKUP_RLOCKPCB, NULL, m);
3174 INP_RLOCK_ASSERT(inp);
3175 pd->lookup.uid = inp->inp_cred->cr_uid;
3176 pd->lookup.gid = inp->inp_cred->cr_groups[0];
3183 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3187 u_int8_t *opt, optlen;
3188 u_int8_t wscale = 0;
3190 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3191 if (hlen <= sizeof(struct tcphdr))
3193 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3195 opt = hdr + sizeof(struct tcphdr);
3196 hlen -= sizeof(struct tcphdr);
3206 if (wscale > TCP_MAX_WINSHIFT)
3207 wscale = TCP_MAX_WINSHIFT;
3208 wscale |= PF_WSCALE_FLAG;
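/*
 * PF_WSCALE_FLAG records that a window scale option was actually
 * present; the (clamped) shift count lives in the PF_WSCALE_MASK
 * bits.
 */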
3223 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3227 u_int8_t *opt, optlen;
3228 u_int16_t mss = V_tcp_mssdflt;
3230 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3231 if (hlen <= sizeof(struct tcphdr))
3233 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3235 opt = hdr + sizeof(struct tcphdr);
3236 hlen -= sizeof(struct tcphdr);
3237 while (hlen >= TCPOLEN_MAXSEG) {
3245 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
3261 pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
3263 struct nhop_object *nh;
3265 struct in6_addr dst6;
3276 hlen = sizeof(struct ip);
3277 nh = fib4_lookup(rtableid, addr->v4, 0, 0, 0);
3279 mss = nh->nh_mtu - hlen - sizeof(struct tcphdr);
3284 hlen = sizeof(struct ip6_hdr);
3285 in6_splitscope(&addr->v6, &dst6, &scopeid);
3286 nh = fib6_lookup(rtableid, &dst6, scopeid, 0, 0);
3288 mss = nh->nh_mtu - hlen - sizeof(struct tcphdr);
3293 mss = max(V_tcp_mssdflt, mss);
3294 mss = min(mss, offer);
3295 mss = max(mss, 64); /* sanity - at least max opt space */
3300 pf_tcp_iss(struct pf_pdesc *pd)
3303 u_int32_t digest[4];
3305 if (V_pf_tcp_secret_init == 0) {
3306 arc4random_buf(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret));
3307 MD5Init(&V_pf_tcp_secret_ctx);
3308 MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret,
3309 sizeof(V_pf_tcp_secret));
3310 V_pf_tcp_secret_init = 1;
3313 ctx = V_pf_tcp_secret_ctx;
3315 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short));
3316 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short));
3317 if (pd->af == AF_INET6) {
3318 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
3319 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
3321 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
3322 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
3324 MD5Final((u_char *)digest, &ctx);
3325 V_pf_tcp_iss_off += 4096;
3326 #define ISN_RANDOM_INCREMENT (4096 - 1)
3327 return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) +
3329 #undef ISN_RANDOM_INCREMENT
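/*
 * The result is a keyed hash of the connection 4-tuple plus a small
 * random increment and a monotonically advancing per-call offset,
 * in the spirit of RFC 6528: hard to predict off-path, yet unlikely
 * to overlap a recent connection's sequence space.
 */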
3333 pf_test_rule(struct pf_krule **rm, struct pf_state **sm, int direction,
3334 struct pfi_kkif *kif, struct mbuf *m, int off, struct pf_pdesc *pd,
3335 struct pf_krule **am, struct pf_kruleset **rsm, struct inpcb *inp)
3337 struct pf_krule *nr = NULL;
3338 struct pf_addr * const saddr = pd->src;
3339 struct pf_addr * const daddr = pd->dst;
3340 sa_family_t af = pd->af;
3341 struct pf_krule *r, *a = NULL;
3342 struct pf_kruleset *ruleset = NULL;
3343 struct pf_ksrc_node *nsn = NULL;
3344 struct tcphdr *th = pd->hdr.tcp;
3345 struct pf_state_key *sk = NULL, *nk = NULL;
3347 int rewrite = 0, hdrlen = 0;
3348 int tag = -1, rtableid = -1;
3352 u_int16_t sport = 0, dport = 0;
3353 u_int16_t bproto_sum = 0, bip_sum = 0;
3354 u_int8_t icmptype = 0, icmpcode = 0;
3355 struct pf_kanchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
3360 INP_LOCK_ASSERT(inp);
3361 pd->lookup.uid = inp->inp_cred->cr_uid;
3362 pd->lookup.gid = inp->inp_cred->cr_groups[0];
3363 pd->lookup.done = 1;
3366 switch (pd->proto) {
3368 sport = th->th_sport;
3369 dport = th->th_dport;
3370 hdrlen = sizeof(*th);
3373 sport = pd->hdr.udp->uh_sport;
3374 dport = pd->hdr.udp->uh_dport;
3375 hdrlen = sizeof(*pd->hdr.udp);
3379 if (pd->af != AF_INET)
3381 sport = dport = pd->hdr.icmp->icmp_id;
3382 hdrlen = sizeof(*pd->hdr.icmp);
3383 icmptype = pd->hdr.icmp->icmp_type;
3384 icmpcode = pd->hdr.icmp->icmp_code;
3386 if (icmptype == ICMP_UNREACH ||
3387 icmptype == ICMP_SOURCEQUENCH ||
3388 icmptype == ICMP_REDIRECT ||
3389 icmptype == ICMP_TIMXCEED ||
3390 icmptype == ICMP_PARAMPROB)
3395 case IPPROTO_ICMPV6:
3398 sport = dport = pd->hdr.icmp6->icmp6_id;
3399 hdrlen = sizeof(*pd->hdr.icmp6);
3400 icmptype = pd->hdr.icmp6->icmp6_type;
3401 icmpcode = pd->hdr.icmp6->icmp6_code;
3403 if (icmptype == ICMP6_DST_UNREACH ||
3404 icmptype == ICMP6_PACKET_TOO_BIG ||
3405 icmptype == ICMP6_TIME_EXCEEDED ||
3406 icmptype == ICMP6_PARAM_PROB)
3411 sport = dport = hdrlen = 0;
3415 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3417 /* check packet for BINAT/NAT/RDR */
3418 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, &sk,
3419 &nk, saddr, daddr, sport, dport, anchor_stack)) != NULL) {
3420 KASSERT(sk != NULL, ("%s: null sk", __func__));
3421 KASSERT(nk != NULL, ("%s: null nk", __func__));
3424 bip_sum = *pd->ip_sum;
3426 switch (pd->proto) {
3428 bproto_sum = th->th_sum;
3429 pd->proto_sum = &th->th_sum;
3431 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3432 nk->port[pd->sidx] != sport) {
3433 pf_change_ap(m, saddr, &th->th_sport, pd->ip_sum,
3434 &th->th_sum, &nk->addr[pd->sidx],
3435 nk->port[pd->sidx], 0, af);
3436 pd->sport = &th->th_sport;
3437 sport = th->th_sport;
3440 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3441 nk->port[pd->didx] != dport) {
3442 pf_change_ap(m, daddr, &th->th_dport, pd->ip_sum,
3443 &th->th_sum, &nk->addr[pd->didx],
3444 nk->port[pd->didx], 0, af);
3445 dport = th->th_dport;
3446 pd->dport = &th->th_dport;
3451 bproto_sum = pd->hdr.udp->uh_sum;
3452 pd->proto_sum = &pd->hdr.udp->uh_sum;
3454 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3455 nk->port[pd->sidx] != sport) {
3456 pf_change_ap(m, saddr, &pd->hdr.udp->uh_sport,
3457 pd->ip_sum, &pd->hdr.udp->uh_sum,
3458 &nk->addr[pd->sidx],
3459 nk->port[pd->sidx], 1, af);
3460 sport = pd->hdr.udp->uh_sport;
3461 pd->sport = &pd->hdr.udp->uh_sport;
3464 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3465 nk->port[pd->didx] != dport) {
3466 pf_change_ap(m, daddr, &pd->hdr.udp->uh_dport,
3467 pd->ip_sum, &pd->hdr.udp->uh_sum,
3468 &nk->addr[pd->didx],
3469 nk->port[pd->didx], 1, af);
3470 dport = pd->hdr.udp->uh_dport;
3471 pd->dport = &pd->hdr.udp->uh_dport;
3477 nk->port[0] = nk->port[1];
3478 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
3479 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3480 nk->addr[pd->sidx].v4.s_addr, 0);
3482 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
3483 pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
3484 nk->addr[pd->didx].v4.s_addr, 0);
3486 if (nk->port[1] != pd->hdr.icmp->icmp_id) {
3487 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
3488 pd->hdr.icmp->icmp_cksum, sport,
3490 pd->hdr.icmp->icmp_id = nk->port[1];
3491 pd->sport = &pd->hdr.icmp->icmp_id;
3493 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
3497 case IPPROTO_ICMPV6:
3498 nk->port[0] = nk->port[1];
3499 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3500 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3501 &nk->addr[pd->sidx], 0);
3503 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3504 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3505 &nk->addr[pd->didx], 0);
3514 &nk->addr[pd->sidx], AF_INET))
3515 pf_change_a(&saddr->v4.s_addr,
3517 nk->addr[pd->sidx].v4.s_addr, 0);
3520 &nk->addr[pd->didx], AF_INET))
3521 pf_change_a(&daddr->v4.s_addr,
3523 nk->addr[pd->didx].v4.s_addr, 0);
3529 &nk->addr[pd->sidx], AF_INET6))
3530 PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3533 &nk->addr[pd->didx], AF_INET6))
3534 PF_ACPY(daddr, &nk->addr[pd->didx], af);
3546 counter_u64_add(r->evaluations, 1);
3547 if (pfi_kkif_match(r->kif, kif) == r->ifnot)
3548 r = r->skip[PF_SKIP_IFP].ptr;
3549 else if (r->direction && r->direction != direction)
3550 r = r->skip[PF_SKIP_DIR].ptr;
3551 else if (r->af && r->af != af)
3552 r = r->skip[PF_SKIP_AF].ptr;
3553 else if (r->proto && r->proto != pd->proto)
3554 r = r->skip[PF_SKIP_PROTO].ptr;
3555 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3556 r->src.neg, kif, M_GETFIB(m)))
3557 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3558 /* tcp/udp only. port_op always 0 in other cases */
3559 else if (r->src.port_op && !pf_match_port(r->src.port_op,
3560 r->src.port[0], r->src.port[1], sport))
3561 r = r->skip[PF_SKIP_SRC_PORT].ptr;
3562 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3563 r->dst.neg, NULL, M_GETFIB(m)))
3564 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3565 /* tcp/udp only. port_op always 0 in other cases */
3566 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3567 r->dst.port[0], r->dst.port[1], dport))
3568 r = r->skip[PF_SKIP_DST_PORT].ptr;
3569 /* icmp only. type always 0 in other cases */
3570 else if (r->type && r->type != icmptype + 1)
3571 r = TAILQ_NEXT(r, entries);
3572 /* icmp only. code always 0 in other cases */
3573 else if (r->code && r->code != icmpcode + 1)
3574 r = TAILQ_NEXT(r, entries);
3575 else if (r->tos && !(r->tos == pd->tos))
3576 r = TAILQ_NEXT(r, entries);
3577 else if (r->rule_flag & PFRULE_FRAGMENT)
3578 r = TAILQ_NEXT(r, entries);
3579 else if (pd->proto == IPPROTO_TCP &&
3580 (r->flagset & th->th_flags) != r->flags)
3581 r = TAILQ_NEXT(r, entries);
3582 /* tcp/udp only. uid.op always 0 in other cases */
3583 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3584 pf_socket_lookup(direction, pd, m), 1)) &&
3585 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3587 r = TAILQ_NEXT(r, entries);
3588 /* tcp/udp only. gid.op always 0 in other cases */
3589 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
3590 pf_socket_lookup(direction, pd, m), 1)) &&
3591 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
3593 r = TAILQ_NEXT(r, entries);
3595 !pf_match_ieee8021q_pcp(r->prio, m))
3596 r = TAILQ_NEXT(r, entries);
3598 r->prob <= arc4random())
3599 r = TAILQ_NEXT(r, entries);
3600 else if (r->match_tag && !pf_match_tag(m, r, &tag,
3601 pd->pf_mtag ? pd->pf_mtag->tag : 0))
3602 r = TAILQ_NEXT(r, entries);
3603 else if (r->os_fingerprint != PF_OSFP_ANY &&
3604 (pd->proto != IPPROTO_TCP || !pf_osfp_match(
3605 pf_osfp_fingerprint(pd, m, off, th),
3606 r->os_fingerprint)))
3607 r = TAILQ_NEXT(r, entries);
3611 if (r->rtableid >= 0)
3612 rtableid = r->rtableid;
3613 if (r->anchor == NULL) {
3620 r = TAILQ_NEXT(r, entries);
3622 pf_step_into_anchor(anchor_stack, &asd,
3623 &ruleset, PF_RULESET_FILTER, &r, &a,
3626 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3627 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3634 REASON_SET(&reason, PFRES_MATCH);
3636 if (r->log || (nr != NULL && nr->log)) {
3638 m_copyback(m, off, hdrlen, pd->hdr.any);
3639 PFLOG_PACKET(kif, m, af, direction, reason, r->log ? r : nr, a,
3643 if ((r->action == PF_DROP) &&
3644 ((r->rule_flag & PFRULE_RETURNRST) ||
3645 (r->rule_flag & PFRULE_RETURNICMP) ||
3646 (r->rule_flag & PFRULE_RETURN))) {
3647 pf_return(r, nr, pd, sk, off, m, th, kif, bproto_sum,
3648 bip_sum, hdrlen, &reason);
3651 if (r->action == PF_DROP)
3654 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3655 REASON_SET(&reason, PFRES_MEMORY);
3659 M_SETFIB(m, rtableid);
3661 if (!state_icmp && (r->keep_state || nr != NULL ||
3662 (pd->flags & PFDESC_TCP_NORM))) {
3664 action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off,
3665 sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum,
3667 if (action != PF_PASS) {
3668 if (action == PF_DROP &&
3669 (r->rule_flag & PFRULE_RETURN))
3670 pf_return(r, nr, pd, sk, off, m, th, kif,
3671 bproto_sum, bip_sum, hdrlen, &reason);
3676 uma_zfree(V_pf_state_key_z, sk);
3678 uma_zfree(V_pf_state_key_z, nk);
3681 /* copy back packet headers if we performed NAT operations */
3683 m_copyback(m, off, hdrlen, pd->hdr.any);
3685 if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) &&
3686 direction == PF_OUT &&
3687 V_pfsync_defer_ptr != NULL && V_pfsync_defer_ptr(*sm, m))
3689 * We want the state created, but we don't
3690 * want to send this in case a partner
3691 * firewall has to know about it to allow
3692 * replies through it.
3700 uma_zfree(V_pf_state_key_z, sk);
3702 uma_zfree(V_pf_state_key_z, nk);
3707 pf_create_state(struct pf_krule *r, struct pf_krule *nr, struct pf_krule *a,
3708 struct pf_pdesc *pd, struct pf_ksrc_node *nsn, struct pf_state_key *nk,
3709 struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport,
3710 u_int16_t dport, int *rewrite, struct pfi_kkif *kif, struct pf_state **sm,
3711 int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen)
3713 struct pf_state *s = NULL;
3714 struct pf_ksrc_node *sn = NULL;
3715 struct tcphdr *th = pd->hdr.tcp;
3716 u_int16_t mss = V_tcp_mssdflt;
3719 /* check maximums */
3720 if (r->max_states &&
3721 (counter_u64_fetch(r->states_cur) >= r->max_states)) {
3722 counter_u64_add(V_pf_status.lcounters[LCNT_STATES], 1);
3723 REASON_SET(&reason, PFRES_MAXSTATES);
3726 /* src node for filter rule */
3727 if ((r->rule_flag & PFRULE_SRCTRACK ||
3728 r->rpool.opts & PF_POOL_STICKYADDR) &&
3729 pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
3730 REASON_SET(&reason, PFRES_SRCLIMIT);
3733 /* src node for translation rule */
3734 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3735 pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
3736 REASON_SET(&reason, PFRES_SRCLIMIT);
3739 s = uma_zalloc(V_pf_state_z, M_NOWAIT | M_ZERO);
3741 REASON_SET(&reason, PFRES_MEMORY);
3744 for (int i = 0; i < 2; i++) {
3745 s->bytes[i] = counter_u64_alloc(M_NOWAIT);
3746 s->packets[i] = counter_u64_alloc(M_NOWAIT);
3748 if (s->bytes[i] == NULL || s->packets[i] == NULL) {
3750 REASON_SET(&reason, PFRES_MEMORY);
3755 s->nat_rule.ptr = nr;
3757 STATE_INC_COUNTERS(s);
3759 s->state_flags |= PFSTATE_ALLOWOPTS;
3760 if (r->rule_flag & PFRULE_STATESLOPPY)
3761 s->state_flags |= PFSTATE_SLOPPY;
3762 s->log = r->log & PF_LOG_ALL;
3763 s->sync_state = PFSYNC_S_NONE;
3765 s->log |= nr->log & PF_LOG_ALL;
3766 switch (pd->proto) {
3768 s->src.seqlo = ntohl(th->th_seq);
3769 s->src.seqhi = s->src.seqlo + pd->p_len + 1;
3770 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
3771 r->keep_state == PF_STATE_MODULATE) {
3772 /* Generate sequence number modulator */
3773 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
3776 pf_change_proto_a(m, &th->th_seq, &th->th_sum,
3777 htonl(s->src.seqlo + s->src.seqdiff), 0);
3781 if (th->th_flags & TH_SYN) {
3783 s->src.wscale = pf_get_wscale(m, off,
3784 th->th_off, pd->af);
3786 s->src.max_win = MAX(ntohs(th->th_win), 1);
3787 if (s->src.wscale & PF_WSCALE_MASK) {
3788 /* Remove scale factor from initial window */
3789 int win = s->src.max_win;
3790 win += 1 << (s->src.wscale & PF_WSCALE_MASK);
3791 s->src.max_win = (win - 1) >>
3792 (s->src.wscale & PF_WSCALE_MASK);
3794 if (th->th_flags & TH_FIN)
3798 s->src.state = TCPS_SYN_SENT;
3799 s->dst.state = TCPS_CLOSED;
3800 s->timeout = PFTM_TCP_FIRST_PACKET;
3803 s->src.state = PFUDPS_SINGLE;
3804 s->dst.state = PFUDPS_NO_TRAFFIC;
3805 s->timeout = PFTM_UDP_FIRST_PACKET;
3809 case IPPROTO_ICMPV6:
3811 s->timeout = PFTM_ICMP_FIRST_PACKET;
3814 s->src.state = PFOTHERS_SINGLE;
3815 s->dst.state = PFOTHERS_NO_TRAFFIC;
3816 s->timeout = PFTM_OTHER_FIRST_PACKET;
3820 if (pf_map_addr(pd->af, r, pd->src, &s->rt_addr, NULL, &sn)) {
3821 REASON_SET(&reason, PFRES_MAPFAILED);
3822 pf_src_tree_remove_state(s);
3823 STATE_DEC_COUNTERS(s);
3824 uma_zfree(V_pf_state_z, s);
3827 s->rt_kif = r->rpool.cur->kif;
3830 s->creation = time_uptime;
3831 s->expire = time_uptime;
3836 /* XXX We only modify one side for now. */
3837 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
3838 s->nat_src_node = nsn;
3840 if (pd->proto == IPPROTO_TCP) {
3841 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
3842 off, pd, th, &s->src, &s->dst)) {
3843 REASON_SET(&reason, PFRES_MEMORY);
3844 pf_src_tree_remove_state(s);
3845 STATE_DEC_COUNTERS(s);
3846 uma_zfree(V_pf_state_z, s);
3849 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
3850 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
3851 &s->src, &s->dst, rewrite)) {
3852 /* This really shouldn't happen!!! */
3853 DPFPRINTF(PF_DEBUG_URGENT,
3854 ("pf_normalize_tcp_stateful failed on first "
3856 pf_normalize_tcp_cleanup(s);
3857 pf_src_tree_remove_state(s);
3858 STATE_DEC_COUNTERS(s);
3859 uma_zfree(V_pf_state_z, s);
3863 s->direction = pd->dir;
3866 * sk/nk could already have been set up by pf_get_translation().
3869 KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p",
3870 __func__, nr, sk, nk));
3871 sk = pf_state_key_setup(pd, pd->src, pd->dst, sport, dport);
3876 KASSERT((sk != NULL && nk != NULL), ("%s: nr %p sk %p, nk %p",
3877 __func__, nr, sk, nk));
3879 /* Swap sk/nk for PF_OUT. */
3880 if (pf_state_insert(BOUND_IFACE(r, kif),
3881 (pd->dir == PF_IN) ? sk : nk,
3882 (pd->dir == PF_IN) ? nk : sk, s)) {
3883 if (pd->proto == IPPROTO_TCP)
3884 pf_normalize_tcp_cleanup(s);
3885 REASON_SET(&reason, PFRES_STATEINS);
3886 pf_src_tree_remove_state(s);
3887 STATE_DEC_COUNTERS(s);
3888 uma_zfree(V_pf_state_z, s);
3895 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
3896 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
3897 s->src.state = PF_TCPS_PROXY_SRC;
3898 /* undo NAT changes, if they have taken place */
3900 struct pf_state_key *skt = s->key[PF_SK_WIRE];
3901 if (pd->dir == PF_OUT)
3902 skt = s->key[PF_SK_STACK];
3903 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
3904 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
3906 *pd->sport = skt->port[pd->sidx];
3908 *pd->dport = skt->port[pd->didx];
3910 *pd->proto_sum = bproto_sum;
3912 *pd->ip_sum = bip_sum;
3913 m_copyback(m, off, hdrlen, pd->hdr.any);
3915 s->src.seqhi = htonl(arc4random());
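/*
 * Pick a random ISS for the SYN|ACK we send back to the client;
 * the real destination sees nothing until the client completes
 * its side of the handshake.
 */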
3916 /* Find MSS option */
3917 int rtid = M_GETFIB(m);
3918 mss = pf_get_mss(m, off, th->th_off, pd->af);
3919 mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
3920 mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
3922 pf_send_tcp(NULL, r, pd->af, pd->dst, pd->src, th->th_dport,
3923 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
3924 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL);
3925 REASON_SET(&reason, PFRES_SYNPROXY);
3926 return (PF_SYNPROXY_DROP);
3933 uma_zfree(V_pf_state_key_z, sk);
3935 uma_zfree(V_pf_state_key_z, nk);
3938 struct pf_srchash *sh;
3940 sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
3941 PF_HASHROW_LOCK(sh);
3942 if (--sn->states == 0 && sn->expire == 0) {
3943 pf_unlink_src_node(sn);
3944 uma_zfree(V_pf_sources_z, sn);
3946 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
3948 PF_HASHROW_UNLOCK(sh);
3951 if (nsn != sn && nsn != NULL) {
3952 struct pf_srchash *sh;
3954 sh = &V_pf_srchash[pf_hashsrc(&nsn->addr, nsn->af)];
3955 PF_HASHROW_LOCK(sh);
3956 if (--nsn->states == 0 && nsn->expire == 0) {
3957 pf_unlink_src_node(nsn);
3958 uma_zfree(V_pf_sources_z, nsn);
3960 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
3962 PF_HASHROW_UNLOCK(sh);
3969 pf_test_fragment(struct pf_krule **rm, int direction, struct pfi_kkif *kif,
3970 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_krule **am,
3971 struct pf_kruleset **rsm)
3973 struct pf_krule *r, *a = NULL;
3974 struct pf_kruleset *ruleset = NULL;
3975 sa_family_t af = pd->af;
3980 struct pf_kanchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
3984 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3986 counter_u64_add(r->evaluations, 1);
3987 if (pfi_kkif_match(r->kif, kif) == r->ifnot)
3988 r = r->skip[PF_SKIP_IFP].ptr;
3989 else if (r->direction && r->direction != direction)
3990 r = r->skip[PF_SKIP_DIR].ptr;
3991 else if (r->af && r->af != af)
3992 r = r->skip[PF_SKIP_AF].ptr;
3993 else if (r->proto && r->proto != pd->proto)
3994 r = r->skip[PF_SKIP_PROTO].ptr;
3995 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
3996 r->src.neg, kif, M_GETFIB(m)))
3997 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3998 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
3999 r->dst.neg, NULL, M_GETFIB(m)))
4000 r = r->skip[PF_SKIP_DST_ADDR].ptr;
4001 else if (r->tos && !(r->tos == pd->tos))
4002 r = TAILQ_NEXT(r, entries);
4003 else if (r->os_fingerprint != PF_OSFP_ANY)
4004 r = TAILQ_NEXT(r, entries);
4005 else if (pd->proto == IPPROTO_UDP &&
4006 (r->src.port_op || r->dst.port_op))
4007 r = TAILQ_NEXT(r, entries);
4008 else if (pd->proto == IPPROTO_TCP &&
4009 (r->src.port_op || r->dst.port_op || r->flagset))
4010 r = TAILQ_NEXT(r, entries);
4011 else if ((pd->proto == IPPROTO_ICMP ||
4012 pd->proto == IPPROTO_ICMPV6) &&
4013 (r->type || r->code))
4014 r = TAILQ_NEXT(r, entries);
4016 !pf_match_ieee8021q_pcp(r->prio, m))
4017 r = TAILQ_NEXT(r, entries);
4018 else if (r->prob && r->prob <=
4019 (arc4random() % (UINT_MAX - 1) + 1))
4020 r = TAILQ_NEXT(r, entries);
4021 else if (r->match_tag && !pf_match_tag(m, r, &tag,
4022 pd->pf_mtag ? pd->pf_mtag->tag : 0))
4023 r = TAILQ_NEXT(r, entries);
4025 if (r->anchor == NULL) {
4032 r = TAILQ_NEXT(r, entries);
4034 pf_step_into_anchor(anchor_stack, &asd,
4035 &ruleset, PF_RULESET_FILTER, &r, &a,
4038 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
4039 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
4046 REASON_SET(&reason, PFRES_MATCH);
4049 PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd,
4052 if (r->action != PF_PASS)
4055 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
4056 REASON_SET(&reason, PFRES_MEMORY);
4064 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
4065 struct pf_state **state, struct pfi_kkif *kif, struct mbuf *m, int off,
4066 struct pf_pdesc *pd, u_short *reason, int *copyback)
4068 struct tcphdr *th = pd->hdr.tcp;
4069 u_int16_t win = ntohs(th->th_win);
4070 u_int32_t ack, end, seq, orig_seq;
4074 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
4075 sws = src->wscale & PF_WSCALE_MASK;
4076 dws = dst->wscale & PF_WSCALE_MASK;
4081 * Sequence tracking algorithm from Guido van Rooij's paper:
4082 * http://www.madison-gurkha.com/publications/tcp_filtering/
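* In short: each peer tracks a window of acceptable sequence numbers
* (seqlo/seqhi) and a maximum advertised window; a segment passes
* only if it fits within the other side's window and its ACK stays
* within MAXACKWINDOW of data actually sent.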
4086 orig_seq = seq = ntohl(th->th_seq);
4087 if (src->seqlo == 0) {
4088 /* First packet from this end. Set its state */
4090 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
4091 src->scrub == NULL) {
4092 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
4093 REASON_SET(reason, PFRES_MEMORY);
4098 /* Deferred generation of sequence number modulator */
4099 if (dst->seqdiff && !src->seqdiff) {
4100 /* use a random ISS for the TCP server */
4101 while ((src->seqdiff = arc4random() - seq) == 0)
4103 ack = ntohl(th->th_ack) - dst->seqdiff;
4104 pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
4106 pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
4109 ack = ntohl(th->th_ack);
4112 end = seq + pd->p_len;
4113 if (th->th_flags & TH_SYN) {
4115 if (dst->wscale & PF_WSCALE_FLAG) {
4116 src->wscale = pf_get_wscale(m, off, th->th_off,
4118 if (src->wscale & PF_WSCALE_FLAG) {
4119 /* Remove scale factor from initial
4120 * window */
4121 sws = src->wscale & PF_WSCALE_MASK;
4122 win = ((u_int32_t)win + (1 << sws) - 1)
4124 dws = dst->wscale & PF_WSCALE_MASK;
4126 /* fixup other window */
4127 dst->max_win <<= dst->wscale &
4129 /* in case of a retrans SYN|ACK */
4134 if (th->th_flags & TH_FIN)
4138 if (src->state < TCPS_SYN_SENT)
4139 src->state = TCPS_SYN_SENT;
4142 * May need to slide the window (seqhi may have been set by
4143 * the crappy stack check or if we picked up the connection
4144 * after establishment)
4146 if (src->seqhi == 1 ||
4147 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
4148 src->seqhi = end + MAX(1, dst->max_win << dws);
4149 if (win > src->max_win)
4153 ack = ntohl(th->th_ack) - dst->seqdiff;
4155 /* Modulate sequence numbers */
4156 pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
4158 pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
4161 end = seq + pd->p_len;
4162 if (th->th_flags & TH_SYN)
4164 if (th->th_flags & TH_FIN)
4168 if ((th->th_flags & TH_ACK) == 0) {
4169 /* Let it pass through the ack skew check */
4171 } else if ((ack == 0 &&
4172 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
4173 /* broken tcp stacks do not set ack */
4174 (dst->state < TCPS_SYN_SENT)) {
4176 * Many stacks (ours included) will set the ACK number in a
4177 * FIN|ACK if the SYN times out -- no sequence to ACK.
4183 /* Ease sequencing restrictions on no-data packets */
4188 ackskew = dst->seqlo - ack;
4191 * Need to demodulate the sequence numbers in any TCP SACK options
4192 * (Selective ACK). We could optionally validate the SACK values
4193 * against the current ACK window, either forwards or backwards, but
4194 * I'm not confident that SACK has been implemented properly
4195 * everywhere. It wouldn't surprise me if several stacks accidentally
4196 * SACK too far backwards of previously ACKed data. There really aren't
4197 * any security implications of bad SACKing unless the target stack
4198 * doesn't validate the option length correctly. Someone trying to
4199 * spoof into a TCP connection won't bother blindly sending SACK
4200 * options to the peer.
4202 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
4203 if (pf_modulate_sack(m, off, pd, th, dst))
4207 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
4208 if (SEQ_GEQ(src->seqhi, end) &&
4209 /* Last octet inside other's window space */
4210 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
4211 /* Retrans: not more than one window back */
4212 (ackskew >= -MAXACKWINDOW) &&
4213 /* Acking not more than one reassembled fragment backwards */
4214 (ackskew <= (MAXACKWINDOW << sws)) &&
4215 /* Acking not more than one window forward */
4216 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
4217 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
4218 (pd->flags & PFDESC_IP_REAS) == 0)) {
4219 /* Require an exact/+1 sequence match on resets when possible */
4221 if (dst->scrub || src->scrub) {
4222 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4223 *state, src, dst, copyback))
4227 /* update max window */
4228 if (src->max_win < win)
4230 /* synchronize sequencing */
4231 if (SEQ_GT(end, src->seqlo))
4233 /* slide the window of what the other end can send */
4234 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4235 dst->seqhi = ack + MAX((win << sws), 1);
4238 if (th->th_flags & TH_SYN)
4239 if (src->state < TCPS_SYN_SENT)
4240 src->state = TCPS_SYN_SENT;
4241 if (th->th_flags & TH_FIN)
4242 if (src->state < TCPS_CLOSING)
4243 src->state = TCPS_CLOSING;
4244 if (th->th_flags & TH_ACK) {
4245 if (dst->state == TCPS_SYN_SENT) {
4246 dst->state = TCPS_ESTABLISHED;
4247 if (src->state == TCPS_ESTABLISHED &&
4248 (*state)->src_node != NULL &&
4249 pf_src_connlimit(state)) {
4250 REASON_SET(reason, PFRES_SRCLIMIT);
4253 } else if (dst->state == TCPS_CLOSING)
4254 dst->state = TCPS_FIN_WAIT_2;
4256 if (th->th_flags & TH_RST)
4257 src->state = dst->state = TCPS_TIME_WAIT;
4259 /* update expire time */
4260 (*state)->expire = time_uptime;
4261 if (src->state >= TCPS_FIN_WAIT_2 &&
4262 dst->state >= TCPS_FIN_WAIT_2)
4263 (*state)->timeout = PFTM_TCP_CLOSED;
4264 else if (src->state >= TCPS_CLOSING &&
4265 dst->state >= TCPS_CLOSING)
4266 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4267 else if (src->state < TCPS_ESTABLISHED ||
4268 dst->state < TCPS_ESTABLISHED)
4269 (*state)->timeout = PFTM_TCP_OPENING;
4270 else if (src->state >= TCPS_CLOSING ||
4271 dst->state >= TCPS_CLOSING)
4272 (*state)->timeout = PFTM_TCP_CLOSING;
4274 (*state)->timeout = PFTM_TCP_ESTABLISHED;
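/*
 * The ladder above runs from most-closed to least-closed: a pair of
 * fully closed peers expires quickly, while an established session
 * keeps the longest idle timeout.
 */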
4276 /* Fall through to PASS packet */
4278 } else if ((dst->state < TCPS_SYN_SENT ||
4279 dst->state >= TCPS_FIN_WAIT_2 ||
4280 src->state >= TCPS_FIN_WAIT_2) &&
4281 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
4282 /* Within a window forward of the originating packet */
4283 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
4284 /* Within a window backward of the originating packet */
4287 * This currently handles three situations:
4288 * 1) Stupid stacks will shotgun SYNs before their peer
4289 * replies.
4290 * 2) When PF catches an already established stream (the
4291 * firewall rebooted, the state table was flushed, routes
4292 * changed ...)
4293 * 3) Packets get funky immediately after the connection
4294 * closes (this should catch Solaris spurious ACK|FINs
4295 * that web servers like to spew after a close)
4297 * This must be a little more careful than the above code
4298 * since packet floods will also be caught here. We don't
4299 * update the TTL here to mitigate the damage of a packet
4300 * flood and so the same code can handle awkward establishment
4301 * and a loosened connection close.
4302 * In the establishment case, a correct peer response will
4303 * validate the connection, go through the normal state code
4304 * and keep updating the state TTL.
4307 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4308 printf("pf: loose state match: ");
4309 pf_print_state(*state);
4310 pf_print_flags(th->th_flags);
4311 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4312 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
4314 (unsigned long long)counter_u64_fetch((*state)->packets[0]),
4315 (unsigned long long)counter_u64_fetch((*state)->packets[1]),
4316 pd->dir == PF_IN ? "in" : "out",
4317 pd->dir == (*state)->direction ? "fwd" : "rev");
4320 if (dst->scrub || src->scrub) {
4321 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4322 *state, src, dst, copyback))
4326 /* update max window */
4327 if (src->max_win < win)
4329 /* synchronize sequencing */
4330 if (SEQ_GT(end, src->seqlo))
4332 /* slide the window of what the other end can send */
4333 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4334 dst->seqhi = ack + MAX((win << sws), 1);
4337 * Cannot set dst->seqhi here since this could be a shotgunned
4338 * SYN and not an already established connection.
4341 if (th->th_flags & TH_FIN)
4342 if (src->state < TCPS_CLOSING)
4343 src->state = TCPS_CLOSING;
4344 if (th->th_flags & TH_RST)
4345 src->state = dst->state = TCPS_TIME_WAIT;
4347 /* Fall through to PASS packet */
4350 if ((*state)->dst.state == TCPS_SYN_SENT &&
4351 (*state)->src.state == TCPS_SYN_SENT) {
4352 /* Send RST for state mismatches during handshake */
4353 if (!(th->th_flags & TH_RST))
4354 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4355 pd->dst, pd->src, th->th_dport,
4356 th->th_sport, ntohl(th->th_ack), 0,
4358 (*state)->rule.ptr->return_ttl, 1, 0,
4363 } else if (V_pf_status.debug >= PF_DEBUG_MISC) {
4364 printf("pf: BAD state: ");
4365 pf_print_state(*state);
4366 pf_print_flags(th->th_flags);
4367 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4368 "pkts=%llu:%llu dir=%s,%s\n",
4369 seq, orig_seq, ack, pd->p_len, ackskew,
4370 (unsigned long long)counter_u64_fetch((*state)->packets[0]),
4371 (unsigned long long)counter_u64_fetch((*state)->packets[1]),
4372 pd->dir == PF_IN ? "in" : "out",
4373 pd->dir == (*state)->direction ? "fwd" : "rev");
4374 printf("pf: State failure on: %c %c %c %c | %c %c\n",
4375 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4376 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4378 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4379 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4380 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4381 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
4383 REASON_SET(reason, PFRES_BADSTATE);
4391 pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst,
4392 struct pf_state **state, struct pf_pdesc *pd, u_short *reason)
4394 struct tcphdr *th = pd->hdr.tcp;
4396 if (th->th_flags & TH_SYN)
4397 if (src->state < TCPS_SYN_SENT)
4398 src->state = TCPS_SYN_SENT;
4399 if (th->th_flags & TH_FIN)
4400 if (src->state < TCPS_CLOSING)
4401 src->state = TCPS_CLOSING;
4402 if (th->th_flags & TH_ACK) {
4403 if (dst->state == TCPS_SYN_SENT) {
4404 dst->state = TCPS_ESTABLISHED;
4405 if (src->state == TCPS_ESTABLISHED &&
4406 (*state)->src_node != NULL &&
4407 pf_src_connlimit(state)) {
4408 REASON_SET(reason, PFRES_SRCLIMIT);
4411 } else if (dst->state == TCPS_CLOSING) {
4412 dst->state = TCPS_FIN_WAIT_2;
4413 } else if (src->state == TCPS_SYN_SENT &&
4414 dst->state < TCPS_SYN_SENT) {
4416 * Handle a special sloppy case where we only see one
4417 * half of the connection. If there is an ACK after
4418 * the initial SYN without ever seeing a packet from
4419 * the destination, set the connection to established.
4421 dst->state = src->state = TCPS_ESTABLISHED;
4422 if ((*state)->src_node != NULL &&
4423 pf_src_connlimit(state)) {
4424 REASON_SET(reason, PFRES_SRCLIMIT);
4427 } else if (src->state == TCPS_CLOSING &&
4428 dst->state == TCPS_ESTABLISHED &&
4431 * Handle the closing of half connections where we
4432 * don't see the full bidirectional FIN/ACK+ACK
4433 * handshake.
4435 dst->state = TCPS_CLOSING;
4438 if (th->th_flags & TH_RST)
4439 src->state = dst->state = TCPS_TIME_WAIT;
4441 /* update expire time */
4442 (*state)->expire = time_uptime;
4443 if (src->state >= TCPS_FIN_WAIT_2 &&
4444 dst->state >= TCPS_FIN_WAIT_2)
4445 (*state)->timeout = PFTM_TCP_CLOSED;
4446 else if (src->state >= TCPS_CLOSING &&
4447 dst->state >= TCPS_CLOSING)
4448 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4449 else if (src->state < TCPS_ESTABLISHED ||
4450 dst->state < TCPS_ESTABLISHED)
4451 (*state)->timeout = PFTM_TCP_OPENING;
4452 else if (src->state >= TCPS_CLOSING ||
4453 dst->state >= TCPS_CLOSING)
4454 (*state)->timeout = PFTM_TCP_CLOSING;
4456 (*state)->timeout = PFTM_TCP_ESTABLISHED;
4462 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kkif *kif,
4463 struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
4466 struct pf_state_key_cmp key;
4467 struct tcphdr *th = pd->hdr.tcp;
4469 struct pf_state_peer *src, *dst;
4470 struct pf_state_key *sk;
4472 bzero(&key, sizeof(key));
4474 key.proto = IPPROTO_TCP;
4475 if (direction == PF_IN) { /* wire side, straight */
4476 PF_ACPY(&key.addr[0], pd->src, key.af);
4477 PF_ACPY(&key.addr[1], pd->dst, key.af);
4478 key.port[0] = th->th_sport;
4479 key.port[1] = th->th_dport;
4480 } else { /* stack side, reverse */
4481 PF_ACPY(&key.addr[1], pd->src, key.af);
4482 PF_ACPY(&key.addr[0], pd->dst, key.af);
4483 key.port[1] = th->th_sport;
4484 key.port[0] = th->th_dport;
4487 STATE_LOOKUP(kif, &key, direction, *state, pd);
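/*
 * STATE_LOOKUP() returns PF_DROP from this function when no
 * matching state exists, so *state is known to be valid below.
 */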
4489 if (direction == (*state)->direction) {
4490 src = &(*state)->src;
4491 dst = &(*state)->dst;
4493 src = &(*state)->dst;
4494 dst = &(*state)->src;
4497 sk = (*state)->key[pd->didx];
4499 if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
4500 if (direction != (*state)->direction) {
4501 REASON_SET(reason, PFRES_SYNPROXY);
4502 return (PF_SYNPROXY_DROP);
4504 if (th->th_flags & TH_SYN) {
4505 if (ntohl(th->th_seq) != (*state)->src.seqlo) {
4506 REASON_SET(reason, PFRES_SYNPROXY);
4509 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4510 pd->src, th->th_dport, th->th_sport,
4511 (*state)->src.seqhi, ntohl(th->th_seq) + 1,
4512 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0, NULL);
4513 REASON_SET(reason, PFRES_SYNPROXY);
4514 return (PF_SYNPROXY_DROP);
4515 } else if ((th->th_flags & (TH_ACK|TH_RST|TH_FIN)) != TH_ACK ||
4516 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4517 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4518 REASON_SET(reason, PFRES_SYNPROXY);
4520 } else if ((*state)->src_node != NULL &&
4521 pf_src_connlimit(state)) {
4522 REASON_SET(reason, PFRES_SRCLIMIT);
4525 (*state)->src.state = PF_TCPS_PROXY_DST;
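/*
 * The client-side handshake is complete; PF_TCPS_PROXY_DST below
 * replays the handshake towards the real destination and then
 * splices the two half-connections together.
 */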
4527 if ((*state)->src.state == PF_TCPS_PROXY_DST) {
4528 if (direction == (*state)->direction) {
4529 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
4530 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4531 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4532 REASON_SET(reason, PFRES_SYNPROXY);
4535 (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
4536 if ((*state)->dst.seqhi == 1)
4537 (*state)->dst.seqhi = htonl(arc4random());
4538 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4539 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4540 sk->port[pd->sidx], sk->port[pd->didx],
4541 (*state)->dst.seqhi, 0, TH_SYN, 0,
4542 (*state)->src.mss, 0, 0, (*state)->tag, NULL);
4543 REASON_SET(reason, PFRES_SYNPROXY);
4544 return (PF_SYNPROXY_DROP);
4545 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
4547 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
4548 REASON_SET(reason, PFRES_SYNPROXY);
4551 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
4552 (*state)->dst.seqlo = ntohl(th->th_seq);
4553 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4554 pd->src, th->th_dport, th->th_sport,
4555 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
4556 TH_ACK, (*state)->src.max_win, 0, 0, 0,
4557 (*state)->tag, NULL);
4558 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4559 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4560 sk->port[pd->sidx], sk->port[pd->didx],
4561 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
4562 TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0, NULL);
4563 (*state)->src.seqdiff = (*state)->dst.seqhi -
4564 (*state)->src.seqlo;
4565 (*state)->dst.seqdiff = (*state)->src.seqhi -
4566 (*state)->dst.seqlo;
4567 (*state)->src.seqhi = (*state)->src.seqlo +
4568 (*state)->dst.max_win;
4569 (*state)->dst.seqhi = (*state)->dst.seqlo +
4570 (*state)->src.max_win;
4571 (*state)->src.wscale = (*state)->dst.wscale = 0;
4572 (*state)->src.state = (*state)->dst.state =
4574 REASON_SET(reason, PFRES_SYNPROXY);
4575 return (PF_SYNPROXY_DROP);
4579 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
4580 dst->state >= TCPS_FIN_WAIT_2 &&
4581 src->state >= TCPS_FIN_WAIT_2) {
4582 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4583 printf("pf: state reuse ");
4584 pf_print_state(*state);
4585 pf_print_flags(th->th_flags);
4588 /* XXX make sure it's the same direction ?? */
4589 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
4590 pf_unlink_state(*state, PF_ENTER_LOCKED);
4595 if ((*state)->state_flags & PFSTATE_SLOPPY) {
4596 if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP)
4599 if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason,
4600 &copyback) == PF_DROP)
4604 /* translate source/destination address, if necessary */
4605 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4606 struct pf_state_key *nk = (*state)->key[pd->didx];
4608 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4609 nk->port[pd->sidx] != th->th_sport)
4610 pf_change_ap(m, pd->src, &th->th_sport,
4611 pd->ip_sum, &th->th_sum, &nk->addr[pd->sidx],
4612 nk->port[pd->sidx], 0, pd->af);
4614 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4615 nk->port[pd->didx] != th->th_dport)
4616 pf_change_ap(m, pd->dst, &th->th_dport,
4617 pd->ip_sum, &th->th_sum, &nk->addr[pd->didx],
4618 nk->port[pd->didx], 0, pd->af);
4622 /* Copyback sequence modulation or stateful scrub changes if needed */
4624 m_copyback(m, off, sizeof(*th), (caddr_t)th);
4630 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kkif *kif,
4631 struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4633 struct pf_state_peer *src, *dst;
4634 struct pf_state_key_cmp key;
4635 struct udphdr *uh = pd->hdr.udp;
4637 bzero(&key, sizeof(key));
4639 key.proto = IPPROTO_UDP;
4640 if (direction == PF_IN) { /* wire side, straight */
4641 PF_ACPY(&key.addr[0], pd->src, key.af);
4642 PF_ACPY(&key.addr[1], pd->dst, key.af);
4643 key.port[0] = uh->uh_sport;
4644 key.port[1] = uh->uh_dport;
4645 } else { /* stack side, reverse */
4646 PF_ACPY(&key.addr[1], pd->src, key.af);
4647 PF_ACPY(&key.addr[0], pd->dst, key.af);
4648 key.port[1] = uh->uh_sport;
4649 key.port[0] = uh->uh_dport;
4652 STATE_LOOKUP(kif, &key, direction, *state, pd);
4654 if (direction == (*state)->direction) {
4655 src = &(*state)->src;
4656 dst = &(*state)->dst;
4658 src = &(*state)->dst;
4659 dst = &(*state)->src;
4663 if (src->state < PFUDPS_SINGLE)
4664 src->state = PFUDPS_SINGLE;
4665 if (dst->state == PFUDPS_SINGLE)
4666 dst->state = PFUDPS_MULTIPLE;
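/*
 * Once packets have been seen in both directions both peers reach
 * PFUDPS_MULTIPLE and the state earns the longer of the two UDP
 * timeouts below.
 */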
4668 /* update expire time */
4669 (*state)->expire = time_uptime;
4670 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
4671 (*state)->timeout = PFTM_UDP_MULTIPLE;
4673 (*state)->timeout = PFTM_UDP_SINGLE;
4675 /* translate source/destination address, if necessary */
4676 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4677 struct pf_state_key *nk = (*state)->key[pd->didx];
4679 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4680 nk->port[pd->sidx] != uh->uh_sport)
4681 pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum,
4682 &uh->uh_sum, &nk->addr[pd->sidx],
4683 nk->port[pd->sidx], 1, pd->af);
4685 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4686 nk->port[pd->didx] != uh->uh_dport)
4687 pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum,
4688 &uh->uh_sum, &nk->addr[pd->didx],
4689 nk->port[pd->didx], 1, pd->af);
4690 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
4697 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kkif *kif,
4698 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
4700 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
4701 u_int16_t icmpid = 0, *icmpsum;
4702 u_int8_t icmptype, icmpcode;
4704 struct pf_state_key_cmp key;
4706 bzero(&key, sizeof(key));
4707 switch (pd->proto) {
4710 icmptype = pd->hdr.icmp->icmp_type;
4711 icmpcode = pd->hdr.icmp->icmp_code;
4712 icmpid = pd->hdr.icmp->icmp_id;
4713 icmpsum = &pd->hdr.icmp->icmp_cksum;
4715 if (icmptype == ICMP_UNREACH ||
4716 icmptype == ICMP_SOURCEQUENCH ||
4717 icmptype == ICMP_REDIRECT ||
4718 icmptype == ICMP_TIMXCEED ||
4719 icmptype == ICMP_PARAMPROB)
4724 case IPPROTO_ICMPV6:
4725 icmptype = pd->hdr.icmp6->icmp6_type;
4726 icmpcode = pd->hdr.icmp6->icmp6_code;
4727 icmpid = pd->hdr.icmp6->icmp6_id;
4728 icmpsum = &pd->hdr.icmp6->icmp6_cksum;
4730 if (icmptype == ICMP6_DST_UNREACH ||
4731 icmptype == ICMP6_PACKET_TOO_BIG ||
4732 icmptype == ICMP6_TIME_EXCEEDED ||
4733 icmptype == ICMP6_PARAM_PROB)
4741 * ICMP query/reply message not related to a TCP/UDP packet.
4742 * Search for an ICMP state.
4745 key.proto = pd->proto;
4746 key.port[0] = key.port[1] = icmpid;
4747 if (direction == PF_IN) { /* wire side, straight */
4748 PF_ACPY(&key.addr[0], pd->src, key.af);
4749 PF_ACPY(&key.addr[1], pd->dst, key.af);
4750 } else { /* stack side, reverse */
4751 PF_ACPY(&key.addr[1], pd->src, key.af);
4752 PF_ACPY(&key.addr[0], pd->dst, key.af);
4755 STATE_LOOKUP(kif, &key, direction, *state, pd);
4757 (*state)->expire = time_uptime;
4758 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
4760 /* translate source/destination address, if necessary */
4761 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4762 struct pf_state_key *nk = (*state)->key[pd->didx];
4767 if (PF_ANEQ(pd->src,
4768 &nk->addr[pd->sidx], AF_INET))
4769 pf_change_a(&saddr->v4.s_addr,
4771 nk->addr[pd->sidx].v4.s_addr, 0);
4773 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
4775 pf_change_a(&daddr->v4.s_addr,
4777 nk->addr[pd->didx].v4.s_addr, 0);
4780 pd->hdr.icmp->icmp_id) {
4781 pd->hdr.icmp->icmp_cksum =
4783 pd->hdr.icmp->icmp_cksum, icmpid,
4784 nk->port[pd->sidx], 0);
4785 pd->hdr.icmp->icmp_id =
4789 m_copyback(m, off, ICMP_MINLEN,
4790 (caddr_t )pd->hdr.icmp);
4795 if (PF_ANEQ(pd->src,
4796 &nk->addr[pd->sidx], AF_INET6))
4798 &pd->hdr.icmp6->icmp6_cksum,
4799 &nk->addr[pd->sidx], 0);
4801 if (PF_ANEQ(pd->dst,
4802 &nk->addr[pd->didx], AF_INET6))
4804 &pd->hdr.icmp6->icmp6_cksum,
4805 &nk->addr[pd->didx], 0);
4807 m_copyback(m, off, sizeof(struct icmp6_hdr),
4808 (caddr_t )pd->hdr.icmp6);
4817 * ICMP error message in response to a TCP/UDP packet.
4818 * Extract the inner TCP/UDP header and search for that state.
4821 struct pf_pdesc pd2;
4822 bzero(&pd2, sizeof pd2);
4827 struct ip6_hdr h2_6;
4834 /* Payload packet is from the opposite direction. */
4835 pd2.sidx = (direction == PF_IN) ? 1 : 0;
4836 pd2.didx = (direction == PF_IN) ? 0 : 1;
4840 /* offset of h2 in mbuf chain */
4841 ipoff2 = off + ICMP_MINLEN;
4843 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
4844 NULL, reason, pd2.af)) {
4845 DPFPRINTF(PF_DEBUG_MISC,
4846 ("pf: ICMP error message too short "
4851 * ICMP error messages don't refer to non-first
4852 * fragments
4854 if (h2.ip_off & htons(IP_OFFMASK)) {
4855 REASON_SET(reason, PFRES_FRAG);
4859 /* offset of protocol header that follows h2 */
4860 off2 = ipoff2 + (h2.ip_hl << 2);
4862 pd2.proto = h2.ip_p;
4863 pd2.src = (struct pf_addr *)&h2.ip_src;
4864 pd2.dst = (struct pf_addr *)&h2.ip_dst;
4865 pd2.ip_sum = &h2.ip_sum;
4870 ipoff2 = off + sizeof(struct icmp6_hdr);
4872 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
4873 NULL, reason, pd2.af)) {
4874 DPFPRINTF(PF_DEBUG_MISC,
4875 ("pf: ICMP error message too short "
4879 pd2.proto = h2_6.ip6_nxt;
4880 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
4881 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
4883 off2 = ipoff2 + sizeof(h2_6);
4885 switch (pd2.proto) {
4886 case IPPROTO_FRAGMENT:
4888 * ICMPv6 error messages for
4889 * non-first fragments
4891 REASON_SET(reason, PFRES_FRAG);
4894 case IPPROTO_HOPOPTS:
4895 case IPPROTO_ROUTING:
4896 case IPPROTO_DSTOPTS: {
4897 /* get next header and header length */
4898 struct ip6_ext opt6;
4900 if (!pf_pull_hdr(m, off2, &opt6,
4901 sizeof(opt6), NULL, reason,
4903 DPFPRINTF(PF_DEBUG_MISC,
4904 ("pf: ICMPv6 short opt\n"));
4907 if (pd2.proto == IPPROTO_AH)
4908 off2 += (opt6.ip6e_len + 2) * 4;
4910 off2 += (opt6.ip6e_len + 1) * 8;
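/*
 * AH's length field counts 32-bit words minus two; all other IPv6
 * extension headers count 8-octet units not including the first
 * eight octets.
 */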
4911 pd2.proto = opt6.ip6e_nxt;
4912 /* go to the next header */
4919 } while (!terminal);
4924 if (PF_ANEQ(pd->dst, pd2.src, pd->af)) {
4925 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4926 printf("pf: BAD ICMP %d:%d outer dst: ",
4927 icmptype, icmpcode);
4928 pf_print_host(pd->src, 0, pd->af);
4930 pf_print_host(pd->dst, 0, pd->af);
4931 printf(" inner src: ");
4932 pf_print_host(pd2.src, 0, pd2.af);
4934 pf_print_host(pd2.dst, 0, pd2.af);
4937 REASON_SET(reason, PFRES_BADSTATE);
4941 switch (pd2.proto) {
4945 struct pf_state_peer *src, *dst;
4949 /*
4950 * Only the first 8 bytes of the TCP header can be
4951 * expected. Don't access any TCP header fields after
4952 * th_seq; an ackskew test is not possible.
4953 */
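/*
 * (Background: RFC 792 only obliges the sender of an ICMP error to
 * quote the offending IP header plus the first 64 bits of its
 * payload; for TCP that covers the ports and th_seq but not th_ack,
 * which is why only 8 bytes are pulled and no ackskew test is done.)
 */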
4954 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
4955 pd2.af)) {
4956 DPFPRINTF(PF_DEBUG_MISC,
4957 ("pf: ICMP error message too short "
4958 "(tcp)\n"));
4959 return (PF_DROP);
4960 }
4962 key.af = pd2.af;
4963 key.proto = IPPROTO_TCP;
4964 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4965 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4966 key.port[pd2.sidx] = th.th_sport;
4967 key.port[pd2.didx] = th.th_dport;
4969 STATE_LOOKUP(kif, &key, direction, *state, pd);
4971 if (direction == (*state)->direction) {
4972 src = &(*state)->dst;
4973 dst = &(*state)->src;
4974 } else {
4975 src = &(*state)->src;
4976 dst = &(*state)->dst;
4977 }
4979 if (src->wscale && dst->wscale)
4980 dws = dst->wscale & PF_WSCALE_MASK;
4981 else
4982 dws = 0;
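/*
 * Rough shape of the validation below (not a literal quote): after
 * undoing any sequence number modulation pf applied (src->seqdiff),
 * the quoted sequence number must satisfy
 *
 *	src->seqlo - (dst->max_win << dws) <= seq <= src->seqhi
 *
 * i.e. it must lie within the window the sender could legitimately
 * have used; otherwise the ICMP error is presumed forged and dropped.
 */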
4984 /* Demodulate sequence number */
4985 seq = ntohl(th.th_seq) - src->seqdiff;
4986 if (src->seqdiff) {
4987 pf_change_a(&th.th_seq, icmpsum,
4988 htonl(seq), 0);
4989 copyback = 1;
4990 }
4992 if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
4993 (!SEQ_GEQ(src->seqhi, seq) ||
4994 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
4995 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4996 printf("pf: BAD ICMP %d:%d ",
4997 icmptype, icmpcode);
4998 pf_print_host(pd->src, 0, pd->af);
4999 printf(" -> ");
5000 pf_print_host(pd->dst, 0, pd->af);
5001 printf(" state: ");
5002 pf_print_state(*state);
5003 printf(" seq=%u\n", seq);
5004 }
5005 REASON_SET(reason, PFRES_BADSTATE);
5006 return (PF_DROP);
5007 } else {
5008 if (V_pf_status.debug >= PF_DEBUG_MISC) {
5009 printf("pf: OK ICMP %d:%d ",
5010 icmptype, icmpcode);
5011 pf_print_host(pd->src, 0, pd->af);
5012 printf(" -> ");
5013 pf_print_host(pd->dst, 0, pd->af);
5014 printf(" state: ");
5015 pf_print_state(*state);
5016 printf(" seq=%u\n", seq);
5017 }
5018 }
5020 /* translate source/destination address, if necessary */
5021 if ((*state)->key[PF_SK_WIRE] !=
5022 (*state)->key[PF_SK_STACK]) {
5023 struct pf_state_key *nk =
5024 (*state)->key[pd->didx];
5026 if (PF_ANEQ(pd2.src,
5027 &nk->addr[pd2.sidx], pd2.af) ||
5028 nk->port[pd2.sidx] != th.th_sport)
5029 pf_change_icmp(pd2.src, &th.th_sport,
5030 daddr, &nk->addr[pd2.sidx],
5031 nk->port[pd2.sidx], NULL,
5032 pd2.ip_sum, icmpsum,
5033 pd->ip_sum, 0, pd2.af);
5035 if (PF_ANEQ(pd2.dst,
5036 &nk->addr[pd2.didx], pd2.af) ||
5037 nk->port[pd2.didx] != th.th_dport)
5038 pf_change_icmp(pd2.dst, &th.th_dport,
5039 saddr, &nk->addr[pd2.didx],
5040 nk->port[pd2.didx], NULL,
5041 pd2.ip_sum, icmpsum,
5042 pd->ip_sum, 0, pd2.af);
5043 copyback = 1;
5044 }
5046 if (copyback) {
5047 switch (pd2.af) {
5048 #ifdef INET
5049 case AF_INET:
5050 m_copyback(m, off, ICMP_MINLEN,
5051 (caddr_t )pd->hdr.icmp);
5052 m_copyback(m, ipoff2, sizeof(h2),
5053 (caddr_t)&h2);
5054 break;
5055 #endif /* INET */
5056 #ifdef INET6
5057 case AF_INET6:
5058 m_copyback(m, off,
5059 sizeof(struct icmp6_hdr),
5060 (caddr_t )pd->hdr.icmp6);
5061 m_copyback(m, ipoff2, sizeof(h2_6),
5062 (caddr_t)&h2_6);
5063 break;
5064 #endif /* INET6 */
5065 }
5066 m_copyback(m, off2, 8, (caddr_t)&th);
5067 }
5069 break;
5070 }
5072 case IPPROTO_UDP: {
5073 struct udphdr uh;
5075 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
5076 NULL, reason, pd2.af)) {
5077 DPFPRINTF(PF_DEBUG_MISC,
5078 ("pf: ICMP error message too short "
5079 "(udp)\n"));
5080 return (PF_DROP);
5081 }
5083 key.af = pd2.af;
5084 key.proto = IPPROTO_UDP;
5085 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5086 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5087 key.port[pd2.sidx] = uh.uh_sport;
5088 key.port[pd2.didx] = uh.uh_dport;
5090 STATE_LOOKUP(kif, &key, direction, *state, pd);
5092 /* translate source/destination address, if necessary */
5093 if ((*state)->key[PF_SK_WIRE] !=
5094 (*state)->key[PF_SK_STACK]) {
5095 struct pf_state_key *nk =
5096 (*state)->key[pd->didx];
5098 if (PF_ANEQ(pd2.src,
5099 &nk->addr[pd2.sidx], pd2.af) ||
5100 nk->port[pd2.sidx] != uh.uh_sport)
5101 pf_change_icmp(pd2.src, &uh.uh_sport,
5102 daddr, &nk->addr[pd2.sidx],
5103 nk->port[pd2.sidx], &uh.uh_sum,
5104 pd2.ip_sum, icmpsum,
5105 pd->ip_sum, 1, pd2.af);
5107 if (PF_ANEQ(pd2.dst,
5108 &nk->addr[pd2.didx], pd2.af) ||
5109 nk->port[pd2.didx] != uh.uh_dport)
5110 pf_change_icmp(pd2.dst, &uh.uh_dport,
5111 saddr, &nk->addr[pd2.didx],
5112 nk->port[pd2.didx], &uh.uh_sum,
5113 pd2.ip_sum, icmpsum,
5114 pd->ip_sum, 1, pd2.af);
5116 switch (pd2.af) {
5117 #ifdef INET
5118 case AF_INET:
5119 m_copyback(m, off, ICMP_MINLEN,
5120 (caddr_t )pd->hdr.icmp);
5121 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5122 break;
5123 #endif /* INET */
5124 #ifdef INET6
5125 case AF_INET6:
5126 m_copyback(m, off,
5127 sizeof(struct icmp6_hdr),
5128 (caddr_t )pd->hdr.icmp6);
5129 m_copyback(m, ipoff2, sizeof(h2_6),
5130 (caddr_t)&h2_6);
5131 break;
5132 #endif /* INET6 */
5133 }
5134 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
5140 case IPPROTO_ICMP: {
5143 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
5144 NULL, reason, pd2.af)) {
5145 DPFPRINTF(PF_DEBUG_MISC,
5146 ("pf: ICMP error message too short i"
5147 "cmp\n"));
5148 return (PF_DROP);
5149 }
5151 key.af = pd2.af;
5152 key.proto = IPPROTO_ICMP;
5153 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5154 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5155 key.port[0] = key.port[1] = iih.icmp_id;
5157 STATE_LOOKUP(kif, &key, direction, *state, pd);
5159 /* translate source/destination address, if necessary */
5160 if ((*state)->key[PF_SK_WIRE] !=
5161 (*state)->key[PF_SK_STACK]) {
5162 struct pf_state_key *nk =
5163 (*state)->key[pd->didx];
5165 if (PF_ANEQ(pd2.src,
5166 &nk->addr[pd2.sidx], pd2.af) ||
5167 nk->port[pd2.sidx] != iih.icmp_id)
5168 pf_change_icmp(pd2.src, &iih.icmp_id,
5169 daddr, &nk->addr[pd2.sidx],
5170 nk->port[pd2.sidx], NULL,
5171 pd2.ip_sum, icmpsum,
5172 pd->ip_sum, 0, AF_INET);
5174 if (PF_ANEQ(pd2.dst,
5175 &nk->addr[pd2.didx], pd2.af) ||
5176 nk->port[pd2.didx] != iih.icmp_id)
5177 pf_change_icmp(pd2.dst, &iih.icmp_id,
5178 saddr, &nk->addr[pd2.didx],
5179 nk->port[pd2.didx], NULL,
5180 pd2.ip_sum, icmpsum,
5181 pd->ip_sum, 0, AF_INET);
5183 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
5184 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5185 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
5192 case IPPROTO_ICMPV6: {
5193 struct icmp6_hdr iih;
5195 if (!pf_pull_hdr(m, off2, &iih,
5196 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
5197 DPFPRINTF(PF_DEBUG_MISC,
5198 ("pf: ICMP error message too short "
5199 "(icmp6)\n"));
5200 return (PF_DROP);
5201 }
5203 key.af = pd2.af;
5204 key.proto = IPPROTO_ICMPV6;
5205 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5206 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5207 key.port[0] = key.port[1] = iih.icmp6_id;
5209 STATE_LOOKUP(kif, &key, direction, *state, pd);
5211 /* translate source/destination address, if necessary */
5212 if ((*state)->key[PF_SK_WIRE] !=
5213 (*state)->key[PF_SK_STACK]) {
5214 struct pf_state_key *nk =
5215 (*state)->key[pd->didx];
5217 if (PF_ANEQ(pd2.src,
5218 &nk->addr[pd2.sidx], pd2.af) ||
5219 nk->port[pd2.sidx] != iih.icmp6_id)
5220 pf_change_icmp(pd2.src, &iih.icmp6_id,
5221 daddr, &nk->addr[pd2.sidx],
5222 nk->port[pd2.sidx], NULL,
5223 pd2.ip_sum, icmpsum,
5224 pd->ip_sum, 0, AF_INET6);
5226 if (PF_ANEQ(pd2.dst,
5227 &nk->addr[pd2.didx], pd2.af) ||
5228 nk->port[pd2.didx] != iih.icmp6_id)
5229 pf_change_icmp(pd2.dst, &iih.icmp6_id,
5230 saddr, &nk->addr[pd2.didx],
5231 nk->port[pd2.didx], NULL,
5232 pd2.ip_sum, icmpsum,
5233 pd->ip_sum, 0, AF_INET6);
5235 m_copyback(m, off, sizeof(struct icmp6_hdr),
5236 (caddr_t)pd->hdr.icmp6);
5237 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
5238 m_copyback(m, off2, sizeof(struct icmp6_hdr),
5239 (caddr_t)&iih);
5247 key.proto = pd2.proto;
5248 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5249 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5250 key.port[0] = key.port[1] = 0;
5252 STATE_LOOKUP(kif, &key, direction, *state, pd);
5254 /* translate source/destination address, if necessary */
5255 if ((*state)->key[PF_SK_WIRE] !=
5256 (*state)->key[PF_SK_STACK]) {
5257 struct pf_state_key *nk =
5258 (*state)->key[pd->didx];
5260 if (PF_ANEQ(pd2.src,
5261 &nk->addr[pd2.sidx], pd2.af))
5262 pf_change_icmp(pd2.src, NULL, daddr,
5263 &nk->addr[pd2.sidx], 0, NULL,
5264 pd2.ip_sum, icmpsum,
5265 pd->ip_sum, 0, pd2.af);
5267 if (PF_ANEQ(pd2.dst,
5268 &nk->addr[pd2.didx], pd2.af))
5269 pf_change_icmp(pd2.dst, NULL, saddr,
5270 &nk->addr[pd2.didx], 0, NULL,
5271 pd2.ip_sum, icmpsum,
5272 pd->ip_sum, 0, pd2.af);
5274 switch (pd2.af) {
5275 #ifdef INET
5276 case AF_INET:
5277 m_copyback(m, off, ICMP_MINLEN,
5278 (caddr_t)pd->hdr.icmp);
5279 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5280 break;
5281 #endif /* INET */
5282 #ifdef INET6
5283 case AF_INET6:
5284 m_copyback(m, off,
5285 sizeof(struct icmp6_hdr),
5286 (caddr_t )pd->hdr.icmp6);
5287 m_copyback(m, ipoff2, sizeof(h2_6),
5288 (caddr_t)&h2_6);
5289 break;
5290 #endif /* INET6 */
5291 }
5300 static int
5301 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kkif *kif,
5302 struct mbuf *m, struct pf_pdesc *pd)
5303 {
5304 struct pf_state_peer *src, *dst;
5305 struct pf_state_key_cmp key;
5307 bzero(&key, sizeof(key));
5309 key.proto = pd->proto;
5310 if (direction == PF_IN) {
5311 PF_ACPY(&key.addr[0], pd->src, key.af);
5312 PF_ACPY(&key.addr[1], pd->dst, key.af);
5313 key.port[0] = key.port[1] = 0;
5314 } else {
5315 PF_ACPY(&key.addr[1], pd->src, key.af);
5316 PF_ACPY(&key.addr[0], pd->dst, key.af);
5317 key.port[1] = key.port[0] = 0;
5318 }
5320 STATE_LOOKUP(kif, &key, direction, *state, pd);
5322 if (direction == (*state)->direction) {
5323 src = &(*state)->src;
5324 dst = &(*state)->dst;
5325 } else {
5326 src = &(*state)->dst;
5327 dst = &(*state)->src;
5328 }
5331 if (src->state < PFOTHERS_SINGLE)
5332 src->state = PFOTHERS_SINGLE;
5333 if (dst->state == PFOTHERS_SINGLE)
5334 dst->state = PFOTHERS_MULTIPLE;
5336 /* update expire time */
5337 (*state)->expire = time_uptime;
5338 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
5339 (*state)->timeout = PFTM_OTHER_MULTIPLE;
5340 else
5341 (*state)->timeout = PFTM_OTHER_SINGLE;
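/*
 * Note: for protocols without a richer state machine pf only tracks
 * whether one side (PFOTHERS_SINGLE) or both sides (PFOTHERS_MULTIPLE)
 * have sent traffic; once both have, the state is treated as
 * established and given the longer PFTM_OTHER_MULTIPLE idle timeout.
 */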
5343 /* translate source/destination address, if necessary */
5344 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5345 struct pf_state_key *nk = (*state)->key[pd->didx];
5347 KASSERT(nk, ("%s: nk is null", __func__));
5348 KASSERT(pd, ("%s: pd is null", __func__));
5349 KASSERT(pd->src, ("%s: pd->src is null", __func__));
5350 KASSERT(pd->dst, ("%s: pd->dst is null", __func__));
5351 switch (pd->af) {
5352 #ifdef INET
5353 case AF_INET:
5354 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5355 pf_change_a(&pd->src->v4.s_addr,
5356 pd->ip_sum,
5357 nk->addr[pd->sidx].v4.s_addr,
5358 0);
5360 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5361 pf_change_a(&pd->dst->v4.s_addr,
5362 pd->ip_sum,
5363 nk->addr[pd->didx].v4.s_addr,
5364 0);
5365 break;
5366 #endif /* INET */
5368 #ifdef INET6
5369 case AF_INET6:
5370 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6))
5371 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
5373 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6))
5374 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
5375 break;
5376 #endif /* INET6 */
5377 }
5378 }
5379 return (PF_PASS);
5380 }
5381 /*
5382 * ipoff and off are measured from the start of the mbuf chain.
5383 * h must be at "ipoff" on the mbuf chain.
5384 */
5385 void *
5386 pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
5387 u_short *actionp, u_short *reasonp, sa_family_t af)
5388 {
5389 switch (af) {
5390 #ifdef INET
5391 case AF_INET: {
5392 struct ip *h = mtod(m, struct ip *);
5393 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
5395 if (fragoff) {
5396 if (fragoff >= len)
5397 ACTION_SET(actionp, PF_PASS);
5398 else {
5399 ACTION_SET(actionp, PF_DROP);
5400 REASON_SET(reasonp, PFRES_FRAG);
5401 }
5402 return (NULL);
5403 }
5404 if (m->m_pkthdr.len < off + len ||
5405 ntohs(h->ip_len) < off + len) {
5406 ACTION_SET(actionp, PF_DROP);
5407 REASON_SET(reasonp, PFRES_SHORT);
5408 return (NULL);
5409 }
5410 break;
5411 }
5412 #endif /* INET */
5413 #ifdef INET6
5414 case AF_INET6: {
5415 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
5417 if (m->m_pkthdr.len < off + len ||
5418 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
5419 (unsigned)(off + len)) {
5420 ACTION_SET(actionp, PF_DROP);
5421 REASON_SET(reasonp, PFRES_SHORT);
5422 return (NULL);
5423 }
5424 break;
5425 }
5426 #endif /* INET6 */
5427 }
5428 m_copydata(m, off, len, p);
5429 return (p);
5430 }
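/*
 * Typical usage, mirroring the call sites in pf_test() below
 * (illustration only):
 *
 *	struct tcphdr th;
 *
 *	if (!pf_pull_hdr(m, off, &th, sizeof(th), &action, &reason,
 *	    AF_INET)) {
 *		log = action != PF_PASS;
 *		goto done;
 *	}
 */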
5432 int
5433 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kkif *kif,
5434 int rtableid)
5435 {
5436 struct ifnet *ifp;
5438 /*
5439 * Skip check for addresses with embedded interface scope,
5440 * as they would always match anyway.
5441 */
5442 if (af == AF_INET6 && IN6_IS_SCOPE_EMBED(&addr->v6))
5443 return (1);
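/*
 * What remains is a reverse-path ("uRPF") test: the FIB is asked
 * whether the source address is reachable, optionally constrained to
 * the interface the packet arrived on; fib4_check_urpf() and
 * fib6_check_urpf() return non-zero when a suitable route exists.
 */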
5445 if (af != AF_INET && af != AF_INET6)
5446 return (0);
5448 /* Skip checks for ipsec interfaces */
5449 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
5450 return (1);
5452 ifp = (kif != NULL) ? kif->pfik_ifp : NULL;
5454 switch (af) {
5455 #ifdef INET6
5456 case AF_INET6:
5457 return (fib6_check_urpf(rtableid, &addr->v6, 0, NHR_NONE,
5458 ifp));
5459 #endif
5460 #ifdef INET
5461 case AF_INET:
5462 return (fib4_check_urpf(rtableid, addr->v4, 0, NHR_NONE,
5463 ifp));
5464 #endif
5465 }
5467 return (0);
5468 }
5471 static void
5472 pf_route(struct mbuf **m, struct pf_krule *r, int dir, struct ifnet *oifp,
5473 struct pf_state *s, struct pf_pdesc *pd, struct inpcb *inp)
5474 {
5475 struct mbuf *m0, *m1;
5476 struct sockaddr_in dst;
5478 struct ifnet *ifp = NULL;
5479 struct pf_addr naddr;
5480 struct pf_ksrc_node *sn = NULL;
5482 uint16_t ip_len, ip_off;
5484 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5485 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5488 if ((pd->pf_mtag == NULL &&
5489 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5490 pd->pf_mtag->routed++ > 3) {
5496 if (r->rt == PF_DUPTO) {
5497 if ((pd->pf_mtag->flags & PF_DUPLICATED)) {
5499 ifp = r->rpool.cur->kif ?
5500 r->rpool.cur->kif->pfik_ifp : NULL;
5502 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5506 /* When the 2nd interface is not skipped */
5514 pd->pf_mtag->flags |= PF_DUPLICATED;
5515 if (((m0 = m_dup(*m, M_NOWAIT)) == NULL)) {
5522 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5530 ip = mtod(m0, struct ip *);
5532 bzero(&dst, sizeof(dst));
5533 dst.sin_family = AF_INET;
5534 dst.sin_len = sizeof(dst);
5535 dst.sin_addr = ip->ip_dst;
5537 bzero(&naddr, sizeof(naddr));
5539 if (TAILQ_EMPTY(&r->rpool.list)) {
5540 DPFPRINTF(PF_DEBUG_URGENT,
5541 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5545 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
5547 if (!PF_AZERO(&naddr, AF_INET))
5548 dst.sin_addr.s_addr = naddr.v4.s_addr;
5549 ifp = r->rpool.cur->kif ?
5550 r->rpool.cur->kif->pfik_ifp : NULL;
5552 if (!PF_AZERO(&s->rt_addr, AF_INET))
5553 dst.sin_addr.s_addr =
5554 s->rt_addr.v4.s_addr;
5555 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5562 if (pf_test(PF_OUT, 0, ifp, &m0, inp) != PF_PASS)
5563 goto bad;
5564 else if (m0 == NULL)
5565 goto done;
5566 if (m0->m_len < sizeof(struct ip)) {
5567 DPFPRINTF(PF_DEBUG_URGENT,
5568 ("%s: m0->m_len < sizeof(struct ip)\n", __func__));
5569 goto bad;
5570 }
5571 ip = mtod(m0, struct ip *);
5574 if (ifp->if_flags & IFF_LOOPBACK)
5575 m0->m_flags |= M_SKIP_FIREWALL;
5577 ip_len = ntohs(ip->ip_len);
5578 ip_off = ntohs(ip->ip_off);
5580 /* Copied from FreeBSD 10.0-CURRENT ip_output. */
5581 m0->m_pkthdr.csum_flags |= CSUM_IP;
5582 if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) {
5583 m0 = mb_unmapped_to_ext(m0);
5584 if (m0 == NULL)
5585 goto done;
5586 in_delayed_cksum(m0);
5587 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
5589 #if defined(SCTP) || defined(SCTP_SUPPORT)
5590 if (m0->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) {
5591 m0 = mb_unmapped_to_ext(m0);
5592 if (m0 == NULL)
5593 goto done;
5594 sctp_delayed_cksum(m0, (uint32_t)(ip->ip_hl << 2));
5595 m0->m_pkthdr.csum_flags &= ~CSUM_SCTP;
5599 /*
5600 * If small enough for interface, or the interface will take
5601 * care of the fragmentation for us, we can just send directly.
5602 */
5603 if (ip_len <= ifp->if_mtu ||
5604 (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0) {
5606 if (m0->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) {
5607 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
5608 m0->m_pkthdr.csum_flags &= ~CSUM_IP;
5610 m_clrprotoflags(m0); /* Avoid confusing lower layers. */
5611 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5612 goto done;
5613 }
5615 /* Balk when DF bit is set or the interface doesn't support TSO. */
5616 if ((ip_off & IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5618 KMOD_IPSTAT_INC(ips_cantfrag);
5619 if (r->rt != PF_DUPTO) {
5620 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
5627 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist);
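/*
 * ip_fragment() splits m0 into a chain of fragments linked through
 * m_nextpkt; the loop below detaches each fragment and hands it to
 * the interface individually, freeing the remaining fragments once
 * an error has been seen.
 */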
5631 for (; m0; m0 = m1) {
5632 m1 = m0->m_nextpkt;
5633 m0->m_nextpkt = NULL;
5634 if (error == 0) {
5635 m_clrprotoflags(m0);
5636 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5637 } else
5638 m_freem(m0);
5639 }
5641 if (error == 0)
5642 KMOD_IPSTAT_INC(ips_fragmented);
5645 if (r->rt != PF_DUPTO)
5659 static void
5660 pf_route6(struct mbuf **m, struct pf_krule *r, int dir, struct ifnet *oifp,
5661 struct pf_state *s, struct pf_pdesc *pd, struct inpcb *inp)
5662 {
5664 struct sockaddr_in6 dst;
5665 struct ip6_hdr *ip6;
5666 struct ifnet *ifp = NULL;
5667 struct pf_addr naddr;
5668 struct pf_ksrc_node *sn = NULL;
5670 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5671 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5674 if ((pd->pf_mtag == NULL &&
5675 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5676 pd->pf_mtag->routed++ > 3) {
5682 if (r->rt == PF_DUPTO) {
5683 if ((pd->pf_mtag->flags & PF_DUPLICATED)) {
5685 ifp = r->rpool.cur->kif ?
5686 r->rpool.cur->kif->pfik_ifp : NULL;
5688 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5692 /* When the 2nd interface is not skipped */
5700 pd->pf_mtag->flags |= PF_DUPLICATED;
5701 if (((m0 = m_dup(*m, M_NOWAIT)) == NULL)) {
5708 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5716 ip6 = mtod(m0, struct ip6_hdr *);
5718 bzero(&dst, sizeof(dst));
5719 dst.sin6_family = AF_INET6;
5720 dst.sin6_len = sizeof(dst);
5721 dst.sin6_addr = ip6->ip6_dst;
5723 bzero(&naddr, sizeof(naddr));
5725 if (TAILQ_EMPTY(&r->rpool.list)) {
5726 DPFPRINTF(PF_DEBUG_URGENT,
5727 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5731 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
5733 if (!PF_AZERO(&naddr, AF_INET6))
5734 PF_ACPY((struct pf_addr *)&dst.sin6_addr,
5736 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
5738 if (!PF_AZERO(&s->rt_addr, AF_INET6))
5739 PF_ACPY((struct pf_addr *)&dst.sin6_addr,
5740 &s->rt_addr, AF_INET6);
5741 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5751 if (pf_test6(PF_OUT, PFIL_FWD, ifp, &m0, inp) != PF_PASS)
5752 goto bad;
5753 else if (m0 == NULL)
5754 goto done;
5755 if (m0->m_len < sizeof(struct ip6_hdr)) {
5756 DPFPRINTF(PF_DEBUG_URGENT,
5757 ("%s: m0->m_len < sizeof(struct ip6_hdr)\n",
5758 __func__));
5759 goto bad;
5760 }
5761 ip6 = mtod(m0, struct ip6_hdr *);
5764 if (ifp->if_flags & IFF_LOOPBACK)
5765 m0->m_flags |= M_SKIP_FIREWALL;
5767 if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA_IPV6 &
5768 ~ifp->if_hwassist) {
5769 uint32_t plen = m0->m_pkthdr.len - sizeof(*ip6);
5770 m0 = mb_unmapped_to_ext(m0);
5771 if (m0 == NULL)
5772 goto done;
5773 in6_delayed_cksum(m0, plen, sizeof(struct ip6_hdr));
5774 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA_IPV6;
5777 /*
5778 * If the packet is too large for the outgoing interface,
5779 * send back an icmp6 error.
5780 */
5781 if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr))
5782 dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
5783 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu)
5784 nd6_output_ifp(ifp, ifp, m0, &dst, NULL);
5785 else {
5786 in6_ifstat_inc(ifp, ifs6_in_toobig);
5787 if (r->rt != PF_DUPTO)
5788 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
5794 if (r->rt != PF_DUPTO)
5807 /*
5808 * FreeBSD supports cksum offloads for the following drivers.
5809 * em(4), fxp(4), lge(4), nge(4), re(4), ti(4), txp(4), xl(4)
5810 *
5811 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR :
5812 * network driver performed cksum including pseudo header, need to verify
5813 * csum_data
5814 * CSUM_DATA_VALID :
5815 * network driver performed cksum, needs an additional pseudo-header
5816 * cksum computation with partial csum_data (i.e. lack of H/W support
5817 * for the pseudo header, for instance sk(4) and possibly gem(4))
5818 *
5819 * After validating the cksum of the packet, set both flags CSUM_DATA_VALID
5820 * and CSUM_PSEUDO_HDR in order to avoid recomputation of the cksum in
5821 * upper layers. Also, set csum_data to 0xffff to force cksum validation.
5822 */
5824 static int
5825 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af)
5826 {
5827 u_int16_t sum = 0;
5828 int hw_assist = 0;
5829 struct ip *ip;
5831 if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
5832 return (1);
5833 if (m->m_pkthdr.len < off + len)
5834 return (1);
5836 switch (p) {
5837 case IPPROTO_TCP:
5838 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5839 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5840 sum = m->m_pkthdr.csum_data;
5841 } else {
5842 ip = mtod(m, struct ip *);
5843 sum = in_pseudo(ip->ip_src.s_addr,
5844 ip->ip_dst.s_addr, htonl((u_short)len +
5845 m->m_pkthdr.csum_data + IPPROTO_TCP));
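/*
 * Note on the math above (explanatory, not additional logic): with
 * CSUM_DATA_VALID but no CSUM_PSEUDO_HDR, csum_data is the checksum
 * of the payload alone, so the pseudo header (addresses, protocol,
 * length) is folded in via in_pseudo(); for an intact packet the
 * combined one's-complement sum must come out as 0xffff.
 */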
5852 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5853 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5854 sum = m->m_pkthdr.csum_data;
5855 } else {
5856 ip = mtod(m, struct ip *);
5857 sum = in_pseudo(ip->ip_src.s_addr,
5858 ip->ip_dst.s_addr, htonl((u_short)len +
5859 m->m_pkthdr.csum_data + IPPROTO_UDP));
5867 case IPPROTO_ICMPV6:
5877 if (p == IPPROTO_ICMP) {
5882 sum = in_cksum(m, len);
5886 if (m->m_len < sizeof(struct ip))
5888 sum = in4_cksum(m, p, off, len);
5893 if (m->m_len < sizeof(struct ip6_hdr))
5895 sum = in6_cksum(m, p, off, len);
5906 KMOD_TCPSTAT_INC(tcps_rcvbadsum);
5911 KMOD_UDPSTAT_INC(udps_badsum);
5917 KMOD_ICMPSTAT_INC(icps_checksum);
5922 case IPPROTO_ICMPV6:
5924 KMOD_ICMP6STAT_INC(icp6s_checksum);
5931 if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
5932 m->m_pkthdr.csum_flags |=
5933 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
5934 m->m_pkthdr.csum_data = 0xffff;
5941 int
5942 pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
5943 {
5944 struct pfi_kkif *kif;
5945 u_short action, reason = 0, log = 0;
5946 struct mbuf *m = *m0;
5947 struct ip *h = NULL;
5948 struct m_tag *ipfwtag;
5949 struct pf_krule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
5950 struct pf_state *s = NULL;
5951 struct pf_kruleset *ruleset = NULL;
5953 int off, dirndx, pqid = 0;
5955 PF_RULES_RLOCK_TRACKER;
5959 if (!V_pf_status.running)
5960 return (PF_PASS);
5962 memset(&pd, 0, sizeof(pd));
5964 kif = (struct pfi_kkif *)ifp->if_pf_kif;
5966 if (kif == NULL) {
5967 DPFPRINTF(PF_DEBUG_URGENT,
5968 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
5969 return (PF_DROP);
5970 }
5971 if (kif->pfik_flags & PFI_IFLAG_SKIP)
5972 return (PF_PASS);
5974 if (m->m_flags & M_SKIP_FIREWALL)
5975 return (PF_PASS);
5977 pd.pf_mtag = pf_find_mtag(m);
5981 if (ip_divert_ptr != NULL &&
5982 ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
5983 struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1);
5984 if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) {
5985 if (pd.pf_mtag == NULL &&
5986 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
5990 pd.pf_mtag->flags |= PF_PACKET_LOOPED;
5991 m_tag_delete(m, ipfwtag);
5993 if (pd.pf_mtag && pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) {
5994 m->m_flags |= M_FASTFWD_OURS;
5995 pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
5997 } else if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
5998 /* We do IP header normalization and packet reassembly here */
6002 m = *m0; /* pf_normalize messes with m0 */
6003 h = mtod(m, struct ip *);
6005 off = h->ip_hl << 2;
6006 if (off < (int)sizeof(struct ip)) {
6007 action = PF_DROP;
6008 REASON_SET(&reason, PFRES_SHORT);
6009 log = 1;
6010 goto done;
6011 }
6013 pd.src = (struct pf_addr *)&h->ip_src;
6014 pd.dst = (struct pf_addr *)&h->ip_dst;
6015 pd.sport = pd.dport = NULL;
6016 pd.ip_sum = &h->ip_sum;
6017 pd.proto_sum = NULL;
6020 pd.sidx = (dir == PF_IN) ? 0 : 1;
6021 pd.didx = (dir == PF_IN) ? 1 : 0;
6023 pd.tos = h->ip_tos & ~IPTOS_ECN_MASK;
6024 pd.tot_len = ntohs(h->ip_len);
6026 /* handle fragments that didn't get reassembled by normalization */
6027 if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
6028 action = pf_test_fragment(&r, dir, kif, m, h,
6038 if (!pf_pull_hdr(m, off, &th, sizeof(th),
6039 &action, &reason, AF_INET)) {
6040 log = action != PF_PASS;
6043 pd.p_len = pd.tot_len - off - (th.th_off << 2);
6044 if ((th.th_flags & TH_ACK) && pd.p_len == 0)
6045 pqid = 1;
6046 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6047 if (action == PF_DROP)
6049 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6051 if (action == PF_PASS) {
6052 if (V_pfsync_update_state_ptr != NULL)
6053 V_pfsync_update_state_ptr(s);
6057 } else if (s == NULL)
6058 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6067 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
6068 &action, &reason, AF_INET)) {
6069 log = action != PF_PASS;
6072 if (uh.uh_dport == 0 ||
6073 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
6074 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
6075 action = PF_DROP;
6076 REASON_SET(&reason, PFRES_SHORT);
6077 goto done;
6078 }
6079 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6080 if (action == PF_PASS) {
6081 if (V_pfsync_update_state_ptr != NULL)
6082 V_pfsync_update_state_ptr(s);
6086 } else if (s == NULL)
6087 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6092 case IPPROTO_ICMP: {
6096 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
6097 &action, &reason, AF_INET)) {
6098 log = action != PF_PASS;
6101 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
6103 if (action == PF_PASS) {
6104 if (V_pfsync_update_state_ptr != NULL)
6105 V_pfsync_update_state_ptr(s);
6109 } else if (s == NULL)
6110 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6116 case IPPROTO_ICMPV6: {
6118 DPFPRINTF(PF_DEBUG_MISC,
6119 ("pf: dropping IPv4 packet with ICMPv6 payload\n"));
6125 action = pf_test_state_other(&s, dir, kif, m, &pd);
6126 if (action == PF_PASS) {
6127 if (V_pfsync_update_state_ptr != NULL)
6128 V_pfsync_update_state_ptr(s);
6132 } else if (s == NULL)
6133 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6140 if (action == PF_PASS && h->ip_hl > 5 &&
6141 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6143 REASON_SET(&reason, PFRES_IPOPTIONS);
6145 DPFPRINTF(PF_DEBUG_MISC,
6146 ("pf: dropping packet with ip options\n"));
6149 if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
6150 action = PF_DROP;
6151 REASON_SET(&reason, PFRES_MEMORY);
6152 }
6153 if (r->rtableid >= 0)
6154 M_SETFIB(m, r->rtableid);
6156 if (r->scrub_flags & PFSTATE_SETPRIO) {
6157 if (pd.tos & IPTOS_LOWDELAY)
6158 pqid = 1;
6159 if (pf_ieee8021q_setpcp(m, r->set_prio[pqid])) {
6160 action = PF_DROP;
6161 REASON_SET(&reason, PFRES_MEMORY);
6162 log = 1;
6163 DPFPRINTF(PF_DEBUG_MISC,
6164 ("pf: failed to allocate 802.1q mtag\n"));
6165 }
6166 }
6169 if (action == PF_PASS && r->qid) {
6170 if (pd.pf_mtag == NULL &&
6171 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6172 action = PF_DROP;
6173 REASON_SET(&reason, PFRES_MEMORY);
6174 } else {
6175 if (s != NULL)
6176 pd.pf_mtag->qid_hash = pf_state_hash(s);
6177 if (pqid || (pd.tos & IPTOS_LOWDELAY))
6178 pd.pf_mtag->qid = r->pqid;
6180 pd.pf_mtag->qid = r->qid;
6181 /* Add hints for ecn. */
6182 pd.pf_mtag->hdr = h;
6187 /*
6188 * connections redirected to loopback should not match sockets
6189 * bound specifically to loopback due to security implications,
6190 * see tcp_input() and in_pcblookup_listen().
6191 */
6192 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6193 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6194 (s->nat_rule.ptr->action == PF_RDR ||
6195 s->nat_rule.ptr->action == PF_BINAT) &&
6196 IN_LOOPBACK(ntohl(pd.dst->v4.s_addr)))
6197 m->m_flags |= M_SKIP_FIREWALL;
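/*
 * (M_SKIP_FIREWALL makes later firewall passes leave the mbuf alone;
 * here it lets a connection that was rdr'ed or binat'ed to a loopback
 * address reach sockets that the pcb lookup would otherwise refuse,
 * per the comment above.)
 */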
6199 if (action == PF_PASS && r->divert.port && ip_divert_ptr != NULL &&
6200 !PACKET_LOOPED(&pd)) {
6201 ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0,
6202 sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
6203 if (ipfwtag != NULL) {
6204 ((struct ipfw_rule_ref *)(ipfwtag+1))->info =
6205 ntohs(r->divert.port);
6206 ((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir;
6211 m_tag_prepend(m, ipfwtag);
6212 if (m->m_flags & M_FASTFWD_OURS) {
6213 if (pd.pf_mtag == NULL &&
6214 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6216 REASON_SET(&reason, PFRES_MEMORY);
6218 DPFPRINTF(PF_DEBUG_MISC,
6219 ("pf: failed to allocate tag\n"));
6221 pd.pf_mtag->flags |=
6222 PF_FASTFWD_OURS_PRESENT;
6223 m->m_flags &= ~M_FASTFWD_OURS;
6226 ip_divert_ptr(*m0, dir == PF_IN);
6231 /* XXX: ipfw has the same behaviour! */
6233 REASON_SET(&reason, PFRES_MEMORY);
6235 DPFPRINTF(PF_DEBUG_MISC,
6236 ("pf: failed to allocate divert tag\n"));
6241 struct pf_krule *lr;
6243 if (s != NULL && s->nat_rule.ptr != NULL &&
6244 s->nat_rule.ptr->log & PF_LOG_ALL)
6245 lr = s->nat_rule.ptr;
6248 PFLOG_PACKET(kif, m, AF_INET, dir, reason, lr, a, ruleset, &pd,
6252 counter_u64_add(kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS],
6254 counter_u64_add(kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS],
6257 if (action == PF_PASS || r->action == PF_DROP) {
6258 dirndx = (dir == PF_OUT);
6259 counter_u64_add(r->packets[dirndx], 1);
6260 counter_u64_add(r->bytes[dirndx], pd.tot_len);
6262 counter_u64_add(a->packets[dirndx], 1);
6263 counter_u64_add(a->bytes[dirndx], pd.tot_len);
6266 if (s->nat_rule.ptr != NULL) {
6267 counter_u64_add(s->nat_rule.ptr->packets[dirndx],
6269 counter_u64_add(s->nat_rule.ptr->bytes[dirndx],
6272 if (s->src_node != NULL) {
6273 counter_u64_add(s->src_node->packets[dirndx],
6275 counter_u64_add(s->src_node->bytes[dirndx],
6278 if (s->nat_src_node != NULL) {
6279 counter_u64_add(s->nat_src_node->packets[dirndx],
6281 counter_u64_add(s->nat_src_node->bytes[dirndx],
6284 dirndx = (dir == s->direction) ? 0 : 1;
6285 counter_u64_add(s->packets[dirndx], 1);
6286 counter_u64_add(s->bytes[dirndx], pd.tot_len);
6289 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6290 if (nr != NULL && r == &V_pf_default_rule)
6291 tr = nr;
6292 if (tr->src.addr.type == PF_ADDR_TABLE)
6293 pfr_update_stats(tr->src.addr.p.tbl,
6294 (s == NULL) ? pd.src :
6295 &s->key[(s->direction == PF_IN)]->
6296 addr[(s->direction == PF_OUT)],
6297 pd.af, pd.tot_len, dir == PF_OUT,
6298 r->action == PF_PASS, tr->src.neg);
6299 if (tr->dst.addr.type == PF_ADDR_TABLE)
6300 pfr_update_stats(tr->dst.addr.p.tbl,
6301 (s == NULL) ? pd.dst :
6302 &s->key[(s->direction == PF_IN)]->
6303 addr[(s->direction == PF_IN)],
6304 pd.af, pd.tot_len, dir == PF_OUT,
6305 r->action == PF_PASS, tr->dst.neg);
6309 case PF_SYNPROXY_DROP:
6320 /* pf_route() returns unlocked. */
6322 pf_route(m0, r, dir, kif->pfik_ifp, s, &pd, inp);
6330 SDT_PROBE4(pf, ip, test, done, action, reason, r, s);
6337 int
6338 pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
6339 {
6340 struct pfi_kkif *kif;
6341 u_short action, reason = 0, log = 0;
6342 struct mbuf *m = *m0, *n = NULL;
6344 struct ip6_hdr *h = NULL;
6345 struct pf_krule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
6346 struct pf_state *s = NULL;
6347 struct pf_kruleset *ruleset = NULL;
6349 int off, terminal = 0, dirndx, rh_cnt = 0, pqid = 0;
6351 PF_RULES_RLOCK_TRACKER;
6354 if (!V_pf_status.running)
6355 return (PF_PASS);
6357 memset(&pd, 0, sizeof(pd));
6358 pd.pf_mtag = pf_find_mtag(m);
6360 if (pd.pf_mtag && pd.pf_mtag->flags & PF_TAG_GENERATED)
6363 kif = (struct pfi_kkif *)ifp->if_pf_kif;
6364 if (kif == NULL) {
6365 DPFPRINTF(PF_DEBUG_URGENT,
6366 ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
6367 return (PF_DROP);
6368 }
6369 if (kif->pfik_flags & PFI_IFLAG_SKIP)
6370 return (PF_PASS);
6372 if (m->m_flags & M_SKIP_FIREWALL)
6373 return (PF_PASS);
6377 /* We do IP header normalization and packet reassembly here */
6378 if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
6382 m = *m0; /* pf_normalize messes with m0 */
6383 h = mtod(m, struct ip6_hdr *);
6385 /*
6386 * We do not support jumbograms. If we keep going, a zero ip6_plen
6387 * will do something bad, so drop the packet for now.
6388 */
6389 if (htons(h->ip6_plen) == 0) {
6390 action = PF_DROP;
6391 REASON_SET(&reason, PFRES_NORM); /*XXX*/
6392 log = 1;
6393 goto done;
6394 }
6395 pd.src = (struct pf_addr *)&h->ip6_src;
6396 pd.dst = (struct pf_addr *)&h->ip6_dst;
6397 pd.sport = pd.dport = NULL;
6399 pd.proto_sum = NULL;
6401 pd.sidx = (dir == PF_IN) ? 0 : 1;
6402 pd.didx = (dir == PF_IN) ? 1 : 0;
6404 pd.tos = IPV6_DSCP(h);
6405 pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
6407 off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
6408 pd.proto = h->ip6_nxt;
6411 case IPPROTO_FRAGMENT:
6412 action = pf_test_fragment(&r, dir, kif, m, h,
6414 if (action == PF_DROP)
6415 REASON_SET(&reason, PFRES_FRAG);
6417 case IPPROTO_ROUTING: {
6418 struct ip6_rthdr rthdr;
6421 DPFPRINTF(PF_DEBUG_MISC,
6422 ("pf: IPv6 more than one rthdr\n"));
6424 REASON_SET(&reason, PFRES_IPOPTIONS);
6428 if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
6430 DPFPRINTF(PF_DEBUG_MISC,
6431 ("pf: IPv6 short rthdr\n"));
6433 REASON_SET(&reason, PFRES_SHORT);
6437 if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
6438 DPFPRINTF(PF_DEBUG_MISC,
6439 ("pf: IPv6 rthdr0\n"));
6441 REASON_SET(&reason, PFRES_IPOPTIONS);
6448 case IPPROTO_HOPOPTS:
6449 case IPPROTO_DSTOPTS: {
6450 /* get next header and header length */
6451 struct ip6_ext opt6;
6453 if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
6454 NULL, &reason, pd.af)) {
6455 DPFPRINTF(PF_DEBUG_MISC,
6456 ("pf: IPv6 short opt\n"));
6461 if (pd.proto == IPPROTO_AH)
6462 off += (opt6.ip6e_len + 2) * 4;
6464 off += (opt6.ip6e_len + 1) * 8;
6465 pd.proto = opt6.ip6e_nxt;
6466 /* go to the next header */
6467 break;
6468 }
6469 default:
6470 terminal = 1;
6471 break;
6472 }
6473 } while (!terminal);
6475 /* if there's no routing header, use unmodified mbuf for checksumming */
6484 if (!pf_pull_hdr(m, off, &th, sizeof(th),
6485 &action, &reason, AF_INET6)) {
6486 log = action != PF_PASS;
6489 pd.p_len = pd.tot_len - off - (th.th_off << 2);
6490 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6491 if (action == PF_DROP)
6493 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6495 if (action == PF_PASS) {
6496 if (V_pfsync_update_state_ptr != NULL)
6497 V_pfsync_update_state_ptr(s);
6501 } else if (s == NULL)
6502 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6511 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
6512 &action, &reason, AF_INET6)) {
6513 log = action != PF_PASS;
6516 if (uh.uh_dport == 0 ||
6517 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
6518 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
6519 action = PF_DROP;
6520 REASON_SET(&reason, PFRES_SHORT);
6521 goto done;
6522 }
6523 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6524 if (action == PF_PASS) {
6525 if (V_pfsync_update_state_ptr != NULL)
6526 V_pfsync_update_state_ptr(s);
6530 } else if (s == NULL)
6531 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6536 case IPPROTO_ICMP: {
6538 DPFPRINTF(PF_DEBUG_MISC,
6539 ("pf: dropping IPv6 packet with ICMPv4 payload\n"));
6543 case IPPROTO_ICMPV6: {
6544 struct icmp6_hdr ih;
6547 if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
6548 &action, &reason, AF_INET6)) {
6549 log = action != PF_PASS;
6552 action = pf_test_state_icmp(&s, dir, kif,
6553 m, off, h, &pd, &reason);
6554 if (action == PF_PASS) {
6555 if (V_pfsync_update_state_ptr != NULL)
6556 V_pfsync_update_state_ptr(s);
6560 } else if (s == NULL)
6561 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6567 action = pf_test_state_other(&s, dir, kif, m, &pd);
6568 if (action == PF_PASS) {
6569 if (V_pfsync_update_state_ptr != NULL)
6570 V_pfsync_update_state_ptr(s);
6574 } else if (s == NULL)
6575 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6587 /* handle dangerous IPv6 extension headers. */
6588 if (action == PF_PASS && rh_cnt &&
6589 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6591 REASON_SET(&reason, PFRES_IPOPTIONS);
6593 DPFPRINTF(PF_DEBUG_MISC,
6594 ("pf: dropping packet with dangerous v6 headers\n"));
6597 if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
6598 action = PF_DROP;
6599 REASON_SET(&reason, PFRES_MEMORY);
6600 }
6601 if (r->rtableid >= 0)
6602 M_SETFIB(m, r->rtableid);
6604 if (r->scrub_flags & PFSTATE_SETPRIO) {
6605 if (pd.tos & IPTOS_LOWDELAY)
6606 pqid = 1;
6607 if (pf_ieee8021q_setpcp(m, r->set_prio[pqid])) {
6608 action = PF_DROP;
6609 REASON_SET(&reason, PFRES_MEMORY);
6610 log = 1;
6611 DPFPRINTF(PF_DEBUG_MISC,
6612 ("pf: failed to allocate 802.1q mtag\n"));
6613 }
6614 }
6617 if (action == PF_PASS && r->qid) {
6618 if (pd.pf_mtag == NULL &&
6619 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6620 action = PF_DROP;
6621 REASON_SET(&reason, PFRES_MEMORY);
6622 } else {
6623 if (s != NULL)
6624 pd.pf_mtag->qid_hash = pf_state_hash(s);
6625 if (pd.tos & IPTOS_LOWDELAY)
6626 pd.pf_mtag->qid = r->pqid;
6628 pd.pf_mtag->qid = r->qid;
6629 /* Add hints for ecn. */
6630 pd.pf_mtag->hdr = h;
6635 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6636 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6637 (s->nat_rule.ptr->action == PF_RDR ||
6638 s->nat_rule.ptr->action == PF_BINAT) &&
6639 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
6640 m->m_flags |= M_SKIP_FIREWALL;
6642 /* XXX: Anybody working on it?! */
6644 printf("pf: divert(9) is not supported for IPv6\n");
6647 struct pf_krule *lr;
6649 if (s != NULL && s->nat_rule.ptr != NULL &&
6650 s->nat_rule.ptr->log & PF_LOG_ALL)
6651 lr = s->nat_rule.ptr;
6654 PFLOG_PACKET(kif, m, AF_INET6, dir, reason, lr, a, ruleset,
6658 counter_u64_add(kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS],
6660 counter_u64_add(kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS],
6663 if (action == PF_PASS || r->action == PF_DROP) {
6664 dirndx = (dir == PF_OUT);
6665 counter_u64_add(r->packets[dirndx], 1);
6666 counter_u64_add(r->bytes[dirndx], pd.tot_len);
6668 counter_u64_add(a->packets[dirndx], 1);
6669 counter_u64_add(a->bytes[dirndx], pd.tot_len);
6672 if (s->nat_rule.ptr != NULL) {
6673 counter_u64_add(s->nat_rule.ptr->packets[dirndx],
6675 counter_u64_add(s->nat_rule.ptr->bytes[dirndx],
6678 if (s->src_node != NULL) {
6679 counter_u64_add(s->src_node->packets[dirndx],
6681 counter_u64_add(s->src_node->bytes[dirndx],
6684 if (s->nat_src_node != NULL) {
6685 counter_u64_add(s->nat_src_node->packets[dirndx],
6687 counter_u64_add(s->nat_src_node->bytes[dirndx],
6690 dirndx = (dir == s->direction) ? 0 : 1;
6691 counter_u64_add(s->packets[dirndx], 1);
6692 counter_u64_add(s->bytes[dirndx], pd.tot_len);
6695 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6696 if (nr != NULL && r == &V_pf_default_rule)
6697 tr = nr;
6698 if (tr->src.addr.type == PF_ADDR_TABLE)
6699 pfr_update_stats(tr->src.addr.p.tbl,
6700 (s == NULL) ? pd.src :
6701 &s->key[(s->direction == PF_IN)]->addr[0],
6702 pd.af, pd.tot_len, dir == PF_OUT,
6703 r->action == PF_PASS, tr->src.neg);
6704 if (tr->dst.addr.type == PF_ADDR_TABLE)
6705 pfr_update_stats(tr->dst.addr.p.tbl,
6706 (s == NULL) ? pd.dst :
6707 &s->key[(s->direction == PF_IN)]->addr[1],
6708 pd.af, pd.tot_len, dir == PF_OUT,
6709 r->action == PF_PASS, tr->dst.neg);
6713 case PF_SYNPROXY_DROP:
6724 /* pf_route6() returns unlocked. */
6726 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd, inp);
6735 /* If reassembled packet passed, create new fragments. */
6736 if (action == PF_PASS && *m0 && (pflags & PFIL_FWD) &&
6737 (mtag = m_tag_find(m, PF_REASSEMBLED, NULL)) != NULL)
6738 action = pf_refragment6(ifp, m0, mtag);
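/*
 * pf_refragment6() reverses the reassembly that normalization did on
 * a forwarded packet: the PF_REASSEMBLED tag carries what is needed
 * to rebuild the fragments, so the packet leaves the box fragmented
 * the way it arrived and end hosts see consistent behaviour.
 */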
6740 SDT_PROBE4(pf, ip, test6, done, action, reason, r, s);