/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2008 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/md5.h>
#include <sys/random.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/ucred.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/route.h>
#include <net/radix_mpath.h>
#include <net/vnet.h>

#include <net/pfil.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>
#include <net/if_pfsync.h>

#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/in_fib.h>
#include <netinet/ip.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <netpfil/ipfw/ip_fw_private.h> /* XXX: only for DIR_IN/DIR_OUT */

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_fib.h>
#include <netinet6/scope6_var.h>
#endif /* INET6 */

#include <machine/in_cksum.h>
#include <security/mac/mac_framework.h>
#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x
VNET_DEFINE(struct pf_altqqueue,	 pf_altqs[2]);
VNET_DEFINE(struct pf_palist,		 pf_pabuf);
VNET_DEFINE(struct pf_altqqueue *,	 pf_altqs_active);
VNET_DEFINE(struct pf_altqqueue *,	 pf_altqs_inactive);
VNET_DEFINE(struct pf_kstatus,		 pf_status);

VNET_DEFINE(u_int32_t,			 ticket_altqs_active);
VNET_DEFINE(u_int32_t,			 ticket_altqs_inactive);
VNET_DEFINE(int,			 altqs_inactive_open);
VNET_DEFINE(u_int32_t,			 ticket_pabuf);

VNET_DEFINE(MD5_CTX,			 pf_tcp_secret_ctx);
#define	V_pf_tcp_secret_ctx		 VNET(pf_tcp_secret_ctx)
VNET_DEFINE(u_char,			 pf_tcp_secret[16]);
#define	V_pf_tcp_secret			 VNET(pf_tcp_secret)
VNET_DEFINE(int,			 pf_tcp_secret_init);
#define	V_pf_tcp_secret_init		 VNET(pf_tcp_secret_init)
VNET_DEFINE(int,			 pf_tcp_iss_off);
#define	V_pf_tcp_iss_off		 VNET(pf_tcp_iss_off)
VNET_DECLARE(int,			 pf_vnet_active);
#define	V_pf_vnet_active		 VNET(pf_vnet_active)

VNET_DEFINE_STATIC(uint32_t, pf_purge_idx);
#define	V_pf_purge_idx	VNET(pf_purge_idx)
/*
 * Queue for pf_intr() sends.
 */
static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations");
struct pf_send_entry {
	STAILQ_ENTRY(pf_send_entry)	pfse_next;
	struct mbuf			*pfse_m;
	enum {
		PFSE_IP,
		PFSE_IP6,
		PFSE_ICMP,
		PFSE_ICMP6,
	}				pfse_type;
	struct {
		int		type;
		int		code;
		int		mtu;
	} icmpopts;
};

STAILQ_HEAD(pf_send_head, pf_send_entry);
VNET_DEFINE_STATIC(struct pf_send_head, pf_sendqueue);
#define	V_pf_sendqueue	VNET(pf_sendqueue)

static struct mtx pf_sendqueue_mtx;
MTX_SYSINIT(pf_sendqueue_mtx, &pf_sendqueue_mtx, "pf send queue", MTX_DEF);
#define	PF_SENDQ_LOCK()		mtx_lock(&pf_sendqueue_mtx)
#define	PF_SENDQ_UNLOCK()	mtx_unlock(&pf_sendqueue_mtx)
/*
 * Queue for pf_overload_task() tasks.
 */
struct pf_overload_entry {
	SLIST_ENTRY(pf_overload_entry)	next;
	struct pf_addr			addr;
	sa_family_t			af;
	uint8_t				dir;
	struct pf_rule			*rule;
};

SLIST_HEAD(pf_overload_head, pf_overload_entry);
VNET_DEFINE_STATIC(struct pf_overload_head, pf_overloadqueue);
#define	V_pf_overloadqueue	VNET(pf_overloadqueue)
VNET_DEFINE_STATIC(struct task, pf_overloadtask);
#define	V_pf_overloadtask	VNET(pf_overloadtask)

static struct mtx pf_overloadqueue_mtx;
MTX_SYSINIT(pf_overloadqueue_mtx, &pf_overloadqueue_mtx,
    "pf overload/flush queue", MTX_DEF);
#define	PF_OVERLOADQ_LOCK()	mtx_lock(&pf_overloadqueue_mtx)
#define	PF_OVERLOADQ_UNLOCK()	mtx_unlock(&pf_overloadqueue_mtx)
VNET_DEFINE(struct pf_rulequeue, pf_unlinked_rules);
struct mtx pf_unlnkdrules_mtx;
MTX_SYSINIT(pf_unlnkdrules_mtx, &pf_unlnkdrules_mtx, "pf unlinked rules",
    MTX_DEF);

VNET_DEFINE_STATIC(uma_zone_t,	pf_sources_z);
#define	V_pf_sources_z	VNET(pf_sources_z)
uma_zone_t		pf_mtag_z;
VNET_DEFINE(uma_zone_t,	 pf_state_z);
VNET_DEFINE(uma_zone_t,	 pf_state_key_z);
VNET_DEFINE(uint64_t, pf_stateid[MAXCPU]);
#define	PFID_CPUBITS	8
#define	PFID_CPUSHIFT	(sizeof(uint64_t) * NBBY - PFID_CPUBITS)
#define	PFID_CPUMASK	((uint64_t)((1 << PFID_CPUBITS) - 1) << PFID_CPUSHIFT)
#define	PFID_MAXID	(~PFID_CPUMASK)
CTASSERT((1 << PFID_CPUBITS) >= MAXCPU);
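
/*
 * Note added for clarity (not in the original source): state IDs pack
 * the allocating CPU into the top PFID_CPUBITS bits and a per-CPU
 * counter into the remaining 56.  For example, counter value 3 on
 * CPU 2 yields (2ULL << PFID_CPUSHIFT) | 3 = 0x0200000000000003,
 * before the htobe64() conversion applied in pf_state_insert().
 */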
static void		 pf_src_tree_remove_state(struct pf_state *);
static void		 pf_init_threshold(struct pf_threshold *, u_int32_t,
			    u_int32_t);
static void		 pf_add_threshold(struct pf_threshold *);
static int		 pf_check_threshold(struct pf_threshold *);

static void		 pf_change_ap(struct mbuf *, struct pf_addr *, u_int16_t *,
			    u_int16_t *, u_int16_t *, struct pf_addr *,
			    u_int16_t, u_int8_t, sa_family_t);
static int		 pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
			    struct tcphdr *, struct pf_state_peer *);
static void		 pf_change_icmp(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, struct pf_addr *, u_int16_t,
			    u_int16_t *, u_int16_t *, u_int16_t *,
			    u_int16_t *, u_int8_t, sa_family_t);
static void		 pf_send_tcp(struct mbuf *,
			    const struct pf_rule *, sa_family_t,
			    const struct pf_addr *, const struct pf_addr *,
			    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
			    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
			    u_int16_t, struct ifnet *);
static void		 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
			    sa_family_t, struct pf_rule *);
static void		 pf_detach_state(struct pf_state *);
static int		 pf_state_key_attach(struct pf_state_key *,
			    struct pf_state_key *, struct pf_state *);
static void		 pf_state_key_detach(struct pf_state *, int);
static int		 pf_state_key_ctor(void *, int, void *, int);
static u_int32_t	 pf_tcp_iss(struct pf_pdesc *);
static int		 pf_test_rule(struct pf_rule **, struct pf_state **,
			    int, struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **, struct inpcb *);
static int		 pf_create_state(struct pf_rule *, struct pf_rule *,
			    struct pf_rule *, struct pf_pdesc *,
			    struct pf_src_node *, struct pf_state_key *,
			    struct pf_state_key *, struct mbuf *, int,
			    u_int16_t, u_int16_t, int *, struct pfi_kif *,
			    struct pf_state **, int, u_int16_t, u_int16_t,
			    int);
static int		 pf_test_fragment(struct pf_rule **, int,
			    struct pfi_kif *, struct mbuf *, void *,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **);
static int		 pf_tcp_track_full(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, u_short *, int *);
static int		 pf_tcp_track_sloppy(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pf_pdesc *, u_short *);
static int		 pf_test_state_tcp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_udp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *);
static int		 pf_test_state_icmp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_other(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
static u_int8_t		 pf_get_wscale(struct mbuf *, int, u_int16_t,
			    sa_family_t);
static u_int16_t	 pf_get_mss(struct mbuf *, int, u_int16_t,
			    sa_family_t);
static u_int16_t	 pf_calc_mss(struct pf_addr *, sa_family_t,
			    int, u_int16_t);
static int		 pf_check_proto_cksum(struct mbuf *, int, int,
			    u_int8_t, sa_family_t);
static void		 pf_print_state_parts(struct pf_state *,
			    struct pf_state_key *, struct pf_state_key *);
static int		 pf_addr_wrap_neq(struct pf_addr_wrap *,
			    struct pf_addr_wrap *);
static struct pf_state	*pf_find_state(struct pfi_kif *,
			    struct pf_state_key_cmp *, u_int);
static int		 pf_src_connlimit(struct pf_state **);
static void		 pf_overload_task(void *v, int pending);
static int		 pf_insert_src_node(struct pf_src_node **,
			    struct pf_rule *, struct pf_addr *, sa_family_t);
static u_int		 pf_purge_expired_states(u_int, int);
static void		 pf_purge_unlinked_rules(void);
static int		 pf_mtag_uminit(void *, int, int);
static void		 pf_mtag_free(struct m_tag *);
#ifdef INET
static void		 pf_route(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *, struct inpcb *);
#endif /* INET */
#ifdef INET6
static void		 pf_change_a6(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, u_int8_t);
static void		 pf_route6(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *, struct inpcb *);
#endif /* INET6 */

int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);

extern int pf_end_threads;
extern struct proc *pf_purge_proc;

VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);
#define	PACKET_LOOPED(pd)	((pd)->pf_mtag &&			\
				 (pd)->pf_mtag->flags & PF_PACKET_LOOPED)

#define	STATE_LOOKUP(i, k, d, s, pd)					\
	do {								\
		(s) = pf_find_state((i), (k), (d));			\
		if ((s) == NULL)					\
			return (PF_DROP);				\
		if (PACKET_LOOPED(pd))					\
			return (PF_PASS);				\
		if ((d) == PF_OUT &&					\
		    (((s)->rule.ptr->rt == PF_ROUTETO &&		\
		    (s)->rule.ptr->direction == PF_OUT) ||		\
		    ((s)->rule.ptr->rt == PF_REPLYTO &&			\
		    (s)->rule.ptr->direction == PF_IN)) &&		\
		    (s)->rt_kif != NULL &&				\
		    (s)->rt_kif != (i))					\
			return (PF_PASS);				\
	} while (0)
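
/*
 * Note added for clarity: STATE_LOOKUP() is shared by the
 * pf_test_state_*() handlers.  It drops the packet if no state
 * matches, passes packets that already looped through pf once, and
 * passes route-to/reply-to traffic whose state is bound to a
 * different routing interface, since that traffic is re-tested when
 * pf_test() runs on the routed interface itself.
 */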
#define	BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all

#define	STATE_INC_COUNTERS(s)						\
	do {								\
		counter_u64_add(s->rule.ptr->states_cur, 1);		\
		counter_u64_add(s->rule.ptr->states_tot, 1);		\
		if (s->anchor.ptr != NULL) {				\
			counter_u64_add(s->anchor.ptr->states_cur, 1);	\
			counter_u64_add(s->anchor.ptr->states_tot, 1);	\
		}							\
		if (s->nat_rule.ptr != NULL) {				\
			counter_u64_add(s->nat_rule.ptr->states_cur, 1);\
			counter_u64_add(s->nat_rule.ptr->states_tot, 1);\
		}							\
	} while (0)

#define	STATE_DEC_COUNTERS(s)						\
	do {								\
		if (s->nat_rule.ptr != NULL)				\
			counter_u64_add(s->nat_rule.ptr->states_cur, -1);\
		if (s->anchor.ptr != NULL)				\
			counter_u64_add(s->anchor.ptr->states_cur, -1);	\
		counter_u64_add(s->rule.ptr->states_cur, -1);		\
	} while (0)
static MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
VNET_DEFINE(struct pf_idhash *, pf_idhash);
VNET_DEFINE(struct pf_srchash *, pf_srchash);

SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW, 0, "pf(4)");

u_long	pf_hashmask;
u_long	pf_srchashmask;
static u_long	pf_hashsize;
static u_long	pf_srchashsize;
u_long	pf_ioctl_maxcount = 65535;

SYSCTL_ULONG(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN,
    &pf_hashsize, 0, "Size of pf(4) states hashtable");
SYSCTL_ULONG(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN,
    &pf_srchashsize, 0, "Size of pf(4) source nodes hashtable");
SYSCTL_ULONG(_net_pf, OID_AUTO, request_maxcount, CTLFLAG_RDTUN,
    &pf_ioctl_maxcount, 0,
    "Maximum number of tables, addresses, ... in a single ioctl() call");

VNET_DEFINE(void *, pf_swi_cookie);

VNET_DEFINE(uint32_t, pf_hashseed);
#define	V_pf_hashseed	VNET(pf_hashseed)
int
pf_addr_cmp(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
{

	switch (af) {
#ifdef INET
	case AF_INET:
		if (a->addr32[0] > b->addr32[0])
			return (1);
		if (a->addr32[0] < b->addr32[0])
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (a->addr32[3] > b->addr32[3])
			return (1);
		if (a->addr32[3] < b->addr32[3])
			return (-1);
		if (a->addr32[2] > b->addr32[2])
			return (1);
		if (a->addr32[2] < b->addr32[2])
			return (-1);
		if (a->addr32[1] > b->addr32[1])
			return (1);
		if (a->addr32[1] < b->addr32[1])
			return (-1);
		if (a->addr32[0] > b->addr32[0])
			return (1);
		if (a->addr32[0] < b->addr32[0])
			return (-1);
		break;
#endif /* INET6 */
	default:
		panic("%s: unknown address family %u", __func__, af);
	}

	return (0);
}
static __inline uint32_t
pf_hashkey(struct pf_state_key *sk)
{
	uint32_t h;

	h = murmur3_32_hash32((uint32_t *)sk,
	    sizeof(struct pf_state_key_cmp)/sizeof(uint32_t),
	    V_pf_hashseed);

	return (h & pf_hashmask);
}

static __inline uint32_t
pf_hashsrc(struct pf_addr *addr, sa_family_t af)
{
	uint32_t h;

	switch (af) {
	case AF_INET:
		h = murmur3_32_hash32((uint32_t *)&addr->v4,
		    sizeof(addr->v4)/sizeof(uint32_t), V_pf_hashseed);
		break;
	case AF_INET6:
		h = murmur3_32_hash32((uint32_t *)&addr->v6,
		    sizeof(addr->v6)/sizeof(uint32_t), V_pf_hashseed);
		break;
	default:
		panic("%s: unknown address family %u", __func__, af);
	}

	return (h & pf_srchashmask);
}
#ifdef ALTQ
static int
pf_state_hash(struct pf_state *s)
{
	u_int32_t hv = (intptr_t)s / sizeof(*s);

	hv ^= crc32(&s->src, sizeof(s->src));
	hv ^= crc32(&s->dst, sizeof(s->dst));
	if (hv == 0)
		hv = 1;
	return (hv);
}
#endif
void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
#endif /* INET */
	case AF_INET6:
		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];
		break;
	}
}
static void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = time_uptime;
}

static void
pf_add_threshold(struct pf_threshold *threshold)
{
	u_int32_t t = time_uptime, diff = t - threshold->last;

	if (diff >= threshold->seconds)
		threshold->count = 0;
	else
		threshold->count -= threshold->count * diff /
		    threshold->seconds;
	threshold->count += PF_THRESHOLD_MULT;
	threshold->last = t;
}

static int
pf_check_threshold(struct pf_threshold *threshold)
{
	return (threshold->count > threshold->limit);
}
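
/*
 * Worked example, added for illustration (PF_THRESHOLD_MULT is 1000):
 * "max-src-conn-rate 10/5" gives limit = 10 * 1000 and seconds = 5.
 * pf_add_threshold() first decays count by count * diff / seconds and
 * then adds 1000, so a count of 10000 revisited 2 seconds later
 * becomes 10000 - 10000 * 2 / 5 + 1000 = 7000; pf_check_threshold()
 * trips once count exceeds 10000.
 */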
static int
pf_src_connlimit(struct pf_state **state)
{
	struct pf_overload_entry *pfoe;
	int bad = 0;

	PF_STATE_LOCK_ASSERT(*state);

	(*state)->src_node->conn++;
	(*state)->src.tcp_est = 1;
	pf_add_threshold(&(*state)->src_node->conn_rate);

	if ((*state)->rule.ptr->max_src_conn &&
	    (*state)->rule.ptr->max_src_conn <
	    (*state)->src_node->conn) {
		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONN], 1);
		bad++;
	}

	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONNRATE], 1);
		bad++;
	}

	if (!bad)
		return (0);

	/* Kill this state. */
	(*state)->timeout = PFTM_PURGE;
	(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;

	if ((*state)->rule.ptr->overload_tbl == NULL)
		return (1);

	/* Schedule overloading and flushing task. */
	pfoe = malloc(sizeof(*pfoe), M_PFTEMP, M_NOWAIT);
	if (pfoe == NULL)
		return (1);	/* too bad :( */

	bcopy(&(*state)->src_node->addr, &pfoe->addr, sizeof(pfoe->addr));
	pfoe->af = (*state)->key[PF_SK_WIRE]->af;
	pfoe->rule = (*state)->rule.ptr;
	pfoe->dir = (*state)->direction;
	PF_OVERLOADQ_LOCK();
	SLIST_INSERT_HEAD(&V_pf_overloadqueue, pfoe, next);
	PF_OVERLOADQ_UNLOCK();
	taskqueue_enqueue(taskqueue_swi, &V_pf_overloadtask);

	return (1);
}
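
/*
 * Example, added for illustration: this is the path behind pf.conf
 * state options such as
 *
 *   pass in proto tcp to $srv port ssh keep state \
 *       (max-src-conn 100, max-src-conn-rate 15/5, \
 *        overload <bruteforce> flush global)
 *
 * A source that trips either limit is queued here so that
 * pf_overload_task() inserts it into the <bruteforce> table; with
 * "flush global" (PF_FLUSH_GLOBAL) its states are killed regardless
 * of which rule created them.
 */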
static void
pf_overload_task(void *v, int pending)
{
	struct pf_overload_head queue;
	struct pfr_addr p;
	struct pf_overload_entry *pfoe, *pfoe1;
	uint32_t killed = 0;

	CURVNET_SET((struct vnet *)v);

	PF_OVERLOADQ_LOCK();
	queue = V_pf_overloadqueue;
	SLIST_INIT(&V_pf_overloadqueue);
	PF_OVERLOADQ_UNLOCK();

	bzero(&p, sizeof(p));
	SLIST_FOREACH(pfoe, &queue, next) {
		counter_u64_add(V_pf_status.lcounters[LCNT_OVERLOAD_TABLE], 1);
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("%s: blocking address ", __func__);
			pf_print_host(&pfoe->addr, 0, pfoe->af);
			printf("\n");
		}

		p.pfra_af = pfoe->af;
		switch (pfoe->af) {
#ifdef INET
		case AF_INET:
			p.pfra_net = 32;
			p.pfra_ip4addr = pfoe->addr.v4;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			p.pfra_net = 128;
			p.pfra_ip6addr = pfoe->addr.v6;
			break;
#endif
		}

		PF_RULES_WLOCK();
		pfr_insert_kentry(pfoe->rule->overload_tbl, &p, time_second);
		PF_RULES_WUNLOCK();
	}

	/*
	 * Remove those entries that don't need flushing.
	 */
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		if (pfoe->rule->flush == 0) {
			SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next);
			free(pfoe, M_PFTEMP);
		} else
			counter_u64_add(
			    V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH], 1);

	/* If nothing to flush, return. */
	if (SLIST_EMPTY(&queue)) {
		CURVNET_RESTORE();
		return;
	}

	for (int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_state_key *sk;
		struct pf_state *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			sk = s->key[PF_SK_WIRE];
			SLIST_FOREACH(pfoe, &queue, next)
				if (sk->af == pfoe->af &&
				    ((pfoe->rule->flush & PF_FLUSH_GLOBAL) ||
				    pfoe->rule == s->rule.ptr) &&
				    ((pfoe->dir == PF_OUT &&
				    PF_AEQ(&pfoe->addr, &sk->addr[1], sk->af)) ||
				    (pfoe->dir == PF_IN &&
				    PF_AEQ(&pfoe->addr, &sk->addr[0], sk->af)))) {
					s->timeout = PFTM_PURGE;
					s->src.state = s->dst.state = TCPS_CLOSED;
					killed++;
				}
		}
		PF_HASHROW_UNLOCK(ih);
	}
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		free(pfoe, M_PFTEMP);
	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("%s: %u states killed\n", __func__, killed);

	CURVNET_RESTORE();
}
/*
 * Can return locked on failure, so that we can consistently
 * allocate and insert a new one.
 */
struct pf_src_node *
pf_find_src_node(struct pf_addr *src, struct pf_rule *rule, sa_family_t af,
	int returnlocked)
{
	struct pf_srchash *sh;
	struct pf_src_node *n;

	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_SEARCH], 1);

	sh = &V_pf_srchash[pf_hashsrc(src, af)];
	PF_HASHROW_LOCK(sh);
	LIST_FOREACH(n, &sh->nodes, entry)
		if (n->rule.ptr == rule && n->af == af &&
		    ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) ||
		    (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0)))
			break;
	if (n != NULL) {
		n->states++;
		PF_HASHROW_UNLOCK(sh);
	} else if (returnlocked == 0)
		PF_HASHROW_UNLOCK(sh);

	return (n);
}
static int
pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
    struct pf_addr *src, sa_family_t af)
{

	KASSERT((rule->rule_flag & PFRULE_RULESRCTRACK ||
	    rule->rpool.opts & PF_POOL_STICKYADDR),
	    ("%s for non-tracking rule %p", __func__, rule));

	if (*sn == NULL)
		*sn = pf_find_src_node(src, rule, af, 1);

	if (*sn == NULL) {
		struct pf_srchash *sh = &V_pf_srchash[pf_hashsrc(src, af)];

		PF_HASHROW_ASSERT(sh);

		if (!rule->max_src_nodes ||
		    counter_u64_fetch(rule->src_nodes) < rule->max_src_nodes)
			(*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO);
		else
			counter_u64_add(V_pf_status.lcounters[LCNT_SRCNODES],
			    1);
		if ((*sn) == NULL) {
			PF_HASHROW_UNLOCK(sh);
			return (-1);
		}

		pf_init_threshold(&(*sn)->conn_rate,
		    rule->max_src_conn_rate.limit,
		    rule->max_src_conn_rate.seconds);

		(*sn)->af = af;
		(*sn)->rule.ptr = rule;
		PF_ACPY(&(*sn)->addr, src, af);
		LIST_INSERT_HEAD(&sh->nodes, *sn, entry);
		(*sn)->creation = time_uptime;
		(*sn)->ruletype = rule->action;
		(*sn)->states = 1;
		if ((*sn)->rule.ptr != NULL)
			counter_u64_add((*sn)->rule.ptr->src_nodes, 1);
		PF_HASHROW_UNLOCK(sh);
		counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_INSERT], 1);
	} else {
		if (rule->max_src_states &&
		    (*sn)->states >= rule->max_src_states) {
			counter_u64_add(V_pf_status.lcounters[LCNT_SRCSTATES],
			    1);
			return (-1);
		}
	}
	return (0);
}
void
pf_unlink_src_node(struct pf_src_node *src)
{

	PF_HASHROW_ASSERT(&V_pf_srchash[pf_hashsrc(&src->addr, src->af)]);
	LIST_REMOVE(src, entry);
	if (src->rule.ptr)
		counter_u64_add(src->rule.ptr->src_nodes, -1);
}

u_int
pf_free_src_nodes(struct pf_src_node_list *head)
{
	struct pf_src_node *sn, *tmp;
	u_int count = 0;

	LIST_FOREACH_SAFE(sn, head, entry, tmp) {
		uma_zfree(V_pf_sources_z, sn);
		count++;
	}

	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], count);

	return (count);
}
void
pf_mtag_initialize()
{

	pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) +
	    sizeof(struct pf_mtag), NULL, NULL, pf_mtag_uminit, NULL,
	    UMA_ALIGN_PTR, 0);
}

/* Per-vnet data storage structures initialization. */
void
pf_initialize()
{
	struct pf_keyhash	*kh;
	struct pf_idhash	*ih;
	struct pf_srchash	*sh;
	u_int i;

	if (pf_hashsize == 0 || !powerof2(pf_hashsize))
		pf_hashsize = PF_HASHSIZ;
	if (pf_srchashsize == 0 || !powerof2(pf_srchashsize))
		pf_srchashsize = PF_SRCHASHSIZ;

	V_pf_hashseed = arc4random();

	/* States and state keys storage. */
	V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_state),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z;
	uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT);
	uma_zone_set_warning(V_pf_state_z, "PF states limit reached");

	V_pf_state_key_z = uma_zcreate("pf state keys",
	    sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	V_pf_keyhash = mallocarray(pf_hashsize, sizeof(struct pf_keyhash),
	    M_PFHASH, M_NOWAIT | M_ZERO);
	V_pf_idhash = mallocarray(pf_hashsize, sizeof(struct pf_idhash),
	    M_PFHASH, M_NOWAIT | M_ZERO);
	if (V_pf_keyhash == NULL || V_pf_idhash == NULL) {
		printf("pf: Unable to allocate memory for "
		    "state_hashsize %lu.\n", pf_hashsize);

		free(V_pf_keyhash, M_PFHASH);
		free(V_pf_idhash, M_PFHASH);

		pf_hashsize = PF_HASHSIZ;
		V_pf_keyhash = mallocarray(pf_hashsize,
		    sizeof(struct pf_keyhash), M_PFHASH, M_WAITOK | M_ZERO);
		V_pf_idhash = mallocarray(pf_hashsize,
		    sizeof(struct pf_idhash), M_PFHASH, M_WAITOK | M_ZERO);
	}

	pf_hashmask = pf_hashsize - 1;
	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
	    i++, kh++, ih++) {
		mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK);
		mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF);
	}

	/* Source nodes. */
	V_pf_sources_z = uma_zcreate("pf source nodes",
	    sizeof(struct pf_src_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    0);
	V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z;
	uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT);
	uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached");

	V_pf_srchash = mallocarray(pf_srchashsize,
	    sizeof(struct pf_srchash), M_PFHASH, M_NOWAIT | M_ZERO);
	if (V_pf_srchash == NULL) {
		printf("pf: Unable to allocate memory for "
		    "source_hashsize %lu.\n", pf_srchashsize);

		pf_srchashsize = PF_SRCHASHSIZ;
		V_pf_srchash = mallocarray(pf_srchashsize,
		    sizeof(struct pf_srchash), M_PFHASH, M_WAITOK | M_ZERO);
	}

	pf_srchashmask = pf_srchashsize - 1;
	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++)
		mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);

	/* ALTQ */
	TAILQ_INIT(&V_pf_altqs[0]);
	TAILQ_INIT(&V_pf_altqs[1]);
	TAILQ_INIT(&V_pf_pabuf);
	V_pf_altqs_active = &V_pf_altqs[0];
	V_pf_altqs_inactive = &V_pf_altqs[1];

	/* Send & overload+flush queues. */
	STAILQ_INIT(&V_pf_sendqueue);
	SLIST_INIT(&V_pf_overloadqueue);
	TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, curvnet);

	/* Unlinked, but may be referenced rules. */
	TAILQ_INIT(&V_pf_unlinked_rules);
}
void
pf_mtag_cleanup()
{

	uma_zdestroy(pf_mtag_z);
}

void
pf_cleanup()
{
	struct pf_keyhash	*kh;
	struct pf_idhash	*ih;
	struct pf_srchash	*sh;
	struct pf_send_entry	*pfse, *next;
	u_int i;

	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
	    i++, kh++, ih++) {
		KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
		    __func__));
		KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty",
		    __func__));
		mtx_destroy(&kh->lock);
		mtx_destroy(&ih->lock);
	}
	free(V_pf_keyhash, M_PFHASH);
	free(V_pf_idhash, M_PFHASH);

	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
		KASSERT(LIST_EMPTY(&sh->nodes),
		    ("%s: source node hash not empty", __func__));
		mtx_destroy(&sh->lock);
	}
	free(V_pf_srchash, M_PFHASH);

	STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) {
		m_freem(pfse->pfse_m);
		free(pfse, M_PFTEMP);
	}

	uma_zdestroy(V_pf_sources_z);
	uma_zdestroy(V_pf_state_z);
	uma_zdestroy(V_pf_state_key_z);
}
static int
pf_mtag_uminit(void *mem, int size, int how)
{
	struct m_tag *t;

	t = (struct m_tag *)mem;
	t->m_tag_cookie = MTAG_ABI_COMPAT;
	t->m_tag_id = PACKET_TAG_PF;
	t->m_tag_len = sizeof(struct pf_mtag);
	t->m_tag_free = pf_mtag_free;

	return (0);
}

static void
pf_mtag_free(struct m_tag *t)
{

	uma_zfree(pf_mtag_z, t);
}

struct pf_mtag *
pf_get_mtag(struct mbuf *m)
{
	struct m_tag *mtag;

	if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL)
		return ((struct pf_mtag *)(mtag + 1));

	mtag = uma_zalloc(pf_mtag_z, M_NOWAIT);
	if (mtag == NULL)
		return (NULL);
	bzero(mtag + 1, sizeof(struct pf_mtag));
	m_tag_prepend(m, mtag);

	return ((struct pf_mtag *)(mtag + 1));
}
static int
pf_state_key_attach(struct pf_state_key *skw, struct pf_state_key *sks,
    struct pf_state *s)
{
	struct pf_keyhash	*khs, *khw, *kh;
	struct pf_state_key	*sk, *cur;
	struct pf_state		*si, *olds = NULL;
	int idx;

	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
	KASSERT(s->key[PF_SK_WIRE] == NULL, ("%s: state has key", __func__));
	KASSERT(s->key[PF_SK_STACK] == NULL, ("%s: state has key", __func__));

	/*
	 * We need to lock hash slots of both keys. To avoid deadlock
	 * we always lock the slot with lower address first. Unlock order
	 * isn't important.
	 *
	 * We also need to lock ID hash slot before dropping key
	 * locks. On success we return with ID hash slot locked.
	 */

	if (skw == sks) {
		khs = khw = &V_pf_keyhash[pf_hashkey(skw)];
		PF_HASHROW_LOCK(khs);
	} else {
		khs = &V_pf_keyhash[pf_hashkey(sks)];
		khw = &V_pf_keyhash[pf_hashkey(skw)];
		if (khs == khw) {
			PF_HASHROW_LOCK(khs);
		} else if (khs < khw) {
			PF_HASHROW_LOCK(khs);
			PF_HASHROW_LOCK(khw);
		} else {
			PF_HASHROW_LOCK(khw);
			PF_HASHROW_LOCK(khs);
		}
	}

#define	KEYS_UNLOCK()	do {			\
	if (khs != khw) {			\
		PF_HASHROW_UNLOCK(khs);		\
		PF_HASHROW_UNLOCK(khw);		\
	} else					\
		PF_HASHROW_UNLOCK(khs);		\
} while (0)

	/*
	 * First run: start with wire key.
	 */
	sk = skw;
	kh = khw;
	idx = PF_SK_WIRE;

keyattach:
	LIST_FOREACH(cur, &kh->keys, entry)
		if (bcmp(cur, sk, sizeof(struct pf_state_key_cmp)) == 0)
			break;

	if (cur != NULL) {
		/* Key exists. Check for same kif, if none, add to key. */
		TAILQ_FOREACH(si, &cur->states[idx], key_list[idx]) {
			struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(si)];

			PF_HASHROW_LOCK(ih);
			if (si->kif == s->kif &&
			    si->direction == s->direction) {
				if (sk->proto == IPPROTO_TCP &&
				    si->src.state >= TCPS_FIN_WAIT_2 &&
				    si->dst.state >= TCPS_FIN_WAIT_2) {
					/*
					 * New state matches an old >FIN_WAIT_2
					 * state. We can't drop key hash locks,
					 * thus we can't unlink it properly.
					 *
					 * As a workaround we drop it into
					 * TCPS_CLOSED state, schedule purge
					 * ASAP and push it into the very end
					 * of the slot TAILQ, so that it won't
					 * conflict with our new state.
					 */
					si->src.state = si->dst.state =
					    TCPS_CLOSED;
					si->timeout = PFTM_PURGE;
					olds = si;
				} else {
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						printf("pf: %s key attach "
						    "failed on %s: ",
						    (idx == PF_SK_WIRE) ?
						    "wire" : "stack",
						    s->kif->pfik_name);
						pf_print_state_parts(s,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						printf(", existing: ");
						pf_print_state_parts(si,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						printf("\n");
					}
					PF_HASHROW_UNLOCK(ih);
					KEYS_UNLOCK();
					uma_zfree(V_pf_state_key_z, sk);
					if (idx == PF_SK_STACK)
						pf_detach_state(s);
					return (EEXIST); /* collision! */
				}
			}
			PF_HASHROW_UNLOCK(ih);
		}
		uma_zfree(V_pf_state_key_z, sk);
		s->key[idx] = cur;
	} else {
		LIST_INSERT_HEAD(&kh->keys, sk, entry);
		s->key[idx] = sk;
	}

stateattach:
	/* List is sorted, if-bound states before floating. */
	if (s->kif == V_pfi_all)
		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]);
	else
		TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]);

	if (olds) {
		TAILQ_REMOVE(&s->key[idx]->states[idx], olds, key_list[idx]);
		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], olds,
		    key_list[idx]);
		olds = NULL;
	}

	/*
	 * Attach done. Decide how (or whether) we should
	 * attach a second key.
	 */
	if (sks == skw) {
		s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
		idx = PF_SK_STACK;
		sks = NULL;
		goto stateattach;
	} else if (sks != NULL) {
		/*
		 * Continue attaching with stack key.
		 */
		sk = sks;
		kh = khs;
		idx = PF_SK_STACK;
		sks = NULL;
		goto keyattach;
	}

	PF_STATE_LOCK(s);
	KEYS_UNLOCK();

	KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL,
	    ("%s failure", __func__));

	return (0);
}
static void
pf_detach_state(struct pf_state *s)
{
	struct pf_state_key *sks = s->key[PF_SK_STACK];
	struct pf_keyhash *kh;

	if (sks != NULL) {
		kh = &V_pf_keyhash[pf_hashkey(sks)];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_STACK] != NULL)
			pf_state_key_detach(s, PF_SK_STACK);
		/*
		 * If both point to same key, then we are done.
		 */
		if (sks == s->key[PF_SK_WIRE]) {
			pf_state_key_detach(s, PF_SK_WIRE);
			PF_HASHROW_UNLOCK(kh);
			return;
		}
		PF_HASHROW_UNLOCK(kh);
	}

	if (s->key[PF_SK_WIRE] != NULL) {
		kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_WIRE] != NULL)
			pf_state_key_detach(s, PF_SK_WIRE);
		PF_HASHROW_UNLOCK(kh);
	}
}

static void
pf_state_key_detach(struct pf_state *s, int idx)
{
	struct pf_state_key *sk = s->key[idx];
#ifdef INVARIANTS
	struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)];

	PF_HASHROW_ASSERT(kh);
#endif
	TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]);
	s->key[idx] = NULL;

	if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) {
		LIST_REMOVE(sk, entry);
		uma_zfree(V_pf_state_key_z, sk);
	}
}

static int
pf_state_key_ctor(void *mem, int size, void *arg, int flags)
{
	struct pf_state_key *sk = mem;

	bzero(sk, sizeof(struct pf_state_key_cmp));
	TAILQ_INIT(&sk->states[PF_SK_WIRE]);
	TAILQ_INIT(&sk->states[PF_SK_STACK]);

	return (0);
}
struct pf_state_key *
pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr,
	struct pf_addr *daddr, u_int16_t sport, u_int16_t dport)
{
	struct pf_state_key *sk;

	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
	if (sk == NULL)
		return (NULL);

	PF_ACPY(&sk->addr[pd->sidx], saddr, pd->af);
	PF_ACPY(&sk->addr[pd->didx], daddr, pd->af);
	sk->port[pd->sidx] = sport;
	sk->port[pd->didx] = dport;
	sk->proto = pd->proto;
	sk->af = pd->af;

	return (sk);
}

struct pf_state_key *
pf_state_key_clone(struct pf_state_key *orig)
{
	struct pf_state_key *sk;

	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
	if (sk == NULL)
		return (NULL);

	bcopy(orig, sk, sizeof(struct pf_state_key_cmp));

	return (sk);
}
int
pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
    struct pf_state_key *sks, struct pf_state *s)
{
	struct pf_idhash *ih;
	struct pf_state *cur;
	int error;

	KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]),
	    ("%s: sks not pristine", __func__));
	KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]),
	    ("%s: skw not pristine", __func__));
	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));

	s->kif = kif;

	if (s->id == 0 && s->creatorid == 0) {
		/* XXX: should be atomic, but probability of collision low */
		if ((s->id = V_pf_stateid[curcpu]++) == PFID_MAXID)
			V_pf_stateid[curcpu] = 1;
		s->id |= (uint64_t)curcpu << PFID_CPUSHIFT;
		s->id = htobe64(s->id);
		s->creatorid = V_pf_status.hostid;
	}

	/* Returns with ID locked on success. */
	if ((error = pf_state_key_attach(skw, sks, s)) != 0)
		return (error);

	ih = &V_pf_idhash[PF_IDHASH(s)];
	PF_HASHROW_ASSERT(ih);
	LIST_FOREACH(cur, &ih->states, entry)
		if (cur->id == s->id && cur->creatorid == s->creatorid)
			break;

	if (cur != NULL) {
		PF_HASHROW_UNLOCK(ih);
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("pf: state ID collision: "
			    "id: %016llx creatorid: %08x\n",
			    (unsigned long long)be64toh(s->id),
			    ntohl(s->creatorid));
		}
		pf_detach_state(s);
		return (EEXIST);
	}

	LIST_INSERT_HEAD(&ih->states, s, entry);
	/* One for keys, one for ID hash. */
	refcount_init(&s->refs, 2);

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_INSERT], 1);
	if (pfsync_insert_state_ptr != NULL)
		pfsync_insert_state_ptr(s);

	/* Returns locked. */
	return (0);
}
/*
 * Find state by ID: returns with locked row on success.
 */
struct pf_state *
pf_find_state_byid(uint64_t id, uint32_t creatorid)
{
	struct pf_idhash *ih;
	struct pf_state *s;

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	ih = &V_pf_idhash[(be64toh(id) % (pf_hashmask + 1))];

	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(s, &ih->states, entry)
		if (s->id == id && s->creatorid == creatorid)
			break;

	if (s == NULL)
		PF_HASHROW_UNLOCK(ih);

	return (s);
}

/*
 * Find state by key.
 * Returns with ID hash slot locked on success.
 */
static struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
{
	struct pf_keyhash	*kh;
	struct pf_state_key	*sk;
	struct pf_state		*s;
	int idx;

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
			break;
	if (sk == NULL) {
		PF_HASHROW_UNLOCK(kh);
		return (NULL);
	}

	idx = (dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK);

	/* List is sorted, if-bound states before floating ones. */
	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx])
		if (s->kif == V_pfi_all || s->kif == kif) {
			PF_STATE_LOCK(s);
			PF_HASHROW_UNLOCK(kh);
			if (s->timeout >= PFTM_MAX) {
				/*
				 * State is either being processed by
				 * pf_unlink_state() in another thread, or
				 * is scheduled for immediate expiry.
				 */
				PF_STATE_UNLOCK(s);
				return (NULL);
			}
			return (s);
		}
	PF_HASHROW_UNLOCK(kh);

	return (NULL);
}

struct pf_state *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
	struct pf_keyhash	*kh;
	struct pf_state_key	*sk;
	struct pf_state		*s, *ret = NULL;
	int			 idx, inout = 0;

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
			break;
	if (sk == NULL) {
		PF_HASHROW_UNLOCK(kh);
		return (NULL);
	}
	switch (dir) {
	case PF_IN:
		idx = PF_SK_WIRE;
		break;
	case PF_OUT:
		idx = PF_SK_STACK;
		break;
	case PF_OUT | PF_IN:
		idx = PF_SK_STACK;
		inout = 1;
		break;
	default:
		panic("%s: dir %u", __func__, dir);
	}
second_run:
	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) {
		if (more == NULL) {
			PF_HASHROW_UNLOCK(kh);
			return (s);
		}

		if (ret)
			(*more)++;
		else
			ret = s;
	}
	if (inout == 1) {
		inout = 0;
		idx = PF_SK_WIRE;
		goto second_run;
	}
	PF_HASHROW_UNLOCK(kh);

	return (ret);
}
/* END state table stuff */

static void
pf_send(struct pf_send_entry *pfse)
{

	PF_SENDQ_LOCK();
	STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next);
	PF_SENDQ_UNLOCK();
	swi_sched(V_pf_swi_cookie, 0);
}

void
pf_intr(void *v)
{
	struct pf_send_head queue;
	struct pf_send_entry *pfse, *next;

	CURVNET_SET((struct vnet *)v);

	PF_SENDQ_LOCK();
	queue = V_pf_sendqueue;
	STAILQ_INIT(&V_pf_sendqueue);
	PF_SENDQ_UNLOCK();

	STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) {
		switch (pfse->pfse_type) {
#ifdef INET
		case PFSE_IP:
			ip_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL);
			break;
		case PFSE_ICMP:
			icmp_error(pfse->pfse_m, pfse->icmpopts.type,
			    pfse->icmpopts.code, 0, pfse->icmpopts.mtu);
			break;
#endif /* INET */
#ifdef INET6
		case PFSE_IP6:
			ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL,
			    NULL);
			break;
		case PFSE_ICMP6:
			icmp6_error(pfse->pfse_m, pfse->icmpopts.type,
			    pfse->icmpopts.code, pfse->icmpopts.mtu);
			break;
#endif /* INET6 */
		default:
			panic("%s: unknown type", __func__);
		}
		free(pfse, M_PFTEMP);
	}
	CURVNET_RESTORE();
}
void
pf_purge_thread(void *unused __unused)
{
	VNET_ITERATOR_DECL(vnet_iter);

	sx_xlock(&pf_end_lock);
	while (pf_end_threads == 0) {
		sx_sleep(pf_purge_thread, &pf_end_lock, 0, "pftm", hz / 10);

		VNET_LIST_RLOCK();
		VNET_FOREACH(vnet_iter) {
			CURVNET_SET(vnet_iter);

			/* Wait until V_pf_default_rule is initialized. */
			if (V_pf_vnet_active == 0) {
				CURVNET_RESTORE();
				continue;
			}

			/*
			 * Process 1/interval fraction of the state
			 * table every run.
			 */
			V_pf_purge_idx =
			    pf_purge_expired_states(V_pf_purge_idx, pf_hashmask /
			    (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));

			/*
			 * Purge other expired types every
			 * PFTM_INTERVAL seconds.
			 */
			if (V_pf_purge_idx == 0) {
				/*
				 * Order is important:
				 * - states and src nodes reference rules
				 * - states and rules reference kifs
				 */
				pf_purge_expired_fragments();
				pf_purge_expired_src_nodes();
				pf_purge_unlinked_rules();
				pfi_kif_purge();
			}
			CURVNET_RESTORE();
		}
		VNET_LIST_RUNLOCK();
	}

	pf_end_threads++;
	sx_xunlock(&pf_end_lock);
	kproc_exit();
}

void
pf_unload_vnet_purge(void)
{

	/*
	 * To clean up all kifs and rules we need
	 * two runs: first one clears reference flags,
	 * then pf_purge_expired_states() doesn't
	 * raise them, and then second run frees.
	 */
	pf_purge_unlinked_rules();
	pfi_kif_purge();

	/*
	 * Now purge everything.
	 */
	pf_purge_expired_states(0, pf_hashmask);
	pf_purge_fragments(UINT_MAX);
	pf_purge_expired_src_nodes();

	/*
	 * Now all kifs & rules should be unreferenced,
	 * thus should be successfully freed.
	 */
	pf_purge_unlinked_rules();
	pfi_kif_purge();
}
u_int32_t
pf_state_expires(const struct pf_state *state)
{
	u_int32_t	timeout;
	u_int32_t	start;
	u_int32_t	end;
	u_int32_t	states;

	/* handle all PFTM_* > PFTM_MAX here */
	if (state->timeout == PFTM_PURGE)
		return (time_uptime);
	KASSERT(state->timeout != PFTM_UNLINKED,
	    ("pf_state_expires: timeout == PFTM_UNLINKED"));
	KASSERT((state->timeout < PFTM_MAX),
	    ("pf_state_expires: timeout > PFTM_MAX"));
	timeout = state->rule.ptr->timeout[state->timeout];
	if (!timeout)
		timeout = V_pf_default_rule.timeout[state->timeout];
	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
	if (start && state->rule.ptr != &V_pf_default_rule) {
		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
		states = counter_u64_fetch(state->rule.ptr->states_cur);
	} else {
		start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
		end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
		states = V_pf_status.states;
	}
	if (end && states > start && start < end) {
		if (states < end)
			return (state->expire + timeout * (end - states) /
			    (end - start));
		else
			return (time_uptime);
	}
	return (state->expire + timeout);
}
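
/*
 * Worked example, added for illustration: with "set timeout
 * { adaptive.start 6000, adaptive.end 12000, tcp.established 86400 }"
 * and 9000 states allocated, an established TCP state expires after
 * 86400 * (12000 - 9000) / (12000 - 6000) = 43200 seconds; the
 * effective timeout shrinks linearly to zero as the state count
 * approaches adaptive.end.
 */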
void
pf_purge_expired_src_nodes()
{
	struct pf_src_node_list	 freelist;
	struct pf_srchash	*sh;
	struct pf_src_node	*cur, *next;
	int i;

	LIST_INIT(&freelist);
	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next)
			if (cur->states == 0 && cur->expire <= time_uptime) {
				pf_unlink_src_node(cur);
				LIST_INSERT_HEAD(&freelist, cur, entry);
			} else if (cur->rule.ptr != NULL)
				cur->rule.ptr->rule_flag |= PFRULE_REFS;
		PF_HASHROW_UNLOCK(sh);
	}

	pf_free_src_nodes(&freelist);

	V_pf_status.src_nodes = uma_zone_get_cur(V_pf_sources_z);
}
static void
pf_src_tree_remove_state(struct pf_state *s)
{
	struct pf_src_node *sn;
	struct pf_srchash *sh;
	u_int32_t timeout;

	timeout = s->rule.ptr->timeout[PFTM_SRC_NODE] ?
	    s->rule.ptr->timeout[PFTM_SRC_NODE] :
	    V_pf_default_rule.timeout[PFTM_SRC_NODE];

	if (s->src_node != NULL) {
		sn = s->src_node;
		sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
		PF_HASHROW_LOCK(sh);
		if (s->src.tcp_est)
			--sn->conn;
		if (--sn->states == 0)
			sn->expire = time_uptime + timeout;
		PF_HASHROW_UNLOCK(sh);
	}

	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
		sn = s->nat_src_node;
		sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
		PF_HASHROW_LOCK(sh);
		if (--sn->states == 0)
			sn->expire = time_uptime + timeout;
		PF_HASHROW_UNLOCK(sh);
	}

	s->src_node = s->nat_src_node = NULL;
}
/*
 * Unlink and potentially free a state. Function may be
 * called with ID hash row locked, but always returns
 * unlocked, since it needs to go through key hash locking.
 */
int
pf_unlink_state(struct pf_state *s, u_int flags)
{
	struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)];

	if ((flags & PF_ENTER_LOCKED) == 0)
		PF_HASHROW_LOCK(ih);
	else
		PF_HASHROW_ASSERT(ih);

	if (s->timeout == PFTM_UNLINKED) {
		/*
		 * State is being processed
		 * by pf_unlink_state() in
		 * another thread.
		 */
		PF_HASHROW_UNLOCK(ih);
		return (0);	/* XXXGL: undefined actually */
	}

	if (s->src.state == PF_TCPS_PROXY_DST) {
		/* XXX wire key the right one? */
		pf_send_tcp(NULL, s->rule.ptr, s->key[PF_SK_WIRE]->af,
		    &s->key[PF_SK_WIRE]->addr[1],
		    &s->key[PF_SK_WIRE]->addr[0],
		    s->key[PF_SK_WIRE]->port[1],
		    s->key[PF_SK_WIRE]->port[0],
		    s->src.seqhi, s->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, s->tag, NULL);
	}

	LIST_REMOVE(s, entry);
	pf_src_tree_remove_state(s);

	if (pfsync_delete_state_ptr != NULL)
		pfsync_delete_state_ptr(s);

	STATE_DEC_COUNTERS(s);

	s->timeout = PFTM_UNLINKED;

	PF_HASHROW_UNLOCK(ih);

	pf_detach_state(s);
	/* pf_state_insert() initialises refs to 2, so we can never release the
	 * last reference here, only in pf_release_state(). */
	(void)refcount_release(&s->refs);

	return (pf_release_state(s));
}
void
pf_free_state(struct pf_state *cur)
{

	KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
	KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
	    cur->timeout));

	pf_normalize_tcp_cleanup(cur);
	uma_zfree(V_pf_state_z, cur);
	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_REMOVALS], 1);
}
/*
 * Called only from pf_purge_thread(), thus serialized.
 */
static u_int
pf_purge_expired_states(u_int i, int maxcheck)
{
	struct pf_idhash *ih;
	struct pf_state *s;

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	/*
	 * Go through hash and unlink states that expire now.
	 */
	while (maxcheck > 0) {

		ih = &V_pf_idhash[i];

		/* only take the lock if we expect to do work */
		if (!LIST_EMPTY(&ih->states)) {
relock:
			PF_HASHROW_LOCK(ih);
			LIST_FOREACH(s, &ih->states, entry) {
				if (pf_state_expires(s) <= time_uptime) {
					V_pf_status.states -=
					    pf_unlink_state(s, PF_ENTER_LOCKED);
					goto relock;
				}
				s->rule.ptr->rule_flag |= PFRULE_REFS;
				if (s->nat_rule.ptr != NULL)
					s->nat_rule.ptr->rule_flag |= PFRULE_REFS;
				if (s->anchor.ptr != NULL)
					s->anchor.ptr->rule_flag |= PFRULE_REFS;
				s->kif->pfik_flags |= PFI_IFLAG_REFS;
				if (s->rt_kif)
					s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
			}
			PF_HASHROW_UNLOCK(ih);
		}

		/* Return when we hit end of hash. */
		if (++i > pf_hashmask) {
			V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
			return (0);
		}

		maxcheck--;
	}

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	return (i);
}
static void
pf_purge_unlinked_rules()
{
	struct pf_rulequeue tmpq;
	struct pf_rule *r, *r1;

	/*
	 * If we have overloading task pending, then we'd
	 * better skip purging this time. There is a tiny
	 * probability that overloading task references
	 * an already unlinked rule.
	 */
	PF_OVERLOADQ_LOCK();
	if (!SLIST_EMPTY(&V_pf_overloadqueue)) {
		PF_OVERLOADQ_UNLOCK();
		return;
	}
	PF_OVERLOADQ_UNLOCK();

	/*
	 * Do naive mark-and-sweep garbage collecting of old rules.
	 * Reference flag is raised by pf_purge_expired_states()
	 * and pf_purge_expired_src_nodes().
	 *
	 * To avoid LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK,
	 * use a temporary queue.
	 */
	TAILQ_INIT(&tmpq);
	PF_UNLNKDRULES_LOCK();
	TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) {
		if (!(r->rule_flag & PFRULE_REFS)) {
			TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries);
			TAILQ_INSERT_TAIL(&tmpq, r, entries);
		} else
			r->rule_flag &= ~PFRULE_REFS;
	}
	PF_UNLNKDRULES_UNLOCK();

	if (!TAILQ_EMPTY(&tmpq)) {
		PF_RULES_WLOCK();
		TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) {
			TAILQ_REMOVE(&tmpq, r, entries);
			pf_free_rule(r);
		}
		PF_RULES_WUNLOCK();
	}
}
void
pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t a = ntohl(addr->addr32[0]);
		printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
		    (a>>8)&255, a&255);
		if (p) {
			p = ntohs(p);
			printf(":%u", p);
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		u_int16_t b;
		u_int8_t i, curstart, curend, maxstart, maxend;
		curstart = curend = maxstart = maxend = 255;
		for (i = 0; i < 8; i++) {
			if (!addr->addr16[i]) {
				if (curstart == 255)
					curstart = i;
				curend = i;
			} else {
				if ((curend - curstart) >
				    (maxend - maxstart)) {
					maxstart = curstart;
					maxend = curend;
				}
				curstart = curend = 255;
			}
		}
		if ((curend - curstart) >
		    (maxend - maxstart)) {
			maxstart = curstart;
			maxend = curend;
		}
		for (i = 0; i < 8; i++) {
			if (i >= maxstart && i <= maxend) {
				if (i == 0)
					printf(":");
				if (i == maxend)
					printf(":");
			} else {
				b = ntohs(addr->addr16[i]);
				printf("%x", b);
				if (i < 7)
					printf(":");
			}
		}
		if (p) {
			p = ntohs(p);
			printf("[%u]", p);
		}
		break;
	}
#endif /* INET6 */
	}
}
void
pf_print_state(struct pf_state *s)
{
	pf_print_state_parts(s, NULL, NULL);
}

static void
pf_print_state_parts(struct pf_state *s,
    struct pf_state_key *skwp, struct pf_state_key *sksp)
{
	struct pf_state_key *skw, *sks;
	u_int8_t proto, dir;

	/* Do our best to fill these, but they're skipped if NULL */
	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
	proto = skw ? skw->proto : (sks ? sks->proto : 0);
	dir = s ? s->direction : 0;

	switch (proto) {
	case IPPROTO_IPV4:
		printf("IPv4");
		break;
	case IPPROTO_IPV6:
		printf("IPv6");
		break;
	case IPPROTO_TCP:
		printf("TCP");
		break;
	case IPPROTO_UDP:
		printf("UDP");
		break;
	case IPPROTO_ICMP:
		printf("ICMP");
		break;
	case IPPROTO_ICMPV6:
		printf("ICMPv6");
		break;
	default:
		printf("%u", proto);
		break;
	}
	switch (dir) {
	case PF_IN:
		printf(" in");
		break;
	case PF_OUT:
		printf(" out");
		break;
	}
	if (skw) {
		printf(" wire: ");
		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
		printf(" ");
		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
	}
	if (sks) {
		printf(" stack: ");
		if (sks != skw) {
			pf_print_host(&sks->addr[0], sks->port[0], sks->af);
			printf(" ");
			pf_print_host(&sks->addr[1], sks->port[1], sks->af);
		} else
			printf("-");
	}
	if (s) {
		if (proto == IPPROTO_TCP) {
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->src.seqlo, s->src.seqhi,
			    s->src.max_win, s->src.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->src.wscale & PF_WSCALE_MASK);
			printf("]");
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->dst.seqlo, s->dst.seqhi,
			    s->dst.max_win, s->dst.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->dst.wscale & PF_WSCALE_MASK);
			printf("]");
		}
		printf(" %u:%u", s->src.state, s->dst.state);
	}
}
void
pf_print_flags(u_int8_t f)
{
	if (f)
		printf(" ");
	if (f & TH_FIN)
		printf("F");
	if (f & TH_SYN)
		printf("S");
	if (f & TH_RST)
		printf("R");
	if (f & TH_PUSH)
		printf("P");
	if (f & TH_ACK)
		printf("A");
	if (f & TH_URG)
		printf("U");
	if (f & TH_ECE)
		printf("E");
	if (f & TH_CWR)
		printf("W");
}
#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {

		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
		if (cur->af != prev->af)
			PF_SET_SKIP_STEPS(PF_SKIP_AF);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
		if (cur->src.neg != prev->src.neg ||
		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
		if (cur->src.port[0] != prev->src.port[0] ||
		    cur->src.port[1] != prev->src.port[1] ||
		    cur->src.port_op != prev->src.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
		if (cur->dst.neg != prev->dst.neg ||
		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
		if (cur->dst.port[0] != prev->dst.port[0] ||
		    cur->dst.port[1] != prev->dst.port[1] ||
		    cur->dst.port_op != prev->dst.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}
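
/*
 * Illustration, added: for a run of 100 consecutive "proto tcp" rules
 * followed by a "proto udp" rule, every rule in the run gets
 * skip[PF_SKIP_PROTO] pointing at the UDP rule, so evaluating a UDP
 * packet hops over the whole run in one step instead of failing the
 * protocol check on each rule individually.
 */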
static int
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
{

	if (aw1->type != aw2->type)
		return (1);
	switch (aw1->type) {
	case PF_ADDR_ADDRMASK:
	case PF_ADDR_RANGE:
		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, AF_INET6))
			return (1);
		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, AF_INET6))
			return (1);
		return (0);
	case PF_ADDR_DYNIFTL:
		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		return (0);
	case PF_ADDR_TABLE:
		return (aw1->p.tbl != aw2->p.tbl);
	default:
		printf("invalid address type: %d\n", aw1->type);
		return (1);
	}
}
/**
 * Checksum updates are a little complicated because the checksum in the TCP/UDP
 * header isn't always a full checksum. In some cases (i.e. output) it's a
 * pseudo-header checksum, which is a partial checksum over src/dst IP
 * addresses, protocol number and length.
 *
 * That means we have the following cases:
 *  * Input or forwarding: we don't have TSO, the checksum fields are full
 *    checksums, so we need to update the checksum whenever we change anything.
 *  * Output (i.e. the checksum is a pseudo-header checksum):
 *    x The field being updated is src/dst address or affects the length of
 *      the packet. We need to update the pseudo-header checksum (note that this
 *      checksum is not ones' complement).
 *    x Some other field is being modified (e.g. src/dst port numbers): We
 *      don't have to update anything.
 **/
u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
	u_int32_t	l;

	if (udp && !cksum)
		return (0x0000);
	l = cksum + old - new;
	l = (l >> 16) + (l & 65535);
	l = l & 65535;
	if (udp && !l)
		return (0xFFFF);
	return (l);
}

u_int16_t
pf_proto_cksum_fixup(struct mbuf *m, u_int16_t cksum, u_int16_t old,
    u_int16_t new, u_int8_t udp)
{
	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
		return (cksum);

	return (pf_cksum_fixup(cksum, old, new, udp));
}
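
/*
 * Worked example, added for illustration: replacing a 16-bit field
 * old = 0x1234 with new = 0xabcd under cksum = 0x8000 computes
 * l = 0x8000 + 0x1234 - 0xabcd = 0xffffe667 (32-bit), folds the carry
 * (0xffff + 0xe667 = 0x1e666) and masks to 0xe666, which equals the
 * ones' complement sum 0x8000 + 0x1234 + ~0xabcd.  The udp special
 * cases preserve UDP's "no checksum" encoding: 0x0000 means "none",
 * and a checksum that computes to zero is transmitted as 0xffff.
 */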
static void
pf_change_ap(struct mbuf *m, struct pf_addr *a, u_int16_t *p, u_int16_t *ic,
    u_int16_t *pc, struct pf_addr *an, u_int16_t pn, u_int8_t u,
    sa_family_t af)
{
	struct pf_addr	ao;
	u_int16_t	po = *p;

	PF_ACPY(&ao, a, af);
	PF_ACPY(a, an, af);

	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
		*pc = ~*pc;

	*p = pn;

	switch (af) {
#ifdef INET
	case AF_INET:
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    ao.addr16[0], an->addr16[0], 0),
		    ao.addr16[1], an->addr16[1], 0);

		*pc = pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u);

		*pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    ao.addr16[2], an->addr16[2], u),
		    ao.addr16[3], an->addr16[3], u),
		    ao.addr16[4], an->addr16[4], u),
		    ao.addr16[5], an->addr16[5], u),
		    ao.addr16[6], an->addr16[6], u),
		    ao.addr16[7], an->addr16[7], u);

		*pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
		break;
#endif /* INET6 */
	}

	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA |
	    CSUM_DELAY_DATA_IPV6)) {
		*pc = ~*pc;
		if (! *pc)
			*pc = 0xffff;
	}
}
/* Changes a u_int32_t. Uses a void * so there are no align restrictions */
void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
	u_int32_t	ao;

	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));
	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
	    ao % 65536, an % 65536, u);
}

void
pf_change_proto_a(struct mbuf *m, void *a, u_int16_t *c, u_int32_t an, u_int8_t udp)
{
	u_int32_t	ao;

	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));

	*c = pf_proto_cksum_fixup(m,
	    pf_proto_cksum_fixup(m, *c, ao / 65536, an / 65536, udp),
	    ao % 65536, an % 65536, udp);
}
#ifdef INET6
static void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
	struct pf_addr	ao;

	PF_ACPY(&ao, a, AF_INET6);
	PF_ACPY(a, an, AF_INET6);

	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(*c,
	    ao.addr16[0], an->addr16[0], u),
	    ao.addr16[1], an->addr16[1], u),
	    ao.addr16[2], an->addr16[2], u),
	    ao.addr16[3], an->addr16[3], u),
	    ao.addr16[4], an->addr16[4], u),
	    ao.addr16[5], an->addr16[5], u),
	    ao.addr16[6], an->addr16[6], u),
	    ao.addr16[7], an->addr16[7], u);
}
#endif /* INET6 */
static void
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
{
	struct pf_addr	oia, ooa;

	PF_ACPY(&oia, ia, af);
	if (oa)
		PF_ACPY(&ooa, oa, af);

	/* Change inner protocol port, fix inner protocol checksum. */
	if (ip != NULL) {
		u_int16_t	oip = *ip;
		u_int32_t	opc;

		if (pc != NULL)
			opc = *pc;
		*ip = np;
		if (pc != NULL)
			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
		if (pc != NULL)
			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
	}
	/* Change inner ip address, fix inner ip and icmp checksums. */
	PF_ACPY(ia, na, af);
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t	 oh2c = *h2c;

		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], u),
		    oia.addr16[1], ia->addr16[1], u),
		    oia.addr16[2], ia->addr16[2], u),
		    oia.addr16[3], ia->addr16[3], u),
		    oia.addr16[4], ia->addr16[4], u),
		    oia.addr16[5], ia->addr16[5], u),
		    oia.addr16[6], ia->addr16[6], u),
		    oia.addr16[7], ia->addr16[7], u);
		break;
#endif /* INET6 */
	}
	/* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
	if (oa) {
		PF_ACPY(oa, na, af);
		switch (af) {
#ifdef INET
		case AF_INET:
			*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
			    ooa.addr16[0], oa->addr16[0], 0),
			    ooa.addr16[1], oa->addr16[1], 0);
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(*ic,
			    ooa.addr16[0], oa->addr16[0], u),
			    ooa.addr16[1], oa->addr16[1], u),
			    ooa.addr16[2], oa->addr16[2], u),
			    ooa.addr16[3], oa->addr16[3], u),
			    ooa.addr16[4], oa->addr16[4], u),
			    ooa.addr16[5], oa->addr16[5], u),
			    ooa.addr16[6], oa->addr16[6], u),
			    ooa.addr16[7], oa->addr16[7], u);
			break;
#endif /* INET6 */
		}
	}
}
/*
 * Need to modulate the sequence numbers in the TCP SACK option
 * (credits to Krzysztof Pfaff for report and patch)
 */
static int
pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *dst)
{
	int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
	u_int8_t opts[TCP_MAXOLEN], *opt = opts;
	int copyback = 0, i, olen;
	struct sackblk sack;

#define	TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
	if (hlen < TCPOLEN_SACKLEN ||
	    !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
		return 0;

	while (hlen >= TCPOLEN_SACKLEN) {
		olen = opt[1];
		switch (*opt) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
		case TCPOPT_NOP:
			opt++;
			hlen--;
			break;
		case TCPOPT_SACK:
			if (olen > hlen)
				olen = hlen;
			if (olen >= TCPOLEN_SACKLEN) {
				for (i = 2; i + TCPOLEN_SACK <= olen;
				    i += TCPOLEN_SACK) {
					memcpy(&sack, &opt[i], sizeof(sack));
					pf_change_proto_a(m, &sack.start, &th->th_sum,
					    htonl(ntohl(sack.start) - dst->seqdiff), 0);
					pf_change_proto_a(m, &sack.end, &th->th_sum,
					    htonl(ntohl(sack.end) - dst->seqdiff), 0);
					memcpy(&opt[i], &sack, sizeof(sack));
				}
				copyback = 1;
			}
			/* FALLTHROUGH */
		default:
			if (olen < 2)
				olen = 2;
			hlen -= olen;
			opt += olen;
		}
	}

	if (copyback)
		m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts);
	return (copyback);
}
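
/*
 * Note added for clarity: the same seqdiff chosen for sequence number
 * modulation must be subtracted from every SACK edge, or the peer's
 * SACK blocks would fall outside the rewritten sequence space.  E.g.
 * with dst->seqdiff = 1000, a SACK block [5000, 6000] leaves as
 * [4000, 5000].
 */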
static void
pf_send_tcp(struct mbuf *replyto, const struct pf_rule *r, sa_family_t af,
    const struct pf_addr *saddr, const struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
    u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
    u_int16_t rtag, struct ifnet *ifp)
{
	struct pf_send_entry *pfse;
	struct mbuf	*m;
	int		 len, tlen;
#ifdef INET
	struct ip	*h = NULL;
#endif /* INET */
#ifdef INET6
	struct ip6_hdr	*h6 = NULL;
#endif /* INET6 */
	struct tcphdr	*th;
	char		*opt;
	struct pf_mtag	*pf_mtag;

	len = 0;

	/* maximum segment size tcp option */
	tlen = sizeof(struct tcphdr);
	if (mss)
		tlen += 4;

	switch (af) {
#ifdef INET
	case AF_INET:
		len = sizeof(struct ip) + tlen;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		len = sizeof(struct ip6_hdr) + tlen;
		break;
#endif /* INET6 */
	default:
		panic("%s: unsupported af %d", __func__, af);
	}

	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
	if (pfse == NULL)
		return;
	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(pfse, M_PFTEMP);
		return;
	}
#ifdef MAC
	mac_netinet_firewall_send(m);
#endif
	if ((pf_mtag = pf_get_mtag(m)) == NULL) {
		free(pfse, M_PFTEMP);
		m_freem(m);
		return;
	}
	if (tag)
		m->m_flags |= M_SKIP_FIREWALL;
	pf_mtag->tag = rtag;

	if (r != NULL && r->rtableid >= 0)
		M_SETFIB(m, r->rtableid);

#ifdef ALTQ
	if (r != NULL && r->qid) {
		pf_mtag->qid = r->qid;

		/* add hints for ecn */
		pf_mtag->hdr = mtod(m, struct ip *);
	}
#endif /* ALTQ */
	m->m_data += max_linkhdr;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	bzero(m->m_data, len);
	switch (af) {
#ifdef INET
	case AF_INET:
		h = mtod(m, struct ip *);

		/* IP header fields included in the TCP checksum */
		h->ip_p = IPPROTO_TCP;
		h->ip_len = htons(tlen);
		h->ip_src.s_addr = saddr->v4.s_addr;
		h->ip_dst.s_addr = daddr->v4.s_addr;

		th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		h6 = mtod(m, struct ip6_hdr *);

		/* IP header fields included in the TCP checksum */
		h6->ip6_nxt = IPPROTO_TCP;
		h6->ip6_plen = htons(tlen);
		memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
		memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));

		th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
		break;
#endif /* INET6 */
	}

	/* TCP header */
	th->th_sport = sport;
	th->th_dport = dport;
	th->th_seq = htonl(seq);
	th->th_ack = htonl(ack);
	th->th_off = tlen >> 2;
	th->th_flags = flags;
	th->th_win = htons(win);

	if (mss) {
		opt = (char *)(th + 1);
		opt[0] = TCPOPT_MAXSEG;
		opt[1] = 4;
		HTONS(mss);
		bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
	}

	switch (af) {
#ifdef INET
	case AF_INET:
		/* TCP checksum */
		th->th_sum = in_cksum(m, len);

		/* Finish the IP header */
		h->ip_v = 4;
		h->ip_hl = sizeof(*h) >> 2;
		h->ip_tos = IPTOS_LOWDELAY;
		h->ip_off = htons(V_path_mtu_discovery ? IP_DF : 0);
		h->ip_len = htons(len);
		h->ip_ttl = ttl ? ttl : V_ip_defttl;
		h->ip_sum = 0;

		pfse->pfse_type = PFSE_IP;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		/* TCP checksum */
		th->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr), tlen);

		h6->ip6_vfc |= IPV6_VERSION;
		h6->ip6_hlim = IPV6_DEFHLIM;

		pfse->pfse_type = PFSE_IP6;
		break;
#endif /* INET6 */
	}

	pfse->pfse_m = m;
	pf_send(pfse);
}
2507 pf_return(struct pf_rule *r, struct pf_rule *nr, struct pf_pdesc *pd,
2508 struct pf_state_key *sk, int off, struct mbuf *m, struct tcphdr *th,
2509 struct pfi_kif *kif, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen,
2512 struct pf_addr * const saddr = pd->src;
2513 struct pf_addr * const daddr = pd->dst;
2514 sa_family_t af = pd->af;
2516 /* undo NAT changes, if they have taken place */
2518 PF_ACPY(saddr, &sk->addr[pd->sidx], af);
2519 PF_ACPY(daddr, &sk->addr[pd->didx], af);
2521 *pd->sport = sk->port[pd->sidx];
2523 *pd->dport = sk->port[pd->didx];
2525 *pd->proto_sum = bproto_sum;
2527 *pd->ip_sum = bip_sum;
2528 m_copyback(m, off, hdrlen, pd->hdr.any);
2530 if (pd->proto == IPPROTO_TCP &&
2531 ((r->rule_flag & PFRULE_RETURNRST) ||
2532 (r->rule_flag & PFRULE_RETURN)) &&
2533 !(th->th_flags & TH_RST)) {
2534 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
2546 h4 = mtod(m, struct ip *);
2547 len = ntohs(h4->ip_len) - off;
2552 h6 = mtod(m, struct ip6_hdr *);
2553 len = ntohs(h6->ip6_plen) - (off - sizeof(*h6));
2558 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
2559 REASON_SET(reason, PFRES_PROTCKSUM);
2561 if (th->th_flags & TH_SYN)
2563 if (th->th_flags & TH_FIN)
2565 pf_send_tcp(m, r, af, pd->dst,
2566 pd->src, th->th_dport, th->th_sport,
2567 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
2568 r->return_ttl, 1, 0, kif->pfik_ifp);
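/*
 * The RST acknowledges everything the offending segment carried:
 * ack starts at seq plus the payload length, and the two checks above
 * add one more for SYN and FIN, each of which consumes a sequence
 * number of its own.
 */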
2570 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
2572 pf_send_icmp(m, r->return_icmp >> 8,
2573 r->return_icmp & 255, af, r);
2574 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
2576 pf_send_icmp(m, r->return_icmp6 >> 8,
2577 r->return_icmp6 & 255, af, r);
2582 pf_ieee8021q_setpcp(struct mbuf *m, u_int8_t prio)
2586 KASSERT(prio <= PF_PRIO_MAX,
2587 ("%s with invalid pcp", __func__));
2589 mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_OUT, NULL);
2591 mtag = m_tag_alloc(MTAG_8021Q, MTAG_8021Q_PCP_OUT,
2592 sizeof(uint8_t), M_NOWAIT);
2595 m_tag_prepend(m, mtag);
2598 *(uint8_t *)(mtag + 1) = prio;
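/*
 * The priority travels as an MTAG_8021Q_PCP_OUT mbuf tag; the VLAN
 * output path reads it and writes it into the 802.1Q PCP field of
 * the outgoing frame.
 */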
2603 pf_match_ieee8021q_pcp(u_int8_t prio, struct mbuf *m)
2608 mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL);
2612 if (prio == PF_PRIO_ZERO)
2615 mpcp = *(uint8_t *)(mtag + 1);
2617 return (mpcp == prio);
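/*
 * A prio of 0 in a rule means "not specified", so PF_PRIO_ZERO is
 * the sentinel used when a rule asks for an explicit match on PCP 0.
 */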
2621 pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
2624 struct pf_send_entry *pfse;
2626 struct pf_mtag *pf_mtag;
2628 /* Allocate outgoing queue entry, mbuf and mbuf tag. */
2629 pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
2633 if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) {
2634 free(pfse, M_PFTEMP);
2638 if ((pf_mtag = pf_get_mtag(m0)) == NULL) {
2639 free(pfse, M_PFTEMP);
2643 m0->m_flags |= M_SKIP_FIREWALL;
2645 if (r->rtableid >= 0)
2646 M_SETFIB(m0, r->rtableid);
2650 pf_mtag->qid = r->qid;
2651 /* add hints for ecn */
2652 pf_mtag->hdr = mtod(m0, struct ip *);
2659 pfse->pfse_type = PFSE_ICMP;
2664 pfse->pfse_type = PFSE_ICMP6;
2669 pfse->icmpopts.type = type;
2670 pfse->icmpopts.code = code;
2675 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
2676 * If n is 0, they match if they are equal. If n is != 0, they match if they
2677 * are different.
2680 pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
2681 struct pf_addr *b, sa_family_t af)
2688 if ((a->addr32[0] & m->addr32[0]) ==
2689 (b->addr32[0] & m->addr32[0]))
2695 if (((a->addr32[0] & m->addr32[0]) ==
2696 (b->addr32[0] & m->addr32[0])) &&
2697 ((a->addr32[1] & m->addr32[1]) ==
2698 (b->addr32[1] & m->addr32[1])) &&
2699 ((a->addr32[2] & m->addr32[2]) ==
2700 (b->addr32[2] & m->addr32[2])) &&
2701 ((a->addr32[3] & m->addr32[3]) ==
2702 (b->addr32[3] & m->addr32[3])))
2721 * Return 1 if b <= a <= e, otherwise return 0.
2724 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
2725 struct pf_addr *a, sa_family_t af)
2730 if ((ntohl(a->addr32[0]) < ntohl(b->addr32[0])) ||
2731 (ntohl(a->addr32[0]) > ntohl(e->addr32[0])))
2740 for (i = 0; i < 4; ++i)
2741 if (ntohl(a->addr32[i]) > ntohl(b->addr32[i]))
2743 else if (ntohl(a->addr32[i]) < ntohl(b->addr32[i]))
2746 for (i = 0; i < 4; ++i)
2747 if (ntohl(a->addr32[i]) < ntohl(e->addr32[i]))
2749 else if (ntohl(a->addr32[i]) > ntohl(e->addr32[i]))
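/*
 * The v6 case above is a 128-bit unsigned comparison done word by
 * word in network order: the first loop establishes b <= a, the
 * second a <= e; the address is in range only if both hold.
 */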
2759 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
2763 return ((p > a1) && (p < a2));
2765 return ((p < a1) || (p > a2));
2767 return ((p >= a1) && (p <= a2));
2781 return (0); /* never reached */
2785 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
2790 return (pf_match(op, a1, a2, p));
2794 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
2796 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2798 return (pf_match(op, a1, a2, u));
2802 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
2804 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2806 return (pf_match(op, a1, a2, g));
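/*
 * UID_MAX/GID_MAX doubles as "socket owner unknown" (no pcb found),
 * so only explicit equal/unequal operators may be applied to it;
 * range comparisons against an unknown owner never match.
 */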
2810 pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag, int mtag)
2815 return ((!r->match_tag_not && r->match_tag == *tag) ||
2816 (r->match_tag_not && r->match_tag != *tag));
2820 pf_tag_packet(struct mbuf *m, struct pf_pdesc *pd, int tag)
2823 KASSERT(tag > 0, ("%s: tag %d", __func__, tag));
2825 if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(m)) == NULL))
2828 pd->pf_mtag->tag = tag;
2833 #define PF_ANCHOR_STACKSIZE 32
2834 struct pf_anchor_stackframe {
2835 struct pf_ruleset *rs;
2836 struct pf_rule *r; /* XXX: + match bit */
2837 struct pf_anchor *child;
2841 * XXX: We rely on malloc(9) returning pointer aligned addresses.
2843 #define PF_ANCHORSTACK_MATCH 0x00000001
2844 #define PF_ANCHORSTACK_MASK (PF_ANCHORSTACK_MATCH)
2846 #define PF_ANCHOR_MATCH(f) ((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH)
2847 #define PF_ANCHOR_RULE(f) (struct pf_rule *) \
2848 ((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK)
2849 #define PF_ANCHOR_SET_MATCH(f) do { (f)->r = (void *) \
2850 ((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH); \
2854 pf_step_into_anchor(struct pf_anchor_stackframe *stack, int *depth,
2855 struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
2858 struct pf_anchor_stackframe *f;
2864 if (*depth >= PF_ANCHOR_STACKSIZE) {
2865 printf("%s: anchor stack overflow on %s\n",
2866 __func__, (*r)->anchor->name);
2867 *r = TAILQ_NEXT(*r, entries);
2869 } else if (*depth == 0 && a != NULL)
2871 f = stack + (*depth)++;
2874 if ((*r)->anchor_wildcard) {
2875 struct pf_anchor_node *parent = &(*r)->anchor->children;
2877 if ((f->child = RB_MIN(pf_anchor_node, parent)) == NULL) {
2881 *rs = &f->child->ruleset;
2884 *rs = &(*r)->anchor->ruleset;
2886 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2890 pf_step_out_of_anchor(struct pf_anchor_stackframe *stack, int *depth,
2891 struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
2894 struct pf_anchor_stackframe *f;
2903 f = stack + *depth - 1;
2904 fr = PF_ANCHOR_RULE(f);
2905 if (f->child != NULL) {
2906 struct pf_anchor_node *parent;
2909 * This block traverses through
2910 * a wildcard anchor.
2912 parent = &fr->anchor->children;
2913 if (match != NULL && *match) {
2915 * If any of "*" matched, then
2916 * "foo/ *" matched, mark frame
2919 PF_ANCHOR_SET_MATCH(f);
2922 f->child = RB_NEXT(pf_anchor_node, parent, f->child);
2923 if (f->child != NULL) {
2924 *rs = &f->child->ruleset;
2925 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2933 if (*depth == 0 && a != NULL)
2936 if (PF_ANCHOR_MATCH(f) || (match != NULL && *match))
2938 *r = TAILQ_NEXT(fr, entries);
2939 } while (*r == NULL);
2946 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
2947 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
2952 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2953 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2957 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2958 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2959 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
2960 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
2961 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
2962 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
2963 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
2964 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
2970 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
2975 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
2979 if (addr->addr32[3] == 0xffffffff) {
2980 addr->addr32[3] = 0;
2981 if (addr->addr32[2] == 0xffffffff) {
2982 addr->addr32[2] = 0;
2983 if (addr->addr32[1] == 0xffffffff) {
2984 addr->addr32[1] = 0;
2986 htonl(ntohl(addr->addr32[0]) + 1);
2989 htonl(ntohl(addr->addr32[1]) + 1);
2992 htonl(ntohl(addr->addr32[2]) + 1);
2995 htonl(ntohl(addr->addr32[3]) + 1);
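/*
 * For IPv6 this is a 128-bit increment with carry, kept in network
 * byte order: when addr32[3] wraps, the carry ripples into addr32[2]
 * and so on. For IPv4 only the single word is incremented.
 */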
3002 pf_socket_lookup(int direction, struct pf_pdesc *pd, struct mbuf *m)
3004 struct pf_addr *saddr, *daddr;
3005 u_int16_t sport, dport;
3006 struct inpcbinfo *pi;
3009 pd->lookup.uid = UID_MAX;
3010 pd->lookup.gid = GID_MAX;
3012 switch (pd->proto) {
3014 if (pd->hdr.tcp == NULL)
3016 sport = pd->hdr.tcp->th_sport;
3017 dport = pd->hdr.tcp->th_dport;
3021 if (pd->hdr.udp == NULL)
3023 sport = pd->hdr.udp->uh_sport;
3024 dport = pd->hdr.udp->uh_dport;
3030 if (direction == PF_IN) {
3045 inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4,
3046 dport, INPLOOKUP_RLOCKPCB, NULL, m);
3048 inp = in_pcblookup_mbuf(pi, saddr->v4, sport,
3049 daddr->v4, dport, INPLOOKUP_WILDCARD |
3050 INPLOOKUP_RLOCKPCB, NULL, m);
3058 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6,
3059 dport, INPLOOKUP_RLOCKPCB, NULL, m);
3061 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport,
3062 &daddr->v6, dport, INPLOOKUP_WILDCARD |
3063 INPLOOKUP_RLOCKPCB, NULL, m);
3073 INP_RLOCK_ASSERT(inp);
3074 pd->lookup.uid = inp->inp_cred->cr_uid;
3075 pd->lookup.gid = inp->inp_cred->cr_groups[0];
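/*
 * The credentials found here are cached in pd->lookup so that rule
 * evaluation can match on user and group without repeating the pcb
 * lookup for every candidate rule.
 */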
3082 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3086 u_int8_t *opt, optlen;
3087 u_int8_t wscale = 0;
3089 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3090 if (hlen <= sizeof(struct tcphdr))
3092 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3094 opt = hdr + sizeof(struct tcphdr);
3095 hlen -= sizeof(struct tcphdr);
3105 if (wscale > TCP_MAX_WINSHIFT)
3106 wscale = TCP_MAX_WINSHIFT;
3107 wscale |= PF_WSCALE_FLAG;
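/*
 * PF_WSCALE_FLAG records that the option was present at all, so a
 * genuine shift count of zero can be told apart from "no window
 * scaling negotiated".
 */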
3122 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3126 u_int8_t *opt, optlen;
3127 u_int16_t mss = V_tcp_mssdflt;
3129 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3130 if (hlen <= sizeof(struct tcphdr))
3132 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3134 opt = hdr + sizeof(struct tcphdr);
3135 hlen -= sizeof(struct tcphdr);
3136 while (hlen >= TCPOLEN_MAXSEG) {
3144 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
3160 pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
3163 struct nhop4_basic nh4;
3166 struct nhop6_basic nh6;
3167 struct in6_addr dst6;
3176 hlen = sizeof(struct ip);
3177 if (fib4_lookup_nh_basic(rtableid, addr->v4, 0, 0, &nh4) == 0)
3178 mss = nh4.nh_mtu - hlen - sizeof(struct tcphdr);
3183 hlen = sizeof(struct ip6_hdr);
3184 in6_splitscope(&addr->v6, &dst6, &scopeid);
3185 if (fib6_lookup_nh_basic(rtableid, &dst6, scopeid, 0,0,&nh6)==0)
3186 mss = nh6.nh_mtu - hlen - sizeof(struct tcphdr);
3191 mss = max(V_tcp_mssdflt, mss);
3192 mss = min(mss, offer);
3193 mss = max(mss, 64); /* sanity - at least max opt space */
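/*
 * Example: an IPv4 route with a 1500 byte MTU yields
 * 1500 - 20 - 20 = 1460, which is then capped by the peer's offer
 * and floored at 64 so there is always room for TCP options.
 */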
3198 pf_tcp_iss(struct pf_pdesc *pd)
3201 u_int32_t digest[4];
3203 if (V_pf_tcp_secret_init == 0) {
3204 read_random(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret));
3205 MD5Init(&V_pf_tcp_secret_ctx);
3206 MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret,
3207 sizeof(V_pf_tcp_secret));
3208 V_pf_tcp_secret_init = 1;
3211 ctx = V_pf_tcp_secret_ctx;
3213 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short));
3214 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short));
3215 if (pd->af == AF_INET6) {
3216 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
3217 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
3219 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
3220 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
3222 MD5Final((u_char *)digest, &ctx);
3223 V_pf_tcp_iss_off += 4096;
3224 #define ISN_RANDOM_INCREMENT (4096 - 1)
3225 return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) +
3227 #undef ISN_RANDOM_INCREMENT
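/*
 * RFC 1948 style ISN selection: a keyed MD5 over the connection
 * 4-tuple gives a stable per-connection base, the 4096 step keeps
 * successive ISNs advancing, and the random increment below the step
 * size makes individual values unpredictable.
 */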
3231 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
3232 struct pfi_kif *kif, struct mbuf *m, int off, struct pf_pdesc *pd,
3233 struct pf_rule **am, struct pf_ruleset **rsm, struct inpcb *inp)
3235 struct pf_rule *nr = NULL;
3236 struct pf_addr * const saddr = pd->src;
3237 struct pf_addr * const daddr = pd->dst;
3238 sa_family_t af = pd->af;
3239 struct pf_rule *r, *a = NULL;
3240 struct pf_ruleset *ruleset = NULL;
3241 struct pf_src_node *nsn = NULL;
3242 struct tcphdr *th = pd->hdr.tcp;
3243 struct pf_state_key *sk = NULL, *nk = NULL;
3245 int rewrite = 0, hdrlen = 0;
3246 int tag = -1, rtableid = -1;
3250 u_int16_t sport = 0, dport = 0;
3251 u_int16_t bproto_sum = 0, bip_sum = 0;
3252 u_int8_t icmptype = 0, icmpcode = 0;
3253 struct pf_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
3258 INP_LOCK_ASSERT(inp);
3259 pd->lookup.uid = inp->inp_cred->cr_uid;
3260 pd->lookup.gid = inp->inp_cred->cr_groups[0];
3261 pd->lookup.done = 1;
3264 switch (pd->proto) {
3266 sport = th->th_sport;
3267 dport = th->th_dport;
3268 hdrlen = sizeof(*th);
3271 sport = pd->hdr.udp->uh_sport;
3272 dport = pd->hdr.udp->uh_dport;
3273 hdrlen = sizeof(*pd->hdr.udp);
3277 if (pd->af != AF_INET)
3279 sport = dport = pd->hdr.icmp->icmp_id;
3280 hdrlen = sizeof(*pd->hdr.icmp);
3281 icmptype = pd->hdr.icmp->icmp_type;
3282 icmpcode = pd->hdr.icmp->icmp_code;
3284 if (icmptype == ICMP_UNREACH ||
3285 icmptype == ICMP_SOURCEQUENCH ||
3286 icmptype == ICMP_REDIRECT ||
3287 icmptype == ICMP_TIMXCEED ||
3288 icmptype == ICMP_PARAMPROB)
3293 case IPPROTO_ICMPV6:
3296 sport = dport = pd->hdr.icmp6->icmp6_id;
3297 hdrlen = sizeof(*pd->hdr.icmp6);
3298 icmptype = pd->hdr.icmp6->icmp6_type;
3299 icmpcode = pd->hdr.icmp6->icmp6_code;
3301 if (icmptype == ICMP6_DST_UNREACH ||
3302 icmptype == ICMP6_PACKET_TOO_BIG ||
3303 icmptype == ICMP6_TIME_EXCEEDED ||
3304 icmptype == ICMP6_PARAM_PROB)
3309 sport = dport = hdrlen = 0;
3313 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3315 /* check packet for BINAT/NAT/RDR */
3316 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, &sk,
3317 &nk, saddr, daddr, sport, dport, anchor_stack)) != NULL) {
3318 KASSERT(sk != NULL, ("%s: null sk", __func__));
3319 KASSERT(nk != NULL, ("%s: null nk", __func__));
3322 bip_sum = *pd->ip_sum;
3324 switch (pd->proto) {
3326 bproto_sum = th->th_sum;
3327 pd->proto_sum = &th->th_sum;
3329 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3330 nk->port[pd->sidx] != sport) {
3331 pf_change_ap(m, saddr, &th->th_sport, pd->ip_sum,
3332 &th->th_sum, &nk->addr[pd->sidx],
3333 nk->port[pd->sidx], 0, af);
3334 pd->sport = &th->th_sport;
3335 sport = th->th_sport;
3338 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3339 nk->port[pd->didx] != dport) {
3340 pf_change_ap(m, daddr, &th->th_dport, pd->ip_sum,
3341 &th->th_sum, &nk->addr[pd->didx],
3342 nk->port[pd->didx], 0, af);
3343 dport = th->th_dport;
3344 pd->dport = &th->th_dport;
3349 bproto_sum = pd->hdr.udp->uh_sum;
3350 pd->proto_sum = &pd->hdr.udp->uh_sum;
3352 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3353 nk->port[pd->sidx] != sport) {
3354 pf_change_ap(m, saddr, &pd->hdr.udp->uh_sport,
3355 pd->ip_sum, &pd->hdr.udp->uh_sum,
3356 &nk->addr[pd->sidx],
3357 nk->port[pd->sidx], 1, af);
3358 sport = pd->hdr.udp->uh_sport;
3359 pd->sport = &pd->hdr.udp->uh_sport;
3362 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3363 nk->port[pd->didx] != dport) {
3364 pf_change_ap(m, daddr, &pd->hdr.udp->uh_dport,
3365 pd->ip_sum, &pd->hdr.udp->uh_sum,
3366 &nk->addr[pd->didx],
3367 nk->port[pd->didx], 1, af);
3368 dport = pd->hdr.udp->uh_dport;
3369 pd->dport = &pd->hdr.udp->uh_dport;
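/*
 * The trailing 1 passed to pf_change_ap() marks the protocol as UDP:
 * a checksum that fixes up to 0 must be emitted as 0xffff, because 0
 * means "no checksum" for UDP over IPv4.
 */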
3375 nk->port[0] = nk->port[1];
3376 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
3377 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3378 nk->addr[pd->sidx].v4.s_addr, 0);
3380 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
3381 pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
3382 nk->addr[pd->didx].v4.s_addr, 0);
3384 if (nk->port[1] != pd->hdr.icmp->icmp_id) {
3385 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
3386 pd->hdr.icmp->icmp_cksum, sport,
3388 pd->hdr.icmp->icmp_id = nk->port[1];
3389 pd->sport = &pd->hdr.icmp->icmp_id;
3391 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
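/*
 * ICMP queries carry no ports; the id field plays that role for NAT
 * purposes, and the checksum is patched incrementally rather than
 * recomputed.
 */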
3395 case IPPROTO_ICMPV6:
3396 nk->port[0] = nk->port[1];
3397 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3398 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3399 &nk->addr[pd->sidx], 0);
3401 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3402 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3403 &nk->addr[pd->didx], 0);
3412 &nk->addr[pd->sidx], AF_INET))
3413 pf_change_a(&saddr->v4.s_addr,
3415 nk->addr[pd->sidx].v4.s_addr, 0);
3418 &nk->addr[pd->didx], AF_INET))
3419 pf_change_a(&daddr->v4.s_addr,
3421 nk->addr[pd->didx].v4.s_addr, 0);
3427 &nk->addr[pd->sidx], AF_INET6))
3428 PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3431 &nk->addr[pd->didx], AF_INET6))
3432 PF_ACPY(daddr, &nk->addr[pd->didx], af);
3445 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3446 r = r->skip[PF_SKIP_IFP].ptr;
3447 else if (r->direction && r->direction != direction)
3448 r = r->skip[PF_SKIP_DIR].ptr;
3449 else if (r->af && r->af != af)
3450 r = r->skip[PF_SKIP_AF].ptr;
3451 else if (r->proto && r->proto != pd->proto)
3452 r = r->skip[PF_SKIP_PROTO].ptr;
3453 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3454 r->src.neg, kif, M_GETFIB(m)))
3455 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3456 /* tcp/udp only. port_op always 0 in other cases */
3457 else if (r->src.port_op && !pf_match_port(r->src.port_op,
3458 r->src.port[0], r->src.port[1], sport))
3459 r = r->skip[PF_SKIP_SRC_PORT].ptr;
3460 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3461 r->dst.neg, NULL, M_GETFIB(m)))
3462 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3463 /* tcp/udp only. port_op always 0 in other cases */
3464 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3465 r->dst.port[0], r->dst.port[1], dport))
3466 r = r->skip[PF_SKIP_DST_PORT].ptr;
3467 /* icmp only. type always 0 in other cases */
3468 else if (r->type && r->type != icmptype + 1)
3469 r = TAILQ_NEXT(r, entries);
3470 /* icmp only. type always 0 in other cases */
3471 else if (r->code && r->code != icmpcode + 1)
3472 r = TAILQ_NEXT(r, entries);
3473 else if (r->tos && !(r->tos == pd->tos))
3474 r = TAILQ_NEXT(r, entries);
3475 else if (r->rule_flag & PFRULE_FRAGMENT)
3476 r = TAILQ_NEXT(r, entries);
3477 else if (pd->proto == IPPROTO_TCP &&
3478 (r->flagset & th->th_flags) != r->flags)
3479 r = TAILQ_NEXT(r, entries);
3480 /* tcp/udp only. uid.op always 0 in other cases */
3481 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3482 pf_socket_lookup(direction, pd, m), 1)) &&
3483 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3485 r = TAILQ_NEXT(r, entries);
3486 /* tcp/udp only. gid.op always 0 in other cases */
3487 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
3488 pf_socket_lookup(direction, pd, m), 1)) &&
3489 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
3491 r = TAILQ_NEXT(r, entries);
3493 !pf_match_ieee8021q_pcp(r->prio, m))
3494 r = TAILQ_NEXT(r, entries);
3496 r->prob <= arc4random())
3497 r = TAILQ_NEXT(r, entries);
3498 else if (r->match_tag && !pf_match_tag(m, r, &tag,
3499 pd->pf_mtag ? pd->pf_mtag->tag : 0))
3500 r = TAILQ_NEXT(r, entries);
3501 else if (r->os_fingerprint != PF_OSFP_ANY &&
3502 (pd->proto != IPPROTO_TCP || !pf_osfp_match(
3503 pf_osfp_fingerprint(pd, m, off, th),
3504 r->os_fingerprint)))
3505 r = TAILQ_NEXT(r, entries);
3509 if (r->rtableid >= 0)
3510 rtableid = r->rtableid;
3511 if (r->anchor == NULL) {
3518 r = TAILQ_NEXT(r, entries);
3520 pf_step_into_anchor(anchor_stack, &asd,
3521 &ruleset, PF_RULESET_FILTER, &r, &a,
3524 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3525 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3532 REASON_SET(&reason, PFRES_MATCH);
3534 if (r->log || (nr != NULL && nr->log)) {
3536 m_copyback(m, off, hdrlen, pd->hdr.any);
3537 PFLOG_PACKET(kif, m, af, direction, reason, r->log ? r : nr, a,
3541 if ((r->action == PF_DROP) &&
3542 ((r->rule_flag & PFRULE_RETURNRST) ||
3543 (r->rule_flag & PFRULE_RETURNICMP) ||
3544 (r->rule_flag & PFRULE_RETURN))) {
3545 pf_return(r, nr, pd, sk, off, m, th, kif, bproto_sum,
3546 bip_sum, hdrlen, &reason);
3549 if (r->action == PF_DROP)
3552 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3553 REASON_SET(&reason, PFRES_MEMORY);
3557 M_SETFIB(m, rtableid);
3559 if (!state_icmp && (r->keep_state || nr != NULL ||
3560 (pd->flags & PFDESC_TCP_NORM))) {
3562 action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off,
3563 sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum,
3565 if (action != PF_PASS) {
3566 if (action == PF_DROP &&
3567 (r->rule_flag & PFRULE_RETURN))
3568 pf_return(r, nr, pd, sk, off, m, th, kif,
3569 bproto_sum, bip_sum, hdrlen, &reason);
3574 uma_zfree(V_pf_state_key_z, sk);
3576 uma_zfree(V_pf_state_key_z, nk);
3579 /* copy back packet headers if we performed NAT operations */
3581 m_copyback(m, off, hdrlen, pd->hdr.any);
3583 if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) &&
3584 direction == PF_OUT &&
3585 pfsync_defer_ptr != NULL && pfsync_defer_ptr(*sm, m))
3587 * We want the state created, but we don't
3588 * want to send this in case a partner
3589 * firewall has to know about it to allow
3590 * replies through it.
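* When pfsync does defer the packet, the verdict becomes "defer":
* pfsync holds the mbuf and releases it only once the peer has
* acknowledged the new state (or a timeout expires).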
3598 uma_zfree(V_pf_state_key_z, sk);
3600 uma_zfree(V_pf_state_key_z, nk);
3605 pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
3606 struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *nk,
3607 struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport,
3608 u_int16_t dport, int *rewrite, struct pfi_kif *kif, struct pf_state **sm,
3609 int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen)
3611 struct pf_state *s = NULL;
3612 struct pf_src_node *sn = NULL;
3613 struct tcphdr *th = pd->hdr.tcp;
3614 u_int16_t mss = V_tcp_mssdflt;
3617 /* check maximums */
3618 if (r->max_states &&
3619 (counter_u64_fetch(r->states_cur) >= r->max_states)) {
3620 counter_u64_add(V_pf_status.lcounters[LCNT_STATES], 1);
3621 REASON_SET(&reason, PFRES_MAXSTATES);
3624 /* src node for filter rule */
3625 if ((r->rule_flag & PFRULE_SRCTRACK ||
3626 r->rpool.opts & PF_POOL_STICKYADDR) &&
3627 pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
3628 REASON_SET(&reason, PFRES_SRCLIMIT);
3631 /* src node for translation rule */
3632 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3633 pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
3634 REASON_SET(&reason, PFRES_SRCLIMIT);
3637 s = uma_zalloc(V_pf_state_z, M_NOWAIT | M_ZERO);
3639 REASON_SET(&reason, PFRES_MEMORY);
3643 s->nat_rule.ptr = nr;
3645 STATE_INC_COUNTERS(s);
3647 s->state_flags |= PFSTATE_ALLOWOPTS;
3648 if (r->rule_flag & PFRULE_STATESLOPPY)
3649 s->state_flags |= PFSTATE_SLOPPY;
3650 s->log = r->log & PF_LOG_ALL;
3651 s->sync_state = PFSYNC_S_NONE;
3653 s->log |= nr->log & PF_LOG_ALL;
3654 switch (pd->proto) {
3656 s->src.seqlo = ntohl(th->th_seq);
3657 s->src.seqhi = s->src.seqlo + pd->p_len + 1;
3658 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
3659 r->keep_state == PF_STATE_MODULATE) {
3660 /* Generate sequence number modulator */
3661 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
3664 pf_change_proto_a(m, &th->th_seq, &th->th_sum,
3665 htonl(s->src.seqlo + s->src.seqdiff), 0);
3669 if (th->th_flags & TH_SYN) {
3671 s->src.wscale = pf_get_wscale(m, off,
3672 th->th_off, pd->af);
3674 s->src.max_win = MAX(ntohs(th->th_win), 1);
3675 if (s->src.wscale & PF_WSCALE_MASK) {
3676 /* Remove scale factor from initial window */
3677 int win = s->src.max_win;
3678 win += 1 << (s->src.wscale & PF_WSCALE_MASK);
3679 s->src.max_win = (win - 1) >>
3680 (s->src.wscale & PF_WSCALE_MASK);
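/*
 * This stores the initial window un-scaled, rounding up:
 * max_win = (win + 2^ws - 1) >> ws, a ceiling divide by the scale
 * factor, so the tracker never underestimates what the peer may send.
 */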
3682 if (th->th_flags & TH_FIN)
3686 s->src.state = TCPS_SYN_SENT;
3687 s->dst.state = TCPS_CLOSED;
3688 s->timeout = PFTM_TCP_FIRST_PACKET;
3691 s->src.state = PFUDPS_SINGLE;
3692 s->dst.state = PFUDPS_NO_TRAFFIC;
3693 s->timeout = PFTM_UDP_FIRST_PACKET;
3697 case IPPROTO_ICMPV6:
3699 s->timeout = PFTM_ICMP_FIRST_PACKET;
3702 s->src.state = PFOTHERS_SINGLE;
3703 s->dst.state = PFOTHERS_NO_TRAFFIC;
3704 s->timeout = PFTM_OTHER_FIRST_PACKET;
3708 if (pf_map_addr(pd->af, r, pd->src, &s->rt_addr, NULL, &sn)) {
3709 REASON_SET(&reason, PFRES_MAPFAILED);
3710 pf_src_tree_remove_state(s);
3711 STATE_DEC_COUNTERS(s);
3712 uma_zfree(V_pf_state_z, s);
3715 s->rt_kif = r->rpool.cur->kif;
3718 s->creation = time_uptime;
3719 s->expire = time_uptime;
3724 /* XXX We only modify one side for now. */
3725 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
3726 s->nat_src_node = nsn;
3728 if (pd->proto == IPPROTO_TCP) {
3729 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
3730 off, pd, th, &s->src, &s->dst)) {
3731 REASON_SET(&reason, PFRES_MEMORY);
3732 pf_src_tree_remove_state(s);
3733 STATE_DEC_COUNTERS(s);
3734 uma_zfree(V_pf_state_z, s);
3737 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
3738 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
3739 &s->src, &s->dst, rewrite)) {
3740 /* This really shouldn't happen!!! */
3741 DPFPRINTF(PF_DEBUG_URGENT,
3742 ("pf_normalize_tcp_stateful failed on first pkt"));
3743 pf_normalize_tcp_cleanup(s);
3744 pf_src_tree_remove_state(s);
3745 STATE_DEC_COUNTERS(s);
3746 uma_zfree(V_pf_state_z, s);
3750 s->direction = pd->dir;
3753 * sk/nk may already have been set up by pf_get_translation().
3756 KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p",
3757 __func__, nr, sk, nk));
3758 sk = pf_state_key_setup(pd, pd->src, pd->dst, sport, dport);
3763 KASSERT((sk != NULL && nk != NULL), ("%s: nr %p sk %p, nk %p",
3764 __func__, nr, sk, nk));
3766 /* Swap sk/nk for PF_OUT. */
3767 if (pf_state_insert(BOUND_IFACE(r, kif),
3768 (pd->dir == PF_IN) ? sk : nk,
3769 (pd->dir == PF_IN) ? nk : sk, s)) {
3770 if (pd->proto == IPPROTO_TCP)
3771 pf_normalize_tcp_cleanup(s);
3772 REASON_SET(&reason, PFRES_STATEINS);
3773 pf_src_tree_remove_state(s);
3774 STATE_DEC_COUNTERS(s);
3775 uma_zfree(V_pf_state_z, s);
3782 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
3783 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
3784 s->src.state = PF_TCPS_PROXY_SRC;
3785 /* undo NAT changes, if they have taken place */
3787 struct pf_state_key *skt = s->key[PF_SK_WIRE];
3788 if (pd->dir == PF_OUT)
3789 skt = s->key[PF_SK_STACK];
3790 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
3791 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
3793 *pd->sport = skt->port[pd->sidx];
3795 *pd->dport = skt->port[pd->didx];
3797 *pd->proto_sum = bproto_sum;
3799 *pd->ip_sum = bip_sum;
3800 m_copyback(m, off, hdrlen, pd->hdr.any);
3802 s->src.seqhi = htonl(arc4random());
3803 /* Find mss option */
3804 int rtid = M_GETFIB(m);
3805 mss = pf_get_mss(m, off, th->th_off, pd->af);
3806 mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
3807 mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
3809 pf_send_tcp(NULL, r, pd->af, pd->dst, pd->src, th->th_dport,
3810 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
3811 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL);
3812 REASON_SET(&reason, PFRES_SYNPROXY);
3813 return (PF_SYNPROXY_DROP);
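/*
 * Synproxy: pf completes the client-side handshake itself with its
 * own ISN (src.seqhi) and never forwards the original SYN; the
 * server-side handshake is driven later from pf_test_state_tcp()
 * once the client's final ACK arrives.
 */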
3820 uma_zfree(V_pf_state_key_z, sk);
3822 uma_zfree(V_pf_state_key_z, nk);
3825 struct pf_srchash *sh;
3827 sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
3828 PF_HASHROW_LOCK(sh);
3829 if (--sn->states == 0 && sn->expire == 0) {
3830 pf_unlink_src_node(sn);
3831 uma_zfree(V_pf_sources_z, sn);
3833 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
3835 PF_HASHROW_UNLOCK(sh);
3838 if (nsn != sn && nsn != NULL) {
3839 struct pf_srchash *sh;
3841 sh = &V_pf_srchash[pf_hashsrc(&nsn->addr, nsn->af)];
3842 PF_HASHROW_LOCK(sh);
3843 if (--nsn->states == 0 && nsn->expire == 0) {
3844 pf_unlink_src_node(nsn);
3845 uma_zfree(V_pf_sources_z, nsn);
3847 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
3849 PF_HASHROW_UNLOCK(sh);
3856 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
3857 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
3858 struct pf_ruleset **rsm)
3860 struct pf_rule *r, *a = NULL;
3861 struct pf_ruleset *ruleset = NULL;
3862 sa_family_t af = pd->af;
3867 struct pf_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
3871 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3874 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3875 r = r->skip[PF_SKIP_IFP].ptr;
3876 else if (r->direction && r->direction != direction)
3877 r = r->skip[PF_SKIP_DIR].ptr;
3878 else if (r->af && r->af != af)
3879 r = r->skip[PF_SKIP_AF].ptr;
3880 else if (r->proto && r->proto != pd->proto)
3881 r = r->skip[PF_SKIP_PROTO].ptr;
3882 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
3883 r->src.neg, kif, M_GETFIB(m)))
3884 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3885 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
3886 r->dst.neg, NULL, M_GETFIB(m)))
3887 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3888 else if (r->tos && !(r->tos == pd->tos))
3889 r = TAILQ_NEXT(r, entries);
3890 else if (r->os_fingerprint != PF_OSFP_ANY)
3891 r = TAILQ_NEXT(r, entries);
3892 else if (pd->proto == IPPROTO_UDP &&
3893 (r->src.port_op || r->dst.port_op))
3894 r = TAILQ_NEXT(r, entries);
3895 else if (pd->proto == IPPROTO_TCP &&
3896 (r->src.port_op || r->dst.port_op || r->flagset))
3897 r = TAILQ_NEXT(r, entries);
3898 else if ((pd->proto == IPPROTO_ICMP ||
3899 pd->proto == IPPROTO_ICMPV6) &&
3900 (r->type || r->code))
3901 r = TAILQ_NEXT(r, entries);
3903 !pf_match_ieee8021q_pcp(r->prio, m))
3904 r = TAILQ_NEXT(r, entries);
3905 else if (r->prob && r->prob <=
3906 (arc4random() % (UINT_MAX - 1) + 1))
3907 r = TAILQ_NEXT(r, entries);
3908 else if (r->match_tag && !pf_match_tag(m, r, &tag,
3909 pd->pf_mtag ? pd->pf_mtag->tag : 0))
3910 r = TAILQ_NEXT(r, entries);
3912 if (r->anchor == NULL) {
3919 r = TAILQ_NEXT(r, entries);
3921 pf_step_into_anchor(anchor_stack, &asd,
3922 &ruleset, PF_RULESET_FILTER, &r, &a,
3925 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3926 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3933 REASON_SET(&reason, PFRES_MATCH);
3936 PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd,
3939 if (r->action != PF_PASS)
3942 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3943 REASON_SET(&reason, PFRES_MEMORY);
3951 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
3952 struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off,
3953 struct pf_pdesc *pd, u_short *reason, int *copyback)
3955 struct tcphdr *th = pd->hdr.tcp;
3956 u_int16_t win = ntohs(th->th_win);
3957 u_int32_t ack, end, seq, orig_seq;
3961 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
3962 sws = src->wscale & PF_WSCALE_MASK;
3963 dws = dst->wscale & PF_WSCALE_MASK;
3968 * Sequence tracking algorithm from Guido van Rooij's paper:
3969 * http://www.madison-gurkha.com/publications/tcp_filtering/
3973 orig_seq = seq = ntohl(th->th_seq);
3974 if (src->seqlo == 0) {
3975 /* First packet from this end. Set its state */
3977 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
3978 src->scrub == NULL) {
3979 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
3980 REASON_SET(reason, PFRES_MEMORY);
3985 /* Deferred generation of sequence number modulator */
3986 if (dst->seqdiff && !src->seqdiff) {
3987 /* use random iss for the TCP server */
3988 while ((src->seqdiff = arc4random() - seq) == 0)
3990 ack = ntohl(th->th_ack) - dst->seqdiff;
3991 pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
3993 pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
3996 ack = ntohl(th->th_ack);
3999 end = seq + pd->p_len;
4000 if (th->th_flags & TH_SYN) {
4002 if (dst->wscale & PF_WSCALE_FLAG) {
4003 src->wscale = pf_get_wscale(m, off, th->th_off,
4005 if (src->wscale & PF_WSCALE_FLAG) {
4006 /* Remove scale factor from initial
4008 sws = src->wscale & PF_WSCALE_MASK;
4009 win = ((u_int32_t)win + (1 << sws) - 1)
4011 dws = dst->wscale & PF_WSCALE_MASK;
4013 /* fixup other window */
4014 dst->max_win <<= dst->wscale &
4016 /* in case of a retrans SYN|ACK */
4021 if (th->th_flags & TH_FIN)
4025 if (src->state < TCPS_SYN_SENT)
4026 src->state = TCPS_SYN_SENT;
4029 * May need to slide the window (seqhi may have been set by
4030 * the crappy stack check or if we picked up the connection
4031 * after establishment)
4033 if (src->seqhi == 1 ||
4034 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
4035 src->seqhi = end + MAX(1, dst->max_win << dws);
4036 if (win > src->max_win)
4040 ack = ntohl(th->th_ack) - dst->seqdiff;
4042 /* Modulate sequence numbers */
4043 pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
4045 pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
4048 end = seq + pd->p_len;
4049 if (th->th_flags & TH_SYN)
4051 if (th->th_flags & TH_FIN)
4055 if ((th->th_flags & TH_ACK) == 0) {
4056 /* Let it pass through the ack skew check */
4058 } else if ((ack == 0 &&
4059 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
4060 /* broken tcp stacks do not set ack */
4061 (dst->state < TCPS_SYN_SENT)) {
4063 * Many stacks (ours included) will set the ACK number in a
4064 * FIN|ACK if the SYN times out -- no sequence to ACK.
4070 /* Ease sequencing restrictions on no data packets */
4075 ackskew = dst->seqlo - ack;
4079 * Need to demodulate the sequence numbers in any TCP SACK options
4080 * (Selective ACK). We could optionally validate the SACK values
4081 * against the current ACK window, either forwards or backwards, but
4082 * I'm not confident that SACK has been implemented properly
4083 * everywhere. It wouldn't surprise me if several stacks accidentally
4084 * SACK too far backwards of previously ACKed data. There really aren't
4085 * any security implications of bad SACKing unless the target stack
4086 * doesn't validate the option length correctly. Someone trying to
4087 * spoof into a TCP connection won't bother blindly sending SACK
4090 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
4091 if (pf_modulate_sack(m, off, pd, th, dst))
4096 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
4097 if (SEQ_GEQ(src->seqhi, end) &&
4098 /* Last octet inside other's window space */
4099 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
4100 /* Retrans: not more than one window back */
4101 (ackskew >= -MAXACKWINDOW) &&
4102 /* Acking not more than one reassembled fragment backwards */
4103 (ackskew <= (MAXACKWINDOW << sws)) &&
4104 /* Acking not more than one window forward */
4105 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
4106 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
4107 (pd->flags & PFDESC_IP_REAS) == 0)) {
4108 /* Require an exact/+1 sequence match on resets when possible */
4110 if (dst->scrub || src->scrub) {
4111 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4112 *state, src, dst, copyback))
4116 /* update max window */
4117 if (src->max_win < win)
4119 /* synchronize sequencing */
4120 if (SEQ_GT(end, src->seqlo))
4122 /* slide the window of what the other end can send */
4123 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4124 dst->seqhi = ack + MAX((win << sws), 1);
4128 if (th->th_flags & TH_SYN)
4129 if (src->state < TCPS_SYN_SENT)
4130 src->state = TCPS_SYN_SENT;
4131 if (th->th_flags & TH_FIN)
4132 if (src->state < TCPS_CLOSING)
4133 src->state = TCPS_CLOSING;
4134 if (th->th_flags & TH_ACK) {
4135 if (dst->state == TCPS_SYN_SENT) {
4136 dst->state = TCPS_ESTABLISHED;
4137 if (src->state == TCPS_ESTABLISHED &&
4138 (*state)->src_node != NULL &&
4139 pf_src_connlimit(state)) {
4140 REASON_SET(reason, PFRES_SRCLIMIT);
4143 } else if (dst->state == TCPS_CLOSING)
4144 dst->state = TCPS_FIN_WAIT_2;
4146 if (th->th_flags & TH_RST)
4147 src->state = dst->state = TCPS_TIME_WAIT;
4149 /* update expire time */
4150 (*state)->expire = time_uptime;
4151 if (src->state >= TCPS_FIN_WAIT_2 &&
4152 dst->state >= TCPS_FIN_WAIT_2)
4153 (*state)->timeout = PFTM_TCP_CLOSED;
4154 else if (src->state >= TCPS_CLOSING &&
4155 dst->state >= TCPS_CLOSING)
4156 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4157 else if (src->state < TCPS_ESTABLISHED ||
4158 dst->state < TCPS_ESTABLISHED)
4159 (*state)->timeout = PFTM_TCP_OPENING;
4160 else if (src->state >= TCPS_CLOSING ||
4161 dst->state >= TCPS_CLOSING)
4162 (*state)->timeout = PFTM_TCP_CLOSING;
4164 (*state)->timeout = PFTM_TCP_ESTABLISHED;
4166 /* Fall through to PASS packet */
4168 } else if ((dst->state < TCPS_SYN_SENT ||
4169 dst->state >= TCPS_FIN_WAIT_2 ||
4170 src->state >= TCPS_FIN_WAIT_2) &&
4171 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
4172 /* Within a window forward of the originating packet */
4173 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
4174 /* Within a window backward of the originating packet */
4177 * This currently handles three situations:
4178 * 1) Stupid stacks will shotgun SYNs before their peer
4180 * 2) When PF catches an already established stream (the
4181 * firewall rebooted, the state table was flushed, routes
4183 * 3) Packets get funky immediately after the connection
4184 * closes (this should catch Solaris spurious ACK|FINs
4185 * that web servers like to spew after a close)
4187 * This must be a little more careful than the above code
4188 * since packet floods will also be caught here. We don't
4189 * update the TTL here to mitigate the damage of a packet
4190 * flood and so the same code can handle awkward establishment
4191 * and a loosened connection close.
4192 * In the establishment case, a correct peer response will
4193 * validate the connection, go through the normal state code
4194 * and keep updating the state TTL.
4197 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4198 printf("pf: loose state match: ");
4199 pf_print_state(*state);
4200 pf_print_flags(th->th_flags);
4201 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4202 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
4203 pd->p_len, ackskew, (unsigned long long)(*state)->packets[0],
4204 (unsigned long long)(*state)->packets[1],
4205 pd->dir == PF_IN ? "in" : "out",
4206 pd->dir == (*state)->direction ? "fwd" : "rev");
4209 if (dst->scrub || src->scrub) {
4210 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4211 *state, src, dst, copyback))
4215 /* update max window */
4216 if (src->max_win < win)
4218 /* synchronize sequencing */
4219 if (SEQ_GT(end, src->seqlo))
4221 /* slide the window of what the other end can send */
4222 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4223 dst->seqhi = ack + MAX((win << sws), 1);
4226 * Cannot set dst->seqhi here since this could be a shotgunned
4227 * SYN and not an already established connection.
4230 if (th->th_flags & TH_FIN)
4231 if (src->state < TCPS_CLOSING)
4232 src->state = TCPS_CLOSING;
4233 if (th->th_flags & TH_RST)
4234 src->state = dst->state = TCPS_TIME_WAIT;
4236 /* Fall through to PASS packet */
4239 if ((*state)->dst.state == TCPS_SYN_SENT &&
4240 (*state)->src.state == TCPS_SYN_SENT) {
4241 /* Send RST for state mismatches during handshake */
4242 if (!(th->th_flags & TH_RST))
4243 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4244 pd->dst, pd->src, th->th_dport,
4245 th->th_sport, ntohl(th->th_ack), 0,
4247 (*state)->rule.ptr->return_ttl, 1, 0,
4252 } else if (V_pf_status.debug >= PF_DEBUG_MISC) {
4253 printf("pf: BAD state: ");
4254 pf_print_state(*state);
4255 pf_print_flags(th->th_flags);
4256 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4257 "pkts=%llu:%llu dir=%s,%s\n",
4258 seq, orig_seq, ack, pd->p_len, ackskew,
4259 (unsigned long long)(*state)->packets[0],
4260 (unsigned long long)(*state)->packets[1],
4261 pd->dir == PF_IN ? "in" : "out",
4262 pd->dir == (*state)->direction ? "fwd" : "rev");
4263 printf("pf: State failure on: %c %c %c %c | %c %c\n",
4264 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4265 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4267 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4268 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4269 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4270 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
4272 REASON_SET(reason, PFRES_BADSTATE);
4280 pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst,
4281 struct pf_state **state, struct pf_pdesc *pd, u_short *reason)
4283 struct tcphdr *th = pd->hdr.tcp;
4285 if (th->th_flags & TH_SYN)
4286 if (src->state < TCPS_SYN_SENT)
4287 src->state = TCPS_SYN_SENT;
4288 if (th->th_flags & TH_FIN)
4289 if (src->state < TCPS_CLOSING)
4290 src->state = TCPS_CLOSING;
4291 if (th->th_flags & TH_ACK) {
4292 if (dst->state == TCPS_SYN_SENT) {
4293 dst->state = TCPS_ESTABLISHED;
4294 if (src->state == TCPS_ESTABLISHED &&
4295 (*state)->src_node != NULL &&
4296 pf_src_connlimit(state)) {
4297 REASON_SET(reason, PFRES_SRCLIMIT);
4300 } else if (dst->state == TCPS_CLOSING) {
4301 dst->state = TCPS_FIN_WAIT_2;
4302 } else if (src->state == TCPS_SYN_SENT &&
4303 dst->state < TCPS_SYN_SENT) {
4305 * Handle a special sloppy case where we only see one
4306 * half of the connection. If there is an ACK after
4307 * the initial SYN without ever seeing a packet from
4308 * the destination, set the connection to established.
4310 dst->state = src->state = TCPS_ESTABLISHED;
4311 if ((*state)->src_node != NULL &&
4312 pf_src_connlimit(state)) {
4313 REASON_SET(reason, PFRES_SRCLIMIT);
4316 } else if (src->state == TCPS_CLOSING &&
4317 dst->state == TCPS_ESTABLISHED &&
4320 * Handle the closing of half connections where we
4321 * don't see the full bidirectional FIN/ACK+ACK
4324 dst->state = TCPS_CLOSING;
4327 if (th->th_flags & TH_RST)
4328 src->state = dst->state = TCPS_TIME_WAIT;
4330 /* update expire time */
4331 (*state)->expire = time_uptime;
4332 if (src->state >= TCPS_FIN_WAIT_2 &&
4333 dst->state >= TCPS_FIN_WAIT_2)
4334 (*state)->timeout = PFTM_TCP_CLOSED;
4335 else if (src->state >= TCPS_CLOSING &&
4336 dst->state >= TCPS_CLOSING)
4337 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4338 else if (src->state < TCPS_ESTABLISHED ||
4339 dst->state < TCPS_ESTABLISHED)
4340 (*state)->timeout = PFTM_TCP_OPENING;
4341 else if (src->state >= TCPS_CLOSING ||
4342 dst->state >= TCPS_CLOSING)
4343 (*state)->timeout = PFTM_TCP_CLOSING;
4345 (*state)->timeout = PFTM_TCP_ESTABLISHED;
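/*
 * The sloppy tracker above drives the same state machine and timeout
 * schedule as full tracking but enforces no sequence windows, which
 * is what makes it usable on asymmetric paths where only one
 * direction of a connection is visible.
 */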
4351 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
4352 struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
4355 struct pf_state_key_cmp key;
4356 struct tcphdr *th = pd->hdr.tcp;
4358 struct pf_state_peer *src, *dst;
4359 struct pf_state_key *sk;
4361 bzero(&key, sizeof(key));
4363 key.proto = IPPROTO_TCP;
4364 if (direction == PF_IN) { /* wire side, straight */
4365 PF_ACPY(&key.addr[0], pd->src, key.af);
4366 PF_ACPY(&key.addr[1], pd->dst, key.af);
4367 key.port[0] = th->th_sport;
4368 key.port[1] = th->th_dport;
4369 } else { /* stack side, reverse */
4370 PF_ACPY(&key.addr[1], pd->src, key.af);
4371 PF_ACPY(&key.addr[0], pd->dst, key.af);
4372 key.port[1] = th->th_sport;
4373 key.port[0] = th->th_dport;
4376 STATE_LOOKUP(kif, &key, direction, *state, pd);
4378 if (direction == (*state)->direction) {
4379 src = &(*state)->src;
4380 dst = &(*state)->dst;
4382 src = &(*state)->dst;
4383 dst = &(*state)->src;
4386 sk = (*state)->key[pd->didx];
4388 if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
4389 if (direction != (*state)->direction) {
4390 REASON_SET(reason, PFRES_SYNPROXY);
4391 return (PF_SYNPROXY_DROP);
4393 if (th->th_flags & TH_SYN) {
4394 if (ntohl(th->th_seq) != (*state)->src.seqlo) {
4395 REASON_SET(reason, PFRES_SYNPROXY);
4398 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4399 pd->src, th->th_dport, th->th_sport,
4400 (*state)->src.seqhi, ntohl(th->th_seq) + 1,
4401 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0, NULL);
4402 REASON_SET(reason, PFRES_SYNPROXY);
4403 return (PF_SYNPROXY_DROP);
4404 } else if (!(th->th_flags & TH_ACK) ||
4405 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4406 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4407 REASON_SET(reason, PFRES_SYNPROXY);
4409 } else if ((*state)->src_node != NULL &&
4410 pf_src_connlimit(state)) {
4411 REASON_SET(reason, PFRES_SRCLIMIT);
4414 (*state)->src.state = PF_TCPS_PROXY_DST;
4416 if ((*state)->src.state == PF_TCPS_PROXY_DST) {
4417 if (direction == (*state)->direction) {
4418 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
4419 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4420 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4421 REASON_SET(reason, PFRES_SYNPROXY);
4424 (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
4425 if ((*state)->dst.seqhi == 1)
4426 (*state)->dst.seqhi = htonl(arc4random());
4427 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4428 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4429 sk->port[pd->sidx], sk->port[pd->didx],
4430 (*state)->dst.seqhi, 0, TH_SYN, 0,
4431 (*state)->src.mss, 0, 0, (*state)->tag, NULL);
4432 REASON_SET(reason, PFRES_SYNPROXY);
4433 return (PF_SYNPROXY_DROP);
4434 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
4436 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
4437 REASON_SET(reason, PFRES_SYNPROXY);
4440 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
4441 (*state)->dst.seqlo = ntohl(th->th_seq);
4442 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4443 pd->src, th->th_dport, th->th_sport,
4444 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
4445 TH_ACK, (*state)->src.max_win, 0, 0, 0,
4446 (*state)->tag, NULL);
4447 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4448 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4449 sk->port[pd->sidx], sk->port[pd->didx],
4450 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
4451 TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0, NULL);
4452 (*state)->src.seqdiff = (*state)->dst.seqhi -
4453 (*state)->src.seqlo;
4454 (*state)->dst.seqdiff = (*state)->src.seqhi -
4455 (*state)->dst.seqlo;
4456 (*state)->src.seqhi = (*state)->src.seqlo +
4457 (*state)->dst.max_win;
4458 (*state)->dst.seqhi = (*state)->dst.seqlo +
4459 (*state)->src.max_win;
4460 (*state)->src.wscale = (*state)->dst.wscale = 0;
4461 (*state)->src.state = (*state)->dst.state =
4463 REASON_SET(reason, PFRES_SYNPROXY);
4464 return (PF_SYNPROXY_DROP);
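/*
 * Both proxied handshakes are now complete: the seqdiff values set
 * above translate between the ISNs pf picked and those the real
 * endpoints chose, so every later segment can be modulated
 * transparently.
 */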
4468 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
4469 dst->state >= TCPS_FIN_WAIT_2 &&
4470 src->state >= TCPS_FIN_WAIT_2) {
4471 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4472 printf("pf: state reuse ");
4473 pf_print_state(*state);
4474 pf_print_flags(th->th_flags);
4477 /* XXX make sure it's the same direction ?? */
4478 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
4479 pf_unlink_state(*state, PF_ENTER_LOCKED);
4484 if ((*state)->state_flags & PFSTATE_SLOPPY) {
4485 if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP)
4488 if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason,
4489 &copyback) == PF_DROP)
4493 /* translate source/destination address, if necessary */
4494 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4495 struct pf_state_key *nk = (*state)->key[pd->didx];
4497 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4498 nk->port[pd->sidx] != th->th_sport)
4499 pf_change_ap(m, pd->src, &th->th_sport,
4500 pd->ip_sum, &th->th_sum, &nk->addr[pd->sidx],
4501 nk->port[pd->sidx], 0, pd->af);
4503 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4504 nk->port[pd->didx] != th->th_dport)
4505 pf_change_ap(m, pd->dst, &th->th_dport,
4506 pd->ip_sum, &th->th_sum, &nk->addr[pd->didx],
4507 nk->port[pd->didx], 0, pd->af);
4511 /* Copyback sequence modulation or stateful scrub changes if needed */
4513 m_copyback(m, off, sizeof(*th), (caddr_t)th);
4519 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
4520 struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4522 struct pf_state_peer *src, *dst;
4523 struct pf_state_key_cmp key;
4524 struct udphdr *uh = pd->hdr.udp;
4526 bzero(&key, sizeof(key));
4528 key.proto = IPPROTO_UDP;
4529 if (direction == PF_IN) { /* wire side, straight */
4530 PF_ACPY(&key.addr[0], pd->src, key.af);
4531 PF_ACPY(&key.addr[1], pd->dst, key.af);
4532 key.port[0] = uh->uh_sport;
4533 key.port[1] = uh->uh_dport;
4534 } else { /* stack side, reverse */
4535 PF_ACPY(&key.addr[1], pd->src, key.af);
4536 PF_ACPY(&key.addr[0], pd->dst, key.af);
4537 key.port[1] = uh->uh_sport;
4538 key.port[0] = uh->uh_dport;
4541 STATE_LOOKUP(kif, &key, direction, *state, pd);
4543 if (direction == (*state)->direction) {
4544 src = &(*state)->src;
4545 dst = &(*state)->dst;
4547 src = &(*state)->dst;
4548 dst = &(*state)->src;
4552 if (src->state < PFUDPS_SINGLE)
4553 src->state = PFUDPS_SINGLE;
4554 if (dst->state == PFUDPS_SINGLE)
4555 dst->state = PFUDPS_MULTIPLE;
4557 /* update expire time */
4558 (*state)->expire = time_uptime;
4559 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
4560 (*state)->timeout = PFTM_UDP_MULTIPLE;
4562 (*state)->timeout = PFTM_UDP_SINGLE;
4564 /* translate source/destination address, if necessary */
4565 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4566 struct pf_state_key *nk = (*state)->key[pd->didx];
4568 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4569 nk->port[pd->sidx] != uh->uh_sport)
4570 pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum,
4571 &uh->uh_sum, &nk->addr[pd->sidx],
4572 nk->port[pd->sidx], 1, pd->af);
4574 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4575 nk->port[pd->didx] != uh->uh_dport)
4576 pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum,
4577 &uh->uh_sum, &nk->addr[pd->didx],
4578 nk->port[pd->didx], 1, pd->af);
4579 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
4586 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
4587 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
4589 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
4590 u_int16_t icmpid = 0, *icmpsum;
4593 struct pf_state_key_cmp key;
4595 bzero(&key, sizeof(key));
4596 switch (pd->proto) {
4599 icmptype = pd->hdr.icmp->icmp_type;
4600 icmpid = pd->hdr.icmp->icmp_id;
4601 icmpsum = &pd->hdr.icmp->icmp_cksum;
4603 if (icmptype == ICMP_UNREACH ||
4604 icmptype == ICMP_SOURCEQUENCH ||
4605 icmptype == ICMP_REDIRECT ||
4606 icmptype == ICMP_TIMXCEED ||
4607 icmptype == ICMP_PARAMPROB)
4612 case IPPROTO_ICMPV6:
4613 icmptype = pd->hdr.icmp6->icmp6_type;
4614 icmpid = pd->hdr.icmp6->icmp6_id;
4615 icmpsum = &pd->hdr.icmp6->icmp6_cksum;
4617 if (icmptype == ICMP6_DST_UNREACH ||
4618 icmptype == ICMP6_PACKET_TOO_BIG ||
4619 icmptype == ICMP6_TIME_EXCEEDED ||
4620 icmptype == ICMP6_PARAM_PROB)
4629 * ICMP query/reply message not related to a TCP/UDP packet.
4630 * Search for an ICMP state.
4633 key.proto = pd->proto;
4634 key.port[0] = key.port[1] = icmpid;
4635 if (direction == PF_IN) { /* wire side, straight */
4636 PF_ACPY(&key.addr[0], pd->src, key.af);
4637 PF_ACPY(&key.addr[1], pd->dst, key.af);
4638 } else { /* stack side, reverse */
4639 PF_ACPY(&key.addr[1], pd->src, key.af);
4640 PF_ACPY(&key.addr[0], pd->dst, key.af);
4643 STATE_LOOKUP(kif, &key, direction, *state, pd);
4645 (*state)->expire = time_uptime;
4646 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
4648 /* translate source/destination address, if necessary */
4649 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4650 struct pf_state_key *nk = (*state)->key[pd->didx];
4655 if (PF_ANEQ(pd->src,
4656 &nk->addr[pd->sidx], AF_INET))
4657 pf_change_a(&saddr->v4.s_addr,
4659 nk->addr[pd->sidx].v4.s_addr, 0);
4661 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
4663 pf_change_a(&daddr->v4.s_addr,
4665 nk->addr[pd->didx].v4.s_addr, 0);
4668 pd->hdr.icmp->icmp_id) {
4669 pd->hdr.icmp->icmp_cksum =
4671 pd->hdr.icmp->icmp_cksum, icmpid,
4672 nk->port[pd->sidx], 0);
4673 pd->hdr.icmp->icmp_id =
4677 m_copyback(m, off, ICMP_MINLEN,
4678 (caddr_t )pd->hdr.icmp);
4683 if (PF_ANEQ(pd->src,
4684 &nk->addr[pd->sidx], AF_INET6))
4686 &pd->hdr.icmp6->icmp6_cksum,
4687 &nk->addr[pd->sidx], 0);
4689 if (PF_ANEQ(pd->dst,
4690 &nk->addr[pd->didx], AF_INET6))
4692 &pd->hdr.icmp6->icmp6_cksum,
4693 &nk->addr[pd->didx], 0);
4695 m_copyback(m, off, sizeof(struct icmp6_hdr),
4696 (caddr_t )pd->hdr.icmp6);
4705 * ICMP error message in response to a TCP/UDP packet.
4706 * Extract the inner TCP/UDP header and search for that state.
4709 struct pf_pdesc pd2;
4710 bzero(&pd2, sizeof pd2);
4715 struct ip6_hdr h2_6;
4722 /* Payload packet is from the opposite direction. */
4723 pd2.sidx = (direction == PF_IN) ? 1 : 0;
4724 pd2.didx = (direction == PF_IN) ? 0 : 1;
4728 /* offset of h2 in mbuf chain */
4729 ipoff2 = off + ICMP_MINLEN;
4731 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
4732 NULL, reason, pd2.af)) {
4733 DPFPRINTF(PF_DEBUG_MISC,
4734 ("pf: ICMP error message too short "
4739 * ICMP error messages don't refer to non-first
4742 if (h2.ip_off & htons(IP_OFFMASK)) {
4743 REASON_SET(reason, PFRES_FRAG);
4747 /* offset of protocol header that follows h2 */
4748 off2 = ipoff2 + (h2.ip_hl << 2);
4750 pd2.proto = h2.ip_p;
4751 pd2.src = (struct pf_addr *)&h2.ip_src;
4752 pd2.dst = (struct pf_addr *)&h2.ip_dst;
4753 pd2.ip_sum = &h2.ip_sum;
4758 ipoff2 = off + sizeof(struct icmp6_hdr);
4760 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
4761 NULL, reason, pd2.af)) {
4762 DPFPRINTF(PF_DEBUG_MISC,
4763 ("pf: ICMP error message too short "
4767 pd2.proto = h2_6.ip6_nxt;
4768 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
4769 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
4771 off2 = ipoff2 + sizeof(h2_6);
4773 switch (pd2.proto) {
4774 case IPPROTO_FRAGMENT:
4776 * ICMPv6 error messages for
4777 * non-first fragments
4779 REASON_SET(reason, PFRES_FRAG);
4782 case IPPROTO_HOPOPTS:
4783 case IPPROTO_ROUTING:
4784 case IPPROTO_DSTOPTS: {
4785 /* get next header and header length */
4786 struct ip6_ext opt6;
4788 if (!pf_pull_hdr(m, off2, &opt6,
4789 sizeof(opt6), NULL, reason,
4791 DPFPRINTF(PF_DEBUG_MISC,
4792 ("pf: ICMPv6 short opt\n"));
4795 if (pd2.proto == IPPROTO_AH)
4796 off2 += (opt6.ip6e_len + 2) * 4;
4798 off2 += (opt6.ip6e_len + 1) * 8;
4799 pd2.proto = opt6.ip6e_nxt;
4800 /* goto the next header */
4807 } while (!terminal);
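/*
 * The loop above walks the IPv6 extension header chain to reach the
 * real transport header: AH encodes its length in 32-bit words (plus
 * two), all other options in 8-octet units (plus one).
 */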
4812 switch (pd2.proto) {
4816 struct pf_state_peer *src, *dst;
4821 * Only the first 8 bytes of the TCP header can be
4822 * expected. Don't access any TCP header fields after
4823 * th_seq; an ackskew test is not possible.
4825 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
4827 DPFPRINTF(PF_DEBUG_MISC,
4828 ("pf: ICMP error message too short "
4834 key.proto = IPPROTO_TCP;
4835 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4836 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4837 key.port[pd2.sidx] = th.th_sport;
4838 key.port[pd2.didx] = th.th_dport;
4840 STATE_LOOKUP(kif, &key, direction, *state, pd);
4842 if (direction == (*state)->direction) {
4843 src = &(*state)->dst;
4844 dst = &(*state)->src;
4846 src = &(*state)->src;
4847 dst = &(*state)->dst;
4850 if (src->wscale && dst->wscale)
4851 dws = dst->wscale & PF_WSCALE_MASK;
4855 /* Demodulate sequence number */
4856 seq = ntohl(th.th_seq) - src->seqdiff;
4858 pf_change_a(&th.th_seq, icmpsum,
4863 if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
4864 (!SEQ_GEQ(src->seqhi, seq) ||
4865 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
4866 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4867 printf("pf: BAD ICMP %d:%d ",
4868 icmptype, pd->hdr.icmp->icmp_code);
4869 pf_print_host(pd->src, 0, pd->af);
4871 pf_print_host(pd->dst, 0, pd->af);
4873 pf_print_state(*state);
4874 printf(" seq=%u\n", seq);
4876 REASON_SET(reason, PFRES_BADSTATE);
4879 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4880 printf("pf: OK ICMP %d:%d ",
4881 icmptype, pd->hdr.icmp->icmp_code);
4882 pf_print_host(pd->src, 0, pd->af);
4884 pf_print_host(pd->dst, 0, pd->af);
4886 pf_print_state(*state);
4887 printf(" seq=%u\n", seq);
4891 /* translate source/destination address, if necessary */
4892 if ((*state)->key[PF_SK_WIRE] !=
4893 (*state)->key[PF_SK_STACK]) {
4894 struct pf_state_key *nk =
4895 (*state)->key[pd->didx];
4897 if (PF_ANEQ(pd2.src,
4898 &nk->addr[pd2.sidx], pd2.af) ||
4899 nk->port[pd2.sidx] != th.th_sport)
4900 pf_change_icmp(pd2.src, &th.th_sport,
4901 daddr, &nk->addr[pd2.sidx],
4902 nk->port[pd2.sidx], NULL,
4903 pd2.ip_sum, icmpsum,
4904 pd->ip_sum, 0, pd2.af);
4906 if (PF_ANEQ(pd2.dst,
4907 &nk->addr[pd2.didx], pd2.af) ||
4908 nk->port[pd2.didx] != th.th_dport)
4909 pf_change_icmp(pd2.dst, &th.th_dport,
4910 saddr, &nk->addr[pd2.didx],
4911 nk->port[pd2.didx], NULL,
4912 pd2.ip_sum, icmpsum,
4913 pd->ip_sum, 0, pd2.af);
			if (copyback) {
				switch (pd2.af) {
				case AF_INET:
					m_copyback(m, off, ICMP_MINLEN,
					    (caddr_t)pd->hdr.icmp);
					m_copyback(m, ipoff2, sizeof(h2),
					    (caddr_t)&h2);
					break;
				case AF_INET6:
					m_copyback(m, off,
					    sizeof(struct icmp6_hdr),
					    (caddr_t)pd->hdr.icmp6);
					m_copyback(m, ipoff2, sizeof(h2_6),
					    (caddr_t)&h2_6);
					break;
				}
				m_copyback(m, off2, 8, (caddr_t)&th);
			}

			break;
		}
		case IPPROTO_UDP: {
			struct udphdr uh;

			if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
			    NULL, reason, pd2.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: ICMP error message too short "
				    "(udp)\n"));
				return (PF_DROP);
			}

			key.af = pd2.af;
			key.proto = IPPROTO_UDP;
			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
			key.port[pd2.sidx] = uh.uh_sport;
			key.port[pd2.didx] = uh.uh_dport;

			STATE_LOOKUP(kif, &key, direction, *state, pd);

			/* translate source/destination address, if necessary */
			if ((*state)->key[PF_SK_WIRE] !=
			    (*state)->key[PF_SK_STACK]) {
				struct pf_state_key *nk =
				    (*state)->key[pd->didx];

				if (PF_ANEQ(pd2.src,
				    &nk->addr[pd2.sidx], pd2.af) ||
				    nk->port[pd2.sidx] != uh.uh_sport)
					pf_change_icmp(pd2.src, &uh.uh_sport,
					    daddr, &nk->addr[pd2.sidx],
					    nk->port[pd2.sidx], &uh.uh_sum,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 1, pd2.af);

				if (PF_ANEQ(pd2.dst,
				    &nk->addr[pd2.didx], pd2.af) ||
				    nk->port[pd2.didx] != uh.uh_dport)
					pf_change_icmp(pd2.dst, &uh.uh_dport,
					    saddr, &nk->addr[pd2.didx],
					    nk->port[pd2.didx], &uh.uh_sum,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 1, pd2.af);

				switch (pd2.af) {
				case AF_INET:
					m_copyback(m, off, ICMP_MINLEN,
					    (caddr_t)pd->hdr.icmp);
					m_copyback(m, ipoff2, sizeof(h2),
					    (caddr_t)&h2);
					break;
				case AF_INET6:
					m_copyback(m, off,
					    sizeof(struct icmp6_hdr),
					    (caddr_t)pd->hdr.icmp6);
					m_copyback(m, ipoff2, sizeof(h2_6),
					    (caddr_t)&h2_6);
					break;
				}
				m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
			}
			break;
		}
		case IPPROTO_ICMP: {
			struct icmp iih;

			if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
			    NULL, reason, pd2.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: ICMP error message too short "
				    "(icmp)\n"));
				return (PF_DROP);
			}

			key.af = pd2.af;
			key.proto = IPPROTO_ICMP;
			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
			key.port[0] = key.port[1] = iih.icmp_id;
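			/*
			 * ICMP has no ports; echo states are keyed on the
			 * ICMP id instead, so both port slots carry it.
			 */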
			STATE_LOOKUP(kif, &key, direction, *state, pd);

			/* translate source/destination address, if necessary */
			if ((*state)->key[PF_SK_WIRE] !=
			    (*state)->key[PF_SK_STACK]) {
				struct pf_state_key *nk =
				    (*state)->key[pd->didx];

				if (PF_ANEQ(pd2.src,
				    &nk->addr[pd2.sidx], pd2.af) ||
				    nk->port[pd2.sidx] != iih.icmp_id)
					pf_change_icmp(pd2.src, &iih.icmp_id,
					    daddr, &nk->addr[pd2.sidx],
					    nk->port[pd2.sidx], NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, AF_INET);

				if (PF_ANEQ(pd2.dst,
				    &nk->addr[pd2.didx], pd2.af) ||
				    nk->port[pd2.didx] != iih.icmp_id)
					pf_change_icmp(pd2.dst, &iih.icmp_id,
					    saddr, &nk->addr[pd2.didx],
					    nk->port[pd2.didx], NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, AF_INET);

				m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
				m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
				m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
			}
			break;
		}
		case IPPROTO_ICMPV6: {
			struct icmp6_hdr iih;

			if (!pf_pull_hdr(m, off2, &iih,
			    sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: ICMP error message too short "
				    "(icmp6)\n"));
				return (PF_DROP);
			}

			key.af = pd2.af;
			key.proto = IPPROTO_ICMPV6;
			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
			key.port[0] = key.port[1] = iih.icmp6_id;

			STATE_LOOKUP(kif, &key, direction, *state, pd);

			/* translate source/destination address, if necessary */
			if ((*state)->key[PF_SK_WIRE] !=
			    (*state)->key[PF_SK_STACK]) {
				struct pf_state_key *nk =
				    (*state)->key[pd->didx];

				if (PF_ANEQ(pd2.src,
				    &nk->addr[pd2.sidx], pd2.af) ||
				    nk->port[pd2.sidx] != iih.icmp6_id)
					pf_change_icmp(pd2.src, &iih.icmp6_id,
					    daddr, &nk->addr[pd2.sidx],
					    nk->port[pd2.sidx], NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, AF_INET6);

				if (PF_ANEQ(pd2.dst,
				    &nk->addr[pd2.didx], pd2.af) ||
				    nk->port[pd2.didx] != iih.icmp6_id)
					pf_change_icmp(pd2.dst, &iih.icmp6_id,
					    saddr, &nk->addr[pd2.didx],
					    nk->port[pd2.didx], NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, AF_INET6);

				m_copyback(m, off, sizeof(struct icmp6_hdr),
				    (caddr_t)pd->hdr.icmp6);
				m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
				m_copyback(m, off2, sizeof(struct icmp6_hdr),
				    (caddr_t)&iih);
			}
			break;
		}
		default: {
			key.af = pd2.af;
			key.proto = pd2.proto;
			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
			key.port[0] = key.port[1] = 0;

			STATE_LOOKUP(kif, &key, direction, *state, pd);

			/* translate source/destination address, if necessary */
			if ((*state)->key[PF_SK_WIRE] !=
			    (*state)->key[PF_SK_STACK]) {
				struct pf_state_key *nk =
				    (*state)->key[pd->didx];

				if (PF_ANEQ(pd2.src,
				    &nk->addr[pd2.sidx], pd2.af))
					pf_change_icmp(pd2.src, NULL, daddr,
					    &nk->addr[pd2.sidx], 0, NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, pd2.af);

				if (PF_ANEQ(pd2.dst,
				    &nk->addr[pd2.didx], pd2.af))
					pf_change_icmp(pd2.dst, NULL, saddr,
					    &nk->addr[pd2.didx], 0, NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, pd2.af);

				switch (pd2.af) {
				case AF_INET:
					m_copyback(m, off, ICMP_MINLEN,
					    (caddr_t)pd->hdr.icmp);
					m_copyback(m, ipoff2, sizeof(h2),
					    (caddr_t)&h2);
					break;
				case AF_INET6:
					m_copyback(m, off,
					    sizeof(struct icmp6_hdr),
					    (caddr_t)pd->hdr.icmp6);
					m_copyback(m, ipoff2, sizeof(h2_6),
					    (caddr_t)&h2_6);
					break;
				}
			}
			break;
		}
		}
	}

	return (PF_PASS);
}
static int
pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
    struct mbuf *m, struct pf_pdesc *pd)
{
	struct pf_state_peer *src, *dst;
	struct pf_state_key_cmp key;

	bzero(&key, sizeof(key));
	key.af = pd->af;
	key.proto = pd->proto;
	if (direction == PF_IN) {
		PF_ACPY(&key.addr[0], pd->src, key.af);
		PF_ACPY(&key.addr[1], pd->dst, key.af);
		key.port[0] = key.port[1] = 0;
	} else {
		PF_ACPY(&key.addr[1], pd->src, key.af);
		PF_ACPY(&key.addr[0], pd->dst, key.af);
		key.port[1] = key.port[0] = 0;
	}

	STATE_LOOKUP(kif, &key, direction, *state, pd);

	if (direction == (*state)->direction) {
		src = &(*state)->src;
		dst = &(*state)->dst;
	} else {
		src = &(*state)->dst;
		dst = &(*state)->src;
	}
	/* update states */
	if (src->state < PFOTHERS_SINGLE)
		src->state = PFOTHERS_SINGLE;
	if (dst->state == PFOTHERS_SINGLE)
		dst->state = PFOTHERS_MULTIPLE;

	/* update expire time */
	(*state)->expire = time_uptime;
	if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
		(*state)->timeout = PFTM_OTHER_MULTIPLE;
	else
		(*state)->timeout = PFTM_OTHER_SINGLE;

	/* translate source/destination address, if necessary */
	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
		struct pf_state_key *nk = (*state)->key[pd->didx];

		KASSERT(nk, ("%s: nk is null", __func__));
		KASSERT(pd, ("%s: pd is null", __func__));
		KASSERT(pd->src, ("%s: pd->src is null", __func__));
		KASSERT(pd->dst, ("%s: pd->dst is null", __func__));

		switch (pd->af) {
		case AF_INET:
			if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
				pf_change_a(&pd->src->v4.s_addr,
				    pd->ip_sum,
				    nk->addr[pd->sidx].v4.s_addr,
				    0);

			if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
				pf_change_a(&pd->dst->v4.s_addr,
				    pd->ip_sum,
				    nk->addr[pd->didx].v4.s_addr,
				    0);
			break;
		case AF_INET6:
			if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af))
				PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);

			if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af))
				PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
			break;
		}
	}
	return (PF_PASS);
}

/*
 * ipoff and off are measured from the start of the mbuf chain.
 * h must be at "ipoff" on the mbuf chain.
 */
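/*
 * Typical use, as a sketch mirroring the call sites below: pull a
 * transport header at "off" into a local copy, and let pf_pull_hdr()
 * pick the action/reason if the packet is too short:
 *
 *	struct udphdr uh;
 *
 *	if (!pf_pull_hdr(m, off, &uh, sizeof(uh), &action, &reason, af))
 *		return (action);
 */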
void *
pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
    u_short *actionp, u_short *reasonp, sa_family_t af)
{
	switch (af) {
	case AF_INET: {
		struct ip *h = mtod(m, struct ip *);
		u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

		if (fragoff) {
			if (fragoff >= len)
				ACTION_SET(actionp, PF_PASS);
			else {
				ACTION_SET(actionp, PF_DROP);
				REASON_SET(reasonp, PFRES_FRAG);
			}
			return (NULL);
		}
		if (m->m_pkthdr.len < off + len ||
		    ntohs(h->ip_len) < off + len) {
			ACTION_SET(actionp, PF_DROP);
			REASON_SET(reasonp, PFRES_SHORT);
			return (NULL);
		}
		break;
	}
	case AF_INET6: {
		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);

		if (m->m_pkthdr.len < off + len ||
		    (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
		    (unsigned)(off + len)) {
			ACTION_SET(actionp, PF_DROP);
			REASON_SET(reasonp, PFRES_SHORT);
			return (NULL);
		}
		break;
	}
	}
	m_copydata(m, off, len, p);
	return (p);
}

static int
pf_routable_oldmpath(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif,
    int rtableid)
{
	struct radix_node_head *rnh;
	struct sockaddr_in *dst;
	struct sockaddr_in6 *dst6;
	struct route_in6 ro;
	struct radix_node *rn;
	struct rtentry *rt;
	struct ifnet *ifp;
	int ret = 1, check_mpath = 0;

	/* XXX: stick to table 0 for now */
	rnh = rt_tables_get_rnh(0, af);
	if (rnh != NULL && rn_mpath_capable(rnh))
		check_mpath = 1;
	bzero(&ro, sizeof(ro));
	switch (af) {
	case AF_INET:
		dst = satosin(&ro.ro_dst);
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof(*dst);
		dst->sin_addr = addr->v4;
		break;
	case AF_INET6:
		/*
		 * Skip check for addresses with embedded interface scope,
		 * as they would always match anyway.
		 */
		if (IN6_IS_SCOPE_EMBED(&addr->v6))
			goto out;
		dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
		dst6->sin6_family = AF_INET6;
		dst6->sin6_len = sizeof(*dst6);
		dst6->sin6_addr = addr->v6;
		break;
	}

	/* Skip checks for ipsec interfaces */
	if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
		goto out;

	switch (af) {
	case AF_INET6:
		in6_rtalloc_ign(&ro, 0, rtableid);
		break;
	case AF_INET:
		in_rtalloc_ign((struct route *)&ro, 0, rtableid);
		break;
	}

	if (ro.ro_rt != NULL) {
		/* No interface given, this is a no-route check */
		if (kif == NULL)
			goto out;

		if (kif->pfik_ifp == NULL) {
			ret = 0;
			goto out;
		}

		/* Perform uRPF check if passed input interface */
		ret = 0;
		rn = (struct radix_node *)ro.ro_rt;
		do {
			rt = (struct rtentry *)rn;
			ifp = rt->rt_ifp;
			if (kif->pfik_ifp == ifp)
				ret = 1;
			rn = rn_mpath_next(rn);
		} while (check_mpath == 1 && rn != NULL && ret == 0);
	} else
		ret = 0;
out:
	if (ro.ro_rt != NULL)
		RTFREE(ro.ro_rt);
	return (ret);
}

int
pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif,
    int rtableid)
{
	struct nhop4_basic nh4;
	struct nhop6_basic nh6;
	struct ifnet *ifp;
	struct radix_node_head *rnh;

	/* XXX: stick to table 0 for now */
	rnh = rt_tables_get_rnh(0, af);
	if (rnh != NULL && rn_mpath_capable(rnh))
		return (pf_routable_oldmpath(addr, af, kif, rtableid));
	/*
	 * Skip check for addresses with embedded interface scope,
	 * as they would always match anyway.
	 */
	if (af == AF_INET6 && IN6_IS_SCOPE_EMBED(&addr->v6))
		return (1);

	if (af != AF_INET && af != AF_INET6)
		return (0);

	/* Skip checks for ipsec interfaces */
	if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
		return (1);

	ifp = NULL;
	switch (af) {
	case AF_INET6:
		if (fib6_lookup_nh_basic(rtableid, &addr->v6, 0, 0, 0, &nh6) != 0)
			return (0);
		ifp = nh6.nh_ifp;
		break;
	case AF_INET:
		if (fib4_lookup_nh_basic(rtableid, addr->v4, 0, 0, &nh4) != 0)
			return (0);
		ifp = nh4.nh_ifp;
		break;
	}

	/* No interface given, this is a no-route check */
	if (kif == NULL)
		return (1);
	if (kif->pfik_ifp == NULL)
		return (1);

	/* Perform uRPF check if passed input interface */
	if (kif->pfik_ifp == ifp)
		return (1);

	return (0);
}

static void
pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
    struct pf_state *s, struct pf_pdesc *pd, struct inpcb *inp)
{
	struct mbuf *m0, *m1;
	struct sockaddr_in dst;
	struct ip *ip;
	struct ifnet *ifp = NULL;
	struct pf_addr naddr;
	struct pf_src_node *sn = NULL;
	int error = 0;
	uint16_t ip_len, ip_off;

	KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
	KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
	    __func__));

	if ((pd->pf_mtag == NULL &&
	    ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
	    pd->pf_mtag->routed++ > 3) {
		m0 = *m;
		*m = NULL;
		goto bad_locked;
	}

	if (r->rt == PF_DUPTO) {
		if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
			if (s)
				PF_STATE_UNLOCK(s);
			return;
		}
	} else {
		if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
			if (s)
				PF_STATE_UNLOCK(s);
			return;
		}
		m0 = *m;
	}

	ip = mtod(m0, struct ip *);

	bzero(&dst, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(dst);
	dst.sin_addr = ip->ip_dst;

	if (TAILQ_EMPTY(&r->rpool.list)) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
		goto bad_locked;
	}
	if (s == NULL) {
		pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
		    &naddr, NULL, &sn);
		if (!PF_AZERO(&naddr, AF_INET))
			dst.sin_addr.s_addr = naddr.v4.s_addr;
		ifp = r->rpool.cur->kif ?
		    r->rpool.cur->kif->pfik_ifp : NULL;
	} else {
		if (!PF_AZERO(&s->rt_addr, AF_INET))
			dst.sin_addr.s_addr =
			    s->rt_addr.v4.s_addr;
		ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
		PF_STATE_UNLOCK(s);
	}
	if (ifp == NULL)
		goto bad;

	if (oifp != ifp) {
		if (pf_test(PF_OUT, 0, ifp, &m0, inp) != PF_PASS)
			goto bad;
		else if (m0 == NULL)
			goto done;
		if (m0->m_len < sizeof(struct ip)) {
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("%s: m0->m_len < sizeof(struct ip)\n", __func__));
			goto bad;
		}
		ip = mtod(m0, struct ip *);
	}

	if (ifp->if_flags & IFF_LOOPBACK)
		m0->m_flags |= M_SKIP_FIREWALL;

	ip_len = ntohs(ip->ip_len);
	ip_off = ntohs(ip->ip_off);

	/* Copied from FreeBSD 10.0-CURRENT ip_output. */
	m0->m_pkthdr.csum_flags |= CSUM_IP;
	if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) {
		in_delayed_cksum(m0);
		m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	if (m0->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) {
		sctp_delayed_cksum(m0, (uint32_t)(ip->ip_hl << 2));
		m0->m_pkthdr.csum_flags &= ~CSUM_SCTP;
	}

	/*
	 * If small enough for interface, or the interface will take
	 * care of the fragmentation for us, we can just send directly.
	 */
	if (ip_len <= ifp->if_mtu ||
	    (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0) {
		ip->ip_sum = 0;
		if (m0->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) {
			ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
			m0->m_pkthdr.csum_flags &= ~CSUM_IP;
		}
		m_clrprotoflags(m0);	/* Avoid confusing lower layers. */
		error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
		goto done;
	}

	/* Balk when DF bit is set or the interface didn't support TSO. */
	if ((ip_off & IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
		error = EMSGSIZE;
		KMOD_IPSTAT_INC(ips_cantfrag);
		if (r->rt != PF_DUPTO) {
			icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
			    ifp->if_mtu);
			goto done;
		} else
			goto bad;
	}

	error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist);
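	/*
	 * ip_fragment() leaves the fragments linked through m_nextpkt;
	 * hand them to the interface one at a time.
	 */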
	if (error)
		goto bad;

	for (; m0; m0 = m1) {
		m1 = m0->m_nextpkt;
		m0->m_nextpkt = NULL;
		if (error == 0) {
			m_clrprotoflags(m0);
			error = (*ifp->if_output)(ifp, m0, sintosa(&dst),
			    NULL);
		} else
			m_freem(m0);
	}

	if (error == 0)
		KMOD_IPSTAT_INC(ips_fragmented);
done:
	if (r->rt != PF_DUPTO)
		*m = NULL;
	return;

bad_locked:
	if (s)
		PF_STATE_UNLOCK(s);
bad:
	m_freem(m0);
	goto done;
}

static void
pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
    struct pf_state *s, struct pf_pdesc *pd, struct inpcb *inp)
{
	struct mbuf *m0;
	struct sockaddr_in6 dst;
	struct ip6_hdr *ip6;
	struct ifnet *ifp = NULL;
	struct pf_addr naddr;
	struct pf_src_node *sn = NULL;

	KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
	KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
	    __func__));

	if ((pd->pf_mtag == NULL &&
	    ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
	    pd->pf_mtag->routed++ > 3) {
		m0 = *m;
		*m = NULL;
		goto bad_locked;
	}

	if (r->rt == PF_DUPTO) {
		if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
			if (s)
				PF_STATE_UNLOCK(s);
			return;
		}
	} else {
		if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
			if (s)
				PF_STATE_UNLOCK(s);
			return;
		}
		m0 = *m;
	}

	ip6 = mtod(m0, struct ip6_hdr *);

	bzero(&dst, sizeof(dst));
	dst.sin6_family = AF_INET6;
	dst.sin6_len = sizeof(dst);
	dst.sin6_addr = ip6->ip6_dst;

	if (TAILQ_EMPTY(&r->rpool.list)) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
		goto bad_locked;
	}
	if (s == NULL) {
		pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
		    &naddr, NULL, &sn);
		if (!PF_AZERO(&naddr, AF_INET6))
			PF_ACPY((struct pf_addr *)&dst.sin6_addr,
			    &naddr, AF_INET6);
		ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
	} else {
		if (!PF_AZERO(&s->rt_addr, AF_INET6))
			PF_ACPY((struct pf_addr *)&dst.sin6_addr,
			    &s->rt_addr, AF_INET6);
		ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
		PF_STATE_UNLOCK(s);
	}
	if (ifp == NULL)
		goto bad;

	if (oifp != ifp) {
		if (pf_test6(PF_OUT, PFIL_FWD, ifp, &m0, inp) != PF_PASS)
			goto bad;
		else if (m0 == NULL)
			goto done;
		if (m0->m_len < sizeof(struct ip6_hdr)) {
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("%s: m0->m_len < sizeof(struct ip6_hdr)\n",
			    __func__));
			goto bad;
		}
		ip6 = mtod(m0, struct ip6_hdr *);
	}

	if (ifp->if_flags & IFF_LOOPBACK)
		m0->m_flags |= M_SKIP_FIREWALL;

	if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA_IPV6 &
	    ~ifp->if_hwassist) {
		uint32_t plen = m0->m_pkthdr.len - sizeof(*ip6);
		in6_delayed_cksum(m0, plen, sizeof(struct ip6_hdr));
		m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA_IPV6;
	}

	/*
	 * If the packet is too large for the outgoing interface,
	 * send back an icmp6 error.
	 */
	if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr))
		dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
	if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu)
		nd6_output_ifp(ifp, ifp, m0, &dst, NULL);
	else {
		in6_ifstat_inc(ifp, ifs6_in_toobig);
		if (r->rt != PF_DUPTO)
			icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
		else
			goto bad;
	}

done:
	if (r->rt != PF_DUPTO)
		*m = NULL;
	return;

bad_locked:
	if (s)
		PF_STATE_UNLOCK(s);
bad:
	m_freem(m0);
	goto done;
}

/*
 * FreeBSD supports cksum offloads for the following drivers.
 *  em(4), fxp(4), lge(4), ndis(4), nge(4), re(4), ti(4), txp(4), xl(4)
 *
 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR :
 *  The network driver performed the cksum including the pseudo header;
 *  we need only verify the result.
 * CSUM_DATA_VALID :
 *  The network driver performed the cksum, but an additional pseudo
 *  header cksum computation with the partial csum_data is still needed
 *  (i.e. no hardware support for the pseudo header, for instance
 *  hme(4), sk(4) and possibly gem(4)).
 *
 * After validating the cksum of the packet, set both CSUM_DATA_VALID
 * and CSUM_PSEUDO_HDR in order to avoid recomputation of the cksum in
 * upper layers.  Also, set csum_data to 0xffff to force cksum
 * validation to succeed.
 */
static int
pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p,
    sa_family_t af)
{
	u_int16_t sum = 0;
	int hw_assist = 0;
	struct ip *ip;

	if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
		return (1);
	if (m->m_pkthdr.len < off + len)
		return (1);

	switch (p) {
	case IPPROTO_TCP:
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
				sum = m->m_pkthdr.csum_data;
			} else {
				ip = mtod(m, struct ip *);
				sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htonl((u_short)len +
				    m->m_pkthdr.csum_data + IPPROTO_TCP));
			}
			sum ^= 0xffff;
			++hw_assist;
		}
		break;
	case IPPROTO_UDP:
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
				sum = m->m_pkthdr.csum_data;
			} else {
				ip = mtod(m, struct ip *);
				sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htonl((u_short)len +
				    m->m_pkthdr.csum_data + IPPROTO_UDP));
			}
			sum ^= 0xffff;
			++hw_assist;
		}
		break;
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		break;
	default:
		return (1);
	}

	if (!hw_assist) {
		switch (af) {
		case AF_INET:
			if (p == IPPROTO_ICMP) {
				if (m->m_len < off)
					return (1);
				m->m_data += off;
				m->m_len -= off;
				sum = in_cksum(m, len);
				m->m_data -= off;
				m->m_len += off;
			} else {
				if (m->m_len < sizeof(struct ip))
					return (1);
				sum = in4_cksum(m, p, off, len);
			}
			break;
		case AF_INET6:
			if (m->m_len < sizeof(struct ip6_hdr))
				return (1);
			sum = in6_cksum(m, p, off, len);
			break;
		}
	}
	if (sum) {
		switch (p) {
		case IPPROTO_TCP:
			KMOD_TCPSTAT_INC(tcps_rcvbadsum);
			break;
		case IPPROTO_UDP:
			KMOD_UDPSTAT_INC(udps_badsum);
			break;
		case IPPROTO_ICMP:
			KMOD_ICMPSTAT_INC(icps_checksum);
			break;
		case IPPROTO_ICMPV6:
			KMOD_ICMP6STAT_INC(icp6s_checksum);
			break;
		}
		return (1);
	}
	if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
		m->m_pkthdr.csum_flags |=
		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
		m->m_pkthdr.csum_data = 0xffff;
	}
	return (0);
}
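/*
 * A note on the 0xffff above (a sketch of the rationale, not authoritative):
 * upper layers such as tcp_input() fold the stored csum_data with ^ 0xffff
 * when CSUM_DATA_VALID | CSUM_PSEUDO_HDR are set, so 0xffff yields zero and
 * the packet is treated as already verified.
 */
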
int
pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
    struct inpcb *inp)
{
	struct pfi_kif *kif;
	u_short action, reason = 0, log = 0;
	struct mbuf *m = *m0;
	struct ip *h = NULL;
	struct m_tag *ipfwtag;
	struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
	struct pf_state *s = NULL;
	struct pf_ruleset *ruleset = NULL;
	struct pf_pdesc pd;
	int off, dirndx, pqid = 0;

	PF_RULES_RLOCK_TRACKER;

	if (!V_pf_status.running)
		return (PF_PASS);

	memset(&pd, 0, sizeof(pd));

	kif = (struct pfi_kif *)ifp->if_pf_kif;
	if (kif == NULL) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
		return (PF_DROP);
	}
	if (kif->pfik_flags & PFI_IFLAG_SKIP)
		return (PF_PASS);

	if (m->m_flags & M_SKIP_FIREWALL)
		return (PF_PASS);

	pd.pf_mtag = pf_find_mtag(m);

	PF_RULES_RLOCK();

	if (ip_divert_ptr != NULL &&
	    ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
		struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1);
		if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) {
			if (pd.pf_mtag == NULL &&
			    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
				action = PF_DROP;
				goto done;
			}
			pd.pf_mtag->flags |= PF_PACKET_LOOPED;
			m_tag_delete(m, ipfwtag);
		}
		if (pd.pf_mtag && pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) {
			m->m_flags |= M_FASTFWD_OURS;
			pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
		}
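		/*
		 * A packet handed back by ipfw's divert(4) machinery re-enters
		 * pf carrying an MTAG_IPFW_RULE tag; it is marked
		 * PF_PACKET_LOOPED above so that it is not diverted a second
		 * time further down.
		 */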
	} else if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
		/* We do IP header normalization and packet reassembly here */
		action = PF_DROP;
		goto done;
	}
	m = *m0;	/* pf_normalize messes with m0 */
	h = mtod(m, struct ip *);

	off = h->ip_hl << 2;
	if (off < (int)sizeof(struct ip)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_SHORT);
		log = 1;
		goto done;
	}

	pd.src = (struct pf_addr *)&h->ip_src;
	pd.dst = (struct pf_addr *)&h->ip_dst;
	pd.sport = pd.dport = NULL;
	pd.ip_sum = &h->ip_sum;
	pd.proto_sum = NULL;
	pd.proto = h->ip_p;
	pd.dir = dir;
	pd.sidx = (dir == PF_IN) ? 0 : 1;
	pd.didx = (dir == PF_IN) ? 1 : 0;
	pd.af = AF_INET;
	pd.tos = h->ip_tos & ~IPTOS_ECN_MASK;
	pd.tot_len = ntohs(h->ip_len);

	/* handle fragments that didn't get reassembled by normalization */
	if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
		action = pf_test_fragment(&r, dir, kif, m, h,
		    &pd, &a, &ruleset);
		goto done;
	}
	switch (h->ip_p) {
	case IPPROTO_TCP: {
		struct tcphdr th;

		pd.hdr.tcp = &th;
		if (!pf_pull_hdr(m, off, &th, sizeof(th),
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
		pd.p_len = pd.tot_len - off - (th.th_off << 2);
		if ((th.th_flags & TH_ACK) && pd.p_len == 0)
			pqid = 1;
		action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
		if (action == PF_DROP)
			goto done;
		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
		    &reason);
		if (action == PF_PASS) {
			if (pfsync_update_state_ptr != NULL)
				pfsync_update_state_ptr(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
			    &a, &ruleset, inp);
		break;
	}
	case IPPROTO_UDP: {
		struct udphdr uh;

		pd.hdr.udp = &uh;
		if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
		if (uh.uh_dport == 0 ||
		    ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
		    ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_SHORT);
			goto done;
		}
		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
		if (action == PF_PASS) {
			if (pfsync_update_state_ptr != NULL)
				pfsync_update_state_ptr(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
			    &a, &ruleset, inp);
		break;
	}
	case IPPROTO_ICMP: {
		struct icmp ih;

		pd.hdr.icmp = &ih;
		if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
		action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
		    &reason);
		if (action == PF_PASS) {
			if (pfsync_update_state_ptr != NULL)
				pfsync_update_state_ptr(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
			    &a, &ruleset, inp);
		break;
	}
	case IPPROTO_ICMPV6: {
		action = PF_DROP;
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pf: dropping IPv4 packet with ICMPv6 payload\n"));
		goto done;
	}
	default:
		action = pf_test_state_other(&s, dir, kif, m, &pd);
		if (action == PF_PASS) {
			if (pfsync_update_state_ptr != NULL)
				pfsync_update_state_ptr(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
			    &a, &ruleset, inp);
		break;
	}

done:
	if (action == PF_PASS && h->ip_hl > 5 &&
	    !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_IPOPTIONS);
		log = r->log;
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pf: dropping packet with ip options\n"));
	}

	if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_MEMORY);
	}
	if (r->rtableid >= 0)
		M_SETFIB(m, r->rtableid);

	if (r->scrub_flags & PFSTATE_SETPRIO) {
		if (pd.tos & IPTOS_LOWDELAY)
			pqid = 1;
		if (pf_ieee8021q_setpcp(m, r->set_prio[pqid])) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_MEMORY);
			log = 1;
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pf: failed to allocate 802.1q mtag\n"));
		}
	}
	if (action == PF_PASS && r->qid) {
		if (pd.pf_mtag == NULL &&
		    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_MEMORY);
		} else {
			if (s != NULL)
				pd.pf_mtag->qid_hash = pf_state_hash(s);
			if (pqid || (pd.tos & IPTOS_LOWDELAY))
				pd.pf_mtag->qid = r->pqid;
			else
				pd.pf_mtag->qid = r->qid;
			/* Add hints for ecn. */
			pd.pf_mtag->hdr = h;
		}
	}

	/*
	 * connections redirected to loopback should not match sockets
	 * bound specifically to loopback due to security implications,
	 * see tcp_input() and in_pcblookup_listen().
	 */
	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
	    (s->nat_rule.ptr->action == PF_RDR ||
	    s->nat_rule.ptr->action == PF_BINAT) &&
	    (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
		m->m_flags |= M_SKIP_FIREWALL;

	if (action == PF_PASS && r->divert.port && ip_divert_ptr != NULL &&
	    !PACKET_LOOPED(&pd)) {

		ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0,
		    sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
		if (ipfwtag != NULL) {
			((struct ipfw_rule_ref *)(ipfwtag+1))->info =
			    ntohs(r->divert.port);
			((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir;

			if (s)
				PF_STATE_UNLOCK(s);

			m_tag_prepend(m, ipfwtag);
			if (m->m_flags & M_FASTFWD_OURS) {
				if (pd.pf_mtag == NULL &&
				    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
					action = PF_DROP;
					REASON_SET(&reason, PFRES_MEMORY);
					log = 1;
					DPFPRINTF(PF_DEBUG_MISC,
					    ("pf: failed to allocate tag\n"));
				} else {
					pd.pf_mtag->flags |=
					    PF_FASTFWD_OURS_PRESENT;
					m->m_flags &= ~M_FASTFWD_OURS;
				}
			}
			ip_divert_ptr(*m0, dir == PF_IN ? DIR_IN : DIR_OUT);
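			/*
			 * The packet now belongs to the divert(4) socket; if
			 * the application reinjects it, it re-enters pf via
			 * ip_input()/ip_output() with the loop marker set.
			 */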
			*m0 = NULL;

			return (action);
		} else {
			/* XXX: ipfw has the same behaviour! */
			action = PF_DROP;
			REASON_SET(&reason, PFRES_MEMORY);
			log = 1;
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pf: failed to allocate divert tag\n"));
		}
	}

	if (log) {
		struct pf_rule *lr;

		if (s != NULL && s->nat_rule.ptr != NULL &&
		    s->nat_rule.ptr->log & PF_LOG_ALL)
			lr = s->nat_rule.ptr;
		else
			lr = r;
		PFLOG_PACKET(kif, m, AF_INET, dir, reason, lr, a, ruleset, &pd,
		    (s == NULL));
	}

	kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
	kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;

	if (action == PF_PASS || r->action == PF_DROP) {
		dirndx = (dir == PF_OUT);
		r->packets[dirndx]++;
		r->bytes[dirndx] += pd.tot_len;
		if (a != NULL) {
			a->packets[dirndx]++;
			a->bytes[dirndx] += pd.tot_len;
		}
		if (s != NULL) {
			if (s->nat_rule.ptr != NULL) {
				s->nat_rule.ptr->packets[dirndx]++;
				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
			}
			if (s->src_node != NULL) {
				s->src_node->packets[dirndx]++;
				s->src_node->bytes[dirndx] += pd.tot_len;
			}
			if (s->nat_src_node != NULL) {
				s->nat_src_node->packets[dirndx]++;
				s->nat_src_node->bytes[dirndx] += pd.tot_len;
			}
			dirndx = (dir == s->direction) ? 0 : 1;
			s->packets[dirndx]++;
			s->bytes[dirndx] += pd.tot_len;
		}
		tr = r;
		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
		if (nr != NULL && r == &V_pf_default_rule)
			tr = nr;
		if (tr->src.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->src.addr.p.tbl,
			    (s == NULL) ? pd.src :
			    &s->key[(s->direction == PF_IN)]->
				addr[(s->direction == PF_OUT)],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->src.neg);
		if (tr->dst.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->dst.addr.p.tbl,
			    (s == NULL) ? pd.dst :
			    &s->key[(s->direction == PF_IN)]->
				addr[(s->direction == PF_IN)],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->dst.neg);
	}

	switch (action) {
	case PF_SYNPROXY_DROP:
		PF_RULES_RUNLOCK();
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
		break;
	default:
		/* pf_route() returns unlocked. */
		if (r->rt) {
			pf_route(m0, r, dir, kif->pfik_ifp, s, &pd, inp);
			return (action);
		}
		PF_RULES_RUNLOCK();
		break;
	}
	if (s)
		PF_STATE_UNLOCK(s);

	return (action);
}

int
pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
    struct inpcb *inp)
{
	struct pfi_kif *kif;
	u_short action, reason = 0, log = 0;
	struct mbuf *m = *m0, *n = NULL;
	struct m_tag *mtag;
	struct ip6_hdr *h = NULL;
	struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
	struct pf_state *s = NULL;
	struct pf_ruleset *ruleset = NULL;
	struct pf_pdesc pd;
	int off, terminal = 0, dirndx, rh_cnt = 0, pqid = 0;

	PF_RULES_RLOCK_TRACKER;

	if (!V_pf_status.running)
		return (PF_PASS);

	memset(&pd, 0, sizeof(pd));
	pd.pf_mtag = pf_find_mtag(m);

	if (pd.pf_mtag && pd.pf_mtag->flags & PF_TAG_GENERATED)
		return (PF_PASS);

	kif = (struct pfi_kif *)ifp->if_pf_kif;
	if (kif == NULL) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
		return (PF_DROP);
	}
	if (kif->pfik_flags & PFI_IFLAG_SKIP)
		return (PF_PASS);

	if (m->m_flags & M_SKIP_FIREWALL)
		return (PF_PASS);

	PF_RULES_RLOCK();

	/* We do IP header normalization and packet reassembly here */
	if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
		action = PF_DROP;
		goto done;
	}
	m = *m0;	/* pf_normalize messes with m0 */
	h = mtod(m, struct ip6_hdr *);

	/*
	 * We do not support jumbograms yet.  If we keep going, a zero
	 * ip6_plen will do something bad, so drop the packet for now.
	 */
	if (htons(h->ip6_plen) == 0) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_NORM);	/* XXX */
		goto done;
	}

	pd.src = (struct pf_addr *)&h->ip6_src;
	pd.dst = (struct pf_addr *)&h->ip6_dst;
	pd.sport = pd.dport = NULL;
	pd.ip_sum = NULL;
	pd.proto_sum = NULL;
	pd.dir = dir;
	pd.sidx = (dir == PF_IN) ? 0 : 1;
	pd.didx = (dir == PF_IN) ? 1 : 0;
	pd.af = AF_INET6;
	pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);

	off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
	pd.proto = h->ip6_nxt;
	do {
		switch (pd.proto) {
		case IPPROTO_FRAGMENT:
			action = pf_test_fragment(&r, dir, kif, m, h,
			    &pd, &a, &ruleset);
			if (action == PF_DROP)
				REASON_SET(&reason, PFRES_FRAG);
			goto done;
		case IPPROTO_ROUTING: {
			struct ip6_rthdr rthdr;

			if (rh_cnt++) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 more than one rthdr\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_IPOPTIONS);
				log = 1;
				goto done;
			}
			if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
			    &reason, pd.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 short rthdr\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_SHORT);
				log = 1;
				goto done;
			}
			if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 rthdr0\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_IPOPTIONS);
				log = 1;
				goto done;
			}
			/* FALLTHROUGH */
		}
		case IPPROTO_AH:
		case IPPROTO_HOPOPTS:
		case IPPROTO_DSTOPTS: {
			/* get next header and header length */
			struct ip6_ext opt6;

			if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
			    NULL, &reason, pd.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 short opt\n"));
				action = PF_DROP;
				log = 1;
				goto done;
			}
			if (pd.proto == IPPROTO_AH)
				off += (opt6.ip6e_len + 2) * 4;
			else
				off += (opt6.ip6e_len + 1) * 8;
			pd.proto = opt6.ip6e_nxt;
			/* go to the next header */
			break;
		}
		default:
			terminal++;
			break;
		}
	} while (!terminal);
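	/*
	 * The extension-header walk above leaves pd.proto holding the final
	 * transport protocol and off pointing just past the last extension
	 * header.
	 */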
	/* if there's no routing header, use unmodified mbuf for checksumming */

	switch (pd.proto) {
	case IPPROTO_TCP: {
		struct tcphdr th;

		pd.hdr.tcp = &th;
		if (!pf_pull_hdr(m, off, &th, sizeof(th),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		pd.p_len = pd.tot_len - off - (th.th_off << 2);
		action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
		if (action == PF_DROP)
			goto done;
		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
		    &reason);
		if (action == PF_PASS) {
			if (pfsync_update_state_ptr != NULL)
				pfsync_update_state_ptr(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
			    &a, &ruleset, inp);
		break;
	}
	case IPPROTO_UDP: {
		struct udphdr uh;

		pd.hdr.udp = &uh;
		if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		if (uh.uh_dport == 0 ||
		    ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
		    ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_SHORT);
			goto done;
		}
		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
		if (action == PF_PASS) {
			if (pfsync_update_state_ptr != NULL)
				pfsync_update_state_ptr(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
			    &a, &ruleset, inp);
		break;
	}
	case IPPROTO_ICMP: {
		action = PF_DROP;
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pf: dropping IPv6 packet with ICMPv4 payload\n"));
		goto done;
	}
	case IPPROTO_ICMPV6: {
		struct icmp6_hdr ih;

		pd.hdr.icmp6 = &ih;
		if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		action = pf_test_state_icmp(&s, dir, kif,
		    m, off, h, &pd, &reason);
		if (action == PF_PASS) {
			if (pfsync_update_state_ptr != NULL)
				pfsync_update_state_ptr(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
			    &a, &ruleset, inp);
		break;
	}
	default:
		action = pf_test_state_other(&s, dir, kif, m, &pd);
		if (action == PF_PASS) {
			if (pfsync_update_state_ptr != NULL)
				pfsync_update_state_ptr(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
			    &a, &ruleset, inp);
		break;
	}
done:
	/* handle dangerous IPv6 extension headers. */
	if (action == PF_PASS && rh_cnt &&
	    !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_IPOPTIONS);
		log = r->log;
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pf: dropping packet with dangerous v6 headers\n"));
	}

	if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_MEMORY);
	}
	if (r->rtableid >= 0)
		M_SETFIB(m, r->rtableid);

	if (r->scrub_flags & PFSTATE_SETPRIO) {
		if (pd.tos & IPTOS_LOWDELAY)
			pqid = 1;
		if (pf_ieee8021q_setpcp(m, r->set_prio[pqid])) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_MEMORY);
			log = 1;
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pf: failed to allocate 802.1q mtag\n"));
		}
	}

	if (action == PF_PASS && r->qid) {
		if (pd.pf_mtag == NULL &&
		    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_MEMORY);
		} else {
			if (s != NULL)
				pd.pf_mtag->qid_hash = pf_state_hash(s);
			if (pd.tos & IPTOS_LOWDELAY)
				pd.pf_mtag->qid = r->pqid;
			else
				pd.pf_mtag->qid = r->qid;
			/* Add hints for ecn. */
			pd.pf_mtag->hdr = h;
		}
	}

	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
	    (s->nat_rule.ptr->action == PF_RDR ||
	    s->nat_rule.ptr->action == PF_BINAT) &&
	    IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
		m->m_flags |= M_SKIP_FIREWALL;

	/* XXX: Anybody working on it?! */
	if (r->divert.port)
		printf("pf: divert(9) is not supported for IPv6\n");
	if (log) {
		struct pf_rule *lr;

		if (s != NULL && s->nat_rule.ptr != NULL &&
		    s->nat_rule.ptr->log & PF_LOG_ALL)
			lr = s->nat_rule.ptr;
		else
			lr = r;
		PFLOG_PACKET(kif, m, AF_INET6, dir, reason, lr, a, ruleset,
		    &pd, (s == NULL));
	}

	kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
	kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;

	if (action == PF_PASS || r->action == PF_DROP) {
		dirndx = (dir == PF_OUT);
		r->packets[dirndx]++;
		r->bytes[dirndx] += pd.tot_len;
		if (a != NULL) {
			a->packets[dirndx]++;
			a->bytes[dirndx] += pd.tot_len;
		}
		if (s != NULL) {
			if (s->nat_rule.ptr != NULL) {
				s->nat_rule.ptr->packets[dirndx]++;
				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
			}
			if (s->src_node != NULL) {
				s->src_node->packets[dirndx]++;
				s->src_node->bytes[dirndx] += pd.tot_len;
			}
			if (s->nat_src_node != NULL) {
				s->nat_src_node->packets[dirndx]++;
				s->nat_src_node->bytes[dirndx] += pd.tot_len;
			}
			dirndx = (dir == s->direction) ? 0 : 1;
			s->packets[dirndx]++;
			s->bytes[dirndx] += pd.tot_len;
		}
		tr = r;
		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
		if (nr != NULL && r == &V_pf_default_rule)
			tr = nr;
		if (tr->src.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->src.addr.p.tbl,
			    (s == NULL) ? pd.src :
			    &s->key[(s->direction == PF_IN)]->addr[0],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->src.neg);
		if (tr->dst.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->dst.addr.p.tbl,
			    (s == NULL) ? pd.dst :
			    &s->key[(s->direction == PF_IN)]->addr[1],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->dst.neg);
	}

	switch (action) {
	case PF_SYNPROXY_DROP:
		PF_RULES_RUNLOCK();
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
		break;
	default:
		/* pf_route6() returns unlocked. */
		if (r->rt) {
			pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd, inp);
			return (action);
		}
		PF_RULES_RUNLOCK();
		break;
	}

	if (s)
		PF_STATE_UNLOCK(s);

	/* If reassembled packet passed, create new fragments. */
	if (action == PF_PASS && *m0 && (pflags & PFIL_FWD) &&
	    (mtag = m_tag_find(m, PF_REASSEMBLED, NULL)) != NULL)
		action = pf_refragment6(ifp, m0, mtag);

	return (action);
}