/*-
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2008 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/md5.h>
#include <sys/random.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/radix_mpath.h>
#include <net/vnet.h>

#include <net/pfvar.h>
#include <net/if_pflog.h>
#include <net/if_pfsync.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <netpfil/ipfw/ip_fw_private.h> /* XXX: only for DIR_IN/DIR_OUT */

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif /* INET6 */

#include <machine/in_cksum.h>
#include <security/mac/mac_framework.h>
#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x

VNET_DEFINE(struct pf_altqqueue,	 pf_altqs[2]);
VNET_DEFINE(struct pf_palist,		 pf_pabuf);
VNET_DEFINE(struct pf_altqqueue *,	 pf_altqs_active);
VNET_DEFINE(struct pf_altqqueue *,	 pf_altqs_inactive);
VNET_DEFINE(struct pf_kstatus,		 pf_status);

VNET_DEFINE(u_int32_t,			 ticket_altqs_active);
VNET_DEFINE(u_int32_t,			 ticket_altqs_inactive);
VNET_DEFINE(int,			 altqs_inactive_open);
VNET_DEFINE(u_int32_t,			 ticket_pabuf);

VNET_DEFINE(MD5_CTX,			 pf_tcp_secret_ctx);
#define	V_pf_tcp_secret_ctx		 VNET(pf_tcp_secret_ctx)
VNET_DEFINE(u_char,			 pf_tcp_secret[16]);
#define	V_pf_tcp_secret			 VNET(pf_tcp_secret)
VNET_DEFINE(int,			 pf_tcp_secret_init);
#define	V_pf_tcp_secret_init		 VNET(pf_tcp_secret_init)
VNET_DEFINE(int,			 pf_tcp_iss_off);
#define	V_pf_tcp_iss_off		 VNET(pf_tcp_iss_off)
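
/*
 * The MD5 context and 16-byte secret above are the per-VNET seed for
 * pf_tcp_iss(), which computes the initial sequence numbers handed out
 * by "modulate state" rules; pf_tcp_iss_off adds a monotonically
 * increasing offset on top of the hash.
 */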
/*
 * Queue for pf_intr() sends.
 */
static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations");
struct pf_send_entry {
	STAILQ_ENTRY(pf_send_entry)	pfse_next;
	struct mbuf			*pfse_m;
	enum {
		PFSE_IP,
		PFSE_IP6,
		PFSE_ICMP,
		PFSE_ICMP6,
	}				pfse_type;
	union {
		struct {
			int		type;
			int		code;
			int		mtu;
		} icmpopts;
	} u;
#define	pfse_icmp_type	u.icmpopts.type
#define	pfse_icmp_code	u.icmpopts.code
#define	pfse_icmp_mtu	u.icmpopts.mtu
};

STAILQ_HEAD(pf_send_head, pf_send_entry);
static VNET_DEFINE(struct pf_send_head, pf_sendqueue);
#define	V_pf_sendqueue	VNET(pf_sendqueue)

static struct mtx pf_sendqueue_mtx;
#define	PF_SENDQ_LOCK()		mtx_lock(&pf_sendqueue_mtx)
#define	PF_SENDQ_UNLOCK()	mtx_unlock(&pf_sendqueue_mtx)
/*
 * Queue for pf_overload_task() tasks.
 */
struct pf_overload_entry {
	SLIST_ENTRY(pf_overload_entry)	next;
	struct pf_addr			addr;
	sa_family_t			af;
	uint8_t				dir;
	struct pf_rule			*rule;
};

SLIST_HEAD(pf_overload_head, pf_overload_entry);
static VNET_DEFINE(struct pf_overload_head, pf_overloadqueue);
#define	V_pf_overloadqueue	VNET(pf_overloadqueue)
static VNET_DEFINE(struct task, pf_overloadtask);
#define	V_pf_overloadtask	VNET(pf_overloadtask)

static struct mtx pf_overloadqueue_mtx;
#define	PF_OVERLOADQ_LOCK()	mtx_lock(&pf_overloadqueue_mtx)
#define	PF_OVERLOADQ_UNLOCK()	mtx_unlock(&pf_overloadqueue_mtx)
VNET_DEFINE(struct pf_rulequeue, pf_unlinked_rules);
struct mtx pf_unlnkdrules_mtx;

static VNET_DEFINE(uma_zone_t,	pf_sources_z);
#define	V_pf_sources_z	VNET(pf_sources_z)
uma_zone_t		pf_mtag_z;
VNET_DEFINE(uma_zone_t,	 pf_state_z);
VNET_DEFINE(uma_zone_t,	 pf_state_key_z);

VNET_DEFINE(uint64_t, pf_stateid[MAXCPU]);
#define	PFID_CPUBITS	8
#define	PFID_CPUSHIFT	(sizeof(uint64_t) * NBBY - PFID_CPUBITS)
#define	PFID_CPUMASK	((uint64_t)((1 << PFID_CPUBITS) - 1) << PFID_CPUSHIFT)
#define	PFID_MAXID	(~PFID_CPUMASK)
CTASSERT((1 << PFID_CPUBITS) >= MAXCPU);
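
/*
 * A state ID packs the creating CPU into its top PFID_CPUBITS bits and
 * a per-CPU sequence number into the remaining 56 bits; e.g. CPU 3
 * with counter 0x2a yields ID 0x030000000000002a (before the htobe64
 * in pf_state_insert()).  IDs can thus be generated without cross-CPU
 * synchronization.
 */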
static void		 pf_src_tree_remove_state(struct pf_state *);
static void		 pf_init_threshold(struct pf_threshold *, u_int32_t,
			    u_int32_t);
static void		 pf_add_threshold(struct pf_threshold *);
static int		 pf_check_threshold(struct pf_threshold *);

static void		 pf_change_ap(struct mbuf *, struct pf_addr *, u_int16_t *,
			    u_int16_t *, u_int16_t *, struct pf_addr *,
			    u_int16_t, u_int8_t, sa_family_t);
static int		 pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
			    struct tcphdr *, struct pf_state_peer *);
static void		 pf_change_icmp(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, struct pf_addr *, u_int16_t,
			    u_int16_t *, u_int16_t *, u_int16_t *,
			    u_int16_t *, u_int8_t, sa_family_t);
static void		 pf_send_tcp(struct mbuf *,
			    const struct pf_rule *, sa_family_t,
			    const struct pf_addr *, const struct pf_addr *,
			    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
			    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
			    u_int16_t, struct ifnet *);
static void		 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
			    sa_family_t, struct pf_rule *);
static void		 pf_detach_state(struct pf_state *);
static int		 pf_state_key_attach(struct pf_state_key *,
			    struct pf_state_key *, struct pf_state *);
static void		 pf_state_key_detach(struct pf_state *, int);
static int		 pf_state_key_ctor(void *, int, void *, int);
static u_int32_t	 pf_tcp_iss(struct pf_pdesc *);
static int		 pf_test_rule(struct pf_rule **, struct pf_state **,
			    int, struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **, struct inpcb *);
static int		 pf_create_state(struct pf_rule *, struct pf_rule *,
			    struct pf_rule *, struct pf_pdesc *,
			    struct pf_src_node *, struct pf_state_key *,
			    struct pf_state_key *, struct mbuf *, int,
			    u_int16_t, u_int16_t, int *, struct pfi_kif *,
			    struct pf_state **, int, u_int16_t, u_int16_t,
			    int);
static int		 pf_test_fragment(struct pf_rule **, int,
			    struct pfi_kif *, struct mbuf *, void *,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **);
static int		 pf_tcp_track_full(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, u_short *, int *);
static int		 pf_tcp_track_sloppy(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pf_pdesc *, u_short *);
static int		 pf_test_state_tcp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_udp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *);
static int		 pf_test_state_icmp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_other(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
static u_int8_t		 pf_get_wscale(struct mbuf *, int, u_int16_t,
			    sa_family_t);
static u_int16_t	 pf_get_mss(struct mbuf *, int, u_int16_t,
			    sa_family_t);
static u_int16_t	 pf_calc_mss(struct pf_addr *, sa_family_t,
			    int, u_int16_t);
static int		 pf_check_proto_cksum(struct mbuf *, int, int,
			    u_int8_t, sa_family_t);
static void		 pf_print_state_parts(struct pf_state *,
			    struct pf_state_key *, struct pf_state_key *);
static int		 pf_addr_wrap_neq(struct pf_addr_wrap *,
			    struct pf_addr_wrap *);
static struct pf_state	*pf_find_state(struct pfi_kif *,
			    struct pf_state_key_cmp *, u_int);
static int		 pf_src_connlimit(struct pf_state **);
static void		 pf_overload_task(void *v, int pending);
static int		 pf_insert_src_node(struct pf_src_node **,
			    struct pf_rule *, struct pf_addr *, sa_family_t);
static u_int		 pf_purge_expired_states(u_int, int);
static void		 pf_purge_unlinked_rules(void);
static int		 pf_mtag_uminit(void *, int, int);
static void		 pf_mtag_free(struct m_tag *);
#ifdef INET
static void		 pf_route(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
#endif /* INET */
#ifdef INET6
static void		 pf_change_a6(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, u_int8_t);
static void		 pf_route6(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
#endif /* INET6 */

int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);
VNET_DECLARE(int, pf_end_threads);

VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);

#define	PACKET_LOOPED(pd)	((pd)->pf_mtag &&			\
				 (pd)->pf_mtag->flags & PF_PACKET_LOOPED)

#define	STATE_LOOKUP(i, k, d, s, pd)					\
	do {								\
		(s) = pf_find_state((i), (k), (d));			\
		if ((s) == NULL)					\
			return (PF_DROP);				\
		if (PACKET_LOOPED(pd))					\
			return (PF_PASS);				\
		if ((d) == PF_OUT &&					\
		    (((s)->rule.ptr->rt == PF_ROUTETO &&		\
		    (s)->rule.ptr->direction == PF_OUT) ||		\
		    ((s)->rule.ptr->rt == PF_REPLYTO &&			\
		    (s)->rule.ptr->direction == PF_IN)) &&		\
		    (s)->rt_kif != NULL &&				\
		    (s)->rt_kif != (i))					\
			return (PF_PASS);				\
	} while (0)

#define	BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all
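
/*
 * BOUND_IFACE() returns the creating kif only for rules with the
 * "if-bound" state policy; all other states are bound to V_pfi_all
 * ("floating") and match traffic arriving on any interface.
 */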
#define	STATE_INC_COUNTERS(s)						\
	do {								\
		counter_u64_add(s->rule.ptr->states_cur, 1);		\
		counter_u64_add(s->rule.ptr->states_tot, 1);		\
		if (s->anchor.ptr != NULL) {				\
			counter_u64_add(s->anchor.ptr->states_cur, 1);	\
			counter_u64_add(s->anchor.ptr->states_tot, 1);	\
		}							\
		if (s->nat_rule.ptr != NULL) {				\
			counter_u64_add(s->nat_rule.ptr->states_cur, 1);\
			counter_u64_add(s->nat_rule.ptr->states_tot, 1);\
		}							\
	} while (0)

#define	STATE_DEC_COUNTERS(s)						\
	do {								\
		if (s->nat_rule.ptr != NULL)				\
			counter_u64_add(s->nat_rule.ptr->states_cur, -1);\
		if (s->anchor.ptr != NULL)				\
			counter_u64_add(s->anchor.ptr->states_cur, -1);	\
		counter_u64_add(s->rule.ptr->states_cur, -1);		\
	} while (0)
static MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
VNET_DEFINE(struct pf_idhash *, pf_idhash);
VNET_DEFINE(struct pf_srchash *, pf_srchash);

SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW, 0, "pf(4)");

u_long		pf_hashmask;
u_long		pf_srchashmask;
static u_long	pf_hashsize;
static u_long	pf_srchashsize;

SYSCTL_ULONG(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN,
    &pf_hashsize, 0, "Size of pf(4) states hashtable");
SYSCTL_ULONG(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN,
    &pf_srchashsize, 0, "Size of pf(4) source nodes hashtable");

VNET_DEFINE(void *, pf_swi_cookie);

VNET_DEFINE(uint32_t, pf_hashseed);
#define	V_pf_hashseed	VNET(pf_hashseed)
int
pf_addr_cmp(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
{

	switch (af) {
#ifdef INET
	case AF_INET:
		if (a->addr32[0] > b->addr32[0])
			return (1);
		if (a->addr32[0] < b->addr32[0])
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (a->addr32[3] > b->addr32[3])
			return (1);
		if (a->addr32[3] < b->addr32[3])
			return (-1);
		if (a->addr32[2] > b->addr32[2])
			return (1);
		if (a->addr32[2] < b->addr32[2])
			return (-1);
		if (a->addr32[1] > b->addr32[1])
			return (1);
		if (a->addr32[1] < b->addr32[1])
			return (-1);
		if (a->addr32[0] > b->addr32[0])
			return (1);
		if (a->addr32[0] < b->addr32[0])
			return (-1);
		break;
#endif /* INET6 */
	default:
		panic("%s: unknown address family %u", __func__, af);
	}
	return (0);
}
static __inline uint32_t
pf_hashkey(struct pf_state_key *sk)
{
	uint32_t h;

	h = murmur3_aligned_32((uint32_t *)sk,
	    sizeof(struct pf_state_key_cmp),
	    V_pf_hashseed);

	return (h & pf_hashmask);
}

static __inline uint32_t
pf_hashsrc(struct pf_addr *addr, sa_family_t af)
{
	uint32_t h;

	switch (af) {
	case AF_INET:
		h = murmur3_aligned_32((uint32_t *)&addr->v4,
		    sizeof(addr->v4), V_pf_hashseed);
		break;
	case AF_INET6:
		h = murmur3_aligned_32((uint32_t *)&addr->v6,
		    sizeof(addr->v6), V_pf_hashseed);
		break;
	default:
		panic("%s: unknown address family %u", __func__, af);
	}

	return (h & pf_srchashmask);
}
void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{

	switch (af) {
#ifdef INET
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
#endif /* INET */
	case AF_INET6:
		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];
		break;
	}
}
static void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = time_uptime;
}

static void
pf_add_threshold(struct pf_threshold *threshold)
{
	u_int32_t t = time_uptime, diff = t - threshold->last;

	if (diff >= threshold->seconds)
		threshold->count = 0;
	else
		threshold->count -= threshold->count * diff /
		    threshold->seconds;
	threshold->count += PF_THRESHOLD_MULT;
	threshold->last = t;
}

static int
pf_check_threshold(struct pf_threshold *threshold)
{
	return (threshold->count > threshold->limit);
}
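
/*
 * The threshold counter is fixed point: both limit and count are
 * scaled by PF_THRESHOLD_MULT.  pf_add_threshold() first decays count
 * linearly across the configured window, then adds one (scaled)
 * event.  E.g. with "max-src-conn-rate 10/5", ten connections arriving
 * within 5 seconds push count past limit and pf_check_threshold()
 * trips.
 */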
static int
pf_src_connlimit(struct pf_state **state)
{
	struct pf_overload_entry *pfoe;
	int bad = 0;

	PF_STATE_LOCK_ASSERT(*state);

	(*state)->src_node->conn++;
	(*state)->src.tcp_est = 1;
	pf_add_threshold(&(*state)->src_node->conn_rate);

	if ((*state)->rule.ptr->max_src_conn &&
	    (*state)->rule.ptr->max_src_conn <
	    (*state)->src_node->conn) {
		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONN], 1);
		bad++;
	}

	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONNRATE], 1);
		bad++;
	}

	if (!bad)
		return (0);

	/* Kill this state. */
	(*state)->timeout = PFTM_PURGE;
	(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;

	if ((*state)->rule.ptr->overload_tbl == NULL)
		return (1);

	/* Schedule overloading and flushing task. */
	pfoe = malloc(sizeof(*pfoe), M_PFTEMP, M_NOWAIT);
	if (pfoe == NULL)
		return (1);	/* too bad :( */

	bcopy(&(*state)->src_node->addr, &pfoe->addr, sizeof(pfoe->addr));
	pfoe->af = (*state)->key[PF_SK_WIRE]->af;
	pfoe->rule = (*state)->rule.ptr;
	pfoe->dir = (*state)->direction;
	PF_OVERLOADQ_LOCK();
	SLIST_INSERT_HEAD(&V_pf_overloadqueue, pfoe, next);
	PF_OVERLOADQ_UNLOCK();
	taskqueue_enqueue(taskqueue_swi, &V_pf_overloadtask);

	return (1);
}
static void
pf_overload_task(void *v, int pending)
{
	struct pf_overload_head queue;
	struct pfr_addr p;
	struct pf_overload_entry *pfoe, *pfoe1;
	uint32_t killed = 0;

	CURVNET_SET((struct vnet *)v);

	PF_OVERLOADQ_LOCK();
	queue = V_pf_overloadqueue;
	SLIST_INIT(&V_pf_overloadqueue);
	PF_OVERLOADQ_UNLOCK();

	bzero(&p, sizeof(p));
	SLIST_FOREACH(pfoe, &queue, next) {
		counter_u64_add(V_pf_status.lcounters[LCNT_OVERLOAD_TABLE], 1);
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("%s: blocking address ", __func__);
			pf_print_host(&pfoe->addr, 0, pfoe->af);
			printf("\n");
		}

		p.pfra_af = pfoe->af;
		switch (pfoe->af) {
#ifdef INET
		case AF_INET:
			p.pfra_net = 32;
			p.pfra_ip4addr = pfoe->addr.v4;
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			p.pfra_net = 128;
			p.pfra_ip6addr = pfoe->addr.v6;
			break;
#endif /* INET6 */
		}

		PF_RULES_WLOCK();
		pfr_insert_kentry(pfoe->rule->overload_tbl, &p, time_second);
		PF_RULES_WUNLOCK();
	}

	/*
	 * Remove those entries, that don't need flushing.
	 */
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		if (pfoe->rule->flush == 0) {
			SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next);
			free(pfoe, M_PFTEMP);
		} else
			counter_u64_add(
			    V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH], 1);

	/* If nothing to flush, return. */
	if (SLIST_EMPTY(&queue)) {
		CURVNET_RESTORE();
		return;
	}

	for (int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_state_key *sk;
		struct pf_state *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			sk = s->key[PF_SK_WIRE];
			SLIST_FOREACH(pfoe, &queue, next)
				if (sk->af == pfoe->af &&
				    ((pfoe->rule->flush & PF_FLUSH_GLOBAL) ||
				    pfoe->rule == s->rule.ptr) &&
				    ((pfoe->dir == PF_OUT &&
				    PF_AEQ(&pfoe->addr, &sk->addr[1], sk->af)) ||
				    (pfoe->dir == PF_IN &&
				    PF_AEQ(&pfoe->addr, &sk->addr[0], sk->af)))) {
					s->timeout = PFTM_PURGE;
					s->src.state = s->dst.state = TCPS_CLOSED;
					killed++;
				}
		}
		PF_HASHROW_UNLOCK(ih);
	}
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		free(pfoe, M_PFTEMP);
	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("%s: %u states killed", __func__, killed);

	CURVNET_RESTORE();
}
/*
 * Can return locked on failure, so that we can consistently
 * allocate and insert a new one.
 */
struct pf_src_node *
pf_find_src_node(struct pf_addr *src, struct pf_rule *rule, sa_family_t af,
    int returnlocked)
{
	struct pf_srchash *sh;
	struct pf_src_node *n;

	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_SEARCH], 1);

	sh = &V_pf_srchash[pf_hashsrc(src, af)];
	PF_HASHROW_LOCK(sh);
	LIST_FOREACH(n, &sh->nodes, entry)
		if (n->rule.ptr == rule && n->af == af &&
		    ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) ||
		    (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0)))
			break;
	if (n != NULL) {
		n->states++;
		PF_HASHROW_UNLOCK(sh);
	} else if (returnlocked == 0)
		PF_HASHROW_UNLOCK(sh);

	return (n);
}
static int
pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
    struct pf_addr *src, sa_family_t af)
{

	KASSERT((rule->rule_flag & PFRULE_RULESRCTRACK ||
	    rule->rpool.opts & PF_POOL_STICKYADDR),
	    ("%s for non-tracking rule %p", __func__, rule));

	if (*sn == NULL)
		*sn = pf_find_src_node(src, rule, af, 1);

	if (*sn == NULL) {
		struct pf_srchash *sh = &V_pf_srchash[pf_hashsrc(src, af)];

		PF_HASHROW_ASSERT(sh);

		if (!rule->max_src_nodes ||
		    counter_u64_fetch(rule->src_nodes) < rule->max_src_nodes)
			(*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO);
		else
			counter_u64_add(V_pf_status.lcounters[LCNT_SRCNODES],
			    1);
		if ((*sn) == NULL) {
			PF_HASHROW_UNLOCK(sh);
			return (-1);
		}

		pf_init_threshold(&(*sn)->conn_rate,
		    rule->max_src_conn_rate.limit,
		    rule->max_src_conn_rate.seconds);

		(*sn)->af = af;
		(*sn)->rule.ptr = rule;
		PF_ACPY(&(*sn)->addr, src, af);
		LIST_INSERT_HEAD(&sh->nodes, *sn, entry);
		(*sn)->creation = time_uptime;
		(*sn)->ruletype = rule->action;
		(*sn)->states = 1;
		if ((*sn)->rule.ptr != NULL)
			counter_u64_add((*sn)->rule.ptr->src_nodes, 1);
		PF_HASHROW_UNLOCK(sh);
		counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_INSERT], 1);
	} else {
		if (rule->max_src_states &&
		    (*sn)->states >= rule->max_src_states) {
			counter_u64_add(V_pf_status.lcounters[LCNT_SRCSTATES],
			    1);
			return (-1);
		}
	}
	return (0);
}
void
pf_unlink_src_node(struct pf_src_node *src)
{

	PF_HASHROW_ASSERT(&V_pf_srchash[pf_hashsrc(&src->addr, src->af)]);
	LIST_REMOVE(src, entry);
	if (src->rule.ptr)
		counter_u64_add(src->rule.ptr->src_nodes, -1);
}

void
pf_free_src_nodes(struct pf_src_node_list *head)
{
	struct pf_src_node *sn, *tmp;
	u_int count = 0;

	LIST_FOREACH_SAFE(sn, head, entry, tmp) {
		uma_zfree(V_pf_sources_z, sn);
		count++;
	}

	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], count);
}
void
pf_mtag_initialize()
{

	pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) +
	    sizeof(struct pf_mtag), NULL, NULL, pf_mtag_uminit, NULL,
	    UMA_ALIGN_PTR, 0);
}

/* Per-vnet data storage structures initialization. */
void
pf_initialize()
{
	struct pf_keyhash	*kh;
	struct pf_idhash	*ih;
	struct pf_srchash	*sh;
	u_int i;

	TUNABLE_ULONG_FETCH("net.pf.states_hashsize", &pf_hashsize);
	if (pf_hashsize == 0 || !powerof2(pf_hashsize))
		pf_hashsize = PF_HASHSIZ;
	TUNABLE_ULONG_FETCH("net.pf.source_nodes_hashsize", &pf_srchashsize);
	if (pf_srchashsize == 0 || !powerof2(pf_srchashsize))
		pf_srchashsize = PF_HASHSIZ / 4;

	V_pf_hashseed = arc4random();

	/* States and state keys storage. */
	V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_state),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z;
	uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT);
	uma_zone_set_warning(V_pf_state_z, "PF states limit reached");

	V_pf_state_key_z = uma_zcreate("pf state keys",
	    sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	V_pf_keyhash = malloc(pf_hashsize * sizeof(struct pf_keyhash),
	    M_PFHASH, M_WAITOK | M_ZERO);
	V_pf_idhash = malloc(pf_hashsize * sizeof(struct pf_idhash),
	    M_PFHASH, M_WAITOK | M_ZERO);
	pf_hashmask = pf_hashsize - 1;
	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
	    i++, kh++, ih++) {
		mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK);
		mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF);
	}

	/* Source nodes. */
	V_pf_sources_z = uma_zcreate("pf source nodes",
	    sizeof(struct pf_src_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    0);
	V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z;
	uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT);
	uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached");
	V_pf_srchash = malloc(pf_srchashsize * sizeof(struct pf_srchash),
	    M_PFHASH, M_WAITOK|M_ZERO);
	pf_srchashmask = pf_srchashsize - 1;
	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++)
		mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);

	/* ALTQ */
	TAILQ_INIT(&V_pf_altqs[0]);
	TAILQ_INIT(&V_pf_altqs[1]);
	TAILQ_INIT(&V_pf_pabuf);
	V_pf_altqs_active = &V_pf_altqs[0];
	V_pf_altqs_inactive = &V_pf_altqs[1];

	/* Send & overload+flush queues. */
	STAILQ_INIT(&V_pf_sendqueue);
	SLIST_INIT(&V_pf_overloadqueue);
	TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, curvnet);
	mtx_init(&pf_sendqueue_mtx, "pf send queue", NULL, MTX_DEF);
	mtx_init(&pf_overloadqueue_mtx, "pf overload/flush queue", NULL,
	    MTX_DEF);

	/* Unlinked, but may be referenced rules. */
	TAILQ_INIT(&V_pf_unlinked_rules);
	mtx_init(&pf_unlnkdrules_mtx, "pf unlinked rules", NULL, MTX_DEF);
}
void
pf_mtag_cleanup()
{

	uma_zdestroy(pf_mtag_z);
}

void
pf_cleanup()
{
	struct pf_keyhash	*kh;
	struct pf_idhash	*ih;
	struct pf_srchash	*sh;
	struct pf_send_entry	*pfse, *next;
	u_int i;

	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
	    i++, kh++, ih++) {
		KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
		    __func__));
		KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty",
		    __func__));
		mtx_destroy(&kh->lock);
		mtx_destroy(&ih->lock);
	}
	free(V_pf_keyhash, M_PFHASH);
	free(V_pf_idhash, M_PFHASH);

	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
		KASSERT(LIST_EMPTY(&sh->nodes),
		    ("%s: source node hash not empty", __func__));
		mtx_destroy(&sh->lock);
	}
	free(V_pf_srchash, M_PFHASH);

	STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) {
		m_freem(pfse->pfse_m);
		free(pfse, M_PFTEMP);
	}

	mtx_destroy(&pf_sendqueue_mtx);
	mtx_destroy(&pf_overloadqueue_mtx);
	mtx_destroy(&pf_unlnkdrules_mtx);

	uma_zdestroy(V_pf_sources_z);
	uma_zdestroy(V_pf_state_z);
	uma_zdestroy(V_pf_state_key_z);
}
static int
pf_mtag_uminit(void *mem, int size, int how)
{
	struct m_tag *t;

	t = (struct m_tag *)mem;
	t->m_tag_cookie = MTAG_ABI_COMPAT;
	t->m_tag_id = PACKET_TAG_PF;
	t->m_tag_len = sizeof(struct pf_mtag);
	t->m_tag_free = pf_mtag_free;

	return (0);
}

static void
pf_mtag_free(struct m_tag *t)
{

	uma_zfree(pf_mtag_z, t);
}

struct pf_mtag *
pf_get_mtag(struct mbuf *m)
{
	struct m_tag *mtag;

	if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL)
		return ((struct pf_mtag *)(mtag + 1));

	mtag = uma_zalloc(pf_mtag_z, M_NOWAIT);
	if (mtag == NULL)
		return (NULL);
	bzero(mtag + 1, sizeof(struct pf_mtag));
	m_tag_prepend(m, mtag);

	return ((struct pf_mtag *)(mtag + 1));
}
static int
pf_state_key_attach(struct pf_state_key *skw, struct pf_state_key *sks,
    struct pf_state *s)
{
	struct pf_keyhash	*khs, *khw, *kh;
	struct pf_state_key	*sk, *cur;
	struct pf_state		*si, *olds = NULL;
	int idx;

	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
	KASSERT(s->key[PF_SK_WIRE] == NULL, ("%s: state has key", __func__));
	KASSERT(s->key[PF_SK_STACK] == NULL, ("%s: state has key", __func__));

	/*
	 * We need to lock hash slots of both keys. To avoid deadlock
	 * we always lock the slot with lower address first. Unlock order
	 * isn't important.
	 *
	 * We also need to lock ID hash slot before dropping key
	 * locks. On success we return with ID hash slot locked.
	 */

	if (skw == sks) {
		khs = khw = &V_pf_keyhash[pf_hashkey(skw)];
		PF_HASHROW_LOCK(khs);
	} else {
		khs = &V_pf_keyhash[pf_hashkey(sks)];
		khw = &V_pf_keyhash[pf_hashkey(skw)];
		if (khs == khw) {
			PF_HASHROW_LOCK(khs);
		} else if (khs < khw) {
			PF_HASHROW_LOCK(khs);
			PF_HASHROW_LOCK(khw);
		} else {
			PF_HASHROW_LOCK(khw);
			PF_HASHROW_LOCK(khs);
		}
	}

#define	KEYS_UNLOCK()	do {			\
	if (khs != khw) {			\
		PF_HASHROW_UNLOCK(khs);		\
		PF_HASHROW_UNLOCK(khw);		\
	} else					\
		PF_HASHROW_UNLOCK(khs);		\
} while (0)
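
	/*
	 * Taking both rows in ascending address order keeps the two-row
	 * case deadlock-free: two threads attaching keys that hash to
	 * rows A and B both take min(A, B) first, so neither can hold
	 * one row while waiting for the other.
	 */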
	/*
	 * First run: start with wire key.
	 */
	sk = skw;
	kh = khw;
	idx = PF_SK_WIRE;

keyattach:
	LIST_FOREACH(cur, &kh->keys, entry)
		if (bcmp(cur, sk, sizeof(struct pf_state_key_cmp)) == 0)
			break;

	if (cur != NULL) {
		/* Key exists. Check for same kif, if none, add to key. */
		TAILQ_FOREACH(si, &cur->states[idx], key_list[idx]) {
			struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(si)];

			PF_HASHROW_LOCK(ih);
			if (si->kif == s->kif &&
			    si->direction == s->direction) {
				if (sk->proto == IPPROTO_TCP &&
				    si->src.state >= TCPS_FIN_WAIT_2 &&
				    si->dst.state >= TCPS_FIN_WAIT_2) {
					/*
					 * New state matches an old >FIN_WAIT_2
					 * state. We can't drop key hash locks,
					 * thus we can't unlink it properly.
					 *
					 * As a workaround we drop it into
					 * TCPS_CLOSED state, schedule purge
					 * ASAP and push it into the very end
					 * of the slot TAILQ, so that it won't
					 * conflict with our new state.
					 */
					si->src.state = si->dst.state =
					    TCPS_CLOSED;
					si->timeout = PFTM_PURGE;
					olds = si;
				} else {
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						printf("pf: %s key attach "
						    "failed on %s: ",
						    (idx == PF_SK_WIRE) ?
						    "wire" : "stack",
						    s->kif->pfik_name);
						pf_print_state_parts(s,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						printf(", existing: ");
						pf_print_state_parts(si,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						printf("\n");
					}
					PF_HASHROW_UNLOCK(ih);
					KEYS_UNLOCK();
					uma_zfree(V_pf_state_key_z, sk);
					if (idx == PF_SK_STACK)
						pf_detach_state(s);
					return (EEXIST); /* collision! */
				}
			}
			PF_HASHROW_UNLOCK(ih);
		}
		uma_zfree(V_pf_state_key_z, sk);
		s->key[idx] = cur;
	} else {
		LIST_INSERT_HEAD(&kh->keys, sk, entry);
		s->key[idx] = sk;
	}

stateattach:
	/* List is sorted, if-bound states before floating. */
	if (s->kif == V_pfi_all)
		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]);
	else
		TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]);

	if (olds) {
		TAILQ_REMOVE(&s->key[idx]->states[idx], olds, key_list[idx]);
		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], olds,
		    key_list[idx]);
		olds = NULL;
	}

	/*
	 * Attach done. See how should we (or should not?)
	 * attach a second key.
	 */
	if (sks == skw) {
		s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
		idx = PF_SK_STACK;
		sks = NULL;
		goto stateattach;
	} else if (sks != NULL) {
		/*
		 * Continue attaching with stack key.
		 */
		sk = sks;
		kh = khs;
		idx = PF_SK_STACK;
		sks = NULL;
		goto keyattach;
	}

	PF_STATE_LOCK(s);
	KEYS_UNLOCK();

	KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL,
	    ("%s failure", __func__));

	return (0);
#undef	KEYS_UNLOCK
}
static void
pf_detach_state(struct pf_state *s)
{
	struct pf_state_key *sks = s->key[PF_SK_STACK];
	struct pf_keyhash *kh;

	if (sks != NULL) {
		kh = &V_pf_keyhash[pf_hashkey(sks)];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_STACK] != NULL)
			pf_state_key_detach(s, PF_SK_STACK);
		/*
		 * If both point to same key, then we are done.
		 */
		if (sks == s->key[PF_SK_WIRE]) {
			pf_state_key_detach(s, PF_SK_WIRE);
			PF_HASHROW_UNLOCK(kh);
			return;
		}
		PF_HASHROW_UNLOCK(kh);
	}

	if (s->key[PF_SK_WIRE] != NULL) {
		kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_WIRE] != NULL)
			pf_state_key_detach(s, PF_SK_WIRE);
		PF_HASHROW_UNLOCK(kh);
	}
}

static void
pf_state_key_detach(struct pf_state *s, int idx)
{
	struct pf_state_key *sk = s->key[idx];
#ifdef INVARIANTS
	struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)];

	PF_HASHROW_ASSERT(kh);
#endif
	TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]);
	s->key[idx] = NULL;

	if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) {
		LIST_REMOVE(sk, entry);
		uma_zfree(V_pf_state_key_z, sk);
	}
}

static int
pf_state_key_ctor(void *mem, int size, void *arg, int flags)
{
	struct pf_state_key *sk = mem;

	bzero(sk, sizeof(struct pf_state_key_cmp));
	TAILQ_INIT(&sk->states[PF_SK_WIRE]);
	TAILQ_INIT(&sk->states[PF_SK_STACK]);

	return (0);
}
struct pf_state_key *
pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr,
    struct pf_addr *daddr, u_int16_t sport, u_int16_t dport)
{
	struct pf_state_key *sk;

	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
	if (sk == NULL)
		return (NULL);

	PF_ACPY(&sk->addr[pd->sidx], saddr, pd->af);
	PF_ACPY(&sk->addr[pd->didx], daddr, pd->af);
	sk->port[pd->sidx] = sport;
	sk->port[pd->didx] = dport;
	sk->proto = pd->proto;
	sk->af = pd->af;

	return (sk);
}

struct pf_state_key *
pf_state_key_clone(struct pf_state_key *orig)
{
	struct pf_state_key *sk;

	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
	if (sk == NULL)
		return (NULL);

	bcopy(orig, sk, sizeof(struct pf_state_key_cmp));

	return (sk);
}
int
pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
    struct pf_state_key *sks, struct pf_state *s)
{
	struct pf_idhash *ih;
	struct pf_state *cur;
	int error;

	KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]),
	    ("%s: sks not pristine", __func__));
	KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]),
	    ("%s: skw not pristine", __func__));
	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));

	s->kif = kif;

	if (s->id == 0 && s->creatorid == 0) {
		/* XXX: should be atomic, but probability of collision low */
		if ((s->id = V_pf_stateid[curcpu]++) == PFID_MAXID)
			V_pf_stateid[curcpu] = 1;
		s->id |= (uint64_t )curcpu << PFID_CPUSHIFT;
		s->id = htobe64(s->id);
		s->creatorid = V_pf_status.hostid;
	}

	/* Returns with ID locked on success. */
	if ((error = pf_state_key_attach(skw, sks, s)) != 0)
		return (error);

	ih = &V_pf_idhash[PF_IDHASH(s)];
	PF_HASHROW_ASSERT(ih);
	LIST_FOREACH(cur, &ih->states, entry)
		if (cur->id == s->id && cur->creatorid == s->creatorid)
			break;

	if (cur != NULL) {
		PF_HASHROW_UNLOCK(ih);
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("pf: state ID collision: "
			    "id: %016llx creatorid: %08x\n",
			    (unsigned long long)be64toh(s->id),
			    ntohl(s->creatorid));
		}
		pf_detach_state(s);
		return (EEXIST);
	}

	LIST_INSERT_HEAD(&ih->states, s, entry);
	/* One for keys, one for ID hash. */
	refcount_init(&s->refs, 2);

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_INSERT], 1);
	if (pfsync_insert_state_ptr != NULL)
		pfsync_insert_state_ptr(s);

	/* Returns locked. */
	return (0);
}
1231 /* Returns locked. */
1236 * Find state by ID: returns with locked row on success.
1239 pf_find_state_byid(uint64_t id, uint32_t creatorid)
1241 struct pf_idhash *ih;
1244 counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);
1246 ih = &V_pf_idhash[(be64toh(id) % (pf_hashmask + 1))];
1248 PF_HASHROW_LOCK(ih);
1249 LIST_FOREACH(s, &ih->states, entry)
1250 if (s->id == id && s->creatorid == creatorid)
1254 PF_HASHROW_UNLOCK(ih);
1260 * Find state by key.
1261 * Returns with ID hash slot locked on success.
1263 static struct pf_state *
1264 pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
1266 struct pf_keyhash *kh;
1267 struct pf_state_key *sk;
1271 counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);
1273 kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];
1275 PF_HASHROW_LOCK(kh);
1276 LIST_FOREACH(sk, &kh->keys, entry)
1277 if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
1280 PF_HASHROW_UNLOCK(kh);
1284 idx = (dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK);
1286 /* List is sorted, if-bound states before floating ones. */
1287 TAILQ_FOREACH(s, &sk->states[idx], key_list[idx])
1288 if (s->kif == V_pfi_all || s->kif == kif) {
1290 PF_HASHROW_UNLOCK(kh);
1291 if (s->timeout >= PFTM_MAX) {
1293 * State is either being processed by
1294 * pf_unlink_state() in an other thread, or
1295 * is scheduled for immediate expiry.
1302 PF_HASHROW_UNLOCK(kh);
struct pf_state *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
	struct pf_keyhash	*kh;
	struct pf_state_key	*sk;
	struct pf_state		*s, *ret = NULL;
	int			 idx, inout = 0;

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
			break;
	if (sk == NULL) {
		PF_HASHROW_UNLOCK(kh);
		return (NULL);
	}
	switch (dir) {
	case PF_IN:
		idx = PF_SK_WIRE;
		break;
	case PF_OUT:
		idx = PF_SK_STACK;
		break;
	case PF_OUT | PF_IN:
		idx = PF_SK_STACK;
		inout = 1;
		break;
	default:
		panic("%s: dir %u", __func__, dir);
	}
second_run:
	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) {
		if (more == NULL) {
			PF_HASHROW_UNLOCK(kh);
			return (s);
		}

		if (ret)
			(*more)++;
		else
			ret = s;
	}
	if (inout == 1) {
		inout = 0;
		idx = PF_SK_WIRE;
		goto second_run;
	}
	PF_HASHROW_UNLOCK(kh);

	return (ret);
}
/* END state table stuff */

static void
pf_send(struct pf_send_entry *pfse)
{

	PF_SENDQ_LOCK();
	STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next);
	PF_SENDQ_UNLOCK();
	swi_sched(V_pf_swi_cookie, 0);
}

void
pf_intr(void *v)
{
	struct pf_send_head queue;
	struct pf_send_entry *pfse, *next;

	CURVNET_SET((struct vnet *)v);

	PF_SENDQ_LOCK();
	queue = V_pf_sendqueue;
	STAILQ_INIT(&V_pf_sendqueue);
	PF_SENDQ_UNLOCK();

	STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) {
		switch (pfse->pfse_type) {
#ifdef INET
		case PFSE_IP:
			ip_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL);
			break;
		case PFSE_ICMP:
			icmp_error(pfse->pfse_m, pfse->pfse_icmp_type,
			    pfse->pfse_icmp_code, 0, pfse->pfse_icmp_mtu);
			break;
#endif /* INET */
#ifdef INET6
		case PFSE_IP6:
			ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL,
			    NULL);
			break;
		case PFSE_ICMP6:
			icmp6_error(pfse->pfse_m, pfse->pfse_icmp_type,
			    pfse->pfse_icmp_code, pfse->pfse_icmp_mtu);
			break;
#endif /* INET6 */
		default:
			panic("%s: unknown type", __func__);
		}
		free(pfse, M_PFTEMP);
	}
	CURVNET_RESTORE();
}
void
pf_purge_thread(void *v)
{
	u_int idx = 0;

	CURVNET_SET((struct vnet *)v);

	for (;;) {
		PF_RULES_RLOCK();
		rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftm", hz / 10);

		if (V_pf_end_threads) {
			/*
			 * To cleanse up all kifs and rules we need
			 * two runs: first one clears reference flags,
			 * then pf_purge_expired_states() doesn't
			 * raise them, and then second run frees.
			 */
			PF_RULES_RUNLOCK();
			pf_purge_unlinked_rules();
			pfi_kif_purge();

			/*
			 * Now purge everything.
			 */
			pf_purge_expired_states(0, pf_hashmask);
			pf_purge_expired_fragments();
			pf_purge_expired_src_nodes();

			/*
			 * Now all kifs & rules should be unreferenced,
			 * thus should be successfully freed.
			 */
			pf_purge_unlinked_rules();
			pfi_kif_purge();

			/*
			 * Announce success and exit.
			 */
			PF_RULES_RLOCK();
			V_pf_end_threads++;
			PF_RULES_RUNLOCK();
			wakeup(pf_purge_thread);
			kproc_exit();
		}
		PF_RULES_RUNLOCK();

		/* Process 1/interval fraction of the state table every run. */
		idx = pf_purge_expired_states(idx, pf_hashmask /
		    (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));
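
		/*
		 * Example: with the default "set timeout interval 10"
		 * and ten wakeups per second (the hz / 10 sleep above),
		 * each run scans pf_hashmask / 100 rows, so the whole
		 * table is covered once per 10-second interval.
		 */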

		/* Purge other expired types every PFTM_INTERVAL seconds. */
		if (idx == 0) {
			/*
			 * Order is important:
			 * - states and src nodes reference rules
			 * - states and rules reference kifs
			 */
			pf_purge_expired_fragments();
			pf_purge_expired_src_nodes();
			pf_purge_unlinked_rules();
			pfi_kif_purge();
		}
	}
	/* not reached */
	CURVNET_RESTORE();
}
u_int32_t
pf_state_expires(const struct pf_state *state)
{
	u_int32_t	timeout;
	u_int32_t	start;
	u_int32_t	end;
	u_int32_t	states;

	/* handle all PFTM_* > PFTM_MAX here */
	if (state->timeout == PFTM_PURGE)
		return (time_uptime);
	KASSERT(state->timeout != PFTM_UNLINKED,
	    ("pf_state_expires: timeout == PFTM_UNLINKED"));
	KASSERT((state->timeout < PFTM_MAX),
	    ("pf_state_expires: timeout > PFTM_MAX"));
	timeout = state->rule.ptr->timeout[state->timeout];
	if (!timeout)
		timeout = V_pf_default_rule.timeout[state->timeout];
	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
	if (start) {
		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
		states = counter_u64_fetch(state->rule.ptr->states_cur);
	} else {
		start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
		end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
		states = V_pf_status.states;
	}
	if (end && states > start && start < end) {
		if (states < end)
			return (state->expire + timeout * (end - states) /
			    (end - start));
		else
			return (time_uptime);
	}
	return (state->expire + timeout);
}
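
/*
 * Adaptive scaling shrinks timeouts linearly as the state table fills.
 * Example: with adaptive.start 6000, adaptive.end 12000 and 9000
 * current states, the effective timeout is
 * timeout * (12000 - 9000) / (12000 - 6000), i.e. half the configured
 * value; at or beyond adaptive.end, states expire immediately.
 */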
static void
pf_purge_expired_src_nodes()
{
	struct pf_src_node_list	 freelist;
	struct pf_srchash	*sh;
	struct pf_src_node	*cur, *next;
	int i;

	LIST_INIT(&freelist);
	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next)
			if (cur->states == 0 && cur->expire <= time_uptime) {
				pf_unlink_src_node(cur);
				LIST_INSERT_HEAD(&freelist, cur, entry);
			} else if (cur->rule.ptr != NULL)
				cur->rule.ptr->rule_flag |= PFRULE_REFS;
		PF_HASHROW_UNLOCK(sh);
	}

	pf_free_src_nodes(&freelist);

	V_pf_status.src_nodes = uma_zone_get_cur(V_pf_sources_z);
}
static void
pf_src_tree_remove_state(struct pf_state *s)
{
	struct pf_src_node *sn;
	struct pf_srchash *sh;
	u_int32_t timeout;

	timeout = s->rule.ptr->timeout[PFTM_SRC_NODE] ?
	    s->rule.ptr->timeout[PFTM_SRC_NODE] :
	    V_pf_default_rule.timeout[PFTM_SRC_NODE];

	if (s->src_node != NULL) {
		sn = s->src_node;
		sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
		PF_HASHROW_LOCK(sh);
		if (s->src.tcp_est)
			--sn->conn;
		if (--sn->states == 0)
			sn->expire = time_uptime + timeout;
		PF_HASHROW_UNLOCK(sh);
	}
	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
		sn = s->nat_src_node;
		sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
		PF_HASHROW_LOCK(sh);
		if (--sn->states == 0)
			sn->expire = time_uptime + timeout;
		PF_HASHROW_UNLOCK(sh);
	}
	s->src_node = s->nat_src_node = NULL;
}
/*
 * Unlink and potentially free a state. Function may be
 * called with ID hash row locked, but always returns
 * unlocked, since it needs to go through key hash locking.
 */
int
pf_unlink_state(struct pf_state *s, u_int flags)
{
	struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)];

	if ((flags & PF_ENTER_LOCKED) == 0)
		PF_HASHROW_LOCK(ih);
	else
		PF_HASHROW_ASSERT(ih);

	if (s->timeout == PFTM_UNLINKED) {
		/*
		 * State is being processed
		 * by pf_unlink_state() in
		 * an other thread.
		 */
		PF_HASHROW_UNLOCK(ih);
		return (0);	/* XXXGL: undefined actually */
	}

	if (s->src.state == PF_TCPS_PROXY_DST) {
		/* XXX wire key the right one? */
		pf_send_tcp(NULL, s->rule.ptr, s->key[PF_SK_WIRE]->af,
		    &s->key[PF_SK_WIRE]->addr[1],
		    &s->key[PF_SK_WIRE]->addr[0],
		    s->key[PF_SK_WIRE]->port[1],
		    s->key[PF_SK_WIRE]->port[0],
		    s->src.seqhi, s->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, s->tag, NULL);
	}

	LIST_REMOVE(s, entry);
	pf_src_tree_remove_state(s);

	if (pfsync_delete_state_ptr != NULL)
		pfsync_delete_state_ptr(s);

	STATE_DEC_COUNTERS(s);

	s->timeout = PFTM_UNLINKED;

	PF_HASHROW_UNLOCK(ih);

	pf_detach_state(s);
	refcount_release(&s->refs);

	return (pf_release_state(s));
}
void
pf_free_state(struct pf_state *cur)
{

	KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
	KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
	    cur->timeout));

	pf_normalize_tcp_cleanup(cur);
	uma_zfree(V_pf_state_z, cur);
	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_REMOVALS], 1);
}
/*
 * Called only from pf_purge_thread(), thus serialized.
 */
static u_int
pf_purge_expired_states(u_int i, int maxcheck)
{
	struct pf_idhash *ih;
	struct pf_state *s;

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	/*
	 * Go through hash and unlink states that expire now.
	 */
	while (maxcheck > 0) {

		ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (pf_state_expires(s) <= time_uptime) {
				V_pf_status.states -=
				    pf_unlink_state(s, PF_ENTER_LOCKED);
				goto relock;
			}
			s->rule.ptr->rule_flag |= PFRULE_REFS;
			if (s->nat_rule.ptr != NULL)
				s->nat_rule.ptr->rule_flag |= PFRULE_REFS;
			if (s->anchor.ptr != NULL)
				s->anchor.ptr->rule_flag |= PFRULE_REFS;
			s->kif->pfik_flags |= PFI_IFLAG_REFS;
			if (s->rt_kif)
				s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
		}
		PF_HASHROW_UNLOCK(ih);

		/* Return when we hit end of hash. */
		if (++i > pf_hashmask) {
			V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
			return (0);
		}

		maxcheck--;
	}

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	return (i);
}
static void
pf_purge_unlinked_rules()
{
	struct pf_rulequeue tmpq;
	struct pf_rule *r, *r1;

	/*
	 * If we have overloading task pending, then we'd
	 * better skip purging this time. There is a tiny
	 * probability that overloading task references
	 * an already unlinked rule.
	 */
	PF_OVERLOADQ_LOCK();
	if (!SLIST_EMPTY(&V_pf_overloadqueue)) {
		PF_OVERLOADQ_UNLOCK();
		return;
	}
	PF_OVERLOADQ_UNLOCK();

	/*
	 * Do naive mark-and-sweep garbage collecting of old rules.
	 * Reference flag is raised by pf_purge_expired_states()
	 * and pf_purge_expired_src_nodes().
	 *
	 * To avoid LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK,
	 * use a temporary queue.
	 */
	TAILQ_INIT(&tmpq);
	PF_UNLNKDRULES_LOCK();
	TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) {
		if (!(r->rule_flag & PFRULE_REFS)) {
			TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries);
			TAILQ_INSERT_TAIL(&tmpq, r, entries);
		} else
			r->rule_flag &= ~PFRULE_REFS;
	}
	PF_UNLNKDRULES_UNLOCK();

	if (!TAILQ_EMPTY(&tmpq)) {
		PF_RULES_WLOCK();
		TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) {
			TAILQ_REMOVE(&tmpq, r, entries);
			pf_free_rule(r);
		}
		PF_RULES_WUNLOCK();
	}
}
void
pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t a = ntohl(addr->addr32[0]);
		printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
		    (a>>8)&255, a&255);
		if (p)
			printf(":%u", ntohs(p));
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		u_int16_t b;
		u_int8_t i, curstart, curend, maxstart, maxend;
		curstart = curend = maxstart = maxend = 255;
		for (i = 0; i < 8; i++) {
			if (!addr->addr16[i]) {
				if (curstart == 255)
					curstart = i;
				curend = i;
			} else {
				if ((curend - curstart) >
				    (maxend - maxstart)) {
					maxstart = curstart;
					maxend = curend;
				}
				curstart = curend = 255;
			}
		}
		if ((curend - curstart) >
		    (maxend - maxstart)) {
			maxstart = curstart;
			maxend = curend;
		}
		for (i = 0; i < 8; i++) {
			if (i >= maxstart && i <= maxend) {
				if (i == 0)
					printf(":");
				if (i == maxend)
					printf(":");
			} else {
				b = ntohs(addr->addr16[i]);
				printf("%x", b);
				if (i < 7)
					printf(":");
			}
		}
		if (p)
			printf("[%u]", ntohs(p));
		break;
	}
#endif /* INET6 */
	}
}
void
pf_print_state(struct pf_state *s)
{
	pf_print_state_parts(s, NULL, NULL);
}

static void
pf_print_state_parts(struct pf_state *s,
    struct pf_state_key *skwp, struct pf_state_key *sksp)
{
	struct pf_state_key *skw, *sks;
	u_int8_t proto, dir;

	/* Do our best to fill these, but they're skipped if NULL */
	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
	proto = skw ? skw->proto : (sks ? sks->proto : 0);
	dir = s ? s->direction : 0;

	switch (proto) {
	case IPPROTO_IPV4:
		printf("IPv4");
		break;
	case IPPROTO_IPV6:
		printf("IPv6");
		break;
	case IPPROTO_TCP:
		printf("TCP");
		break;
	case IPPROTO_UDP:
		printf("UDP");
		break;
	case IPPROTO_ICMP:
		printf("ICMP");
		break;
	case IPPROTO_ICMPV6:
		printf("ICMPv6");
		break;
	default:
		printf("%u", skw->proto);
		break;
	}
	switch (dir) {
	case PF_IN:
		printf(" in");
		break;
	case PF_OUT:
		printf(" out");
		break;
	}
	if (skw) {
		printf(" wire: ");
		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
		printf(" ");
		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
	}
	if (sks) {
		printf(" stack: ");
		if (sks != skw) {
			pf_print_host(&sks->addr[0], sks->port[0], sks->af);
			printf(" ");
			pf_print_host(&sks->addr[1], sks->port[1], sks->af);
		} else
			printf("-");
	}
	if (s) {
		if (proto == IPPROTO_TCP) {
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->src.seqlo, s->src.seqhi,
			    s->src.max_win, s->src.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->src.wscale & PF_WSCALE_MASK);
			printf("]");
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->dst.seqlo, s->dst.seqhi,
			    s->dst.max_win, s->dst.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->dst.wscale & PF_WSCALE_MASK);
			printf("]");
		}
		printf(" %u:%u", s->src.state, s->dst.state);
	}
}
void
pf_print_flags(u_int8_t f)
{
	if (f)
		printf(" ");
	if (f & TH_FIN)
		printf("F");
	if (f & TH_SYN)
		printf("S");
	if (f & TH_RST)
		printf("R");
	if (f & TH_PUSH)
		printf("P");
	if (f & TH_ACK)
		printf("A");
	if (f & TH_URG)
		printf("U");
	if (f & TH_ECE)
		printf("E");
	if (f & TH_CWR)
		printf("W");
}
#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {

		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
		if (cur->af != prev->af)
			PF_SET_SKIP_STEPS(PF_SKIP_AF);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
		if (cur->src.neg != prev->src.neg ||
		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
		if (cur->src.port[0] != prev->src.port[0] ||
		    cur->src.port[1] != prev->src.port[1] ||
		    cur->src.port_op != prev->src.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
		if (cur->dst.neg != prev->dst.neg ||
		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
		if (cur->dst.port[0] != prev->dst.port[0] ||
		    cur->dst.port[1] != prev->dst.port[1] ||
		    cur->dst.port_op != prev->dst.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}
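
/*
 * Skip steps let rule evaluation jump over runs of rules that share a
 * criterion the current packet has already failed.  E.g. if ten
 * consecutive rules all specify "proto tcp" and the packet is UDP,
 * the PF_SKIP_PROTO pointer of the first rule leads straight past the
 * run, so the rules in between are never visited.
 */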
static int
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
{
	if (aw1->type != aw2->type)
		return (1);
	switch (aw1->type) {
	case PF_ADDR_ADDRMASK:
	case PF_ADDR_RANGE:
		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
			return (1);
		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
			return (1);
		return (0);
	case PF_ADDR_DYNIFTL:
		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		return (0);
	case PF_ADDR_TABLE:
		return (aw1->p.tbl != aw2->p.tbl);
	default:
		printf("invalid address type: %d\n", aw1->type);
		return (1);
	}
}
/*
 * Checksum updates are a little complicated because the checksum in the
 * TCP/UDP header isn't always a full checksum. In some cases (i.e. output)
 * it's a pseudo-header checksum, which is a partial checksum over src/dst IP
 * addresses, protocol number and length.
 *
 * That means we have the following cases:
 *  * Input or forwarding: we don't have TSO, the checksum fields are full
 *    checksums, we need to update the checksum whenever we change anything.
 *  * Output (i.e. the checksum is a pseudo-header checksum):
 *    x The field being updated is src/dst address or affects the length of
 *      the packet. We need to update the pseudo-header checksum (note that
 *      this checksum is not ones' complement).
 *    x Some other field is being modified (e.g. src/dst port numbers): We
 *      don't have to update anything.
 */

u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
	u_int32_t	l;

	if (udp && !cksum)
		return (0x0000);
	l = cksum + old - new;
	l = (l >> 16) + (l & 65535);
	l = l & 65535;
	if (udp && !l)
		return (0xFFFF);
	return (l);
}

u_int16_t
pf_proto_cksum_fixup(struct mbuf *m, u_int16_t cksum, u_int16_t old,
    u_int16_t new, u_int8_t udp)
{
	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
		return (cksum);

	return (pf_cksum_fixup(cksum, old, new, udp));
}
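
/*
 * Worked example of the incremental update (in the style of RFC 1624):
 * rewriting source port 1000 (0x03e8) to 2000 (0x07d0) in a header
 * whose checksum is 0xb1e6 computes 0xb1e6 + 0x03e8 - 0x07d0 and folds
 * any carry back into the low 16 bits, instead of re-checksumming the
 * entire segment.
 */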
static void
pf_change_ap(struct mbuf *m, struct pf_addr *a, u_int16_t *p, u_int16_t *ic,
    u_int16_t *pc, struct pf_addr *an, u_int16_t pn, u_int8_t u,
    sa_family_t af)
{
	struct pf_addr	ao;
	u_int16_t	po = *p;

	PF_ACPY(&ao, a, af);
	PF_ACPY(a, an, af);

	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
		*pc = ~*pc;

	*p = pn;

	switch (af) {
#ifdef INET
	case AF_INET:
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    ao.addr16[0], an->addr16[0], 0),
		    ao.addr16[1], an->addr16[1], 0);
		*pc = pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u);
		*pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    ao.addr16[2], an->addr16[2], u),
		    ao.addr16[3], an->addr16[3], u),
		    ao.addr16[4], an->addr16[4], u),
		    ao.addr16[5], an->addr16[5], u),
		    ao.addr16[6], an->addr16[6], u),
		    ao.addr16[7], an->addr16[7], u);
		*pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
		break;
#endif /* INET6 */
	}

	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA |
	    CSUM_DELAY_DATA_IPV6)) {
		*pc = ~*pc;
		if (! *pc)
			*pc = 0xffff;
	}
}
/* Changes a u_int32_t. Uses a void * so there are no align restrictions */
void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
	u_int32_t	ao;

	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));
	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
	    ao % 65536, an % 65536, u);
}

void
pf_change_proto_a(struct mbuf *m, void *a, u_int16_t *c, u_int32_t an, u_int8_t udp)
{
	u_int32_t	ao;

	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));

	*c = pf_proto_cksum_fixup(m,
	    pf_proto_cksum_fixup(m, *c, ao / 65536, an / 65536, udp),
	    ao % 65536, an % 65536, udp);
}
#ifdef INET6
static void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
	struct pf_addr	ao;

	PF_ACPY(&ao, a, AF_INET6);
	PF_ACPY(a, an, AF_INET6);

	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(*c,
	    ao.addr16[0], an->addr16[0], u),
	    ao.addr16[1], an->addr16[1], u),
	    ao.addr16[2], an->addr16[2], u),
	    ao.addr16[3], an->addr16[3], u),
	    ao.addr16[4], an->addr16[4], u),
	    ao.addr16[5], an->addr16[5], u),
	    ao.addr16[6], an->addr16[6], u),
	    ao.addr16[7], an->addr16[7], u);
}
#endif /* INET6 */
static void
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
{
	struct pf_addr	oia, ooa;

	PF_ACPY(&oia, ia, af);
	if (oa)
		PF_ACPY(&ooa, oa, af);

	/* Change inner protocol port, fix inner protocol checksum. */
	if (ip != NULL) {
		u_int16_t	oip = *ip;
		u_int32_t	opc;

		if (pc != NULL)
			opc = *pc;
		*ip = np;
		if (pc != NULL)
			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
		if (pc != NULL)
			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
	}
	/* Change inner ip address, fix inner ip and icmp checksums. */
	PF_ACPY(ia, na, af);
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t	 oh2c = *h2c;

		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], u),
		    oia.addr16[1], ia->addr16[1], u),
		    oia.addr16[2], ia->addr16[2], u),
		    oia.addr16[3], ia->addr16[3], u),
		    oia.addr16[4], ia->addr16[4], u),
		    oia.addr16[5], ia->addr16[5], u),
		    oia.addr16[6], ia->addr16[6], u),
		    oia.addr16[7], ia->addr16[7], u);
		break;
#endif /* INET6 */
	}
	/* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
	if (oa) {
		PF_ACPY(oa, na, af);
		switch (af) {
#ifdef INET
		case AF_INET:
			*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
			    ooa.addr16[0], oa->addr16[0], 0),
			    ooa.addr16[1], oa->addr16[1], 0);
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(*ic,
			    ooa.addr16[0], oa->addr16[0], u),
			    ooa.addr16[1], oa->addr16[1], u),
			    ooa.addr16[2], oa->addr16[2], u),
			    ooa.addr16[3], oa->addr16[3], u),
			    ooa.addr16[4], oa->addr16[4], u),
			    ooa.addr16[5], oa->addr16[5], u),
			    ooa.addr16[6], oa->addr16[6], u),
			    ooa.addr16[7], oa->addr16[7], u);
			break;
#endif /* INET6 */
		}
	}
}
/*
 * Need to modulate the sequence numbers in the TCP SACK option
 * (credits to Krzysztof Pfaff for report and patch)
 */
static int
pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *dst)
{
	int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
	u_int8_t opts[TCP_MAXOLEN], *opt = opts;
	int copyback = 0, i, olen;
	struct sackblk sack;

#define	TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
	if (hlen < TCPOLEN_SACKLEN ||
	    !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
		return 0;

	while (hlen >= TCPOLEN_SACKLEN) {
		olen = opt[1];
		switch (*opt) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
		case TCPOPT_NOP:
			opt++;
			hlen--;
			break;
		case TCPOPT_SACK:
			if (olen > hlen)
				olen = hlen;
			if (olen >= TCPOLEN_SACKLEN) {
				for (i = 2; i + TCPOLEN_SACK <= olen;
				    i += TCPOLEN_SACK) {
					memcpy(&sack, &opt[i], sizeof(sack));
					pf_change_proto_a(m, &sack.start, &th->th_sum,
					    htonl(ntohl(sack.start) - dst->seqdiff), 0);
					pf_change_proto_a(m, &sack.end, &th->th_sum,
					    htonl(ntohl(sack.end) - dst->seqdiff), 0);
					memcpy(&opt[i], &sack, sizeof(sack));
				}
				copyback = 1;
			}
			/* FALLTHROUGH */
		default:
			if (olen < 2)
				olen = 2;
			hlen -= olen;
			opt += olen;
		}
	}

	if (copyback)
		m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts);
	return (copyback);
}
static void
pf_send_tcp(struct mbuf *replyto, const struct pf_rule *r, sa_family_t af,
    const struct pf_addr *saddr, const struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
    u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
    u_int16_t rtag, struct ifnet *ifp)
{
	struct pf_send_entry *pfse;
	struct mbuf	*m;
	int		 len, tlen;
#ifdef INET
	struct ip	*h = NULL;
#endif /* INET */
#ifdef INET6
	struct ip6_hdr	*h6 = NULL;
#endif /* INET6 */
	struct tcphdr	*th;
	char		*opt;
	struct pf_mtag  *pf_mtag;

	len = 0;

	/* maximum segment size tcp option */
	tlen = sizeof(struct tcphdr);
	if (mss)
		tlen += 4;

	switch (af) {
#ifdef INET
	case AF_INET:
		len = sizeof(struct ip) + tlen;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		len = sizeof(struct ip6_hdr) + tlen;
		break;
#endif /* INET6 */
	default:
		panic("%s: unsupported af %d", __func__, af);
	}

	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
	if (pfse == NULL)
		return;
	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(pfse, M_PFTEMP);
		return;
	}
#ifdef MAC
	mac_netinet_firewall_send(m);
#endif
	if ((pf_mtag = pf_get_mtag(m)) == NULL) {
		free(pfse, M_PFTEMP);
		m_freem(m);
		return;
	}
	if (tag)
		m->m_flags |= M_SKIP_FIREWALL;
	pf_mtag->tag = rtag;

	if (r != NULL && r->rtableid >= 0)
		M_SETFIB(m, r->rtableid);

#ifdef ALTQ
	if (r != NULL && r->qid) {
		pf_mtag->qid = r->qid;

		/* add hints for ecn */
		pf_mtag->hdr = mtod(m, struct ip *);
	}
#endif /* ALTQ */
	m->m_data += max_linkhdr;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	bzero(m->m_data, len);
	switch (af) {
#ifdef INET
	case AF_INET:
		h = mtod(m, struct ip *);

		/* IP header fields included in the TCP checksum */
		h->ip_p = IPPROTO_TCP;
		h->ip_len = htons(tlen);
		h->ip_src.s_addr = saddr->v4.s_addr;
		h->ip_dst.s_addr = daddr->v4.s_addr;

		th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		h6 = mtod(m, struct ip6_hdr *);

		/* IP header fields included in the TCP checksum */
		h6->ip6_nxt = IPPROTO_TCP;
		h6->ip6_plen = htons(tlen);
		memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
		memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));

		th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
		break;
#endif /* INET6 */
	default:
		panic("%s: unsupported af %d", __func__, af);
	}

	/* TCP header */
	th->th_sport = sport;
	th->th_dport = dport;
	th->th_seq = htonl(seq);
	th->th_ack = htonl(ack);
	th->th_off = tlen >> 2;
	th->th_flags = flags;
	th->th_win = htons(win);

	if (mss) {
		opt = (char *)(th + 1);
		opt[0] = TCPOPT_MAXSEG;
		opt[1] = 4;
		HTONS(mss);
		bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
	}

	switch (af) {
#ifdef INET
	case AF_INET:
		/* TCP checksum */
		th->th_sum = in_cksum(m, len);

		/* Finish the IP header */
		h->ip_v = 4;
		h->ip_hl = sizeof(*h) >> 2;
		h->ip_tos = IPTOS_LOWDELAY;
		h->ip_off = htons(V_path_mtu_discovery ? IP_DF : 0);
		h->ip_len = htons(len);
		h->ip_ttl = ttl ? ttl : V_ip_defttl;
		h->ip_sum = 0;

		pfse->pfse_type = PFSE_IP;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		/* TCP checksum */
		th->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr), tlen);

		h6->ip6_vfc |= IPV6_VERSION;
		h6->ip6_hlim = IPV6_DEFHLIM;

		pfse->pfse_type = PFSE_IP6;
		break;
#endif /* INET6 */
	}
	pfse->pfse_m = m;
	pf_send(pfse);
}
static void
pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
    struct pf_rule *r)
{
	struct pf_send_entry *pfse;
	struct mbuf *m0;
	struct pf_mtag *pf_mtag;

	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
	if (pfse == NULL)
		return;

	if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) {
		free(pfse, M_PFTEMP);
		return;
	}

	if ((pf_mtag = pf_get_mtag(m0)) == NULL) {
		free(pfse, M_PFTEMP);
		m_freem(m0);
		return;
	}
	/* XXX: revisit */
	m0->m_flags |= M_SKIP_FIREWALL;

	if (r->rtableid >= 0)
		M_SETFIB(m0, r->rtableid);

#ifdef ALTQ
	if (r->qid) {
		pf_mtag->qid = r->qid;
		/* add hints for ecn */
		pf_mtag->hdr = mtod(m0, struct ip *);
	}
#endif /* ALTQ */

	switch (af) {
#ifdef INET
	case AF_INET:
		pfse->pfse_type = PFSE_ICMP;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		pfse->pfse_type = PFSE_ICMP6;
		break;
#endif /* INET6 */
	}
	pfse->pfse_m = m0;
	pfse->pfse_icmp_type = type;
	pfse->pfse_icmp_code = code;
	pf_send(pfse);
}
/*
 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
 * If n is 0, they match if they are equal. If n is != 0, they match if they
 * are different.
 */
int
pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
    struct pf_addr *b, sa_family_t af)
{
	int	match = 0;

	switch (af) {
#ifdef INET
	case AF_INET:
		if ((a->addr32[0] & m->addr32[0]) ==
		    (b->addr32[0] & m->addr32[0]))
			match++;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (((a->addr32[0] & m->addr32[0]) ==
		     (b->addr32[0] & m->addr32[0])) &&
		    ((a->addr32[1] & m->addr32[1]) ==
		     (b->addr32[1] & m->addr32[1])) &&
		    ((a->addr32[2] & m->addr32[2]) ==
		     (b->addr32[2] & m->addr32[2])) &&
		    ((a->addr32[3] & m->addr32[3]) ==
		     (b->addr32[3] & m->addr32[3])))
			match++;
		break;
#endif /* INET6 */
	}
	if (match) {
		if (n)
			return (0);
		else
			return (1);
	} else {
		if (n)
			return (1);
		else
			return (0);
	}
}
/*
 * Return 1 if b <= a <= e, otherwise return 0.
 */
int
pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
    struct pf_addr *a, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		if ((a->addr32[0] < b->addr32[0]) ||
		    (a->addr32[0] > e->addr32[0]))
			return (0);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		int	i;

		/* check a >= b */
		for (i = 0; i < 4; ++i)
			if (a->addr32[i] > b->addr32[i])
				break;
			else if (a->addr32[i] < b->addr32[i])
				return (0);
		/* check a <= e */
		for (i = 0; i < 4; ++i)
			if (a->addr32[i] < e->addr32[i])
				break;
			else if (a->addr32[i] > e->addr32[i])
				return (0);
		break;
	}
#endif /* INET6 */
	}
	return (1);
}
static int
pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
{
	switch (op) {
	case PF_OP_IRG:
		return ((p > a1) && (p < a2));
	case PF_OP_XRG:
		return ((p < a1) || (p > a2));
	case PF_OP_RRG:
		return ((p >= a1) && (p <= a2));
	case PF_OP_EQ:
		return (p == a1);
	case PF_OP_NE:
		return (p != a1);
	case PF_OP_LT:
		return (p < a1);
	case PF_OP_LE:
		return (p <= a1);
	case PF_OP_GT:
		return (p > a1);
	case PF_OP_GE:
		return (p >= a1);
	}
	return (0); /* never reached */
}

int
pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
{
	NTOHS(a1);
	NTOHS(a2);
	NTOHS(p);
	return (pf_match(op, a1, a2, p));
}

static int
pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
{
	if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
		return (0);
	return (pf_match(op, a1, a2, u));
}

static int
pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
{
	if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
		return (0);
	return (pf_match(op, a1, a2, g));
}

int
pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag, int mtag)
{
	if (*tag == -1)
		*tag = mtag;

	return ((!r->match_tag_not && r->match_tag == *tag) ||
	    (r->match_tag_not && r->match_tag != *tag));
}

int
pf_tag_packet(struct mbuf *m, struct pf_pdesc *pd, int tag)
{

	KASSERT(tag > 0, ("%s: tag %d", __func__, tag));

	if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(m)) == NULL))
		return (ENOMEM);

	pd->pf_mtag->tag = tag;

	return (0);
}
2656 #define PF_ANCHOR_STACKSIZE 32
2657 struct pf_anchor_stackframe {
2658 struct pf_ruleset *rs;
2659 struct pf_rule *r; /* XXX: + match bit */
2660 struct pf_anchor *child;
2664 * XXX: We rely on malloc(9) returning pointer aligned addresses.
2666 #define PF_ANCHORSTACK_MATCH 0x00000001
2667 #define PF_ANCHORSTACK_MASK (PF_ANCHORSTACK_MATCH)
2669 #define PF_ANCHOR_MATCH(f) ((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH)
2670 #define PF_ANCHOR_RULE(f) (struct pf_rule *) \
2671 ((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK)
2672 #define PF_ANCHOR_SET_MATCH(f) do { (f)->r = (void *) \
2673 ((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH); \
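/*
* A sketch of the encoding above: malloc(9) storage is at least
* pointer-aligned, so bit 0 of f->r is always clear and can carry the
* per-frame "something matched" flag.  Round trip:
*
*	f->r = r;			PF_ANCHOR_MATCH(f) == 0
*	PF_ANCHOR_SET_MATCH(f);		bit 0 now set
*	r = PF_ANCHOR_RULE(f);		mask recovers the original pointer
*/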
2677 pf_step_into_anchor(struct pf_anchor_stackframe *stack, int *depth,
2678 struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
2681 struct pf_anchor_stackframe *f;
2687 if (*depth >= PF_ANCHOR_STACKSIZE) {
2688 printf("%s: anchor stack overflow on %s\n",
2689 __func__, (*r)->anchor->name);
2690 *r = TAILQ_NEXT(*r, entries);
2692 } else if (*depth == 0 && a != NULL)
2694 f = stack + (*depth)++;
2697 if ((*r)->anchor_wildcard) {
2698 struct pf_anchor_node *parent = &(*r)->anchor->children;
2700 if ((f->child = RB_MIN(pf_anchor_node, parent)) == NULL) {
2704 *rs = &f->child->ruleset;
2707 *rs = &(*r)->anchor->ruleset;
2709 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2713 pf_step_out_of_anchor(struct pf_anchor_stackframe *stack, int *depth,
2714 struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
2717 struct pf_anchor_stackframe *f;
2726 f = stack + *depth - 1;
2727 fr = PF_ANCHOR_RULE(f);
2728 if (f->child != NULL) {
2729 struct pf_anchor_node *parent;
2732 * This block traverses through
2733 * a wildcard anchor.
2735 parent = &fr->anchor->children;
if (match != NULL && *match) {
/*
* If any of "*" matched, then
* "foo/ *" matched, mark frame
* appropriately.
*/
PF_ANCHOR_SET_MATCH(f);
*match = 0;
}
2745 f->child = RB_NEXT(pf_anchor_node, parent, f->child);
2746 if (f->child != NULL) {
2747 *rs = &f->child->ruleset;
2748 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2756 if (*depth == 0 && a != NULL)
2759 if (PF_ANCHOR_MATCH(f) || (match != NULL && *match))
2761 *r = TAILQ_NEXT(fr, entries);
2762 } while (*r == NULL);
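/*
* pf_poolmask() below merges a translation pool address with the
* packet's address under the pool mask: network bits come from raddr,
* host bits survive from saddr.  Worked IPv4 example (illustrative
* values): raddr 192.0.2.0, rmask 255.255.255.0, saddr 10.1.2.3
* yields naddr 192.0.2.3, i.e. (raddr & rmask) | (~rmask & saddr).
*/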
2769 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
2770 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
2775 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2776 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2780 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2781 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2782 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
2783 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
2784 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
2785 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
2786 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
2787 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
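/*
* pf_addr_inc() steps to the next address when walking a pool.  For
* AF_INET6 it behaves as a 128-bit increment: the carry ripples from
* addr32[3] toward addr32[0], so e.g. ::ffff:ffff + 1 becomes ::1:0:0.
*/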
2793 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
2798 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
2802 if (addr->addr32[3] == 0xffffffff) {
2803 addr->addr32[3] = 0;
2804 if (addr->addr32[2] == 0xffffffff) {
2805 addr->addr32[2] = 0;
2806 if (addr->addr32[1] == 0xffffffff) {
2807 addr->addr32[1] = 0;
2809 htonl(ntohl(addr->addr32[0]) + 1);
2812 htonl(ntohl(addr->addr32[1]) + 1);
2815 htonl(ntohl(addr->addr32[2]) + 1);
2818 htonl(ntohl(addr->addr32[3]) + 1);
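/*
* pf_socket_lookup() maps the packet to a local PCB so that rules with
* user/group criteria can match socket credentials.  Results land in
* pd->lookup.uid/gid; note UID_MAX/GID_MAX double as the "no socket
* found" markers that pf_match_uid()/pf_match_gid() above test for.
*/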
2825 pf_socket_lookup(int direction, struct pf_pdesc *pd, struct mbuf *m)
2827 struct pf_addr *saddr, *daddr;
2828 u_int16_t sport, dport;
2829 struct inpcbinfo *pi;
2832 pd->lookup.uid = UID_MAX;
2833 pd->lookup.gid = GID_MAX;
2835 switch (pd->proto) {
2837 if (pd->hdr.tcp == NULL)
2839 sport = pd->hdr.tcp->th_sport;
2840 dport = pd->hdr.tcp->th_dport;
2844 if (pd->hdr.udp == NULL)
2846 sport = pd->hdr.udp->uh_sport;
2847 dport = pd->hdr.udp->uh_dport;
2853 if (direction == PF_IN) {
2868 inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4,
2869 dport, INPLOOKUP_RLOCKPCB, NULL, m);
2871 inp = in_pcblookup_mbuf(pi, saddr->v4, sport,
2872 daddr->v4, dport, INPLOOKUP_WILDCARD |
2873 INPLOOKUP_RLOCKPCB, NULL, m);
2881 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6,
2882 dport, INPLOOKUP_RLOCKPCB, NULL, m);
2884 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport,
2885 &daddr->v6, dport, INPLOOKUP_WILDCARD |
2886 INPLOOKUP_RLOCKPCB, NULL, m);
2896 INP_RLOCK_ASSERT(inp);
2897 pd->lookup.uid = inp->inp_cred->cr_uid;
2898 pd->lookup.gid = inp->inp_cred->cr_groups[0];
2905 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
2909 u_int8_t *opt, optlen;
2910 u_int8_t wscale = 0;
2912 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
2913 if (hlen <= sizeof(struct tcphdr))
2915 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
2917 opt = hdr + sizeof(struct tcphdr);
2918 hlen -= sizeof(struct tcphdr);
2928 if (wscale > TCP_MAX_WINSHIFT)
2929 wscale = TCP_MAX_WINSHIFT;
2930 wscale |= PF_WSCALE_FLAG;
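/*
* pf_get_mss() below mirrors the option walk in pf_get_wscale() above:
* pull the TCP header plus options once, then scan kind/length pairs
* for TCPOPT_MAXSEG, falling back to V_tcp_mssdflt when the option is
* absent or the header carries no options at all.
*/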
2945 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
2949 u_int8_t *opt, optlen;
2950 u_int16_t mss = V_tcp_mssdflt;
2952 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
2953 if (hlen <= sizeof(struct tcphdr))
2955 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
2957 opt = hdr + sizeof(struct tcphdr);
2958 hlen -= sizeof(struct tcphdr);
2959 while (hlen >= TCPOLEN_MAXSEG) {
2967 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
2983 pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
2986 struct sockaddr_in *dst;
2990 struct sockaddr_in6 *dst6;
2991 struct route_in6 ro6;
2993 struct rtentry *rt = NULL;
2995 u_int16_t mss = V_tcp_mssdflt;
3000 hlen = sizeof(struct ip);
3001 bzero(&ro, sizeof(ro));
3002 dst = (struct sockaddr_in *)&ro.ro_dst;
3003 dst->sin_family = AF_INET;
3004 dst->sin_len = sizeof(*dst);
3005 dst->sin_addr = addr->v4;
3006 in_rtalloc_ign(&ro, 0, rtableid);
3012 hlen = sizeof(struct ip6_hdr);
3013 bzero(&ro6, sizeof(ro6));
3014 dst6 = (struct sockaddr_in6 *)&ro6.ro_dst;
3015 dst6->sin6_family = AF_INET6;
3016 dst6->sin6_len = sizeof(*dst6);
3017 dst6->sin6_addr = addr->v6;
3018 in6_rtalloc_ign(&ro6, 0, rtableid);
3024 if (rt && rt->rt_ifp) {
3025 mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr);
3026 mss = max(V_tcp_mssdflt, mss);
3029 mss = min(mss, offer);
3030 mss = max(mss, 64); /* sanity - at least max opt space */
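/*
* pf_tcp_iss() produces initial sequence numbers for modulated and
* synproxy states, roughly in the style of RFC 1948: a keyed MD5
* digest over the connection 4-tuple, advanced by a per-call offset
* plus a bounded random increment so sequence space is not reused.
*/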
3035 pf_tcp_iss(struct pf_pdesc *pd)
3038 u_int32_t digest[4];
3040 if (V_pf_tcp_secret_init == 0) {
3041 read_random(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret));
3042 MD5Init(&V_pf_tcp_secret_ctx);
3043 MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret,
3044 sizeof(V_pf_tcp_secret));
3045 V_pf_tcp_secret_init = 1;
3048 ctx = V_pf_tcp_secret_ctx;
3050 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short));
3051 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short));
3052 if (pd->af == AF_INET6) {
3053 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
3054 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
3056 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
3057 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
3059 MD5Final((u_char *)digest, &ctx);
3060 V_pf_tcp_iss_off += 4096;
3061 #define ISN_RANDOM_INCREMENT (4096 - 1)
return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) +
    V_pf_tcp_iss_off);
3064 #undef ISN_RANDOM_INCREMENT
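/*
* pf_test_rule() is the slow path taken when no state matched: apply
* any BINAT/NAT/RDR translation, walk the active filter ruleset using
* the precomputed skip steps, honor the winning rule's drop/return
* policy, and hand off to pf_create_state() if the rule keeps state.
*/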
3068 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
3069 struct pfi_kif *kif, struct mbuf *m, int off, struct pf_pdesc *pd,
3070 struct pf_rule **am, struct pf_ruleset **rsm, struct inpcb *inp)
3072 struct pf_rule *nr = NULL;
3073 struct pf_addr * const saddr = pd->src;
3074 struct pf_addr * const daddr = pd->dst;
3075 sa_family_t af = pd->af;
3076 struct pf_rule *r, *a = NULL;
3077 struct pf_ruleset *ruleset = NULL;
3078 struct pf_src_node *nsn = NULL;
3079 struct tcphdr *th = pd->hdr.tcp;
3080 struct pf_state_key *sk = NULL, *nk = NULL;
3082 int rewrite = 0, hdrlen = 0;
3083 int tag = -1, rtableid = -1;
3087 u_int16_t sport = 0, dport = 0;
3088 u_int16_t bproto_sum = 0, bip_sum = 0;
3089 u_int8_t icmptype = 0, icmpcode = 0;
3090 struct pf_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
3095 INP_LOCK_ASSERT(inp);
3096 pd->lookup.uid = inp->inp_cred->cr_uid;
3097 pd->lookup.gid = inp->inp_cred->cr_groups[0];
3098 pd->lookup.done = 1;
3101 switch (pd->proto) {
3103 sport = th->th_sport;
3104 dport = th->th_dport;
3105 hdrlen = sizeof(*th);
3108 sport = pd->hdr.udp->uh_sport;
3109 dport = pd->hdr.udp->uh_dport;
3110 hdrlen = sizeof(*pd->hdr.udp);
3114 if (pd->af != AF_INET)
3116 sport = dport = pd->hdr.icmp->icmp_id;
3117 hdrlen = sizeof(*pd->hdr.icmp);
3118 icmptype = pd->hdr.icmp->icmp_type;
3119 icmpcode = pd->hdr.icmp->icmp_code;
3121 if (icmptype == ICMP_UNREACH ||
3122 icmptype == ICMP_SOURCEQUENCH ||
3123 icmptype == ICMP_REDIRECT ||
3124 icmptype == ICMP_TIMXCEED ||
3125 icmptype == ICMP_PARAMPROB)
3130 case IPPROTO_ICMPV6:
3133 sport = dport = pd->hdr.icmp6->icmp6_id;
3134 hdrlen = sizeof(*pd->hdr.icmp6);
3135 icmptype = pd->hdr.icmp6->icmp6_type;
3136 icmpcode = pd->hdr.icmp6->icmp6_code;
3138 if (icmptype == ICMP6_DST_UNREACH ||
3139 icmptype == ICMP6_PACKET_TOO_BIG ||
3140 icmptype == ICMP6_TIME_EXCEEDED ||
3141 icmptype == ICMP6_PARAM_PROB)
3146 sport = dport = hdrlen = 0;
3150 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3152 /* check packet for BINAT/NAT/RDR */
3153 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, &sk,
3154 &nk, saddr, daddr, sport, dport, anchor_stack)) != NULL) {
3155 KASSERT(sk != NULL, ("%s: null sk", __func__));
3156 KASSERT(nk != NULL, ("%s: null nk", __func__));
3159 bip_sum = *pd->ip_sum;
3161 switch (pd->proto) {
3163 bproto_sum = th->th_sum;
3164 pd->proto_sum = &th->th_sum;
3166 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3167 nk->port[pd->sidx] != sport) {
3168 pf_change_ap(m, saddr, &th->th_sport, pd->ip_sum,
3169 &th->th_sum, &nk->addr[pd->sidx],
3170 nk->port[pd->sidx], 0, af);
3171 pd->sport = &th->th_sport;
3172 sport = th->th_sport;
3175 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3176 nk->port[pd->didx] != dport) {
3177 pf_change_ap(m, daddr, &th->th_dport, pd->ip_sum,
3178 &th->th_sum, &nk->addr[pd->didx],
3179 nk->port[pd->didx], 0, af);
3180 dport = th->th_dport;
3181 pd->dport = &th->th_dport;
3186 bproto_sum = pd->hdr.udp->uh_sum;
3187 pd->proto_sum = &pd->hdr.udp->uh_sum;
3189 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3190 nk->port[pd->sidx] != sport) {
3191 pf_change_ap(m, saddr, &pd->hdr.udp->uh_sport,
3192 pd->ip_sum, &pd->hdr.udp->uh_sum,
3193 &nk->addr[pd->sidx],
3194 nk->port[pd->sidx], 1, af);
3195 sport = pd->hdr.udp->uh_sport;
3196 pd->sport = &pd->hdr.udp->uh_sport;
3199 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3200 nk->port[pd->didx] != dport) {
3201 pf_change_ap(m, daddr, &pd->hdr.udp->uh_dport,
3202 pd->ip_sum, &pd->hdr.udp->uh_sum,
3203 &nk->addr[pd->didx],
3204 nk->port[pd->didx], 1, af);
3205 dport = pd->hdr.udp->uh_dport;
3206 pd->dport = &pd->hdr.udp->uh_dport;
3212 nk->port[0] = nk->port[1];
3213 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
3214 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3215 nk->addr[pd->sidx].v4.s_addr, 0);
3217 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
3218 pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
3219 nk->addr[pd->didx].v4.s_addr, 0);
3221 if (nk->port[1] != pd->hdr.icmp->icmp_id) {
3222 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
3223 pd->hdr.icmp->icmp_cksum, sport,
3225 pd->hdr.icmp->icmp_id = nk->port[1];
3226 pd->sport = &pd->hdr.icmp->icmp_id;
3228 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
3232 case IPPROTO_ICMPV6:
3233 nk->port[0] = nk->port[1];
3234 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3235 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3236 &nk->addr[pd->sidx], 0);
3238 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3239 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3240 &nk->addr[pd->didx], 0);
3249 &nk->addr[pd->sidx], AF_INET))
3250 pf_change_a(&saddr->v4.s_addr,
3252 nk->addr[pd->sidx].v4.s_addr, 0);
3255 &nk->addr[pd->didx], AF_INET))
3256 pf_change_a(&daddr->v4.s_addr,
3258 nk->addr[pd->didx].v4.s_addr, 0);
3264 &nk->addr[pd->sidx], AF_INET6))
3265 PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3268 &nk->addr[pd->didx], AF_INET6))
PF_ACPY(daddr, &nk->addr[pd->didx], af);
3282 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3283 r = r->skip[PF_SKIP_IFP].ptr;
3284 else if (r->direction && r->direction != direction)
3285 r = r->skip[PF_SKIP_DIR].ptr;
3286 else if (r->af && r->af != af)
3287 r = r->skip[PF_SKIP_AF].ptr;
3288 else if (r->proto && r->proto != pd->proto)
3289 r = r->skip[PF_SKIP_PROTO].ptr;
3290 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3291 r->src.neg, kif, M_GETFIB(m)))
3292 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3293 /* tcp/udp only. port_op always 0 in other cases */
3294 else if (r->src.port_op && !pf_match_port(r->src.port_op,
3295 r->src.port[0], r->src.port[1], sport))
3296 r = r->skip[PF_SKIP_SRC_PORT].ptr;
3297 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3298 r->dst.neg, NULL, M_GETFIB(m)))
3299 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3300 /* tcp/udp only. port_op always 0 in other cases */
3301 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3302 r->dst.port[0], r->dst.port[1], dport))
3303 r = r->skip[PF_SKIP_DST_PORT].ptr;
3304 /* icmp only. type always 0 in other cases */
3305 else if (r->type && r->type != icmptype + 1)
3306 r = TAILQ_NEXT(r, entries);
3307 /* icmp only. type always 0 in other cases */
3308 else if (r->code && r->code != icmpcode + 1)
3309 r = TAILQ_NEXT(r, entries);
else if (r->tos && r->tos != pd->tos)
3311 r = TAILQ_NEXT(r, entries);
3312 else if (r->rule_flag & PFRULE_FRAGMENT)
3313 r = TAILQ_NEXT(r, entries);
3314 else if (pd->proto == IPPROTO_TCP &&
3315 (r->flagset & th->th_flags) != r->flags)
3316 r = TAILQ_NEXT(r, entries);
3317 /* tcp/udp only. uid.op always 0 in other cases */
3318 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3319 pf_socket_lookup(direction, pd, m), 1)) &&
3320 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3322 r = TAILQ_NEXT(r, entries);
3323 /* tcp/udp only. gid.op always 0 in other cases */
3324 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
3325 pf_socket_lookup(direction, pd, m), 1)) &&
3326 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
3328 r = TAILQ_NEXT(r, entries);
3330 r->prob <= arc4random())
3331 r = TAILQ_NEXT(r, entries);
3332 else if (r->match_tag && !pf_match_tag(m, r, &tag,
3333 pd->pf_mtag ? pd->pf_mtag->tag : 0))
3334 r = TAILQ_NEXT(r, entries);
3335 else if (r->os_fingerprint != PF_OSFP_ANY &&
3336 (pd->proto != IPPROTO_TCP || !pf_osfp_match(
3337 pf_osfp_fingerprint(pd, m, off, th),
3338 r->os_fingerprint)))
3339 r = TAILQ_NEXT(r, entries);
3343 if (r->rtableid >= 0)
3344 rtableid = r->rtableid;
3345 if (r->anchor == NULL) {
3352 r = TAILQ_NEXT(r, entries);
3354 pf_step_into_anchor(anchor_stack, &asd,
3355 &ruleset, PF_RULESET_FILTER, &r, &a,
3358 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3359 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3366 REASON_SET(&reason, PFRES_MATCH);
3368 if (r->log || (nr != NULL && nr->log)) {
3370 m_copyback(m, off, hdrlen, pd->hdr.any);
3371 PFLOG_PACKET(kif, m, af, direction, reason, r->log ? r : nr, a,
3375 if ((r->action == PF_DROP) &&
3376 ((r->rule_flag & PFRULE_RETURNRST) ||
3377 (r->rule_flag & PFRULE_RETURNICMP) ||
3378 (r->rule_flag & PFRULE_RETURN))) {
3379 /* undo NAT changes, if they have taken place */
3381 PF_ACPY(saddr, &sk->addr[pd->sidx], af);
3382 PF_ACPY(daddr, &sk->addr[pd->didx], af);
3384 *pd->sport = sk->port[pd->sidx];
3386 *pd->dport = sk->port[pd->didx];
3388 *pd->proto_sum = bproto_sum;
3390 *pd->ip_sum = bip_sum;
3391 m_copyback(m, off, hdrlen, pd->hdr.any);
3393 if (pd->proto == IPPROTO_TCP &&
3394 ((r->rule_flag & PFRULE_RETURNRST) ||
3395 (r->rule_flag & PFRULE_RETURN)) &&
3396 !(th->th_flags & TH_RST)) {
3397 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
3409 h4 = mtod(m, struct ip *);
3410 len = ntohs(h4->ip_len) - off;
3415 h6 = mtod(m, struct ip6_hdr *);
3416 len = ntohs(h6->ip6_plen) - (off - sizeof(*h6));
3421 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
3422 REASON_SET(&reason, PFRES_PROTCKSUM);
3424 if (th->th_flags & TH_SYN)
3426 if (th->th_flags & TH_FIN)
3428 pf_send_tcp(m, r, af, pd->dst,
3429 pd->src, th->th_dport, th->th_sport,
3430 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
3431 r->return_ttl, 1, 0, kif->pfik_ifp);
3433 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
3435 pf_send_icmp(m, r->return_icmp >> 8,
3436 r->return_icmp & 255, af, r);
3437 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
3439 pf_send_icmp(m, r->return_icmp6 >> 8,
3440 r->return_icmp6 & 255, af, r);
3443 if (r->action == PF_DROP)
3446 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3447 REASON_SET(&reason, PFRES_MEMORY);
3451 M_SETFIB(m, rtableid);
3453 if (!state_icmp && (r->keep_state || nr != NULL ||
3454 (pd->flags & PFDESC_TCP_NORM))) {
3456 action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off,
3457 sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum,
3459 if (action != PF_PASS)
3463 uma_zfree(V_pf_state_key_z, sk);
3465 uma_zfree(V_pf_state_key_z, nk);
3468 /* copy back packet headers if we performed NAT operations */
3470 m_copyback(m, off, hdrlen, pd->hdr.any);
3472 if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) &&
3473 direction == PF_OUT &&
3474 pfsync_defer_ptr != NULL && pfsync_defer_ptr(*sm, m))
/*
* We want the state created, but we don't
* want to send this in case a partner
* firewall has to know about it to allow
* replies through it.
*/
3487 uma_zfree(V_pf_state_key_z, sk);
3489 uma_zfree(V_pf_state_key_z, nk);
3494 pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
3495 struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *nk,
3496 struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport,
3497 u_int16_t dport, int *rewrite, struct pfi_kif *kif, struct pf_state **sm,
3498 int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen)
3500 struct pf_state *s = NULL;
3501 struct pf_src_node *sn = NULL;
3502 struct tcphdr *th = pd->hdr.tcp;
3503 u_int16_t mss = V_tcp_mssdflt;
3506 /* check maximums */
3507 if (r->max_states &&
3508 (counter_u64_fetch(r->states_cur) >= r->max_states)) {
3509 counter_u64_add(V_pf_status.lcounters[LCNT_STATES], 1);
3510 REASON_SET(&reason, PFRES_MAXSTATES);
3513 /* src node for filter rule */
3514 if ((r->rule_flag & PFRULE_SRCTRACK ||
3515 r->rpool.opts & PF_POOL_STICKYADDR) &&
3516 pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
3517 REASON_SET(&reason, PFRES_SRCLIMIT);
3520 /* src node for translation rule */
3521 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3522 pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
3523 REASON_SET(&reason, PFRES_SRCLIMIT);
3526 s = uma_zalloc(V_pf_state_z, M_NOWAIT | M_ZERO);
3528 REASON_SET(&reason, PFRES_MEMORY);
3532 s->nat_rule.ptr = nr;
3534 STATE_INC_COUNTERS(s);
3536 s->state_flags |= PFSTATE_ALLOWOPTS;
3537 if (r->rule_flag & PFRULE_STATESLOPPY)
3538 s->state_flags |= PFSTATE_SLOPPY;
3539 s->log = r->log & PF_LOG_ALL;
3540 s->sync_state = PFSYNC_S_NONE;
3542 s->log |= nr->log & PF_LOG_ALL;
3543 switch (pd->proto) {
3545 s->src.seqlo = ntohl(th->th_seq);
3546 s->src.seqhi = s->src.seqlo + pd->p_len + 1;
3547 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
3548 r->keep_state == PF_STATE_MODULATE) {
3549 /* Generate sequence number modulator */
3550 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
3553 pf_change_proto_a(m, &th->th_seq, &th->th_sum,
3554 htonl(s->src.seqlo + s->src.seqdiff), 0);
3558 if (th->th_flags & TH_SYN) {
3560 s->src.wscale = pf_get_wscale(m, off,
3561 th->th_off, pd->af);
3563 s->src.max_win = MAX(ntohs(th->th_win), 1);
3564 if (s->src.wscale & PF_WSCALE_MASK) {
3565 /* Remove scale factor from initial window */
3566 int win = s->src.max_win;
3567 win += 1 << (s->src.wscale & PF_WSCALE_MASK);
3568 s->src.max_win = (win - 1) >>
3569 (s->src.wscale & PF_WSCALE_MASK);
3571 if (th->th_flags & TH_FIN)
3575 s->src.state = TCPS_SYN_SENT;
3576 s->dst.state = TCPS_CLOSED;
3577 s->timeout = PFTM_TCP_FIRST_PACKET;
3580 s->src.state = PFUDPS_SINGLE;
3581 s->dst.state = PFUDPS_NO_TRAFFIC;
3582 s->timeout = PFTM_UDP_FIRST_PACKET;
3586 case IPPROTO_ICMPV6:
3588 s->timeout = PFTM_ICMP_FIRST_PACKET;
3591 s->src.state = PFOTHERS_SINGLE;
3592 s->dst.state = PFOTHERS_NO_TRAFFIC;
3593 s->timeout = PFTM_OTHER_FIRST_PACKET;
3596 if (r->rt && r->rt != PF_FASTROUTE) {
3597 if (pf_map_addr(pd->af, r, pd->src, &s->rt_addr, NULL, &sn)) {
3598 REASON_SET(&reason, PFRES_BADSTATE);
3599 pf_src_tree_remove_state(s);
3600 STATE_DEC_COUNTERS(s);
3601 uma_zfree(V_pf_state_z, s);
3604 s->rt_kif = r->rpool.cur->kif;
3607 s->creation = time_uptime;
3608 s->expire = time_uptime;
3613 /* XXX We only modify one side for now. */
3614 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
3615 s->nat_src_node = nsn;
3617 if (pd->proto == IPPROTO_TCP) {
3618 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
3619 off, pd, th, &s->src, &s->dst)) {
3620 REASON_SET(&reason, PFRES_MEMORY);
3621 pf_src_tree_remove_state(s);
3622 STATE_DEC_COUNTERS(s);
3623 uma_zfree(V_pf_state_z, s);
3626 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
3627 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
3628 &s->src, &s->dst, rewrite)) {
3629 /* This really shouldn't happen!!! */
3630 DPFPRINTF(PF_DEBUG_URGENT,
3631 ("pf_normalize_tcp_stateful failed on first pkt"));
3632 pf_normalize_tcp_cleanup(s);
3633 pf_src_tree_remove_state(s);
3634 STATE_DEC_COUNTERS(s);
3635 uma_zfree(V_pf_state_z, s);
3639 s->direction = pd->dir;
* sk/nk could already have been set up by pf_get_translation().
3645 KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p",
3646 __func__, nr, sk, nk));
3647 sk = pf_state_key_setup(pd, pd->src, pd->dst, sport, dport);
3652 KASSERT((sk != NULL && nk != NULL), ("%s: nr %p sk %p, nk %p",
3653 __func__, nr, sk, nk));
3655 /* Swap sk/nk for PF_OUT. */
3656 if (pf_state_insert(BOUND_IFACE(r, kif),
3657 (pd->dir == PF_IN) ? sk : nk,
3658 (pd->dir == PF_IN) ? nk : sk, s)) {
3659 if (pd->proto == IPPROTO_TCP)
3660 pf_normalize_tcp_cleanup(s);
3661 REASON_SET(&reason, PFRES_STATEINS);
3662 pf_src_tree_remove_state(s);
3663 STATE_DEC_COUNTERS(s);
3664 uma_zfree(V_pf_state_z, s);
3671 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
3672 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
3673 s->src.state = PF_TCPS_PROXY_SRC;
3674 /* undo NAT changes, if they have taken place */
3676 struct pf_state_key *skt = s->key[PF_SK_WIRE];
3677 if (pd->dir == PF_OUT)
3678 skt = s->key[PF_SK_STACK];
3679 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
3680 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
3682 *pd->sport = skt->port[pd->sidx];
3684 *pd->dport = skt->port[pd->didx];
3686 *pd->proto_sum = bproto_sum;
3688 *pd->ip_sum = bip_sum;
3689 m_copyback(m, off, hdrlen, pd->hdr.any);
3691 s->src.seqhi = htonl(arc4random());
3692 /* Find mss option */
3693 int rtid = M_GETFIB(m);
3694 mss = pf_get_mss(m, off, th->th_off, pd->af);
3695 mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
3696 mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
3698 pf_send_tcp(NULL, r, pd->af, pd->dst, pd->src, th->th_dport,
3699 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
3700 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL);
3701 REASON_SET(&reason, PFRES_SYNPROXY);
3702 return (PF_SYNPROXY_DROP);
3709 uma_zfree(V_pf_state_key_z, sk);
3711 uma_zfree(V_pf_state_key_z, nk);
3714 struct pf_srchash *sh;
3716 sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
3717 PF_HASHROW_LOCK(sh);
3718 if (--sn->states == 0 && sn->expire == 0) {
3719 pf_unlink_src_node(sn);
3720 uma_zfree(V_pf_sources_z, sn);
3722 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
3724 PF_HASHROW_UNLOCK(sh);
3727 if (nsn != sn && nsn != NULL) {
3728 struct pf_srchash *sh;
3730 sh = &V_pf_srchash[pf_hashsrc(&nsn->addr, nsn->af)];
3731 PF_HASHROW_LOCK(sh);
3732 if (--nsn->states == 0 && nsn->expire == 0) {
3733 pf_unlink_src_node(nsn);
3734 uma_zfree(V_pf_sources_z, nsn);
3736 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
3738 PF_HASHROW_UNLOCK(sh);
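/*
* pf_test_fragment() evaluates non-first fragments, where only
* network-layer fields exist; the walk below therefore skips any rule
* that would need ports, TCP flags, ICMP types/codes or an OS
* fingerprint, none of which can be read from such a fragment.
*/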
3745 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
3746 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
3747 struct pf_ruleset **rsm)
3749 struct pf_rule *r, *a = NULL;
3750 struct pf_ruleset *ruleset = NULL;
3751 sa_family_t af = pd->af;
3756 struct pf_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
3760 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3763 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3764 r = r->skip[PF_SKIP_IFP].ptr;
3765 else if (r->direction && r->direction != direction)
3766 r = r->skip[PF_SKIP_DIR].ptr;
3767 else if (r->af && r->af != af)
3768 r = r->skip[PF_SKIP_AF].ptr;
3769 else if (r->proto && r->proto != pd->proto)
3770 r = r->skip[PF_SKIP_PROTO].ptr;
3771 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
3772 r->src.neg, kif, M_GETFIB(m)))
3773 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3774 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
3775 r->dst.neg, NULL, M_GETFIB(m)))
3776 r = r->skip[PF_SKIP_DST_ADDR].ptr;
else if (r->tos && r->tos != pd->tos)
3778 r = TAILQ_NEXT(r, entries);
3779 else if (r->os_fingerprint != PF_OSFP_ANY)
3780 r = TAILQ_NEXT(r, entries);
3781 else if (pd->proto == IPPROTO_UDP &&
3782 (r->src.port_op || r->dst.port_op))
3783 r = TAILQ_NEXT(r, entries);
3784 else if (pd->proto == IPPROTO_TCP &&
3785 (r->src.port_op || r->dst.port_op || r->flagset))
3786 r = TAILQ_NEXT(r, entries);
3787 else if ((pd->proto == IPPROTO_ICMP ||
3788 pd->proto == IPPROTO_ICMPV6) &&
3789 (r->type || r->code))
3790 r = TAILQ_NEXT(r, entries);
3791 else if (r->prob && r->prob <=
3792 (arc4random() % (UINT_MAX - 1) + 1))
3793 r = TAILQ_NEXT(r, entries);
3794 else if (r->match_tag && !pf_match_tag(m, r, &tag,
3795 pd->pf_mtag ? pd->pf_mtag->tag : 0))
3796 r = TAILQ_NEXT(r, entries);
3798 if (r->anchor == NULL) {
3805 r = TAILQ_NEXT(r, entries);
3807 pf_step_into_anchor(anchor_stack, &asd,
3808 &ruleset, PF_RULESET_FILTER, &r, &a,
3811 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3812 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3819 REASON_SET(&reason, PFRES_MATCH);
3822 PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd,
3825 if (r->action != PF_PASS)
3828 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3829 REASON_SET(&reason, PFRES_MEMORY);
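/*
* pf_tcp_track_full() is the strict tracker.  In rough terms a segment
* passes only if its last octet fits inside the peer's advertised
* window, its sequence number lags by at most one window, and its ACK
* stays within MAXACKWINDOW of what the other side sent; anything else
* falls through to the loose-match and bad-state diagnostics below.
*/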
3837 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
3838 struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off,
3839 struct pf_pdesc *pd, u_short *reason, int *copyback)
3841 struct tcphdr *th = pd->hdr.tcp;
3842 u_int16_t win = ntohs(th->th_win);
3843 u_int32_t ack, end, seq, orig_seq;
3847 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
3848 sws = src->wscale & PF_WSCALE_MASK;
3849 dws = dst->wscale & PF_WSCALE_MASK;
3854 * Sequence tracking algorithm from Guido van Rooij's paper:
3855 * http://www.madison-gurkha.com/publications/tcp_filtering/
3859 orig_seq = seq = ntohl(th->th_seq);
3860 if (src->seqlo == 0) {
3861 /* First packet from this end. Set its state */
3863 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
3864 src->scrub == NULL) {
3865 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
3866 REASON_SET(reason, PFRES_MEMORY);
3871 /* Deferred generation of sequence number modulator */
3872 if (dst->seqdiff && !src->seqdiff) {
3873 /* use random iss for the TCP server */
3874 while ((src->seqdiff = arc4random() - seq) == 0)
3876 ack = ntohl(th->th_ack) - dst->seqdiff;
3877 pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
3879 pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
3882 ack = ntohl(th->th_ack);
3885 end = seq + pd->p_len;
3886 if (th->th_flags & TH_SYN) {
3888 if (dst->wscale & PF_WSCALE_FLAG) {
3889 src->wscale = pf_get_wscale(m, off, th->th_off,
3891 if (src->wscale & PF_WSCALE_FLAG) {
3892 /* Remove scale factor from initial
3894 sws = src->wscale & PF_WSCALE_MASK;
win = ((u_int32_t)win + (1 << sws) - 1)
    >> sws;
3897 dws = dst->wscale & PF_WSCALE_MASK;
3899 /* fixup other window */
dst->max_win <<= dst->wscale &
    PF_WSCALE_MASK;
3902 /* in case of a retrans SYN|ACK */
3907 if (th->th_flags & TH_FIN)
3911 if (src->state < TCPS_SYN_SENT)
3912 src->state = TCPS_SYN_SENT;
3915 * May need to slide the window (seqhi may have been set by
3916 * the crappy stack check or if we picked up the connection
3917 * after establishment)
3919 if (src->seqhi == 1 ||
3920 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
3921 src->seqhi = end + MAX(1, dst->max_win << dws);
3922 if (win > src->max_win)
3926 ack = ntohl(th->th_ack) - dst->seqdiff;
3928 /* Modulate sequence numbers */
3929 pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
3931 pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
3934 end = seq + pd->p_len;
3935 if (th->th_flags & TH_SYN)
3937 if (th->th_flags & TH_FIN)
3941 if ((th->th_flags & TH_ACK) == 0) {
3942 /* Let it pass through the ack skew check */
3944 } else if ((ack == 0 &&
3945 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
3946 /* broken tcp stacks do not set ack */
3947 (dst->state < TCPS_SYN_SENT)) {
3949 * Many stacks (ours included) will set the ACK number in an
3950 * FIN|ACK if the SYN times out -- no sequence to ACK.
3956 /* Ease sequencing restrictions on no data packets */
3961 ackskew = dst->seqlo - ack;
3965 * Need to demodulate the sequence numbers in any TCP SACK options
3966 * (Selective ACK). We could optionally validate the SACK values
3967 * against the current ACK window, either forwards or backwards, but
3968 * I'm not confident that SACK has been implemented properly
* everywhere. It wouldn't surprise me if several stacks accidentally
3970 * SACK too far backwards of previously ACKed data. There really aren't
3971 * any security implications of bad SACKing unless the target stack
3972 * doesn't validate the option length correctly. Someone trying to
3973 * spoof into a TCP connection won't bother blindly sending SACK
3976 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
3977 if (pf_modulate_sack(m, off, pd, th, dst))
3982 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
3983 if (SEQ_GEQ(src->seqhi, end) &&
3984 /* Last octet inside other's window space */
3985 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
3986 /* Retrans: not more than one window back */
3987 (ackskew >= -MAXACKWINDOW) &&
3988 /* Acking not more than one reassembled fragment backwards */
3989 (ackskew <= (MAXACKWINDOW << sws)) &&
3990 /* Acking not more than one window forward */
3991 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
3992 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
3993 (pd->flags & PFDESC_IP_REAS) == 0)) {
3994 /* Require an exact/+1 sequence match on resets when possible */
3996 if (dst->scrub || src->scrub) {
3997 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
3998 *state, src, dst, copyback))
4002 /* update max window */
4003 if (src->max_win < win)
4005 /* synchronize sequencing */
4006 if (SEQ_GT(end, src->seqlo))
4008 /* slide the window of what the other end can send */
4009 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4010 dst->seqhi = ack + MAX((win << sws), 1);
4014 if (th->th_flags & TH_SYN)
4015 if (src->state < TCPS_SYN_SENT)
4016 src->state = TCPS_SYN_SENT;
4017 if (th->th_flags & TH_FIN)
4018 if (src->state < TCPS_CLOSING)
4019 src->state = TCPS_CLOSING;
4020 if (th->th_flags & TH_ACK) {
4021 if (dst->state == TCPS_SYN_SENT) {
4022 dst->state = TCPS_ESTABLISHED;
4023 if (src->state == TCPS_ESTABLISHED &&
4024 (*state)->src_node != NULL &&
4025 pf_src_connlimit(state)) {
4026 REASON_SET(reason, PFRES_SRCLIMIT);
4029 } else if (dst->state == TCPS_CLOSING)
4030 dst->state = TCPS_FIN_WAIT_2;
4032 if (th->th_flags & TH_RST)
4033 src->state = dst->state = TCPS_TIME_WAIT;
4035 /* update expire time */
4036 (*state)->expire = time_uptime;
4037 if (src->state >= TCPS_FIN_WAIT_2 &&
4038 dst->state >= TCPS_FIN_WAIT_2)
4039 (*state)->timeout = PFTM_TCP_CLOSED;
4040 else if (src->state >= TCPS_CLOSING &&
4041 dst->state >= TCPS_CLOSING)
4042 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4043 else if (src->state < TCPS_ESTABLISHED ||
4044 dst->state < TCPS_ESTABLISHED)
4045 (*state)->timeout = PFTM_TCP_OPENING;
4046 else if (src->state >= TCPS_CLOSING ||
4047 dst->state >= TCPS_CLOSING)
4048 (*state)->timeout = PFTM_TCP_CLOSING;
4050 (*state)->timeout = PFTM_TCP_ESTABLISHED;
4052 /* Fall through to PASS packet */
4054 } else if ((dst->state < TCPS_SYN_SENT ||
4055 dst->state >= TCPS_FIN_WAIT_2 ||
4056 src->state >= TCPS_FIN_WAIT_2) &&
4057 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
4058 /* Within a window forward of the originating packet */
4059 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
4060 /* Within a window backward of the originating packet */
4063 * This currently handles three situations:
*  1) Stupid stacks will shotgun SYNs before their peer
*     replies.
*  2) When PF catches an already established stream (the
*     firewall rebooted, the state table was flushed, routes
*     changed...)
4069 * 3) Packets get funky immediately after the connection
4070 * closes (this should catch Solaris spurious ACK|FINs
4071 * that web servers like to spew after a close)
4073 * This must be a little more careful than the above code
4074 * since packet floods will also be caught here. We don't
4075 * update the TTL here to mitigate the damage of a packet
4076 * flood and so the same code can handle awkward establishment
4077 * and a loosened connection close.
4078 * In the establishment case, a correct peer response will
4079 * validate the connection, go through the normal state code
4080 * and keep updating the state TTL.
4083 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4084 printf("pf: loose state match: ");
4085 pf_print_state(*state);
4086 pf_print_flags(th->th_flags);
4087 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4088 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
4089 pd->p_len, ackskew, (unsigned long long)(*state)->packets[0],
4090 (unsigned long long)(*state)->packets[1],
4091 pd->dir == PF_IN ? "in" : "out",
4092 pd->dir == (*state)->direction ? "fwd" : "rev");
4095 if (dst->scrub || src->scrub) {
4096 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4097 *state, src, dst, copyback))
4101 /* update max window */
4102 if (src->max_win < win)
4104 /* synchronize sequencing */
4105 if (SEQ_GT(end, src->seqlo))
4107 /* slide the window of what the other end can send */
4108 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4109 dst->seqhi = ack + MAX((win << sws), 1);
4112 * Cannot set dst->seqhi here since this could be a shotgunned
4113 * SYN and not an already established connection.
4116 if (th->th_flags & TH_FIN)
4117 if (src->state < TCPS_CLOSING)
4118 src->state = TCPS_CLOSING;
4119 if (th->th_flags & TH_RST)
4120 src->state = dst->state = TCPS_TIME_WAIT;
4122 /* Fall through to PASS packet */
4125 if ((*state)->dst.state == TCPS_SYN_SENT &&
4126 (*state)->src.state == TCPS_SYN_SENT) {
4127 /* Send RST for state mismatches during handshake */
4128 if (!(th->th_flags & TH_RST))
4129 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4130 pd->dst, pd->src, th->th_dport,
4131 th->th_sport, ntohl(th->th_ack), 0,
4133 (*state)->rule.ptr->return_ttl, 1, 0,
4138 } else if (V_pf_status.debug >= PF_DEBUG_MISC) {
4139 printf("pf: BAD state: ");
4140 pf_print_state(*state);
4141 pf_print_flags(th->th_flags);
4142 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4143 "pkts=%llu:%llu dir=%s,%s\n",
4144 seq, orig_seq, ack, pd->p_len, ackskew,
4145 (unsigned long long)(*state)->packets[0],
4146 (unsigned long long)(*state)->packets[1],
4147 pd->dir == PF_IN ? "in" : "out",
4148 pd->dir == (*state)->direction ? "fwd" : "rev");
4149 printf("pf: State failure on: %c %c %c %c | %c %c\n",
4150 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4151 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4153 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4154 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4155 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4156 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
4158 REASON_SET(reason, PFRES_BADSTATE);
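/*
* pf_tcp_track_sloppy() below is the relaxed tracker selected by the
* "sloppy" state option: it drives the state machine on TCP flags
* alone and performs no sequence or window validation, which is what
* makes it usable on asymmetric paths where only one direction of the
* connection is visible.
*/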
4166 pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst,
4167 struct pf_state **state, struct pf_pdesc *pd, u_short *reason)
4169 struct tcphdr *th = pd->hdr.tcp;
4171 if (th->th_flags & TH_SYN)
4172 if (src->state < TCPS_SYN_SENT)
4173 src->state = TCPS_SYN_SENT;
4174 if (th->th_flags & TH_FIN)
4175 if (src->state < TCPS_CLOSING)
4176 src->state = TCPS_CLOSING;
4177 if (th->th_flags & TH_ACK) {
4178 if (dst->state == TCPS_SYN_SENT) {
4179 dst->state = TCPS_ESTABLISHED;
4180 if (src->state == TCPS_ESTABLISHED &&
4181 (*state)->src_node != NULL &&
4182 pf_src_connlimit(state)) {
4183 REASON_SET(reason, PFRES_SRCLIMIT);
4186 } else if (dst->state == TCPS_CLOSING) {
4187 dst->state = TCPS_FIN_WAIT_2;
4188 } else if (src->state == TCPS_SYN_SENT &&
4189 dst->state < TCPS_SYN_SENT) {
4191 * Handle a special sloppy case where we only see one
* half of the connection. If there is an ACK after
4193 * the initial SYN without ever seeing a packet from
4194 * the destination, set the connection to established.
4196 dst->state = src->state = TCPS_ESTABLISHED;
4197 if ((*state)->src_node != NULL &&
4198 pf_src_connlimit(state)) {
4199 REASON_SET(reason, PFRES_SRCLIMIT);
4202 } else if (src->state == TCPS_CLOSING &&
4203 dst->state == TCPS_ESTABLISHED &&
4206 * Handle the closing of half connections where we
* don't see the full bidirectional FIN/ACK+ACK
* handshake.
4210 dst->state = TCPS_CLOSING;
4213 if (th->th_flags & TH_RST)
4214 src->state = dst->state = TCPS_TIME_WAIT;
4216 /* update expire time */
4217 (*state)->expire = time_uptime;
4218 if (src->state >= TCPS_FIN_WAIT_2 &&
4219 dst->state >= TCPS_FIN_WAIT_2)
4220 (*state)->timeout = PFTM_TCP_CLOSED;
4221 else if (src->state >= TCPS_CLOSING &&
4222 dst->state >= TCPS_CLOSING)
4223 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4224 else if (src->state < TCPS_ESTABLISHED ||
4225 dst->state < TCPS_ESTABLISHED)
4226 (*state)->timeout = PFTM_TCP_OPENING;
4227 else if (src->state >= TCPS_CLOSING ||
4228 dst->state >= TCPS_CLOSING)
4229 (*state)->timeout = PFTM_TCP_CLOSING;
4231 (*state)->timeout = PFTM_TCP_ESTABLISHED;
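/*
* The state lookups below build keys in wire orientation so one stored
* state serves both directions.  Sketch for a TCP segment:
*
*	PF_IN:  key.addr[0]/port[0] = packet src, [1] = packet dst
*	PF_OUT: the pair is swapped before the lookup
*/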
4237 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
4238 struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
4241 struct pf_state_key_cmp key;
4242 struct tcphdr *th = pd->hdr.tcp;
4244 struct pf_state_peer *src, *dst;
4245 struct pf_state_key *sk;
4247 bzero(&key, sizeof(key));
4249 key.proto = IPPROTO_TCP;
4250 if (direction == PF_IN) { /* wire side, straight */
4251 PF_ACPY(&key.addr[0], pd->src, key.af);
4252 PF_ACPY(&key.addr[1], pd->dst, key.af);
4253 key.port[0] = th->th_sport;
4254 key.port[1] = th->th_dport;
4255 } else { /* stack side, reverse */
4256 PF_ACPY(&key.addr[1], pd->src, key.af);
4257 PF_ACPY(&key.addr[0], pd->dst, key.af);
4258 key.port[1] = th->th_sport;
4259 key.port[0] = th->th_dport;
4262 STATE_LOOKUP(kif, &key, direction, *state, pd);
4264 if (direction == (*state)->direction) {
4265 src = &(*state)->src;
4266 dst = &(*state)->dst;
4268 src = &(*state)->dst;
4269 dst = &(*state)->src;
4272 sk = (*state)->key[pd->didx];
4274 if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
4275 if (direction != (*state)->direction) {
4276 REASON_SET(reason, PFRES_SYNPROXY);
4277 return (PF_SYNPROXY_DROP);
4279 if (th->th_flags & TH_SYN) {
4280 if (ntohl(th->th_seq) != (*state)->src.seqlo) {
4281 REASON_SET(reason, PFRES_SYNPROXY);
4284 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4285 pd->src, th->th_dport, th->th_sport,
4286 (*state)->src.seqhi, ntohl(th->th_seq) + 1,
4287 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0, NULL);
4288 REASON_SET(reason, PFRES_SYNPROXY);
4289 return (PF_SYNPROXY_DROP);
4290 } else if (!(th->th_flags & TH_ACK) ||
4291 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4292 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4293 REASON_SET(reason, PFRES_SYNPROXY);
4295 } else if ((*state)->src_node != NULL &&
4296 pf_src_connlimit(state)) {
4297 REASON_SET(reason, PFRES_SRCLIMIT);
4300 (*state)->src.state = PF_TCPS_PROXY_DST;
4302 if ((*state)->src.state == PF_TCPS_PROXY_DST) {
4303 if (direction == (*state)->direction) {
4304 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
4305 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4306 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4307 REASON_SET(reason, PFRES_SYNPROXY);
4310 (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
4311 if ((*state)->dst.seqhi == 1)
4312 (*state)->dst.seqhi = htonl(arc4random());
4313 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4314 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4315 sk->port[pd->sidx], sk->port[pd->didx],
4316 (*state)->dst.seqhi, 0, TH_SYN, 0,
4317 (*state)->src.mss, 0, 0, (*state)->tag, NULL);
4318 REASON_SET(reason, PFRES_SYNPROXY);
4319 return (PF_SYNPROXY_DROP);
4320 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
4322 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
4323 REASON_SET(reason, PFRES_SYNPROXY);
4326 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
4327 (*state)->dst.seqlo = ntohl(th->th_seq);
4328 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4329 pd->src, th->th_dport, th->th_sport,
4330 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
4331 TH_ACK, (*state)->src.max_win, 0, 0, 0,
4332 (*state)->tag, NULL);
4333 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4334 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4335 sk->port[pd->sidx], sk->port[pd->didx],
4336 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
4337 TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0, NULL);
4338 (*state)->src.seqdiff = (*state)->dst.seqhi -
4339 (*state)->src.seqlo;
4340 (*state)->dst.seqdiff = (*state)->src.seqhi -
4341 (*state)->dst.seqlo;
4342 (*state)->src.seqhi = (*state)->src.seqlo +
4343 (*state)->dst.max_win;
4344 (*state)->dst.seqhi = (*state)->dst.seqlo +
4345 (*state)->src.max_win;
4346 (*state)->src.wscale = (*state)->dst.wscale = 0;
4347 (*state)->src.state = (*state)->dst.state =
4349 REASON_SET(reason, PFRES_SYNPROXY);
4350 return (PF_SYNPROXY_DROP);
4354 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
4355 dst->state >= TCPS_FIN_WAIT_2 &&
4356 src->state >= TCPS_FIN_WAIT_2) {
4357 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4358 printf("pf: state reuse ");
4359 pf_print_state(*state);
4360 pf_print_flags(th->th_flags);
4363 /* XXX make sure it's the same direction ?? */
4364 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
4365 pf_unlink_state(*state, PF_ENTER_LOCKED);
4370 if ((*state)->state_flags & PFSTATE_SLOPPY) {
4371 if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP)
4374 if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason,
&copyback) == PF_DROP)
4379 /* translate source/destination address, if necessary */
4380 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4381 struct pf_state_key *nk = (*state)->key[pd->didx];
4383 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4384 nk->port[pd->sidx] != th->th_sport)
4385 pf_change_ap(m, pd->src, &th->th_sport,
4386 pd->ip_sum, &th->th_sum, &nk->addr[pd->sidx],
4387 nk->port[pd->sidx], 0, pd->af);
4389 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4390 nk->port[pd->didx] != th->th_dport)
4391 pf_change_ap(m, pd->dst, &th->th_dport,
4392 pd->ip_sum, &th->th_sum, &nk->addr[pd->didx],
4393 nk->port[pd->didx], 0, pd->af);
4397 /* Copyback sequence modulation or stateful scrub changes if needed */
4399 m_copyback(m, off, sizeof(*th), (caddr_t)th);
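/*
* UDP "connections" below are inferred rather than negotiated: the
* first packet leaves the state in PFUDPS_SINGLE, a reply promotes it
* to PFUDPS_MULTIPLE, and the timeout stretches accordingly
* (PFTM_UDP_SINGLE vs. PFTM_UDP_MULTIPLE).
*/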
4405 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
4406 struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4408 struct pf_state_peer *src, *dst;
4409 struct pf_state_key_cmp key;
4410 struct udphdr *uh = pd->hdr.udp;
4412 bzero(&key, sizeof(key));
4414 key.proto = IPPROTO_UDP;
4415 if (direction == PF_IN) { /* wire side, straight */
4416 PF_ACPY(&key.addr[0], pd->src, key.af);
4417 PF_ACPY(&key.addr[1], pd->dst, key.af);
4418 key.port[0] = uh->uh_sport;
4419 key.port[1] = uh->uh_dport;
4420 } else { /* stack side, reverse */
4421 PF_ACPY(&key.addr[1], pd->src, key.af);
4422 PF_ACPY(&key.addr[0], pd->dst, key.af);
4423 key.port[1] = uh->uh_sport;
4424 key.port[0] = uh->uh_dport;
4427 STATE_LOOKUP(kif, &key, direction, *state, pd);
4429 if (direction == (*state)->direction) {
4430 src = &(*state)->src;
4431 dst = &(*state)->dst;
4433 src = &(*state)->dst;
4434 dst = &(*state)->src;
4438 if (src->state < PFUDPS_SINGLE)
4439 src->state = PFUDPS_SINGLE;
4440 if (dst->state == PFUDPS_SINGLE)
4441 dst->state = PFUDPS_MULTIPLE;
4443 /* update expire time */
4444 (*state)->expire = time_uptime;
4445 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
4446 (*state)->timeout = PFTM_UDP_MULTIPLE;
4448 (*state)->timeout = PFTM_UDP_SINGLE;
4450 /* translate source/destination address, if necessary */
4451 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4452 struct pf_state_key *nk = (*state)->key[pd->didx];
4454 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4455 nk->port[pd->sidx] != uh->uh_sport)
4456 pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum,
4457 &uh->uh_sum, &nk->addr[pd->sidx],
4458 nk->port[pd->sidx], 1, pd->af);
4460 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4461 nk->port[pd->didx] != uh->uh_dport)
4462 pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum,
4463 &uh->uh_sum, &nk->addr[pd->didx],
4464 nk->port[pd->didx], 1, pd->af);
4465 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
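/*
* ICMP is handled in two tiers below: query/reply messages (echo and
* friends) carry their own id and get first-class states, while error
* messages are matched by pulling out the embedded IP header and
* looking up the state of the TCP/UDP/ICMP flow that triggered them.
*/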
4472 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
4473 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
4475 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
4476 u_int16_t icmpid = 0, *icmpsum;
4479 struct pf_state_key_cmp key;
4481 bzero(&key, sizeof(key));
4482 switch (pd->proto) {
4485 icmptype = pd->hdr.icmp->icmp_type;
4486 icmpid = pd->hdr.icmp->icmp_id;
4487 icmpsum = &pd->hdr.icmp->icmp_cksum;
4489 if (icmptype == ICMP_UNREACH ||
4490 icmptype == ICMP_SOURCEQUENCH ||
4491 icmptype == ICMP_REDIRECT ||
4492 icmptype == ICMP_TIMXCEED ||
4493 icmptype == ICMP_PARAMPROB)
4498 case IPPROTO_ICMPV6:
4499 icmptype = pd->hdr.icmp6->icmp6_type;
4500 icmpid = pd->hdr.icmp6->icmp6_id;
4501 icmpsum = &pd->hdr.icmp6->icmp6_cksum;
4503 if (icmptype == ICMP6_DST_UNREACH ||
4504 icmptype == ICMP6_PACKET_TOO_BIG ||
4505 icmptype == ICMP6_TIME_EXCEEDED ||
4506 icmptype == ICMP6_PARAM_PROB)
4515 * ICMP query/reply message not related to a TCP/UDP packet.
4516 * Search for an ICMP state.
4519 key.proto = pd->proto;
4520 key.port[0] = key.port[1] = icmpid;
4521 if (direction == PF_IN) { /* wire side, straight */
4522 PF_ACPY(&key.addr[0], pd->src, key.af);
4523 PF_ACPY(&key.addr[1], pd->dst, key.af);
4524 } else { /* stack side, reverse */
4525 PF_ACPY(&key.addr[1], pd->src, key.af);
4526 PF_ACPY(&key.addr[0], pd->dst, key.af);
4529 STATE_LOOKUP(kif, &key, direction, *state, pd);
4531 (*state)->expire = time_uptime;
4532 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
4534 /* translate source/destination address, if necessary */
4535 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4536 struct pf_state_key *nk = (*state)->key[pd->didx];
4541 if (PF_ANEQ(pd->src,
4542 &nk->addr[pd->sidx], AF_INET))
4543 pf_change_a(&saddr->v4.s_addr,
4545 nk->addr[pd->sidx].v4.s_addr, 0);
4547 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
4549 pf_change_a(&daddr->v4.s_addr,
4551 nk->addr[pd->didx].v4.s_addr, 0);
4554 pd->hdr.icmp->icmp_id) {
4555 pd->hdr.icmp->icmp_cksum =
4557 pd->hdr.icmp->icmp_cksum, icmpid,
4558 nk->port[pd->sidx], 0);
4559 pd->hdr.icmp->icmp_id =
4563 m_copyback(m, off, ICMP_MINLEN,
4564 (caddr_t )pd->hdr.icmp);
4569 if (PF_ANEQ(pd->src,
4570 &nk->addr[pd->sidx], AF_INET6))
4572 &pd->hdr.icmp6->icmp6_cksum,
4573 &nk->addr[pd->sidx], 0);
4575 if (PF_ANEQ(pd->dst,
4576 &nk->addr[pd->didx], AF_INET6))
4578 &pd->hdr.icmp6->icmp6_cksum,
4579 &nk->addr[pd->didx], 0);
4581 m_copyback(m, off, sizeof(struct icmp6_hdr),
4582 (caddr_t )pd->hdr.icmp6);
4591 * ICMP error message in response to a TCP/UDP packet.
4592 * Extract the inner TCP/UDP header and search for that state.
4595 struct pf_pdesc pd2;
4596 bzero(&pd2, sizeof pd2);
4601 struct ip6_hdr h2_6;
4608 /* Payload packet is from the opposite direction. */
4609 pd2.sidx = (direction == PF_IN) ? 1 : 0;
4610 pd2.didx = (direction == PF_IN) ? 0 : 1;
4614 /* offset of h2 in mbuf chain */
4615 ipoff2 = off + ICMP_MINLEN;
4617 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
4618 NULL, reason, pd2.af)) {
4619 DPFPRINTF(PF_DEBUG_MISC,
4620 ("pf: ICMP error message too short "
4625 * ICMP error messages don't refer to non-first
4628 if (h2.ip_off & htons(IP_OFFMASK)) {
4629 REASON_SET(reason, PFRES_FRAG);
4633 /* offset of protocol header that follows h2 */
4634 off2 = ipoff2 + (h2.ip_hl << 2);
4636 pd2.proto = h2.ip_p;
4637 pd2.src = (struct pf_addr *)&h2.ip_src;
4638 pd2.dst = (struct pf_addr *)&h2.ip_dst;
4639 pd2.ip_sum = &h2.ip_sum;
4644 ipoff2 = off + sizeof(struct icmp6_hdr);
4646 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
4647 NULL, reason, pd2.af)) {
4648 DPFPRINTF(PF_DEBUG_MISC,
4649 ("pf: ICMP error message too short "
4653 pd2.proto = h2_6.ip6_nxt;
4654 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
4655 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
4657 off2 = ipoff2 + sizeof(h2_6);
4659 switch (pd2.proto) {
4660 case IPPROTO_FRAGMENT:
4662 * ICMPv6 error messages for
4663 * non-first fragments
4665 REASON_SET(reason, PFRES_FRAG);
4668 case IPPROTO_HOPOPTS:
4669 case IPPROTO_ROUTING:
4670 case IPPROTO_DSTOPTS: {
4671 /* get next header and header length */
4672 struct ip6_ext opt6;
4674 if (!pf_pull_hdr(m, off2, &opt6,
4675 sizeof(opt6), NULL, reason,
4677 DPFPRINTF(PF_DEBUG_MISC,
4678 ("pf: ICMPv6 short opt\n"));
4681 if (pd2.proto == IPPROTO_AH)
4682 off2 += (opt6.ip6e_len + 2) * 4;
4684 off2 += (opt6.ip6e_len + 1) * 8;
4685 pd2.proto = opt6.ip6e_nxt;
/* go to the next header */
4693 } while (!terminal);
4698 switch (pd2.proto) {
4702 struct pf_state_peer *src, *dst;
4707 * Only the first 8 bytes of the TCP header can be
4708 * expected. Don't access any TCP header fields after
* th_seq; an ackskew test is not possible.
4711 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
4713 DPFPRINTF(PF_DEBUG_MISC,
4714 ("pf: ICMP error message too short "
4720 key.proto = IPPROTO_TCP;
4721 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4722 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4723 key.port[pd2.sidx] = th.th_sport;
4724 key.port[pd2.didx] = th.th_dport;
4726 STATE_LOOKUP(kif, &key, direction, *state, pd);
4728 if (direction == (*state)->direction) {
4729 src = &(*state)->dst;
4730 dst = &(*state)->src;
4732 src = &(*state)->src;
4733 dst = &(*state)->dst;
4736 if (src->wscale && dst->wscale)
4737 dws = dst->wscale & PF_WSCALE_MASK;
4741 /* Demodulate sequence number */
4742 seq = ntohl(th.th_seq) - src->seqdiff;
4744 pf_change_a(&th.th_seq, icmpsum,
4749 if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
4750 (!SEQ_GEQ(src->seqhi, seq) ||
4751 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
4752 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4753 printf("pf: BAD ICMP %d:%d ",
4754 icmptype, pd->hdr.icmp->icmp_code);
4755 pf_print_host(pd->src, 0, pd->af);
4757 pf_print_host(pd->dst, 0, pd->af);
4759 pf_print_state(*state);
4760 printf(" seq=%u\n", seq);
4762 REASON_SET(reason, PFRES_BADSTATE);
4765 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4766 printf("pf: OK ICMP %d:%d ",
4767 icmptype, pd->hdr.icmp->icmp_code);
4768 pf_print_host(pd->src, 0, pd->af);
4770 pf_print_host(pd->dst, 0, pd->af);
4772 pf_print_state(*state);
4773 printf(" seq=%u\n", seq);
4777 /* translate source/destination address, if necessary */
4778 if ((*state)->key[PF_SK_WIRE] !=
4779 (*state)->key[PF_SK_STACK]) {
4780 struct pf_state_key *nk =
4781 (*state)->key[pd->didx];
4783 if (PF_ANEQ(pd2.src,
4784 &nk->addr[pd2.sidx], pd2.af) ||
4785 nk->port[pd2.sidx] != th.th_sport)
4786 pf_change_icmp(pd2.src, &th.th_sport,
4787 daddr, &nk->addr[pd2.sidx],
4788 nk->port[pd2.sidx], NULL,
4789 pd2.ip_sum, icmpsum,
4790 pd->ip_sum, 0, pd2.af);
4792 if (PF_ANEQ(pd2.dst,
4793 &nk->addr[pd2.didx], pd2.af) ||
4794 nk->port[pd2.didx] != th.th_dport)
4795 pf_change_icmp(pd2.dst, &th.th_dport,
4796 NULL, /* XXX Inbound NAT? */
4797 &nk->addr[pd2.didx],
4798 nk->port[pd2.didx], NULL,
4799 pd2.ip_sum, icmpsum,
4800 pd->ip_sum, 0, pd2.af);
4808 m_copyback(m, off, ICMP_MINLEN,
4809 (caddr_t )pd->hdr.icmp);
4810 m_copyback(m, ipoff2, sizeof(h2),
4817 sizeof(struct icmp6_hdr),
4818 (caddr_t )pd->hdr.icmp6);
4819 m_copyback(m, ipoff2, sizeof(h2_6),
4824 m_copyback(m, off2, 8, (caddr_t)&th);
4833 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
4834 NULL, reason, pd2.af)) {
4835 DPFPRINTF(PF_DEBUG_MISC,
4836 ("pf: ICMP error message too short "
4842 key.proto = IPPROTO_UDP;
4843 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4844 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4845 key.port[pd2.sidx] = uh.uh_sport;
4846 key.port[pd2.didx] = uh.uh_dport;
4848 STATE_LOOKUP(kif, &key, direction, *state, pd);
4850 /* translate source/destination address, if necessary */
4851 if ((*state)->key[PF_SK_WIRE] !=
4852 (*state)->key[PF_SK_STACK]) {
4853 struct pf_state_key *nk =
4854 (*state)->key[pd->didx];
4856 if (PF_ANEQ(pd2.src,
4857 &nk->addr[pd2.sidx], pd2.af) ||
4858 nk->port[pd2.sidx] != uh.uh_sport)
4859 pf_change_icmp(pd2.src, &uh.uh_sport,
4860 daddr, &nk->addr[pd2.sidx],
4861 nk->port[pd2.sidx], &uh.uh_sum,
4862 pd2.ip_sum, icmpsum,
4863 pd->ip_sum, 1, pd2.af);
4865 if (PF_ANEQ(pd2.dst,
4866 &nk->addr[pd2.didx], pd2.af) ||
4867 nk->port[pd2.didx] != uh.uh_dport)
4868 pf_change_icmp(pd2.dst, &uh.uh_dport,
4869 NULL, /* XXX Inbound NAT? */
4870 &nk->addr[pd2.didx],
4871 nk->port[pd2.didx], &uh.uh_sum,
4872 pd2.ip_sum, icmpsum,
4873 pd->ip_sum, 1, pd2.af);
4878 m_copyback(m, off, ICMP_MINLEN,
4879 (caddr_t )pd->hdr.icmp);
4880 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4886 sizeof(struct icmp6_hdr),
4887 (caddr_t )pd->hdr.icmp6);
4888 m_copyback(m, ipoff2, sizeof(h2_6),
4893 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
4899 case IPPROTO_ICMP: {
4902 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
4903 NULL, reason, pd2.af)) {
4904 DPFPRINTF(PF_DEBUG_MISC,
4905 ("pf: ICMP error message too short i"
4911 key.proto = IPPROTO_ICMP;
4912 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4913 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4914 key.port[0] = key.port[1] = iih.icmp_id;
4916 STATE_LOOKUP(kif, &key, direction, *state, pd);
4918 /* translate source/destination address, if necessary */
4919 if ((*state)->key[PF_SK_WIRE] !=
4920 (*state)->key[PF_SK_STACK]) {
4921 struct pf_state_key *nk =
4922 (*state)->key[pd->didx];
4924 if (PF_ANEQ(pd2.src,
4925 &nk->addr[pd2.sidx], pd2.af) ||
4926 nk->port[pd2.sidx] != iih.icmp_id)
4927 pf_change_icmp(pd2.src, &iih.icmp_id,
4928 daddr, &nk->addr[pd2.sidx],
4929 nk->port[pd2.sidx], NULL,
4930 pd2.ip_sum, icmpsum,
4931 pd->ip_sum, 0, AF_INET);
4933 if (PF_ANEQ(pd2.dst,
4934 &nk->addr[pd2.didx], pd2.af) ||
4935 nk->port[pd2.didx] != iih.icmp_id)
4936 pf_change_icmp(pd2.dst, &iih.icmp_id,
4937 NULL, /* XXX Inbound NAT? */
4938 &nk->addr[pd2.didx],
4939 nk->port[pd2.didx], NULL,
4940 pd2.ip_sum, icmpsum,
4941 pd->ip_sum, 0, AF_INET);
4943 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
4944 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4945 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
4952 case IPPROTO_ICMPV6: {
4953 struct icmp6_hdr iih;
4955 if (!pf_pull_hdr(m, off2, &iih,
4956 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
4957 DPFPRINTF(PF_DEBUG_MISC,
4958 ("pf: ICMP error message too short "
4964 key.proto = IPPROTO_ICMPV6;
4965 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4966 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4967 key.port[0] = key.port[1] = iih.icmp6_id;
4969 STATE_LOOKUP(kif, &key, direction, *state, pd);
4971 /* translate source/destination address, if necessary */
4972 if ((*state)->key[PF_SK_WIRE] !=
4973 (*state)->key[PF_SK_STACK]) {
4974 struct pf_state_key *nk =
4975 (*state)->key[pd->didx];
4977 if (PF_ANEQ(pd2.src,
4978 &nk->addr[pd2.sidx], pd2.af) ||
4979 nk->port[pd2.sidx] != iih.icmp6_id)
4980 pf_change_icmp(pd2.src, &iih.icmp6_id,
4981 daddr, &nk->addr[pd2.sidx],
4982 nk->port[pd2.sidx], NULL,
4983 pd2.ip_sum, icmpsum,
4984 pd->ip_sum, 0, AF_INET6);
4986 if (PF_ANEQ(pd2.dst,
4987 &nk->addr[pd2.didx], pd2.af) ||
4988 nk->port[pd2.didx] != iih.icmp6_id)
4989 pf_change_icmp(pd2.dst, &iih.icmp6_id,
4990 NULL, /* XXX Inbound NAT? */
4991 &nk->addr[pd2.didx],
4992 nk->port[pd2.didx], NULL,
4993 pd2.ip_sum, icmpsum,
4994 pd->ip_sum, 0, AF_INET6);
4996 m_copyback(m, off, sizeof(struct icmp6_hdr),
4997 (caddr_t)pd->hdr.icmp6);
4998 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
4999 m_copyback(m, off2, sizeof(struct icmp6_hdr),
5000 (caddr_t)&iih);
5008 key.proto = pd2.proto;
5009 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5010 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5011 key.port[0] = key.port[1] = 0;
5013 STATE_LOOKUP(kif, &key, direction, *state, pd);
5015 /* translate source/destination address, if necessary */
5016 if ((*state)->key[PF_SK_WIRE] !=
5017 (*state)->key[PF_SK_STACK]) {
5018 struct pf_state_key *nk =
5019 (*state)->key[pd->didx];
5021 if (PF_ANEQ(pd2.src,
5022 &nk->addr[pd2.sidx], pd2.af))
5023 pf_change_icmp(pd2.src, NULL, daddr,
5024 &nk->addr[pd2.sidx], 0, NULL,
5025 pd2.ip_sum, icmpsum,
5026 pd->ip_sum, 0, pd2.af);
5028 if (PF_ANEQ(pd2.dst,
5029 &nk->addr[pd2.didx], pd2.af))
5030 pf_change_icmp(pd2.dst, NULL,
5031 NULL, /* XXX Inbound NAT? */
5032 &nk->addr[pd2.didx], 0, NULL,
5033 pd2.ip_sum, icmpsum,
5034 pd->ip_sum, 0, pd2.af);
5039 m_copyback(m, off, ICMP_MINLEN,
5040 (caddr_t)pd->hdr.icmp);
5041 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5046 m_copyback(m, off,
5047 sizeof(struct icmp6_hdr),
5048 (caddr_t)pd->hdr.icmp6);
5049 m_copyback(m, ipoff2, sizeof(h2_6),
5050 (caddr_t)&h2_6);
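/*
 * Editor's sketch (not pf source): every protocol case above follows the
 * same shape.  A state carries two keys, and NAT rewriting of the ICMP
 * error payload is needed only when they are distinct objects; the key
 * indexed by pd->didx names the side to rewrite toward.  Under that
 * assumption the recurring guard reduces to:
 */
static __inline int
pf_state_was_translated_sketch(const struct pf_state *st)
{
	/* Distinct wire/stack keys imply a nat/rdr/binat rule created us. */
	return (st->key[PF_SK_WIRE] != st->key[PF_SK_STACK]);
}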
5062 static int
5063 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
5064 struct mbuf *m, struct pf_pdesc *pd)
5065 {
5066 struct pf_state_peer *src, *dst;
5067 struct pf_state_key_cmp key;
5069 bzero(&key, sizeof(key));
5070 key.af = pd->af;
5071 key.proto = pd->proto;
5072 if (direction == PF_IN) {
5073 PF_ACPY(&key.addr[0], pd->src, key.af);
5074 PF_ACPY(&key.addr[1], pd->dst, key.af);
5075 key.port[0] = key.port[1] = 0;
5076 } else {
5077 PF_ACPY(&key.addr[1], pd->src, key.af);
5078 PF_ACPY(&key.addr[0], pd->dst, key.af);
5079 key.port[1] = key.port[0] = 0;
5082 STATE_LOOKUP(kif, &key, direction, *state, pd);
5084 if (direction == (*state)->direction) {
5085 src = &(*state)->src;
5086 dst = &(*state)->dst;
5087 } else {
5088 src = &(*state)->dst;
5089 dst = &(*state)->src;
5090 }
5092 /* update states */
5093 if (src->state < PFOTHERS_SINGLE)
5094 src->state = PFOTHERS_SINGLE;
5095 if (dst->state == PFOTHERS_SINGLE)
5096 dst->state = PFOTHERS_MULTIPLE;
5098 /* update expire time */
5099 (*state)->expire = time_uptime;
5100 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
5101 (*state)->timeout = PFTM_OTHER_MULTIPLE;
5102 else
5103 (*state)->timeout = PFTM_OTHER_SINGLE;
5105 /* translate source/destination address, if necessary */
5106 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5107 struct pf_state_key *nk = (*state)->key[pd->didx];
5109 KASSERT(nk, ("%s: nk is null", __func__));
5110 KASSERT(pd, ("%s: pd is null", __func__));
5111 KASSERT(pd->src, ("%s: pd->src is null", __func__));
5112 KASSERT(pd->dst, ("%s: pd->dst is null", __func__));
5116 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5117 pf_change_a(&pd->src->v4.s_addr,
5119 nk->addr[pd->sidx].v4.s_addr,
5123 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5124 pf_change_a(&pd->dst->v4.s_addr,
5126 nk->addr[pd->didx].v4.s_addr,
5133 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6))
5134 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
5136 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6))
5137 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
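/*
 * Editor's sketch (assumed PF_ANEQ() semantics, illustration only): the
 * address-family argument decides how many bits are compared, which is
 * why the AF_INET6 fix above matters -- with AF_INET only the first
 * 32 bits of an IPv6 address would have been inspected.
 */
static int
pf_addr_neq_sketch(const struct pf_addr *a, const struct pf_addr *b,
    sa_family_t af)
{
	if (af == AF_INET)
		return (a->v4.s_addr != b->v4.s_addr);
	return (memcmp(&a->v6, &b->v6, sizeof(a->v6)) != 0);
}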
5144 /*
5145 * ipoff and off are measured from the start of the mbuf chain.
5146 * h must be at "ipoff" on the mbuf chain.
5147 */
5148 void *
5149 pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
5150 u_short *actionp, u_short *reasonp, sa_family_t af)
5155 struct ip *h = mtod(m, struct ip *);
5156 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
5158 if (fragoff) {
5159 if (fragoff >= len)
5160 ACTION_SET(actionp, PF_PASS);
5161 else {
5162 ACTION_SET(actionp, PF_DROP);
5163 REASON_SET(reasonp, PFRES_FRAG);
5164 }
5165 return (NULL);
5166 }
5167 if (m->m_pkthdr.len < off + len ||
5168 ntohs(h->ip_len) < off + len) {
5169 ACTION_SET(actionp, PF_DROP);
5170 REASON_SET(reasonp, PFRES_SHORT);
5171 return (NULL);
5172 }
5178 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
5180 if (m->m_pkthdr.len < off + len ||
5181 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
5182 (unsigned)(off + len)) {
5183 ACTION_SET(actionp, PF_DROP);
5184 REASON_SET(reasonp, PFRES_SHORT);
5185 return (NULL);
5186 }
5191 m_copydata(m, off, len, p);
5192 return (p);
5193 }
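/*
 * Editor's note: a typical caller pattern (cf. pf_test() below) is
 *
 *	struct tcphdr th;
 *
 *	if (!pf_pull_hdr(m, off, &th, sizeof(th), &action, &reason, af)) {
 *		log = action != PF_PASS;
 *		goto done;
 *	}
 *
 * i.e. on failure pf_pull_hdr() has already set the action and reason
 * and the caller only decides whether to log; on success the caller
 * owns a contiguous copy of the header even if it straddled mbufs.
 */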
5195 int
5196 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif,
5197 int rtableid)
5198 {
5200 struct radix_node_head *rnh;
5202 struct sockaddr_in *dst;
5206 struct sockaddr_in6 *dst6;
5207 struct route_in6 ro;
5211 struct radix_node *rn;
5217 /* XXX: stick to table 0 for now */
5218 rnh = rt_tables_get_rnh(0, af);
5219 if (rnh != NULL && rn_mpath_capable(rnh))
5222 bzero(&ro, sizeof(ro));
5225 dst = satosin(&ro.ro_dst);
5226 dst->sin_family = AF_INET;
5227 dst->sin_len = sizeof(*dst);
5228 dst->sin_addr = addr->v4;
5233 * Skip check for addresses with embedded interface scope,
5234 * as they would always match anyway.
5236 if (IN6_IS_SCOPE_EMBED(&addr->v6))
5238 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
5239 dst6->sin6_family = AF_INET6;
5240 dst6->sin6_len = sizeof(*dst6);
5241 dst6->sin6_addr = addr->v6;
5248 /* Skip checks for ipsec interfaces */
5249 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
5255 in6_rtalloc_ign(&ro, 0, rtableid);
5260 in_rtalloc_ign((struct route *)&ro, 0, rtableid);
5264 rtalloc_ign((struct route *)&ro, 0); /* No/default FIB. */
5268 if (ro.ro_rt != NULL) {
5269 /* No interface given, this is a no-route check */
5273 if (kif->pfik_ifp == NULL) {
5278 /* Perform uRPF check if passed input interface */
5280 rn = (struct radix_node *)ro.ro_rt;
5282 rt = (struct rtentry *)rn;
5285 if (kif->pfik_ifp == ifp)
5288 rn = rn_mpath_next(rn);
5290 } while (check_mpath == 1 && rn != NULL && ret == 0);
5294 if (ro.ro_rt != NULL)
5295 RTFREE(ro.ro_rt);
5297 return (ret);
5298 }
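/*
 * Editor's sketch of the decision above (simplified): with an input
 * interface supplied, pf_routable() implements a uRPF-style test --
 * accept iff some route (or any of its multipath siblings) back to the
 * address leaves via that interface; with no interface supplied it is
 * just a "does any route exist" check.
 *
 *	ret = 0;
 *	for (rn = route; rn != NULL; rn = rn_mpath_next(rn))
 *		if (((struct rtentry *)rn)->rt_ifp == kif->pfik_ifp)
 *			ret = 1;
 */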
5300 static void
5301 pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5302 struct pf_state *s, struct pf_pdesc *pd)
5303 {
5304 struct mbuf *m0, *m1;
5305 struct sockaddr_in dst;
5307 struct ifnet *ifp = NULL;
5308 struct pf_addr naddr;
5309 struct pf_src_node *sn = NULL;
5311 uint16_t ip_len, ip_off;
5313 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5314 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5317 if ((pd->pf_mtag == NULL &&
5318 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5319 pd->pf_mtag->routed++ > 3) {
5325 if (r->rt == PF_DUPTO) {
5326 if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
5332 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5340 ip = mtod(m0, struct ip *);
5342 bzero(&dst, sizeof(dst));
5343 dst.sin_family = AF_INET;
5344 dst.sin_len = sizeof(dst);
5345 dst.sin_addr = ip->ip_dst;
5347 if (r->rt == PF_FASTROUTE) {
5352 rt = rtalloc1_fib(sintosa(&dst), 0, 0, M_GETFIB(m0));
5353 if (rt == NULL) {
5354 KMOD_IPSTAT_INC(ips_noroute);
5355 error = EHOSTUNREACH;
5356 goto bad;
5357 }
5359 ifp = rt->rt_ifp;
5360 counter_u64_add(rt->rt_pksent, 1);
5362 if (rt->rt_flags & RTF_GATEWAY)
5363 bcopy(satosin(rt->rt_gateway), &dst, sizeof(dst));
5366 if (TAILQ_EMPTY(&r->rpool.list)) {
5367 DPFPRINTF(PF_DEBUG_URGENT,
5368 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5372 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
5373 &naddr, NULL, &sn);
5374 if (!PF_AZERO(&naddr, AF_INET))
5375 dst.sin_addr.s_addr = naddr.v4.s_addr;
5376 ifp = r->rpool.cur->kif ?
5377 r->rpool.cur->kif->pfik_ifp : NULL;
5379 if (!PF_AZERO(&s->rt_addr, AF_INET))
5380 dst.sin_addr.s_addr =
5381 s->rt_addr.v4.s_addr;
5382 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5390 if (pf_test(PF_OUT, ifp, &m0, NULL) != PF_PASS)
5391 goto bad;
5392 else if (m0 == NULL)
5393 goto done;
5394 if (m0->m_len < sizeof(struct ip)) {
5395 DPFPRINTF(PF_DEBUG_URGENT,
5396 ("%s: m0->m_len < sizeof(struct ip)\n", __func__));
5397 goto bad;
5398 }
5399 ip = mtod(m0, struct ip *);
5402 if (ifp->if_flags & IFF_LOOPBACK)
5403 m0->m_flags |= M_SKIP_FIREWALL;
5405 ip_len = ntohs(ip->ip_len);
5406 ip_off = ntohs(ip->ip_off);
5408 /* Copied from FreeBSD 10.0-CURRENT ip_output. */
5409 m0->m_pkthdr.csum_flags |= CSUM_IP;
5410 if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) {
5411 in_delayed_cksum(m0);
5412 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
5415 if (m0->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) {
5416 sctp_delayed_cksum(m0, (uint32_t)(ip->ip_hl << 2));
5417 m0->m_pkthdr.csum_flags &= ~CSUM_SCTP;
5422 * If small enough for interface, or the interface will take
5423 * care of the fragmentation for us, we can just send directly.
5425 if (ip_len <= ifp->if_mtu ||
5426 (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0 ||
5427 ((ip_off & IP_DF) == 0 && (ifp->if_hwassist & CSUM_FRAGMENT))) {
5429 if (m0->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) {
5430 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
5431 m0->m_pkthdr.csum_flags &= ~CSUM_IP;
5433 m_clrprotoflags(m0); /* Avoid confusing lower layers. */
5434 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5438 /* Balk when DF bit is set or the interface didn't support TSO. */
5439 if ((ip_off & IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5441 KMOD_IPSTAT_INC(ips_cantfrag);
5442 if (r->rt != PF_DUPTO) {
5443 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
5450 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist);
5454 for (; m0; m0 = m1) {
5456 m0->m_nextpkt = NULL;
5458 m_clrprotoflags(m0);
5459 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5465 KMOD_IPSTAT_INC(ips_fragmented);
5468 if (r->rt != PF_DUPTO)
5469 *m = NULL;
5470 return;
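/*
 * Editor's sketch (illustration of the tests in pf_route() above, not a
 * drop-in helper): a packet may skip software fragmentation when it
 * fits the MTU, when TSO hardware will segment it, or when DF is clear
 * and the NIC fragments in hardware; a set DF bit otherwise yields the
 * ICMP "fragmentation needed" error seen above.
 */
static __inline int
pf_can_tx_directly_sketch(uint16_t ip_len, uint16_t ip_off,
    uint32_t csum_flags, const struct ifnet *ifp)
{
	return (ip_len <= ifp->if_mtu ||
	    (csum_flags & ifp->if_hwassist & CSUM_TSO) != 0 ||
	    ((ip_off & IP_DF) == 0 && (ifp->if_hwassist & CSUM_FRAGMENT)));
}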
5482 static void
5483 pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5484 struct pf_state *s, struct pf_pdesc *pd)
5485 {
5486 struct mbuf *m0;
5487 struct sockaddr_in6 dst;
5488 struct ip6_hdr *ip6;
5489 struct ifnet *ifp = NULL;
5490 struct pf_addr naddr;
5491 struct pf_src_node *sn = NULL;
5493 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5494 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5497 if ((pd->pf_mtag == NULL &&
5498 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5499 pd->pf_mtag->routed++ > 3) {
5505 if (r->rt == PF_DUPTO) {
5506 if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
5512 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5520 ip6 = mtod(m0, struct ip6_hdr *);
5522 bzero(&dst, sizeof(dst));
5523 dst.sin6_family = AF_INET6;
5524 dst.sin6_len = sizeof(dst);
5525 dst.sin6_addr = ip6->ip6_dst;
5527 /* Cheat. XXX why only in the v6 case??? */
5528 if (r->rt == PF_FASTROUTE) {
5531 m0->m_flags |= M_SKIP_FIREWALL;
5532 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
5537 if (TAILQ_EMPTY(&r->rpool.list)) {
5538 DPFPRINTF(PF_DEBUG_URGENT,
5539 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5543 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
5544 &naddr, NULL, &sn);
5545 if (!PF_AZERO(&naddr, AF_INET6))
5546 PF_ACPY((struct pf_addr *)&dst.sin6_addr,
5547 &naddr, AF_INET6);
5548 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
5550 if (!PF_AZERO(&s->rt_addr, AF_INET6))
5551 PF_ACPY((struct pf_addr *)&dst.sin6_addr,
5552 &s->rt_addr, AF_INET6);
5553 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5563 if (pf_test6(PF_FWD, ifp, &m0, NULL) != PF_PASS)
5564 goto bad;
5565 else if (m0 == NULL)
5566 goto done;
5567 if (m0->m_len < sizeof(struct ip6_hdr)) {
5568 DPFPRINTF(PF_DEBUG_URGENT,
5569 ("%s: m0->m_len < sizeof(struct ip6_hdr)\n",
5570 __func__));
5571 goto bad;
5572 }
5573 ip6 = mtod(m0, struct ip6_hdr *);
5576 if (ifp->if_flags & IFF_LOOPBACK)
5577 m0->m_flags |= M_SKIP_FIREWALL;
5579 if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA_IPV6 &
5580 ~ifp->if_hwassist) {
5581 uint32_t plen = m0->m_pkthdr.len - sizeof(*ip6);
5582 in6_delayed_cksum(m0, plen, sizeof(struct ip6_hdr));
5583 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA_IPV6;
5587 * If the packet is too large for the outgoing interface,
5588 * send back an icmp6 error.
5590 if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr))
5591 dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
5592 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu)
5593 nd6_output(ifp, ifp, m0, &dst, NULL);
5595 in6_ifstat_inc(ifp, ifs6_in_toobig);
5596 if (r->rt != PF_DUPTO)
5597 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
5603 if (r->rt != PF_DUPTO)
5604 *m = NULL;
5605 return;
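/*
 * Editor's note on r->rt semantics (as used by both pf_route() and
 * pf_route6() above; summary, not source): PF_DUPTO operates on an
 * m_dup() copy so the original continues down its normal path,
 * PF_REPLYTO -- per the guard at the top of both functions -- applies
 * only to packets travelling opposite the rule's direction, and
 * PF_FASTROUTE bypasses the rule's routing pool and consults the
 * routing table directly.
 */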
5616 /*
5617 * FreeBSD supports cksum offloads for the following drivers:
5618 *  em(4), fxp(4), ixgb(4), lge(4), ndis(4), nge(4), re(4),
5619 *  ti(4), txp(4), xl(4)
5620 *
5621 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR :
5622 *  the network driver performed the cksum, including the pseudo header;
5623 *  only csum_data needs to be verified.
5624 * CSUM_DATA_VALID :
5625 *  the network driver performed the cksum, but an additional pseudo-header
5626 *  cksum computation over the partial csum_data is required (i.e. the H/W
5627 *  lacks pseudo-header support; for instance hme(4), sk(4) and possibly
5628 *  gem(4)).
5629 *
5630 * After validating the cksum of a packet, set both CSUM_DATA_VALID and
5631 * CSUM_PSEUDO_HDR in order to avoid recomputation of the cksum in the
5632 * upper layers.  Also, set csum_data to 0xffff to force cksum validation.
5633 */
5634 static int
5635 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af)
5636 {
5637 u_int16_t sum = 0;
5638 struct ip *ip;
5641 if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
5643 if (m->m_pkthdr.len < off + len)
5648 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5649 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5650 sum = m->m_pkthdr.csum_data;
5652 ip = mtod(m, struct ip *);
5653 sum = in_pseudo(ip->ip_src.s_addr,
5654 ip->ip_dst.s_addr, htonl((u_short)len +
5655 m->m_pkthdr.csum_data + IPPROTO_TCP));
5662 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5663 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5664 sum = m->m_pkthdr.csum_data;
5666 ip = mtod(m, struct ip *);
5667 sum = in_pseudo(ip->ip_src.s_addr,
5668 ip->ip_dst.s_addr, htonl((u_short)len +
5669 m->m_pkthdr.csum_data + IPPROTO_UDP));
5677 case IPPROTO_ICMPV6:
5687 if (p == IPPROTO_ICMP) {
5692 sum = in_cksum(m, len);
5696 if (m->m_len < sizeof(struct ip))
5697 return (1);
5698 sum = in4_cksum(m, p, off, len);
5703 if (m->m_len < sizeof(struct ip6_hdr))
5704 return (1);
5705 sum = in6_cksum(m, p, off, len);
5716 KMOD_TCPSTAT_INC(tcps_rcvbadsum);
5721 KMOD_UDPSTAT_INC(udps_badsum);
5727 KMOD_ICMPSTAT_INC(icps_checksum);
5732 case IPPROTO_ICMPV6:
5734 KMOD_ICMP6STAT_INC(icp6s_checksum);
5741 if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
5742 m->m_pkthdr.csum_flags |=
5743 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
5744 m->m_pkthdr.csum_data = 0xffff;
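/*
 * Editor's illustration of the two offload flavours handled above
 * (assumed hardware behaviour): with CSUM_PSEUDO_HDR, csum_data already
 * holds the final checksum; without it, csum_data covers the payload
 * only and the pseudo header is folded in by software, e.g. for TCP:
 *
 *	sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
 *	    htonl((u_short)len + m->m_pkthdr.csum_data + IPPROTO_TCP));
 *
 * A packet is declared valid only when the folded checksum verifies,
 * and the flags/csum_data rewrite above then spares the upper layers a
 * second pass over the data.
 */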
5752 int
5753 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
5754 {
5755 struct pfi_kif *kif;
5756 u_short action, reason = 0, log = 0;
5757 struct mbuf *m = *m0;
5758 struct ip *h = NULL;
5759 struct m_tag *ipfwtag;
5760 struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
5761 struct pf_state *s = NULL;
5762 struct pf_ruleset *ruleset = NULL;
5764 int off, dirndx, pqid = 0;
5768 if (!V_pf_status.running)
5771 memset(&pd, 0, sizeof(pd));
5773 kif = (struct pfi_kif *)ifp->if_pf_kif;
5776 DPFPRINTF(PF_DEBUG_URGENT,
5777 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
5780 if (kif->pfik_flags & PFI_IFLAG_SKIP)
5783 if (m->m_flags & M_SKIP_FIREWALL)
5786 pd.pf_mtag = pf_find_mtag(m);
5790 if (ip_divert_ptr != NULL &&
5791 ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
5792 struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1);
5793 if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) {
5794 if (pd.pf_mtag == NULL &&
5795 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
5799 pd.pf_mtag->flags |= PF_PACKET_LOOPED;
5800 m_tag_delete(m, ipfwtag);
5802 if (pd.pf_mtag && pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) {
5803 m->m_flags |= M_FASTFWD_OURS;
5804 pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
5806 } else if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
5807 /* We do IP header normalization and packet reassembly here */
5811 m = *m0; /* pf_normalize messes with m0 */
5812 h = mtod(m, struct ip *);
5814 off = h->ip_hl << 2;
5815 if (off < (int)sizeof(struct ip)) {
5816 action = PF_DROP;
5817 REASON_SET(&reason, PFRES_SHORT);
5818 log = 1;
5819 goto done;
5820 }
5822 pd.src = (struct pf_addr *)&h->ip_src;
5823 pd.dst = (struct pf_addr *)&h->ip_dst;
5824 pd.sport = pd.dport = NULL;
5825 pd.ip_sum = &h->ip_sum;
5826 pd.proto_sum = NULL;
5829 pd.sidx = (dir == PF_IN) ? 0 : 1;
5830 pd.didx = (dir == PF_IN) ? 1 : 0;
5833 pd.tot_len = ntohs(h->ip_len);
5835 /* handle fragments that didn't get reassembled by normalization */
5836 if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
5837 action = pf_test_fragment(&r, dir, kif, m, h,
5838 &pd, &a, &ruleset);
5848 if (!pf_pull_hdr(m, off, &th, sizeof(th),
5849 &action, &reason, AF_INET)) {
5850 log = action != PF_PASS;
5853 pd.p_len = pd.tot_len - off - (th.th_off << 2);
5854 if ((th.th_flags & TH_ACK) && pd.p_len == 0)
5855 pqid = 1;
5856 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
5857 if (action == PF_DROP)
5859 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
5861 if (action == PF_PASS) {
5862 if (pfsync_update_state_ptr != NULL)
5863 pfsync_update_state_ptr(s);
5867 } else if (s == NULL)
5868 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5869 &a, &ruleset, inp);
5877 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
5878 &action, &reason, AF_INET)) {
5879 log = action != PF_PASS;
5882 if (uh.uh_dport == 0 ||
5883 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
5884 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
5885 action = PF_DROP;
5886 REASON_SET(&reason, PFRES_SHORT);
5887 goto done;
5888 }
5889 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
5890 if (action == PF_PASS) {
5891 if (pfsync_update_state_ptr != NULL)
5892 pfsync_update_state_ptr(s);
5896 } else if (s == NULL)
5897 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5898 &a, &ruleset, inp);
5902 case IPPROTO_ICMP: {
5906 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
5907 &action, &reason, AF_INET)) {
5908 log = action != PF_PASS;
5911 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
5913 if (action == PF_PASS) {
5914 if (pfsync_update_state_ptr != NULL)
5915 pfsync_update_state_ptr(s);
5919 } else if (s == NULL)
5920 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5921 &a, &ruleset, inp);
5926 case IPPROTO_ICMPV6: {
5928 DPFPRINTF(PF_DEBUG_MISC,
5929 ("pf: dropping IPv4 packet with ICMPv6 payload\n"));
5935 action = pf_test_state_other(&s, dir, kif, m, &pd);
5936 if (action == PF_PASS) {
5937 if (pfsync_update_state_ptr != NULL)
5938 pfsync_update_state_ptr(s);
5942 } else if (s == NULL)
5943 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5944 &a, &ruleset, inp);
5950 if (action == PF_PASS && h->ip_hl > 5 &&
5951 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
5952 action = PF_DROP;
5953 REASON_SET(&reason, PFRES_IPOPTIONS);
5954 log = 1;
5955 DPFPRINTF(PF_DEBUG_MISC,
5956 ("pf: dropping packet with ip options\n"));
5959 if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
5960 action = PF_DROP;
5961 REASON_SET(&reason, PFRES_MEMORY);
5962 }
5963 if (r->rtableid >= 0)
5964 M_SETFIB(m, r->rtableid);
5967 if (action == PF_PASS && r->qid) {
5968 if (pd.pf_mtag == NULL &&
5969 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
5970 action = PF_DROP;
5971 REASON_SET(&reason, PFRES_MEMORY);
5972 } else {
5973 if (pqid || (pd.tos & IPTOS_LOWDELAY))
5974 pd.pf_mtag->qid = r->pqid;
5975 else
5976 pd.pf_mtag->qid = r->qid;
5977 /* Add hints for ECN. */
5978 pd.pf_mtag->hdr = h;
5985 * connections redirected to loopback should not match sockets
5986 * bound specifically to loopback due to security implications,
5987 * see tcp_input() and in_pcblookup_listen().
5989 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
5990 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
5991 (s->nat_rule.ptr->action == PF_RDR ||
5992 s->nat_rule.ptr->action == PF_BINAT) &&
5993 (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
5994 m->m_flags |= M_SKIP_FIREWALL;
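/*
 * Editor's note on the test above: IN_CLASSA_NSHIFT is 24, so shifting
 * the host-order destination right by it leaves only the leading octet,
 * and the comparison is simply "destination is inside 127.0.0.0/8",
 * e.g. 127.0.0.1 -> ntohl() gives 0x7f000001, and
 * 0x7f000001 >> 24 == 0x7f == 127 == IN_LOOPBACKNET.
 */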
5996 if (action == PF_PASS && r->divert.port && ip_divert_ptr != NULL &&
5997 !PACKET_LOOPED(&pd)) {
5999 ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0,
6000 sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
6001 if (ipfwtag != NULL) {
6002 ((struct ipfw_rule_ref *)(ipfwtag+1))->info =
6003 ntohs(r->divert.port);
6004 ((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir;
6009 m_tag_prepend(m, ipfwtag);
6010 if (m->m_flags & M_FASTFWD_OURS) {
6011 if (pd.pf_mtag == NULL &&
6012 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6013 action = PF_DROP;
6014 REASON_SET(&reason, PFRES_MEMORY);
6015 log = 1;
6016 DPFPRINTF(PF_DEBUG_MISC,
6017 ("pf: failed to allocate tag\n"));
6018 } else {
6019 pd.pf_mtag->flags |=
6020 PF_FASTFWD_OURS_PRESENT;
6021 m->m_flags &= ~M_FASTFWD_OURS;
6024 ip_divert_ptr(*m0, dir == PF_IN ? DIR_IN : DIR_OUT);
6029 /* XXX: ipfw has the same behaviour! */
6030 action = PF_DROP;
6031 REASON_SET(&reason, PFRES_MEMORY);
6032 log = 1;
6033 DPFPRINTF(PF_DEBUG_MISC,
6034 ("pf: failed to allocate divert tag\n"));
6041 if (s != NULL && s->nat_rule.ptr != NULL &&
6042 s->nat_rule.ptr->log & PF_LOG_ALL)
6043 lr = s->nat_rule.ptr;
6046 PFLOG_PACKET(kif, m, AF_INET, dir, reason, lr, a, ruleset, &pd,
6050 kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
6051 kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
6053 if (action == PF_PASS || r->action == PF_DROP) {
6054 dirndx = (dir == PF_OUT);
6055 r->packets[dirndx]++;
6056 r->bytes[dirndx] += pd.tot_len;
6058 a->packets[dirndx]++;
6059 a->bytes[dirndx] += pd.tot_len;
6062 if (s->nat_rule.ptr != NULL) {
6063 s->nat_rule.ptr->packets[dirndx]++;
6064 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
6066 if (s->src_node != NULL) {
6067 s->src_node->packets[dirndx]++;
6068 s->src_node->bytes[dirndx] += pd.tot_len;
6070 if (s->nat_src_node != NULL) {
6071 s->nat_src_node->packets[dirndx]++;
6072 s->nat_src_node->bytes[dirndx] += pd.tot_len;
6074 dirndx = (dir == s->direction) ? 0 : 1;
6075 s->packets[dirndx]++;
6076 s->bytes[dirndx] += pd.tot_len;
6077 }
6078 tr = r;
6079 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6080 if (nr != NULL && r == &V_pf_default_rule)
6081 tr = nr;
6082 if (tr->src.addr.type == PF_ADDR_TABLE)
6083 pfr_update_stats(tr->src.addr.p.tbl,
6084 (s == NULL) ? pd.src :
6085 &s->key[(s->direction == PF_IN)]->
6086 addr[(s->direction == PF_OUT)],
6087 pd.af, pd.tot_len, dir == PF_OUT,
6088 r->action == PF_PASS, tr->src.neg);
6089 if (tr->dst.addr.type == PF_ADDR_TABLE)
6090 pfr_update_stats(tr->dst.addr.p.tbl,
6091 (s == NULL) ? pd.dst :
6092 &s->key[(s->direction == PF_IN)]->
6093 addr[(s->direction == PF_IN)],
6094 pd.af, pd.tot_len, dir == PF_OUT,
6095 r->action == PF_PASS, tr->dst.neg);
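/*
 * Editor's note: the boolean indexing above -- key[(s->direction ==
 * PF_IN)] and addr[(s->direction == PF_OUT)] -- picks one of the two
 * state keys (PF_SK_WIRE is assumed to be 0 and PF_SK_STACK 1) and one
 * address slot according to the state's direction, so table counters
 * are charged against the address the table rule actually matched
 * rather than its NAT rewrite.
 */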
6099 case PF_SYNPROXY_DROP:
6110 /* pf_route() returns unlocked. */
6112 pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);
6125 int
6126 pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
6127 {
6128 struct pfi_kif *kif;
6129 u_short action, reason = 0, log = 0;
6130 struct mbuf *m = *m0, *n = NULL;
6132 struct ip6_hdr *h = NULL;
6133 struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
6134 struct pf_state *s = NULL;
6135 struct pf_ruleset *ruleset = NULL;
6137 int off, terminal = 0, dirndx, rh_cnt = 0;
6142 if (dir == PF_OUT && m->m_pkthdr.rcvif && ifp != m->m_pkthdr.rcvif)
6143 fwdir = PF_FWD;
6145 if (!V_pf_status.running)
6148 memset(&pd, 0, sizeof(pd));
6149 pd.pf_mtag = pf_find_mtag(m);
6151 if (pd.pf_mtag && pd.pf_mtag->flags & PF_TAG_GENERATED)
6154 kif = (struct pfi_kif *)ifp->if_pf_kif;
6156 DPFPRINTF(PF_DEBUG_URGENT,
6157 ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
6160 if (kif->pfik_flags & PFI_IFLAG_SKIP)
6163 if (m->m_flags & M_SKIP_FIREWALL)
6168 /* We do IP header normalization and packet reassembly here */
6169 if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
6173 m = *m0; /* pf_normalize messes with m0 */
6174 h = mtod(m, struct ip6_hdr *);
/*
6178 * We do not support jumbograms yet.  If we keep going, a zero ip6_plen
6179 * will do something bad, so drop the packet for now.
6180 */
6181 if (ntohs(h->ip6_plen) == 0) {
6182 action = PF_DROP;
6183 REASON_SET(&reason, PFRES_NORM);	/* XXX */
6188 pd.src = (struct pf_addr *)&h->ip6_src;
6189 pd.dst = (struct pf_addr *)&h->ip6_dst;
6190 pd.sport = pd.dport = NULL;
6192 pd.proto_sum = NULL;
6194 pd.sidx = (dir == PF_IN) ? 0 : 1;
6195 pd.didx = (dir == PF_IN) ? 1 : 0;
6198 pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
6200 off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
6201 pd.proto = h->ip6_nxt;
6204 case IPPROTO_FRAGMENT:
6205 action = pf_test_fragment(&r, dir, kif, m, h,
6206 &pd, &a, &ruleset);
6207 if (action == PF_DROP)
6208 REASON_SET(&reason, PFRES_FRAG);
6210 case IPPROTO_ROUTING: {
6211 struct ip6_rthdr rthdr;
6213 if (rh_cnt++) {
6214 DPFPRINTF(PF_DEBUG_MISC,
6215 ("pf: IPv6 more than one rthdr\n"));
6217 REASON_SET(&reason, PFRES_IPOPTIONS);
6221 if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
6222 &reason, pd.af)) {
6223 DPFPRINTF(PF_DEBUG_MISC,
6224 ("pf: IPv6 short rthdr\n"));
6226 REASON_SET(&reason, PFRES_SHORT);
6230 if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
6231 DPFPRINTF(PF_DEBUG_MISC,
6232 ("pf: IPv6 rthdr0\n"));
6234 REASON_SET(&reason, PFRES_IPOPTIONS);
6241 case IPPROTO_HOPOPTS:
6242 case IPPROTO_DSTOPTS: {
6243 /* get next header and header length */
6244 struct ip6_ext opt6;
6246 if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
6247 NULL, &reason, pd.af)) {
6248 DPFPRINTF(PF_DEBUG_MISC,
6249 ("pf: IPv6 short opt\n"));
6254 if (pd.proto == IPPROTO_AH)
6255 off += (opt6.ip6e_len + 2) * 4;
6257 off += (opt6.ip6e_len + 1) * 8;
6258 pd.proto = opt6.ip6e_nxt;
6259 /* go to the next header */
6266 } while (!terminal);
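/*
 * Editor's sketch of the option-length arithmetic above (per the AH and
 * IPv6 generic-option length conventions; stated as an assumption,
 * illustration only): ip6e_len counts 32-bit words beyond the first two
 * for AH, and 8-octet units beyond the first eight for other options,
 * hence the two formulas in the loop.
 */
static __inline int
pf_ip6_opt_len_sketch(uint8_t proto, uint8_t ip6e_len)
{
	if (proto == IPPROTO_AH)
		return ((ip6e_len + 2) * 4);
	return ((ip6e_len + 1) * 8);
}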
6268 /* if there's no routing header, use unmodified mbuf for checksumming */
6278 if (!pf_pull_hdr(m, off, &th, sizeof(th),
6279 &action, &reason, AF_INET6)) {
6280 log = action != PF_PASS;
6283 pd.p_len = pd.tot_len - off - (th.th_off << 2);
6284 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6285 if (action == PF_DROP)
6287 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6289 if (action == PF_PASS) {
6290 if (pfsync_update_state_ptr != NULL)
6291 pfsync_update_state_ptr(s);
6295 } else if (s == NULL)
6296 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6297 &a, &ruleset, inp);
6305 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
6306 &action, &reason, AF_INET6)) {
6307 log = action != PF_PASS;
6310 if (uh.uh_dport == 0 ||
6311 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
6312 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
6313 action = PF_DROP;
6314 REASON_SET(&reason, PFRES_SHORT);
6315 goto done;
6316 }
6317 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6318 if (action == PF_PASS) {
6319 if (pfsync_update_state_ptr != NULL)
6320 pfsync_update_state_ptr(s);
6324 } else if (s == NULL)
6325 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6326 &a, &ruleset, inp);
6330 case IPPROTO_ICMP: {
6332 DPFPRINTF(PF_DEBUG_MISC,
6333 ("pf: dropping IPv6 packet with ICMPv4 payload\n"));
6337 case IPPROTO_ICMPV6: {
6338 struct icmp6_hdr ih;
6341 if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
6342 &action, &reason, AF_INET6)) {
6343 log = action != PF_PASS;
6346 action = pf_test_state_icmp(&s, dir, kif,
6347 m, off, h, &pd, &reason);
6348 if (action == PF_PASS) {
6349 if (pfsync_update_state_ptr != NULL)
6350 pfsync_update_state_ptr(s);
6354 } else if (s == NULL)
6355 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6356 &a, &ruleset, inp);
6361 action = pf_test_state_other(&s, dir, kif, m, &pd);
6362 if (action == PF_PASS) {
6363 if (pfsync_update_state_ptr != NULL)
6364 pfsync_update_state_ptr(s);
6368 } else if (s == NULL)
6369 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6370 &a, &ruleset, inp);
6381 /* handle dangerous IPv6 extension headers. */
6382 if (action == PF_PASS && rh_cnt &&
6383 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6384 action = PF_DROP;
6385 REASON_SET(&reason, PFRES_IPOPTIONS);
6386 log = 1;
6387 DPFPRINTF(PF_DEBUG_MISC,
6388 ("pf: dropping packet with dangerous v6 headers\n"));
6391 if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
6392 action = PF_DROP;
6393 REASON_SET(&reason, PFRES_MEMORY);
6394 }
6395 if (r->rtableid >= 0)
6396 M_SETFIB(m, r->rtableid);
6399 if (action == PF_PASS && r->qid) {
6400 if (pd.pf_mtag == NULL &&
6401 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6402 action = PF_DROP;
6403 REASON_SET(&reason, PFRES_MEMORY);
6404 } else {
6405 if (pd.tos & IPTOS_LOWDELAY)
6406 pd.pf_mtag->qid = r->pqid;
6407 else
6408 pd.pf_mtag->qid = r->qid;
6409 /* Add hints for ECN. */
6410 pd.pf_mtag->hdr = h;
6415 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6416 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6417 (s->nat_rule.ptr->action == PF_RDR ||
6418 s->nat_rule.ptr->action == PF_BINAT) &&
6419 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
6420 m->m_flags |= M_SKIP_FIREWALL;
6422 /* XXX: Anybody working on it?! */
6424 printf("pf: divert(9) is not supported for IPv6\n");
6429 if (s != NULL && s->nat_rule.ptr != NULL &&
6430 s->nat_rule.ptr->log & PF_LOG_ALL)
6431 lr = s->nat_rule.ptr;
6434 PFLOG_PACKET(kif, m, AF_INET6, dir, reason, lr, a, ruleset,
6438 kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
6439 kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
6441 if (action == PF_PASS || r->action == PF_DROP) {
6442 dirndx = (dir == PF_OUT);
6443 r->packets[dirndx]++;
6444 r->bytes[dirndx] += pd.tot_len;
6446 a->packets[dirndx]++;
6447 a->bytes[dirndx] += pd.tot_len;
6450 if (s->nat_rule.ptr != NULL) {
6451 s->nat_rule.ptr->packets[dirndx]++;
6452 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
6454 if (s->src_node != NULL) {
6455 s->src_node->packets[dirndx]++;
6456 s->src_node->bytes[dirndx] += pd.tot_len;
6458 if (s->nat_src_node != NULL) {
6459 s->nat_src_node->packets[dirndx]++;
6460 s->nat_src_node->bytes[dirndx] += pd.tot_len;
6462 dirndx = (dir == s->direction) ? 0 : 1;
6463 s->packets[dirndx]++;
6464 s->bytes[dirndx] += pd.tot_len;
6465 }
6466 tr = r;
6467 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6468 if (nr != NULL && r == &V_pf_default_rule)
6469 tr = nr;
6470 if (tr->src.addr.type == PF_ADDR_TABLE)
6471 pfr_update_stats(tr->src.addr.p.tbl,
6472 (s == NULL) ? pd.src :
6473 &s->key[(s->direction == PF_IN)]->addr[0],
6474 pd.af, pd.tot_len, dir == PF_OUT,
6475 r->action == PF_PASS, tr->src.neg);
6476 if (tr->dst.addr.type == PF_ADDR_TABLE)
6477 pfr_update_stats(tr->dst.addr.p.tbl,
6478 (s == NULL) ? pd.dst :
6479 &s->key[(s->direction == PF_IN)]->addr[1],
6480 pd.af, pd.tot_len, dir == PF_OUT,
6481 r->action == PF_PASS, tr->dst.neg);
6485 case PF_SYNPROXY_DROP:
6496 /* pf_route6() returns unlocked. */
6498 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
6507 /* If reassembled packet passed, create new fragments. */
6508 if (action == PF_PASS && *m0 && fwdir == PF_FWD &&
6509 (mtag = m_tag_find(m, PF_REASSEMBLED, NULL)) != NULL)
6510 action = pf_refragment6(ifp, m0, mtag);
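/*
 * Editor's note: pf_normalize_ip6() reassembled forwarded fragments so
 * the filter could inspect whole packets; the PF_REASSEMBLED mbuf tag
 * records that, and pf_refragment6() is assumed here to split the
 * passed packet back into fragments before transmission, preserving
 * the original sender's fragmentation on the PF_FWD path.
 */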