 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2008 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/random.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/ucred.h>

#include <net/if_types.h>
#include <net/route.h>
#include <net/radix_mpath.h>

#include <net/pfvar.h>
#include <net/if_pflog.h>
#include <net/if_pfsync.h>

#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <netpfil/ipfw/ip_fw_private.h> /* XXX: only for DIR_IN/DIR_OUT */

#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>

#include <machine/in_cksum.h>
#include <security/mac/mac_framework.h>
#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x

VNET_DEFINE(struct pf_altqqueue, pf_altqs[2]);
VNET_DEFINE(struct pf_palist, pf_pabuf);
VNET_DEFINE(struct pf_altqqueue *, pf_altqs_active);
VNET_DEFINE(struct pf_altqqueue *, pf_altqs_inactive);
VNET_DEFINE(struct pf_kstatus, pf_status);

VNET_DEFINE(u_int32_t, ticket_altqs_active);
VNET_DEFINE(u_int32_t, ticket_altqs_inactive);
VNET_DEFINE(int, altqs_inactive_open);
VNET_DEFINE(u_int32_t, ticket_pabuf);

VNET_DEFINE(MD5_CTX, pf_tcp_secret_ctx);
#define	V_pf_tcp_secret_ctx	VNET(pf_tcp_secret_ctx)
VNET_DEFINE(u_char, pf_tcp_secret[16]);
#define	V_pf_tcp_secret		VNET(pf_tcp_secret)
VNET_DEFINE(int, pf_tcp_secret_init);
#define	V_pf_tcp_secret_init	VNET(pf_tcp_secret_init)
VNET_DEFINE(int, pf_tcp_iss_off);
#define	V_pf_tcp_iss_off	VNET(pf_tcp_iss_off)

/*
 * Queue for pf_intr() sends.
 */
static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations");
struct pf_send_entry {
	STAILQ_ENTRY(pf_send_entry)	pfse_next;

#define	pfse_icmp_type	u.icmpopts.type
#define	pfse_icmp_code	u.icmpopts.code
#define	pfse_icmp_mtu	u.icmpopts.mtu

STAILQ_HEAD(pf_send_head, pf_send_entry);
static VNET_DEFINE(struct pf_send_head, pf_sendqueue);
#define	V_pf_sendqueue	VNET(pf_sendqueue)

static struct mtx pf_sendqueue_mtx;
#define	PF_SENDQ_LOCK()		mtx_lock(&pf_sendqueue_mtx)
#define	PF_SENDQ_UNLOCK()	mtx_unlock(&pf_sendqueue_mtx)

/*
 * Queue for pf_overload_task() tasks.
 */
struct pf_overload_entry {
	SLIST_ENTRY(pf_overload_entry)	next;
	struct pf_rule			*rule;

SLIST_HEAD(pf_overload_head, pf_overload_entry);
static VNET_DEFINE(struct pf_overload_head, pf_overloadqueue);
#define	V_pf_overloadqueue	VNET(pf_overloadqueue)
static VNET_DEFINE(struct task, pf_overloadtask);
#define	V_pf_overloadtask	VNET(pf_overloadtask)

static struct mtx pf_overloadqueue_mtx;
#define	PF_OVERLOADQ_LOCK()	mtx_lock(&pf_overloadqueue_mtx)
#define	PF_OVERLOADQ_UNLOCK()	mtx_unlock(&pf_overloadqueue_mtx)

VNET_DEFINE(struct pf_rulequeue, pf_unlinked_rules);
struct mtx pf_unlnkdrules_mtx;

static VNET_DEFINE(uma_zone_t, pf_sources_z);
#define	V_pf_sources_z	VNET(pf_sources_z)
uma_zone_t	pf_mtag_z;
VNET_DEFINE(uma_zone_t, pf_state_z);
VNET_DEFINE(uma_zone_t, pf_state_key_z);

VNET_DEFINE(uint64_t, pf_stateid[MAXCPU]);
#define	PFID_CPUBITS	8
#define	PFID_CPUSHIFT	(sizeof(uint64_t) * NBBY - PFID_CPUBITS)
#define	PFID_CPUMASK	((uint64_t)((1 << PFID_CPUBITS) - 1) << PFID_CPUSHIFT)
#define	PFID_MAXID	(~PFID_CPUMASK)
CTASSERT((1 << PFID_CPUBITS) > MAXCPU);
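/*
 * Illustrative sketch, not from the original source: with PFID_CPUBITS == 8
 * the 64-bit state id splits into an 8-bit CPU field (bits 63-56) and a
 * 56-bit per-CPU counter.  E.g. counter value 0x2a allocated on CPU 3 gives
 * the host-order id (3ULL << PFID_CPUSHIFT) | 0x2a == 0x030000000000002a,
 * which pf_state_insert() later byte-swaps with htobe64().
 */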
static void		 pf_src_tree_remove_state(struct pf_state *);
static void		 pf_init_threshold(struct pf_threshold *, u_int32_t,
static void		 pf_add_threshold(struct pf_threshold *);
static int		 pf_check_threshold(struct pf_threshold *);

static void		 pf_change_ap(struct pf_addr *, u_int16_t *,
			    u_int16_t *, u_int16_t *, struct pf_addr *,
			    u_int16_t, u_int8_t, sa_family_t);
static int		 pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
			    struct tcphdr *, struct pf_state_peer *);
static void		 pf_change_icmp(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, struct pf_addr *, u_int16_t,
			    u_int16_t *, u_int16_t *, u_int16_t *,
			    u_int16_t *, u_int8_t, sa_family_t);
static void		 pf_send_tcp(struct mbuf *,
			    const struct pf_rule *, sa_family_t,
			    const struct pf_addr *, const struct pf_addr *,
			    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
			    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
			    u_int16_t, struct ifnet *);
static void		 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
			    sa_family_t, struct pf_rule *);
static void		 pf_detach_state(struct pf_state *);
static int		 pf_state_key_attach(struct pf_state_key *,
			    struct pf_state_key *, struct pf_state *);
static void		 pf_state_key_detach(struct pf_state *, int);
static int		 pf_state_key_ctor(void *, int, void *, int);
static u_int32_t	 pf_tcp_iss(struct pf_pdesc *);
static int		 pf_test_rule(struct pf_rule **, struct pf_state **,
			    int, struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **, struct inpcb *);
static int		 pf_create_state(struct pf_rule *, struct pf_rule *,
			    struct pf_rule *, struct pf_pdesc *,
			    struct pf_src_node *, struct pf_state_key *,
			    struct pf_state_key *, struct mbuf *, int,
			    u_int16_t, u_int16_t, int *, struct pfi_kif *,
			    struct pf_state **, int, u_int16_t, u_int16_t,
static int		 pf_test_fragment(struct pf_rule **, int,
			    struct pfi_kif *, struct mbuf *, void *,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **);
static int		 pf_tcp_track_full(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, u_short *, int *);
static int		 pf_tcp_track_sloppy(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pf_pdesc *, u_short *);
static int		 pf_test_state_tcp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_udp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *);
static int		 pf_test_state_icmp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_other(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
static u_int8_t		 pf_get_wscale(struct mbuf *, int, u_int16_t,
static u_int16_t	 pf_get_mss(struct mbuf *, int, u_int16_t,
static u_int16_t	 pf_calc_mss(struct pf_addr *, sa_family_t,
static void		 pf_set_rt_ifp(struct pf_state *,
static int		 pf_check_proto_cksum(struct mbuf *, int, int,
			    u_int8_t, sa_family_t);
static void		 pf_print_state_parts(struct pf_state *,
			    struct pf_state_key *, struct pf_state_key *);
static int		 pf_addr_wrap_neq(struct pf_addr_wrap *,
			    struct pf_addr_wrap *);
static struct pf_state	*pf_find_state(struct pfi_kif *,
			    struct pf_state_key_cmp *, u_int);
static int		 pf_src_connlimit(struct pf_state **);
static void		 pf_overload_task(void *v, int pending);
static int		 pf_insert_src_node(struct pf_src_node **,
			    struct pf_rule *, struct pf_addr *, sa_family_t);
static u_int		 pf_purge_expired_states(u_int, int);
static void		 pf_purge_unlinked_rules(void);
static int		 pf_mtag_uminit(void *, int, int);
static void		 pf_mtag_free(struct m_tag *);
static void		 pf_route(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
static void		 pf_change_a6(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, u_int8_t);
static void		 pf_route6(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,

int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);

VNET_DECLARE(int, pf_end_threads);
VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);

#define	PACKET_LOOPED(pd)	((pd)->pf_mtag &&			\
				 (pd)->pf_mtag->flags & PF_PACKET_LOOPED)

#define	STATE_LOOKUP(i, k, d, s, pd)					\
	(s) = pf_find_state((i), (k), (d));				\
	if (PACKET_LOOPED(pd))						\
	if ((d) == PF_OUT &&						\
	    (((s)->rule.ptr->rt == PF_ROUTETO &&			\
	    (s)->rule.ptr->direction == PF_OUT) ||			\
	    ((s)->rule.ptr->rt == PF_REPLYTO &&				\
	    (s)->rule.ptr->direction == PF_IN)) &&			\
	    (s)->rt_kif != NULL &&					\
	    (s)->rt_kif != (i))						\

#define	BOUND_IFACE(r, k)						\
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all

#define	STATE_INC_COUNTERS(s)						\
	counter_u64_add(s->rule.ptr->states_cur, 1);			\
	counter_u64_add(s->rule.ptr->states_tot, 1);			\
	if (s->anchor.ptr != NULL) {					\
		counter_u64_add(s->anchor.ptr->states_cur, 1);		\
		counter_u64_add(s->anchor.ptr->states_tot, 1);		\
	if (s->nat_rule.ptr != NULL) {					\
		counter_u64_add(s->nat_rule.ptr->states_cur, 1);	\
		counter_u64_add(s->nat_rule.ptr->states_tot, 1);	\

#define	STATE_DEC_COUNTERS(s)						\
	if (s->nat_rule.ptr != NULL)					\
		counter_u64_add(s->nat_rule.ptr->states_cur, -1);	\
	if (s->anchor.ptr != NULL)					\
		counter_u64_add(s->anchor.ptr->states_cur, -1);		\
	counter_u64_add(s->rule.ptr->states_cur, -1);			\

static MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
VNET_DEFINE(struct pf_idhash *, pf_idhash);
VNET_DEFINE(u_long, pf_hashmask);
VNET_DEFINE(struct pf_srchash *, pf_srchash);
VNET_DEFINE(u_long, pf_srchashmask);

SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW, 0, "pf(4)");

VNET_DEFINE(u_long, pf_hashsize);
#define	V_pf_hashsize	VNET(pf_hashsize)
SYSCTL_VNET_UINT(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN,
    &VNET_NAME(pf_hashsize), 0, "Size of pf(4) states hashtable");

VNET_DEFINE(u_long, pf_srchashsize);
#define	V_pf_srchashsize	VNET(pf_srchashsize)
SYSCTL_VNET_UINT(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN,
    &VNET_NAME(pf_srchashsize), 0, "Size of pf(4) source nodes hashtable");

VNET_DEFINE(void *, pf_swi_cookie);

VNET_DEFINE(uint32_t, pf_hashseed);
#define	V_pf_hashseed	VNET(pf_hashseed)
static __inline uint32_t
pf_hashkey(struct pf_state_key *sk)
	h = jenkins_hash32((uint32_t *)sk,
	    sizeof(struct pf_state_key_cmp)/sizeof(uint32_t),

	return (h & V_pf_hashmask);

static __inline uint32_t
pf_hashsrc(struct pf_addr *addr, sa_family_t af)
		h = jenkins_hash32((uint32_t *)&addr->v4,
		    sizeof(addr->v4)/sizeof(uint32_t), V_pf_hashseed);
		h = jenkins_hash32((uint32_t *)&addr->v6,
		    sizeof(addr->v6)/sizeof(uint32_t), V_pf_hashseed);
		panic("%s: unknown address family %u", __func__, af);

	return (h & V_pf_srchashmask);

pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
		dst->addr32[0] = src->addr32[0];
		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];

pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = time_uptime;

pf_add_threshold(struct pf_threshold *threshold)
	u_int32_t t = time_uptime, diff = t - threshold->last;

	if (diff >= threshold->seconds)
		threshold->count = 0;
		threshold->count -= threshold->count * diff /
	threshold->count += PF_THRESHOLD_MULT;

pf_check_threshold(struct pf_threshold *threshold)
	return (threshold->count > threshold->limit);
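/*
 * Worked example (illustrative, assuming PF_THRESHOLD_MULT is 1000 as in
 * pfvar.h): "max-src-conn-rate 10/5" makes pf_init_threshold() store
 * limit = 10 * 1000 and seconds = 5.  Every tracked connection adds 1000
 * to count, while the linear decay above removes count * diff / seconds
 * for the diff seconds elapsed since the last update.  With count == 8000
 * and diff == 2, the decay subtracts 8000 * 2 / 5 == 3200 before the new
 * connection is counted; pf_check_threshold() fires once count > 10000.
 */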
pf_src_connlimit(struct pf_state **state)
	struct pf_overload_entry *pfoe;

	PF_STATE_LOCK_ASSERT(*state);

	(*state)->src_node->conn++;
	(*state)->src.tcp_est = 1;
	pf_add_threshold(&(*state)->src_node->conn_rate);

	if ((*state)->rule.ptr->max_src_conn &&
	    (*state)->rule.ptr->max_src_conn <
	    (*state)->src_node->conn) {
		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONN], 1);

	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONNRATE], 1);

	/* Kill this state. */
	(*state)->timeout = PFTM_PURGE;
	(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;

	if ((*state)->rule.ptr->overload_tbl == NULL)

	/* Schedule overloading and flushing task. */
	pfoe = malloc(sizeof(*pfoe), M_PFTEMP, M_NOWAIT);
		return (1);	/* too bad :( */

	bcopy(&(*state)->src_node->addr, &pfoe->addr, sizeof(pfoe->addr));
	pfoe->af = (*state)->key[PF_SK_WIRE]->af;
	pfoe->rule = (*state)->rule.ptr;
	pfoe->dir = (*state)->direction;
	SLIST_INSERT_HEAD(&V_pf_overloadqueue, pfoe, next);
	PF_OVERLOADQ_UNLOCK();
	taskqueue_enqueue(taskqueue_swi, &V_pf_overloadtask);
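/*
 * Illustrative note: the table insert and the flush walk over the whole
 * state table are too heavy to run here with the state lock held, so
 * pf_src_connlimit() only records address/af/rule/direction in a work
 * item and lets taskqueue_swi run pf_overload_task() later in a context
 * where it may take the hash row locks itself.
 */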
pf_overload_task(void *v, int pending)
	struct pf_overload_head queue;
	struct pf_overload_entry *pfoe, *pfoe1;

	CURVNET_SET((struct vnet *)v);
	queue = V_pf_overloadqueue;
	SLIST_INIT(&V_pf_overloadqueue);
	PF_OVERLOADQ_UNLOCK();

	bzero(&p, sizeof(p));
	SLIST_FOREACH(pfoe, &queue, next) {
		counter_u64_add(V_pf_status.lcounters[LCNT_OVERLOAD_TABLE], 1);
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("%s: blocking address ", __func__);
			pf_print_host(&pfoe->addr, 0, pfoe->af);

		p.pfra_af = pfoe->af;
			p.pfra_ip4addr = pfoe->addr.v4;
			p.pfra_ip6addr = pfoe->addr.v6;

		pfr_insert_kentry(pfoe->rule->overload_tbl, &p, time_second);
	 * Remove the entries that don't need flushing.
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		if (pfoe->rule->flush == 0) {
			SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next);
			free(pfoe, M_PFTEMP);
			    V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH], 1);

	/* If nothing to flush, return. */
	if (SLIST_EMPTY(&queue)) {

	for (int i = 0; i <= V_pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_state_key *sk;

		LIST_FOREACH(s, &ih->states, entry) {
			sk = s->key[PF_SK_WIRE];
			SLIST_FOREACH(pfoe, &queue, next)
				if (sk->af == pfoe->af &&
				    ((pfoe->rule->flush & PF_FLUSH_GLOBAL) ||
				    pfoe->rule == s->rule.ptr) &&
				    ((pfoe->dir == PF_OUT &&
				    PF_AEQ(&pfoe->addr, &sk->addr[1], sk->af)) ||
				    (pfoe->dir == PF_IN &&
				    PF_AEQ(&pfoe->addr, &sk->addr[0], sk->af)))) {
					s->timeout = PFTM_PURGE;
					s->src.state = s->dst.state = TCPS_CLOSED;
		PF_HASHROW_UNLOCK(ih);

	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		free(pfoe, M_PFTEMP);
	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("%s: %u states killed", __func__, killed);
/*
 * Can return locked on failure, so that we can consistently
 * allocate and insert a new one.
 */
pf_find_src_node(struct pf_addr *src, struct pf_rule *rule, sa_family_t af,
	struct pf_srchash *sh;
	struct pf_src_node *n;

	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_SEARCH], 1);

	sh = &V_pf_srchash[pf_hashsrc(src, af)];
	LIST_FOREACH(n, &sh->nodes, entry)
		if (n->rule.ptr == rule && n->af == af &&
		    ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) ||
		    (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0)))
	if (n != NULL || returnlocked == 0)
		PF_HASHROW_UNLOCK(sh);

pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
    struct pf_addr *src, sa_family_t af)
	KASSERT((rule->rule_flag & PFRULE_RULESRCTRACK ||
	    rule->rpool.opts & PF_POOL_STICKYADDR),
	    ("%s for non-tracking rule %p", __func__, rule));

	*sn = pf_find_src_node(src, rule, af, 1);
		struct pf_srchash *sh = &V_pf_srchash[pf_hashsrc(src, af)];

		PF_HASHROW_ASSERT(sh);

		if (!rule->max_src_nodes ||
		    counter_u64_fetch(rule->src_nodes) < rule->max_src_nodes)
			(*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO);
			counter_u64_add(V_pf_status.lcounters[LCNT_SRCNODES],
			PF_HASHROW_UNLOCK(sh);

		pf_init_threshold(&(*sn)->conn_rate,
		    rule->max_src_conn_rate.limit,
		    rule->max_src_conn_rate.seconds);

		(*sn)->rule.ptr = rule;
		PF_ACPY(&(*sn)->addr, src, af);
		LIST_INSERT_HEAD(&sh->nodes, *sn, entry);
		(*sn)->creation = time_uptime;
		(*sn)->ruletype = rule->action;
		if ((*sn)->rule.ptr != NULL)
			counter_u64_add((*sn)->rule.ptr->src_nodes, 1);
		PF_HASHROW_UNLOCK(sh);
		counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_INSERT], 1);
		if (rule->max_src_states &&
		    (*sn)->states >= rule->max_src_states) {
			counter_u64_add(V_pf_status.lcounters[LCNT_SRCSTATES],
pf_unlink_src_node_locked(struct pf_src_node *src)
	struct pf_srchash *sh;

	sh = &V_pf_srchash[pf_hashsrc(&src->addr, src->af)];
	PF_HASHROW_ASSERT(sh);

	LIST_REMOVE(src, entry);
		counter_u64_add(src->rule.ptr->src_nodes, -1);
	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);

pf_unlink_src_node(struct pf_src_node *src)
	struct pf_srchash *sh;

	sh = &V_pf_srchash[pf_hashsrc(&src->addr, src->af)];
	pf_unlink_src_node_locked(src);
	PF_HASHROW_UNLOCK(sh);

pf_free_src_node(struct pf_src_node *sn)
	KASSERT(sn->states == 0, ("%s: %p has refs", __func__, sn));
	uma_zfree(V_pf_sources_z, sn);

pf_free_src_nodes(struct pf_src_node_list *head)
	struct pf_src_node *sn, *tmp;

	LIST_FOREACH_SAFE(sn, head, entry, tmp) {
		pf_free_src_node(sn);
	pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) +
	    sizeof(struct pf_mtag), NULL, NULL, pf_mtag_uminit, NULL,

/* Per-vnet data storage structures initialization. */
	struct pf_keyhash	*kh;
	struct pf_idhash	*ih;
	struct pf_srchash	*sh;

	TUNABLE_ULONG_FETCH("net.pf.states_hashsize", &V_pf_hashsize);
	if (V_pf_hashsize == 0 || !powerof2(V_pf_hashsize))
		V_pf_hashsize = PF_HASHSIZ;
	TUNABLE_ULONG_FETCH("net.pf.source_nodes_hashsize", &V_pf_srchashsize);
	if (V_pf_srchashsize == 0 || !powerof2(V_pf_srchashsize))
		V_pf_srchashsize = PF_HASHSIZ / 4;
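	/*
	 * Illustrative note: both sizes must be powers of two so that
	 * pf_hashkey() and pf_hashsrc() can reduce a 32-bit Jenkins hash
	 * to a slot index with a simple mask (size - 1) instead of a
	 * modulo, e.g. hash 0x1234abcd into 32768 slots is
	 * 0x1234abcd & 0x7fff == 0x2bcd.
	 */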
	V_pf_hashseed = arc4random();

	/* States and state keys storage. */
	V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_state),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z;
	uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT);
	uma_zone_set_warning(V_pf_state_z, "PF states limit reached");

	V_pf_state_key_z = uma_zcreate("pf state keys",
	    sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL,
	V_pf_keyhash = malloc(V_pf_hashsize * sizeof(struct pf_keyhash),
	    M_PFHASH, M_WAITOK | M_ZERO);
	V_pf_idhash = malloc(V_pf_hashsize * sizeof(struct pf_idhash),
	    M_PFHASH, M_WAITOK | M_ZERO);
	V_pf_hashmask = V_pf_hashsize - 1;
	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask;
		mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK);
		mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF);

	V_pf_sources_z = uma_zcreate("pf source nodes",
	    sizeof(struct pf_src_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z;
	uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT);
	uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached");
	V_pf_srchash = malloc(V_pf_srchashsize * sizeof(struct pf_srchash),
	    M_PFHASH, M_WAITOK|M_ZERO);
	V_pf_srchashmask = V_pf_srchashsize - 1;
	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++)
		mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);

	TAILQ_INIT(&V_pf_altqs[0]);
	TAILQ_INIT(&V_pf_altqs[1]);
	TAILQ_INIT(&V_pf_pabuf);
	V_pf_altqs_active = &V_pf_altqs[0];
	V_pf_altqs_inactive = &V_pf_altqs[1];

	/* Send & overload+flush queues. */
	STAILQ_INIT(&V_pf_sendqueue);
	SLIST_INIT(&V_pf_overloadqueue);
	TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, curvnet);
	mtx_init(&pf_sendqueue_mtx, "pf send queue", NULL, MTX_DEF);
	mtx_init(&pf_overloadqueue_mtx, "pf overload/flush queue", NULL,
	/* Rules that are unlinked but may still be referenced. */
	TAILQ_INIT(&V_pf_unlinked_rules);
	mtx_init(&pf_unlnkdrules_mtx, "pf unlinked rules", NULL, MTX_DEF);
	uma_zdestroy(pf_mtag_z);

	struct pf_keyhash	*kh;
	struct pf_idhash	*ih;
	struct pf_srchash	*sh;
	struct pf_send_entry	*pfse, *next;

	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask;
		KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
		KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty",
		mtx_destroy(&kh->lock);
		mtx_destroy(&ih->lock);
	free(V_pf_keyhash, M_PFHASH);
	free(V_pf_idhash, M_PFHASH);

	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) {
		KASSERT(LIST_EMPTY(&sh->nodes),
		    ("%s: source node hash not empty", __func__));
		mtx_destroy(&sh->lock);
	free(V_pf_srchash, M_PFHASH);

	STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) {
		m_freem(pfse->pfse_m);
		free(pfse, M_PFTEMP);

	mtx_destroy(&pf_sendqueue_mtx);
	mtx_destroy(&pf_overloadqueue_mtx);
	mtx_destroy(&pf_unlnkdrules_mtx);

	uma_zdestroy(V_pf_sources_z);
	uma_zdestroy(V_pf_state_z);
	uma_zdestroy(V_pf_state_key_z);

pf_mtag_uminit(void *mem, int size, int how)
	t = (struct m_tag *)mem;
	t->m_tag_cookie = MTAG_ABI_COMPAT;
	t->m_tag_id = PACKET_TAG_PF;
	t->m_tag_len = sizeof(struct pf_mtag);
	t->m_tag_free = pf_mtag_free;

pf_mtag_free(struct m_tag *t)
	uma_zfree(pf_mtag_z, t);

pf_get_mtag(struct mbuf *m)
	if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL)
		return ((struct pf_mtag *)(mtag + 1));

	mtag = uma_zalloc(pf_mtag_z, M_NOWAIT);
	bzero(mtag + 1, sizeof(struct pf_mtag));
	m_tag_prepend(m, mtag);

	return ((struct pf_mtag *)(mtag + 1));
pf_state_key_attach(struct pf_state_key *skw, struct pf_state_key *sks,
	struct pf_keyhash	*khs, *khw, *kh;
	struct pf_state_key	*sk, *cur;
	struct pf_state		*si, *olds = NULL;

	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
	KASSERT(s->key[PF_SK_WIRE] == NULL, ("%s: state has key", __func__));
	KASSERT(s->key[PF_SK_STACK] == NULL, ("%s: state has key", __func__));

	/*
	 * We need to lock hash slots of both keys. To avoid deadlock
	 * we always lock the slot with lower address first. Unlock order
	 * isn't important.
	 *
	 * We also need to lock ID hash slot before dropping key
	 * locks. On success we return with ID hash slot locked.
	 */
		khs = khw = &V_pf_keyhash[pf_hashkey(skw)];
		PF_HASHROW_LOCK(khs);
		khs = &V_pf_keyhash[pf_hashkey(sks)];
		khw = &V_pf_keyhash[pf_hashkey(skw)];
			PF_HASHROW_LOCK(khs);
		} else if (khs < khw) {
			PF_HASHROW_LOCK(khs);
			PF_HASHROW_LOCK(khw);
			PF_HASHROW_LOCK(khw);
			PF_HASHROW_LOCK(khs);

#define	KEYS_UNLOCK()	do {			\
		PF_HASHROW_UNLOCK(khs);		\
		PF_HASHROW_UNLOCK(khw);		\
		PF_HASHROW_UNLOCK(khs);		\
	/*
	 * First run: start with wire key.
	 */
	LIST_FOREACH(cur, &kh->keys, entry)
		if (bcmp(cur, sk, sizeof(struct pf_state_key_cmp)) == 0)

		/* Key exists. Check for same kif, if none, add to key. */
		TAILQ_FOREACH(si, &cur->states[idx], key_list[idx]) {
			struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(si)];

			if (si->kif == s->kif &&
			    si->direction == s->direction) {
				if (sk->proto == IPPROTO_TCP &&
				    si->src.state >= TCPS_FIN_WAIT_2 &&
				    si->dst.state >= TCPS_FIN_WAIT_2) {
					/*
					 * New state matches an old >FIN_WAIT_2
					 * state. We can't drop key hash locks,
					 * thus we can't unlink it properly.
					 *
					 * As a workaround we drop it into
					 * TCPS_CLOSED state, schedule purge
					 * ASAP and push it into the very end
					 * of the slot TAILQ, so that it won't
					 * conflict with our new state.
					 */
					si->src.state = si->dst.state =
					si->timeout = PFTM_PURGE;
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						printf("pf: %s key attach "
						    (idx == PF_SK_WIRE) ?
						pf_print_state_parts(s,
						    (idx == PF_SK_WIRE) ?
						    (idx == PF_SK_STACK) ?
						printf(", existing: ");
						pf_print_state_parts(si,
						    (idx == PF_SK_WIRE) ?
						    (idx == PF_SK_STACK) ?
					PF_HASHROW_UNLOCK(ih);
					uma_zfree(V_pf_state_key_z, sk);
					if (idx == PF_SK_STACK)
					return (EEXIST); /* collision! */
			PF_HASHROW_UNLOCK(ih);
		uma_zfree(V_pf_state_key_z, sk);
		LIST_INSERT_HEAD(&kh->keys, sk, entry);

	/* List is sorted, if-bound states before floating. */
	if (s->kif == V_pfi_all)
		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]);
		TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]);
		TAILQ_REMOVE(&s->key[idx]->states[idx], olds, key_list[idx]);
		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], olds,
	 * Attach done. Now decide whether (and how) we should
	 * attach a second key.
		s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
	} else if (sks != NULL) {
		/*
		 * Continue attaching with stack key.
		 */
	KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL,
	    ("%s failure", __func__));
pf_detach_state(struct pf_state *s)
	struct pf_state_key *sks = s->key[PF_SK_STACK];
	struct pf_keyhash *kh;

		kh = &V_pf_keyhash[pf_hashkey(sks)];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_STACK] != NULL)
			pf_state_key_detach(s, PF_SK_STACK);
		/*
		 * If both point to same key, then we are done.
		 */
		if (sks == s->key[PF_SK_WIRE]) {
			pf_state_key_detach(s, PF_SK_WIRE);
			PF_HASHROW_UNLOCK(kh);
		PF_HASHROW_UNLOCK(kh);

	if (s->key[PF_SK_WIRE] != NULL) {
		kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_WIRE] != NULL)
			pf_state_key_detach(s, PF_SK_WIRE);
		PF_HASHROW_UNLOCK(kh);

pf_state_key_detach(struct pf_state *s, int idx)
	struct pf_state_key *sk = s->key[idx];
	struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)];

	PF_HASHROW_ASSERT(kh);
	TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]);

	if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) {
		LIST_REMOVE(sk, entry);
		uma_zfree(V_pf_state_key_z, sk);
pf_state_key_ctor(void *mem, int size, void *arg, int flags)
	struct pf_state_key *sk = mem;

	bzero(sk, sizeof(struct pf_state_key_cmp));
	TAILQ_INIT(&sk->states[PF_SK_WIRE]);
	TAILQ_INIT(&sk->states[PF_SK_STACK]);

struct pf_state_key *
pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr,
    struct pf_addr *daddr, u_int16_t sport, u_int16_t dport)
	struct pf_state_key *sk;

	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
	PF_ACPY(&sk->addr[pd->sidx], saddr, pd->af);
	PF_ACPY(&sk->addr[pd->didx], daddr, pd->af);
	sk->port[pd->sidx] = sport;
	sk->port[pd->didx] = dport;
	sk->proto = pd->proto;

struct pf_state_key *
pf_state_key_clone(struct pf_state_key *orig)
	struct pf_state_key *sk;

	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
	bcopy(orig, sk, sizeof(struct pf_state_key_cmp));
pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
    struct pf_state_key *sks, struct pf_state *s)
	struct pf_idhash *ih;
	struct pf_state *cur;

	KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]),
	    ("%s: sks not pristine", __func__));
	KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]),
	    ("%s: skw not pristine", __func__));
	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));

	if (s->id == 0 && s->creatorid == 0) {
		/* XXX: should be atomic, but probability of collision low */
		if ((s->id = V_pf_stateid[curcpu]++) == PFID_MAXID)
			V_pf_stateid[curcpu] = 1;
		s->id |= (uint64_t )curcpu << PFID_CPUSHIFT;
		s->id = htobe64(s->id);
		s->creatorid = V_pf_status.hostid;

	/* Returns with ID locked on success. */
	if ((error = pf_state_key_attach(skw, sks, s)) != 0)

	ih = &V_pf_idhash[PF_IDHASH(s)];
	PF_HASHROW_ASSERT(ih);
	LIST_FOREACH(cur, &ih->states, entry)
		if (cur->id == s->id && cur->creatorid == s->creatorid)

		PF_HASHROW_UNLOCK(ih);
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("pf: state ID collision: "
			    "id: %016llx creatorid: %08x\n",
			    (unsigned long long)be64toh(s->id),
			    ntohl(s->creatorid));

	LIST_INSERT_HEAD(&ih->states, s, entry);
	/* One for keys, one for ID hash. */
	refcount_init(&s->refs, 2);

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_INSERT], 1);
	if (pfsync_insert_state_ptr != NULL)
		pfsync_insert_state_ptr(s);

	/* Returns locked. */
/*
 * Find state by ID: returns with locked row on success.
 */
pf_find_state_byid(uint64_t id, uint32_t creatorid)
	struct pf_idhash *ih;

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	ih = &V_pf_idhash[(be64toh(id) % (V_pf_hashmask + 1))];

	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(s, &ih->states, entry)
		if (s->id == id && s->creatorid == creatorid)

		PF_HASHROW_UNLOCK(ih);

/*
 * Find state by key.
 * Returns with ID hash slot locked on success.
 */
static struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
	struct pf_keyhash	*kh;
	struct pf_state_key	*sk;

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)

		PF_HASHROW_UNLOCK(kh);

	idx = (dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK);

	/* List is sorted, if-bound states before floating ones. */
	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx])
		if (s->kif == V_pfi_all || s->kif == kif) {
			PF_HASHROW_UNLOCK(kh);
			if (s->timeout >= PFTM_MAX) {
				 * State is either being processed by
				 * pf_unlink_state() in another thread, or
				 * is scheduled for immediate expiry.
	PF_HASHROW_UNLOCK(kh);

pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
	struct pf_keyhash	*kh;
	struct pf_state_key	*sk;
	struct pf_state		*s, *ret = NULL;

	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)

		PF_HASHROW_UNLOCK(kh);
		panic("%s: dir %u", __func__, dir);

	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) {
			PF_HASHROW_UNLOCK(kh);

	PF_HASHROW_UNLOCK(kh);

/* END state table stuff */

pf_send(struct pf_send_entry *pfse)
	STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next);
	swi_sched(V_pf_swi_cookie, 0);
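/*
 * Illustrative note: pf_send() only enqueues and kicks the software
 * interrupt; pf_intr() below grabs the whole pending list in one swap
 * under PF_SENDQ_LOCK() and then transmits from its private copy, so
 * ip_output()/ip6_output() never run inside the hot path that built
 * the reply packet.
 */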
	struct pf_send_head queue;
	struct pf_send_entry *pfse, *next;

	CURVNET_SET((struct vnet *)v);
	queue = V_pf_sendqueue;
	STAILQ_INIT(&V_pf_sendqueue);

	STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) {
		switch (pfse->pfse_type) {
			ip_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL);
			icmp_error(pfse->pfse_m, pfse->pfse_icmp_type,
			    pfse->pfse_icmp_code, 0, pfse->pfse_icmp_mtu);
			ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL,
			icmp6_error(pfse->pfse_m, pfse->pfse_icmp_type,
			    pfse->pfse_icmp_code, pfse->pfse_icmp_mtu);
			panic("%s: unknown type", __func__);
		free(pfse, M_PFTEMP);
pf_purge_thread(void *v)

	CURVNET_SET((struct vnet *)v);

		rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftm", hz / 10);

		if (V_pf_end_threads) {
			 * To clean up all kifs and rules we need
			 * two runs: the first one clears reference
			 * flags, so that pf_purge_expired_states()
			 * doesn't raise them again, and the second
			 * run frees.
			pf_purge_unlinked_rules();

			/*
			 * Now purge everything.
			 */
			pf_purge_expired_states(0, V_pf_hashmask);
			pf_purge_expired_fragments();
			pf_purge_expired_src_nodes();

			/*
			 * Now all kifs & rules should be unreferenced,
			 * thus should be successfully freed.
			 */
			pf_purge_unlinked_rules();

			/*
			 * Announce success and exit.
			 */
			wakeup(pf_purge_thread);

		/* Process 1/interval fraction of the state table every run. */
		idx = pf_purge_expired_states(idx, V_pf_hashmask /
		    (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));

		/* Purge other expired types every PFTM_INTERVAL seconds. */
			/*
			 * Order is important:
			 * - states and src nodes reference rules
			 * - states and rules reference kifs
			 */
			pf_purge_expired_fragments();
			pf_purge_expired_src_nodes();
			pf_purge_unlinked_rules();

pf_state_expires(const struct pf_state *state)

	/* handle all PFTM_* > PFTM_MAX here */
	if (state->timeout == PFTM_PURGE)
		return (time_uptime);
	KASSERT(state->timeout != PFTM_UNLINKED,
	    ("pf_state_expires: timeout == PFTM_UNLINKED"));
	KASSERT((state->timeout < PFTM_MAX),
	    ("pf_state_expires: timeout > PFTM_MAX"));
	timeout = state->rule.ptr->timeout[state->timeout];
		timeout = V_pf_default_rule.timeout[state->timeout];
	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
		states = counter_u64_fetch(state->rule.ptr->states_cur);
		start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
		end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
		states = V_pf_status.states;
	if (end && states > start && start < end) {
			return (state->expire + timeout * (end - states) /
		return (time_uptime);
	return (state->expire + timeout);
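/*
 * Worked example (illustrative): with adaptive.start 6000, adaptive.end
 * 12000 and a nominal timeout of 60 seconds, 9000 tracked states scale
 * the timeout to 60 * (12000 - 9000) / (12000 - 6000) == 30 seconds;
 * at or beyond adaptive.end the state counts as already expired and
 * time_uptime is returned.
 */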
pf_purge_expired_src_nodes()
	struct pf_src_node_list	 freelist;
	struct pf_srchash	*sh;
	struct pf_src_node	*cur, *next;

	LIST_INIT(&freelist);
	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) {
		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next)
			if (cur->states == 0 && cur->expire <= time_uptime) {
				pf_unlink_src_node_locked(cur);
				LIST_INSERT_HEAD(&freelist, cur, entry);
			} else if (cur->rule.ptr != NULL)
				cur->rule.ptr->rule_flag |= PFRULE_REFS;
		PF_HASHROW_UNLOCK(sh);

	pf_free_src_nodes(&freelist);

	V_pf_status.src_nodes = uma_zone_get_cur(V_pf_sources_z);

pf_src_tree_remove_state(struct pf_state *s)

	if (s->src_node != NULL) {
			--s->src_node->conn;
		if (--s->src_node->states == 0) {
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
				    V_pf_default_rule.timeout[PFTM_SRC_NODE];
			s->src_node->expire = time_uptime + timeout;

	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
		if (--s->nat_src_node->states == 0) {
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
				    V_pf_default_rule.timeout[PFTM_SRC_NODE];
			s->nat_src_node->expire = time_uptime + timeout;

	s->src_node = s->nat_src_node = NULL;
 * Unlink and potentially free a state. The function may be
 * called with the ID hash row locked, but it always returns
 * unlocked, since it needs to go through key hash locking.
pf_unlink_state(struct pf_state *s, u_int flags)
	struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)];

	if ((flags & PF_ENTER_LOCKED) == 0)
		PF_HASHROW_LOCK(ih);

	PF_HASHROW_ASSERT(ih);

	if (s->timeout == PFTM_UNLINKED) {
		/*
		 * State is being processed
		 * by pf_unlink_state() in
		 */
		PF_HASHROW_UNLOCK(ih);
		return (0);	/* XXXGL: undefined actually */

	if (s->src.state == PF_TCPS_PROXY_DST) {
		/* XXX wire key the right one? */
		pf_send_tcp(NULL, s->rule.ptr, s->key[PF_SK_WIRE]->af,
		    &s->key[PF_SK_WIRE]->addr[1],
		    &s->key[PF_SK_WIRE]->addr[0],
		    s->key[PF_SK_WIRE]->port[1],
		    s->key[PF_SK_WIRE]->port[0],
		    s->src.seqhi, s->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, s->tag, NULL);

	LIST_REMOVE(s, entry);
	pf_src_tree_remove_state(s);

	if (pfsync_delete_state_ptr != NULL)
		pfsync_delete_state_ptr(s);

	STATE_DEC_COUNTERS(s);

	s->timeout = PFTM_UNLINKED;

	PF_HASHROW_UNLOCK(ih);

	refcount_release(&s->refs);

	return (pf_release_state(s));

pf_free_state(struct pf_state *cur)
	KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
	KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,

	pf_normalize_tcp_cleanup(cur);
	uma_zfree(V_pf_state_z, cur);
	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_REMOVALS], 1);
/*
 * Called only from pf_purge_thread(), thus serialized.
 */
pf_purge_expired_states(u_int i, int maxcheck)
	struct pf_idhash *ih;

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	/*
	 * Go through hash and unlink states that expire now.
	 */
	while (maxcheck > 0) {
		ih = &V_pf_idhash[i];
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (pf_state_expires(s) <= time_uptime) {
				V_pf_status.states -=
				    pf_unlink_state(s, PF_ENTER_LOCKED);
			s->rule.ptr->rule_flag |= PFRULE_REFS;
			if (s->nat_rule.ptr != NULL)
				s->nat_rule.ptr->rule_flag |= PFRULE_REFS;
			if (s->anchor.ptr != NULL)
				s->anchor.ptr->rule_flag |= PFRULE_REFS;
			s->kif->pfik_flags |= PFI_IFLAG_REFS;
				s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
		PF_HASHROW_UNLOCK(ih);

		/* Return when we hit end of hash. */
		if (++i > V_pf_hashmask) {
			V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
pf_purge_unlinked_rules()
	struct pf_rulequeue tmpq;
	struct pf_rule *r, *r1;
	 * If we have an overload task pending, we'd better
	 * skip purging this time. There is a tiny probability
	 * that the overload task references an already
	 * unlinked rule.
	PF_OVERLOADQ_LOCK();
	if (!SLIST_EMPTY(&V_pf_overloadqueue)) {
		PF_OVERLOADQ_UNLOCK();
	PF_OVERLOADQ_UNLOCK();

	/*
	 * Do naive mark-and-sweep garbage collecting of old rules.
	 * Reference flag is raised by pf_purge_expired_states()
	 * and pf_purge_expired_src_nodes().
	 *
	 * To avoid LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK,
	 * use a temporary queue.
	 */
	PF_UNLNKDRULES_LOCK();
	TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) {
		if (!(r->rule_flag & PFRULE_REFS)) {
			TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries);
			TAILQ_INSERT_TAIL(&tmpq, r, entries);
			r->rule_flag &= ~PFRULE_REFS;
	PF_UNLNKDRULES_UNLOCK();

	if (!TAILQ_EMPTY(&tmpq)) {
		TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) {
			TAILQ_REMOVE(&tmpq, r, entries);

pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
		u_int32_t a = ntohl(addr->addr32[0]);
		printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,

		u_int8_t i, curstart, curend, maxstart, maxend;
		curstart = curend = maxstart = maxend = 255;
		for (i = 0; i < 8; i++) {
			if (!addr->addr16[i]) {
				if (curstart == 255)
				if ((curend - curstart) >
				    (maxend - maxstart)) {
					maxstart = curstart;
				curstart = curend = 255;
		if ((curend - curstart) >
		    (maxend - maxstart)) {
			maxstart = curstart;
		for (i = 0; i < 8; i++) {
			if (i >= maxstart && i <= maxend) {
				b = ntohs(addr->addr16[i]);

pf_print_state(struct pf_state *s)
	pf_print_state_parts(s, NULL, NULL);

pf_print_state_parts(struct pf_state *s,
    struct pf_state_key *skwp, struct pf_state_key *sksp)
	struct pf_state_key *skw, *sks;
	u_int8_t proto, dir;

	/* Do our best to fill these, but they're skipped if NULL */
	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
	proto = skw ? skw->proto : (sks ? sks->proto : 0);
	dir = s ? s->direction : 0;

	case IPPROTO_ICMPV6:
		printf("%u", skw->proto);

		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
		pf_print_host(&skw->addr[1], skw->port[1], skw->af);

		pf_print_host(&sks->addr[0], sks->port[0], sks->af);
		pf_print_host(&sks->addr[1], sks->port[1], sks->af);

		if (proto == IPPROTO_TCP) {
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->src.seqlo, s->src.seqhi,
			    s->src.max_win, s->src.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->src.wscale & PF_WSCALE_MASK);
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->dst.seqlo, s->dst.seqhi,
			    s->dst.max_win, s->dst.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->dst.wscale & PF_WSCALE_MASK);
		printf(" %u:%u", s->src.state, s->dst.state);
pf_print_flags(u_int8_t f)

#define	PF_SET_SKIP_STEPS(i)					\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\

pf_calc_skip_steps(struct pf_rulequeue *rules)
	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];

	cur = TAILQ_FIRST(rules);
	for (i = 0; i < PF_SKIP_COUNT; ++i)
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
		if (cur->af != prev->af)
			PF_SET_SKIP_STEPS(PF_SKIP_AF);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
		if (cur->src.neg != prev->src.neg ||
		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
		if (cur->src.port[0] != prev->src.port[0] ||
		    cur->src.port[1] != prev->src.port[1] ||
		    cur->src.port_op != prev->src.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
		if (cur->dst.neg != prev->dst.neg ||
		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
		if (cur->dst.port[0] != prev->dst.port[0] ||
		    cur->dst.port[1] != prev->dst.port[1] ||
		    cur->dst.port_op != prev->dst.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
		cur = TAILQ_NEXT(cur, entries);
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
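/*
 * Illustrative example: if rules 1-50 all say "on em0", each of their
 * skip[PF_SKIP_IFP] pointers ends up pointing at rule 51, so a packet
 * that fails the interface test on rule 1 jumps straight past the whole
 * run instead of re-evaluating 49 identical interface checks.
 */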
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
	if (aw1->type != aw2->type)
	switch (aw1->type) {
	case PF_ADDR_ADDRMASK:
		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
	case PF_ADDR_DYNIFTL:
		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		return (aw1->p.tbl != aw2->p.tbl);
		printf("invalid address type: %d\n", aw1->type);
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
	l = cksum + old - new;
	l = (l >> 16) + (l & 65535);
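	/*
	 * Illustrative note: this is the classic incremental Internet
	 * checksum update (in the spirit of RFC 1624): add the old 16-bit
	 * word, subtract the new one, then fold the carry above bit 15
	 * back into the low word, so changing one field never forces a
	 * full checksum recomputation.  The udp argument exists because a
	 * UDP checksum of 0 means "no checksum" and must not be produced.
	 */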
pf_change_ap(struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc,
    struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af)
	PF_ACPY(&ao, a, af);
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    ao.addr16[0], an->addr16[0], 0),
		    ao.addr16[1], an->addr16[1], 0);
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    ao.addr16[2], an->addr16[2], u),
		    ao.addr16[3], an->addr16[3], u),
		    ao.addr16[4], an->addr16[4], u),
		    ao.addr16[5], an->addr16[5], u),
		    ao.addr16[6], an->addr16[6], u),
		    ao.addr16[7], an->addr16[7], u),
/* Changes a u_int32_t. Uses a void * so there are no align restrictions */
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));
	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
	    ao % 65536, an % 65536, u);
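	/*
	 * Illustrative note: the 32-bit change is fed to pf_cksum_fixup()
	 * as two 16-bit fixups -- ao / 65536 is the old high word and
	 * ao % 65536 the old low word (likewise for an) -- matching the
	 * 16-bit units the Internet checksum is computed over.
	 */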
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
	PF_ACPY(&ao, a, AF_INET6);
	PF_ACPY(a, an, AF_INET6);

	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(*c,
	    ao.addr16[0], an->addr16[0], u),
	    ao.addr16[1], an->addr16[1], u),
	    ao.addr16[2], an->addr16[2], u),
	    ao.addr16[3], an->addr16[3], u),
	    ao.addr16[4], an->addr16[4], u),
	    ao.addr16[5], an->addr16[5], u),
	    ao.addr16[6], an->addr16[6], u),
	    ao.addr16[7], an->addr16[7], u);
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
	struct pf_addr	oia, ooa;

	PF_ACPY(&oia, ia, af);
		PF_ACPY(&ooa, oa, af);

	/* Change inner protocol port, fix inner protocol checksum. */
		u_int16_t oip = *ip;
			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);

	/* Change inner ip address, fix inner ip and icmp checksums. */
	PF_ACPY(ia, na, af);
		u_int32_t oh2c = *h2c;

		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], u),
		    oia.addr16[1], ia->addr16[1], u),
		    oia.addr16[2], ia->addr16[2], u),
		    oia.addr16[3], ia->addr16[3], u),
		    oia.addr16[4], ia->addr16[4], u),
		    oia.addr16[5], ia->addr16[5], u),
		    oia.addr16[6], ia->addr16[6], u),
		    oia.addr16[7], ia->addr16[7], u);

	/* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
		PF_ACPY(oa, na, af);
			*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
			    ooa.addr16[0], oa->addr16[0], 0),
			    ooa.addr16[1], oa->addr16[1], 0);
			*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(*ic,
			    ooa.addr16[0], oa->addr16[0], u),
			    ooa.addr16[1], oa->addr16[1], u),
			    ooa.addr16[2], oa->addr16[2], u),
			    ooa.addr16[3], oa->addr16[3], u),
			    ooa.addr16[4], oa->addr16[4], u),
			    ooa.addr16[5], oa->addr16[5], u),
			    ooa.addr16[6], oa->addr16[6], u),
			    ooa.addr16[7], oa->addr16[7], u);
/*
 * Need to modulate the sequence numbers in the TCP SACK option
 * (credits to Krzysztof Pfaff for report and patch)
 */
pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *dst)
	int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
	u_int8_t opts[TCP_MAXOLEN], *opt = opts;
	int copyback = 0, i, olen;
	struct sackblk sack;

#define	TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
	if (hlen < TCPOLEN_SACKLEN ||
	    !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))

	while (hlen >= TCPOLEN_SACKLEN) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
			if (olen >= TCPOLEN_SACKLEN) {
				for (i = 2; i + TCPOLEN_SACK <= olen;
				    i += TCPOLEN_SACK) {
					memcpy(&sack, &opt[i], sizeof(sack));
					pf_change_a(&sack.start, &th->th_sum,
					    htonl(ntohl(sack.start) -
					pf_change_a(&sack.end, &th->th_sum,
					    htonl(ntohl(sack.end) -
					memcpy(&opt[i], &sack, sizeof(sack));

		m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts);
pf_send_tcp(struct mbuf *replyto, const struct pf_rule *r, sa_family_t af,
    const struct pf_addr *saddr, const struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
    u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
    u_int16_t rtag, struct ifnet *ifp)
	struct pf_send_entry	*pfse;
	struct ip		*h = NULL;
	struct ip6_hdr		*h6 = NULL;
	struct pf_mtag		*pf_mtag;

	/* maximum segment size tcp option */
	tlen = sizeof(struct tcphdr);
		len = sizeof(struct ip) + tlen;
		len = sizeof(struct ip6_hdr) + tlen;
		panic("%s: unsupported af %d", __func__, af);

	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
	m = m_gethdr(M_NOWAIT, MT_DATA);
		free(pfse, M_PFTEMP);
	mac_netinet_firewall_send(m);
	if ((pf_mtag = pf_get_mtag(m)) == NULL) {
		free(pfse, M_PFTEMP);
		m->m_flags |= M_SKIP_FIREWALL;
	pf_mtag->tag = rtag;

	if (r != NULL && r->rtableid >= 0)
		M_SETFIB(m, r->rtableid);

	if (r != NULL && r->qid) {
		pf_mtag->qid = r->qid;
		/* add hints for ecn */
		pf_mtag->hdr = mtod(m, struct ip *);

	m->m_data += max_linkhdr;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	bzero(m->m_data, len);
		h = mtod(m, struct ip *);

		/* IP header fields included in the TCP checksum */
		h->ip_p = IPPROTO_TCP;
		h->ip_len = htons(tlen);
		h->ip_src.s_addr = saddr->v4.s_addr;
		h->ip_dst.s_addr = daddr->v4.s_addr;

		th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
		h6 = mtod(m, struct ip6_hdr *);

		/* IP header fields included in the TCP checksum */
		h6->ip6_nxt = IPPROTO_TCP;
		h6->ip6_plen = htons(tlen);
		memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
		memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));

		th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));

	th->th_sport = sport;
	th->th_dport = dport;
	th->th_seq = htonl(seq);
	th->th_ack = htonl(ack);
	th->th_off = tlen >> 2;
	th->th_flags = flags;
	th->th_win = htons(win);

		opt = (char *)(th + 1);
		opt[0] = TCPOPT_MAXSEG;
		bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);

		th->th_sum = in_cksum(m, len);

		/* Finish the IP header */
		h->ip_hl = sizeof(*h) >> 2;
		h->ip_tos = IPTOS_LOWDELAY;
		h->ip_off = htons(V_path_mtu_discovery ? IP_DF : 0);
		h->ip_len = htons(len);
		h->ip_ttl = ttl ? ttl : V_ip_defttl;

		pfse->pfse_type = PFSE_IP;

		th->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr), tlen);

		h6->ip6_vfc |= IPV6_VERSION;
		h6->ip6_hlim = IPV6_DEFHLIM;

		pfse->pfse_type = PFSE_IP6;
pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
	struct pf_send_entry	*pfse;
	struct pf_mtag		*pf_mtag;

	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);

	if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) {
		free(pfse, M_PFTEMP);

	if ((pf_mtag = pf_get_mtag(m0)) == NULL) {
		free(pfse, M_PFTEMP);

	m0->m_flags |= M_SKIP_FIREWALL;

	if (r->rtableid >= 0)
		M_SETFIB(m0, r->rtableid);

		pf_mtag->qid = r->qid;
		/* add hints for ecn */
		pf_mtag->hdr = mtod(m0, struct ip *);

		pfse->pfse_type = PFSE_ICMP;
		pfse->pfse_type = PFSE_ICMP6;

	pfse->pfse_icmp_type = type;
	pfse->pfse_icmp_code = code;
 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
 * If n is 0, they match if they are equal. If n is != 0, they match if they
 * are different.
 */
pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
    struct pf_addr *b, sa_family_t af)
		if ((a->addr32[0] & m->addr32[0]) ==
		    (b->addr32[0] & m->addr32[0]))
		if (((a->addr32[0] & m->addr32[0]) ==
		    (b->addr32[0] & m->addr32[0])) &&
		    ((a->addr32[1] & m->addr32[1]) ==
		    (b->addr32[1] & m->addr32[1])) &&
		    ((a->addr32[2] & m->addr32[2]) ==
		    (b->addr32[2] & m->addr32[2])) &&
		    ((a->addr32[3] & m->addr32[3]) ==
		    (b->addr32[3] & m->addr32[3])))

/*
 * Return 1 if b <= a <= e, otherwise return 0.
 */
pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
    struct pf_addr *a, sa_family_t af)
		if ((a->addr32[0] < b->addr32[0]) ||
		    (a->addr32[0] > e->addr32[0]))

		for (i = 0; i < 4; ++i)
			if (a->addr32[i] > b->addr32[i])
			else if (a->addr32[i] < b->addr32[i])
		for (i = 0; i < 4; ++i)
			if (a->addr32[i] < e->addr32[i])
			else if (a->addr32[i] > e->addr32[i])

pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
		return ((p > a1) && (p < a2));
		return ((p < a1) || (p > a2));
		return ((p >= a1) && (p <= a2));
	return (0); /* never reached */

pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
	return (pf_match(op, a1, a2, p));

pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
	if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
	return (pf_match(op, a1, a2, u));

pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
	if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
	return (pf_match(op, a1, a2, g));
pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag, int mtag)

	return ((!r->match_tag_not && r->match_tag == *tag) ||
	    (r->match_tag_not && r->match_tag != *tag));

pf_tag_packet(struct mbuf *m, struct pf_pdesc *pd, int tag)
	KASSERT(tag > 0, ("%s: tag %d", __func__, tag));

	if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(m)) == NULL))

	pd->pf_mtag->tag = tag;
#define	PF_ANCHOR_STACKSIZE	32
struct pf_anchor_stackframe {
	struct pf_ruleset	*rs;
	struct pf_rule		*r;	/* XXX: + match bit */
	struct pf_anchor	*child;

/*
 * XXX: We rely on malloc(9) returning pointer aligned addresses.
 */
#define	PF_ANCHORSTACK_MATCH	0x00000001
#define	PF_ANCHORSTACK_MASK	(PF_ANCHORSTACK_MATCH)

#define	PF_ANCHOR_MATCH(f)	((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH)
#define	PF_ANCHOR_RULE(f)	(struct pf_rule *)			\
				((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK)
#define	PF_ANCHOR_SET_MATCH(f)	do { (f)->r = (void *)			\
				((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH); \
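/*
 * Illustrative note: since malloc(9) returns suitably aligned pointers,
 * the low bit of f->r is always zero and can carry the per-frame "matched"
 * flag without enlarging the stack frame; PF_ANCHOR_RULE() masks the flag
 * back out before the rule pointer is dereferenced.
 */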
pf_step_into_anchor(struct pf_anchor_stackframe *stack, int *depth,
    struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
	struct pf_anchor_stackframe *f;

	if (*depth >= PF_ANCHOR_STACKSIZE) {
		printf("%s: anchor stack overflow on %s\n",
		    __func__, (*r)->anchor->name);
		*r = TAILQ_NEXT(*r, entries);
	} else if (*depth == 0 && a != NULL)
	f = stack + (*depth)++;
	if ((*r)->anchor_wildcard) {
		struct pf_anchor_node *parent = &(*r)->anchor->children;

		if ((f->child = RB_MIN(pf_anchor_node, parent)) == NULL) {
		*rs = &f->child->ruleset;
		*rs = &(*r)->anchor->ruleset;
	*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);

pf_step_out_of_anchor(struct pf_anchor_stackframe *stack, int *depth,
    struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
	struct pf_anchor_stackframe *f;

		f = stack + *depth - 1;
		fr = PF_ANCHOR_RULE(f);
		if (f->child != NULL) {
			struct pf_anchor_node *parent;

			/*
			 * This block traverses through
			 * a wildcard anchor.
			 */
			parent = &fr->anchor->children;
			if (match != NULL && *match) {
				/*
				 * If any of "*" matched, then
				 * "foo/ *" matched, mark frame
				 */
				PF_ANCHOR_SET_MATCH(f);
			f->child = RB_NEXT(pf_anchor_node, parent, f->child);
			if (f->child != NULL) {
				*rs = &f->child->ruleset;
				*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);

		if (*depth == 0 && a != NULL)
		if (PF_ANCHOR_MATCH(f) || (match != NULL && *match))
		*r = TAILQ_NEXT(fr, entries);
	} while (*r == NULL);
2698 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
2699 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
2704 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2705 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2709 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2710 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2711 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
2712 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
2713 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
2714 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
2715 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
2716 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
2722 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
2727 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
2731 if (addr->addr32[3] == 0xffffffff) {
2732 addr->addr32[3] = 0;
2733 if (addr->addr32[2] == 0xffffffff) {
2734 addr->addr32[2] = 0;
2735 if (addr->addr32[1] == 0xffffffff) {
2736 addr->addr32[1] = 0;
2738 htonl(ntohl(addr->addr32[0]) + 1);
2741 htonl(ntohl(addr->addr32[1]) + 1);
2744 htonl(ntohl(addr->addr32[2]) + 1);
2747 htonl(ntohl(addr->addr32[3]) + 1);
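/*
 * Illustrative sketch (userspace): the IPv6 arm above is a ripple-carry
 * increment over four network-order 32-bit words.  A byte-wise version
 * of the same operation:
 */
#if 0
#include <stdint.h>

static void
addr6_inc(uint8_t a[16])
{
	int i;

	/* Bump the least significant byte and propagate the carry. */
	for (i = 15; i >= 0; i--)
		if (++a[i] != 0)
			break;
}
/* ::ffff:ffff + 1 == ::1:0:0; an all-ones address wraps to all-zeros. */
#endif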
2754 pf_socket_lookup(int direction, struct pf_pdesc *pd, struct mbuf *m)
2756 struct pf_addr *saddr, *daddr;
2757 u_int16_t sport, dport;
2758 struct inpcbinfo *pi;
2761 pd->lookup.uid = UID_MAX;
2762 pd->lookup.gid = GID_MAX;
2764 switch (pd->proto) {
2766 if (pd->hdr.tcp == NULL)
2768 sport = pd->hdr.tcp->th_sport;
2769 dport = pd->hdr.tcp->th_dport;
2773 if (pd->hdr.udp == NULL)
2775 sport = pd->hdr.udp->uh_sport;
2776 dport = pd->hdr.udp->uh_dport;
2782 if (direction == PF_IN) {
2797 inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4,
2798 dport, INPLOOKUP_RLOCKPCB, NULL, m);
2800 inp = in_pcblookup_mbuf(pi, saddr->v4, sport,
2801 daddr->v4, dport, INPLOOKUP_WILDCARD |
2802 INPLOOKUP_RLOCKPCB, NULL, m);
2810 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6,
2811 dport, INPLOOKUP_RLOCKPCB, NULL, m);
2813 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport,
2814 &daddr->v6, dport, INPLOOKUP_WILDCARD |
2815 INPLOOKUP_RLOCKPCB, NULL, m);
2825 INP_RLOCK_ASSERT(inp);
2826 pd->lookup.uid = inp->inp_cred->cr_uid;
2827 pd->lookup.gid = inp->inp_cred->cr_groups[0];
2834 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
2838 u_int8_t *opt, optlen;
2839 u_int8_t wscale = 0;
2841 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
2842 if (hlen <= sizeof(struct tcphdr))
2844 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
2846 opt = hdr + sizeof(struct tcphdr);
2847 hlen -= sizeof(struct tcphdr);
2857 if (wscale > TCP_MAX_WINSHIFT)
2858 wscale = TCP_MAX_WINSHIFT;
2859 wscale |= PF_WSCALE_FLAG;
2874 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
2878 u_int8_t *opt, optlen;
2879 u_int16_t mss = V_tcp_mssdflt;
2881 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
2882 if (hlen <= sizeof(struct tcphdr))
2884 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
2886 opt = hdr + sizeof(struct tcphdr);
2887 hlen -= sizeof(struct tcphdr);
2888 while (hlen >= TCPOLEN_MAXSEG) {
2896 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
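/*
 * Illustrative sketch (userspace): both option walkers above follow the
 * standard TCP option encoding -- kind[1] len[1] data[len-2], where NOP
 * is a single pad byte and EOL terminates the list:
 */
#if 0
#include <stdint.h>
#include <string.h>

static int
demo_find_mss(const uint8_t *opt, int hlen, uint16_t *mss)
{
	int optlen;

	while (hlen >= 4) {		/* room for TCPOLEN_MAXSEG */
		if (opt[0] == 0)	/* TCPOPT_EOL: end of options */
			break;
		if (opt[0] == 1) {	/* TCPOPT_NOP: one-byte padding */
			opt++;
			hlen--;
			continue;
		}
		optlen = opt[1];
		if (optlen < 2 || optlen > hlen)
			break;		/* malformed length octet */
		if (opt[0] == 2 && optlen == 4) {	/* TCPOPT_MAXSEG */
			memcpy(mss, opt + 2, 2);
			return (1);	/* *mss is still in network order */
		}
		opt += optlen;
		hlen -= optlen;
	}
	return (0);
}
#endif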
2912 pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
2915 struct sockaddr_in *dst;
2919 struct sockaddr_in6 *dst6;
2920 struct route_in6 ro6;
2922 struct rtentry *rt = NULL;
2924 u_int16_t mss = V_tcp_mssdflt;
2929 hlen = sizeof(struct ip);
2930 bzero(&ro, sizeof(ro));
2931 dst = (struct sockaddr_in *)&ro.ro_dst;
2932 dst->sin_family = AF_INET;
2933 dst->sin_len = sizeof(*dst);
2934 dst->sin_addr = addr->v4;
2935 in_rtalloc_ign(&ro, 0, rtableid);
2941 hlen = sizeof(struct ip6_hdr);
2942 bzero(&ro6, sizeof(ro6));
2943 dst6 = (struct sockaddr_in6 *)&ro6.ro_dst;
2944 dst6->sin6_family = AF_INET6;
2945 dst6->sin6_len = sizeof(*dst6);
2946 dst6->sin6_addr = addr->v6;
2947 in6_rtalloc_ign(&ro6, 0, rtableid);
2953 if (rt && rt->rt_ifp) {
2954 mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr);
2955 mss = max(V_tcp_mssdflt, mss);
2958 mss = min(mss, offer);
2959 mss = max(mss, 64); /* sanity - at least max opt space */
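/*
 * Worked example (assuming a plain Ethernet route, if_mtu 1500): the
 * IPv4 arm yields 1500 - 20 - 20 = 1460 and the IPv6 arm yields
 * 1500 - 40 - 20 = 1440.  The peer's offer can only lower the result,
 * and 64 is the sanity floor noted above.
 */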
2964 pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr)
2966 struct pf_rule *r = s->rule.ptr;
2967 struct pf_src_node *sn = NULL;
2970 if (!r->rt || r->rt == PF_FASTROUTE)
2972 switch (s->key[PF_SK_WIRE]->af) {
2975 pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL, &sn);
2976 s->rt_kif = r->rpool.cur->kif;
2981 pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL, &sn);
2982 s->rt_kif = r->rpool.cur->kif;
2989 pf_tcp_iss(struct pf_pdesc *pd)
2992 u_int32_t digest[4];
2994 if (V_pf_tcp_secret_init == 0) {
2995 read_random(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret));
2996 MD5Init(&V_pf_tcp_secret_ctx);
2997 MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret,
2998 sizeof(V_pf_tcp_secret));
2999 V_pf_tcp_secret_init = 1;
3002 ctx = V_pf_tcp_secret_ctx;
3004 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short));
3005 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short));
3006 if (pd->af == AF_INET6) {
3007 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
3008 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
3010 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
3011 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
3013 MD5Final((u_char *)digest, &ctx);
3014 V_pf_tcp_iss_off += 4096;
3015 #define ISN_RANDOM_INCREMENT (4096 - 1)
3016 return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) +
3018 #undef ISN_RANDOM_INCREMENT
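/*
 * Note (sketch): this is an RFC 1948-style scheme -- ISS = M + F() --
 * with MD5 over the connection 4-tuple plus a boot-time random secret
 * as F(), and V_pf_tcp_iss_off, advanced by 4096 per generated state
 * plus a small random fuzz, as M.  Distinct 4-tuples get unrelated
 * sequence spaces, while successive states on the same 4-tuple keep
 * moving forward.
 */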
3022 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
3023 struct pfi_kif *kif, struct mbuf *m, int off, struct pf_pdesc *pd,
3024 struct pf_rule **am, struct pf_ruleset **rsm, struct inpcb *inp)
3026 struct pf_rule *nr = NULL;
3027 struct pf_addr * const saddr = pd->src;
3028 struct pf_addr * const daddr = pd->dst;
3029 sa_family_t af = pd->af;
3030 struct pf_rule *r, *a = NULL;
3031 struct pf_ruleset *ruleset = NULL;
3032 struct pf_src_node *nsn = NULL;
3033 struct tcphdr *th = pd->hdr.tcp;
3034 struct pf_state_key *sk = NULL, *nk = NULL;
3036 int rewrite = 0, hdrlen = 0;
3037 int tag = -1, rtableid = -1;
3041 u_int16_t sport = 0, dport = 0;
3042 u_int16_t bproto_sum = 0, bip_sum = 0;
3043 u_int8_t icmptype = 0, icmpcode = 0;
3044 struct pf_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
3049 INP_LOCK_ASSERT(inp);
3050 pd->lookup.uid = inp->inp_cred->cr_uid;
3051 pd->lookup.gid = inp->inp_cred->cr_groups[0];
3052 pd->lookup.done = 1;
3055 switch (pd->proto) {
3057 sport = th->th_sport;
3058 dport = th->th_dport;
3059 hdrlen = sizeof(*th);
3062 sport = pd->hdr.udp->uh_sport;
3063 dport = pd->hdr.udp->uh_dport;
3064 hdrlen = sizeof(*pd->hdr.udp);
3068 if (pd->af != AF_INET)
3070 sport = dport = pd->hdr.icmp->icmp_id;
3071 hdrlen = sizeof(*pd->hdr.icmp);
3072 icmptype = pd->hdr.icmp->icmp_type;
3073 icmpcode = pd->hdr.icmp->icmp_code;
3075 if (icmptype == ICMP_UNREACH ||
3076 icmptype == ICMP_SOURCEQUENCH ||
3077 icmptype == ICMP_REDIRECT ||
3078 icmptype == ICMP_TIMXCEED ||
3079 icmptype == ICMP_PARAMPROB)
3084 case IPPROTO_ICMPV6:
3087 sport = dport = pd->hdr.icmp6->icmp6_id;
3088 hdrlen = sizeof(*pd->hdr.icmp6);
3089 icmptype = pd->hdr.icmp6->icmp6_type;
3090 icmpcode = pd->hdr.icmp6->icmp6_code;
3092 if (icmptype == ICMP6_DST_UNREACH ||
3093 icmptype == ICMP6_PACKET_TOO_BIG ||
3094 icmptype == ICMP6_TIME_EXCEEDED ||
3095 icmptype == ICMP6_PARAM_PROB)
3100 sport = dport = hdrlen = 0;
3104 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3106 /* check packet for BINAT/NAT/RDR */
3107 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, &sk,
3108 &nk, saddr, daddr, sport, dport, anchor_stack)) != NULL) {
3109 KASSERT(sk != NULL, ("%s: null sk", __func__));
3110 KASSERT(nk != NULL, ("%s: null nk", __func__));
3113 bip_sum = *pd->ip_sum;
3115 switch (pd->proto) {
3117 bproto_sum = th->th_sum;
3118 pd->proto_sum = &th->th_sum;
3120 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3121 nk->port[pd->sidx] != sport) {
3122 pf_change_ap(saddr, &th->th_sport, pd->ip_sum,
3123 &th->th_sum, &nk->addr[pd->sidx],
3124 nk->port[pd->sidx], 0, af);
3125 pd->sport = &th->th_sport;
3126 sport = th->th_sport;
3129 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3130 nk->port[pd->didx] != dport) {
3131 pf_change_ap(daddr, &th->th_dport, pd->ip_sum,
3132 &th->th_sum, &nk->addr[pd->didx],
3133 nk->port[pd->didx], 0, af);
3134 dport = th->th_dport;
3135 pd->dport = &th->th_dport;
3140 bproto_sum = pd->hdr.udp->uh_sum;
3141 pd->proto_sum = &pd->hdr.udp->uh_sum;
3143 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3144 nk->port[pd->sidx] != sport) {
3145 pf_change_ap(saddr, &pd->hdr.udp->uh_sport,
3146 pd->ip_sum, &pd->hdr.udp->uh_sum,
3147 &nk->addr[pd->sidx],
3148 nk->port[pd->sidx], 1, af);
3149 sport = pd->hdr.udp->uh_sport;
3150 pd->sport = &pd->hdr.udp->uh_sport;
3153 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3154 nk->port[pd->didx] != dport) {
3155 pf_change_ap(daddr, &pd->hdr.udp->uh_dport,
3156 pd->ip_sum, &pd->hdr.udp->uh_sum,
3157 &nk->addr[pd->didx],
3158 nk->port[pd->didx], 1, af);
3159 dport = pd->hdr.udp->uh_dport;
3160 pd->dport = &pd->hdr.udp->uh_dport;
3166 nk->port[0] = nk->port[1];
3167 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
3168 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3169 nk->addr[pd->sidx].v4.s_addr, 0);
3171 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
3172 pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
3173 nk->addr[pd->didx].v4.s_addr, 0);
3175 if (nk->port[1] != pd->hdr.icmp->icmp_id) {
3176 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
3177 pd->hdr.icmp->icmp_cksum, sport,
3179 pd->hdr.icmp->icmp_id = nk->port[1];
3180 pd->sport = &pd->hdr.icmp->icmp_id;
3182 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
3186 case IPPROTO_ICMPV6:
3187 nk->port[0] = nk->port[1];
3188 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3189 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3190 &nk->addr[pd->sidx], 0);
3192 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3193 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3194 &nk->addr[pd->didx], 0);
3203 &nk->addr[pd->sidx], AF_INET))
3204 pf_change_a(&saddr->v4.s_addr,
3206 nk->addr[pd->sidx].v4.s_addr, 0);
3209 &nk->addr[pd->didx], AF_INET))
3210 pf_change_a(&daddr->v4.s_addr,
3212 nk->addr[pd->didx].v4.s_addr, 0);
3218 &nk->addr[pd->sidx], AF_INET6))
3219 PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3222 &nk->addr[pd->didx], AF_INET6))
3223 PF_ACPY(daddr, &nk->addr[pd->didx], af);
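/*
 * Note: the rule walk below relies on precomputed skip steps --
 * r->skip[PF_SKIP_*] points at the next rule whose corresponding
 * criterion differs, so a run of rules sharing the same interface,
 * direction, AF, protocol or address is stepped over in one hop
 * instead of being tested rule by rule.
 */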
3236 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3237 r = r->skip[PF_SKIP_IFP].ptr;
3238 else if (r->direction && r->direction != direction)
3239 r = r->skip[PF_SKIP_DIR].ptr;
3240 else if (r->af && r->af != af)
3241 r = r->skip[PF_SKIP_AF].ptr;
3242 else if (r->proto && r->proto != pd->proto)
3243 r = r->skip[PF_SKIP_PROTO].ptr;
3244 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3245 r->src.neg, kif, M_GETFIB(m)))
3246 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3247 /* tcp/udp only. port_op always 0 in other cases */
3248 else if (r->src.port_op && !pf_match_port(r->src.port_op,
3249 r->src.port[0], r->src.port[1], sport))
3250 r = r->skip[PF_SKIP_SRC_PORT].ptr;
3251 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3252 r->dst.neg, NULL, M_GETFIB(m)))
3253 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3254 /* tcp/udp only. port_op always 0 in other cases */
3255 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3256 r->dst.port[0], r->dst.port[1], dport))
3257 r = r->skip[PF_SKIP_DST_PORT].ptr;
3258 /* icmp only. type always 0 in other cases */
3259 else if (r->type && r->type != icmptype + 1)
3260 r = TAILQ_NEXT(r, entries);
3261 /* icmp only. code always 0 in other cases */
3262 else if (r->code && r->code != icmpcode + 1)
3263 r = TAILQ_NEXT(r, entries);
3264 else if (r->tos && !(r->tos == pd->tos))
3265 r = TAILQ_NEXT(r, entries);
3266 else if (r->rule_flag & PFRULE_FRAGMENT)
3267 r = TAILQ_NEXT(r, entries);
3268 else if (pd->proto == IPPROTO_TCP &&
3269 (r->flagset & th->th_flags) != r->flags)
3270 r = TAILQ_NEXT(r, entries);
3271 /* tcp/udp only. uid.op always 0 in other cases */
3272 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3273 pf_socket_lookup(direction, pd, m), 1)) &&
3274 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3276 r = TAILQ_NEXT(r, entries);
3277 /* tcp/udp only. gid.op always 0 in other cases */
3278 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
3279 pf_socket_lookup(direction, pd, m), 1)) &&
3280 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
3282 r = TAILQ_NEXT(r, entries);
3284 r->prob <= arc4random())
3285 r = TAILQ_NEXT(r, entries);
3286 else if (r->match_tag && !pf_match_tag(m, r, &tag,
3287 pd->pf_mtag ? pd->pf_mtag->tag : 0))
3288 r = TAILQ_NEXT(r, entries);
3289 else if (r->os_fingerprint != PF_OSFP_ANY &&
3290 (pd->proto != IPPROTO_TCP || !pf_osfp_match(
3291 pf_osfp_fingerprint(pd, m, off, th),
3292 r->os_fingerprint)))
3293 r = TAILQ_NEXT(r, entries);
3297 if (r->rtableid >= 0)
3298 rtableid = r->rtableid;
3299 if (r->anchor == NULL) {
3306 r = TAILQ_NEXT(r, entries);
3308 pf_step_into_anchor(anchor_stack, &asd,
3309 &ruleset, PF_RULESET_FILTER, &r, &a,
3312 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3313 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3320 REASON_SET(&reason, PFRES_MATCH);
3322 if (r->log || (nr != NULL && nr->log)) {
3324 m_copyback(m, off, hdrlen, pd->hdr.any);
3325 PFLOG_PACKET(kif, m, af, direction, reason, r->log ? r : nr, a,
3329 if ((r->action == PF_DROP) &&
3330 ((r->rule_flag & PFRULE_RETURNRST) ||
3331 (r->rule_flag & PFRULE_RETURNICMP) ||
3332 (r->rule_flag & PFRULE_RETURN))) {
3333 /* undo NAT changes, if they have taken place */
3335 PF_ACPY(saddr, &sk->addr[pd->sidx], af);
3336 PF_ACPY(daddr, &sk->addr[pd->didx], af);
3338 *pd->sport = sk->port[pd->sidx];
3340 *pd->dport = sk->port[pd->didx];
3342 *pd->proto_sum = bproto_sum;
3344 *pd->ip_sum = bip_sum;
3345 m_copyback(m, off, hdrlen, pd->hdr.any);
3347 if (pd->proto == IPPROTO_TCP &&
3348 ((r->rule_flag & PFRULE_RETURNRST) ||
3349 (r->rule_flag & PFRULE_RETURN)) &&
3350 !(th->th_flags & TH_RST)) {
3351 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
3363 h4 = mtod(m, struct ip *);
3364 len = ntohs(h4->ip_len) - off;
3369 h6 = mtod(m, struct ip6_hdr *);
3370 len = ntohs(h6->ip6_plen) - (off - sizeof(*h6));
3375 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
3376 REASON_SET(&reason, PFRES_PROTCKSUM);
3378 if (th->th_flags & TH_SYN)
3380 if (th->th_flags & TH_FIN)
3382 pf_send_tcp(m, r, af, pd->dst,
3383 pd->src, th->th_dport, th->th_sport,
3384 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
3385 r->return_ttl, 1, 0, kif->pfik_ifp);
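/*
 * Worked example: for a rejected SYN carrying seq 1000 and no payload,
 * ack starts at 1000 + 0 and the TH_SYN test above adds one sequence
 * slot, so the returned RST|ACK acknowledges 1001 -- exactly what the
 * peer expects, letting it tear the connection down immediately.
 */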
3387 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
3389 pf_send_icmp(m, r->return_icmp >> 8,
3390 r->return_icmp & 255, af, r);
3391 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
3393 pf_send_icmp(m, r->return_icmp6 >> 8,
3394 r->return_icmp6 & 255, af, r);
3397 if (r->action == PF_DROP)
3400 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3401 REASON_SET(&reason, PFRES_MEMORY);
3405 M_SETFIB(m, rtableid);
3407 if (!state_icmp && (r->keep_state || nr != NULL ||
3408 (pd->flags & PFDESC_TCP_NORM))) {
3410 action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off,
3411 sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum,
3413 if (action != PF_PASS)
3417 uma_zfree(V_pf_state_key_z, sk);
3419 uma_zfree(V_pf_state_key_z, nk);
3422 /* copy back packet headers if we performed NAT operations */
3424 m_copyback(m, off, hdrlen, pd->hdr.any);
3426 if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) &&
3427 direction == PF_OUT &&
3428 pfsync_defer_ptr != NULL && pfsync_defer_ptr(*sm, m))
3430 * We want the state created, but we don't
3431 * want to send this in case a partner
3432 * firewall has to know about it to allow
3433 * replies through it.
3441 uma_zfree(V_pf_state_key_z, sk);
3443 uma_zfree(V_pf_state_key_z, nk);
3448 pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
3449 struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *nk,
3450 struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport,
3451 u_int16_t dport, int *rewrite, struct pfi_kif *kif, struct pf_state **sm,
3452 int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen)
3454 struct pf_state *s = NULL;
3455 struct pf_src_node *sn = NULL;
3456 struct tcphdr *th = pd->hdr.tcp;
3457 u_int16_t mss = V_tcp_mssdflt;
3460 /* check maximums */
3461 if (r->max_states &&
3462 (counter_u64_fetch(r->states_cur) >= r->max_states)) {
3463 counter_u64_add(V_pf_status.lcounters[LCNT_STATES], 1);
3464 REASON_SET(&reason, PFRES_MAXSTATES);
3467 /* src node for filter rule */
3468 if ((r->rule_flag & PFRULE_SRCTRACK ||
3469 r->rpool.opts & PF_POOL_STICKYADDR) &&
3470 pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
3471 REASON_SET(&reason, PFRES_SRCLIMIT);
3474 /* src node for translation rule */
3475 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3476 pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
3477 REASON_SET(&reason, PFRES_SRCLIMIT);
3480 s = uma_zalloc(V_pf_state_z, M_NOWAIT | M_ZERO);
3482 REASON_SET(&reason, PFRES_MEMORY);
3486 s->nat_rule.ptr = nr;
3488 STATE_INC_COUNTERS(s);
3490 s->state_flags |= PFSTATE_ALLOWOPTS;
3491 if (r->rule_flag & PFRULE_STATESLOPPY)
3492 s->state_flags |= PFSTATE_SLOPPY;
3493 s->log = r->log & PF_LOG_ALL;
3494 s->sync_state = PFSYNC_S_NONE;
3496 s->log |= nr->log & PF_LOG_ALL;
3497 switch (pd->proto) {
3499 s->src.seqlo = ntohl(th->th_seq);
3500 s->src.seqhi = s->src.seqlo + pd->p_len + 1;
3501 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
3502 r->keep_state == PF_STATE_MODULATE) {
3503 /* Generate sequence number modulator */
3504 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
3507 pf_change_a(&th->th_seq, &th->th_sum,
3508 htonl(s->src.seqlo + s->src.seqdiff), 0);
3512 if (th->th_flags & TH_SYN) {
3514 s->src.wscale = pf_get_wscale(m, off,
3515 th->th_off, pd->af);
3517 s->src.max_win = MAX(ntohs(th->th_win), 1);
3518 if (s->src.wscale & PF_WSCALE_MASK) {
3519 /* Remove scale factor from initial window */
3520 int win = s->src.max_win;
3521 win += 1 << (s->src.wscale & PF_WSCALE_MASK);
3522 s->src.max_win = (win - 1) >>
3523 (s->src.wscale & PF_WSCALE_MASK);
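/*
 * E.g. an unscaled SYN window of 65535 with wscale 7 becomes
 * (65535 + (1 << 7) - 1) >> 7 = 512, i.e. the window rounded up to
 * whole scale units.
 */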
3525 if (th->th_flags & TH_FIN)
3529 s->src.state = TCPS_SYN_SENT;
3530 s->dst.state = TCPS_CLOSED;
3531 s->timeout = PFTM_TCP_FIRST_PACKET;
3534 s->src.state = PFUDPS_SINGLE;
3535 s->dst.state = PFUDPS_NO_TRAFFIC;
3536 s->timeout = PFTM_UDP_FIRST_PACKET;
3540 case IPPROTO_ICMPV6:
3542 s->timeout = PFTM_ICMP_FIRST_PACKET;
3545 s->src.state = PFOTHERS_SINGLE;
3546 s->dst.state = PFOTHERS_NO_TRAFFIC;
3547 s->timeout = PFTM_OTHER_FIRST_PACKET;
3550 s->creation = time_uptime;
3551 s->expire = time_uptime;
3555 s->src_node->states++;
3558 /* XXX We only modify one side for now. */
3559 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
3560 s->nat_src_node = nsn;
3561 s->nat_src_node->states++;
3563 if (pd->proto == IPPROTO_TCP) {
3564 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
3565 off, pd, th, &s->src, &s->dst)) {
3566 REASON_SET(&reason, PFRES_MEMORY);
3567 pf_src_tree_remove_state(s);
3568 STATE_DEC_COUNTERS(s);
3569 uma_zfree(V_pf_state_z, s);
3572 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
3573 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
3574 &s->src, &s->dst, rewrite)) {
3575 /* This really shouldn't happen!!! */
3576 DPFPRINTF(PF_DEBUG_URGENT,
3577 ("pf_normalize_tcp_stateful failed on first pkt"));
3578 pf_normalize_tcp_cleanup(s);
3579 pf_src_tree_remove_state(s);
3580 STATE_DEC_COUNTERS(s);
3581 uma_zfree(V_pf_state_z, s);
3585 s->direction = pd->dir;
3588 * sk/nk may already have been set up by pf_get_translation().
3591 KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p",
3592 __func__, nr, sk, nk));
3593 sk = pf_state_key_setup(pd, pd->src, pd->dst, sport, dport);
3598 KASSERT((sk != NULL && nk != NULL), ("%s: nr %p sk %p, nk %p",
3599 __func__, nr, sk, nk));
3601 /* Swap sk/nk for PF_OUT. */
3602 if (pf_state_insert(BOUND_IFACE(r, kif),
3603 (pd->dir == PF_IN) ? sk : nk,
3604 (pd->dir == PF_IN) ? nk : sk, s)) {
3605 if (pd->proto == IPPROTO_TCP)
3606 pf_normalize_tcp_cleanup(s);
3607 REASON_SET(&reason, PFRES_STATEINS);
3608 pf_src_tree_remove_state(s);
3609 STATE_DEC_COUNTERS(s);
3610 uma_zfree(V_pf_state_z, s);
3615 pf_set_rt_ifp(s, pd->src); /* needs s->state_key set */
3618 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
3619 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
3620 s->src.state = PF_TCPS_PROXY_SRC;
3621 /* undo NAT changes, if they have taken place */
3623 struct pf_state_key *skt = s->key[PF_SK_WIRE];
3624 if (pd->dir == PF_OUT)
3625 skt = s->key[PF_SK_STACK];
3626 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
3627 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
3629 *pd->sport = skt->port[pd->sidx];
3631 *pd->dport = skt->port[pd->didx];
3633 *pd->proto_sum = bproto_sum;
3635 *pd->ip_sum = bip_sum;
3636 m_copyback(m, off, hdrlen, pd->hdr.any);
3638 s->src.seqhi = htonl(arc4random());
3639 /* Find mss option */
3640 int rtid = M_GETFIB(m);
3641 mss = pf_get_mss(m, off, th->th_off, pd->af);
3642 mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
3643 mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
3645 pf_send_tcp(NULL, r, pd->af, pd->dst, pd->src, th->th_dport,
3646 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
3647 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL);
3648 REASON_SET(&reason, PFRES_SYNPROXY);
3649 return (PF_SYNPROXY_DROP);
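/*
 * Note (sketch): at this point pf has answered the client's SYN by
 * itself without involving the destination.  Only after the client's
 * final ACK validates against src.seqhi does pf_test_state_tcp() open
 * the second half of the handshake toward the destination and splice
 * the two sides together with per-peer seqdiff offsets.
 */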
3656 uma_zfree(V_pf_state_key_z, sk);
3658 uma_zfree(V_pf_state_key_z, nk);
3660 if (sn != NULL && sn->states == 0 && sn->expire == 0) {
3661 pf_unlink_src_node(sn);
3662 pf_free_src_node(sn);
3665 if (nsn != sn && nsn != NULL && nsn->states == 0 && nsn->expire == 0) {
3666 pf_unlink_src_node(nsn);
3667 pf_free_src_node(nsn);
3674 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
3675 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
3676 struct pf_ruleset **rsm)
3678 struct pf_rule *r, *a = NULL;
3679 struct pf_ruleset *ruleset = NULL;
3680 sa_family_t af = pd->af;
3685 struct pf_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
3689 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3692 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3693 r = r->skip[PF_SKIP_IFP].ptr;
3694 else if (r->direction && r->direction != direction)
3695 r = r->skip[PF_SKIP_DIR].ptr;
3696 else if (r->af && r->af != af)
3697 r = r->skip[PF_SKIP_AF].ptr;
3698 else if (r->proto && r->proto != pd->proto)
3699 r = r->skip[PF_SKIP_PROTO].ptr;
3700 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
3701 r->src.neg, kif, M_GETFIB(m)))
3702 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3703 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
3704 r->dst.neg, NULL, M_GETFIB(m)))
3705 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3706 else if (r->tos && !(r->tos == pd->tos))
3707 r = TAILQ_NEXT(r, entries);
3708 else if (r->os_fingerprint != PF_OSFP_ANY)
3709 r = TAILQ_NEXT(r, entries);
3710 else if (pd->proto == IPPROTO_UDP &&
3711 (r->src.port_op || r->dst.port_op))
3712 r = TAILQ_NEXT(r, entries);
3713 else if (pd->proto == IPPROTO_TCP &&
3714 (r->src.port_op || r->dst.port_op || r->flagset))
3715 r = TAILQ_NEXT(r, entries);
3716 else if ((pd->proto == IPPROTO_ICMP ||
3717 pd->proto == IPPROTO_ICMPV6) &&
3718 (r->type || r->code))
3719 r = TAILQ_NEXT(r, entries);
3720 else if (r->prob && r->prob <=
3721 (arc4random() % (UINT_MAX - 1) + 1))
3722 r = TAILQ_NEXT(r, entries);
3723 else if (r->match_tag && !pf_match_tag(m, r, &tag,
3724 pd->pf_mtag ? pd->pf_mtag->tag : 0))
3725 r = TAILQ_NEXT(r, entries);
3727 if (r->anchor == NULL) {
3734 r = TAILQ_NEXT(r, entries);
3736 pf_step_into_anchor(anchor_stack, &asd,
3737 &ruleset, PF_RULESET_FILTER, &r, &a,
3740 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3741 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3748 REASON_SET(&reason, PFRES_MATCH);
3751 PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd,
3754 if (r->action != PF_PASS)
3757 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3758 REASON_SET(&reason, PFRES_MEMORY);
3766 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
3767 struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off,
3768 struct pf_pdesc *pd, u_short *reason, int *copyback)
3770 struct tcphdr *th = pd->hdr.tcp;
3771 u_int16_t win = ntohs(th->th_win);
3772 u_int32_t ack, end, seq, orig_seq;
3776 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
3777 sws = src->wscale & PF_WSCALE_MASK;
3778 dws = dst->wscale & PF_WSCALE_MASK;
3783 * Sequence tracking algorithm from Guido van Rooij's paper:
3784 * http://www.madison-gurkha.com/publications/tcp_filtering/
3788 orig_seq = seq = ntohl(th->th_seq);
3789 if (src->seqlo == 0) {
3790 /* First packet from this end. Set its state */
3792 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
3793 src->scrub == NULL) {
3794 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
3795 REASON_SET(reason, PFRES_MEMORY);
3800 /* Deferred generation of sequence number modulator */
3801 if (dst->seqdiff && !src->seqdiff) {
3802 /* use random iss for the TCP server */
3803 while ((src->seqdiff = arc4random() - seq) == 0)
3805 ack = ntohl(th->th_ack) - dst->seqdiff;
3806 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
3808 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
3811 ack = ntohl(th->th_ack);
3814 end = seq + pd->p_len;
3815 if (th->th_flags & TH_SYN) {
3817 if (dst->wscale & PF_WSCALE_FLAG) {
3818 src->wscale = pf_get_wscale(m, off, th->th_off,
3820 if (src->wscale & PF_WSCALE_FLAG) {
3821 /* Remove scale factor from initial
3823 sws = src->wscale & PF_WSCALE_MASK;
3824 win = ((u_int32_t)win + (1 << sws) - 1)
3826 dws = dst->wscale & PF_WSCALE_MASK;
3828 /* fixup other window */
3829 dst->max_win <<= dst->wscale &
3831 /* in case of a retrans SYN|ACK */
3836 if (th->th_flags & TH_FIN)
3840 if (src->state < TCPS_SYN_SENT)
3841 src->state = TCPS_SYN_SENT;
3844 * May need to slide the window (seqhi may have been set by
3845 * the crappy stack check or if we picked up the connection
3846 * after establishment)
3848 if (src->seqhi == 1 ||
3849 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
3850 src->seqhi = end + MAX(1, dst->max_win << dws);
3851 if (win > src->max_win)
3855 ack = ntohl(th->th_ack) - dst->seqdiff;
3857 /* Modulate sequence numbers */
3858 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
3860 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
3863 end = seq + pd->p_len;
3864 if (th->th_flags & TH_SYN)
3866 if (th->th_flags & TH_FIN)
3870 if ((th->th_flags & TH_ACK) == 0) {
3871 /* Let it pass through the ack skew check */
3873 } else if ((ack == 0 &&
3874 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
3875 /* broken tcp stacks do not set ack */
3876 (dst->state < TCPS_SYN_SENT)) {
3878 * Many stacks (ours included) will set the ACK number in an
3879 * FIN|ACK if the SYN times out -- no sequence to ACK.
3885 /* Ease sequencing restrictions on no data packets */
3890 ackskew = dst->seqlo - ack;
3894 * Need to demodulate the sequence numbers in any TCP SACK options
3895 * (Selective ACK). We could optionally validate the SACK values
3896 * against the current ACK window, either forwards or backwards, but
3897 * I'm not confident that SACK has been implemented properly
3898 * everywhere. It wouldn't surprise me if several stacks accidentally
3899 * SACK too far backwards of previously ACKed data. There really aren't
3900 * any security implications of bad SACKing unless the target stack
3901 * doesn't validate the option length correctly. Someone trying to
3902 * spoof into a TCP connection won't bother blindly sending SACK
3905 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
3906 if (pf_modulate_sack(m, off, pd, th, dst))
3911 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
3912 if (SEQ_GEQ(src->seqhi, end) &&
3913 /* Last octet inside other's window space */
3914 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
3915 /* Retrans: not more than one window back */
3916 (ackskew >= -MAXACKWINDOW) &&
3917 /* Acking not more than one reassembled fragment backwards */
3918 (ackskew <= (MAXACKWINDOW << sws)) &&
3919 /* Acking not more than one window forward */
3920 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
3921 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
3922 (pd->flags & PFDESC_IP_REAS) == 0)) {
3923 /* Require an exact/+1 sequence match on resets when possible */
3925 if (dst->scrub || src->scrub) {
3926 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
3927 *state, src, dst, copyback))
3931 /* update max window */
3932 if (src->max_win < win)
3934 /* synchronize sequencing */
3935 if (SEQ_GT(end, src->seqlo))
3937 /* slide the window of what the other end can send */
3938 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
3939 dst->seqhi = ack + MAX((win << sws), 1);
3943 if (th->th_flags & TH_SYN)
3944 if (src->state < TCPS_SYN_SENT)
3945 src->state = TCPS_SYN_SENT;
3946 if (th->th_flags & TH_FIN)
3947 if (src->state < TCPS_CLOSING)
3948 src->state = TCPS_CLOSING;
3949 if (th->th_flags & TH_ACK) {
3950 if (dst->state == TCPS_SYN_SENT) {
3951 dst->state = TCPS_ESTABLISHED;
3952 if (src->state == TCPS_ESTABLISHED &&
3953 (*state)->src_node != NULL &&
3954 pf_src_connlimit(state)) {
3955 REASON_SET(reason, PFRES_SRCLIMIT);
3958 } else if (dst->state == TCPS_CLOSING)
3959 dst->state = TCPS_FIN_WAIT_2;
3961 if (th->th_flags & TH_RST)
3962 src->state = dst->state = TCPS_TIME_WAIT;
3964 /* update expire time */
3965 (*state)->expire = time_uptime;
3966 if (src->state >= TCPS_FIN_WAIT_2 &&
3967 dst->state >= TCPS_FIN_WAIT_2)
3968 (*state)->timeout = PFTM_TCP_CLOSED;
3969 else if (src->state >= TCPS_CLOSING &&
3970 dst->state >= TCPS_CLOSING)
3971 (*state)->timeout = PFTM_TCP_FIN_WAIT;
3972 else if (src->state < TCPS_ESTABLISHED ||
3973 dst->state < TCPS_ESTABLISHED)
3974 (*state)->timeout = PFTM_TCP_OPENING;
3975 else if (src->state >= TCPS_CLOSING ||
3976 dst->state >= TCPS_CLOSING)
3977 (*state)->timeout = PFTM_TCP_CLOSING;
3979 (*state)->timeout = PFTM_TCP_ESTABLISHED;
3981 /* Fall through to PASS packet */
3983 } else if ((dst->state < TCPS_SYN_SENT ||
3984 dst->state >= TCPS_FIN_WAIT_2 ||
3985 src->state >= TCPS_FIN_WAIT_2) &&
3986 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
3987 /* Within a window forward of the originating packet */
3988 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
3989 /* Within a window backward of the originating packet */
3992 * This currently handles three situations:
3993 * 1) Stupid stacks will shotgun SYNs before their peer
3995 * 2) When PF catches an already established stream (the
3996 * firewall rebooted, the state table was flushed, routes
3998 * 3) Packets get funky immediately after the connection
3999 * closes (this should catch Solaris spurious ACK|FINs
4000 * that web servers like to spew after a close)
4002 * This must be a little more careful than the above code
4003 * since packet floods will also be caught here. We don't
4004 * update the TTL here to mitigate the damage of a packet
4005 * flood and so the same code can handle awkward establishment
4006 * and a loosened connection close.
4007 * In the establishment case, a correct peer response will
4008 * validate the connection, go through the normal state code
4009 * and keep updating the state TTL.
4012 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4013 printf("pf: loose state match: ");
4014 pf_print_state(*state);
4015 pf_print_flags(th->th_flags);
4016 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4017 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
4018 pd->p_len, ackskew, (unsigned long long)(*state)->packets[0],
4019 (unsigned long long)(*state)->packets[1],
4020 pd->dir == PF_IN ? "in" : "out",
4021 pd->dir == (*state)->direction ? "fwd" : "rev");
4024 if (dst->scrub || src->scrub) {
4025 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4026 *state, src, dst, copyback))
4030 /* update max window */
4031 if (src->max_win < win)
4033 /* synchronize sequencing */
4034 if (SEQ_GT(end, src->seqlo))
4036 /* slide the window of what the other end can send */
4037 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4038 dst->seqhi = ack + MAX((win << sws), 1);
4041 * Cannot set dst->seqhi here since this could be a shotgunned
4042 * SYN and not an already established connection.
4045 if (th->th_flags & TH_FIN)
4046 if (src->state < TCPS_CLOSING)
4047 src->state = TCPS_CLOSING;
4048 if (th->th_flags & TH_RST)
4049 src->state = dst->state = TCPS_TIME_WAIT;
4051 /* Fall through to PASS packet */
4054 if ((*state)->dst.state == TCPS_SYN_SENT &&
4055 (*state)->src.state == TCPS_SYN_SENT) {
4056 /* Send RST for state mismatches during handshake */
4057 if (!(th->th_flags & TH_RST))
4058 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4059 pd->dst, pd->src, th->th_dport,
4060 th->th_sport, ntohl(th->th_ack), 0,
4062 (*state)->rule.ptr->return_ttl, 1, 0,
4067 } else if (V_pf_status.debug >= PF_DEBUG_MISC) {
4068 printf("pf: BAD state: ");
4069 pf_print_state(*state);
4070 pf_print_flags(th->th_flags);
4071 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4072 "pkts=%llu:%llu dir=%s,%s\n",
4073 seq, orig_seq, ack, pd->p_len, ackskew,
4074 (unsigned long long)(*state)->packets[0],
4075 (unsigned long long)(*state)->packets[1],
4076 pd->dir == PF_IN ? "in" : "out",
4077 pd->dir == (*state)->direction ? "fwd" : "rev");
4078 printf("pf: State failure on: %c %c %c %c | %c %c\n",
4079 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4080 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4082 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4083 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4084 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4085 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
4087 REASON_SET(reason, PFRES_BADSTATE);
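/*
 * Illustrative sketch (userspace): all of the window tests above use
 * modular 32-bit sequence comparison, as in <netinet/tcp_seq.h>:
 */
#if 0
#include <stdint.h>

#define DEMO_SEQ_GT(a, b)	((int32_t)((a) - (b)) > 0)
#define DEMO_SEQ_GEQ(a, b)	((int32_t)((a) - (b)) >= 0)

/*
 * DEMO_SEQ_GT(0x00000010, 0xfffffff0) is true: the subtraction wraps
 * to 0x20, a small positive value, so 0x10 lies "after" 0xfffffff0
 * modulo 2^32 even though it is numerically smaller.
 */
#endif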
4095 pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst,
4096 struct pf_state **state, struct pf_pdesc *pd, u_short *reason)
4098 struct tcphdr *th = pd->hdr.tcp;
4100 if (th->th_flags & TH_SYN)
4101 if (src->state < TCPS_SYN_SENT)
4102 src->state = TCPS_SYN_SENT;
4103 if (th->th_flags & TH_FIN)
4104 if (src->state < TCPS_CLOSING)
4105 src->state = TCPS_CLOSING;
4106 if (th->th_flags & TH_ACK) {
4107 if (dst->state == TCPS_SYN_SENT) {
4108 dst->state = TCPS_ESTABLISHED;
4109 if (src->state == TCPS_ESTABLISHED &&
4110 (*state)->src_node != NULL &&
4111 pf_src_connlimit(state)) {
4112 REASON_SET(reason, PFRES_SRCLIMIT);
4115 } else if (dst->state == TCPS_CLOSING) {
4116 dst->state = TCPS_FIN_WAIT_2;
4117 } else if (src->state == TCPS_SYN_SENT &&
4118 dst->state < TCPS_SYN_SENT) {
4120 * Handle a special sloppy case where we only see one
4121 * half of the connection. If there is an ACK after
4122 * the initial SYN without ever seeing a packet from
4123 * the destination, set the connection to established.
4125 dst->state = src->state = TCPS_ESTABLISHED;
4126 if ((*state)->src_node != NULL &&
4127 pf_src_connlimit(state)) {
4128 REASON_SET(reason, PFRES_SRCLIMIT);
4131 } else if (src->state == TCPS_CLOSING &&
4132 dst->state == TCPS_ESTABLISHED &&
4135 * Handle the closing of half connections where we
4136 * don't see the full bidirectional FIN/ACK+ACK
4139 dst->state = TCPS_CLOSING;
4142 if (th->th_flags & TH_RST)
4143 src->state = dst->state = TCPS_TIME_WAIT;
4145 /* update expire time */
4146 (*state)->expire = time_uptime;
4147 if (src->state >= TCPS_FIN_WAIT_2 &&
4148 dst->state >= TCPS_FIN_WAIT_2)
4149 (*state)->timeout = PFTM_TCP_CLOSED;
4150 else if (src->state >= TCPS_CLOSING &&
4151 dst->state >= TCPS_CLOSING)
4152 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4153 else if (src->state < TCPS_ESTABLISHED ||
4154 dst->state < TCPS_ESTABLISHED)
4155 (*state)->timeout = PFTM_TCP_OPENING;
4156 else if (src->state >= TCPS_CLOSING ||
4157 dst->state >= TCPS_CLOSING)
4158 (*state)->timeout = PFTM_TCP_CLOSING;
4160 (*state)->timeout = PFTM_TCP_ESTABLISHED;
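/*
 * Note: the timeout ladder above (and its twin in pf_tcp_track_full())
 * is ordered most-closed-first, so a state is classified by the
 * furthest progress both peers have made: both >= FIN_WAIT_2 gives
 * PFTM_TCP_CLOSED, both >= CLOSING gives PFTM_TCP_FIN_WAIT, either
 * side pre-ESTABLISHED gives PFTM_TCP_OPENING, either side >= CLOSING
 * gives PFTM_TCP_CLOSING, and PFTM_TCP_ESTABLISHED otherwise.
 */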
4166 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
4167 struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
4170 struct pf_state_key_cmp key;
4171 struct tcphdr *th = pd->hdr.tcp;
4173 struct pf_state_peer *src, *dst;
4174 struct pf_state_key *sk;
4176 bzero(&key, sizeof(key));
4178 key.proto = IPPROTO_TCP;
4179 if (direction == PF_IN) { /* wire side, straight */
4180 PF_ACPY(&key.addr[0], pd->src, key.af);
4181 PF_ACPY(&key.addr[1], pd->dst, key.af);
4182 key.port[0] = th->th_sport;
4183 key.port[1] = th->th_dport;
4184 } else { /* stack side, reverse */
4185 PF_ACPY(&key.addr[1], pd->src, key.af);
4186 PF_ACPY(&key.addr[0], pd->dst, key.af);
4187 key.port[1] = th->th_sport;
4188 key.port[0] = th->th_dport;
4191 STATE_LOOKUP(kif, &key, direction, *state, pd);
4193 if (direction == (*state)->direction) {
4194 src = &(*state)->src;
4195 dst = &(*state)->dst;
4197 src = &(*state)->dst;
4198 dst = &(*state)->src;
4201 sk = (*state)->key[pd->didx];
4203 if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
4204 if (direction != (*state)->direction) {
4205 REASON_SET(reason, PFRES_SYNPROXY);
4206 return (PF_SYNPROXY_DROP);
4208 if (th->th_flags & TH_SYN) {
4209 if (ntohl(th->th_seq) != (*state)->src.seqlo) {
4210 REASON_SET(reason, PFRES_SYNPROXY);
4213 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4214 pd->src, th->th_dport, th->th_sport,
4215 (*state)->src.seqhi, ntohl(th->th_seq) + 1,
4216 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0, NULL);
4217 REASON_SET(reason, PFRES_SYNPROXY);
4218 return (PF_SYNPROXY_DROP);
4219 } else if (!(th->th_flags & TH_ACK) ||
4220 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4221 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4222 REASON_SET(reason, PFRES_SYNPROXY);
4224 } else if ((*state)->src_node != NULL &&
4225 pf_src_connlimit(state)) {
4226 REASON_SET(reason, PFRES_SRCLIMIT);
4229 (*state)->src.state = PF_TCPS_PROXY_DST;
4231 if ((*state)->src.state == PF_TCPS_PROXY_DST) {
4232 if (direction == (*state)->direction) {
4233 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
4234 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4235 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4236 REASON_SET(reason, PFRES_SYNPROXY);
4239 (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
4240 if ((*state)->dst.seqhi == 1)
4241 (*state)->dst.seqhi = htonl(arc4random());
4242 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4243 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4244 sk->port[pd->sidx], sk->port[pd->didx],
4245 (*state)->dst.seqhi, 0, TH_SYN, 0,
4246 (*state)->src.mss, 0, 0, (*state)->tag, NULL);
4247 REASON_SET(reason, PFRES_SYNPROXY);
4248 return (PF_SYNPROXY_DROP);
4249 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
4251 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
4252 REASON_SET(reason, PFRES_SYNPROXY);
4255 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
4256 (*state)->dst.seqlo = ntohl(th->th_seq);
4257 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4258 pd->src, th->th_dport, th->th_sport,
4259 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
4260 TH_ACK, (*state)->src.max_win, 0, 0, 0,
4261 (*state)->tag, NULL);
4262 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4263 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4264 sk->port[pd->sidx], sk->port[pd->didx],
4265 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
4266 TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0, NULL);
4267 (*state)->src.seqdiff = (*state)->dst.seqhi -
4268 (*state)->src.seqlo;
4269 (*state)->dst.seqdiff = (*state)->src.seqhi -
4270 (*state)->dst.seqlo;
4271 (*state)->src.seqhi = (*state)->src.seqlo +
4272 (*state)->dst.max_win;
4273 (*state)->dst.seqhi = (*state)->dst.seqlo +
4274 (*state)->src.max_win;
4275 (*state)->src.wscale = (*state)->dst.wscale = 0;
4276 (*state)->src.state = (*state)->dst.state =
4278 REASON_SET(reason, PFRES_SYNPROXY);
4279 return (PF_SYNPROXY_DROP);
4283 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
4284 dst->state >= TCPS_FIN_WAIT_2 &&
4285 src->state >= TCPS_FIN_WAIT_2) {
4286 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4287 printf("pf: state reuse ");
4288 pf_print_state(*state);
4289 pf_print_flags(th->th_flags);
4292 /* XXX make sure it's the same direction ?? */
4293 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
4294 pf_unlink_state(*state, PF_ENTER_LOCKED);
4299 if ((*state)->state_flags & PFSTATE_SLOPPY) {
4300 if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP)
4303 if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason,
4304 &copyback) == PF_DROP)
4308 /* translate source/destination address, if necessary */
4309 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4310 struct pf_state_key *nk = (*state)->key[pd->didx];
4312 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4313 nk->port[pd->sidx] != th->th_sport)
4314 pf_change_ap(pd->src, &th->th_sport, pd->ip_sum,
4315 &th->th_sum, &nk->addr[pd->sidx],
4316 nk->port[pd->sidx], 0, pd->af);
4318 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4319 nk->port[pd->didx] != th->th_dport)
4320 pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum,
4321 &th->th_sum, &nk->addr[pd->didx],
4322 nk->port[pd->didx], 0, pd->af);
4326 /* Copyback sequence modulation or stateful scrub changes if needed */
4328 m_copyback(m, off, sizeof(*th), (caddr_t)th);
4334 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
4335 struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4337 struct pf_state_peer *src, *dst;
4338 struct pf_state_key_cmp key;
4339 struct udphdr *uh = pd->hdr.udp;
4341 bzero(&key, sizeof(key));
4343 key.proto = IPPROTO_UDP;
4344 if (direction == PF_IN) { /* wire side, straight */
4345 PF_ACPY(&key.addr[0], pd->src, key.af);
4346 PF_ACPY(&key.addr[1], pd->dst, key.af);
4347 key.port[0] = uh->uh_sport;
4348 key.port[1] = uh->uh_dport;
4349 } else { /* stack side, reverse */
4350 PF_ACPY(&key.addr[1], pd->src, key.af);
4351 PF_ACPY(&key.addr[0], pd->dst, key.af);
4352 key.port[1] = uh->uh_sport;
4353 key.port[0] = uh->uh_dport;
4356 STATE_LOOKUP(kif, &key, direction, *state, pd);
4358 if (direction == (*state)->direction) {
4359 src = &(*state)->src;
4360 dst = &(*state)->dst;
4362 src = &(*state)->dst;
4363 dst = &(*state)->src;
4367 if (src->state < PFUDPS_SINGLE)
4368 src->state = PFUDPS_SINGLE;
4369 if (dst->state == PFUDPS_SINGLE)
4370 dst->state = PFUDPS_MULTIPLE;
4372 /* update expire time */
4373 (*state)->expire = time_uptime;
4374 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
4375 (*state)->timeout = PFTM_UDP_MULTIPLE;
4377 (*state)->timeout = PFTM_UDP_SINGLE;
4379 /* translate source/destination address, if necessary */
4380 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4381 struct pf_state_key *nk = (*state)->key[pd->didx];
4383 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4384 nk->port[pd->sidx] != uh->uh_sport)
4385 pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum,
4386 &uh->uh_sum, &nk->addr[pd->sidx],
4387 nk->port[pd->sidx], 1, pd->af);
4389 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4390 nk->port[pd->didx] != uh->uh_dport)
4391 pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum,
4392 &uh->uh_sum, &nk->addr[pd->didx],
4393 nk->port[pd->didx], 1, pd->af);
4394 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
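/*
 * Note: every state carries two keys -- PF_SK_WIRE (addresses/ports as
 * seen on the wire) and PF_SK_STACK (as seen by the local stack).  The
 * two pointers are equal unless NAT applies, which is why the pointer
 * inequality above is the cheap "was this state translated?" test.
 */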
4401 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
4402 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
4404 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
4405 u_int16_t icmpid = 0, *icmpsum;
4408 struct pf_state_key_cmp key;
4410 bzero(&key, sizeof(key));
4411 switch (pd->proto) {
4414 icmptype = pd->hdr.icmp->icmp_type;
4415 icmpid = pd->hdr.icmp->icmp_id;
4416 icmpsum = &pd->hdr.icmp->icmp_cksum;
4418 if (icmptype == ICMP_UNREACH ||
4419 icmptype == ICMP_SOURCEQUENCH ||
4420 icmptype == ICMP_REDIRECT ||
4421 icmptype == ICMP_TIMXCEED ||
4422 icmptype == ICMP_PARAMPROB)
4427 case IPPROTO_ICMPV6:
4428 icmptype = pd->hdr.icmp6->icmp6_type;
4429 icmpid = pd->hdr.icmp6->icmp6_id;
4430 icmpsum = &pd->hdr.icmp6->icmp6_cksum;
4432 if (icmptype == ICMP6_DST_UNREACH ||
4433 icmptype == ICMP6_PACKET_TOO_BIG ||
4434 icmptype == ICMP6_TIME_EXCEEDED ||
4435 icmptype == ICMP6_PARAM_PROB)
4444 * ICMP query/reply message not related to a TCP/UDP packet.
4445 * Search for an ICMP state.
4448 key.proto = pd->proto;
4449 key.port[0] = key.port[1] = icmpid;
4450 if (direction == PF_IN) { /* wire side, straight */
4451 PF_ACPY(&key.addr[0], pd->src, key.af);
4452 PF_ACPY(&key.addr[1], pd->dst, key.af);
4453 } else { /* stack side, reverse */
4454 PF_ACPY(&key.addr[1], pd->src, key.af);
4455 PF_ACPY(&key.addr[0], pd->dst, key.af);
4458 STATE_LOOKUP(kif, &key, direction, *state, pd);
4460 (*state)->expire = time_uptime;
4461 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
4463 /* translate source/destination address, if necessary */
4464 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4465 struct pf_state_key *nk = (*state)->key[pd->didx];
4470 if (PF_ANEQ(pd->src,
4471 &nk->addr[pd->sidx], AF_INET))
4472 pf_change_a(&saddr->v4.s_addr,
4474 nk->addr[pd->sidx].v4.s_addr, 0);
4476 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
4478 pf_change_a(&daddr->v4.s_addr,
4480 nk->addr[pd->didx].v4.s_addr, 0);
4483 pd->hdr.icmp->icmp_id) {
4484 pd->hdr.icmp->icmp_cksum =
4486 pd->hdr.icmp->icmp_cksum, icmpid,
4487 nk->port[pd->sidx], 0);
4488 pd->hdr.icmp->icmp_id =
4492 m_copyback(m, off, ICMP_MINLEN,
4493 (caddr_t )pd->hdr.icmp);
4498 if (PF_ANEQ(pd->src,
4499 &nk->addr[pd->sidx], AF_INET6))
4501 &pd->hdr.icmp6->icmp6_cksum,
4502 &nk->addr[pd->sidx], 0);
4504 if (PF_ANEQ(pd->dst,
4505 &nk->addr[pd->didx], AF_INET6))
4507 &pd->hdr.icmp6->icmp6_cksum,
4508 &nk->addr[pd->didx], 0);
4510 m_copyback(m, off, sizeof(struct icmp6_hdr),
4511 (caddr_t )pd->hdr.icmp6);
4520 * ICMP error message in response to a TCP/UDP packet.
4521 * Extract the inner TCP/UDP header and search for that state.
4524 struct pf_pdesc pd2;
4525 bzero(&pd2, sizeof pd2);
4530 struct ip6_hdr h2_6;
4537 /* Payload packet is from the opposite direction. */
4538 pd2.sidx = (direction == PF_IN) ? 1 : 0;
4539 pd2.didx = (direction == PF_IN) ? 0 : 1;
4543 /* offset of h2 in mbuf chain */
4544 ipoff2 = off + ICMP_MINLEN;
4546 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
4547 NULL, reason, pd2.af)) {
4548 DPFPRINTF(PF_DEBUG_MISC,
4549 ("pf: ICMP error message too short "
4554 * ICMP error messages don't refer to non-first
4557 if (h2.ip_off & htons(IP_OFFMASK)) {
4558 REASON_SET(reason, PFRES_FRAG);
4562 /* offset of protocol header that follows h2 */
4563 off2 = ipoff2 + (h2.ip_hl << 2);
4565 pd2.proto = h2.ip_p;
4566 pd2.src = (struct pf_addr *)&h2.ip_src;
4567 pd2.dst = (struct pf_addr *)&h2.ip_dst;
4568 pd2.ip_sum = &h2.ip_sum;
4573 ipoff2 = off + sizeof(struct icmp6_hdr);
4575 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
4576 NULL, reason, pd2.af)) {
4577 DPFPRINTF(PF_DEBUG_MISC,
4578 ("pf: ICMP error message too short "
4582 pd2.proto = h2_6.ip6_nxt;
4583 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
4584 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
4586 off2 = ipoff2 + sizeof(h2_6);
4588 switch (pd2.proto) {
4589 case IPPROTO_FRAGMENT:
4591 * ICMPv6 error messages for
4592 * non-first fragments
4594 REASON_SET(reason, PFRES_FRAG);
4597 case IPPROTO_HOPOPTS:
4598 case IPPROTO_ROUTING:
4599 case IPPROTO_DSTOPTS: {
4600 /* get next header and header length */
4601 struct ip6_ext opt6;
4603 if (!pf_pull_hdr(m, off2, &opt6,
4604 sizeof(opt6), NULL, reason,
4606 DPFPRINTF(PF_DEBUG_MISC,
4607 ("pf: ICMPv6 short opt\n"));
4610 if (pd2.proto == IPPROTO_AH)
4611 off2 += (opt6.ip6e_len + 2) * 4;
4613 off2 += (opt6.ip6e_len + 1) * 8;
4614 pd2.proto = opt6.ip6e_nxt;
4615 /* goto the next header */
4622 } while (!terminal);
4627 switch (pd2.proto) {
4631 struct pf_state_peer *src, *dst;
4636 * Only the first 8 bytes of the TCP header can be
4637 * expected. Don't access any TCP header fields after
4638 * th_seq; an ackskew test is not possible.
4640 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
4642 DPFPRINTF(PF_DEBUG_MISC,
4643 ("pf: ICMP error message too short "
4649 key.proto = IPPROTO_TCP;
4650 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4651 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4652 key.port[pd2.sidx] = th.th_sport;
4653 key.port[pd2.didx] = th.th_dport;
4655 STATE_LOOKUP(kif, &key, direction, *state, pd);
4657 if (direction == (*state)->direction) {
4658 src = &(*state)->dst;
4659 dst = &(*state)->src;
4661 src = &(*state)->src;
4662 dst = &(*state)->dst;
4665 if (src->wscale && dst->wscale)
4666 dws = dst->wscale & PF_WSCALE_MASK;
4670 /* Demodulate sequence number */
4671 seq = ntohl(th.th_seq) - src->seqdiff;
4673 pf_change_a(&th.th_seq, icmpsum,
4678 if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
4679 (!SEQ_GEQ(src->seqhi, seq) ||
4680 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
4681 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4682 printf("pf: BAD ICMP %d:%d ",
4683 icmptype, pd->hdr.icmp->icmp_code);
4684 pf_print_host(pd->src, 0, pd->af);
4686 pf_print_host(pd->dst, 0, pd->af);
4688 pf_print_state(*state);
4689 printf(" seq=%u\n", seq);
4691 REASON_SET(reason, PFRES_BADSTATE);
4694 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4695 printf("pf: OK ICMP %d:%d ",
4696 icmptype, pd->hdr.icmp->icmp_code);
4697 pf_print_host(pd->src, 0, pd->af);
4699 pf_print_host(pd->dst, 0, pd->af);
4701 pf_print_state(*state);
4702 printf(" seq=%u\n", seq);
4706 /* translate source/destination address, if necessary */
4707 if ((*state)->key[PF_SK_WIRE] !=
4708 (*state)->key[PF_SK_STACK]) {
4709 struct pf_state_key *nk =
4710 (*state)->key[pd->didx];
4712 if (PF_ANEQ(pd2.src,
4713 &nk->addr[pd2.sidx], pd2.af) ||
4714 nk->port[pd2.sidx] != th.th_sport)
4715 pf_change_icmp(pd2.src, &th.th_sport,
4716 daddr, &nk->addr[pd2.sidx],
4717 nk->port[pd2.sidx], NULL,
4718 pd2.ip_sum, icmpsum,
4719 pd->ip_sum, 0, pd2.af);
4721 if (PF_ANEQ(pd2.dst,
4722 &nk->addr[pd2.didx], pd2.af) ||
4723 nk->port[pd2.didx] != th.th_dport)
4724 pf_change_icmp(pd2.dst, &th.th_dport,
4725 NULL, /* XXX Inbound NAT? */
4726 &nk->addr[pd2.didx],
4727 nk->port[pd2.didx], NULL,
4728 pd2.ip_sum, icmpsum,
4729 pd->ip_sum, 0, pd2.af);
4737 m_copyback(m, off, ICMP_MINLEN,
4738 (caddr_t )pd->hdr.icmp);
4739 m_copyback(m, ipoff2, sizeof(h2),
4746 sizeof(struct icmp6_hdr),
4747 (caddr_t )pd->hdr.icmp6);
4748 m_copyback(m, ipoff2, sizeof(h2_6),
4753 m_copyback(m, off2, 8, (caddr_t)&th);
4762 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
4763 NULL, reason, pd2.af)) {
4764 DPFPRINTF(PF_DEBUG_MISC,
4765 ("pf: ICMP error message too short "
4771 key.proto = IPPROTO_UDP;
4772 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4773 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4774 key.port[pd2.sidx] = uh.uh_sport;
4775 key.port[pd2.didx] = uh.uh_dport;
4777 STATE_LOOKUP(kif, &key, direction, *state, pd);
4779 /* translate source/destination address, if necessary */
4780 if ((*state)->key[PF_SK_WIRE] !=
4781 (*state)->key[PF_SK_STACK]) {
4782 struct pf_state_key *nk =
4783 (*state)->key[pd->didx];
4785 if (PF_ANEQ(pd2.src,
4786 &nk->addr[pd2.sidx], pd2.af) ||
4787 nk->port[pd2.sidx] != uh.uh_sport)
4788 pf_change_icmp(pd2.src, &uh.uh_sport,
4789 daddr, &nk->addr[pd2.sidx],
4790 nk->port[pd2.sidx], &uh.uh_sum,
4791 pd2.ip_sum, icmpsum,
4792 pd->ip_sum, 1, pd2.af);
4794 if (PF_ANEQ(pd2.dst,
4795 &nk->addr[pd2.didx], pd2.af) ||
4796 nk->port[pd2.didx] != uh.uh_dport)
4797 pf_change_icmp(pd2.dst, &uh.uh_dport,
4798 NULL, /* XXX Inbound NAT? */
4799 &nk->addr[pd2.didx],
4800 nk->port[pd2.didx], &uh.uh_sum,
4801 pd2.ip_sum, icmpsum,
4802 pd->ip_sum, 1, pd2.af);
4807 m_copyback(m, off, ICMP_MINLEN,
4808 (caddr_t )pd->hdr.icmp);
4809 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4815 sizeof(struct icmp6_hdr),
4816 (caddr_t )pd->hdr.icmp6);
4817 m_copyback(m, ipoff2, sizeof(h2_6),
4822 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
4828 case IPPROTO_ICMP: {
4831 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
4832 NULL, reason, pd2.af)) {
4833 DPFPRINTF(PF_DEBUG_MISC,
4834 ("pf: ICMP error message too short i"
4840 key.proto = IPPROTO_ICMP;
4841 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4842 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4843 key.port[0] = key.port[1] = iih.icmp_id;
4845 STATE_LOOKUP(kif, &key, direction, *state, pd);
4847 /* translate source/destination address, if necessary */
4848 if ((*state)->key[PF_SK_WIRE] !=
4849 (*state)->key[PF_SK_STACK]) {
4850 struct pf_state_key *nk =
4851 (*state)->key[pd->didx];
4853 if (PF_ANEQ(pd2.src,
4854 &nk->addr[pd2.sidx], pd2.af) ||
4855 nk->port[pd2.sidx] != iih.icmp_id)
4856 pf_change_icmp(pd2.src, &iih.icmp_id,
4857 daddr, &nk->addr[pd2.sidx],
4858 nk->port[pd2.sidx], NULL,
4859 pd2.ip_sum, icmpsum,
4860 pd->ip_sum, 0, AF_INET);
4862 if (PF_ANEQ(pd2.dst,
4863 &nk->addr[pd2.didx], pd2.af) ||
4864 nk->port[pd2.didx] != iih.icmp_id)
4865 pf_change_icmp(pd2.dst, &iih.icmp_id,
4866 NULL, /* XXX Inbound NAT? */
4867 &nk->addr[pd2.didx],
4868 nk->port[pd2.didx], NULL,
4869 pd2.ip_sum, icmpsum,
4870 pd->ip_sum, 0, AF_INET);
4872 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
4873 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4874 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
4881 case IPPROTO_ICMPV6: {
4882 struct icmp6_hdr iih;
4884 if (!pf_pull_hdr(m, off2, &iih,
4885 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
4886 DPFPRINTF(PF_DEBUG_MISC,
4887 ("pf: ICMP error message too short "
4893 key.proto = IPPROTO_ICMPV6;
4894 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4895 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4896 key.port[0] = key.port[1] = iih.icmp6_id;
4898 STATE_LOOKUP(kif, &key, direction, *state, pd);
4900 /* translate source/destination address, if necessary */
4901 if ((*state)->key[PF_SK_WIRE] !=
4902 (*state)->key[PF_SK_STACK]) {
4903 struct pf_state_key *nk =
4904 (*state)->key[pd->didx];
4906 if (PF_ANEQ(pd2.src,
4907 &nk->addr[pd2.sidx], pd2.af) ||
4908 nk->port[pd2.sidx] != iih.icmp6_id)
4909 pf_change_icmp(pd2.src, &iih.icmp6_id,
4910 daddr, &nk->addr[pd2.sidx],
4911 nk->port[pd2.sidx], NULL,
4912 pd2.ip_sum, icmpsum,
4913 pd->ip_sum, 0, AF_INET6);
4915 if (PF_ANEQ(pd2.dst,
4916 &nk->addr[pd2.didx], pd2.af) ||
4917 nk->port[pd2.didx] != iih.icmp6_id)
4918 pf_change_icmp(pd2.dst, &iih.icmp6_id,
4919 NULL, /* XXX Inbound NAT? */
4920 &nk->addr[pd2.didx],
4921 nk->port[pd2.didx], NULL,
4922 pd2.ip_sum, icmpsum,
4923 pd->ip_sum, 0, AF_INET6);
4925 m_copyback(m, off, sizeof(struct icmp6_hdr),
4926 (caddr_t)pd->hdr.icmp6);
4927 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
4928 m_copyback(m, off2, sizeof(struct icmp6_hdr),
4937 key.proto = pd2.proto;
4938 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4939 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4940 key.port[0] = key.port[1] = 0;
4942 STATE_LOOKUP(kif, &key, direction, *state, pd);
4944 /* translate source/destination address, if necessary */
4945 if ((*state)->key[PF_SK_WIRE] !=
4946 (*state)->key[PF_SK_STACK]) {
4947 struct pf_state_key *nk =
4948 (*state)->key[pd->didx];
4950 if (PF_ANEQ(pd2.src,
4951 &nk->addr[pd2.sidx], pd2.af))
4952 pf_change_icmp(pd2.src, NULL, daddr,
4953 &nk->addr[pd2.sidx], 0, NULL,
4954 pd2.ip_sum, icmpsum,
4955 pd->ip_sum, 0, pd2.af);
4957 if (PF_ANEQ(pd2.dst,
4958 &nk->addr[pd2.didx], pd2.af))
4959 pf_change_icmp(pd2.dst, NULL,
4960 NULL, /* XXX Inbound NAT? */
4961 &nk->addr[pd2.didx], 0, NULL,
4962 pd2.ip_sum, icmpsum,
4963 pd->ip_sum, 0, pd2.af);
4968 m_copyback(m, off, ICMP_MINLEN,
4969 (caddr_t)pd->hdr.icmp);
4970 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4976 sizeof(struct icmp6_hdr),
4977 (caddr_t )pd->hdr.icmp6);
4978 m_copyback(m, ipoff2, sizeof(h2_6),
4992 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
4993 struct mbuf *m, struct pf_pdesc *pd)
4995 struct pf_state_peer *src, *dst;
4996 struct pf_state_key_cmp key;
4998 bzero(&key, sizeof(key));
5000 key.proto = pd->proto;
5001 if (direction == PF_IN) {
5002 PF_ACPY(&key.addr[0], pd->src, key.af);
5003 PF_ACPY(&key.addr[1], pd->dst, key.af);
5004 key.port[0] = key.port[1] = 0;
5006 PF_ACPY(&key.addr[1], pd->src, key.af);
5007 PF_ACPY(&key.addr[0], pd->dst, key.af);
5008 key.port[1] = key.port[0] = 0;
5011 STATE_LOOKUP(kif, &key, direction, *state, pd);
5013 if (direction == (*state)->direction) {
5014 src = &(*state)->src;
5015 dst = &(*state)->dst;
5017 src = &(*state)->dst;
5018 dst = &(*state)->src;
5022 if (src->state < PFOTHERS_SINGLE)
5023 src->state = PFOTHERS_SINGLE;
5024 if (dst->state == PFOTHERS_SINGLE)
5025 dst->state = PFOTHERS_MULTIPLE;
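/*
 * Minimal connection tracking for stateless protocols: the first
 * packet from a peer moves it to SINGLE, traffic from the other
 * side moves that peer to MULTIPLE; once both peers are MULTIPLE,
 * the longer PFTM_OTHER_MULTIPLE timeout below applies.
 */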
5027 /* update expire time */
5028 (*state)->expire = time_uptime;
5029 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
5030 (*state)->timeout = PFTM_OTHER_MULTIPLE;
5032 (*state)->timeout = PFTM_OTHER_SINGLE;
5034 /* translate source/destination address, if necessary */
5035 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5036 struct pf_state_key *nk = (*state)->key[pd->didx];
5038 KASSERT(nk, ("%s: nk is null", __func__));
5039 KASSERT(pd, ("%s: pd is null", __func__));
5040 KASSERT(pd->src, ("%s: pd->src is null", __func__));
5041 KASSERT(pd->dst, ("%s: pd->dst is null", __func__));
5045 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5046 pf_change_a(&pd->src->v4.s_addr,
5048 nk->addr[pd->sidx].v4.s_addr,
5052 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5053 pf_change_a(&pd->dst->v4.s_addr,
5055 nk->addr[pd->didx].v4.s_addr,
5062 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6))
5063 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
5065 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6))
5066 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
5074 * ipoff and off are measured from the start of the mbuf chain.
5075 * h must be at "ipoff" on the mbuf chain.
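/*
 * Typical caller pattern (cf. the TCP case in pf_test() below):
 *
 *	struct tcphdr th;
 *
 *	if (!pf_pull_hdr(m, off, &th, sizeof(th), &action, &reason,
 *	    AF_INET)) {
 *		log = action != PF_PASS;
 *		goto done;
 *	}
 */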
5078 pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
5079 u_short *actionp, u_short *reasonp, sa_family_t af)
5084 struct ip *h = mtod(m, struct ip *);
5085 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
5089 ACTION_SET(actionp, PF_PASS);
5091 ACTION_SET(actionp, PF_DROP);
5092 REASON_SET(reasonp, PFRES_FRAG);
5096 if (m->m_pkthdr.len < off + len ||
5097 ntohs(h->ip_len) < off + len) {
5098 ACTION_SET(actionp, PF_DROP);
5099 REASON_SET(reasonp, PFRES_SHORT);
5107 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
5109 if (m->m_pkthdr.len < off + len ||
5110 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
5111 (unsigned)(off + len)) {
5112 ACTION_SET(actionp, PF_DROP);
5113 REASON_SET(reasonp, PFRES_SHORT);
5120 m_copydata(m, off, len, p);
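/*
 * Reachability helper: with kif == NULL this is a plain "is there any
 * route to addr" check; with an interface given it performs a uRPF
 * check, trying every path when multipath routing is enabled.
 */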
5125 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif,
5129 struct radix_node_head *rnh;
5131 struct sockaddr_in *dst;
5135 struct sockaddr_in6 *dst6;
5136 struct route_in6 ro;
5140 struct radix_node *rn;
5146 /* XXX: stick to table 0 for now */
5147 rnh = rt_tables_get_rnh(0, af);
5148 if (rnh != NULL && rn_mpath_capable(rnh))
5151 bzero(&ro, sizeof(ro));
5154 dst = satosin(&ro.ro_dst);
5155 dst->sin_family = AF_INET;
5156 dst->sin_len = sizeof(*dst);
5157 dst->sin_addr = addr->v4;
5162 * Skip check for addresses with embedded interface scope,
5163 * as they would always match anyway.
5165 if (IN6_IS_SCOPE_EMBED(&addr->v6))
5167 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
5168 dst6->sin6_family = AF_INET6;
5169 dst6->sin6_len = sizeof(*dst6);
5170 dst6->sin6_addr = addr->v6;
5177 /* Skip checks for ipsec interfaces */
5178 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
5184 in6_rtalloc_ign(&ro, 0, rtableid);
5189 in_rtalloc_ign((struct route *)&ro, 0, rtableid);
5193 rtalloc_ign((struct route *)&ro, 0); /* No/default FIB. */
5197 if (ro.ro_rt != NULL) {
5198 /* No interface given, this is a no-route check */
5202 if (kif->pfik_ifp == NULL) {
5207 /* Perform uRPF check if passed input interface */
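/*
 * This check backs the "urpf-failed" source-address keyword in
 * pf.conf(5), e.g. (sketch):
 *
 *	block in quick from urpf-failed
 */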
5209 rn = (struct radix_node *)ro.ro_rt;
5211 rt = (struct rtentry *)rn;
5214 if (kif->pfik_ifp == ifp)
5217 rn = rn_mpath_next(rn);
5219 } while (check_mpath == 1 && rn != NULL && ret == 0);
5223 if (ro.ro_rt != NULL)
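/*
 * Output path for route-to/reply-to/dup-to on IPv4: pick the next hop
 * from the rule's address pool (or the one cached in the state) and
 * hand the packet to the interface directly, fragmenting if needed.
 */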
5230 pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5231 struct pf_state *s, struct pf_pdesc *pd)
5233 struct mbuf *m0, *m1;
5234 struct sockaddr_in dst;
5236 struct ifnet *ifp = NULL;
5237 struct pf_addr naddr;
5238 struct pf_src_node *sn = NULL;
5240 uint16_t ip_len, ip_off;
5242 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5243 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5246 if ((pd->pf_mtag == NULL &&
5247 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5248 pd->pf_mtag->routed++ > 3) {
5254 if (r->rt == PF_DUPTO) {
5255 if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
5261 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5269 ip = mtod(m0, struct ip *);
5271 bzero(&dst, sizeof(dst));
5272 dst.sin_family = AF_INET;
5273 dst.sin_len = sizeof(dst);
5274 dst.sin_addr = ip->ip_dst;
5276 if (r->rt == PF_FASTROUTE) {
5281 rt = rtalloc1_fib(sintosa(&dst), 0, 0, M_GETFIB(m0));
5283 KMOD_IPSTAT_INC(ips_noroute);
5284 error = EHOSTUNREACH;
5289 counter_u64_add(rt->rt_pksent, 1);
5291 if (rt->rt_flags & RTF_GATEWAY)
5292 bcopy(satosin(rt->rt_gateway), &dst, sizeof(dst));
5295 if (TAILQ_EMPTY(&r->rpool.list)) {
5296 DPFPRINTF(PF_DEBUG_URGENT,
5297 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5301 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
5303 if (!PF_AZERO(&naddr, AF_INET))
5304 dst.sin_addr.s_addr = naddr.v4.s_addr;
5305 ifp = r->rpool.cur->kif ?
5306 r->rpool.cur->kif->pfik_ifp : NULL;
5308 if (!PF_AZERO(&s->rt_addr, AF_INET))
5309 dst.sin_addr.s_addr =
5310 s->rt_addr.v4.s_addr;
5311 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5319 if (pf_test(PF_OUT, ifp, &m0, NULL) != PF_PASS)
5321 else if (m0 == NULL)
5323 if (m0->m_len < sizeof(struct ip)) {
5324 DPFPRINTF(PF_DEBUG_URGENT,
5325 ("%s: m0->m_len < sizeof(struct ip)\n", __func__));
5328 ip = mtod(m0, struct ip *);
5331 if (ifp->if_flags & IFF_LOOPBACK)
5332 m0->m_flags |= M_SKIP_FIREWALL;
5334 ip_len = ntohs(ip->ip_len);
5335 ip_off = ntohs(ip->ip_off);
5337 /* Copied from FreeBSD 10.0-CURRENT ip_output. */
5338 m0->m_pkthdr.csum_flags |= CSUM_IP;
5339 if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) {
5340 in_delayed_cksum(m0);
5341 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
5344 if (m0->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) {
5345 sctp_delayed_cksum(m0, (uint32_t)(ip->ip_hl << 2));
5346 m0->m_pkthdr.csum_flags &= ~CSUM_SCTP;
5351 * If small enough for interface, or the interface will take
5352 * care of the fragmentation for us, we can just send directly.
5354 if (ip_len <= ifp->if_mtu ||
5355 (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0 ||
5356 ((ip_off & IP_DF) == 0 && (ifp->if_hwassist & CSUM_FRAGMENT))) {
5358 if (m0->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) {
5359 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
5360 m0->m_pkthdr.csum_flags &= ~CSUM_IP;
5362 m_clrprotoflags(m0); /* Avoid confusing lower layers. */
5363 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5367 /* Balk when DF bit is set or the interface didn't support TSO. */
5368 if ((ip_off & IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5370 KMOD_IPSTAT_INC(ips_cantfrag);
5371 if (r->rt != PF_DUPTO) {
5372 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
5379 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist);
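/* ip_fragment() chained the fragments via m_nextpkt; send each one. */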
5383 for (; m0; m0 = m1) {
5385 m0->m_nextpkt = NULL;
5387 m_clrprotoflags(m0);
5388 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5394 KMOD_IPSTAT_INC(ips_fragmented);
5397 if (r->rt != PF_DUPTO)
5412 pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5413 struct pf_state *s, struct pf_pdesc *pd)
5416 struct sockaddr_in6 dst;
5417 struct ip6_hdr *ip6;
5418 struct ifnet *ifp = NULL;
5419 struct pf_addr naddr;
5420 struct pf_src_node *sn = NULL;
5422 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5423 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5426 if ((pd->pf_mtag == NULL &&
5427 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5428 pd->pf_mtag->routed++ > 3) {
5434 if (r->rt == PF_DUPTO) {
5435 if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
5441 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5449 ip6 = mtod(m0, struct ip6_hdr *);
5451 bzero(&dst, sizeof(dst));
5452 dst.sin6_family = AF_INET6;
5453 dst.sin6_len = sizeof(dst);
5454 dst.sin6_addr = ip6->ip6_dst;
5456 /* Cheat. XXX why only in the v6 case??? */
5457 if (r->rt == PF_FASTROUTE) {
5460 m0->m_flags |= M_SKIP_FIREWALL;
5461 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
5465 if (TAILQ_EMPTY(&r->rpool.list)) {
5466 DPFPRINTF(PF_DEBUG_URGENT,
5467 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5471 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
5473 if (!PF_AZERO(&naddr, AF_INET6))
5474 PF_ACPY((struct pf_addr *)&dst.sin6_addr,
5476 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
5478 if (!PF_AZERO(&s->rt_addr, AF_INET6))
5479 PF_ACPY((struct pf_addr *)&dst.sin6_addr,
5480 &s->rt_addr, AF_INET6);
5481 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5491 if (pf_test6(PF_OUT, ifp, &m0, NULL) != PF_PASS)
5493 else if (m0 == NULL)
5495 if (m0->m_len < sizeof(struct ip6_hdr)) {
5496 DPFPRINTF(PF_DEBUG_URGENT,
5497 ("%s: m0->m_len < sizeof(struct ip6_hdr)\n",
5501 ip6 = mtod(m0, struct ip6_hdr *);
5504 if (ifp->if_flags & IFF_LOOPBACK)
5505 m0->m_flags |= M_SKIP_FIREWALL;
5508 * If the packet is too large for the outgoing interface,
5509 * send back an icmp6 error.
5511 if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr))
5512 dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
5513 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu)
5514 nd6_output(ifp, ifp, m0, &dst, NULL);
5516 in6_ifstat_inc(ifp, ifs6_in_toobig);
5517 if (r->rt != PF_DUPTO)
5518 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
5524 if (r->rt != PF_DUPTO)
5538 * FreeBSD supports cksum offloads for the following drivers:
5539 * em(4), fxp(4), ixgb(4), lge(4), ndis(4), nge(4), re(4),
5540 * ti(4), txp(4), xl(4)
5542 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR :
5543 * network driver performed cksum including pseudo header, need to verify
5544 * csum_data
5545 * CSUM_DATA_VALID :
5546 * network driver performed cksum, but needs an additional pseudo header
5547 * cksum computation with the partial csum_data (i.e. lack of H/W support
5548 * for the pseudo header, for instance hme(4), sk(4) and possibly gem(4))
5550 * After validating the packet's cksum, set both the CSUM_DATA_VALID and
5551 * CSUM_PSEUDO_HDR flags to avoid recomputation of the cksum in upper
5552 * layers.
5553 * Also, set csum_data to 0xffff to force cksum validation.
5554 */
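/*
 * A driver with full offload support marks a received packet roughly
 * like this (sketch; cf. the flag setting at the end of this function):
 *
 *	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 *	m->m_pkthdr.csum_data = 0xffff;
 */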
5556 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af)
5562 if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
5564 if (m->m_pkthdr.len < off + len)
5569 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5570 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5571 sum = m->m_pkthdr.csum_data;
5573 ip = mtod(m, struct ip *);
5574 sum = in_pseudo(ip->ip_src.s_addr,
5575 ip->ip_dst.s_addr, htonl((u_short)len +
5576 m->m_pkthdr.csum_data + IPPROTO_TCP));
5583 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5584 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5585 sum = m->m_pkthdr.csum_data;
5587 ip = mtod(m, struct ip *);
5588 sum = in_pseudo(ip->ip_src.s_addr,
5589 ip->ip_dst.s_addr, htonl((u_short)len +
5590 m->m_pkthdr.csum_data + IPPROTO_UDP));
5598 case IPPROTO_ICMPV6:
5608 if (p == IPPROTO_ICMP) {
5613 sum = in_cksum(m, len);
5617 if (m->m_len < sizeof(struct ip))
5619 sum = in4_cksum(m, p, off, len);
5624 if (m->m_len < sizeof(struct ip6_hdr))
5626 sum = in6_cksum(m, p, off, len);
5637 KMOD_TCPSTAT_INC(tcps_rcvbadsum);
5642 KMOD_UDPSTAT_INC(udps_badsum);
5648 KMOD_ICMPSTAT_INC(icps_checksum);
5653 case IPPROTO_ICMPV6:
5655 KMOD_ICMP6STAT_INC(icp6s_checksum);
5662 if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
5663 m->m_pkthdr.csum_flags |=
5664 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
5665 m->m_pkthdr.csum_data = 0xffff;
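/*
 * Main IPv4 inspection entry point, invoked from the pfil(9) hooks for
 * both directions: normalize the packet, run the per-protocol state
 * lookup and, on a state miss, evaluate the full ruleset.
 */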
5674 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
5676 struct pfi_kif *kif;
5677 u_short action, reason = 0, log = 0;
5678 struct mbuf *m = *m0;
5679 struct ip *h = NULL;
5680 struct m_tag *ipfwtag;
5681 struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
5682 struct pf_state *s = NULL;
5683 struct pf_ruleset *ruleset = NULL;
5685 int off, dirndx, pqid = 0;
5689 if (!V_pf_status.running)
5692 memset(&pd, 0, sizeof(pd));
5694 kif = (struct pfi_kif *)ifp->if_pf_kif;
5697 DPFPRINTF(PF_DEBUG_URGENT,
5698 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
5701 if (kif->pfik_flags & PFI_IFLAG_SKIP)
5704 if (m->m_flags & M_SKIP_FIREWALL)
5707 pd.pf_mtag = pf_find_mtag(m);
5711 if (ip_divert_ptr != NULL &&
5712 ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
5713 struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1);
5714 if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) {
5715 if (pd.pf_mtag == NULL &&
5716 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
5720 pd.pf_mtag->flags |= PF_PACKET_LOOPED;
5721 m_tag_delete(m, ipfwtag);
5723 if (pd.pf_mtag && pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) {
5724 m->m_flags |= M_FASTFWD_OURS;
5725 pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
5727 } else if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
5728 /* We do IP header normalization and packet reassembly here */
5732 m = *m0; /* pf_normalize messes with m0 */
5733 h = mtod(m, struct ip *);
5735 off = h->ip_hl << 2;
5736 if (off < (int)sizeof(struct ip)) {
5738 REASON_SET(&reason, PFRES_SHORT);
5743 pd.src = (struct pf_addr *)&h->ip_src;
5744 pd.dst = (struct pf_addr *)&h->ip_dst;
5745 pd.sport = pd.dport = NULL;
5746 pd.ip_sum = &h->ip_sum;
5747 pd.proto_sum = NULL;
5750 pd.sidx = (dir == PF_IN) ? 0 : 1;
5751 pd.didx = (dir == PF_IN) ? 1 : 0;
5754 pd.tot_len = ntohs(h->ip_len);
5756 /* handle fragments that didn't get reassembled by normalization */
5757 if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
5758 action = pf_test_fragment(&r, dir, kif, m, h,
5769 if (!pf_pull_hdr(m, off, &th, sizeof(th),
5770 &action, &reason, AF_INET)) {
5771 log = action != PF_PASS;
5774 pd.p_len = pd.tot_len - off - (th.th_off << 2);
5775 if ((th.th_flags & TH_ACK) && pd.p_len == 0)
5777 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
5778 if (action == PF_DROP)
5780 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
5782 if (action == PF_PASS) {
5783 if (pfsync_update_state_ptr != NULL)
5784 pfsync_update_state_ptr(s);
5788 } else if (s == NULL)
5789 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5798 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
5799 &action, &reason, AF_INET)) {
5800 log = action != PF_PASS;
5803 if (uh.uh_dport == 0 ||
5804 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
5805 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
5807 REASON_SET(&reason, PFRES_SHORT);
5810 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
5811 if (action == PF_PASS) {
5812 if (pfsync_update_state_ptr != NULL)
5813 pfsync_update_state_ptr(s);
5817 } else if (s == NULL)
5818 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5823 case IPPROTO_ICMP: {
5827 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
5828 &action, &reason, AF_INET)) {
5829 log = action != PF_PASS;
5832 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
5834 if (action == PF_PASS) {
5835 if (pfsync_update_state_ptr != NULL)
5836 pfsync_update_state_ptr(s);
5840 } else if (s == NULL)
5841 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5847 case IPPROTO_ICMPV6: {
5849 DPFPRINTF(PF_DEBUG_MISC,
5850 ("pf: dropping IPv4 packet with ICMPv6 payload\n"));
5856 action = pf_test_state_other(&s, dir, kif, m, &pd);
5857 if (action == PF_PASS) {
5858 if (pfsync_update_state_ptr != NULL)
5859 pfsync_update_state_ptr(s);
5863 } else if (s == NULL)
5864 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5871 if (action == PF_PASS && h->ip_hl > 5 &&
5872 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
5874 REASON_SET(&reason, PFRES_IPOPTIONS);
5876 DPFPRINTF(PF_DEBUG_MISC,
5877 ("pf: dropping packet with ip options\n"));
5880 if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
5882 REASON_SET(&reason, PFRES_MEMORY);
5884 if (r->rtableid >= 0)
5885 M_SETFIB(m, r->rtableid);
5888 if (action == PF_PASS && r->qid) {
5889 if (pd.pf_mtag == NULL &&
5890 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
5892 REASON_SET(&reason, PFRES_MEMORY);
5894 if (pqid || (pd.tos & IPTOS_LOWDELAY))
5895 pd.pf_mtag->qid = r->pqid;
5897 pd.pf_mtag->qid = r->qid;
5898 /* add hints for ecn */
5899 pd.pf_mtag->hdr = h;
5905 * connections redirected to loopback should not match sockets
5906 * bound specifically to loopback due to security implications,
5907 * see tcp_input() and in_pcblookup_listen().
5909 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
5910 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
5911 (s->nat_rule.ptr->action == PF_RDR ||
5912 s->nat_rule.ptr->action == PF_BINAT) &&
5913 (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
5914 m->m_flags |= M_SKIP_FIREWALL;
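/*
 * A hypothetical rule that takes the branch above (sketch):
 *
 *	rdr on em0 inet proto tcp to any port 8080 -> 127.0.0.1 port 80
 */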
5916 if (action == PF_PASS && r->divert.port && ip_divert_ptr != NULL &&
5917 !PACKET_LOOPED(&pd)) {
5919 ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0,
5920 sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
5921 if (ipfwtag != NULL) {
5922 ((struct ipfw_rule_ref *)(ipfwtag+1))->info =
5923 ntohs(r->divert.port);
5924 ((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir;
5929 m_tag_prepend(m, ipfwtag);
5930 if (m->m_flags & M_FASTFWD_OURS) {
5931 if (pd.pf_mtag == NULL &&
5932 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
5934 REASON_SET(&reason, PFRES_MEMORY);
5936 DPFPRINTF(PF_DEBUG_MISC,
5937 ("pf: failed to allocate tag\n"));
5939 pd.pf_mtag->flags |= PF_FASTFWD_OURS_PRESENT;
5940 m->m_flags &= ~M_FASTFWD_OURS;
5942 ip_divert_ptr(*m0, dir == PF_IN ? DIR_IN : DIR_OUT);
5947 /* XXX: ipfw has the same behaviour! */
5949 REASON_SET(&reason, PFRES_MEMORY);
5951 DPFPRINTF(PF_DEBUG_MISC,
5952 ("pf: failed to allocate divert tag\n"));
5959 if (s != NULL && s->nat_rule.ptr != NULL &&
5960 s->nat_rule.ptr->log & PF_LOG_ALL)
5961 lr = s->nat_rule.ptr;
5964 PFLOG_PACKET(kif, m, AF_INET, dir, reason, lr, a, ruleset, &pd,
5968 kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
5969 kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
5971 if (action == PF_PASS || r->action == PF_DROP) {
5972 dirndx = (dir == PF_OUT);
5973 r->packets[dirndx]++;
5974 r->bytes[dirndx] += pd.tot_len;
5976 a->packets[dirndx]++;
5977 a->bytes[dirndx] += pd.tot_len;
5980 if (s->nat_rule.ptr != NULL) {
5981 s->nat_rule.ptr->packets[dirndx]++;
5982 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
5984 if (s->src_node != NULL) {
5985 s->src_node->packets[dirndx]++;
5986 s->src_node->bytes[dirndx] += pd.tot_len;
5988 if (s->nat_src_node != NULL) {
5989 s->nat_src_node->packets[dirndx]++;
5990 s->nat_src_node->bytes[dirndx] += pd.tot_len;
5992 dirndx = (dir == s->direction) ? 0 : 1;
5993 s->packets[dirndx]++;
5994 s->bytes[dirndx] += pd.tot_len;
5997 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
5998 if (nr != NULL && r == &V_pf_default_rule)
6000 if (tr->src.addr.type == PF_ADDR_TABLE)
6001 pfr_update_stats(tr->src.addr.p.tbl,
6002 (s == NULL) ? pd.src :
6003 &s->key[(s->direction == PF_IN)]->
6004 addr[(s->direction == PF_OUT)],
6005 pd.af, pd.tot_len, dir == PF_OUT,
6006 r->action == PF_PASS, tr->src.neg);
6007 if (tr->dst.addr.type == PF_ADDR_TABLE)
6008 pfr_update_stats(tr->dst.addr.p.tbl,
6009 (s == NULL) ? pd.dst :
6010 &s->key[(s->direction == PF_IN)]->
6011 addr[(s->direction == PF_IN)],
6012 pd.af, pd.tot_len, dir == PF_OUT,
6013 r->action == PF_PASS, tr->dst.neg);
6017 case PF_SYNPROXY_DROP:
6024 /* pf_route() returns unlocked. */
6026 pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);
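/*
 * IPv6 counterpart of pf_test(); additionally it must walk the chain
 * of extension headers to find the upper-layer protocol before the
 * per-protocol tests can run.
 */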
6040 pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
6042 struct pfi_kif *kif;
6043 u_short action, reason = 0, log = 0;
6044 struct mbuf *m = *m0, *n = NULL;
6045 struct ip6_hdr *h = NULL;
6046 struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
6047 struct pf_state *s = NULL;
6048 struct pf_ruleset *ruleset = NULL;
6050 int off, terminal = 0, dirndx, rh_cnt = 0;
6054 if (!V_pf_status.running)
6057 memset(&pd, 0, sizeof(pd));
6058 pd.pf_mtag = pf_find_mtag(m);
6060 if (pd.pf_mtag && pd.pf_mtag->flags & PF_TAG_GENERATED)
6063 kif = (struct pfi_kif *)ifp->if_pf_kif;
6065 DPFPRINTF(PF_DEBUG_URGENT,
6066 ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
6069 if (kif->pfik_flags & PFI_IFLAG_SKIP)
6072 if (m->m_flags & M_SKIP_FIREWALL)
6077 /* We do IP header normalization and packet reassembly here */
6078 if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
6082 m = *m0; /* pf_normalize messes with m0 */
6083 h = mtod(m, struct ip6_hdr *);
6087 * We do not support jumbograms yet.  If we keep going, a zero ip6_plen
6088 * will do something bad, so drop the packet for now.
6090 if (ntohs(h->ip6_plen) == 0) {
6092 REASON_SET(&reason, PFRES_NORM); /*XXX*/
6097 pd.src = (struct pf_addr *)&h->ip6_src;
6098 pd.dst = (struct pf_addr *)&h->ip6_dst;
6099 pd.sport = pd.dport = NULL;
6101 pd.proto_sum = NULL;
6103 pd.sidx = (dir == PF_IN) ? 0 : 1;
6104 pd.didx = (dir == PF_IN) ? 1 : 0;
6107 pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
6109 off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
6110 pd.proto = h->ip6_nxt;
6113 case IPPROTO_FRAGMENT:
6114 action = pf_test_fragment(&r, dir, kif, m, h,
6116 if (action == PF_DROP)
6117 REASON_SET(&reason, PFRES_FRAG);
6119 case IPPROTO_ROUTING: {
6120 struct ip6_rthdr rthdr;
6123 DPFPRINTF(PF_DEBUG_MISC,
6124 ("pf: IPv6 more than one rthdr\n"));
6126 REASON_SET(&reason, PFRES_IPOPTIONS);
6130 if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
6132 DPFPRINTF(PF_DEBUG_MISC,
6133 ("pf: IPv6 short rthdr\n"));
6135 REASON_SET(&reason, PFRES_SHORT);
6139 if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
6140 DPFPRINTF(PF_DEBUG_MISC,
6141 ("pf: IPv6 rthdr0\n"));
6143 REASON_SET(&reason, PFRES_IPOPTIONS);
6150 case IPPROTO_HOPOPTS:
6151 case IPPROTO_DSTOPTS: {
6152 /* get next header and header length */
6153 struct ip6_ext opt6;
6155 if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
6156 NULL, &reason, pd.af)) {
6157 DPFPRINTF(PF_DEBUG_MISC,
6158 ("pf: IPv6 short opt\n"));
6163 if (pd.proto == IPPROTO_AH)
6164 off += (opt6.ip6e_len + 2) * 4;
6166 off += (opt6.ip6e_len + 1) * 8;
6167 pd.proto = opt6.ip6e_nxt;
6168 /* go to the next header */
6175 } while (!terminal);
6177 /* if there's no routing header, use unmodified mbuf for checksumming */
6187 if (!pf_pull_hdr(m, off, &th, sizeof(th),
6188 &action, &reason, AF_INET6)) {
6189 log = action != PF_PASS;
6192 pd.p_len = pd.tot_len - off - (th.th_off << 2);
6193 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6194 if (action == PF_DROP)
6196 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6198 if (action == PF_PASS) {
6199 if (pfsync_update_state_ptr != NULL)
6200 pfsync_update_state_ptr(s);
6204 } else if (s == NULL)
6205 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6214 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
6215 &action, &reason, AF_INET6)) {
6216 log = action != PF_PASS;
6219 if (uh.uh_dport == 0 ||
6220 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
6221 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
6223 REASON_SET(&reason, PFRES_SHORT);
6226 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6227 if (action == PF_PASS) {
6228 if (pfsync_update_state_ptr != NULL)
6229 pfsync_update_state_ptr(s);
6233 } else if (s == NULL)
6234 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6239 case IPPROTO_ICMP: {
6241 DPFPRINTF(PF_DEBUG_MISC,
6242 ("pf: dropping IPv6 packet with ICMPv4 payload\n"));
6246 case IPPROTO_ICMPV6: {
6247 struct icmp6_hdr ih;
6250 if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
6251 &action, &reason, AF_INET6)) {
6252 log = action != PF_PASS;
6255 action = pf_test_state_icmp(&s, dir, kif,
6256 m, off, h, &pd, &reason);
6257 if (action == PF_PASS) {
6258 if (pfsync_update_state_ptr != NULL)
6259 pfsync_update_state_ptr(s);
6263 } else if (s == NULL)
6264 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6270 action = pf_test_state_other(&s, dir, kif, m, &pd);
6271 if (action == PF_PASS) {
6272 if (pfsync_update_state_ptr != NULL)
6273 pfsync_update_state_ptr(s);
6277 } else if (s == NULL)
6278 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6290 /* handle dangerous IPv6 extension headers. */
6291 if (action == PF_PASS && rh_cnt &&
6292 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6294 REASON_SET(&reason, PFRES_IPOPTIONS);
6296 DPFPRINTF(PF_DEBUG_MISC,
6297 ("pf: dropping packet with dangerous v6 headers\n"));
6300 if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
6302 REASON_SET(&reason, PFRES_MEMORY);
6304 if (r->rtableid >= 0)
6305 M_SETFIB(m, r->rtableid);
6308 if (action == PF_PASS && r->qid) {
6309 if (pd.pf_mtag == NULL &&
6310 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6312 REASON_SET(&reason, PFRES_MEMORY);
6314 if (pd.tos & IPTOS_LOWDELAY)
6315 pd.pf_mtag->qid = r->pqid;
6317 pd.pf_mtag->qid = r->qid;
6318 /* add hints for ecn */
6319 pd.pf_mtag->hdr = h;
6323 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6324 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6325 (s->nat_rule.ptr->action == PF_RDR ||
6326 s->nat_rule.ptr->action == PF_BINAT) &&
6327 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
6328 m->m_flags |= M_SKIP_FIREWALL;
6330 /* XXX: Anybody working on it?! */
6332 printf("pf: divert(4) is not supported for IPv6\n");
6337 if (s != NULL && s->nat_rule.ptr != NULL &&
6338 s->nat_rule.ptr->log & PF_LOG_ALL)
6339 lr = s->nat_rule.ptr;
6342 PFLOG_PACKET(kif, m, AF_INET6, dir, reason, lr, a, ruleset,
6346 kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
6347 kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
6349 if (action == PF_PASS || r->action == PF_DROP) {
6350 dirndx = (dir == PF_OUT);
6351 r->packets[dirndx]++;
6352 r->bytes[dirndx] += pd.tot_len;
6354 a->packets[dirndx]++;
6355 a->bytes[dirndx] += pd.tot_len;
6358 if (s->nat_rule.ptr != NULL) {
6359 s->nat_rule.ptr->packets[dirndx]++;
6360 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
6362 if (s->src_node != NULL) {
6363 s->src_node->packets[dirndx]++;
6364 s->src_node->bytes[dirndx] += pd.tot_len;
6366 if (s->nat_src_node != NULL) {
6367 s->nat_src_node->packets[dirndx]++;
6368 s->nat_src_node->bytes[dirndx] += pd.tot_len;
6370 dirndx = (dir == s->direction) ? 0 : 1;
6371 s->packets[dirndx]++;
6372 s->bytes[dirndx] += pd.tot_len;
6375 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6376 if (nr != NULL && r == &V_pf_default_rule)
6378 if (tr->src.addr.type == PF_ADDR_TABLE)
6379 pfr_update_stats(tr->src.addr.p.tbl,
6380 (s == NULL) ? pd.src :
6381 &s->key[(s->direction == PF_IN)]->addr[0],
6382 pd.af, pd.tot_len, dir == PF_OUT,
6383 r->action == PF_PASS, tr->src.neg);
6384 if (tr->dst.addr.type == PF_ADDR_TABLE)
6385 pfr_update_stats(tr->dst.addr.p.tbl,
6386 (s == NULL) ? pd.dst :
6387 &s->key[(s->direction == PF_IN)]->addr[1],
6388 pd.af, pd.tot_len, dir == PF_OUT,
6389 r->action == PF_PASS, tr->dst.neg);
6393 case PF_SYNPROXY_DROP:
6400 /* pf_route6() returns unlocked. */
6402 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);