/*-
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2008 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/random.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/ucred.h>

#include <net/if_types.h>
#include <net/route.h>
#include <net/radix_mpath.h>

#include <net/pfvar.h>
#include <net/pf_mtag.h>
#include <net/if_pflog.h>
#include <net/if_pfsync.h>

#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <netpfil/ipfw/ip_fw_private.h> /* XXX: only for DIR_IN/DIR_OUT */

#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>

#include <machine/in_cksum.h>
#include <security/mac/mac_framework.h>

#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x
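/*
 * Example (annotation, not part of the original source): DPFPRINTF() is
 * the conditional debug printf used throughout this file.  A typical
 * call wraps the printf arguments in an extra set of parentheses:
 *
 *	DPFPRINTF(PF_DEBUG_MISC, ("pf: state insert failed\n"));
 *
 * The message is emitted only when the administrator has raised the
 * debug level (e.g. via "pfctl -x misc") to at least the given level.
 */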
VNET_DEFINE(struct pf_altqqueue,	 pf_altqs[2]);
VNET_DEFINE(struct pf_palist,		 pf_pabuf);
VNET_DEFINE(struct pf_altqqueue *,	 pf_altqs_active);
VNET_DEFINE(struct pf_altqqueue *,	 pf_altqs_inactive);
VNET_DEFINE(struct pf_status,		 pf_status);

VNET_DEFINE(u_int32_t,			 ticket_altqs_active);
VNET_DEFINE(u_int32_t,			 ticket_altqs_inactive);
VNET_DEFINE(int,			 altqs_inactive_open);
VNET_DEFINE(u_int32_t,			 ticket_pabuf);

VNET_DEFINE(MD5_CTX,			 pf_tcp_secret_ctx);
#define	V_pf_tcp_secret_ctx		 VNET(pf_tcp_secret_ctx)
VNET_DEFINE(u_char,			 pf_tcp_secret[16]);
#define	V_pf_tcp_secret			 VNET(pf_tcp_secret)
VNET_DEFINE(int,			 pf_tcp_secret_init);
#define	V_pf_tcp_secret_init		 VNET(pf_tcp_secret_init)
VNET_DEFINE(int,			 pf_tcp_iss_off);
#define	V_pf_tcp_iss_off		 VNET(pf_tcp_iss_off)

/*
 * Queue for pf_intr() sends.
 */
static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations");
struct pf_send_entry {
	STAILQ_ENTRY(pf_send_entry)	pfse_next;
	struct mbuf			*pfse_m;
	enum {
		PFSE_IP,
		PFSE_IP6,
		PFSE_ICMP,
		PFSE_ICMP6,
	}				pfse_type;
	union {
		struct {
			int		type;
			int		code;
			int		mtu;
		} icmpopts;
	} u;
#define	pfse_icmp_type	u.icmpopts.type
#define	pfse_icmp_code	u.icmpopts.code
#define	pfse_icmp_mtu	u.icmpopts.mtu
};

STAILQ_HEAD(pf_send_head, pf_send_entry);
static VNET_DEFINE(struct pf_send_head, pf_sendqueue);
#define	V_pf_sendqueue	VNET(pf_sendqueue)

static struct mtx pf_sendqueue_mtx;
#define	PF_SENDQ_LOCK()		mtx_lock(&pf_sendqueue_mtx)
#define	PF_SENDQ_UNLOCK()	mtx_unlock(&pf_sendqueue_mtx)

/*
 * Queue for pf_overload_task() tasks.
 */
struct pf_overload_entry {
	SLIST_ENTRY(pf_overload_entry)	next;
	struct pf_addr			addr;
	sa_family_t			af;
	uint8_t				dir;
	struct pf_rule			*rule;
};

SLIST_HEAD(pf_overload_head, pf_overload_entry);
static VNET_DEFINE(struct pf_overload_head, pf_overloadqueue);
#define	V_pf_overloadqueue	VNET(pf_overloadqueue)
static VNET_DEFINE(struct task, pf_overloadtask);
#define	V_pf_overloadtask	VNET(pf_overloadtask)

static struct mtx pf_overloadqueue_mtx;
#define	PF_OVERLOADQ_LOCK()	mtx_lock(&pf_overloadqueue_mtx)
#define	PF_OVERLOADQ_UNLOCK()	mtx_unlock(&pf_overloadqueue_mtx)

VNET_DEFINE(struct pf_rulequeue, pf_unlinked_rules);
struct mtx pf_unlnkdrules_mtx;

static VNET_DEFINE(uma_zone_t,	pf_sources_z);
#define	V_pf_sources_z	VNET(pf_sources_z)
static VNET_DEFINE(uma_zone_t,	pf_mtag_z);
#define	V_pf_mtag_z	VNET(pf_mtag_z)
VNET_DEFINE(uma_zone_t,	 pf_state_z);
VNET_DEFINE(uma_zone_t,	 pf_state_key_z);

VNET_DEFINE(uint64_t, pf_stateid[MAXCPU]);
#define	PFID_CPUBITS	8
#define	PFID_CPUSHIFT	(sizeof(uint64_t) * NBBY - PFID_CPUBITS)
#define	PFID_CPUMASK	((uint64_t)((1 << PFID_CPUBITS) - 1) << PFID_CPUSHIFT)
#define	PFID_MAXID	(~PFID_CPUMASK)
CTASSERT((1 << PFID_CPUBITS) > MAXCPU);
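
/*
 * Illustration (annotation, not part of the original source): with
 * PFID_CPUBITS == 8 the 64-bit state ID is split into an 8-bit CPU
 * field in the top bits and a 56-bit per-CPU counter below it:
 *
 *	63            56 55                                          0
 *	+---------------+--------------------------------------------+
 *	|      cpu      |              per-CPU counter               |
 *	+---------------+--------------------------------------------+
 *
 * So for curcpu == 2 and counter value 0x2a the assembled ID is
 * ((uint64_t)2 << PFID_CPUSHIFT) | 0x2a == 0x020000000000002a.  The
 * CTASSERT above guarantees every possible CPU number fits into the
 * CPU field.
 */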

static void		 pf_src_tree_remove_state(struct pf_state *);
static void		 pf_init_threshold(struct pf_threshold *, u_int32_t,
			    u_int32_t);
static void		 pf_add_threshold(struct pf_threshold *);
static int		 pf_check_threshold(struct pf_threshold *);

static void		 pf_change_ap(struct pf_addr *, u_int16_t *,
			    u_int16_t *, u_int16_t *, struct pf_addr *,
			    u_int16_t, u_int8_t, sa_family_t);
static int		 pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
			    struct tcphdr *, struct pf_state_peer *);
static void		 pf_change_icmp(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, struct pf_addr *, u_int16_t,
			    u_int16_t *, u_int16_t *, u_int16_t *,
			    u_int16_t *, u_int8_t, sa_family_t);
static void		 pf_send_tcp(struct mbuf *,
			    const struct pf_rule *, sa_family_t,
			    const struct pf_addr *, const struct pf_addr *,
			    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
			    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
			    u_int16_t, struct ifnet *);
static void		 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
			    sa_family_t, struct pf_rule *);
static void		 pf_detach_state(struct pf_state *);
static int		 pf_state_key_attach(struct pf_state_key *,
			    struct pf_state_key *, struct pf_state *);
static void		 pf_state_key_detach(struct pf_state *, int);
static int		 pf_state_key_ctor(void *, int, void *, int);
static u_int32_t	 pf_tcp_iss(struct pf_pdesc *);
static int		 pf_test_rule(struct pf_rule **, struct pf_state **,
			    int, struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **, struct inpcb *);
static int		 pf_create_state(struct pf_rule *, struct pf_rule *,
			    struct pf_rule *, struct pf_pdesc *,
			    struct pf_src_node *, struct pf_state_key *,
			    struct pf_state_key *, struct mbuf *, int,
			    u_int16_t, u_int16_t, int *, struct pfi_kif *,
			    struct pf_state **, int, u_int16_t, u_int16_t,
			    int);
static int		 pf_test_fragment(struct pf_rule **, int,
			    struct pfi_kif *, struct mbuf *, void *,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **);
static int		 pf_tcp_track_full(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, u_short *, int *);
static int		 pf_tcp_track_sloppy(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pf_pdesc *, u_short *);
static int		 pf_test_state_tcp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_udp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *);
static int		 pf_test_state_icmp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_other(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
static u_int8_t		 pf_get_wscale(struct mbuf *, int, u_int16_t,
			    sa_family_t);
static u_int16_t	 pf_get_mss(struct mbuf *, int, u_int16_t,
			    sa_family_t);
static u_int16_t	 pf_calc_mss(struct pf_addr *, sa_family_t,
			    int, u_int16_t);
static void		 pf_set_rt_ifp(struct pf_state *,
			    struct pf_addr *);
static int		 pf_check_proto_cksum(struct mbuf *, int, int,
			    u_int8_t, sa_family_t);
static void		 pf_print_state_parts(struct pf_state *,
			    struct pf_state_key *, struct pf_state_key *);
static int		 pf_addr_wrap_neq(struct pf_addr_wrap *,
			    struct pf_addr_wrap *);
static struct pf_state	*pf_find_state(struct pfi_kif *,
			    struct pf_state_key_cmp *, u_int);
static int		 pf_src_connlimit(struct pf_state **);
static void		 pf_overload_task(void *c, int pending);
static int		 pf_insert_src_node(struct pf_src_node **,
			    struct pf_rule *, struct pf_addr *, sa_family_t);
static u_int		 pf_purge_expired_states(u_int, int);
static void		 pf_purge_unlinked_rules(void);
static int		 pf_mtag_init(void *, int, int);
static void		 pf_mtag_free(struct m_tag *);
#ifdef INET
static void		 pf_route(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
#endif /* INET */
#ifdef INET6
static void		 pf_change_a6(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, u_int8_t);
static void		 pf_route6(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
#endif /* INET6 */

int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);

VNET_DECLARE(int, pf_end_threads);

VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);

#define	PACKET_LOOPED(pd)	((pd)->pf_mtag &&			\
				 (pd)->pf_mtag->flags & PF_PACKET_LOOPED)

#define	STATE_LOOKUP(i, k, d, s, pd)					\
	do {								\
		(s) = pf_find_state((i), (k), (d));			\
		if ((s) == NULL)					\
			return (PF_DROP);				\
		if (PACKET_LOOPED(pd))					\
			return (PF_PASS);				\
		if ((d) == PF_OUT &&					\
		    (((s)->rule.ptr->rt == PF_ROUTETO &&		\
		    (s)->rule.ptr->direction == PF_OUT) ||		\
		    ((s)->rule.ptr->rt == PF_REPLYTO &&			\
		    (s)->rule.ptr->direction == PF_IN)) &&		\
		    (s)->rt_kif != NULL &&				\
		    (s)->rt_kif != (i))					\
			return (PF_PASS);				\
	} while (0)

#define	BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all

#define	STATE_INC_COUNTERS(s)					\
	do {							\
		s->rule.ptr->states_cur++;			\
		s->rule.ptr->states_tot++;			\
		if (s->anchor.ptr != NULL) {			\
			s->anchor.ptr->states_cur++;		\
			s->anchor.ptr->states_tot++;		\
		}						\
		if (s->nat_rule.ptr != NULL) {			\
			s->nat_rule.ptr->states_cur++;		\
			s->nat_rule.ptr->states_tot++;		\
		}						\
	} while (0)

#define	STATE_DEC_COUNTERS(s)					\
	do {							\
		if (s->nat_rule.ptr != NULL)			\
			s->nat_rule.ptr->states_cur--;		\
		if (s->anchor.ptr != NULL)			\
			s->anchor.ptr->states_cur--;		\
		s->rule.ptr->states_cur--;			\
	} while (0)

static MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
VNET_DEFINE(struct pf_idhash *, pf_idhash);
VNET_DEFINE(u_long, pf_hashmask);
VNET_DEFINE(struct pf_srchash *, pf_srchash);
VNET_DEFINE(u_long, pf_srchashmask);

SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW, 0, "pf(4)");

VNET_DEFINE(u_long, pf_hashsize);
#define	V_pf_hashsize	VNET(pf_hashsize)
SYSCTL_VNET_UINT(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN,
    &VNET_NAME(pf_hashsize), 0, "Size of pf(4) states hashtable");

VNET_DEFINE(u_long, pf_srchashsize);
#define	V_pf_srchashsize	VNET(pf_srchashsize)
SYSCTL_VNET_UINT(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN,
    &VNET_NAME(pf_srchashsize), 0, "Size of pf(4) source nodes hashtable");

VNET_DEFINE(void *, pf_swi_cookie);

VNET_DEFINE(uint32_t, pf_hashseed);
#define	V_pf_hashseed	VNET(pf_hashseed)

static __inline uint32_t
pf_hashkey(struct pf_state_key *sk)
{
	uint32_t h;

	h = jenkins_hash32((uint32_t *)sk,
	    sizeof(struct pf_state_key_cmp)/sizeof(uint32_t),
	    V_pf_hashseed);

	return (h & V_pf_hashmask);
}

static __inline uint32_t
pf_hashsrc(struct pf_addr *addr, sa_family_t af)
{
	uint32_t h;

	switch (af) {
	case AF_INET:
		h = jenkins_hash32((uint32_t *)&addr->v4,
		    sizeof(addr->v4)/sizeof(uint32_t), V_pf_hashseed);
		break;
	case AF_INET6:
		h = jenkins_hash32((uint32_t *)&addr->v6,
		    sizeof(addr->v6)/sizeof(uint32_t), V_pf_hashseed);
		break;
	default:
		panic("%s: unknown address family %u", __func__, af);
	}

	return (h & V_pf_srchashmask);
}

void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
	switch (af) {
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
	case AF_INET6:
		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];
		break;
	}
}

static void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = time_uptime;
}

static void
pf_add_threshold(struct pf_threshold *threshold)
{
	u_int32_t t = time_uptime, diff = t - threshold->last;

	if (diff >= threshold->seconds)
		threshold->count = 0;
	else
		threshold->count -= threshold->count * diff /
		    threshold->seconds;
	threshold->count += PF_THRESHOLD_MULT;
	threshold->last = t;
}

static int
pf_check_threshold(struct pf_threshold *threshold)
{
	return (threshold->count > threshold->limit);
}
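
/*
 * Worked example (annotation, not part of the original source): the
 * counter is kept in fixed point, PF_THRESHOLD_MULT units per
 * connection, and decays linearly over the configured interval.  With
 * a rule like "max-src-conn-rate 10/5" (limit 10, seconds 5),
 * threshold->limit is 10 * PF_THRESHOLD_MULT.  If the counter stands
 * at 8 connections' worth of units and 2 of the 5 seconds have
 * elapsed, pf_add_threshold() first decays it by 8 * 2 / 5 = 3.2
 * connections, then adds one connection (PF_THRESHOLD_MULT), leaving
 * roughly 5.8.  pf_check_threshold() then simply compares against the
 * scaled limit.
 */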

static int
pf_src_connlimit(struct pf_state **state)
{
	struct pf_overload_entry *pfoe;
	int bad = 0;

	PF_STATE_LOCK_ASSERT(*state);

	(*state)->src_node->conn++;
	(*state)->src.tcp_est = 1;
	pf_add_threshold(&(*state)->src_node->conn_rate);

	if ((*state)->rule.ptr->max_src_conn &&
	    (*state)->rule.ptr->max_src_conn <
	    (*state)->src_node->conn) {
		V_pf_status.lcounters[LCNT_SRCCONN]++;
		bad++;
	}

	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
		V_pf_status.lcounters[LCNT_SRCCONNRATE]++;
		bad++;
	}

	if (!bad)
		return (0);

	/* Kill this state. */
	(*state)->timeout = PFTM_PURGE;
	(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;

	if ((*state)->rule.ptr->overload_tbl == NULL)
		return (1);

	/* Schedule overloading and flushing task. */
	pfoe = malloc(sizeof(*pfoe), M_PFTEMP, M_NOWAIT);
	if (pfoe == NULL)
		return (1);	/* too bad :( */

	bcopy(&(*state)->src_node->addr, &pfoe->addr, sizeof(pfoe->addr));
	pfoe->af = (*state)->key[PF_SK_WIRE]->af;
	pfoe->rule = (*state)->rule.ptr;
	pfoe->dir = (*state)->direction;
	PF_OVERLOADQ_LOCK();
	SLIST_INSERT_HEAD(&V_pf_overloadqueue, pfoe, next);
	PF_OVERLOADQ_UNLOCK();
	taskqueue_enqueue(taskqueue_swi, &V_pf_overloadtask);

	return (1);
}
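
/*
 * Note (annotation, not part of the original source): pf_src_connlimit()
 * runs with the state lock held, so it cannot flush states or touch the
 * overload table directly.  Instead it queues a pf_overload_entry and
 * defers the heavy work to pf_overload_task() below, which runs from
 * the software-interrupt taskqueue without the state lock held.
 */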

static void
pf_overload_task(void *c, int pending)
{
	struct pf_overload_head queue;
	struct pfr_addr p;
	struct pf_overload_entry *pfoe, *pfoe1;
	uint32_t killed = 0;

	PF_OVERLOADQ_LOCK();
	queue = *(struct pf_overload_head *)c;
	SLIST_INIT((struct pf_overload_head *)c);
	PF_OVERLOADQ_UNLOCK();

	bzero(&p, sizeof(p));
	SLIST_FOREACH(pfoe, &queue, next) {
		V_pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("%s: blocking address ", __func__);
			pf_print_host(&pfoe->addr, 0, pfoe->af);
			printf("\n");
		}

		p.pfra_af = pfoe->af;
		switch (pfoe->af) {
#ifdef INET
		case AF_INET:
			p.pfra_net = 32;
			p.pfra_ip4addr = pfoe->addr.v4;
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			p.pfra_net = 128;
			p.pfra_ip6addr = pfoe->addr.v6;
			break;
#endif /* INET6 */
		}

		pfr_insert_kentry(pfoe->rule->overload_tbl, &p, time_second);
	}

	/*
	 * Remove those entries that don't need flushing.
	 */
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		if (pfoe->rule->flush == 0) {
			SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next);
			free(pfoe, M_PFTEMP);
		} else
			V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;

	/* If nothing to flush, return. */
	if (SLIST_EMPTY(&queue))
		return;

	for (int i = 0; i <= V_pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_state_key *sk;
		struct pf_state *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			sk = s->key[PF_SK_WIRE];
			SLIST_FOREACH(pfoe, &queue, next)
				if (sk->af == pfoe->af &&
				    ((pfoe->rule->flush & PF_FLUSH_GLOBAL) ||
				    pfoe->rule == s->rule.ptr) &&
				    ((pfoe->dir == PF_OUT &&
				    PF_AEQ(&pfoe->addr, &sk->addr[1], sk->af)) ||
				    (pfoe->dir == PF_IN &&
				    PF_AEQ(&pfoe->addr, &sk->addr[0], sk->af)))) {
					s->timeout = PFTM_PURGE;
					s->src.state = s->dst.state = TCPS_CLOSED;
					killed++;
				}
		}
		PF_HASHROW_UNLOCK(ih);
	}
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		free(pfoe, M_PFTEMP);
	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("%s: %u states killed", __func__, killed);
}

/*
 * Can return locked on failure, so that we can consistently
 * allocate and insert a new one.
 */
static struct pf_src_node *
pf_find_src_node(struct pf_addr *src, struct pf_rule *rule, sa_family_t af,
	int returnlocked)
{
	struct pf_srchash *sh;
	struct pf_src_node *n;

	V_pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;

	sh = &V_pf_srchash[pf_hashsrc(src, af)];
	PF_HASHROW_LOCK(sh);
	LIST_FOREACH(n, &sh->nodes, entry)
		if (n->rule.ptr == rule && n->af == af &&
		    ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) ||
		    (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0)))
			break;
	if (n != NULL || returnlocked == 0)
		PF_HASHROW_UNLOCK(sh);

	return (n);
}

static int
pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
    struct pf_addr *src, sa_family_t af)
{

	KASSERT((rule->rule_flag & PFRULE_RULESRCTRACK ||
	    rule->rpool.opts & PF_POOL_STICKYADDR),
	    ("%s for non-tracking rule %p", __func__, rule));

	if (*sn == NULL)
		*sn = pf_find_src_node(src, rule, af, 1);

	if (*sn == NULL) {
		struct pf_srchash *sh = &V_pf_srchash[pf_hashsrc(src, af)];

		PF_HASHROW_ASSERT(sh);

		if (!rule->max_src_nodes ||
		    rule->src_nodes < rule->max_src_nodes)
			(*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO);
		else
			V_pf_status.lcounters[LCNT_SRCNODES]++;
		if ((*sn) == NULL) {
			PF_HASHROW_UNLOCK(sh);
			return (-1);
		}

		pf_init_threshold(&(*sn)->conn_rate,
		    rule->max_src_conn_rate.limit,
		    rule->max_src_conn_rate.seconds);

		(*sn)->af = af;
		(*sn)->rule.ptr = rule;
		PF_ACPY(&(*sn)->addr, src, af);
		LIST_INSERT_HEAD(&sh->nodes, *sn, entry);
		(*sn)->creation = time_uptime;
		(*sn)->ruletype = rule->action;
		if ((*sn)->rule.ptr != NULL)
			(*sn)->rule.ptr->src_nodes++;
		PF_HASHROW_UNLOCK(sh);
		V_pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
		V_pf_status.src_nodes++;
	} else {
		if (rule->max_src_states &&
		    (*sn)->states >= rule->max_src_states) {
			V_pf_status.lcounters[LCNT_SRCSTATES]++;
			return (-1);
		}
	}
	return (0);
}

static void
pf_unlink_src_node_locked(struct pf_src_node *src)
{
#ifdef INVARIANTS
	struct pf_srchash *sh;

	sh = &V_pf_srchash[pf_hashsrc(&src->addr, src->af)];
	PF_HASHROW_ASSERT(sh);
#endif
	LIST_REMOVE(src, entry);
	if (src->rule.ptr)
		src->rule.ptr->src_nodes--;
	V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
	V_pf_status.src_nodes--;
}

static void
pf_unlink_src_node(struct pf_src_node *src)
{
	struct pf_srchash *sh;

	sh = &V_pf_srchash[pf_hashsrc(&src->addr, src->af)];
	PF_HASHROW_LOCK(sh);
	pf_unlink_src_node_locked(src);
	PF_HASHROW_UNLOCK(sh);
}

static void
pf_free_src_node(struct pf_src_node *sn)
{

	KASSERT(sn->states == 0, ("%s: %p has refs", __func__, sn));
	uma_zfree(V_pf_sources_z, sn);
}

static void
pf_free_src_nodes(struct pf_src_node_list *head)
{
	struct pf_src_node *sn, *tmp;

	LIST_FOREACH_SAFE(sn, head, entry, tmp) {
		pf_free_src_node(sn);
	}
}

/* Data storage structures initialization. */
void
pf_initialize()
{
	struct pf_keyhash *kh;
	struct pf_idhash *ih;
	struct pf_srchash *sh;
	u_int i;

	TUNABLE_ULONG_FETCH("net.pf.states_hashsize", &V_pf_hashsize);
	if (V_pf_hashsize == 0 || !powerof2(V_pf_hashsize))
		V_pf_hashsize = PF_HASHSIZ;
	TUNABLE_ULONG_FETCH("net.pf.source_nodes_hashsize", &V_pf_srchashsize);
	if (V_pf_srchashsize == 0 || !powerof2(V_pf_srchashsize))
		V_pf_srchashsize = PF_HASHSIZ / 4;
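
	/*
	 * Example (annotation, not part of the original source): both
	 * hash sizes are boot-time tunables and must be powers of two;
	 * anything else is silently replaced by the defaults above.
	 * They can be set from loader.conf(5), e.g.:
	 *
	 *	net.pf.states_hashsize="65536"
	 *	net.pf.source_nodes_hashsize="16384"
	 */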

	V_pf_hashseed = arc4random();

	/* States and state keys storage. */
	V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_state),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z;
	uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT);
	uma_zone_set_warning(V_pf_state_z, "PF states limit reached");

	V_pf_state_key_z = uma_zcreate("pf state keys",
	    sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	V_pf_keyhash = malloc(V_pf_hashsize * sizeof(struct pf_keyhash),
	    M_PFHASH, M_WAITOK | M_ZERO);
	V_pf_idhash = malloc(V_pf_hashsize * sizeof(struct pf_idhash),
	    M_PFHASH, M_WAITOK | M_ZERO);
	V_pf_hashmask = V_pf_hashsize - 1;
	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask;
	    i++, kh++, ih++) {
		mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK);
		mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF);
	}

	/* Source nodes. */
	V_pf_sources_z = uma_zcreate("pf source nodes",
	    sizeof(struct pf_src_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    0);
	V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z;
	uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT);
	uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached");
	V_pf_srchash = malloc(V_pf_srchashsize * sizeof(struct pf_srchash),
	    M_PFHASH, M_WAITOK|M_ZERO);
	V_pf_srchashmask = V_pf_srchashsize - 1;
	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++)
		mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);

	/* ALTQ */
	TAILQ_INIT(&V_pf_altqs[0]);
	TAILQ_INIT(&V_pf_altqs[1]);
	TAILQ_INIT(&V_pf_pabuf);
	V_pf_altqs_active = &V_pf_altqs[0];
	V_pf_altqs_inactive = &V_pf_altqs[1];

	/* Mbuf tags */
	V_pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) +
	    sizeof(struct pf_mtag), NULL, NULL, pf_mtag_init, NULL,
	    UMA_ALIGN_PTR, 0);

	/* Send & overload+flush queues. */
	STAILQ_INIT(&V_pf_sendqueue);
	SLIST_INIT(&V_pf_overloadqueue);
	TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, &V_pf_overloadqueue);
	mtx_init(&pf_sendqueue_mtx, "pf send queue", NULL, MTX_DEF);
	mtx_init(&pf_overloadqueue_mtx, "pf overload/flush queue", NULL,
	    MTX_DEF);

	/* Rules that are unlinked but may still be referenced. */
	TAILQ_INIT(&V_pf_unlinked_rules);
	mtx_init(&pf_unlnkdrules_mtx, "pf unlinked rules", NULL, MTX_DEF);
}

void
pf_cleanup()
{
	struct pf_keyhash *kh;
	struct pf_idhash *ih;
	struct pf_srchash *sh;
	struct pf_send_entry *pfse, *next;
	u_int i;

	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask;
	    i++, kh++, ih++) {
		KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
		    __func__));
		KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty",
		    __func__));
		mtx_destroy(&kh->lock);
		mtx_destroy(&ih->lock);
	}
	free(V_pf_keyhash, M_PFHASH);
	free(V_pf_idhash, M_PFHASH);

	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) {
		KASSERT(LIST_EMPTY(&sh->nodes),
		    ("%s: source node hash not empty", __func__));
		mtx_destroy(&sh->lock);
	}
	free(V_pf_srchash, M_PFHASH);

	STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) {
		m_freem(pfse->pfse_m);
		free(pfse, M_PFTEMP);
	}

	mtx_destroy(&pf_sendqueue_mtx);
	mtx_destroy(&pf_overloadqueue_mtx);
	mtx_destroy(&pf_unlnkdrules_mtx);

	uma_zdestroy(V_pf_mtag_z);
	uma_zdestroy(V_pf_sources_z);
	uma_zdestroy(V_pf_state_z);
	uma_zdestroy(V_pf_state_key_z);
}

static int
pf_mtag_init(void *mem, int size, int how)
{
	struct m_tag *t;

	t = (struct m_tag *)mem;
	t->m_tag_cookie = MTAG_ABI_COMPAT;
	t->m_tag_id = PACKET_TAG_PF;
	t->m_tag_len = sizeof(struct pf_mtag);
	t->m_tag_free = pf_mtag_free;

	return (0);
}

static void
pf_mtag_free(struct m_tag *t)
{

	uma_zfree(V_pf_mtag_z, t);
}

struct pf_mtag *
pf_get_mtag(struct mbuf *m)
{
	struct m_tag *mtag;

	if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL)
		return ((struct pf_mtag *)(mtag + 1));

	mtag = uma_zalloc(V_pf_mtag_z, M_NOWAIT);
	if (mtag == NULL)
		return (NULL);
	bzero(mtag + 1, sizeof(struct pf_mtag));
	m_tag_prepend(m, mtag);

	return ((struct pf_mtag *)(mtag + 1));
}

static int
pf_state_key_attach(struct pf_state_key *skw, struct pf_state_key *sks,
    struct pf_state *s)
{
	struct pf_keyhash *khs, *khw, *kh;
	struct pf_state_key *sk, *cur;
	struct pf_state *si, *olds = NULL;
	int idx;

	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
	KASSERT(s->key[PF_SK_WIRE] == NULL, ("%s: state has key", __func__));
	KASSERT(s->key[PF_SK_STACK] == NULL, ("%s: state has key", __func__));

	/*
	 * We need to lock hash slots of both keys. To avoid deadlock
	 * we always lock the slot with lower address first. Unlock order
	 * does not matter.
	 *
	 * We also need to lock ID hash slot before dropping key
	 * locks. On success we return with ID hash slot locked.
	 */

	if (skw == sks) {
		khs = khw = &V_pf_keyhash[pf_hashkey(skw)];
		PF_HASHROW_LOCK(khs);
	} else {
		khs = &V_pf_keyhash[pf_hashkey(sks)];
		khw = &V_pf_keyhash[pf_hashkey(skw)];
		if (khs == khw) {
			PF_HASHROW_LOCK(khs);
		} else if (khs < khw) {
			PF_HASHROW_LOCK(khs);
			PF_HASHROW_LOCK(khw);
		} else {
			PF_HASHROW_LOCK(khw);
			PF_HASHROW_LOCK(khs);
		}
	}

#define	KEYS_UNLOCK()	do {						\
	if (khs != khw) {						\
		PF_HASHROW_UNLOCK(khs);					\
		PF_HASHROW_UNLOCK(khw);					\
	} else								\
		PF_HASHROW_UNLOCK(khs);					\
} while (0)

	/*
	 * First run: start with wire key.
	 */
	sk = skw;
	kh = khw;
	idx = PF_SK_WIRE;

keyattach:
	LIST_FOREACH(cur, &kh->keys, entry)
		if (bcmp(cur, sk, sizeof(struct pf_state_key_cmp)) == 0)
			break;

	if (cur != NULL) {
		/* Key exists. Check for same kif, if none, add to key. */
		TAILQ_FOREACH(si, &cur->states[idx], key_list[idx]) {
			struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(si)];

			PF_HASHROW_LOCK(ih);
			if (si->kif == s->kif &&
			    si->direction == s->direction) {
				if (sk->proto == IPPROTO_TCP &&
				    si->src.state >= TCPS_FIN_WAIT_2 &&
				    si->dst.state >= TCPS_FIN_WAIT_2) {
					/*
					 * New state matches an old >FIN_WAIT_2
					 * state. We can't drop key hash locks,
					 * thus we can't unlink it properly.
					 *
					 * As a workaround we drop it into
					 * TCPS_CLOSED state, schedule purge
					 * ASAP and push it into the very end
					 * of the slot TAILQ, so that it won't
					 * conflict with our new state.
					 */
					si->src.state = si->dst.state =
					    TCPS_CLOSED;
					si->timeout = PFTM_PURGE;
					olds = si;
				} else {
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						printf("pf: %s key attach "
						    "failed on %s: ",
						    (idx == PF_SK_WIRE) ?
						    "wire" : "stack",
						    s->kif->pfik_name);
						pf_print_state_parts(s,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						printf(", existing: ");
						pf_print_state_parts(si,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						printf("\n");
					}
					PF_HASHROW_UNLOCK(ih);
					KEYS_UNLOCK();
					uma_zfree(V_pf_state_key_z, sk);
					if (idx == PF_SK_STACK)
						pf_detach_state(s);
					return (EEXIST); /* collision! */
				}
			}
			PF_HASHROW_UNLOCK(ih);
		}
		uma_zfree(V_pf_state_key_z, sk);
		s->key[idx] = cur;
	} else {
		LIST_INSERT_HEAD(&kh->keys, sk, entry);
		s->key[idx] = sk;
	}

stateattach:
	/* List is sorted, if-bound states before floating. */
	if (s->kif == V_pfi_all)
		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]);
	else
		TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]);

	if (olds) {
		TAILQ_REMOVE(&s->key[idx]->states[idx], olds, key_list[idx]);
		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], olds,
		    key_list[idx]);
		olds = NULL;
	}

	/*
	 * Attach done. Now decide whether (and how) a
	 * second key should be attached.
	 */
	if (sks == skw) {
		s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
		idx = PF_SK_STACK;
		sks = NULL;
		goto stateattach;
	} else if (sks != NULL) {
		/*
		 * Continue attaching with stack key.
		 */
		sk = sks;
		kh = khs;
		idx = PF_SK_STACK;
		sks = NULL;
		goto keyattach;
	}

	PF_STATE_LOCK(s);
	KEYS_UNLOCK();

	KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL,
	    ("%s failure", __func__));

	return (0);
}

static void
pf_detach_state(struct pf_state *s)
{
	struct pf_state_key *sks = s->key[PF_SK_STACK];
	struct pf_keyhash *kh;

	if (sks != NULL) {
		kh = &V_pf_keyhash[pf_hashkey(sks)];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_STACK] != NULL)
			pf_state_key_detach(s, PF_SK_STACK);
		/*
		 * If both point to same key, then we are done.
		 */
		if (sks == s->key[PF_SK_WIRE]) {
			pf_state_key_detach(s, PF_SK_WIRE);
			PF_HASHROW_UNLOCK(kh);
			return;
		}
		PF_HASHROW_UNLOCK(kh);
	}

	if (s->key[PF_SK_WIRE] != NULL) {
		kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_WIRE] != NULL)
			pf_state_key_detach(s, PF_SK_WIRE);
		PF_HASHROW_UNLOCK(kh);
	}
}

static void
pf_state_key_detach(struct pf_state *s, int idx)
{
	struct pf_state_key *sk = s->key[idx];
#ifdef INVARIANTS
	struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)];

	PF_HASHROW_ASSERT(kh);
#endif
	TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]);
	s->key[idx] = NULL;

	if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) {
		LIST_REMOVE(sk, entry);
		uma_zfree(V_pf_state_key_z, sk);
	}
}

static int
pf_state_key_ctor(void *mem, int size, void *arg, int flags)
{
	struct pf_state_key *sk = mem;

	bzero(sk, sizeof(struct pf_state_key_cmp));
	TAILQ_INIT(&sk->states[PF_SK_WIRE]);
	TAILQ_INIT(&sk->states[PF_SK_STACK]);

	return (0);
}

struct pf_state_key *
pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr,
    struct pf_addr *daddr, u_int16_t sport, u_int16_t dport)
{
	struct pf_state_key *sk;

	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
	if (sk == NULL)
		return (NULL);

	PF_ACPY(&sk->addr[pd->sidx], saddr, pd->af);
	PF_ACPY(&sk->addr[pd->didx], daddr, pd->af);
	sk->port[pd->sidx] = sport;
	sk->port[pd->didx] = dport;
	sk->proto = pd->proto;
	sk->af = pd->af;

	return (sk);
}

struct pf_state_key *
pf_state_key_clone(struct pf_state_key *orig)
{
	struct pf_state_key *sk;

	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
	if (sk == NULL)
		return (NULL);

	bcopy(orig, sk, sizeof(struct pf_state_key_cmp));

	return (sk);
}

int
pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
    struct pf_state_key *sks, struct pf_state *s)
{
	struct pf_idhash *ih;
	struct pf_state *cur;
	int error;

	KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]),
	    ("%s: sks not pristine", __func__));
	KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]),
	    ("%s: skw not pristine", __func__));
	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));

	s->kif = kif;

	if (s->id == 0 && s->creatorid == 0) {
		/* XXX: should be atomic, but probability of collision low */
		if ((s->id = V_pf_stateid[curcpu]++) == PFID_MAXID)
			V_pf_stateid[curcpu] = 1;
		s->id |= (uint64_t)curcpu << PFID_CPUSHIFT;
		s->id = htobe64(s->id);
		s->creatorid = V_pf_status.hostid;
	}

	/* Returns with ID locked on success. */
	if ((error = pf_state_key_attach(skw, sks, s)) != 0)
		return (error);

	ih = &V_pf_idhash[PF_IDHASH(s)];
	PF_HASHROW_ASSERT(ih);
	LIST_FOREACH(cur, &ih->states, entry)
		if (cur->id == s->id && cur->creatorid == s->creatorid)
			break;

	if (cur != NULL) {
		PF_HASHROW_UNLOCK(ih);
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("pf: state ID collision: "
			    "id: %016llx creatorid: %08x\n",
			    (unsigned long long)be64toh(s->id),
			    ntohl(s->creatorid));
		}
		pf_detach_state(s);
		return (EEXIST);
	}
	LIST_INSERT_HEAD(&ih->states, s, entry);
	/* One for keys, one for ID hash. */
	refcount_init(&s->refs, 2);

	V_pf_status.fcounters[FCNT_STATE_INSERT]++;
	if (pfsync_insert_state_ptr != NULL)
		pfsync_insert_state_ptr(s);

	/* Returns locked. */
	return (0);
}

/*
 * Find state by ID: returns with locked row on success.
 */
struct pf_state *
pf_find_state_byid(uint64_t id, uint32_t creatorid)
{
	struct pf_idhash *ih;
	struct pf_state *s;

	V_pf_status.fcounters[FCNT_STATE_SEARCH]++;

	ih = &V_pf_idhash[(be64toh(id) % (V_pf_hashmask + 1))];

	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(s, &ih->states, entry)
		if (s->id == id && s->creatorid == creatorid)
			break;

	if (s == NULL)
		PF_HASHROW_UNLOCK(ih);

	return (s);
}

/*
 * Find state by key.
 * Returns with ID hash slot locked on success.
 */
static struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
{
	struct pf_keyhash *kh;
	struct pf_state_key *sk;
	struct pf_state *s;
	int idx;

	V_pf_status.fcounters[FCNT_STATE_SEARCH]++;

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
			break;
	if (sk == NULL) {
		PF_HASHROW_UNLOCK(kh);
		return (NULL);
	}

	idx = (dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK);

	/* List is sorted, if-bound states before floating ones. */
	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx])
		if (s->kif == V_pfi_all || s->kif == kif) {
			PF_STATE_LOCK(s);
			PF_HASHROW_UNLOCK(kh);
			if (s->timeout >= PFTM_MAX) {
				/*
				 * State is either being processed by
				 * pf_unlink_state() in another thread, or
				 * is scheduled for immediate expiry.
				 */
				PF_STATE_UNLOCK(s);
				return (NULL);
			}
			return (s);
		}
	PF_HASHROW_UNLOCK(kh);

	return (NULL);
}

struct pf_state *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
	struct pf_keyhash *kh;
	struct pf_state_key *sk;
	struct pf_state *s, *ret = NULL;
	int idx, inout = 0;

	V_pf_status.fcounters[FCNT_STATE_SEARCH]++;

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
			break;
	if (sk == NULL) {
		PF_HASHROW_UNLOCK(kh);
		return (NULL);
	}
	switch (dir) {
	case PF_IN:
		idx = PF_SK_WIRE;
		break;
	case PF_OUT:
		idx = PF_SK_STACK;
		break;
	case PF_IN | PF_OUT:
		idx = PF_SK_WIRE;
		inout = 1;
		break;
	default:
		panic("%s: dir %u", __func__, dir);
	}
second_run:
	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) {
		if (more == NULL) {
			PF_HASHROW_UNLOCK(kh);
			return (s);
		}

		if (ret)
			(*more)++;
		else
			ret = s;
	}
	if (inout == 1) {
		inout = 0;
		idx = PF_SK_STACK;
		goto second_run;
	}
	PF_HASHROW_UNLOCK(kh);

	return (ret);
}

/* END state table stuff */

static void
pf_send(struct pf_send_entry *pfse)
{

	PF_SENDQ_LOCK();
	STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next);
	PF_SENDQ_UNLOCK();
	swi_sched(V_pf_swi_cookie, 0);
}

void
pf_intr(void *v)
{
	struct pf_send_head queue;
	struct pf_send_entry *pfse, *next;

	CURVNET_SET((struct vnet *)v);

	PF_SENDQ_LOCK();
	queue = V_pf_sendqueue;
	STAILQ_INIT(&V_pf_sendqueue);
	PF_SENDQ_UNLOCK();

	STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) {
		switch (pfse->pfse_type) {
#ifdef INET
		case PFSE_IP:
			ip_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL);
			break;
		case PFSE_ICMP:
			icmp_error(pfse->pfse_m, pfse->pfse_icmp_type,
			    pfse->pfse_icmp_code, 0, pfse->pfse_icmp_mtu);
			break;
#endif /* INET */
#ifdef INET6
		case PFSE_IP6:
			ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL,
			    NULL);
			break;
		case PFSE_ICMP6:
			icmp6_error(pfse->pfse_m, pfse->pfse_icmp_type,
			    pfse->pfse_icmp_code, pfse->pfse_icmp_mtu);
			break;
#endif /* INET6 */
		default:
			panic("%s: unknown type", __func__);
		}
		free(pfse, M_PFTEMP);
	}
	CURVNET_RESTORE();
}

void
pf_purge_thread(void *v)
{
	u_int idx = 0;

	CURVNET_SET((struct vnet *)v);

	for (;;) {
		PF_RULES_RLOCK();
		rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftm", hz / 10);

		if (V_pf_end_threads) {
			/*
			 * To clean up all kifs and rules we need
			 * two runs: first one clears reference flags,
			 * then pf_purge_expired_states() doesn't
			 * raise them, and then second run frees.
			 */
			PF_RULES_RUNLOCK();
			pf_purge_unlinked_rules();
			pfi_kif_purge();

			/*
			 * Now purge everything.
			 */
			pf_purge_expired_states(0, V_pf_hashmask);
			pf_purge_expired_fragments();
			pf_purge_expired_src_nodes();

			/*
			 * Now all kifs & rules should be unreferenced,
			 * thus should be successfully freed.
			 */
			pf_purge_unlinked_rules();
			pfi_kif_purge();

			/*
			 * Announce success and exit.
			 */
			PF_RULES_RLOCK();
			V_pf_end_threads++;
			PF_RULES_RUNLOCK();
			wakeup(pf_purge_thread);
			kproc_exit(0);
		}
		PF_RULES_RUNLOCK();

		/* Process 1/interval fraction of the state table every run. */
		idx = pf_purge_expired_states(idx, V_pf_hashmask /
		    (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));
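
		/*
		 * Worked example (annotation, not part of the original
		 * source): the thread wakes 10 times per second (hz / 10
		 * above).  With the default PFTM_INTERVAL of 10 seconds
		 * that is 100 runs per interval, so each run scans about
		 * V_pf_hashmask / 100 ID hash rows and the whole state
		 * table is covered roughly once per PFTM_INTERVAL.
		 */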

		/* Purge other expired types every PFTM_INTERVAL seconds. */
		if (idx == 0) {
			/*
			 * Order is important:
			 * - states and src nodes reference rules
			 * - states and rules reference kifs
			 */
			pf_purge_expired_fragments();
			pf_purge_expired_src_nodes();
			pf_purge_unlinked_rules();
			pfi_kif_purge();
		}
	}
	/* not reached */
	CURVNET_RESTORE();
}

u_int32_t
pf_state_expires(const struct pf_state *state)
{
	u_int32_t timeout;
	u_int32_t start;
	u_int32_t end;
	u_int32_t states;

	/* handle all PFTM_* > PFTM_MAX here */
	if (state->timeout == PFTM_PURGE)
		return (time_uptime);
	KASSERT(state->timeout != PFTM_UNLINKED,
	    ("pf_state_expires: timeout == PFTM_UNLINKED"));
	KASSERT((state->timeout < PFTM_MAX),
	    ("pf_state_expires: timeout > PFTM_MAX"));
	timeout = state->rule.ptr->timeout[state->timeout];
	if (!timeout)
		timeout = V_pf_default_rule.timeout[state->timeout];
	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
	if (start) {
		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
		states = state->rule.ptr->states_cur;	/* XXXGL */
	} else {
		start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
		end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
		states = V_pf_status.states;
	}
	if (end && states > start && start < end) {
		if (states < end)
			return (state->expire + timeout * (end - states) /
			    (end - start));
		else
			return (time_uptime);
	}
	return (state->expire + timeout);
}
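
/*
 * Worked example (annotation, not part of the original source): with
 * adaptive scaling such as adaptive.start = 6000, adaptive.end = 12000
 * and a base TCP timeout of 24 hours, a state table holding 9000
 * entries scales the timeout by (end - states) / (end - start) =
 * 3000 / 6000, so the state expires after 12 instead of 24 hours.
 * Once "states" reaches "end" the function returns time_uptime and the
 * state is purged immediately.
 */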

void
pf_purge_expired_src_nodes()
{
	struct pf_src_node_list freelist;
	struct pf_srchash *sh;
	struct pf_src_node *cur, *next;
	int i;

	LIST_INIT(&freelist);
	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) {
		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next)
			if (cur->states == 0 && cur->expire <= time_uptime) {
				pf_unlink_src_node_locked(cur);
				LIST_INSERT_HEAD(&freelist, cur, entry);
			} else if (cur->rule.ptr != NULL)
				cur->rule.ptr->rule_flag |= PFRULE_REFS;
		PF_HASHROW_UNLOCK(sh);
	}

	pf_free_src_nodes(&freelist);
}

static void
pf_src_tree_remove_state(struct pf_state *s)
{
	u_int32_t timeout;

	if (s->src_node != NULL) {
		if (s->src.tcp_est)
			--s->src_node->conn;
		if (--s->src_node->states == 0) {
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!timeout)
				timeout =
				    V_pf_default_rule.timeout[PFTM_SRC_NODE];
			s->src_node->expire = time_uptime + timeout;
		}
	}
	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
		if (--s->nat_src_node->states == 0) {
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!timeout)
				timeout =
				    V_pf_default_rule.timeout[PFTM_SRC_NODE];
			s->nat_src_node->expire = time_uptime + timeout;
		}
	}
	s->src_node = s->nat_src_node = NULL;
}

/*
 * Unlink and potentially free a state. Function may be
 * called with ID hash row locked, but always returns
 * unlocked, since it needs to go through key hash locking.
 */
int
pf_unlink_state(struct pf_state *s, u_int flags)
{
	struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)];

	if ((flags & PF_ENTER_LOCKED) == 0)
		PF_HASHROW_LOCK(ih);
	else
		PF_HASHROW_ASSERT(ih);

	if (s->timeout == PFTM_UNLINKED) {
		/*
		 * State is being processed
		 * by pf_unlink_state() in
		 * another thread.
		 */
		PF_HASHROW_UNLOCK(ih);
		return (0);	/* XXXGL: undefined actually */
	}

	if (s->src.state == PF_TCPS_PROXY_DST) {
		/* XXX wire key the right one? */
		pf_send_tcp(NULL, s->rule.ptr, s->key[PF_SK_WIRE]->af,
		    &s->key[PF_SK_WIRE]->addr[1],
		    &s->key[PF_SK_WIRE]->addr[0],
		    s->key[PF_SK_WIRE]->port[1],
		    s->key[PF_SK_WIRE]->port[0],
		    s->src.seqhi, s->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, s->tag, NULL);
	}

	LIST_REMOVE(s, entry);
	pf_src_tree_remove_state(s);

	if (pfsync_delete_state_ptr != NULL)
		pfsync_delete_state_ptr(s);

	--s->rule.ptr->states_cur;
	if (s->nat_rule.ptr != NULL)
		--s->nat_rule.ptr->states_cur;
	if (s->anchor.ptr != NULL)
		--s->anchor.ptr->states_cur;

	s->timeout = PFTM_UNLINKED;

	PF_HASHROW_UNLOCK(ih);

	pf_detach_state(s);
	refcount_release(&s->refs);

	return (pf_release_state(s));
}

void
pf_free_state(struct pf_state *cur)
{

	KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
	KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
	    cur->timeout));

	pf_normalize_tcp_cleanup(cur);
	uma_zfree(V_pf_state_z, cur);
	V_pf_status.fcounters[FCNT_STATE_REMOVALS]++;
}

/*
 * Called only from pf_purge_thread(), thus serialized.
 */
static u_int
pf_purge_expired_states(u_int i, int maxcheck)
{
	struct pf_idhash *ih;
	struct pf_state *s;

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	/*
	 * Go through hash and unlink states that expire now.
	 */
	while (maxcheck > 0) {

		ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (pf_state_expires(s) <= time_uptime) {
				V_pf_status.states -=
				    pf_unlink_state(s, PF_ENTER_LOCKED);
				goto relock;
			}
			s->rule.ptr->rule_flag |= PFRULE_REFS;
			if (s->nat_rule.ptr != NULL)
				s->nat_rule.ptr->rule_flag |= PFRULE_REFS;
			if (s->anchor.ptr != NULL)
				s->anchor.ptr->rule_flag |= PFRULE_REFS;
			s->kif->pfik_flags |= PFI_IFLAG_REFS;
			if (s->rt_kif)
				s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
		}
		PF_HASHROW_UNLOCK(ih);

		/* Return when we hit end of hash. */
		if (++i > V_pf_hashmask) {
			V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
			return (0);
		}

		maxcheck--;
	}

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	return (i);
}

static void
pf_purge_unlinked_rules()
{
	struct pf_rulequeue tmpq;
	struct pf_rule *r, *r1;

	/*
	 * If we have an overload task pending, then we'd
	 * better skip purging this time. There is a tiny
	 * probability that an overload task references
	 * an already unlinked rule.
	 */
	PF_OVERLOADQ_LOCK();
	if (!SLIST_EMPTY(&V_pf_overloadqueue)) {
		PF_OVERLOADQ_UNLOCK();
		return;
	}
	PF_OVERLOADQ_UNLOCK();

	/*
	 * Do naive mark-and-sweep garbage collecting of old rules.
	 * Reference flag is raised by pf_purge_expired_states()
	 * and pf_purge_expired_src_nodes().
	 *
	 * To avoid a LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK,
	 * use a temporary queue.
	 */
	TAILQ_INIT(&tmpq);
	PF_UNLNKDRULES_LOCK();
	TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) {
		if (!(r->rule_flag & PFRULE_REFS)) {
			TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries);
			TAILQ_INSERT_TAIL(&tmpq, r, entries);
		} else
			r->rule_flag &= ~PFRULE_REFS;
	}
	PF_UNLNKDRULES_UNLOCK();

	if (!TAILQ_EMPTY(&tmpq)) {
		PF_RULES_WLOCK();
		TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) {
			TAILQ_REMOVE(&tmpq, r, entries);
			pf_free_rule(r);
		}
		PF_RULES_WUNLOCK();
	}
}

void
pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t a = ntohl(addr->addr32[0]);
		printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
		    (a>>8)&255, a&255);
		if (p) {
			p = ntohs(p);
			printf(":%u", p);
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		u_int16_t b;
		u_int8_t i, curstart, curend, maxstart, maxend;
		curstart = curend = maxstart = maxend = 255;
		for (i = 0; i < 8; i++) {
			if (!addr->addr16[i]) {
				if (curstart == 255)
					curstart = i;
				curend = i;
			} else {
				if ((curend - curstart) >
				    (maxend - maxstart)) {
					maxstart = curstart;
					maxend = curend;
				}
				curstart = curend = 255;
			}
		}
		if ((curend - curstart) >
		    (maxend - maxstart)) {
			maxstart = curstart;
			maxend = curend;
		}
		for (i = 0; i < 8; i++) {
			if (i >= maxstart && i <= maxend) {
				if (i == 0)
					printf(":");
				if (i == maxend)
					printf(":");
			} else {
				b = ntohs(addr->addr16[i]);
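
/*
 * Note on the loop above (annotation, not part of the original source):
 * it implements the canonical "::"-style shortening by tracking the
 * longest run of zero 16-bit groups (maxstart..maxend) and skipping it
 * while printing.  For example fe80:0:0:0:0:0:0:1 has its longest zero
 * run in groups 1..6 and is printed as fe80::1.
 */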

void
pf_print_state(struct pf_state *s)
{
	pf_print_state_parts(s, NULL, NULL);
}

static void
pf_print_state_parts(struct pf_state *s,
    struct pf_state_key *skwp, struct pf_state_key *sksp)
{
	struct pf_state_key *skw, *sks;
	u_int8_t proto, dir;

	/* Do our best to fill these, but they're skipped if NULL */
	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
	proto = skw ? skw->proto : (sks ? sks->proto : 0);
	dir = s ? s->direction : 0;

	switch (proto) {
	case IPPROTO_IPV4:
		printf("IPv4");
		break;
	case IPPROTO_IPV6:
		printf("IPv6");
		break;
	case IPPROTO_TCP:
		printf("TCP");
		break;
	case IPPROTO_UDP:
		printf("UDP");
		break;
	case IPPROTO_ICMP:
		printf("ICMP");
		break;
	case IPPROTO_ICMPV6:
		printf("ICMPv6");
		break;
	default:
		printf("%u", skw->proto);
		break;
	}
	switch (dir) {
	case PF_IN:
		printf(" in");
		break;
	case PF_OUT:
		printf(" out");
		break;
	}
	if (skw) {
		printf(" wire: ");
		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
		printf(" ");
		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
	}
	if (sks) {
		printf(" stack: ");
		if (sks != skw) {
			pf_print_host(&sks->addr[0], sks->port[0], sks->af);
			printf(" ");
			pf_print_host(&sks->addr[1], sks->port[1], sks->af);
		} else
			printf("-");
	}
	if (s) {
		if (proto == IPPROTO_TCP) {
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->src.seqlo, s->src.seqhi,
			    s->src.max_win, s->src.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->src.wscale & PF_WSCALE_MASK);
			printf("]");
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->dst.seqlo, s->dst.seqhi,
			    s->dst.max_win, s->dst.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->dst.wscale & PF_WSCALE_MASK);
			printf("]");
		}
		printf(" %u:%u", s->src.state, s->dst.state);
	}
}

void
pf_print_flags(u_int8_t f)
{
	if (f)
		printf(" ");
	if (f & TH_FIN)
		printf("F");
	if (f & TH_SYN)
		printf("S");
	if (f & TH_RST)
		printf("R");
	if (f & TH_PUSH)
		printf("P");
	if (f & TH_ACK)
		printf("A");
	if (f & TH_URG)
		printf("U");
	if (f & TH_ECE)
		printf("E");
	if (f & TH_CWR)
		printf("W");
}

#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {

		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
		if (cur->af != prev->af)
			PF_SET_SKIP_STEPS(PF_SKIP_AF);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
		if (cur->src.neg != prev->src.neg ||
		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
		if (cur->src.port[0] != prev->src.port[0] ||
		    cur->src.port[1] != prev->src.port[1] ||
		    cur->src.port_op != prev->src.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
		if (cur->dst.neg != prev->dst.neg ||
		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
		if (cur->dst.port[0] != prev->dst.port[0] ||
		    cur->dst.port[1] != prev->dst.port[1] ||
		    cur->dst.port_op != prev->dst.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}
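
/*
 * Illustration (annotation, not part of the original source): skip
 * steps let the rule evaluator jump over consecutive rules that share
 * a criterion.  Given a hypothetical ruleset like
 *
 *	pass in on em0 proto tcp from any to any port 22
 *	pass in on em0 proto tcp from any to any port 80
 *	pass in on fxp0 proto udp ...
 *
 * both em0 rules carry skip[PF_SKIP_IFP] pointing at the fxp0 rule, so
 * a packet arriving on fxp0 tests the interface criterion only once
 * instead of once per rule.
 */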

static int
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
{
	if (aw1->type != aw2->type)
		return (1);
	switch (aw1->type) {
	case PF_ADDR_ADDRMASK:
	case PF_ADDR_RANGE:
		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
			return (1);
		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
			return (1);
		return (0);
	case PF_ADDR_DYNIFTL:
		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		return (0);
	case PF_ADDR_TABLE:
		return (aw1->p.tbl != aw2->p.tbl);
	default:
		printf("invalid address type: %d\n", aw1->type);
		return (1);
	}
}

u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
	u_int32_t l;

	if (udp && !cksum)
		return (0x0000);
	l = cksum + old - new;
	l = (l >> 16) + (l & 65535);
	l = l & 65535;
	if (udp && !l)
		return (0xFFFF);
	return (l);
}
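
/*
 * Note (annotation, not part of the original source): this is the
 * incremental Internet checksum update (cf. RFC 1624): when a 16-bit
 * field changes from "old" to "new", the new checksum is derived as
 * sum' = sum + old - new with the carries folded back in, instead of
 * recomputing the checksum over the whole packet.  The "udp" flag
 * preserves the special encoding where 0 means "no checksum" for UDP,
 * so a zero input is left alone and a zero result becomes 0xFFFF.
 */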

static void
pf_change_ap(struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc,
    struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af)
{
	struct pf_addr ao;
	u_int16_t po = *p;

	PF_ACPY(&ao, a, af);
	PF_ACPY(a, an, af);

	*p = pn;

	switch (af) {
#ifdef INET
	case AF_INET:
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    ao.addr16[0], an->addr16[0], 0),
		    ao.addr16[1], an->addr16[1], 0);
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    po, pn, u);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    ao.addr16[2], an->addr16[2], u),
		    ao.addr16[3], an->addr16[3], u),
		    ao.addr16[4], an->addr16[4], u),
		    ao.addr16[5], an->addr16[5], u),
		    ao.addr16[6], an->addr16[6], u),
		    ao.addr16[7], an->addr16[7], u),
		    po, pn, u);
		break;
#endif /* INET6 */
	}
}

/* Changes a u_int32_t. Uses a void * so there are no align restrictions */
void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
	u_int32_t ao;

	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));
	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
	    ao % 65536, an % 65536, u);
}

#ifdef INET6
static void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
	struct pf_addr ao;

	PF_ACPY(&ao, a, AF_INET6);
	PF_ACPY(a, an, AF_INET6);

	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(*c,
	    ao.addr16[0], an->addr16[0], u),
	    ao.addr16[1], an->addr16[1], u),
	    ao.addr16[2], an->addr16[2], u),
	    ao.addr16[3], an->addr16[3], u),
	    ao.addr16[4], an->addr16[4], u),
	    ao.addr16[5], an->addr16[5], u),
	    ao.addr16[6], an->addr16[6], u),
	    ao.addr16[7], an->addr16[7], u);
}
#endif /* INET6 */

static void
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
{
	struct pf_addr oia, ooa;

	PF_ACPY(&oia, ia, af);
	if (oa)
		PF_ACPY(&ooa, oa, af);

	/* Change inner protocol port, fix inner protocol checksum. */
	if (ip != NULL) {
		u_int16_t oip = *ip;
		u_int32_t opc;

		if (pc != NULL)
			opc = *pc;
		*ip = np;
		if (pc != NULL)
			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
		if (pc != NULL)
			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
	}
	/* Change inner ip address, fix inner ip and icmp checksums. */
	PF_ACPY(ia, na, af);
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t oh2c = *h2c;

		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], u),
		    oia.addr16[1], ia->addr16[1], u),
		    oia.addr16[2], ia->addr16[2], u),
		    oia.addr16[3], ia->addr16[3], u),
		    oia.addr16[4], ia->addr16[4], u),
		    oia.addr16[5], ia->addr16[5], u),
		    oia.addr16[6], ia->addr16[6], u),
		    oia.addr16[7], ia->addr16[7], u);
		break;
#endif /* INET6 */
	}
	/* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
	if (oa) {
		PF_ACPY(oa, na, af);
		switch (af) {
#ifdef INET
		case AF_INET:
			*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
			    ooa.addr16[0], oa->addr16[0], 0),
			    ooa.addr16[1], oa->addr16[1], 0);
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(*ic,
			    ooa.addr16[0], oa->addr16[0], u),
			    ooa.addr16[1], oa->addr16[1], u),
			    ooa.addr16[2], oa->addr16[2], u),
			    ooa.addr16[3], oa->addr16[3], u),
			    ooa.addr16[4], oa->addr16[4], u),
			    ooa.addr16[5], oa->addr16[5], u),
			    ooa.addr16[6], oa->addr16[6], u),
			    ooa.addr16[7], oa->addr16[7], u);
			break;
#endif /* INET6 */
		}
	}
}

/*
 * Need to modulate the sequence numbers in the TCP SACK option
 * (credits to Krzysztof Pfaff for report and patch)
 */
static int
pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *dst)
{
	int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
	u_int8_t opts[TCP_MAXOLEN], *opt = opts;
	int copyback = 0, i, olen;
	struct sackblk sack;

#define	TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
	if (hlen < TCPOLEN_SACKLEN ||
	    !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
		return 0;

	while (hlen >= TCPOLEN_SACKLEN) {
		olen = opt[1];
		switch (*opt) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
		case TCPOPT_NOP:
			opt++;
			hlen--;
			break;
		case TCPOPT_SACK:
			if (olen > hlen)
				olen = hlen;
			if (olen >= TCPOLEN_SACKLEN) {
				for (i = 2; i + TCPOLEN_SACK <= olen;
				    i += TCPOLEN_SACK) {
					memcpy(&sack, &opt[i], sizeof(sack));
					pf_change_a(&sack.start, &th->th_sum,
					    htonl(ntohl(sack.start) -
					    dst->seqdiff), 0);
					pf_change_a(&sack.end, &th->th_sum,
					    htonl(ntohl(sack.end) -
					    dst->seqdiff), 0);
					memcpy(&opt[i], &sack, sizeof(sack));
				}
				copyback = 1;
			}
			/* FALLTHROUGH */
		default:
			if (olen < 2)
				olen = 2;
			hlen -= olen;
			opt += olen;
		}
	}

	if (copyback)
		m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts);
	return (copyback);
}
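
/*
 * Note (annotation, not part of the original source): when a state uses
 * a sequence number modulator (seqdiff), the SACK blocks carried in TCP
 * options refer to the peer's original numbering and must be shifted by
 * the same delta as the sequence numbers themselves, or the peer would
 * discard them.  Each start/end pair is rewritten in place above and
 * th_sum is patched incrementally via pf_change_a().
 */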

static void
pf_send_tcp(struct mbuf *replyto, const struct pf_rule *r, sa_family_t af,
    const struct pf_addr *saddr, const struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
    u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
    u_int16_t rtag, struct ifnet *ifp)
{
	struct pf_send_entry *pfse;
	struct mbuf	*m;
	int		 len, tlen;
#ifdef INET
	struct ip	*h = NULL;
#endif /* INET */
#ifdef INET6
	struct ip6_hdr	*h6 = NULL;
#endif /* INET6 */
	struct tcphdr	*th;
	char		*opt;
	struct pf_mtag	*pf_mtag;

	len = 0;

	/* maximum segment size tcp option */
	tlen = sizeof(struct tcphdr);
	if (mss)
		tlen += 4;

	switch (af) {
#ifdef INET
	case AF_INET:
		len = sizeof(struct ip) + tlen;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		len = sizeof(struct ip6_hdr) + tlen;
		break;
#endif /* INET6 */
	default:
		panic("%s: unsupported af %d", __func__, af);
	}

	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
	if (pfse == NULL)
		return;
	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(pfse, M_PFTEMP);
		return;
	}
#ifdef MAC
	mac_netinet_firewall_send(m);
#endif
	if ((pf_mtag = pf_get_mtag(m)) == NULL) {
		free(pfse, M_PFTEMP);
		m_freem(m);
		return;
	}
	if (tag)
		m->m_flags |= M_SKIP_FIREWALL;
	pf_mtag->tag = rtag;

	if (r != NULL && r->rtableid >= 0)
		M_SETFIB(m, r->rtableid);

#ifdef ALTQ
	if (r != NULL && r->qid) {
		pf_mtag->qid = r->qid;

		/* add hints for ecn */
		pf_mtag->hdr = mtod(m, struct ip *);
	}
#endif /* ALTQ */
	m->m_data += max_linkhdr;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	bzero(m->m_data, len);
	switch (af) {
#ifdef INET
	case AF_INET:
		h = mtod(m, struct ip *);

		/* IP header fields included in the TCP checksum */
		h->ip_p = IPPROTO_TCP;
		h->ip_len = htons(tlen);
		h->ip_src.s_addr = saddr->v4.s_addr;
		h->ip_dst.s_addr = daddr->v4.s_addr;

		th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		h6 = mtod(m, struct ip6_hdr *);

		/* IP header fields included in the TCP checksum */
		h6->ip6_nxt = IPPROTO_TCP;
		h6->ip6_plen = htons(tlen);
		memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
		memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));

		th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
		break;
#endif /* INET6 */
	}

	/* TCP header */
	th->th_sport = sport;
	th->th_dport = dport;
	th->th_seq = htonl(seq);
	th->th_ack = htonl(ack);
	th->th_off = tlen >> 2;
	th->th_flags = flags;
	th->th_win = htons(win);

	if (mss) {
		opt = (char *)(th + 1);
		opt[0] = TCPOPT_MAXSEG;
		opt[1] = 4;
		mss = htons(mss);
		bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
	}

	switch (af) {
#ifdef INET
	case AF_INET:
		/* TCP checksum */
		th->th_sum = in_cksum(m, len);

		/* Finish the IP header */
		h->ip_v = 4;
		h->ip_hl = sizeof(*h) >> 2;
		h->ip_tos = IPTOS_LOWDELAY;
		h->ip_off = htons(V_path_mtu_discovery ? IP_DF : 0);
		h->ip_len = htons(len);
		h->ip_ttl = ttl ? ttl : V_ip_defttl;
		h->ip_sum = 0;

		pfse->pfse_type = PFSE_IP;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		/* TCP checksum */
		th->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr), tlen);

		h6->ip6_vfc |= IPV6_VERSION;
		h6->ip6_hlim = IPV6_DEFHLIM;

		pfse->pfse_type = PFSE_IP6;
		break;
#endif /* INET6 */
	}
	pfse->pfse_m = m;
	pf_send(pfse);
}

static void
pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
    struct pf_rule *r)
{
	struct pf_send_entry *pfse;
	struct mbuf *m0;
	struct pf_mtag *pf_mtag;

	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
	if (pfse == NULL)
		return;

	if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) {
		free(pfse, M_PFTEMP);
		return;
	}

	if ((pf_mtag = pf_get_mtag(m0)) == NULL) {
		free(pfse, M_PFTEMP);
		return;
	}

	m0->m_flags |= M_SKIP_FIREWALL;

	if (r->rtableid >= 0)
		M_SETFIB(m0, r->rtableid);

#ifdef ALTQ
	if (r->qid) {
		pf_mtag->qid = r->qid;
		/* add hints for ecn */
		pf_mtag->hdr = mtod(m0, struct ip *);
	}
#endif /* ALTQ */

	switch (af) {
#ifdef INET
	case AF_INET:
		pfse->pfse_type = PFSE_ICMP;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		pfse->pfse_type = PFSE_ICMP6;
		break;
#endif /* INET6 */
	}
	pfse->pfse_m = m0;
	pfse->pfse_icmp_type = type;
	pfse->pfse_icmp_code = code;
	pf_send(pfse);
}

/*
 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
 * If n is 0, they match if they are equal. If n is != 0, they match if they
 * are different.
 */
int
pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
    struct pf_addr *b, sa_family_t af)
{
	int match = 0;

	switch (af) {
#ifdef INET
	case AF_INET:
		if ((a->addr32[0] & m->addr32[0]) ==
		    (b->addr32[0] & m->addr32[0]))
			match++;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (((a->addr32[0] & m->addr32[0]) ==
		     (b->addr32[0] & m->addr32[0])) &&
		    ((a->addr32[1] & m->addr32[1]) ==
		     (b->addr32[1] & m->addr32[1])) &&
		    ((a->addr32[2] & m->addr32[2]) ==
		     (b->addr32[2] & m->addr32[2])) &&
		    ((a->addr32[3] & m->addr32[3]) ==
		     (b->addr32[3] & m->addr32[3])))
			match++;
		break;
#endif /* INET6 */
	}
	if (match) {
		if (n)
			return (0);
		else
			return (1);
	} else {
		if (n)
			return (1);
		else
			return (0);
	}
}

/*
 * Return 1 if b <= a <= e, otherwise return 0.
 */
int
pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
    struct pf_addr *a, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		if ((a->addr32[0] < b->addr32[0]) ||
		    (a->addr32[0] > e->addr32[0]))
			return (0);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		int i;

		/* check a >= b */
		for (i = 0; i < 4; ++i)
			if (a->addr32[i] > b->addr32[i])
				break;
			else if (a->addr32[i] < b->addr32[i])
				return (0);
		/* check a <= e */
		for (i = 0; i < 4; ++i)
			if (a->addr32[i] < e->addr32[i])
				break;
			else if (a->addr32[i] > e->addr32[i])
				return (0);
		break;
	}
#endif /* INET6 */
	}
	return (1);
}

static int
pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
{
	switch (op) {
	case PF_OP_IRG:
		return ((p > a1) && (p < a2));
	case PF_OP_XRG:
		return ((p < a1) || (p > a2));
	case PF_OP_RRG:
		return ((p >= a1) && (p <= a2));
	case PF_OP_EQ:
		return (p == a1);
	case PF_OP_NE:
		return (p != a1);
	case PF_OP_LT:
		return (p < a1);
	case PF_OP_LE:
		return (p <= a1);
	case PF_OP_GT:
		return (p > a1);
	case PF_OP_GE:
		return (p >= a1);
	}
	return (0); /* never reached */
}
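
/*
 * Illustration (annotation, not part of the original source): the
 * operator values correspond directly to pf.conf(5) syntax, e.g. for
 * ports:
 *
 *	port 1000 >< 2000	-> PF_OP_IRG, matches 1001..1999
 *	port 1000 <> 2000	-> PF_OP_XRG, matches p < 1000 or p > 2000
 *	port 1000:2000		-> PF_OP_RRG, matches 1000..2000 inclusive
 */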

int
pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
{
	NTOHS(a1);
	NTOHS(a2);
	NTOHS(p);
	return (pf_match(op, a1, a2, p));
}

static int
pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
{
	if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
		return (0);
	return (pf_match(op, a1, a2, u));
}

static int
pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
{
	if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
		return (0);
	return (pf_match(op, a1, a2, g));
}

int
pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag, int mtag)
{
	if (*tag == -1)
		*tag = mtag;

	return ((!r->match_tag_not && r->match_tag == *tag) ||
	    (r->match_tag_not && r->match_tag != *tag));
}

int
pf_tag_packet(struct mbuf *m, struct pf_pdesc *pd, int tag)
{

	KASSERT(tag > 0, ("%s: tag %d", __func__, tag));

	if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(m)) == NULL))
		return (ENOMEM);

	pd->pf_mtag->tag = tag;

	return (0);
}

#define	PF_ANCHOR_STACKSIZE	32
struct pf_anchor_stackframe {
	struct pf_ruleset	*rs;
	struct pf_rule		*r;	/* XXX: + match bit */
	struct pf_anchor	*child;
};

/*
 * XXX: We rely on malloc(9) returning pointer aligned addresses.
 */
#define	PF_ANCHORSTACK_MATCH	0x00000001
#define	PF_ANCHORSTACK_MASK	(PF_ANCHORSTACK_MATCH)

#define	PF_ANCHOR_MATCH(f)	((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH)
#define	PF_ANCHOR_RULE(f)	(struct pf_rule *)			\
				    ((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK)
#define	PF_ANCHOR_SET_MATCH(f)	do { (f)->r = (void *)			\
				    ((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH);	\
				} while (0)
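
/*
 * Illustration (annotation, not part of the original source): since
 * rule pointers come from malloc(9) they are at least pointer-aligned,
 * so their low bit is always zero and can be borrowed to remember
 * whether anything inside the anchor matched.  PF_ANCHOR_RULE() masks
 * the bit off before the pointer is dereferenced:
 *
 *	f->r = rule;			// low bit 0, no match recorded
 *	PF_ANCHOR_SET_MATCH(f);		// sets the low bit
 *	r = PF_ANCHOR_RULE(f);		// recovers the clean pointer
 */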
2592 pf_step_into_anchor(struct pf_anchor_stackframe *stack, int *depth,
2593 struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
2596 struct pf_anchor_stackframe *f;
2602 if (*depth >= PF_ANCHOR_STACKSIZE) {
2603 printf("%s: anchor stack overflow on %s\n",
2604 __func__, (*r)->anchor->name);
2605 *r = TAILQ_NEXT(*r, entries);
2607 } else if (*depth == 0 && a != NULL)
2609 f = stack + (*depth)++;
2612 if ((*r)->anchor_wildcard) {
2613 struct pf_anchor_node *parent = &(*r)->anchor->children;
2615 if ((f->child = RB_MIN(pf_anchor_node, parent)) == NULL) {
2619 *rs = &f->child->ruleset;
2622 *rs = &(*r)->anchor->ruleset;
2624 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2628 pf_step_out_of_anchor(struct pf_anchor_stackframe *stack, int *depth,
2629 struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
2632 struct pf_anchor_stackframe *f;
2641 f = stack + *depth - 1;
2642 fr = PF_ANCHOR_RULE(f);
2643 if (f->child != NULL) {
2644 struct pf_anchor_node *parent;
2647 * This block traverses through
2648 * a wildcard anchor.
2650 parent = &fr->anchor->children;
2651 if (match != NULL && *match) {
2653 * If any of "*" matched, then
2654 * "foo/ *" matched, mark frame
2657 PF_ANCHOR_SET_MATCH(f);
2660 f->child = RB_NEXT(pf_anchor_node, parent, f->child);
2661 if (f->child != NULL) {
2662 *rs = &f->child->ruleset;
2663 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2671 if (*depth == 0 && a != NULL)
2674 if (PF_ANCHOR_MATCH(f) || (match != NULL && *match))
2676 *r = TAILQ_NEXT(fr, entries);
2677 } while (*r == NULL);
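/*
 * Stepping out either advances to the next child of a wildcard
 * anchor or pops the frame and resumes at the rule following the
 * anchor call; a match recorded anywhere below is propagated up
 * through the frame's match bit.
 */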
2684 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
2685 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
2690 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2691 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2695 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2696 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2697 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
2698 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
2699 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
2700 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
2701 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
2702 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
2708 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
2713 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
2717 if (addr->addr32[3] == 0xffffffff) {
2718 addr->addr32[3] = 0;
2719 if (addr->addr32[2] == 0xffffffff) {
2720 addr->addr32[2] = 0;
2721 if (addr->addr32[1] == 0xffffffff) {
2722 addr->addr32[1] = 0;
2724 htonl(ntohl(addr->addr32[0]) + 1);
2727 htonl(ntohl(addr->addr32[1]) + 1);
2730 htonl(ntohl(addr->addr32[2]) + 1);
2733 htonl(ntohl(addr->addr32[3]) + 1);
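/*
 * The IPv6 case is a 128-bit increment in network byte order:
 * each 32-bit word wraps from 0xffffffff to 0 and carries into
 * the next more significant word.
 */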
2740 pf_socket_lookup(int direction, struct pf_pdesc *pd, struct mbuf *m)
2742 struct pf_addr *saddr, *daddr;
2743 u_int16_t sport, dport;
2744 struct inpcbinfo *pi;
2747 pd->lookup.uid = UID_MAX;
2748 pd->lookup.gid = GID_MAX;
2750 switch (pd->proto) {
2752 if (pd->hdr.tcp == NULL)
2754 sport = pd->hdr.tcp->th_sport;
2755 dport = pd->hdr.tcp->th_dport;
2759 if (pd->hdr.udp == NULL)
2761 sport = pd->hdr.udp->uh_sport;
2762 dport = pd->hdr.udp->uh_dport;
2768 if (direction == PF_IN) {
2783 inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4,
2784 dport, INPLOOKUP_RLOCKPCB, NULL, m);
2786 inp = in_pcblookup_mbuf(pi, saddr->v4, sport,
2787 daddr->v4, dport, INPLOOKUP_WILDCARD |
2788 INPLOOKUP_RLOCKPCB, NULL, m);
2796 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6,
2797 dport, INPLOOKUP_RLOCKPCB, NULL, m);
2799 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport,
2800 &daddr->v6, dport, INPLOOKUP_WILDCARD |
2801 INPLOOKUP_RLOCKPCB, NULL, m);
2811 INP_RLOCK_ASSERT(inp);
2812 pd->lookup.uid = inp->inp_cred->cr_uid;
2813 pd->lookup.gid = inp->inp_cred->cr_groups[0];
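/*
 * A successful pcb lookup attributes the packet to the owning
 * socket's credentials; uid/gid rule operators are then matched
 * against these values via pf_match_uid()/pf_match_gid().
 */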
2820 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
2824 u_int8_t *opt, optlen;
2825 u_int8_t wscale = 0;
2827 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
2828 if (hlen <= sizeof(struct tcphdr))
2830 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
2832 opt = hdr + sizeof(struct tcphdr);
2833 hlen -= sizeof(struct tcphdr);
2843 if (wscale > TCP_MAX_WINSHIFT)
2844 wscale = TCP_MAX_WINSHIFT;
2845 wscale |= PF_WSCALE_FLAG;
2860 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
2864 u_int8_t *opt, optlen;
2865 u_int16_t mss = V_tcp_mssdflt;
2867 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
2868 if (hlen <= sizeof(struct tcphdr))
2870 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
2872 opt = hdr + sizeof(struct tcphdr);
2873 hlen -= sizeof(struct tcphdr);
2874 while (hlen >= TCPOLEN_MAXSEG) {
2882 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
2898 pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
2901 struct sockaddr_in *dst;
2905 struct sockaddr_in6 *dst6;
2906 struct route_in6 ro6;
2908 struct rtentry *rt = NULL;
2910 u_int16_t mss = V_tcp_mssdflt;
2915 hlen = sizeof(struct ip);
2916 bzero(&ro, sizeof(ro));
2917 dst = (struct sockaddr_in *)&ro.ro_dst;
2918 dst->sin_family = AF_INET;
2919 dst->sin_len = sizeof(*dst);
2920 dst->sin_addr = addr->v4;
2921 in_rtalloc_ign(&ro, 0, rtableid);
2927 hlen = sizeof(struct ip6_hdr);
2928 bzero(&ro6, sizeof(ro6));
2929 dst6 = (struct sockaddr_in6 *)&ro6.ro_dst;
2930 dst6->sin6_family = AF_INET6;
2931 dst6->sin6_len = sizeof(*dst6);
2932 dst6->sin6_addr = addr->v6;
2933 in6_rtalloc_ign(&ro6, 0, rtableid);
2939 if (rt && rt->rt_ifp) {
2940 mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr);
2941 mss = max(V_tcp_mssdflt, mss);
2944 mss = min(mss, offer);
2945 mss = max(mss, 64); /* sanity - at least max opt space */
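/*
 * The result is the route MTU minus IP and TCP headers, never
 * larger than the peer's offer and never below the 64 byte floor.
 */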
2950 pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr)
2952 struct pf_rule *r = s->rule.ptr;
2953 struct pf_src_node *sn = NULL;
2956 if (!r->rt || r->rt == PF_FASTROUTE)
2958 switch (s->key[PF_SK_WIRE]->af) {
2961 pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL, &sn);
2962 s->rt_kif = r->rpool.cur->kif;
2967 pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL, &sn);
2968 s->rt_kif = r->rpool.cur->kif;
2975 pf_tcp_iss(struct pf_pdesc *pd)
2978 u_int32_t digest[4];
2980 if (V_pf_tcp_secret_init == 0) {
2981 read_random(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret));
2982 MD5Init(&V_pf_tcp_secret_ctx);
2983 MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret,
2984 sizeof(V_pf_tcp_secret));
2985 V_pf_tcp_secret_init = 1;
2988 ctx = V_pf_tcp_secret_ctx;
2990 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short));
2991 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short));
2992 if (pd->af == AF_INET6) {
2993 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
2994 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
2996 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
2997 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
2999 MD5Final((u_char *)digest, &ctx);
3000 V_pf_tcp_iss_off += 4096;
3001 #define ISN_RANDOM_INCREMENT (4096 - 1)
3002 return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) +
3004 #undef ISN_RANDOM_INCREMENT
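/*
 * The resulting ISS is an RFC 1948 style hash of the connection
 * 4-tuple keyed with a boot-time secret, advanced by a random
 * amount inside a monotonically growing 4096 byte window, so
 * modulated sequence numbers stay unpredictable yet always move
 * forward.
 */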
3008 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
3009 struct pfi_kif *kif, struct mbuf *m, int off, struct pf_pdesc *pd,
3010 struct pf_rule **am, struct pf_ruleset **rsm, struct inpcb *inp)
3012 struct pf_rule *nr = NULL;
3013 struct pf_addr * const saddr = pd->src;
3014 struct pf_addr * const daddr = pd->dst;
3015 sa_family_t af = pd->af;
3016 struct pf_rule *r, *a = NULL;
3017 struct pf_ruleset *ruleset = NULL;
3018 struct pf_src_node *nsn = NULL;
3019 struct tcphdr *th = pd->hdr.tcp;
3020 struct pf_state_key *sk = NULL, *nk = NULL;
3022 int rewrite = 0, hdrlen = 0;
3023 int tag = -1, rtableid = -1;
3027 u_int16_t sport = 0, dport = 0;
3028 u_int16_t bproto_sum = 0, bip_sum = 0;
3029 u_int8_t icmptype = 0, icmpcode = 0;
3030 struct pf_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
3035 INP_LOCK_ASSERT(inp);
3036 pd->lookup.uid = inp->inp_cred->cr_uid;
3037 pd->lookup.gid = inp->inp_cred->cr_groups[0];
3038 pd->lookup.done = 1;
3041 switch (pd->proto) {
3043 sport = th->th_sport;
3044 dport = th->th_dport;
3045 hdrlen = sizeof(*th);
3048 sport = pd->hdr.udp->uh_sport;
3049 dport = pd->hdr.udp->uh_dport;
3050 hdrlen = sizeof(*pd->hdr.udp);
3054 if (pd->af != AF_INET)
3056 sport = dport = pd->hdr.icmp->icmp_id;
3057 hdrlen = sizeof(*pd->hdr.icmp);
3058 icmptype = pd->hdr.icmp->icmp_type;
3059 icmpcode = pd->hdr.icmp->icmp_code;
3061 if (icmptype == ICMP_UNREACH ||
3062 icmptype == ICMP_SOURCEQUENCH ||
3063 icmptype == ICMP_REDIRECT ||
3064 icmptype == ICMP_TIMXCEED ||
3065 icmptype == ICMP_PARAMPROB)
3070 case IPPROTO_ICMPV6:
3073 sport = dport = pd->hdr.icmp6->icmp6_id;
3074 hdrlen = sizeof(*pd->hdr.icmp6);
3075 icmptype = pd->hdr.icmp6->icmp6_type;
3076 icmpcode = pd->hdr.icmp6->icmp6_code;
3078 if (icmptype == ICMP6_DST_UNREACH ||
3079 icmptype == ICMP6_PACKET_TOO_BIG ||
3080 icmptype == ICMP6_TIME_EXCEEDED ||
3081 icmptype == ICMP6_PARAM_PROB)
3086 sport = dport = hdrlen = 0;
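	/*
	 * state_icmp is set above for ICMP/ICMP6 error messages; such
	 * packets never create state of their own and are instead
	 * matched against the state of the connection they refer to
	 * (note the !state_icmp guard before pf_create_state() below).
	 */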
3090 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3092 /* check packet for BINAT/NAT/RDR */
3093 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, &sk,
3094 &nk, saddr, daddr, sport, dport, anchor_stack)) != NULL) {
3095 KASSERT(sk != NULL, ("%s: null sk", __func__));
3096 KASSERT(nk != NULL, ("%s: null nk", __func__));
3099 bip_sum = *pd->ip_sum;
3101 switch (pd->proto) {
3103 bproto_sum = th->th_sum;
3104 pd->proto_sum = &th->th_sum;
3106 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3107 nk->port[pd->sidx] != sport) {
3108 pf_change_ap(saddr, &th->th_sport, pd->ip_sum,
3109 &th->th_sum, &nk->addr[pd->sidx],
3110 nk->port[pd->sidx], 0, af);
3111 pd->sport = &th->th_sport;
3112 sport = th->th_sport;
3115 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3116 nk->port[pd->didx] != dport) {
3117 pf_change_ap(daddr, &th->th_dport, pd->ip_sum,
3118 &th->th_sum, &nk->addr[pd->didx],
3119 nk->port[pd->didx], 0, af);
3120 dport = th->th_dport;
3121 pd->dport = &th->th_dport;
3126 bproto_sum = pd->hdr.udp->uh_sum;
3127 pd->proto_sum = &pd->hdr.udp->uh_sum;
3129 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3130 nk->port[pd->sidx] != sport) {
3131 pf_change_ap(saddr, &pd->hdr.udp->uh_sport,
3132 pd->ip_sum, &pd->hdr.udp->uh_sum,
3133 &nk->addr[pd->sidx],
3134 nk->port[pd->sidx], 1, af);
3135 sport = pd->hdr.udp->uh_sport;
3136 pd->sport = &pd->hdr.udp->uh_sport;
3139 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3140 nk->port[pd->didx] != dport) {
3141 pf_change_ap(daddr, &pd->hdr.udp->uh_dport,
3142 pd->ip_sum, &pd->hdr.udp->uh_sum,
3143 &nk->addr[pd->didx],
3144 nk->port[pd->didx], 1, af);
3145 dport = pd->hdr.udp->uh_dport;
3146 pd->dport = &pd->hdr.udp->uh_dport;
3152 nk->port[0] = nk->port[1];
3153 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
3154 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3155 nk->addr[pd->sidx].v4.s_addr, 0);
3157 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
3158 pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
3159 nk->addr[pd->didx].v4.s_addr, 0);
3161 if (nk->port[1] != pd->hdr.icmp->icmp_id) {
3162 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
3163 pd->hdr.icmp->icmp_cksum, sport,
3165 pd->hdr.icmp->icmp_id = nk->port[1];
3166 pd->sport = &pd->hdr.icmp->icmp_id;
3168 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
3172 case IPPROTO_ICMPV6:
3173 nk->port[0] = nk->port[1];
3174 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3175 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3176 &nk->addr[pd->sidx], 0);
3178 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3179 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3180 &nk->addr[pd->didx], 0);
3189 &nk->addr[pd->sidx], AF_INET))
3190 pf_change_a(&saddr->v4.s_addr,
3192 nk->addr[pd->sidx].v4.s_addr, 0);
3195 &nk->addr[pd->didx], AF_INET))
3196 pf_change_a(&daddr->v4.s_addr,
3198 nk->addr[pd->didx].v4.s_addr, 0);
3204 &nk->addr[pd->sidx], AF_INET6))
3205 PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3208 &nk->addr[pd->didx], AF_INET6))
3209 PF_ACPY(daddr, &nk->addr[pd->didx], af);
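	/*
	 * Rule evaluation proper: scan the ruleset in order, using the
	 * precomputed r->skip[] links to jump past every consecutive
	 * rule that shares the same failing criterion instead of
	 * testing each one individually.
	 */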
3222 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3223 r = r->skip[PF_SKIP_IFP].ptr;
3224 else if (r->direction && r->direction != direction)
3225 r = r->skip[PF_SKIP_DIR].ptr;
3226 else if (r->af && r->af != af)
3227 r = r->skip[PF_SKIP_AF].ptr;
3228 else if (r->proto && r->proto != pd->proto)
3229 r = r->skip[PF_SKIP_PROTO].ptr;
3230 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3231 r->src.neg, kif, M_GETFIB(m)))
3232 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3233 /* tcp/udp only. port_op always 0 in other cases */
3234 else if (r->src.port_op && !pf_match_port(r->src.port_op,
3235 r->src.port[0], r->src.port[1], sport))
3236 r = r->skip[PF_SKIP_SRC_PORT].ptr;
3237 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3238 r->dst.neg, NULL, M_GETFIB(m)))
3239 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3240 /* tcp/udp only. port_op always 0 in other cases */
3241 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3242 r->dst.port[0], r->dst.port[1], dport))
3243 r = r->skip[PF_SKIP_DST_PORT].ptr;
3244 /* icmp only. type always 0 in other cases */
3245 else if (r->type && r->type != icmptype + 1)
3246 r = TAILQ_NEXT(r, entries);
3247 /* icmp only. code always 0 in other cases */
3248 else if (r->code && r->code != icmpcode + 1)
3249 r = TAILQ_NEXT(r, entries);
3250 else if (r->tos && !(r->tos == pd->tos))
3251 r = TAILQ_NEXT(r, entries);
3252 else if (r->rule_flag & PFRULE_FRAGMENT)
3253 r = TAILQ_NEXT(r, entries);
3254 else if (pd->proto == IPPROTO_TCP &&
3255 (r->flagset & th->th_flags) != r->flags)
3256 r = TAILQ_NEXT(r, entries);
3257 /* tcp/udp only. uid.op always 0 in other cases */
3258 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3259 pf_socket_lookup(direction, pd, m), 1)) &&
3260 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3262 r = TAILQ_NEXT(r, entries);
3263 /* tcp/udp only. gid.op always 0 in other cases */
3264 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
3265 pf_socket_lookup(direction, pd, m), 1)) &&
3266 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
3268 r = TAILQ_NEXT(r, entries);
3270 r->prob <= arc4random())
3271 r = TAILQ_NEXT(r, entries);
3272 else if (r->match_tag && !pf_match_tag(m, r, &tag,
3273 pd->pf_mtag ? pd->pf_mtag->tag : 0))
3274 r = TAILQ_NEXT(r, entries);
3275 else if (r->os_fingerprint != PF_OSFP_ANY &&
3276 (pd->proto != IPPROTO_TCP || !pf_osfp_match(
3277 pf_osfp_fingerprint(pd, m, off, th),
3278 r->os_fingerprint)))
3279 r = TAILQ_NEXT(r, entries);
3283 if (r->rtableid >= 0)
3284 rtableid = r->rtableid;
3285 if (r->anchor == NULL) {
3292 r = TAILQ_NEXT(r, entries);
3294 pf_step_into_anchor(anchor_stack, &asd,
3295 &ruleset, PF_RULESET_FILTER, &r, &a,
3298 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3299 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3306 REASON_SET(&reason, PFRES_MATCH);
3308 if (r->log || (nr != NULL && nr->log)) {
3310 m_copyback(m, off, hdrlen, pd->hdr.any);
3311 PFLOG_PACKET(kif, m, af, direction, reason, r->log ? r : nr, a,
3315 if ((r->action == PF_DROP) &&
3316 ((r->rule_flag & PFRULE_RETURNRST) ||
3317 (r->rule_flag & PFRULE_RETURNICMP) ||
3318 (r->rule_flag & PFRULE_RETURN))) {
3319 /* undo NAT changes, if they have taken place */
3321 PF_ACPY(saddr, &sk->addr[pd->sidx], af);
3322 PF_ACPY(daddr, &sk->addr[pd->didx], af);
3324 *pd->sport = sk->port[pd->sidx];
3326 *pd->dport = sk->port[pd->didx];
3328 *pd->proto_sum = bproto_sum;
3330 *pd->ip_sum = bip_sum;
3331 m_copyback(m, off, hdrlen, pd->hdr.any);
3333 if (pd->proto == IPPROTO_TCP &&
3334 ((r->rule_flag & PFRULE_RETURNRST) ||
3335 (r->rule_flag & PFRULE_RETURN)) &&
3336 !(th->th_flags & TH_RST)) {
3337 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
3349 h4 = mtod(m, struct ip *);
3350 len = ntohs(h4->ip_len) - off;
3355 h6 = mtod(m, struct ip6_hdr *);
3356 len = ntohs(h6->ip6_plen) - (off - sizeof(*h6));
3361 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
3362 REASON_SET(&reason, PFRES_PROTCKSUM);
3364 if (th->th_flags & TH_SYN)
3366 if (th->th_flags & TH_FIN)
3368 pf_send_tcp(m, r, af, pd->dst,
3369 pd->src, th->th_dport, th->th_sport,
3370 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
3371 r->return_ttl, 1, 0, kif->pfik_ifp);
3373 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
3375 pf_send_icmp(m, r->return_icmp >> 8,
3376 r->return_icmp & 255, af, r);
3377 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
3379 pf_send_icmp(m, r->return_icmp6 >> 8,
3380 r->return_icmp6 & 255, af, r);
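	/*
	 * The return-rst/return-icmp replies above are built only after
	 * the NAT rewrite has been undone, so the peer sees the
	 * addresses and ports it actually used.
	 */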
3383 if (r->action == PF_DROP)
3386 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3387 REASON_SET(&reason, PFRES_MEMORY);
3391 M_SETFIB(m, rtableid);
3393 if (!state_icmp && (r->keep_state || nr != NULL ||
3394 (pd->flags & PFDESC_TCP_NORM))) {
3396 action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off,
3397 sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum,
3399 if (action != PF_PASS)
3403 uma_zfree(V_pf_state_key_z, sk);
3405 uma_zfree(V_pf_state_key_z, nk);
3408 /* copy back packet headers if we performed NAT operations */
3410 m_copyback(m, off, hdrlen, pd->hdr.any);
3412 if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) &&
3413 direction == PF_OUT &&
3414 pfsync_defer_ptr != NULL && pfsync_defer_ptr(*sm, m))
3416 * We want the state created, but we don't
3417 * want to send this in case a partner
3418 * firewall has to know about it to allow
3419 * replies through it.
3427 uma_zfree(V_pf_state_key_z, sk);
3429 uma_zfree(V_pf_state_key_z, nk);
3434 pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
3435 struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *nk,
3436 struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport,
3437 u_int16_t dport, int *rewrite, struct pfi_kif *kif, struct pf_state **sm,
3438 int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen)
3440 struct pf_state *s = NULL;
3441 struct pf_src_node *sn = NULL;
3442 struct tcphdr *th = pd->hdr.tcp;
3443 u_int16_t mss = V_tcp_mssdflt;
3446 /* check maximums */
3447 if (r->max_states && (r->states_cur >= r->max_states)) {
3448 V_pf_status.lcounters[LCNT_STATES]++;
3449 REASON_SET(&reason, PFRES_MAXSTATES);
3452 /* src node for filter rule */
3453 if ((r->rule_flag & PFRULE_SRCTRACK ||
3454 r->rpool.opts & PF_POOL_STICKYADDR) &&
3455 pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
3456 REASON_SET(&reason, PFRES_SRCLIMIT);
3459 /* src node for translation rule */
3460 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3461 pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
3462 REASON_SET(&reason, PFRES_SRCLIMIT);
3465 s = uma_zalloc(V_pf_state_z, M_NOWAIT | M_ZERO);
3467 REASON_SET(&reason, PFRES_MEMORY);
3471 s->nat_rule.ptr = nr;
3473 STATE_INC_COUNTERS(s);
3475 s->state_flags |= PFSTATE_ALLOWOPTS;
3476 if (r->rule_flag & PFRULE_STATESLOPPY)
3477 s->state_flags |= PFSTATE_SLOPPY;
3478 s->log = r->log & PF_LOG_ALL;
3479 s->sync_state = PFSYNC_S_NONE;
3481 s->log |= nr->log & PF_LOG_ALL;
3482 switch (pd->proto) {
3484 s->src.seqlo = ntohl(th->th_seq);
3485 s->src.seqhi = s->src.seqlo + pd->p_len + 1;
3486 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
3487 r->keep_state == PF_STATE_MODULATE) {
3488 /* Generate sequence number modulator */
3489 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
3492 pf_change_a(&th->th_seq, &th->th_sum,
3493 htonl(s->src.seqlo + s->src.seqdiff), 0);
3497 if (th->th_flags & TH_SYN) {
3499 s->src.wscale = pf_get_wscale(m, off,
3500 th->th_off, pd->af);
3502 s->src.max_win = MAX(ntohs(th->th_win), 1);
3503 if (s->src.wscale & PF_WSCALE_MASK) {
3504 /* Remove scale factor from initial window */
3505 int win = s->src.max_win;
3506 win += 1 << (s->src.wscale & PF_WSCALE_MASK);
3507 s->src.max_win = (win - 1) >>
3508 (s->src.wscale & PF_WSCALE_MASK);
3510 if (th->th_flags & TH_FIN)
3514 s->src.state = TCPS_SYN_SENT;
3515 s->dst.state = TCPS_CLOSED;
3516 s->timeout = PFTM_TCP_FIRST_PACKET;
3519 s->src.state = PFUDPS_SINGLE;
3520 s->dst.state = PFUDPS_NO_TRAFFIC;
3521 s->timeout = PFTM_UDP_FIRST_PACKET;
3525 case IPPROTO_ICMPV6:
3527 s->timeout = PFTM_ICMP_FIRST_PACKET;
3530 s->src.state = PFOTHERS_SINGLE;
3531 s->dst.state = PFOTHERS_NO_TRAFFIC;
3532 s->timeout = PFTM_OTHER_FIRST_PACKET;
3535 s->creation = time_uptime;
3536 s->expire = time_uptime;
3540 s->src_node->states++;
3543 /* XXX We only modify one side for now. */
3544 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
3545 s->nat_src_node = nsn;
3546 s->nat_src_node->states++;
3548 if (pd->proto == IPPROTO_TCP) {
3549 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
3550 off, pd, th, &s->src, &s->dst)) {
3551 REASON_SET(&reason, PFRES_MEMORY);
3552 pf_src_tree_remove_state(s);
3553 STATE_DEC_COUNTERS(s);
3554 uma_zfree(V_pf_state_z, s);
3557 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
3558 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
3559 &s->src, &s->dst, rewrite)) {
3560 /* This really shouldn't happen!!! */
3561 DPFPRINTF(PF_DEBUG_URGENT,
3562 ("pf_normalize_tcp_stateful failed on first pkt"));
3563 pf_normalize_tcp_cleanup(s);
3564 pf_src_tree_remove_state(s);
3565 STATE_DEC_COUNTERS(s);
3566 uma_zfree(V_pf_state_z, s);
3570 s->direction = pd->dir;
3573 * sk/nk could already have been set up by pf_get_translation().
3576 KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p",
3577 __func__, nr, sk, nk));
3578 sk = pf_state_key_setup(pd, pd->src, pd->dst, sport, dport);
3583 KASSERT((sk != NULL && nk != NULL), ("%s: nr %p sk %p, nk %p",
3584 __func__, nr, sk, nk));
3586 /* Swap sk/nk for PF_OUT. */
3587 if (pf_state_insert(BOUND_IFACE(r, kif),
3588 (pd->dir == PF_IN) ? sk : nk,
3589 (pd->dir == PF_IN) ? nk : sk, s)) {
3590 if (pd->proto == IPPROTO_TCP)
3591 pf_normalize_tcp_cleanup(s);
3592 REASON_SET(&reason, PFRES_STATEINS);
3593 pf_src_tree_remove_state(s);
3594 STATE_DEC_COUNTERS(s);
3595 uma_zfree(V_pf_state_z, s);
3600 pf_set_rt_ifp(s, pd->src); /* needs s->state_key set */
3603 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
3604 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
3605 s->src.state = PF_TCPS_PROXY_SRC;
3606 /* undo NAT changes, if they have taken place */
3608 struct pf_state_key *skt = s->key[PF_SK_WIRE];
3609 if (pd->dir == PF_OUT)
3610 skt = s->key[PF_SK_STACK];
3611 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
3612 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
3614 *pd->sport = skt->port[pd->sidx];
3616 *pd->dport = skt->port[pd->didx];
3618 *pd->proto_sum = bproto_sum;
3620 *pd->ip_sum = bip_sum;
3621 m_copyback(m, off, hdrlen, pd->hdr.any);
3623 s->src.seqhi = htonl(arc4random());
3624 /* Find mss option */
3625 int rtid = M_GETFIB(m);
3626 mss = pf_get_mss(m, off, th->th_off, pd->af);
3627 mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
3628 mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
3630 pf_send_tcp(NULL, r, pd->af, pd->dst, pd->src, th->th_dport,
3631 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
3632 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL);
3633 REASON_SET(&reason, PFRES_SYNPROXY);
3634 return (PF_SYNPROXY_DROP);
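	/*
	 * First leg of the synproxy handshake: pf answers the client's
	 * SYN itself with a SYN|ACK carrying a random ISS, drops the
	 * original segment, and contacts the real destination only once
	 * the client's final ACK validates in pf_test_state_tcp().
	 */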
3641 uma_zfree(V_pf_state_key_z, sk);
3643 uma_zfree(V_pf_state_key_z, nk);
3645 if (sn != NULL && sn->states == 0 && sn->expire == 0) {
3646 pf_unlink_src_node(sn);
3647 pf_free_src_node(sn);
3650 if (nsn != sn && nsn != NULL && nsn->states == 0 && nsn->expire == 0) {
3651 pf_unlink_src_node(nsn);
3652 pf_free_src_node(nsn);
3659 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
3660 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
3661 struct pf_ruleset **rsm)
3663 struct pf_rule *r, *a = NULL;
3664 struct pf_ruleset *ruleset = NULL;
3665 sa_family_t af = pd->af;
3670 struct pf_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
3674 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3677 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3678 r = r->skip[PF_SKIP_IFP].ptr;
3679 else if (r->direction && r->direction != direction)
3680 r = r->skip[PF_SKIP_DIR].ptr;
3681 else if (r->af && r->af != af)
3682 r = r->skip[PF_SKIP_AF].ptr;
3683 else if (r->proto && r->proto != pd->proto)
3684 r = r->skip[PF_SKIP_PROTO].ptr;
3685 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
3686 r->src.neg, kif, M_GETFIB(m)))
3687 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3688 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
3689 r->dst.neg, NULL, M_GETFIB(m)))
3690 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3691 else if (r->tos && !(r->tos == pd->tos))
3692 r = TAILQ_NEXT(r, entries);
3693 else if (r->os_fingerprint != PF_OSFP_ANY)
3694 r = TAILQ_NEXT(r, entries);
3695 else if (pd->proto == IPPROTO_UDP &&
3696 (r->src.port_op || r->dst.port_op))
3697 r = TAILQ_NEXT(r, entries);
3698 else if (pd->proto == IPPROTO_TCP &&
3699 (r->src.port_op || r->dst.port_op || r->flagset))
3700 r = TAILQ_NEXT(r, entries);
3701 else if ((pd->proto == IPPROTO_ICMP ||
3702 pd->proto == IPPROTO_ICMPV6) &&
3703 (r->type || r->code))
3704 r = TAILQ_NEXT(r, entries);
3705 else if (r->prob && r->prob <=
3706 (arc4random() % (UINT_MAX - 1) + 1))
3707 r = TAILQ_NEXT(r, entries);
3708 else if (r->match_tag && !pf_match_tag(m, r, &tag,
3709 pd->pf_mtag ? pd->pf_mtag->tag : 0))
3710 r = TAILQ_NEXT(r, entries);
3712 if (r->anchor == NULL) {
3719 r = TAILQ_NEXT(r, entries);
3721 pf_step_into_anchor(anchor_stack, &asd,
3722 &ruleset, PF_RULESET_FILTER, &r, &a,
3725 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3726 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3733 REASON_SET(&reason, PFRES_MATCH);
3736 PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd,
3739 if (r->action != PF_PASS)
3742 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3743 REASON_SET(&reason, PFRES_MEMORY);
3751 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
3752 struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off,
3753 struct pf_pdesc *pd, u_short *reason, int *copyback)
3755 struct tcphdr *th = pd->hdr.tcp;
3756 u_int16_t win = ntohs(th->th_win);
3757 u_int32_t ack, end, seq, orig_seq;
3761 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
3762 sws = src->wscale & PF_WSCALE_MASK;
3763 dws = dst->wscale & PF_WSCALE_MASK;
3768 * Sequence tracking algorithm from Guido van Rooij's paper:
3769 * http://www.madison-gurkha.com/publications/tcp_filtering/
3773 orig_seq = seq = ntohl(th->th_seq);
3774 if (src->seqlo == 0) {
3775 /* First packet from this end. Set its state */
3777 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
3778 src->scrub == NULL) {
3779 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
3780 REASON_SET(reason, PFRES_MEMORY);
3785 /* Deferred generation of sequence number modulator */
3786 if (dst->seqdiff && !src->seqdiff) {
3787 /* use random iss for the TCP server */
3788 while ((src->seqdiff = arc4random() - seq) == 0)
3790 ack = ntohl(th->th_ack) - dst->seqdiff;
3791 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
3793 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
3796 ack = ntohl(th->th_ack);
3799 end = seq + pd->p_len;
3800 if (th->th_flags & TH_SYN) {
3802 if (dst->wscale & PF_WSCALE_FLAG) {
3803 src->wscale = pf_get_wscale(m, off, th->th_off,
3805 if (src->wscale & PF_WSCALE_FLAG) {
3806 /* Remove scale factor from initial
3808 sws = src->wscale & PF_WSCALE_MASK;
3809 win = ((u_int32_t)win + (1 << sws) - 1)
3811 dws = dst->wscale & PF_WSCALE_MASK;
3813 /* fixup other window */
3814 dst->max_win <<= dst->wscale &
3816 /* in case of a retrans SYN|ACK */
3821 if (th->th_flags & TH_FIN)
3825 if (src->state < TCPS_SYN_SENT)
3826 src->state = TCPS_SYN_SENT;
3829 * May need to slide the window (seqhi may have been set by
3830 * the crappy stack check or if we picked up the connection
3831 * after establishment)
3833 if (src->seqhi == 1 ||
3834 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
3835 src->seqhi = end + MAX(1, dst->max_win << dws);
3836 if (win > src->max_win)
3840 ack = ntohl(th->th_ack) - dst->seqdiff;
3842 /* Modulate sequence numbers */
3843 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
3845 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
3848 end = seq + pd->p_len;
3849 if (th->th_flags & TH_SYN)
3851 if (th->th_flags & TH_FIN)
3855 if ((th->th_flags & TH_ACK) == 0) {
3856 /* Let it pass through the ack skew check */
3858 } else if ((ack == 0 &&
3859 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
3860 /* broken tcp stacks do not set ack */
3861 (dst->state < TCPS_SYN_SENT)) {
3863 * Many stacks (ours included) will set the ACK number in an
3864 * FIN|ACK if the SYN times out -- no sequence to ACK.
3870 /* Ease sequencing restrictions on no-data packets */
3875 ackskew = dst->seqlo - ack;
3879 * Need to demodulate the sequence numbers in any TCP SACK options
3880 * (Selective ACK). We could optionally validate the SACK values
3881 * against the current ACK window, either forwards or backwards, but
3882 * I'm not confident that SACK has been implemented properly
3883 * everywhere. It wouldn't surprise me if several stacks accidentally
3884 * SACK too far backwards of previously ACKed data. There really aren't
3885 * any security implications of bad SACKing unless the target stack
3886 * doesn't validate the option length correctly. Someone trying to
3887 * spoof into a TCP connection won't bother blindly sending SACK
3890 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
3891 if (pf_modulate_sack(m, off, pd, th, dst))
3896 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
3897 if (SEQ_GEQ(src->seqhi, end) &&
3898 /* Last octet inside other's window space */
3899 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
3900 /* Retrans: not more than one window back */
3901 (ackskew >= -MAXACKWINDOW) &&
3902 /* Acking not more than one reassembled fragment backwards */
3903 (ackskew <= (MAXACKWINDOW << sws)) &&
3904 /* Acking not more than one window forward */
3905 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
3906 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
3907 (pd->flags & PFDESC_IP_REAS) == 0)) {
3908 /* Require an exact/+1 sequence match on resets when possible */
3910 if (dst->scrub || src->scrub) {
3911 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
3912 *state, src, dst, copyback))
3916 /* update max window */
3917 if (src->max_win < win)
3919 /* synchronize sequencing */
3920 if (SEQ_GT(end, src->seqlo))
3922 /* slide the window of what the other end can send */
3923 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
3924 dst->seqhi = ack + MAX((win << sws), 1);
3928 if (th->th_flags & TH_SYN)
3929 if (src->state < TCPS_SYN_SENT)
3930 src->state = TCPS_SYN_SENT;
3931 if (th->th_flags & TH_FIN)
3932 if (src->state < TCPS_CLOSING)
3933 src->state = TCPS_CLOSING;
3934 if (th->th_flags & TH_ACK) {
3935 if (dst->state == TCPS_SYN_SENT) {
3936 dst->state = TCPS_ESTABLISHED;
3937 if (src->state == TCPS_ESTABLISHED &&
3938 (*state)->src_node != NULL &&
3939 pf_src_connlimit(state)) {
3940 REASON_SET(reason, PFRES_SRCLIMIT);
3943 } else if (dst->state == TCPS_CLOSING)
3944 dst->state = TCPS_FIN_WAIT_2;
3946 if (th->th_flags & TH_RST)
3947 src->state = dst->state = TCPS_TIME_WAIT;
3949 /* update expire time */
3950 (*state)->expire = time_uptime;
3951 if (src->state >= TCPS_FIN_WAIT_2 &&
3952 dst->state >= TCPS_FIN_WAIT_2)
3953 (*state)->timeout = PFTM_TCP_CLOSED;
3954 else if (src->state >= TCPS_CLOSING &&
3955 dst->state >= TCPS_CLOSING)
3956 (*state)->timeout = PFTM_TCP_FIN_WAIT;
3957 else if (src->state < TCPS_ESTABLISHED ||
3958 dst->state < TCPS_ESTABLISHED)
3959 (*state)->timeout = PFTM_TCP_OPENING;
3960 else if (src->state >= TCPS_CLOSING ||
3961 dst->state >= TCPS_CLOSING)
3962 (*state)->timeout = PFTM_TCP_CLOSING;
3964 (*state)->timeout = PFTM_TCP_ESTABLISHED;
3966 /* Fall through to PASS packet */
3968 } else if ((dst->state < TCPS_SYN_SENT ||
3969 dst->state >= TCPS_FIN_WAIT_2 ||
3970 src->state >= TCPS_FIN_WAIT_2) &&
3971 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
3972 /* Within a window forward of the originating packet */
3973 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
3974 /* Within a window backward of the originating packet */
3977 * This currently handles three situations:
3978 * 1) Stupid stacks will shotgun SYNs before their peer
3980 * 2) When PF catches an already established stream (the
3981 * firewall rebooted, the state table was flushed, routes
3983 * 3) Packets get funky immediately after the connection
3984 * closes (this should catch Solaris spurious ACK|FINs
3985 * that web servers like to spew after a close)
3987 * This must be a little more careful than the above code
3988 * since packet floods will also be caught here. We don't
3989 * update the TTL here to mitigate the damage of a packet
3990 * flood and so the same code can handle awkward establishment
3991 * and a loosened connection close.
3992 * In the establishment case, a correct peer response will
3993 * validate the connection, go through the normal state code
3994 * and keep updating the state TTL.
3997 if (V_pf_status.debug >= PF_DEBUG_MISC) {
3998 printf("pf: loose state match: ");
3999 pf_print_state(*state);
4000 pf_print_flags(th->th_flags);
4001 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4002 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
4003 pd->p_len, ackskew, (unsigned long long)(*state)->packets[0],
4004 (unsigned long long)(*state)->packets[1],
4005 pd->dir == PF_IN ? "in" : "out",
4006 pd->dir == (*state)->direction ? "fwd" : "rev");
4009 if (dst->scrub || src->scrub) {
4010 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4011 *state, src, dst, copyback))
4015 /* update max window */
4016 if (src->max_win < win)
4018 /* synchronize sequencing */
4019 if (SEQ_GT(end, src->seqlo))
4021 /* slide the window of what the other end can send */
4022 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4023 dst->seqhi = ack + MAX((win << sws), 1);
4026 * Cannot set dst->seqhi here since this could be a shotgunned
4027 * SYN and not an already established connection.
4030 if (th->th_flags & TH_FIN)
4031 if (src->state < TCPS_CLOSING)
4032 src->state = TCPS_CLOSING;
4033 if (th->th_flags & TH_RST)
4034 src->state = dst->state = TCPS_TIME_WAIT;
4036 /* Fall through to PASS packet */
4039 if ((*state)->dst.state == TCPS_SYN_SENT &&
4040 (*state)->src.state == TCPS_SYN_SENT) {
4041 /* Send RST for state mismatches during handshake */
4042 if (!(th->th_flags & TH_RST))
4043 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4044 pd->dst, pd->src, th->th_dport,
4045 th->th_sport, ntohl(th->th_ack), 0,
4047 (*state)->rule.ptr->return_ttl, 1, 0,
4052 } else if (V_pf_status.debug >= PF_DEBUG_MISC) {
4053 printf("pf: BAD state: ");
4054 pf_print_state(*state);
4055 pf_print_flags(th->th_flags);
4056 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4057 "pkts=%llu:%llu dir=%s,%s\n",
4058 seq, orig_seq, ack, pd->p_len, ackskew,
4059 (unsigned long long)(*state)->packets[0],
4060 (unsigned long long)(*state)->packets[1],
4061 pd->dir == PF_IN ? "in" : "out",
4062 pd->dir == (*state)->direction ? "fwd" : "rev");
4063 printf("pf: State failure on: %c %c %c %c | %c %c\n",
4064 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4065 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4067 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4068 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4069 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4070 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
4072 REASON_SET(reason, PFRES_BADSTATE);
4080 pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst,
4081 struct pf_state **state, struct pf_pdesc *pd, u_short *reason)
4083 struct tcphdr *th = pd->hdr.tcp;
4085 if (th->th_flags & TH_SYN)
4086 if (src->state < TCPS_SYN_SENT)
4087 src->state = TCPS_SYN_SENT;
4088 if (th->th_flags & TH_FIN)
4089 if (src->state < TCPS_CLOSING)
4090 src->state = TCPS_CLOSING;
4091 if (th->th_flags & TH_ACK) {
4092 if (dst->state == TCPS_SYN_SENT) {
4093 dst->state = TCPS_ESTABLISHED;
4094 if (src->state == TCPS_ESTABLISHED &&
4095 (*state)->src_node != NULL &&
4096 pf_src_connlimit(state)) {
4097 REASON_SET(reason, PFRES_SRCLIMIT);
4100 } else if (dst->state == TCPS_CLOSING) {
4101 dst->state = TCPS_FIN_WAIT_2;
4102 } else if (src->state == TCPS_SYN_SENT &&
4103 dst->state < TCPS_SYN_SENT) {
4105 * Handle a special sloppy case where we only see one
4106 * half of the connection. If there is an ACK after
4107 * the initial SYN without ever seeing a packet from
4108 * the destination, set the connection to established.
4110 dst->state = src->state = TCPS_ESTABLISHED;
4111 if ((*state)->src_node != NULL &&
4112 pf_src_connlimit(state)) {
4113 REASON_SET(reason, PFRES_SRCLIMIT);
4116 } else if (src->state == TCPS_CLOSING &&
4117 dst->state == TCPS_ESTABLISHED &&
4120 * Handle the closing of half connections where we
4121 * don't see the full bidirectional FIN/ACK+ACK
4124 dst->state = TCPS_CLOSING;
4127 if (th->th_flags & TH_RST)
4128 src->state = dst->state = TCPS_TIME_WAIT;
4130 /* update expire time */
4131 (*state)->expire = time_uptime;
4132 if (src->state >= TCPS_FIN_WAIT_2 &&
4133 dst->state >= TCPS_FIN_WAIT_2)
4134 (*state)->timeout = PFTM_TCP_CLOSED;
4135 else if (src->state >= TCPS_CLOSING &&
4136 dst->state >= TCPS_CLOSING)
4137 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4138 else if (src->state < TCPS_ESTABLISHED ||
4139 dst->state < TCPS_ESTABLISHED)
4140 (*state)->timeout = PFTM_TCP_OPENING;
4141 else if (src->state >= TCPS_CLOSING ||
4142 dst->state >= TCPS_CLOSING)
4143 (*state)->timeout = PFTM_TCP_CLOSING;
4145 (*state)->timeout = PFTM_TCP_ESTABLISHED;
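/*
 * Sloppy tracking mirrors the flag and timeout bookkeeping of
 * pf_tcp_track_full() but enforces no sequence windows, which keeps
 * asymmetric paths working when only one direction of a connection
 * passes through this firewall.
 */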
4151 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
4152 struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
4155 struct pf_state_key_cmp key;
4156 struct tcphdr *th = pd->hdr.tcp;
4158 struct pf_state_peer *src, *dst;
4159 struct pf_state_key *sk;
4161 bzero(&key, sizeof(key));
4163 key.proto = IPPROTO_TCP;
4164 if (direction == PF_IN) { /* wire side, straight */
4165 PF_ACPY(&key.addr[0], pd->src, key.af);
4166 PF_ACPY(&key.addr[1], pd->dst, key.af);
4167 key.port[0] = th->th_sport;
4168 key.port[1] = th->th_dport;
4169 } else { /* stack side, reverse */
4170 PF_ACPY(&key.addr[1], pd->src, key.af);
4171 PF_ACPY(&key.addr[0], pd->dst, key.af);
4172 key.port[1] = th->th_sport;
4173 key.port[0] = th->th_dport;
4176 STATE_LOOKUP(kif, &key, direction, *state, pd);
4178 if (direction == (*state)->direction) {
4179 src = &(*state)->src;
4180 dst = &(*state)->dst;
4182 src = &(*state)->dst;
4183 dst = &(*state)->src;
4186 sk = (*state)->key[pd->didx];
4188 if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
4189 if (direction != (*state)->direction) {
4190 REASON_SET(reason, PFRES_SYNPROXY);
4191 return (PF_SYNPROXY_DROP);
4193 if (th->th_flags & TH_SYN) {
4194 if (ntohl(th->th_seq) != (*state)->src.seqlo) {
4195 REASON_SET(reason, PFRES_SYNPROXY);
4198 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4199 pd->src, th->th_dport, th->th_sport,
4200 (*state)->src.seqhi, ntohl(th->th_seq) + 1,
4201 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0, NULL);
4202 REASON_SET(reason, PFRES_SYNPROXY);
4203 return (PF_SYNPROXY_DROP);
4204 } else if (!(th->th_flags & TH_ACK) ||
4205 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4206 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4207 REASON_SET(reason, PFRES_SYNPROXY);
4209 } else if ((*state)->src_node != NULL &&
4210 pf_src_connlimit(state)) {
4211 REASON_SET(reason, PFRES_SRCLIMIT);
4214 (*state)->src.state = PF_TCPS_PROXY_DST;
4216 if ((*state)->src.state == PF_TCPS_PROXY_DST) {
4217 if (direction == (*state)->direction) {
4218 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
4219 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4220 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4221 REASON_SET(reason, PFRES_SYNPROXY);
4224 (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
4225 if ((*state)->dst.seqhi == 1)
4226 (*state)->dst.seqhi = htonl(arc4random());
4227 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4228 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4229 sk->port[pd->sidx], sk->port[pd->didx],
4230 (*state)->dst.seqhi, 0, TH_SYN, 0,
4231 (*state)->src.mss, 0, 0, (*state)->tag, NULL);
4232 REASON_SET(reason, PFRES_SYNPROXY);
4233 return (PF_SYNPROXY_DROP);
4234 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
4236 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
4237 REASON_SET(reason, PFRES_SYNPROXY);
4240 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
4241 (*state)->dst.seqlo = ntohl(th->th_seq);
4242 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4243 pd->src, th->th_dport, th->th_sport,
4244 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
4245 TH_ACK, (*state)->src.max_win, 0, 0, 0,
4246 (*state)->tag, NULL);
4247 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4248 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4249 sk->port[pd->sidx], sk->port[pd->didx],
4250 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
4251 TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0, NULL);
4252 (*state)->src.seqdiff = (*state)->dst.seqhi -
4253 (*state)->src.seqlo;
4254 (*state)->dst.seqdiff = (*state)->src.seqhi -
4255 (*state)->dst.seqlo;
4256 (*state)->src.seqhi = (*state)->src.seqlo +
4257 (*state)->dst.max_win;
4258 (*state)->dst.seqhi = (*state)->dst.seqlo +
4259 (*state)->src.max_win;
4260 (*state)->src.wscale = (*state)->dst.wscale = 0;
4261 (*state)->src.state = (*state)->dst.state =
4263 REASON_SET(reason, PFRES_SYNPROXY);
4264 return (PF_SYNPROXY_DROP);
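		/*
		 * Handshake knitting: once both half-connections are up,
		 * the peers' sequence spaces are joined by the seqdiff
		 * offsets computed above and all further segments are
		 * simply modulated by pf_tcp_track_full().
		 */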
4268 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
4269 dst->state >= TCPS_FIN_WAIT_2 &&
4270 src->state >= TCPS_FIN_WAIT_2) {
4271 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4272 printf("pf: state reuse ");
4273 pf_print_state(*state);
4274 pf_print_flags(th->th_flags);
4277 /* XXX make sure it's the same direction ?? */
4278 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
4279 pf_unlink_state(*state, PF_ENTER_LOCKED);
4284 if ((*state)->state_flags & PFSTATE_SLOPPY) {
4285 if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP)
4288 if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason,
4289 &copyback) == PF_DROP)
4293 /* translate source/destination address, if necessary */
4294 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4295 struct pf_state_key *nk = (*state)->key[pd->didx];
4297 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4298 nk->port[pd->sidx] != th->th_sport)
4299 pf_change_ap(pd->src, &th->th_sport, pd->ip_sum,
4300 &th->th_sum, &nk->addr[pd->sidx],
4301 nk->port[pd->sidx], 0, pd->af);
4303 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4304 nk->port[pd->didx] != th->th_dport)
4305 pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum,
4306 &th->th_sum, &nk->addr[pd->didx],
4307 nk->port[pd->didx], 0, pd->af);
4311 /* Copyback sequence modulation or stateful scrub changes if needed */
4313 m_copyback(m, off, sizeof(*th), (caddr_t)th);
4319 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
4320 struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4322 struct pf_state_peer *src, *dst;
4323 struct pf_state_key_cmp key;
4324 struct udphdr *uh = pd->hdr.udp;
4326 bzero(&key, sizeof(key));
4328 key.proto = IPPROTO_UDP;
4329 if (direction == PF_IN) { /* wire side, straight */
4330 PF_ACPY(&key.addr[0], pd->src, key.af);
4331 PF_ACPY(&key.addr[1], pd->dst, key.af);
4332 key.port[0] = uh->uh_sport;
4333 key.port[1] = uh->uh_dport;
4334 } else { /* stack side, reverse */
4335 PF_ACPY(&key.addr[1], pd->src, key.af);
4336 PF_ACPY(&key.addr[0], pd->dst, key.af);
4337 key.port[1] = uh->uh_sport;
4338 key.port[0] = uh->uh_dport;
4341 STATE_LOOKUP(kif, &key, direction, *state, pd);
4343 if (direction == (*state)->direction) {
4344 src = &(*state)->src;
4345 dst = &(*state)->dst;
4347 src = &(*state)->dst;
4348 dst = &(*state)->src;
4352 if (src->state < PFUDPS_SINGLE)
4353 src->state = PFUDPS_SINGLE;
4354 if (dst->state == PFUDPS_SINGLE)
4355 dst->state = PFUDPS_MULTIPLE;
4357 /* update expire time */
4358 (*state)->expire = time_uptime;
4359 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
4360 (*state)->timeout = PFTM_UDP_MULTIPLE;
4362 (*state)->timeout = PFTM_UDP_SINGLE;
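	/*
	 * UDP is connectionless, so this "state" is heuristic: the
	 * first packet puts its side into SINGLE, a reply promotes the
	 * other side to MULTIPLE, and the timeout grows once traffic is
	 * bidirectional.
	 */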
4364 /* translate source/destination address, if necessary */
4365 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4366 struct pf_state_key *nk = (*state)->key[pd->didx];
4368 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4369 nk->port[pd->sidx] != uh->uh_sport)
4370 pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum,
4371 &uh->uh_sum, &nk->addr[pd->sidx],
4372 nk->port[pd->sidx], 1, pd->af);
4374 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4375 nk->port[pd->didx] != uh->uh_dport)
4376 pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum,
4377 &uh->uh_sum, &nk->addr[pd->didx],
4378 nk->port[pd->didx], 1, pd->af);
4379 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
4386 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
4387 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
4389 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
4390 u_int16_t icmpid = 0, *icmpsum;
4393 struct pf_state_key_cmp key;
4395 bzero(&key, sizeof(key));
4396 switch (pd->proto) {
4399 icmptype = pd->hdr.icmp->icmp_type;
4400 icmpid = pd->hdr.icmp->icmp_id;
4401 icmpsum = &pd->hdr.icmp->icmp_cksum;
4403 if (icmptype == ICMP_UNREACH ||
4404 icmptype == ICMP_SOURCEQUENCH ||
4405 icmptype == ICMP_REDIRECT ||
4406 icmptype == ICMP_TIMXCEED ||
4407 icmptype == ICMP_PARAMPROB)
4412 case IPPROTO_ICMPV6:
4413 icmptype = pd->hdr.icmp6->icmp6_type;
4414 icmpid = pd->hdr.icmp6->icmp6_id;
4415 icmpsum = &pd->hdr.icmp6->icmp6_cksum;
4417 if (icmptype == ICMP6_DST_UNREACH ||
4418 icmptype == ICMP6_PACKET_TOO_BIG ||
4419 icmptype == ICMP6_TIME_EXCEEDED ||
4420 icmptype == ICMP6_PARAM_PROB)
4429 * ICMP query/reply message not related to a TCP/UDP packet.
4430 * Search for an ICMP state.
4433 key.proto = pd->proto;
4434 key.port[0] = key.port[1] = icmpid;
4435 if (direction == PF_IN) { /* wire side, straight */
4436 PF_ACPY(&key.addr[0], pd->src, key.af);
4437 PF_ACPY(&key.addr[1], pd->dst, key.af);
4438 } else { /* stack side, reverse */
4439 PF_ACPY(&key.addr[1], pd->src, key.af);
4440 PF_ACPY(&key.addr[0], pd->dst, key.af);
4443 STATE_LOOKUP(kif, &key, direction, *state, pd);
4445 (*state)->expire = time_uptime;
4446 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
4448 /* translate source/destination address, if necessary */
4449 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4450 struct pf_state_key *nk = (*state)->key[pd->didx];
4455 if (PF_ANEQ(pd->src,
4456 &nk->addr[pd->sidx], AF_INET))
4457 pf_change_a(&saddr->v4.s_addr,
4459 nk->addr[pd->sidx].v4.s_addr, 0);
4461 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
4463 pf_change_a(&daddr->v4.s_addr,
4465 nk->addr[pd->didx].v4.s_addr, 0);
4468 pd->hdr.icmp->icmp_id) {
4469 pd->hdr.icmp->icmp_cksum =
4471 pd->hdr.icmp->icmp_cksum, icmpid,
4472 nk->port[pd->sidx], 0);
4473 pd->hdr.icmp->icmp_id =
4477 m_copyback(m, off, ICMP_MINLEN,
4478 (caddr_t )pd->hdr.icmp);
4483 if (PF_ANEQ(pd->src,
4484 &nk->addr[pd->sidx], AF_INET6))
4486 &pd->hdr.icmp6->icmp6_cksum,
4487 &nk->addr[pd->sidx], 0);
4489 if (PF_ANEQ(pd->dst,
4490 &nk->addr[pd->didx], AF_INET6))
4492 &pd->hdr.icmp6->icmp6_cksum,
4493 &nk->addr[pd->didx], 0);
4495 m_copyback(m, off, sizeof(struct icmp6_hdr),
4496 (caddr_t )pd->hdr.icmp6);
4505 * ICMP error message in response to a TCP/UDP packet.
4506 * Extract the inner TCP/UDP header and search for that state.
4509 struct pf_pdesc pd2;
4510 bzero(&pd2, sizeof pd2);
4515 struct ip6_hdr h2_6;
4522 /* Payload packet is from the opposite direction. */
4523 pd2.sidx = (direction == PF_IN) ? 1 : 0;
4524 pd2.didx = (direction == PF_IN) ? 0 : 1;
4528 /* offset of h2 in mbuf chain */
4529 ipoff2 = off + ICMP_MINLEN;
4531 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
4532 NULL, reason, pd2.af)) {
4533 DPFPRINTF(PF_DEBUG_MISC,
4534 ("pf: ICMP error message too short "
4539 * ICMP error messages don't refer to non-first
4542 if (h2.ip_off & htons(IP_OFFMASK)) {
4543 REASON_SET(reason, PFRES_FRAG);
4547 /* offset of protocol header that follows h2 */
4548 off2 = ipoff2 + (h2.ip_hl << 2);
4550 pd2.proto = h2.ip_p;
4551 pd2.src = (struct pf_addr *)&h2.ip_src;
4552 pd2.dst = (struct pf_addr *)&h2.ip_dst;
4553 pd2.ip_sum = &h2.ip_sum;
4558 ipoff2 = off + sizeof(struct icmp6_hdr);
4560 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
4561 NULL, reason, pd2.af)) {
4562 DPFPRINTF(PF_DEBUG_MISC,
4563 ("pf: ICMP error message too short "
4567 pd2.proto = h2_6.ip6_nxt;
4568 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
4569 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
4571 off2 = ipoff2 + sizeof(h2_6);
4573 switch (pd2.proto) {
4574 case IPPROTO_FRAGMENT:
4576 * ICMPv6 error messages for
4577 * non-first fragments
4579 REASON_SET(reason, PFRES_FRAG);
4582 case IPPROTO_HOPOPTS:
4583 case IPPROTO_ROUTING:
4584 case IPPROTO_DSTOPTS: {
4585 /* get next header and header length */
4586 struct ip6_ext opt6;
4588 if (!pf_pull_hdr(m, off2, &opt6,
4589 sizeof(opt6), NULL, reason,
4591 DPFPRINTF(PF_DEBUG_MISC,
4592 ("pf: ICMPv6 short opt\n"));
4595 if (pd2.proto == IPPROTO_AH)
4596 off2 += (opt6.ip6e_len + 2) * 4;
4598 off2 += (opt6.ip6e_len + 1) * 8;
4599 pd2.proto = opt6.ip6e_nxt;
4600 /* go to the next header */
4607 } while (!terminal);
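		/*
		 * The loop above walks IPv6 extension headers inside the
		 * ICMPv6 payload until a transport header is reached;
		 * AH lengths count 32-bit words while the other options
		 * count 64-bit units, hence the two formulas.
		 */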
4612 switch (pd2.proto) {
4616 struct pf_state_peer *src, *dst;
4621 * Only the first 8 bytes of the TCP header can be
4622 * expected. Don't access any TCP header fields after
4623 * th_seq; an ackskew test is not possible.
4625 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
4627 DPFPRINTF(PF_DEBUG_MISC,
4628 ("pf: ICMP error message too short "
4634 key.proto = IPPROTO_TCP;
4635 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4636 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4637 key.port[pd2.sidx] = th.th_sport;
4638 key.port[pd2.didx] = th.th_dport;
4640 STATE_LOOKUP(kif, &key, direction, *state, pd);
4642 if (direction == (*state)->direction) {
4643 src = &(*state)->dst;
4644 dst = &(*state)->src;
4646 src = &(*state)->src;
4647 dst = &(*state)->dst;
4650 if (src->wscale && dst->wscale)
4651 dws = dst->wscale & PF_WSCALE_MASK;
4655 /* Demodulate sequence number */
4656 seq = ntohl(th.th_seq) - src->seqdiff;
4658 pf_change_a(&th.th_seq, icmpsum,
4663 if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
4664 (!SEQ_GEQ(src->seqhi, seq) ||
4665 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
4666 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4667 printf("pf: BAD ICMP %d:%d ",
4668 icmptype, pd->hdr.icmp->icmp_code);
4669 pf_print_host(pd->src, 0, pd->af);
4671 pf_print_host(pd->dst, 0, pd->af);
4673 pf_print_state(*state);
4674 printf(" seq=%u\n", seq);
4676 REASON_SET(reason, PFRES_BADSTATE);
4679 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4680 printf("pf: OK ICMP %d:%d ",
4681 icmptype, pd->hdr.icmp->icmp_code);
4682 pf_print_host(pd->src, 0, pd->af);
4684 pf_print_host(pd->dst, 0, pd->af);
4686 pf_print_state(*state);
4687 printf(" seq=%u\n", seq);
4691 /* translate source/destination address, if necessary */
4692 if ((*state)->key[PF_SK_WIRE] !=
4693 (*state)->key[PF_SK_STACK]) {
4694 struct pf_state_key *nk =
4695 (*state)->key[pd->didx];
4697 if (PF_ANEQ(pd2.src,
4698 &nk->addr[pd2.sidx], pd2.af) ||
4699 nk->port[pd2.sidx] != th.th_sport)
4700 pf_change_icmp(pd2.src, &th.th_sport,
4701 daddr, &nk->addr[pd2.sidx],
4702 nk->port[pd2.sidx], NULL,
4703 pd2.ip_sum, icmpsum,
4704 pd->ip_sum, 0, pd2.af);
4706 if (PF_ANEQ(pd2.dst,
4707 &nk->addr[pd2.didx], pd2.af) ||
4708 nk->port[pd2.didx] != th.th_dport)
4709 pf_change_icmp(pd2.dst, &th.th_dport,
4710 NULL, /* XXX Inbound NAT? */
4711 &nk->addr[pd2.didx],
4712 nk->port[pd2.didx], NULL,
4713 pd2.ip_sum, icmpsum,
4714 pd->ip_sum, 0, pd2.af);
4722 m_copyback(m, off, ICMP_MINLEN,
4723 (caddr_t )pd->hdr.icmp);
4724 m_copyback(m, ipoff2, sizeof(h2),
4731 sizeof(struct icmp6_hdr),
4732 (caddr_t )pd->hdr.icmp6);
4733 m_copyback(m, ipoff2, sizeof(h2_6),
4738 m_copyback(m, off2, 8, (caddr_t)&th);
4747 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
4748 NULL, reason, pd2.af)) {
4749 DPFPRINTF(PF_DEBUG_MISC,
4750 ("pf: ICMP error message too short "
4756 key.proto = IPPROTO_UDP;
4757 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4758 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4759 key.port[pd2.sidx] = uh.uh_sport;
4760 key.port[pd2.didx] = uh.uh_dport;
4762 STATE_LOOKUP(kif, &key, direction, *state, pd);
4764 /* translate source/destination address, if necessary */
4765 if ((*state)->key[PF_SK_WIRE] !=
4766 (*state)->key[PF_SK_STACK]) {
4767 struct pf_state_key *nk =
4768 (*state)->key[pd->didx];
4770 if (PF_ANEQ(pd2.src,
4771 &nk->addr[pd2.sidx], pd2.af) ||
4772 nk->port[pd2.sidx] != uh.uh_sport)
4773 pf_change_icmp(pd2.src, &uh.uh_sport,
4774 daddr, &nk->addr[pd2.sidx],
4775 nk->port[pd2.sidx], &uh.uh_sum,
4776 pd2.ip_sum, icmpsum,
4777 pd->ip_sum, 1, pd2.af);
4779 if (PF_ANEQ(pd2.dst,
4780 &nk->addr[pd2.didx], pd2.af) ||
4781 nk->port[pd2.didx] != uh.uh_dport)
4782 pf_change_icmp(pd2.dst, &uh.uh_dport,
4783 NULL, /* XXX Inbound NAT? */
4784 &nk->addr[pd2.didx],
4785 nk->port[pd2.didx], &uh.uh_sum,
4786 pd2.ip_sum, icmpsum,
4787 pd->ip_sum, 1, pd2.af);
4792 m_copyback(m, off, ICMP_MINLEN,
4793 (caddr_t )pd->hdr.icmp);
4794 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4800 sizeof(struct icmp6_hdr),
4801 (caddr_t )pd->hdr.icmp6);
4802 m_copyback(m, ipoff2, sizeof(h2_6),
4807 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
4813 case IPPROTO_ICMP: {
4816 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
4817 NULL, reason, pd2.af)) {
4818 DPFPRINTF(PF_DEBUG_MISC,
4819 ("pf: ICMP error message too short i"
4825 key.proto = IPPROTO_ICMP;
4826 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4827 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4828 key.port[0] = key.port[1] = iih.icmp_id;
4830 STATE_LOOKUP(kif, &key, direction, *state, pd);
4832 /* translate source/destination address, if necessary */
4833 if ((*state)->key[PF_SK_WIRE] !=
4834 (*state)->key[PF_SK_STACK]) {
4835 struct pf_state_key *nk =
4836 (*state)->key[pd->didx];
4838 if (PF_ANEQ(pd2.src,
4839 &nk->addr[pd2.sidx], pd2.af) ||
4840 nk->port[pd2.sidx] != iih.icmp_id)
4841 pf_change_icmp(pd2.src, &iih.icmp_id,
4842 daddr, &nk->addr[pd2.sidx],
4843 nk->port[pd2.sidx], NULL,
4844 pd2.ip_sum, icmpsum,
4845 pd->ip_sum, 0, AF_INET);
4847 if (PF_ANEQ(pd2.dst,
4848 &nk->addr[pd2.didx], pd2.af) ||
4849 nk->port[pd2.didx] != iih.icmp_id)
4850 pf_change_icmp(pd2.dst, &iih.icmp_id,
4851 NULL, /* XXX Inbound NAT? */
4852 &nk->addr[pd2.didx],
4853 nk->port[pd2.didx], NULL,
4854 pd2.ip_sum, icmpsum,
4855 pd->ip_sum, 0, AF_INET);
4857 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
4858 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4859 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
4866 case IPPROTO_ICMPV6: {
4867 struct icmp6_hdr iih;
4869 if (!pf_pull_hdr(m, off2, &iih,
4870 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
4871 DPFPRINTF(PF_DEBUG_MISC,
4872 ("pf: ICMP error message too short "
4878 key.proto = IPPROTO_ICMPV6;
4879 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4880 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4881 key.port[0] = key.port[1] = iih.icmp6_id;
4883 STATE_LOOKUP(kif, &key, direction, *state, pd);
4885 /* translate source/destination address, if necessary */
4886 if ((*state)->key[PF_SK_WIRE] !=
4887 (*state)->key[PF_SK_STACK]) {
4888 struct pf_state_key *nk =
4889 (*state)->key[pd->didx];
4891 if (PF_ANEQ(pd2.src,
4892 &nk->addr[pd2.sidx], pd2.af) ||
4893 nk->port[pd2.sidx] != iih.icmp6_id)
4894 pf_change_icmp(pd2.src, &iih.icmp6_id,
4895 daddr, &nk->addr[pd2.sidx],
4896 nk->port[pd2.sidx], NULL,
4897 pd2.ip_sum, icmpsum,
4898 pd->ip_sum, 0, AF_INET6);
4900 if (PF_ANEQ(pd2.dst,
4901 &nk->addr[pd2.didx], pd2.af) ||
4902 nk->port[pd2.didx] != iih.icmp6_id)
4903 pf_change_icmp(pd2.dst, &iih.icmp6_id,
4904 NULL, /* XXX Inbound NAT? */
4905 &nk->addr[pd2.didx],
4906 nk->port[pd2.didx], NULL,
4907 pd2.ip_sum, icmpsum,
4908 pd->ip_sum, 0, AF_INET6);
4910 m_copyback(m, off, sizeof(struct icmp6_hdr),
4911 (caddr_t)pd->hdr.icmp6);
4912 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
4913 m_copyback(m, off2, sizeof(struct icmp6_hdr),
4922 key.proto = pd2.proto;
4923 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4924 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4925 key.port[0] = key.port[1] = 0;
4927 STATE_LOOKUP(kif, &key, direction, *state, pd);
4929 /* translate source/destination address, if necessary */
4930 if ((*state)->key[PF_SK_WIRE] !=
4931 (*state)->key[PF_SK_STACK]) {
4932 struct pf_state_key *nk =
4933 (*state)->key[pd->didx];
4935 if (PF_ANEQ(pd2.src,
4936 &nk->addr[pd2.sidx], pd2.af))
4937 pf_change_icmp(pd2.src, NULL, daddr,
4938 &nk->addr[pd2.sidx], 0, NULL,
4939 pd2.ip_sum, icmpsum,
4940 pd->ip_sum, 0, pd2.af);
4942 if (PF_ANEQ(pd2.dst,
4943 &nk->addr[pd2.didx], pd2.af))
4944 pf_change_icmp(pd2.dst, NULL,
4945 NULL, /* XXX Inbound NAT? */
4946 &nk->addr[pd2.didx], 0, NULL,
4947 pd2.ip_sum, icmpsum,
4948 pd->ip_sum, 0, pd2.af);
4953 m_copyback(m, off, ICMP_MINLEN,
4954 (caddr_t)pd->hdr.icmp);
4955 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4961 sizeof(struct icmp6_hdr),
4962 (caddr_t )pd->hdr.icmp6);
4963 m_copyback(m, ipoff2, sizeof(h2_6),
static int
pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
    struct mbuf *m, struct pf_pdesc *pd)
{
	struct pf_state_peer	*src, *dst;
	struct pf_state_key_cmp	 key;

	bzero(&key, sizeof(key));
	key.af = pd->af;
	key.proto = pd->proto;
	if (direction == PF_IN) {
		PF_ACPY(&key.addr[0], pd->src, key.af);
		PF_ACPY(&key.addr[1], pd->dst, key.af);
		key.port[0] = key.port[1] = 0;
	} else {
		PF_ACPY(&key.addr[1], pd->src, key.af);
		PF_ACPY(&key.addr[0], pd->dst, key.af);
		key.port[1] = key.port[0] = 0;
	}

	STATE_LOOKUP(kif, &key, direction, *state, pd);

	if (direction == (*state)->direction) {
		src = &(*state)->src;
		dst = &(*state)->dst;
	} else {
		src = &(*state)->dst;
		dst = &(*state)->src;
	}

	/* update states */
	if (src->state < PFOTHERS_SINGLE)
		src->state = PFOTHERS_SINGLE;
	if (dst->state == PFOTHERS_SINGLE)
		dst->state = PFOTHERS_MULTIPLE;

	/* update expire time */
	(*state)->expire = time_uptime;
	if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
		(*state)->timeout = PFTM_OTHER_MULTIPLE;
	else
		(*state)->timeout = PFTM_OTHER_SINGLE;

	/* translate source/destination address, if necessary */
	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
		struct pf_state_key *nk = (*state)->key[pd->didx];

		KASSERT(nk, ("%s: nk is null", __func__));
		KASSERT(pd, ("%s: pd is null", __func__));
		KASSERT(pd->src, ("%s: pd->src is null", __func__));
		KASSERT(pd->dst, ("%s: pd->dst is null", __func__));
		switch (pd->af) {
		case AF_INET:
			if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
				pf_change_a(&pd->src->v4.s_addr,
				    pd->ip_sum,
				    nk->addr[pd->sidx].v4.s_addr,
				    0);

			if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
				pf_change_a(&pd->dst->v4.s_addr,
				    pd->ip_sum,
				    nk->addr[pd->didx].v4.s_addr,
				    0);
			break;
		case AF_INET6:
			if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6))
				PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);

			if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6))
				PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
			break;
		}
	}
	return (PF_PASS);
}
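
/*
 * Editor's illustration (not part of the original source): the lookup key
 * built in pf_test_state_other() above is direction-normalized, so
 * addr[0]/addr[1] always carry the ordering recorded when the state was
 * created, and a single state entry matches both directions of a
 * connectionless flow.  A standalone sketch of that normalization, with
 * the pf types assumed:
 *
 *	static void
 *	key_from_pdesc(struct pf_state_key_cmp *key, int direction,
 *	    struct pf_pdesc *pd)
 *	{
 *		bzero(key, sizeof(*key));
 *		key->af = pd->af;
 *		key->proto = pd->proto;
 *		if (direction == PF_IN) {
 *			PF_ACPY(&key->addr[0], pd->src, key->af);
 *			PF_ACPY(&key->addr[1], pd->dst, key->af);
 *		} else {
 *			PF_ACPY(&key->addr[1], pd->src, key->af);
 *			PF_ACPY(&key->addr[0], pd->dst, key->af);
 *		}
 *		key->port[0] = key->port[1] = 0;
 *	}
 */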
/*
 * ipoff and off are measured from the start of the mbuf chain.
 * h must be at "ipoff" on the mbuf chain.
 */
void *
pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
    u_short *actionp, u_short *reasonp, sa_family_t af)
{
	switch (af) {
	case AF_INET: {
		struct ip	*h = mtod(m, struct ip *);
		u_int16_t	 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

		if (fragoff) {
			if (fragoff >= len)
				ACTION_SET(actionp, PF_PASS);
			else {
				ACTION_SET(actionp, PF_DROP);
				REASON_SET(reasonp, PFRES_FRAG);
			}
			return (NULL);
		}
		if (m->m_pkthdr.len < off + len ||
		    ntohs(h->ip_len) < off + len) {
			ACTION_SET(actionp, PF_DROP);
			REASON_SET(reasonp, PFRES_SHORT);
			return (NULL);
		}
		break;
	}
	case AF_INET6: {
		struct ip6_hdr	*h = mtod(m, struct ip6_hdr *);

		if (m->m_pkthdr.len < off + len ||
		    (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
		    (unsigned)(off + len)) {
			ACTION_SET(actionp, PF_DROP);
			REASON_SET(reasonp, PFRES_SHORT);
			return (NULL);
		}
		break;
	}
	}
	m_copydata(m, off, len, p);
	return (p);
}
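
/*
 * Editor's illustration (not part of the original source): pf_pull_hdr()
 * copies a header that may be split across mbufs into caller-provided
 * storage and reports why the packet should be dropped when it cannot.
 * The typical call pattern, as used by pf_test() below:
 *
 *	struct tcphdr	th;
 *	u_short		action, reason;
 *
 *	if (!pf_pull_hdr(m, off, &th, sizeof(th), &action, &reason,
 *	    AF_INET)) {
 *		log = (action != PF_PASS);
 *		goto done;
 *	}
 */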
static int
pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif,
    int rtableid)
{
	struct radix_node_head	*rnh;
	struct sockaddr_in	*dst;
	int			 ret = 1;
	int			 check_mpath;
	struct sockaddr_in6	*dst6;
	struct route_in6	 ro;
	struct radix_node	*rn;
	struct rtentry		*rt;
	struct ifnet		*ifp;

	check_mpath = 0;
	/* XXX: stick to table 0 for now */
	rnh = rt_tables_get_rnh(0, af);
	if (rnh != NULL && rn_mpath_capable(rnh))
		check_mpath = 1;

	bzero(&ro, sizeof(ro));
	switch (af) {
	case AF_INET:
		dst = satosin(&ro.ro_dst);
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof(*dst);
		dst->sin_addr = addr->v4;
		break;
	case AF_INET6:
		/*
		 * Skip check for addresses with embedded interface scope,
		 * as they would always match anyway.
		 */
		if (IN6_IS_SCOPE_EMBED(&addr->v6))
			goto out;
		dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
		dst6->sin6_family = AF_INET6;
		dst6->sin6_len = sizeof(*dst6);
		dst6->sin6_addr = addr->v6;
		break;
	default:
		return (0);
	}

	/* Skip checks for ipsec interfaces */
	if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
		goto out;

	switch (af) {
	case AF_INET6:
		in6_rtalloc_ign(&ro, 0, rtableid);
		break;
	case AF_INET:
		in_rtalloc_ign((struct route *)&ro, 0, rtableid);
		break;
	default:
		rtalloc_ign((struct route *)&ro, 0);	/* No/default FIB. */
		break;
	}

	if (ro.ro_rt != NULL) {
		/* No interface given, this is a no-route check */
		if (kif == NULL)
			goto out;
		if (kif->pfik_ifp == NULL) {
			ret = 0;
			goto out;
		}

		/* Perform uRPF check if passed input interface */
		ret = 0;
		rn = (struct radix_node *)ro.ro_rt;
		do {
			rt = (struct rtentry *)rn;
			ifp = rt->rt_ifp;
			if (kif->pfik_ifp == ifp)
				ret = 1;
			rn = rn_mpath_next(rn);
		} while (check_mpath == 1 && rn != NULL && ret == 0);
	} else
		ret = 0;
out:
	if (ro.ro_rt != NULL)
		RTFREE(ro.ro_rt);
	return (ret);
}
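
/*
 * Editor's note (illustration, not part of the original source):
 * pf_routable() implements two checks.  Called with kif == NULL it is a
 * plain "does any route exist" test; called with the input interface's
 * kif it is a (multipath-aware) uRPF check that only succeeds when some
 * route back to the address leaves via the interface the packet arrived
 * on.  Hypothetical usage:
 *
 *	int has_route, urpf_ok;
 *
 *	has_route = pf_routable(pd->dst, pd->af, NULL, 0);
 *	urpf_ok = pf_routable(pd->src, pd->af, kif, 0);
 */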
static void
pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
    struct pf_state *s, struct pf_pdesc *pd)
{
	struct mbuf		*m0, *m1;
	struct sockaddr_in	 dst;
	struct ip		*ip;
	struct ifnet		*ifp = NULL;
	struct pf_addr		 naddr;
	struct pf_src_node	*sn = NULL;
	int			 error = 0;
	uint16_t		 ip_len, ip_off;

	KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
	KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
	    __func__));

	if ((pd->pf_mtag == NULL &&
	    ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
	    pd->pf_mtag->routed++ > 3) {
		m0 = *m;
		*m = NULL;
		goto bad_locked;
	}

	if (r->rt == PF_DUPTO) {
		if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
			if (s)
				PF_STATE_UNLOCK(s);
			return;
		}
	} else {
		if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
			if (s)
				PF_STATE_UNLOCK(s);
			return;
		}
		m0 = *m;
	}

	ip = mtod(m0, struct ip *);

	bzero(&dst, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(dst);
	dst.sin_addr = ip->ip_dst;

	if (r->rt == PF_FASTROUTE) {
		struct rtentry *rt;

		if (s)
			PF_STATE_UNLOCK(s);
		rt = rtalloc1_fib(sintosa(&dst), 0, 0, M_GETFIB(m0));
		if (rt == NULL) {
			KMOD_IPSTAT_INC(ips_noroute);
			error = EHOSTUNREACH;
			goto bad;
		}

		ifp = rt->rt_ifp;
		rt->rt_rmx.rmx_pksent++;

		if (rt->rt_flags & RTF_GATEWAY)
			bcopy(satosin(rt->rt_gateway), &dst, sizeof(dst));
		RTFREE_LOCKED(rt);
	} else {
		if (TAILQ_EMPTY(&r->rpool.list)) {
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
			goto bad_locked;
		}
		if (s == NULL) {
			pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
			    &naddr, NULL, &sn);
			if (!PF_AZERO(&naddr, AF_INET))
				dst.sin_addr.s_addr = naddr.v4.s_addr;
			ifp = r->rpool.cur->kif ?
			    r->rpool.cur->kif->pfik_ifp : NULL;
		} else {
			if (!PF_AZERO(&s->rt_addr, AF_INET))
				dst.sin_addr.s_addr =
				    s->rt_addr.v4.s_addr;
			ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
			PF_STATE_UNLOCK(s);
		}
	}
	if (ifp == NULL)
		goto bad;

	if (oifp != ifp) {
		if (pf_test(PF_OUT, ifp, &m0, NULL) != PF_PASS)
			goto bad;
		else if (m0 == NULL)
			goto done;
		if (m0->m_len < sizeof(struct ip)) {
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("%s: m0->m_len < sizeof(struct ip)\n", __func__));
			goto bad;
		}
		ip = mtod(m0, struct ip *);
	}

	if (ifp->if_flags & IFF_LOOPBACK)
		m0->m_flags |= M_SKIP_FIREWALL;

	ip_len = ntohs(ip->ip_len);
	ip_off = ntohs(ip->ip_off);

	/* Copied from FreeBSD 10.0-CURRENT ip_output. */
	m0->m_pkthdr.csum_flags |= CSUM_IP;
	if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) {
		in_delayed_cksum(m0);
		m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	if (m0->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) {
		sctp_delayed_cksum(m0, (uint32_t)(ip->ip_hl << 2));
		m0->m_pkthdr.csum_flags &= ~CSUM_SCTP;
	}

	/*
	 * If small enough for interface, or the interface will take
	 * care of the fragmentation for us, we can just send directly.
	 */
	if (ip_len <= ifp->if_mtu ||
	    (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0 ||
	    ((ip_off & IP_DF) == 0 && (ifp->if_hwassist & CSUM_FRAGMENT))) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) {
			ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
			m0->m_pkthdr.csum_flags &= ~CSUM_IP;
		}
		m_clrprotoflags(m0);	/* Avoid confusing lower layers. */
		error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
		goto done;
	}

	/* Balk when DF bit is set or the interface doesn't support TSO. */
	if ((ip_off & IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
		error = EMSGSIZE;
		KMOD_IPSTAT_INC(ips_cantfrag);
		if (r->rt != PF_DUPTO) {
			icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
			    ifp->if_mtu);
			goto done;
		}
		goto bad;
	}

	error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist);
	if (error)
		goto bad;

	for (; m0; m0 = m1) {
		m1 = m0->m_nextpkt;
		m0->m_nextpkt = NULL;
		if (error == 0) {
			m_clrprotoflags(m0);
			error = (*ifp->if_output)(ifp, m0, sintosa(&dst),
			    NULL);
		} else
			m_freem(m0);
	}

	if (error == 0)
		KMOD_IPSTAT_INC(ips_fragmented);

done:
	if (r->rt != PF_DUPTO)
		*m = NULL;
	return;

bad_locked:
	if (s)
		PF_STATE_UNLOCK(s);
bad:
	m_freem(m0);
	goto done;
}
static void
pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
    struct pf_state *s, struct pf_pdesc *pd)
{
	struct mbuf		*m0;
	struct sockaddr_in6	 dst;
	struct ip6_hdr		*ip6;
	struct ifnet		*ifp = NULL;
	struct pf_addr		 naddr;
	struct pf_src_node	*sn = NULL;

	KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
	KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
	    __func__));

	if ((pd->pf_mtag == NULL &&
	    ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
	    pd->pf_mtag->routed++ > 3) {
		m0 = *m;
		*m = NULL;
		goto bad_locked;
	}

	if (r->rt == PF_DUPTO) {
		if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
			if (s)
				PF_STATE_UNLOCK(s);
			return;
		}
	} else {
		if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
			if (s)
				PF_STATE_UNLOCK(s);
			return;
		}
		m0 = *m;
	}

	ip6 = mtod(m0, struct ip6_hdr *);

	bzero(&dst, sizeof(dst));
	dst.sin6_family = AF_INET6;
	dst.sin6_len = sizeof(dst);
	dst.sin6_addr = ip6->ip6_dst;

	/* Cheat. XXX why only in the v6 case??? */
	if (r->rt == PF_FASTROUTE) {
		if (s)
			PF_STATE_UNLOCK(s);
		m0->m_flags |= M_SKIP_FIREWALL;
		ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
		return;
	}

	if (TAILQ_EMPTY(&r->rpool.list)) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
		goto bad_locked;
	}
	if (s == NULL) {
		pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
		    &naddr, NULL, &sn);
		if (!PF_AZERO(&naddr, AF_INET6))
			PF_ACPY((struct pf_addr *)&dst.sin6_addr,
			    &naddr, AF_INET6);
		ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
	} else {
		if (!PF_AZERO(&s->rt_addr, AF_INET6))
			PF_ACPY((struct pf_addr *)&dst.sin6_addr,
			    &s->rt_addr, AF_INET6);
		ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
		PF_STATE_UNLOCK(s);
	}

	if (ifp == NULL)
		goto bad;

	if (oifp != ifp) {
		if (pf_test6(PF_OUT, ifp, &m0, NULL) != PF_PASS)
			goto bad;
		else if (m0 == NULL)
			goto done;
		if (m0->m_len < sizeof(struct ip6_hdr)) {
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("%s: m0->m_len < sizeof(struct ip6_hdr)\n",
			    __func__));
			goto bad;
		}
		ip6 = mtod(m0, struct ip6_hdr *);
	}

	if (ifp->if_flags & IFF_LOOPBACK)
		m0->m_flags |= M_SKIP_FIREWALL;

	/*
	 * If the packet is too large for the outgoing interface,
	 * send back an icmp6 error.
	 */
	if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr))
		dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
	if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu)
		nd6_output(ifp, ifp, m0, &dst, NULL);
	else {
		in6_ifstat_inc(ifp, ifs6_in_toobig);
		if (r->rt != PF_DUPTO)
			icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
		else
			goto bad;
	}

done:
	if (r->rt != PF_DUPTO)
		*m = NULL;
	return;

bad_locked:
	if (s)
		PF_STATE_UNLOCK(s);
bad:
	m_freem(m0);
	goto done;
}
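
/*
 * Editor's note (illustration, not part of the original source): both
 * pf_route() and pf_route6() bump pd->pf_mtag->routed on entry and drop
 * the packet once the counter exceeds 3.  A packet re-injected through
 * pf_test()/pf_test6() can match another route-to rule and re-enter
 * these functions, so the counter is what bounds such forwarding loops:
 *
 *	if ((pd->pf_mtag == NULL &&
 *	    ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
 *	    pd->pf_mtag->routed++ > 3) {
 *		m0 = *m;
 *		*m = NULL;
 *		goto bad_locked;
 *	}
 */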
/*
 * FreeBSD supports cksum offloads for the following drivers.
 *  em(4), fxp(4), ixgb(4), lge(4), ndis(4), nge(4), re(4),
 *  ti(4), txp(4), xl(4)
 *
 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR :
 *  network driver performed cksum including pseudo header, need to verify
 *   csum_data
 * CSUM_DATA_VALID :
 *  network driver performed cksum, needs an additional pseudo-header
 *  cksum computation with the partial csum_data (i.e. lack of H/W support
 *  for the pseudo header, for instance hme(4), sk(4) and possibly gem(4))
 *
 * After validating the cksum of the packet, set both CSUM_DATA_VALID and
 * CSUM_PSEUDO_HDR in order to avoid recomputation of the cksum in upper
 * layers.  Also, set csum_data to 0xffff to force cksum validation.
 */
static int
pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af)
{
	u_int16_t sum = 0;
	int hw_assist = 0;
	struct ip *ip;

	if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
		return (1);
	if (m->m_pkthdr.len < off + len)
		return (1);

	switch (p) {
	case IPPROTO_TCP:
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
				sum = m->m_pkthdr.csum_data;
			} else {
				ip = mtod(m, struct ip *);
				sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htonl((u_short)len +
				    m->m_pkthdr.csum_data + IPPROTO_TCP));
			}
			sum ^= 0xffff;
			++hw_assist;
		}
		break;
	case IPPROTO_UDP:
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
				sum = m->m_pkthdr.csum_data;
			} else {
				ip = mtod(m, struct ip *);
				sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htonl((u_short)len +
				    m->m_pkthdr.csum_data + IPPROTO_UDP));
			}
			sum ^= 0xffff;
			++hw_assist;
		}
		break;
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		break;
	default:
		return (1);
	}

	if (!hw_assist) {
		switch (af) {
		case AF_INET:
			if (p == IPPROTO_ICMP) {
				if (m->m_len < off)
					return (1);
				m->m_data += off;
				m->m_len -= off;
				sum = in_cksum(m, len);
				m->m_data -= off;
				m->m_len += off;
			} else {
				if (m->m_len < sizeof(struct ip))
					return (1);
				sum = in4_cksum(m, p, off, len);
			}
			break;
		case AF_INET6:
			if (m->m_len < sizeof(struct ip6_hdr))
				return (1);
			sum = in6_cksum(m, p, off, len);
			break;
		default:
			return (1);
		}
	}
	if (sum) {
		switch (p) {
		case IPPROTO_TCP:
			KMOD_TCPSTAT_INC(tcps_rcvbadsum);
			break;
		case IPPROTO_UDP:
			KMOD_UDPSTAT_INC(udps_badsum);
			break;
		case IPPROTO_ICMP:
			KMOD_ICMPSTAT_INC(icps_checksum);
			break;
		case IPPROTO_ICMPV6:
			KMOD_ICMP6STAT_INC(icp6s_checksum);
			break;
		}
		return (1);
	} else {
		if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
			m->m_pkthdr.csum_flags |=
			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			m->m_pkthdr.csum_data = 0xffff;
		}
	}
	return (0);
}
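
/*
 * Editor's note (illustration, not part of the original source): in the
 * CSUM_DATA_VALID-without-CSUM_PSEUDO_HDR branch above, the driver left a
 * partial sum over the payload in csum_data and the host folds in the
 * pseudo header itself, e.g. for TCP over IPv4:
 *
 *	sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
 *	    htonl((u_short)len + m->m_pkthdr.csum_data + IPPROTO_TCP));
 *	sum ^= 0xffff;
 *
 * A checksum that verifies leaves sum == 0 after the final XOR, matching
 * the convention of the software in_cksum()/in4_cksum()/in6_cksum() paths.
 */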
int
pf_test(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
{
	struct pfi_kif		*kif;
	u_short			 action, reason = 0, log = 0;
	struct mbuf		*m = *m0;
	struct ip		*h = NULL;
	struct m_tag		*ipfwtag;
	struct pf_rule		*a = NULL, *r = &V_pf_default_rule, *tr, *nr;
	struct pf_state		*s = NULL;
	struct pf_ruleset	*ruleset = NULL;
	struct pf_pdesc		 pd;
	int			 off, dirndx, pqid = 0;

	M_ASSERTPKTHDR(m);

	if (!V_pf_status.running)
		return (PF_PASS);

	memset(&pd, 0, sizeof(pd));

	kif = (struct pfi_kif *)ifp->if_pf_kif;
	if (kif == NULL) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
		return (PF_DROP);
	}
	if (kif->pfik_flags & PFI_IFLAG_SKIP)
		return (PF_PASS);

	if (m->m_flags & M_SKIP_FIREWALL)
		return (PF_PASS);

	pd.pf_mtag = pf_find_mtag(m);

	PF_RULES_RLOCK();

	if (ip_divert_ptr != NULL &&
	    ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
		struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1);
		if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) {
			if (pd.pf_mtag == NULL &&
			    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
				action = PF_DROP;
				goto done;
			}
			pd.pf_mtag->flags |= PF_PACKET_LOOPED;
			m_tag_delete(m, ipfwtag);
		}
		if (pd.pf_mtag && pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) {
			m->m_flags |= M_FASTFWD_OURS;
			pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
		}
	} else if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
		/* We do IP header normalization and packet reassembly here */
		action = PF_DROP;
		goto done;
	}
	m = *m0;	/* pf_normalize messes with m0 */
	h = mtod(m, struct ip *);

	off = h->ip_hl << 2;
	if (off < (int)sizeof(struct ip)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_SHORT);
		log = 1;
		goto done;
	}

	pd.src = (struct pf_addr *)&h->ip_src;
	pd.dst = (struct pf_addr *)&h->ip_dst;
	pd.sport = pd.dport = NULL;
	pd.ip_sum = &h->ip_sum;
	pd.proto_sum = NULL;
	pd.proto = h->ip_p;
	pd.dir = dir;
	pd.sidx = (dir == PF_IN) ? 0 : 1;
	pd.didx = (dir == PF_IN) ? 1 : 0;
	pd.af = AF_INET;
	pd.tos = h->ip_tos;
	pd.tot_len = ntohs(h->ip_len);

	/* handle fragments that didn't get reassembled by normalization */
	if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
		action = pf_test_fragment(&r, dir, kif, m, h,
		    &pd, &a, &ruleset);
		goto done;
	}

	switch (h->ip_p) {

	case IPPROTO_TCP: {
		struct tcphdr	th;

		pd.hdr.tcp = &th;
		if (!pf_pull_hdr(m, off, &th, sizeof(th),
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
		pd.p_len = pd.tot_len - off - (th.th_off << 2);
		if ((th.th_flags & TH_ACK) && pd.p_len == 0)
			pqid = 1;
		action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
		if (action == PF_DROP)
			goto done;
		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
		    &reason);
		if (action == PF_PASS) {
			if (pfsync_update_state_ptr != NULL)
				pfsync_update_state_ptr(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
			    &a, &ruleset, inp);
		break;
	}

	case IPPROTO_UDP: {
		struct udphdr	uh;

		pd.hdr.udp = &uh;
		if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
		if (uh.uh_dport == 0 ||
		    ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
		    ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_SHORT);
			goto done;
		}
		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
		if (action == PF_PASS) {
			if (pfsync_update_state_ptr != NULL)
				pfsync_update_state_ptr(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
			    &a, &ruleset, inp);
		break;
	}

	case IPPROTO_ICMP: {
		struct icmp	ih;

		pd.hdr.icmp = &ih;
		if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
		action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
		    &reason);
		if (action == PF_PASS) {
			if (pfsync_update_state_ptr != NULL)
				pfsync_update_state_ptr(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
			    &a, &ruleset, inp);
		break;
	}

#ifdef INET6
	case IPPROTO_ICMPV6: {
		action = PF_DROP;
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pf: dropping IPv4 packet with ICMPv6 payload\n"));
		goto done;
	}
#endif

	default:
		action = pf_test_state_other(&s, dir, kif, m, &pd);
		if (action == PF_PASS) {
			if (pfsync_update_state_ptr != NULL)
				pfsync_update_state_ptr(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
			    &a, &ruleset, inp);
		break;
	}

done:
	if (action == PF_PASS && h->ip_hl > 5 &&
	    !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_IPOPTIONS);
		log = 1;
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pf: dropping packet with ip options\n"));
	}

	if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_MEMORY);
	}
	if (r->rtableid >= 0)
		M_SETFIB(m, r->rtableid);

#ifdef ALTQ
	if (action == PF_PASS && r->qid) {
		if (pd.pf_mtag == NULL &&
		    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_MEMORY);
		} else {
			if (pqid || (pd.tos & IPTOS_LOWDELAY))
				pd.pf_mtag->qid = r->pqid;
			else
				pd.pf_mtag->qid = r->qid;
			/* add hints for ecn */
			pd.pf_mtag->hdr = h;
		}
	}
#endif /* ALTQ */

	/*
	 * connections redirected to loopback should not match sockets
	 * bound specifically to loopback due to security implications,
	 * see tcp_input() and in_pcblookup_listen().
	 */
	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
	    (s->nat_rule.ptr->action == PF_RDR ||
	    s->nat_rule.ptr->action == PF_BINAT) &&
	    (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
		m->m_flags |= M_SKIP_FIREWALL;

	if (action == PF_PASS && r->divert.port && ip_divert_ptr != NULL &&
	    !PACKET_LOOPED(&pd)) {

		ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0,
		    sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
		if (ipfwtag != NULL) {
			((struct ipfw_rule_ref *)(ipfwtag+1))->info =
			    ntohs(r->divert.port);
			((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir;

			if (s)
				PF_STATE_UNLOCK(s);

			m_tag_prepend(m, ipfwtag);
			if (m->m_flags & M_FASTFWD_OURS) {
				if (pd.pf_mtag == NULL &&
				    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
					action = PF_DROP;
					REASON_SET(&reason, PFRES_MEMORY);
					log = 1;
					DPFPRINTF(PF_DEBUG_MISC,
					    ("pf: failed to allocate tag\n"));
				} else {
					pd.pf_mtag->flags |=
					    PF_FASTFWD_OURS_PRESENT;
					m->m_flags &= ~M_FASTFWD_OURS;
				}
			}
			ip_divert_ptr(*m0, dir == PF_IN ? DIR_IN : DIR_OUT);
			*m0 = NULL;

			return (action);
		} else {
			/* XXX: ipfw has the same behaviour! */
			action = PF_DROP;
			REASON_SET(&reason, PFRES_MEMORY);
			log = 1;
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pf: failed to allocate divert tag\n"));
		}
	}

	if (log) {
		struct pf_rule *lr;

		if (s != NULL && s->nat_rule.ptr != NULL &&
		    s->nat_rule.ptr->log & PF_LOG_ALL)
			lr = s->nat_rule.ptr;
		else
			lr = r;
		PFLOG_PACKET(kif, m, AF_INET, dir, reason, lr, a, ruleset, &pd,
		    (s == NULL));
	}

	kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
	kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;

	if (action == PF_PASS || r->action == PF_DROP) {
		dirndx = (dir == PF_OUT);
		r->packets[dirndx]++;
		r->bytes[dirndx] += pd.tot_len;
		if (a != NULL) {
			a->packets[dirndx]++;
			a->bytes[dirndx] += pd.tot_len;
		}
		if (s != NULL) {
			if (s->nat_rule.ptr != NULL) {
				s->nat_rule.ptr->packets[dirndx]++;
				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
			}
			if (s->src_node != NULL) {
				s->src_node->packets[dirndx]++;
				s->src_node->bytes[dirndx] += pd.tot_len;
			}
			if (s->nat_src_node != NULL) {
				s->nat_src_node->packets[dirndx]++;
				s->nat_src_node->bytes[dirndx] += pd.tot_len;
			}
			dirndx = (dir == s->direction) ? 0 : 1;
			s->packets[dirndx]++;
			s->bytes[dirndx] += pd.tot_len;
		}
		tr = r;
		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
		if (nr != NULL && r == &V_pf_default_rule)
			tr = nr;
		if (tr->src.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->src.addr.p.tbl,
			    (s == NULL) ? pd.src :
			    &s->key[(s->direction == PF_IN)]->
				addr[(s->direction == PF_OUT)],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->src.neg);
		if (tr->dst.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->dst.addr.p.tbl,
			    (s == NULL) ? pd.dst :
			    &s->key[(s->direction == PF_IN)]->
				addr[(s->direction == PF_IN)],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->dst.neg);
	}

	switch (action) {
	case PF_SYNPROXY_DROP:
		m_freem(*m0);
	case PF_DEFER:
		*m0 = NULL;
		action = PF_PASS;
		break;
	default:
		/* pf_route() returns unlocked. */
		if (r->rt) {
			pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);
			return (action);
		}
		break;
	}
	if (s)
		PF_STATE_UNLOCK(s);
	PF_RULES_RUNLOCK();

	return (action);
}
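
/*
 * Editor's note (hypothetical sketch, not part of the original source):
 * pf_test() is normally invoked from a pfil(9) hook installed elsewhere
 * in the pf module; the wrapper below only illustrates the calling
 * convention, and its name and details are assumptions:
 *
 *	static int
 *	pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
 *	    struct inpcb *inp)
 *	{
 *		int chk = pf_test(PF_IN, ifp, m, inp);
 *
 *		if (chk != PF_PASS && *m != NULL) {
 *			m_freem(*m);
 *			*m = NULL;
 *		}
 *		return (chk != PF_PASS ? EACCES : 0);
 *	}
 */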
int
pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
{
	struct pfi_kif		*kif;
	u_short			 action, reason = 0, log = 0;
	struct mbuf		*m = *m0, *n = NULL;
	struct ip6_hdr		*h = NULL;
	struct pf_rule		*a = NULL, *r = &V_pf_default_rule, *tr, *nr;
	struct pf_state		*s = NULL;
	struct pf_ruleset	*ruleset = NULL;
	struct pf_pdesc		 pd;
	int			 off, terminal = 0, dirndx, rh_cnt = 0;

	M_ASSERTPKTHDR(m);

	if (!V_pf_status.running)
		return (PF_PASS);

	memset(&pd, 0, sizeof(pd));
	pd.pf_mtag = pf_find_mtag(m);

	if (pd.pf_mtag && pd.pf_mtag->flags & PF_TAG_GENERATED)
		return (PF_PASS);

	kif = (struct pfi_kif *)ifp->if_pf_kif;
	if (kif == NULL) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
		return (PF_DROP);
	}
	if (kif->pfik_flags & PFI_IFLAG_SKIP)
		return (PF_PASS);

	PF_RULES_RLOCK();

	/* We do IP header normalization and packet reassembly here */
	if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
		action = PF_DROP;
		goto done;
	}
	m = *m0;	/* pf_normalize messes with m0 */
	h = mtod(m, struct ip6_hdr *);

	/*
	 * We do not support jumbograms yet.  If we keep going, a zero
	 * ip6_plen will do something bad, so drop the packet for now.
	 */
	if (htons(h->ip6_plen) == 0) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_NORM);	/*XXX*/
		goto done;
	}

	pd.src = (struct pf_addr *)&h->ip6_src;
	pd.dst = (struct pf_addr *)&h->ip6_dst;
	pd.sport = pd.dport = NULL;
	pd.ip_sum = NULL;
	pd.proto_sum = NULL;
	pd.dir = dir;
	pd.sidx = (dir == PF_IN) ? 0 : 1;
	pd.didx = (dir == PF_IN) ? 1 : 0;
	pd.af = AF_INET6;
	pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);

	off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
	pd.proto = h->ip6_nxt;
	do {
		switch (pd.proto) {
		case IPPROTO_FRAGMENT:
			action = pf_test_fragment(&r, dir, kif, m, h,
			    &pd, &a, &ruleset);
			if (action == PF_DROP)
				REASON_SET(&reason, PFRES_FRAG);
			goto done;
		case IPPROTO_ROUTING: {
			struct ip6_rthdr rthdr;

			if (rh_cnt++) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 more than one rthdr\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_IPOPTIONS);
				log = 1;
				goto done;
			}
			if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
			    &reason, pd.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 short rthdr\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_SHORT);
				log = 1;
				goto done;
			}
			if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 rthdr0\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_IPOPTIONS);
				log = 1;
				goto done;
			}
			/* FALLTHROUGH */
		}
		case IPPROTO_AH:
		case IPPROTO_HOPOPTS:
		case IPPROTO_DSTOPTS: {
			/* get next header and header length */
			struct ip6_ext	opt6;

			if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
			    NULL, &reason, pd.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 short opt\n"));
				action = PF_DROP;
				log = 1;
				goto done;
			}
			if (pd.proto == IPPROTO_AH)
				off += (opt6.ip6e_len + 2) * 4;
			else
				off += (opt6.ip6e_len + 1) * 8;
			pd.proto = opt6.ip6e_nxt;
			/* go to the next header */
			break;
		}
		default:
			terminal++;
			break;
		}
	} while (!terminal);

	/* if there's no routing header, use unmodified mbuf for checksumming */
	if (!n)
		n = m;

	switch (pd.proto) {

	case IPPROTO_TCP: {
		struct tcphdr	th;

		pd.hdr.tcp = &th;
		if (!pf_pull_hdr(m, off, &th, sizeof(th),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		pd.p_len = pd.tot_len - off - (th.th_off << 2);
		action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
		if (action == PF_DROP)
			goto done;
		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
		    &reason);
		if (action == PF_PASS) {
			if (pfsync_update_state_ptr != NULL)
				pfsync_update_state_ptr(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
			    &a, &ruleset, inp);
		break;
	}

	case IPPROTO_UDP: {
		struct udphdr	uh;

		pd.hdr.udp = &uh;
		if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		if (uh.uh_dport == 0 ||
		    ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
		    ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_SHORT);
			goto done;
		}
		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
		if (action == PF_PASS) {
			if (pfsync_update_state_ptr != NULL)
				pfsync_update_state_ptr(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
			    &a, &ruleset, inp);
		break;
	}

	case IPPROTO_ICMP: {
		action = PF_DROP;
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pf: dropping IPv6 packet with ICMPv4 payload\n"));
		goto done;
	}

	case IPPROTO_ICMPV6: {
		struct icmp6_hdr	ih;

		pd.hdr.icmp6 = &ih;
		if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		action = pf_test_state_icmp(&s, dir, kif,
		    m, off, h, &pd, &reason);
		if (action == PF_PASS) {
			if (pfsync_update_state_ptr != NULL)
				pfsync_update_state_ptr(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
			    &a, &ruleset, inp);
		break;
	}

	default:
		action = pf_test_state_other(&s, dir, kif, m, &pd);
		if (action == PF_PASS) {
			if (pfsync_update_state_ptr != NULL)
				pfsync_update_state_ptr(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
			    &a, &ruleset, inp);
		break;
	}

done:
	if (n != m) {
		m_freem(n);
		n = NULL;
	}

	/* handle dangerous IPv6 extension headers. */
	if (action == PF_PASS && rh_cnt &&
	    !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_IPOPTIONS);
		log = 1;
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pf: dropping packet with dangerous v6 headers\n"));
	}

	if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_MEMORY);
	}
	if (r->rtableid >= 0)
		M_SETFIB(m, r->rtableid);

#ifdef ALTQ
	if (action == PF_PASS && r->qid) {
		if (pd.pf_mtag == NULL &&
		    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_MEMORY);
		} else {
			if (pd.tos & IPTOS_LOWDELAY)
				pd.pf_mtag->qid = r->pqid;
			else
				pd.pf_mtag->qid = r->qid;
			/* add hints for ecn */
			pd.pf_mtag->hdr = h;
		}
	}
#endif /* ALTQ */

	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
	    (s->nat_rule.ptr->action == PF_RDR ||
	    s->nat_rule.ptr->action == PF_BINAT) &&
	    IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
		m->m_flags |= M_SKIP_FIREWALL;

	/* XXX: Anybody working on it?! */
	if (action == PF_PASS && r->divert.port)
		printf("pf: divert(9) is not supported for IPv6\n");

	if (log) {
		struct pf_rule *lr;

		if (s != NULL && s->nat_rule.ptr != NULL &&
		    s->nat_rule.ptr->log & PF_LOG_ALL)
			lr = s->nat_rule.ptr;
		else
			lr = r;
		PFLOG_PACKET(kif, m, AF_INET6, dir, reason, lr, a, ruleset,
		    &pd, (s == NULL));
	}

	kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
	kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;

	if (action == PF_PASS || r->action == PF_DROP) {
		dirndx = (dir == PF_OUT);
		r->packets[dirndx]++;
		r->bytes[dirndx] += pd.tot_len;
		if (a != NULL) {
			a->packets[dirndx]++;
			a->bytes[dirndx] += pd.tot_len;
		}
		if (s != NULL) {
			if (s->nat_rule.ptr != NULL) {
				s->nat_rule.ptr->packets[dirndx]++;
				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
			}
			if (s->src_node != NULL) {
				s->src_node->packets[dirndx]++;
				s->src_node->bytes[dirndx] += pd.tot_len;
			}
			if (s->nat_src_node != NULL) {
				s->nat_src_node->packets[dirndx]++;
				s->nat_src_node->bytes[dirndx] += pd.tot_len;
			}
			dirndx = (dir == s->direction) ? 0 : 1;
			s->packets[dirndx]++;
			s->bytes[dirndx] += pd.tot_len;
		}
		tr = r;
		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
		if (nr != NULL && r == &V_pf_default_rule)
			tr = nr;
		if (tr->src.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->src.addr.p.tbl,
			    (s == NULL) ? pd.src :
			    &s->key[(s->direction == PF_IN)]->addr[0],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->src.neg);
		if (tr->dst.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->dst.addr.p.tbl,
			    (s == NULL) ? pd.dst :
			    &s->key[(s->direction == PF_IN)]->addr[1],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->dst.neg);
	}

	switch (action) {
	case PF_SYNPROXY_DROP:
		m_freem(*m0);
	case PF_DEFER:
		*m0 = NULL;
		action = PF_PASS;
		break;
	default:
		/* pf_route6() returns unlocked. */
		if (r->rt) {
			pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
			return (action);
		}
		break;
	}

	if (s)
		PF_STATE_UNLOCK(s);
	PF_RULES_RUNLOCK();
	return (action);
}