/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2008 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/gsb_crc32.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/random.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/ucred.h>

#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/route.h>
#include <net/route/nhop.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>
#include <net/if_pfsync.h>

#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/in_fib.h>
#include <netinet/ip.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_fib.h>
#include <netinet6/scope6_var.h>
#if defined(SCTP) || defined(SCTP_SUPPORT)
#include <netinet/sctp_crc32.h>
#endif

#include <machine/in_cksum.h>
#include <security/mac/mac_framework.h>
#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x

SDT_PROVIDER_DEFINE(pf);
SDT_PROBE_DEFINE4(pf, ip, test, done, "int", "int", "struct pf_krule *",
    "struct pf_kstate *");
SDT_PROBE_DEFINE4(pf, ip, test6, done, "int", "int", "struct pf_krule *",
    "struct pf_kstate *");
SDT_PROBE_DEFINE5(pf, ip, state, lookup, "struct pfi_kkif *",
    "struct pf_state_key_cmp *", "int", "struct pf_pdesc *",
    "struct pf_kstate *");
VNET_DEFINE(struct pf_altqqueue,	 pf_altqs[4]);
VNET_DEFINE(struct pf_kpalist,		 pf_pabuf);
VNET_DEFINE(struct pf_altqqueue *,	 pf_altqs_active);
VNET_DEFINE(struct pf_altqqueue *,	 pf_altq_ifs_active);
VNET_DEFINE(struct pf_altqqueue *,	 pf_altqs_inactive);
VNET_DEFINE(struct pf_altqqueue *,	 pf_altq_ifs_inactive);
VNET_DEFINE(struct pf_kstatus,		 pf_status);

VNET_DEFINE(u_int32_t,			 ticket_altqs_active);
VNET_DEFINE(u_int32_t,			 ticket_altqs_inactive);
VNET_DEFINE(int,			 altqs_inactive_open);
VNET_DEFINE(u_int32_t,			 ticket_pabuf);

VNET_DEFINE(MD5_CTX,			 pf_tcp_secret_ctx);
#define	V_pf_tcp_secret_ctx		 VNET(pf_tcp_secret_ctx)
VNET_DEFINE(u_char,			 pf_tcp_secret[16]);
#define	V_pf_tcp_secret			 VNET(pf_tcp_secret)
VNET_DEFINE(int,			 pf_tcp_secret_init);
#define	V_pf_tcp_secret_init		 VNET(pf_tcp_secret_init)
VNET_DEFINE(int,			 pf_tcp_iss_off);
#define	V_pf_tcp_iss_off		 VNET(pf_tcp_iss_off)
VNET_DECLARE(int,			 pf_vnet_active);
#define	V_pf_vnet_active		 VNET(pf_vnet_active)

VNET_DEFINE_STATIC(uint32_t, pf_purge_idx);
#define	V_pf_purge_idx	VNET(pf_purge_idx)
#ifdef PF_WANT_32_TO_64_COUNTER
VNET_DEFINE_STATIC(uint32_t, pf_counter_periodic_iter);
#define	V_pf_counter_periodic_iter	VNET(pf_counter_periodic_iter)

VNET_DEFINE(struct allrulelist_head, pf_allrulelist);
VNET_DEFINE(size_t, pf_allrulecount);
VNET_DEFINE(struct pf_krule *, pf_rulemarker);
#endif
/*
 * Queue for pf_intr() sends.
 */
static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations");
struct pf_send_entry {
	STAILQ_ENTRY(pf_send_entry)	pfse_next;
	struct mbuf			*pfse_m;
	enum {
		PFSE_IP,
		PFSE_IP6,
		PFSE_ICMP,
		PFSE_ICMP6,
	}				pfse_type;
	struct {
		int		type;
		int		code;
		int		mtu;
	} icmpopts;
};

STAILQ_HEAD(pf_send_head, pf_send_entry);
VNET_DEFINE_STATIC(struct pf_send_head, pf_sendqueue);
#define	V_pf_sendqueue	VNET(pf_sendqueue)

static struct mtx_padalign pf_sendqueue_mtx;
MTX_SYSINIT(pf_sendqueue_mtx, &pf_sendqueue_mtx, "pf send queue", MTX_DEF);
#define	PF_SENDQ_LOCK()		mtx_lock(&pf_sendqueue_mtx)
#define	PF_SENDQ_UNLOCK()	mtx_unlock(&pf_sendqueue_mtx)
/*
 * Queue for pf_overload_task() tasks.
 */
struct pf_overload_entry {
	SLIST_ENTRY(pf_overload_entry)	next;
	struct pf_addr			addr;
	sa_family_t			af;
	uint8_t				dir;
	struct pf_krule			*rule;
};

SLIST_HEAD(pf_overload_head, pf_overload_entry);
VNET_DEFINE_STATIC(struct pf_overload_head, pf_overloadqueue);
#define	V_pf_overloadqueue	VNET(pf_overloadqueue)
VNET_DEFINE_STATIC(struct task, pf_overloadtask);
#define	V_pf_overloadtask	VNET(pf_overloadtask)

static struct mtx_padalign pf_overloadqueue_mtx;
MTX_SYSINIT(pf_overloadqueue_mtx, &pf_overloadqueue_mtx,
    "pf overload/flush queue", MTX_DEF);
#define	PF_OVERLOADQ_LOCK()	mtx_lock(&pf_overloadqueue_mtx)
#define	PF_OVERLOADQ_UNLOCK()	mtx_unlock(&pf_overloadqueue_mtx)
VNET_DEFINE(struct pf_krulequeue, pf_unlinked_rules);
struct mtx_padalign pf_unlnkdrules_mtx;
MTX_SYSINIT(pf_unlnkdrules_mtx, &pf_unlnkdrules_mtx, "pf unlinked rules",
    MTX_DEF);

struct mtx_padalign pf_table_stats_lock;
MTX_SYSINIT(pf_table_stats_lock, &pf_table_stats_lock, "pf table stats",
    MTX_DEF);
VNET_DEFINE_STATIC(uma_zone_t,	pf_sources_z);
#define	V_pf_sources_z	VNET(pf_sources_z)
uma_zone_t		pf_mtag_z;
VNET_DEFINE(uma_zone_t,	 pf_state_z);
VNET_DEFINE(uma_zone_t,	 pf_state_key_z);

VNET_DEFINE(uint64_t, pf_stateid[MAXCPU]);
#define	PFID_CPUBITS	8
#define	PFID_CPUSHIFT	(sizeof(uint64_t) * NBBY - PFID_CPUBITS)
#define	PFID_CPUMASK	((uint64_t)((1 << PFID_CPUBITS) - 1) <<	PFID_CPUSHIFT)
#define	PFID_MAXID	(~PFID_CPUMASK)
CTASSERT((1 << PFID_CPUBITS) >= MAXCPU);
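
/*
 * Layout sketch (illustrative, not from the original source): the
 * PFID_* macros carve the top PFID_CPUBITS bits of the 64-bit state
 * ID for the allocating CPU, so each CPU can hand out IDs from its
 * own V_pf_stateid[] slot without synchronization:
 *
 *   63        56 55                                          0
 *  +------------+---------------------------------------------+
 *  |    CPU     |          per-CPU sequential counter         |
 *  +------------+---------------------------------------------+
 *
 * pf_state_insert() below composes an ID as, roughly:
 *   id = V_pf_stateid[curcpu]++;
 *   id |= (uint64_t)curcpu << PFID_CPUSHIFT;
 */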
static void		 pf_src_tree_remove_state(struct pf_kstate *);
static void		 pf_init_threshold(struct pf_threshold *, u_int32_t,
			    u_int32_t);
static void		 pf_add_threshold(struct pf_threshold *);
static int		 pf_check_threshold(struct pf_threshold *);

static void		 pf_change_ap(struct mbuf *, struct pf_addr *, u_int16_t *,
			    u_int16_t *, u_int16_t *, struct pf_addr *,
			    u_int16_t, u_int8_t, sa_family_t);
static int		 pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
			    struct tcphdr *, struct pf_state_peer *);
static void		 pf_change_icmp(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, struct pf_addr *, u_int16_t,
			    u_int16_t *, u_int16_t *, u_int16_t *,
			    u_int16_t *, u_int8_t, sa_family_t);
static void		 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
			    sa_family_t, struct pf_krule *);
static void		 pf_detach_state(struct pf_kstate *);
static int		 pf_state_key_attach(struct pf_state_key *,
			    struct pf_state_key *, struct pf_kstate *);
static void		 pf_state_key_detach(struct pf_kstate *, int);
static int		 pf_state_key_ctor(void *, int, void *, int);
static u_int32_t	 pf_tcp_iss(struct pf_pdesc *);
void			 pf_rule_to_actions(struct pf_krule *,
			    struct pf_rule_actions *);
static int		 pf_test_rule(struct pf_krule **, struct pf_kstate **,
			    int, struct pfi_kkif *, struct mbuf *, int,
			    struct pf_pdesc *, struct pf_krule **,
			    struct pf_kruleset **, struct inpcb *);
static int		 pf_create_state(struct pf_krule *, struct pf_krule *,
			    struct pf_krule *, struct pf_pdesc *,
			    struct pf_ksrc_node *, struct pf_state_key *,
			    struct pf_state_key *, struct mbuf *, int,
			    u_int16_t, u_int16_t, int *, struct pfi_kkif *,
			    struct pf_kstate **, int, u_int16_t, u_int16_t,
			    int);
static int		 pf_test_fragment(struct pf_krule **, int,
			    struct pfi_kkif *, struct mbuf *, void *,
			    struct pf_pdesc *, struct pf_krule **,
			    struct pf_kruleset **);
static int		 pf_tcp_track_full(struct pf_kstate **,
			    struct pfi_kkif *, struct mbuf *, int,
			    struct pf_pdesc *, u_short *, int *);
static int		 pf_tcp_track_sloppy(struct pf_kstate **,
			    struct pf_pdesc *, u_short *);
static int		 pf_test_state_tcp(struct pf_kstate **, int,
			    struct pfi_kkif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_udp(struct pf_kstate **, int,
			    struct pfi_kkif *, struct mbuf *, int,
			    void *, struct pf_pdesc *);
static int		 pf_test_state_icmp(struct pf_kstate **, int,
			    struct pfi_kkif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_other(struct pf_kstate **, int,
			    struct pfi_kkif *, struct mbuf *, struct pf_pdesc *);
static u_int16_t	 pf_calc_mss(struct pf_addr *, sa_family_t,
			    int, u_int16_t);
static int		 pf_check_proto_cksum(struct mbuf *, int, int,
			    u_int8_t, sa_family_t);
static void		 pf_print_state_parts(struct pf_kstate *,
			    struct pf_state_key *, struct pf_state_key *);
static int		 pf_addr_wrap_neq(struct pf_addr_wrap *,
			    struct pf_addr_wrap *);
static void		 pf_patch_8(struct mbuf *, u_int16_t *, u_int8_t *, u_int8_t,
			    bool, u_int8_t);
static struct pf_kstate	*pf_find_state(struct pfi_kkif *,
			    struct pf_state_key_cmp *, u_int);
static int		 pf_src_connlimit(struct pf_kstate **);
static void		 pf_overload_task(void *v, int pending);
static int		 pf_insert_src_node(struct pf_ksrc_node **,
			    struct pf_krule *, struct pf_addr *, sa_family_t);
static u_int		 pf_purge_expired_states(u_int, int);
static void		 pf_purge_unlinked_rules(void);
static int		 pf_mtag_uminit(void *, int, int);
static void		 pf_mtag_free(struct m_tag *);
static void		 pf_route(struct mbuf **, struct pf_krule *, int,
			    struct ifnet *, struct pf_kstate *,
			    struct pf_pdesc *, struct inpcb *);
static void		 pf_change_a6(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, u_int8_t);
static void		 pf_route6(struct mbuf **, struct pf_krule *, int,
			    struct ifnet *, struct pf_kstate *,
			    struct pf_pdesc *, struct inpcb *);

static __inline void pf_set_protostate(struct pf_kstate *, int, u_int8_t);
int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);

extern int pf_end_threads;
extern struct proc *pf_purge_proc;

VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);

#define	PACKET_LOOPED(pd)	((pd)->pf_mtag &&			\
				 (pd)->pf_mtag->flags & PF_PACKET_LOOPED)
#define	STATE_LOOKUP(i, k, d, s, pd)					\
	do {								\
		(s) = pf_find_state((i), (k), (d));			\
		SDT_PROBE5(pf, ip, state, lookup, i, k, d, pd, (s));	\
		if ((s) == NULL)					\
			return (PF_DROP);				\
		if (PACKET_LOOPED(pd))					\
			return (PF_PASS);				\
	} while (0)

#define	BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all
#define	STATE_INC_COUNTERS(s)						\
	do {								\
		counter_u64_add(s->rule.ptr->states_cur, 1);		\
		counter_u64_add(s->rule.ptr->states_tot, 1);		\
		if (s->anchor.ptr != NULL) {				\
			counter_u64_add(s->anchor.ptr->states_cur, 1);	\
			counter_u64_add(s->anchor.ptr->states_tot, 1);	\
		}							\
		if (s->nat_rule.ptr != NULL) {				\
			counter_u64_add(s->nat_rule.ptr->states_cur, 1);\
			counter_u64_add(s->nat_rule.ptr->states_tot, 1);\
		}							\
	} while (0)

#define	STATE_DEC_COUNTERS(s)						\
	do {								\
		if (s->nat_rule.ptr != NULL)				\
			counter_u64_add(s->nat_rule.ptr->states_cur, -1);\
		if (s->anchor.ptr != NULL)				\
			counter_u64_add(s->anchor.ptr->states_cur, -1);	\
		counter_u64_add(s->rule.ptr->states_cur, -1);		\
	} while (0)
MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
VNET_DEFINE(struct pf_idhash *, pf_idhash);
VNET_DEFINE(struct pf_srchash *, pf_srchash);

SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "pf(4)");

u_long	pf_hashmask;
u_long	pf_srchashmask;
static u_long	pf_hashsize;
static u_long	pf_srchashsize;
u_long	pf_ioctl_maxcount = 65535;

SYSCTL_ULONG(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN,
    &pf_hashsize, 0, "Size of pf(4) states hashtable");
SYSCTL_ULONG(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN,
    &pf_srchashsize, 0, "Size of pf(4) source nodes hashtable");
SYSCTL_ULONG(_net_pf, OID_AUTO, request_maxcount, CTLFLAG_RWTUN,
    &pf_ioctl_maxcount, 0, "Maximum number of tables, addresses, ... in a single ioctl() call");

VNET_DEFINE(void *, pf_swi_cookie);
VNET_DEFINE(struct intr_event *, pf_swi_ie);

VNET_DEFINE(uint32_t, pf_hashseed);
#define	V_pf_hashseed	VNET(pf_hashseed)
int
pf_addr_cmp(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
{
	switch (af) {
	case AF_INET:
		if (a->addr32[0] > b->addr32[0])
			return (1);
		if (a->addr32[0] < b->addr32[0])
			return (-1);
		break;
	case AF_INET6:
		if (a->addr32[3] > b->addr32[3])
			return (1);
		if (a->addr32[3] < b->addr32[3])
			return (-1);
		if (a->addr32[2] > b->addr32[2])
			return (1);
		if (a->addr32[2] < b->addr32[2])
			return (-1);
		if (a->addr32[1] > b->addr32[1])
			return (1);
		if (a->addr32[1] < b->addr32[1])
			return (-1);
		if (a->addr32[0] > b->addr32[0])
			return (1);
		if (a->addr32[0] < b->addr32[0])
			return (-1);
		break;
	default:
		panic("%s: unknown address family %u", __func__, af);
	}

	return (0);
}
static __inline uint32_t
pf_hashkey(struct pf_state_key *sk)
{
	uint32_t h;

	h = murmur3_32_hash32((uint32_t *)sk,
	    sizeof(struct pf_state_key_cmp)/sizeof(uint32_t),
	    V_pf_hashseed);

	return (h & pf_hashmask);
}

static __inline uint32_t
pf_hashsrc(struct pf_addr *addr, sa_family_t af)
{
	uint32_t h;

	switch (af) {
	case AF_INET:
		h = murmur3_32_hash32((uint32_t *)&addr->v4,
		    sizeof(addr->v4)/sizeof(uint32_t), V_pf_hashseed);
		break;
	case AF_INET6:
		h = murmur3_32_hash32((uint32_t *)&addr->v6,
		    sizeof(addr->v6)/sizeof(uint32_t), V_pf_hashseed);
		break;
	default:
		panic("%s: unknown address family %u", __func__, af);
	}

	return (h & pf_srchashmask);
}
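
/*
 * Both hash functions key off V_pf_hashseed, which pf_initialize()
 * below sets from arc4random(), so bucket placement is randomized
 * per boot/vnet and attackers cannot precompute colliding state keys
 * to degrade the hash tables into linked lists.
 */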
static __inline uint32_t
pf_state_hash(struct pf_kstate *s)
{
	u_int32_t hv = (intptr_t)s / sizeof(*s);

	hv ^= crc32(&s->src, sizeof(s->src));
	hv ^= crc32(&s->dst, sizeof(s->dst));
	if (hv == 0)
		hv = 1;
	return (hv);
}
static __inline void
pf_set_protostate(struct pf_kstate *s, int which, u_int8_t newstate)
{
	if (which == PF_PEER_DST || which == PF_PEER_BOTH)
		s->dst.state = newstate;
	if (which == PF_PEER_DST)
		return;
	s->src.state = newstate;
}
void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
	switch (af) {
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
	case AF_INET6:
		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];
		break;
	}
}
static void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = time_uptime;
}

static void
pf_add_threshold(struct pf_threshold *threshold)
{
	u_int32_t t = time_uptime, diff = t - threshold->last;

	if (diff >= threshold->seconds)
		threshold->count = 0;
	else
		threshold->count -= threshold->count * diff /
		    threshold->seconds;
	threshold->count += PF_THRESHOLD_MULT;
	threshold->last = t;
}

static int
pf_check_threshold(struct pf_threshold *threshold)
{
	return (threshold->count > threshold->limit);
}
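
/*
 * Worked example (illustrative, not from the original source): for a
 * rule with "max-src-conn-rate 10/5", pf_init_threshold() stores
 * limit = 10 * PF_THRESHOLD_MULT and seconds = 5.  Each connection
 * first decays the running count linearly by diff/seconds and then
 * adds PF_THRESHOLD_MULT.  With PF_THRESHOLD_MULT == 1000, a count of
 * 8000 with 2 of the 5 seconds elapsed decays to
 * 8000 - 8000*2/5 = 4800; the new connection brings it to 5800, and
 * pf_check_threshold() compares that against the 10000 limit.  The
 * fixed-point multiplier keeps the integer division from rounding the
 * decay away for small counts.
 */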
static int
pf_src_connlimit(struct pf_kstate **state)
{
	struct pf_overload_entry *pfoe;
	int bad = 0;

	PF_STATE_LOCK_ASSERT(*state);

	(*state)->src_node->conn++;
	(*state)->src.tcp_est = 1;
	pf_add_threshold(&(*state)->src_node->conn_rate);

	if ((*state)->rule.ptr->max_src_conn &&
	    (*state)->rule.ptr->max_src_conn <
	    (*state)->src_node->conn) {
		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONN], 1);
		bad++;
	}

	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONNRATE], 1);
		bad++;
	}

	if (!bad)
		return (0);

	/* Kill this state. */
	(*state)->timeout = PFTM_PURGE;
	pf_set_protostate(*state, PF_PEER_BOTH, TCPS_CLOSED);

	if ((*state)->rule.ptr->overload_tbl == NULL)
		return (1);

	/* Schedule overloading and flushing task. */
	pfoe = malloc(sizeof(*pfoe), M_PFTEMP, M_NOWAIT);
	if (pfoe == NULL)
		return (1);	/* too bad :( */

	bcopy(&(*state)->src_node->addr, &pfoe->addr, sizeof(pfoe->addr));
	pfoe->af = (*state)->key[PF_SK_WIRE]->af;
	pfoe->rule = (*state)->rule.ptr;
	pfoe->dir = (*state)->direction;
	PF_OVERLOADQ_LOCK();
	SLIST_INSERT_HEAD(&V_pf_overloadqueue, pfoe, next);
	PF_OVERLOADQ_UNLOCK();
	taskqueue_enqueue(taskqueue_swi, &V_pf_overloadtask);

	return (1);
}
static void
pf_overload_task(void *v, int pending)
{
	struct pf_overload_head queue;
	struct pfr_addr p;
	struct pf_overload_entry *pfoe, *pfoe1;
	uint32_t killed = 0;

	CURVNET_SET((struct vnet *)v);

	PF_OVERLOADQ_LOCK();
	queue = V_pf_overloadqueue;
	SLIST_INIT(&V_pf_overloadqueue);
	PF_OVERLOADQ_UNLOCK();

	bzero(&p, sizeof(p));
	SLIST_FOREACH(pfoe, &queue, next) {
		counter_u64_add(V_pf_status.lcounters[LCNT_OVERLOAD_TABLE], 1);
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("%s: blocking address ", __func__);
			pf_print_host(&pfoe->addr, 0, pfoe->af);
			printf("\n");
		}

		p.pfra_af = pfoe->af;
		switch (pfoe->af) {
		case AF_INET:
			p.pfra_net = 32;
			p.pfra_ip4addr = pfoe->addr.v4;
			break;
		case AF_INET6:
			p.pfra_net = 128;
			p.pfra_ip6addr = pfoe->addr.v6;
			break;
		}

		PF_RULES_WLOCK();
		pfr_insert_kentry(pfoe->rule->overload_tbl, &p, time_second);
		PF_RULES_WUNLOCK();
	}

	/*
	 * Remove those entries, that don't need flushing.
	 */
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		if (pfoe->rule->flush == 0) {
			SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next);
			free(pfoe, M_PFTEMP);
		} else
			counter_u64_add(
			    V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH], 1);

	/* If nothing to flush, return. */
	if (SLIST_EMPTY(&queue)) {
		CURVNET_RESTORE();
		return;
	}

	for (int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_state_key *sk;
		struct pf_kstate *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			sk = s->key[PF_SK_WIRE];
			SLIST_FOREACH(pfoe, &queue, next)
				if (sk->af == pfoe->af &&
				    ((pfoe->rule->flush & PF_FLUSH_GLOBAL) ||
				    pfoe->rule == s->rule.ptr) &&
				    ((pfoe->dir == PF_OUT &&
				    PF_AEQ(&pfoe->addr, &sk->addr[1], sk->af)) ||
				    (pfoe->dir == PF_IN &&
				    PF_AEQ(&pfoe->addr, &sk->addr[0], sk->af)))) {
					s->timeout = PFTM_PURGE;
					pf_set_protostate(s, PF_PEER_BOTH, TCPS_CLOSED);
					killed++;
				}
		}
		PF_HASHROW_UNLOCK(ih);
	}
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		free(pfoe, M_PFTEMP);
	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("%s: %u states killed", __func__, killed);

	CURVNET_RESTORE();
}
/*
 * Can return locked on failure, so that we can consistently
 * allocate and insert a new one.
 */
struct pf_ksrc_node *
pf_find_src_node(struct pf_addr *src, struct pf_krule *rule, sa_family_t af,
	int returnlocked)
{
	struct pf_srchash *sh;
	struct pf_ksrc_node *n;

	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_SEARCH], 1);

	sh = &V_pf_srchash[pf_hashsrc(src, af)];
	PF_HASHROW_LOCK(sh);
	LIST_FOREACH(n, &sh->nodes, entry)
		if (n->rule.ptr == rule && n->af == af &&
		    ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) ||
		    (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0)))
			break;
	if (n != NULL) {
		n->states++;
		PF_HASHROW_UNLOCK(sh);
	} else if (returnlocked == 0)
		PF_HASHROW_UNLOCK(sh);

	return (n);
}
static void
pf_free_src_node(struct pf_ksrc_node *sn)
{

	for (int i = 0; i < 2; i++) {
		counter_u64_free(sn->bytes[i]);
		counter_u64_free(sn->packets[i]);
	}
	uma_zfree(V_pf_sources_z, sn);
}
static int
pf_insert_src_node(struct pf_ksrc_node **sn, struct pf_krule *rule,
    struct pf_addr *src, sa_family_t af)
{

	KASSERT((rule->rule_flag & PFRULE_SRCTRACK ||
	    rule->rpool.opts & PF_POOL_STICKYADDR),
	    ("%s for non-tracking rule %p", __func__, rule));

	if (*sn == NULL)
		*sn = pf_find_src_node(src, rule, af, 1);

	if (*sn == NULL) {
		struct pf_srchash *sh = &V_pf_srchash[pf_hashsrc(src, af)];

		PF_HASHROW_ASSERT(sh);

		if (!rule->max_src_nodes ||
		    counter_u64_fetch(rule->src_nodes) < rule->max_src_nodes)
			(*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO);
		else
			counter_u64_add(V_pf_status.lcounters[LCNT_SRCNODES],
			    1);
		if ((*sn) == NULL) {
			PF_HASHROW_UNLOCK(sh);
			return (-1);
		}

		for (int i = 0; i < 2; i++) {
			(*sn)->bytes[i] = counter_u64_alloc(M_NOWAIT);
			(*sn)->packets[i] = counter_u64_alloc(M_NOWAIT);

			if ((*sn)->bytes[i] == NULL || (*sn)->packets[i] == NULL) {
				pf_free_src_node(*sn);
				PF_HASHROW_UNLOCK(sh);
				return (-1);
			}
		}

		pf_init_threshold(&(*sn)->conn_rate,
		    rule->max_src_conn_rate.limit,
		    rule->max_src_conn_rate.seconds);

		(*sn)->af = af;
		(*sn)->rule.ptr = rule;
		PF_ACPY(&(*sn)->addr, src, af);
		LIST_INSERT_HEAD(&sh->nodes, *sn, entry);
		(*sn)->creation = time_uptime;
		(*sn)->ruletype = rule->action;
		(*sn)->states = 1;
		if ((*sn)->rule.ptr != NULL)
			counter_u64_add((*sn)->rule.ptr->src_nodes, 1);
		PF_HASHROW_UNLOCK(sh);
		counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_INSERT], 1);
	} else {
		if (rule->max_src_states &&
		    (*sn)->states >= rule->max_src_states) {
			counter_u64_add(V_pf_status.lcounters[LCNT_SRCSTATES],
			    1);
			return (-1);
		}
	}
	return (0);
}
void
pf_unlink_src_node(struct pf_ksrc_node *src)
{

	PF_HASHROW_ASSERT(&V_pf_srchash[pf_hashsrc(&src->addr, src->af)]);
	LIST_REMOVE(src, entry);
	if (src->rule.ptr)
		counter_u64_add(src->rule.ptr->src_nodes, -1);
}

u_int
pf_free_src_nodes(struct pf_ksrc_node_list *head)
{
	struct pf_ksrc_node *sn, *tmp;
	u_int count = 0;

	LIST_FOREACH_SAFE(sn, head, entry, tmp) {
		pf_free_src_node(sn);
		count++;
	}

	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], count);

	return (count);
}
void
pf_mtag_initialize()
{

	pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) +
	    sizeof(struct pf_mtag), NULL, NULL, pf_mtag_uminit, NULL,
	    UMA_ALIGN_PTR, 0);
}

/* Per-vnet data storage structures initialization. */
void
pf_initialize()
{
	struct pf_keyhash	*kh;
	struct pf_idhash	*ih;
	struct pf_srchash	*sh;
	u_int i;

	if (pf_hashsize == 0 || !powerof2(pf_hashsize))
		pf_hashsize = PF_HASHSIZ;
	if (pf_srchashsize == 0 || !powerof2(pf_srchashsize))
		pf_srchashsize = PF_SRCHASHSIZ;

	V_pf_hashseed = arc4random();

	/* States and state keys storage. */
	V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_kstate),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z;
	uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT);
	uma_zone_set_warning(V_pf_state_z, "PF states limit reached");

	V_pf_state_key_z = uma_zcreate("pf state keys",
	    sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	V_pf_keyhash = mallocarray(pf_hashsize, sizeof(struct pf_keyhash),
	    M_PFHASH, M_NOWAIT | M_ZERO);
	V_pf_idhash = mallocarray(pf_hashsize, sizeof(struct pf_idhash),
	    M_PFHASH, M_NOWAIT | M_ZERO);
	if (V_pf_keyhash == NULL || V_pf_idhash == NULL) {
		printf("pf: Unable to allocate memory for "
		    "state_hashsize %lu.\n", pf_hashsize);

		free(V_pf_keyhash, M_PFHASH);
		free(V_pf_idhash, M_PFHASH);

		pf_hashsize = PF_HASHSIZ;
		V_pf_keyhash = mallocarray(pf_hashsize,
		    sizeof(struct pf_keyhash), M_PFHASH, M_WAITOK | M_ZERO);
		V_pf_idhash = mallocarray(pf_hashsize,
		    sizeof(struct pf_idhash), M_PFHASH, M_WAITOK | M_ZERO);
	}

	pf_hashmask = pf_hashsize - 1;
	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
	    i++, kh++, ih++) {
		mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK);
		mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF);
	}

	/* Source nodes. */
	V_pf_sources_z = uma_zcreate("pf source nodes",
	    sizeof(struct pf_ksrc_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    0);
	V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z;
	uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT);
	uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached");

	V_pf_srchash = mallocarray(pf_srchashsize,
	    sizeof(struct pf_srchash), M_PFHASH, M_NOWAIT | M_ZERO);
	if (V_pf_srchash == NULL) {
		printf("pf: Unable to allocate memory for "
		    "source_hashsize %lu.\n", pf_srchashsize);

		pf_srchashsize = PF_SRCHASHSIZ;
		V_pf_srchash = mallocarray(pf_srchashsize,
		    sizeof(struct pf_srchash), M_PFHASH, M_WAITOK | M_ZERO);
	}

	pf_srchashmask = pf_srchashsize - 1;
	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++)
		mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);

	/* ALTQ */
	TAILQ_INIT(&V_pf_altqs[0]);
	TAILQ_INIT(&V_pf_altqs[1]);
	TAILQ_INIT(&V_pf_altqs[2]);
	TAILQ_INIT(&V_pf_altqs[3]);
	TAILQ_INIT(&V_pf_pabuf);
	V_pf_altqs_active = &V_pf_altqs[0];
	V_pf_altq_ifs_active = &V_pf_altqs[1];
	V_pf_altqs_inactive = &V_pf_altqs[2];
	V_pf_altq_ifs_inactive = &V_pf_altqs[3];

	/* Send & overload+flush queues. */
	STAILQ_INIT(&V_pf_sendqueue);
	SLIST_INIT(&V_pf_overloadqueue);
	TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, curvnet);

	/* Unlinked, but may be referenced rules. */
	TAILQ_INIT(&V_pf_unlinked_rules);
}

void
pf_mtag_cleanup()
{

	uma_zdestroy(pf_mtag_z);
}
void
pf_cleanup()
{
	struct pf_keyhash	*kh;
	struct pf_idhash	*ih;
	struct pf_srchash	*sh;
	struct pf_send_entry	*pfse, *next;
	u_int i;

	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
	    i++, kh++, ih++) {
		KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
		    __func__));
		KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty",
		    __func__));
		mtx_destroy(&kh->lock);
		mtx_destroy(&ih->lock);
	}
	free(V_pf_keyhash, M_PFHASH);
	free(V_pf_idhash, M_PFHASH);

	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
		KASSERT(LIST_EMPTY(&sh->nodes),
		    ("%s: source node hash not empty", __func__));
		mtx_destroy(&sh->lock);
	}
	free(V_pf_srchash, M_PFHASH);

	STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) {
		m_freem(pfse->pfse_m);
		free(pfse, M_PFTEMP);
	}

	uma_zdestroy(V_pf_sources_z);
	uma_zdestroy(V_pf_state_z);
	uma_zdestroy(V_pf_state_key_z);
}
static int
pf_mtag_uminit(void *mem, int size, int how)
{
	struct m_tag *t;

	t = (struct m_tag *)mem;
	t->m_tag_cookie = MTAG_ABI_COMPAT;
	t->m_tag_id = PACKET_TAG_PF;
	t->m_tag_len = sizeof(struct pf_mtag);
	t->m_tag_free = pf_mtag_free;

	return (0);
}

static void
pf_mtag_free(struct m_tag *t)
{

	uma_zfree(pf_mtag_z, t);
}

struct pf_mtag *
pf_get_mtag(struct mbuf *m)
{
	struct m_tag *mtag;

	if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL)
		return ((struct pf_mtag *)(mtag + 1));

	mtag = uma_zalloc(pf_mtag_z, M_NOWAIT);
	if (mtag == NULL)
		return (NULL);
	bzero(mtag + 1, sizeof(struct pf_mtag));
	m_tag_prepend(m, mtag);

	return ((struct pf_mtag *)(mtag + 1));
}
static int
pf_state_key_attach(struct pf_state_key *skw, struct pf_state_key *sks,
    struct pf_kstate *s)
{
	struct pf_keyhash	*khs, *khw, *kh;
	struct pf_state_key	*sk, *cur;
	struct pf_kstate	*si, *olds = NULL;
	int idx;

	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
	KASSERT(s->key[PF_SK_WIRE] == NULL, ("%s: state has key", __func__));
	KASSERT(s->key[PF_SK_STACK] == NULL, ("%s: state has key", __func__));

	/*
	 * We need to lock hash slots of both keys. To avoid deadlock
	 * we always lock the slot with lower address first. Unlock order
	 * isn't important.
	 *
	 * We also need to lock ID hash slot before dropping key
	 * locks. On success we return with ID hash slot locked.
	 */

	if (skw == sks) {
		khs = khw = &V_pf_keyhash[pf_hashkey(skw)];
		PF_HASHROW_LOCK(khs);
	} else {
		khs = &V_pf_keyhash[pf_hashkey(sks)];
		khw = &V_pf_keyhash[pf_hashkey(skw)];
		if (khs == khw) {
			PF_HASHROW_LOCK(khs);
		} else if (khs < khw) {
			PF_HASHROW_LOCK(khs);
			PF_HASHROW_LOCK(khw);
		} else {
			PF_HASHROW_LOCK(khw);
			PF_HASHROW_LOCK(khs);
		}
	}

#define	KEYS_UNLOCK()	do {			\
	if (khs != khw) {			\
		PF_HASHROW_UNLOCK(khs);		\
		PF_HASHROW_UNLOCK(khw);		\
	} else					\
		PF_HASHROW_UNLOCK(khs);		\
} while (0)

	/*
	 * First run: start with wire key.
	 */
	sk = skw;
	kh = khw;
	idx = PF_SK_WIRE;

	MPASS(s->lock == NULL);
	s->lock = &V_pf_idhash[PF_IDHASH(s)].lock;

keyattach:
	LIST_FOREACH(cur, &kh->keys, entry)
		if (bcmp(cur, sk, sizeof(struct pf_state_key_cmp)) == 0)
			break;

	if (cur != NULL) {
		/* Key exists. Check for same kif, if none, add to key. */
		TAILQ_FOREACH(si, &cur->states[idx], key_list[idx]) {
			struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(si)];

			PF_HASHROW_LOCK(ih);
			if (si->kif == s->kif &&
			    si->direction == s->direction) {
				if (sk->proto == IPPROTO_TCP &&
				    si->src.state >= TCPS_FIN_WAIT_2 &&
				    si->dst.state >= TCPS_FIN_WAIT_2) {
					/*
					 * New state matches an old >FIN_WAIT_2
					 * state. We can't drop key hash locks,
					 * thus we can't unlink it properly.
					 *
					 * As a workaround we drop it into
					 * TCPS_CLOSED state, schedule purge
					 * ASAP and push it into the very end
					 * of the slot TAILQ, so that it won't
					 * conflict with our new state.
					 */
					pf_set_protostate(si, PF_PEER_BOTH,
					    TCPS_CLOSED);
					si->timeout = PFTM_PURGE;
					olds = si;
				} else {
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						printf("pf: %s key attach "
						    "failed on %s: ",
						    (idx == PF_SK_WIRE) ?
						    "wire" : "stack",
						    s->kif->pfik_name);
						pf_print_state_parts(s,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						printf(", existing: ");
						pf_print_state_parts(si,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						printf("\n");
					}
					PF_HASHROW_UNLOCK(ih);
					KEYS_UNLOCK();
					uma_zfree(V_pf_state_key_z, sk);
					if (idx == PF_SK_STACK)
						pf_detach_state(s);
					return (EEXIST); /* collision! */
				}
			}
			PF_HASHROW_UNLOCK(ih);
		}
		uma_zfree(V_pf_state_key_z, sk);
		s->key[idx] = cur;
	} else {
		LIST_INSERT_HEAD(&kh->keys, sk, entry);
		s->key[idx] = sk;
	}

stateattach:
	/* List is sorted, if-bound states before floating. */
	if (s->kif == V_pfi_all)
		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]);
	else
		TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]);

	if (olds) {
		TAILQ_REMOVE(&s->key[idx]->states[idx], olds, key_list[idx]);
		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], olds,
		    key_list[idx]);
		olds = NULL;
	}

	/*
	 * Attach done. See how should we (or should not?)
	 * attach a second key.
	 */
	if (sks == skw) {
		s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
		idx = PF_SK_STACK;
		sks = NULL;
		goto stateattach;
	} else if (sks != NULL) {
		/*
		 * Continue attaching with stack key.
		 */
		sk = sks;
		kh = khs;
		idx = PF_SK_STACK;
		sks = NULL;
		goto keyattach;
	}

	KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL,
	    ("%s failure", __func__));

	PF_HASHROW_LOCK(&V_pf_idhash[PF_IDHASH(s)]);
	KEYS_UNLOCK();
	return (0);
}
static void
pf_detach_state(struct pf_kstate *s)
{
	struct pf_state_key *sks = s->key[PF_SK_STACK];
	struct pf_keyhash *kh;

	if (sks != NULL) {
		kh = &V_pf_keyhash[pf_hashkey(sks)];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_STACK] != NULL)
			pf_state_key_detach(s, PF_SK_STACK);
		/*
		 * If both point to same key, then we are done.
		 */
		if (sks == s->key[PF_SK_WIRE]) {
			pf_state_key_detach(s, PF_SK_WIRE);
			PF_HASHROW_UNLOCK(kh);
			return;
		}
		PF_HASHROW_UNLOCK(kh);
	}

	if (s->key[PF_SK_WIRE] != NULL) {
		kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_WIRE] != NULL)
			pf_state_key_detach(s, PF_SK_WIRE);
		PF_HASHROW_UNLOCK(kh);
	}
}

static void
pf_state_key_detach(struct pf_kstate *s, int idx)
{
	struct pf_state_key *sk = s->key[idx];
	struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)];

	PF_HASHROW_ASSERT(kh);
	TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]);
	s->key[idx] = NULL;

	if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) {
		LIST_REMOVE(sk, entry);
		uma_zfree(V_pf_state_key_z, sk);
	}
}
static int
pf_state_key_ctor(void *mem, int size, void *arg, int flags)
{
	struct pf_state_key *sk = mem;

	bzero(sk, sizeof(struct pf_state_key_cmp));
	TAILQ_INIT(&sk->states[PF_SK_WIRE]);
	TAILQ_INIT(&sk->states[PF_SK_STACK]);

	return (0);
}

struct pf_state_key *
pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr,
	struct pf_addr *daddr, u_int16_t sport, u_int16_t dport)
{
	struct pf_state_key *sk;

	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
	if (sk == NULL)
		return (NULL);

	PF_ACPY(&sk->addr[pd->sidx], saddr, pd->af);
	PF_ACPY(&sk->addr[pd->didx], daddr, pd->af);
	sk->port[pd->sidx] = sport;
	sk->port[pd->didx] = dport;
	sk->proto = pd->proto;
	sk->af = pd->af;

	return (sk);
}

struct pf_state_key *
pf_state_key_clone(struct pf_state_key *orig)
{
	struct pf_state_key *sk;

	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
	if (sk == NULL)
		return (NULL);

	bcopy(orig, sk, sizeof(struct pf_state_key_cmp));

	return (sk);
}
int
pf_state_insert(struct pfi_kkif *kif, struct pfi_kkif *orig_kif,
    struct pf_state_key *skw, struct pf_state_key *sks, struct pf_kstate *s)
{
	struct pf_idhash *ih;
	struct pf_kstate *cur;
	int error;

	KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]),
	    ("%s: sks not pristine", __func__));
	KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]),
	    ("%s: skw not pristine", __func__));
	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));

	s->kif = kif;
	s->orig_kif = orig_kif;

	if (s->id == 0 && s->creatorid == 0) {
		/* XXX: should be atomic, but probability of collision low */
		if ((s->id = V_pf_stateid[curcpu]++) == PFID_MAXID)
			V_pf_stateid[curcpu] = 1;
		s->id |= (uint64_t )curcpu << PFID_CPUSHIFT;
		s->id = htobe64(s->id);
		s->creatorid = V_pf_status.hostid;
	}

	/* Returns with ID locked on success. */
	if ((error = pf_state_key_attach(skw, sks, s)) != 0)
		return (error);

	ih = &V_pf_idhash[PF_IDHASH(s)];
	PF_HASHROW_ASSERT(ih);
	LIST_FOREACH(cur, &ih->states, entry)
		if (cur->id == s->id && cur->creatorid == s->creatorid)
			break;
	if (cur != NULL) {
		PF_HASHROW_UNLOCK(ih);
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("pf: state ID collision: "
			    "id: %016llx creatorid: %08x\n",
			    (unsigned long long)be64toh(s->id),
			    ntohl(s->creatorid));
		}
		pf_detach_state(s);
		return (EEXIST);
	}
	LIST_INSERT_HEAD(&ih->states, s, entry);
	/* One for keys, one for ID hash. */
	refcount_init(&s->refs, 2);

	pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_INSERT], 1);
	if (V_pfsync_insert_state_ptr != NULL)
		V_pfsync_insert_state_ptr(s);

	/* Returns locked. */
	return (0);
}
/*
 * Find state by ID: returns with locked row on success.
 */
struct pf_kstate *
pf_find_state_byid(uint64_t id, uint32_t creatorid)
{
	struct pf_idhash *ih;
	struct pf_kstate *s;

	pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	ih = &V_pf_idhash[(be64toh(id) % (pf_hashmask + 1))];

	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(s, &ih->states, entry)
		if (s->id == id && s->creatorid == creatorid)
			break;

	if (s == NULL)
		PF_HASHROW_UNLOCK(ih);

	return (s);
}
/*
 * Find state by key.
 * Returns with ID hash slot locked on success.
 */
static struct pf_kstate *
pf_find_state(struct pfi_kkif *kif, struct pf_state_key_cmp *key, u_int dir)
{
	struct pf_keyhash	*kh;
	struct pf_state_key	*sk;
	struct pf_kstate	*s;
	int idx;

	pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
			break;
	if (sk == NULL) {
		PF_HASHROW_UNLOCK(kh);
		return (NULL);
	}

	idx = (dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK);

	/* List is sorted, if-bound states before floating ones. */
	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx])
		if (s->kif == V_pfi_all || s->kif == kif) {
			PF_STATE_LOCK(s);
			PF_HASHROW_UNLOCK(kh);
			if (__predict_false(s->timeout >= PFTM_MAX)) {
				/*
				 * State is either being processed by
				 * pf_unlink_state() in an other thread, or
				 * is scheduled for immediate expiry.
				 */
				PF_STATE_UNLOCK(s);
				return (NULL);
			}
			return (s);
		}
	PF_HASHROW_UNLOCK(kh);

	return (NULL);
}
struct pf_kstate *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
	struct pf_keyhash	*kh;
	struct pf_state_key	*sk;
	struct pf_kstate	*s, *ret = NULL;
	int			 idx;

	pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
			break;
	if (sk == NULL) {
		PF_HASHROW_UNLOCK(kh);
		return (NULL);
	}

	switch (dir) {
	case PF_IN:
		idx = PF_SK_WIRE;
		break;
	case PF_OUT:
		idx = PF_SK_STACK;
		break;
	default:
		panic("%s: dir %u", __func__, dir);
	}

	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) {
		if (more == NULL) {
			PF_HASHROW_UNLOCK(kh);
			return (s);
		}

		if (ret)
			(*more)++;
		else
			ret = s;
	}
	PF_HASHROW_UNLOCK(kh);

	return (ret);
}

bool
pf_find_state_all_exists(struct pf_state_key_cmp *key, u_int dir)
{
	struct pf_kstate *s;

	s = pf_find_state_all(key, dir, NULL);

	return (s != NULL);
}

/* END state table stuff */
static void
pf_send(struct pf_send_entry *pfse)
{

	PF_SENDQ_LOCK();
	STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next);
	PF_SENDQ_UNLOCK();
	swi_sched(V_pf_swi_cookie, 0);
}
static bool
pf_isforlocal(struct mbuf *m, int af)
{
	switch (af) {
	case AF_INET: {
		struct rm_priotracker in_ifa_tracker;
		struct ip *ip;
		struct in_ifaddr *ia = NULL;

		ip = mtod(m, struct ip *);
		IN_IFADDR_RLOCK(&in_ifa_tracker);
		LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
			if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr) {
				IN_IFADDR_RUNLOCK(&in_ifa_tracker);
				return (true);
			}
		}
		IN_IFADDR_RUNLOCK(&in_ifa_tracker);
		break;
	}
	case AF_INET6: {
		struct ip6_hdr *ip6;
		struct in6_ifaddr *ia;
		ip6 = mtod(m, struct ip6_hdr *);
		ia = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false);
		if (ia == NULL)
			return (false);
		return (! (ia->ia6_flags & IN6_IFF_NOTREADY));
	}
	default:
		panic("Unsupported af %d", af);
	}

	return (false);
}
void
pf_intr(void *v)
{
	struct epoch_tracker et;
	struct pf_send_head queue;
	struct pf_send_entry *pfse, *next;

	CURVNET_SET((struct vnet *)v);

	PF_SENDQ_LOCK();
	queue = V_pf_sendqueue;
	STAILQ_INIT(&V_pf_sendqueue);
	PF_SENDQ_UNLOCK();

	NET_EPOCH_ENTER(et);

	STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) {
		switch (pfse->pfse_type) {
		case PFSE_IP:
			if (pf_isforlocal(pfse->pfse_m, AF_INET)) {
				pfse->pfse_m->m_flags |= M_SKIP_FIREWALL;
				pfse->pfse_m->m_pkthdr.csum_flags |=
				    CSUM_IP_VALID | CSUM_IP_CHECKED;
				ip_input(pfse->pfse_m);
			} else
				ip_output(pfse->pfse_m, NULL, NULL, 0, NULL,
				    NULL);
			break;
		case PFSE_ICMP:
			icmp_error(pfse->pfse_m, pfse->icmpopts.type,
			    pfse->icmpopts.code, 0, pfse->icmpopts.mtu);
			break;
		case PFSE_IP6:
			if (pf_isforlocal(pfse->pfse_m, AF_INET6)) {
				pfse->pfse_m->m_flags |= M_SKIP_FIREWALL;
				ip6_input(pfse->pfse_m);
			} else
				ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL,
				    NULL, NULL);
			break;
		case PFSE_ICMP6:
			icmp6_error(pfse->pfse_m, pfse->icmpopts.type,
			    pfse->icmpopts.code, pfse->icmpopts.mtu);
			break;
		default:
			panic("%s: unknown type", __func__);
		}
		free(pfse, M_PFTEMP);
	}
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();
}
#define	pf_purge_thread_period	(hz / 10)

#ifdef PF_WANT_32_TO_64_COUNTER
static void
pf_status_counter_u64_periodic(void)
{

	PF_RULES_RASSERT();

	if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 60)) != 0) {
		return;
	}

	for (int i = 0; i < FCNT_MAX; i++) {
		pf_counter_u64_periodic(&V_pf_status.fcounters[i]);
	}
}

static void
pf_kif_counter_u64_periodic(void)
{
	struct pfi_kkif *kif;
	size_t r, run;

	PF_RULES_RASSERT();

	if (__predict_false(V_pf_allkifcount == 0)) {
		return;
	}

	if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 300)) != 0) {
		return;
	}

	run = V_pf_allkifcount / 10;
	if (run < 5)
		run = 5;

	for (r = 0; r < run; r++) {
		kif = LIST_NEXT(V_pf_kifmarker, pfik_allkiflist);
		if (kif == NULL) {
			LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
			LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
			break;
		}

		LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
		LIST_INSERT_AFTER(kif, V_pf_kifmarker, pfik_allkiflist);

		for (int i = 0; i < 2; i++) {
			for (int j = 0; j < 2; j++) {
				for (int k = 0; k < 2; k++) {
					pf_counter_u64_periodic(&kif->pfik_packets[i][j][k]);
					pf_counter_u64_periodic(&kif->pfik_bytes[i][j][k]);
				}
			}
		}
	}
}

static void
pf_rule_counter_u64_periodic(void)
{
	struct pf_krule *rule;
	size_t r, run;

	PF_RULES_RASSERT();

	if (__predict_false(V_pf_allrulecount == 0)) {
		return;
	}

	if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 300)) != 0) {
		return;
	}

	run = V_pf_allrulecount / 10;
	if (run < 5)
		run = 5;

	for (r = 0; r < run; r++) {
		rule = LIST_NEXT(V_pf_rulemarker, allrulelist);
		if (rule == NULL) {
			LIST_REMOVE(V_pf_rulemarker, allrulelist);
			LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
			break;
		}

		LIST_REMOVE(V_pf_rulemarker, allrulelist);
		LIST_INSERT_AFTER(rule, V_pf_rulemarker, allrulelist);

		pf_counter_u64_periodic(&rule->evaluations);
		for (int i = 0; i < 2; i++) {
			pf_counter_u64_periodic(&rule->packets[i]);
			pf_counter_u64_periodic(&rule->bytes[i]);
		}
	}
}

static void
pf_counter_u64_periodic_main(void)
{
	PF_RULES_RLOCK_TRACKER;

	V_pf_counter_periodic_iter++;

	PF_RULES_RLOCK();
	pf_counter_u64_critical_enter();
	pf_status_counter_u64_periodic();
	pf_kif_counter_u64_periodic();
	pf_rule_counter_u64_periodic();
	pf_counter_u64_critical_exit();
	PF_RULES_RUNLOCK();
}
#else
#define	pf_counter_u64_periodic_main()	do { } while (0)
#endif
void
pf_purge_thread(void *unused __unused)
{
	VNET_ITERATOR_DECL(vnet_iter);

	sx_xlock(&pf_end_lock);
	while (pf_end_threads == 0) {
		sx_sleep(pf_purge_thread, &pf_end_lock, 0, "pftm", pf_purge_thread_period);

		VNET_LIST_RLOCK();
		VNET_FOREACH(vnet_iter) {
			CURVNET_SET(vnet_iter);

			/* Wait until V_pf_default_rule is initialized. */
			if (V_pf_vnet_active == 0) {
				CURVNET_RESTORE();
				continue;
			}

			pf_counter_u64_periodic_main();

			/*
			 *  Process 1/interval fraction of the state
			 * table every run.
			 */
			V_pf_purge_idx =
			    pf_purge_expired_states(V_pf_purge_idx, pf_hashmask /
			    (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));

			/*
			 * Purge other expired types every
			 * PFTM_INTERVAL seconds.
			 */
			if (V_pf_purge_idx == 0) {
				/*
				 * Order is important:
				 * - states and src nodes reference rules
				 * - states and rules reference kifs
				 */
				pf_purge_expired_fragments();
				pf_purge_expired_src_nodes();
				pf_purge_unlinked_rules();
				pfi_kkif_purge();
			}
			CURVNET_RESTORE();
		}
		VNET_LIST_RUNLOCK();
	}

	pf_end_threads++;
	sx_xunlock(&pf_end_lock);
	kproc_exit(0);
}
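
/*
 * Illustrative arithmetic (not from the original source): the thread
 * wakes up every hz/10 ticks, i.e. 10 times per second.  With the
 * default PFTM_INTERVAL of 10 seconds each wakeup therefore scans
 * pf_hashmask / (10 * 10) ID hash rows, so one full sweep of the
 * state table completes per PFTM_INTERVAL.  E.g. with 131072 rows a
 * single run checks ~1310 rows and a complete sweep takes 100 runs,
 * or 10 seconds.
 */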
void
pf_unload_vnet_purge(void)
{

	/*
	 * To cleanse up all kifs and rules we need
	 * two runs: first one clears reference flags,
	 * then pf_purge_expired_states() doesn't
	 * raise them, and then second run frees.
	 */
	pf_purge_unlinked_rules();
	pfi_kkif_purge();

	/*
	 * Now purge everything.
	 */
	pf_purge_expired_states(0, pf_hashmask);
	pf_purge_fragments(UINT_MAX);
	pf_purge_expired_src_nodes();

	/*
	 * Now all kifs & rules should be unreferenced,
	 * thus should be successfully freed.
	 */
	pf_purge_unlinked_rules();
	pfi_kkif_purge();
}
u_int32_t
pf_state_expires(const struct pf_kstate *state)
{
	u_int32_t	timeout;
	u_int32_t	start;
	u_int32_t	end;
	u_int32_t	states;

	/* handle all PFTM_* > PFTM_MAX here */
	if (state->timeout == PFTM_PURGE)
		return (time_uptime);

	KASSERT(state->timeout != PFTM_UNLINKED,
	    ("pf_state_expires: timeout == PFTM_UNLINKED"));
	KASSERT((state->timeout < PFTM_MAX),
	    ("pf_state_expires: timeout > PFTM_MAX"));

	timeout = state->rule.ptr->timeout[state->timeout];
	if (!timeout)
		timeout = V_pf_default_rule.timeout[state->timeout];

	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
	if (start && state->rule.ptr != &V_pf_default_rule) {
		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
		states = counter_u64_fetch(state->rule.ptr->states_cur);
	} else {
		start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
		end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
		states = V_pf_status.states;
	}
	if (end && states > start && start < end) {
		if (states < end) {
			timeout = (u_int64_t)timeout * (end - states) /
			    (end - start);
			return (state->expire + timeout);
		}
		else
			return (time_uptime);
	}
	return (state->expire + timeout);
}
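
/*
 * Worked example of the adaptive scaling above (illustrative, not
 * from the original source): with adaptive.start 6000, adaptive.end
 * 12000 and a tcp.established timeout of 86400 seconds, a table
 * holding 9000 states yields an effective timeout of
 * 86400 * (12000 - 9000) / (12000 - 6000) = 43200 seconds; timeouts
 * shrink linearly toward zero as the state count approaches
 * adaptive.end, and states expire immediately beyond it.
 */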
void
pf_purge_expired_src_nodes()
{
	struct pf_ksrc_node_list	 freelist;
	struct pf_srchash	*sh;
	struct pf_ksrc_node	*cur, *next;
	int i;

	LIST_INIT(&freelist);
	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next)
			if (cur->states == 0 && cur->expire <= time_uptime) {
				pf_unlink_src_node(cur);
				LIST_INSERT_HEAD(&freelist, cur, entry);
			} else if (cur->rule.ptr != NULL)
				cur->rule.ptr->rule_ref |= PFRULE_REFS;
		PF_HASHROW_UNLOCK(sh);
	}

	pf_free_src_nodes(&freelist);

	V_pf_status.src_nodes = uma_zone_get_cur(V_pf_sources_z);
}
static void
pf_src_tree_remove_state(struct pf_kstate *s)
{
	struct pf_ksrc_node *sn;
	struct pf_srchash *sh;
	u_int32_t timeout;

	timeout = s->rule.ptr->timeout[PFTM_SRC_NODE] ?
	    s->rule.ptr->timeout[PFTM_SRC_NODE] :
	    V_pf_default_rule.timeout[PFTM_SRC_NODE];

	if (s->src_node != NULL) {
		sn = s->src_node;
		sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
		PF_HASHROW_LOCK(sh);
		if (s->src.tcp_est)
			--sn->conn;
		if (--sn->states == 0)
			sn->expire = time_uptime + timeout;
		PF_HASHROW_UNLOCK(sh);
	}

	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
		sn = s->nat_src_node;
		sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
		PF_HASHROW_LOCK(sh);
		if (--sn->states == 0)
			sn->expire = time_uptime + timeout;
		PF_HASHROW_UNLOCK(sh);
	}

	s->src_node = s->nat_src_node = NULL;
}
/*
 * Unlink and potentially free a state. Function may be
 * called with ID hash row locked, but always returns
 * unlocked, since it needs to go through key hash locking.
 */
int
pf_unlink_state(struct pf_kstate *s, u_int flags)
{
	struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)];

	if ((flags & PF_ENTER_LOCKED) == 0)
		PF_HASHROW_LOCK(ih);
	else
		PF_HASHROW_ASSERT(ih);

	if (s->timeout == PFTM_UNLINKED) {
		/*
		 * State is being processed
		 * by pf_unlink_state() in
		 * an other thread.
		 */
		PF_HASHROW_UNLOCK(ih);
		return (0);	/* XXXGL: undefined actually */
	}

	if (s->src.state == PF_TCPS_PROXY_DST) {
		/* XXX wire key the right one? */
		pf_send_tcp(s->rule.ptr, s->key[PF_SK_WIRE]->af,
		    &s->key[PF_SK_WIRE]->addr[1],
		    &s->key[PF_SK_WIRE]->addr[0],
		    s->key[PF_SK_WIRE]->port[1],
		    s->key[PF_SK_WIRE]->port[0],
		    s->src.seqhi, s->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, s->tag);
	}

	LIST_REMOVE(s, entry);
	pf_src_tree_remove_state(s);

	if (V_pfsync_delete_state_ptr != NULL)
		V_pfsync_delete_state_ptr(s);

	STATE_DEC_COUNTERS(s);

	s->timeout = PFTM_UNLINKED;

	PF_HASHROW_UNLOCK(ih);

	pf_detach_state(s);
	/* pf_state_insert() initialises refs to 2 */
	return (pf_release_staten(s, 2));
}
struct pf_kstate *
pf_alloc_state(int flags)
{

	return (uma_zalloc(V_pf_state_z, flags | M_ZERO));
}

void
pf_free_state(struct pf_kstate *cur)
{

	KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
	KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
	    cur->timeout));

	pf_normalize_tcp_cleanup(cur);
	uma_zfree(V_pf_state_z, cur);
	pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_REMOVALS], 1);
}
/*
 * Called only from pf_purge_thread(), thus serialized.
 */
static u_int
pf_purge_expired_states(u_int i, int maxcheck)
{
	struct pf_idhash *ih;
	struct pf_kstate *s;

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	/*
	 * Go through hash and unlink states that expire now.
	 */
	while (maxcheck > 0) {
		ih = &V_pf_idhash[i];

		/* only take the lock if we expect to do work */
		if (!LIST_EMPTY(&ih->states)) {
relock:
			PF_HASHROW_LOCK(ih);
			LIST_FOREACH(s, &ih->states, entry) {
				if (pf_state_expires(s) <= time_uptime) {
					V_pf_status.states -=
					    pf_unlink_state(s, PF_ENTER_LOCKED);
					goto relock;
				}
				s->rule.ptr->rule_ref |= PFRULE_REFS;
				if (s->nat_rule.ptr != NULL)
					s->nat_rule.ptr->rule_ref |= PFRULE_REFS;
				if (s->anchor.ptr != NULL)
					s->anchor.ptr->rule_ref |= PFRULE_REFS;
				s->kif->pfik_flags |= PFI_IFLAG_REFS;
				if (s->rt_kif)
					s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
			}
			PF_HASHROW_UNLOCK(ih);
		}

		/* Return when we hit end of hash. */
		if (++i > pf_hashmask) {
			V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
			return (0);
		}

		maxcheck--;
	}

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	return (i);
}
static void
pf_purge_unlinked_rules()
{
	struct pf_krulequeue tmpq;
	struct pf_krule *r, *r1;

	/*
	 * If we have overloading task pending, then we'd
	 * better skip purging this time. There is a tiny
	 * probability that overloading task references
	 * an already unlinked rule.
	 */
	PF_OVERLOADQ_LOCK();
	if (!SLIST_EMPTY(&V_pf_overloadqueue)) {
		PF_OVERLOADQ_UNLOCK();
		return;
	}
	PF_OVERLOADQ_UNLOCK();

	/*
	 * Do naive mark-and-sweep garbage collecting of old rules.
	 * Reference flag is raised by pf_purge_expired_states()
	 * and pf_purge_expired_src_nodes().
	 *
	 * To avoid LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK,
	 * use a temporary queue.
	 */
	TAILQ_INIT(&tmpq);
	PF_UNLNKDRULES_LOCK();
	TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) {
		if (!(r->rule_ref & PFRULE_REFS)) {
			TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries);
			TAILQ_INSERT_TAIL(&tmpq, r, entries);
		} else
			r->rule_ref &= ~PFRULE_REFS;
	}
	PF_UNLNKDRULES_UNLOCK();

	if (!TAILQ_EMPTY(&tmpq)) {
		PF_RULES_WLOCK();
		TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) {
			TAILQ_REMOVE(&tmpq, r, entries);
			pf_free_rule(r);
		}
		PF_RULES_WUNLOCK();
	}
}
void
pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
{
	switch (af) {
	case AF_INET: {
		u_int32_t a = ntohl(addr->addr32[0]);
		printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
		    (a>>8)&255, a&255);
		if (p) {
			p = ntohs(p);
			printf(":%u", p);
		}
		break;
	}
	case AF_INET6: {
		u_int16_t b;
		u_int8_t i, curstart, curend, maxstart, maxend;
		curstart = curend = maxstart = maxend = 255;
		for (i = 0; i < 8; i++) {
			if (!addr->addr16[i]) {
				if (curstart == 255)
					curstart = i;
				curend = i;
			} else {
				if ((curend - curstart) >
				    (maxend - maxstart)) {
					maxstart = curstart;
					maxend = curend;
				}
				curstart = curend = 255;
			}
		}
		if ((curend - curstart) >
		    (maxend - maxstart)) {
			maxstart = curstart;
			maxend = curend;
		}
		for (i = 0; i < 8; i++) {
			if (i >= maxstart && i <= maxend) {
				if (i == 0)
					printf(":");
				if (i == maxend)
					printf(":");
			} else {
				b = ntohs(addr->addr16[i]);
				printf("%x", b);
				if (i < 7)
					printf(":");
			}
		}
		if (p) {
			p = ntohs(p);
			printf("[%u]", p);
		}
		break;
	}
	}
}
void
pf_print_state(struct pf_kstate *s)
{
	pf_print_state_parts(s, NULL, NULL);
}

static void
pf_print_state_parts(struct pf_kstate *s,
    struct pf_state_key *skwp, struct pf_state_key *sksp)
{
	struct pf_state_key *skw, *sks;
	u_int8_t proto, dir;

	/* Do our best to fill these, but they're skipped if NULL */
	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
	proto = skw ? skw->proto : (sks ? sks->proto : 0);
	dir = s ? s->direction : 0;

	switch (proto) {
	case IPPROTO_IPV4:
		printf("IPv4");
		break;
	case IPPROTO_IPV6:
		printf("IPv6");
		break;
	case IPPROTO_TCP:
		printf("TCP");
		break;
	case IPPROTO_UDP:
		printf("UDP");
		break;
	case IPPROTO_ICMP:
		printf("ICMP");
		break;
	case IPPROTO_ICMPV6:
		printf("ICMPv6");
		break;
	default:
		printf("%u", proto);
		break;
	}
	switch (dir) {
	case PF_IN:
		printf(" in");
		break;
	case PF_OUT:
		printf(" out");
		break;
	}
	if (skw) {
		printf(" wire: ");
		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
		printf(" ");
		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
	}
	if (sks) {
		printf(" stack: ");
		if (sks != skw) {
			pf_print_host(&sks->addr[0], sks->port[0], sks->af);
			printf(" ");
			pf_print_host(&sks->addr[1], sks->port[1], sks->af);
		} else
			printf("-");
	}
	if (s) {
		if (proto == IPPROTO_TCP) {
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->src.seqlo, s->src.seqhi,
			    s->src.max_win, s->src.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->src.wscale & PF_WSCALE_MASK);
			printf("]");
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->dst.seqlo, s->dst.seqhi,
			    s->dst.max_win, s->dst.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->dst.wscale & PF_WSCALE_MASK);
			printf("]");
		}
		printf(" %u:%u", s->src.state, s->dst.state);
	}
}

void
pf_print_flags(u_int8_t f)
{
	if (f)
		printf(" ");
	if (f & TH_FIN)
		printf("F");
	if (f & TH_SYN)
		printf("S");
	if (f & TH_RST)
		printf("R");
	if (f & TH_PUSH)
		printf("P");
	if (f & TH_ACK)
		printf("A");
	if (f & TH_URG)
		printf("U");
	if (f & TH_ECE)
		printf("E");
	if (f & TH_CWR)
		printf("W");
}
#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

void
pf_calc_skip_steps(struct pf_krulequeue *rules)
{
	struct pf_krule *cur, *prev, *head[PF_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
		if (cur->af != prev->af)
			PF_SET_SKIP_STEPS(PF_SKIP_AF);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
		if (cur->src.neg != prev->src.neg ||
		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
		if (cur->src.port[0] != prev->src.port[0] ||
		    cur->src.port[1] != prev->src.port[1] ||
		    cur->src.port_op != prev->src.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
		if (cur->dst.neg != prev->dst.neg ||
		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
		if (cur->dst.port[0] != prev->dst.port[0] ||
		    cur->dst.port[1] != prev->dst.port[1] ||
		    cur->dst.port_op != prev->dst.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}
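
/*
 * Sketch of what the skip steps buy us (illustrative ruleset, not
 * from the original source): for
 *
 *   pass  in on em0 proto tcp from any to any port 22
 *   pass  in on em0 proto tcp from any to any port 80
 *   block in on em1
 *
 * pf_calc_skip_steps() makes the first rule's skip[PF_SKIP_IFP]
 * point at the "block in on em1" rule.  When a packet arrives on
 * em1, the evaluator tests the first rule's interface once, fails,
 * and follows the skip pointer past every consecutive em0 rule
 * instead of re-testing the interface on each of them.
 */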
static int
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
{
	if (aw1->type != aw2->type)
		return (1);
	switch (aw1->type) {
	case PF_ADDR_ADDRMASK:
	case PF_ADDR_RANGE:
		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, AF_INET6))
			return (1);
		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, AF_INET6))
			return (1);
		return (0);
	case PF_ADDR_DYNIFTL:
		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		return (0);
	case PF_ADDR_TABLE:
		return (aw1->p.tbl != aw2->p.tbl);
	default:
		printf("invalid address type: %d\n", aw1->type);
		return (1);
	}
}
/*
 * Checksum updates are a little complicated because the checksum in the TCP/UDP
 * header isn't always a full checksum. In some cases (i.e. output) it's a
 * pseudo-header checksum, which is a partial checksum over src/dst IP
 * addresses, protocol number and length.
 *
 * That means we have the following cases:
 *  * Input or forwarding: we don't have TSO, the checksum fields are full
 *    checksums, we need to update the checksum whenever we change anything.
 *  * Output (i.e. the checksum is a pseudo-header checksum):
 *    x The field being updated is src/dst address or affects the length of
 *      the packet. We need to update the pseudo-header checksum (note that this
 *      checksum is not ones' complement).
 *    x Some other field is being modified (e.g. src/dst port numbers): We
 *      don't have to update anything.
 */
u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
	u_int32_t x;

	x = cksum + old - new;
	x = (x + (x >> 16)) & 0xffff;

	/* optimise: eliminate a branch when not udp */
	if (udp && cksum == 0x0000)
		return cksum;
	if (udp && x == 0x0000)
		x = 0xffff;

	return (u_int16_t)(x);
}
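
/*
 * Worked example (illustrative, not from the original source): this
 * is the standard incremental internet-checksum update (cf. RFC 1624)
 * applied directly to the stored checksum field.  Rewriting a 16-bit
 * word from old = 0x1234 to new = 0x5678 in a packet whose checksum
 * field holds 0xabcd gives
 *
 *	x = 0xabcd + 0x1234 - 0x5678 = 0x6789
 *
 * and the (x + (x >> 16)) step folds any carry out of bit 16 back
 * into the low word, preserving the end-around-carry property of
 * one's-complement arithmetic.  The udp special cases exist because
 * 0x0000 means "no checksum" in UDP over IPv4, so an existing zero
 * must be left alone and a computed zero must be sent as 0xffff.
 */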
static void
pf_patch_8(struct mbuf *m, u_int16_t *cksum, u_int8_t *f, u_int8_t v, bool hi,
    u_int8_t udp)
{
	u_int16_t old = htons(hi ? (*f << 8) : *f);
	u_int16_t new = htons(hi ? ( v << 8) :  v);

	if (*f == v)
		return;

	*f = v;

	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
		return;

	*cksum = pf_cksum_fixup(*cksum, old, new, udp);
}

static void
pf_patch_16_unaligned(struct mbuf *m, u_int16_t *cksum, void *f, u_int16_t v,
    bool hi, u_int8_t udp)
{
	u_int8_t *fb = (u_int8_t *)f;
	u_int8_t *vb = (u_int8_t *)&v;

	pf_patch_8(m, cksum, fb++, *vb++, hi, udp);
	pf_patch_8(m, cksum, fb++, *vb++, !hi, udp);
}

static void
pf_patch_32_unaligned(struct mbuf *m, u_int16_t *cksum, void *f, u_int32_t v,
    bool hi, u_int8_t udp)
{
	u_int8_t *fb = (u_int8_t *)f;
	u_int8_t *vb = (u_int8_t *)&v;

	pf_patch_8(m, cksum, fb++, *vb++, hi, udp);
	pf_patch_8(m, cksum, fb++, *vb++, !hi, udp);
	pf_patch_8(m, cksum, fb++, *vb++, hi, udp);
	pf_patch_8(m, cksum, fb++, *vb++, !hi, udp);
}

static u_int16_t
pf_proto_cksum_fixup(struct mbuf *m, u_int16_t cksum, u_int16_t old,
        u_int16_t new, u_int8_t udp)
{
	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
		return (cksum);

	return (pf_cksum_fixup(cksum, old, new, udp));
}
static void
pf_change_ap(struct mbuf *m, struct pf_addr *a, u_int16_t *p, u_int16_t *ic,
        u_int16_t *pc, struct pf_addr *an, u_int16_t pn, u_int8_t u,
        sa_family_t af)
{
	struct pf_addr	ao;
	u_int16_t	po = *p;

	PF_ACPY(&ao, a, af);
	PF_ACPY(a, an, af);

	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
		*pc = ~*pc;

	*p = pn;

	switch (af) {
	case AF_INET:
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    ao.addr16[0], an->addr16[0], 0),
		    ao.addr16[1], an->addr16[1], 0);
		*pc = pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u);

		*pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
		break;
	case AF_INET6:
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    ao.addr16[2], an->addr16[2], u),
		    ao.addr16[3], an->addr16[3], u),
		    ao.addr16[4], an->addr16[4], u),
		    ao.addr16[5], an->addr16[5], u),
		    ao.addr16[6], an->addr16[6], u),
		    ao.addr16[7], an->addr16[7], u);

		*pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
		break;
	}

	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA |
	    CSUM_DELAY_DATA_IPV6)) {
		*pc = ~*pc;
		if (! *pc)
			*pc = 0xffff;
	}
}
/* Changes a u_int32_t.  Uses a void * so there are no align restrictions */
void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
	u_int32_t	ao;

	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));
	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
	    ao % 65536, an % 65536, u);
}

void
pf_change_proto_a(struct mbuf *m, void *a, u_int16_t *c, u_int32_t an, u_int8_t udp)
{
	u_int32_t	ao;

	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));

	*c = pf_proto_cksum_fixup(m,
	    pf_proto_cksum_fixup(m, *c, ao / 65536, an / 65536, udp),
	    ao % 65536, an % 65536, udp);
}
static void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
	struct pf_addr	ao;

	PF_ACPY(&ao, a, AF_INET6);
	PF_ACPY(a, an, AF_INET6);

	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(*c,
	    ao.addr16[0], an->addr16[0], u),
	    ao.addr16[1], an->addr16[1], u),
	    ao.addr16[2], an->addr16[2], u),
	    ao.addr16[3], an->addr16[3], u),
	    ao.addr16[4], an->addr16[4], u),
	    ao.addr16[5], an->addr16[5], u),
	    ao.addr16[6], an->addr16[6], u),
	    ao.addr16[7], an->addr16[7], u);
}
static void
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
{
	struct pf_addr	oia, ooa;

	PF_ACPY(&oia, ia, af);
	if (oa)
		PF_ACPY(&ooa, oa, af);

	/* Change inner protocol port, fix inner protocol checksum. */
	if (ip != NULL) {
		u_int16_t	oip = *ip;
		u_int32_t	opc;

		if (pc != NULL)
			opc = *pc;
		*ip = np;
		if (pc != NULL)
			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
		if (pc != NULL)
			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
	}
	/* Change inner ip address, fix inner ip and icmp checksums. */
	PF_ACPY(ia, na, af);
	switch (af) {
	case AF_INET: {
		u_int32_t	 oh2c = *h2c;

		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
		break;
	}
	case AF_INET6:
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], u),
		    oia.addr16[1], ia->addr16[1], u),
		    oia.addr16[2], ia->addr16[2], u),
		    oia.addr16[3], ia->addr16[3], u),
		    oia.addr16[4], ia->addr16[4], u),
		    oia.addr16[5], ia->addr16[5], u),
		    oia.addr16[6], ia->addr16[6], u),
		    oia.addr16[7], ia->addr16[7], u);
		break;
	}
	/* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
	if (oa) {
		PF_ACPY(oa, na, af);
		switch (af) {
		case AF_INET:
			*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
			    ooa.addr16[0], oa->addr16[0], 0),
			    ooa.addr16[1], oa->addr16[1], 0);
			break;
		case AF_INET6:
			*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(*ic,
			    ooa.addr16[0], oa->addr16[0], u),
			    ooa.addr16[1], oa->addr16[1], u),
			    ooa.addr16[2], oa->addr16[2], u),
			    ooa.addr16[3], oa->addr16[3], u),
			    ooa.addr16[4], oa->addr16[4], u),
			    ooa.addr16[5], oa->addr16[5], u),
			    ooa.addr16[6], oa->addr16[6], u),
			    ooa.addr16[7], oa->addr16[7], u);
			break;
		}
	}
}
2582 * Need to modulate the sequence numbers in the TCP SACK option
2583 * (credits to Krzysztof Pfaff for report and patch)
2586 pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
2587 struct tcphdr *th, struct pf_state_peer *dst)
2589 int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
2590 u_int8_t opts[TCP_MAXOLEN], *opt = opts;
2591 int copyback = 0, i, olen;
2592 struct sackblk sack;
2594 #define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2)
2595 if (hlen < TCPOLEN_SACKLEN ||
2596 !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
2599 while (hlen >= TCPOLEN_SACKLEN) {
2600 size_t startoff = opt - opts;
2603 case TCPOPT_EOL: /* FALLTHROUGH */
2611 if (olen >= TCPOLEN_SACKLEN) {
2612 for (i = 2; i + TCPOLEN_SACK <= olen;
2613 i += TCPOLEN_SACK) {
2614 memcpy(&sack, &opt[i], sizeof(sack));
2615 pf_patch_32_unaligned(m,
2616 &th->th_sum, &sack.start,
2617 htonl(ntohl(sack.start) - dst->seqdiff),
2618 PF_ALGNMNT(startoff),
2620 pf_patch_32_unaligned(m, &th->th_sum,
2622 htonl(ntohl(sack.end) - dst->seqdiff),
2623 PF_ALGNMNT(startoff),
2625 memcpy(&opt[i], &sack, sizeof(sack));
2639 m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts);
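/*
 * SACK blocks carry absolute sequence numbers, so when pf modulates an
 * ISN (dst->seqdiff != 0) each sack.start/sack.end must be shifted by
 * the same difference before the option block is copied back.
 */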
2644 pf_build_tcp(const struct pf_krule *r, sa_family_t af,
2645 const struct pf_addr *saddr, const struct pf_addr *daddr,
2646 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
2647 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
2653 struct ip *h = NULL;
2656 struct ip6_hdr *h6 = NULL;
2660 struct pf_mtag *pf_mtag;
2665 /* maximum segment size tcp option */
2666 tlen = sizeof(struct tcphdr);
2673 len = sizeof(struct ip) + tlen;
2678 len = sizeof(struct ip6_hdr) + tlen;
2682 panic("%s: unsupported af %d", __func__, af);
2685 m = m_gethdr(M_NOWAIT, MT_DATA);
2690 mac_netinet_firewall_send(m);
2692 if ((pf_mtag = pf_get_mtag(m)) == NULL) {
2697 m->m_flags |= M_SKIP_FIREWALL;
2698 pf_mtag->tag = rtag;
2700 if (r != NULL && r->rtableid >= 0)
2701 M_SETFIB(m, r->rtableid);
2704 if (r != NULL && r->qid) {
2705 pf_mtag->qid = r->qid;
2707 /* add hints for ecn */
2708 pf_mtag->hdr = mtod(m, struct ip *);
2711 m->m_data += max_linkhdr;
2712 m->m_pkthdr.len = m->m_len = len;
2713 /* The rest of the stack assumes a rcvif, so provide one.
2714 * This is a locally generated packet, so the loopback interface is close enough. */
2715 m->m_pkthdr.rcvif = V_loif;
2716 bzero(m->m_data, len);
2720 h = mtod(m, struct ip *);
2722 /* IP header fields included in the TCP checksum */
2723 h->ip_p = IPPROTO_TCP;
2724 h->ip_len = htons(tlen);
2725 h->ip_src.s_addr = saddr->v4.s_addr;
2726 h->ip_dst.s_addr = daddr->v4.s_addr;
2728 th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
2733 h6 = mtod(m, struct ip6_hdr *);
2735 /* IP header fields included in the TCP checksum */
2736 h6->ip6_nxt = IPPROTO_TCP;
2737 h6->ip6_plen = htons(tlen);
2738 memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
2739 memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));
2741 th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
2747 th->th_sport = sport;
2748 th->th_dport = dport;
2749 th->th_seq = htonl(seq);
2750 th->th_ack = htonl(ack);
2751 th->th_off = tlen >> 2;
2752 th->th_flags = flags;
2753 th->th_win = htons(win);
2756 opt = (char *)(th + 1);
2757 opt[0] = TCPOPT_MAXSEG;
2760 bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
2767 th->th_sum = in_cksum(m, len);
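/*
 * At this point the rest of the IPv4 header is still zero, so ip_p,
 * ip_len (temporarily holding the TCP length) and the addresses form
 * exactly the TCP pseudo-header, and a plain in_cksum() over the whole
 * packet yields the TCP checksum; the real header fields are filled in
 * only afterwards.
 */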
2769 /* Finish the IP header */
2771 h->ip_hl = sizeof(*h) >> 2;
2772 h->ip_tos = IPTOS_LOWDELAY;
2773 h->ip_off = htons(V_path_mtu_discovery ? IP_DF : 0);
2774 h->ip_len = htons(len);
2775 h->ip_ttl = ttl ? ttl : V_ip_defttl;
2782 th->th_sum = in6_cksum(m, IPPROTO_TCP,
2783 sizeof(struct ip6_hdr), tlen);
2785 h6->ip6_vfc |= IPV6_VERSION;
2786 h6->ip6_hlim = IPV6_DEFHLIM;
2795 pf_send_tcp(const struct pf_krule *r, sa_family_t af,
2796 const struct pf_addr *saddr, const struct pf_addr *daddr,
2797 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
2798 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
2801 struct pf_send_entry *pfse;
2804 m = pf_build_tcp(r, af, saddr, daddr, sport, dport, seq, ack, flags,
2805 win, mss, ttl, tag, rtag);
2809 /* Allocate outgoing queue entry, mbuf and mbuf tag. */
2810 pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
2819 pfse->pfse_type = PFSE_IP;
2824 pfse->pfse_type = PFSE_IP6;
2834 pf_return(struct pf_krule *r, struct pf_krule *nr, struct pf_pdesc *pd,
2835 struct pf_state_key *sk, int off, struct mbuf *m, struct tcphdr *th,
2836 struct pfi_kkif *kif, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen,
2839 struct pf_addr * const saddr = pd->src;
2840 struct pf_addr * const daddr = pd->dst;
2841 sa_family_t af = pd->af;
2843 /* undo NAT changes, if they have taken place */
2845 PF_ACPY(saddr, &sk->addr[pd->sidx], af);
2846 PF_ACPY(daddr, &sk->addr[pd->didx], af);
2848 *pd->sport = sk->port[pd->sidx];
2850 *pd->dport = sk->port[pd->didx];
2852 *pd->proto_sum = bproto_sum;
2854 *pd->ip_sum = bip_sum;
2855 m_copyback(m, off, hdrlen, pd->hdr.any);
2857 if (pd->proto == IPPROTO_TCP &&
2858 ((r->rule_flag & PFRULE_RETURNRST) ||
2859 (r->rule_flag & PFRULE_RETURN)) &&
2860 !(th->th_flags & TH_RST)) {
2861 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
2873 h4 = mtod(m, struct ip *);
2874 len = ntohs(h4->ip_len) - off;
2879 h6 = mtod(m, struct ip6_hdr *);
2880 len = ntohs(h6->ip6_plen) - (off - sizeof(*h6));
2885 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
2886 REASON_SET(reason, PFRES_PROTCKSUM);
2888 if (th->th_flags & TH_SYN)
2890 if (th->th_flags & TH_FIN)
2892 pf_send_tcp(r, af, pd->dst,
2893 pd->src, th->th_dport, th->th_sport,
2894 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
2895 r->return_ttl, 1, 0);
2897 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
2899 pf_send_icmp(m, r->return_icmp >> 8,
2900 r->return_icmp & 255, af, r);
2901 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
2903 pf_send_icmp(m, r->return_icmp6 >> 8,
2904 r->return_icmp6 & 255, af, r);
2908 pf_match_ieee8021q_pcp(u_int8_t prio, struct mbuf *m)
2913 mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL);
2917 if (prio == PF_PRIO_ZERO)
2920 mpcp = *(uint8_t *)(mtag + 1);
2922 return (mpcp == prio);
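/*
 * The PCP value recorded at input lives in the byte immediately after
 * the m_tag header, hence the *(uint8_t *)(mtag + 1) dereference.
 */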
2926 pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
2929 struct pf_send_entry *pfse;
2931 struct pf_mtag *pf_mtag;
2933 /* Allocate outgoing queue entry, mbuf and mbuf tag. */
2934 pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
2938 if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) {
2939 free(pfse, M_PFTEMP);
2943 if ((pf_mtag = pf_get_mtag(m0)) == NULL) {
2944 free(pfse, M_PFTEMP);
2948 m0->m_flags |= M_SKIP_FIREWALL;
2950 if (r->rtableid >= 0)
2951 M_SETFIB(m0, r->rtableid);
2955 pf_mtag->qid = r->qid;
2956 /* add hints for ecn */
2957 pf_mtag->hdr = mtod(m0, struct ip *);
2964 pfse->pfse_type = PFSE_ICMP;
2969 pfse->pfse_type = PFSE_ICMP6;
2974 pfse->icmpopts.type = type;
2975 pfse->icmpopts.code = code;
2980 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
2981 * If n is 0, they match if they are equal. If n is != 0, they match if they
2982 * are different.
2985 pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
2986 struct pf_addr *b, sa_family_t af)
2993 if ((a->addr32[0] & m->addr32[0]) ==
2994 (b->addr32[0] & m->addr32[0]))
3000 if (((a->addr32[0] & m->addr32[0]) ==
3001 (b->addr32[0] & m->addr32[0])) &&
3002 ((a->addr32[1] & m->addr32[1]) ==
3003 (b->addr32[1] & m->addr32[1])) &&
3004 ((a->addr32[2] & m->addr32[2]) ==
3005 (b->addr32[2] & m->addr32[2])) &&
3006 ((a->addr32[3] & m->addr32[3]) ==
3007 (b->addr32[3] & m->addr32[3])))
3026 * Return 1 if b <= a <= e, otherwise return 0.
3029 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
3030 struct pf_addr *a, sa_family_t af)
3035 if ((ntohl(a->addr32[0]) < ntohl(b->addr32[0])) ||
3036 (ntohl(a->addr32[0]) > ntohl(e->addr32[0])))
3045 for (i = 0; i < 4; ++i)
3046 if (ntohl(a->addr32[i]) > ntohl(b->addr32[i]))
3048 else if (ntohl(a->addr32[i]) < ntohl(b->addr32[i]))
3051 for (i = 0; i < 4; ++i)
3052 if (ntohl(a->addr32[i]) < ntohl(e->addr32[i]))
3054 else if (ntohl(a->addr32[i]) > ntohl(e->addr32[i]))
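/*
 * For IPv6 the range check walks the four 32-bit words most-significant
 * first; the first word that differs decides the comparison, which is
 * equivalent to an unsigned 128-bit compare of the addresses.
 */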
3064 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
3068 return ((p > a1) && (p < a2));
3070 return ((p < a1) || (p > a2));
3072 return ((p >= a1) && (p <= a2));
3086 return (0); /* never reached */
3090 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
3095 return (pf_match(op, a1, a2, p));
3099 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
3101 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
3103 return (pf_match(op, a1, a2, u));
3107 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
3109 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
3111 return (pf_match(op, a1, a2, g));
3115 pf_match_tag(struct mbuf *m, struct pf_krule *r, int *tag, int mtag)
3120 return ((!r->match_tag_not && r->match_tag == *tag) ||
3121 (r->match_tag_not && r->match_tag != *tag));
3125 pf_tag_packet(struct mbuf *m, struct pf_pdesc *pd, int tag)
3128 KASSERT(tag > 0, ("%s: tag %d", __func__, tag));
3130 if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(m)) == NULL))
3133 pd->pf_mtag->tag = tag;
3138 #define PF_ANCHOR_STACKSIZE 32
3139 struct pf_kanchor_stackframe {
3140 struct pf_kruleset *rs;
3141 struct pf_krule *r; /* XXX: + match bit */
3142 struct pf_kanchor *child;
3146 * XXX: We rely on malloc(9) returning pointer aligned addresses.
3148 #define PF_ANCHORSTACK_MATCH 0x00000001
3149 #define PF_ANCHORSTACK_MASK (PF_ANCHORSTACK_MATCH)
3151 #define PF_ANCHOR_MATCH(f) ((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH)
3152 #define PF_ANCHOR_RULE(f) (struct pf_krule *) \
3153 ((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK)
3154 #define PF_ANCHOR_SET_MATCH(f) do { (f)->r = (void *) \
3155 ((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH); \
3159 pf_step_into_anchor(struct pf_kanchor_stackframe *stack, int *depth,
3160 struct pf_kruleset **rs, int n, struct pf_krule **r, struct pf_krule **a,
3163 struct pf_kanchor_stackframe *f;
3169 if (*depth >= PF_ANCHOR_STACKSIZE) {
3170 printf("%s: anchor stack overflow on %s\n",
3171 __func__, (*r)->anchor->name);
3172 *r = TAILQ_NEXT(*r, entries);
3174 } else if (*depth == 0 && a != NULL)
3176 f = stack + (*depth)++;
3179 if ((*r)->anchor_wildcard) {
3180 struct pf_kanchor_node *parent = &(*r)->anchor->children;
3182 if ((f->child = RB_MIN(pf_kanchor_node, parent)) == NULL) {
3186 *rs = &f->child->ruleset;
3189 *rs = &(*r)->anchor->ruleset;
3191 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
3195 pf_step_out_of_anchor(struct pf_kanchor_stackframe *stack, int *depth,
3196 struct pf_kruleset **rs, int n, struct pf_krule **r, struct pf_krule **a,
3199 struct pf_kanchor_stackframe *f;
3200 struct pf_krule *fr;
3208 f = stack + *depth - 1;
3209 fr = PF_ANCHOR_RULE(f);
3210 if (f->child != NULL) {
3211 struct pf_kanchor_node *parent;
3214 * This block traverses through
3215 * a wildcard anchor.
3217 parent = &fr->anchor->children;
3218 if (match != NULL && *match) {
3220 * If any of "*" matched, then
3221 * "foo/ *" matched, mark frame
3224 PF_ANCHOR_SET_MATCH(f);
3227 f->child = RB_NEXT(pf_kanchor_node, parent, f->child);
3228 if (f->child != NULL) {
3229 *rs = &f->child->ruleset;
3230 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
3238 if (*depth == 0 && a != NULL)
3241 if (PF_ANCHOR_MATCH(f) || (match != NULL && *match))
3243 *r = TAILQ_NEXT(fr, entries);
3244 } while (*r == NULL);
3251 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
3252 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
3257 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3258 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
3262 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3263 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
3264 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
3265 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
3266 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
3267 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
3268 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
3269 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
3275 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
3280 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
3284 if (addr->addr32[3] == 0xffffffff) {
3285 addr->addr32[3] = 0;
3286 if (addr->addr32[2] == 0xffffffff) {
3287 addr->addr32[2] = 0;
3288 if (addr->addr32[1] == 0xffffffff) {
3289 addr->addr32[1] = 0;
3291 htonl(ntohl(addr->addr32[0]) + 1);
3294 htonl(ntohl(addr->addr32[1]) + 1);
3297 htonl(ntohl(addr->addr32[2]) + 1);
3300 htonl(ntohl(addr->addr32[3]) + 1);
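/*
 * pf_addr_inc() advances the address as a big-endian 128-bit counter:
 * e.g. incrementing an address ending in ffff:ffff wraps addr32[3] to
 * zero and carries into addr32[2], and so on up to addr32[0].
 */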
3307 pf_rule_to_actions(struct pf_krule *r, struct pf_rule_actions *a)
3316 pf_socket_lookup(int direction, struct pf_pdesc *pd, struct mbuf *m)
3318 struct pf_addr *saddr, *daddr;
3319 u_int16_t sport, dport;
3320 struct inpcbinfo *pi;
3323 pd->lookup.uid = UID_MAX;
3324 pd->lookup.gid = GID_MAX;
3326 switch (pd->proto) {
3328 sport = pd->hdr.tcp.th_sport;
3329 dport = pd->hdr.tcp.th_dport;
3333 sport = pd->hdr.udp.uh_sport;
3334 dport = pd->hdr.udp.uh_dport;
3340 if (direction == PF_IN) {
3355 inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4,
3356 dport, INPLOOKUP_RLOCKPCB, NULL, m);
3358 inp = in_pcblookup_mbuf(pi, saddr->v4, sport,
3359 daddr->v4, dport, INPLOOKUP_WILDCARD |
3360 INPLOOKUP_RLOCKPCB, NULL, m);
3368 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6,
3369 dport, INPLOOKUP_RLOCKPCB, NULL, m);
3371 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport,
3372 &daddr->v6, dport, INPLOOKUP_WILDCARD |
3373 INPLOOKUP_RLOCKPCB, NULL, m);
3383 INP_RLOCK_ASSERT(inp);
3384 pd->lookup.uid = inp->inp_cred->cr_uid;
3385 pd->lookup.gid = inp->inp_cred->cr_groups[0];
3392 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3396 u_int8_t *opt, optlen;
3397 u_int8_t wscale = 0;
3399 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3400 if (hlen <= sizeof(struct tcphdr))
3402 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3404 opt = hdr + sizeof(struct tcphdr);
3405 hlen -= sizeof(struct tcphdr);
3415 if (wscale > TCP_MAX_WINSHIFT)
3416 wscale = TCP_MAX_WINSHIFT;
3417 wscale |= PF_WSCALE_FLAG;
3432 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3436 u_int8_t *opt, optlen;
3437 u_int16_t mss = V_tcp_mssdflt;
3439 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3440 if (hlen <= sizeof(struct tcphdr))
3442 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3444 opt = hdr + sizeof(struct tcphdr);
3445 hlen -= sizeof(struct tcphdr);
3446 while (hlen >= TCPOLEN_MAXSEG) {
3454 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
3470 pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
3472 struct nhop_object *nh;
3474 struct in6_addr dst6;
3485 hlen = sizeof(struct ip);
3486 nh = fib4_lookup(rtableid, addr->v4, 0, 0, 0);
3488 mss = nh->nh_mtu - hlen - sizeof(struct tcphdr);
3493 hlen = sizeof(struct ip6_hdr);
3494 in6_splitscope(&addr->v6, &dst6, &scopeid);
3495 nh = fib6_lookup(rtableid, &dst6, scopeid, 0, 0);
3497 mss = nh->nh_mtu - hlen - sizeof(struct tcphdr);
3502 mss = max(V_tcp_mssdflt, mss);
3503 mss = min(mss, offer);
3504 mss = max(mss, 64); /* sanity - at least max opt space */
3509 pf_tcp_iss(struct pf_pdesc *pd)
3512 u_int32_t digest[4];
3514 if (V_pf_tcp_secret_init == 0) {
3515 arc4random_buf(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret));
3516 MD5Init(&V_pf_tcp_secret_ctx);
3517 MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret,
3518 sizeof(V_pf_tcp_secret));
3519 V_pf_tcp_secret_init = 1;
3522 ctx = V_pf_tcp_secret_ctx;
3524 MD5Update(&ctx, (char *)&pd->hdr.tcp.th_sport, sizeof(u_short));
3525 MD5Update(&ctx, (char *)&pd->hdr.tcp.th_dport, sizeof(u_short));
3526 if (pd->af == AF_INET6) {
3527 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
3528 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
3530 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
3531 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
3533 MD5Final((u_char *)digest, &ctx);
3534 V_pf_tcp_iss_off += 4096;
3535 #define ISN_RANDOM_INCREMENT (4096 - 1)
3536 return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) +
3538 #undef ISN_RANDOM_INCREMENT
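/*
 * The ISN is thus a keyed MD5 digest of the connection tuple plus a
 * random component below 4096 and a counter bumped by 4096 per call,
 * keeping initial sequence numbers hard to predict while still
 * advancing monotonically, in the spirit of RFC 6528.
 */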
3542 pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm, int direction,
3543 struct pfi_kkif *kif, struct mbuf *m, int off, struct pf_pdesc *pd,
3544 struct pf_krule **am, struct pf_kruleset **rsm, struct inpcb *inp)
3546 struct pf_krule *nr = NULL;
3547 struct pf_addr * const saddr = pd->src;
3548 struct pf_addr * const daddr = pd->dst;
3549 sa_family_t af = pd->af;
3550 struct pf_krule *r, *a = NULL;
3551 struct pf_kruleset *ruleset = NULL;
3552 struct pf_ksrc_node *nsn = NULL;
3553 struct tcphdr *th = &pd->hdr.tcp;
3554 struct pf_state_key *sk = NULL, *nk = NULL;
3556 int rewrite = 0, hdrlen = 0;
3557 int tag = -1, rtableid = -1;
3561 u_int16_t sport = 0, dport = 0;
3562 u_int16_t bproto_sum = 0, bip_sum = 0;
3563 u_int8_t icmptype = 0, icmpcode = 0;
3564 struct pf_kanchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
3569 INP_LOCK_ASSERT(inp);
3570 pd->lookup.uid = inp->inp_cred->cr_uid;
3571 pd->lookup.gid = inp->inp_cred->cr_groups[0];
3572 pd->lookup.done = 1;
3575 switch (pd->proto) {
3577 sport = th->th_sport;
3578 dport = th->th_dport;
3579 hdrlen = sizeof(*th);
3582 sport = pd->hdr.udp.uh_sport;
3583 dport = pd->hdr.udp.uh_dport;
3584 hdrlen = sizeof(pd->hdr.udp);
3588 if (pd->af != AF_INET)
3590 sport = dport = pd->hdr.icmp.icmp_id;
3591 hdrlen = sizeof(pd->hdr.icmp);
3592 icmptype = pd->hdr.icmp.icmp_type;
3593 icmpcode = pd->hdr.icmp.icmp_code;
3595 if (icmptype == ICMP_UNREACH ||
3596 icmptype == ICMP_SOURCEQUENCH ||
3597 icmptype == ICMP_REDIRECT ||
3598 icmptype == ICMP_TIMXCEED ||
3599 icmptype == ICMP_PARAMPROB)
3604 case IPPROTO_ICMPV6:
3607 sport = dport = pd->hdr.icmp6.icmp6_id;
3608 hdrlen = sizeof(pd->hdr.icmp6);
3609 icmptype = pd->hdr.icmp6.icmp6_type;
3610 icmpcode = pd->hdr.icmp6.icmp6_code;
3612 if (icmptype == ICMP6_DST_UNREACH ||
3613 icmptype == ICMP6_PACKET_TOO_BIG ||
3614 icmptype == ICMP6_TIME_EXCEEDED ||
3615 icmptype == ICMP6_PARAM_PROB)
3620 sport = dport = hdrlen = 0;
3624 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3626 /* check packet for BINAT/NAT/RDR */
3627 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, &sk,
3628 &nk, saddr, daddr, sport, dport, anchor_stack)) != NULL) {
3629 KASSERT(sk != NULL, ("%s: null sk", __func__));
3630 KASSERT(nk != NULL, ("%s: null nk", __func__));
3633 PFLOG_PACKET(kif, m, af, direction, PFRES_MATCH, nr, a,
3638 bip_sum = *pd->ip_sum;
3640 switch (pd->proto) {
3642 bproto_sum = th->th_sum;
3643 pd->proto_sum = &th->th_sum;
3645 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3646 nk->port[pd->sidx] != sport) {
3647 pf_change_ap(m, saddr, &th->th_sport, pd->ip_sum,
3648 &th->th_sum, &nk->addr[pd->sidx],
3649 nk->port[pd->sidx], 0, af);
3650 pd->sport = &th->th_sport;
3651 sport = th->th_sport;
3654 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3655 nk->port[pd->didx] != dport) {
3656 pf_change_ap(m, daddr, &th->th_dport, pd->ip_sum,
3657 &th->th_sum, &nk->addr[pd->didx],
3658 nk->port[pd->didx], 0, af);
3659 dport = th->th_dport;
3660 pd->dport = &th->th_dport;
3665 bproto_sum = pd->hdr.udp.uh_sum;
3666 pd->proto_sum = &pd->hdr.udp.uh_sum;
3668 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3669 nk->port[pd->sidx] != sport) {
3670 pf_change_ap(m, saddr, &pd->hdr.udp.uh_sport,
3671 pd->ip_sum, &pd->hdr.udp.uh_sum,
3672 &nk->addr[pd->sidx],
3673 nk->port[pd->sidx], 1, af);
3674 sport = pd->hdr.udp.uh_sport;
3675 pd->sport = &pd->hdr.udp.uh_sport;
3678 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3679 nk->port[pd->didx] != dport) {
3680 pf_change_ap(m, daddr, &pd->hdr.udp.uh_dport,
3681 pd->ip_sum, &pd->hdr.udp.uh_sum,
3682 &nk->addr[pd->didx],
3683 nk->port[pd->didx], 1, af);
3684 dport = pd->hdr.udp.uh_dport;
3685 pd->dport = &pd->hdr.udp.uh_dport;
3691 nk->port[0] = nk->port[1];
3692 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
3693 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3694 nk->addr[pd->sidx].v4.s_addr, 0);
3696 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
3697 pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
3698 nk->addr[pd->didx].v4.s_addr, 0);
3700 if (nk->port[1] != pd->hdr.icmp.icmp_id) {
3701 pd->hdr.icmp.icmp_cksum = pf_cksum_fixup(
3702 pd->hdr.icmp.icmp_cksum, sport,
3704 pd->hdr.icmp.icmp_id = nk->port[1];
3705 pd->sport = &pd->hdr.icmp.icmp_id;
3707 m_copyback(m, off, ICMP_MINLEN, (caddr_t)&pd->hdr.icmp);
3711 case IPPROTO_ICMPV6:
3712 nk->port[0] = nk->port[1];
3713 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3714 pf_change_a6(saddr, &pd->hdr.icmp6.icmp6_cksum,
3715 &nk->addr[pd->sidx], 0);
3717 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3718 pf_change_a6(daddr, &pd->hdr.icmp6.icmp6_cksum,
3719 &nk->addr[pd->didx], 0);
3728 &nk->addr[pd->sidx], AF_INET))
3729 pf_change_a(&saddr->v4.s_addr,
3731 nk->addr[pd->sidx].v4.s_addr, 0);
3734 &nk->addr[pd->didx], AF_INET))
3735 pf_change_a(&daddr->v4.s_addr,
3737 nk->addr[pd->didx].v4.s_addr, 0);
3743 &nk->addr[pd->sidx], AF_INET6))
3744 PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3747 &nk->addr[pd->didx], AF_INET6))
3748 PF_ACPY(daddr, &nk->addr[pd->didx], af);
3760 pf_counter_u64_add(&r->evaluations, 1);
3761 if (pfi_kkif_match(r->kif, kif) == r->ifnot)
3762 r = r->skip[PF_SKIP_IFP].ptr;
3763 else if (r->direction && r->direction != direction)
3764 r = r->skip[PF_SKIP_DIR].ptr;
3765 else if (r->af && r->af != af)
3766 r = r->skip[PF_SKIP_AF].ptr;
3767 else if (r->proto && r->proto != pd->proto)
3768 r = r->skip[PF_SKIP_PROTO].ptr;
3769 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3770 r->src.neg, kif, M_GETFIB(m)))
3771 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3772 /* tcp/udp only. port_op always 0 in other cases */
3773 else if (r->src.port_op && !pf_match_port(r->src.port_op,
3774 r->src.port[0], r->src.port[1], sport))
3775 r = r->skip[PF_SKIP_SRC_PORT].ptr;
3776 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3777 r->dst.neg, NULL, M_GETFIB(m)))
3778 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3779 /* tcp/udp only. port_op always 0 in other cases */
3780 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3781 r->dst.port[0], r->dst.port[1], dport))
3782 r = r->skip[PF_SKIP_DST_PORT].ptr;
3783 /* icmp only. type always 0 in other cases */
3784 else if (r->type && r->type != icmptype + 1)
3785 r = TAILQ_NEXT(r, entries);
3786 /* icmp only. code always 0 in other cases */
3787 else if (r->code && r->code != icmpcode + 1)
3788 r = TAILQ_NEXT(r, entries);
3789 else if (r->tos && !(r->tos == pd->tos))
3790 r = TAILQ_NEXT(r, entries);
3791 else if (r->rule_flag & PFRULE_FRAGMENT)
3792 r = TAILQ_NEXT(r, entries);
3793 else if (pd->proto == IPPROTO_TCP &&
3794 (r->flagset & th->th_flags) != r->flags)
3795 r = TAILQ_NEXT(r, entries);
3796 /* tcp/udp only. uid.op always 0 in other cases */
3797 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3798 pf_socket_lookup(direction, pd, m), 1)) &&
3799 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3801 r = TAILQ_NEXT(r, entries);
3802 /* tcp/udp only. gid.op always 0 in other cases */
3803 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
3804 pf_socket_lookup(direction, pd, m), 1)) &&
3805 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
3807 r = TAILQ_NEXT(r, entries);
3809 !pf_match_ieee8021q_pcp(r->prio, m))
3810 r = TAILQ_NEXT(r, entries);
3812 r->prob <= arc4random())
3813 r = TAILQ_NEXT(r, entries);
3814 else if (r->match_tag && !pf_match_tag(m, r, &tag,
3815 pd->pf_mtag ? pd->pf_mtag->tag : 0))
3816 r = TAILQ_NEXT(r, entries);
3817 else if (r->os_fingerprint != PF_OSFP_ANY &&
3818 (pd->proto != IPPROTO_TCP || !pf_osfp_match(
3819 pf_osfp_fingerprint(pd, m, off, th),
3820 r->os_fingerprint)))
3821 r = TAILQ_NEXT(r, entries);
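/*
 * The r->skip[] pointers, precomputed when the ruleset is loaded, let
 * this loop jump over whole runs of consecutive rules that would fail
 * on the same criterion (interface, direction, af, proto, address,
 * port) instead of evaluating them one by one.
 */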
3825 if (r->rtableid >= 0)
3826 rtableid = r->rtableid;
3827 if (r->anchor == NULL) {
3828 if (r->action == PF_MATCH) {
3829 pf_counter_u64_critical_enter();
3830 pf_counter_u64_add_protected(&r->packets[direction == PF_OUT], 1);
3831 pf_counter_u64_add_protected(&r->bytes[direction == PF_OUT], pd->tot_len);
3832 pf_counter_u64_critical_exit();
3833 pf_rule_to_actions(r, &pd->act);
3835 PFLOG_PACKET(kif, m, af,
3836 direction, PFRES_MATCH, r,
3846 r = TAILQ_NEXT(r, entries);
3848 pf_step_into_anchor(anchor_stack, &asd,
3849 &ruleset, PF_RULESET_FILTER, &r, &a,
3852 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3853 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3860 REASON_SET(&reason, PFRES_MATCH);
3862 /* apply actions for last matching pass/block rule */
3863 pf_rule_to_actions(r, &pd->act);
3867 m_copyback(m, off, hdrlen, pd->hdr.any);
3868 PFLOG_PACKET(kif, m, af, direction, reason, r, a,
3872 if ((r->action == PF_DROP) &&
3873 ((r->rule_flag & PFRULE_RETURNRST) ||
3874 (r->rule_flag & PFRULE_RETURNICMP) ||
3875 (r->rule_flag & PFRULE_RETURN))) {
3876 pf_return(r, nr, pd, sk, off, m, th, kif, bproto_sum,
3877 bip_sum, hdrlen, &reason);
3880 if (r->action == PF_DROP)
3883 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3884 REASON_SET(&reason, PFRES_MEMORY);
3888 M_SETFIB(m, rtableid);
3890 if (!state_icmp && (r->keep_state || nr != NULL ||
3891 (pd->flags & PFDESC_TCP_NORM))) {
3893 action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off,
3894 sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum,
3896 if (action != PF_PASS) {
3897 if (action == PF_DROP &&
3898 (r->rule_flag & PFRULE_RETURN))
3899 pf_return(r, nr, pd, sk, off, m, th, kif,
3900 bproto_sum, bip_sum, hdrlen, &reason);
3905 uma_zfree(V_pf_state_key_z, sk);
3907 uma_zfree(V_pf_state_key_z, nk);
3910 /* copy back packet headers if we performed NAT operations */
3912 m_copyback(m, off, hdrlen, pd->hdr.any);
3914 if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) &&
3915 direction == PF_OUT &&
3916 V_pfsync_defer_ptr != NULL && V_pfsync_defer_ptr(*sm, m))
3918 * We want the state created, but we don't
3919 * want to send this in case a partner
3920 * firewall has to know about it to allow
3921 * replies through it.
3929 uma_zfree(V_pf_state_key_z, sk);
3931 uma_zfree(V_pf_state_key_z, nk);
3936 pf_create_state(struct pf_krule *r, struct pf_krule *nr, struct pf_krule *a,
3937 struct pf_pdesc *pd, struct pf_ksrc_node *nsn, struct pf_state_key *nk,
3938 struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport,
3939 u_int16_t dport, int *rewrite, struct pfi_kkif *kif, struct pf_kstate **sm,
3940 int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen)
3942 struct pf_kstate *s = NULL;
3943 struct pf_ksrc_node *sn = NULL;
3944 struct tcphdr *th = &pd->hdr.tcp;
3945 u_int16_t mss = V_tcp_mssdflt;
3948 /* check maximums */
3949 if (r->max_states &&
3950 (counter_u64_fetch(r->states_cur) >= r->max_states)) {
3951 counter_u64_add(V_pf_status.lcounters[LCNT_STATES], 1);
3952 REASON_SET(&reason, PFRES_MAXSTATES);
3955 /* src node for filter rule */
3956 if ((r->rule_flag & PFRULE_SRCTRACK ||
3957 r->rpool.opts & PF_POOL_STICKYADDR) &&
3958 pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
3959 REASON_SET(&reason, PFRES_SRCLIMIT);
3962 /* src node for translation rule */
3963 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3964 pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
3965 REASON_SET(&reason, PFRES_SRCLIMIT);
3968 s = pf_alloc_state(M_NOWAIT);
3970 REASON_SET(&reason, PFRES_MEMORY);
3974 s->nat_rule.ptr = nr;
3976 STATE_INC_COUNTERS(s);
3978 s->state_flags |= PFSTATE_ALLOWOPTS;
3979 if (r->rule_flag & PFRULE_STATESLOPPY)
3980 s->state_flags |= PFSTATE_SLOPPY;
3981 s->log = r->log & PF_LOG_ALL;
3982 s->sync_state = PFSYNC_S_NONE;
3983 s->qid = pd->act.qid;
3984 s->pqid = pd->act.pqid;
3986 s->log |= nr->log & PF_LOG_ALL;
3987 switch (pd->proto) {
3989 s->src.seqlo = ntohl(th->th_seq);
3990 s->src.seqhi = s->src.seqlo + pd->p_len + 1;
3991 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
3992 r->keep_state == PF_STATE_MODULATE) {
3993 /* Generate sequence number modulator */
3994 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
3997 pf_change_proto_a(m, &th->th_seq, &th->th_sum,
3998 htonl(s->src.seqlo + s->src.seqdiff), 0);
4002 if (th->th_flags & TH_SYN) {
4004 s->src.wscale = pf_get_wscale(m, off,
4005 th->th_off, pd->af);
4007 s->src.max_win = MAX(ntohs(th->th_win), 1);
4008 if (s->src.wscale & PF_WSCALE_MASK) {
4009 /* Remove scale factor from initial window */
4010 int win = s->src.max_win;
4011 win += 1 << (s->src.wscale & PF_WSCALE_MASK);
4012 s->src.max_win = (win - 1) >>
4013 (s->src.wscale & PF_WSCALE_MASK);
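/*
 * win += 1 << wscale followed by (win - 1) >> wscale rounds up:
 * max_win = ceil(win / 2^wscale), e.g. wscale 2, win 10 -> 3.  The
 * scale factor is re-applied later when the peer's window is checked.
 */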
4015 if (th->th_flags & TH_FIN)
4019 pf_set_protostate(s, PF_PEER_SRC, TCPS_SYN_SENT);
4020 pf_set_protostate(s, PF_PEER_DST, TCPS_CLOSED);
4021 s->timeout = PFTM_TCP_FIRST_PACKET;
4024 pf_set_protostate(s, PF_PEER_SRC, PFUDPS_SINGLE);
4025 pf_set_protostate(s, PF_PEER_DST, PFUDPS_NO_TRAFFIC);
4026 s->timeout = PFTM_UDP_FIRST_PACKET;
4030 case IPPROTO_ICMPV6:
4032 s->timeout = PFTM_ICMP_FIRST_PACKET;
4035 pf_set_protostate(s, PF_PEER_SRC, PFOTHERS_SINGLE);
4036 pf_set_protostate(s, PF_PEER_DST, PFOTHERS_NO_TRAFFIC);
4037 s->timeout = PFTM_OTHER_FIRST_PACKET;
4041 if (pf_map_addr(pd->af, r, pd->src, &s->rt_addr, NULL, &sn)) {
4042 REASON_SET(&reason, PFRES_MAPFAILED);
4043 pf_src_tree_remove_state(s);
4044 s->timeout = PFTM_UNLINKED;
4045 STATE_DEC_COUNTERS(s);
4049 s->rt_kif = r->rpool.cur->kif;
4052 s->creation = time_uptime;
4053 s->expire = time_uptime;
4058 /* XXX We only modify one side for now. */
4059 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
4060 s->nat_src_node = nsn;
4062 if (pd->proto == IPPROTO_TCP) {
4063 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
4064 off, pd, th, &s->src, &s->dst)) {
4065 REASON_SET(&reason, PFRES_MEMORY);
4066 pf_src_tree_remove_state(s);
4067 s->timeout = PFTM_UNLINKED;
4068 STATE_DEC_COUNTERS(s);
4072 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
4073 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
4074 &s->src, &s->dst, rewrite)) {
4075 /* This really shouldn't happen!!! */
4076 DPFPRINTF(PF_DEBUG_URGENT,
4077 ("pf_normalize_tcp_stateful failed on first "
4079 pf_src_tree_remove_state(s);
4080 s->timeout = PFTM_UNLINKED;
4081 STATE_DEC_COUNTERS(s);
4086 s->direction = pd->dir;
4089 * sk/nk may already have been set up by pf_get_translation().
4092 KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p",
4093 __func__, nr, sk, nk));
4094 sk = pf_state_key_setup(pd, pd->src, pd->dst, sport, dport);
4099 KASSERT((sk != NULL && nk != NULL), ("%s: nr %p sk %p, nk %p",
4100 __func__, nr, sk, nk));
4102 /* Swap sk/nk for PF_OUT. */
4103 if (pf_state_insert(BOUND_IFACE(r, kif), kif,
4104 (pd->dir == PF_IN) ? sk : nk,
4105 (pd->dir == PF_IN) ? nk : sk, s)) {
4106 REASON_SET(&reason, PFRES_STATEINS);
4107 pf_src_tree_remove_state(s);
4108 s->timeout = PFTM_UNLINKED;
4109 STATE_DEC_COUNTERS(s);
4117 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
4118 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
4119 pf_set_protostate(s, PF_PEER_SRC, PF_TCPS_PROXY_SRC);
4120 /* undo NAT changes, if they have taken place */
4122 struct pf_state_key *skt = s->key[PF_SK_WIRE];
4123 if (pd->dir == PF_OUT)
4124 skt = s->key[PF_SK_STACK];
4125 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
4126 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
4128 *pd->sport = skt->port[pd->sidx];
4130 *pd->dport = skt->port[pd->didx];
4132 *pd->proto_sum = bproto_sum;
4134 *pd->ip_sum = bip_sum;
4135 m_copyback(m, off, hdrlen, pd->hdr.any);
4137 s->src.seqhi = htonl(arc4random());
4138 /* Find mss option */
4139 int rtid = M_GETFIB(m);
4140 mss = pf_get_mss(m, off, th->th_off, pd->af);
4141 mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
4142 mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
4144 pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
4145 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
4146 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0);
4147 REASON_SET(&reason, PFRES_SYNPROXY);
4148 return (PF_SYNPROXY_DROP);
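/*
 * First synproxy stage: pf itself answers the client's SYN with the
 * SYN|ACK built above and drops the original packet; the connection to
 * the real destination is only opened once the client completes the
 * handshake (see pf_synproxy()).
 */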
4155 uma_zfree(V_pf_state_key_z, sk);
4157 uma_zfree(V_pf_state_key_z, nk);
4160 struct pf_srchash *sh;
4162 sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
4163 PF_HASHROW_LOCK(sh);
4164 if (--sn->states == 0 && sn->expire == 0) {
4165 pf_unlink_src_node(sn);
4166 uma_zfree(V_pf_sources_z, sn);
4168 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
4170 PF_HASHROW_UNLOCK(sh);
4173 if (nsn != sn && nsn != NULL) {
4174 struct pf_srchash *sh;
4176 sh = &V_pf_srchash[pf_hashsrc(&nsn->addr, nsn->af)];
4177 PF_HASHROW_LOCK(sh);
4178 if (--nsn->states == 0 && nsn->expire == 0) {
4179 pf_unlink_src_node(nsn);
4180 uma_zfree(V_pf_sources_z, nsn);
4182 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
4184 PF_HASHROW_UNLOCK(sh);
4191 pf_test_fragment(struct pf_krule **rm, int direction, struct pfi_kkif *kif,
4192 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_krule **am,
4193 struct pf_kruleset **rsm)
4195 struct pf_krule *r, *a = NULL;
4196 struct pf_kruleset *ruleset = NULL;
4197 sa_family_t af = pd->af;
4202 struct pf_kanchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
4206 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
4208 pf_counter_u64_add(&r->evaluations, 1);
4209 if (pfi_kkif_match(r->kif, kif) == r->ifnot)
4210 r = r->skip[PF_SKIP_IFP].ptr;
4211 else if (r->direction && r->direction != direction)
4212 r = r->skip[PF_SKIP_DIR].ptr;
4213 else if (r->af && r->af != af)
4214 r = r->skip[PF_SKIP_AF].ptr;
4215 else if (r->proto && r->proto != pd->proto)
4216 r = r->skip[PF_SKIP_PROTO].ptr;
4217 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
4218 r->src.neg, kif, M_GETFIB(m)))
4219 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
4220 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
4221 r->dst.neg, NULL, M_GETFIB(m)))
4222 r = r->skip[PF_SKIP_DST_ADDR].ptr;
4223 else if (r->tos && !(r->tos == pd->tos))
4224 r = TAILQ_NEXT(r, entries);
4225 else if (r->os_fingerprint != PF_OSFP_ANY)
4226 r = TAILQ_NEXT(r, entries);
4227 else if (pd->proto == IPPROTO_UDP &&
4228 (r->src.port_op || r->dst.port_op))
4229 r = TAILQ_NEXT(r, entries);
4230 else if (pd->proto == IPPROTO_TCP &&
4231 (r->src.port_op || r->dst.port_op || r->flagset))
4232 r = TAILQ_NEXT(r, entries);
4233 else if ((pd->proto == IPPROTO_ICMP ||
4234 pd->proto == IPPROTO_ICMPV6) &&
4235 (r->type || r->code))
4236 r = TAILQ_NEXT(r, entries);
4238 !pf_match_ieee8021q_pcp(r->prio, m))
4239 r = TAILQ_NEXT(r, entries);
4240 else if (r->prob && r->prob <=
4241 (arc4random() % (UINT_MAX - 1) + 1))
4242 r = TAILQ_NEXT(r, entries);
4243 else if (r->match_tag && !pf_match_tag(m, r, &tag,
4244 pd->pf_mtag ? pd->pf_mtag->tag : 0))
4245 r = TAILQ_NEXT(r, entries);
4247 if (r->anchor == NULL) {
4248 if (r->action == PF_MATCH) {
4249 pf_counter_u64_critical_enter();
4250 pf_counter_u64_add_protected(&r->packets[direction == PF_OUT], 1);
4251 pf_counter_u64_add_protected(&r->bytes[direction == PF_OUT], pd->tot_len);
4252 pf_counter_u64_critical_exit();
4253 pf_rule_to_actions(r, &pd->act);
4255 PFLOG_PACKET(kif, m, af,
4256 direction, PFRES_MATCH, r,
4266 r = TAILQ_NEXT(r, entries);
4268 pf_step_into_anchor(anchor_stack, &asd,
4269 &ruleset, PF_RULESET_FILTER, &r, &a,
4272 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
4273 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
4280 REASON_SET(&reason, PFRES_MATCH);
4282 /* apply actions for last matching pass/block rule */
4283 pf_rule_to_actions(r, &pd->act);
4286 PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd,
4289 if (r->action != PF_PASS)
4292 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
4293 REASON_SET(&reason, PFRES_MEMORY);
4301 pf_tcp_track_full(struct pf_kstate **state, struct pfi_kkif *kif,
4302 struct mbuf *m, int off, struct pf_pdesc *pd, u_short *reason,
4305 struct tcphdr *th = &pd->hdr.tcp;
4306 struct pf_state_peer *src, *dst;
4307 u_int16_t win = ntohs(th->th_win);
4308 u_int32_t ack, end, seq, orig_seq;
4309 u_int8_t sws, dws, psrc, pdst;
4312 if (pd->dir == (*state)->direction) {
4313 src = &(*state)->src;
4314 dst = &(*state)->dst;
4318 src = &(*state)->dst;
4319 dst = &(*state)->src;
4324 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
4325 sws = src->wscale & PF_WSCALE_MASK;
4326 dws = dst->wscale & PF_WSCALE_MASK;
4331 * Sequence tracking algorithm from Guido van Rooij's paper:
4332 * http://www.madison-gurkha.com/publications/tcp_filtering/
4336 orig_seq = seq = ntohl(th->th_seq);
4337 if (src->seqlo == 0) {
4338 /* First packet from this end. Set its state */
4340 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
4341 src->scrub == NULL) {
4342 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
4343 REASON_SET(reason, PFRES_MEMORY);
4348 /* Deferred generation of sequence number modulator */
4349 if (dst->seqdiff && !src->seqdiff) {
4350 /* use random iss for the TCP server */
4351 while ((src->seqdiff = arc4random() - seq) == 0)
4353 ack = ntohl(th->th_ack) - dst->seqdiff;
4354 pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
4356 pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
4359 ack = ntohl(th->th_ack);
4362 end = seq + pd->p_len;
4363 if (th->th_flags & TH_SYN) {
4365 if (dst->wscale & PF_WSCALE_FLAG) {
4366 src->wscale = pf_get_wscale(m, off, th->th_off,
4368 if (src->wscale & PF_WSCALE_FLAG) {
4369 /* Remove scale factor from initial
4370 * window */
4371 sws = src->wscale & PF_WSCALE_MASK;
4372 win = ((u_int32_t)win + (1 << sws) - 1)
4374 dws = dst->wscale & PF_WSCALE_MASK;
4376 /* fixup other window */
4377 dst->max_win <<= dst->wscale &
4379 /* in case of a retrans SYN|ACK */
4384 if (th->th_flags & TH_FIN)
4388 if (src->state < TCPS_SYN_SENT)
4389 pf_set_protostate(*state, psrc, TCPS_SYN_SENT);
4392 * May need to slide the window (seqhi may have been set by
4393 * the crappy stack check or if we picked up the connection
4394 * after establishment)
4396 if (src->seqhi == 1 ||
4397 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
4398 src->seqhi = end + MAX(1, dst->max_win << dws);
4399 if (win > src->max_win)
4403 ack = ntohl(th->th_ack) - dst->seqdiff;
4405 /* Modulate sequence numbers */
4406 pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
4408 pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
4411 end = seq + pd->p_len;
4412 if (th->th_flags & TH_SYN)
4414 if (th->th_flags & TH_FIN)
4418 if ((th->th_flags & TH_ACK) == 0) {
4419 /* Let it pass through the ack skew check */
4421 } else if ((ack == 0 &&
4422 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
4423 /* broken tcp stacks do not set ack */
4424 (dst->state < TCPS_SYN_SENT)) {
4426 * Many stacks (ours included) will set the ACK number in an
4427 * FIN|ACK if the SYN times out -- no sequence to ACK.
4433 /* Ease sequencing restrictions on no-data packets */
4438 ackskew = dst->seqlo - ack;
4441 * Need to demodulate the sequence numbers in any TCP SACK options
4442 * (Selective ACK). We could optionally validate the SACK values
4443 * against the current ACK window, either forwards or backwards, but
4444 * I'm not confident that SACK has been implemented properly
4445 * everywhere. It wouldn't surprise me if several stacks accidentally
4446 * SACK too far backwards of previously ACKed data. There really aren't
4447 * any security implications of bad SACKing unless the target stack
4448 * doesn't validate the option length correctly. Someone trying to
4449 * spoof into a TCP connection won't bother blindly sending SACK
4452 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
4453 if (pf_modulate_sack(m, off, pd, th, dst))
4457 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
4458 if (SEQ_GEQ(src->seqhi, end) &&
4459 /* Last octet inside other's window space */
4460 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
4461 /* Retrans: not more than one window back */
4462 (ackskew >= -MAXACKWINDOW) &&
4463 /* Acking not more than one reassembled fragment backwards */
4464 (ackskew <= (MAXACKWINDOW << sws)) &&
4465 /* Acking not more than one window forward */
4466 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
4467 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
4468 (pd->flags & PFDESC_IP_REAS) == 0)) {
4469 /* Require an exact/+1 sequence match on resets when possible */
4471 if (dst->scrub || src->scrub) {
4472 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4473 *state, src, dst, copyback))
4477 /* update max window */
4478 if (src->max_win < win)
4480 /* synchronize sequencing */
4481 if (SEQ_GT(end, src->seqlo))
4483 /* slide the window of what the other end can send */
4484 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4485 dst->seqhi = ack + MAX((win << sws), 1);
4488 if (th->th_flags & TH_SYN)
4489 if (src->state < TCPS_SYN_SENT)
4490 pf_set_protostate(*state, psrc, TCPS_SYN_SENT);
4491 if (th->th_flags & TH_FIN)
4492 if (src->state < TCPS_CLOSING)
4493 pf_set_protostate(*state, psrc, TCPS_CLOSING);
4494 if (th->th_flags & TH_ACK) {
4495 if (dst->state == TCPS_SYN_SENT) {
4496 pf_set_protostate(*state, pdst,
4498 if (src->state == TCPS_ESTABLISHED &&
4499 (*state)->src_node != NULL &&
4500 pf_src_connlimit(state)) {
4501 REASON_SET(reason, PFRES_SRCLIMIT);
4504 } else if (dst->state == TCPS_CLOSING)
4505 pf_set_protostate(*state, pdst,
4508 if (th->th_flags & TH_RST)
4509 pf_set_protostate(*state, PF_PEER_BOTH, TCPS_TIME_WAIT);
4511 /* update expire time */
4512 (*state)->expire = time_uptime;
4513 if (src->state >= TCPS_FIN_WAIT_2 &&
4514 dst->state >= TCPS_FIN_WAIT_2)
4515 (*state)->timeout = PFTM_TCP_CLOSED;
4516 else if (src->state >= TCPS_CLOSING &&
4517 dst->state >= TCPS_CLOSING)
4518 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4519 else if (src->state < TCPS_ESTABLISHED ||
4520 dst->state < TCPS_ESTABLISHED)
4521 (*state)->timeout = PFTM_TCP_OPENING;
4522 else if (src->state >= TCPS_CLOSING ||
4523 dst->state >= TCPS_CLOSING)
4524 (*state)->timeout = PFTM_TCP_CLOSING;
4526 (*state)->timeout = PFTM_TCP_ESTABLISHED;
4528 /* Fall through to PASS packet */
4530 } else if ((dst->state < TCPS_SYN_SENT ||
4531 dst->state >= TCPS_FIN_WAIT_2 ||
4532 src->state >= TCPS_FIN_WAIT_2) &&
4533 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
4534 /* Within a window forward of the originating packet */
4535 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
4536 /* Within a window backward of the originating packet */
4539 * This currently handles three situations:
4540 * 1) Stupid stacks will shotgun SYNs before their peer
4542 * 2) When PF catches an already established stream (the
4543 * firewall rebooted, the state table was flushed, routes
4545 * 3) Packets get funky immediately after the connection
4546 * closes (this should catch Solaris spurious ACK|FINs
4547 * that web servers like to spew after a close)
4549 * This must be a little more careful than the above code
4550 * since packet floods will also be caught here. We don't
4551 * update the TTL here to mitigate the damage of a packet
4552 * flood and so the same code can handle awkward establishment
4553 * and a loosened connection close.
4554 * In the establishment case, a correct peer response will
4555 * validate the connection, go through the normal state code
4556 * and keep updating the state TTL.
4559 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4560 printf("pf: loose state match: ");
4561 pf_print_state(*state);
4562 pf_print_flags(th->th_flags);
4563 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4564 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
4565 pd->p_len, ackskew, (unsigned long long)(*state)->packets[0],
4566 (unsigned long long)(*state)->packets[1],
4567 pd->dir == PF_IN ? "in" : "out",
4568 pd->dir == (*state)->direction ? "fwd" : "rev");
4571 if (dst->scrub || src->scrub) {
4572 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4573 *state, src, dst, copyback))
4577 /* update max window */
4578 if (src->max_win < win)
4580 /* synchronize sequencing */
4581 if (SEQ_GT(end, src->seqlo))
4583 /* slide the window of what the other end can send */
4584 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4585 dst->seqhi = ack + MAX((win << sws), 1);
4588 * Cannot set dst->seqhi here since this could be a shotgunned
4589 * SYN and not an already established connection.
4592 if (th->th_flags & TH_FIN)
4593 if (src->state < TCPS_CLOSING)
4594 pf_set_protostate(*state, psrc, TCPS_CLOSING);
4595 if (th->th_flags & TH_RST)
4596 pf_set_protostate(*state, PF_PEER_BOTH, TCPS_TIME_WAIT);
4598 /* Fall through to PASS packet */
4601 if ((*state)->dst.state == TCPS_SYN_SENT &&
4602 (*state)->src.state == TCPS_SYN_SENT) {
4603 /* Send RST for state mismatches during handshake */
4604 if (!(th->th_flags & TH_RST))
4605 pf_send_tcp((*state)->rule.ptr, pd->af,
4606 pd->dst, pd->src, th->th_dport,
4607 th->th_sport, ntohl(th->th_ack), 0,
4609 (*state)->rule.ptr->return_ttl, 1, 0);
4613 } else if (V_pf_status.debug >= PF_DEBUG_MISC) {
4614 printf("pf: BAD state: ");
4615 pf_print_state(*state);
4616 pf_print_flags(th->th_flags);
4617 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4618 "pkts=%llu:%llu dir=%s,%s\n",
4619 seq, orig_seq, ack, pd->p_len, ackskew,
4620 (unsigned long long)(*state)->packets[0],
4621 (unsigned long long)(*state)->packets[1],
4622 pd->dir == PF_IN ? "in" : "out",
4623 pd->dir == (*state)->direction ? "fwd" : "rev");
4624 printf("pf: State failure on: %c %c %c %c | %c %c\n",
4625 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4626 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4628 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4629 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4630 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4631 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
4633 REASON_SET(reason, PFRES_BADSTATE);
4641 pf_tcp_track_sloppy(struct pf_kstate **state, struct pf_pdesc *pd, u_short *reason)
4643 struct tcphdr *th = &pd->hdr.tcp;
4644 struct pf_state_peer *src, *dst;
4645 u_int8_t psrc, pdst;
4647 if (pd->dir == (*state)->direction) {
4648 src = &(*state)->src;
4649 dst = &(*state)->dst;
4653 src = &(*state)->dst;
4654 dst = &(*state)->src;
4659 if (th->th_flags & TH_SYN)
4660 if (src->state < TCPS_SYN_SENT)
4661 pf_set_protostate(*state, psrc, TCPS_SYN_SENT);
4662 if (th->th_flags & TH_FIN)
4663 if (src->state < TCPS_CLOSING)
4664 pf_set_protostate(*state, psrc, TCPS_CLOSING);
4665 if (th->th_flags & TH_ACK) {
4666 if (dst->state == TCPS_SYN_SENT) {
4667 pf_set_protostate(*state, pdst, TCPS_ESTABLISHED);
4668 if (src->state == TCPS_ESTABLISHED &&
4669 (*state)->src_node != NULL &&
4670 pf_src_connlimit(state)) {
4671 REASON_SET(reason, PFRES_SRCLIMIT);
4674 } else if (dst->state == TCPS_CLOSING) {
4675 pf_set_protostate(*state, pdst, TCPS_FIN_WAIT_2);
4676 } else if (src->state == TCPS_SYN_SENT &&
4677 dst->state < TCPS_SYN_SENT) {
4679 * Handle a special sloppy case where we only see one
4680 * half of the connection. If there is an ACK after
4681 * the initial SYN without ever seeing a packet from
4682 * the destination, set the connection to established.
4684 pf_set_protostate(*state, PF_PEER_BOTH,
4686 dst->state = src->state = TCPS_ESTABLISHED;
4687 if ((*state)->src_node != NULL &&
4688 pf_src_connlimit(state)) {
4689 REASON_SET(reason, PFRES_SRCLIMIT);
4692 } else if (src->state == TCPS_CLOSING &&
4693 dst->state == TCPS_ESTABLISHED &&
4696 * Handle the closing of half connections where we
4697 * don't see the full bidirectional FIN/ACK+ACK
4700 pf_set_protostate(*state, pdst, TCPS_CLOSING);
4703 if (th->th_flags & TH_RST)
4704 pf_set_protostate(*state, PF_PEER_BOTH, TCPS_TIME_WAIT);
4706 /* update expire time */
4707 (*state)->expire = time_uptime;
4708 if (src->state >= TCPS_FIN_WAIT_2 &&
4709 dst->state >= TCPS_FIN_WAIT_2)
4710 (*state)->timeout = PFTM_TCP_CLOSED;
4711 else if (src->state >= TCPS_CLOSING &&
4712 dst->state >= TCPS_CLOSING)
4713 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4714 else if (src->state < TCPS_ESTABLISHED ||
4715 dst->state < TCPS_ESTABLISHED)
4716 (*state)->timeout = PFTM_TCP_OPENING;
4717 else if (src->state >= TCPS_CLOSING ||
4718 dst->state >= TCPS_CLOSING)
4719 (*state)->timeout = PFTM_TCP_CLOSING;
4721 (*state)->timeout = PFTM_TCP_ESTABLISHED;
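/*
 * Sloppy tracking intentionally skips all of the sequence and window
 * validation done by pf_tcp_track_full(): it only follows the rough
 * TCP state machine and timeouts, which is what makes it usable on
 * asymmetric paths where only one half of the connection is seen.
 */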
4727 pf_synproxy(struct pf_pdesc *pd, struct pf_kstate **state, u_short *reason)
4729 struct pf_state_key *sk = (*state)->key[pd->didx];
4730 struct tcphdr *th = &pd->hdr.tcp;
4732 if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
4733 if (pd->dir != (*state)->direction) {
4734 REASON_SET(reason, PFRES_SYNPROXY);
4735 return (PF_SYNPROXY_DROP);
4737 if (th->th_flags & TH_SYN) {
4738 if (ntohl(th->th_seq) != (*state)->src.seqlo) {
4739 REASON_SET(reason, PFRES_SYNPROXY);
4742 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
4743 pd->src, th->th_dport, th->th_sport,
4744 (*state)->src.seqhi, ntohl(th->th_seq) + 1,
4745 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0);
4746 REASON_SET(reason, PFRES_SYNPROXY);
4747 return (PF_SYNPROXY_DROP);
4748 } else if ((th->th_flags & (TH_ACK|TH_RST|TH_FIN)) != TH_ACK ||
4749 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4750 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4751 REASON_SET(reason, PFRES_SYNPROXY);
4753 } else if ((*state)->src_node != NULL &&
4754 pf_src_connlimit(state)) {
4755 REASON_SET(reason, PFRES_SRCLIMIT);
4758 pf_set_protostate(*state, PF_PEER_SRC,
4761 if ((*state)->src.state == PF_TCPS_PROXY_DST) {
4762 if (pd->dir == (*state)->direction) {
4763 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
4764 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4765 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4766 REASON_SET(reason, PFRES_SYNPROXY);
4769 (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
4770 if ((*state)->dst.seqhi == 1)
4771 (*state)->dst.seqhi = htonl(arc4random());
4772 pf_send_tcp((*state)->rule.ptr, pd->af,
4773 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4774 sk->port[pd->sidx], sk->port[pd->didx],
4775 (*state)->dst.seqhi, 0, TH_SYN, 0,
4776 (*state)->src.mss, 0, 0, (*state)->tag);
4777 REASON_SET(reason, PFRES_SYNPROXY);
4778 return (PF_SYNPROXY_DROP);
4779 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
4781 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
4782 REASON_SET(reason, PFRES_SYNPROXY);
4785 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
4786 (*state)->dst.seqlo = ntohl(th->th_seq);
4787 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
4788 pd->src, th->th_dport, th->th_sport,
4789 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
4790 TH_ACK, (*state)->src.max_win, 0, 0, 0,
4792 pf_send_tcp((*state)->rule.ptr, pd->af,
4793 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4794 sk->port[pd->sidx], sk->port[pd->didx],
4795 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
4796 TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0);
4797 (*state)->src.seqdiff = (*state)->dst.seqhi -
4798 (*state)->src.seqlo;
4799 (*state)->dst.seqdiff = (*state)->src.seqhi -
4800 (*state)->dst.seqlo;
4801 (*state)->src.seqhi = (*state)->src.seqlo +
4802 (*state)->dst.max_win;
4803 (*state)->dst.seqhi = (*state)->dst.seqlo +
4804 (*state)->src.max_win;
4805 (*state)->src.wscale = (*state)->dst.wscale = 0;
4806 pf_set_protostate(*state, PF_PEER_BOTH,
4808 REASON_SET(reason, PFRES_SYNPROXY);
4809 return (PF_SYNPROXY_DROP);
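/*
 * Once both proxied handshakes complete, src.seqdiff and dst.seqdiff
 * (set above from the two independent ISNs) translate sequence numbers
 * between the client-side and server-side halves of the connection for
 * the rest of its lifetime.
 */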
4817 pf_test_state_tcp(struct pf_kstate **state, int direction, struct pfi_kkif *kif,
4818 struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
4821 struct pf_state_key_cmp key;
4822 struct tcphdr *th = &pd->hdr.tcp;
4825 struct pf_state_peer *src, *dst;
4826 struct pf_state_key *sk;
4828 bzero(&key, sizeof(key));
4830 key.proto = IPPROTO_TCP;
4831 if (direction == PF_IN) { /* wire side, straight */
4832 PF_ACPY(&key.addr[0], pd->src, key.af);
4833 PF_ACPY(&key.addr[1], pd->dst, key.af);
4834 key.port[0] = th->th_sport;
4835 key.port[1] = th->th_dport;
4836 } else { /* stack side, reverse */
4837 PF_ACPY(&key.addr[1], pd->src, key.af);
4838 PF_ACPY(&key.addr[0], pd->dst, key.af);
4839 key.port[1] = th->th_sport;
4840 key.port[0] = th->th_dport;
4843 STATE_LOOKUP(kif, &key, direction, *state, pd);
4845 if (direction == (*state)->direction) {
4846 src = &(*state)->src;
4847 dst = &(*state)->dst;
4849 src = &(*state)->dst;
4850 dst = &(*state)->src;
4853 sk = (*state)->key[pd->didx];
4855 if ((action = pf_synproxy(pd, state, reason)) != PF_PASS)
4858 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
4859 dst->state >= TCPS_FIN_WAIT_2 &&
4860 src->state >= TCPS_FIN_WAIT_2) {
4861 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4862 printf("pf: state reuse ");
4863 pf_print_state(*state);
4864 pf_print_flags(th->th_flags);
4867 /* XXX make sure it's the same direction ?? */
4868 pf_set_protostate(*state, PF_PEER_BOTH, TCPS_CLOSED);
4869 pf_unlink_state(*state, PF_ENTER_LOCKED);
4874 if ((*state)->state_flags & PFSTATE_SLOPPY) {
4875 if (pf_tcp_track_sloppy(state, pd, reason) == PF_DROP)
4878 if (pf_tcp_track_full(state, kif, m, off, pd, reason,
4879 &copyback) == PF_DROP)
4883 /* translate source/destination address, if necessary */
4884 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4885 struct pf_state_key *nk = (*state)->key[pd->didx];
4887 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4888 nk->port[pd->sidx] != th->th_sport)
4889 pf_change_ap(m, pd->src, &th->th_sport,
4890 pd->ip_sum, &th->th_sum, &nk->addr[pd->sidx],
4891 nk->port[pd->sidx], 0, pd->af);
4893 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4894 nk->port[pd->didx] != th->th_dport)
4895 pf_change_ap(m, pd->dst, &th->th_dport,
4896 pd->ip_sum, &th->th_sum, &nk->addr[pd->didx],
4897 nk->port[pd->didx], 0, pd->af);
4901 /* Copyback sequence modulation or stateful scrub changes if needed */
4903 m_copyback(m, off, sizeof(*th), (caddr_t)th);
4909 pf_test_state_udp(struct pf_kstate **state, int direction, struct pfi_kkif *kif,
4910 struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4912 struct pf_state_peer *src, *dst;
4913 struct pf_state_key_cmp key;
4914 struct udphdr *uh = &pd->hdr.udp;
4917 bzero(&key, sizeof(key));
4919 key.proto = IPPROTO_UDP;
4920 if (direction == PF_IN) { /* wire side, straight */
4921 PF_ACPY(&key.addr[0], pd->src, key.af);
4922 PF_ACPY(&key.addr[1], pd->dst, key.af);
4923 key.port[0] = uh->uh_sport;
4924 key.port[1] = uh->uh_dport;
4925 } else { /* stack side, reverse */
4926 PF_ACPY(&key.addr[1], pd->src, key.af);
4927 PF_ACPY(&key.addr[0], pd->dst, key.af);
4928 key.port[1] = uh->uh_sport;
4929 key.port[0] = uh->uh_dport;
4932 STATE_LOOKUP(kif, &key, direction, *state, pd);
4934 if (direction == (*state)->direction) {
4935 src = &(*state)->src;
4936 dst = &(*state)->dst;
4940 src = &(*state)->dst;
4941 dst = &(*state)->src;
4947 if (src->state < PFUDPS_SINGLE)
4948 pf_set_protostate(*state, psrc, PFUDPS_SINGLE);
4949 if (dst->state == PFUDPS_SINGLE)
4950 pf_set_protostate(*state, pdst, PFUDPS_MULTIPLE);
4952 /* update expire time */
4953 (*state)->expire = time_uptime;
4954 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
4955 (*state)->timeout = PFTM_UDP_MULTIPLE;
4957 (*state)->timeout = PFTM_UDP_SINGLE;
4959 /* translate source/destination address, if necessary */
4960 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4961 struct pf_state_key *nk = (*state)->key[pd->didx];
4963 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4964 nk->port[pd->sidx] != uh->uh_sport)
4965 pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum,
4966 &uh->uh_sum, &nk->addr[pd->sidx],
4967 nk->port[pd->sidx], 1, pd->af);
4969 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4970 nk->port[pd->didx] != uh->uh_dport)
4971 pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum,
4972 &uh->uh_sum, &nk->addr[pd->didx],
4973 nk->port[pd->didx], 1, pd->af);
4974 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
4981 pf_test_state_icmp(struct pf_kstate **state, int direction, struct pfi_kkif *kif,
4982 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
4984 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
4985 u_int16_t icmpid = 0, *icmpsum;
4986 u_int8_t icmptype, icmpcode;
4988 struct pf_state_key_cmp key;
4990 bzero(&key, sizeof(key));
4991 switch (pd->proto) {
4994 icmptype = pd->hdr.icmp.icmp_type;
4995 icmpcode = pd->hdr.icmp.icmp_code;
4996 icmpid = pd->hdr.icmp.icmp_id;
4997 icmpsum = &pd->hdr.icmp.icmp_cksum;
4999 if (icmptype == ICMP_UNREACH ||
5000 icmptype == ICMP_SOURCEQUENCH ||
5001 icmptype == ICMP_REDIRECT ||
5002 icmptype == ICMP_TIMXCEED ||
5003 icmptype == ICMP_PARAMPROB)
5008 case IPPROTO_ICMPV6:
5009 icmptype = pd->hdr.icmp6.icmp6_type;
5010 icmpcode = pd->hdr.icmp6.icmp6_code;
5011 icmpid = pd->hdr.icmp6.icmp6_id;
5012 icmpsum = &pd->hdr.icmp6.icmp6_cksum;
5014 if (icmptype == ICMP6_DST_UNREACH ||
5015 icmptype == ICMP6_PACKET_TOO_BIG ||
5016 icmptype == ICMP6_TIME_EXCEEDED ||
5017 icmptype == ICMP6_PARAM_PROB)
5025 * ICMP query/reply message not related to a TCP/UDP packet.
5026 * Search for an ICMP state.
5029 key.proto = pd->proto;
5030 key.port[0] = key.port[1] = icmpid;
5031 if (direction == PF_IN) { /* wire side, straight */
5032 PF_ACPY(&key.addr[0], pd->src, key.af);
5033 PF_ACPY(&key.addr[1], pd->dst, key.af);
5034 } else { /* stack side, reverse */
5035 PF_ACPY(&key.addr[1], pd->src, key.af);
5036 PF_ACPY(&key.addr[0], pd->dst, key.af);
5039 STATE_LOOKUP(kif, &key, direction, *state, pd);
5041 (*state)->expire = time_uptime;
5042 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
5044 /* translate source/destination address, if necessary */
5045 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5046 struct pf_state_key *nk = (*state)->key[pd->didx];
5051 if (PF_ANEQ(pd->src,
5052 &nk->addr[pd->sidx], AF_INET))
5053 pf_change_a(&saddr->v4.s_addr,
5055 nk->addr[pd->sidx].v4.s_addr, 0);
5057 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
5059 pf_change_a(&daddr->v4.s_addr,
5061 nk->addr[pd->didx].v4.s_addr, 0);
5064 pd->hdr.icmp.icmp_id) {
5065 pd->hdr.icmp.icmp_cksum =
5067 pd->hdr.icmp.icmp_cksum, icmpid,
5068 nk->port[pd->sidx], 0);
5069 pd->hdr.icmp.icmp_id =
5073 m_copyback(m, off, ICMP_MINLEN,
5074 (caddr_t )&pd->hdr.icmp);
5079 if (PF_ANEQ(pd->src,
5080 &nk->addr[pd->sidx], AF_INET6))
5082 &pd->hdr.icmp6.icmp6_cksum,
5083 &nk->addr[pd->sidx], 0);
5085 if (PF_ANEQ(pd->dst,
5086 &nk->addr[pd->didx], AF_INET6))
5088 &pd->hdr.icmp6.icmp6_cksum,
5089 &nk->addr[pd->didx], 0);
5091 m_copyback(m, off, sizeof(struct icmp6_hdr),
5092 (caddr_t )&pd->hdr.icmp6);
5101 * ICMP error message in response to a TCP/UDP packet.
5102 * Extract the inner TCP/UDP header and search for that state.
5105 struct pf_pdesc pd2;
5106 bzero(&pd2, sizeof pd2);
5111 struct ip6_hdr h2_6;
5118 /* Payload packet is from the opposite direction. */
5119 pd2.sidx = (direction == PF_IN) ? 1 : 0;
5120 pd2.didx = (direction == PF_IN) ? 0 : 1;
5124 /* offset of h2 in mbuf chain */
5125 ipoff2 = off + ICMP_MINLEN;
5127 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
5128 NULL, reason, pd2.af)) {
5129 DPFPRINTF(PF_DEBUG_MISC,
5130 ("pf: ICMP error message too short "
5135 * ICMP error messages don't refer to non-first
5138 if (h2.ip_off & htons(IP_OFFMASK)) {
5139 REASON_SET(reason, PFRES_FRAG);
5143 /* offset of protocol header that follows h2 */
5144 off2 = ipoff2 + (h2.ip_hl << 2);
5146 pd2.proto = h2.ip_p;
5147 pd2.src = (struct pf_addr *)&h2.ip_src;
5148 pd2.dst = (struct pf_addr *)&h2.ip_dst;
5149 pd2.ip_sum = &h2.ip_sum;
5154 ipoff2 = off + sizeof(struct icmp6_hdr);
5156 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
5157 NULL, reason, pd2.af)) {
5158 DPFPRINTF(PF_DEBUG_MISC,
5159 ("pf: ICMP error message too short "
5163 pd2.proto = h2_6.ip6_nxt;
5164 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
5165 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
5167 off2 = ipoff2 + sizeof(h2_6);
5169 switch (pd2.proto) {
5170 case IPPROTO_FRAGMENT:
5172 * ICMPv6 error messages for
5173 * non-first fragments
5175 REASON_SET(reason, PFRES_FRAG);
5178 case IPPROTO_HOPOPTS:
5179 case IPPROTO_ROUTING:
5180 case IPPROTO_DSTOPTS: {
5181 /* get next header and header length */
5182 struct ip6_ext opt6;
5184 if (!pf_pull_hdr(m, off2, &opt6,
5185 sizeof(opt6), NULL, reason,
5187 DPFPRINTF(PF_DEBUG_MISC,
5188 ("pf: ICMPv6 short opt\n"));
5191 if (pd2.proto == IPPROTO_AH)
5192 off2 += (opt6.ip6e_len + 2) * 4;
5194 off2 += (opt6.ip6e_len + 1) * 8;
5195 pd2.proto = opt6.ip6e_nxt;
5196 /* goto the next header */
5203 } while (!terminal);
5208 if (PF_ANEQ(pd->dst, pd2.src, pd->af)) {
5209 if (V_pf_status.debug >= PF_DEBUG_MISC) {
5210 printf("pf: BAD ICMP %d:%d outer dst: ",
5211 icmptype, icmpcode);
5212 pf_print_host(pd->src, 0, pd->af);
5214 pf_print_host(pd->dst, 0, pd->af);
5215 printf(" inner src: ");
5216 pf_print_host(pd2.src, 0, pd2.af);
5218 pf_print_host(pd2.dst, 0, pd2.af);
5221 REASON_SET(reason, PFRES_BADSTATE);
5225 switch (pd2.proto) {
5229 struct pf_state_peer *src, *dst;
5234 * Only the first 8 bytes of the TCP header can be
5235 * expected. Don't access any TCP header fields after
5236 * th_seq, an ackskew test is not possible.
5238 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
5240 DPFPRINTF(PF_DEBUG_MISC,
5241 ("pf: ICMP error message too short "
5247 key.proto = IPPROTO_TCP;
5248 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5249 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5250 key.port[pd2.sidx] = th.th_sport;
5251 key.port[pd2.didx] = th.th_dport;
5253 STATE_LOOKUP(kif, &key, direction, *state, pd);
5255 if (direction == (*state)->direction) {
5256 src = &(*state)->dst;
5257 dst = &(*state)->src;
5259 src = &(*state)->src;
5260 dst = &(*state)->dst;
5263 if (src->wscale && dst->wscale)
5264 dws = dst->wscale & PF_WSCALE_MASK;
5268 /* Demodulate sequence number */
5269 seq = ntohl(th.th_seq) - src->seqdiff;
5271 pf_change_a(&th.th_seq, icmpsum,
5276 if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
5277 (!SEQ_GEQ(src->seqhi, seq) ||
5278 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
5279 if (V_pf_status.debug >= PF_DEBUG_MISC) {
5280 printf("pf: BAD ICMP %d:%d ",
5281 icmptype, icmpcode);
5282 pf_print_host(pd->src, 0, pd->af);
5284 pf_print_host(pd->dst, 0, pd->af);
5286 pf_print_state(*state);
5287 printf(" seq=%u\n", seq);
5289 REASON_SET(reason, PFRES_BADSTATE);
5292 if (V_pf_status.debug >= PF_DEBUG_MISC) {
5293 printf("pf: OK ICMP %d:%d ",
5294 icmptype, icmpcode);
5295 pf_print_host(pd->src, 0, pd->af);
5297 pf_print_host(pd->dst, 0, pd->af);
5299 pf_print_state(*state);
5300 printf(" seq=%u\n", seq);
5304 /* translate source/destination address, if necessary */
5305 if ((*state)->key[PF_SK_WIRE] !=
5306 (*state)->key[PF_SK_STACK]) {
5307 struct pf_state_key *nk =
5308 (*state)->key[pd->didx];
5310 if (PF_ANEQ(pd2.src,
5311 &nk->addr[pd2.sidx], pd2.af) ||
5312 nk->port[pd2.sidx] != th.th_sport)
5313 pf_change_icmp(pd2.src, &th.th_sport,
5314 daddr, &nk->addr[pd2.sidx],
5315 nk->port[pd2.sidx], NULL,
5316 pd2.ip_sum, icmpsum,
5317 pd->ip_sum, 0, pd2.af);
5319 if (PF_ANEQ(pd2.dst,
5320 &nk->addr[pd2.didx], pd2.af) ||
5321 nk->port[pd2.didx] != th.th_dport)
5322 pf_change_icmp(pd2.dst, &th.th_dport,
5323 saddr, &nk->addr[pd2.didx],
5324 nk->port[pd2.didx], NULL,
5325 pd2.ip_sum, icmpsum,
5326 pd->ip_sum, 0, pd2.af);
5334 m_copyback(m, off, ICMP_MINLEN,
5335 (caddr_t )&pd->hdr.icmp);
5336 m_copyback(m, ipoff2, sizeof(h2),
5343 sizeof(struct icmp6_hdr),
5344 (caddr_t )&pd->hdr.icmp6);
5345 m_copyback(m, ipoff2, sizeof(h2_6),
5350 m_copyback(m, off2, 8, (caddr_t)&th);
5359 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
5360 NULL, reason, pd2.af)) {
5361 DPFPRINTF(PF_DEBUG_MISC,
5362 ("pf: ICMP error message too short "
5368 key.proto = IPPROTO_UDP;
5369 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5370 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5371 key.port[pd2.sidx] = uh.uh_sport;
5372 key.port[pd2.didx] = uh.uh_dport;
5374 STATE_LOOKUP(kif, &key, direction, *state, pd);
5376 /* translate source/destination address, if necessary */
5377 if ((*state)->key[PF_SK_WIRE] !=
5378 (*state)->key[PF_SK_STACK]) {
5379 struct pf_state_key *nk =
5380 (*state)->key[pd->didx];
5382 if (PF_ANEQ(pd2.src,
5383 &nk->addr[pd2.sidx], pd2.af) ||
5384 nk->port[pd2.sidx] != uh.uh_sport)
5385 pf_change_icmp(pd2.src, &uh.uh_sport,
5386 daddr, &nk->addr[pd2.sidx],
5387 nk->port[pd2.sidx], &uh.uh_sum,
5388 pd2.ip_sum, icmpsum,
5389 pd->ip_sum, 1, pd2.af);
5391 if (PF_ANEQ(pd2.dst,
5392 &nk->addr[pd2.didx], pd2.af) ||
5393 nk->port[pd2.didx] != uh.uh_dport)
5394 pf_change_icmp(pd2.dst, &uh.uh_dport,
5395 saddr, &nk->addr[pd2.didx],
5396 nk->port[pd2.didx], &uh.uh_sum,
5397 pd2.ip_sum, icmpsum,
5398 pd->ip_sum, 1, pd2.af);
5403 m_copyback(m, off, ICMP_MINLEN,
5404 (caddr_t )&pd->hdr.icmp);
5405 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5411 sizeof(struct icmp6_hdr),
5412 (caddr_t )&pd->hdr.icmp6);
5413 m_copyback(m, ipoff2, sizeof(h2_6),
5418 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
5424 case IPPROTO_ICMP: {
5427 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
5428 NULL, reason, pd2.af)) {
5429 DPFPRINTF(PF_DEBUG_MISC,
5430 ("pf: ICMP error message too short i"
5436 key.proto = IPPROTO_ICMP;
5437 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5438 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5439 key.port[0] = key.port[1] = iih.icmp_id;
5441 STATE_LOOKUP(kif, &key, direction, *state, pd);
5443 /* translate source/destination address, if necessary */
5444 if ((*state)->key[PF_SK_WIRE] !=
5445 (*state)->key[PF_SK_STACK]) {
5446 struct pf_state_key *nk =
5447 (*state)->key[pd->didx];
5449 if (PF_ANEQ(pd2.src,
5450 &nk->addr[pd2.sidx], pd2.af) ||
5451 nk->port[pd2.sidx] != iih.icmp_id)
5452 pf_change_icmp(pd2.src, &iih.icmp_id,
5453 daddr, &nk->addr[pd2.sidx],
5454 nk->port[pd2.sidx], NULL,
5455 pd2.ip_sum, icmpsum,
5456 pd->ip_sum, 0, AF_INET);
5458 if (PF_ANEQ(pd2.dst,
5459 &nk->addr[pd2.didx], pd2.af) ||
5460 nk->port[pd2.didx] != iih.icmp_id)
5461 pf_change_icmp(pd2.dst, &iih.icmp_id,
5462 saddr, &nk->addr[pd2.didx],
5463 nk->port[pd2.didx], NULL,
5464 pd2.ip_sum, icmpsum,
5465 pd->ip_sum, 0, AF_INET);
5467 m_copyback(m, off, ICMP_MINLEN, (caddr_t)&pd->hdr.icmp);
5468 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5469 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
5476 case IPPROTO_ICMPV6: {
5477 struct icmp6_hdr iih;
5479 if (!pf_pull_hdr(m, off2, &iih,
5480 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
5481 DPFPRINTF(PF_DEBUG_MISC,
5482 ("pf: ICMP error message too short "
5488 key.proto = IPPROTO_ICMPV6;
5489 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5490 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5491 key.port[0] = key.port[1] = iih.icmp6_id;
5493 STATE_LOOKUP(kif, &key, direction, *state, pd);
5495 /* translate source/destination address, if necessary */
5496 if ((*state)->key[PF_SK_WIRE] !=
5497 (*state)->key[PF_SK_STACK]) {
5498 struct pf_state_key *nk =
5499 (*state)->key[pd->didx];
5501 if (PF_ANEQ(pd2.src,
5502 &nk->addr[pd2.sidx], pd2.af) ||
5503 nk->port[pd2.sidx] != iih.icmp6_id)
5504 pf_change_icmp(pd2.src, &iih.icmp6_id,
5505 daddr, &nk->addr[pd2.sidx],
5506 nk->port[pd2.sidx], NULL,
5507 pd2.ip_sum, icmpsum,
5508 pd->ip_sum, 0, AF_INET6);
5510 if (PF_ANEQ(pd2.dst,
5511 &nk->addr[pd2.didx], pd2.af) ||
5512 nk->port[pd2.didx] != iih.icmp6_id)
5513 pf_change_icmp(pd2.dst, &iih.icmp6_id,
5514 saddr, &nk->addr[pd2.didx],
5515 nk->port[pd2.didx], NULL,
5516 pd2.ip_sum, icmpsum,
5517 pd->ip_sum, 0, AF_INET6);
5519 m_copyback(m, off, sizeof(struct icmp6_hdr),
5520 (caddr_t)&pd->hdr.icmp6);
5521 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
5522 m_copyback(m, off2, sizeof(struct icmp6_hdr),
5531 key.proto = pd2.proto;
5532 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5533 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5534 key.port[0] = key.port[1] = 0;
5536 STATE_LOOKUP(kif, &key, direction, *state, pd);
5538 /* translate source/destination address, if necessary */
5539 if ((*state)->key[PF_SK_WIRE] !=
5540 (*state)->key[PF_SK_STACK]) {
5541 struct pf_state_key *nk =
5542 (*state)->key[pd->didx];
5544 if (PF_ANEQ(pd2.src,
5545 &nk->addr[pd2.sidx], pd2.af))
5546 pf_change_icmp(pd2.src, NULL, daddr,
5547 &nk->addr[pd2.sidx], 0, NULL,
5548 pd2.ip_sum, icmpsum,
5549 pd->ip_sum, 0, pd2.af);
5551 if (PF_ANEQ(pd2.dst,
5552 &nk->addr[pd2.didx], pd2.af))
5553 pf_change_icmp(pd2.dst, NULL, saddr,
5554 &nk->addr[pd2.didx], 0, NULL,
5555 pd2.ip_sum, icmpsum,
5556 pd->ip_sum, 0, pd2.af);
5561 m_copyback(m, off, ICMP_MINLEN,
5562 (caddr_t)&pd->hdr.icmp);
5563 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5569 sizeof(struct icmp6_hdr),
5570 (caddr_t )&pd->hdr.icmp6);
5571 m_copyback(m, ipoff2, sizeof(h2_6),
5585 pf_test_state_other(struct pf_kstate **state, int direction, struct pfi_kkif *kif,
5586 struct mbuf *m, struct pf_pdesc *pd)
5588 struct pf_state_peer *src, *dst;
5589 struct pf_state_key_cmp key;
5592 bzero(&key, sizeof(key));
5594 key.proto = pd->proto;
5595 if (direction == PF_IN) {
5596 PF_ACPY(&key.addr[0], pd->src, key.af);
5597 PF_ACPY(&key.addr[1], pd->dst, key.af);
5598 key.port[0] = key.port[1] = 0;
5600 PF_ACPY(&key.addr[1], pd->src, key.af);
5601 PF_ACPY(&key.addr[0], pd->dst, key.af);
5602 key.port[1] = key.port[0] = 0;
5605 STATE_LOOKUP(kif, &key, direction, *state, pd);
5607 if (direction == (*state)->direction) {
5608 src = &(*state)->src;
5609 dst = &(*state)->dst;
5613 src = &(*state)->dst;
5614 dst = &(*state)->src;
5620 if (src->state < PFOTHERS_SINGLE)
5621 pf_set_protostate(*state, psrc, PFOTHERS_SINGLE);
5622 if (dst->state == PFOTHERS_SINGLE)
5623 pf_set_protostate(*state, pdst, PFOTHERS_MULTIPLE);
5625 /* update expire time */
5626 (*state)->expire = time_uptime;
5627 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
5628 (*state)->timeout = PFTM_OTHER_MULTIPLE;
5630 (*state)->timeout = PFTM_OTHER_SINGLE;
5632 /* translate source/destination address, if necessary */
5633 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5634 struct pf_state_key *nk = (*state)->key[pd->didx];
5636 KASSERT(nk, ("%s: nk is null", __func__));
5637 KASSERT(pd, ("%s: pd is null", __func__));
5638 KASSERT(pd->src, ("%s: pd->src is null", __func__));
5639 KASSERT(pd->dst, ("%s: pd->dst is null", __func__));
5643 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5644 pf_change_a(&pd->src->v4.s_addr,
5646 nk->addr[pd->sidx].v4.s_addr,
5649 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5650 pf_change_a(&pd->dst->v4.s_addr,
5652 nk->addr[pd->didx].v4.s_addr,
5659 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5660 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
5662 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5663 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
5671 * ipoff and off are measured from the start of the mbuf chain.
5672 * h must be at "ipoff" on the mbuf chain.
5675 pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
5676 u_short *actionp, u_short *reasonp, sa_family_t af)
5681 struct ip *h = mtod(m, struct ip *);
5682 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
5686 ACTION_SET(actionp, PF_PASS);
5688 ACTION_SET(actionp, PF_DROP);
5689 REASON_SET(reasonp, PFRES_FRAG);
5693 if (m->m_pkthdr.len < off + len ||
5694 ntohs(h->ip_len) < off + len) {
5695 ACTION_SET(actionp, PF_DROP);
5696 REASON_SET(reasonp, PFRES_SHORT);
5704 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
5706 if (m->m_pkthdr.len < off + len ||
5707 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
5708 (unsigned)(off + len)) {
5709 ACTION_SET(actionp, PF_DROP);
5710 REASON_SET(reasonp, PFRES_SHORT);
5717 m_copydata(m, off, len, p);
5722 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kkif *kif,
5728 * Skip check for addresses with embedded interface scope,
5729 * as they would always match anyway.
5731 if (af == AF_INET6 && IN6_IS_SCOPE_EMBED(&addr->v6))
5734 if (af != AF_INET && af != AF_INET6)
5737 /* Skip checks for ipsec interfaces */
5738 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
5741 ifp = (kif != NULL) ? kif->pfik_ifp : NULL;
5746 return (fib6_check_urpf(rtableid, &addr->v6, 0, NHR_NONE,
5751 return (fib4_check_urpf(rtableid, addr->v4, 0, NHR_NONE,
5761 pf_route(struct mbuf **m, struct pf_krule *r, int dir, struct ifnet *oifp,
5762 struct pf_kstate *s, struct pf_pdesc *pd, struct inpcb *inp)
5764 struct mbuf *m0, *m1;
5765 struct sockaddr_in dst;
5767 struct ifnet *ifp = NULL;
5768 struct pf_addr naddr;
5769 struct pf_ksrc_node *sn = NULL;
5771 uint16_t ip_len, ip_off;
5773 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5774 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5777 if ((pd->pf_mtag == NULL &&
5778 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5779 pd->pf_mtag->routed++ > 3) {
5785 if (r->rt == PF_DUPTO) {
5786 if ((pd->pf_mtag->flags & PF_DUPLICATED)) {
5788 ifp = r->rpool.cur->kif ?
5789 r->rpool.cur->kif->pfik_ifp : NULL;
5791 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5795 /* When the 2nd interface is not skipped */
5803 pd->pf_mtag->flags |= PF_DUPLICATED;
5804 if (((m0 = m_dup(*m, M_NOWAIT)) == NULL)) {
5811 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5819 ip = mtod(m0, struct ip *);
5821 bzero(&dst, sizeof(dst));
5822 dst.sin_family = AF_INET;
5823 dst.sin_len = sizeof(dst);
5824 dst.sin_addr = ip->ip_dst;
5826 bzero(&naddr, sizeof(naddr));
5828 if (TAILQ_EMPTY(&r->rpool.list)) {
5829 DPFPRINTF(PF_DEBUG_URGENT,
5830 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5834 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
5836 if (!PF_AZERO(&naddr, AF_INET))
5837 dst.sin_addr.s_addr = naddr.v4.s_addr;
5838 ifp = r->rpool.cur->kif ?
5839 r->rpool.cur->kif->pfik_ifp : NULL;
5841 if (!PF_AZERO(&s->rt_addr, AF_INET))
5842 dst.sin_addr.s_addr =
5843 s->rt_addr.v4.s_addr;
5844 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5851 if (pf_test(PF_OUT, 0, ifp, &m0, inp) != PF_PASS)
5853 else if (m0 == NULL)
5855 if (m0->m_len < sizeof(struct ip)) {
5856 DPFPRINTF(PF_DEBUG_URGENT,
5857 ("%s: m0->m_len < sizeof(struct ip)\n", __func__));
5860 ip = mtod(m0, struct ip *);
5863 if (ifp->if_flags & IFF_LOOPBACK)
5864 m0->m_flags |= M_SKIP_FIREWALL;
5866 ip_len = ntohs(ip->ip_len);
5867 ip_off = ntohs(ip->ip_off);
5869 /* Copied from FreeBSD 10.0-CURRENT ip_output. */
5870 m0->m_pkthdr.csum_flags |= CSUM_IP;
5871 if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) {
5872 m0 = mb_unmapped_to_ext(m0);
5875 in_delayed_cksum(m0);
5876 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
5878 #if defined(SCTP) || defined(SCTP_SUPPORT)
5879 if (m0->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) {
5880 m0 = mb_unmapped_to_ext(m0);
5883 sctp_delayed_cksum(m0, (uint32_t)(ip->ip_hl << 2));
5884 m0->m_pkthdr.csum_flags &= ~CSUM_SCTP;
5889 * If small enough for interface, or the interface will take
5890 * care of the fragmentation for us, we can just send directly.
5892 if (ip_len <= ifp->if_mtu ||
5893 (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0) {
5895 if (m0->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) {
5896 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
5897 m0->m_pkthdr.csum_flags &= ~CSUM_IP;
5899 m_clrprotoflags(m0); /* Avoid confusing lower layers. */
5900 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5904 /* Balk when DF bit is set or the interface didn't support TSO. */
5905 if ((ip_off & IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5907 KMOD_IPSTAT_INC(ips_cantfrag);
5908 if (r->rt != PF_DUPTO) {
5909 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
5916 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist);
5920 for (; m0; m0 = m1) {
5922 m0->m_nextpkt = NULL;
5924 m_clrprotoflags(m0);
5925 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5931 KMOD_IPSTAT_INC(ips_fragmented);
5934 if (r->rt != PF_DUPTO)
5949 pf_route6(struct mbuf **m, struct pf_krule *r, int dir, struct ifnet *oifp,
5950 struct pf_kstate *s, struct pf_pdesc *pd, struct inpcb *inp)
5953 struct sockaddr_in6 dst;
5954 struct ip6_hdr *ip6;
5955 struct ifnet *ifp = NULL;
5956 struct pf_addr naddr;
5957 struct pf_ksrc_node *sn = NULL;
5959 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5960 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5963 if ((pd->pf_mtag == NULL &&
5964 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5965 pd->pf_mtag->routed++ > 3) {
5971 if (r->rt == PF_DUPTO) {
5972 if ((pd->pf_mtag->flags & PF_DUPLICATED)) {
5974 ifp = r->rpool.cur->kif ?
5975 r->rpool.cur->kif->pfik_ifp : NULL;
5977 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5981 /* When the 2nd interface is not skipped */
5989 pd->pf_mtag->flags |= PF_DUPLICATED;
5990 if (((m0 = m_dup(*m, M_NOWAIT)) == NULL)) {
5997 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
6005 ip6 = mtod(m0, struct ip6_hdr *);
6007 bzero(&dst, sizeof(dst));
6008 dst.sin6_family = AF_INET6;
6009 dst.sin6_len = sizeof(dst);
6010 dst.sin6_addr = ip6->ip6_dst;
6012 bzero(&naddr, sizeof(naddr));
6014 if (TAILQ_EMPTY(&r->rpool.list)) {
6015 DPFPRINTF(PF_DEBUG_URGENT,
6016 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
6020 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
6022 if (!PF_AZERO(&naddr, AF_INET6))
6023 PF_ACPY((struct pf_addr *)&dst.sin6_addr,
6025 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
6027 if (!PF_AZERO(&s->rt_addr, AF_INET6))
6028 PF_ACPY((struct pf_addr *)&dst.sin6_addr,
6029 &s->rt_addr, AF_INET6);
6030 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
6040 if (pf_test6(PF_OUT, PFIL_FWD, ifp, &m0, inp) != PF_PASS)
6042 else if (m0 == NULL)
6044 if (m0->m_len < sizeof(struct ip6_hdr)) {
6045 DPFPRINTF(PF_DEBUG_URGENT,
6046 ("%s: m0->m_len < sizeof(struct ip6_hdr)\n",
6050 ip6 = mtod(m0, struct ip6_hdr *);
6053 if (ifp->if_flags & IFF_LOOPBACK)
6054 m0->m_flags |= M_SKIP_FIREWALL;
6056 if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA_IPV6 &
6057 ~ifp->if_hwassist) {
6058 uint32_t plen = m0->m_pkthdr.len - sizeof(*ip6);
6059 m0 = mb_unmapped_to_ext(m0);
6062 in6_delayed_cksum(m0, plen, sizeof(struct ip6_hdr));
6063 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA_IPV6;
6067 * If the packet is too large for the outgoing interface,
6068 * send back an icmp6 error.
6070 if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr))
6071 dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
6072 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu)
6073 nd6_output_ifp(ifp, ifp, m0, &dst, NULL);
6075 in6_ifstat_inc(ifp, ifs6_in_toobig);
6076 if (r->rt != PF_DUPTO)
6077 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
6083 if (r->rt != PF_DUPTO)
6097 * FreeBSD supports cksum offloads for the following drivers.
6098 * em(4), fxp(4), lge(4), nge(4), re(4), ti(4), txp(4), xl(4)
6100 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR :
6101 * network driver performed cksum including pseudo header, need to verify
6104 * network driver performed cksum, needs to additional pseudo header
6105 * cksum computation with partial csum_data(i.e. lack of H/W support for
6106 * pseudo header, for instance sk(4) and possibly gem(4))
6108 * After validating the cksum of packet, set both flag CSUM_DATA_VALID and
6109 * CSUM_PSEUDO_HDR in order to avoid recomputation of the cksum in upper
6111 * Also, set csum_data to 0xffff to force cksum validation.
6114 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af)
6120 if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
6122 if (m->m_pkthdr.len < off + len)
6127 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
6128 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
6129 sum = m->m_pkthdr.csum_data;
6131 ip = mtod(m, struct ip *);
6132 sum = in_pseudo(ip->ip_src.s_addr,
6133 ip->ip_dst.s_addr, htonl((u_short)len +
6134 m->m_pkthdr.csum_data + IPPROTO_TCP));
6141 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
6142 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
6143 sum = m->m_pkthdr.csum_data;
6145 ip = mtod(m, struct ip *);
6146 sum = in_pseudo(ip->ip_src.s_addr,
6147 ip->ip_dst.s_addr, htonl((u_short)len +
6148 m->m_pkthdr.csum_data + IPPROTO_UDP));
6156 case IPPROTO_ICMPV6:
6166 if (p == IPPROTO_ICMP) {
6171 sum = in_cksum(m, len);
6175 if (m->m_len < sizeof(struct ip))
6177 sum = in4_cksum(m, p, off, len);
6182 if (m->m_len < sizeof(struct ip6_hdr))
6184 sum = in6_cksum(m, p, off, len);
6195 KMOD_TCPSTAT_INC(tcps_rcvbadsum);
6200 KMOD_UDPSTAT_INC(udps_badsum);
6206 KMOD_ICMPSTAT_INC(icps_checksum);
6211 case IPPROTO_ICMPV6:
6213 KMOD_ICMP6STAT_INC(icp6s_checksum);
6220 if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
6221 m->m_pkthdr.csum_flags |=
6222 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
6223 m->m_pkthdr.csum_data = 0xffff;
6231 pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
6233 struct pfi_kkif *kif;
6234 u_short action, reason = 0, log = 0;
6235 struct mbuf *m = *m0;
6236 struct ip *h = NULL;
6237 struct m_tag *ipfwtag;
6238 struct pf_krule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
6239 struct pf_kstate *s = NULL;
6240 struct pf_kruleset *ruleset = NULL;
6242 int off, dirndx, pqid = 0;
6244 PF_RULES_RLOCK_TRACKER;
6245 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: bad direction %d\n", __func__, dir));
6248 if (!V_pf_status.running)
6251 memset(&pd, 0, sizeof(pd));
6253 kif = (struct pfi_kkif *)ifp->if_pf_kif;
6256 DPFPRINTF(PF_DEBUG_URGENT,
6257 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
6260 if (kif->pfik_flags & PFI_IFLAG_SKIP)
6263 if (m->m_flags & M_SKIP_FIREWALL)
6266 pd.pf_mtag = pf_find_mtag(m);
6270 if (__predict_false(ip_divert_ptr != NULL) &&
6271 ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
6272 struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1);
6273 if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) {
6274 if (pd.pf_mtag == NULL &&
6275 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6279 pd.pf_mtag->flags |= PF_PACKET_LOOPED;
6280 m_tag_delete(m, ipfwtag);
6282 if (pd.pf_mtag && pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) {
6283 m->m_flags |= M_FASTFWD_OURS;
6284 pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
6286 } else if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
6287 /* We do IP header normalization and packet reassembly here */
6291 m = *m0; /* pf_normalize messes with m0 */
6292 h = mtod(m, struct ip *);
6294 off = h->ip_hl << 2;
6295 if (off < (int)sizeof(struct ip)) {
6297 REASON_SET(&reason, PFRES_SHORT);
6302 pd.src = (struct pf_addr *)&h->ip_src;
6303 pd.dst = (struct pf_addr *)&h->ip_dst;
6304 pd.sport = pd.dport = NULL;
6305 pd.ip_sum = &h->ip_sum;
6306 pd.proto_sum = NULL;
6309 pd.sidx = (dir == PF_IN) ? 0 : 1;
6310 pd.didx = (dir == PF_IN) ? 1 : 0;
6312 pd.tos = h->ip_tos & ~IPTOS_ECN_MASK;
6313 pd.tot_len = ntohs(h->ip_len);
6315 /* handle fragments that didn't get reassembled by normalization */
6316 if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
6317 action = pf_test_fragment(&r, dir, kif, m, h,
6324 if (!pf_pull_hdr(m, off, &pd.hdr.tcp, sizeof(pd.hdr.tcp),
6325 &action, &reason, AF_INET)) {
6326 log = action != PF_PASS;
6329 pd.p_len = pd.tot_len - off - (pd.hdr.tcp.th_off << 2);
6331 pd.sport = &pd.hdr.tcp.th_sport;
6332 pd.dport = &pd.hdr.tcp.th_dport;
6334 /* Respond to SYN with a syncookie. */
6335 if ((pd.hdr.tcp.th_flags & (TH_SYN|TH_ACK|TH_RST)) == TH_SYN &&
6336 pd.dir == PF_IN && pf_synflood_check(&pd)) {
6337 pf_syncookie_send(m, off, &pd);
6342 if ((pd.hdr.tcp.th_flags & TH_ACK) && pd.p_len == 0)
6344 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6345 if (action == PF_DROP)
6347 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6349 if (action == PF_PASS) {
6350 if (V_pfsync_update_state_ptr != NULL)
6351 V_pfsync_update_state_ptr(s);
6355 } else if (s == NULL) {
6356 /* Validate remote SYN|ACK, re-create original SYN if
6358 if ((pd.hdr.tcp.th_flags & (TH_SYN|TH_ACK|TH_RST)) ==
6359 TH_ACK && pf_syncookie_validate(&pd) &&
6363 msyn = pf_syncookie_recreate_syn(h->ip_ttl,
6370 action = pf_test(dir, pflags, ifp, &msyn, inp);
6373 if (action == PF_PASS) {
6374 action = pf_test_state_tcp(&s, dir,
6375 kif, m, off, h, &pd, &reason);
6376 if (action != PF_PASS || s == NULL) {
6381 s->src.seqhi = ntohl(pd.hdr.tcp.th_ack)
6383 s->src.seqlo = ntohl(pd.hdr.tcp.th_seq)
6385 pf_set_protostate(s, PF_PEER_SRC,
6388 action = pf_synproxy(&pd, &s, &reason);
6389 if (action != PF_PASS)
6395 action = pf_test_rule(&r, &s, dir, kif, m, off,
6396 &pd, &a, &ruleset, inp);
6403 if (!pf_pull_hdr(m, off, &pd.hdr.udp, sizeof(pd.hdr.udp),
6404 &action, &reason, AF_INET)) {
6405 log = action != PF_PASS;
6408 if (pd.hdr.udp.uh_dport == 0 ||
6409 ntohs(pd.hdr.udp.uh_ulen) > m->m_pkthdr.len - off ||
6410 ntohs(pd.hdr.udp.uh_ulen) < sizeof(struct udphdr)) {
6412 REASON_SET(&reason, PFRES_SHORT);
6415 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6416 if (action == PF_PASS) {
6417 if (V_pfsync_update_state_ptr != NULL)
6418 V_pfsync_update_state_ptr(s);
6422 } else if (s == NULL)
6423 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6428 case IPPROTO_ICMP: {
6429 if (!pf_pull_hdr(m, off, &pd.hdr.icmp, ICMP_MINLEN,
6430 &action, &reason, AF_INET)) {
6431 log = action != PF_PASS;
6434 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
6436 if (action == PF_PASS) {
6437 if (V_pfsync_update_state_ptr != NULL)
6438 V_pfsync_update_state_ptr(s);
6442 } else if (s == NULL)
6443 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6449 case IPPROTO_ICMPV6: {
6451 DPFPRINTF(PF_DEBUG_MISC,
6452 ("pf: dropping IPv4 packet with ICMPv6 payload\n"));
6458 action = pf_test_state_other(&s, dir, kif, m, &pd);
6459 if (action == PF_PASS) {
6460 if (V_pfsync_update_state_ptr != NULL)
6461 V_pfsync_update_state_ptr(s);
6465 } else if (s == NULL)
6466 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6473 if (action == PF_PASS && h->ip_hl > 5 &&
6474 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6476 REASON_SET(&reason, PFRES_IPOPTIONS);
6478 DPFPRINTF(PF_DEBUG_MISC,
6479 ("pf: dropping packet with ip options\n"));
6482 if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
6484 REASON_SET(&reason, PFRES_MEMORY);
6486 if (r->rtableid >= 0)
6487 M_SETFIB(m, r->rtableid);
6489 if (r->scrub_flags & PFSTATE_SETPRIO) {
6490 if (pd.tos & IPTOS_LOWDELAY)
6492 if (vlan_set_pcp(m, r->set_prio[pqid])) {
6494 REASON_SET(&reason, PFRES_MEMORY);
6496 DPFPRINTF(PF_DEBUG_MISC,
6497 ("pf: failed to allocate 802.1q mtag\n"));
6503 pd.act.pqid = s->pqid;
6504 pd.act.qid = s->qid;
6505 } else if (r->qid) {
6506 pd.act.pqid = r->pqid;
6507 pd.act.qid = r->qid;
6509 if (action == PF_PASS && pd.act.qid) {
6510 if (pd.pf_mtag == NULL &&
6511 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6513 REASON_SET(&reason, PFRES_MEMORY);
6516 pd.pf_mtag->qid_hash = pf_state_hash(s);
6517 if (pqid || (pd.tos & IPTOS_LOWDELAY))
6518 pd.pf_mtag->qid = pd.act.pqid;
6520 pd.pf_mtag->qid = pd.act.qid;
6521 /* Add hints for ecn. */
6522 pd.pf_mtag->hdr = h;
6528 * connections redirected to loopback should not match sockets
6529 * bound specifically to loopback due to security implications,
6530 * see tcp_input() and in_pcblookup_listen().
6532 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6533 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6534 (s->nat_rule.ptr->action == PF_RDR ||
6535 s->nat_rule.ptr->action == PF_BINAT) &&
6536 IN_LOOPBACK(ntohl(pd.dst->v4.s_addr)))
6537 m->m_flags |= M_SKIP_FIREWALL;
6539 if (__predict_false(ip_divert_ptr != NULL) && action == PF_PASS &&
6540 r->divert.port && !PACKET_LOOPED(&pd)) {
6541 ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0,
6542 sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
6543 if (ipfwtag != NULL) {
6544 ((struct ipfw_rule_ref *)(ipfwtag+1))->info =
6545 ntohs(r->divert.port);
6546 ((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir;
6551 m_tag_prepend(m, ipfwtag);
6552 if (m->m_flags & M_FASTFWD_OURS) {
6553 if (pd.pf_mtag == NULL &&
6554 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6556 REASON_SET(&reason, PFRES_MEMORY);
6558 DPFPRINTF(PF_DEBUG_MISC,
6559 ("pf: failed to allocate tag\n"));
6561 pd.pf_mtag->flags |=
6562 PF_FASTFWD_OURS_PRESENT;
6563 m->m_flags &= ~M_FASTFWD_OURS;
6566 ip_divert_ptr(*m0, dir == PF_IN);
6571 /* XXX: ipfw has the same behaviour! */
6573 REASON_SET(&reason, PFRES_MEMORY);
6575 DPFPRINTF(PF_DEBUG_MISC,
6576 ("pf: failed to allocate divert tag\n"));
6581 struct pf_krule *lr;
6583 if (s != NULL && s->nat_rule.ptr != NULL &&
6584 s->nat_rule.ptr->log & PF_LOG_ALL)
6585 lr = s->nat_rule.ptr;
6588 PFLOG_PACKET(kif, m, AF_INET, dir, reason, lr, a, ruleset, &pd,
6592 pf_counter_u64_critical_enter();
6593 pf_counter_u64_add_protected(&kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS],
6595 pf_counter_u64_add_protected(&kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS],
6598 if (action == PF_PASS || r->action == PF_DROP) {
6599 dirndx = (dir == PF_OUT);
6600 pf_counter_u64_add_protected(&r->packets[dirndx], 1);
6601 pf_counter_u64_add_protected(&r->bytes[dirndx], pd.tot_len);
6603 pf_counter_u64_add_protected(&a->packets[dirndx], 1);
6604 pf_counter_u64_add_protected(&a->bytes[dirndx], pd.tot_len);
6607 if (s->nat_rule.ptr != NULL) {
6608 pf_counter_u64_add_protected(&s->nat_rule.ptr->packets[dirndx],
6610 pf_counter_u64_add_protected(&s->nat_rule.ptr->bytes[dirndx],
6613 if (s->src_node != NULL) {
6614 counter_u64_add(s->src_node->packets[dirndx],
6616 counter_u64_add(s->src_node->bytes[dirndx],
6619 if (s->nat_src_node != NULL) {
6620 counter_u64_add(s->nat_src_node->packets[dirndx],
6622 counter_u64_add(s->nat_src_node->bytes[dirndx],
6625 dirndx = (dir == s->direction) ? 0 : 1;
6626 s->packets[dirndx]++;
6627 s->bytes[dirndx] += pd.tot_len;
6630 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6631 if (nr != NULL && r == &V_pf_default_rule)
6633 if (tr->src.addr.type == PF_ADDR_TABLE)
6634 pfr_update_stats(tr->src.addr.p.tbl,
6635 (s == NULL) ? pd.src :
6636 &s->key[(s->direction == PF_IN)]->
6637 addr[(s->direction == PF_OUT)],
6638 pd.af, pd.tot_len, dir == PF_OUT,
6639 r->action == PF_PASS, tr->src.neg);
6640 if (tr->dst.addr.type == PF_ADDR_TABLE)
6641 pfr_update_stats(tr->dst.addr.p.tbl,
6642 (s == NULL) ? pd.dst :
6643 &s->key[(s->direction == PF_IN)]->
6644 addr[(s->direction == PF_IN)],
6645 pd.af, pd.tot_len, dir == PF_OUT,
6646 r->action == PF_PASS, tr->dst.neg);
6648 pf_counter_u64_critical_exit();
6651 case PF_SYNPROXY_DROP:
6662 /* pf_route() returns unlocked. */
6664 pf_route(m0, r, dir, kif->pfik_ifp, s, &pd, inp);
6670 SDT_PROBE4(pf, ip, test, done, action, reason, r, s);
6681 pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
6683 struct pfi_kkif *kif;
6684 u_short action, reason = 0, log = 0;
6685 struct mbuf *m = *m0, *n = NULL;
6687 struct ip6_hdr *h = NULL;
6688 struct pf_krule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
6689 struct pf_kstate *s = NULL;
6690 struct pf_kruleset *ruleset = NULL;
6692 int off, terminal = 0, dirndx, rh_cnt = 0, pqid = 0;
6694 PF_RULES_RLOCK_TRACKER;
6695 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: bad direction %d\n", __func__, dir));
6698 if (!V_pf_status.running)
6701 memset(&pd, 0, sizeof(pd));
6702 pd.pf_mtag = pf_find_mtag(m);
6704 if (pd.pf_mtag && pd.pf_mtag->flags & PF_TAG_GENERATED)
6707 kif = (struct pfi_kkif *)ifp->if_pf_kif;
6709 DPFPRINTF(PF_DEBUG_URGENT,
6710 ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
6713 if (kif->pfik_flags & PFI_IFLAG_SKIP)
6716 if (m->m_flags & M_SKIP_FIREWALL)
6721 /* We do IP header normalization and packet reassembly here */
6722 if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
6726 m = *m0; /* pf_normalize messes with m0 */
6727 h = mtod(m, struct ip6_hdr *);
6730 * we do not support jumbogram. if we keep going, zero ip6_plen
6731 * will do something bad, so drop the packet for now.
6733 if (htons(h->ip6_plen) == 0) {
6735 REASON_SET(&reason, PFRES_NORM); /*XXX*/
6739 pd.src = (struct pf_addr *)&h->ip6_src;
6740 pd.dst = (struct pf_addr *)&h->ip6_dst;
6741 pd.sport = pd.dport = NULL;
6743 pd.proto_sum = NULL;
6745 pd.sidx = (dir == PF_IN) ? 0 : 1;
6746 pd.didx = (dir == PF_IN) ? 1 : 0;
6748 pd.tos = IPV6_DSCP(h);
6749 pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
6751 off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
6752 pd.proto = h->ip6_nxt;
6755 case IPPROTO_FRAGMENT:
6756 action = pf_test_fragment(&r, dir, kif, m, h,
6758 if (action == PF_DROP)
6759 REASON_SET(&reason, PFRES_FRAG);
6761 case IPPROTO_ROUTING: {
6762 struct ip6_rthdr rthdr;
6765 DPFPRINTF(PF_DEBUG_MISC,
6766 ("pf: IPv6 more than one rthdr\n"));
6768 REASON_SET(&reason, PFRES_IPOPTIONS);
6772 if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
6774 DPFPRINTF(PF_DEBUG_MISC,
6775 ("pf: IPv6 short rthdr\n"));
6777 REASON_SET(&reason, PFRES_SHORT);
6781 if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
6782 DPFPRINTF(PF_DEBUG_MISC,
6783 ("pf: IPv6 rthdr0\n"));
6785 REASON_SET(&reason, PFRES_IPOPTIONS);
6792 case IPPROTO_HOPOPTS:
6793 case IPPROTO_DSTOPTS: {
6794 /* get next header and header length */
6795 struct ip6_ext opt6;
6797 if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
6798 NULL, &reason, pd.af)) {
6799 DPFPRINTF(PF_DEBUG_MISC,
6800 ("pf: IPv6 short opt\n"));
6805 if (pd.proto == IPPROTO_AH)
6806 off += (opt6.ip6e_len + 2) * 4;
6808 off += (opt6.ip6e_len + 1) * 8;
6809 pd.proto = opt6.ip6e_nxt;
6810 /* goto the next header */
6817 } while (!terminal);
6819 /* if there's no routing header, use unmodified mbuf for checksumming */
6825 if (!pf_pull_hdr(m, off, &pd.hdr.tcp, sizeof(pd.hdr.tcp),
6826 &action, &reason, AF_INET6)) {
6827 log = action != PF_PASS;
6830 pd.p_len = pd.tot_len - off - (pd.hdr.tcp.th_off << 2);
6831 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6832 if (action == PF_DROP)
6834 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6836 if (action == PF_PASS) {
6837 if (V_pfsync_update_state_ptr != NULL)
6838 V_pfsync_update_state_ptr(s);
6842 } else if (s == NULL)
6843 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6849 if (!pf_pull_hdr(m, off, &pd.hdr.udp, sizeof(pd.hdr.udp),
6850 &action, &reason, AF_INET6)) {
6851 log = action != PF_PASS;
6854 if (pd.hdr.udp.uh_dport == 0 ||
6855 ntohs(pd.hdr.udp.uh_ulen) > m->m_pkthdr.len - off ||
6856 ntohs(pd.hdr.udp.uh_ulen) < sizeof(struct udphdr)) {
6858 REASON_SET(&reason, PFRES_SHORT);
6861 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6862 if (action == PF_PASS) {
6863 if (V_pfsync_update_state_ptr != NULL)
6864 V_pfsync_update_state_ptr(s);
6868 } else if (s == NULL)
6869 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6874 case IPPROTO_ICMP: {
6876 DPFPRINTF(PF_DEBUG_MISC,
6877 ("pf: dropping IPv6 packet with ICMPv4 payload\n"));
6881 case IPPROTO_ICMPV6: {
6882 if (!pf_pull_hdr(m, off, &pd.hdr.icmp6, sizeof(pd.hdr.icmp6),
6883 &action, &reason, AF_INET6)) {
6884 log = action != PF_PASS;
6887 action = pf_test_state_icmp(&s, dir, kif,
6888 m, off, h, &pd, &reason);
6889 if (action == PF_PASS) {
6890 if (V_pfsync_update_state_ptr != NULL)
6891 V_pfsync_update_state_ptr(s);
6895 } else if (s == NULL)
6896 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6902 action = pf_test_state_other(&s, dir, kif, m, &pd);
6903 if (action == PF_PASS) {
6904 if (V_pfsync_update_state_ptr != NULL)
6905 V_pfsync_update_state_ptr(s);
6909 } else if (s == NULL)
6910 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6922 /* handle dangerous IPv6 extension headers. */
6923 if (action == PF_PASS && rh_cnt &&
6924 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6926 REASON_SET(&reason, PFRES_IPOPTIONS);
6928 DPFPRINTF(PF_DEBUG_MISC,
6929 ("pf: dropping packet with dangerous v6 headers\n"));
6932 if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
6934 REASON_SET(&reason, PFRES_MEMORY);
6936 if (r->rtableid >= 0)
6937 M_SETFIB(m, r->rtableid);
6939 if (r->scrub_flags & PFSTATE_SETPRIO) {
6940 if (pd.tos & IPTOS_LOWDELAY)
6942 if (vlan_set_pcp(m, r->set_prio[pqid])) {
6944 REASON_SET(&reason, PFRES_MEMORY);
6946 DPFPRINTF(PF_DEBUG_MISC,
6947 ("pf: failed to allocate 802.1q mtag\n"));
6953 pd.act.pqid = s->pqid;
6954 pd.act.qid = s->qid;
6955 } else if (r->qid) {
6956 pd.act.pqid = r->pqid;
6957 pd.act.qid = r->qid;
6959 if (action == PF_PASS && pd.act.qid) {
6960 if (pd.pf_mtag == NULL &&
6961 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6963 REASON_SET(&reason, PFRES_MEMORY);
6966 pd.pf_mtag->qid_hash = pf_state_hash(s);
6967 if (pd.tos & IPTOS_LOWDELAY)
6968 pd.pf_mtag->qid = pd.act.pqid;
6970 pd.pf_mtag->qid = pd.act.qid;
6971 /* Add hints for ecn. */
6972 pd.pf_mtag->hdr = h;
6977 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6978 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6979 (s->nat_rule.ptr->action == PF_RDR ||
6980 s->nat_rule.ptr->action == PF_BINAT) &&
6981 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
6982 m->m_flags |= M_SKIP_FIREWALL;
6984 /* XXX: Anybody working on it?! */
6986 printf("pf: divert(9) is not supported for IPv6\n");
6989 struct pf_krule *lr;
6991 if (s != NULL && s->nat_rule.ptr != NULL &&
6992 s->nat_rule.ptr->log & PF_LOG_ALL)
6993 lr = s->nat_rule.ptr;
6996 PFLOG_PACKET(kif, m, AF_INET6, dir, reason, lr, a, ruleset,
7000 pf_counter_u64_critical_enter();
7001 pf_counter_u64_add_protected(&kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS],
7003 pf_counter_u64_add_protected(&kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS],
7006 if (action == PF_PASS || r->action == PF_DROP) {
7007 dirndx = (dir == PF_OUT);
7008 pf_counter_u64_add_protected(&r->packets[dirndx], 1);
7009 pf_counter_u64_add_protected(&r->bytes[dirndx], pd.tot_len);
7011 pf_counter_u64_add_protected(&a->packets[dirndx], 1);
7012 pf_counter_u64_add_protected(&a->bytes[dirndx], pd.tot_len);
7015 if (s->nat_rule.ptr != NULL) {
7016 pf_counter_u64_add_protected(&s->nat_rule.ptr->packets[dirndx],
7018 pf_counter_u64_add_protected(&s->nat_rule.ptr->bytes[dirndx],
7021 if (s->src_node != NULL) {
7022 counter_u64_add(s->src_node->packets[dirndx],
7024 counter_u64_add(s->src_node->bytes[dirndx],
7027 if (s->nat_src_node != NULL) {
7028 counter_u64_add(s->nat_src_node->packets[dirndx],
7030 counter_u64_add(s->nat_src_node->bytes[dirndx],
7033 dirndx = (dir == s->direction) ? 0 : 1;
7034 s->packets[dirndx]++;
7035 s->bytes[dirndx] += pd.tot_len;
7038 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
7039 if (nr != NULL && r == &V_pf_default_rule)
7041 if (tr->src.addr.type == PF_ADDR_TABLE)
7042 pfr_update_stats(tr->src.addr.p.tbl,
7043 (s == NULL) ? pd.src :
7044 &s->key[(s->direction == PF_IN)]->addr[0],
7045 pd.af, pd.tot_len, dir == PF_OUT,
7046 r->action == PF_PASS, tr->src.neg);
7047 if (tr->dst.addr.type == PF_ADDR_TABLE)
7048 pfr_update_stats(tr->dst.addr.p.tbl,
7049 (s == NULL) ? pd.dst :
7050 &s->key[(s->direction == PF_IN)]->addr[1],
7051 pd.af, pd.tot_len, dir == PF_OUT,
7052 r->action == PF_PASS, tr->dst.neg);
7054 pf_counter_u64_critical_exit();
7057 case PF_SYNPROXY_DROP:
7068 /* pf_route6() returns unlocked. */
7070 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd, inp);
7079 /* If reassembled packet passed, create new fragments. */
7080 if (action == PF_PASS && *m0 && (pflags & PFIL_FWD) &&
7081 (mtag = m_tag_find(m, PF_REASSEMBLED, NULL)) != NULL)
7082 action = pf_refragment6(ifp, m0, mtag);
7084 SDT_PROBE4(pf, ip, test6, done, action, reason, r, s);