2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2001 Daniel Hartmeier
5 * Copyright (c) 2002 - 2008 Henning Brauer
6 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
13 * - Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
33 * Effort sponsored in part by the Defense Advanced Research Projects
34 * Agency (DARPA) and Air Force Research Laboratory, Air Force
35 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
37 * $OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
45 #include "opt_inet6.h"
49 #include <sys/param.h>
51 #include <sys/endian.h>
52 #include <sys/gsb_crc32.h>
54 #include <sys/interrupt.h>
55 #include <sys/kernel.h>
56 #include <sys/kthread.h>
57 #include <sys/limits.h>
60 #include <sys/random.h>
61 #include <sys/refcount.h>
63 #include <sys/socket.h>
64 #include <sys/sysctl.h>
65 #include <sys/taskqueue.h>
66 #include <sys/ucred.h>
69 #include <net/if_var.h>
70 #include <net/if_types.h>
71 #include <net/if_vlan_var.h>
72 #include <net/route.h>
73 #include <net/route/nhop.h>
77 #include <net/pfvar.h>
78 #include <net/if_pflog.h>
79 #include <net/if_pfsync.h>
81 #include <netinet/in_pcb.h>
82 #include <netinet/in_var.h>
83 #include <netinet/in_fib.h>
84 #include <netinet/ip.h>
85 #include <netinet/ip_fw.h>
86 #include <netinet/ip_icmp.h>
87 #include <netinet/icmp_var.h>
88 #include <netinet/ip_var.h>
89 #include <netinet/tcp.h>
90 #include <netinet/tcp_fsm.h>
91 #include <netinet/tcp_seq.h>
92 #include <netinet/tcp_timer.h>
93 #include <netinet/tcp_var.h>
94 #include <netinet/udp.h>
95 #include <netinet/udp_var.h>
98 #include <netinet/ip_dummynet.h>
100 #include <netpfil/ipfw/dn_heap.h>
101 #include <netpfil/ipfw/ip_fw_private.h>
102 #include <netpfil/ipfw/ip_dn_private.h>
105 #include <netinet/ip6.h>
106 #include <netinet/icmp6.h>
107 #include <netinet6/nd6.h>
108 #include <netinet6/ip6_var.h>
109 #include <netinet6/in6_pcb.h>
110 #include <netinet6/in6_fib.h>
111 #include <netinet6/scope6_var.h>
114 #if defined(SCTP) || defined(SCTP_SUPPORT)
115 #include <netinet/sctp_crc32.h>
118 #include <machine/in_cksum.h>
119 #include <security/mac/mac_framework.h>
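/*
 * Debug printf gated on the configured pf debug level.  Note that
 * the macro expands to a bare "if", so it must be braced when used
 * as the body of an if/else.
 */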
121 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
123 SDT_PROVIDER_DEFINE(pf);
124 SDT_PROBE_DEFINE4(pf, ip, test, done, "int", "int", "struct pf_krule *",
125 "struct pf_kstate *");
126 SDT_PROBE_DEFINE4(pf, ip, test6, done, "int", "int", "struct pf_krule *",
127 "struct pf_kstate *");
128 SDT_PROBE_DEFINE5(pf, ip, state, lookup, "struct pfi_kkif *",
129 "struct pf_state_key_cmp *", "int", "struct pf_pdesc *",
130 "struct pf_kstate *");
137 VNET_DEFINE(struct pf_altqqueue, pf_altqs[4]);
138 VNET_DEFINE(struct pf_kpalist, pf_pabuf);
139 VNET_DEFINE(struct pf_altqqueue *, pf_altqs_active);
140 VNET_DEFINE(struct pf_altqqueue *, pf_altq_ifs_active);
141 VNET_DEFINE(struct pf_altqqueue *, pf_altqs_inactive);
142 VNET_DEFINE(struct pf_altqqueue *, pf_altq_ifs_inactive);
143 VNET_DEFINE(struct pf_kstatus, pf_status);
145 VNET_DEFINE(u_int32_t, ticket_altqs_active);
146 VNET_DEFINE(u_int32_t, ticket_altqs_inactive);
147 VNET_DEFINE(int, altqs_inactive_open);
148 VNET_DEFINE(u_int32_t, ticket_pabuf);
150 VNET_DEFINE(MD5_CTX, pf_tcp_secret_ctx);
151 #define V_pf_tcp_secret_ctx VNET(pf_tcp_secret_ctx)
152 VNET_DEFINE(u_char, pf_tcp_secret[16]);
153 #define V_pf_tcp_secret VNET(pf_tcp_secret)
154 VNET_DEFINE(int, pf_tcp_secret_init);
155 #define V_pf_tcp_secret_init VNET(pf_tcp_secret_init)
156 VNET_DEFINE(int, pf_tcp_iss_off);
157 #define V_pf_tcp_iss_off VNET(pf_tcp_iss_off)
158 VNET_DECLARE(int, pf_vnet_active);
159 #define V_pf_vnet_active VNET(pf_vnet_active)
161 VNET_DEFINE_STATIC(uint32_t, pf_purge_idx);
162 #define V_pf_purge_idx VNET(pf_purge_idx)
164 #ifdef PF_WANT_32_TO_64_COUNTER
165 VNET_DEFINE_STATIC(uint32_t, pf_counter_periodic_iter);
166 #define V_pf_counter_periodic_iter VNET(pf_counter_periodic_iter)
168 VNET_DEFINE(struct allrulelist_head, pf_allrulelist);
169 VNET_DEFINE(size_t, pf_allrulecount);
170 VNET_DEFINE(struct pf_krule *, pf_rulemarker);
174 * Queue for pf_intr() sends.
176 static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations");
177 struct pf_send_entry {
178 STAILQ_ENTRY(pf_send_entry) pfse_next;
193 STAILQ_HEAD(pf_send_head, pf_send_entry);
194 VNET_DEFINE_STATIC(struct pf_send_head, pf_sendqueue);
195 #define V_pf_sendqueue VNET(pf_sendqueue)
197 static struct mtx_padalign pf_sendqueue_mtx;
198 MTX_SYSINIT(pf_sendqueue_mtx, &pf_sendqueue_mtx, "pf send queue", MTX_DEF);
199 #define PF_SENDQ_LOCK() mtx_lock(&pf_sendqueue_mtx)
200 #define PF_SENDQ_UNLOCK() mtx_unlock(&pf_sendqueue_mtx)
203 * Queue for pf_overload_task() tasks.
205 struct pf_overload_entry {
206 SLIST_ENTRY(pf_overload_entry) next;
210 struct pf_krule *rule;
213 SLIST_HEAD(pf_overload_head, pf_overload_entry);
214 VNET_DEFINE_STATIC(struct pf_overload_head, pf_overloadqueue);
215 #define V_pf_overloadqueue VNET(pf_overloadqueue)
216 VNET_DEFINE_STATIC(struct task, pf_overloadtask);
217 #define V_pf_overloadtask VNET(pf_overloadtask)
219 static struct mtx_padalign pf_overloadqueue_mtx;
220 MTX_SYSINIT(pf_overloadqueue_mtx, &pf_overloadqueue_mtx,
221 "pf overload/flush queue", MTX_DEF);
222 #define PF_OVERLOADQ_LOCK() mtx_lock(&pf_overloadqueue_mtx)
223 #define PF_OVERLOADQ_UNLOCK() mtx_unlock(&pf_overloadqueue_mtx)
225 VNET_DEFINE(struct pf_krulequeue, pf_unlinked_rules);
226 struct mtx_padalign pf_unlnkdrules_mtx;
227 MTX_SYSINIT(pf_unlnkdrules_mtx, &pf_unlnkdrules_mtx, "pf unlinked rules",
230 struct mtx_padalign pf_table_stats_lock;
231 MTX_SYSINIT(pf_table_stats_lock, &pf_table_stats_lock, "pf table stats",
234 VNET_DEFINE_STATIC(uma_zone_t, pf_sources_z);
235 #define V_pf_sources_z VNET(pf_sources_z)
236 uma_zone_t pf_mtag_z;
237 VNET_DEFINE(uma_zone_t, pf_state_z);
238 VNET_DEFINE(uma_zone_t, pf_state_key_z);
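/*
 * State IDs are allocated per-CPU without locking: the top
 * PFID_CPUBITS bits of the 64-bit ID identify the allocating CPU,
 * the remaining bits carry a per-CPU counter (see pf_state_insert()).
 */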
240 VNET_DEFINE(uint64_t, pf_stateid[MAXCPU]);
241 #define PFID_CPUBITS 8
242 #define PFID_CPUSHIFT (sizeof(uint64_t) * NBBY - PFID_CPUBITS)
243 #define PFID_CPUMASK ((uint64_t)((1 << PFID_CPUBITS) - 1) << PFID_CPUSHIFT)
244 #define PFID_MAXID (~PFID_CPUMASK)
245 CTASSERT((1 << PFID_CPUBITS) >= MAXCPU);
247 static void pf_src_tree_remove_state(struct pf_kstate *);
248 static void pf_init_threshold(struct pf_threshold *, u_int32_t,
250 static void pf_add_threshold(struct pf_threshold *);
251 static int pf_check_threshold(struct pf_threshold *);
253 static void pf_change_ap(struct mbuf *, struct pf_addr *, u_int16_t *,
254 u_int16_t *, u_int16_t *, struct pf_addr *,
255 u_int16_t, u_int8_t, sa_family_t);
256 static int pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
257 struct tcphdr *, struct pf_state_peer *);
258 static void pf_change_icmp(struct pf_addr *, u_int16_t *,
259 struct pf_addr *, struct pf_addr *, u_int16_t,
260 u_int16_t *, u_int16_t *, u_int16_t *,
261 u_int16_t *, u_int8_t, sa_family_t);
262 static void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
263 sa_family_t, struct pf_krule *);
264 static void pf_detach_state(struct pf_kstate *);
265 static int pf_state_key_attach(struct pf_state_key *,
266 struct pf_state_key *, struct pf_kstate *);
267 static void pf_state_key_detach(struct pf_kstate *, int);
268 static int pf_state_key_ctor(void *, int, void *, int);
269 static u_int32_t pf_tcp_iss(struct pf_pdesc *);
270 void pf_rule_to_actions(struct pf_krule *,
271 struct pf_rule_actions *);
272 static int pf_test_rule(struct pf_krule **, struct pf_kstate **,
273 int, struct pfi_kkif *, struct mbuf *, int,
274 struct pf_pdesc *, struct pf_krule **,
275 struct pf_kruleset **, struct inpcb *);
276 static int pf_create_state(struct pf_krule *, struct pf_krule *,
277 struct pf_krule *, struct pf_pdesc *,
278 struct pf_ksrc_node *, struct pf_state_key *,
279 struct pf_state_key *, struct mbuf *, int,
280 u_int16_t, u_int16_t, int *, struct pfi_kkif *,
281 struct pf_kstate **, int, u_int16_t, u_int16_t,
283 static int pf_test_fragment(struct pf_krule **, int,
284 struct pfi_kkif *, struct mbuf *, void *,
285 struct pf_pdesc *, struct pf_krule **,
286 struct pf_kruleset **);
287 static int pf_tcp_track_full(struct pf_kstate **,
288 struct pfi_kkif *, struct mbuf *, int,
289 struct pf_pdesc *, u_short *, int *);
290 static int pf_tcp_track_sloppy(struct pf_kstate **,
291 struct pf_pdesc *, u_short *);
292 static int pf_test_state_tcp(struct pf_kstate **, int,
293 struct pfi_kkif *, struct mbuf *, int,
294 void *, struct pf_pdesc *, u_short *);
295 static int pf_test_state_udp(struct pf_kstate **, int,
296 struct pfi_kkif *, struct mbuf *, int,
297 void *, struct pf_pdesc *);
298 static int pf_test_state_icmp(struct pf_kstate **, int,
299 struct pfi_kkif *, struct mbuf *, int,
300 void *, struct pf_pdesc *, u_short *);
301 static int pf_test_state_other(struct pf_kstate **, int,
302 struct pfi_kkif *, struct mbuf *, struct pf_pdesc *);
303 static u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t,
305 static int pf_check_proto_cksum(struct mbuf *, int, int,
306 u_int8_t, sa_family_t);
307 static void pf_print_state_parts(struct pf_kstate *,
308 struct pf_state_key *, struct pf_state_key *);
309 static int pf_addr_wrap_neq(struct pf_addr_wrap *,
310 struct pf_addr_wrap *);
311 static void pf_patch_8(struct mbuf *, u_int16_t *, u_int8_t *, u_int8_t,
313 static struct pf_kstate *pf_find_state(struct pfi_kkif *,
314 struct pf_state_key_cmp *, u_int);
315 static int pf_src_connlimit(struct pf_kstate **);
316 static void pf_overload_task(void *v, int pending);
317 static int pf_insert_src_node(struct pf_ksrc_node **,
318 struct pf_krule *, struct pf_addr *, sa_family_t);
319 static u_int pf_purge_expired_states(u_int, int);
320 static void pf_purge_unlinked_rules(void);
321 static int pf_mtag_uminit(void *, int, int);
322 static void pf_mtag_free(struct m_tag *);
323 static void pf_packet_rework_nat(struct mbuf *, struct pf_pdesc *,
324 int, struct pf_state_key *);
326 static void pf_route(struct mbuf **, struct pf_krule *, int,
327 struct ifnet *, struct pf_kstate *,
328 struct pf_pdesc *, struct inpcb *);
331 static void pf_change_a6(struct pf_addr *, u_int16_t *,
332 struct pf_addr *, u_int8_t);
333 static void pf_route6(struct mbuf **, struct pf_krule *, int,
334 struct ifnet *, struct pf_kstate *,
335 struct pf_pdesc *, struct inpcb *);
337 static __inline void pf_set_protostate(struct pf_kstate *, int, u_int8_t);
339 int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);
341 extern int pf_end_threads;
342 extern struct proc *pf_purge_proc;
344 VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);
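/*
 * Undo NAT rewriting on a packet: pick the stack-side key for
 * outbound states and the wire-side key for inbound ones, then have
 * pf_packet_rework_nat() restore addresses, ports and checksums.
 */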
346 #define PACKET_UNDO_NAT(_m, _pd, _off, _s, _dir) \
348 struct pf_state_key *nk; \
349 if ((_dir) == PF_OUT) \
350 nk = (_s)->key[PF_SK_STACK]; \
352 nk = (_s)->key[PF_SK_WIRE]; \
353 pf_packet_rework_nat(_m, _pd, _off, nk); \
356 #define PACKET_LOOPED(pd) ((pd)->pf_mtag && \
357 (pd)->pf_mtag->flags & PF_PACKET_LOOPED)
359 #define STATE_LOOKUP(i, k, d, s, pd) \
361 (s) = pf_find_state((i), (k), (d)); \
362 SDT_PROBE5(pf, ip, state, lookup, i, k, d, pd, (s)); \
365 if (PACKET_LOOPED(pd)) \
369 #define BOUND_IFACE(r, k) \
370 ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all
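/*
 * Keep the per-rule state counters in sync when a state is created
 * or destroyed, including those of the matching anchor and NAT
 * rules, if any.
 */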
372 #define STATE_INC_COUNTERS(s) \
374 counter_u64_add(s->rule.ptr->states_cur, 1); \
375 counter_u64_add(s->rule.ptr->states_tot, 1); \
376 if (s->anchor.ptr != NULL) { \
377 counter_u64_add(s->anchor.ptr->states_cur, 1); \
378 counter_u64_add(s->anchor.ptr->states_tot, 1); \
380 if (s->nat_rule.ptr != NULL) { \
381 counter_u64_add(s->nat_rule.ptr->states_cur, 1);\
382 counter_u64_add(s->nat_rule.ptr->states_tot, 1);\
386 #define STATE_DEC_COUNTERS(s) \
388 if (s->nat_rule.ptr != NULL) \
389 counter_u64_add(s->nat_rule.ptr->states_cur, -1);\
390 if (s->anchor.ptr != NULL) \
391 counter_u64_add(s->anchor.ptr->states_cur, -1); \
392 counter_u64_add(s->rule.ptr->states_cur, -1); \
395 MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
396 VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
397 VNET_DEFINE(struct pf_idhash *, pf_idhash);
398 VNET_DEFINE(struct pf_srchash *, pf_srchash);
400 SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
404 u_long pf_srchashmask;
405 static u_long pf_hashsize;
406 static u_long pf_srchashsize;
407 u_long pf_ioctl_maxcount = 65535;
409 SYSCTL_ULONG(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN,
410 &pf_hashsize, 0, "Size of pf(4) states hashtable");
411 SYSCTL_ULONG(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN,
412 &pf_srchashsize, 0, "Size of pf(4) source nodes hashtable");
413 SYSCTL_ULONG(_net_pf, OID_AUTO, request_maxcount, CTLFLAG_RWTUN,
414 &pf_ioctl_maxcount, 0, "Maximum number of tables, addresses, ... in a single ioctl() call");
416 VNET_DEFINE(void *, pf_swi_cookie);
417 VNET_DEFINE(struct intr_event *, pf_swi_ie);
419 VNET_DEFINE(uint32_t, pf_hashseed);
420 #define V_pf_hashseed VNET(pf_hashseed)
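/*
 * Three-way comparison of two addresses.  IPv4 compares the single
 * 32-bit word; IPv6 compares the four 32-bit words starting from
 * the last one.
 */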
423 pf_addr_cmp(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
429 if (a->addr32[0] > b->addr32[0])
431 if (a->addr32[0] < b->addr32[0])
437 if (a->addr32[3] > b->addr32[3])
439 if (a->addr32[3] < b->addr32[3])
441 if (a->addr32[2] > b->addr32[2])
443 if (a->addr32[2] < b->addr32[2])
445 if (a->addr32[1] > b->addr32[1])
447 if (a->addr32[1] < b->addr32[1])
449 if (a->addr32[0] > b->addr32[0])
451 if (a->addr32[0] < b->addr32[0])
456 panic("%s: unknown address family %u", __func__, af);
462 pf_packet_rework_nat(struct mbuf *m, struct pf_pdesc *pd, int off,
463 struct pf_state_key *nk)
468 struct tcphdr *th = &pd->hdr.tcp;
470 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af))
471 pf_change_ap(m, pd->src, &th->th_sport, pd->ip_sum,
472 &th->th_sum, &nk->addr[pd->sidx],
473 nk->port[pd->sidx], 0, pd->af);
474 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af))
475 pf_change_ap(m, pd->dst, &th->th_dport, pd->ip_sum,
476 &th->th_sum, &nk->addr[pd->didx],
477 nk->port[pd->didx], 0, pd->af);
478 m_copyback(m, off, sizeof(*th), (caddr_t)th);
482 struct udphdr *uh = &pd->hdr.udp;
484 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af))
485 pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum,
486 &uh->uh_sum, &nk->addr[pd->sidx],
487 nk->port[pd->sidx], 1, pd->af);
488 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af))
489 pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum,
490 &uh->uh_sum, &nk->addr[pd->didx],
491 nk->port[pd->didx], 1, pd->af);
492 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
496 struct icmp *ih = &pd->hdr.icmp;
498 if (nk->port[pd->sidx] != ih->icmp_id) {
499 pd->hdr.icmp.icmp_cksum = pf_cksum_fixup(
500 ih->icmp_cksum, ih->icmp_id,
501 nk->port[pd->sidx], 0);
502 ih->icmp_id = nk->port[pd->sidx];
503 pd->sport = &ih->icmp_id;
505 m_copyback(m, off, ICMP_MINLEN, (caddr_t)ih);
510 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af)) {
513 pf_change_a(&pd->src->v4.s_addr,
514 pd->ip_sum, nk->addr[pd->sidx].v4.s_addr,
518 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
522 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af)) {
525 pf_change_a(&pd->dst->v4.s_addr,
526 pd->ip_sum, nk->addr[pd->didx].v4.s_addr,
530 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
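/*
 * Hash a state key into the key hash.  Only the comparison prefix
 * of the key (struct pf_state_key_cmp) participates in the hash.
 */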
538 static __inline uint32_t
539 pf_hashkey(struct pf_state_key *sk)
543 h = murmur3_32_hash32((uint32_t *)sk,
544 sizeof(struct pf_state_key_cmp)/sizeof(uint32_t),
547 return (h & pf_hashmask);
550 static __inline uint32_t
551 pf_hashsrc(struct pf_addr *addr, sa_family_t af)
557 h = murmur3_32_hash32((uint32_t *)&addr->v4,
558 sizeof(addr->v4)/sizeof(uint32_t), V_pf_hashseed);
561 h = murmur3_32_hash32((uint32_t *)&addr->v6,
562 sizeof(addr->v6)/sizeof(uint32_t), V_pf_hashseed);
565 panic("%s: unknown address family %u", __func__, af);
568 return (h & pf_srchashmask);
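/*
 * Mix the state pointer with a CRC32 over both peer trackers to
 * derive a per-state hash value.
 */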
573 pf_state_hash(struct pf_kstate *s)
575 u_int32_t hv = (intptr_t)s / sizeof(*s);
577 hv ^= crc32(&s->src, sizeof(s->src));
578 hv ^= crc32(&s->dst, sizeof(s->dst));
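/*
 * Update the protocol (e.g. TCP FSM) state of one or both peers.
 * For locally created TCP states this also keeps the half-open
 * counter (states_halfopen) consistent across transitions.
 */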
586 pf_set_protostate(struct pf_kstate *s, int which, u_int8_t newstate)
588 if (which == PF_PEER_DST || which == PF_PEER_BOTH)
589 s->dst.state = newstate;
590 if (which == PF_PEER_DST)
592 if (s->src.state == newstate)
594 if (s->creatorid == V_pf_status.hostid &&
595 s->key[PF_SK_STACK] != NULL &&
596 s->key[PF_SK_STACK]->proto == IPPROTO_TCP &&
597 !(TCPS_HAVEESTABLISHED(s->src.state) ||
598 s->src.state == TCPS_CLOSED) &&
599 (TCPS_HAVEESTABLISHED(newstate) || newstate == TCPS_CLOSED))
600 atomic_add_32(&V_pf_status.states_halfopen, -1);
602 s->src.state = newstate;
607 pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
612 dst->addr32[0] = src->addr32[0];
616 dst->addr32[0] = src->addr32[0];
617 dst->addr32[1] = src->addr32[1];
618 dst->addr32[2] = src->addr32[2];
619 dst->addr32[3] = src->addr32[3];
626 pf_init_threshold(struct pf_threshold *threshold,
627 u_int32_t limit, u_int32_t seconds)
629 threshold->limit = limit * PF_THRESHOLD_MULT;
630 threshold->seconds = seconds;
631 threshold->count = 0;
632 threshold->last = time_uptime;
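/*
 * Rate tracking in fixed point: counts are scaled by
 * PF_THRESHOLD_MULT and decay linearly over threshold->seconds.
 */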
636 pf_add_threshold(struct pf_threshold *threshold)
638 u_int32_t t = time_uptime, diff = t - threshold->last;
640 if (diff >= threshold->seconds)
641 threshold->count = 0;
643 threshold->count -= threshold->count * diff /
645 threshold->count += PF_THRESHOLD_MULT;
650 pf_check_threshold(struct pf_threshold *threshold)
652 return (threshold->count > threshold->limit);
656 pf_src_connlimit(struct pf_kstate **state)
658 struct pf_overload_entry *pfoe;
661 PF_STATE_LOCK_ASSERT(*state);
663 (*state)->src_node->conn++;
664 (*state)->src.tcp_est = 1;
665 pf_add_threshold(&(*state)->src_node->conn_rate);
667 if ((*state)->rule.ptr->max_src_conn &&
668 (*state)->rule.ptr->max_src_conn <
669 (*state)->src_node->conn) {
670 counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONN], 1);
674 if ((*state)->rule.ptr->max_src_conn_rate.limit &&
675 pf_check_threshold(&(*state)->src_node->conn_rate)) {
676 counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONNRATE], 1);
683 /* Kill this state. */
684 (*state)->timeout = PFTM_PURGE;
685 pf_set_protostate(*state, PF_PEER_BOTH, TCPS_CLOSED);
687 if ((*state)->rule.ptr->overload_tbl == NULL)
690 /* Schedule overloading and flushing task. */
691 pfoe = malloc(sizeof(*pfoe), M_PFTEMP, M_NOWAIT);
693 return (1); /* too bad :( */
695 bcopy(&(*state)->src_node->addr, &pfoe->addr, sizeof(pfoe->addr));
696 pfoe->af = (*state)->key[PF_SK_WIRE]->af;
697 pfoe->rule = (*state)->rule.ptr;
698 pfoe->dir = (*state)->direction;
700 SLIST_INSERT_HEAD(&V_pf_overloadqueue, pfoe, next);
701 PF_OVERLOADQ_UNLOCK();
702 taskqueue_enqueue(taskqueue_swi, &V_pf_overloadtask);
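/*
 * Runs from the system taskqueue: inserts offending source
 * addresses into the rule's overload table and, for rules with the
 * "flush" option, kills the matching states.
 */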
708 pf_overload_task(void *v, int pending)
710 struct pf_overload_head queue;
712 struct pf_overload_entry *pfoe, *pfoe1;
715 CURVNET_SET((struct vnet *)v);
718 queue = V_pf_overloadqueue;
719 SLIST_INIT(&V_pf_overloadqueue);
720 PF_OVERLOADQ_UNLOCK();
722 bzero(&p, sizeof(p));
723 SLIST_FOREACH(pfoe, &queue, next) {
724 counter_u64_add(V_pf_status.lcounters[LCNT_OVERLOAD_TABLE], 1);
725 if (V_pf_status.debug >= PF_DEBUG_MISC) {
726 printf("%s: blocking address ", __func__);
727 pf_print_host(&pfoe->addr, 0, pfoe->af);
731 p.pfra_af = pfoe->af;
736 p.pfra_ip4addr = pfoe->addr.v4;
742 p.pfra_ip6addr = pfoe->addr.v6;
748 pfr_insert_kentry(pfoe->rule->overload_tbl, &p, time_second);
753 * Remove those entries that don't need flushing.
755 SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
756 if (pfoe->rule->flush == 0) {
757 SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next);
758 free(pfoe, M_PFTEMP);
761 V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH], 1);
763 /* If nothing to flush, return. */
764 if (SLIST_EMPTY(&queue)) {
769 for (int i = 0; i <= pf_hashmask; i++) {
770 struct pf_idhash *ih = &V_pf_idhash[i];
771 struct pf_state_key *sk;
775 LIST_FOREACH(s, &ih->states, entry) {
776 sk = s->key[PF_SK_WIRE];
777 SLIST_FOREACH(pfoe, &queue, next)
778 if (sk->af == pfoe->af &&
779 ((pfoe->rule->flush & PF_FLUSH_GLOBAL) ||
780 pfoe->rule == s->rule.ptr) &&
781 ((pfoe->dir == PF_OUT &&
782 PF_AEQ(&pfoe->addr, &sk->addr[1], sk->af)) ||
783 (pfoe->dir == PF_IN &&
784 PF_AEQ(&pfoe->addr, &sk->addr[0], sk->af)))) {
785 s->timeout = PFTM_PURGE;
786 pf_set_protostate(s, PF_PEER_BOTH, TCPS_CLOSED);
790 PF_HASHROW_UNLOCK(ih);
792 SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
793 free(pfoe, M_PFTEMP);
794 if (V_pf_status.debug >= PF_DEBUG_MISC)
795 printf("%s: %u states killed", __func__, killed);
801 * Can return locked on failure, so that we can consistently
802 * allocate and insert a new one.
804 struct pf_ksrc_node *
805 pf_find_src_node(struct pf_addr *src, struct pf_krule *rule, sa_family_t af,
808 struct pf_srchash *sh;
809 struct pf_ksrc_node *n;
811 counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_SEARCH], 1);
813 sh = &V_pf_srchash[pf_hashsrc(src, af)];
815 LIST_FOREACH(n, &sh->nodes, entry)
816 if (n->rule.ptr == rule && n->af == af &&
817 ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) ||
818 (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0)))
822 PF_HASHROW_UNLOCK(sh);
823 } else if (returnlocked == 0)
824 PF_HASHROW_UNLOCK(sh);
830 pf_free_src_node(struct pf_ksrc_node *sn)
833 for (int i = 0; i < 2; i++) {
834 counter_u64_free(sn->bytes[i]);
835 counter_u64_free(sn->packets[i]);
837 uma_zfree(V_pf_sources_z, sn);
841 pf_insert_src_node(struct pf_ksrc_node **sn, struct pf_krule *rule,
842 struct pf_addr *src, sa_family_t af)
845 KASSERT((rule->rule_flag & PFRULE_SRCTRACK ||
846 rule->rpool.opts & PF_POOL_STICKYADDR),
847 ("%s for non-tracking rule %p", __func__, rule));
850 *sn = pf_find_src_node(src, rule, af, 1);
853 struct pf_srchash *sh = &V_pf_srchash[pf_hashsrc(src, af)];
855 PF_HASHROW_ASSERT(sh);
857 if (!rule->max_src_nodes ||
858 counter_u64_fetch(rule->src_nodes) < rule->max_src_nodes)
859 (*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO);
861 counter_u64_add(V_pf_status.lcounters[LCNT_SRCNODES],
864 PF_HASHROW_UNLOCK(sh);
868 for (int i = 0; i < 2; i++) {
869 (*sn)->bytes[i] = counter_u64_alloc(M_NOWAIT);
870 (*sn)->packets[i] = counter_u64_alloc(M_NOWAIT);
872 if ((*sn)->bytes[i] == NULL || (*sn)->packets[i] == NULL) {
873 pf_free_src_node(*sn);
874 PF_HASHROW_UNLOCK(sh);
879 pf_init_threshold(&(*sn)->conn_rate,
880 rule->max_src_conn_rate.limit,
881 rule->max_src_conn_rate.seconds);
884 (*sn)->rule.ptr = rule;
885 PF_ACPY(&(*sn)->addr, src, af);
886 LIST_INSERT_HEAD(&sh->nodes, *sn, entry);
887 (*sn)->creation = time_uptime;
888 (*sn)->ruletype = rule->action;
890 if ((*sn)->rule.ptr != NULL)
891 counter_u64_add((*sn)->rule.ptr->src_nodes, 1);
892 PF_HASHROW_UNLOCK(sh);
893 counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_INSERT], 1);
895 if (rule->max_src_states &&
896 (*sn)->states >= rule->max_src_states) {
897 counter_u64_add(V_pf_status.lcounters[LCNT_SRCSTATES],
906 pf_unlink_src_node(struct pf_ksrc_node *src)
909 PF_HASHROW_ASSERT(&V_pf_srchash[pf_hashsrc(&src->addr, src->af)]);
910 LIST_REMOVE(src, entry);
912 counter_u64_add(src->rule.ptr->src_nodes, -1);
916 pf_free_src_nodes(struct pf_ksrc_node_list *head)
918 struct pf_ksrc_node *sn, *tmp;
921 LIST_FOREACH_SAFE(sn, head, entry, tmp) {
922 pf_free_src_node(sn);
926 counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], count);
935 pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) +
936 sizeof(struct pf_mtag), NULL, NULL, pf_mtag_uminit, NULL,
940 /* Per-vnet data storage structures initialization. */
944 struct pf_keyhash *kh;
945 struct pf_idhash *ih;
946 struct pf_srchash *sh;
949 if (pf_hashsize == 0 || !powerof2(pf_hashsize))
950 pf_hashsize = PF_HASHSIZ;
951 if (pf_srchashsize == 0 || !powerof2(pf_srchashsize))
952 pf_srchashsize = PF_SRCHASHSIZ;
954 V_pf_hashseed = arc4random();
956 /* States and state keys storage. */
957 V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_kstate),
958 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
959 V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z;
960 uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT);
961 uma_zone_set_warning(V_pf_state_z, "PF states limit reached");
963 V_pf_state_key_z = uma_zcreate("pf state keys",
964 sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL,
967 V_pf_keyhash = mallocarray(pf_hashsize, sizeof(struct pf_keyhash),
968 M_PFHASH, M_NOWAIT | M_ZERO);
969 V_pf_idhash = mallocarray(pf_hashsize, sizeof(struct pf_idhash),
970 M_PFHASH, M_NOWAIT | M_ZERO);
971 if (V_pf_keyhash == NULL || V_pf_idhash == NULL) {
972 printf("pf: Unable to allocate memory for "
973 "state_hashsize %lu.\n", pf_hashsize);
975 free(V_pf_keyhash, M_PFHASH);
976 free(V_pf_idhash, M_PFHASH);
978 pf_hashsize = PF_HASHSIZ;
979 V_pf_keyhash = mallocarray(pf_hashsize,
980 sizeof(struct pf_keyhash), M_PFHASH, M_WAITOK | M_ZERO);
981 V_pf_idhash = mallocarray(pf_hashsize,
982 sizeof(struct pf_idhash), M_PFHASH, M_WAITOK | M_ZERO);
985 pf_hashmask = pf_hashsize - 1;
986 for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
988 mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK);
989 mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF);
993 V_pf_sources_z = uma_zcreate("pf source nodes",
994 sizeof(struct pf_ksrc_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
996 V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z;
997 uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT);
998 uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached");
1000 V_pf_srchash = mallocarray(pf_srchashsize,
1001 sizeof(struct pf_srchash), M_PFHASH, M_NOWAIT | M_ZERO);
1002 if (V_pf_srchash == NULL) {
1003 printf("pf: Unable to allocate memory for "
1004 "source_hashsize %lu.\n", pf_srchashsize);
1006 pf_srchashsize = PF_SRCHASHSIZ;
1007 V_pf_srchash = mallocarray(pf_srchashsize,
1008 sizeof(struct pf_srchash), M_PFHASH, M_WAITOK | M_ZERO);
1011 pf_srchashmask = pf_srchashsize - 1;
1012 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++)
1013 mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);
1016 TAILQ_INIT(&V_pf_altqs[0]);
1017 TAILQ_INIT(&V_pf_altqs[1]);
1018 TAILQ_INIT(&V_pf_altqs[2]);
1019 TAILQ_INIT(&V_pf_altqs[3]);
1020 TAILQ_INIT(&V_pf_pabuf);
1021 V_pf_altqs_active = &V_pf_altqs[0];
1022 V_pf_altq_ifs_active = &V_pf_altqs[1];
1023 V_pf_altqs_inactive = &V_pf_altqs[2];
1024 V_pf_altq_ifs_inactive = &V_pf_altqs[3];
1026 /* Send & overload+flush queues. */
1027 STAILQ_INIT(&V_pf_sendqueue);
1028 SLIST_INIT(&V_pf_overloadqueue);
1029 TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, curvnet);
1031 /* Unlinked rules that may still be referenced. */
1032 TAILQ_INIT(&V_pf_unlinked_rules);
1039 uma_zdestroy(pf_mtag_z);
1045 struct pf_keyhash *kh;
1046 struct pf_idhash *ih;
1047 struct pf_srchash *sh;
1048 struct pf_send_entry *pfse, *next;
1051 for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
1053 KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
1055 KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty",
1057 mtx_destroy(&kh->lock);
1058 mtx_destroy(&ih->lock);
1060 free(V_pf_keyhash, M_PFHASH);
1061 free(V_pf_idhash, M_PFHASH);
1063 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
1064 KASSERT(LIST_EMPTY(&sh->nodes),
1065 ("%s: source node hash not empty", __func__));
1066 mtx_destroy(&sh->lock);
1068 free(V_pf_srchash, M_PFHASH);
1070 STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) {
1071 m_freem(pfse->pfse_m);
1072 free(pfse, M_PFTEMP);
1075 uma_zdestroy(V_pf_sources_z);
1076 uma_zdestroy(V_pf_state_z);
1077 uma_zdestroy(V_pf_state_key_z);
1081 pf_mtag_uminit(void *mem, int size, int how)
1085 t = (struct m_tag *)mem;
1086 t->m_tag_cookie = MTAG_ABI_COMPAT;
1087 t->m_tag_id = PACKET_TAG_PF;
1088 t->m_tag_len = sizeof(struct pf_mtag);
1089 t->m_tag_free = pf_mtag_free;
1095 pf_mtag_free(struct m_tag *t)
1098 uma_zfree(pf_mtag_z, t);
1102 pf_get_mtag(struct mbuf *m)
1106 if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL)
1107 return ((struct pf_mtag *)(mtag + 1));
1109 mtag = uma_zalloc(pf_mtag_z, M_NOWAIT);
1112 bzero(mtag + 1, sizeof(struct pf_mtag));
1113 m_tag_prepend(m, mtag);
1115 return ((struct pf_mtag *)(mtag + 1));
1119 pf_state_key_attach(struct pf_state_key *skw, struct pf_state_key *sks,
1120 struct pf_kstate *s)
1122 struct pf_keyhash *khs, *khw, *kh;
1123 struct pf_state_key *sk, *cur;
1124 struct pf_kstate *si, *olds = NULL;
1127 KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
1128 KASSERT(s->key[PF_SK_WIRE] == NULL, ("%s: state has key", __func__));
1129 KASSERT(s->key[PF_SK_STACK] == NULL, ("%s: state has key", __func__));
1132 * We need to lock hash slots of both keys. To avoid deadlock
1133 * we always lock the slot with lower address first. Unlock order
1134 * isn't important.
1136 * We also need to lock ID hash slot before dropping key
1137 * locks. On success we return with ID hash slot locked.
1141 khs = khw = &V_pf_keyhash[pf_hashkey(skw)];
1142 PF_HASHROW_LOCK(khs);
1144 khs = &V_pf_keyhash[pf_hashkey(sks)];
1145 khw = &V_pf_keyhash[pf_hashkey(skw)];
1147 PF_HASHROW_LOCK(khs);
1148 } else if (khs < khw) {
1149 PF_HASHROW_LOCK(khs);
1150 PF_HASHROW_LOCK(khw);
1152 PF_HASHROW_LOCK(khw);
1153 PF_HASHROW_LOCK(khs);
1157 #define KEYS_UNLOCK() do { \
1159 PF_HASHROW_UNLOCK(khs); \
1160 PF_HASHROW_UNLOCK(khw); \
1162 PF_HASHROW_UNLOCK(khs); \
1166 * First run: start with wire key.
1172 MPASS(s->lock == NULL);
1173 s->lock = &V_pf_idhash[PF_IDHASH(s)].lock;
1176 LIST_FOREACH(cur, &kh->keys, entry)
1177 if (bcmp(cur, sk, sizeof(struct pf_state_key_cmp)) == 0)
1181 /* Key exists. Check for a state with the same kif; if none, add to the key. */
1182 TAILQ_FOREACH(si, &cur->states[idx], key_list[idx]) {
1183 struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(si)];
1185 PF_HASHROW_LOCK(ih);
1186 if (si->kif == s->kif &&
1187 si->direction == s->direction) {
1188 if (sk->proto == IPPROTO_TCP &&
1189 si->src.state >= TCPS_FIN_WAIT_2 &&
1190 si->dst.state >= TCPS_FIN_WAIT_2) {
1192 * New state matches an old >FIN_WAIT_2
1193 * state. We can't drop key hash locks,
1194 * thus we can't unlink it properly.
1196 * As a workaround we drop it into
1197 * TCPS_CLOSED state, schedule purge
1198 * ASAP and push it into the very end
1199 * of the slot TAILQ, so that it won't
1200 * conflict with our new state.
1202 pf_set_protostate(si, PF_PEER_BOTH,
1204 si->timeout = PFTM_PURGE;
1207 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1208 printf("pf: %s key attach "
1210 (idx == PF_SK_WIRE) ?
1213 pf_print_state_parts(s,
1214 (idx == PF_SK_WIRE) ?
1216 (idx == PF_SK_STACK) ?
1218 printf(", existing: ");
1219 pf_print_state_parts(si,
1220 (idx == PF_SK_WIRE) ?
1222 (idx == PF_SK_STACK) ?
1226 PF_HASHROW_UNLOCK(ih);
1228 uma_zfree(V_pf_state_key_z, sk);
1229 if (idx == PF_SK_STACK)
1231 return (EEXIST); /* collision! */
1234 PF_HASHROW_UNLOCK(ih);
1236 uma_zfree(V_pf_state_key_z, sk);
1239 LIST_INSERT_HEAD(&kh->keys, sk, entry);
1244 /* List is sorted, if-bound states before floating. */
1245 if (s->kif == V_pfi_all)
1246 TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]);
1248 TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]);
1251 TAILQ_REMOVE(&s->key[idx]->states[idx], olds, key_list[idx]);
1252 TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], olds,
1258 * Attach done. Now decide whether (and how)
1259 * a second key should be attached.
1262 s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
1266 } else if (sks != NULL) {
1268 * Continue attaching with stack key.
1280 KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL,
1281 ("%s failure", __func__));
1288 pf_detach_state(struct pf_kstate *s)
1290 struct pf_state_key *sks = s->key[PF_SK_STACK];
1291 struct pf_keyhash *kh;
1294 kh = &V_pf_keyhash[pf_hashkey(sks)];
1295 PF_HASHROW_LOCK(kh);
1296 if (s->key[PF_SK_STACK] != NULL)
1297 pf_state_key_detach(s, PF_SK_STACK);
1299 * If both point to same key, then we are done.
1301 if (sks == s->key[PF_SK_WIRE]) {
1302 pf_state_key_detach(s, PF_SK_WIRE);
1303 PF_HASHROW_UNLOCK(kh);
1306 PF_HASHROW_UNLOCK(kh);
1309 if (s->key[PF_SK_WIRE] != NULL) {
1310 kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])];
1311 PF_HASHROW_LOCK(kh);
1312 if (s->key[PF_SK_WIRE] != NULL)
1313 pf_state_key_detach(s, PF_SK_WIRE);
1314 PF_HASHROW_UNLOCK(kh);
1319 pf_state_key_detach(struct pf_kstate *s, int idx)
1321 struct pf_state_key *sk = s->key[idx];
1323 struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)];
1325 PF_HASHROW_ASSERT(kh);
1327 TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]);
1330 if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) {
1331 LIST_REMOVE(sk, entry);
1332 uma_zfree(V_pf_state_key_z, sk);
1337 pf_state_key_ctor(void *mem, int size, void *arg, int flags)
1339 struct pf_state_key *sk = mem;
1341 bzero(sk, sizeof(struct pf_state_key_cmp));
1342 TAILQ_INIT(&sk->states[PF_SK_WIRE]);
1343 TAILQ_INIT(&sk->states[PF_SK_STACK]);
1348 struct pf_state_key *
1349 pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr,
1350 struct pf_addr *daddr, u_int16_t sport, u_int16_t dport)
1352 struct pf_state_key *sk;
1354 sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
1358 PF_ACPY(&sk->addr[pd->sidx], saddr, pd->af);
1359 PF_ACPY(&sk->addr[pd->didx], daddr, pd->af);
1360 sk->port[pd->sidx] = sport;
1361 sk->port[pd->didx] = dport;
1362 sk->proto = pd->proto;
1368 struct pf_state_key *
1369 pf_state_key_clone(struct pf_state_key *orig)
1371 struct pf_state_key *sk;
1373 sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
1377 bcopy(orig, sk, sizeof(struct pf_state_key_cmp));
1383 pf_state_insert(struct pfi_kkif *kif, struct pfi_kkif *orig_kif,
1384 struct pf_state_key *skw, struct pf_state_key *sks, struct pf_kstate *s)
1386 struct pf_idhash *ih;
1387 struct pf_kstate *cur;
1390 KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]),
1391 ("%s: sks not pristine", __func__));
1392 KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]),
1393 ("%s: skw not pristine", __func__));
1394 KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
1397 s->orig_kif = orig_kif;
1399 if (s->id == 0 && s->creatorid == 0) {
1400 /* XXX: should be atomic, but probability of collision low */
1401 if ((s->id = V_pf_stateid[curcpu]++) == PFID_MAXID)
1402 V_pf_stateid[curcpu] = 1;
1403 s->id |= (uint64_t )curcpu << PFID_CPUSHIFT;
1404 s->id = htobe64(s->id);
1405 s->creatorid = V_pf_status.hostid;
1408 /* Returns with ID locked on success. */
1409 if ((error = pf_state_key_attach(skw, sks, s)) != 0)
1412 ih = &V_pf_idhash[PF_IDHASH(s)];
1413 PF_HASHROW_ASSERT(ih);
1414 LIST_FOREACH(cur, &ih->states, entry)
1415 if (cur->id == s->id && cur->creatorid == s->creatorid)
1419 PF_HASHROW_UNLOCK(ih);
1420 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1421 printf("pf: state ID collision: "
1422 "id: %016llx creatorid: %08x\n",
1423 (unsigned long long)be64toh(s->id),
1424 ntohl(s->creatorid));
1429 LIST_INSERT_HEAD(&ih->states, s, entry);
1430 /* One for keys, one for ID hash. */
1431 refcount_init(&s->refs, 2);
1433 pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_INSERT], 1);
1434 if (V_pfsync_insert_state_ptr != NULL)
1435 V_pfsync_insert_state_ptr(s);
1437 /* Returns locked. */
1442 * Find state by ID: returns with locked row on success.
1445 pf_find_state_byid(uint64_t id, uint32_t creatorid)
1447 struct pf_idhash *ih;
1448 struct pf_kstate *s;
1450 pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);
1452 ih = &V_pf_idhash[(be64toh(id) % (pf_hashmask + 1))];
1454 PF_HASHROW_LOCK(ih);
1455 LIST_FOREACH(s, &ih->states, entry)
1456 if (s->id == id && s->creatorid == creatorid)
1460 PF_HASHROW_UNLOCK(ih);
1466 * Find state by key.
1467 * Returns with ID hash slot locked on success.
1469 static struct pf_kstate *
1470 pf_find_state(struct pfi_kkif *kif, struct pf_state_key_cmp *key, u_int dir)
1472 struct pf_keyhash *kh;
1473 struct pf_state_key *sk;
1474 struct pf_kstate *s;
1477 pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);
1479 kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];
1481 PF_HASHROW_LOCK(kh);
1482 LIST_FOREACH(sk, &kh->keys, entry)
1483 if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
1486 PF_HASHROW_UNLOCK(kh);
1490 idx = (dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK);
1492 /* List is sorted, if-bound states before floating ones. */
1493 TAILQ_FOREACH(s, &sk->states[idx], key_list[idx])
1494 if (s->kif == V_pfi_all || s->kif == kif) {
1496 PF_HASHROW_UNLOCK(kh);
1497 if (__predict_false(s->timeout >= PFTM_MAX)) {
1499 * State is either being processed by
1500 * pf_unlink_state() in another thread, or
1501 * is scheduled for immediate expiry.
1508 PF_HASHROW_UNLOCK(kh);
1514 * Returns with ID hash slot locked on success.
1517 pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
1519 struct pf_keyhash *kh;
1520 struct pf_state_key *sk;
1521 struct pf_kstate *s, *ret = NULL;
1524 pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);
1526 kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];
1528 PF_HASHROW_LOCK(kh);
1529 LIST_FOREACH(sk, &kh->keys, entry)
1530 if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
1533 PF_HASHROW_UNLOCK(kh);
1548 panic("%s: dir %u", __func__, dir);
1551 TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) {
1554 PF_HASHROW_UNLOCK(kh);
1570 PF_HASHROW_UNLOCK(kh);
1577 * This routine is inefficient -- locks the state only to unlock immediately on
1578 * return.
1579 * It is racy -- after the state is unlocked nothing stops other threads from
1580 * removing it.
1583 pf_find_state_all_exists(struct pf_state_key_cmp *key, u_int dir)
1585 struct pf_kstate *s;
1587 s = pf_find_state_all(key, dir, NULL);
1595 /* END state table stuff */
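/*
 * Queue a packet for transmission and kick the pf software
 * interrupt; pf_intr() then performs the actual ip_input(),
 * ip_output() or icmp_error() call.
 */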
1598 pf_send(struct pf_send_entry *pfse)
1602 STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next);
1604 swi_sched(V_pf_swi_cookie, 0);
1608 pf_isforlocal(struct mbuf *m, int af)
1613 struct ip *ip = mtod(m, struct ip *);
1615 return (in_localip(ip->ip_dst));
1620 struct ip6_hdr *ip6;
1621 struct in6_ifaddr *ia;
1622 ip6 = mtod(m, struct ip6_hdr *);
1623 ia = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false);
1626 return (! (ia->ia6_flags & IN6_IFF_NOTREADY));
1630 panic("Unsupported af %d", af);
1639 struct epoch_tracker et;
1640 struct pf_send_head queue;
1641 struct pf_send_entry *pfse, *next;
1643 CURVNET_SET((struct vnet *)v);
1646 queue = V_pf_sendqueue;
1647 STAILQ_INIT(&V_pf_sendqueue);
1650 NET_EPOCH_ENTER(et);
1652 STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) {
1653 switch (pfse->pfse_type) {
1656 if (pf_isforlocal(pfse->pfse_m, AF_INET)) {
1657 pfse->pfse_m->m_flags |= M_SKIP_FIREWALL;
1658 pfse->pfse_m->m_pkthdr.csum_flags |=
1659 CSUM_IP_VALID | CSUM_IP_CHECKED;
1660 ip_input(pfse->pfse_m);
1662 ip_output(pfse->pfse_m, NULL, NULL, 0, NULL,
1668 icmp_error(pfse->pfse_m, pfse->icmpopts.type,
1669 pfse->icmpopts.code, 0, pfse->icmpopts.mtu);
1674 if (pf_isforlocal(pfse->pfse_m, AF_INET6)) {
1675 pfse->pfse_m->m_flags |= M_SKIP_FIREWALL;
1676 ip6_input(pfse->pfse_m);
1678 ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL,
1683 icmp6_error(pfse->pfse_m, pfse->icmpopts.type,
1684 pfse->icmpopts.code, pfse->icmpopts.mtu);
1688 panic("%s: unknown type", __func__);
1690 free(pfse, M_PFTEMP);
1696 #define pf_purge_thread_period (hz / 10)
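/* The purge thread wakes up ten times per second. */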
1698 #ifdef PF_WANT_32_TO_64_COUNTER
1700 pf_status_counter_u64_periodic(void)
1705 if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 60)) != 0) {
1709 for (int i = 0; i < FCNT_MAX; i++) {
1710 pf_counter_u64_periodic(&V_pf_status.fcounters[i]);
1715 pf_kif_counter_u64_periodic(void)
1717 struct pfi_kkif *kif;
1722 if (__predict_false(V_pf_allkifcount == 0)) {
1726 if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 300)) != 0) {
1730 run = V_pf_allkifcount / 10;
1734 for (r = 0; r < run; r++) {
1735 kif = LIST_NEXT(V_pf_kifmarker, pfik_allkiflist);
1737 LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
1738 LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
1742 LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
1743 LIST_INSERT_AFTER(kif, V_pf_kifmarker, pfik_allkiflist);
1745 for (int i = 0; i < 2; i++) {
1746 for (int j = 0; j < 2; j++) {
1747 for (int k = 0; k < 2; k++) {
1748 pf_counter_u64_periodic(&kif->pfik_packets[i][j][k]);
1749 pf_counter_u64_periodic(&kif->pfik_bytes[i][j][k]);
1757 pf_rule_counter_u64_periodic(void)
1759 struct pf_krule *rule;
1764 if (__predict_false(V_pf_allrulecount == 0)) {
1768 if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 300)) != 0) {
1772 run = V_pf_allrulecount / 10;
1776 for (r = 0; r < run; r++) {
1777 rule = LIST_NEXT(V_pf_rulemarker, allrulelist);
1779 LIST_REMOVE(V_pf_rulemarker, allrulelist);
1780 LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
1784 LIST_REMOVE(V_pf_rulemarker, allrulelist);
1785 LIST_INSERT_AFTER(rule, V_pf_rulemarker, allrulelist);
1787 pf_counter_u64_periodic(&rule->evaluations);
1788 for (int i = 0; i < 2; i++) {
1789 pf_counter_u64_periodic(&rule->packets[i]);
1790 pf_counter_u64_periodic(&rule->bytes[i]);
1796 pf_counter_u64_periodic_main(void)
1798 PF_RULES_RLOCK_TRACKER;
1800 V_pf_counter_periodic_iter++;
1803 pf_counter_u64_critical_enter();
1804 pf_status_counter_u64_periodic();
1805 pf_kif_counter_u64_periodic();
1806 pf_rule_counter_u64_periodic();
1807 pf_counter_u64_critical_exit();
1811 #define pf_counter_u64_periodic_main() do { } while (0)
1815 pf_purge_thread(void *unused __unused)
1817 VNET_ITERATOR_DECL(vnet_iter);
1819 sx_xlock(&pf_end_lock);
1820 while (pf_end_threads == 0) {
1821 sx_sleep(pf_purge_thread, &pf_end_lock, 0, "pftm", pf_purge_thread_period);
1824 VNET_FOREACH(vnet_iter) {
1825 CURVNET_SET(vnet_iter);
1827 /* Wait until V_pf_default_rule is initialized. */
1828 if (V_pf_vnet_active == 0) {
1833 pf_counter_u64_periodic_main();
1836 * Process 1/interval fraction of the state
1837 * table every run.
1840 pf_purge_expired_states(V_pf_purge_idx, pf_hashmask /
1841 (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));
1844 * Purge other expired types every
1845 * PFTM_INTERVAL seconds.
1847 if (V_pf_purge_idx == 0) {
1849 * Order is important:
1850 * - states and src nodes reference rules
1851 * - states and rules reference kifs
1853 pf_purge_expired_fragments();
1854 pf_purge_expired_src_nodes();
1855 pf_purge_unlinked_rules();
1860 VNET_LIST_RUNLOCK();
1864 sx_xunlock(&pf_end_lock);
1869 pf_unload_vnet_purge(void)
1873 * To clean up all kifs and rules we need
1874 * two runs: the first one clears reference flags;
1875 * pf_purge_expired_states() then won't raise them
1876 * again, and the second run frees.
1878 pf_purge_unlinked_rules();
1882 * Now purge everything.
1884 pf_purge_expired_states(0, pf_hashmask);
1885 pf_purge_fragments(UINT_MAX);
1886 pf_purge_expired_src_nodes();
1889 * Now all kifs & rules should be unreferenced,
1890 * and thus can be freed.
1892 pf_purge_unlinked_rules();
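/*
 * Compute the absolute expiry time of a state.  With adaptive
 * timeouts the timeout shrinks linearly as the number of states
 * grows from the ADAPTIVE_START to the ADAPTIVE_END threshold.
 */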
1897 pf_state_expires(const struct pf_kstate *state)
1904 /* handle all PFTM_* > PFTM_MAX here */
1905 if (state->timeout == PFTM_PURGE)
1906 return (time_uptime);
1907 KASSERT(state->timeout != PFTM_UNLINKED,
1908 ("pf_state_expires: timeout == PFTM_UNLINKED"));
1909 KASSERT((state->timeout < PFTM_MAX),
1910 ("pf_state_expires: timeout > PFTM_MAX"));
1911 timeout = state->rule.ptr->timeout[state->timeout];
1913 timeout = V_pf_default_rule.timeout[state->timeout];
1914 start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
1915 if (start && state->rule.ptr != &V_pf_default_rule) {
1916 end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
1917 states = counter_u64_fetch(state->rule.ptr->states_cur);
1919 start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
1920 end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
1921 states = V_pf_status.states;
1923 if (end && states > start && start < end) {
1925 timeout = (u_int64_t)timeout * (end - states) /
1926 (end - start);
1927 return (state->expire + timeout);
1930 return (time_uptime);
1932 return (state->expire + timeout);
1936 pf_purge_expired_src_nodes()
1938 struct pf_ksrc_node_list freelist;
1939 struct pf_srchash *sh;
1940 struct pf_ksrc_node *cur, *next;
1943 LIST_INIT(&freelist);
1944 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
1945 PF_HASHROW_LOCK(sh);
1946 LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next)
1947 if (cur->states == 0 && cur->expire <= time_uptime) {
1948 pf_unlink_src_node(cur);
1949 LIST_INSERT_HEAD(&freelist, cur, entry);
1950 } else if (cur->rule.ptr != NULL)
1951 cur->rule.ptr->rule_ref |= PFRULE_REFS;
1952 PF_HASHROW_UNLOCK(sh);
1955 pf_free_src_nodes(&freelist);
1957 V_pf_status.src_nodes = uma_zone_get_cur(V_pf_sources_z);
1961 pf_src_tree_remove_state(struct pf_kstate *s)
1963 struct pf_ksrc_node *sn;
1964 struct pf_srchash *sh;
1967 timeout = s->rule.ptr->timeout[PFTM_SRC_NODE] ?
1968 s->rule.ptr->timeout[PFTM_SRC_NODE] :
1969 V_pf_default_rule.timeout[PFTM_SRC_NODE];
1971 if (s->src_node != NULL) {
1973 sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
1974 PF_HASHROW_LOCK(sh);
1977 if (--sn->states == 0)
1978 sn->expire = time_uptime + timeout;
1979 PF_HASHROW_UNLOCK(sh);
1981 if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
1982 sn = s->nat_src_node;
1983 sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
1984 PF_HASHROW_LOCK(sh);
1985 if (--sn->states == 0)
1986 sn->expire = time_uptime + timeout;
1987 PF_HASHROW_UNLOCK(sh);
1989 s->src_node = s->nat_src_node = NULL;
1993 * Unlink and potentially free a state. Function may be
1994 * called with ID hash row locked, but always returns
1995 * unlocked, since it needs to go through key hash locking.
1998 pf_unlink_state(struct pf_kstate *s)
2000 struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)];
2002 PF_HASHROW_ASSERT(ih);
2004 if (s->timeout == PFTM_UNLINKED) {
2006 * State is being processed
2007 * by pf_unlink_state() in
2008 * another thread.
2010 PF_HASHROW_UNLOCK(ih);
2011 return (0); /* XXXGL: undefined actually */
2014 if (s->src.state == PF_TCPS_PROXY_DST) {
2015 /* XXX wire key the right one? */
2016 pf_send_tcp(s->rule.ptr, s->key[PF_SK_WIRE]->af,
2017 &s->key[PF_SK_WIRE]->addr[1],
2018 &s->key[PF_SK_WIRE]->addr[0],
2019 s->key[PF_SK_WIRE]->port[1],
2020 s->key[PF_SK_WIRE]->port[0],
2021 s->src.seqhi, s->src.seqlo + 1,
2022 TH_RST|TH_ACK, 0, 0, 0, 1, s->tag);
2025 LIST_REMOVE(s, entry);
2026 pf_src_tree_remove_state(s);
2028 if (V_pfsync_delete_state_ptr != NULL)
2029 V_pfsync_delete_state_ptr(s);
2031 STATE_DEC_COUNTERS(s);
2033 s->timeout = PFTM_UNLINKED;
2035 /* Ensure we remove it from the list of halfopen states, if needed. */
2036 if (s->key[PF_SK_STACK] != NULL &&
2037 s->key[PF_SK_STACK]->proto == IPPROTO_TCP)
2038 pf_set_protostate(s, PF_PEER_BOTH, TCPS_CLOSED);
2040 PF_HASHROW_UNLOCK(ih);
2043 /* pf_state_insert() initialises refs to 2 */
2044 return (pf_release_staten(s, 2));
2048 pf_alloc_state(int flags)
2051 return (uma_zalloc(V_pf_state_z, flags | M_ZERO));
2055 pf_free_state(struct pf_kstate *cur)
2058 KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
2059 KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
2062 pf_normalize_tcp_cleanup(cur);
2063 uma_zfree(V_pf_state_z, cur);
2064 pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_REMOVALS], 1);
2068 * Called only from pf_purge_thread(), thus serialized.
2071 pf_purge_expired_states(u_int i, int maxcheck)
2073 struct pf_idhash *ih;
2074 struct pf_kstate *s;
2076 V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
2079 * Go through hash and unlink states that expire now.
2081 while (maxcheck > 0) {
2082 ih = &V_pf_idhash[i];
2084 /* only take the lock if we expect to do work */
2085 if (!LIST_EMPTY(&ih->states)) {
2087 PF_HASHROW_LOCK(ih);
2088 LIST_FOREACH(s, &ih->states, entry) {
2089 if (pf_state_expires(s) <= time_uptime) {
2090 V_pf_status.states -=
2094 s->rule.ptr->rule_ref |= PFRULE_REFS;
2095 if (s->nat_rule.ptr != NULL)
2096 s->nat_rule.ptr->rule_ref |= PFRULE_REFS;
2097 if (s->anchor.ptr != NULL)
2098 s->anchor.ptr->rule_ref |= PFRULE_REFS;
2099 s->kif->pfik_flags |= PFI_IFLAG_REFS;
2101 s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
2103 PF_HASHROW_UNLOCK(ih);
2106 /* Return when we hit end of hash. */
2107 if (++i > pf_hashmask) {
2108 V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
2115 V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
2121 pf_purge_unlinked_rules()
2123 struct pf_krulequeue tmpq;
2124 struct pf_krule *r, *r1;
2127 * If we have an overload task pending, we'd
2128 * better skip purging this time: there is a tiny
2129 * chance that the task still references an
2130 * already unlinked rule.
2132 PF_OVERLOADQ_LOCK();
2133 if (!SLIST_EMPTY(&V_pf_overloadqueue)) {
2134 PF_OVERLOADQ_UNLOCK();
2137 PF_OVERLOADQ_UNLOCK();
2140 * Do naive mark-and-sweep garbage collecting of old rules.
2141 * Reference flag is raised by pf_purge_expired_states()
2142 * and pf_purge_expired_src_nodes().
2144 * To avoid LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK,
2145 * use a temporary queue.
2148 PF_UNLNKDRULES_LOCK();
2149 TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) {
2150 if (!(r->rule_ref & PFRULE_REFS)) {
2151 TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries);
2152 TAILQ_INSERT_TAIL(&tmpq, r, entries);
2154 r->rule_ref &= ~PFRULE_REFS;
2156 PF_UNLNKDRULES_UNLOCK();
2158 if (!TAILQ_EMPTY(&tmpq)) {
2160 TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) {
2161 TAILQ_REMOVE(&tmpq, r, entries);
2169 pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
2174 u_int32_t a = ntohl(addr->addr32[0]);
2175 printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
2187 u_int8_t i, curstart, curend, maxstart, maxend;
2188 curstart = curend = maxstart = maxend = 255;
2189 for (i = 0; i < 8; i++) {
2190 if (!addr->addr16[i]) {
2191 if (curstart == 255)
2195 if ((curend - curstart) >
2196 (maxend - maxstart)) {
2197 maxstart = curstart;
2200 curstart = curend = 255;
2203 if ((curend - curstart) >
2204 (maxend - maxstart)) {
2205 maxstart = curstart;
2208 for (i = 0; i < 8; i++) {
2209 if (i >= maxstart && i <= maxend) {
2215 b = ntohs(addr->addr16[i]);
2232 pf_print_state(struct pf_kstate *s)
2234 pf_print_state_parts(s, NULL, NULL);
2238 pf_print_state_parts(struct pf_kstate *s,
2239 struct pf_state_key *skwp, struct pf_state_key *sksp)
2241 struct pf_state_key *skw, *sks;
2242 u_int8_t proto, dir;
2244 /* Do our best to fill these, but they're skipped if NULL */
2245 skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
2246 sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
2247 proto = skw ? skw->proto : (sks ? sks->proto : 0);
2248 dir = s ? s->direction : 0;
2266 case IPPROTO_ICMPV6:
2270 printf("%u", proto);
2283 pf_print_host(&skw->addr[0], skw->port[0], skw->af);
2285 pf_print_host(&skw->addr[1], skw->port[1], skw->af);
2290 pf_print_host(&sks->addr[0], sks->port[0], sks->af);
2292 pf_print_host(&sks->addr[1], sks->port[1], sks->af);
2297 if (proto == IPPROTO_TCP) {
2298 printf(" [lo=%u high=%u win=%u modulator=%u",
2299 s->src.seqlo, s->src.seqhi,
2300 s->src.max_win, s->src.seqdiff);
2301 if (s->src.wscale && s->dst.wscale)
2302 printf(" wscale=%u",
2303 s->src.wscale & PF_WSCALE_MASK);
2305 printf(" [lo=%u high=%u win=%u modulator=%u",
2306 s->dst.seqlo, s->dst.seqhi,
2307 s->dst.max_win, s->dst.seqdiff);
2308 if (s->src.wscale && s->dst.wscale)
2309 printf(" wscale=%u",
2310 s->dst.wscale & PF_WSCALE_MASK);
2313 printf(" %u:%u", s->src.state, s->dst.state);
2318 pf_print_flags(u_int8_t f)
2340 #define PF_SET_SKIP_STEPS(i) \
2342 while (head[i] != cur) { \
2343 head[i]->skip[i].ptr = cur; \
2344 head[i] = TAILQ_NEXT(head[i], entries); \
2349 pf_calc_skip_steps(struct pf_krulequeue *rules)
2351 struct pf_krule *cur, *prev, *head[PF_SKIP_COUNT];
2354 cur = TAILQ_FIRST(rules);
2356 for (i = 0; i < PF_SKIP_COUNT; ++i)
2358 while (cur != NULL) {
2359 if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
2360 PF_SET_SKIP_STEPS(PF_SKIP_IFP);
2361 if (cur->direction != prev->direction)
2362 PF_SET_SKIP_STEPS(PF_SKIP_DIR);
2363 if (cur->af != prev->af)
2364 PF_SET_SKIP_STEPS(PF_SKIP_AF);
2365 if (cur->proto != prev->proto)
2366 PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
2367 if (cur->src.neg != prev->src.neg ||
2368 pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
2369 PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
2370 if (cur->src.port[0] != prev->src.port[0] ||
2371 cur->src.port[1] != prev->src.port[1] ||
2372 cur->src.port_op != prev->src.port_op)
2373 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
2374 if (cur->dst.neg != prev->dst.neg ||
2375 pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
2376 PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
2377 if (cur->dst.port[0] != prev->dst.port[0] ||
2378 cur->dst.port[1] != prev->dst.port[1] ||
2379 cur->dst.port_op != prev->dst.port_op)
2380 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
2383 cur = TAILQ_NEXT(cur, entries);
2385 for (i = 0; i < PF_SKIP_COUNT; ++i)
2386 PF_SET_SKIP_STEPS(i);
2390 pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
2392 if (aw1->type != aw2->type)
2394 switch (aw1->type) {
2395 case PF_ADDR_ADDRMASK:
2397 if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, AF_INET6))
2399 if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, AF_INET6))
2402 case PF_ADDR_DYNIFTL:
2403 return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
2404 case PF_ADDR_NOROUTE:
2405 case PF_ADDR_URPFFAILED:
2408 return (aw1->p.tbl != aw2->p.tbl);
2410 printf("invalid address type: %d\n", aw1->type);
2416 * Checksum updates are a little complicated because the checksum in the TCP/UDP
2417 * header isn't always a full checksum. In some cases (i.e. output) it's a
2418 * pseudo-header checksum, which is a partial checksum over src/dst IP
2419 * addresses, protocol number and length.
2421 * That means we have the following cases:
2422 * * Input or forwarding: we don't have TSO, the checksum fields are full
2423 * checksums, we need to update the checksum whenever we change anything.
2424 * * Output (i.e. the checksum is a pseudo-header checksum):
2425 * x The field being updated is src/dst address or affects the length of
2426 * the packet. We need to update the pseudo-header checksum (note that this
2427 * checksum is not ones' complement).
2428 * x Some other field is being modified (e.g. src/dst port numbers): We
2429 * don't have to update anything.
2432 pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
2436 x = cksum + old - new;
2437 x = (x + (x >> 16)) & 0xffff;
2439 /* optimise: eliminate a branch when not udp */
2440 if (udp && cksum == 0x0000)
2442 if (udp && x == 0x0000)
2445 return (u_int16_t)(x);
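/*
 * Patch one byte of a 16-bit word and apply the matching
 * incremental checksum fixup; "hi" selects whether the byte is the
 * high or low half of the word.
 */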
2449 pf_patch_8(struct mbuf *m, u_int16_t *cksum, u_int8_t *f, u_int8_t v, bool hi,
2452 u_int16_t old = htons(hi ? (*f << 8) : *f);
2453 u_int16_t new = htons(hi ? ( v << 8) : v);
2460 if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
2463 *cksum = pf_cksum_fixup(*cksum, old, new, udp);
2467 pf_patch_16_unaligned(struct mbuf *m, u_int16_t *cksum, void *f, u_int16_t v,
2468 bool hi, u_int8_t udp)
2470 u_int8_t *fb = (u_int8_t *)f;
2471 u_int8_t *vb = (u_int8_t *)&v;
2473 pf_patch_8(m, cksum, fb++, *vb++, hi, udp);
2474 pf_patch_8(m, cksum, fb++, *vb++, !hi, udp);
2478 pf_patch_32_unaligned(struct mbuf *m, u_int16_t *cksum, void *f, u_int32_t v,
2479 bool hi, u_int8_t udp)
2481 u_int8_t *fb = (u_int8_t *)f;
2482 u_int8_t *vb = (u_int8_t *)&v;
2484 pf_patch_8(m, cksum, fb++, *vb++, hi, udp);
2485 pf_patch_8(m, cksum, fb++, *vb++, !hi, udp);
2486 pf_patch_8(m, cksum, fb++, *vb++, hi, udp);
2487 pf_patch_8(m, cksum, fb++, *vb++, !hi, udp);
2491 pf_proto_cksum_fixup(struct mbuf *m, u_int16_t cksum, u_int16_t old,
2492 u_int16_t new, u_int8_t udp)
2494 if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
2497 return (pf_cksum_fixup(cksum, old, new, udp));
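/*
 * Rewrite an address/port pair, fixing up the IPv4 header checksum
 * and the TCP/UDP checksum as needed.  The protocol checksum fixup
 * is skipped when checksumming is delayed (offloaded).
 */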
2501 pf_change_ap(struct mbuf *m, struct pf_addr *a, u_int16_t *p, u_int16_t *ic,
2502 u_int16_t *pc, struct pf_addr *an, u_int16_t pn, u_int8_t u,
2508 PF_ACPY(&ao, a, af);
2511 if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
2519 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2520 ao.addr16[0], an->addr16[0], 0),
2521 ao.addr16[1], an->addr16[1], 0);
2524 *pc = pf_cksum_fixup(pf_cksum_fixup(*pc,
2525 ao.addr16[0], an->addr16[0], u),
2526 ao.addr16[1], an->addr16[1], u);
2528 *pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
2533 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2534 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2535 pf_cksum_fixup(pf_cksum_fixup(*pc,
2536 ao.addr16[0], an->addr16[0], u),
2537 ao.addr16[1], an->addr16[1], u),
2538 ao.addr16[2], an->addr16[2], u),
2539 ao.addr16[3], an->addr16[3], u),
2540 ao.addr16[4], an->addr16[4], u),
2541 ao.addr16[5], an->addr16[5], u),
2542 ao.addr16[6], an->addr16[6], u),
2543 ao.addr16[7], an->addr16[7], u);
2545 *pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
2550 if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA |
2551 CSUM_DELAY_DATA_IPV6)) {
2558 /* Changes a u_int32_t. Uses a void * so there are no align restrictions */
2560 pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
2564 memcpy(&ao, a, sizeof(ao));
2565 memcpy(a, &an, sizeof(u_int32_t));
2566 *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
2567 ao % 65536, an % 65536, u);
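/*
 * Note on the arithmetic above: for a 32-bit value, ao / 65536 is the
 * high 16-bit half and ao % 65536 the low half, so the checksum is
 * fixed up one 16-bit word at a time.  Example (illustrative): for
 * ao = 0xc0a80001 (192.168.0.1) the halves are 0xc0a8 and 0x0001.
 */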
2571 pf_change_proto_a(struct mbuf *m, void *a, u_int16_t *c, u_int32_t an, u_int8_t udp)
2575 memcpy(&ao, a, sizeof(ao));
2576 memcpy(a, &an, sizeof(u_int32_t));
2578 *c = pf_proto_cksum_fixup(m,
2579 pf_proto_cksum_fixup(m, *c, ao / 65536, an / 65536, udp),
2580 ao % 65536, an % 65536, udp);
2585 pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
2589 PF_ACPY(&ao, a, AF_INET6);
2590 PF_ACPY(a, an, AF_INET6);
2592 *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2593 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2594 pf_cksum_fixup(pf_cksum_fixup(*c,
2595 ao.addr16[0], an->addr16[0], u),
2596 ao.addr16[1], an->addr16[1], u),
2597 ao.addr16[2], an->addr16[2], u),
2598 ao.addr16[3], an->addr16[3], u),
2599 ao.addr16[4], an->addr16[4], u),
2600 ao.addr16[5], an->addr16[5], u),
2601 ao.addr16[6], an->addr16[6], u),
2602 ao.addr16[7], an->addr16[7], u);
2607 pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
2608 struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
2609 u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
2611 struct pf_addr oia, ooa;
2613 PF_ACPY(&oia, ia, af);
2615 PF_ACPY(&ooa, oa, af);
2617 /* Change inner protocol port, fix inner protocol checksum. */
2619 u_int16_t oip = *ip;
2626 *pc = pf_cksum_fixup(*pc, oip, *ip, u);
2627 *ic = pf_cksum_fixup(*ic, oip, *ip, 0);
2629 *ic = pf_cksum_fixup(*ic, opc, *pc, 0);
2631 /* Change inner ip address, fix inner ip and icmp checksums. */
2632 PF_ACPY(ia, na, af);
2636 u_int32_t oh2c = *h2c;
2638 *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
2639 oia.addr16[0], ia->addr16[0], 0),
2640 oia.addr16[1], ia->addr16[1], 0);
2641 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2642 oia.addr16[0], ia->addr16[0], 0),
2643 oia.addr16[1], ia->addr16[1], 0);
2644 *ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
2650 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2651 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2652 pf_cksum_fixup(pf_cksum_fixup(*ic,
2653 oia.addr16[0], ia->addr16[0], u),
2654 oia.addr16[1], ia->addr16[1], u),
2655 oia.addr16[2], ia->addr16[2], u),
2656 oia.addr16[3], ia->addr16[3], u),
2657 oia.addr16[4], ia->addr16[4], u),
2658 oia.addr16[5], ia->addr16[5], u),
2659 oia.addr16[6], ia->addr16[6], u),
2660 oia.addr16[7], ia->addr16[7], u);
2664 /* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
2666 PF_ACPY(oa, na, af);
2670 *hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
2671 ooa.addr16[0], oa->addr16[0], 0),
2672 ooa.addr16[1], oa->addr16[1], 0);
2677 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2678 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2679 pf_cksum_fixup(pf_cksum_fixup(*ic,
2680 ooa.addr16[0], oa->addr16[0], u),
2681 ooa.addr16[1], oa->addr16[1], u),
2682 ooa.addr16[2], oa->addr16[2], u),
2683 ooa.addr16[3], oa->addr16[3], u),
2684 ooa.addr16[4], oa->addr16[4], u),
2685 ooa.addr16[5], oa->addr16[5], u),
2686 ooa.addr16[6], oa->addr16[6], u),
2687 ooa.addr16[7], oa->addr16[7], u);
2695 * Need to modulate the sequence numbers in the TCP SACK option
2696 * (credits to Krzysztof Pfaff for report and patch)
2699 pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
2700 struct tcphdr *th, struct pf_state_peer *dst)
2702 int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
2703 u_int8_t opts[TCP_MAXOLEN], *opt = opts;
2704 int copyback = 0, i, olen;
2705 struct sackblk sack;
2707 #define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2)
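/*
 * For reference, a SACK option is a kind octet (5) and a length octet
 * followed by one or more pairs of 32-bit sequence numbers:
 *
 *     +------+------+---------------------+---------------------+
 *     | 0x05 | len  | left edge (32 bits) | right edge (32 bits)| ...
 *     +------+------+---------------------+---------------------+
 *
 * TCPOLEN_SACK covers one left/right pair; the "+ 2" above accounts
 * for the kind and length octets.
 */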
2708 if (hlen < TCPOLEN_SACKLEN ||
2709 !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
2712 while (hlen >= TCPOLEN_SACKLEN) {
2713 size_t startoff = opt - opts;
2716 case TCPOPT_EOL: /* FALLTHROUGH */
2724 if (olen >= TCPOLEN_SACKLEN) {
2725 for (i = 2; i + TCPOLEN_SACK <= olen;
2726 i += TCPOLEN_SACK) {
2727 memcpy(&sack, &opt[i], sizeof(sack));
2728 pf_patch_32_unaligned(m,
2729 &th->th_sum, &sack.start,
2730 htonl(ntohl(sack.start) - dst->seqdiff),
2731 PF_ALGNMNT(startoff),
2733 pf_patch_32_unaligned(m, &th->th_sum,
2735 htonl(ntohl(sack.end) - dst->seqdiff),
2736 PF_ALGNMNT(startoff),
2738 memcpy(&opt[i], &sack, sizeof(sack));
2752 m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts);
2757 pf_build_tcp(const struct pf_krule *r, sa_family_t af,
2758 const struct pf_addr *saddr, const struct pf_addr *daddr,
2759 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
2760 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
2766 struct ip *h = NULL;
2769 struct ip6_hdr *h6 = NULL;
2773 struct pf_mtag *pf_mtag;
2778 /* maximum segment size tcp option */
2779 tlen = sizeof(struct tcphdr);
2786 len = sizeof(struct ip) + tlen;
2791 len = sizeof(struct ip6_hdr) + tlen;
2795 panic("%s: unsupported af %d", __func__, af);
2798 m = m_gethdr(M_NOWAIT, MT_DATA);
2803 mac_netinet_firewall_send(m);
2805 if ((pf_mtag = pf_get_mtag(m)) == NULL) {
2810 m->m_flags |= M_SKIP_FIREWALL;
2811 pf_mtag->tag = rtag;
2813 if (r != NULL && r->rtableid >= 0)
2814 M_SETFIB(m, r->rtableid);
2817 if (r != NULL && r->qid) {
2818 pf_mtag->qid = r->qid;
2820 /* add hints for ecn */
2821 pf_mtag->hdr = mtod(m, struct ip *);
2824 m->m_data += max_linkhdr;
2825 m->m_pkthdr.len = m->m_len = len;
2826 /* The rest of the stack assumes a rcvif, so provide one.
2827 * This is a locally generated packet, so .. close enough. */
2828 m->m_pkthdr.rcvif = V_loif;
2829 bzero(m->m_data, len);
2833 h = mtod(m, struct ip *);
2835 /* IP header fields included in the TCP checksum */
2836 h->ip_p = IPPROTO_TCP;
2837 h->ip_len = htons(tlen);
2838 h->ip_src.s_addr = saddr->v4.s_addr;
2839 h->ip_dst.s_addr = daddr->v4.s_addr;
2841 th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
2846 h6 = mtod(m, struct ip6_hdr *);
2848 /* IP header fields included in the TCP checksum */
2849 h6->ip6_nxt = IPPROTO_TCP;
2850 h6->ip6_plen = htons(tlen);
2851 memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
2852 memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));
2854 th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
2860 th->th_sport = sport;
2861 th->th_dport = dport;
2862 th->th_seq = htonl(seq);
2863 th->th_ack = htonl(ack);
2864 th->th_off = tlen >> 2;
2865 th->th_flags = flags;
2866 th->th_win = htons(win);
2869 opt = (char *)(th + 1);
2870 opt[0] = TCPOPT_MAXSEG;
2873 bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
2880 th->th_sum = in_cksum(m, len);
2882 /* Finish the IP header */
2884 h->ip_hl = sizeof(*h) >> 2;
2885 h->ip_tos = IPTOS_LOWDELAY;
2886 h->ip_off = htons(V_path_mtu_discovery ? IP_DF : 0);
2887 h->ip_len = htons(len);
2888 h->ip_ttl = ttl ? ttl : V_ip_defttl;
2895 th->th_sum = in6_cksum(m, IPPROTO_TCP,
2896 sizeof(struct ip6_hdr), tlen);
2898 h6->ip6_vfc |= IPV6_VERSION;
2899 h6->ip6_hlim = IPV6_DEFHLIM;
2908 pf_send_tcp(const struct pf_krule *r, sa_family_t af,
2909 const struct pf_addr *saddr, const struct pf_addr *daddr,
2910 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
2911 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
2914 struct pf_send_entry *pfse;
2917 m = pf_build_tcp(r, af, saddr, daddr, sport, dport, seq, ack, flags,
2918 win, mss, ttl, tag, rtag);
2922 /* Allocate outgoing queue entry, mbuf and mbuf tag. */
2923 pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
2932 pfse->pfse_type = PFSE_IP;
2937 pfse->pfse_type = PFSE_IP6;
2947 pf_return(struct pf_krule *r, struct pf_krule *nr, struct pf_pdesc *pd,
2948 struct pf_state_key *sk, int off, struct mbuf *m, struct tcphdr *th,
2949 struct pfi_kkif *kif, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen,
2952 struct pf_addr * const saddr = pd->src;
2953 struct pf_addr * const daddr = pd->dst;
2954 sa_family_t af = pd->af;
2956 /* undo NAT changes, if they have taken place */
2958 PF_ACPY(saddr, &sk->addr[pd->sidx], af);
2959 PF_ACPY(daddr, &sk->addr[pd->didx], af);
2961 *pd->sport = sk->port[pd->sidx];
2963 *pd->dport = sk->port[pd->didx];
2965 *pd->proto_sum = bproto_sum;
2967 *pd->ip_sum = bip_sum;
2968 m_copyback(m, off, hdrlen, pd->hdr.any);
2970 if (pd->proto == IPPROTO_TCP &&
2971 ((r->rule_flag & PFRULE_RETURNRST) ||
2972 (r->rule_flag & PFRULE_RETURN)) &&
2973 !(th->th_flags & TH_RST)) {
2974 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
2986 h4 = mtod(m, struct ip *);
2987 len = ntohs(h4->ip_len) - off;
2992 h6 = mtod(m, struct ip6_hdr *);
2993 len = ntohs(h6->ip6_plen) - (off - sizeof(*h6));
2998 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
2999 REASON_SET(reason, PFRES_PROTCKSUM);
3001 if (th->th_flags & TH_SYN)
3003 if (th->th_flags & TH_FIN)
3005 pf_send_tcp(r, af, pd->dst,
3006 pd->src, th->th_dport, th->th_sport,
3007 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
3008 r->return_ttl, 1, 0);
3010 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
3012 pf_send_icmp(m, r->return_icmp >> 8,
3013 r->return_icmp & 255, af, r);
3014 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
3016 pf_send_icmp(m, r->return_icmp6 >> 8,
3017 r->return_icmp6 & 255, af, r);
3021 pf_match_ieee8021q_pcp(u_int8_t prio, struct mbuf *m)
3026 mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL);
3030 if (prio == PF_PRIO_ZERO)
3033 mpcp = *(uint8_t *)(mtag + 1);
3035 return (mpcp == prio);
3039 pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
3042 struct pf_send_entry *pfse;
3044 struct pf_mtag *pf_mtag;
3046 /* Allocate outgoing queue entry, mbuf and mbuf tag. */
3047 pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
3051 if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) {
3052 free(pfse, M_PFTEMP);
3056 if ((pf_mtag = pf_get_mtag(m0)) == NULL) {
3057 free(pfse, M_PFTEMP);
3061 m0->m_flags |= M_SKIP_FIREWALL;
3063 if (r->rtableid >= 0)
3064 M_SETFIB(m0, r->rtableid);
3068 pf_mtag->qid = r->qid;
3069 /* add hints for ecn */
3070 pf_mtag->hdr = mtod(m0, struct ip *);
3077 pfse->pfse_type = PFSE_ICMP;
3082 pfse->pfse_type = PFSE_ICMP6;
3087 pfse->icmpopts.type = type;
3088 pfse->icmpopts.code = code;
3093 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
3094 * If n is 0, they match if they are equal. If n is != 0, they match if they
3095 * are different.
3096 */
3098 pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
3099 struct pf_addr *b, sa_family_t af)
3106 if ((a->addr32[0] & m->addr32[0]) ==
3107 (b->addr32[0] & m->addr32[0]))
3113 if (((a->addr32[0] & m->addr32[0]) ==
3114 (b->addr32[0] & m->addr32[0])) &&
3115 ((a->addr32[1] & m->addr32[1]) ==
3116 (b->addr32[1] & m->addr32[1])) &&
3117 ((a->addr32[2] & m->addr32[2]) ==
3118 (b->addr32[2] & m->addr32[2])) &&
3119 ((a->addr32[3] & m->addr32[3]) ==
3120 (b->addr32[3] & m->addr32[3])))
3139 * Return 1 if b <= a <= e, otherwise return 0.
3142 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
3143 struct pf_addr *a, sa_family_t af)
3148 if ((ntohl(a->addr32[0]) < ntohl(b->addr32[0])) ||
3149 (ntohl(a->addr32[0]) > ntohl(e->addr32[0])))
3158 for (i = 0; i < 4; ++i)
3159 if (ntohl(a->addr32[i]) > ntohl(b->addr32[i]))
3161 else if (ntohl(a->addr32[i]) < ntohl(b->addr32[i]))
3164 for (i = 0; i < 4; ++i)
3165 if (ntohl(a->addr32[i]) < ntohl(e->addr32[i]))
3167 else if (ntohl(a->addr32[i]) > ntohl(e->addr32[i]))
3177 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
3181 return ((p > a1) && (p < a2));
3183 return ((p < a1) || (p > a2));
3185 return ((p >= a1) && (p <= a2));
3199 return (0); /* never reached */
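/*
 * Rough summary of the binary range operators above (a sketch; the
 * single-argument comparisons are handled by the same switch):
 *
 *     PF_OP_IRG    a1 <  p <  a2       (strictly inside the range)
 *     PF_OP_XRG    p < a1 || p > a2    (outside the range)
 *     PF_OP_RRG    a1 <= p <= a2       (inclusive range)
 *
 * e.g. pf_match(PF_OP_RRG, 80, 90, 85) returns 1.
 */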
3203 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
3208 return (pf_match(op, a1, a2, p));
3212 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
3214 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
3216 return (pf_match(op, a1, a2, u));
3220 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
3222 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
3224 return (pf_match(op, a1, a2, g));
3228 pf_match_tag(struct mbuf *m, struct pf_krule *r, int *tag, int mtag)
3233 return ((!r->match_tag_not && r->match_tag == *tag) ||
3234 (r->match_tag_not && r->match_tag != *tag));
3238 pf_tag_packet(struct mbuf *m, struct pf_pdesc *pd, int tag)
3241 KASSERT(tag > 0, ("%s: tag %d", __func__, tag));
3243 if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(m)) == NULL))
3246 pd->pf_mtag->tag = tag;
3251 #define PF_ANCHOR_STACKSIZE 32
3252 struct pf_kanchor_stackframe {
3253 struct pf_kruleset *rs;
3254 struct pf_krule *r; /* XXX: + match bit */
3255 struct pf_kanchor *child;
3259 * XXX: We rely on malloc(9) returning pointer aligned addresses.
3261 #define PF_ANCHORSTACK_MATCH 0x00000001
3262 #define PF_ANCHORSTACK_MASK (PF_ANCHORSTACK_MATCH)
3264 #define PF_ANCHOR_MATCH(f) ((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH)
3265 #define PF_ANCHOR_RULE(f) (struct pf_krule *) \
3266 ((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK)
3267 #define PF_ANCHOR_SET_MATCH(f) do { (f)->r = (void *) \
3268 ((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH); \
3269 } while (0)
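/*
 * The macros above tag a one-bit "match" flag into the low bit of the
 * frame's rule pointer, which is only safe because malloc(9) returns
 * pointer-aligned storage (the XXX above).  Illustration: a rule at
 * 0x...40 becomes 0x...41 once PF_ANCHOR_SET_MATCH() runs, and
 * PF_ANCHOR_RULE() masks the bit off again before dereferencing.
 */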
3272 pf_step_into_anchor(struct pf_kanchor_stackframe *stack, int *depth,
3273 struct pf_kruleset **rs, int n, struct pf_krule **r, struct pf_krule **a,
3276 struct pf_kanchor_stackframe *f;
3282 if (*depth >= PF_ANCHOR_STACKSIZE) {
3283 printf("%s: anchor stack overflow on %s\n",
3284 __func__, (*r)->anchor->name);
3285 *r = TAILQ_NEXT(*r, entries);
3287 } else if (*depth == 0 && a != NULL)
3289 f = stack + (*depth)++;
3292 if ((*r)->anchor_wildcard) {
3293 struct pf_kanchor_node *parent = &(*r)->anchor->children;
3295 if ((f->child = RB_MIN(pf_kanchor_node, parent)) == NULL) {
3299 *rs = &f->child->ruleset;
3302 *rs = &(*r)->anchor->ruleset;
3304 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
3308 pf_step_out_of_anchor(struct pf_kanchor_stackframe *stack, int *depth,
3309 struct pf_kruleset **rs, int n, struct pf_krule **r, struct pf_krule **a,
3312 struct pf_kanchor_stackframe *f;
3313 struct pf_krule *fr;
3321 f = stack + *depth - 1;
3322 fr = PF_ANCHOR_RULE(f);
3323 if (f->child != NULL) {
3324 struct pf_kanchor_node *parent;
3327 * This block traverses through
3328 * a wildcard anchor.
3330 parent = &fr->anchor->children;
3331 if (match != NULL && *match) {
3333 * If any of "*" matched, then
3334 * "foo/ *" matched, mark frame
3337 PF_ANCHOR_SET_MATCH(f);
3340 f->child = RB_NEXT(pf_kanchor_node, parent, f->child);
3341 if (f->child != NULL) {
3342 *rs = &f->child->ruleset;
3343 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
3351 if (*depth == 0 && a != NULL)
3354 if (PF_ANCHOR_MATCH(f) || (match != NULL && *match))
3356 *r = TAILQ_NEXT(fr, entries);
3357 } while (*r == NULL);
3364 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
3365 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
3370 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3371 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
3375 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3376 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
3377 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
3378 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
3379 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
3380 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
3381 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
3382 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
3388 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
3393 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
3397 if (addr->addr32[3] == 0xffffffff) {
3398 addr->addr32[3] = 0;
3399 if (addr->addr32[2] == 0xffffffff) {
3400 addr->addr32[2] = 0;
3401 if (addr->addr32[1] == 0xffffffff) {
3402 addr->addr32[1] = 0;
3404 htonl(ntohl(addr->addr32[0]) + 1);
3407 htonl(ntohl(addr->addr32[1]) + 1);
3410 htonl(ntohl(addr->addr32[2]) + 1);
3413 htonl(ntohl(addr->addr32[3]) + 1);
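/*
 * Example of the ripple carry above (illustrative): incrementing
 * ::ffff:ffff clears addr32[3] and, since addr32[2] is zero, just bumps
 * addr32[2], yielding ::1:0:0.  The carry only propagates further when
 * a 32-bit limb is all ones.
 */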
3420 pf_rule_to_actions(struct pf_krule *r, struct pf_rule_actions *a)
3427 a->dnpipe = r->dnpipe;
3429 a->dnpipe = r->dnrpipe;
3430 if (r->free_flags & PFRULE_DN_IS_PIPE)
3431 a->flags |= PFRULE_DN_IS_PIPE;
3435 pf_socket_lookup(int direction, struct pf_pdesc *pd, struct mbuf *m)
3437 struct pf_addr *saddr, *daddr;
3438 u_int16_t sport, dport;
3439 struct inpcbinfo *pi;
3442 pd->lookup.uid = UID_MAX;
3443 pd->lookup.gid = GID_MAX;
3445 switch (pd->proto) {
3447 sport = pd->hdr.tcp.th_sport;
3448 dport = pd->hdr.tcp.th_dport;
3452 sport = pd->hdr.udp.uh_sport;
3453 dport = pd->hdr.udp.uh_dport;
3459 if (direction == PF_IN) {
3474 inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4,
3475 dport, INPLOOKUP_RLOCKPCB, NULL, m);
3477 inp = in_pcblookup_mbuf(pi, saddr->v4, sport,
3478 daddr->v4, dport, INPLOOKUP_WILDCARD |
3479 INPLOOKUP_RLOCKPCB, NULL, m);
3487 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6,
3488 dport, INPLOOKUP_RLOCKPCB, NULL, m);
3490 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport,
3491 &daddr->v6, dport, INPLOOKUP_WILDCARD |
3492 INPLOOKUP_RLOCKPCB, NULL, m);
3502 INP_RLOCK_ASSERT(inp);
3503 pd->lookup.uid = inp->inp_cred->cr_uid;
3504 pd->lookup.gid = inp->inp_cred->cr_groups[0];
3511 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3515 u_int8_t *opt, optlen;
3516 u_int8_t wscale = 0;
3518 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3519 if (hlen <= sizeof(struct tcphdr))
3521 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3523 opt = hdr + sizeof(struct tcphdr);
3524 hlen -= sizeof(struct tcphdr);
3534 if (wscale > TCP_MAX_WINSHIFT)
3535 wscale = TCP_MAX_WINSHIFT;
3536 wscale |= PF_WSCALE_FLAG;
3551 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3555 u_int8_t *opt, optlen;
3556 u_int16_t mss = V_tcp_mssdflt;
3558 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3559 if (hlen <= sizeof(struct tcphdr))
3561 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3563 opt = hdr + sizeof(struct tcphdr);
3564 hlen -= sizeof(struct tcphdr);
3565 while (hlen >= TCPOLEN_MAXSEG) {
3573 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
3589 pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
3591 struct nhop_object *nh;
3593 struct in6_addr dst6;
3604 hlen = sizeof(struct ip);
3605 nh = fib4_lookup(rtableid, addr->v4, 0, 0, 0);
3607 mss = nh->nh_mtu - hlen - sizeof(struct tcphdr);
3612 hlen = sizeof(struct ip6_hdr);
3613 in6_splitscope(&addr->v6, &dst6, &scopeid);
3614 nh = fib6_lookup(rtableid, &dst6, scopeid, 0, 0);
3616 mss = nh->nh_mtu - hlen - sizeof(struct tcphdr);
3621 mss = max(V_tcp_mssdflt, mss);
3622 mss = min(mss, offer);
3623 mss = max(mss, 64); /* sanity - at least max opt space */
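/*
 * Worked example (illustrative): an IPv4 nexthop with nh_mtu = 1500
 * gives mss = 1500 - 20 - 20 = 1460.  The clamps above then raise it to
 * at least V_tcp_mssdflt, cap it at the peer's offer, and never let it
 * drop below 64 octets.
 */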
3628 pf_tcp_iss(struct pf_pdesc *pd)
3631 u_int32_t digest[4];
3633 if (V_pf_tcp_secret_init == 0) {
3634 arc4random_buf(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret));
3635 MD5Init(&V_pf_tcp_secret_ctx);
3636 MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret,
3637 sizeof(V_pf_tcp_secret));
3638 V_pf_tcp_secret_init = 1;
3641 ctx = V_pf_tcp_secret_ctx;
3643 MD5Update(&ctx, (char *)&pd->hdr.tcp.th_sport, sizeof(u_short));
3644 MD5Update(&ctx, (char *)&pd->hdr.tcp.th_dport, sizeof(u_short));
3645 if (pd->af == AF_INET6) {
3646 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
3647 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
3649 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
3650 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
3652 MD5Final((u_char *)digest, &ctx);
3653 V_pf_tcp_iss_off += 4096;
3654 #define ISN_RANDOM_INCREMENT (4096 - 1)
3655 return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) +
3657 #undef ISN_RANDOM_INCREMENT
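/*
 * The ISS produced above is, loosely, MD5(secret, ports, addresses)
 * plus a small random term plus a monotonically advancing offset: the
 * hash separates the sequence space per connection tuple, the offset
 * keeps successive ISSs from colliding, and the random term keeps the
 * exact value unpredictable to an off-path attacker.
 */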
3661 pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm, int direction,
3662 struct pfi_kkif *kif, struct mbuf *m, int off, struct pf_pdesc *pd,
3663 struct pf_krule **am, struct pf_kruleset **rsm, struct inpcb *inp)
3665 struct pf_krule *nr = NULL;
3666 struct pf_addr * const saddr = pd->src;
3667 struct pf_addr * const daddr = pd->dst;
3668 sa_family_t af = pd->af;
3669 struct pf_krule *r, *a = NULL;
3670 struct pf_kruleset *ruleset = NULL;
3671 struct pf_ksrc_node *nsn = NULL;
3672 struct tcphdr *th = &pd->hdr.tcp;
3673 struct pf_state_key *sk = NULL, *nk = NULL;
3675 int rewrite = 0, hdrlen = 0;
3676 int tag = -1, rtableid = -1;
3680 u_int16_t sport = 0, dport = 0;
3681 u_int16_t bproto_sum = 0, bip_sum = 0;
3682 u_int8_t icmptype = 0, icmpcode = 0;
3683 struct pf_kanchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
3688 INP_LOCK_ASSERT(inp);
3689 pd->lookup.uid = inp->inp_cred->cr_uid;
3690 pd->lookup.gid = inp->inp_cred->cr_groups[0];
3691 pd->lookup.done = 1;
3694 switch (pd->proto) {
3696 sport = th->th_sport;
3697 dport = th->th_dport;
3698 hdrlen = sizeof(*th);
3701 sport = pd->hdr.udp.uh_sport;
3702 dport = pd->hdr.udp.uh_dport;
3703 hdrlen = sizeof(pd->hdr.udp);
3707 if (pd->af != AF_INET)
3709 sport = dport = pd->hdr.icmp.icmp_id;
3710 hdrlen = sizeof(pd->hdr.icmp);
3711 icmptype = pd->hdr.icmp.icmp_type;
3712 icmpcode = pd->hdr.icmp.icmp_code;
3714 if (icmptype == ICMP_UNREACH ||
3715 icmptype == ICMP_SOURCEQUENCH ||
3716 icmptype == ICMP_REDIRECT ||
3717 icmptype == ICMP_TIMXCEED ||
3718 icmptype == ICMP_PARAMPROB)
3723 case IPPROTO_ICMPV6:
3726 sport = dport = pd->hdr.icmp6.icmp6_id;
3727 hdrlen = sizeof(pd->hdr.icmp6);
3728 icmptype = pd->hdr.icmp6.icmp6_type;
3729 icmpcode = pd->hdr.icmp6.icmp6_code;
3731 if (icmptype == ICMP6_DST_UNREACH ||
3732 icmptype == ICMP6_PACKET_TOO_BIG ||
3733 icmptype == ICMP6_TIME_EXCEEDED ||
3734 icmptype == ICMP6_PARAM_PROB)
3739 sport = dport = hdrlen = 0;
3743 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3745 /* check packet for BINAT/NAT/RDR */
3746 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, &sk,
3747 &nk, saddr, daddr, sport, dport, anchor_stack)) != NULL) {
3748 KASSERT(sk != NULL, ("%s: null sk", __func__));
3749 KASSERT(nk != NULL, ("%s: null nk", __func__));
3752 PFLOG_PACKET(kif, m, af, direction, PFRES_MATCH, nr, a,
3757 bip_sum = *pd->ip_sum;
3759 switch (pd->proto) {
3761 bproto_sum = th->th_sum;
3762 pd->proto_sum = &th->th_sum;
3764 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3765 nk->port[pd->sidx] != sport) {
3766 pf_change_ap(m, saddr, &th->th_sport, pd->ip_sum,
3767 &th->th_sum, &nk->addr[pd->sidx],
3768 nk->port[pd->sidx], 0, af);
3769 pd->sport = &th->th_sport;
3770 sport = th->th_sport;
3773 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3774 nk->port[pd->didx] != dport) {
3775 pf_change_ap(m, daddr, &th->th_dport, pd->ip_sum,
3776 &th->th_sum, &nk->addr[pd->didx],
3777 nk->port[pd->didx], 0, af);
3778 dport = th->th_dport;
3779 pd->dport = &th->th_dport;
3784 bproto_sum = pd->hdr.udp.uh_sum;
3785 pd->proto_sum = &pd->hdr.udp.uh_sum;
3787 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3788 nk->port[pd->sidx] != sport) {
3789 pf_change_ap(m, saddr, &pd->hdr.udp.uh_sport,
3790 pd->ip_sum, &pd->hdr.udp.uh_sum,
3791 &nk->addr[pd->sidx],
3792 nk->port[pd->sidx], 1, af);
3793 sport = pd->hdr.udp.uh_sport;
3794 pd->sport = &pd->hdr.udp.uh_sport;
3797 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3798 nk->port[pd->didx] != dport) {
3799 pf_change_ap(m, daddr, &pd->hdr.udp.uh_dport,
3800 pd->ip_sum, &pd->hdr.udp.uh_sum,
3801 &nk->addr[pd->didx],
3802 nk->port[pd->didx], 1, af);
3803 dport = pd->hdr.udp.uh_dport;
3804 pd->dport = &pd->hdr.udp.uh_dport;
3810 nk->port[0] = nk->port[1];
3811 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
3812 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3813 nk->addr[pd->sidx].v4.s_addr, 0);
3815 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
3816 pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
3817 nk->addr[pd->didx].v4.s_addr, 0);
3819 if (nk->port[1] != pd->hdr.icmp.icmp_id) {
3820 pd->hdr.icmp.icmp_cksum = pf_cksum_fixup(
3821 pd->hdr.icmp.icmp_cksum, sport,
3823 pd->hdr.icmp.icmp_id = nk->port[1];
3824 pd->sport = &pd->hdr.icmp.icmp_id;
3826 m_copyback(m, off, ICMP_MINLEN, (caddr_t)&pd->hdr.icmp);
3830 case IPPROTO_ICMPV6:
3831 nk->port[0] = nk->port[1];
3832 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3833 pf_change_a6(saddr, &pd->hdr.icmp6.icmp6_cksum,
3834 &nk->addr[pd->sidx], 0);
3836 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3837 pf_change_a6(daddr, &pd->hdr.icmp6.icmp6_cksum,
3838 &nk->addr[pd->didx], 0);
3847 &nk->addr[pd->sidx], AF_INET))
3848 pf_change_a(&saddr->v4.s_addr,
3850 nk->addr[pd->sidx].v4.s_addr, 0);
3853 &nk->addr[pd->didx], AF_INET))
3854 pf_change_a(&daddr->v4.s_addr,
3856 nk->addr[pd->didx].v4.s_addr, 0);
3862 &nk->addr[pd->sidx], AF_INET6))
3863 PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3866 &nk->addr[pd->didx], AF_INET6))
3867 PF_ACPY(daddr, &nk->addr[pd->didx], af);
3879 pf_counter_u64_add(&r->evaluations, 1);
3880 if (pfi_kkif_match(r->kif, kif) == r->ifnot)
3881 r = r->skip[PF_SKIP_IFP].ptr;
3882 else if (r->direction && r->direction != direction)
3883 r = r->skip[PF_SKIP_DIR].ptr;
3884 else if (r->af && r->af != af)
3885 r = r->skip[PF_SKIP_AF].ptr;
3886 else if (r->proto && r->proto != pd->proto)
3887 r = r->skip[PF_SKIP_PROTO].ptr;
3888 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3889 r->src.neg, kif, M_GETFIB(m)))
3890 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3891 /* tcp/udp only. port_op always 0 in other cases */
3892 else if (r->src.port_op && !pf_match_port(r->src.port_op,
3893 r->src.port[0], r->src.port[1], sport))
3894 r = r->skip[PF_SKIP_SRC_PORT].ptr;
3895 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3896 r->dst.neg, NULL, M_GETFIB(m)))
3897 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3898 /* tcp/udp only. port_op always 0 in other cases */
3899 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3900 r->dst.port[0], r->dst.port[1], dport))
3901 r = r->skip[PF_SKIP_DST_PORT].ptr;
3902 /* icmp only. type always 0 in other cases */
3903 else if (r->type && r->type != icmptype + 1)
3904 r = TAILQ_NEXT(r, entries);
3905 /* icmp only. code always 0 in other cases */
3906 else if (r->code && r->code != icmpcode + 1)
3907 r = TAILQ_NEXT(r, entries);
3908 else if (r->tos && !(r->tos == pd->tos))
3909 r = TAILQ_NEXT(r, entries);
3910 else if (r->rule_flag & PFRULE_FRAGMENT)
3911 r = TAILQ_NEXT(r, entries);
3912 else if (pd->proto == IPPROTO_TCP &&
3913 (r->flagset & th->th_flags) != r->flags)
3914 r = TAILQ_NEXT(r, entries);
3915 /* tcp/udp only. uid.op always 0 in other cases */
3916 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3917 pf_socket_lookup(direction, pd, m), 1)) &&
3918 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3920 r = TAILQ_NEXT(r, entries);
3921 /* tcp/udp only. gid.op always 0 in other cases */
3922 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
3923 pf_socket_lookup(direction, pd, m), 1)) &&
3924 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
3926 r = TAILQ_NEXT(r, entries);
3928 !pf_match_ieee8021q_pcp(r->prio, m))
3929 r = TAILQ_NEXT(r, entries);
3931 r->prob <= arc4random())
3932 r = TAILQ_NEXT(r, entries);
3933 else if (r->match_tag && !pf_match_tag(m, r, &tag,
3934 pd->pf_mtag ? pd->pf_mtag->tag : 0))
3935 r = TAILQ_NEXT(r, entries);
3936 else if (r->os_fingerprint != PF_OSFP_ANY &&
3937 (pd->proto != IPPROTO_TCP || !pf_osfp_match(
3938 pf_osfp_fingerprint(pd, m, off, th),
3939 r->os_fingerprint)))
3940 r = TAILQ_NEXT(r, entries);
3944 if (r->rtableid >= 0)
3945 rtableid = r->rtableid;
3946 if (r->anchor == NULL) {
3947 if (r->action == PF_MATCH) {
3948 pf_counter_u64_critical_enter();
3949 pf_counter_u64_add_protected(&r->packets[direction == PF_OUT], 1);
3950 pf_counter_u64_add_protected(&r->bytes[direction == PF_OUT], pd->tot_len);
3951 pf_counter_u64_critical_exit();
3952 pf_rule_to_actions(r, &pd->act);
3954 PFLOG_PACKET(kif, m, af,
3955 direction, PFRES_MATCH, r,
3965 r = TAILQ_NEXT(r, entries);
3967 pf_step_into_anchor(anchor_stack, &asd,
3968 &ruleset, PF_RULESET_FILTER, &r, &a,
3971 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3972 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3979 REASON_SET(&reason, PFRES_MATCH);
3981 /* apply actions for last matching pass/block rule */
3982 pf_rule_to_actions(r, &pd->act);
3986 m_copyback(m, off, hdrlen, pd->hdr.any);
3987 PFLOG_PACKET(kif, m, af, direction, reason, r, a,
3991 if ((r->action == PF_DROP) &&
3992 ((r->rule_flag & PFRULE_RETURNRST) ||
3993 (r->rule_flag & PFRULE_RETURNICMP) ||
3994 (r->rule_flag & PFRULE_RETURN))) {
3995 pf_return(r, nr, pd, sk, off, m, th, kif, bproto_sum,
3996 bip_sum, hdrlen, &reason);
3999 if (r->action == PF_DROP)
4002 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
4003 REASON_SET(&reason, PFRES_MEMORY);
4007 M_SETFIB(m, rtableid);
4009 if (!state_icmp && (r->keep_state || nr != NULL ||
4010 (pd->flags & PFDESC_TCP_NORM))) {
4012 action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off,
4013 sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum,
4015 if (action != PF_PASS) {
4016 if (action == PF_DROP &&
4017 (r->rule_flag & PFRULE_RETURN))
4018 pf_return(r, nr, pd, sk, off, m, th, kif,
4019 bproto_sum, bip_sum, hdrlen, &reason);
4024 uma_zfree(V_pf_state_key_z, sk);
4026 uma_zfree(V_pf_state_key_z, nk);
4029 /* copy back packet headers if we performed NAT operations */
4031 m_copyback(m, off, hdrlen, pd->hdr.any);
4033 if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) &&
4034 direction == PF_OUT &&
4035 V_pfsync_defer_ptr != NULL && V_pfsync_defer_ptr(*sm, m))
4037 * We want the state created, but we don't
4038 * want to send this in case a partner
4039 * firewall has to know about it to allow
4040 * replies through it.
4048 uma_zfree(V_pf_state_key_z, sk);
4050 uma_zfree(V_pf_state_key_z, nk);
4055 pf_create_state(struct pf_krule *r, struct pf_krule *nr, struct pf_krule *a,
4056 struct pf_pdesc *pd, struct pf_ksrc_node *nsn, struct pf_state_key *nk,
4057 struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport,
4058 u_int16_t dport, int *rewrite, struct pfi_kkif *kif, struct pf_kstate **sm,
4059 int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen)
4061 struct pf_kstate *s = NULL;
4062 struct pf_ksrc_node *sn = NULL;
4063 struct tcphdr *th = &pd->hdr.tcp;
4064 u_int16_t mss = V_tcp_mssdflt;
4067 /* check maximums */
4068 if (r->max_states &&
4069 (counter_u64_fetch(r->states_cur) >= r->max_states)) {
4070 counter_u64_add(V_pf_status.lcounters[LCNT_STATES], 1);
4071 REASON_SET(&reason, PFRES_MAXSTATES);
4074 /* src node for filter rule */
4075 if ((r->rule_flag & PFRULE_SRCTRACK ||
4076 r->rpool.opts & PF_POOL_STICKYADDR) &&
4077 pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
4078 REASON_SET(&reason, PFRES_SRCLIMIT);
4081 /* src node for translation rule */
4082 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
4083 pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
4084 REASON_SET(&reason, PFRES_SRCLIMIT);
4087 s = pf_alloc_state(M_NOWAIT);
4089 REASON_SET(&reason, PFRES_MEMORY);
4093 s->nat_rule.ptr = nr;
4095 STATE_INC_COUNTERS(s);
4097 s->state_flags |= PFSTATE_ALLOWOPTS;
4098 if (r->rule_flag & PFRULE_STATESLOPPY)
4099 s->state_flags |= PFSTATE_SLOPPY;
4100 s->log = r->log & PF_LOG_ALL;
4101 s->sync_state = PFSYNC_S_NONE;
4102 s->qid = pd->act.qid;
4103 s->pqid = pd->act.pqid;
4104 s->dnpipe = pd->act.dnpipe;
4105 s->dnrpipe = pd->act.dnrpipe;
4106 s->state_flags |= pd->act.flags;
4108 s->log |= nr->log & PF_LOG_ALL;
4109 switch (pd->proto) {
4111 s->src.seqlo = ntohl(th->th_seq);
4112 s->src.seqhi = s->src.seqlo + pd->p_len + 1;
4113 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
4114 r->keep_state == PF_STATE_MODULATE) {
4115 /* Generate sequence number modulator */
4116 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
4119 pf_change_proto_a(m, &th->th_seq, &th->th_sum,
4120 htonl(s->src.seqlo + s->src.seqdiff), 0);
4124 if (th->th_flags & TH_SYN) {
4126 s->src.wscale = pf_get_wscale(m, off,
4127 th->th_off, pd->af);
4129 s->src.max_win = MAX(ntohs(th->th_win), 1);
4130 if (s->src.wscale & PF_WSCALE_MASK) {
4131 /* Remove scale factor from initial window */
4132 int win = s->src.max_win;
4133 win += 1 << (s->src.wscale & PF_WSCALE_MASK);
4134 s->src.max_win = (win - 1) >>
4135 (s->src.wscale & PF_WSCALE_MASK);
4137 if (th->th_flags & TH_FIN)
4141 pf_set_protostate(s, PF_PEER_SRC, TCPS_SYN_SENT);
4142 pf_set_protostate(s, PF_PEER_DST, TCPS_CLOSED);
4143 s->timeout = PFTM_TCP_FIRST_PACKET;
4144 atomic_add_32(&V_pf_status.states_halfopen, 1);
4147 pf_set_protostate(s, PF_PEER_SRC, PFUDPS_SINGLE);
4148 pf_set_protostate(s, PF_PEER_DST, PFUDPS_NO_TRAFFIC);
4149 s->timeout = PFTM_UDP_FIRST_PACKET;
4153 case IPPROTO_ICMPV6:
4155 s->timeout = PFTM_ICMP_FIRST_PACKET;
4158 pf_set_protostate(s, PF_PEER_SRC, PFOTHERS_SINGLE);
4159 pf_set_protostate(s, PF_PEER_DST, PFOTHERS_NO_TRAFFIC);
4160 s->timeout = PFTM_OTHER_FIRST_PACKET;
4164 if (pf_map_addr(pd->af, r, pd->src, &s->rt_addr, NULL, &sn)) {
4165 REASON_SET(&reason, PFRES_MAPFAILED);
4166 pf_src_tree_remove_state(s);
4167 s->timeout = PFTM_UNLINKED;
4168 STATE_DEC_COUNTERS(s);
4172 s->rt_kif = r->rpool.cur->kif;
4175 s->creation = time_uptime;
4176 s->expire = time_uptime;
4181 /* XXX We only modify one side for now. */
4182 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
4183 s->nat_src_node = nsn;
4185 if (pd->proto == IPPROTO_TCP) {
4186 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
4187 off, pd, th, &s->src, &s->dst)) {
4188 REASON_SET(&reason, PFRES_MEMORY);
4189 pf_src_tree_remove_state(s);
4190 s->timeout = PFTM_UNLINKED;
4191 STATE_DEC_COUNTERS(s);
4195 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
4196 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
4197 &s->src, &s->dst, rewrite)) {
4198 /* This really shouldn't happen!!! */
4199 DPFPRINTF(PF_DEBUG_URGENT,
4200 ("pf_normalize_tcp_stateful failed on first "
4202 pf_src_tree_remove_state(s);
4203 s->timeout = PFTM_UNLINKED;
4204 STATE_DEC_COUNTERS(s);
4209 s->direction = pd->dir;
4212 * sk/nk could already have been set up by pf_get_translation().
4215 KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p",
4216 __func__, nr, sk, nk));
4217 sk = pf_state_key_setup(pd, pd->src, pd->dst, sport, dport);
4222 KASSERT((sk != NULL && nk != NULL), ("%s: nr %p sk %p, nk %p",
4223 __func__, nr, sk, nk));
4225 /* Swap sk/nk for PF_OUT. */
4226 if (pf_state_insert(BOUND_IFACE(r, kif), kif,
4227 (pd->dir == PF_IN) ? sk : nk,
4228 (pd->dir == PF_IN) ? nk : sk, s)) {
4229 REASON_SET(&reason, PFRES_STATEINS);
4230 pf_src_tree_remove_state(s);
4231 s->timeout = PFTM_UNLINKED;
4232 STATE_DEC_COUNTERS(s);
4240 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
4241 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
4242 pf_set_protostate(s, PF_PEER_SRC, PF_TCPS_PROXY_SRC);
4243 /* undo NAT changes, if they have taken place */
4245 struct pf_state_key *skt = s->key[PF_SK_WIRE];
4246 if (pd->dir == PF_OUT)
4247 skt = s->key[PF_SK_STACK];
4248 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
4249 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
4251 *pd->sport = skt->port[pd->sidx];
4253 *pd->dport = skt->port[pd->didx];
4255 *pd->proto_sum = bproto_sum;
4257 *pd->ip_sum = bip_sum;
4258 m_copyback(m, off, hdrlen, pd->hdr.any);
4260 s->src.seqhi = htonl(arc4random());
4261 /* Find mss option */
4262 int rtid = M_GETFIB(m);
4263 mss = pf_get_mss(m, off, th->th_off, pd->af);
4264 mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
4265 mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
4267 pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
4268 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
4269 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0);
4270 REASON_SET(&reason, PFRES_SYNPROXY);
4271 return (PF_SYNPROXY_DROP);
4278 uma_zfree(V_pf_state_key_z, sk);
4280 uma_zfree(V_pf_state_key_z, nk);
4283 struct pf_srchash *sh;
4285 sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
4286 PF_HASHROW_LOCK(sh);
4287 if (--sn->states == 0 && sn->expire == 0) {
4288 pf_unlink_src_node(sn);
4289 uma_zfree(V_pf_sources_z, sn);
4291 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
4293 PF_HASHROW_UNLOCK(sh);
4296 if (nsn != sn && nsn != NULL) {
4297 struct pf_srchash *sh;
4299 sh = &V_pf_srchash[pf_hashsrc(&nsn->addr, nsn->af)];
4300 PF_HASHROW_LOCK(sh);
4301 if (--nsn->states == 0 && nsn->expire == 0) {
4302 pf_unlink_src_node(nsn);
4303 uma_zfree(V_pf_sources_z, nsn);
4305 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
4307 PF_HASHROW_UNLOCK(sh);
4314 pf_test_fragment(struct pf_krule **rm, int direction, struct pfi_kkif *kif,
4315 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_krule **am,
4316 struct pf_kruleset **rsm)
4318 struct pf_krule *r, *a = NULL;
4319 struct pf_kruleset *ruleset = NULL;
4320 sa_family_t af = pd->af;
4325 struct pf_kanchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
4329 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
4331 pf_counter_u64_add(&r->evaluations, 1);
4332 if (pfi_kkif_match(r->kif, kif) == r->ifnot)
4333 r = r->skip[PF_SKIP_IFP].ptr;
4334 else if (r->direction && r->direction != direction)
4335 r = r->skip[PF_SKIP_DIR].ptr;
4336 else if (r->af && r->af != af)
4337 r = r->skip[PF_SKIP_AF].ptr;
4338 else if (r->proto && r->proto != pd->proto)
4339 r = r->skip[PF_SKIP_PROTO].ptr;
4340 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
4341 r->src.neg, kif, M_GETFIB(m)))
4342 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
4343 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
4344 r->dst.neg, NULL, M_GETFIB(m)))
4345 r = r->skip[PF_SKIP_DST_ADDR].ptr;
4346 else if (r->tos && !(r->tos == pd->tos))
4347 r = TAILQ_NEXT(r, entries);
4348 else if (r->os_fingerprint != PF_OSFP_ANY)
4349 r = TAILQ_NEXT(r, entries);
4350 else if (pd->proto == IPPROTO_UDP &&
4351 (r->src.port_op || r->dst.port_op))
4352 r = TAILQ_NEXT(r, entries);
4353 else if (pd->proto == IPPROTO_TCP &&
4354 (r->src.port_op || r->dst.port_op || r->flagset))
4355 r = TAILQ_NEXT(r, entries);
4356 else if ((pd->proto == IPPROTO_ICMP ||
4357 pd->proto == IPPROTO_ICMPV6) &&
4358 (r->type || r->code))
4359 r = TAILQ_NEXT(r, entries);
4361 !pf_match_ieee8021q_pcp(r->prio, m))
4362 r = TAILQ_NEXT(r, entries);
4363 else if (r->prob && r->prob <=
4364 (arc4random() % (UINT_MAX - 1) + 1))
4365 r = TAILQ_NEXT(r, entries);
4366 else if (r->match_tag && !pf_match_tag(m, r, &tag,
4367 pd->pf_mtag ? pd->pf_mtag->tag : 0))
4368 r = TAILQ_NEXT(r, entries);
4370 if (r->anchor == NULL) {
4371 if (r->action == PF_MATCH) {
4372 pf_counter_u64_critical_enter();
4373 pf_counter_u64_add_protected(&r->packets[direction == PF_OUT], 1);
4374 pf_counter_u64_add_protected(&r->bytes[direction == PF_OUT], pd->tot_len);
4375 pf_counter_u64_critical_exit();
4376 pf_rule_to_actions(r, &pd->act);
4378 PFLOG_PACKET(kif, m, af,
4379 direction, PFRES_MATCH, r,
4389 r = TAILQ_NEXT(r, entries);
4391 pf_step_into_anchor(anchor_stack, &asd,
4392 &ruleset, PF_RULESET_FILTER, &r, &a,
4395 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
4396 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
4403 REASON_SET(&reason, PFRES_MATCH);
4405 /* apply actions for last matching pass/block rule */
4406 pf_rule_to_actions(r, &pd->act);
4409 PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd,
4412 if (r->action != PF_PASS)
4415 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
4416 REASON_SET(&reason, PFRES_MEMORY);
4424 pf_tcp_track_full(struct pf_kstate **state, struct pfi_kkif *kif,
4425 struct mbuf *m, int off, struct pf_pdesc *pd, u_short *reason,
4428 struct tcphdr *th = &pd->hdr.tcp;
4429 struct pf_state_peer *src, *dst;
4430 u_int16_t win = ntohs(th->th_win);
4431 u_int32_t ack, end, seq, orig_seq;
4432 u_int8_t sws, dws, psrc, pdst;
4435 if (pd->dir == (*state)->direction) {
4436 src = &(*state)->src;
4437 dst = &(*state)->dst;
4441 src = &(*state)->dst;
4442 dst = &(*state)->src;
4447 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
4448 sws = src->wscale & PF_WSCALE_MASK;
4449 dws = dst->wscale & PF_WSCALE_MASK;
4454 * Sequence tracking algorithm from Guido van Rooij's paper:
4455 * http://www.madison-gurkha.com/publications/tcp_filtering/
4459 orig_seq = seq = ntohl(th->th_seq);
4460 if (src->seqlo == 0) {
4461 /* First packet from this end. Set its state */
4463 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
4464 src->scrub == NULL) {
4465 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
4466 REASON_SET(reason, PFRES_MEMORY);
4471 /* Deferred generation of sequence number modulator */
4472 if (dst->seqdiff && !src->seqdiff) {
4473 /* use random iss for the TCP server */
4474 while ((src->seqdiff = arc4random() - seq) == 0)
4476 ack = ntohl(th->th_ack) - dst->seqdiff;
4477 pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
4479 pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
4482 ack = ntohl(th->th_ack);
4485 end = seq + pd->p_len;
4486 if (th->th_flags & TH_SYN) {
4488 if (dst->wscale & PF_WSCALE_FLAG) {
4489 src->wscale = pf_get_wscale(m, off, th->th_off,
4491 if (src->wscale & PF_WSCALE_FLAG) {
4492 /* Remove scale factor from initial
4493 * window */
4494 sws = src->wscale & PF_WSCALE_MASK;
4495 win = ((u_int32_t)win + (1 << sws) - 1)
4497 dws = dst->wscale & PF_WSCALE_MASK;
4499 /* fixup other window */
4500 dst->max_win <<= dst->wscale &
4501 PF_WSCALE_MASK;
4502 /* in case of a retrans SYN|ACK */
4507 if (th->th_flags & TH_FIN)
4511 if (src->state < TCPS_SYN_SENT)
4512 pf_set_protostate(*state, psrc, TCPS_SYN_SENT);
4515 * May need to slide the window (seqhi may have been set by
4516 * the crappy stack check or if we picked up the connection
4517 * after establishment)
4519 if (src->seqhi == 1 ||
4520 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
4521 src->seqhi = end + MAX(1, dst->max_win << dws);
4522 if (win > src->max_win)
4526 ack = ntohl(th->th_ack) - dst->seqdiff;
4528 /* Modulate sequence numbers */
4529 pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
4531 pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
4534 end = seq + pd->p_len;
4535 if (th->th_flags & TH_SYN)
4537 if (th->th_flags & TH_FIN)
4541 if ((th->th_flags & TH_ACK) == 0) {
4542 /* Let it pass through the ack skew check */
4544 } else if ((ack == 0 &&
4545 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
4546 /* broken tcp stacks do not set ack */
4547 (dst->state < TCPS_SYN_SENT)) {
4549 * Many stacks (ours included) will set the ACK number in an
4550 * FIN|ACK if the SYN times out -- no sequence to ACK.
4556 /* Ease sequencing restrictions on no data packets */
4561 ackskew = dst->seqlo - ack;
4564 * Need to demodulate the sequence numbers in any TCP SACK options
4565 * (Selective ACK). We could optionally validate the SACK values
4566 * against the current ACK window, either forwards or backwards, but
4567 * I'm not confident that SACK has been implemented properly
4568 * everywhere. It wouldn't surprise me if several stacks accidentally
4569 * SACK too far backwards of previously ACKed data. There really aren't
4570 * any security implications of bad SACKing unless the target stack
4571 * doesn't validate the option length correctly. Someone trying to
4572 * spoof into a TCP connection won't bother blindly sending SACK
4575 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
4576 if (pf_modulate_sack(m, off, pd, th, dst))
4580 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
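/*
 * The test below is van Rooij's windowed tracking: (1) the segment must
 * end inside the window we have seen advertised, (2) start no more than
 * one full window back, (3)/(4) carry an ACK within MAXACKWINDOW
 * (scaled) of the peer's tracked sequence, and an RST additionally
 * needs an (almost) exact sequence match.  Illustration: with
 * src->seqlo = 900, src->seqhi = 1000, dst->max_win = 100 and dws = 0,
 * a segment with seq = 850 and end = 980 passes (1) 1000 >= 980 and
 * (2) 850 >= 900 - 100.
 */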
4581 if (SEQ_GEQ(src->seqhi, end) &&
4582 /* Last octet inside other's window space */
4583 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
4584 /* Retrans: not more than one window back */
4585 (ackskew >= -MAXACKWINDOW) &&
4586 /* Acking not more than one reassembled fragment backwards */
4587 (ackskew <= (MAXACKWINDOW << sws)) &&
4588 /* Acking not more than one window forward */
4589 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
4590 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
4591 (pd->flags & PFDESC_IP_REAS) == 0)) {
4592 /* Require an exact/+1 sequence match on resets when possible */
4594 if (dst->scrub || src->scrub) {
4595 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4596 *state, src, dst, copyback))
4600 /* update max window */
4601 if (src->max_win < win)
4603 /* synchronize sequencing */
4604 if (SEQ_GT(end, src->seqlo))
4606 /* slide the window of what the other end can send */
4607 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4608 dst->seqhi = ack + MAX((win << sws), 1);
4611 if (th->th_flags & TH_SYN)
4612 if (src->state < TCPS_SYN_SENT)
4613 pf_set_protostate(*state, psrc, TCPS_SYN_SENT);
4614 if (th->th_flags & TH_FIN)
4615 if (src->state < TCPS_CLOSING)
4616 pf_set_protostate(*state, psrc, TCPS_CLOSING);
4617 if (th->th_flags & TH_ACK) {
4618 if (dst->state == TCPS_SYN_SENT) {
4619 pf_set_protostate(*state, pdst,
4621 if (src->state == TCPS_ESTABLISHED &&
4622 (*state)->src_node != NULL &&
4623 pf_src_connlimit(state)) {
4624 REASON_SET(reason, PFRES_SRCLIMIT);
4627 } else if (dst->state == TCPS_CLOSING)
4628 pf_set_protostate(*state, pdst,
4631 if (th->th_flags & TH_RST)
4632 pf_set_protostate(*state, PF_PEER_BOTH, TCPS_TIME_WAIT);
4634 /* update expire time */
4635 (*state)->expire = time_uptime;
4636 if (src->state >= TCPS_FIN_WAIT_2 &&
4637 dst->state >= TCPS_FIN_WAIT_2)
4638 (*state)->timeout = PFTM_TCP_CLOSED;
4639 else if (src->state >= TCPS_CLOSING &&
4640 dst->state >= TCPS_CLOSING)
4641 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4642 else if (src->state < TCPS_ESTABLISHED ||
4643 dst->state < TCPS_ESTABLISHED)
4644 (*state)->timeout = PFTM_TCP_OPENING;
4645 else if (src->state >= TCPS_CLOSING ||
4646 dst->state >= TCPS_CLOSING)
4647 (*state)->timeout = PFTM_TCP_CLOSING;
4649 (*state)->timeout = PFTM_TCP_ESTABLISHED;
4651 /* Fall through to PASS packet */
4653 } else if ((dst->state < TCPS_SYN_SENT ||
4654 dst->state >= TCPS_FIN_WAIT_2 ||
4655 src->state >= TCPS_FIN_WAIT_2) &&
4656 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
4657 /* Within a window forward of the originating packet */
4658 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
4659 /* Within a window backward of the originating packet */
4662 * This currently handles three situations:
4663 * 1) Stupid stacks will shotgun SYNs before their peer
4664 * replies.
4665 * 2) When PF catches an already established stream (the
4666 * firewall rebooted, the state table was flushed, routes
4667 * changed...)
4668 * 3) Packets get funky immediately after the connection
4669 * closes (this should catch Solaris spurious ACK|FINs
4670 * that web servers like to spew after a close)
4672 * This must be a little more careful than the above code
4673 * since packet floods will also be caught here. We don't
4674 * update the TTL here to mitigate the damage of a packet
4675 * flood and so the same code can handle awkward establishment
4676 * and a loosened connection close.
4677 * In the establishment case, a correct peer response will
4678 * validate the connection, go through the normal state code
4679 * and keep updating the state TTL.
4682 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4683 printf("pf: loose state match: ");
4684 pf_print_state(*state);
4685 pf_print_flags(th->th_flags);
4686 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4687 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
4688 pd->p_len, ackskew, (unsigned long long)(*state)->packets[0],
4689 (unsigned long long)(*state)->packets[1],
4690 pd->dir == PF_IN ? "in" : "out",
4691 pd->dir == (*state)->direction ? "fwd" : "rev");
4694 if (dst->scrub || src->scrub) {
4695 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4696 *state, src, dst, copyback))
4700 /* update max window */
4701 if (src->max_win < win)
4703 /* synchronize sequencing */
4704 if (SEQ_GT(end, src->seqlo))
4706 /* slide the window of what the other end can send */
4707 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4708 dst->seqhi = ack + MAX((win << sws), 1);
4711 * Cannot set dst->seqhi here since this could be a shotgunned
4712 * SYN and not an already established connection.
4715 if (th->th_flags & TH_FIN)
4716 if (src->state < TCPS_CLOSING)
4717 pf_set_protostate(*state, psrc, TCPS_CLOSING);
4718 if (th->th_flags & TH_RST)
4719 pf_set_protostate(*state, PF_PEER_BOTH, TCPS_TIME_WAIT);
4721 /* Fall through to PASS packet */
4724 if ((*state)->dst.state == TCPS_SYN_SENT &&
4725 (*state)->src.state == TCPS_SYN_SENT) {
4726 /* Send RST for state mismatches during handshake */
4727 if (!(th->th_flags & TH_RST))
4728 pf_send_tcp((*state)->rule.ptr, pd->af,
4729 pd->dst, pd->src, th->th_dport,
4730 th->th_sport, ntohl(th->th_ack), 0,
4732 (*state)->rule.ptr->return_ttl, 1, 0);
4736 } else if (V_pf_status.debug >= PF_DEBUG_MISC) {
4737 printf("pf: BAD state: ");
4738 pf_print_state(*state);
4739 pf_print_flags(th->th_flags);
4740 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4741 "pkts=%llu:%llu dir=%s,%s\n",
4742 seq, orig_seq, ack, pd->p_len, ackskew,
4743 (unsigned long long)(*state)->packets[0],
4744 (unsigned long long)(*state)->packets[1],
4745 pd->dir == PF_IN ? "in" : "out",
4746 pd->dir == (*state)->direction ? "fwd" : "rev");
4747 printf("pf: State failure on: %c %c %c %c | %c %c\n",
4748 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4749 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4751 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4752 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4753 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4754 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
4756 REASON_SET(reason, PFRES_BADSTATE);
4764 pf_tcp_track_sloppy(struct pf_kstate **state, struct pf_pdesc *pd, u_short *reason)
4766 struct tcphdr *th = &pd->hdr.tcp;
4767 struct pf_state_peer *src, *dst;
4768 u_int8_t psrc, pdst;
4770 if (pd->dir == (*state)->direction) {
4771 src = &(*state)->src;
4772 dst = &(*state)->dst;
4776 src = &(*state)->dst;
4777 dst = &(*state)->src;
4782 if (th->th_flags & TH_SYN)
4783 if (src->state < TCPS_SYN_SENT)
4784 pf_set_protostate(*state, psrc, TCPS_SYN_SENT);
4785 if (th->th_flags & TH_FIN)
4786 if (src->state < TCPS_CLOSING)
4787 pf_set_protostate(*state, psrc, TCPS_CLOSING);
4788 if (th->th_flags & TH_ACK) {
4789 if (dst->state == TCPS_SYN_SENT) {
4790 pf_set_protostate(*state, pdst, TCPS_ESTABLISHED);
4791 if (src->state == TCPS_ESTABLISHED &&
4792 (*state)->src_node != NULL &&
4793 pf_src_connlimit(state)) {
4794 REASON_SET(reason, PFRES_SRCLIMIT);
4797 } else if (dst->state == TCPS_CLOSING) {
4798 pf_set_protostate(*state, pdst, TCPS_FIN_WAIT_2);
4799 } else if (src->state == TCPS_SYN_SENT &&
4800 dst->state < TCPS_SYN_SENT) {
4802 * Handle a special sloppy case where we only see one
4803 * half of the connection. If there is an ACK after
4804 * the initial SYN without ever seeing a packet from
4805 * the destination, set the connection to established.
4807 pf_set_protostate(*state, PF_PEER_BOTH,
4808 TCPS_ESTABLISHED);
4809 dst->state = src->state = TCPS_ESTABLISHED;
4810 if ((*state)->src_node != NULL &&
4811 pf_src_connlimit(state)) {
4812 REASON_SET(reason, PFRES_SRCLIMIT);
4815 } else if (src->state == TCPS_CLOSING &&
4816 dst->state == TCPS_ESTABLISHED &&
4819 * Handle the closing of half connections where we
4820 * don't see the full bidirectional FIN/ACK+ACK
4821 * handshake.
4822 */
4823 pf_set_protostate(*state, pdst, TCPS_CLOSING);
4826 if (th->th_flags & TH_RST)
4827 pf_set_protostate(*state, PF_PEER_BOTH, TCPS_TIME_WAIT);
4829 /* update expire time */
4830 (*state)->expire = time_uptime;
4831 if (src->state >= TCPS_FIN_WAIT_2 &&
4832 dst->state >= TCPS_FIN_WAIT_2)
4833 (*state)->timeout = PFTM_TCP_CLOSED;
4834 else if (src->state >= TCPS_CLOSING &&
4835 dst->state >= TCPS_CLOSING)
4836 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4837 else if (src->state < TCPS_ESTABLISHED ||
4838 dst->state < TCPS_ESTABLISHED)
4839 (*state)->timeout = PFTM_TCP_OPENING;
4840 else if (src->state >= TCPS_CLOSING ||
4841 dst->state >= TCPS_CLOSING)
4842 (*state)->timeout = PFTM_TCP_CLOSING;
4844 (*state)->timeout = PFTM_TCP_ESTABLISHED;
4850 pf_synproxy(struct pf_pdesc *pd, struct pf_kstate **state, u_short *reason)
4852 struct pf_state_key *sk = (*state)->key[pd->didx];
4853 struct tcphdr *th = &pd->hdr.tcp;
4855 if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
4856 if (pd->dir != (*state)->direction) {
4857 REASON_SET(reason, PFRES_SYNPROXY);
4858 return (PF_SYNPROXY_DROP);
4860 if (th->th_flags & TH_SYN) {
4861 if (ntohl(th->th_seq) != (*state)->src.seqlo) {
4862 REASON_SET(reason, PFRES_SYNPROXY);
4865 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
4866 pd->src, th->th_dport, th->th_sport,
4867 (*state)->src.seqhi, ntohl(th->th_seq) + 1,
4868 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0);
4869 REASON_SET(reason, PFRES_SYNPROXY);
4870 return (PF_SYNPROXY_DROP);
4871 } else if ((th->th_flags & (TH_ACK|TH_RST|TH_FIN)) != TH_ACK ||
4872 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4873 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4874 REASON_SET(reason, PFRES_SYNPROXY);
4876 } else if ((*state)->src_node != NULL &&
4877 pf_src_connlimit(state)) {
4878 REASON_SET(reason, PFRES_SRCLIMIT);
4881 pf_set_protostate(*state, PF_PEER_SRC,
4884 if ((*state)->src.state == PF_TCPS_PROXY_DST) {
4885 if (pd->dir == (*state)->direction) {
4886 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
4887 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4888 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4889 REASON_SET(reason, PFRES_SYNPROXY);
4892 (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
4893 if ((*state)->dst.seqhi == 1)
4894 (*state)->dst.seqhi = htonl(arc4random());
4895 pf_send_tcp((*state)->rule.ptr, pd->af,
4896 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4897 sk->port[pd->sidx], sk->port[pd->didx],
4898 (*state)->dst.seqhi, 0, TH_SYN, 0,
4899 (*state)->src.mss, 0, 0, (*state)->tag);
4900 REASON_SET(reason, PFRES_SYNPROXY);
4901 return (PF_SYNPROXY_DROP);
4902 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
4904 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
4905 REASON_SET(reason, PFRES_SYNPROXY);
4908 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
4909 (*state)->dst.seqlo = ntohl(th->th_seq);
4910 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
4911 pd->src, th->th_dport, th->th_sport,
4912 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
4913 TH_ACK, (*state)->src.max_win, 0, 0, 0,
4915 pf_send_tcp((*state)->rule.ptr, pd->af,
4916 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4917 sk->port[pd->sidx], sk->port[pd->didx],
4918 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
4919 TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0);
4920 (*state)->src.seqdiff = (*state)->dst.seqhi -
4921 (*state)->src.seqlo;
4922 (*state)->dst.seqdiff = (*state)->src.seqhi -
4923 (*state)->dst.seqlo;
4924 (*state)->src.seqhi = (*state)->src.seqlo +
4925 (*state)->dst.max_win;
4926 (*state)->dst.seqhi = (*state)->dst.seqlo +
4927 (*state)->src.max_win;
4928 (*state)->src.wscale = (*state)->dst.wscale = 0;
4929 pf_set_protostate(*state, PF_PEER_BOTH,
4931 REASON_SET(reason, PFRES_SYNPROXY);
4932 return (PF_SYNPROXY_DROP);
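/*
 * Synproxy recap (sketch): pf completes the client's handshake itself
 * (the SYN|ACK carrying our seqhi), then opens a second handshake to
 * the real server, and finally splices the two together using the
 * seqdiff values computed above to rewrite sequence numbers in both
 * directions from here on.
 */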
4940 pf_test_state_tcp(struct pf_kstate **state, int direction, struct pfi_kkif *kif,
4941 struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
4944 struct pf_state_key_cmp key;
4945 struct tcphdr *th = &pd->hdr.tcp;
4948 struct pf_state_peer *src, *dst;
4950 bzero(&key, sizeof(key));
4952 key.proto = IPPROTO_TCP;
4953 if (direction == PF_IN) { /* wire side, straight */
4954 PF_ACPY(&key.addr[0], pd->src, key.af);
4955 PF_ACPY(&key.addr[1], pd->dst, key.af);
4956 key.port[0] = th->th_sport;
4957 key.port[1] = th->th_dport;
4958 } else { /* stack side, reverse */
4959 PF_ACPY(&key.addr[1], pd->src, key.af);
4960 PF_ACPY(&key.addr[0], pd->dst, key.af);
4961 key.port[1] = th->th_sport;
4962 key.port[0] = th->th_dport;
4965 STATE_LOOKUP(kif, &key, direction, *state, pd);
4967 if (direction == (*state)->direction) {
4968 src = &(*state)->src;
4969 dst = &(*state)->dst;
4971 src = &(*state)->dst;
4972 dst = &(*state)->src;
4975 if ((action = pf_synproxy(pd, state, reason)) != PF_PASS)
4978 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
4979 dst->state >= TCPS_FIN_WAIT_2 &&
4980 src->state >= TCPS_FIN_WAIT_2) {
4981 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4982 printf("pf: state reuse ");
4983 pf_print_state(*state);
4984 pf_print_flags(th->th_flags);
4987 /* XXX make sure it's the same direction ?? */
4988 pf_set_protostate(*state, PF_PEER_BOTH, TCPS_CLOSED);
4989 pf_unlink_state(*state);
4994 if ((*state)->state_flags & PFSTATE_SLOPPY) {
4995 if (pf_tcp_track_sloppy(state, pd, reason) == PF_DROP)
4998 if (pf_tcp_track_full(state, kif, m, off, pd, reason,
4999 ©back) == PF_DROP)
5003 /* translate source/destination address, if necessary */
5004 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5005 struct pf_state_key *nk = (*state)->key[pd->didx];
5007 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
5008 nk->port[pd->sidx] != th->th_sport)
5009 pf_change_ap(m, pd->src, &th->th_sport,
5010 pd->ip_sum, &th->th_sum, &nk->addr[pd->sidx],
5011 nk->port[pd->sidx], 0, pd->af);
5013 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
5014 nk->port[pd->didx] != th->th_dport)
5015 pf_change_ap(m, pd->dst, &th->th_dport,
5016 pd->ip_sum, &th->th_sum, &nk->addr[pd->didx],
5017 nk->port[pd->didx], 0, pd->af);
5021 /* Copyback sequence modulation or stateful scrub changes if needed */
5023 m_copyback(m, off, sizeof(*th), (caddr_t)th);
5029 pf_test_state_udp(struct pf_kstate **state, int direction, struct pfi_kkif *kif,
5030 struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
5032 struct pf_state_peer *src, *dst;
5033 struct pf_state_key_cmp key;
5034 struct udphdr *uh = &pd->hdr.udp;
5037 bzero(&key, sizeof(key));
5039 key.proto = IPPROTO_UDP;
5040 if (direction == PF_IN) { /* wire side, straight */
5041 PF_ACPY(&key.addr[0], pd->src, key.af);
5042 PF_ACPY(&key.addr[1], pd->dst, key.af);
5043 key.port[0] = uh->uh_sport;
5044 key.port[1] = uh->uh_dport;
5045 } else { /* stack side, reverse */
5046 PF_ACPY(&key.addr[1], pd->src, key.af);
5047 PF_ACPY(&key.addr[0], pd->dst, key.af);
5048 key.port[1] = uh->uh_sport;
5049 key.port[0] = uh->uh_dport;
5052 STATE_LOOKUP(kif, &key, direction, *state, pd);
5054 if (direction == (*state)->direction) {
5055 src = &(*state)->src;
5056 dst = &(*state)->dst;
5060 src = &(*state)->dst;
5061 dst = &(*state)->src;
5067 if (src->state < PFUDPS_SINGLE)
5068 pf_set_protostate(*state, psrc, PFUDPS_SINGLE);
5069 if (dst->state == PFUDPS_SINGLE)
5070 pf_set_protostate(*state, pdst, PFUDPS_MULTIPLE);
5072 /* update expire time */
5073 (*state)->expire = time_uptime;
5074 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
5075 (*state)->timeout = PFTM_UDP_MULTIPLE;
5077 (*state)->timeout = PFTM_UDP_SINGLE;
5079 /* translate source/destination address, if necessary */
5080 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5081 struct pf_state_key *nk = (*state)->key[pd->didx];
5083 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
5084 nk->port[pd->sidx] != uh->uh_sport)
5085 pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum,
5086 &uh->uh_sum, &nk->addr[pd->sidx],
5087 nk->port[pd->sidx], 1, pd->af);
5089 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
5090 nk->port[pd->didx] != uh->uh_dport)
5091 pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum,
5092 &uh->uh_sum, &nk->addr[pd->didx],
5093 nk->port[pd->didx], 1, pd->af);
5094 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
5101 pf_test_state_icmp(struct pf_kstate **state, int direction, struct pfi_kkif *kif,
5102 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
5104 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
5105 u_int16_t icmpid = 0, *icmpsum;
5106 u_int8_t icmptype, icmpcode;
5108 struct pf_state_key_cmp key;
5110 bzero(&key, sizeof(key));
5111 switch (pd->proto) {
5114 icmptype = pd->hdr.icmp.icmp_type;
5115 icmpcode = pd->hdr.icmp.icmp_code;
5116 icmpid = pd->hdr.icmp.icmp_id;
5117 icmpsum = &pd->hdr.icmp.icmp_cksum;
5119 if (icmptype == ICMP_UNREACH ||
5120 icmptype == ICMP_SOURCEQUENCH ||
5121 icmptype == ICMP_REDIRECT ||
5122 icmptype == ICMP_TIMXCEED ||
5123 icmptype == ICMP_PARAMPROB)
5128 case IPPROTO_ICMPV6:
5129 icmptype = pd->hdr.icmp6.icmp6_type;
5130 icmpcode = pd->hdr.icmp6.icmp6_code;
5131 icmpid = pd->hdr.icmp6.icmp6_id;
5132 icmpsum = &pd->hdr.icmp6.icmp6_cksum;
5134 if (icmptype == ICMP6_DST_UNREACH ||
5135 icmptype == ICMP6_PACKET_TOO_BIG ||
5136 icmptype == ICMP6_TIME_EXCEEDED ||
5137 icmptype == ICMP6_PARAM_PROB)
5145 * ICMP query/reply message not related to a TCP/UDP packet.
5146 * Search for an ICMP state.
5149 key.proto = pd->proto;
5150 key.port[0] = key.port[1] = icmpid;
5151 if (direction == PF_IN) { /* wire side, straight */
5152 PF_ACPY(&key.addr[0], pd->src, key.af);
5153 PF_ACPY(&key.addr[1], pd->dst, key.af);
5154 } else { /* stack side, reverse */
5155 PF_ACPY(&key.addr[1], pd->src, key.af);
5156 PF_ACPY(&key.addr[0], pd->dst, key.af);
5159 STATE_LOOKUP(kif, &key, direction, *state, pd);
5161 (*state)->expire = time_uptime;
5162 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
5164 /* translate source/destination address, if necessary */
5165 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5166 struct pf_state_key *nk = (*state)->key[pd->didx];
5171 if (PF_ANEQ(pd->src,
5172 &nk->addr[pd->sidx], AF_INET))
5173 pf_change_a(&saddr->v4.s_addr,
5175 nk->addr[pd->sidx].v4.s_addr, 0);
5177 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
5179 pf_change_a(&daddr->v4.s_addr,
5181 nk->addr[pd->didx].v4.s_addr, 0);
5184 pd->hdr.icmp.icmp_id) {
5185 pd->hdr.icmp.icmp_cksum =
5187 pd->hdr.icmp.icmp_cksum, icmpid,
5188 nk->port[pd->sidx], 0);
5189 pd->hdr.icmp.icmp_id =
5193 m_copyback(m, off, ICMP_MINLEN,
5194 (caddr_t )&pd->hdr.icmp);
5199 if (PF_ANEQ(pd->src,
5200 &nk->addr[pd->sidx], AF_INET6))
5202 &pd->hdr.icmp6.icmp6_cksum,
5203 &nk->addr[pd->sidx], 0);
5205 if (PF_ANEQ(pd->dst,
5206 &nk->addr[pd->didx], AF_INET6))
5208 &pd->hdr.icmp6.icmp6_cksum,
5209 &nk->addr[pd->didx], 0);
5211 m_copyback(m, off, sizeof(struct icmp6_hdr),
5212 (caddr_t )&pd->hdr.icmp6);
5221 * ICMP error message in response to a TCP/UDP packet.
5222 * Extract the inner TCP/UDP header and search for that state.
5225 struct pf_pdesc pd2;
5226 bzero(&pd2, sizeof pd2);
5231 struct ip6_hdr h2_6;
5238 /* Payload packet is from the opposite direction. */
5239 pd2.sidx = (direction == PF_IN) ? 1 : 0;
5240 pd2.didx = (direction == PF_IN) ? 0 : 1;
5244 /* offset of h2 in mbuf chain */
5245 ipoff2 = off + ICMP_MINLEN;
5247 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
5248 NULL, reason, pd2.af)) {
5249 DPFPRINTF(PF_DEBUG_MISC,
5250 ("pf: ICMP error message too short "
5255 * ICMP error messages don't refer to non-first
5258 if (h2.ip_off & htons(IP_OFFMASK)) {
5259 REASON_SET(reason, PFRES_FRAG);
5263 /* offset of protocol header that follows h2 */
5264 off2 = ipoff2 + (h2.ip_hl << 2);
5266 pd2.proto = h2.ip_p;
5267 pd2.src = (struct pf_addr *)&h2.ip_src;
5268 pd2.dst = (struct pf_addr *)&h2.ip_dst;
5269 pd2.ip_sum = &h2.ip_sum;
5274 ipoff2 = off + sizeof(struct icmp6_hdr);
5276 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
5277 NULL, reason, pd2.af)) {
5278 DPFPRINTF(PF_DEBUG_MISC,
5279 ("pf: ICMP error message too short "
5283 pd2.proto = h2_6.ip6_nxt;
5284 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
5285 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
5287 off2 = ipoff2 + sizeof(h2_6);
5289 switch (pd2.proto) {
5290 case IPPROTO_FRAGMENT:
5292 * ICMPv6 error messages for
5293 * non-first fragments
5295 REASON_SET(reason, PFRES_FRAG);
5298 case IPPROTO_HOPOPTS:
5299 case IPPROTO_ROUTING:
5300 case IPPROTO_DSTOPTS: {
5301 /* get next header and header length */
5302 struct ip6_ext opt6;
5304 if (!pf_pull_hdr(m, off2, &opt6,
5305 sizeof(opt6), NULL, reason,
5307 DPFPRINTF(PF_DEBUG_MISC,
5308 ("pf: ICMPv6 short opt\n"));
5311 if (pd2.proto == IPPROTO_AH)
5312 off2 += (opt6.ip6e_len + 2) * 4;
5314 off2 += (opt6.ip6e_len + 1) * 8;
5315 pd2.proto = opt6.ip6e_nxt;
5316 /* goto the next header */
5323 } while (!terminal);
5328 if (PF_ANEQ(pd->dst, pd2.src, pd->af)) {
5329 if (V_pf_status.debug >= PF_DEBUG_MISC) {
5330 printf("pf: BAD ICMP %d:%d outer dst: ",
5331 icmptype, icmpcode);
5332 pf_print_host(pd->src, 0, pd->af);
5334 pf_print_host(pd->dst, 0, pd->af);
5335 printf(" inner src: ");
5336 pf_print_host(pd2.src, 0, pd2.af);
5338 pf_print_host(pd2.dst, 0, pd2.af);
5341 REASON_SET(reason, PFRES_BADSTATE);
5345 switch (pd2.proto) {
5349 struct pf_state_peer *src, *dst;
5354 * Only the first 8 bytes of the TCP header can be
5355 * expected. Don't access any TCP header fields after
5356 * th_seq, an ackskew test is not possible.
5358 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
5360 DPFPRINTF(PF_DEBUG_MISC,
5361 ("pf: ICMP error message too short "
5367 key.proto = IPPROTO_TCP;
5368 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5369 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5370 key.port[pd2.sidx] = th.th_sport;
5371 key.port[pd2.didx] = th.th_dport;
5373 STATE_LOOKUP(kif, &key, direction, *state, pd);
5375 if (direction == (*state)->direction) {
5376 src = &(*state)->dst;
5377 dst = &(*state)->src;
5379 src = &(*state)->src;
5380 dst = &(*state)->dst;
5383 if (src->wscale && dst->wscale)
5384 dws = dst->wscale & PF_WSCALE_MASK;
5388 /* Demodulate sequence number */
5389 seq = ntohl(th.th_seq) - src->seqdiff;
5391 pf_change_a(&th.th_seq, icmpsum,
5396 if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
5397 (!SEQ_GEQ(src->seqhi, seq) ||
5398 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
5399 if (V_pf_status.debug >= PF_DEBUG_MISC) {
5400 printf("pf: BAD ICMP %d:%d ",
5401 icmptype, icmpcode);
5402 pf_print_host(pd->src, 0, pd->af);
5404 pf_print_host(pd->dst, 0, pd->af);
5406 pf_print_state(*state);
5407 printf(" seq=%u\n", seq);
5409 REASON_SET(reason, PFRES_BADSTATE);
5412 if (V_pf_status.debug >= PF_DEBUG_MISC) {
5413 printf("pf: OK ICMP %d:%d ",
5414 icmptype, icmpcode);
5415 pf_print_host(pd->src, 0, pd->af);
5417 pf_print_host(pd->dst, 0, pd->af);
5419 pf_print_state(*state);
5420 printf(" seq=%u\n", seq);
5424 /* translate source/destination address, if necessary */
5425 if ((*state)->key[PF_SK_WIRE] !=
5426 (*state)->key[PF_SK_STACK]) {
5427 struct pf_state_key *nk =
5428 (*state)->key[pd->didx];
5430 if (PF_ANEQ(pd2.src,
5431 &nk->addr[pd2.sidx], pd2.af) ||
5432 nk->port[pd2.sidx] != th.th_sport)
5433 pf_change_icmp(pd2.src, &th.th_sport,
5434 daddr, &nk->addr[pd2.sidx],
5435 nk->port[pd2.sidx], NULL,
5436 pd2.ip_sum, icmpsum,
5437 pd->ip_sum, 0, pd2.af);
5439 if (PF_ANEQ(pd2.dst,
5440 &nk->addr[pd2.didx], pd2.af) ||
5441 nk->port[pd2.didx] != th.th_dport)
5442 pf_change_icmp(pd2.dst, &th.th_dport,
5443 saddr, &nk->addr[pd2.didx],
5444 nk->port[pd2.didx], NULL,
5445 pd2.ip_sum, icmpsum,
5446 pd->ip_sum, 0, pd2.af);
5454 m_copyback(m, off, ICMP_MINLEN,
5455 (caddr_t )&pd->hdr.icmp);
5456 m_copyback(m, ipoff2, sizeof(h2),
5463 sizeof(struct icmp6_hdr),
5464 (caddr_t )&pd->hdr.icmp6);
5465 m_copyback(m, ipoff2, sizeof(h2_6),
5470 m_copyback(m, off2, 8, (caddr_t)&th);
5479 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
5480 NULL, reason, pd2.af)) {
5481 DPFPRINTF(PF_DEBUG_MISC,
5482 ("pf: ICMP error message too short "
5488 key.proto = IPPROTO_UDP;
5489 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5490 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5491 key.port[pd2.sidx] = uh.uh_sport;
5492 key.port[pd2.didx] = uh.uh_dport;
5494 STATE_LOOKUP(kif, &key, direction, *state, pd);
5496 /* translate source/destination address, if necessary */
5497 if ((*state)->key[PF_SK_WIRE] !=
5498 (*state)->key[PF_SK_STACK]) {
5499 struct pf_state_key *nk =
5500 (*state)->key[pd->didx];
5502 if (PF_ANEQ(pd2.src,
5503 &nk->addr[pd2.sidx], pd2.af) ||
5504 nk->port[pd2.sidx] != uh.uh_sport)
5505 pf_change_icmp(pd2.src, &uh.uh_sport,
5506 daddr, &nk->addr[pd2.sidx],
5507 nk->port[pd2.sidx], &uh.uh_sum,
5508 pd2.ip_sum, icmpsum,
5509 pd->ip_sum, 1, pd2.af);
5511 if (PF_ANEQ(pd2.dst,
5512 &nk->addr[pd2.didx], pd2.af) ||
5513 nk->port[pd2.didx] != uh.uh_dport)
5514 pf_change_icmp(pd2.dst, &uh.uh_dport,
5515 saddr, &nk->addr[pd2.didx],
5516 nk->port[pd2.didx], &uh.uh_sum,
5517 pd2.ip_sum, icmpsum,
5518 pd->ip_sum, 1, pd2.af);
5523 m_copyback(m, off, ICMP_MINLEN,
5524 (caddr_t )&pd->hdr.icmp);
5525 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5531 sizeof(struct icmp6_hdr),
5532 (caddr_t )&pd->hdr.icmp6);
5533 m_copyback(m, ipoff2, sizeof(h2_6),
5538 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
5544 case IPPROTO_ICMP: {
5547 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
5548 NULL, reason, pd2.af)) {
5549 DPFPRINTF(PF_DEBUG_MISC,
5550 ("pf: ICMP error message too short i"
5556 key.proto = IPPROTO_ICMP;
5557 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5558 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5559 key.port[0] = key.port[1] = iih.icmp_id;
5561 STATE_LOOKUP(kif, &key, direction, *state, pd);
5563 /* translate source/destination address, if necessary */
5564 if ((*state)->key[PF_SK_WIRE] !=
5565 (*state)->key[PF_SK_STACK]) {
5566 struct pf_state_key *nk =
5567 (*state)->key[pd->didx];
5569 if (PF_ANEQ(pd2.src,
5570 &nk->addr[pd2.sidx], pd2.af) ||
5571 nk->port[pd2.sidx] != iih.icmp_id)
5572 pf_change_icmp(pd2.src, &iih.icmp_id,
5573 daddr, &nk->addr[pd2.sidx],
5574 nk->port[pd2.sidx], NULL,
5575 pd2.ip_sum, icmpsum,
5576 pd->ip_sum, 0, AF_INET);
5578 if (PF_ANEQ(pd2.dst,
5579 &nk->addr[pd2.didx], pd2.af) ||
5580 nk->port[pd2.didx] != iih.icmp_id)
5581 pf_change_icmp(pd2.dst, &iih.icmp_id,
5582 saddr, &nk->addr[pd2.didx],
5583 nk->port[pd2.didx], NULL,
5584 pd2.ip_sum, icmpsum,
5585 pd->ip_sum, 0, AF_INET);
5587 m_copyback(m, off, ICMP_MINLEN, (caddr_t)&pd->hdr.icmp);
5588 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5589 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
5596 case IPPROTO_ICMPV6: {
5597 struct icmp6_hdr iih;
5599 if (!pf_pull_hdr(m, off2, &iih,
5600 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
5601 DPFPRINTF(PF_DEBUG_MISC,
5602 ("pf: ICMP error message too short "
5608 key.proto = IPPROTO_ICMPV6;
5609 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5610 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5611 key.port[0] = key.port[1] = iih.icmp6_id;
5613 STATE_LOOKUP(kif, &key, direction, *state, pd);
5615 /* translate source/destination address, if necessary */
5616 if ((*state)->key[PF_SK_WIRE] !=
5617 (*state)->key[PF_SK_STACK]) {
5618 struct pf_state_key *nk =
5619 (*state)->key[pd->didx];
5621 if (PF_ANEQ(pd2.src,
5622 &nk->addr[pd2.sidx], pd2.af) ||
5623 nk->port[pd2.sidx] != iih.icmp6_id)
5624 pf_change_icmp(pd2.src, &iih.icmp6_id,
5625 daddr, &nk->addr[pd2.sidx],
5626 nk->port[pd2.sidx], NULL,
5627 pd2.ip_sum, icmpsum,
5628 pd->ip_sum, 0, AF_INET6);
5630 if (PF_ANEQ(pd2.dst,
5631 &nk->addr[pd2.didx], pd2.af) ||
5632 nk->port[pd2.didx] != iih.icmp6_id)
5633 pf_change_icmp(pd2.dst, &iih.icmp6_id,
5634 saddr, &nk->addr[pd2.didx],
5635 nk->port[pd2.didx], NULL,
5636 pd2.ip_sum, icmpsum,
5637 pd->ip_sum, 0, AF_INET6);
5639 m_copyback(m, off, sizeof(struct icmp6_hdr),
5640 (caddr_t)&pd->hdr.icmp6);
5641 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
5642 m_copyback(m, off2, sizeof(struct icmp6_hdr),
5651 key.proto = pd2.proto;
5652 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5653 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5654 key.port[0] = key.port[1] = 0;
5656 STATE_LOOKUP(kif, &key, direction, *state, pd);
5658 /* translate source/destination address, if necessary */
5659 if ((*state)->key[PF_SK_WIRE] !=
5660 (*state)->key[PF_SK_STACK]) {
5661 struct pf_state_key *nk =
5662 (*state)->key[pd->didx];
5664 if (PF_ANEQ(pd2.src,
5665 &nk->addr[pd2.sidx], pd2.af))
5666 pf_change_icmp(pd2.src, NULL, daddr,
5667 &nk->addr[pd2.sidx], 0, NULL,
5668 pd2.ip_sum, icmpsum,
5669 pd->ip_sum, 0, pd2.af);
5671 if (PF_ANEQ(pd2.dst,
5672 &nk->addr[pd2.didx], pd2.af))
5673 pf_change_icmp(pd2.dst, NULL, saddr,
5674 &nk->addr[pd2.didx], 0, NULL,
5675 pd2.ip_sum, icmpsum,
5676 pd->ip_sum, 0, pd2.af);
5681 m_copyback(m, off, ICMP_MINLEN,
5682 (caddr_t)&pd->hdr.icmp);
5683 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5689 sizeof(struct icmp6_hdr),
5690 (caddr_t )&pd->hdr.icmp6);
5691 m_copyback(m, ipoff2, sizeof(h2_6),
5705 pf_test_state_other(struct pf_kstate **state, int direction, struct pfi_kkif *kif,
5706 struct mbuf *m, struct pf_pdesc *pd)
5708 struct pf_state_peer *src, *dst;
5709 struct pf_state_key_cmp key;
5712 bzero(&key, sizeof(key));
5714 key.proto = pd->proto;
5715 if (direction == PF_IN) {
5716 PF_ACPY(&key.addr[0], pd->src, key.af);
5717 PF_ACPY(&key.addr[1], pd->dst, key.af);
5718 key.port[0] = key.port[1] = 0;
5720 PF_ACPY(&key.addr[1], pd->src, key.af);
5721 PF_ACPY(&key.addr[0], pd->dst, key.af);
5722 key.port[1] = key.port[0] = 0;
5725 STATE_LOOKUP(kif, &key, direction, *state, pd);
5727 if (direction == (*state)->direction) {
5728 src = &(*state)->src;
5729 dst = &(*state)->dst;
5733 src = &(*state)->dst;
5734 dst = &(*state)->src;
5740 if (src->state < PFOTHERS_SINGLE)
5741 pf_set_protostate(*state, psrc, PFOTHERS_SINGLE);
5742 if (dst->state == PFOTHERS_SINGLE)
5743 pf_set_protostate(*state, pdst, PFOTHERS_MULTIPLE);
5745 /* update expire time */
5746 (*state)->expire = time_uptime;
5747 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
5748 (*state)->timeout = PFTM_OTHER_MULTIPLE;
5750 (*state)->timeout = PFTM_OTHER_SINGLE;
5752 /* translate source/destination address, if necessary */
5753 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5754 struct pf_state_key *nk = (*state)->key[pd->didx];
5756 KASSERT(nk, ("%s: nk is null", __func__));
5757 KASSERT(pd, ("%s: pd is null", __func__));
5758 KASSERT(pd->src, ("%s: pd->src is null", __func__));
5759 KASSERT(pd->dst, ("%s: pd->dst is null", __func__));
5763 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5764 pf_change_a(&pd->src->v4.s_addr,
5766 nk->addr[pd->sidx].v4.s_addr,
5769 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5770 pf_change_a(&pd->dst->v4.s_addr,
5772 nk->addr[pd->didx].v4.s_addr,
5779 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5780 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
5782 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5783 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
5791 * ipoff and off are measured from the start of the mbuf chain.
5792 * h must be at "ipoff" on the mbuf chain.
5795 pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
5796 u_short *actionp, u_short *reasonp, sa_family_t af)
5801 struct ip *h = mtod(m, struct ip *);
5802 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
5806 ACTION_SET(actionp, PF_PASS);
5808 ACTION_SET(actionp, PF_DROP);
5809 REASON_SET(reasonp, PFRES_FRAG);
5813 if (m->m_pkthdr.len < off + len ||
5814 ntohs(h->ip_len) < off + len) {
5815 ACTION_SET(actionp, PF_DROP);
5816 REASON_SET(reasonp, PFRES_SHORT);
5824 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
5826 if (m->m_pkthdr.len < off + len ||
5827 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
5828 (unsigned)(off + len)) {
5829 ACTION_SET(actionp, PF_DROP);
5830 REASON_SET(reasonp, PFRES_SHORT);
5837 m_copydata(m, off, len, p);
5842 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kkif *kif,
5848 * Skip check for addresses with embedded interface scope,
5849 * as they would always match anyway.
5851 if (af == AF_INET6 && IN6_IS_SCOPE_EMBED(&addr->v6))
5854 if (af != AF_INET && af != AF_INET6)
5857 /* Skip checks for ipsec interfaces */
5858 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
5861 ifp = (kif != NULL) ? kif->pfik_ifp : NULL;
5866 return (fib6_check_urpf(rtableid, &addr->v6, 0, NHR_NONE,
5871 return (fib4_check_urpf(rtableid, addr->v4, 0, NHR_NONE,
5881 pf_route(struct mbuf **m, struct pf_krule *r, int dir, struct ifnet *oifp,
5882 struct pf_kstate *s, struct pf_pdesc *pd, struct inpcb *inp)
5884 struct mbuf *m0, *m1;
5885 struct sockaddr_in dst;
5887 struct ifnet *ifp = NULL;
5888 struct pf_addr naddr;
5889 struct pf_ksrc_node *sn = NULL;
5891 uint16_t ip_len, ip_off;
5893 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5894 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5897 if ((pd->pf_mtag == NULL &&
5898 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5899 pd->pf_mtag->routed++ > 3) {
5905 if (r->rt == PF_DUPTO) {
5906 if ((pd->pf_mtag->flags & PF_DUPLICATED)) {
5908 ifp = r->rpool.cur->kif ?
5909 r->rpool.cur->kif->pfik_ifp : NULL;
5911 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5915 /* When the 2nd interface is not skipped */
5923 pd->pf_mtag->flags |= PF_DUPLICATED;
5924 if (((m0 = m_dup(*m, M_NOWAIT)) == NULL)) {
5931 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5939 ip = mtod(m0, struct ip *);
5941 bzero(&dst, sizeof(dst));
5942 dst.sin_family = AF_INET;
5943 dst.sin_len = sizeof(dst);
5944 dst.sin_addr = ip->ip_dst;
5946 bzero(&naddr, sizeof(naddr));
5949 if (TAILQ_EMPTY(&r->rpool.list)) {
5950 DPFPRINTF(PF_DEBUG_URGENT,
5951 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5954 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
5956 if (!PF_AZERO(&naddr, AF_INET))
5957 dst.sin_addr.s_addr = naddr.v4.s_addr;
5958 ifp = r->rpool.cur->kif ?
5959 r->rpool.cur->kif->pfik_ifp : NULL;
5961 if (!PF_AZERO(&s->rt_addr, AF_INET))
5962 dst.sin_addr.s_addr =
5963 s->rt_addr.v4.s_addr;
5964 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5971 if (pf_test(PF_OUT, 0, ifp, &m0, inp) != PF_PASS)
5973 else if (m0 == NULL)
5975 if (m0->m_len < sizeof(struct ip)) {
5976 DPFPRINTF(PF_DEBUG_URGENT,
5977 ("%s: m0->m_len < sizeof(struct ip)\n", __func__));
5980 ip = mtod(m0, struct ip *);
5983 if (ifp->if_flags & IFF_LOOPBACK)
5984 m0->m_flags |= M_SKIP_FIREWALL;
5986 ip_len = ntohs(ip->ip_len);
5987 ip_off = ntohs(ip->ip_off);
5989 /* Copied from FreeBSD 10.0-CURRENT ip_output. */
5990 m0->m_pkthdr.csum_flags |= CSUM_IP;
5991 if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) {
5992 m0 = mb_unmapped_to_ext(m0);
5995 in_delayed_cksum(m0);
5996 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
5998 #if defined(SCTP) || defined(SCTP_SUPPORT)
5999 if (m0->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) {
6000 sctp_delayed_cksum(m0, (uint32_t)(ip->ip_hl << 2));
6001 m0->m_pkthdr.csum_flags &= ~CSUM_SCTP;
6006 * If small enough for interface, or the interface will take
6007 * care of the fragmentation for us, we can just send directly.
6009 if (ip_len <= ifp->if_mtu ||
6010 (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0) {
6012 if (m0->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) {
6013 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
6014 m0->m_pkthdr.csum_flags &= ~CSUM_IP;
6016 m_clrprotoflags(m0); /* Avoid confusing lower layers. */
6017 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
6021 /* Balk when DF bit is set or the interface didn't support TSO. */
6022 if ((ip_off & IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
6024 KMOD_IPSTAT_INC(ips_cantfrag);
6025 if (r->rt != PF_DUPTO) {
6026 if (s && pd->nat_rule != NULL)
6027 PACKET_UNDO_NAT(m0, pd,
6028 (ip->ip_hl << 2) + (ip_off & IP_OFFMASK),
6031 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
6038 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist);
6042 for (; m0; m0 = m1) {
6044 m0->m_nextpkt = NULL;
6046 m_clrprotoflags(m0);
6047 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
6053 KMOD_IPSTAT_INC(ips_fragmented);
6056 if (r->rt != PF_DUPTO)
6071 pf_route6(struct mbuf **m, struct pf_krule *r, int dir, struct ifnet *oifp,
6072 struct pf_kstate *s, struct pf_pdesc *pd, struct inpcb *inp)
6075 struct sockaddr_in6 dst;
6076 struct ip6_hdr *ip6;
6077 struct ifnet *ifp = NULL;
6078 struct pf_addr naddr;
6079 struct pf_ksrc_node *sn = NULL;
6081 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
6082 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
6085 if ((pd->pf_mtag == NULL &&
6086 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
6087 pd->pf_mtag->routed++ > 3) {
6093 if (r->rt == PF_DUPTO) {
6094 if ((pd->pf_mtag->flags & PF_DUPLICATED)) {
6096 ifp = r->rpool.cur->kif ?
6097 r->rpool.cur->kif->pfik_ifp : NULL;
6099 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
6103 /* When the 2nd interface is not skipped */
6111 pd->pf_mtag->flags |= PF_DUPLICATED;
6112 if (((m0 = m_dup(*m, M_NOWAIT)) == NULL)) {
6119 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
6127 ip6 = mtod(m0, struct ip6_hdr *);
6129 bzero(&dst, sizeof(dst));
6130 dst.sin6_family = AF_INET6;
6131 dst.sin6_len = sizeof(dst);
6132 dst.sin6_addr = ip6->ip6_dst;
6134 bzero(&naddr, sizeof(naddr));
6137 if (TAILQ_EMPTY(&r->rpool.list)) {
6138 DPFPRINTF(PF_DEBUG_URGENT,
6139 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
6142 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
6144 if (!PF_AZERO(&naddr, AF_INET6))
6145 PF_ACPY((struct pf_addr *)&dst.sin6_addr,
6147 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
6149 if (!PF_AZERO(&s->rt_addr, AF_INET6))
6150 PF_ACPY((struct pf_addr *)&dst.sin6_addr,
6151 &s->rt_addr, AF_INET6);
6152 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
6162 if (pf_test6(PF_OUT, PFIL_FWD, ifp, &m0, inp) != PF_PASS)
6164 else if (m0 == NULL)
6166 if (m0->m_len < sizeof(struct ip6_hdr)) {
6167 DPFPRINTF(PF_DEBUG_URGENT,
6168 ("%s: m0->m_len < sizeof(struct ip6_hdr)\n",
6172 ip6 = mtod(m0, struct ip6_hdr *);
6175 if (ifp->if_flags & IFF_LOOPBACK)
6176 m0->m_flags |= M_SKIP_FIREWALL;
6178 if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA_IPV6 &
6179 ~ifp->if_hwassist) {
6180 uint32_t plen = m0->m_pkthdr.len - sizeof(*ip6);
6181 m0 = mb_unmapped_to_ext(m0);
6184 in6_delayed_cksum(m0, plen, sizeof(struct ip6_hdr));
6185 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA_IPV6;
6189 * If the packet is too large for the outgoing interface,
6190 * send back an icmp6 error.
6192 if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr))
6193 dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
6194 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu)
6195 nd6_output_ifp(ifp, ifp, m0, &dst, NULL);
6197 in6_ifstat_inc(ifp, ifs6_in_toobig);
6198 if (r->rt != PF_DUPTO) {
6199 if (s && pd->nat_rule != NULL)
6200 PACKET_UNDO_NAT(m0, pd,
6201 ((caddr_t)ip6 - m0->m_data) +
6202 sizeof(struct ip6_hdr), s, dir);
6204 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
6210 if (r->rt != PF_DUPTO)
6224 * FreeBSD supports cksum offloads for the following drivers.
6225 * em(4), fxp(4), lge(4), nge(4), re(4), ti(4), txp(4), xl(4)
6227 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR :
6228 * network driver performed cksum including pseudo header, need to verify
6231 * network driver performed cksum, needs to additional pseudo header
6232 * cksum computation with partial csum_data(i.e. lack of H/W support for
6233 * pseudo header, for instance sk(4) and possibly gem(4))
6235 * After validating the cksum of packet, set both flag CSUM_DATA_VALID and
6236 * CSUM_PSEUDO_HDR in order to avoid recomputation of the cksum in upper
6238 * Also, set csum_data to 0xffff to force cksum validation.
6241 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af)
6247 if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
6249 if (m->m_pkthdr.len < off + len)
6254 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
6255 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
6256 sum = m->m_pkthdr.csum_data;
6258 ip = mtod(m, struct ip *);
6259 sum = in_pseudo(ip->ip_src.s_addr,
6260 ip->ip_dst.s_addr, htonl((u_short)len +
6261 m->m_pkthdr.csum_data + IPPROTO_TCP));
6268 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
6269 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
6270 sum = m->m_pkthdr.csum_data;
6272 ip = mtod(m, struct ip *);
6273 sum = in_pseudo(ip->ip_src.s_addr,
6274 ip->ip_dst.s_addr, htonl((u_short)len +
6275 m->m_pkthdr.csum_data + IPPROTO_UDP));
6283 case IPPROTO_ICMPV6:
6293 if (p == IPPROTO_ICMP) {
6298 sum = in_cksum(m, len);
6302 if (m->m_len < sizeof(struct ip))
6304 sum = in4_cksum(m, p, off, len);
6309 if (m->m_len < sizeof(struct ip6_hdr))
6311 sum = in6_cksum(m, p, off, len);
6322 KMOD_TCPSTAT_INC(tcps_rcvbadsum);
6327 KMOD_UDPSTAT_INC(udps_badsum);
6333 KMOD_ICMPSTAT_INC(icps_checksum);
6338 case IPPROTO_ICMPV6:
6340 KMOD_ICMP6STAT_INC(icp6s_checksum);
6347 if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
6348 m->m_pkthdr.csum_flags |=
6349 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
6350 m->m_pkthdr.csum_data = 0xffff;
6357 pf_pdesc_to_dnflow(int dir, const struct pf_pdesc *pd,
6358 const struct pf_krule *r, const struct pf_kstate *s,
6359 struct ip_fw_args *dnflow)
6361 int dndir = r->direction;
6363 if (s && dndir == PF_INOUT)
6364 dndir = s->direction;
6366 memset(dnflow, 0, sizeof(*dnflow));
6368 if (pd->dport != NULL)
6369 dnflow->f_id.dst_port = ntohs(*pd->dport);
6370 if (pd->sport != NULL)
6371 dnflow->f_id.src_port = ntohs(*pd->sport);
6374 dnflow->flags |= IPFW_ARGS_IN;
6376 dnflow->flags |= IPFW_ARGS_OUT;
6378 if (dir != dndir && pd->act.dnrpipe) {
6379 dnflow->rule.info = pd->act.dnrpipe;
6381 else if (dir == dndir) {
6382 dnflow->rule.info = pd->act.dnpipe;
6388 dnflow->rule.info |= IPFW_IS_DUMMYNET;
6389 if (r->free_flags & PFRULE_DN_IS_PIPE)
6390 dnflow->rule.info |= IPFW_IS_PIPE;
6392 dnflow->f_id.proto = pd->proto;
6393 dnflow->f_id.extra = dnflow->rule.info;
6396 dnflow->f_id.addr_type = 4;
6397 dnflow->f_id.src_ip = ntohl(pd->src->v4.s_addr);
6398 dnflow->f_id.dst_ip = ntohl(pd->dst->v4.s_addr);
6401 dnflow->flags |= IPFW_ARGS_IP6;
6402 dnflow->f_id.addr_type = 6;
6403 dnflow->f_id.src_ip6 = pd->src->v6;
6404 dnflow->f_id.dst_ip6 = pd->dst->v6;
6407 panic("Invalid AF");
6416 pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
6418 struct pfi_kkif *kif;
6419 u_short action, reason = 0, log = 0;
6420 struct mbuf *m = *m0;
6421 struct ip *h = NULL;
6422 struct m_tag *ipfwtag;
6423 struct pf_krule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
6424 struct pf_kstate *s = NULL;
6425 struct pf_kruleset *ruleset = NULL;
6427 int off, dirndx, pqid = 0;
6429 PF_RULES_RLOCK_TRACKER;
6430 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: bad direction %d\n", __func__, dir));
6433 if (!V_pf_status.running)
6436 memset(&pd, 0, sizeof(pd));
6438 kif = (struct pfi_kkif *)ifp->if_pf_kif;
6441 DPFPRINTF(PF_DEBUG_URGENT,
6442 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
6445 if (kif->pfik_flags & PFI_IFLAG_SKIP)
6448 if (m->m_flags & M_SKIP_FIREWALL)
6451 pd.pf_mtag = pf_find_mtag(m);
6453 if (ip_dn_io_ptr != NULL && pd.pf_mtag != NULL &&
6454 pd.pf_mtag->flags & PF_TAG_DUMMYNET) {
6455 /* Dummynet re-injects packets after they've
6456 * completed their delay. We've already
6457 * processed them, so pass unconditionally. */
6459 /* But only once. We may see the packet multiple times (e.g.
6460 * PFIL_IN/PFIL_OUT). */
6461 pd.pf_mtag->flags &= ~PF_TAG_DUMMYNET;
6468 if (__predict_false(ip_divert_ptr != NULL) &&
6469 ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
6470 struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1);
6471 if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) {
6472 if (pd.pf_mtag == NULL &&
6473 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6477 pd.pf_mtag->flags |= PF_PACKET_LOOPED;
6478 m_tag_delete(m, ipfwtag);
6480 if (pd.pf_mtag && pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) {
6481 m->m_flags |= M_FASTFWD_OURS;
6482 pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
6484 } else if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
6485 /* We do IP header normalization and packet reassembly here */
6489 m = *m0; /* pf_normalize messes with m0 */
6490 h = mtod(m, struct ip *);
6492 off = h->ip_hl << 2;
6493 if (off < (int)sizeof(struct ip)) {
6495 REASON_SET(&reason, PFRES_SHORT);
6500 pd.src = (struct pf_addr *)&h->ip_src;
6501 pd.dst = (struct pf_addr *)&h->ip_dst;
6502 pd.sport = pd.dport = NULL;
6503 pd.ip_sum = &h->ip_sum;
6504 pd.proto_sum = NULL;
6507 pd.sidx = (dir == PF_IN) ? 0 : 1;
6508 pd.didx = (dir == PF_IN) ? 1 : 0;
6510 pd.tos = h->ip_tos & ~IPTOS_ECN_MASK;
6511 pd.tot_len = ntohs(h->ip_len);
6513 /* handle fragments that didn't get reassembled by normalization */
6514 if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
6515 action = pf_test_fragment(&r, dir, kif, m, h,
6522 if (!pf_pull_hdr(m, off, &pd.hdr.tcp, sizeof(pd.hdr.tcp),
6523 &action, &reason, AF_INET)) {
6524 log = action != PF_PASS;
6527 pd.p_len = pd.tot_len - off - (pd.hdr.tcp.th_off << 2);
6529 pd.sport = &pd.hdr.tcp.th_sport;
6530 pd.dport = &pd.hdr.tcp.th_dport;
6532 /* Respond to SYN with a syncookie. */
6533 if ((pd.hdr.tcp.th_flags & (TH_SYN|TH_ACK|TH_RST)) == TH_SYN &&
6534 pd.dir == PF_IN && pf_synflood_check(&pd)) {
6535 pf_syncookie_send(m, off, &pd);
6540 if ((pd.hdr.tcp.th_flags & TH_ACK) && pd.p_len == 0)
6542 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6543 if (action == PF_DROP)
6545 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6547 if (action == PF_PASS) {
6548 if (V_pfsync_update_state_ptr != NULL)
6549 V_pfsync_update_state_ptr(s);
6553 } else if (s == NULL) {
6554 /* Validate remote SYN|ACK, re-create original SYN if
6556 if ((pd.hdr.tcp.th_flags & (TH_SYN|TH_ACK|TH_RST)) ==
6557 TH_ACK && pf_syncookie_validate(&pd) &&
6561 msyn = pf_syncookie_recreate_syn(h->ip_ttl,
6568 action = pf_test(dir, pflags, ifp, &msyn, inp);
6571 if (action == PF_PASS) {
6572 action = pf_test_state_tcp(&s, dir,
6573 kif, m, off, h, &pd, &reason);
6574 if (action != PF_PASS || s == NULL) {
6579 s->src.seqhi = ntohl(pd.hdr.tcp.th_ack)
6581 s->src.seqlo = ntohl(pd.hdr.tcp.th_seq)
6583 pf_set_protostate(s, PF_PEER_SRC,
6586 action = pf_synproxy(&pd, &s, &reason);
6587 if (action != PF_PASS)
6593 action = pf_test_rule(&r, &s, dir, kif, m, off,
6594 &pd, &a, &ruleset, inp);
6601 if (!pf_pull_hdr(m, off, &pd.hdr.udp, sizeof(pd.hdr.udp),
6602 &action, &reason, AF_INET)) {
6603 log = action != PF_PASS;
6606 pd.sport = &pd.hdr.udp.uh_sport;
6607 pd.dport = &pd.hdr.udp.uh_dport;
6608 if (pd.hdr.udp.uh_dport == 0 ||
6609 ntohs(pd.hdr.udp.uh_ulen) > m->m_pkthdr.len - off ||
6610 ntohs(pd.hdr.udp.uh_ulen) < sizeof(struct udphdr)) {
6612 REASON_SET(&reason, PFRES_SHORT);
6615 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6616 if (action == PF_PASS) {
6617 if (V_pfsync_update_state_ptr != NULL)
6618 V_pfsync_update_state_ptr(s);
6622 } else if (s == NULL)
6623 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6628 case IPPROTO_ICMP: {
6629 if (!pf_pull_hdr(m, off, &pd.hdr.icmp, ICMP_MINLEN,
6630 &action, &reason, AF_INET)) {
6631 log = action != PF_PASS;
6634 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
6636 if (action == PF_PASS) {
6637 if (V_pfsync_update_state_ptr != NULL)
6638 V_pfsync_update_state_ptr(s);
6642 } else if (s == NULL)
6643 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6649 case IPPROTO_ICMPV6: {
6651 DPFPRINTF(PF_DEBUG_MISC,
6652 ("pf: dropping IPv4 packet with ICMPv6 payload\n"));
6658 action = pf_test_state_other(&s, dir, kif, m, &pd);
6659 if (action == PF_PASS) {
6660 if (V_pfsync_update_state_ptr != NULL)
6661 V_pfsync_update_state_ptr(s);
6665 } else if (s == NULL)
6666 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6673 if (action == PF_PASS && h->ip_hl > 5 &&
6674 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6676 REASON_SET(&reason, PFRES_IPOPTIONS);
6678 DPFPRINTF(PF_DEBUG_MISC,
6679 ("pf: dropping packet with ip options\n"));
6682 if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
6684 REASON_SET(&reason, PFRES_MEMORY);
6686 if (r->rtableid >= 0)
6687 M_SETFIB(m, r->rtableid);
6689 if (r->scrub_flags & PFSTATE_SETPRIO) {
6690 if (pd.tos & IPTOS_LOWDELAY)
6692 if (vlan_set_pcp(m, r->set_prio[pqid])) {
6694 REASON_SET(&reason, PFRES_MEMORY);
6696 DPFPRINTF(PF_DEBUG_MISC,
6697 ("pf: failed to allocate 802.1q mtag\n"));
6703 pd.act.pqid = s->pqid;
6704 pd.act.qid = s->qid;
6705 } else if (r->qid) {
6706 pd.act.pqid = r->pqid;
6707 pd.act.qid = r->qid;
6709 if (action == PF_PASS && pd.act.qid) {
6710 if (pd.pf_mtag == NULL &&
6711 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6713 REASON_SET(&reason, PFRES_MEMORY);
6716 pd.pf_mtag->qid_hash = pf_state_hash(s);
6717 if (pqid || (pd.tos & IPTOS_LOWDELAY))
6718 pd.pf_mtag->qid = pd.act.pqid;
6720 pd.pf_mtag->qid = pd.act.qid;
6721 /* Add hints for ecn. */
6722 pd.pf_mtag->hdr = h;
6728 * connections redirected to loopback should not match sockets
6729 * bound specifically to loopback due to security implications,
6730 * see tcp_input() and in_pcblookup_listen().
6732 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6733 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6734 (s->nat_rule.ptr->action == PF_RDR ||
6735 s->nat_rule.ptr->action == PF_BINAT) &&
6736 IN_LOOPBACK(ntohl(pd.dst->v4.s_addr)))
6737 m->m_flags |= M_SKIP_FIREWALL;
6739 if (__predict_false(ip_divert_ptr != NULL) && action == PF_PASS &&
6740 r->divert.port && !PACKET_LOOPED(&pd)) {
6741 ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0,
6742 sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
6743 if (ipfwtag != NULL) {
6744 ((struct ipfw_rule_ref *)(ipfwtag+1))->info =
6745 ntohs(r->divert.port);
6746 ((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir;
6751 m_tag_prepend(m, ipfwtag);
6752 if (m->m_flags & M_FASTFWD_OURS) {
6753 if (pd.pf_mtag == NULL &&
6754 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6756 REASON_SET(&reason, PFRES_MEMORY);
6758 DPFPRINTF(PF_DEBUG_MISC,
6759 ("pf: failed to allocate tag\n"));
6761 pd.pf_mtag->flags |=
6762 PF_FASTFWD_OURS_PRESENT;
6763 m->m_flags &= ~M_FASTFWD_OURS;
6766 ip_divert_ptr(*m0, dir == PF_IN);
6771 /* XXX: ipfw has the same behaviour! */
6773 REASON_SET(&reason, PFRES_MEMORY);
6775 DPFPRINTF(PF_DEBUG_MISC,
6776 ("pf: failed to allocate divert tag\n"));
6781 struct pf_krule *lr;
6783 if (s != NULL && s->nat_rule.ptr != NULL &&
6784 s->nat_rule.ptr->log & PF_LOG_ALL)
6785 lr = s->nat_rule.ptr;
6788 PFLOG_PACKET(kif, m, AF_INET, dir, reason, lr, a, ruleset, &pd,
6792 pf_counter_u64_critical_enter();
6793 pf_counter_u64_add_protected(&kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS],
6795 pf_counter_u64_add_protected(&kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS],
6798 if (action == PF_PASS || r->action == PF_DROP) {
6799 dirndx = (dir == PF_OUT);
6800 pf_counter_u64_add_protected(&r->packets[dirndx], 1);
6801 pf_counter_u64_add_protected(&r->bytes[dirndx], pd.tot_len);
6803 pf_counter_u64_add_protected(&a->packets[dirndx], 1);
6804 pf_counter_u64_add_protected(&a->bytes[dirndx], pd.tot_len);
6807 if (s->nat_rule.ptr != NULL) {
6808 pf_counter_u64_add_protected(&s->nat_rule.ptr->packets[dirndx],
6810 pf_counter_u64_add_protected(&s->nat_rule.ptr->bytes[dirndx],
6813 if (s->src_node != NULL) {
6814 counter_u64_add(s->src_node->packets[dirndx],
6816 counter_u64_add(s->src_node->bytes[dirndx],
6819 if (s->nat_src_node != NULL) {
6820 counter_u64_add(s->nat_src_node->packets[dirndx],
6822 counter_u64_add(s->nat_src_node->bytes[dirndx],
6825 dirndx = (dir == s->direction) ? 0 : 1;
6826 s->packets[dirndx]++;
6827 s->bytes[dirndx] += pd.tot_len;
6830 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6831 if (nr != NULL && r == &V_pf_default_rule)
6833 if (tr->src.addr.type == PF_ADDR_TABLE)
6834 pfr_update_stats(tr->src.addr.p.tbl,
6835 (s == NULL) ? pd.src :
6836 &s->key[(s->direction == PF_IN)]->
6837 addr[(s->direction == PF_OUT)],
6838 pd.af, pd.tot_len, dir == PF_OUT,
6839 r->action == PF_PASS, tr->src.neg);
6840 if (tr->dst.addr.type == PF_ADDR_TABLE)
6841 pfr_update_stats(tr->dst.addr.p.tbl,
6842 (s == NULL) ? pd.dst :
6843 &s->key[(s->direction == PF_IN)]->
6844 addr[(s->direction == PF_IN)],
6845 pd.af, pd.tot_len, dir == PF_OUT,
6846 r->action == PF_PASS, tr->dst.neg);
6848 pf_counter_u64_critical_exit();
6851 case PF_SYNPROXY_DROP:
6862 /* pf_route() returns unlocked. */
6864 pf_route(m0, r, dir, kif->pfik_ifp, s, &pd, inp);
6867 /* Dummynet processing. */
6868 if (s && (s->dnpipe || s->dnrpipe)) {
6869 pd.act.dnpipe = s->dnpipe;
6870 pd.act.dnrpipe = s->dnrpipe;
6871 pd.act.flags = s->state_flags;
6872 } else if (r->dnpipe || r->dnrpipe) {
6873 pd.act.dnpipe = r->dnpipe;
6874 pd.act.dnrpipe = r->dnrpipe;
6875 pd.act.flags = r->free_flags;
6877 if (pd.act.dnpipe || pd.act.dnrpipe) {
6878 struct ip_fw_args dnflow;
6879 if (ip_dn_io_ptr == NULL) {
6883 REASON_SET(&reason, PFRES_MEMORY);
6887 if (pd.pf_mtag == NULL &&
6888 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6892 REASON_SET(&reason, PFRES_MEMORY);
6896 if (pf_pdesc_to_dnflow(dir, &pd, r, s, &dnflow)) {
6897 pd.pf_mtag->flags |= PF_TAG_DUMMYNET;
6898 ip_dn_io_ptr(m0, &dnflow);
6906 SDT_PROBE4(pf, ip, test, done, action, reason, r, s);
6917 pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
6919 struct pfi_kkif *kif;
6920 u_short action, reason = 0, log = 0;
6921 struct mbuf *m = *m0, *n = NULL;
6923 struct ip6_hdr *h = NULL;
6924 struct pf_krule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
6925 struct pf_kstate *s = NULL;
6926 struct pf_kruleset *ruleset = NULL;
6928 int off, terminal = 0, dirndx, rh_cnt = 0, pqid = 0;
6930 PF_RULES_RLOCK_TRACKER;
6931 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: bad direction %d\n", __func__, dir));
6934 if (!V_pf_status.running)
6937 memset(&pd, 0, sizeof(pd));
6938 pd.pf_mtag = pf_find_mtag(m);
6940 if (pd.pf_mtag && pd.pf_mtag->flags & PF_TAG_GENERATED)
6943 kif = (struct pfi_kkif *)ifp->if_pf_kif;
6945 DPFPRINTF(PF_DEBUG_URGENT,
6946 ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
6949 if (kif->pfik_flags & PFI_IFLAG_SKIP)
6952 if (m->m_flags & M_SKIP_FIREWALL)
6955 if (ip_dn_io_ptr != NULL && pd.pf_mtag != NULL &&
6956 pd.pf_mtag->flags & PF_TAG_DUMMYNET) {
6957 pd.pf_mtag->flags &= ~PF_TAG_DUMMYNET;
6958 /* Dummynet re-injects packets after they've
6959 * completed their delay. We've already
6960 * processed them, so pass unconditionally. */
6966 /* We do IP header normalization and packet reassembly here */
6967 if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
6971 m = *m0; /* pf_normalize messes with m0 */
6972 h = mtod(m, struct ip6_hdr *);
6975 * we do not support jumbogram. if we keep going, zero ip6_plen
6976 * will do something bad, so drop the packet for now.
6978 if (htons(h->ip6_plen) == 0) {
6980 REASON_SET(&reason, PFRES_NORM); /*XXX*/
6984 pd.src = (struct pf_addr *)&h->ip6_src;
6985 pd.dst = (struct pf_addr *)&h->ip6_dst;
6986 pd.sport = pd.dport = NULL;
6988 pd.proto_sum = NULL;
6990 pd.sidx = (dir == PF_IN) ? 0 : 1;
6991 pd.didx = (dir == PF_IN) ? 1 : 0;
6993 pd.tos = IPV6_DSCP(h);
6994 pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
6996 off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
6997 pd.proto = h->ip6_nxt;
7000 case IPPROTO_FRAGMENT:
7001 action = pf_test_fragment(&r, dir, kif, m, h,
7003 if (action == PF_DROP)
7004 REASON_SET(&reason, PFRES_FRAG);
7006 case IPPROTO_ROUTING: {
7007 struct ip6_rthdr rthdr;
7010 DPFPRINTF(PF_DEBUG_MISC,
7011 ("pf: IPv6 more than one rthdr\n"));
7013 REASON_SET(&reason, PFRES_IPOPTIONS);
7017 if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
7019 DPFPRINTF(PF_DEBUG_MISC,
7020 ("pf: IPv6 short rthdr\n"));
7022 REASON_SET(&reason, PFRES_SHORT);
7026 if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
7027 DPFPRINTF(PF_DEBUG_MISC,
7028 ("pf: IPv6 rthdr0\n"));
7030 REASON_SET(&reason, PFRES_IPOPTIONS);
7037 case IPPROTO_HOPOPTS:
7038 case IPPROTO_DSTOPTS: {
7039 /* get next header and header length */
7040 struct ip6_ext opt6;
7042 if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
7043 NULL, &reason, pd.af)) {
7044 DPFPRINTF(PF_DEBUG_MISC,
7045 ("pf: IPv6 short opt\n"));
7050 if (pd.proto == IPPROTO_AH)
7051 off += (opt6.ip6e_len + 2) * 4;
7053 off += (opt6.ip6e_len + 1) * 8;
7054 pd.proto = opt6.ip6e_nxt;
7055 /* goto the next header */
7062 } while (!terminal);
7064 /* if there's no routing header, use unmodified mbuf for checksumming */
7070 if (!pf_pull_hdr(m, off, &pd.hdr.tcp, sizeof(pd.hdr.tcp),
7071 &action, &reason, AF_INET6)) {
7072 log = action != PF_PASS;
7075 pd.p_len = pd.tot_len - off - (pd.hdr.tcp.th_off << 2);
7076 pd.sport = &pd.hdr.tcp.th_sport;
7077 pd.dport = &pd.hdr.tcp.th_dport;
7078 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
7079 if (action == PF_DROP)
7081 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
7083 if (action == PF_PASS) {
7084 if (V_pfsync_update_state_ptr != NULL)
7085 V_pfsync_update_state_ptr(s);
7089 } else if (s == NULL)
7090 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
7096 if (!pf_pull_hdr(m, off, &pd.hdr.udp, sizeof(pd.hdr.udp),
7097 &action, &reason, AF_INET6)) {
7098 log = action != PF_PASS;
7101 pd.sport = &pd.hdr.udp.uh_sport;
7102 pd.dport = &pd.hdr.udp.uh_dport;
7103 if (pd.hdr.udp.uh_dport == 0 ||
7104 ntohs(pd.hdr.udp.uh_ulen) > m->m_pkthdr.len - off ||
7105 ntohs(pd.hdr.udp.uh_ulen) < sizeof(struct udphdr)) {
7107 REASON_SET(&reason, PFRES_SHORT);
7110 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
7111 if (action == PF_PASS) {
7112 if (V_pfsync_update_state_ptr != NULL)
7113 V_pfsync_update_state_ptr(s);
7117 } else if (s == NULL)
7118 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
7123 case IPPROTO_ICMP: {
7125 DPFPRINTF(PF_DEBUG_MISC,
7126 ("pf: dropping IPv6 packet with ICMPv4 payload\n"));
7130 case IPPROTO_ICMPV6: {
7131 if (!pf_pull_hdr(m, off, &pd.hdr.icmp6, sizeof(pd.hdr.icmp6),
7132 &action, &reason, AF_INET6)) {
7133 log = action != PF_PASS;
7136 action = pf_test_state_icmp(&s, dir, kif,
7137 m, off, h, &pd, &reason);
7138 if (action == PF_PASS) {
7139 if (V_pfsync_update_state_ptr != NULL)
7140 V_pfsync_update_state_ptr(s);
7144 } else if (s == NULL)
7145 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
7151 action = pf_test_state_other(&s, dir, kif, m, &pd);
7152 if (action == PF_PASS) {
7153 if (V_pfsync_update_state_ptr != NULL)
7154 V_pfsync_update_state_ptr(s);
7158 } else if (s == NULL)
7159 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
7171 /* handle dangerous IPv6 extension headers. */
7172 if (action == PF_PASS && rh_cnt &&
7173 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
7175 REASON_SET(&reason, PFRES_IPOPTIONS);
7177 DPFPRINTF(PF_DEBUG_MISC,
7178 ("pf: dropping packet with dangerous v6 headers\n"));
7181 if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
7183 REASON_SET(&reason, PFRES_MEMORY);
7185 if (r->rtableid >= 0)
7186 M_SETFIB(m, r->rtableid);
7188 if (r->scrub_flags & PFSTATE_SETPRIO) {
7189 if (pd.tos & IPTOS_LOWDELAY)
7191 if (vlan_set_pcp(m, r->set_prio[pqid])) {
7193 REASON_SET(&reason, PFRES_MEMORY);
7195 DPFPRINTF(PF_DEBUG_MISC,
7196 ("pf: failed to allocate 802.1q mtag\n"));
7202 pd.act.pqid = s->pqid;
7203 pd.act.qid = s->qid;
7204 } else if (r->qid) {
7205 pd.act.pqid = r->pqid;
7206 pd.act.qid = r->qid;
7208 if (action == PF_PASS && pd.act.qid) {
7209 if (pd.pf_mtag == NULL &&
7210 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
7212 REASON_SET(&reason, PFRES_MEMORY);
7215 pd.pf_mtag->qid_hash = pf_state_hash(s);
7216 if (pd.tos & IPTOS_LOWDELAY)
7217 pd.pf_mtag->qid = pd.act.pqid;
7219 pd.pf_mtag->qid = pd.act.qid;
7220 /* Add hints for ecn. */
7221 pd.pf_mtag->hdr = h;
7226 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
7227 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
7228 (s->nat_rule.ptr->action == PF_RDR ||
7229 s->nat_rule.ptr->action == PF_BINAT) &&
7230 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
7231 m->m_flags |= M_SKIP_FIREWALL;
7233 /* XXX: Anybody working on it?! */
7235 printf("pf: divert(9) is not supported for IPv6\n");
7238 struct pf_krule *lr;
7240 if (s != NULL && s->nat_rule.ptr != NULL &&
7241 s->nat_rule.ptr->log & PF_LOG_ALL)
7242 lr = s->nat_rule.ptr;
7245 PFLOG_PACKET(kif, m, AF_INET6, dir, reason, lr, a, ruleset,
7249 pf_counter_u64_critical_enter();
7250 pf_counter_u64_add_protected(&kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS],
7252 pf_counter_u64_add_protected(&kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS],
7255 if (action == PF_PASS || r->action == PF_DROP) {
7256 dirndx = (dir == PF_OUT);
7257 pf_counter_u64_add_protected(&r->packets[dirndx], 1);
7258 pf_counter_u64_add_protected(&r->bytes[dirndx], pd.tot_len);
7260 pf_counter_u64_add_protected(&a->packets[dirndx], 1);
7261 pf_counter_u64_add_protected(&a->bytes[dirndx], pd.tot_len);
7264 if (s->nat_rule.ptr != NULL) {
7265 pf_counter_u64_add_protected(&s->nat_rule.ptr->packets[dirndx],
7267 pf_counter_u64_add_protected(&s->nat_rule.ptr->bytes[dirndx],
7270 if (s->src_node != NULL) {
7271 counter_u64_add(s->src_node->packets[dirndx],
7273 counter_u64_add(s->src_node->bytes[dirndx],
7276 if (s->nat_src_node != NULL) {
7277 counter_u64_add(s->nat_src_node->packets[dirndx],
7279 counter_u64_add(s->nat_src_node->bytes[dirndx],
7282 dirndx = (dir == s->direction) ? 0 : 1;
7283 s->packets[dirndx]++;
7284 s->bytes[dirndx] += pd.tot_len;
7287 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
7288 if (nr != NULL && r == &V_pf_default_rule)
7290 if (tr->src.addr.type == PF_ADDR_TABLE)
7291 pfr_update_stats(tr->src.addr.p.tbl,
7292 (s == NULL) ? pd.src :
7293 &s->key[(s->direction == PF_IN)]->addr[0],
7294 pd.af, pd.tot_len, dir == PF_OUT,
7295 r->action == PF_PASS, tr->src.neg);
7296 if (tr->dst.addr.type == PF_ADDR_TABLE)
7297 pfr_update_stats(tr->dst.addr.p.tbl,
7298 (s == NULL) ? pd.dst :
7299 &s->key[(s->direction == PF_IN)]->addr[1],
7300 pd.af, pd.tot_len, dir == PF_OUT,
7301 r->action == PF_PASS, tr->dst.neg);
7303 pf_counter_u64_critical_exit();
7306 case PF_SYNPROXY_DROP:
7317 /* pf_route6() returns unlocked. */
7319 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd, inp);
7322 /* Dummynet processing. */
7323 if (s && (s->dnpipe || s->dnrpipe)) {
7324 pd.act.dnpipe = s->dnpipe;
7325 pd.act.dnrpipe = s->dnrpipe;
7326 pd.act.flags = s->state_flags;
7328 pd.act.dnpipe = r->dnpipe;
7329 pd.act.dnrpipe = r->dnrpipe;
7330 pd.act.flags = r->free_flags;
7332 if (pd.act.dnpipe || pd.act.dnrpipe) {
7333 struct ip_fw_args dnflow;
7335 if (ip_dn_io_ptr == NULL) {
7339 REASON_SET(&reason, PFRES_MEMORY);
7343 if (pd.pf_mtag == NULL &&
7344 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
7348 REASON_SET(&reason, PFRES_MEMORY);
7352 if (pf_pdesc_to_dnflow(dir, &pd, r, s, &dnflow)) {
7353 pd.pf_mtag->flags |= PF_TAG_DUMMYNET;
7354 ip_dn_io_ptr(m0, &dnflow);
7365 /* If reassembled packet passed, create new fragments. */
7366 if (action == PF_PASS && *m0 && (pflags & PFIL_FWD) &&
7367 (mtag = m_tag_find(m, PF_REASSEMBLED, NULL)) != NULL)
7368 action = pf_refragment6(ifp, m0, mtag);
7370 SDT_PROBE4(pf, ip, test6, done, action, reason, r, s);