2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
5 * Copyright 2011-2018 Alexander Bluhm <bluhm@openbsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * $OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
35 #include "opt_inet6.h"
38 #include <sys/param.h>
39 #include <sys/kernel.h>
42 #include <sys/mutex.h>
43 #include <sys/refcount.h>
44 #include <sys/socket.h>
48 #include <net/pfvar.h>
49 #include <net/if_pflog.h>
51 #include <netinet/in.h>
52 #include <netinet/ip.h>
53 #include <netinet/ip_var.h>
54 #include <netinet6/ip6_var.h>
55 #include <netinet6/scope6_var.h>
56 #include <netinet/tcp.h>
57 #include <netinet/tcp_fsm.h>
58 #include <netinet/tcp_seq.h>
59 #include <netinet/sctp_constants.h>
60 #include <netinet/sctp_header.h>
63 #include <netinet/ip6.h>
67 TAILQ_ENTRY(pf_frent) fr_next;
69 uint16_t fe_hdrlen; /* ipv4 header length with ip options;
70 for ipv6: extension and fragment header */
71 uint16_t fe_extoff; /* last extension header offset or 0 */
72 uint16_t fe_len; /* fragment length */
73 uint16_t fe_off; /* fragment offset */
74 uint16_t fe_mff; /* more fragment flag */
77 struct pf_fragment_cmp {
78 struct pf_addr frc_src;
79 struct pf_addr frc_dst;
86 struct pf_fragment_cmp fr_key;
87 #define fr_src fr_key.frc_src
88 #define fr_dst fr_key.frc_dst
89 #define fr_id fr_key.frc_id
90 #define fr_af fr_key.frc_af
91 #define fr_proto fr_key.frc_proto
93 /* pointers to queue element */
94 struct pf_frent *fr_firstoff[PF_FRAG_ENTRY_POINTS];
95 /* count entries between pointers */
96 uint8_t fr_entries[PF_FRAG_ENTRY_POINTS];
97 RB_ENTRY(pf_fragment) fr_entry;
98 TAILQ_ENTRY(pf_fragment) frag_next;
100 uint16_t fr_maxlen; /* maximum length of single fragment */
101 u_int16_t fr_holes; /* number of holes in the queue */
102 TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
105 struct pf_fragment_tag {
106 uint16_t ft_hdrlen; /* header length of reassembled pkt */
107 uint16_t ft_extoff; /* last extension header offset or 0 */
108 uint16_t ft_maxlen; /* maximum fragment payload length */
109 uint32_t ft_id; /* fragment id */
112 VNET_DEFINE_STATIC(struct mtx, pf_frag_mtx);
113 #define V_pf_frag_mtx VNET(pf_frag_mtx)
114 #define PF_FRAG_LOCK() mtx_lock(&V_pf_frag_mtx)
115 #define PF_FRAG_UNLOCK() mtx_unlock(&V_pf_frag_mtx)
116 #define PF_FRAG_ASSERT() mtx_assert(&V_pf_frag_mtx, MA_OWNED)
118 VNET_DEFINE(uma_zone_t, pf_state_scrub_z); /* XXX: shared with pfsync */
120 VNET_DEFINE_STATIC(uma_zone_t, pf_frent_z);
121 #define V_pf_frent_z VNET(pf_frent_z)
122 VNET_DEFINE_STATIC(uma_zone_t, pf_frag_z);
123 #define V_pf_frag_z VNET(pf_frag_z)
125 TAILQ_HEAD(pf_fragqueue, pf_fragment);
126 TAILQ_HEAD(pf_cachequeue, pf_fragment);
127 VNET_DEFINE_STATIC(struct pf_fragqueue, pf_fragqueue);
128 #define V_pf_fragqueue VNET(pf_fragqueue)
129 RB_HEAD(pf_frag_tree, pf_fragment);
130 VNET_DEFINE_STATIC(struct pf_frag_tree, pf_frag_tree);
131 #define V_pf_frag_tree VNET(pf_frag_tree)
132 static int pf_frag_compare(struct pf_fragment *,
133 struct pf_fragment *);
134 static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
135 static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
137 static void pf_flush_fragments(void);
138 static void pf_free_fragment(struct pf_fragment *);
139 static void pf_remove_fragment(struct pf_fragment *);
141 static struct pf_frent *pf_create_fragment(u_short *);
142 static int pf_frent_holes(struct pf_frent *frent);
143 static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
144 struct pf_frag_tree *tree);
145 static inline int pf_frent_index(struct pf_frent *);
146 static int pf_frent_insert(struct pf_fragment *,
147 struct pf_frent *, struct pf_frent *);
148 void pf_frent_remove(struct pf_fragment *,
150 struct pf_frent *pf_frent_previous(struct pf_fragment *,
152 static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
153 struct pf_frent *, u_short *);
154 static struct mbuf *pf_join_fragment(struct pf_fragment *);
156 static int pf_reassemble(struct mbuf **, struct ip *, int, u_short *);
159 static int pf_reassemble6(struct mbuf **, struct ip6_hdr *,
160 struct ip6_frag *, uint16_t, uint16_t, u_short *);
163 #define DPFPRINTF(x) do { \
164 if (V_pf_status.debug >= PF_DEBUG_MISC) { \
165 printf("%s: ", __func__); \
172 pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
175 key->frc_src.v4 = ip->ip_src;
176 key->frc_dst.v4 = ip->ip_dst;
177 key->frc_af = AF_INET;
178 key->frc_proto = ip->ip_p;
179 key->frc_id = ip->ip_id;
184 pf_normalize_init(void)
187 V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
188 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
189 V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
190 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
191 V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
192 sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
195 mtx_init(&V_pf_frag_mtx, "pf fragments", NULL, MTX_DEF);
197 V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
198 V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
199 uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
200 uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");
202 TAILQ_INIT(&V_pf_fragqueue);
206 pf_normalize_cleanup(void)
209 uma_zdestroy(V_pf_state_scrub_z);
210 uma_zdestroy(V_pf_frent_z);
211 uma_zdestroy(V_pf_frag_z);
213 mtx_destroy(&V_pf_frag_mtx);
217 pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
221 if ((diff = a->fr_id - b->fr_id) != 0)
223 if ((diff = a->fr_proto - b->fr_proto) != 0)
225 if ((diff = a->fr_af - b->fr_af) != 0)
227 if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
229 if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
235 pf_purge_expired_fragments(void)
237 u_int32_t expire = time_uptime -
238 V_pf_default_rule.timeout[PFTM_FRAG];
240 pf_purge_fragments(expire);
244 pf_purge_fragments(uint32_t expire)
246 struct pf_fragment *frag;
249 while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
250 if (frag->fr_timeout > expire)
253 DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
254 pf_free_fragment(frag);
261 * Try to flush old fragments to make space for new ones
264 pf_flush_fragments(void)
266 struct pf_fragment *frag;
271 goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
272 DPFPRINTF(("trying to free %d frag entries\n", goal));
273 while (goal < uma_zone_get_cur(V_pf_frent_z)) {
274 frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
276 pf_free_fragment(frag);
282 /* Frees the fragments and all associated entries */
284 pf_free_fragment(struct pf_fragment *frag)
286 struct pf_frent *frent;
290 /* Free all fragments */
291 for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
292 frent = TAILQ_FIRST(&frag->fr_queue)) {
293 TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
295 m_freem(frent->fe_m);
296 uma_zfree(V_pf_frent_z, frent);
299 pf_remove_fragment(frag);
302 static struct pf_fragment *
303 pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
305 struct pf_fragment *frag;
309 frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
311 /* XXX Are we sure we want to update the timeout? */
312 frag->fr_timeout = time_uptime;
313 TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
314 TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
320 /* Removes a fragment from the fragment queue and frees the fragment */
322 pf_remove_fragment(struct pf_fragment *frag)
326 KASSERT(frag, ("frag != NULL"));
328 RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
329 TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
330 uma_zfree(V_pf_frag_z, frag);
333 static struct pf_frent *
334 pf_create_fragment(u_short *reason)
336 struct pf_frent *frent;
340 frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
342 pf_flush_fragments();
343 frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
345 REASON_SET(reason, PFRES_MEMORY);
354 * Calculate the additional holes that were created in the fragment
355 * queue by inserting this fragment. A fragment in the middle
356 * creates one more hole by splitting. For each connected side,
357 * one hole is closed.
358 * Fragment entry must be in the queue when calling this function.
361 pf_frent_holes(struct pf_frent *frent)
363 struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
364 struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
368 if (frent->fe_off == 0)
371 KASSERT(frent->fe_off != 0, ("frent->fe_off != 0"));
372 if (frent->fe_off == prev->fe_off + prev->fe_len)
379 KASSERT(frent->fe_mff, ("frent->fe_mff"));
380 if (next->fe_off == frent->fe_off + frent->fe_len)
387 pf_frent_index(struct pf_frent *frent)
390 * We have an array of 16 entry points to the queue. A full size
391 * 65535 octet IP packet can have 8192 fragments. So the queue
392 * traversal length is at most 512 and at most 16 entry points are
393 * checked. We need 128 additional bytes on a 64 bit architecture.
395 CTASSERT(((u_int16_t)0xffff &~ 7) / (0x10000 / PF_FRAG_ENTRY_POINTS) ==
396 PF_FRAG_ENTRY_POINTS - 1);
397 CTASSERT(((u_int16_t)0xffff >> 3) / PF_FRAG_ENTRY_POINTS == 512 - 1);
399 return frent->fe_off / (0x10000 / PF_FRAG_ENTRY_POINTS);
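/*
 * Worked example (illustrative only, not part of the algorithm): with
 * PF_FRAG_ENTRY_POINTS == 16 the divisor is 0x10000 / 16 == 4096, so
 * fe_off 12345 maps to index 12345 / 4096 == 3.  Offsets 0..4095 land
 * in entry point 0, 4096..8191 in entry point 1, and so on; the 16
 * pointers of fr_firstoff account for the 128 bytes mentioned above.
 */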
403 pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent,
404 struct pf_frent *prev)
408 CTASSERT(PF_FRAG_ENTRY_LIMIT <= 0xff);
411 * A packet has at most 65536 octets. With 16 entry points, each one
412 * spans 4096 octets. We limit these to 64 fragments each, which
413 * means on average every fragment must have at least 64 octets.
415 index = pf_frent_index(frent);
416 if (frag->fr_entries[index] >= PF_FRAG_ENTRY_LIMIT)
418 frag->fr_entries[index]++;
421 TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
423 KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
424 ("overlapping fragment"));
425 TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
428 if (frag->fr_firstoff[index] == NULL) {
429 KASSERT(prev == NULL || pf_frent_index(prev) < index,
430 ("prev == NULL || pf_frent_index(pref) < index"));
431 frag->fr_firstoff[index] = frent;
433 if (frent->fe_off < frag->fr_firstoff[index]->fe_off) {
434 KASSERT(prev == NULL || pf_frent_index(prev) < index,
435 ("prev == NULL || pf_frent_index(pref) < index"));
436 frag->fr_firstoff[index] = frent;
438 KASSERT(prev != NULL, ("prev != NULL"));
439 KASSERT(pf_frent_index(prev) == index,
440 ("pf_frent_index(prev) == index"));
444 frag->fr_holes += pf_frent_holes(frent);
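/*
 * Hole accounting sketch with assumed offsets: a queue holding
 * [0,1500) and [3000,4500) has one hole.  Inserting [1500,3000)
 * connects both sides, so pf_frent_holes() returns -1 and fr_holes
 * drops by one; inserting [2000,2500) instead splits the hole in two
 * and adds one.
 */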
450 pf_frent_remove(struct pf_fragment *frag, struct pf_frent *frent)
453 struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
455 struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
458 frag->fr_holes -= pf_frent_holes(frent);
460 index = pf_frent_index(frent);
461 KASSERT(frag->fr_firstoff[index] != NULL, ("frent not found"));
462 if (frag->fr_firstoff[index]->fe_off == frent->fe_off) {
464 frag->fr_firstoff[index] = NULL;
466 KASSERT(frent->fe_off + frent->fe_len <= next->fe_off,
467 ("overlapping fragment"));
468 if (pf_frent_index(next) == index) {
469 frag->fr_firstoff[index] = next;
471 frag->fr_firstoff[index] = NULL;
475 KASSERT(frag->fr_firstoff[index]->fe_off < frent->fe_off,
476 ("frag->fr_firstoff[index]->fe_off < frent->fe_off"));
477 KASSERT(prev != NULL, ("prev != NULL"));
478 KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
479 ("overlapping fragment"));
480 KASSERT(pf_frent_index(prev) == index,
481 ("pf_frent_index(prev) == index"));
484 TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
486 KASSERT(frag->fr_entries[index] > 0, ("No fragments remaining"));
487 frag->fr_entries[index]--;
491 pf_frent_previous(struct pf_fragment *frag, struct pf_frent *frent)
493 struct pf_frent *prev, *next;
497 * If there are no fragments after frent, take the final one. Assume
498 * that the global queue is not empty.
500 prev = TAILQ_LAST(&frag->fr_queue, pf_fragq);
501 KASSERT(prev != NULL, ("prev != NULL"));
502 if (prev->fe_off <= frent->fe_off)
505 * We want to find a fragment entry that is before frag, but still
506 * close to it. Find the first fragment entry that is in the same
507 * entry point or in the first entry point after that. As we have
508 * already checked that there are entries behind frag, this will
509 * find an entry.
511 for (index = pf_frent_index(frent); index < PF_FRAG_ENTRY_POINTS;
513 prev = frag->fr_firstoff[index];
517 KASSERT(prev != NULL, ("prev != NULL"));
519 * In prev we may have a fragment from the same entry point that is
520 * before frent, or one that is just one position behind frent.
521 * In the latter case, we go back one step and have the predecessor.
522 * There may be none if the new fragment will be the first one.
524 if (prev->fe_off > frent->fe_off) {
525 prev = TAILQ_PREV(prev, pf_fragq, fr_next);
528 KASSERT(prev->fe_off <= frent->fe_off,
529 ("prev->fe_off <= frent->fe_off"));
533 * Now prev points to the first fragment of the entry point; the
534 * offset of frent lies beyond it. Find the closest previous fragment.
536 for (next = TAILQ_NEXT(prev, fr_next); next != NULL;
537 next = TAILQ_NEXT(next, fr_next)) {
538 if (next->fe_off > frent->fe_off)
545 static struct pf_fragment *
546 pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
549 struct pf_frent *after, *next, *prev;
550 struct pf_fragment *frag;
552 int old_index, new_index;
556 /* No empty fragments. */
557 if (frent->fe_len == 0) {
558 DPFPRINTF(("bad fragment: len 0\n"));
562 /* All fragments are 8 byte aligned. */
563 if (frent->fe_mff && (frent->fe_len & 0x7)) {
564 DPFPRINTF(("bad fragment: mff and len %d\n", frent->fe_len));
568 /* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
569 if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
570 DPFPRINTF(("bad fragment: max packet %d\n",
571 frent->fe_off + frent->fe_len));
575 DPFPRINTF((key->frc_af == AF_INET ?
576 "reass frag %d @ %d-%d\n" : "reass frag %#08x @ %d-%d\n",
577 key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));
579 /* Fully buffer all of the fragments in this fragment queue. */
580 frag = pf_find_fragment(key, &V_pf_frag_tree);
582 /* Create a new reassembly queue for this packet. */
584 frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
586 pf_flush_fragments();
587 frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
589 REASON_SET(reason, PFRES_MEMORY);
594 *(struct pf_fragment_cmp *)frag = *key;
595 memset(frag->fr_firstoff, 0, sizeof(frag->fr_firstoff));
596 memset(frag->fr_entries, 0, sizeof(frag->fr_entries));
597 frag->fr_timeout = time_uptime;
598 frag->fr_maxlen = frent->fe_len;
600 TAILQ_INIT(&frag->fr_queue);
602 RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
603 TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
605 /* We do not have a previous fragment, cannot fail. */
606 pf_frent_insert(frag, frent, NULL);
611 KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY(&frag->fr_queue)"));
613 /* Remember maximum fragment len for refragmentation. */
614 if (frent->fe_len > frag->fr_maxlen)
615 frag->fr_maxlen = frent->fe_len;
617 /* Maximum data we have seen already. */
618 total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
619 TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
621 /* Non-terminal fragments must have the more-fragments flag set. */
622 if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
625 /* Check if we saw the last fragment already. */
626 if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
627 if (frent->fe_off + frent->fe_len > total ||
628 (frent->fe_off + frent->fe_len == total && frent->fe_mff))
631 if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
635 /* Find neighbors for newly inserted fragment */
636 prev = pf_frent_previous(frag, frent);
638 after = TAILQ_FIRST(&frag->fr_queue);
639 KASSERT(after != NULL, ("after != NULL"));
641 after = TAILQ_NEXT(prev, fr_next);
644 if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
647 precut = prev->fe_off + prev->fe_len - frent->fe_off;
648 if (precut >= frent->fe_len)
650 DPFPRINTF(("overlap -%d\n", precut));
651 m_adj(frent->fe_m, precut);
652 frent->fe_off += precut;
653 frent->fe_len -= precut;
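/*
 * Example with hypothetical offsets: prev covers [0,64) and frent
 * claims [48,112).  Then precut == 0 + 64 - 48 == 16, m_adj() strips
 * 16 octets from the front of the mbuf, and frent becomes [64,112),
 * flush against prev.
 */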
656 for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
660 aftercut = frent->fe_off + frent->fe_len - after->fe_off;
661 DPFPRINTF(("adjust overlap %d\n", aftercut));
662 if (aftercut < after->fe_len) {
663 m_adj(after->fe_m, aftercut);
664 old_index = pf_frent_index(after);
665 after->fe_off += aftercut;
666 after->fe_len -= aftercut;
667 new_index = pf_frent_index(after);
668 if (old_index != new_index) {
669 DPFPRINTF(("frag index %d, new %d",
670 old_index, new_index));
671 /* Fragment switched queue as fe_off changed */
672 after->fe_off -= aftercut;
673 after->fe_len += aftercut;
674 /* Remove restored fragment from old queue */
675 pf_frent_remove(frag, after);
676 after->fe_off += aftercut;
677 after->fe_len -= aftercut;
678 /* Insert into correct queue */
679 if (pf_frent_insert(frag, after, prev)) {
681 ("fragment requeue limit exceeded"));
682 m_freem(after->fe_m);
683 uma_zfree(V_pf_frent_z, after);
684 /* There is no way to recover */
691 /* This fragment is completely overlapped, lose it. */
692 next = TAILQ_NEXT(after, fr_next);
693 pf_frent_remove(frag, after);
694 m_freem(after->fe_m);
695 uma_zfree(V_pf_frent_z, after);
698 /* If part of the queue gets too long, there is no way to recover. */
699 if (pf_frent_insert(frag, frent, prev)) {
700 DPFPRINTF(("fragment queue limit exceeded\n"));
707 REASON_SET(reason, PFRES_FRAG);
709 uma_zfree(V_pf_frent_z, frent);
714 pf_join_fragment(struct pf_fragment *frag)
717 struct pf_frent *frent, *next;
719 frent = TAILQ_FIRST(&frag->fr_queue);
720 next = TAILQ_NEXT(frent, fr_next);
723 m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
724 uma_zfree(V_pf_frent_z, frent);
725 for (frent = next; frent != NULL; frent = next) {
726 next = TAILQ_NEXT(frent, fr_next);
729 /* Strip off ip header. */
730 m_adj(m2, frent->fe_hdrlen);
731 /* Strip off any trailing bytes. */
732 m_adj(m2, frent->fe_len - m2->m_pkthdr.len);
734 uma_zfree(V_pf_frent_z, frent);
738 /* Remove from fragment queue. */
739 pf_remove_fragment(frag);
746 pf_reassemble(struct mbuf **m0, struct ip *ip, int dir, u_short *reason)
748 struct mbuf *m = *m0;
749 struct pf_frent *frent;
750 struct pf_fragment *frag;
751 struct pf_fragment_cmp key;
752 uint16_t total, hdrlen;
754 /* Get an entry for the fragment queue */
755 if ((frent = pf_create_fragment(reason)) == NULL)
759 frent->fe_hdrlen = ip->ip_hl << 2;
760 frent->fe_extoff = 0;
761 frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
762 frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
763 frent->fe_mff = ntohs(ip->ip_off) & IP_MF;
765 pf_ip2key(ip, dir, &key);
767 if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
770 /* The mbuf is part of the fragment entry, no direct free or access */
773 if (frag->fr_holes) {
774 DPFPRINTF(("frag %d, holes %d\n", frag->fr_id, frag->fr_holes));
775 return (PF_PASS); /* drop because *m0 is NULL, no error */
778 /* We have all the data */
779 frent = TAILQ_FIRST(&frag->fr_queue);
780 KASSERT(frent != NULL, ("frent != NULL"));
781 total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
782 TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
783 hdrlen = frent->fe_hdrlen;
785 m = *m0 = pf_join_fragment(frag);
788 if (m->m_flags & M_PKTHDR) {
790 for (m = *m0; m; m = m->m_next)
793 m->m_pkthdr.len = plen;
796 ip = mtod(m, struct ip *);
797 ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_len,
798 htons(hdrlen + total), 0);
799 ip->ip_len = htons(hdrlen + total);
800 ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_off,
801 ip->ip_off & ~(IP_MF|IP_OFFMASK), 0);
802 ip->ip_off &= ~(IP_MF|IP_OFFMASK);
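/*
 * pf_cksum_fixup() is an incremental checksum update in the style of
 * RFC 1624, folding only the changed 16-bit words into ip_sum instead
 * of recomputing the whole header checksum.  Roughly (a sketch, not
 * necessarily the exact kernel implementation):
 *
 *	l = cksum + old - new;
 *	l = (l >> 16) + (l & 0xffff);	(fold the carry back in)
 *
 * applied above once for ip_len and once for ip_off.
 */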
804 if (hdrlen + total > IP_MAXPACKET) {
805 DPFPRINTF(("drop: too big: %d\n", total));
807 REASON_SET(reason, PFRES_SHORT);
808 /* PF_DROP requires a valid mbuf *m0 in pf_test() */
812 DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
819 pf_reassemble6(struct mbuf **m0, struct ip6_hdr *ip6, struct ip6_frag *fraghdr,
820 uint16_t hdrlen, uint16_t extoff, u_short *reason)
822 struct mbuf *m = *m0;
823 struct pf_frent *frent;
824 struct pf_fragment *frag;
825 struct pf_fragment_cmp key;
827 struct pf_fragment_tag *ftag;
830 uint16_t total, maxlen;
835 /* Get an entry for the fragment queue. */
836 if ((frent = pf_create_fragment(reason)) == NULL) {
842 frent->fe_hdrlen = hdrlen;
843 frent->fe_extoff = extoff;
844 frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
845 frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
846 frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;
848 key.frc_src.v6 = ip6->ip6_src;
849 key.frc_dst.v6 = ip6->ip6_dst;
850 key.frc_af = AF_INET6;
851 /* Only the first fragment's protocol is relevant. */
853 key.frc_id = fraghdr->ip6f_ident;
855 if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
860 /* The mbuf is part of the fragment entry, no direct free or access. */
863 if (frag->fr_holes) {
864 DPFPRINTF(("frag %d, holes %d\n", frag->fr_id,
867 return (PF_PASS); /* Drop because *m0 is NULL, no error. */
870 /* We have all the data. */
871 frent = TAILQ_FIRST(&frag->fr_queue);
872 KASSERT(frent != NULL, ("frent != NULL"));
873 extoff = frent->fe_extoff;
874 maxlen = frag->fr_maxlen;
875 frag_id = frag->fr_id;
876 total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
877 TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
878 hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);
880 m = *m0 = pf_join_fragment(frag);
885 /* Take protocol from first fragment header. */
886 m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
887 KASSERT(m, ("%s: short mbuf chain", __func__));
888 proto = *(mtod(m, uint8_t *) + off);
891 /* Delete frag6 header */
892 if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
895 if (m->m_flags & M_PKTHDR) {
897 for (m = *m0; m; m = m->m_next)
900 m->m_pkthdr.len = plen;
903 if ((mtag = m_tag_get(PACKET_TAG_PF_REASSEMBLED,
904 sizeof(struct pf_fragment_tag), M_NOWAIT)) == NULL)
906 ftag = (struct pf_fragment_tag *)(mtag + 1);
907 ftag->ft_hdrlen = hdrlen;
908 ftag->ft_extoff = extoff;
909 ftag->ft_maxlen = maxlen;
910 ftag->ft_id = frag_id;
911 m_tag_prepend(m, mtag);
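/*
 * The tag now travels with the reassembled mbuf until refragmentation.
 * A sketch of how a consumer locates it again via the standard mbuf
 * tag API (mtag/ftag as above):
 *
 *	mtag = m_tag_find(m, PACKET_TAG_PF_REASSEMBLED, NULL);
 *	if (mtag != NULL)
 *		ftag = (struct pf_fragment_tag *)(mtag + 1);
 */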
913 ip6 = mtod(m, struct ip6_hdr *);
914 ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
916 /* Write protocol into next field of last extension header. */
917 m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
919 KASSERT(m, ("%s: short mbuf chain", __func__));
920 *(mtod(m, char *) + off) = proto;
923 ip6->ip6_nxt = proto;
925 if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
926 DPFPRINTF(("drop: too big: %d\n", total));
928 REASON_SET(reason, PFRES_SHORT);
929 /* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
933 DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip6->ip6_plen)));
937 REASON_SET(reason, PFRES_MEMORY);
938 /* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
945 pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag,
948 struct mbuf *m = *m0, *t;
950 struct pf_fragment_tag *ftag = (struct pf_fragment_tag *)(mtag + 1);
953 uint16_t hdrlen, extoff, maxlen;
957 hdrlen = ftag->ft_hdrlen;
958 extoff = ftag->ft_extoff;
959 maxlen = ftag->ft_maxlen;
960 frag_id = ftag->ft_id;
961 m_tag_delete(m, mtag);
968 /* Use protocol from next field of last extension header */
969 m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
971 KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
972 proto = *(mtod(m, uint8_t *) + off);
973 *(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
976 hdr = mtod(m, struct ip6_hdr *);
977 proto = hdr->ip6_nxt;
978 hdr->ip6_nxt = IPPROTO_FRAGMENT;
981 /* In case of link-local traffic we'll need a scope set. */
982 hdr = mtod(m, struct ip6_hdr *);
984 in6_setscope(&hdr->ip6_src, ifp, NULL);
985 in6_setscope(&hdr->ip6_dst, ifp, NULL);
987 /* The MTU must be a multiple of 8 bytes, or we risk doing the
988 * fragmentation wrong. */
989 maxlen = maxlen & ~7;
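/*
 * Example: a maxlen of 1452 becomes 1452 & ~7 == 1448, the largest
 * multiple of 8 not exceeding it; every fragment except the last must
 * carry a payload length divisible by 8.
 */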
992 * Maxlen may be less than 8 if there was only a single
993 * fragment. As it was fragmented before, add a fragment
994 * header also for a single fragment. If total or maxlen
995 * is less than 8, ip6_fragment() will return EMSGSIZE and
996 * we drop the packet.
998 error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
999 m = (*m0)->m_nextpkt;
1000 (*m0)->m_nextpkt = NULL;
1002 /* The first mbuf contains the unfragmented packet. */
1007 /* Drop expects an mbuf to free. */
1008 DPFPRINTF(("refragment error %d\n", error));
1013 m->m_nextpkt = NULL;
1014 m->m_flags |= M_SKIP_FIREWALL;
1015 memset(&pd, 0, sizeof(pd));
1016 pd.pf_mtag = pf_find_mtag(m);
1019 MPASS(m->m_pkthdr.rcvif != NULL);
1022 (void)ip6_output(m, NULL, NULL, 0, NULL, NULL,
1035 pf_normalize_ip(struct mbuf **m0, struct pfi_kkif *kif, u_short *reason,
1036 struct pf_pdesc *pd)
1038 struct mbuf *m = *m0;
1040 struct ip *h = mtod(m, struct ip *);
1041 int mff = (ntohs(h->ip_off) & IP_MF);
1042 int hlen = h->ip_hl << 2;
1043 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
1052 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1053 /* Check if there are any scrub rules. Lack of scrub rules means packet
1054 * normalization is enforced, just like in OpenBSD. */
1057 pf_counter_u64_add(&r->evaluations, 1);
1058 if (pfi_kkif_match(r->kif, kif) == r->ifnot)
1059 r = r->skip[PF_SKIP_IFP].ptr;
1060 else if (r->direction && r->direction != pd->dir)
1061 r = r->skip[PF_SKIP_DIR].ptr;
1062 else if (r->af && r->af != AF_INET)
1063 r = r->skip[PF_SKIP_AF].ptr;
1064 else if (r->proto && r->proto != h->ip_p)
1065 r = r->skip[PF_SKIP_PROTO].ptr;
1066 else if (PF_MISMATCHAW(&r->src.addr,
1067 (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
1068 r->src.neg, kif, M_GETFIB(m)))
1069 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
1070 else if (PF_MISMATCHAW(&r->dst.addr,
1071 (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
1072 r->dst.neg, NULL, M_GETFIB(m)))
1073 r = r->skip[PF_SKIP_DST_ADDR].ptr;
1074 else if (r->match_tag && !pf_match_tag(m, r, &tag,
1075 pd->pf_mtag ? pd->pf_mtag->tag : 0))
1076 r = TAILQ_NEXT(r, entries);
1082 /* With scrub rules present IPv4 normalization happens only
1083 * if one of the rules has matched and it's not a "no scrub" rule */
1084 if (r == NULL || r->action == PF_NOSCRUB)
1087 pf_counter_u64_critical_enter();
1088 pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
1089 pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
1090 pf_counter_u64_critical_exit();
1091 pf_rule_to_actions(r, &pd->act);
1092 } else if ((!V_pf_status.reass && (h->ip_off & htons(IP_MF | IP_OFFMASK)))) {
1093 /* With no scrub rules IPv4 fragment reassembly depends on the
1094 * global switch. Fragments can be dropped early if reassembly
1095 * is disabled. */
1096 REASON_SET(reason, PFRES_NORM);
1100 /* Check for illegal packets */
1101 if (hlen < (int)sizeof(struct ip)) {
1102 REASON_SET(reason, PFRES_NORM);
1106 if (hlen > ntohs(h->ip_len)) {
1107 REASON_SET(reason, PFRES_NORM);
1111 /* Clear IP_DF if the rule uses the no-df option or we're in no-df mode */
1112 if ((((r && r->rule_flag & PFRULE_NODF) ||
1113 (V_pf_status.reass & PF_REASS_NODF)) && h->ip_off & htons(IP_DF)
1115 u_int16_t ip_off = h->ip_off;
1117 h->ip_off &= htons(~IP_DF);
1118 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
1121 /* We will need other tests here */
1122 if (!fragoff && !mff)
1125 /* We're dealing with a fragment now. Don't allow fragments
1126 * with IP_DF to enter the cache. If the flag was cleared by
1127 * no-df above, fine. Otherwise drop it.
1129 if (h->ip_off & htons(IP_DF)) {
1130 DPFPRINTF(("IP_DF\n"));
1134 ip_len = ntohs(h->ip_len) - hlen;
1136 /* All fragments are 8 byte aligned */
1137 if (mff && (ip_len & 0x7)) {
1138 DPFPRINTF(("mff and %d\n", ip_len));
1142 /* Respect maximum length */
1143 if (fragoff + ip_len > IP_MAXPACKET) {
1144 DPFPRINTF(("max packet %d\n", fragoff + ip_len));
1148 if (r == NULL || !(r->rule_flag & PFRULE_FRAGMENT_NOREASS)) {
1149 max = fragoff + ip_len;
1151 /* Fully buffer all of the fragments
1152 * Might return a completely reassembled mbuf, or NULL */
1154 DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
1155 verdict = pf_reassemble(m0, h, pd->dir, reason);
1158 if (verdict != PF_PASS)
1165 h = mtod(m, struct ip *);
1168 /* At this point, only IP_DF is allowed in ip_off */
1169 if (h->ip_off & ~htons(IP_DF)) {
1170 u_int16_t ip_off = h->ip_off;
1172 h->ip_off &= htons(IP_DF);
1173 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
1180 DPFPRINTF(("dropping bad fragment\n"));
1181 REASON_SET(reason, PFRES_FRAG);
1183 if (r != NULL && r->log)
1184 PFLOG_PACKET(kif, m, AF_INET, *reason, r, NULL, NULL, pd, 1);
1192 pf_normalize_ip6(struct mbuf **m0, struct pfi_kkif *kif,
1193 u_short *reason, struct pf_pdesc *pd)
1195 struct mbuf *m = *m0;
1197 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
1202 struct ip6_frag frag;
1212 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1213 /* Check if there are any scrub rules. Lack of scrub rules means packet
1214 * normalization is enforced, just like in OpenBSD. */
1217 pf_counter_u64_add(&r->evaluations, 1);
1218 if (pfi_kkif_match(r->kif, kif) == r->ifnot)
1219 r = r->skip[PF_SKIP_IFP].ptr;
1220 else if (r->direction && r->direction != pd->dir)
1221 r = r->skip[PF_SKIP_DIR].ptr;
1222 else if (r->af && r->af != AF_INET6)
1223 r = r->skip[PF_SKIP_AF].ptr;
1224 #if 0 /* header chain! */
1225 else if (r->proto && r->proto != h->ip6_nxt)
1226 r = r->skip[PF_SKIP_PROTO].ptr;
1228 else if (PF_MISMATCHAW(&r->src.addr,
1229 (struct pf_addr *)&h->ip6_src, AF_INET6,
1230 r->src.neg, kif, M_GETFIB(m)))
1231 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
1232 else if (PF_MISMATCHAW(&r->dst.addr,
1233 (struct pf_addr *)&h->ip6_dst, AF_INET6,
1234 r->dst.neg, NULL, M_GETFIB(m)))
1235 r = r->skip[PF_SKIP_DST_ADDR].ptr;
1241 /* With scrub rules present IPv6 normalization happens only
1242 * if one of the rules has matched and it's not a "no scrub" rule */
1243 if (r == NULL || r->action == PF_NOSCRUB)
1246 pf_counter_u64_critical_enter();
1247 pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
1248 pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
1249 pf_counter_u64_critical_exit();
1250 pf_rule_to_actions(r, &pd->act);
1253 /* Check for illegal packets */
1254 if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
1257 plen = ntohs(h->ip6_plen);
1258 /* jumbo payload option not supported */
1263 off = sizeof(struct ip6_hdr);
1268 case IPPROTO_FRAGMENT:
1272 case IPPROTO_ROUTING:
1273 case IPPROTO_DSTOPTS:
1274 if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
1278 if (proto == IPPROTO_AH)
1279 off += (ext.ip6e_len + 2) * 4;
1281 off += (ext.ip6e_len + 1) * 8;
1282 proto = ext.ip6e_nxt;
1284 case IPPROTO_HOPOPTS:
1285 if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
1289 optend = off + (ext.ip6e_len + 1) * 8;
1290 ooff = off + sizeof(ext);
1292 if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
1293 sizeof(opt.ip6o_type), NULL, NULL,
1296 if (opt.ip6o_type == IP6OPT_PAD1) {
1300 if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
1301 NULL, NULL, AF_INET6))
1303 if (ooff + sizeof(opt) + opt.ip6o_len > optend)
1305 if (opt.ip6o_type == IP6OPT_JUMBO)
1307 ooff += sizeof(opt) + opt.ip6o_len;
1308 } while (ooff < optend);
1311 proto = ext.ip6e_nxt;
1317 } while (!terminal);
1319 if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
1325 if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
1328 if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
1331 /* Offset now points to data portion. */
1332 off += sizeof(frag);
1334 /* pf_reassemble6() returns PF_DROP, or PF_PASS with *m0 either NULL or the completely reassembled mbuf. */
1335 if (pf_reassemble6(m0, h, &frag, off, extoff, reason) != PF_PASS)
1341 pd->flags |= PFDESC_IP_REAS;
1345 REASON_SET(reason, PFRES_SHORT);
1346 if (r != NULL && r->log)
1347 PFLOG_PACKET(kif, m, AF_INET6, *reason, r, NULL, NULL, pd, 1);
1351 REASON_SET(reason, PFRES_NORM);
1352 if (r != NULL && r->log)
1353 PFLOG_PACKET(kif, m, AF_INET6, *reason, r, NULL, NULL, pd, 1);
1359 pf_normalize_tcp(struct pfi_kkif *kif, struct mbuf *m, int ipoff,
1360 int off, void *h, struct pf_pdesc *pd)
1362 struct pf_krule *r, *rm = NULL;
1363 struct tcphdr *th = &pd->hdr.tcp;
1367 sa_family_t af = pd->af;
1372 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1373 /* Check if there are any scrub rules. Lack of scrub rules means packet
1374 * normalization is enforced, just like in OpenBSD. */
1377 pf_counter_u64_add(&r->evaluations, 1);
1378 if (pfi_kkif_match(r->kif, kif) == r->ifnot)
1379 r = r->skip[PF_SKIP_IFP].ptr;
1380 else if (r->direction && r->direction != pd->dir)
1381 r = r->skip[PF_SKIP_DIR].ptr;
1382 else if (r->af && r->af != af)
1383 r = r->skip[PF_SKIP_AF].ptr;
1384 else if (r->proto && r->proto != pd->proto)
1385 r = r->skip[PF_SKIP_PROTO].ptr;
1386 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
1387 r->src.neg, kif, M_GETFIB(m)))
1388 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
1389 else if (r->src.port_op && !pf_match_port(r->src.port_op,
1390 r->src.port[0], r->src.port[1], th->th_sport))
1391 r = r->skip[PF_SKIP_SRC_PORT].ptr;
1392 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
1393 r->dst.neg, NULL, M_GETFIB(m)))
1394 r = r->skip[PF_SKIP_DST_ADDR].ptr;
1395 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
1396 r->dst.port[0], r->dst.port[1], th->th_dport))
1397 r = r->skip[PF_SKIP_DST_PORT].ptr;
1398 else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
1399 pf_osfp_fingerprint(pd, m, off, th),
1401 r = TAILQ_NEXT(r, entries);
1409 /* With scrub rules present TCP normalization happens only
1410 * if one of the rules has matched and it's not a "no scrub" rule */
1411 if (rm == NULL || rm->action == PF_NOSCRUB)
1414 pf_counter_u64_critical_enter();
1415 pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
1416 pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
1417 pf_counter_u64_critical_exit();
1418 pf_rule_to_actions(rm, &pd->act);
1421 if (rm && rm->rule_flag & PFRULE_REASSEMBLE_TCP)
1422 pd->flags |= PFDESC_TCP_NORM;
1424 flags = th->th_flags;
1425 if (flags & TH_SYN) {
1426 /* Illegal packet */
1433 /* Illegal packet */
1434 if (!(flags & (TH_ACK|TH_RST)))
1438 if (!(flags & TH_ACK)) {
1439 /* These flags are only valid if ACK is set */
1440 if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
1444 /* Check for illegal header length */
1445 if (th->th_off < (sizeof(struct tcphdr) >> 2))
1448 /* If flags changed, or reserved data set, then adjust */
1449 if (flags != th->th_flags || th->th_x2 != 0) {
1452 ov = *(u_int16_t *)(&th->th_ack + 1);
1453 th->th_flags = flags;
1455 nv = *(u_int16_t *)(&th->th_ack + 1);
1457 th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, ov, nv, 0);
1461 /* Remove urgent pointer, if TH_URG is not set */
1462 if (!(flags & TH_URG) && th->th_urp) {
1463 th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, th->th_urp,
1469 /* copy back packet headers if we sanitized */
1471 m_copyback(m, off, sizeof(*th), (caddr_t)th);
1476 REASON_SET(&reason, PFRES_NORM);
1477 if (rm != NULL && r->log)
1478 PFLOG_PACKET(kif, m, AF_INET, reason, r, NULL, NULL, pd, 1);
1483 pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
1484 struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
1486 u_int32_t tsval, tsecr;
1490 KASSERT((src->scrub == NULL),
1491 ("pf_normalize_tcp_init: src->scrub != NULL"));
1493 src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
1494 if (src->scrub == NULL)
1500 struct ip *h = mtod(m, struct ip *);
1501 src->scrub->pfss_ttl = h->ip_ttl;
1507 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
1508 src->scrub->pfss_ttl = h->ip6_hlim;
1515 * All normalizations below are only begun if we see the start of
1516 * the connection. They must all set an enabled bit in pfss_flags
1518 if ((th->th_flags & TH_SYN) == 0)
1521 if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
1522 pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
1523 /* Diddle with TCP options */
1525 opt = hdr + sizeof(struct tcphdr);
1526 hlen = (th->th_off << 2) - sizeof(struct tcphdr);
1527 while (hlen >= TCPOLEN_TIMESTAMP) {
1529 case TCPOPT_EOL: /* FALLTHROUGH */
1534 case TCPOPT_TIMESTAMP:
1535 if (opt[1] >= TCPOLEN_TIMESTAMP) {
1536 src->scrub->pfss_flags |=
1538 src->scrub->pfss_ts_mod =
1539 htonl(arc4random());
1541 /* note PFSS_PAWS not set yet */
1542 memcpy(&tsval, &opt[2],
1544 memcpy(&tsecr, &opt[6],
1546 src->scrub->pfss_tsval0 = ntohl(tsval);
1547 src->scrub->pfss_tsval = ntohl(tsval);
1548 src->scrub->pfss_tsecr = ntohl(tsecr);
1549 getmicrouptime(&src->scrub->pfss_last);
1553 hlen -= MAX(opt[1], 2);
1554 opt += MAX(opt[1], 2);
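/*
 * Wire layout assumed by the parser above: a timestamp option is kind
 * TCPOPT_TIMESTAMP (8) followed by length TCPOLEN_TIMESTAMP (10), a
 * 32-bit TSval and a 32-bit TSecr:
 *
 *	opt[0] = kind, opt[1] = length, opt[2..5] = TSval, opt[6..9] = TSecr
 *
 * which is why tsval is copied from &opt[2] and tsecr from &opt[6].
 */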
1564 pf_normalize_tcp_cleanup(struct pf_kstate *state)
1566 uma_zfree(V_pf_state_scrub_z, state->src.scrub);
1567 uma_zfree(V_pf_state_scrub_z, state->dst.scrub);
1569 /* Someday... flush the TCP segment reassembly descriptors. */
1573 pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
1574 u_short *reason, struct tcphdr *th, struct pf_kstate *state,
1575 struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
1577 struct timeval uptime;
1578 u_int32_t tsval, tsecr;
1579 u_int tsval_from_last;
1586 KASSERT((src->scrub || dst->scrub),
1587 ("%s: src->scrub && dst->scrub!", __func__));
1590 * Enforce the minimum TTL seen for this connection. Negate a common
1591 * technique to evade an intrusion detection system and confuse
1592 * firewall state code.
1598 struct ip *h = mtod(m, struct ip *);
1599 if (h->ip_ttl > src->scrub->pfss_ttl)
1600 src->scrub->pfss_ttl = h->ip_ttl;
1601 h->ip_ttl = src->scrub->pfss_ttl;
1609 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
1610 if (h->ip6_hlim > src->scrub->pfss_ttl)
1611 src->scrub->pfss_ttl = h->ip6_hlim;
1612 h->ip6_hlim = src->scrub->pfss_ttl;
1619 if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
1620 ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
1621 (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
1622 pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
1623 /* Diddle with TCP options */
1625 opt = hdr + sizeof(struct tcphdr);
1626 hlen = (th->th_off << 2) - sizeof(struct tcphdr);
1627 while (hlen >= TCPOLEN_TIMESTAMP) {
1628 startoff = opt - (hdr + sizeof(struct tcphdr));
1630 case TCPOPT_EOL: /* FALLTHROUGH */
1635 case TCPOPT_TIMESTAMP:
1636 /* Modulate the timestamps. Can be used for
1637 * NAT detection, OS uptime determination or
1638 * reboot detection.
1642 /* Huh? Multiple timestamps!? */
1643 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1644 DPFPRINTF(("multiple TS??\n"));
1645 pf_print_state(state);
1648 REASON_SET(reason, PFRES_TS);
1651 if (opt[1] >= TCPOLEN_TIMESTAMP) {
1652 memcpy(&tsval, &opt[2],
1654 if (tsval && src->scrub &&
1655 (src->scrub->pfss_flags &
1657 tsval = ntohl(tsval);
1658 pf_patch_32_unaligned(m,
1662 src->scrub->pfss_ts_mod),
1663 PF_ALGNMNT(startoff),
1668 /* Modulate TS reply iff valid (!0) */
1669 memcpy(&tsecr, &opt[6],
1671 if (tsecr && dst->scrub &&
1672 (dst->scrub->pfss_flags &
1674 tsecr = ntohl(tsecr)
1675 - dst->scrub->pfss_ts_mod;
1676 pf_patch_32_unaligned(m,
1680 PF_ALGNMNT(startoff),
1688 hlen -= MAX(opt[1], 2);
1689 opt += MAX(opt[1], 2);
1694 /* Copyback the options, caller copies back header */
1696 m_copyback(m, off + sizeof(struct tcphdr),
1697 (th->th_off << 2) - sizeof(struct tcphdr), hdr +
1698 sizeof(struct tcphdr));
1703 * Must invalidate PAWS checks on connections idle for too long.
1704 * The fastest allowed timestamp clock is 1ms. That turns out to
1705 * be about 24 days before it wraps. XXX Right now our lowerbound
1706 * TS echo check only works for the first 12 days of a connection
1707 * when the TS has exhausted half its 32bit space
1709 #define TS_MAX_IDLE (24*24*60*60)
1710 #define TS_MAX_CONN (12*24*60*60) /* XXX remove when better tsecr check */
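/*
 * Where the 24 days come from: at the fastest allowed tick of 1ms,
 * half of the 32-bit timestamp space is 2^31 ms ~= 24.8 days, after
 * which the SEQ_* comparisons below cannot tell old from new; hence
 * TS_MAX_IDLE of 24 days and the stricter 12 day TS_MAX_CONN above.
 */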
1712 getmicrouptime(&uptime);
1713 if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
1714 (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
1715 time_uptime - state->creation > TS_MAX_CONN)) {
1716 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1717 DPFPRINTF(("src idled out of PAWS\n"));
1718 pf_print_state(state);
1721 src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
1724 if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
1725 uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
1726 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1727 DPFPRINTF(("dst idled out of PAWS\n"));
1728 pf_print_state(state);
1731 dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
1735 if (got_ts && src->scrub && dst->scrub &&
1736 (src->scrub->pfss_flags & PFSS_PAWS) &&
1737 (dst->scrub->pfss_flags & PFSS_PAWS)) {
1738 /* Validate that the timestamps are "in-window".
1739 * RFC1323 describes TCP Timestamp options that allow
1740 * measurement of RTT (round trip time) and PAWS
1741 * (protection against wrapped sequence numbers). PAWS
1742 * gives us a set of rules for rejecting packets on
1743 * long fat pipes (packets that were somehow delayed
1744 * in transit longer than the time it took to send the
1745 * full TCP sequence space of 4Gb). We can use these
1746 * rules and infer a few others that will let us treat
1747 * the 32bit timestamp and the 32bit echoed timestamp
1748 * as sequence numbers to prevent a blind attacker from
1749 * inserting packets into a connection.
1752 * - The timestamp on this packet must be greater than
1753 * or equal to the last value echoed by the other
1754 * endpoint. The RFC says those will be discarded
1755 * since it is a dup that has already been acked.
1756 * This gives us a lowerbound on the timestamp.
1757 * timestamp >= other last echoed timestamp
1758 * - The timestamp will be less than or equal to
1759 * the last timestamp plus the time between the
1760 * last packet and now. The RFC defines the max
1761 * clock rate as 1ms. We will allow clocks to be
1762 * up to 10% fast and will allow a total difference
1763 * of 30 seconds due to a route change. And this
1764 * gives us an upperbound on the timestamp.
1765 * timestamp <= last timestamp + max ticks
1766 * We have to be careful here. Windows will send an
1767 * initial timestamp of zero and then initialize it
1768 * to a random value after the 3whs; presumably to
1769 * avoid a DoS by having to call an expensive RNG
1770 * during a SYN flood. Proof MS has at least one
1771 * good security geek.
1773 * - The TCP timestamp option must also echo the other
1774 * endpoints timestamp. The timestamp echoed is the
1775 * one carried on the earliest unacknowledged segment
1776 * on the left edge of the sequence window. The RFC
1777 * states that the host will reject any echoed
1778 * timestamps that were larger than any ever sent.
1779 * This gives us an upperbound on the TS echo.
1780 * tsecr <= largest_tsval
1781 * - The lowerbound on the TS echo is a little more
1782 * tricky to determine. The other endpoint's echoed
1783 * values will not decrease. But there may be
1784 * network conditions that re-order packets and
1785 * cause our view of them to decrease. For now the
1786 * only lowerbound we can safely determine is that
1787 * the TS echo will never be less than the original
1788 * TS. XXX There is probably a better lowerbound.
1789 * Remove TS_MAX_CONN with better lowerbound check.
1790 * tsecr >= other original TS
1792 * It is also important to note that the fastest
1793 * timestamp clock of 1ms will wrap its 32bit space in
1794 * 24 days. So we just disable TS checking after 24
1795 * days of idle time. We actually must use a 12d
1796 * connection limit until we can come up with a better
1797 * lowerbound to the TS echo check.
1799 struct timeval delta_ts;
1803 * PFTM_TS_DIFF is how many seconds of leeway to allow
1804 * a host's timestamp. This can happen if the previous
1805 * packet got delayed in transit for much longer than
1808 if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
1809 ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];
1811 /* Calculate max ticks since the last timestamp */
1812 #define TS_MAXFREQ 1100 /* RFC max TS freq of 1Khz + 10% skew */
1813 #define TS_MICROSECS 1000000 /* microseconds per second */
1815 timevalsub(&delta_ts, &src->scrub->pfss_last);
1816 tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
1817 tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);
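/*
 * Example with assumed values: 10 seconds since the last packet and
 * the default 30 second fudge give (10 + 30) * 1100 == 44000 ticks,
 * plus the microsecond remainder scaled at TS_MICROSECS / TS_MAXFREQ
 * (about 909 microseconds per tick).
 */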
1819 if ((src->state >= TCPS_ESTABLISHED &&
1820 dst->state >= TCPS_ESTABLISHED) &&
1821 (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
1822 SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
1823 (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
1824 SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
1825 /* Bad RFC1323 implementation or an insertion attack.
1827 * - Solaris 2.6 and 2.7 are known to send another ACK
1828 * after the FIN,FIN|ACK,ACK closing that carries
1829 * an old timestamp.
1832 DPFPRINTF(("Timestamp failed %c%c%c%c\n",
1833 SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
1834 SEQ_GT(tsval, src->scrub->pfss_tsval +
1835 tsval_from_last) ? '1' : ' ',
1836 SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
1837 SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
1838 DPFPRINTF((" tsval: %u tsecr: %u +ticks: %u "
1839 "idle: %jus %lums\n",
1840 tsval, tsecr, tsval_from_last,
1841 (uintmax_t)delta_ts.tv_sec,
1842 delta_ts.tv_usec / 1000));
1843 DPFPRINTF((" src->tsval: %u tsecr: %u\n",
1844 src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
1845 DPFPRINTF((" dst->tsval: %u tsecr: %u tsval0: %u"
1846 "\n", dst->scrub->pfss_tsval,
1847 dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
1848 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1849 pf_print_state(state);
1850 pf_print_flags(th->th_flags);
1853 REASON_SET(reason, PFRES_TS);
1857 /* XXX I'd really like to require tsecr but it's optional */
1859 } else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
1860 ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
1861 || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
1862 src->scrub && dst->scrub &&
1863 (src->scrub->pfss_flags & PFSS_PAWS) &&
1864 (dst->scrub->pfss_flags & PFSS_PAWS)) {
1865 /* Didn't send a timestamp. Timestamps aren't really useful
1867 * - connection opening or closing (often not even sent).
1868 * but we must not let an attacker put a FIN on a
1869 * data packet to sneak it through our ESTABLISHED check.
1870 * - on a TCP reset. RFC suggests not even looking at TS.
1871 * - on an empty ACK. The TS will not be echoed so it will
1872 * probably not help keep the RTT calculation in sync and
1873 * there isn't as much danger when the sequence numbers
1874 * got wrapped. So some stacks don't include TS on empty
1877 * To minimize the disruption to mostly RFC1323 conformant
1878 * stacks, we will only require timestamps on data packets.
1880 * And what do ya know, we cannot require timestamps on data
1881 * packets. There appear to be devices that do legitimate
1882 * TCP connection hijacking. There are HTTP devices that allow
1883 * a 3whs (with timestamps) and then buffer the HTTP request.
1884 * If the intermediate device has the HTTP response cache, it
1885 * will spoof the response but not bother timestamping its
1886 * packets. So we can look for the presence of a timestamp in
1887 * the first data packet and if there, require it in all future
1891 if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
1893 * Hey! Someone tried to sneak a packet in. Or the
1894 * stack changed its RFC1323 behavior?!?!
1896 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1897 DPFPRINTF(("Did not receive expected RFC1323 "
1899 pf_print_state(state);
1900 pf_print_flags(th->th_flags);
1903 REASON_SET(reason, PFRES_TS);
1909 * We will note if a host sends its data packets with or without
1910 * timestamps. And require all data packets to contain a timestamp
1911 * if the first does. PAWS implicitly requires that all data packets be
1912 * timestamped. But I think there are middle-man devices that hijack
1913 * TCP streams immediately after the 3whs and don't timestamp their
1914 * packets (seen in a WWW accelerator or cache).
1916 if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
1917 (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
1919 src->scrub->pfss_flags |= PFSS_DATA_TS;
1921 src->scrub->pfss_flags |= PFSS_DATA_NOTS;
1922 if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
1923 (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
1924 /* Don't warn if other host rejected RFC1323 */
1925 DPFPRINTF(("Broken RFC1323 stack did not "
1926 "timestamp data packet. Disabled PAWS "
1928 pf_print_state(state);
1929 pf_print_flags(th->th_flags);
1936 * Update PAWS values
1938 if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
1939 (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
1940 getmicrouptime(&src->scrub->pfss_last);
1941 if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
1942 (src->scrub->pfss_flags & PFSS_PAWS) == 0)
1943 src->scrub->pfss_tsval = tsval;
1946 if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
1947 (src->scrub->pfss_flags & PFSS_PAWS) == 0)
1948 src->scrub->pfss_tsecr = tsecr;
1950 if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
1951 (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
1952 src->scrub->pfss_tsval0 == 0)) {
1953 /* tsval0 MUST be the lowest timestamp */
1954 src->scrub->pfss_tsval0 = tsval;
1957 /* Only fully initialized after a TS gets echoed */
1958 if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
1959 src->scrub->pfss_flags |= PFSS_PAWS;
1963 /* I have a dream.... TCP segment reassembly.... */
1968 pf_normalize_mss(struct mbuf *m, int off, struct pf_pdesc *pd)
1970 struct tcphdr *th = &pd->hdr.tcp;
1973 int opt, cnt, optlen = 0;
1974 u_char opts[TCP_MAXOLEN];
1975 u_char *optp = opts;
1978 thoff = th->th_off << 2;
1979 cnt = thoff - sizeof(struct tcphdr);
1981 if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,
1982 NULL, NULL, pd->af))
1985 for (; cnt > 0; cnt -= optlen, optp += optlen) {
1986 startoff = optp - opts;
1988 if (opt == TCPOPT_EOL)
1990 if (opt == TCPOPT_NOP)
1996 if (optlen < 2 || optlen > cnt)
2001 mss = (u_int16_t *)(optp + 2);
2002 if ((ntohs(*mss)) > pd->act.max_mss) {
2003 pf_patch_16_unaligned(m,
2005 mss, htons(pd->act.max_mss),
2006 PF_ALGNMNT(startoff),
2008 m_copyback(m, off + sizeof(*th),
2009 thoff - sizeof(*th), opts);
2010 m_copyback(m, off, sizeof(*th), (caddr_t)th);
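/*
 * Example with hypothetical numbers: under a rule setting max-mss to
 * 1400, a SYN carrying an MSS option of 1460 is patched in place to
 * 1400 and the TCP checksum fixed up by pf_patch_16_unaligned(); a
 * SYN already at or below 1400 passes through untouched.
 */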
2022 pf_scan_sctp(struct mbuf *m, int ipoff, int off, struct pf_pdesc *pd)
2024 struct sctp_chunkhdr ch = { };
2025 int chunk_off = sizeof(struct sctphdr);
2028 while (off + chunk_off < pd->tot_len) {
2029 if (!pf_pull_hdr(m, off + chunk_off, &ch, sizeof(ch), NULL,
2033 /* Length includes the header, this must be at least 4. */
2034 if (ntohs(ch.chunk_length) < 4)
2037 chunk_start = chunk_off;
2038 chunk_off += roundup(ntohs(ch.chunk_length), 4);
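/*
 * Walk example: a chunk_length of 18 advances chunk_off by
 * roundup(18, 4) == 20 octets, since chunks are padded to 4-byte
 * boundaries on the wire while chunk_length counts only real bytes.
 */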
2040 switch (ch.chunk_type) {
2041 case SCTP_INITIATION: {
2042 struct sctp_init_chunk init;
2044 if (!pf_pull_hdr(m, off + chunk_start, &init,
2045 sizeof(init), NULL, NULL, pd->af))
2049 * RFC 9260, Section 3.3.2, "The Initiate Tag is allowed to have
2050 * any value except 0."
2052 if (init.init.initiate_tag == 0)
2054 if (init.init.num_inbound_streams == 0)
2056 if (init.init.num_outbound_streams == 0)
2058 if (ntohl(init.init.a_rwnd) < SCTP_MIN_RWND)
2062 * RFC 9260, Section 3.1, INIT chunks MUST have zero
2065 if (pd->hdr.sctp.v_tag != 0)
2068 pd->sctp_flags |= PFDESC_SCTP_INIT;
2071 case SCTP_INITIATION_ACK:
2072 pd->sctp_flags |= PFDESC_SCTP_INIT_ACK;
2074 case SCTP_ABORT_ASSOCIATION:
2075 pd->sctp_flags |= PFDESC_SCTP_ABORT;
2078 case SCTP_SHUTDOWN_ACK:
2079 pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN;
2081 case SCTP_SHUTDOWN_COMPLETE:
2082 pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN_COMPLETE;
2084 case SCTP_COOKIE_ECHO:
2085 case SCTP_COOKIE_ACK:
2086 pd->sctp_flags |= PFDESC_SCTP_COOKIE;
2089 pd->sctp_flags |= PFDESC_SCTP_DATA;
2092 pd->sctp_flags |= PFDESC_SCTP_OTHER;
2097 /* Validate chunk lengths vs. packet length. */
2098 if (off + chunk_off != pd->tot_len)
2102 * INIT, INIT_ACK or SHUTDOWN_COMPLETE chunks must always be the only
2105 if ((pd->sctp_flags & PFDESC_SCTP_INIT) &&
2106 (pd->sctp_flags & ~PFDESC_SCTP_INIT))
2108 if ((pd->sctp_flags & PFDESC_SCTP_INIT_ACK) &&
2109 (pd->sctp_flags & ~PFDESC_SCTP_INIT_ACK))
2111 if ((pd->sctp_flags & PFDESC_SCTP_SHUTDOWN_COMPLETE) &&
2112 (pd->sctp_flags & ~PFDESC_SCTP_SHUTDOWN_COMPLETE))
2119 pf_normalize_sctp(int dir, struct pfi_kkif *kif, struct mbuf *m, int ipoff,
2120 int off, void *h, struct pf_pdesc *pd)
2122 struct pf_krule *r, *rm = NULL;
2123 struct sctphdr *sh = &pd->hdr.sctp;
2125 sa_family_t af = pd->af;
2130 /* Unconditionally scan the SCTP packet, because we need to look for
2131 * things like shutdown and asconf chunks. */
2132 if (pf_scan_sctp(m, ipoff, off, pd) != PF_PASS)
2135 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
2136 /* Check if there are any scrub rules. Lack of scrub rules means packet
2137 * normalization is enforced, just like in OpenBSD. */
2140 pf_counter_u64_add(&r->evaluations, 1);
2141 if (pfi_kkif_match(r->kif, kif) == r->ifnot)
2142 r = r->skip[PF_SKIP_IFP].ptr;
2143 else if (r->direction && r->direction != dir)
2144 r = r->skip[PF_SKIP_DIR].ptr;
2145 else if (r->af && r->af != af)
2146 r = r->skip[PF_SKIP_AF].ptr;
2147 else if (r->proto && r->proto != pd->proto)
2148 r = r->skip[PF_SKIP_PROTO].ptr;
2149 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
2150 r->src.neg, kif, M_GETFIB(m)))
2151 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
2152 else if (r->src.port_op && !pf_match_port(r->src.port_op,
2153 r->src.port[0], r->src.port[1], sh->src_port))
2154 r = r->skip[PF_SKIP_SRC_PORT].ptr;
2155 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
2156 r->dst.neg, NULL, M_GETFIB(m)))
2157 r = r->skip[PF_SKIP_DST_ADDR].ptr;
2158 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
2159 r->dst.port[0], r->dst.port[1], sh->dest_port))
2160 r = r->skip[PF_SKIP_DST_PORT].ptr;
2168 /* With scrub rules present SCTP normalization happens only
2169 * if one of the rules has matched and it's not a "no scrub" rule */
2170 if (rm == NULL || rm->action == PF_NOSCRUB)
2173 pf_counter_u64_critical_enter();
2174 pf_counter_u64_add_protected(&r->packets[dir == PF_OUT], 1);
2175 pf_counter_u64_add_protected(&r->bytes[dir == PF_OUT], pd->tot_len);
2176 pf_counter_u64_critical_exit();
2179 /* Verify we're a multiple of 4 bytes long */
2180 if ((pd->tot_len - off - sizeof(struct sctphdr)) % 4)
2183 /* INIT chunk needs to be the only chunk */
2184 if (pd->sctp_flags & PFDESC_SCTP_INIT)
2185 if (pd->sctp_flags & ~PFDESC_SCTP_INIT)
2191 REASON_SET(&reason, PFRES_NORM);
2192 if (rm != NULL && r->log)
2193 PFLOG_PACKET(kif, m, AF_INET, reason, r, NULL, NULL, pd,
2201 pf_scrub_ip(struct mbuf **m0, struct pf_pdesc *pd)
2203 struct mbuf *m = *m0;
2204 struct ip *h = mtod(m, struct ip *);
2206 /* Clear IP_DF if no-df was requested */
2207 if (pd->act.flags & PFSTATE_NODF && h->ip_off & htons(IP_DF)) {
2208 u_int16_t ip_off = h->ip_off;
2210 h->ip_off &= htons(~IP_DF);
2211 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
2214 /* Enforce a minimum ttl, may cause endless packet loops */
2215 if (pd->act.min_ttl && h->ip_ttl < pd->act.min_ttl) {
2216 u_int16_t ip_ttl = h->ip_ttl;
2218 h->ip_ttl = pd->act.min_ttl;
2219 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
2223 if (pd->act.flags & PFSTATE_SETTOS) {
2226 ov = *(u_int16_t *)h;
2227 h->ip_tos = pd->act.set_tos | (h->ip_tos & IPTOS_ECN_MASK);
2228 nv = *(u_int16_t *)h;
2230 h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
2233 /* random-id, but not for fragments */
2234 if (pd->act.flags & PFSTATE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
2235 uint16_t ip_id = h->ip_id;
2238 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
2245 pf_scrub_ip6(struct mbuf **m0, struct pf_pdesc *pd)
2247 struct mbuf *m = *m0;
2248 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
2250 /* Enforce a minimum ttl, may cause endless packet loops */
2251 if (pd->act.min_ttl && h->ip6_hlim < pd->act.min_ttl)
2252 h->ip6_hlim = pd->act.min_ttl;
2254 /* Enforce tos. Set traffic class bits */
2255 if (pd->act.flags & PFSTATE_SETTOS) {
2256 h->ip6_flow &= IPV6_FLOWLABEL_MASK | IPV6_VERSION_MASK;
2257 h->ip6_flow |= htonl((pd->act.set_tos | IPV6_ECN(h)) << 20);