/*
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * Copyright 2011 Alexander Bluhm <bluhm@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */
struct pf_frent {
    TAILQ_ENTRY(pf_frent) fr_next;
    struct mbuf *fe_m;
    uint16_t    fe_hdrlen;  /* ipv4 header length with ip options
                               ipv6, extension, fragment header */
    uint16_t    fe_extoff;  /* last extension header offset or 0 */
    uint16_t    fe_len;     /* fragment length */
    uint16_t    fe_off;     /* fragment offset */
    uint16_t    fe_mff;     /* more fragment flag */
};
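/*
 * Reassembly bookkeeping works on two levels: each arriving fragment is
 * described by a pf_frent (its mbuf, offset and length), and all fragments
 * sharing the pf_fragment_cmp key below hang off a single pf_fragment,
 * kept sorted by offset in its fr_queue.
 */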
struct pf_fragment_cmp {
    struct pf_addr  frc_src;
    struct pf_addr  frc_dst;
    uint32_t        frc_id;
    sa_family_t     frc_af;
    uint8_t         frc_proto;
};
struct pf_fragment {
    struct pf_fragment_cmp  fr_key;
#define fr_src      fr_key.frc_src
#define fr_dst      fr_key.frc_dst
#define fr_id       fr_key.frc_id
#define fr_af       fr_key.frc_af
#define fr_proto    fr_key.frc_proto

    RB_ENTRY(pf_fragment) fr_entry;
    TAILQ_ENTRY(pf_fragment) frag_next;
    uint8_t     fr_flags;   /* status flags */
#define PFFRAG_SEENLAST     0x0001  /* Seen the last fragment for this */
#define PFFRAG_NOBUFFER     0x0002  /* Non-buffering fragment cache */
#define PFFRAG_DROP         0x0004  /* Drop all fragments */
#define BUFFER_FRAGMENTS(fr)    (!((fr)->fr_flags & PFFRAG_NOBUFFER))
    uint16_t    fr_max;     /* fragment data max */
    uint32_t    fr_timeout;
    uint16_t    fr_maxlen;  /* maximum length of single fragment */
    TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
};
struct pf_fragment_tag {
    uint16_t    ft_hdrlen;  /* header length of reassembled pkt */
    uint16_t    ft_extoff;  /* last extension header offset or 0 */
    uint16_t    ft_maxlen;  /* maximum fragment payload length */
    uint32_t    ft_id;      /* fragment id */
};
static struct mtx pf_frag_mtx;
#define PF_FRAG_LOCK()      mtx_lock(&pf_frag_mtx)
#define PF_FRAG_UNLOCK()    mtx_unlock(&pf_frag_mtx)
#define PF_FRAG_ASSERT()    mtx_assert(&pf_frag_mtx, MA_OWNED)
VNET_DEFINE(uma_zone_t, pf_state_scrub_z);  /* XXX: shared with pfsync */

static VNET_DEFINE(uma_zone_t, pf_frent_z);
#define V_pf_frent_z    VNET(pf_frent_z)
static VNET_DEFINE(uma_zone_t, pf_frag_z);
#define V_pf_frag_z     VNET(pf_frag_z)

TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
static VNET_DEFINE(struct pf_fragqueue, pf_fragqueue);
#define V_pf_fragqueue  VNET(pf_fragqueue)
static VNET_DEFINE(struct pf_cachequeue, pf_cachequeue);
#define V_pf_cachequeue VNET(pf_cachequeue)
RB_HEAD(pf_frag_tree, pf_fragment);
static VNET_DEFINE(struct pf_frag_tree, pf_frag_tree);
#define V_pf_frag_tree  VNET(pf_frag_tree)
static VNET_DEFINE(struct pf_frag_tree, pf_cache_tree);
#define V_pf_cache_tree VNET(pf_cache_tree)
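/*
 * Two parallel lookup structures per vnet: V_pf_frag_tree/V_pf_fragqueue
 * hold fully buffered reassembly queues, while V_pf_cache_tree/
 * V_pf_cachequeue back the non-buffering fragment cache used by
 * pf_fragcache().  Each queue is kept in LRU order (lookups move entries
 * to the head) so expiry can walk from the tail.
 */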
static int  pf_frag_compare(struct pf_fragment *,
                struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

static void pf_flush_fragments(void);
static void pf_free_fragment(struct pf_fragment *);
static void pf_remove_fragment(struct pf_fragment *);
static int  pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
                struct tcphdr *, int, sa_family_t);
static struct pf_frent *pf_create_fragment(u_short *);
static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
                struct pf_frag_tree *tree);
static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
                struct pf_frent *, u_short *);
static int  pf_isfull_fragment(struct pf_fragment *);
static struct mbuf *pf_join_fragment(struct pf_fragment *);
#ifdef INET
static void pf_scrub_ip(struct mbuf **, uint32_t, uint8_t, uint8_t);
static int  pf_reassemble(struct mbuf **, struct ip *, int, u_short *);
static struct mbuf *pf_fragcache(struct mbuf **, struct ip*,
                struct pf_fragment **, int, int, int *);
#endif /* INET */
#ifdef INET6
static int  pf_reassemble6(struct mbuf **, struct ip6_hdr *,
                struct ip6_frag *, uint16_t, uint16_t, u_short *);
static void pf_scrub_ip6(struct mbuf **, uint8_t);
#endif /* INET6 */
#define DPFPRINTF(x) do {                           \
    if (V_pf_status.debug >= PF_DEBUG_MISC) {       \
        printf("%s: ", __func__);                   \
        printf x ;                                  \
    }                                               \
} while(0)
static void
pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
{

    key->frc_src.v4 = ip->ip_src;
    key->frc_dst.v4 = ip->ip_dst;
    key->frc_af = AF_INET;
    key->frc_proto = ip->ip_p;
    key->frc_id = ip->ip_id;
}
void
pf_normalize_init(void)
{

    V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
        NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
    V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
        NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
    V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
        sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
        UMA_ALIGN_PTR, 0);

    V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
    V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
    uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
    uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");

    mtx_init(&pf_frag_mtx, "pf fragments", NULL, MTX_DEF);

    TAILQ_INIT(&V_pf_fragqueue);
    TAILQ_INIT(&V_pf_cachequeue);
}
void
pf_normalize_cleanup(void)
{

    uma_zdestroy(V_pf_state_scrub_z);
    uma_zdestroy(V_pf_frent_z);
    uma_zdestroy(V_pf_frag_z);

    mtx_destroy(&pf_frag_mtx);
}
static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
    int diff;

    if ((diff = a->fr_id - b->fr_id) != 0)
        return (diff);
    if ((diff = a->fr_proto - b->fr_proto) != 0)
        return (diff);
    if ((diff = a->fr_af - b->fr_af) != 0)
        return (diff);
    if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
        return (diff);
    if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
        return (diff);
    return (0);
}
void
pf_purge_expired_fragments(void)
{
    struct pf_fragment  *frag;
    u_int32_t            expire = time_uptime -
                            V_pf_default_rule.timeout[PFTM_FRAG];

    PF_FRAG_LOCK();
    while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
        KASSERT((BUFFER_FRAGMENTS(frag)),
            ("BUFFER_FRAGMENTS(frag) == 0: %s", __FUNCTION__));
        if (frag->fr_timeout > expire)
            break;

        DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
        pf_free_fragment(frag);
    }

    while ((frag = TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue)) != NULL) {
        KASSERT((!BUFFER_FRAGMENTS(frag)),
            ("BUFFER_FRAGMENTS(frag) != 0: %s", __FUNCTION__));
        if (frag->fr_timeout > expire)
            break;

        DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
        pf_free_fragment(frag);
        KASSERT((TAILQ_EMPTY(&V_pf_cachequeue) ||
            TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue) != frag),
            ("!(TAILQ_EMPTY() || TAILQ_LAST() == frag): %s",
            __FUNCTION__));
    }
    PF_FRAG_UNLOCK();
}
/*
 * Try to flush old fragments to make space for new ones
 */
static void
pf_flush_fragments(void)
{
    struct pf_fragment  *frag, *cache;
    int                  goal;

    PF_FRAG_ASSERT();

    goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
    DPFPRINTF(("trying to free %d frag entries\n", goal));
    while (goal < uma_zone_get_cur(V_pf_frent_z)) {
        frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
        if (frag)
            pf_free_fragment(frag);
        cache = TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue);
        if (cache)
            pf_free_fragment(cache);
        if (frag == NULL && cache == NULL)
            break;
    }
}
/* Frees the fragments and all associated entries */
static void
pf_free_fragment(struct pf_fragment *frag)
{
    struct pf_frent *frent;

    PF_FRAG_ASSERT();

    /* Free all fragments */
    if (BUFFER_FRAGMENTS(frag)) {
        for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
            frent = TAILQ_FIRST(&frag->fr_queue)) {
            TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

            m_freem(frent->fe_m);
            uma_zfree(V_pf_frent_z, frent);
        }
    } else {
        for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
            frent = TAILQ_FIRST(&frag->fr_queue)) {
            TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

            KASSERT((TAILQ_EMPTY(&frag->fr_queue) ||
                TAILQ_FIRST(&frag->fr_queue)->fe_off >
                frent->fe_len),
                ("! (TAILQ_EMPTY() || TAILQ_FIRST()->fe_off >"
                " frent->fe_len): %s", __func__));

            uma_zfree(V_pf_frent_z, frent);
        }
    }

    pf_remove_fragment(frag);
}
static struct pf_fragment *
pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
{
    struct pf_fragment  *frag;

    PF_FRAG_ASSERT();

    frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
    if (frag != NULL) {
        /* XXX Are we sure we want to update the timeout? */
        frag->fr_timeout = time_uptime;
        if (BUFFER_FRAGMENTS(frag)) {
            TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
            TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
        } else {
            TAILQ_REMOVE(&V_pf_cachequeue, frag, frag_next);
            TAILQ_INSERT_HEAD(&V_pf_cachequeue, frag, frag_next);
        }
    }

    return (frag);
}
/* Removes a fragment from the fragment queue and frees the fragment */
static void
pf_remove_fragment(struct pf_fragment *frag)
{

    PF_FRAG_ASSERT();

    if (BUFFER_FRAGMENTS(frag)) {
        RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
        TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
        uma_zfree(V_pf_frag_z, frag);
    } else {
        RB_REMOVE(pf_frag_tree, &V_pf_cache_tree, frag);
        TAILQ_REMOVE(&V_pf_cachequeue, frag, frag_next);
        uma_zfree(V_pf_frag_z, frag);
    }
}
static struct pf_frent *
pf_create_fragment(u_short *reason)
{
    struct pf_frent *frent;

    PF_FRAG_ASSERT();

    frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
    if (frent == NULL) {
        pf_flush_fragments();
        frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
        if (frent == NULL) {
            REASON_SET(reason, PFRES_MEMORY);
            return (NULL);
        }
    }

    return (frent);
}
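/*
 * pf_create_fragment() above and the queue allocation in
 * pf_fillup_fragment() below share a flush-and-retry pattern: the frent
 * zone is capped at PFFRAG_FRENT_HIWAT, so when an allocation fails,
 * pf_flush_fragments() evicts the oldest queues and the allocation is
 * retried once before giving up with PFRES_MEMORY.
 */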
static struct pf_fragment *
pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
    u_short *reason)
{
    struct pf_frent     *after, *next, *prev;
    struct pf_fragment  *frag;
    uint16_t             total;

    PF_FRAG_ASSERT();

    /* No empty fragments. */
    if (frent->fe_len == 0) {
        DPFPRINTF(("bad fragment: len 0"));
        goto bad_fragment;
    }

    /* All fragments are 8 byte aligned. */
    if (frent->fe_mff && (frent->fe_len & 0x7)) {
        DPFPRINTF(("bad fragment: mff and len %d", frent->fe_len));
        goto bad_fragment;
    }

    /* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
    if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
        DPFPRINTF(("bad fragment: max packet %d",
            frent->fe_off + frent->fe_len));
        goto bad_fragment;
    }

    DPFPRINTF((key->frc_af == AF_INET ?
        "reass frag %d @ %d-%d" : "reass frag %#08x @ %d-%d",
        key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));

    /* Fully buffer all of the fragments in this fragment queue. */
    frag = pf_find_fragment(key, &V_pf_frag_tree);

    /* Create a new reassembly queue for this packet. */
    if (frag == NULL) {
        frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
        if (frag == NULL) {
            pf_flush_fragments();
            frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
            if (frag == NULL) {
                REASON_SET(reason, PFRES_MEMORY);
                goto drop_fragment;
            }
        }

        *(struct pf_fragment_cmp *)frag = *key;
        frag->fr_timeout = time_uptime;
        frag->fr_maxlen = frent->fe_len;
        TAILQ_INIT(&frag->fr_queue);

        RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
        TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);

        /* We do not have a previous fragment. */
        TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);

        return (frag);
    }

    KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY()->fr_queue"));

    /* Remember maximum fragment len for refragmentation. */
    if (frent->fe_len > frag->fr_maxlen)
        frag->fr_maxlen = frent->fe_len;

    /* Maximum data we have seen already. */
    total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
        TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

    /* Non terminal fragments must have more fragments flag. */
    if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
        goto bad_fragment;

    /* Check if we saw the last fragment already. */
    if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
        if (frent->fe_off + frent->fe_len > total ||
            (frent->fe_off + frent->fe_len == total && frent->fe_mff))
            goto bad_fragment;
    } else {
        if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
            goto bad_fragment;
    }

    /* Find a fragment after the current one. */
    prev = NULL;
    TAILQ_FOREACH(after, &frag->fr_queue, fr_next) {
        if (after->fe_off > frent->fe_off)
            break;
        prev = after;
    }

    KASSERT(prev != NULL || after != NULL,
        ("prev != NULL || after != NULL"));

    if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
        uint16_t precut;

        precut = prev->fe_off + prev->fe_len - frent->fe_off;
        if (precut >= frent->fe_len)
            goto bad_fragment;
        DPFPRINTF(("overlap -%d", precut));
        m_adj(frent->fe_m, precut);
        frent->fe_off += precut;
        frent->fe_len -= precut;
    }

    for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
        after = next) {
        uint16_t aftercut;

        aftercut = frent->fe_off + frent->fe_len - after->fe_off;
        DPFPRINTF(("adjust overlap %d", aftercut));
        if (aftercut < after->fe_len) {
            m_adj(after->fe_m, aftercut);
            after->fe_off += aftercut;
            after->fe_len -= aftercut;
            break;
        }

        /* This fragment is completely overlapped, lose it. */
        next = TAILQ_NEXT(after, fr_next);
        m_freem(after->fe_m);
        TAILQ_REMOVE(&frag->fr_queue, after, fr_next);
        uma_zfree(V_pf_frent_z, after);
    }

    if (prev == NULL)
        TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
    else
        TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);

    return (frag);

bad_fragment:
    REASON_SET(reason, PFRES_FRAG);
drop_fragment:
    uma_zfree(V_pf_frent_z, frent);
    return (NULL);
}
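/*
 * Example of the overlap trimming above (hypothetical numbers): if the
 * queue already covers bytes 0-23 and a new fragment arrives covering
 * 16-39, then precut = 0 + 24 - 16 = 8, so eight bytes are m_adj()ed off
 * the front of the new mbuf and it becomes 24-39.  Existing data wins on
 * the front overlap; the new fragment wins against any following
 * fragments that it covers completely.
 */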
static int
pf_isfull_fragment(struct pf_fragment *frag)
{
    struct pf_frent *frent, *next;
    uint16_t         off, total;

    /* Check if we are completely reassembled */
    if (TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff)
        return (0);

    /* Maximum data we have seen already */
    total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
        TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

    /* Check if we have all the data */
    off = 0;
    for (frent = TAILQ_FIRST(&frag->fr_queue); frent; frent = next) {
        next = TAILQ_NEXT(frent, fr_next);

        off += frent->fe_len;
        if (off < total && (next == NULL || next->fe_off != off)) {
            DPFPRINTF(("missing fragment at %d, next %d, total %d",
                off, next == NULL ? -1 : next->fe_off, total));
            return (0);
        }
    }

    DPFPRINTF(("%d < %d?", off, total));
    if (off < total)
        return (0);
    KASSERT(off == total, ("off == total"));

    return (1);
}
static struct mbuf *
pf_join_fragment(struct pf_fragment *frag)
{
    struct mbuf *m, *m2;
    struct pf_frent *frent, *next;

    frent = TAILQ_FIRST(&frag->fr_queue);
    next = TAILQ_NEXT(frent, fr_next);

    m = frent->fe_m;
    m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
    uma_zfree(V_pf_frent_z, frent);
    for (frent = next; frent != NULL; frent = next) {
        next = TAILQ_NEXT(frent, fr_next);

        m2 = frent->fe_m;
        /* Strip off ip header. */
        m_adj(m2, frent->fe_hdrlen);
        /* Strip off any trailing bytes. */
        m_adj(m2, frent->fe_len - m2->m_pkthdr.len);

        uma_zfree(V_pf_frent_z, frent);
        m_cat(m, m2);
    }

    /* Remove from fragment queue. */
    pf_remove_fragment(frag);

    return (m);
}
#ifdef INET
static int
pf_reassemble(struct mbuf **m0, struct ip *ip, int dir, u_short *reason)
{
    struct mbuf         *m = *m0;
    struct pf_frent     *frent;
    struct pf_fragment  *frag;
    struct pf_fragment_cmp   key;
    uint16_t             total, hdrlen;

    /* Get an entry for the fragment queue */
    if ((frent = pf_create_fragment(reason)) == NULL)
        return (PF_DROP);

    frent->fe_m = m;
    frent->fe_hdrlen = ip->ip_hl << 2;
    frent->fe_extoff = 0;
    frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
    frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
    frent->fe_mff = ntohs(ip->ip_off) & IP_MF;

    pf_ip2key(ip, dir, &key);

    if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
        return (PF_DROP);

    /* The mbuf is part of the fragment entry, no direct free or access */
    m = *m0 = NULL;

    if (!pf_isfull_fragment(frag))
        return (PF_PASS);  /* drop because *m0 is NULL, no error */

    /* We have all the data */
    frent = TAILQ_FIRST(&frag->fr_queue);
    KASSERT(frent != NULL, ("frent != NULL"));
    total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
        TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
    hdrlen = frent->fe_hdrlen;

    m = *m0 = pf_join_fragment(frag);
    frag = NULL;

    if (m->m_flags & M_PKTHDR) {
        int plen = 0;
        for (m = *m0; m; m = m->m_next)
            plen += m->m_len;
        m = *m0;
        m->m_pkthdr.len = plen;
    }

    ip = mtod(m, struct ip *);
    ip->ip_len = htons(hdrlen + total);
    ip->ip_off &= ~(IP_MF|IP_OFFMASK);

    if (hdrlen + total > IP_MAXPACKET) {
        DPFPRINTF(("drop: too big: %d", total));
        ip->ip_len = 0;
        REASON_SET(reason, PFRES_SHORT);
        /* PF_DROP requires a valid mbuf *m0 in pf_test() */
        return (PF_DROP);
    }

    DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
    return (PF_PASS);
}
#endif /* INET */
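/*
 * Note that IPv4 carries fragment offsets in units of eight bytes, which
 * is why fe_off above shifts (ip_off & IP_OFFMASK) left by three: an
 * ip_off field of 185, for example, places the fragment's payload at byte
 * offset 1480 (a common value after 1500-byte MTU fragmentation).
 */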
#ifdef INET6
static int
pf_reassemble6(struct mbuf **m0, struct ip6_hdr *ip6, struct ip6_frag *fraghdr,
    uint16_t hdrlen, uint16_t extoff, u_short *reason)
{
    struct mbuf         *m = *m0;
    struct pf_frent     *frent;
    struct pf_fragment  *frag;
    struct pf_fragment_cmp   key;
    struct m_tag        *mtag;
    struct pf_fragment_tag  *ftag;
    int                  off;
    uint32_t             frag_id;
    uint16_t             total, maxlen;
    uint8_t              proto;

    PF_FRAG_LOCK();

    /* Get an entry for the fragment queue. */
    if ((frent = pf_create_fragment(reason)) == NULL) {
        PF_FRAG_UNLOCK();
        return (PF_DROP);
    }

    frent->fe_m = m;
    frent->fe_hdrlen = hdrlen;
    frent->fe_extoff = extoff;
    frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
    frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
    frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;

    key.frc_src.v6 = ip6->ip6_src;
    key.frc_dst.v6 = ip6->ip6_dst;
    key.frc_af = AF_INET6;
    /* Only the first fragment's protocol is relevant. */
    key.frc_proto = 0;
    key.frc_id = fraghdr->ip6f_ident;

    if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
        PF_FRAG_UNLOCK();
        return (PF_DROP);
    }

    /* The mbuf is part of the fragment entry, no direct free or access. */
    m = *m0 = NULL;

    if (!pf_isfull_fragment(frag)) {
        PF_FRAG_UNLOCK();
        return (PF_PASS);  /* Drop because *m0 is NULL, no error. */
    }

    /* We have all the data. */
    extoff = frent->fe_extoff;
    maxlen = frag->fr_maxlen;
    frag_id = frag->fr_id;
    frent = TAILQ_FIRST(&frag->fr_queue);
    KASSERT(frent != NULL, ("frent != NULL"));
    total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
        TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
    hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);

    m = *m0 = pf_join_fragment(frag);
    frag = NULL;

    PF_FRAG_UNLOCK();

    /* Take protocol from first fragment header. */
    m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
    KASSERT(m, ("%s: short mbuf chain", __func__));
    proto = *(mtod(m, caddr_t) + off);
    m = *m0;

    /* Delete frag6 header */
    if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
        goto fail;

    if (m->m_flags & M_PKTHDR) {
        int plen = 0;
        for (m = *m0; m; m = m->m_next)
            plen += m->m_len;
        m = *m0;
        m->m_pkthdr.len = plen;
    }

    if ((mtag = m_tag_get(PF_REASSEMBLED, sizeof(struct pf_fragment_tag),
        M_NOWAIT)) == NULL)
        goto fail;
    ftag = (struct pf_fragment_tag *)(mtag + 1);
    ftag->ft_hdrlen = hdrlen;
    ftag->ft_extoff = extoff;
    ftag->ft_maxlen = maxlen;
    ftag->ft_id = frag_id;
    m_tag_prepend(m, mtag);

    ip6 = mtod(m, struct ip6_hdr *);
    ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
    if (extoff) {
        /* Write protocol into next field of last extension header. */
        m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
            &off);
        KASSERT(m, ("%s: short mbuf chain", __func__));
        *(mtod(m, char *) + off) = proto;
        m = *m0;
    } else
        ip6->ip6_nxt = proto;

    if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
        DPFPRINTF(("drop: too big: %d", total));
        ip6->ip6_plen = 0;
        REASON_SET(reason, PFRES_SHORT);
        /* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
        return (PF_DROP);
    }

    DPFPRINTF(("complete: %p(%d)", m, ntohs(ip6->ip6_plen)));
    return (PF_PASS);

fail:
    REASON_SET(reason, PFRES_MEMORY);
    /* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
    return (PF_DROP);
}
#endif /* INET6 */
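/*
 * The PF_REASSEMBLED tag attached above is what lets pf_refragment6()
 * below undo the reassembly on output: it preserves the original header
 * length, the last extension header offset, the fragment id and the
 * largest fragment payload seen, so the packet can be re-split on the
 * same boundaries after filtering.
 */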
#ifdef INET
static struct mbuf *
pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff,
    int drop, int *nomem)
{
    struct mbuf     *m = *m0;
    struct pf_frent *frp, *fra, *cur = NULL;
    int              ip_len = ntohs(h->ip_len) - (h->ip_hl << 2);
    u_int16_t        off = ntohs(h->ip_off) << 3;
    u_int16_t        max = ip_len + off;
    int              hosed = 0;

    PF_FRAG_ASSERT();
    KASSERT((*frag == NULL || !BUFFER_FRAGMENTS(*frag)),
        ("!(*frag == NULL || !BUFFER_FRAGMENTS(*frag)): %s", __FUNCTION__));

    /* Create a new range queue for this packet */
    if (*frag == NULL) {
        *frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
        if (*frag == NULL) {
            pf_flush_fragments();
            *frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
            if (*frag == NULL)
                goto no_mem;
        }

        /* Get an entry for the queue */
        cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
        if (cur == NULL) {
            uma_zfree(V_pf_frag_z, *frag);
            *frag = NULL;
            goto no_mem;
        }

        (*frag)->fr_flags = PFFRAG_NOBUFFER;
        (*frag)->fr_max = 0;
        (*frag)->fr_src.v4 = h->ip_src;
        (*frag)->fr_dst.v4 = h->ip_dst;
        (*frag)->fr_af = AF_INET;
        (*frag)->fr_proto = h->ip_p;
        (*frag)->fr_id = h->ip_id;
        (*frag)->fr_timeout = time_uptime;

        cur->fe_off = off;
        cur->fe_len = max; /* TODO: fe_len = max - off ? */
        TAILQ_INIT(&(*frag)->fr_queue);
        TAILQ_INSERT_HEAD(&(*frag)->fr_queue, cur, fr_next);

        RB_INSERT(pf_frag_tree, &V_pf_cache_tree, *frag);
        TAILQ_INSERT_HEAD(&V_pf_cachequeue, *frag, frag_next);

        DPFPRINTF(("fragcache[%d]: new %d-%d\n", h->ip_id, off, max));

        goto pass;
    }

    /*
     * Find a fragment after the current one:
     *  - off contains the real shifted offset.
     */
    frp = NULL;
    TAILQ_FOREACH(fra, &(*frag)->fr_queue, fr_next) {
        if (fra->fe_off > off)
            break;
        frp = fra;
    }

    KASSERT((frp != NULL || fra != NULL),
        ("!(frp != NULL || fra != NULL): %s", __FUNCTION__));

    if (frp != NULL) {
        int precut;

        precut = frp->fe_len - off;
        if (precut >= ip_len) {
            /* Fragment is entirely a duplicate */
            DPFPRINTF(("fragcache[%d]: dead (%d-%d) %d-%d\n",
                h->ip_id, frp->fe_off, frp->fe_len, off, max));
            goto drop_fragment;
        }
        if (precut == 0) {
            /* They are adjacent.  Fixup cache entry */
            DPFPRINTF(("fragcache[%d]: adjacent (%d-%d) %d-%d\n",
                h->ip_id, frp->fe_off, frp->fe_len, off, max));
            frp->fe_len = max;
        } else if (precut > 0) {
            /* The first part of this payload overlaps with a
             * fragment that has already been passed.
             * Need to trim off the first part of the payload.
             * But to do so easily, we need to create another
             * mbuf to throw the original header into.
             */

            DPFPRINTF(("fragcache[%d]: chop %d (%d-%d) %d-%d\n",
                h->ip_id, precut, frp->fe_off, frp->fe_len, off,
                max));

            off += precut;
            max -= precut;
            /* Update the previous frag to encompass this one */
            frp->fe_len = max;

            if (!drop) {
                /* XXX Optimization opportunity
                 * This is a very heavy way to trim the payload.
                 * we could do it much faster by diddling mbuf
                 * internals but that would be even less legible
                 * than this mbuf magic.  For my next trick,
                 * I'll pull a rabbit out of my laptop.
                 */
                *m0 = m_dup(m, M_NOWAIT);
                if (*m0 == NULL)
                    goto no_mem;
                /* From KAME Project : We have missed this! */
                m_adj(*m0, (h->ip_hl << 2) -
                    (*m0)->m_pkthdr.len);

                KASSERT(((*m0)->m_next == NULL),
                    ("(*m0)->m_next != NULL: %s",
                    __FUNCTION__));
                m_adj(m, precut + (h->ip_hl << 2));
                m_cat(*m0, m);
                m = *m0;
                if (m->m_flags & M_PKTHDR) {
                    int plen = 0;
                    struct mbuf *t;
                    for (t = m; t; t = t->m_next)
                        plen += t->m_len;
                    m->m_pkthdr.len = plen;
                }

                h = mtod(m, struct ip *);

                KASSERT(((int)m->m_len ==
                    ntohs(h->ip_len) - precut),
                    ("m->m_len != ntohs(h->ip_len) - precut: %s",
                    __FUNCTION__));
                h->ip_off = htons(ntohs(h->ip_off) +
                    (precut >> 3));
                h->ip_len = htons(ntohs(h->ip_len) - precut);
            } else {
                hosed++;
            }
        } else {
            /* There is a gap between fragments */

            DPFPRINTF(("fragcache[%d]: gap %d (%d-%d) %d-%d\n",
                h->ip_id, -precut, frp->fe_off, frp->fe_len, off,
                max));

            cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
            if (cur == NULL)
                goto no_mem;

            cur->fe_off = off;
            cur->fe_len = max;
            TAILQ_INSERT_AFTER(&(*frag)->fr_queue, frp, cur, fr_next);
        }
    }

    if (fra != NULL) {
        int aftercut;
        int merge = 0;

        aftercut = max - fra->fe_off;
        if (aftercut == 0) {
            /* Adjacent fragments */
            DPFPRINTF(("fragcache[%d]: adjacent %d-%d (%d-%d)\n",
                h->ip_id, off, max, fra->fe_off, fra->fe_len));
            fra->fe_off = off;
            merge = 1;
        } else if (aftercut > 0) {
            /* Need to chop off the tail of this fragment */
            DPFPRINTF(("fragcache[%d]: chop %d %d-%d (%d-%d)\n",
                h->ip_id, aftercut, off, max, fra->fe_off,
                fra->fe_len));
            fra->fe_off = off;
            max -= aftercut;

            merge = 1;

            if (!drop) {
                m_adj(m, -aftercut);
                if (m->m_flags & M_PKTHDR) {
                    int plen = 0;
                    struct mbuf *t;
                    for (t = m; t; t = t->m_next)
                        plen += t->m_len;
                    m->m_pkthdr.len = plen;
                }
                h = mtod(m, struct ip *);
                KASSERT(((int)m->m_len == ntohs(h->ip_len) - aftercut),
                    ("m->m_len != ntohs(h->ip_len) - aftercut: %s",
                    __FUNCTION__));
                h->ip_len = htons(ntohs(h->ip_len) - aftercut);
            } else {
                hosed++;
            }
        } else if (frp == NULL) {
            /* There is a gap between fragments */
            DPFPRINTF(("fragcache[%d]: gap %d %d-%d (%d-%d)\n",
                h->ip_id, -aftercut, off, max, fra->fe_off,
                fra->fe_len));

            cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
            if (cur == NULL)
                goto no_mem;

            cur->fe_off = off;
            cur->fe_len = max;
            TAILQ_INSERT_HEAD(&(*frag)->fr_queue, cur, fr_next);
        }

        /* Need to glue together two separate fragment descriptors */
        if (merge) {
            if (cur && fra->fe_off <= cur->fe_len) {
                /* Need to merge in a previous 'cur' */
                DPFPRINTF(("fragcache[%d]: adjacent(merge "
                    "%d-%d) %d-%d (%d-%d)\n",
                    h->ip_id, cur->fe_off, cur->fe_len, off,
                    max, fra->fe_off, fra->fe_len));
                fra->fe_off = cur->fe_off;
                TAILQ_REMOVE(&(*frag)->fr_queue, cur, fr_next);
                uma_zfree(V_pf_frent_z, cur);
                cur = NULL;
            } else if (frp && fra->fe_off <= frp->fe_len) {
                /* Need to merge in a modified 'frp' */
                KASSERT((cur == NULL), ("cur != NULL: %s",
                    __FUNCTION__));
                DPFPRINTF(("fragcache[%d]: adjacent(merge "
                    "%d-%d) %d-%d (%d-%d)\n",
                    h->ip_id, frp->fe_off, frp->fe_len, off,
                    max, fra->fe_off, fra->fe_len));
                fra->fe_off = frp->fe_off;
                TAILQ_REMOVE(&(*frag)->fr_queue, frp, fr_next);
                uma_zfree(V_pf_frent_z, frp);
                frp = NULL;
            }
        }
    }

    if (hosed) {
        /*
         * We must keep tracking the overall fragment even when
         * we're going to drop it anyway so that we know when to
         * free the overall descriptor.  Thus we drop the frag late.
         */
        goto drop_fragment;
    }

pass:
    /* Update maximum data size */
    if ((*frag)->fr_max < max)
        (*frag)->fr_max = max;

    /* This is the last segment */
    if (!mff)
        (*frag)->fr_flags |= PFFRAG_SEENLAST;

    /* Check if we are completely reassembled */
    if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
        TAILQ_FIRST(&(*frag)->fr_queue)->fe_off == 0 &&
        TAILQ_FIRST(&(*frag)->fr_queue)->fe_len == (*frag)->fr_max) {
        /* Remove from fragment queue */
        DPFPRINTF(("fragcache[%d]: done 0-%d\n", h->ip_id,
            (*frag)->fr_max));
        pf_free_fragment(*frag);
        *frag = NULL;
    }

    return (m);

no_mem:
    *nomem = 1;

    /* Still need to pay attention to !IP_MF */
    if (!mff && *frag != NULL)
        (*frag)->fr_flags |= PFFRAG_SEENLAST;

    m_freem(m);
    return (NULL);

drop_fragment:

    /* Still need to pay attention to !IP_MF */
    if (!mff && *frag != NULL)
        (*frag)->fr_flags |= PFFRAG_SEENLAST;

    if (drop) {
        /* This fragment has been deemed bad.  Don't reass */
        if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
            DPFPRINTF(("fragcache[%d]: dropping overall fragment\n",
                h->ip_id));
        (*frag)->fr_flags |= PFFRAG_DROP;
    }

    m_freem(m);
    return (NULL);
}
#endif /* INET */
#ifdef INET6
int
pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag)
{
    struct mbuf         *m = *m0, *t;
    struct pf_fragment_tag  *ftag = (struct pf_fragment_tag *)(mtag + 1);
    struct pf_pdesc      pd;
    uint32_t             frag_id;
    uint16_t             hdrlen, extoff, maxlen;
    uint8_t              proto;
    int                  error, action;

    hdrlen = ftag->ft_hdrlen;
    extoff = ftag->ft_extoff;
    maxlen = ftag->ft_maxlen;
    frag_id = ftag->ft_id;
    m_tag_delete(m, mtag);
    mtag = NULL;
    ftag = NULL;

    if (extoff) {
        int off;

        /* Use protocol from next field of last extension header */
        m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
            &off);
        KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
        proto = *(mtod(m, caddr_t) + off);
        *(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
        m = *m0;
    } else {
        struct ip6_hdr *hdr;

        hdr = mtod(m, struct ip6_hdr *);
        proto = hdr->ip6_nxt;
        hdr->ip6_nxt = IPPROTO_FRAGMENT;
    }

    /*
     * Maxlen may be less than 8 if there was only a single
     * fragment.  As it was fragmented before, add a fragment
     * header also for a single fragment.  If total or maxlen
     * is less than 8, ip6_fragment() will return EMSGSIZE and
     * we drop the packet.
     */
    error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
    m = (*m0)->m_nextpkt;
    (*m0)->m_nextpkt = NULL;
    if (error == 0) {
        /* The first mbuf contains the unfragmented packet. */
        m_freem(*m0);
        *m0 = NULL;
        action = PF_PASS;
    } else {
        /* Drop expects an mbuf to free. */
        DPFPRINTF(("refragment error %d", error));
        action = PF_DROP;
    }
    for (t = m; m; m = t) {
        t = m->m_nextpkt;
        m->m_nextpkt = NULL;
        m->m_flags |= M_SKIP_FIREWALL;
        memset(&pd, 0, sizeof(pd));
        pd.pf_mtag = pf_find_mtag(m);
        if (error == 0)
            ip6_forward(m, 0);
        else
            m_freem(m);
    }

    return (action);
}
#endif /* INET6 */
#ifdef INET
int
pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
    struct pf_pdesc *pd)
{
    struct mbuf         *m = *m0;
    struct pf_rule      *r;
    struct pf_fragment  *frag = NULL;
    struct pf_fragment_cmp   key;
    struct ip           *h = mtod(m, struct ip *);
    int                  mff = (ntohs(h->ip_off) & IP_MF);
    int                  hlen = h->ip_hl << 2;
    u_int16_t            fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
    u_int16_t            max;
    int                  ip_len;
    int                  ip_off;
    int                  tag = -1;
    int                  verdict;

    PF_RULES_RASSERT();

    r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
    while (r != NULL) {
        r->evaluations++;
        if (pfi_kif_match(r->kif, kif) == r->ifnot)
            r = r->skip[PF_SKIP_IFP].ptr;
        else if (r->direction && r->direction != dir)
            r = r->skip[PF_SKIP_DIR].ptr;
        else if (r->af && r->af != AF_INET)
            r = r->skip[PF_SKIP_AF].ptr;
        else if (r->proto && r->proto != h->ip_p)
            r = r->skip[PF_SKIP_PROTO].ptr;
        else if (PF_MISMATCHAW(&r->src.addr,
            (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
            r->src.neg, kif, M_GETFIB(m)))
            r = r->skip[PF_SKIP_SRC_ADDR].ptr;
        else if (PF_MISMATCHAW(&r->dst.addr,
            (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
            r->dst.neg, NULL, M_GETFIB(m)))
            r = r->skip[PF_SKIP_DST_ADDR].ptr;
        else if (r->match_tag && !pf_match_tag(m, r, &tag,
            pd->pf_mtag ? pd->pf_mtag->tag : 0))
            r = TAILQ_NEXT(r, entries);
        else
            break;
    }

    if (r == NULL || r->action == PF_NOSCRUB)
        return (PF_PASS);
    else {
        r->packets[dir == PF_OUT]++;
        r->bytes[dir == PF_OUT] += pd->tot_len;
    }

    /* Check for illegal packets */
    if (hlen < (int)sizeof(struct ip))
        goto drop;

    if (hlen > ntohs(h->ip_len))
        goto drop;

    /* Clear IP_DF if the rule uses the no-df option */
    if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
        u_int16_t ip_off = h->ip_off;

        h->ip_off &= htons(~IP_DF);
        h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
    }

    /* We will need other tests here */
    if (!fragoff && !mff)
        goto no_fragment;

    /* We're dealing with a fragment now. Don't allow fragments
     * with IP_DF to enter the cache. If the flag was cleared by
     * no-df above, fine. Otherwise drop it.
     */
    if (h->ip_off & htons(IP_DF)) {
        DPFPRINTF(("IP_DF\n"));
        goto bad;
    }

    ip_len = ntohs(h->ip_len) - hlen;
    ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

    /* All fragments are 8 byte aligned */
    if (mff && (ip_len & 0x7)) {
        DPFPRINTF(("mff and %d\n", ip_len));
        goto bad;
    }

    /* Respect maximum length */
    if (fragoff + ip_len > IP_MAXPACKET) {
        DPFPRINTF(("max packet %d\n", fragoff + ip_len));
        goto bad;
    }
    max = fragoff + ip_len;

    if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {

        /* Fully buffer all of the fragments */
        PF_FRAG_LOCK();

        pf_ip2key(h, dir, &key);
        frag = pf_find_fragment(&key, &V_pf_frag_tree);

        /* Check if we saw the last fragment already */
        if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
            max > frag->fr_max)
            goto bad;

        /* Might return a completely reassembled mbuf, or NULL */
        DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
        verdict = pf_reassemble(m0, h, dir, reason);
        PF_FRAG_UNLOCK();

        if (verdict != PF_PASS)
            return (PF_DROP);

        m = *m0;
        if (m == NULL)
            return (PF_DROP);

        /* use mtag from concatenated mbuf chain */
        pd->pf_mtag = pf_find_mtag(m);

        if (pd->pf_mtag == NULL) {
            printf("%s: pf_find_mtag returned NULL(1)\n", __func__);
            if ((pd->pf_mtag = pf_get_mtag(m)) == NULL) {
                m_freem(m);
                *m0 = NULL;
                goto no_mem;
            }
        }

        h = mtod(m, struct ip *);
    } else {
        /* non-buffering fragment cache (drops or masks overlaps) */
        int nomem = 0;

        if (dir == PF_OUT && pd->pf_mtag->flags & PF_TAG_FRAGCACHE) {
            /*
             * Already passed the fragment cache in the
             * input direction.  If we continued, it would
             * appear to be a dup and would be dropped.
             */
            goto fragment_pass;
        }

        PF_FRAG_LOCK();
        pf_ip2key(h, dir, &key);
        frag = pf_find_fragment(&key, &V_pf_cache_tree);

        /* Check if we saw the last fragment already */
        if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
            max > frag->fr_max) {
            if (r->rule_flag & PFRULE_FRAGDROP)
                frag->fr_flags |= PFFRAG_DROP;
            goto bad;
        }

        *m0 = m = pf_fragcache(m0, h, &frag, mff,
            (r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);
        if (m == NULL) {
            PF_FRAG_UNLOCK();
            if (nomem)
                goto no_mem;
            goto drop;
        }
        PF_FRAG_UNLOCK();

        /* use mtag from copied and trimmed mbuf chain */
        pd->pf_mtag = pf_find_mtag(m);

        if (pd->pf_mtag == NULL) {
            printf("%s: pf_find_mtag returned NULL(2)\n", __func__);
            if ((pd->pf_mtag = pf_get_mtag(m)) == NULL) {
                m_freem(m);
                *m0 = NULL;
                goto no_mem;
            }
        }

        if (dir == PF_IN)
            pd->pf_mtag->flags |= PF_TAG_FRAGCACHE;

        if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
            goto drop;
    }

no_fragment:
    /* At this point, only IP_DF is allowed in ip_off */
    if (h->ip_off & ~htons(IP_DF)) {
        u_int16_t ip_off = h->ip_off;

        h->ip_off &= htons(IP_DF);
        h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
    }

    /* not missing a return here */

fragment_pass:
    pf_scrub_ip(&m, r->rule_flag, r->min_ttl, r->set_tos);

    if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0)
        pd->flags |= PFDESC_IP_REAS;
    return (PF_PASS);

no_mem:
    REASON_SET(reason, PFRES_MEMORY);
    if (r != NULL && r->log)
        PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
            1);
    return (PF_DROP);

drop:
    REASON_SET(reason, PFRES_NORM);
    if (r != NULL && r->log)
        PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
            1);
    return (PF_DROP);

bad:
    DPFPRINTF(("dropping bad fragment\n"));

    /* Free associated fragments */
    if (frag != NULL) {
        pf_free_fragment(frag);
        PF_FRAG_UNLOCK();
    }

    REASON_SET(reason, PFRES_FRAG);
    if (r != NULL && r->log)
        PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
            1);

    return (PF_DROP);
}
#endif /* INET */
#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
    u_short *reason, struct pf_pdesc *pd)
{
    struct mbuf         *m = *m0;
    struct pf_rule      *r;
    struct ip6_hdr      *h = mtod(m, struct ip6_hdr *);
    int                  extoff;
    int                  off;
    struct ip6_ext       ext;
    struct ip6_opt       opt;
    struct ip6_opt_jumbo jumbo;
    struct ip6_frag      frag;
    u_int32_t            jumbolen = 0, plen;
    int                  optend;
    int                  ooff;
    u_int8_t             proto;
    int                  terminal;

    PF_RULES_RASSERT();

    r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
    while (r != NULL) {
        r->evaluations++;
        if (pfi_kif_match(r->kif, kif) == r->ifnot)
            r = r->skip[PF_SKIP_IFP].ptr;
        else if (r->direction && r->direction != dir)
            r = r->skip[PF_SKIP_DIR].ptr;
        else if (r->af && r->af != AF_INET6)
            r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
        else if (r->proto && r->proto != h->ip6_nxt)
            r = r->skip[PF_SKIP_PROTO].ptr;
#endif
        else if (PF_MISMATCHAW(&r->src.addr,
            (struct pf_addr *)&h->ip6_src, AF_INET6,
            r->src.neg, kif, M_GETFIB(m)))
            r = r->skip[PF_SKIP_SRC_ADDR].ptr;
        else if (PF_MISMATCHAW(&r->dst.addr,
            (struct pf_addr *)&h->ip6_dst, AF_INET6,
            r->dst.neg, NULL, M_GETFIB(m)))
            r = r->skip[PF_SKIP_DST_ADDR].ptr;
        else
            break;
    }

    if (r == NULL || r->action == PF_NOSCRUB)
        return (PF_PASS);
    else {
        r->packets[dir == PF_OUT]++;
        r->bytes[dir == PF_OUT] += pd->tot_len;
    }

    /* Check for illegal packets */
    if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
        goto drop;

    extoff = 0;
    off = sizeof(struct ip6_hdr);
    proto = h->ip6_nxt;
    terminal = 0;
    do {
        switch (proto) {
        case IPPROTO_FRAGMENT:
            goto fragment;
            break;
        case IPPROTO_AH:
        case IPPROTO_ROUTING:
        case IPPROTO_DSTOPTS:
            if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
                NULL, AF_INET6))
                goto shortpkt;
            extoff = off;
            if (proto == IPPROTO_AH)
                off += (ext.ip6e_len + 2) * 4;
            else
                off += (ext.ip6e_len + 1) * 8;
            proto = ext.ip6e_nxt;
            break;
        case IPPROTO_HOPOPTS:
            if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
                NULL, AF_INET6))
                goto shortpkt;
            extoff = off;
            optend = off + (ext.ip6e_len + 1) * 8;
            ooff = off + sizeof(ext);
            do {
                if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
                    sizeof(opt.ip6o_type), NULL, NULL,
                    AF_INET6))
                    goto shortpkt;
                if (opt.ip6o_type == IP6OPT_PAD1) {
                    ooff++;
                    continue;
                }
                if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
                    NULL, NULL, AF_INET6))
                    goto shortpkt;
                if (ooff + sizeof(opt) + opt.ip6o_len > optend)
                    goto drop;
                switch (opt.ip6o_type) {
                case IP6OPT_JUMBO:
                    if (h->ip6_plen != 0)
                        goto drop;
                    if (!pf_pull_hdr(m, ooff, &jumbo,
                        sizeof(jumbo), NULL, NULL,
                        AF_INET6))
                        goto shortpkt;
                    memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
                        sizeof(jumbolen));
                    jumbolen = ntohl(jumbolen);
                    if (jumbolen <= IPV6_MAXPACKET)
                        goto drop;
                    if (sizeof(struct ip6_hdr) + jumbolen !=
                        m->m_pkthdr.len)
                        goto drop;
                    break;
                default:
                    break;
                }
                ooff += sizeof(opt) + opt.ip6o_len;
            } while (ooff < optend);

            off = optend;
            proto = ext.ip6e_nxt;
            break;
        default:
            terminal = 1;
            break;
        }
    } while (!terminal);

    /* jumbo payload option must be present, or plen > 0 */
    if (ntohs(h->ip6_plen) == 0)
        plen = jumbolen;
    else
        plen = ntohs(h->ip6_plen);
    if (plen == 0)
        goto drop;
    if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
        goto shortpkt;

    pf_scrub_ip6(&m, r->min_ttl);

    return (PF_PASS);

fragment:
    /* Jumbo payload packets cannot be fragmented. */
    plen = ntohs(h->ip6_plen);
    if (plen == 0 || jumbolen)
        goto drop;
    if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
        goto shortpkt;

    if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
        goto shortpkt;

    /* Offset now points to data portion. */
    off += sizeof(frag);

    /* Returns PF_DROP or *m0 is NULL or completely reassembled mbuf. */
    if (pf_reassemble6(m0, h, &frag, off, extoff, reason) != PF_PASS)
        return (PF_DROP);
    m = *m0;
    if (m == NULL)
        return (PF_PASS);

    pd->flags |= PFDESC_IP_REAS;
    return (PF_PASS);

shortpkt:
    REASON_SET(reason, PFRES_SHORT);
    if (r != NULL && r->log)
        PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
            1);
    return (PF_DROP);

drop:
    REASON_SET(reason, PFRES_NORM);
    if (r != NULL && r->log)
        PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
            1);
    return (PF_DROP);
}
#endif /* INET6 */
int
pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
    struct pf_rule  *r, *rm = NULL;
    struct tcphdr   *th = pd->hdr.tcp;
    int              rewrite = 0;
    u_short          reason;
    u_int8_t         flags;
    sa_family_t      af = pd->af;

    PF_RULES_RASSERT();

    r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
    while (r != NULL) {
        r->evaluations++;
        if (pfi_kif_match(r->kif, kif) == r->ifnot)
            r = r->skip[PF_SKIP_IFP].ptr;
        else if (r->direction && r->direction != dir)
            r = r->skip[PF_SKIP_DIR].ptr;
        else if (r->af && r->af != af)
            r = r->skip[PF_SKIP_AF].ptr;
        else if (r->proto && r->proto != pd->proto)
            r = r->skip[PF_SKIP_PROTO].ptr;
        else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
            r->src.neg, kif, M_GETFIB(m)))
            r = r->skip[PF_SKIP_SRC_ADDR].ptr;
        else if (r->src.port_op && !pf_match_port(r->src.port_op,
            r->src.port[0], r->src.port[1], th->th_sport))
            r = r->skip[PF_SKIP_SRC_PORT].ptr;
        else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
            r->dst.neg, NULL, M_GETFIB(m)))
            r = r->skip[PF_SKIP_DST_ADDR].ptr;
        else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
            r->dst.port[0], r->dst.port[1], th->th_dport))
            r = r->skip[PF_SKIP_DST_PORT].ptr;
        else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
            pf_osfp_fingerprint(pd, m, off, th),
            r->os_fingerprint))
            r = TAILQ_NEXT(r, entries);
        else {
            rm = r;
            break;
        }
    }

    if (rm == NULL || rm->action == PF_NOSCRUB)
        return (PF_PASS);
    else {
        r->packets[dir == PF_OUT]++;
        r->bytes[dir == PF_OUT] += pd->tot_len;
    }

    if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
        pd->flags |= PFDESC_TCP_NORM;

    flags = th->th_flags;
    if (flags & TH_SYN) {
        /* Illegal packet */
        if (flags & TH_RST)
            goto tcp_drop;

        if (flags & TH_FIN)
            goto tcp_drop;
    } else {
        /* Illegal packet */
        if (!(flags & (TH_ACK|TH_RST)))
            goto tcp_drop;
    }

    if (!(flags & TH_ACK)) {
        /* These flags are only valid if ACK is set */
        if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
            goto tcp_drop;
    }

    /* Check for illegal header length */
    if (th->th_off < (sizeof(struct tcphdr) >> 2))
        goto tcp_drop;

    /* If flags changed, or reserved data set, then adjust */
    if (flags != th->th_flags || th->th_x2 != 0) {
        u_int16_t ov, nv;

        ov = *(u_int16_t *)(&th->th_ack + 1);
        th->th_flags = flags;
        th->th_x2 = 0;
        nv = *(u_int16_t *)(&th->th_ack + 1);

        th->th_sum = pf_cksum_fixup(th->th_sum, ov, nv, 0);
        rewrite = 1;
    }

    /* Remove urgent pointer, if TH_URG is not set */
    if (!(flags & TH_URG) && th->th_urp) {
        th->th_sum = pf_cksum_fixup(th->th_sum, th->th_urp, 0, 0);
        th->th_urp = 0;
        rewrite = 1;
    }

    /* Process options */
    if (r->max_mss && pf_normalize_tcpopt(r, m, th, off, pd->af))
        rewrite = 1;

    /* copy back packet headers if we sanitized */
    if (rewrite)
        m_copyback(m, off, sizeof(*th), (caddr_t)th);

    return (PF_PASS);

tcp_drop:
    REASON_SET(&reason, PFRES_NORM);
    if (rm != NULL && r->log)
        PFLOG_PACKET(kif, m, AF_INET, dir, reason, r, NULL, NULL, pd,
            1);
    return (PF_DROP);
}
int
pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
{
    u_int32_t tsval, tsecr;
    u_int8_t hdr[60];
    u_int8_t *opt;

    KASSERT((src->scrub == NULL),
        ("pf_normalize_tcp_init: src->scrub != NULL"));

    src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
    if (src->scrub == NULL)
        return (1);

    switch (pd->af) {
#ifdef INET
    case AF_INET: {
        struct ip *h = mtod(m, struct ip *);
        src->scrub->pfss_ttl = h->ip_ttl;
        break;
    }
#endif /* INET */
#ifdef INET6
    case AF_INET6: {
        struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
        src->scrub->pfss_ttl = h->ip6_hlim;
        break;
    }
#endif /* INET6 */
    }

    /*
     * All normalizations below are only begun if we see the start of
     * the connections.  They must all set an enabled bit in pfss_flags
     */
    if ((th->th_flags & TH_SYN) == 0)
        return (0);

    if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
        pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
        /* Diddle with TCP options */
        int hlen;
        opt = hdr + sizeof(struct tcphdr);
        hlen = (th->th_off << 2) - sizeof(struct tcphdr);
        while (hlen >= TCPOLEN_TIMESTAMP) {
            switch (*opt) {
            case TCPOPT_EOL:    /* FALLTHROUGH */
            case TCPOPT_NOP:
                opt++;
                hlen--;
                break;
            case TCPOPT_TIMESTAMP:
                if (opt[1] >= TCPOLEN_TIMESTAMP) {
                    src->scrub->pfss_flags |=
                        PFSS_TIMESTAMP;
                    src->scrub->pfss_ts_mod =
                        htonl(arc4random());

                    /* note PFSS_PAWS not set yet */
                    memcpy(&tsval, &opt[2],
                        sizeof(u_int32_t));
                    memcpy(&tsecr, &opt[6],
                        sizeof(u_int32_t));
                    src->scrub->pfss_tsval0 = ntohl(tsval);
                    src->scrub->pfss_tsval = ntohl(tsval);
                    src->scrub->pfss_tsecr = ntohl(tsecr);
                    getmicrouptime(&src->scrub->pfss_last);
                }
                /* FALLTHROUGH */
            default:
                hlen -= MAX(opt[1], 2);
                opt += MAX(opt[1], 2);
                break;
            }
        }
    }

    return (0);
}
void
pf_normalize_tcp_cleanup(struct pf_state *state)
{

    if (state->src.scrub)
        uma_zfree(V_pf_state_scrub_z, state->src.scrub);
    if (state->dst.scrub)
        uma_zfree(V_pf_state_scrub_z, state->dst.scrub);

    /* Someday... flush the TCP segment reassembly descriptors. */
}
int
pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_state *state,
    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
{
    struct timeval uptime;
    u_int32_t tsval, tsecr;
    u_int tsval_from_last;
    u_int8_t hdr[60];
    u_int8_t *opt;
    int copyback = 0;
    int got_ts = 0;

    KASSERT((src->scrub || dst->scrub),
        ("%s: src->scrub && dst->scrub!", __func__));

    /*
     * Enforce the minimum TTL seen for this connection.  Negate a common
     * technique to evade an intrusion detection system and confuse
     * firewall state code.
     */
    switch (pd->af) {
#ifdef INET
    case AF_INET: {
        if (src->scrub) {
            struct ip *h = mtod(m, struct ip *);
            if (h->ip_ttl > src->scrub->pfss_ttl)
                src->scrub->pfss_ttl = h->ip_ttl;
            h->ip_ttl = src->scrub->pfss_ttl;
        }
        break;
    }
#endif /* INET */
#ifdef INET6
    case AF_INET6: {
        if (src->scrub) {
            struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
            if (h->ip6_hlim > src->scrub->pfss_ttl)
                src->scrub->pfss_ttl = h->ip6_hlim;
            h->ip6_hlim = src->scrub->pfss_ttl;
        }
        break;
    }
#endif /* INET6 */
    }

    if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
        ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
        (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
        pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
        /* Diddle with TCP options */
        int hlen;
        opt = hdr + sizeof(struct tcphdr);
        hlen = (th->th_off << 2) - sizeof(struct tcphdr);
        while (hlen >= TCPOLEN_TIMESTAMP) {
            switch (*opt) {
            case TCPOPT_EOL:    /* FALLTHROUGH */
            case TCPOPT_NOP:
                opt++;
                hlen--;
                break;
            case TCPOPT_TIMESTAMP:
                /*
                 * Modulate the timestamps.  Can be used for
                 * NAT detection, OS uptime determination or
                 * reboot detection.
                 */

                if (got_ts) {
                    /* Huh?  Multiple timestamps!? */
                    if (V_pf_status.debug >= PF_DEBUG_MISC) {
                        DPFPRINTF(("multiple TS??"));
                        pf_print_state(state);
                        printf("\n");
                    }
                    REASON_SET(reason, PFRES_TS);
                    return (PF_DROP);
                }
                if (opt[1] >= TCPOLEN_TIMESTAMP) {
                    memcpy(&tsval, &opt[2],
                        sizeof(u_int32_t));
                    if (tsval && src->scrub &&
                        (src->scrub->pfss_flags &
                        PFSS_TIMESTAMP)) {
                        tsval = ntohl(tsval);
                        pf_change_a(&opt[2],
                            &th->th_sum,
                            htonl(tsval +
                            src->scrub->pfss_ts_mod),
                            0);
                        copyback = 1;
                    }

                    /* Modulate TS reply iff valid (!0) */
                    memcpy(&tsecr, &opt[6],
                        sizeof(u_int32_t));
                    if (tsecr && dst->scrub &&
                        (dst->scrub->pfss_flags &
                        PFSS_TIMESTAMP)) {
                        tsecr = ntohl(tsecr)
                            - dst->scrub->pfss_ts_mod;
                        pf_change_a(&opt[6],
                            &th->th_sum, htonl(tsecr),
                            0);
                        copyback = 1;
                    }
                    got_ts = 1;
                }
                /* FALLTHROUGH */
            default:
                hlen -= MAX(opt[1], 2);
                opt += MAX(opt[1], 2);
                break;
            }
        }
        if (copyback) {
            /* Copyback the options, caller copies back header */
            *writeback = 1;
            m_copyback(m, off + sizeof(struct tcphdr),
                (th->th_off << 2) - sizeof(struct tcphdr), hdr +
                sizeof(struct tcphdr));
        }
    }

    /*
     * Must invalidate PAWS checks on connections idle for too long.
     * The fastest allowed timestamp clock is 1ms.  That turns out to
     * be about 24 days before it wraps.  XXX Right now our lowerbound
     * TS echo check only works for the first 12 days of a connection
     * when the TS has exhausted half its 32bit space
     */
#define TS_MAX_IDLE (24*24*60*60)
#define TS_MAX_CONN (12*24*60*60)   /* XXX remove when better tsecr check */
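/*
 * Arithmetic behind the 24-day figure above: at the fastest allowed tick
 * of 1ms, half the 32-bit timestamp space is 2^31 ms, i.e. roughly
 * 2147483648 / 1000 / 86400 ~= 24.8 days, and TS_MAX_IDLE is expressed as
 * 24 days in seconds (24*24*60*60 == 2073600).  TS_MAX_CONN is the 12-day
 * half of that, matching the comment's caveat about the tsecr lowerbound.
 */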
    getmicrouptime(&uptime);
    if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
        (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
        time_uptime - state->creation > TS_MAX_CONN)) {
        if (V_pf_status.debug >= PF_DEBUG_MISC) {
            DPFPRINTF(("src idled out of PAWS\n"));
            pf_print_state(state);
            printf("\n");
        }
        src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
            | PFSS_PAWS_IDLED;
    }
    if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
        uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
        if (V_pf_status.debug >= PF_DEBUG_MISC) {
            DPFPRINTF(("dst idled out of PAWS\n"));
            pf_print_state(state);
            printf("\n");
        }
        dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
            | PFSS_PAWS_IDLED;
    }

    if (got_ts && src->scrub && dst->scrub &&
        (src->scrub->pfss_flags & PFSS_PAWS) &&
        (dst->scrub->pfss_flags & PFSS_PAWS)) {
        /* Validate that the timestamps are "in-window".
         * RFC1323 describes TCP Timestamp options that allow
         * measurement of RTT (round trip time) and PAWS
         * (protection against wrapped sequence numbers).  PAWS
         * gives us a set of rules for rejecting packets on
         * long fat pipes (packets that were somehow delayed
         * in transit longer than the time it took to send the
         * full TCP sequence space of 4Gb).  We can use these
         * rules and infer a few others that will let us treat
         * the 32bit timestamp and the 32bit echoed timestamp
         * as sequence numbers to prevent a blind attacker from
         * inserting packets into a connection.
         *
         * - The timestamp on this packet must be greater than
         *   or equal to the last value echoed by the other
         *   endpoint.  The RFC says those will be discarded
         *   since it is a dup that has already been acked.
         *   This gives us a lowerbound on the timestamp.
         *       timestamp >= other last echoed timestamp
         * - The timestamp will be less than or equal to
         *   the last timestamp plus the time between the
         *   last packet and now.  The RFC defines the max
         *   clock rate as 1ms.  We will allow clocks to be
         *   up to 10% fast and will allow a total difference
         *   of 30 seconds due to a route change.  And this
         *   gives us an upperbound on the timestamp.
         *       timestamp <= last timestamp + max ticks
         *   We have to be careful here.  Windows will send an
         *   initial timestamp of zero and then initialize it
         *   to a random value after the 3whs; presumably to
         *   avoid a DoS by having to call an expensive RNG
         *   during a SYN flood.  Proof MS has at least one
         *   good security geek.
         *
         * - The TCP timestamp option must also echo the other
         *   endpoints timestamp.  The timestamp echoed is the
         *   one carried on the earliest unacknowledged segment
         *   on the left edge of the sequence window.  The RFC
         *   states that the host will reject any echoed
         *   timestamps that were larger than any ever sent.
         *   This gives us an upperbound on the TS echo.
         *       tsecr <= largest_tsval
         * - The lowerbound on the TS echo is a little more
         *   tricky to determine.  The other endpoint's echoed
         *   values will not decrease.  But there may be
         *   network conditions that re-order packets and
         *   cause our view of them to decrease.  For now the
         *   only lowerbound we can safely determine is that
         *   the TS echo will never be less than the original
         *   TS.  XXX There is probably a better lowerbound.
         *   Remove TS_MAX_CONN with better lowerbound check.
         *       tsecr >= other original TS
         *
         * It is also important to note that the fastest
         * timestamp clock of 1ms will wrap its 32bit space in
         * 24 days.  So we just disable TS checking after 24
         * days of idle time.  We actually must use a 12d
         * connection limit until we can come up with a better
         * lowerbound to the TS echo check.
         */
        struct timeval delta_ts;
        int ts_fudge;

        /*
         * PFTM_TS_DIFF is how many seconds of leeway to allow
         * a host's timestamp.  This can happen if the previous
         * packet got delayed in transit for much longer than
         * this packet.
         */
        if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
            ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];

        /* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ  1100        /* RFC max TS freq of 1 kHz + 10% skew */
#define TS_MICROSECS    1000000 /* microseconds per second */
        delta_ts = uptime;
        timevalsub(&delta_ts, &src->scrub->pfss_last);
        tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
        tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);
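        /*
         * Worked example of the bound above (hypothetical numbers):
         * with ts_fudge = 30s and a packet arriving 10s after
         * pfss_last, a 1 kHz (+10%) sender clock may have advanced by
         * at most (10 + 30) * 1100 = 44000 ticks, plus the microsecond
         * remainder at one tick per 1000000/1100 ~= 909 microseconds.
         */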
        if ((src->state >= TCPS_ESTABLISHED &&
            dst->state >= TCPS_ESTABLISHED) &&
            (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
            SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
            (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
            SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
            /* Bad RFC1323 implementation or an insertion attack.
             *
             * - Solaris 2.6 and 2.7 are known to send another ACK
             *   after the FIN,FIN|ACK,ACK closing that carries
             *   an old timestamp.
             */

            DPFPRINTF(("Timestamp failed %c%c%c%c\n",
                SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
                SEQ_GT(tsval, src->scrub->pfss_tsval +
                tsval_from_last) ? '1' : ' ',
                SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
                SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
            DPFPRINTF((" tsval: %u  tsecr: %u  +ticks: %u  "
                "idle: %jus %lums\n",
                tsval, tsecr, tsval_from_last,
                (uintmax_t)delta_ts.tv_sec,
                delta_ts.tv_usec / 1000));
            DPFPRINTF((" src->tsval: %u  tsecr: %u\n",
                src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
            DPFPRINTF((" dst->tsval: %u  tsecr: %u  tsval0: %u"
                "\n", dst->scrub->pfss_tsval,
                dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
            if (V_pf_status.debug >= PF_DEBUG_MISC) {
                pf_print_state(state);
                pf_print_flags(th->th_flags);
                printf("\n");
            }
            REASON_SET(reason, PFRES_TS);
            return (PF_DROP);
        }
        /* XXX I'd really like to require tsecr but it's optional */

    } else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
        ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
        || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
        src->scrub && dst->scrub &&
        (src->scrub->pfss_flags & PFSS_PAWS) &&
        (dst->scrub->pfss_flags & PFSS_PAWS)) {
        /* Didn't send a timestamp.  Timestamps aren't really useful
         * when:
         * - connection opening or closing (often not even sent).
         *   but we must not let an attacker put a FIN on a
         *   data packet to sneak it through our ESTABLISHED check.
         * - on a TCP reset.  RFC suggests not even looking at TS.
         * - on an empty ACK.  The TS will not be echoed so it will
         *   probably not help keep the RTT calculation in sync and
         *   there isn't as much danger when the sequence numbers
         *   got wrapped.  So some stacks don't include TS on empty
         *   ACKs.
         *
         * To minimize the disruption to mostly RFC1323 conformant
         * stacks, we will only require timestamps on data packets.
         *
         * And what do ya know, we cannot require timestamps on data
         * packets.  There appear to be devices that do legitimate
         * TCP connection hijacking.  There are HTTP devices that allow
         * a 3whs (with timestamps) and then buffer the HTTP request.
         * If the intermediate device has the HTTP response cache, it
         * will spoof the response but not bother timestamping its
         * packets.  So we can look for the presence of a timestamp in
         * the first data packet and if there, require it in all future
         * packets.
         */

        if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
            /*
             * Hey!  Someone tried to sneak a packet in.  Or the
             * stack changed its RFC1323 behavior?!?!
             */
            if (V_pf_status.debug >= PF_DEBUG_MISC) {
                DPFPRINTF(("Did not receive expected RFC1323 "
                    "timestamp\n"));
                pf_print_state(state);
                pf_print_flags(th->th_flags);
                printf("\n");
            }
            REASON_SET(reason, PFRES_TS);
            return (PF_DROP);
        }
    }

    /*
     * We will note if a host sends its data packets with or without
     * timestamps.  And require all data packets to contain a timestamp
     * if the first does.  PAWS implicitly requires that all data packets be
     * timestamped.  But I think there are middle-man devices that hijack
     * TCP streams immediately after the 3whs and don't timestamp their
     * packets (seen in a WWW accelerator or cache).
     */
    if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
        (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
        if (got_ts)
            src->scrub->pfss_flags |= PFSS_DATA_TS;
        else {
            src->scrub->pfss_flags |= PFSS_DATA_NOTS;
            if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
                (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
                /* Don't warn if other host rejected RFC1323 */
                DPFPRINTF(("Broken RFC1323 stack did not "
                    "timestamp data packet. Disabled PAWS "
                    "security.\n"));
                pf_print_state(state);
                pf_print_flags(th->th_flags);
                printf("\n");
            }
        }
    }

    /*
     * Update PAWS values
     */
    if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
        (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
        getmicrouptime(&src->scrub->pfss_last);
        if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
            (src->scrub->pfss_flags & PFSS_PAWS) == 0)
            src->scrub->pfss_tsval = tsval;

        if (tsecr) {
            if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
                (src->scrub->pfss_flags & PFSS_PAWS) == 0)
                src->scrub->pfss_tsecr = tsecr;

            if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
                (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
                src->scrub->pfss_tsval0 == 0)) {
                /* tsval0 MUST be the lowest timestamp */
                src->scrub->pfss_tsval0 = tsval;
            }

            /* Only fully initialized after a TS gets echoed */
            if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
                src->scrub->pfss_flags |= PFSS_PAWS;
        }
    }

    /* I have a dream....  TCP segment reassembly.... */
    return (0);
}
static int
pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
    int off, sa_family_t af)
{
    u_int16_t   *mss;
    int          thoff;
    int          opt, cnt, optlen = 0;
    int          rewrite = 0;
    u_char       opts[TCP_MAXOLEN];
    u_char      *optp = opts;

    thoff = th->th_off << 2;
    cnt = thoff - sizeof(struct tcphdr);

    if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,
        NULL, NULL, af))
        return (rewrite);

    for (; cnt > 0; cnt -= optlen, optp += optlen) {
        opt = optp[0];
        if (opt == TCPOPT_EOL)
            break;
        if (opt == TCPOPT_NOP)
            optlen = 1;
        else {
            if (cnt < 2)
                break;
            optlen = optp[1];
            if (optlen < 2 || optlen > cnt)
                break;
        }
        switch (opt) {
        case TCPOPT_MAXSEG:
            mss = (u_int16_t *)(optp + 2);
            if ((ntohs(*mss)) > r->max_mss) {
                th->th_sum = pf_cksum_fixup(th->th_sum,
                    *mss, htons(r->max_mss), 0);
                *mss = htons(r->max_mss);
                rewrite = 1;
            }
            break;
        default:
            break;
        }
    }

    if (rewrite)
        m_copyback(m, off + sizeof(*th), thoff - sizeof(*th), opts);

    return (rewrite);
}
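/*
 * The MSS clamp above patches the option in place: pf_cksum_fixup()
 * adjusts the TCP checksum incrementally for the one 16-bit word that
 * changed (in the spirit of RFC 1624), so the segment does not have to be
 * checksummed again, and m_copyback() then writes the modified option
 * bytes back into the mbuf chain.
 */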
#ifdef INET
static void
pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
{
    struct mbuf *m = *m0;
    struct ip   *h = mtod(m, struct ip *);

    /* Clear IP_DF if no-df was requested */
    if (flags & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
        u_int16_t ip_off = h->ip_off;

        h->ip_off &= htons(~IP_DF);
        h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
    }

    /* Enforce a minimum ttl, may cause endless packet loops */
    if (min_ttl && h->ip_ttl < min_ttl) {
        u_int16_t ip_ttl = h->ip_ttl;

        h->ip_ttl = min_ttl;
        h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
    }

    /* Enforce tos */
    if (flags & PFRULE_SET_TOS) {
        u_int16_t ov, nv;

        ov = *(u_int16_t *)h;
        h->ip_tos = tos;
        nv = *(u_int16_t *)h;

        h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
    }

    /* random-id, but not for fragments */
    if (flags & PFRULE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
        u_int16_t ip_id = h->ip_id;

        h->ip_id = ip_randomid();
        h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
    }
}
#endif /* INET */
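/*
 * Note the guard on the random-id case above: the id is only rewritten
 * when no MF/offset bits are set, because all fragments of a datagram
 * must keep the original ip_id for the receiver to reassemble them.
 */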
#ifdef INET6
static void
pf_scrub_ip6(struct mbuf **m0, u_int8_t min_ttl)
{
    struct mbuf     *m = *m0;
    struct ip6_hdr  *h = mtod(m, struct ip6_hdr *);

    /* Enforce a minimum ttl, may cause endless packet loops */
    if (min_ttl && h->ip6_hlim < min_ttl)
        h->ip6_hlim = min_ttl;
}
#endif /* INET6 */
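/*
 * Unlike its IPv4 counterpart above, pf_scrub_ip6() needs no checksum
 * fixup: the IPv6 header carries no checksum of its own, so rewriting
 * ip6_hlim is safe by itself.
 */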