/*-
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * Copyright 2011 Alexander Bluhm <bluhm@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

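/*
 * Reassembly bookkeeping: every received fragment is described by a
 * pf_frent hanging off a per-datagram pf_fragment.  The pf_fragment is
 * keyed by the pf_fragment_cmp tuple and sits both in a red-black tree
 * (for lookup) and on an LRU tail queue (for expiry).
 */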
struct pf_frent {
	TAILQ_ENTRY(pf_frent) fr_next;
	struct mbuf	*fe_m;
	uint16_t	fe_hdrlen;	/* ipv4 header length with ip options
					   ipv6, extension, fragment header */
	uint16_t	fe_extoff;	/* last extension header offset or 0 */
	uint16_t	fe_len;		/* fragment length */
	uint16_t	fe_off;		/* fragment offset */
	uint16_t	fe_mff;		/* more fragment flag */
};

struct pf_fragment_cmp {
	struct pf_addr	frc_src;
	struct pf_addr	frc_dst;
	uint32_t	frc_id;
	sa_family_t	frc_af;
	uint8_t		frc_proto;
	uint8_t		frc_direction;
};

struct pf_fragment {
	struct pf_fragment_cmp	fr_key;
#define fr_src	fr_key.frc_src
#define fr_dst	fr_key.frc_dst
#define fr_id	fr_key.frc_id
#define fr_af	fr_key.frc_af
#define fr_proto	fr_key.frc_proto
#define fr_direction	fr_key.frc_direction

	RB_ENTRY(pf_fragment) fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	uint8_t		fr_flags;	/* status flags */
#define PFFRAG_SEENLAST		0x0001	/* Seen the last fragment for this */
#define PFFRAG_NOBUFFER		0x0002	/* Non-buffering fragment cache */
#define PFFRAG_DROP		0x0004	/* Drop all fragments */
#define BUFFER_FRAGMENTS(fr)	(!((fr)->fr_flags & PFFRAG_NOBUFFER))
	uint16_t	fr_max;		/* fragment data max */
	uint32_t	fr_timeout;
	uint16_t	fr_maxlen;	/* maximum length of single fragment */
	TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
};

struct pf_fragment_tag {
	uint16_t	ft_hdrlen;	/* header length of reassembled pkt */
	uint16_t	ft_extoff;	/* last extension header offset or 0 */
	uint16_t	ft_maxlen;	/* maximum fragment payload length */
	uint32_t	ft_id;		/* fragment id */
};

static struct mtx pf_frag_mtx;
#define PF_FRAG_LOCK()		mtx_lock(&pf_frag_mtx)
#define PF_FRAG_UNLOCK()	mtx_unlock(&pf_frag_mtx)
#define PF_FRAG_ASSERT()	mtx_assert(&pf_frag_mtx, MA_OWNED)

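/*
 * The fragment queues and trees below are per-VNET, but they are
 * normally only touched with the single global pf_frag_mtx held; the
 * PF_FRAG_ASSERT() calls throughout enforce this.
 */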
VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */
#define	V_pf_state_scrub_z	VNET(pf_state_scrub_z)

static VNET_DEFINE(uma_zone_t, pf_frent_z);
#define	V_pf_frent_z	VNET(pf_frent_z)
static VNET_DEFINE(uma_zone_t, pf_frag_z);
#define	V_pf_frag_z	VNET(pf_frag_z)

TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
static VNET_DEFINE(struct pf_fragqueue, pf_fragqueue);
#define	V_pf_fragqueue	VNET(pf_fragqueue)
static VNET_DEFINE(struct pf_cachequeue, pf_cachequeue);
#define	V_pf_cachequeue	VNET(pf_cachequeue)
RB_HEAD(pf_frag_tree, pf_fragment);
static VNET_DEFINE(struct pf_frag_tree, pf_frag_tree);
#define	V_pf_frag_tree	VNET(pf_frag_tree)
static VNET_DEFINE(struct pf_frag_tree, pf_cache_tree);
#define	V_pf_cache_tree	VNET(pf_cache_tree)
static int		 pf_frag_compare(struct pf_fragment *,
			    struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

static void	pf_flush_fragments(void);
static void	pf_free_fragment(struct pf_fragment *);
static void	pf_remove_fragment(struct pf_fragment *);
static int	pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
		    struct tcphdr *, int, sa_family_t);
static struct pf_frent *pf_create_fragment(u_short *);
static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
		    struct pf_frag_tree *tree);
static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
		    struct pf_frent *, u_short *);
static int	pf_isfull_fragment(struct pf_fragment *);
static struct mbuf *pf_join_fragment(struct pf_fragment *);
#ifdef INET
static void	pf_scrub_ip(struct mbuf **, uint32_t, uint8_t, uint8_t);
static int	pf_reassemble(struct mbuf **, struct ip *, int, u_short *);
static struct mbuf *pf_fragcache(struct mbuf **, struct ip*,
		    struct pf_fragment **, int, int, int *);
#endif	/* INET */
#ifdef INET6
static int	pf_reassemble6(struct mbuf **, struct ip6_hdr *,
		    struct ip6_frag *, uint16_t, uint16_t, int, u_short *);
static void	pf_scrub_ip6(struct mbuf **, uint8_t);
#endif	/* INET6 */

#define	DPFPRINTF(x) do {				\
	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
		printf("%s: ", __func__);		\
		printf x ;				\
	}						\
} while(0)

171 pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
174 key->frc_src.v4 = ip->ip_src;
175 key->frc_dst.v4 = ip->ip_dst;
176 key->frc_af = AF_INET;
177 key->frc_proto = ip->ip_p;
178 key->frc_id = ip->ip_id;
179 key->frc_direction = dir;
void
pf_normalize_init(void)
{

	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
	    sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");

	mtx_init(&pf_frag_mtx, "pf fragments", NULL, MTX_DEF);

	TAILQ_INIT(&V_pf_fragqueue);
	TAILQ_INIT(&V_pf_cachequeue);
}

void
pf_normalize_cleanup(void)
{

	uma_zdestroy(V_pf_state_scrub_z);
	uma_zdestroy(V_pf_frent_z);
	uma_zdestroy(V_pf_frag_z);

	mtx_destroy(&pf_frag_mtx);
}

static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id) != 0)
		return (diff);
	if ((diff = a->fr_proto - b->fr_proto) != 0)
		return (diff);
	if ((diff = a->fr_af - b->fr_af) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
		return (diff);
	return (0);
}

void
pf_purge_expired_fragments(void)
{
	struct pf_fragment	*frag;
	u_int32_t		 expire = time_uptime -
				    V_pf_default_rule.timeout[PFTM_FRAG];

	PF_FRAG_LOCK();
	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
		KASSERT((BUFFER_FRAGMENTS(frag)),
		    ("BUFFER_FRAGMENTS(frag) == 0: %s", __FUNCTION__));
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	while ((frag = TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue)) != NULL) {
		KASSERT((!BUFFER_FRAGMENTS(frag)),
		    ("BUFFER_FRAGMENTS(frag) != 0: %s", __FUNCTION__));
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
		KASSERT((TAILQ_EMPTY(&V_pf_cachequeue) ||
		    TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue) != frag),
		    ("!(TAILQ_EMPTY() || TAILQ_LAST() == frag): %s",
		    __FUNCTION__));
	}
	PF_FRAG_UNLOCK();
}

/*
 * Try to flush old fragments to make space for new ones
 */
static void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag, *cache;
	int			 goal;

	PF_FRAG_ASSERT();

	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
	DPFPRINTF(("trying to free %d frag entries\n", goal));
	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
		if (frag)
			pf_free_fragment(frag);
		cache = TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue);
		if (cache)
			pf_free_fragment(cache);
		if (frag == NULL && cache == NULL)
			break;
	}
}

/* Frees the fragments and all associated entries */
static void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent		*frent;

	PF_FRAG_ASSERT();

	/* Free all fragments */
	if (BUFFER_FRAGMENTS(frag)) {
		for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
		    frent = TAILQ_FIRST(&frag->fr_queue)) {
			TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

			m_freem(frent->fe_m);
			uma_zfree(V_pf_frent_z, frent);
		}
	} else {
		for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
		    frent = TAILQ_FIRST(&frag->fr_queue)) {
			TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

			KASSERT((TAILQ_EMPTY(&frag->fr_queue) ||
			    TAILQ_FIRST(&frag->fr_queue)->fe_off >
			    frent->fe_len),
			    ("! (TAILQ_EMPTY() || TAILQ_FIRST()->fe_off >"
			    " frent->fe_len): %s", __func__));

			uma_zfree(V_pf_frent_z, frent);
		}
	}

	pf_remove_fragment(frag);
}

static struct pf_fragment *
pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
{
	struct pf_fragment	*frag;

	PF_FRAG_ASSERT();

	frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
		frag->fr_timeout = time_uptime;
		if (BUFFER_FRAGMENTS(frag)) {
			TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
			TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
		} else {
			TAILQ_REMOVE(&V_pf_cachequeue, frag, frag_next);
			TAILQ_INSERT_HEAD(&V_pf_cachequeue, frag, frag_next);
		}
	}

	return (frag);
}

/* Removes a fragment from the fragment queue and frees the fragment */
static void
pf_remove_fragment(struct pf_fragment *frag)
{

	PF_FRAG_ASSERT();

	if (BUFFER_FRAGMENTS(frag)) {
		RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
		uma_zfree(V_pf_frag_z, frag);
	} else {
		RB_REMOVE(pf_frag_tree, &V_pf_cache_tree, frag);
		TAILQ_REMOVE(&V_pf_cachequeue, frag, frag_next);
		uma_zfree(V_pf_frag_z, frag);
	}
}

static struct pf_frent *
pf_create_fragment(u_short *reason)
{
	struct pf_frent *frent;

	PF_FRAG_ASSERT();

	frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
	if (frent == NULL) {
		pf_flush_fragments();
		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (NULL);
		}
	}

	return (frent);
}

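/*
 * Insert a fragment into the queue for its datagram, enforcing the
 * basic sanity rules (no empty fragments, 8-byte alignment of all but
 * the last fragment, maximum packet size) and trimming overlaps: data
 * already queued wins at the head of the new fragment (precut), while
 * the new fragment's tail overrides queued fragments it overlaps
 * (aftercut).
 */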
static struct pf_fragment *
pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
    u_short *reason)
{
	struct pf_frent		*after, *next, *prev;
	struct pf_fragment	*frag;
	uint16_t		 total;

	PF_FRAG_ASSERT();

	/* No empty fragments. */
	if (frent->fe_len == 0) {
		DPFPRINTF(("bad fragment: len 0"));
		goto bad_fragment;
	}

	/* All fragments are 8 byte aligned. */
	if (frent->fe_mff && (frent->fe_len & 0x7)) {
		DPFPRINTF(("bad fragment: mff and len %d", frent->fe_len));
		goto bad_fragment;
	}

	/* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
	if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
		DPFPRINTF(("bad fragment: max packet %d",
		    frent->fe_off + frent->fe_len));
		goto bad_fragment;
	}

	DPFPRINTF((key->frc_af == AF_INET ?
	    "reass frag %d @ %d-%d" : "reass frag %#08x @ %d-%d",
	    key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));

	/* Fully buffer all of the fragments in this fragment queue. */
	frag = pf_find_fragment(key, &V_pf_frag_tree);

	/* Create a new reassembly queue for this packet. */
	if (frag == NULL) {
		frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (frag == NULL) {
			pf_flush_fragments();
			frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (frag == NULL) {
				REASON_SET(reason, PFRES_MEMORY);
				goto drop_fragment;
			}
		}

		*(struct pf_fragment_cmp *)frag = *key;
		frag->fr_timeout = time_uptime;
		frag->fr_maxlen = frent->fe_len;
		TAILQ_INIT(&frag->fr_queue);

		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);

		/* We do not have a previous fragment. */
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);

		return (frag);
	}

	KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY()->fr_queue"));

	/* Remember maximum fragment len for refragmentation. */
	if (frent->fe_len > frag->fr_maxlen)
		frag->fr_maxlen = frent->fe_len;

	/* Maximum data we have seen already. */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Non terminal fragments must have more fragments flag. */
	if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
		goto bad_fragment;

	/* Check if we saw the last fragment already. */
	if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
		if (frent->fe_off + frent->fe_len > total ||
		    (frent->fe_off + frent->fe_len == total && frent->fe_mff))
			goto bad_fragment;
		if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
			goto drop_fragment;
	}

	/* Find a fragment after the current one. */
	prev = NULL;
	TAILQ_FOREACH(after, &frag->fr_queue, fr_next) {
		if (after->fe_off > frent->fe_off)
			break;
		prev = after;
	}

	KASSERT(prev != NULL || after != NULL,
	    ("prev != NULL || after != NULL"));

	if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
		uint16_t	precut;

		precut = prev->fe_off + prev->fe_len - frent->fe_off;
		if (precut >= frent->fe_len)
			goto bad_fragment;
		DPFPRINTF(("overlap -%d", precut));
		m_adj(frent->fe_m, precut);
		frent->fe_off += precut;
		frent->fe_len -= precut;
	}

	for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
	    after = next) {
		uint16_t	aftercut;

		aftercut = frent->fe_off + frent->fe_len - after->fe_off;
		DPFPRINTF(("adjust overlap %d", aftercut));
		if (aftercut < after->fe_len) {
			m_adj(after->fe_m, aftercut);
			after->fe_off += aftercut;
			after->fe_len -= aftercut;
			break;
		}

		/* This fragment is completely overlapped, lose it. */
		next = TAILQ_NEXT(after, fr_next);
		m_freem(after->fe_m);
		TAILQ_REMOVE(&frag->fr_queue, after, fr_next);
		uma_zfree(V_pf_frent_z, after);
	}

	if (prev == NULL)
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
	else
		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);

	return (frag);

bad_fragment:
	REASON_SET(reason, PFRES_FRAG);
drop_fragment:
	uma_zfree(V_pf_frent_z, frent);
	return (NULL);
}

static int
pf_isfull_fragment(struct pf_fragment *frag)
{
	struct pf_frent	*frent, *next;
	uint16_t	 off, total;

	/* Check if we are completely reassembled */
	if (TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff)
		return (0);

	/* Maximum data we have seen already */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Check if we have all the data */
	off = 0;
	for (frent = TAILQ_FIRST(&frag->fr_queue); frent; frent = next) {
		next = TAILQ_NEXT(frent, fr_next);

		off += frent->fe_len;
		if (off < total && (next == NULL || next->fe_off != off)) {
			DPFPRINTF(("missing fragment at %d, next %d, total %d",
			    off, next == NULL ? -1 : next->fe_off, total));
			return (0);
		}
	}
	DPFPRINTF(("%d < %d?", off, total));
	if (off < total)
		return (0);
	KASSERT(off == total, ("off == total"));

	return (1);
}

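/*
 * Concatenate all fragments into a single mbuf chain: the first entry
 * keeps its header, every later entry contributes payload only.
 */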
static struct mbuf *
pf_join_fragment(struct pf_fragment *frag)
{
	struct mbuf	*m, *m2;
	struct pf_frent	*frent, *next;

	frent = TAILQ_FIRST(&frag->fr_queue);
	next = TAILQ_NEXT(frent, fr_next);

	m = frent->fe_m;
	m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
	uma_zfree(V_pf_frent_z, frent);
	for (frent = next; frent != NULL; frent = next) {
		next = TAILQ_NEXT(frent, fr_next);

		m2 = frent->fe_m;
		/* Strip off ip header. */
		m_adj(m2, frent->fe_hdrlen);
		/* Strip off any trailing bytes. */
		m_adj(m2, frent->fe_len - m2->m_pkthdr.len);

		uma_zfree(V_pf_frent_z, frent);
		m_cat(m, m2);
	}

	/* Remove from fragment queue. */
	pf_remove_fragment(frag);

	return (m);
}

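/*
 * Returns PF_PASS with *m0 pointing at the reassembled datagram once
 * the last piece has arrived, PF_PASS with *m0 set to NULL while
 * fragments are still being collected, and PF_DROP on error.
 */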
#ifdef INET
static int
pf_reassemble(struct mbuf **m0, struct ip *ip, int dir, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	uint16_t		 total, hdrlen;

	/* Get an entry for the fragment queue */
	if ((frent = pf_create_fragment(reason)) == NULL)
		return (PF_DROP);

	frent->fe_m = m;
	frent->fe_hdrlen = ip->ip_hl << 2;
	frent->fe_extoff = 0;
	frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
	frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	frent->fe_mff = ntohs(ip->ip_off) & IP_MF;

	pf_ip2key(ip, dir, &key);

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
		return (PF_DROP);

	/* The mbuf is part of the fragment entry, no direct free or access */
	*m0 = NULL;

	if (!pf_isfull_fragment(frag))
		return (PF_PASS);  /* drop because *m0 is NULL, no error */

	/* We have all the data */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen;

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m->m_pkthdr.len = plen;
	}

	ip = mtod(m, struct ip *);
	ip->ip_len = htons(hdrlen + total);
	ip->ip_off &= ~(IP_MF|IP_OFFMASK);

	if (hdrlen + total > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d", total));
		ip->ip_len = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));

	return (PF_PASS);
}
#endif	/* INET */

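/*
 * IPv6 reassembly works like the IPv4 case, but the reassembled packet
 * additionally carries a PF_REASSEMBLED m_tag describing the original
 * header layout, so that pf_refragment6() can split it up again on
 * output.
 */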
#ifdef INET6
static int
pf_reassemble6(struct mbuf **m0, struct ip6_hdr *ip6, struct ip6_frag *fraghdr,
    uint16_t hdrlen, uint16_t extoff, int dir, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	struct m_tag		*mtag;
	struct pf_fragment_tag	*ftag;
	int			 off;
	uint32_t		 frag_id;
	uint16_t		 total, maxlen;
	uint8_t			 proto;

	PF_FRAG_LOCK();

	/* Get an entry for the fragment queue. */
	if ((frent = pf_create_fragment(reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	frent->fe_m = m;
	frent->fe_hdrlen = hdrlen;
	frent->fe_extoff = extoff;
	frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
	frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
	frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;

	key.frc_src.v6 = ip6->ip6_src;
	key.frc_dst.v6 = ip6->ip6_dst;
	key.frc_af = AF_INET6;
	/* Only the first fragment's protocol is relevant. */
	key.frc_proto = 0;
	key.frc_id = fraghdr->ip6f_ident;
	key.frc_direction = dir;

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	/* The mbuf is part of the fragment entry, no direct free or access. */
	*m0 = NULL;

	if (!pf_isfull_fragment(frag)) {
		PF_FRAG_UNLOCK();
		return (PF_PASS);  /* Drop because *m0 is NULL, no error. */
	}

	/* We have all the data. */
	extoff = frent->fe_extoff;
	maxlen = frag->fr_maxlen;
	frag_id = frag->fr_id;
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	PF_FRAG_UNLOCK();

	/* Take protocol from first fragment header. */
	m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
	KASSERT(m, ("%s: short mbuf chain", __func__));
	proto = *(mtod(m, caddr_t) + off);
	m = *m0;

	/* Delete frag6 header */
	if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
		goto fail;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m->m_pkthdr.len = plen;
	}

	if ((mtag = m_tag_get(PF_REASSEMBLED, sizeof(struct pf_fragment_tag),
	    M_NOWAIT)) == NULL)
		goto fail;
	ftag = (struct pf_fragment_tag *)(mtag + 1);
	ftag->ft_hdrlen = hdrlen;
	ftag->ft_extoff = extoff;
	ftag->ft_maxlen = maxlen;
	ftag->ft_id = frag_id;
	m_tag_prepend(m, mtag);

	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
	if (extoff) {
		/* Write protocol into next field of last extension header. */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT(m, ("%s: short mbuf chain", __func__));
		*(mtod(m, char *) + off) = proto;
		m = *m0;
	} else
		ip6->ip6_nxt = proto;

	if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d", total));
		ip6->ip6_plen = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)", m, ntohs(ip6->ip6_plen)));
	return (PF_PASS);

fail:
	REASON_SET(reason, PFRES_MEMORY);
	/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
	return (PF_DROP);
}
#endif	/* INET6 */

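/*
 * Non-buffering fragment cache, used by the "fragment crop" and
 * "fragment drop-ovl" scrub modes: fragments are passed through
 * immediately while only their offset ranges are remembered, so that
 * overlaps can be trimmed from (or, with drop set, held against) later
 * fragments.
 */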
#ifdef INET
static struct mbuf *
pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff,
    int drop, int *nomem)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frp, *fra, *cur = NULL;
	int			 ip_len = ntohs(h->ip_len) - (h->ip_hl << 2);
	u_int16_t		 off = ntohs(h->ip_off) << 3;
	u_int16_t		 max = ip_len + off;
	int			 hosed = 0;

	PF_FRAG_ASSERT();
	KASSERT((*frag == NULL || !BUFFER_FRAGMENTS(*frag)),
	    ("!(*frag == NULL || !BUFFER_FRAGMENTS(*frag)): %s", __FUNCTION__));

	/* Create a new range queue for this packet */
	if (*frag == NULL) {
		*frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (*frag == NULL) {
			pf_flush_fragments();
			*frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (*frag == NULL)
				goto no_mem;
		}

		/* Get an entry for the queue */
		cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (cur == NULL) {
			uma_zfree(V_pf_frag_z, *frag);
			*frag = NULL;
			goto no_mem;
		}

		(*frag)->fr_flags = PFFRAG_NOBUFFER;
		(*frag)->fr_max = 0;
		(*frag)->fr_src.v4 = h->ip_src;
		(*frag)->fr_dst.v4 = h->ip_dst;
		(*frag)->fr_id = h->ip_id;
		(*frag)->fr_timeout = time_uptime;

		cur->fe_off = off;
		cur->fe_len = max; /* TODO: fe_len = max - off ? */
		TAILQ_INIT(&(*frag)->fr_queue);
		TAILQ_INSERT_HEAD(&(*frag)->fr_queue, cur, fr_next);

		RB_INSERT(pf_frag_tree, &V_pf_cache_tree, *frag);
		TAILQ_INSERT_HEAD(&V_pf_cachequeue, *frag, frag_next);

		DPFPRINTF(("fragcache[%d]: new %d-%d\n", h->ip_id, off, max));

		goto pass;
	}

	/*
	 * Find a fragment after the current one:
	 *  - off contains the real shifted offset.
	 */
	frp = NULL;
	TAILQ_FOREACH(fra, &(*frag)->fr_queue, fr_next) {
		if (fra->fe_off > off)
			break;
		frp = fra;
	}

	KASSERT((frp != NULL || fra != NULL),
	    ("!(frp != NULL || fra != NULL): %s", __FUNCTION__));

	if (frp != NULL) {
		int	precut;

		precut = frp->fe_len - off;
		if (precut >= ip_len) {
			/* Fragment is entirely a duplicate */
			DPFPRINTF(("fragcache[%d]: dead (%d-%d) %d-%d\n",
			    h->ip_id, frp->fe_off, frp->fe_len, off, max));
			goto drop_fragment;
		}
		if (precut == 0) {
			/* They are adjacent.  Fixup cache entry */
			DPFPRINTF(("fragcache[%d]: adjacent (%d-%d) %d-%d\n",
			    h->ip_id, frp->fe_off, frp->fe_len, off, max));
			frp->fe_len = max;
		} else if (precut > 0) {
			/* The first part of this payload overlaps with a
			 * fragment that has already been passed.
			 * Need to trim off the first part of the payload.
			 * But to do so easily, we need to create another
			 * mbuf to throw the original header into.
			 */

			DPFPRINTF(("fragcache[%d]: chop %d (%d-%d) %d-%d\n",
			    h->ip_id, precut, frp->fe_off, frp->fe_len, off,
			    max));

			off += precut;
			max -= precut;
			/* Update the previous frag to encompass this one */
			frp->fe_len = max;

			if (!drop) {
				/* XXX Optimization opportunity
				 * This is a very heavy way to trim the payload.
				 * we could do it much faster by diddling mbuf
				 * internals but that would be even less legible
				 * than this mbuf magic.  For my next trick,
				 * I'll pull a rabbit out of my laptop.
				 */
				*m0 = m_dup(m, M_NOWAIT);
				if (*m0 == NULL)
					goto no_mem;
				/* From KAME Project : We have missed this! */
				m_adj(*m0, (h->ip_hl << 2) -
				    (*m0)->m_pkthdr.len);

				KASSERT(((*m0)->m_next == NULL),
				    ("(*m0)->m_next != NULL: %s",
				    __FUNCTION__));
				m_adj(m, precut + (h->ip_hl << 2));
				m_cat(*m0, m);
				m = *m0;
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;
					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}

				h = mtod(m, struct ip *);

				KASSERT(((int)m->m_len ==
				    ntohs(h->ip_len) - precut),
				    ("m->m_len != ntohs(h->ip_len) - precut: %s",
				    __FUNCTION__));
				h->ip_off = htons(ntohs(h->ip_off) +
				    (precut >> 3));
				h->ip_len = htons(ntohs(h->ip_len) - precut);
			} else {
				hosed++;
			}
		} else {
			/* There is a gap between fragments */

			DPFPRINTF(("fragcache[%d]: gap %d (%d-%d) %d-%d\n",
			    h->ip_id, -precut, frp->fe_off, frp->fe_len, off,
			    max));

			cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
			if (cur == NULL)
				goto no_mem;

			cur->fe_off = off;
			cur->fe_len = max;
			TAILQ_INSERT_AFTER(&(*frag)->fr_queue, frp, cur, fr_next);
		}
	}

	if (fra != NULL) {
		int	aftercut;
		int	merge = 0;

		aftercut = max - fra->fe_off;
		if (aftercut == 0) {
			/* Adjacent fragments */
			DPFPRINTF(("fragcache[%d]: adjacent %d-%d (%d-%d)\n",
			    h->ip_id, off, max, fra->fe_off, fra->fe_len));
			fra->fe_off = off;
			merge = 1;
		} else if (aftercut > 0) {
			/* Need to chop off the tail of this fragment */
			DPFPRINTF(("fragcache[%d]: chop %d %d-%d (%d-%d)\n",
			    h->ip_id, aftercut, off, max, fra->fe_off,
			    fra->fe_len));
			fra->fe_off = off;
			max = fra->fe_len;

			merge = 1;

			if (!drop) {
				m_adj(m, -aftercut);
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;
					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}
				h = mtod(m, struct ip *);
				KASSERT(((int)m->m_len == ntohs(h->ip_len) - aftercut),
				    ("m->m_len != ntohs(h->ip_len) - aftercut: %s",
				    __FUNCTION__));
				h->ip_len = htons(ntohs(h->ip_len) - aftercut);
			} else {
				hosed++;
			}
		} else if (frp == NULL) {
			/* There is a gap between fragments */
			DPFPRINTF(("fragcache[%d]: gap %d %d-%d (%d-%d)\n",
			    h->ip_id, -aftercut, off, max, fra->fe_off,
			    fra->fe_len));

			cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
			if (cur == NULL)
				goto no_mem;

			cur->fe_off = off;
			cur->fe_len = max;
			TAILQ_INSERT_HEAD(&(*frag)->fr_queue, cur, fr_next);
		}

		/* Need to glue together two separate fragment descriptors */
		if (merge) {
			if (cur && fra->fe_off <= cur->fe_len) {
				/* Need to merge in a previous 'cur' */
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, cur->fe_off, cur->fe_len, off,
				    max, fra->fe_off, fra->fe_len));
				fra->fe_off = cur->fe_off;
				TAILQ_REMOVE(&(*frag)->fr_queue, cur, fr_next);
				uma_zfree(V_pf_frent_z, cur);
				cur = NULL;
			} else if (frp && fra->fe_off <= frp->fe_len) {
				/* Need to merge in a modified 'frp' */
				KASSERT((cur == NULL), ("cur != NULL: %s",
				    __FUNCTION__));
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, frp->fe_off, frp->fe_len, off,
				    max, fra->fe_off, fra->fe_len));
				fra->fe_off = frp->fe_off;
				TAILQ_REMOVE(&(*frag)->fr_queue, frp, fr_next);
				uma_zfree(V_pf_frent_z, frp);
				frp = NULL;
			}
		}
	}

	if (hosed) {
		/*
		 * We must keep tracking the overall fragment even when
		 * we're going to drop it anyway so that we know when to
		 * free the overall descriptor.  Thus we drop the frag late.
		 */
		goto drop_fragment;
	}

 pass:
	/* Update maximum data size */
	if ((*frag)->fr_max < max)
		(*frag)->fr_max = max;

	/* This is the last segment */
	if (!mff)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	/* Check if we are completely reassembled */
	if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
	    TAILQ_FIRST(&(*frag)->fr_queue)->fe_off == 0 &&
	    TAILQ_FIRST(&(*frag)->fr_queue)->fe_len == (*frag)->fr_max) {
		/* Remove from fragment queue */
		DPFPRINTF(("fragcache[%d]: done 0-%d\n", h->ip_id,
		    (*frag)->fr_max));
		pf_free_fragment(*frag);
		*frag = NULL;
	}

	return (m);

 no_mem:
	*nomem = 1;

	/* Still need to pay attention to !IP_MF */
	if (!mff && *frag != NULL)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	m_freem(m);
	return (NULL);

 drop_fragment:

	/* Still need to pay attention to !IP_MF */
	if (!mff && *frag != NULL)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	if (drop) {
		/* This fragment has been deemed bad.  Don't reass */
		if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
			DPFPRINTF(("fragcache[%d]: dropping overall fragment\n",
			    h->ip_id));
		(*frag)->fr_flags |= PFFRAG_DROP;
	}

	m_freem(m);
	return (NULL);
}
#endif	/* INET */

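/*
 * Split a reassembled packet back into fragments on output, using the
 * header layout and maximum fragment size saved in the PF_REASSEMBLED
 * tag, so that the wire sees fragments again after pf has filtered the
 * complete datagram.
 */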
#ifdef INET6
int
pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag)
{
	struct mbuf		*m = *m0, *t;
	struct pf_fragment_tag	*ftag = (struct pf_fragment_tag *)(mtag + 1);
	struct pf_pdesc		 pd;
	uint32_t		 frag_id;
	uint16_t		 hdrlen, extoff, maxlen;
	uint8_t			 proto;
	int			 error, action;

	hdrlen = ftag->ft_hdrlen;
	extoff = ftag->ft_extoff;
	maxlen = ftag->ft_maxlen;
	frag_id = ftag->ft_id;
	m_tag_delete(m, mtag);
	mtag = NULL;
	ftag = NULL;

	if (extoff) {
		int off;

		/* Use protocol from next field of last extension header */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
		proto = *(mtod(m, caddr_t) + off);
		*(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
		m = *m0;
	} else {
		struct ip6_hdr *hdr;

		hdr = mtod(m, struct ip6_hdr *);
		proto = hdr->ip6_nxt;
		hdr->ip6_nxt = IPPROTO_FRAGMENT;
	}

	/*
	 * Maxlen may be less than 8 if there was only a single
	 * fragment.  As it was fragmented before, add a fragment
	 * header also for a single fragment.  If total or maxlen
	 * is less than 8, ip6_fragment() will return EMSGSIZE and
	 * we drop the packet.
	 */
	error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
	m = (*m0)->m_nextpkt;
	(*m0)->m_nextpkt = NULL;
	if (error == 0) {
		/* The first mbuf contains the unfragmented packet. */
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else {
		/* Drop expects an mbuf to free. */
		DPFPRINTF(("refragment error %d", error));
		action = PF_DROP;
	}
	for (t = m; m; m = t) {
		t = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m->m_flags |= M_SKIP_FIREWALL;
		memset(&pd, 0, sizeof(pd));
		pd.pf_mtag = pf_find_mtag(m);
		if (error == 0)
			ip6_forward(m, 0);
		else
			m_freem(m);
	}

	return (action);
}
#endif /* INET6 */

#ifdef INET
int
pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
    struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct pf_fragment	*frag = NULL;
	struct pf_fragment_cmp	 key;
	struct ip		*h = mtod(m, struct ip *);
	int			 mff = (ntohs(h->ip_off) & IP_MF);
	int			 hlen = h->ip_hl << 2;
	u_int16_t		 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t		 max;
	int			 ip_len;
	int			 ip_off;
	int			 tag = -1;
	int			 verdict;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->match_tag && !pf_match_tag(m, r, &tag,
		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
			r = TAILQ_NEXT(r, entries);
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip))
		goto drop;

	if (hlen > ntohs(h->ip_len))
		goto drop;

	/* Clear IP_DF if the rule uses the no-df option */
	if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now. Don't allow fragments
	 * with IP_DF to enter the cache. If the flag was cleared by
	 * no-df above, fine. Otherwise drop it.
	 */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;
	ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}
	max = fragoff + ip_len;

	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {

		/* Fully buffer all of the fragments */
		PF_FRAG_LOCK();

		pf_ip2key(h, dir, &key);
		frag = pf_find_fragment(&key, &V_pf_frag_tree);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max)
			goto bad;

		/* Might return a completely reassembled mbuf, or NULL */
		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
		verdict = pf_reassemble(m0, h, dir, reason);
		PF_FRAG_UNLOCK();

		if (verdict != PF_PASS)
			return (PF_DROP);

		m = *m0;
		if (m == NULL)
			return (PF_DROP);

		/* use mtag from concatenated mbuf chain */
		pd->pf_mtag = pf_find_mtag(m);
#ifdef DIAGNOSTIC
		if (pd->pf_mtag == NULL) {
			printf("%s: pf_find_mtag returned NULL(1)\n", __func__);
			if ((pd->pf_mtag = pf_get_mtag(m)) == NULL) {
				m_freem(m);
				*m0 = NULL;
				goto no_mem;
			}
		}
#endif
		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
			goto drop;

		h = mtod(m, struct ip *);
	} else {
		/* non-buffering fragment cache (drops or masks overlaps) */
		int	nomem = 0;

		if (dir == PF_OUT && pd->pf_mtag->flags & PF_TAG_FRAGCACHE) {
			/*
			 * Already passed the fragment cache in the
			 * input direction.  If we continued, it would
			 * appear to be a dup and would be dropped.
			 */
			goto fragment_pass;
		}

		PF_FRAG_LOCK();
		pf_ip2key(h, dir, &key);
		frag = pf_find_fragment(&key, &V_pf_cache_tree);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max) {
			if (r->rule_flag & PFRULE_FRAGDROP)
				frag->fr_flags |= PFFRAG_DROP;
			goto bad;
		}

		*m0 = m = pf_fragcache(m0, h, &frag, mff,
		    (r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);
		PF_FRAG_UNLOCK();
		if (m == NULL) {
			if (nomem)
				goto no_mem;
			goto drop;
		}

		/* use mtag from copied and trimmed mbuf chain */
		pd->pf_mtag = pf_find_mtag(m);
#ifdef DIAGNOSTIC
		if (pd->pf_mtag == NULL) {
			printf("%s: pf_find_mtag returned NULL(2)\n", __func__);
			if ((pd->pf_mtag = pf_get_mtag(m)) == NULL) {
				m_freem(m);
				*m0 = NULL;
				goto no_mem;
			}
		}
#endif
		if (dir == PF_IN)
			pd->pf_mtag->flags |= PF_TAG_FRAGCACHE;

		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
			goto drop;

		goto fragment_pass;
	}

 no_fragment:
	/* At this point, only IP_DF is allowed in ip_off */
	if (h->ip_off & ~htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* not missing a return here */

 fragment_pass:
	pf_scrub_ip(&m, r->rule_flag, r->min_ttl, r->set_tos);

	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0)
		pd->flags |= PFDESC_IP_REAS;
	return (PF_PASS);

 no_mem:
	REASON_SET(reason, PFRES_MEMORY);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

 drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

 bad:
	DPFPRINTF(("dropping bad fragment\n"));

	/* Free associated fragments */
	if (frag != NULL) {
		pf_free_fragment(frag);
		PF_FRAG_UNLOCK();
	}

	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
		    1);

	return (PF_DROP);
}
#endif	/* INET */

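/*
 * The IPv6 path walks the extension header chain by hand: it records
 * the offset of the last extension header (extoff), validates
 * hop-by-hop options including the jumbo payload length, and diverts
 * to the reassembly path as soon as a fragment header is found.
 */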
#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
    u_short *reason, struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
	int			 extoff;
	int			 off;
	struct ip6_ext		 ext;
	struct ip6_opt		 opt;
	struct ip6_opt_jumbo	 jumbo;
	struct ip6_frag		 frag;
	u_int32_t		 jumbolen = 0, plen;
	int			 optend;
	int			 ooff;
	u_int8_t		 proto;
	int			 terminal;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
		else if (r->proto && r->proto != h->ip6_nxt)
			r = r->skip[PF_SKIP_PROTO].ptr;
#endif
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip6_src, AF_INET6,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip6_dst, AF_INET6,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
		goto drop;

	extoff = 0;
	off = sizeof(struct ip6_hdr);
	proto = h->ip6_nxt;
	terminal = 0;
	do {
		switch (proto) {
		case IPPROTO_FRAGMENT:
			goto fragment;
			break;
		case IPPROTO_AH:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			extoff = off;
			if (proto == IPPROTO_AH)
				off += (ext.ip6e_len + 2) * 4;
			else
				off += (ext.ip6e_len + 1) * 8;
			proto = ext.ip6e_nxt;
			break;
		case IPPROTO_HOPOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			extoff = off;
			optend = off + (ext.ip6e_len + 1) * 8;
			ooff = off + sizeof(ext);
			do {
				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
				    sizeof(opt.ip6o_type), NULL, NULL,
				    AF_INET6))
					goto shortpkt;
				if (opt.ip6o_type == IP6OPT_PAD1) {
					ooff++;
					continue;
				}
				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
				    NULL, NULL, AF_INET6))
					goto shortpkt;
				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
					goto drop;
				switch (opt.ip6o_type) {
				case IP6OPT_JUMBO:
					if (h->ip6_plen != 0)
						goto drop;
					if (!pf_pull_hdr(m, ooff, &jumbo,
					    sizeof(jumbo), NULL, NULL,
					    AF_INET6))
						goto shortpkt;
					memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
					    sizeof(jumbolen));
					jumbolen = ntohl(jumbolen);
					if (jumbolen <= IPV6_MAXPACKET)
						goto drop;
					if (sizeof(struct ip6_hdr) + jumbolen !=
					    m->m_pkthdr.len)
						goto drop;
					break;
				default:
					break;
				}
				ooff += sizeof(opt) + opt.ip6o_len;
			} while (ooff < optend);

			off = optend;
			proto = ext.ip6e_nxt;
			break;
		default:
			terminal = 1;
			break;
		}
	} while (!terminal);

	/* jumbo payload option must be present, or plen > 0 */
	if (ntohs(h->ip6_plen) == 0)
		plen = jumbolen;
	else
		plen = ntohs(h->ip6_plen);
	if (plen == 0)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	pf_scrub_ip6(&m, r->min_ttl);

	return (PF_PASS);

 fragment:
	/* Jumbo payload packets cannot be fragmented. */
	plen = ntohs(h->ip6_plen);
	if (plen == 0 || jumbolen)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
		goto shortpkt;

	/* Offset now points to data portion. */
	off += sizeof(frag);

	/* Returns PF_DROP or *m0 is NULL or completely reassembled mbuf. */
	if (pf_reassemble6(m0, h, &frag, off, extoff, dir, reason) != PF_PASS)
		return (PF_DROP);
	m = *m0;
	if (m == NULL)
		return (PF_DROP);

	pd->flags |= PFDESC_IP_REAS;
	return (PF_PASS);

 shortpkt:
	REASON_SET(reason, PFRES_SHORT);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

 drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}
#endif /* INET6 */

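/*
 * Besides dropping inconsistent TCP flag combinations, this also
 * clears the reserved th_x2 bits and, when TH_URG is not set, the
 * urgent pointer, fixing up the checksum incrementally for each
 * change.
 */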
int
pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
	struct pf_rule	*r, *rm = NULL;
	struct tcphdr	*th = pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int8_t	 flags;
	sa_family_t	 af = pd->af;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, m, off, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (rm == NULL || rm->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = th->th_flags;
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			flags &= ~TH_FIN;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != th->th_flags || th->th_x2 != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		th->th_flags = flags;
		th->th_x2 = 0;
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_cksum_fixup(th->th_sum, ov, nv, 0);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_cksum_fixup(th->th_sum, th->th_urp, 0, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* Process options */
	if (r->max_mss && pf_normalize_tcpopt(r, m, th, off, pd->af))
		rewrite = 1;

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

 tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}

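/*
 * On the initial SYN, record the peer's TTL and, if a timestamp option
 * is present, start modulating timestamps with a random per-connection
 * offset (pfss_ts_mod).  The stateful code below builds its PAWS checks
 * on top of the values captured here.
 */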
int
pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int32_t	 tsval, tsecr;
	u_int8_t	 hdr[60];
	u_int8_t	*opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}

	/*
	 * All normalizations below are only begun if we see the start of
	 * the connections.  They must all set an enabled bit in pfss_flags
	 */
	if ((th->th_flags & TH_SYN) == 0)
		return (0);

	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod =
					    htonl(arc4random());

					/* note PFSS_PAWS not set yet */
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					src->scrub->pfss_tsval0 = ntohl(tsval);
					src->scrub->pfss_tsval = ntohl(tsval);
					src->scrub->pfss_tsecr = ntohl(tsecr);
					getmicrouptime(&src->scrub->pfss_last);
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}

void
pf_normalize_tcp_cleanup(struct pf_state *state)
{

	if (state->src.scrub)
		uma_zfree(V_pf_state_scrub_z, state->src.scrub);
	if (state->dst.scrub)
		uma_zfree(V_pf_state_scrub_z, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}

int
pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_state *state,
    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
{
	struct timeval	 uptime;
	u_int32_t	 tsval, tsecr;
	u_int		 tsval_from_last;
	u_int8_t	 hdr[60];
	u_int8_t	*opt;
	int		 copyback = 0;
	int		 got_ts = 0;

	KASSERT((src->scrub || dst->scrub),
	    ("%s: src->scrub && dst->scrub!", __func__));

	/*
	 * Enforce the minimum TTL seen for this connection.  Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/* Modulate the timestamps.  Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */

				if (got_ts) {
					/* Huh?  Multiple timestamps!? */
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						DPFPRINTF(("multiple TS??"));
						pf_print_state(state);
						printf("\n");
					}
					REASON_SET(reason, PFRES_TS);
					return (PF_DROP);
				}
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					if (tsval && src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsval = ntohl(tsval);
						pf_change_a(&opt[2],
						    &th->th_sum,
						    htonl(tsval +
						    src->scrub->pfss_ts_mod),
						    0);
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					if (tsecr && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsecr = ntohl(tsecr)
						    - dst->scrub->pfss_ts_mod;
						pf_change_a(&opt[6],
						    &th->th_sum, htonl(tsecr),
						    0);
						copyback = 1;
					}
					got_ts = 1;
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copyback the options, caller copies back header */
			*writeback = 1;
			m_copyback(m, off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}

	/*
	 * Must invalidate PAWS checks on connections idle for too long.
	 * The fastest allowed timestamp clock is 1ms.  That turns out to
	 * be about 24 days before it wraps.  XXX Right now our lowerbound
	 * TS echo check only works for the first 12 days of a connection
	 * when the TS has exhausted half its 32bit space
	 */
#define TS_MAX_IDLE	(24*24*60*60)
#define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */

	getmicrouptime(&uptime);
	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
	    time_uptime - state->creation > TS_MAX_CONN)) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("src idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}
	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("dst idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}

	if (got_ts && src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Validate that the timestamps are "in-window".
		 * RFC1323 describes TCP Timestamp options that allow
		 * measurement of RTT (round trip time) and PAWS
		 * (protection against wrapped sequence numbers).  PAWS
		 * gives us a set of rules for rejecting packets on
		 * long fat pipes (packets that were somehow delayed
		 * in transit longer than the time it took to send the
		 * full TCP sequence space of 4Gb).  We can use these
		 * rules and infer a few others that will let us treat
		 * the 32bit timestamp and the 32bit echoed timestamp
		 * as sequence numbers to prevent a blind attacker from
		 * inserting packets into a connection.
		 *
		 * RFC1323 tells us:
		 *  - The timestamp on this packet must be greater than
		 *    or equal to the last value echoed by the other
		 *    endpoint.  The RFC says those will be discarded
		 *    since it is a dup that has already been acked.
		 *    This gives us a lowerbound on the timestamp.
		 *        timestamp >= other last echoed timestamp
		 *  - The timestamp will be less than or equal to
		 *    the last timestamp plus the time between the
		 *    last packet and now.  The RFC defines the max
		 *    clock rate as 1ms.  We will allow clocks to be
		 *    up to 10% fast and will allow a total difference
		 *    of 30 seconds due to a route change.  And this
		 *    gives us an upperbound on the timestamp.
		 *        timestamp <= last timestamp + max ticks
		 *    We have to be careful here.  Windows will send an
		 *    initial timestamp of zero and then initialize it
		 *    to a random value after the 3whs; presumably to
		 *    avoid a DoS by having to call an expensive RNG
		 *    during a SYN flood.  Proof MS has at least one
		 *    good security geek.
		 *
		 *  - The TCP timestamp option must also echo the other
		 *    endpoint's timestamp.  The timestamp echoed is the
		 *    one carried on the earliest unacknowledged segment
		 *    on the left edge of the sequence window.  The RFC
		 *    states that the host will reject any echoed
		 *    timestamps that were larger than any ever sent.
		 *    This gives us an upperbound on the TS echo.
		 *        tsecr <= largest_tsval
		 *  - The lowerbound on the TS echo is a little more
		 *    tricky to determine.  The other endpoint's echoed
		 *    values will not decrease.  But there may be
		 *    network conditions that re-order packets and
		 *    cause our view of them to decrease.  For now the
		 *    only lowerbound we can safely determine is that
		 *    the TS echo will never be less than the original
		 *    TS.  XXX There is probably a better lowerbound.
		 *    Remove TS_MAX_CONN with better lowerbound check.
		 *        tsecr >= other original TS
		 *
		 * It is also important to note that the fastest
		 * timestamp clock of 1ms will wrap its 32bit space in
		 * 24 days.  So we just disable TS checking after 24
		 * days of idle time.  We actually must use a 12d
		 * connection limit until we can come up with a better
		 * lowerbound to the TS echo check.
		 */
		struct timeval	delta_ts;
		int		ts_fudge;

		/*
		 * PFTM_TS_DIFF is how many seconds of leeway to allow
		 * a host's timestamp.  This can happen if the previous
		 * packet got delayed in transit for much longer than
		 * this packet.
		 */
		if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];

		/* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ	1100		/* RFC max TS freq of 1Khz + 10% skew */
#define TS_MICROSECS	1000000		/* microseconds per second */
		delta_ts = uptime;
		timevalsub(&delta_ts, &src->scrub->pfss_last);
		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);

		if ((src->state >= TCPS_ESTABLISHED &&
		    dst->state >= TCPS_ESTABLISHED) &&
		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
			/* Bad RFC1323 implementation or an insertion attack.
			 *
			 * - Solaris 2.6 and 2.7 are known to send another ACK
			 *   after the FIN,FIN|ACK,ACK closing that carries
			 *   an old timestamp.
			 */

			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
			    SEQ_GT(tsval, src->scrub->pfss_tsval +
			    tsval_from_last) ? '1' : ' ',
			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
			DPFPRINTF((" tsval: %u  tsecr: %u  +ticks: %u  "
			    "idle: %jus %lums\n",
			    tsval, tsecr, tsval_from_last,
			    (uintmax_t)delta_ts.tv_sec,
			    delta_ts.tv_usec / 1000));
			DPFPRINTF((" src->tsval: %u  tsecr: %u\n",
			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
			DPFPRINTF((" dst->tsval: %u  tsecr: %u  tsval0: %u"
			    "\n", dst->scrub->pfss_tsval,
			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}

		/* XXX I'd really like to require tsecr but it's optional */

	} else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
	    || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
	    src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Didn't send a timestamp.  Timestamps aren't really useful
		 * when:
		 *  - connection opening or closing (often not even sent).
		 *    but we must not let an attacker to put a FIN on a
		 *    data packet to sneak it through our ESTABLISHED check.
		 *  - on a TCP reset.  RFC suggests not even looking at TS.
		 *  - on an empty ACK.  The TS will not be echoed so it will
		 *    probably not help keep the RTT calculation in sync and
		 *    there isn't as much danger when the sequence numbers
		 *    got wrapped.  So some stacks don't include TS on empty
		 *    ACKs :-(
		 *
		 * To minimize the disruption to mostly RFC1323 conformant
		 * stacks, we will only require timestamps on data packets.
		 *
		 * And what do ya know, we cannot require timestamps on data
		 * packets.  There appear to be devices that do legitimate
		 * TCP connection hijacking.  There are HTTP devices that allow
		 * a 3whs (with timestamps) and then buffer the HTTP request.
		 * If the intermediate device has the HTTP response cache, it
		 * will spoof the response but not bother timestamping its
		 * packets.  So we can look for the presence of a timestamp in
		 * the first data packet and if there, require it in all future
		 * packets.
		 */

		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
			/*
			 * Hey!  Someone tried to sneak a packet in.  Or the
			 * stack changed its RFC1323 behavior?!?!
			 */
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				DPFPRINTF(("Did not receive expected RFC1323 "
				    "timestamp\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}
	}

	/*
	 * We will note if a host sends his data packets with or without
	 * timestamps.  And require all data packets to contain a timestamp
	 * if the first does.  PAWS implicitly requires that all data packets be
	 * timestamped.  But I think there are middle-man devices that hijack
	 * TCP streams immediately after the 3whs and don't timestamp their
	 * packets (seen in a WWW accelerator or cache).
	 */
	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
		if (got_ts)
			src->scrub->pfss_flags |= PFSS_DATA_TS;
		else {
			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
				/* Don't warn if other host rejected RFC1323 */
				DPFPRINTF(("Broken RFC1323 stack did not "
				    "timestamp data packet. Disabled PAWS "
				    "security.\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
		}
	}

	/*
	 * Update PAWS values
	 */
	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
		getmicrouptime(&src->scrub->pfss_last);
		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
			src->scrub->pfss_tsval = tsval;

		if (tsecr) {
			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_tsecr = tsecr;

			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
			    src->scrub->pfss_tsval0 == 0)) {
				/* tsval0 MUST be the lowest timestamp */
				src->scrub->pfss_tsval0 = tsval;
			}

			/* Only fully initialized after a TS gets echoed */
			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_flags |= PFSS_PAWS;
		}
	}

	/* I have a dream....  TCP segment reassembly.... */
	return (0);
}

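/*
 * Clamp the advertised MSS option to the rule's max-mss value (the
 * pf.conf "scrub ... max-mss" option), fixing up the TCP checksum to
 * match the rewrite.
 */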
static int
pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
    int off, sa_family_t af)
{
	u_int16_t	*mss;
	int		 thoff;
	int		 opt, cnt, optlen = 0;
	int		 rewrite = 0;
	u_char		 opts[TCP_MAXOLEN];
	u_char		*optp = opts;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);

	if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,
	    NULL, NULL, af))
		return (rewrite);

	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > r->max_mss) {
				th->th_sum = pf_cksum_fixup(th->th_sum,
				    *mss, htons(r->max_mss), 0);
				*mss = htons(r->max_mss);
				rewrite = 1;
			}
			break;
		default:
			break;
		}
	}

	if (rewrite)
		m_copyback(m, off + sizeof(*th), thoff - sizeof(*th), opts);

	return (rewrite);
}

#ifdef INET
static void
pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
{
	struct mbuf		*m = *m0;
	struct ip		*h = mtod(m, struct ip *);

	/* Clear IP_DF if no-df was requested */
	if (flags & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (min_ttl && h->ip_ttl < min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}

	/* Enforce tos */
	if (flags & PFRULE_SET_TOS) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)h;
		h->ip_tos = tos;
		nv = *(u_int16_t *)h;

		h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
	}

	/* random-id, but not for fragments */
	if (flags & PFRULE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
		u_int16_t ip_id = h->ip_id;

		h->ip_id = ip_randomid();
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
	}
}
#endif /* INET */

#ifdef INET6
static void
pf_scrub_ip6(struct mbuf **m0, u_int8_t min_ttl)
{
	struct mbuf		*m = *m0;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (min_ttl && h->ip6_hlim < min_ttl)
		h->ip6_hlim = min_ttl;
}
#endif