 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * Copyright 2011 Alexander Bluhm <bluhm@openbsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>

#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>

#include <netinet/ip6.h>
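
/*
 * A pf_frent holds a single received fragment: the mbuf carrying the
 * data plus the header bookkeeping needed to splice the payload back
 * into place later (offsets and lengths are in bytes).
 */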
struct pf_frent {
	TAILQ_ENTRY(pf_frent) fr_next;
	struct mbuf	*fe_m;
	uint16_t	fe_hdrlen;	/* ipv4 header length with ip options
					   ipv6, extension, fragment header */
	uint16_t	fe_extoff;	/* last extension header offset or 0 */
	uint16_t	fe_len;		/* fragment length */
	uint16_t	fe_off;		/* fragment offset */
	uint16_t	fe_mff;		/* more fragment flag */
};
struct pf_fragment_cmp {
	struct pf_addr	frc_src;
	struct pf_addr	frc_dst;
	uint32_t	frc_id;
	sa_family_t	frc_af;
	uint8_t		frc_proto;
};

struct pf_fragment {
	struct pf_fragment_cmp	fr_key;
#define fr_src	fr_key.frc_src
#define fr_dst	fr_key.frc_dst
#define fr_id	fr_key.frc_id
#define fr_af	fr_key.frc_af
#define fr_proto	fr_key.frc_proto

	RB_ENTRY(pf_fragment) fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	uint8_t		fr_flags;	/* status flags */
#define PFFRAG_SEENLAST		0x0001	/* Seen the last fragment for this */
#define PFFRAG_NOBUFFER		0x0002	/* Non-buffering fragment cache */
#define PFFRAG_DROP		0x0004	/* Drop all fragments */
#define BUFFER_FRAGMENTS(fr)	(!((fr)->fr_flags & PFFRAG_NOBUFFER))
	uint16_t	fr_max;		/* fragment data max */
	uint32_t	fr_timeout;
	uint16_t	fr_maxlen;	/* maximum length of single fragment */
	uint16_t	fr_entries;	/* Total number of pf_frent entries */
	TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
};

#define PF_MAX_FRENT_PER_FRAGMENT	64
struct pf_fragment_tag {
	uint16_t	ft_hdrlen;	/* header length of reassembled pkt */
	uint16_t	ft_extoff;	/* last extension header offset or 0 */
	uint16_t	ft_maxlen;	/* maximum fragment payload length */
	uint32_t	ft_id;		/* fragment id */
};
static struct mtx pf_frag_mtx;
#define PF_FRAG_LOCK()		mtx_lock(&pf_frag_mtx)
#define PF_FRAG_UNLOCK()	mtx_unlock(&pf_frag_mtx)
#define PF_FRAG_ASSERT()	mtx_assert(&pf_frag_mtx, MA_OWNED)
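
/*
 * Note that pf_frag_mtx is a single global mutex: it serializes access
 * to all of the per-VNET fragment queues and trees declared below.
 */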
VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */

static VNET_DEFINE(uma_zone_t, pf_frent_z);
#define	V_pf_frent_z	VNET(pf_frent_z)
static VNET_DEFINE(uma_zone_t, pf_frag_z);
#define	V_pf_frag_z	VNET(pf_frag_z)

TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
static VNET_DEFINE(struct pf_fragqueue, pf_fragqueue);
#define	V_pf_fragqueue	VNET(pf_fragqueue)
static VNET_DEFINE(struct pf_cachequeue, pf_cachequeue);
#define	V_pf_cachequeue	VNET(pf_cachequeue)
RB_HEAD(pf_frag_tree, pf_fragment);
static VNET_DEFINE(struct pf_frag_tree, pf_frag_tree);
#define	V_pf_frag_tree	VNET(pf_frag_tree)
static VNET_DEFINE(struct pf_frag_tree, pf_cache_tree);
#define	V_pf_cache_tree	VNET(pf_cache_tree)
static int		 pf_frag_compare(struct pf_fragment *,
			    struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static void	pf_flush_fragments(void);
static void	pf_free_fragment(struct pf_fragment *);
static void	pf_remove_fragment(struct pf_fragment *);
static int	pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
		    struct tcphdr *, int, sa_family_t);
static struct pf_frent *pf_create_fragment(u_short *);
static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
		    struct pf_frag_tree *tree);
static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
		    struct pf_frent *, u_short *);
static int	pf_isfull_fragment(struct pf_fragment *);
static struct mbuf *pf_join_fragment(struct pf_fragment *);
static void	pf_scrub_ip(struct mbuf **, uint32_t, uint8_t, uint8_t);
static int	pf_reassemble(struct mbuf **, struct ip *, int, u_short *);
static struct mbuf *pf_fragcache(struct mbuf **, struct ip *,
		    struct pf_fragment **, int, int, int *);
static int	pf_reassemble6(struct mbuf **, struct ip6_hdr *,
		    struct ip6_frag *, uint16_t, uint16_t, u_short *);
static void	pf_scrub_ip6(struct mbuf **, uint8_t);
#define	DPFPRINTF(x) do {				\
	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
		printf("%s: ", __func__);		\
		printf x ;				\
	}						\
} while(0)
static void
pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
{

	key->frc_src.v4 = ip->ip_src;
	key->frc_dst.v4 = ip->ip_dst;
	key->frc_af = AF_INET;
	key->frc_proto = ip->ip_p;
	key->frc_id = ip->ip_id;
}
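
/*
 * ip_id is copied above in network byte order on purpose:
 * pf_frag_compare() only needs a consistent ordering of keys, not
 * host byte order.
 */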
void
pf_normalize_init(void)
{

	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
	    sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");

	mtx_init(&pf_frag_mtx, "pf fragments", NULL, MTX_DEF);

	TAILQ_INIT(&V_pf_fragqueue);
	TAILQ_INIT(&V_pf_cachequeue);
}

void
pf_normalize_cleanup(void)
{

	uma_zdestroy(V_pf_state_scrub_z);
	uma_zdestroy(V_pf_frent_z);
	uma_zdestroy(V_pf_frag_z);

	mtx_destroy(&pf_frag_mtx);
}

static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id) != 0)
		return (diff);
	if ((diff = a->fr_proto - b->fr_proto) != 0)
		return (diff);
	if ((diff = a->fr_af - b->fr_af) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
		return (diff);
	return (0);
}
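
/*
 * The cheap integer comparisons in pf_frag_compare() run before the
 * full address comparison, so most RB-tree lookups can reject a node
 * early.
 */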
void
pf_purge_expired_fragments(void)
{
	struct pf_fragment	*frag;
	u_int32_t		 expire = time_uptime -
	    V_pf_default_rule.timeout[PFTM_FRAG];

	PF_FRAG_LOCK();
	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
		KASSERT((BUFFER_FRAGMENTS(frag)),
		    ("BUFFER_FRAGMENTS(frag) == 0: %s", __FUNCTION__));
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	while ((frag = TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue)) != NULL) {
		KASSERT((!BUFFER_FRAGMENTS(frag)),
		    ("BUFFER_FRAGMENTS(frag) != 0: %s", __FUNCTION__));
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
		KASSERT((TAILQ_EMPTY(&V_pf_cachequeue) ||
		    TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue) != frag),
		    ("!(TAILQ_EMPTY() || TAILQ_LAST() == frag): %s",
		    __FUNCTION__));
	}
	PF_FRAG_UNLOCK();
}
/*
 * Try to flush old fragments to make space for new ones
 */
static void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag, *cache;
	int			 goal;

	PF_FRAG_ASSERT();

	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
	DPFPRINTF(("trying to free %d frag entries\n", goal));
	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
		if (frag)
			pf_free_fragment(frag);
		cache = TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue);
		if (cache)
			pf_free_fragment(cache);
		if (frag == NULL && cache == NULL)
			break;
	}
}
/* Frees the fragments and all associated entries */
static void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent		*frent;

	PF_FRAG_ASSERT();

	/* Free all fragments */
	if (BUFFER_FRAGMENTS(frag)) {
		for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
		    frent = TAILQ_FIRST(&frag->fr_queue)) {
			TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

			m_freem(frent->fe_m);
			uma_zfree(V_pf_frent_z, frent);
		}
	} else {
		for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
		    frent = TAILQ_FIRST(&frag->fr_queue)) {
			TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

			KASSERT((TAILQ_EMPTY(&frag->fr_queue) ||
			    TAILQ_FIRST(&frag->fr_queue)->fe_off >
			    frent->fe_len),
			    ("! (TAILQ_EMPTY() || TAILQ_FIRST()->fe_off >"
			    " frent->fe_len): %s", __func__));

			uma_zfree(V_pf_frent_z, frent);
		}
	}

	pf_remove_fragment(frag);
}
static struct pf_fragment *
pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
{
	struct pf_fragment	*frag;

	PF_FRAG_ASSERT();

	frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
		frag->fr_timeout = time_uptime;
		if (BUFFER_FRAGMENTS(frag)) {
			TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
			TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
		} else {
			TAILQ_REMOVE(&V_pf_cachequeue, frag, frag_next);
			TAILQ_INSERT_HEAD(&V_pf_cachequeue, frag, frag_next);
		}
	}

	return (frag);
}

/* Removes a fragment from the fragment queue and frees the fragment */
static void
pf_remove_fragment(struct pf_fragment *frag)
{

	PF_FRAG_ASSERT();

	if (BUFFER_FRAGMENTS(frag)) {
		RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
		uma_zfree(V_pf_frag_z, frag);
	} else {
		RB_REMOVE(pf_frag_tree, &V_pf_cache_tree, frag);
		TAILQ_REMOVE(&V_pf_cachequeue, frag, frag_next);
		uma_zfree(V_pf_frag_z, frag);
	}
}

static struct pf_frent *
pf_create_fragment(u_short *reason)
{
	struct pf_frent *frent;

	PF_FRAG_ASSERT();

	frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
	if (frent == NULL) {
		pf_flush_fragments();
		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (NULL);
		}
	}

	return (frent);
}
static struct pf_fragment *
pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
    u_short *reason)
{
	struct pf_frent		*after, *next, *prev;
	struct pf_fragment	*frag;
	uint16_t		 total;

	PF_FRAG_ASSERT();

	/* No empty fragments. */
	if (frent->fe_len == 0) {
		DPFPRINTF(("bad fragment: len 0"));
		goto bad_fragment;
	}

	/* All fragments are 8 byte aligned. */
	if (frent->fe_mff && (frent->fe_len & 0x7)) {
		DPFPRINTF(("bad fragment: mff and len %d", frent->fe_len));
		goto bad_fragment;
	}

	/* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
	if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
		DPFPRINTF(("bad fragment: max packet %d",
		    frent->fe_off + frent->fe_len));
		goto bad_fragment;
	}

	DPFPRINTF((key->frc_af == AF_INET ?
	    "reass frag %d @ %d-%d" : "reass frag %#08x @ %d-%d",
	    key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));

	/* Fully buffer all of the fragments in this fragment queue. */
	frag = pf_find_fragment(key, &V_pf_frag_tree);

	/* Create a new reassembly queue for this packet. */
	if (frag == NULL) {
		frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (frag == NULL) {
			pf_flush_fragments();
			frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (frag == NULL) {
				REASON_SET(reason, PFRES_MEMORY);
				goto drop_fragment;
			}
		}

		*(struct pf_fragment_cmp *)frag = *key;
		frag->fr_timeout = time_uptime;
		frag->fr_maxlen = frent->fe_len;
		frag->fr_entries = 0;
		TAILQ_INIT(&frag->fr_queue);

		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);

		/* We do not have a previous fragment. */
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);

		return (frag);
	}

	if (frag->fr_entries >= PF_MAX_FRENT_PER_FRAGMENT)
		goto bad_fragment;

	KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY()->fr_queue"));

	/* Remember maximum fragment len for refragmentation. */
	if (frent->fe_len > frag->fr_maxlen)
		frag->fr_maxlen = frent->fe_len;

	/* Maximum data we have seen already. */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Non-terminal fragments must have the more-fragments flag set. */
	if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
		goto bad_fragment;

	/* Check if we saw the last fragment already. */
	if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
		if (frent->fe_off + frent->fe_len > total ||
		    (frent->fe_off + frent->fe_len == total && frent->fe_mff))
			goto bad_fragment;
	} else {
		if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
			goto bad_fragment;
	}

	/* Find a fragment after the current one. */
	prev = NULL;
	TAILQ_FOREACH(after, &frag->fr_queue, fr_next) {
		if (after->fe_off > frent->fe_off)
			break;
		prev = after;
	}

	KASSERT(prev != NULL || after != NULL,
	    ("prev != NULL || after != NULL"));

	if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
		uint16_t precut;

		precut = prev->fe_off + prev->fe_len - frent->fe_off;
		if (precut >= frent->fe_len)
			goto bad_fragment;
		DPFPRINTF(("overlap -%d", precut));
		m_adj(frent->fe_m, precut);
		frent->fe_off += precut;
		frent->fe_len -= precut;
	}
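
	/*
	 * Worked example: if prev covers bytes 0-23 and frent claims
	 * 16-39, precut = (0 + 24) - 16 = 8, so the first 8 payload
	 * bytes of frent are trimmed and it now covers 24-39; the data
	 * seen first wins.
	 */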
	for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
	    after = next) {
		uint16_t aftercut;

		aftercut = frent->fe_off + frent->fe_len - after->fe_off;
		DPFPRINTF(("adjust overlap %d", aftercut));
		if (aftercut < after->fe_len) {
			m_adj(after->fe_m, aftercut);
			after->fe_off += aftercut;
			after->fe_len -= aftercut;
			break;
		}

		/* This fragment is completely overlapped, lose it. */
		next = TAILQ_NEXT(after, fr_next);
		m_freem(after->fe_m);
		TAILQ_REMOVE(&frag->fr_queue, after, fr_next);
		uma_zfree(V_pf_frent_z, after);
	}

	if (prev == NULL)
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
	else
		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);

	frag->fr_entries++;

	return (frag);

bad_fragment:
	REASON_SET(reason, PFRES_FRAG);
drop_fragment:
	uma_zfree(V_pf_frent_z, frent);
	return (NULL);
}
static int
pf_isfull_fragment(struct pf_fragment *frag)
{
	struct pf_frent		*frent, *next;
	uint16_t		 off, total;

	/* Check if we are completely reassembled */
	if (TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff)
		return (0);

	/* Maximum data we have seen already */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Check if we have all the data */
	off = 0;
	for (frent = TAILQ_FIRST(&frag->fr_queue); frent; frent = next) {
		next = TAILQ_NEXT(frent, fr_next);

		off += frent->fe_len;
		if (off < total && (next == NULL || next->fe_off != off)) {
			DPFPRINTF(("missing fragment at %d, next %d, total %d",
			    off, next == NULL ? -1 : next->fe_off, total));
			return (0);
		}
	}

	DPFPRINTF(("%d < %d?", off, total));
	if (off < total)
		return (0);

	KASSERT(off == total, ("off == total"));

	return (1);
}
static struct mbuf *
pf_join_fragment(struct pf_fragment *frag)
{
	struct mbuf		*m, *m2;
	struct pf_frent		*frent, *next;

	frent = TAILQ_FIRST(&frag->fr_queue);
	next = TAILQ_NEXT(frent, fr_next);

	m = frent->fe_m;
	m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
	uma_zfree(V_pf_frent_z, frent);
	for (frent = next; frent != NULL; frent = next) {
		next = TAILQ_NEXT(frent, fr_next);

		m2 = frent->fe_m;
		/* Strip off ip header. */
		m_adj(m2, frent->fe_hdrlen);
		/* Strip off any trailing bytes; m_adj() with a negative
		 * length trims from the tail of the chain. */
		m_adj(m2, frent->fe_len - m2->m_pkthdr.len);

		uma_zfree(V_pf_frent_z, frent);
		m_cat(m, m2);
	}

	/* Remove from fragment queue. */
	pf_remove_fragment(frag);

	return (m);
}
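
/*
 * Calling convention for the reassembly functions below: PF_PASS with
 * *m0 == NULL means the fragment was swallowed into the queue, PF_PASS
 * with *m0 != NULL means reassembly completed, and on PF_DROP a valid
 * *m0 is kept because pf_test() expects an mbuf it can free.
 */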
static int
pf_reassemble(struct mbuf **m0, struct ip *ip, int dir, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	uint16_t		 total, hdrlen;

	/* Get an entry for the fragment queue */
	if ((frent = pf_create_fragment(reason)) == NULL)
		return (PF_DROP);

	frent->fe_m = m;
	frent->fe_hdrlen = ip->ip_hl << 2;
	frent->fe_extoff = 0;
	frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
	frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	frent->fe_mff = ntohs(ip->ip_off) & IP_MF;

	pf_ip2key(ip, dir, &key);

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
		return (PF_DROP);

	/* The mbuf is part of the fragment entry, no direct free or access */
	m = *m0 = NULL;

	if (!pf_isfull_fragment(frag))
		return (PF_PASS);  /* drop because *m0 is NULL, no error */

	/* We have all the data */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen;

	m = *m0 = pf_join_fragment(frag);

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	ip = mtod(m, struct ip *);
	ip->ip_len = htons(hdrlen + total);
	ip->ip_off &= ~(IP_MF|IP_OFFMASK);

	if (hdrlen + total > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d", total));
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
	return (PF_PASS);
}
static int
pf_reassemble6(struct mbuf **m0, struct ip6_hdr *ip6, struct ip6_frag *fraghdr,
    uint16_t hdrlen, uint16_t extoff, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	struct m_tag		*mtag;
	struct pf_fragment_tag	*ftag;
	int			 off;
	uint32_t		 frag_id;
	uint16_t		 total, maxlen;
	uint8_t			 proto;

	PF_FRAG_LOCK();

	/* Get an entry for the fragment queue. */
	if ((frent = pf_create_fragment(reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	frent->fe_m = m;
	frent->fe_hdrlen = hdrlen;
	frent->fe_extoff = extoff;
	frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
	frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
	frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;

	key.frc_src.v6 = ip6->ip6_src;
	key.frc_dst.v6 = ip6->ip6_dst;
	key.frc_af = AF_INET6;
	/* Only the first fragment's protocol is relevant. */
	key.frc_proto = 0;
	key.frc_id = fraghdr->ip6f_ident;

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	/* The mbuf is part of the fragment entry, no direct free or access. */
	m = *m0 = NULL;

	if (!pf_isfull_fragment(frag)) {
		PF_FRAG_UNLOCK();
		return (PF_PASS);  /* Drop because *m0 is NULL, no error. */
	}

	/* We have all the data. */
	extoff = frent->fe_extoff;
	maxlen = frag->fr_maxlen;
	frag_id = frag->fr_id;
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);

	m = *m0 = pf_join_fragment(frag);

	PF_FRAG_UNLOCK();

	/* Take protocol from first fragment header. */
	m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
	KASSERT(m, ("%s: short mbuf chain", __func__));
	proto = *(mtod(m, caddr_t) + off);
	m = *m0;

	/* Delete frag6 header */
	if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
		goto fail;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	if ((mtag = m_tag_get(PF_REASSEMBLED, sizeof(struct pf_fragment_tag),
	    M_NOWAIT)) == NULL)
		goto fail;
	ftag = (struct pf_fragment_tag *)(mtag + 1);
	ftag->ft_hdrlen = hdrlen;
	ftag->ft_extoff = extoff;
	ftag->ft_maxlen = maxlen;
	ftag->ft_id = frag_id;
	m_tag_prepend(m, mtag);

	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);

	if (extoff) {
		/* Write protocol into next field of last extension header. */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT(m, ("%s: short mbuf chain", __func__));
		*(mtod(m, char *) + off) = proto;
		m = *m0;
	} else
		ip6->ip6_nxt = proto;

	if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d", total));
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)", m, ntohs(ip6->ip6_plen)));
	return (PF_PASS);

fail:
	REASON_SET(reason, PFRES_MEMORY);
	/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
	return (PF_DROP);
}
static struct mbuf *
pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff,
    int drop, int *nomem)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frp, *fra, *cur = NULL;
	int			 ip_len = ntohs(h->ip_len) - (h->ip_hl << 2);
	u_int16_t		 off = ntohs(h->ip_off) << 3;
	u_int16_t		 max = ip_len + off;
	int			 hosed = 0;

	PF_FRAG_ASSERT();
	KASSERT((*frag == NULL || !BUFFER_FRAGMENTS(*frag)),
	    ("!(*frag == NULL || !BUFFER_FRAGMENTS(*frag)): %s", __FUNCTION__));

	/* Create a new range queue for this packet */
	if (*frag == NULL) {
		*frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (*frag == NULL) {
			pf_flush_fragments();
			*frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (*frag == NULL)
				goto no_mem;
		}

		/* Get an entry for the queue */
		cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (cur == NULL) {
			uma_zfree(V_pf_frag_z, *frag);
			*frag = NULL;
			goto no_mem;
		}

		(*frag)->fr_flags = PFFRAG_NOBUFFER;
		(*frag)->fr_max = 0;
		(*frag)->fr_src.v4 = h->ip_src;
		(*frag)->fr_dst.v4 = h->ip_dst;
		(*frag)->fr_af = AF_INET;
		(*frag)->fr_proto = h->ip_p;
		(*frag)->fr_id = h->ip_id;
		(*frag)->fr_timeout = time_uptime;

		cur->fe_off = off;
		cur->fe_len = max;	/* TODO: fe_len = max - off ? */
		TAILQ_INIT(&(*frag)->fr_queue);
		TAILQ_INSERT_HEAD(&(*frag)->fr_queue, cur, fr_next);

		RB_INSERT(pf_frag_tree, &V_pf_cache_tree, *frag);
		TAILQ_INSERT_HEAD(&V_pf_cachequeue, *frag, frag_next);

		DPFPRINTF(("fragcache[%d]: new %d-%d\n", h->ip_id, off, max));

		goto pass;
	}

	/*
	 * Find a fragment after the current one:
	 *  - off contains the real shifted offset.
	 */
	frp = NULL;
	TAILQ_FOREACH(fra, &(*frag)->fr_queue, fr_next) {
		if (fra->fe_off > off)
			break;
		frp = fra;
	}

	KASSERT((frp != NULL || fra != NULL),
	    ("!(frp != NULL || fra != NULL): %s", __FUNCTION__));

	if (frp != NULL) {
		int	precut;

		precut = frp->fe_len - off;
		if (precut >= ip_len) {
			/* Fragment is entirely a duplicate */
			DPFPRINTF(("fragcache[%d]: dead (%d-%d) %d-%d\n",
			    h->ip_id, frp->fe_off, frp->fe_len, off, max));
			goto drop_fragment;
		}
		if (precut == 0) {
			/* They are adjacent.  Fixup cache entry */
			DPFPRINTF(("fragcache[%d]: adjacent (%d-%d) %d-%d\n",
			    h->ip_id, frp->fe_off, frp->fe_len, off, max));
			frp->fe_len = max;
		} else if (precut > 0) {
			/*
			 * The first part of this payload overlaps with a
			 * fragment that has already been passed.
			 * Need to trim off the first part of the payload.
			 * But to do so easily, we need to create another
			 * mbuf to throw the original header into.
			 */
			DPFPRINTF(("fragcache[%d]: chop %d (%d-%d) %d-%d\n",
			    h->ip_id, precut, frp->fe_off, frp->fe_len, off,
			    max));

			off += precut;
			max -= precut;
			/* Update the previous frag to encompass this one */
			frp->fe_len = max;

			if (!drop) {
				/* XXX Optimization opportunity
				 * This is a very heavy way to trim the payload.
				 * we could do it much faster by diddling mbuf
				 * internals but that would be even less legible
				 * than this mbuf magic.  For my next trick,
				 * I'll pull a rabbit out of my laptop.
				 */
				*m0 = m_dup(m, M_NOWAIT);
				if (*m0 == NULL)
					goto no_mem;
				/* From KAME Project : We have missed this! */
				m_adj(*m0, (h->ip_hl << 2) -
				    (*m0)->m_pkthdr.len);

				KASSERT(((*m0)->m_next == NULL),
				    ("(*m0)->m_next != NULL: %s",
				    __FUNCTION__));
				m_adj(m, precut + (h->ip_hl << 2));
				m_cat(*m0, m);
				m = *m0;

				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;
					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}

				h = mtod(m, struct ip *);

				KASSERT(((int)m->m_len ==
				    ntohs(h->ip_len) - precut),
				    ("m->m_len != ntohs(h->ip_len) - precut: %s",
				    __FUNCTION__));
				h->ip_off = htons(ntohs(h->ip_off) +
				    (precut >> 3));
				h->ip_len = htons(ntohs(h->ip_len) - precut);
			} else {
				hosed++;
			}
		} else {
			/* There is a gap between fragments */
			DPFPRINTF(("fragcache[%d]: gap %d (%d-%d) %d-%d\n",
			    h->ip_id, -precut, frp->fe_off, frp->fe_len, off,
			    max));

			cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
			if (cur == NULL)
				goto no_mem;

			cur->fe_off = off;
			cur->fe_len = max;
			TAILQ_INSERT_AFTER(&(*frag)->fr_queue, frp, cur,
			    fr_next);
		}
	}

	if (fra != NULL) {
		int	aftercut;
		int	merge = 0;

		aftercut = max - fra->fe_off;
		if (aftercut == 0) {
			/* Adjacent fragments */
			DPFPRINTF(("fragcache[%d]: adjacent %d-%d (%d-%d)\n",
			    h->ip_id, off, max, fra->fe_off, fra->fe_len));
			fra->fe_off = off;
			merge = 1;
		} else if (aftercut > 0) {
			/* Need to chop off the tail of this fragment */
			DPFPRINTF(("fragcache[%d]: chop %d %d-%d (%d-%d)\n",
			    h->ip_id, aftercut, off, max, fra->fe_off,
			    fra->fe_len));
			fra->fe_off = off;
			max -= aftercut;

			merge = 1;

			if (!drop) {
				m_adj(m, -aftercut);
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;
					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}
				h = mtod(m, struct ip *);
				KASSERT(((int)m->m_len == ntohs(h->ip_len) - aftercut),
				    ("m->m_len != ntohs(h->ip_len) - aftercut: %s",
				    __FUNCTION__));
				h->ip_len = htons(ntohs(h->ip_len) - aftercut);
			} else {
				hosed++;
			}
		} else if (frp == NULL) {
			/* There is a gap between fragments */
			DPFPRINTF(("fragcache[%d]: gap %d %d-%d (%d-%d)\n",
			    h->ip_id, -aftercut, off, max, fra->fe_off,
			    fra->fe_len));

			cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
			if (cur == NULL)
				goto no_mem;

			cur->fe_off = off;
			cur->fe_len = max;
			TAILQ_INSERT_HEAD(&(*frag)->fr_queue, cur, fr_next);
		}

		/* Need to glue together two separate fragment descriptors */
		if (merge) {
			if (cur && fra->fe_off <= cur->fe_len) {
				/* Need to merge in a previous 'cur' */
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, cur->fe_off, cur->fe_len, off,
				    max, fra->fe_off, fra->fe_len));
				fra->fe_off = cur->fe_off;
				TAILQ_REMOVE(&(*frag)->fr_queue, cur, fr_next);
				uma_zfree(V_pf_frent_z, cur);
				cur = NULL;
			} else if (frp && fra->fe_off <= frp->fe_len) {
				/* Need to merge in a modified 'frp' */
				KASSERT((cur == NULL), ("cur != NULL: %s",
				    __FUNCTION__));
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, frp->fe_off, frp->fe_len, off,
				    max, fra->fe_off, fra->fe_len));
				fra->fe_off = frp->fe_off;
				TAILQ_REMOVE(&(*frag)->fr_queue, frp, fr_next);
				uma_zfree(V_pf_frent_z, frp);
				frp = NULL;
			}
		}
	}

	if (hosed) {
		/*
		 * We must keep tracking the overall fragment even when
		 * we're going to drop it anyway so that we know when to
		 * free the overall descriptor.  Thus we drop the frag late.
		 */
		goto drop_fragment;
	}

pass:
	/* Update maximum data size */
	if ((*frag)->fr_max < max)
		(*frag)->fr_max = max;

	/* This is the last segment */
	if (!mff)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	/* Check if we are completely reassembled */
	if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
	    TAILQ_FIRST(&(*frag)->fr_queue)->fe_off == 0 &&
	    TAILQ_FIRST(&(*frag)->fr_queue)->fe_len == (*frag)->fr_max) {
		/* Remove from fragment queue */
		DPFPRINTF(("fragcache[%d]: done 0-%d\n", h->ip_id,
		    (*frag)->fr_max));
		pf_free_fragment(*frag);
		*frag = NULL;
	}

	return (m);

no_mem:
	*nomem = 1;

	/* Still need to pay attention to !IP_MF */
	if (!mff && *frag != NULL)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	m_freem(m);
	return (NULL);

drop_fragment:

	/* Still need to pay attention to !IP_MF */
	if (!mff && *frag != NULL)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	if (drop) {
		/* This fragment has been deemed bad.  Don't reass */
		if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
			DPFPRINTF(("fragcache[%d]: dropping overall fragment\n",
			    h->ip_id));
		(*frag)->fr_flags |= PFFRAG_DROP;
	}

	m_freem(m);
	return (NULL);
}
int
pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag)
{
	struct mbuf		*m = *m0, *t;
	struct pf_fragment_tag	*ftag = (struct pf_fragment_tag *)(mtag + 1);
	struct pf_pdesc		 pd;
	uint32_t		 frag_id;
	uint16_t		 hdrlen, extoff, maxlen;
	uint8_t			 proto;
	int			 error, action;

	hdrlen = ftag->ft_hdrlen;
	extoff = ftag->ft_extoff;
	maxlen = ftag->ft_maxlen;
	frag_id = ftag->ft_id;
	m_tag_delete(m, mtag);

	if (extoff) {
		int off;

		/* Use protocol from next field of last extension header */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
		proto = *(mtod(m, caddr_t) + off);
		*(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
		m = *m0;
	} else {
		struct ip6_hdr *hdr;

		hdr = mtod(m, struct ip6_hdr *);
		proto = hdr->ip6_nxt;
		hdr->ip6_nxt = IPPROTO_FRAGMENT;
	}

	/* The MTU must be a multiple of 8 bytes, or we risk doing the
	 * fragmentation wrong. */
	maxlen = maxlen & ~7;

	/*
	 * Maxlen may be less than 8 if there was only a single
	 * fragment.  As it was fragmented before, add a fragment
	 * header also for a single fragment.  If total or maxlen
	 * is less than 8, ip6_fragment() will return EMSGSIZE and
	 * we drop the packet.
	 */
	error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
	m = (*m0)->m_nextpkt;
	(*m0)->m_nextpkt = NULL;
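
	/*
	 * ip6_fragment() leaves the generated fragments chained through
	 * m_nextpkt of the original mbuf; the loop below unlinks them
	 * one by one and marks each M_SKIP_FIREWALL so the refragmented
	 * packets are not run through the normalizer again.
	 */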
	if (error == 0) {
		/* The first mbuf contains the unfragmented packet. */
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else {
		/* Drop expects an mbuf to free. */
		DPFPRINTF(("refragment error %d", error));
		action = PF_DROP;
	}
	for (t = m; m; m = t) {
		t = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m->m_flags |= M_SKIP_FIREWALL;
		memset(&pd, 0, sizeof(pd));
		pd.pf_mtag = pf_find_mtag(m);
		if (error == 0)
			ip6_forward(m, 0);
		else
			m_freem(m);
	}

	return (action);
}
int
pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
    struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct pf_fragment	*frag = NULL;
	struct pf_fragment_cmp	 key;
	struct ip		*h = mtod(m, struct ip *);
	int			 mff = (ntohs(h->ip_off) & IP_MF);
	int			 hlen = h->ip_hl << 2;
	u_int16_t		 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t		 max;
	int			 ip_len;
	int			 ip_off;
	int			 tag = -1;
	int			 verdict;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->match_tag && !pf_match_tag(m, r, &tag,
		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
			r = TAILQ_NEXT(r, entries);
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip))
		goto drop;

	if (hlen > ntohs(h->ip_len))
		goto drop;

	/* Clear IP_DF if the rule uses the no-df option */
	if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}
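
	/*
	 * pf_cksum_fixup() applies an incremental (RFC 1624 style)
	 * update with the old and new 16-bit words, so a one-field edit
	 * does not force a full header checksum recomputation.
	 */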
	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now. Don't allow fragments
	 * with IP_DF to enter the cache. If the flag was cleared by
	 * no-df above, fine. Otherwise drop it.
	 */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;
	ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}
	max = fragoff + ip_len;

	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {

		/* Fully buffer all of the fragments */
		PF_FRAG_LOCK();

		pf_ip2key(h, dir, &key);
		frag = pf_find_fragment(&key, &V_pf_frag_tree);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max)
			goto bad;

		/* Might return a completely reassembled mbuf, or NULL */
		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
		verdict = pf_reassemble(m0, h, dir, reason);
		PF_FRAG_UNLOCK();

		if (verdict != PF_PASS)
			return (PF_DROP);

		m = *m0;
		if (m == NULL)
			return (PF_DROP);

		/* use mtag from concatenated mbuf chain */
		pd->pf_mtag = pf_find_mtag(m);
		if (pd->pf_mtag == NULL) {
			printf("%s: pf_find_mtag returned NULL(1)\n", __func__);
			if ((pd->pf_mtag = pf_get_mtag(m)) == NULL) {
				m_freem(m);
				*m0 = NULL;
				goto no_mem;
			}
		}

		h = mtod(m, struct ip *);
	} else {
		/* non-buffering fragment cache (drops or masks overlaps) */
		int nomem = 0;

		if (dir == PF_OUT && pd->pf_mtag->flags & PF_TAG_FRAGCACHE) {
			/*
			 * Already passed the fragment cache in the
			 * input direction. If we continued, it would
			 * appear to be a dup and would be dropped.
			 */
			goto fragment_pass;
		}

		PF_FRAG_LOCK();
		pf_ip2key(h, dir, &key);
		frag = pf_find_fragment(&key, &V_pf_cache_tree);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max) {
			if (r->rule_flag & PFRULE_FRAGDROP)
				frag->fr_flags |= PFFRAG_DROP;
			goto bad;
		}

		*m0 = m = pf_fragcache(m0, h, &frag, mff,
		    (r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);
		PF_FRAG_UNLOCK();
		if (m == NULL) {
			if (nomem)
				goto no_mem;
			goto drop;
		}

		/* use mtag from copied and trimmed mbuf chain */
		pd->pf_mtag = pf_find_mtag(m);
		if (pd->pf_mtag == NULL) {
			printf("%s: pf_find_mtag returned NULL(2)\n", __func__);
			if ((pd->pf_mtag = pf_get_mtag(m)) == NULL) {
				m_freem(m);
				*m0 = NULL;
				goto no_mem;
			}
		}

		if (dir == PF_IN)
			pd->pf_mtag->flags |= PF_TAG_FRAGCACHE;

		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
			goto drop;

		goto fragment_pass;
	}

no_fragment:
	/* At this point, only IP_DF is allowed in ip_off */
	if (h->ip_off & ~htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* not missing a return here */

fragment_pass:
	pf_scrub_ip(&m, r->rule_flag, r->min_ttl, r->set_tos);

	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0)
		pd->flags |= PFDESC_IP_REAS;
	return (PF_PASS);

no_mem:
	REASON_SET(reason, PFRES_MEMORY);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

bad:
	DPFPRINTF(("dropping bad fragment\n"));

	/* Free associated fragments */
	if (frag != NULL) {
		pf_free_fragment(frag);
		PF_FRAG_UNLOCK();
	}

	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
		    1);

	return (PF_DROP);
}
int
pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
    u_short *reason, struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
	int			 extoff;
	int			 off;
	struct ip6_ext		 ext;
	struct ip6_opt		 opt;
	struct ip6_opt_jumbo	 jumbo;
	struct ip6_frag		 frag;
	u_int32_t		 jumbolen = 0, plen;
	int			 optend;
	int			 ooff;
	u_int8_t		 proto;
	int			 terminal;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
		else if (r->proto && r->proto != h->ip6_nxt)
			r = r->skip[PF_SKIP_PROTO].ptr;
#endif
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip6_src, AF_INET6,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip6_dst, AF_INET6,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
		goto drop;

	extoff = 0;
	off = sizeof(struct ip6_hdr);
	proto = h->ip6_nxt;
	terminal = 0;
	do {
		switch (proto) {
		case IPPROTO_FRAGMENT:
			goto fragment;
			break;
		case IPPROTO_AH:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			extoff = off;
			if (proto == IPPROTO_AH)
				off += (ext.ip6e_len + 2) * 4;
			else
				off += (ext.ip6e_len + 1) * 8;
			proto = ext.ip6e_nxt;
			break;
		case IPPROTO_HOPOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			extoff = off;
			optend = off + (ext.ip6e_len + 1) * 8;
			ooff = off + sizeof(ext);
			do {
				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
				    sizeof(opt.ip6o_type), NULL, NULL,
				    AF_INET6))
					goto shortpkt;
				if (opt.ip6o_type == IP6OPT_PAD1) {
					ooff++;
					continue;
				}
				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
				    NULL, NULL, AF_INET6))
					goto shortpkt;
				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
					goto drop;
				switch (opt.ip6o_type) {
				case IP6OPT_JUMBO:
					if (h->ip6_plen != 0)
						goto drop;
					if (!pf_pull_hdr(m, ooff, &jumbo,
					    sizeof(jumbo), NULL, NULL,
					    AF_INET6))
						goto shortpkt;
					memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
					    sizeof(jumbolen));
					jumbolen = ntohl(jumbolen);
					if (jumbolen <= IPV6_MAXPACKET)
						goto drop;
					if (sizeof(struct ip6_hdr) + jumbolen !=
					    m->m_pkthdr.len)
						goto drop;
					break;
				default:
					break;
				}
				ooff += sizeof(opt) + opt.ip6o_len;
			} while (ooff < optend);
			off = optend;
			proto = ext.ip6e_nxt;
			break;
		default:
			terminal = 1;
			break;
		}
	} while (!terminal);
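
	/*
	 * The two scaling formulas in the walk above come from the
	 * extension header formats: the AH payload length counts 32-bit
	 * words minus two, while the other extension headers count
	 * 8-byte units beyond the first eight bytes.
	 */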
	/* jumbo payload option must be present, or plen > 0 */
	if (ntohs(h->ip6_plen) == 0)
		plen = jumbolen;
	else
		plen = ntohs(h->ip6_plen);
	if (plen == 0)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	pf_scrub_ip6(&m, r->min_ttl);

	return (PF_PASS);

fragment:
	/* Jumbo payload packets cannot be fragmented. */
	plen = ntohs(h->ip6_plen);
	if (plen == 0 || jumbolen)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
		goto shortpkt;

	/* Offset now points to data portion. */
	off += sizeof(frag);

	/* Returns PF_DROP or *m0 is NULL or completely reassembled mbuf. */
	if (pf_reassemble6(m0, h, &frag, off, extoff, reason) != PF_PASS)
		return (PF_DROP);
	m = *m0;
	if (m == NULL)
		return (PF_PASS);

	pd->flags |= PFDESC_IP_REAS;
	return (PF_PASS);

shortpkt:
	REASON_SET(reason, PFRES_SHORT);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}
int
pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
	struct pf_rule	*r, *rm = NULL;
	struct tcphdr	*th = pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int8_t	 flags;
	sa_family_t	 af = pd->af;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, m, off, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (rm == NULL || rm->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = th->th_flags;
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			flags &= ~TH_FIN;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != th->th_flags || th->th_x2 != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		th->th_flags = flags;
		th->th_x2 = 0;
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, ov, nv, 0);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, th->th_urp,
		    0, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* Process options */
	if (r->max_mss && pf_normalize_tcpopt(r, m, th, off, pd->af))
		rewrite = 1;

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}
int
pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int32_t tsval, tsecr;
	u_int8_t hdr[60];
	u_int8_t *opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}

	/*
	 * All normalizations below are only begun if we see the start of
	 * the connections.  They must all set an enabled bit in pfss_flags
	 */
	if ((th->th_flags & TH_SYN) == 0)
		return (0);

	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod =
					    htonl(arc4random());

					/* note PFSS_PAWS not set yet */
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					src->scrub->pfss_tsval0 = ntohl(tsval);
					src->scrub->pfss_tsval = ntohl(tsval);
					src->scrub->pfss_tsecr = ntohl(tsecr);
					getmicrouptime(&src->scrub->pfss_last);
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}
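
/*
 * TCP options are (kind, length, data) triples; a timestamp option is
 * 10 bytes (kind 8, length 10, then 32-bit TSval and TSecr), which is
 * why the parsers above and below read the values at opt[2] and
 * opt[6].
 */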
void
pf_normalize_tcp_cleanup(struct pf_state *state)
{

	if (state->src.scrub)
		uma_zfree(V_pf_state_scrub_z, state->src.scrub);
	if (state->dst.scrub)
		uma_zfree(V_pf_state_scrub_z, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}
int
pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_state *state,
    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
{
	struct timeval	 uptime;
	u_int32_t	 tsval, tsecr;
	u_int		 tsval_from_last;
	u_int8_t	 hdr[60];
	u_int8_t	*opt;
	int		 copyback = 0;
	int		 got_ts = 0;

	KASSERT((src->scrub || dst->scrub),
	    ("%s: src->scrub && dst->scrub!", __func__));

	/*
	 * Enforce the minimum TTL seen for this connection.  Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/* Modulate the timestamps.  Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */

				if (got_ts) {
					/* Huh?  Multiple timestamps!? */
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						DPFPRINTF(("multiple TS??"));
						pf_print_state(state);
						printf("\n");
					}
					REASON_SET(reason, PFRES_TS);
					return (PF_DROP);
				}
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					if (tsval && src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsval = ntohl(tsval);
						pf_change_proto_a(m, &opt[2],
						    &th->th_sum,
						    htonl(tsval +
						    src->scrub->pfss_ts_mod),
						    0);
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					if (tsecr && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsecr = ntohl(tsecr)
						    - dst->scrub->pfss_ts_mod;
						pf_change_proto_a(m, &opt[6],
						    &th->th_sum, htonl(tsecr),
						    0);
						copyback = 1;
					}
					got_ts = 1;
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copyback the options, caller copies back header */
			*writeback = 1;
			m_copyback(m, off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}

	/*
	 * Must invalidate PAWS checks on connections idle for too long.
	 * The fastest allowed timestamp clock is 1ms.  That turns out to
	 * be about 24 days before it wraps.  XXX Right now our lowerbound
	 * TS echo check only works for the first 12 days of a connection
	 * when the TS has exhausted half its 32bit space
	 */
#define TS_MAX_IDLE	(24*24*60*60)
#define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */

	getmicrouptime(&uptime);
	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
	    time_uptime - state->creation > TS_MAX_CONN)) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("src idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}
	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("dst idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}

	if (got_ts && src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Validate that the timestamps are "in-window".
		 * RFC1323 describes TCP Timestamp options that allow
		 * measurement of RTT (round trip time) and PAWS
		 * (protection against wrapped sequence numbers).  PAWS
		 * gives us a set of rules for rejecting packets on
		 * long fat pipes (packets that were somehow delayed
		 * in transit longer than the time it took to send the
		 * full TCP sequence space of 4Gb).  We can use these
		 * rules and infer a few others that will let us treat
		 * the 32bit timestamp and the 32bit echoed timestamp
		 * as sequence numbers to prevent a blind attacker from
		 * inserting packets into a connection.
		 *
		 * RFC1323 tells us:
		 *  - The timestamp on this packet must be greater than
		 *    or equal to the last value echoed by the other
		 *    endpoint.  The RFC says those will be discarded
		 *    since it is a dup that has already been acked.
		 *    This gives us a lowerbound on the timestamp.
		 *        timestamp >= other last echoed timestamp
		 *  - The timestamp will be less than or equal to
		 *    the last timestamp plus the time between the
		 *    last packet and now.  The RFC defines the max
		 *    clock rate as 1ms.  We will allow clocks to be
		 *    up to 10% fast and will allow a total difference
		 *    of 30 seconds due to a route change.  And this
		 *    gives us an upperbound on the timestamp.
		 *        timestamp <= last timestamp + max ticks
		 *    We have to be careful here.  Windows will send an
		 *    initial timestamp of zero and then initialize it
		 *    to a random value after the 3whs; presumably to
		 *    avoid a DoS by having to call an expensive RNG
		 *    during a SYN flood.  Proof MS has at least one
		 *    good security geek.
		 *
		 *  - The TCP timestamp option must also echo the other
		 *    endpoints timestamp.  The timestamp echoed is the
		 *    one carried on the earliest unacknowledged segment
		 *    on the left edge of the sequence window.  The RFC
		 *    states that the host will reject any echoed
		 *    timestamps that were larger than any ever sent.
		 *    This gives us an upperbound on the TS echo.
		 *        tsecr <= largest_tsval
		 *  - The lowerbound on the TS echo is a little more
		 *    tricky to determine.  The other endpoint's echoed
		 *    values will not decrease.  But there may be
		 *    network conditions that re-order packets and
		 *    cause our view of them to decrease.  For now the
		 *    only lowerbound we can safely determine is that
		 *    the TS echo will never be less than the original
		 *    TS.  XXX There is probably a better lowerbound.
		 *    Remove TS_MAX_CONN with better lowerbound check.
		 *        tsecr >= other original TS
		 *
		 * It is also important to note that the fastest
		 * timestamp clock of 1ms will wrap its 32bit space in
		 * 24 days.  So we just disable TS checking after 24
		 * days of idle time.  We actually must use a 12d
		 * connection limit until we can come up with a better
		 * lowerbound to the TS echo check.
		 */
		struct timeval delta_ts;
		int ts_fudge;

		/*
		 * PFTM_TS_DIFF is how many seconds of leeway to allow
		 * a host's timestamp.  This can happen if the previous
		 * packet got delayed in transit for much longer than
		 * this packet.
		 */
		if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];

		/* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ	1100		/* RFC max TS freq of 1Khz + 10% skew */
#define TS_MICROSECS	1000000		/* microseconds per second */
		delta_ts = uptime;
		timevalsub(&delta_ts, &src->scrub->pfss_last);
		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);
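
		/*
		 * Example: with a 30 second fudge and a connection idle
		 * for 10.5 seconds, tsval_from_last = (10 + 30) * 1100 +
		 * 500000 / (1000000 / 1100) = 44000 + 550 ticks, the
		 * most a 1 kHz (+10% skew) peer clock may have advanced.
		 */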
		if ((src->state >= TCPS_ESTABLISHED &&
		    dst->state >= TCPS_ESTABLISHED) &&
		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
			/* Bad RFC1323 implementation or an insertion attack.
			 *
			 * - Solaris 2.6 and 2.7 are known to send another ACK
			 *   after the FIN,FIN|ACK,ACK closing that carries
			 *   an old timestamp.
			 */
			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
			    SEQ_GT(tsval, src->scrub->pfss_tsval +
			    tsval_from_last) ? '1' : ' ',
			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
			DPFPRINTF((" tsval: %u  tsecr: %u  +ticks: %u  "
			    "idle: %jus %lums\n",
			    tsval, tsecr, tsval_from_last,
			    (uintmax_t)delta_ts.tv_sec,
			    delta_ts.tv_usec / 1000));
			DPFPRINTF((" src->tsval: %u  tsecr: %u\n",
			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
			DPFPRINTF((" dst->tsval: %u  tsecr: %u  tsval0: %u"
			    "\n", dst->scrub->pfss_tsval,
			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}

		/* XXX I'd really like to require tsecr but it's optional */

	} else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
	    || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
	    src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Didn't send a timestamp.  Timestamps aren't really useful
		 * when:
		 * - connection opening or closing (often not even sent).
		 *   but we must not let an attacker put a FIN on a
		 *   data packet to sneak it through our ESTABLISHED check.
		 * - on a TCP reset.  RFC suggests not even looking at TS.
		 * - on an empty ACK.  The TS will not be echoed so it will
		 *   probably not help keep the RTT calculation in sync and
		 *   there isn't as much danger when the sequence numbers
		 *   got wrapped.  So some stacks don't include TS on empty
		 *   ACKs :-(
		 *
		 * To minimize the disruption to mostly RFC1323 conformant
		 * stacks, we will only require timestamps on data packets.
		 *
		 * And what do ya know, we cannot require timestamps on data
		 * packets.  There appear to be devices that do legitimate
		 * TCP connection hijacking.  There are HTTP devices that allow
		 * a 3whs (with timestamps) and then buffer the HTTP request.
		 * If the intermediate device has the HTTP response cache, it
		 * will spoof the response but not bother timestamping its
		 * packets.  So we can look for the presence of a timestamp in
		 * the first data packet and if there, require it in all future
		 * packets.
		 */

		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
			/*
			 * Hey!  Someone tried to sneak a packet in.  Or the
			 * stack changed its RFC1323 behavior?!?!
			 */
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				DPFPRINTF(("Did not receive expected RFC1323 "
				    "timestamp\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}
	}

	/*
	 * We will note if a host sends its data packets with or without
	 * timestamps.  And require all data packets to contain a timestamp
	 * if the first does.  PAWS implicitly requires that all data packets
	 * be timestamped.  But I think there are middle-man devices that
	 * hijack TCP streams immediately after the 3whs and don't timestamp
	 * their packets (seen in a WWW accelerator or cache).
	 */
	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
		if (got_ts)
			src->scrub->pfss_flags |= PFSS_DATA_TS;
		else {
			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
				/* Don't warn if other host rejected RFC1323 */
				DPFPRINTF(("Broken RFC1323 stack did not "
				    "timestamp data packet. Disabled PAWS "
				    "security.\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
		}
	}

	/*
	 * Update PAWS values
	 */
	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
		getmicrouptime(&src->scrub->pfss_last);
		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
			src->scrub->pfss_tsval = tsval;

		if (tsecr) {
			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_tsecr = tsecr;

			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
			    src->scrub->pfss_tsval0 == 0)) {
				/* tsval0 MUST be the lowest timestamp */
				src->scrub->pfss_tsval0 = tsval;
			}

			/* Only fully initialized after a TS gets echoed */
			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_flags |= PFSS_PAWS;
		}
	}

	/* I have a dream....  TCP segment reassembly.... */
	return (0);
}
static int
pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
    int off, sa_family_t af)
{
	u_int16_t	*mss;
	int		 thoff;
	int		 opt, cnt, optlen = 0;
	int		 rewrite = 0;
	u_char		 opts[TCP_MAXOLEN];
	u_char		*optp = opts;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);

	if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,
	    NULL, NULL, af))
		return (rewrite);

	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > r->max_mss) {
				th->th_sum = pf_proto_cksum_fixup(m,
				    th->th_sum, *mss, htons(r->max_mss), 0);
				*mss = htons(r->max_mss);
				rewrite = 1;
			}
			break;
		default:
			break;
		}
	}

	if (rewrite)
		m_copyback(m, off + sizeof(*th), thoff - sizeof(*th), opts);

	return (rewrite);
}
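
/*
 * The MSS clamping above implements the "max-mss" scrub option: a SYN
 * advertising, say, an MSS of 1460 against max-mss 1440 is rewritten
 * in place and the TCP checksum patched incrementally.
 */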
static void
pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
{
	struct mbuf		*m = *m0;
	struct ip		*h = mtod(m, struct ip *);

	/* Clear IP_DF if no-df was requested */
	if (flags & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (min_ttl && h->ip_ttl < min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}

	/* Enforce tos */
	if (flags & PFRULE_SET_TOS) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)h;
		h->ip_tos = tos;
		nv = *(u_int16_t *)h;

		h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
	}

	/* random-id, but not for fragments */
	if (flags & PFRULE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
		u_int16_t ip_id = h->ip_id;

		h->ip_id = ip_randomid();
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
	}
}
static void
pf_scrub_ip6(struct mbuf **m0, u_int8_t min_ttl)
{
	struct mbuf		*m = *m0;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (min_ttl && h->ip6_hlim < min_ttl)
		h->ip6_hlim = min_ttl;
}
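
/*
 * Unlike pf_scrub_ip(), no checksum fixup is needed here: the IPv6
 * header carries no checksum of its own.
 */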