 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>

#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>

#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>

#include <netinet/ip6.h>

        LIST_ENTRY(pf_frent) fr_next;

#define fr_ip   _u._frag._fr_ip
#define fr_m    _u._frag._fr_m
#define fr_off  _u._cache._fr_off
#define fr_end  _u._cache._fr_end

        RB_ENTRY(pf_fragment) fr_entry;
        TAILQ_ENTRY(pf_fragment) frag_next;
        struct in_addr  fr_src;
        struct in_addr  fr_dst;
        u_int8_t        fr_p;           /* protocol of this fragment */
        u_int8_t        fr_flags;       /* status flags */
#define PFFRAG_SEENLAST 0x0001          /* Seen the last fragment for this */
#define PFFRAG_NOBUFFER 0x0002          /* Non-buffering fragment cache */
#define PFFRAG_DROP     0x0004          /* Drop all fragments */
#define BUFFER_FRAGMENTS(fr)    (!((fr)->fr_flags & PFFRAG_NOBUFFER))
        u_int16_t       fr_id;          /* fragment id for reassemble */
        u_int16_t       fr_max;         /* fragment data max */

        LIST_HEAD(, pf_frent) fr_queue;

static struct mtx pf_frag_mtx;
#define PF_FRAG_LOCK()          mtx_lock(&pf_frag_mtx)
#define PF_FRAG_UNLOCK()        mtx_unlock(&pf_frag_mtx)
#define PF_FRAG_ASSERT()        mtx_assert(&pf_frag_mtx, MA_OWNED)

VNET_DEFINE(uma_zone_t, pf_state_scrub_z);      /* XXX: shared with pfsync */

static VNET_DEFINE(uma_zone_t, pf_frent_z);
#define V_pf_frent_z    VNET(pf_frent_z)
static VNET_DEFINE(uma_zone_t, pf_frag_z);
#define V_pf_frag_z     VNET(pf_frag_z)

TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
static VNET_DEFINE(struct pf_fragqueue, pf_fragqueue);
#define V_pf_fragqueue  VNET(pf_fragqueue)
static VNET_DEFINE(struct pf_cachequeue, pf_cachequeue);
#define V_pf_cachequeue VNET(pf_cachequeue)
RB_HEAD(pf_frag_tree, pf_fragment);
static VNET_DEFINE(struct pf_frag_tree, pf_frag_tree);
#define V_pf_frag_tree  VNET(pf_frag_tree)
static VNET_DEFINE(struct pf_frag_tree, pf_cache_tree);
#define V_pf_cache_tree VNET(pf_cache_tree)
static int               pf_frag_compare(struct pf_fragment *,
                            struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

/* Private prototypes */
static void              pf_free_fragment(struct pf_fragment *);
static void              pf_remove_fragment(struct pf_fragment *);
static int               pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
                            struct tcphdr *, int, sa_family_t);

static void              pf_ip2key(struct pf_fragment *, struct ip *);
static void              pf_scrub_ip(struct mbuf **, u_int32_t, u_int8_t,
static void              pf_flush_fragments(void);
static struct pf_fragment *pf_find_fragment(struct ip *, struct pf_frag_tree *);
static struct mbuf      *pf_reassemble(struct mbuf **, struct pf_fragment **,
                            struct pf_frent *, int);
static struct mbuf      *pf_fragcache(struct mbuf **, struct ip*,
                            struct pf_fragment **, int, int, int *);

static void              pf_scrub_ip6(struct mbuf **, u_int8_t);

#define DPFPRINTF(x) do {                               \
        if (V_pf_status.debug >= PF_DEBUG_MISC) {       \
                printf("%s: ", __func__);               \

pf_normalize_init(void)

        V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
        V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
        V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
            sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,

        V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
        V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
        uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
        uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");

        mtx_init(&pf_frag_mtx, "pf fragments", NULL, MTX_DEF);

        TAILQ_INIT(&V_pf_fragqueue);
        TAILQ_INIT(&V_pf_cachequeue);

pf_normalize_cleanup(void)

        uma_zdestroy(V_pf_state_scrub_z);
        uma_zdestroy(V_pf_frent_z);
        uma_zdestroy(V_pf_frag_z);

        mtx_destroy(&pf_frag_mtx);

pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)

        if ((diff = a->fr_id - b->fr_id))
        else if ((diff = a->fr_p - b->fr_p))
        else if (a->fr_src.s_addr < b->fr_src.s_addr)
        else if (a->fr_src.s_addr > b->fr_src.s_addr)
        else if (a->fr_dst.s_addr < b->fr_dst.s_addr)
        else if (a->fr_dst.s_addr > b->fr_dst.s_addr)
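/*
 * Editor's sketch (not part of the original source): the comparator above
 * orders fragments lexicographically by (id, proto, src, dst), which is
 * what RB_FIND relies on when looking up a key built by pf_ip2key().  A
 * self-contained model of the same ordering, with the elided return
 * statements spelled out:
 *
 *	int
 *	frag_cmp(const struct pf_fragment *a, const struct pf_fragment *b)
 *	{
 *		int diff;
 *
 *		if ((diff = a->fr_id - b->fr_id) != 0)
 *			return (diff);
 *		if ((diff = a->fr_p - b->fr_p) != 0)
 *			return (diff);
 *		if (a->fr_src.s_addr < b->fr_src.s_addr)
 *			return (-1);
 *		if (a->fr_src.s_addr > b->fr_src.s_addr)
 *			return (1);
 *		if (a->fr_dst.s_addr < b->fr_dst.s_addr)
 *			return (-1);
 *		if (a->fr_dst.s_addr > b->fr_dst.s_addr)
 *			return (1);
 *		return (0);
 *	}
 */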
pf_purge_expired_fragments(void)

        struct pf_fragment      *frag;
        u_int32_t                expire = time_uptime -
                                    V_pf_default_rule.timeout[PFTM_FRAG];

        while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
                KASSERT((BUFFER_FRAGMENTS(frag)),
                    ("BUFFER_FRAGMENTS(frag) == 0: %s", __FUNCTION__));
                if (frag->fr_timeout > expire)

                DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
                pf_free_fragment(frag);

        while ((frag = TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue)) != NULL) {
                KASSERT((!BUFFER_FRAGMENTS(frag)),
                    ("BUFFER_FRAGMENTS(frag) != 0: %s", __FUNCTION__));
                if (frag->fr_timeout > expire)

                DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
                pf_free_fragment(frag);
                KASSERT((TAILQ_EMPTY(&V_pf_cachequeue) ||
                    TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue) != frag),
                    ("!(TAILQ_EMPTY() || TAILQ_LAST() == frag): %s",

 * Try to flush old fragments to make space for new ones
pf_flush_fragments(void)

        struct pf_fragment      *frag, *cache;

        goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
        DPFPRINTF(("trying to free %d frag entries\n", goal));
        while (goal < uma_zone_get_cur(V_pf_frent_z)) {
                frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
                        pf_free_fragment(frag);
                cache = TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue);
                        pf_free_fragment(cache);
                if (frag == NULL && cache == NULL)
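/*
 * Editor's note (worked arithmetic, not original code): the goal above
 * frees roughly 10% of the allocated entries.  E.g. with 5000 frag
 * entries in use, goal = 5000 * 9 / 10 = 4500, so the loop evicts whole
 * fragments, oldest first from both queues, until at most 4500 entries
 * remain or both queues are empty.
 */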
/* Frees the fragments and all associated entries */
pf_free_fragment(struct pf_fragment *frag)

        struct pf_frent         *frent;

        /* Free all fragments */
        if (BUFFER_FRAGMENTS(frag)) {
                for (frent = LIST_FIRST(&frag->fr_queue); frent;
                    frent = LIST_FIRST(&frag->fr_queue)) {
                        LIST_REMOVE(frent, fr_next);

                        m_freem(frent->fr_m);
                        uma_zfree(V_pf_frent_z, frent);

                for (frent = LIST_FIRST(&frag->fr_queue); frent;
                    frent = LIST_FIRST(&frag->fr_queue)) {
                        LIST_REMOVE(frent, fr_next);

                        KASSERT((LIST_EMPTY(&frag->fr_queue) ||
                            LIST_FIRST(&frag->fr_queue)->fr_off >
                            ("! (LIST_EMPTY() || LIST_FIRST()->fr_off >"
                            " frent->fr_end): %s", __func__));

                        uma_zfree(V_pf_frent_z, frent);

        pf_remove_fragment(frag);

pf_ip2key(struct pf_fragment *key, struct ip *ip)

        key->fr_p = ip->ip_p;
        key->fr_id = ip->ip_id;
        key->fr_src.s_addr = ip->ip_src.s_addr;
        key->fr_dst.s_addr = ip->ip_dst.s_addr;

static struct pf_fragment *
pf_find_fragment(struct ip *ip, struct pf_frag_tree *tree)

        struct pf_fragment       key;
        struct pf_fragment      *frag;

        frag = RB_FIND(pf_frag_tree, tree, &key);

                /* XXX Are we sure we want to update the timeout? */
                frag->fr_timeout = time_uptime;
                if (BUFFER_FRAGMENTS(frag)) {
                        TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
                        TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);

                        TAILQ_REMOVE(&V_pf_cachequeue, frag, frag_next);
                        TAILQ_INSERT_HEAD(&V_pf_cachequeue, frag, frag_next);

/* Removes a fragment from the fragment queue and frees the fragment */
pf_remove_fragment(struct pf_fragment *frag)

        if (BUFFER_FRAGMENTS(frag)) {
                RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
                TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
                uma_zfree(V_pf_frag_z, frag);

                RB_REMOVE(pf_frag_tree, &V_pf_cache_tree, frag);
                TAILQ_REMOVE(&V_pf_cachequeue, frag, frag_next);
                uma_zfree(V_pf_frag_z, frag);

#define FR_IP_OFF(fr)   ((ntohs((fr)->fr_ip->ip_off) & IP_OFFMASK) << 3)
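/*
 * Editor's illustration (not original code): ip_off stores the fragment
 * offset in 8-byte units in its low 13 bits, alongside the IP_MF/IP_DF
 * flag bits, so FR_IP_OFF() recovers the byte offset.  For example, for
 * the second fragment of a packet carrying 1480 payload bytes per
 * fragment:
 *
 *	ip->ip_off = htons(IP_MF | 185);		// 185 * 8 == 1480
 *	off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;	// off == 1480
 */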
pf_reassemble(struct mbuf **m0, struct pf_fragment **frag,
    struct pf_frent *frent, int mff)

        struct mbuf     *m = *m0, *m2;
        struct pf_frent *frea, *next;
        struct pf_frent *frep = NULL;
        struct ip       *ip = frent->fr_ip;
        int              hlen = ip->ip_hl << 2;
        u_int16_t        off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
        u_int16_t        ip_len = ntohs(ip->ip_len) - ip->ip_hl * 4;
        u_int16_t        max = ip_len + off;

        KASSERT((*frag == NULL || BUFFER_FRAGMENTS(*frag)),
            ("! (*frag == NULL || BUFFER_FRAGMENTS(*frag)): %s", __FUNCTION__));

        /* Strip off ip header */

        /* Create a new reassembly queue for this packet */
                *frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
                        pf_flush_fragments();
                        *frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);

                (*frag)->fr_flags = 0;

                (*frag)->fr_src = frent->fr_ip->ip_src;
                (*frag)->fr_dst = frent->fr_ip->ip_dst;
                (*frag)->fr_p = frent->fr_ip->ip_p;
                (*frag)->fr_id = frent->fr_ip->ip_id;
                (*frag)->fr_timeout = time_uptime;
                LIST_INIT(&(*frag)->fr_queue);

                RB_INSERT(pf_frag_tree, &V_pf_frag_tree, *frag);
                TAILQ_INSERT_HEAD(&V_pf_fragqueue, *frag, frag_next);

                /* We do not have a previous fragment */

         * Find a fragment after the current one:
         *  - off contains the real shifted offset.
        LIST_FOREACH(frea, &(*frag)->fr_queue, fr_next) {
                if (FR_IP_OFF(frea) > off)

        KASSERT((frep != NULL || frea != NULL),
            ("!(frep != NULL || frea != NULL): %s", __FUNCTION__));

            FR_IP_OFF(frep) + ntohs(frep->fr_ip->ip_len) - frep->fr_ip->ip_hl *

                precut = FR_IP_OFF(frep) + ntohs(frep->fr_ip->ip_len) -
                    frep->fr_ip->ip_hl * 4 - off;
                if (precut >= ip_len)

                m_adj(frent->fr_m, precut);
                DPFPRINTF(("overlap -%d\n", precut));
                /* Enforce 8 byte boundaries */
                ip->ip_off = htons(ntohs(ip->ip_off) + (precut >> 3));
                off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;

                ip->ip_len = htons(ip_len);

        for (; frea != NULL && ip_len + off > FR_IP_OFF(frea);

                aftercut = ip_len + off - FR_IP_OFF(frea);
                DPFPRINTF(("adjust overlap %d\n", aftercut));
                if (aftercut < ntohs(frea->fr_ip->ip_len) - frea->fr_ip->ip_hl

                        frea->fr_ip->ip_len =
                            htons(ntohs(frea->fr_ip->ip_len) - aftercut);
                        frea->fr_ip->ip_off = htons(ntohs(frea->fr_ip->ip_off) +
                        m_adj(frea->fr_m, aftercut);

                /* This fragment is completely overlapped, lose it */
                next = LIST_NEXT(frea, fr_next);

                LIST_REMOVE(frea, fr_next);
                uma_zfree(V_pf_frent_z, frea);
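/*
 * Editor's worked example (not original code): suppose the queue holds a
 * fragment covering bytes 0-1479 and the new fragment claims 1472-2951
 * (off = 1472, ip_len = 1480).  Then
 *
 *	precut = FR_IP_OFF(frep) + payload(frep) - off
 *	       = 0 + 1480 - 1472 = 8
 *
 * so 8 bytes are trimmed from the front of the new mbuf and off becomes
 * 1480.  Conversely, when the new fragment runs past the start of a
 * queued one, aftercut = ip_len + off - FR_IP_OFF(frea) bytes are shaved
 * off the queued fragment, or the queued fragment is dropped entirely
 * when it is completely overlapped.
 */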
        /* Update maximum data size */
        if ((*frag)->fr_max < max)
                (*frag)->fr_max = max;
        /* This is the last segment */
                (*frag)->fr_flags |= PFFRAG_SEENLAST;

                LIST_INSERT_HEAD(&(*frag)->fr_queue, frent, fr_next);

                LIST_INSERT_AFTER(frep, frent, fr_next);

        /* Check if we are completely reassembled */
        if (!((*frag)->fr_flags & PFFRAG_SEENLAST))

        /* Check if we have all the data */
        for (frep = LIST_FIRST(&(*frag)->fr_queue); frep; frep = next) {
                next = LIST_NEXT(frep, fr_next);

                off += ntohs(frep->fr_ip->ip_len) - frep->fr_ip->ip_hl * 4;
                if (off < (*frag)->fr_max &&
                    (next == NULL || FR_IP_OFF(next) != off))

                        DPFPRINTF(("missing fragment at %d, next %d, max %d\n",
                            off, next == NULL ? -1 : FR_IP_OFF(next),

        DPFPRINTF(("%d < %d?\n", off, (*frag)->fr_max));
        if (off < (*frag)->fr_max)

        /* We have all the data */
        frent = LIST_FIRST(&(*frag)->fr_queue);
        KASSERT((frent != NULL), ("frent == NULL: %s", __FUNCTION__));
        if ((frent->fr_ip->ip_hl << 2) + off > IP_MAXPACKET) {
                DPFPRINTF(("drop: too big: %d\n", off));
                pf_free_fragment(*frag);

        next = LIST_NEXT(frent, fr_next);

        /* Magic from ip_input */

        uma_zfree(V_pf_frent_z, frent);
        for (frent = next; frent != NULL; frent = next) {
                next = LIST_NEXT(frent, fr_next);

                uma_zfree(V_pf_frent_z, frent);
                m->m_pkthdr.csum_flags &= m2->m_pkthdr.csum_flags;
                m->m_pkthdr.csum_data += m2->m_pkthdr.csum_data;

        while (m->m_pkthdr.csum_data & 0xffff0000)
                m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
                    (m->m_pkthdr.csum_data >> 16);
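/*
 * Editor's note (worked example, not original code): csum_data holds a
 * sum of 16-bit one's complement partial checksums, so the loop above
 * folds any accumulated carries back into the low 16 bits:
 *
 *	m->m_pkthdr.csum_data = 0x2fffe;	// hypothetical value
 *	// iteration 1: 0xfffe + 0x2 == 0x10000
 *	// iteration 2: 0x0000 + 0x1 == 0x1	-> loop terminates
 */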
        ip->ip_src = (*frag)->fr_src;
        ip->ip_dst = (*frag)->fr_dst;

        /* Remove from fragment queue */
        pf_remove_fragment(*frag);

        hlen = ip->ip_hl << 2;
        ip->ip_len = htons(off + hlen);

        /* some debugging cruft by sklower, below, will go away soon */
        /* XXX this should be done elsewhere */
        if (m->m_flags & M_PKTHDR) {
                for (m2 = m; m2; m2 = m2->m_next)
                m->m_pkthdr.len = plen;

        DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));

        /* Oops - fail safe - drop packet */
        uma_zfree(V_pf_frent_z, frent);

pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff,
    int drop, int *nomem)

        struct mbuf             *m = *m0;
        struct pf_frent         *frp, *fra, *cur = NULL;
        int                      ip_len = ntohs(h->ip_len) - (h->ip_hl << 2);
        u_int16_t                off = ntohs(h->ip_off) << 3;
        u_int16_t                max = ip_len + off;

        KASSERT((*frag == NULL || !BUFFER_FRAGMENTS(*frag)),
            ("!(*frag == NULL || !BUFFER_FRAGMENTS(*frag)): %s", __FUNCTION__));

        /* Create a new range queue for this packet */
                *frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
                        pf_flush_fragments();
                        *frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);

                /* Get an entry for the queue */
                cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
                        uma_zfree(V_pf_frag_z, *frag);

                (*frag)->fr_flags = PFFRAG_NOBUFFER;

                (*frag)->fr_src = h->ip_src;
                (*frag)->fr_dst = h->ip_dst;
                (*frag)->fr_p = h->ip_p;
                (*frag)->fr_id = h->ip_id;
                (*frag)->fr_timeout = time_uptime;

                LIST_INIT(&(*frag)->fr_queue);
                LIST_INSERT_HEAD(&(*frag)->fr_queue, cur, fr_next);

                RB_INSERT(pf_frag_tree, &V_pf_cache_tree, *frag);
                TAILQ_INSERT_HEAD(&V_pf_cachequeue, *frag, frag_next);

                DPFPRINTF(("fragcache[%d]: new %d-%d\n", h->ip_id, off, max));

         * Find a fragment after the current one:
         *  - off contains the real shifted offset.
        LIST_FOREACH(fra, &(*frag)->fr_queue, fr_next) {
                if (fra->fr_off > off)

        KASSERT((frp != NULL || fra != NULL),
            ("!(frp != NULL || fra != NULL): %s", __FUNCTION__));

                precut = frp->fr_end - off;
                if (precut >= ip_len) {
                        /* Fragment is entirely a duplicate */
                        DPFPRINTF(("fragcache[%d]: dead (%d-%d) %d-%d\n",
                            h->ip_id, frp->fr_off, frp->fr_end, off, max));

                        /* They are adjacent.  Fixup cache entry */
                        DPFPRINTF(("fragcache[%d]: adjacent (%d-%d) %d-%d\n",
                            h->ip_id, frp->fr_off, frp->fr_end, off, max));

                } else if (precut > 0) {
                        /* The first part of this payload overlaps with a
                         * fragment that has already been passed.
                         * Need to trim off the first part of the payload.
                         * But to do so easily, we need to create another
                         * mbuf to throw the original header into.

                        DPFPRINTF(("fragcache[%d]: chop %d (%d-%d) %d-%d\n",
                            h->ip_id, precut, frp->fr_off, frp->fr_end, off,

                        /* Update the previous frag to encompass this one */

                        /* XXX Optimization opportunity
                         * This is a very heavy way to trim the payload.
                         * we could do it much faster by diddling mbuf
                         * internals but that would be even less legible
                         * than this mbuf magic.  For my next trick,
                         * I'll pull a rabbit out of my laptop.
                        *m0 = m_dup(m, M_NOWAIT);

                                /* From KAME Project : We have missed this! */
                                m_adj(*m0, (h->ip_hl << 2) -
                                    (*m0)->m_pkthdr.len);

                                KASSERT(((*m0)->m_next == NULL),
                                    ("(*m0)->m_next != NULL: %s",
                                m_adj(m, precut + (h->ip_hl << 2));

                        if (m->m_flags & M_PKTHDR) {
                                for (t = m; t; t = t->m_next)
                                m->m_pkthdr.len = plen;

                        h = mtod(m, struct ip *);

                        KASSERT(((int)m->m_len ==
                            ntohs(h->ip_len) - precut),
                            ("m->m_len != ntohs(h->ip_len) - precut: %s",
                        h->ip_off = htons(ntohs(h->ip_off) +
                        h->ip_len = htons(ntohs(h->ip_len) - precut);

                        /* There is a gap between fragments */

                        DPFPRINTF(("fragcache[%d]: gap %d (%d-%d) %d-%d\n",
                            h->ip_id, -precut, frp->fr_off, frp->fr_end, off,

                        cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);

                        LIST_INSERT_AFTER(frp, cur, fr_next);

                aftercut = max - fra->fr_off;
                        /* Adjacent fragments */
                        DPFPRINTF(("fragcache[%d]: adjacent %d-%d (%d-%d)\n",
                            h->ip_id, off, max, fra->fr_off, fra->fr_end));

                } else if (aftercut > 0) {
                        /* Need to chop off the tail of this fragment */
                        DPFPRINTF(("fragcache[%d]: chop %d %d-%d (%d-%d)\n",
                            h->ip_id, aftercut, off, max, fra->fr_off,

                        if (m->m_flags & M_PKTHDR) {
                                for (t = m; t; t = t->m_next)
                                m->m_pkthdr.len = plen;

                        h = mtod(m, struct ip *);
                        KASSERT(((int)m->m_len == ntohs(h->ip_len) - aftercut),
                            ("m->m_len != ntohs(h->ip_len) - aftercut: %s",
                        h->ip_len = htons(ntohs(h->ip_len) - aftercut);

                } else if (frp == NULL) {
                        /* There is a gap between fragments */
                        DPFPRINTF(("fragcache[%d]: gap %d %d-%d (%d-%d)\n",
                            h->ip_id, -aftercut, off, max, fra->fr_off,

                        cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);

                        LIST_INSERT_BEFORE(fra, cur, fr_next);

                /* Need to glue together two separate fragment descriptors */

                        if (cur && fra->fr_off <= cur->fr_end) {
                                /* Need to merge in a previous 'cur' */
                                DPFPRINTF(("fragcache[%d]: adjacent(merge "
                                    "%d-%d) %d-%d (%d-%d)\n",
                                    h->ip_id, cur->fr_off, cur->fr_end, off,
                                    max, fra->fr_off, fra->fr_end));
                                fra->fr_off = cur->fr_off;
                                LIST_REMOVE(cur, fr_next);
                                uma_zfree(V_pf_frent_z, cur);

                        } else if (frp && fra->fr_off <= frp->fr_end) {
                                /* Need to merge in a modified 'frp' */
                                KASSERT((cur == NULL), ("cur != NULL: %s",
                                DPFPRINTF(("fragcache[%d]: adjacent(merge "
                                    "%d-%d) %d-%d (%d-%d)\n",
                                    h->ip_id, frp->fr_off, frp->fr_end, off,
                                    max, fra->fr_off, fra->fr_end));
                                fra->fr_off = frp->fr_off;
                                LIST_REMOVE(frp, fr_next);
                                uma_zfree(V_pf_frent_z, frp);

         * We must keep tracking the overall fragment even when
         * we're going to drop it anyway so that we know when to
         * free the overall descriptor.  Thus we drop the frag late.
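/*
 * Editor's worked example (not original code): the cache stores byte
 * ranges of payload already passed.  If it holds [0,1480) and
 * [2960,4440) and the new payload covers [1480,2960), the precut pass
 * extends the previous range to [0,2960); fra then starts exactly at
 * that range's end, so the two descriptors are glued into a single
 * [0,4440) range and the spare entry is freed.
 */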
        /* Update maximum data size */
        if ((*frag)->fr_max < max)
                (*frag)->fr_max = max;

        /* This is the last segment */
                (*frag)->fr_flags |= PFFRAG_SEENLAST;

        /* Check if we are completely reassembled */
        if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
            LIST_FIRST(&(*frag)->fr_queue)->fr_off == 0 &&
            LIST_FIRST(&(*frag)->fr_queue)->fr_end == (*frag)->fr_max) {
                /* Remove from fragment queue */
                DPFPRINTF(("fragcache[%d]: done 0-%d\n", h->ip_id,
                pf_free_fragment(*frag);

        /* Still need to pay attention to !IP_MF */
        if (!mff && *frag != NULL)
                (*frag)->fr_flags |= PFFRAG_SEENLAST;

        /* Still need to pay attention to !IP_MF */
        if (!mff && *frag != NULL)
                (*frag)->fr_flags |= PFFRAG_SEENLAST;

        /* This fragment has been deemed bad.  Don't reassemble it. */
        if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
                DPFPRINTF(("fragcache[%d]: dropping overall fragment\n",
        (*frag)->fr_flags |= PFFRAG_DROP;

pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,

        struct mbuf             *m = *m0;
        struct pf_frent         *frent;
        struct pf_fragment      *frag = NULL;
        struct ip               *h = mtod(m, struct ip *);
        int                      mff = (ntohs(h->ip_off) & IP_MF);
        int                      hlen = h->ip_hl << 2;
        u_int16_t                fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

        r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
                if (pfi_kif_match(r->kif, kif) == r->ifnot)
                        r = r->skip[PF_SKIP_IFP].ptr;
                else if (r->direction && r->direction != dir)
                        r = r->skip[PF_SKIP_DIR].ptr;
                else if (r->af && r->af != AF_INET)
                        r = r->skip[PF_SKIP_AF].ptr;
                else if (r->proto && r->proto != h->ip_p)
                        r = r->skip[PF_SKIP_PROTO].ptr;
                else if (PF_MISMATCHAW(&r->src.addr,
                    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
                    r->src.neg, kif, M_GETFIB(m)))
                        r = r->skip[PF_SKIP_SRC_ADDR].ptr;
                else if (PF_MISMATCHAW(&r->dst.addr,
                    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
                    r->dst.neg, NULL, M_GETFIB(m)))
                        r = r->skip[PF_SKIP_DST_ADDR].ptr;
                else if (r->match_tag && !pf_match_tag(m, r, &tag,
                    pd->pf_mtag ? pd->pf_mtag->tag : 0))
                        r = TAILQ_NEXT(r, entries);

        if (r == NULL || r->action == PF_NOSCRUB)

        r->packets[dir == PF_OUT]++;
        r->bytes[dir == PF_OUT] += pd->tot_len;

        /* Check for illegal packets */
        if (hlen < (int)sizeof(struct ip))

        if (hlen > ntohs(h->ip_len))

        /* Clear IP_DF if the rule uses the no-df option */
        if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
                u_int16_t ip_off = h->ip_off;

                h->ip_off &= htons(~IP_DF);
                h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
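/*
 * Editor's note (sketch, not original code): pf_cksum_fixup() applies an
 * incremental checksum update in the style of RFC 1624, folding only the
 * 16-bit word that changed into ip_sum instead of recomputing the whole
 * header checksum.  Here the old word is ip_off with IP_DF set and the
 * new word has it cleared, e.g.:
 *
 *	old ip_off = htons(IP_DF);	// 0x4000: DF set, offset 0
 *	new ip_off = 0;			// DF cleared
 */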
        /* We will need other tests here */
        if (!fragoff && !mff)

        /* We're dealing with a fragment now.  Don't allow fragments
         * with IP_DF to enter the cache.  If the flag was cleared by
         * no-df above, fine.  Otherwise drop it.
        if (h->ip_off & htons(IP_DF)) {
                DPFPRINTF(("IP_DF\n"));

        ip_len = ntohs(h->ip_len) - hlen;
        ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

        /* All fragments are 8 byte aligned */
        if (mff && (ip_len & 0x7)) {
                DPFPRINTF(("mff and %d\n", ip_len));

        /* Respect maximum length */
        if (fragoff + ip_len > IP_MAXPACKET) {
                DPFPRINTF(("max packet %d\n", fragoff + ip_len));

        max = fragoff + ip_len;

        if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {

                /* Fully buffer all of the fragments */

                frag = pf_find_fragment(h, &V_pf_frag_tree);

                /* Check if we saw the last fragment already */
                if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&

                /* Get an entry for the fragment queue */
                frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
                        REASON_SET(reason, PFRES_MEMORY);

                /* Might return a completely reassembled mbuf, or NULL */
                DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
                *m0 = m = pf_reassemble(m0, &frag, frent, mff);

                /* use mtag from concatenated mbuf chain */
                pd->pf_mtag = pf_find_mtag(m);

                if (pd->pf_mtag == NULL) {
                        printf("%s: pf_find_mtag returned NULL(1)\n", __func__);
                        if ((pd->pf_mtag = pf_get_mtag(m)) == NULL) {

                if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))

                h = mtod(m, struct ip *);

                /* non-buffering fragment cache (drops or masks overlaps) */

                if (dir == PF_OUT && pd->pf_mtag->flags & PF_TAG_FRAGCACHE) {
                         * Already passed the fragment cache in the
                         * input direction.  If we continued, it would
                         * appear to be a dup and would be dropped.

                frag = pf_find_fragment(h, &V_pf_cache_tree);

                /* Check if we saw the last fragment already */
                if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
                    max > frag->fr_max) {
                        if (r->rule_flag & PFRULE_FRAGDROP)
                                frag->fr_flags |= PFFRAG_DROP;

                *m0 = m = pf_fragcache(m0, h, &frag, mff,
                    (r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);

                /* use mtag from copied and trimmed mbuf chain */
                pd->pf_mtag = pf_find_mtag(m);

                if (pd->pf_mtag == NULL) {
                        printf("%s: pf_find_mtag returned NULL(2)\n", __func__);
                        if ((pd->pf_mtag = pf_get_mtag(m)) == NULL) {

                        pd->pf_mtag->flags |= PF_TAG_FRAGCACHE;

                if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))

        /* At this point, only IP_DF is allowed in ip_off */
        if (h->ip_off & ~htons(IP_DF)) {
                u_int16_t ip_off = h->ip_off;

                h->ip_off &= htons(IP_DF);
                h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);

        /* not missing a return here */

        pf_scrub_ip(&m, r->rule_flag, r->min_ttl, r->set_tos);

        if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0)
                pd->flags |= PFDESC_IP_REAS;

        REASON_SET(reason, PFRES_MEMORY);
        if (r != NULL && r->log)
                PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,

        REASON_SET(reason, PFRES_NORM);
        if (r != NULL && r->log)
                PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,

        DPFPRINTF(("dropping bad fragment\n"));

        /* Free associated fragments */
                pf_free_fragment(frag);

        REASON_SET(reason, PFRES_FRAG);
        if (r != NULL && r->log)
                PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,

pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
    u_short *reason, struct pf_pdesc *pd)

        struct mbuf             *m = *m0;
        struct ip6_hdr          *h = mtod(m, struct ip6_hdr *);
        struct ip6_opt_jumbo     jumbo;
        struct ip6_frag          frag;
        u_int32_t                jumbolen = 0, plen;
        u_int16_t                fragoff = 0;

        r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
                if (pfi_kif_match(r->kif, kif) == r->ifnot)
                        r = r->skip[PF_SKIP_IFP].ptr;
                else if (r->direction && r->direction != dir)
                        r = r->skip[PF_SKIP_DIR].ptr;
                else if (r->af && r->af != AF_INET6)
                        r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
                else if (r->proto && r->proto != h->ip6_nxt)
                        r = r->skip[PF_SKIP_PROTO].ptr;
                else if (PF_MISMATCHAW(&r->src.addr,
                    (struct pf_addr *)&h->ip6_src, AF_INET6,
                    r->src.neg, kif, M_GETFIB(m)))
                        r = r->skip[PF_SKIP_SRC_ADDR].ptr;
                else if (PF_MISMATCHAW(&r->dst.addr,
                    (struct pf_addr *)&h->ip6_dst, AF_INET6,
                    r->dst.neg, NULL, M_GETFIB(m)))
                        r = r->skip[PF_SKIP_DST_ADDR].ptr;

        if (r == NULL || r->action == PF_NOSCRUB)

        r->packets[dir == PF_OUT]++;
        r->bytes[dir == PF_OUT] += pd->tot_len;

        /* Check for illegal packets */
        if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)

        off = sizeof(struct ip6_hdr);

                case IPPROTO_FRAGMENT:

                case IPPROTO_ROUTING:
                case IPPROTO_DSTOPTS:
                        if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,

                        if (proto == IPPROTO_AH)
                                off += (ext.ip6e_len + 2) * 4;

                                off += (ext.ip6e_len + 1) * 8;
                        proto = ext.ip6e_nxt;
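/*
 * Editor's illustration (not original code): ip6e_len counts 8-byte
 * units excluding the first 8 bytes, except for AH, where it counts
 * 4-byte units excluding the first two.  So an AH header with
 * ip6e_len == 4 spans (4 + 2) * 4 = 24 bytes, while a routing header
 * with ip6e_len == 2 spans (2 + 1) * 8 = 24 bytes.
 */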
                case IPPROTO_HOPOPTS:
                        if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,

                        optend = off + (ext.ip6e_len + 1) * 8;
                        ooff = off + sizeof(ext);

                                if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
                                    sizeof(opt.ip6o_type), NULL, NULL,

                                if (opt.ip6o_type == IP6OPT_PAD1) {

                                if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
                                    NULL, NULL, AF_INET6))

                                if (ooff + sizeof(opt) + opt.ip6o_len > optend)

                                switch (opt.ip6o_type) {

                                        if (h->ip6_plen != 0)

                                        if (!pf_pull_hdr(m, ooff, &jumbo,
                                            sizeof(jumbo), NULL, NULL,

                                        memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
                                        jumbolen = ntohl(jumbolen);
                                        if (jumbolen <= IPV6_MAXPACKET)

                                        if (sizeof(struct ip6_hdr) + jumbolen !=

                                ooff += sizeof(opt) + opt.ip6o_len;
                        } while (ooff < optend);

                        proto = ext.ip6e_nxt;

        } while (!terminal);

        /* jumbo payload option must be present, or plen > 0 */
        if (ntohs(h->ip6_plen) == 0)

        plen = ntohs(h->ip6_plen);

        if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)

        pf_scrub_ip6(&m, r->min_ttl);

        if (ntohs(h->ip6_plen) == 0 || jumbolen)

        plen = ntohs(h->ip6_plen);

        if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))

        fragoff = ntohs(frag.ip6f_offlg & IP6F_OFF_MASK);
        if (fragoff + (plen - off - sizeof(frag)) > IPV6_MAXPACKET)

        /* do something about it */
        /* remember to set pd->flags |= PFDESC_IP_REAS */

        REASON_SET(reason, PFRES_SHORT);
        if (r != NULL && r->log)
                PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,

        REASON_SET(reason, PFRES_NORM);
        if (r != NULL && r->log)
                PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,

        REASON_SET(reason, PFRES_FRAG);
        if (r != NULL && r->log)
                PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,

pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)

        struct pf_rule  *r, *rm = NULL;
        struct tcphdr   *th = pd->hdr.tcp;
        sa_family_t      af = pd->af;

        r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
                if (pfi_kif_match(r->kif, kif) == r->ifnot)
                        r = r->skip[PF_SKIP_IFP].ptr;
                else if (r->direction && r->direction != dir)
                        r = r->skip[PF_SKIP_DIR].ptr;
                else if (r->af && r->af != af)
                        r = r->skip[PF_SKIP_AF].ptr;
                else if (r->proto && r->proto != pd->proto)
                        r = r->skip[PF_SKIP_PROTO].ptr;
                else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
                    r->src.neg, kif, M_GETFIB(m)))
                        r = r->skip[PF_SKIP_SRC_ADDR].ptr;
                else if (r->src.port_op && !pf_match_port(r->src.port_op,
                    r->src.port[0], r->src.port[1], th->th_sport))
                        r = r->skip[PF_SKIP_SRC_PORT].ptr;
                else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
                    r->dst.neg, NULL, M_GETFIB(m)))
                        r = r->skip[PF_SKIP_DST_ADDR].ptr;
                else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
                    r->dst.port[0], r->dst.port[1], th->th_dport))
                        r = r->skip[PF_SKIP_DST_PORT].ptr;
                else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
                    pf_osfp_fingerprint(pd, m, off, th),
                        r = TAILQ_NEXT(r, entries);

        if (rm == NULL || rm->action == PF_NOSCRUB)

        r->packets[dir == PF_OUT]++;
        r->bytes[dir == PF_OUT] += pd->tot_len;

        if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
                pd->flags |= PFDESC_TCP_NORM;

        flags = th->th_flags;
        if (flags & TH_SYN) {
                /* Illegal packet */

        /* Illegal packet */
        if (!(flags & (TH_ACK|TH_RST)))

        if (!(flags & TH_ACK)) {
                /* These flags are only valid if ACK is set */
                if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))

        /* Check for illegal header length */
        if (th->th_off < (sizeof(struct tcphdr) >> 2))

        /* If flags changed, or reserved data set, then adjust */
        if (flags != th->th_flags || th->th_x2 != 0) {

                ov = *(u_int16_t *)(&th->th_ack + 1);
                th->th_flags = flags;
                nv = *(u_int16_t *)(&th->th_ack + 1);

                th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, ov, nv, 0);

        /* Remove urgent pointer, if TH_URG is not set */
        if (!(flags & TH_URG) && th->th_urp) {
                th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, th->th_urp,

        /* Process options */
        if (r->max_mss && pf_normalize_tcpopt(r, m, th, off, pd->af))

        /* copy back packet headers if we sanitized */
                m_copyback(m, off, sizeof(*th), (caddr_t)th);

        REASON_SET(&reason, PFRES_NORM);
        if (rm != NULL && r->log)
                PFLOG_PACKET(kif, m, AF_INET, dir, reason, r, NULL, NULL, pd,

pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)

        u_int32_t tsval, tsecr;

        KASSERT((src->scrub == NULL),
            ("pf_normalize_tcp_init: src->scrub != NULL"));

        src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
        if (src->scrub == NULL)

                struct ip *h = mtod(m, struct ip *);
                src->scrub->pfss_ttl = h->ip_ttl;

                struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
                src->scrub->pfss_ttl = h->ip6_hlim;

         * All normalizations below are only begun if we see the start of
         * the connections.  They must all set an enabled bit in pfss_flags
        if ((th->th_flags & TH_SYN) == 0)

        if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
            pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
                /* Diddle with TCP options */

                opt = hdr + sizeof(struct tcphdr);
                hlen = (th->th_off << 2) - sizeof(struct tcphdr);
                while (hlen >= TCPOLEN_TIMESTAMP) {
                        case TCPOPT_EOL:        /* FALLTHROUGH */

                        case TCPOPT_TIMESTAMP:
                                if (opt[1] >= TCPOLEN_TIMESTAMP) {
                                        src->scrub->pfss_flags |=
                                        src->scrub->pfss_ts_mod =
                                            htonl(arc4random());

                                        /* note PFSS_PAWS not set yet */
                                        memcpy(&tsval, &opt[2],
                                        memcpy(&tsecr, &opt[6],
                                        src->scrub->pfss_tsval0 = ntohl(tsval);
                                        src->scrub->pfss_tsval = ntohl(tsval);
                                        src->scrub->pfss_tsecr = ntohl(tsecr);
                                        getmicrouptime(&src->scrub->pfss_last);

                                hlen -= MAX(opt[1], 2);
                                opt += MAX(opt[1], 2);
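/*
 * Editor's sketch (not original code) of the option walk above, with the
 * elided cases spelled out; hlen counts the option bytes remaining after
 * the fixed TCP header:
 *
 *	while (hlen >= TCPOLEN_TIMESTAMP) {
 *		switch (opt[0]) {
 *		case TCPOPT_EOL:	// FALLTHROUGH
 *		case TCPOPT_NOP:	// 1-byte options
 *			opt++;
 *			hlen--;
 *			break;
 *		default:		// opt[1] holds the option length
 *			hlen -= MAX(opt[1], 2);
 *			opt += MAX(opt[1], 2);
 *			break;
 *		}
 *	}
 */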
pf_normalize_tcp_cleanup(struct pf_state *state)

        if (state->src.scrub)
                uma_zfree(V_pf_state_scrub_z, state->src.scrub);
        if (state->dst.scrub)
                uma_zfree(V_pf_state_scrub_z, state->dst.scrub);

        /* Someday... flush the TCP segment reassembly descriptors. */

pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_state *state,
    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)

        struct timeval   uptime;
        u_int32_t        tsval, tsecr;
        u_int            tsval_from_last;

        KASSERT((src->scrub || dst->scrub),
            ("%s: src->scrub == NULL && dst->scrub == NULL", __func__));

         * Enforce the minimum TTL seen for this connection.  Negate a common
         * technique to evade an intrusion detection system and confuse
         * firewall state code.

                struct ip *h = mtod(m, struct ip *);
                if (h->ip_ttl > src->scrub->pfss_ttl)
                        src->scrub->pfss_ttl = h->ip_ttl;
                h->ip_ttl = src->scrub->pfss_ttl;

                struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
                if (h->ip6_hlim > src->scrub->pfss_ttl)
                        src->scrub->pfss_ttl = h->ip6_hlim;
                h->ip6_hlim = src->scrub->pfss_ttl;

        if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
            ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
            (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
            pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
                /* Diddle with TCP options */

                opt = hdr + sizeof(struct tcphdr);
                hlen = (th->th_off << 2) - sizeof(struct tcphdr);
                while (hlen >= TCPOLEN_TIMESTAMP) {
                        case TCPOPT_EOL:        /* FALLTHROUGH */

                        case TCPOPT_TIMESTAMP:
                                /* Modulate the timestamps.  Can be used for
                                 * NAT detection, OS uptime determination or

                                /* Huh?  Multiple timestamps!? */
                                if (V_pf_status.debug >= PF_DEBUG_MISC) {
                                        DPFPRINTF(("multiple TS??"));
                                        pf_print_state(state);

                                REASON_SET(reason, PFRES_TS);

                                if (opt[1] >= TCPOLEN_TIMESTAMP) {
                                        memcpy(&tsval, &opt[2],
                                        if (tsval && src->scrub &&
                                            (src->scrub->pfss_flags &
                                                tsval = ntohl(tsval);
                                                pf_change_proto_a(m, &opt[2],
                                                    src->scrub->pfss_ts_mod),

                                        /* Modulate TS reply iff valid (!0) */
                                        memcpy(&tsecr, &opt[6],
                                        if (tsecr && dst->scrub &&
                                            (dst->scrub->pfss_flags &
                                                tsecr = ntohl(tsecr)
                                                    - dst->scrub->pfss_ts_mod;
                                                pf_change_proto_a(m, &opt[6],
                                                    &th->th_sum, htonl(tsecr),

                                hlen -= MAX(opt[1], 2);
                                opt += MAX(opt[1], 2);

                /* Copyback the options, caller copies back the header */
                        m_copyback(m, off + sizeof(struct tcphdr),
                            (th->th_off << 2) - sizeof(struct tcphdr), hdr +
                            sizeof(struct tcphdr));

         * Must invalidate PAWS checks on connections idle for too long.
         * The fastest allowed timestamp clock is 1ms.  That turns out to
         * be about 24 days before it wraps.  XXX Right now our lowerbound
         * TS echo check only works for the first 12 days of a connection
         * when the TS has exhausted half its 32bit space
#define TS_MAX_IDLE     (24*24*60*60)
#define TS_MAX_CONN     (12*24*60*60)   /* XXX remove when better tsecr check */
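/*
 * Editor's arithmetic (not original code): a 1ms timestamp clock
 * advances through its 2^32 tick space in 2^32 / 1000 seconds, i.e.
 * about 49.7 days, so the value moves through half that space (the
 * window in which SEQ_GT/SEQ_LT comparisons stay meaningful) in roughly
 * 24.8 days; hence the 24-day idle cutoff and the more conservative
 * 12-day whole-connection cutoff above.
 */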
        getmicrouptime(&uptime);
        if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
            (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
            time_uptime - state->creation > TS_MAX_CONN)) {
                if (V_pf_status.debug >= PF_DEBUG_MISC) {
                        DPFPRINTF(("src idled out of PAWS\n"));
                        pf_print_state(state);

                src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)

        if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
            uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
                if (V_pf_status.debug >= PF_DEBUG_MISC) {
                        DPFPRINTF(("dst idled out of PAWS\n"));
                        pf_print_state(state);

                dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)

        if (got_ts && src->scrub && dst->scrub &&
            (src->scrub->pfss_flags & PFSS_PAWS) &&
            (dst->scrub->pfss_flags & PFSS_PAWS)) {
                /* Validate that the timestamps are "in-window".
                 * RFC1323 describes TCP Timestamp options that allow
                 * measurement of RTT (round trip time) and PAWS
                 * (protection against wrapped sequence numbers).  PAWS
                 * gives us a set of rules for rejecting packets on
                 * long fat pipes (packets that were somehow delayed
                 * in transit longer than the time it took to send the
                 * full TCP sequence space of 4GB).  We can use these
                 * rules and infer a few others that will let us treat
                 * the 32bit timestamp and the 32bit echoed timestamp
                 * as sequence numbers to prevent a blind attacker from
                 * inserting packets into a connection.
                 *
                 * - The timestamp on this packet must be greater than
                 *   or equal to the last value echoed by the other
                 *   endpoint.  The RFC says those will be discarded
                 *   since it is a dup that has already been acked.
                 *   This gives us a lowerbound on the timestamp.
                 *      timestamp >= other last echoed timestamp
                 * - The timestamp will be less than or equal to
                 *   the last timestamp plus the time between the
                 *   last packet and now.  The RFC defines the max
                 *   clock rate as 1ms.  We will allow clocks to be
                 *   up to 10% fast and will allow a total difference
                 *   of 30 seconds due to a route change.  And this
                 *   gives us an upperbound on the timestamp.
                 *      timestamp <= last timestamp + max ticks
                 *   We have to be careful here.  Windows will send an
                 *   initial timestamp of zero and then initialize it
                 *   to a random value after the 3whs; presumably to
                 *   avoid a DoS by having to call an expensive RNG
                 *   during a SYN flood.  Proof MS has at least one
                 *   good security geek.
                 *
                 * - The TCP timestamp option must also echo the other
                 *   endpoint's timestamp.  The timestamp echoed is the
                 *   one carried on the earliest unacknowledged segment
                 *   on the left edge of the sequence window.  The RFC
                 *   states that the host will reject any echoed
                 *   timestamps that were larger than any ever sent.
                 *   This gives us an upperbound on the TS echo.
                 *      tsecr <= largest_tsval
                 * - The lowerbound on the TS echo is a little more
                 *   tricky to determine.  The other endpoint's echoed
                 *   values will not decrease.  But there may be
                 *   network conditions that re-order packets and
                 *   cause our view of them to decrease.  For now the
                 *   only lowerbound we can safely determine is that
                 *   the TS echo will never be less than the original
                 *   TS.  XXX There is probably a better lowerbound.
                 *   Remove TS_MAX_CONN with better lowerbound check.
                 *      tsecr >= other original TS
                 *
                 * It is also important to note that the fastest
                 * timestamp clock of 1ms will wrap its 32bit space in
                 * 24 days.  So we just disable TS checking after 24
                 * days of idle time.  We actually must use a 12d
                 * connection limit until we can come up with a better
                 * lowerbound to the TS echo check.

                struct timeval delta_ts;

                 * PFTM_TS_DIFF is how many seconds of leeway to allow
                 * a host's timestamp.  This can happen if the previous
                 * packet got delayed in transit for much longer than

                if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
                        ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];

                /* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ      1100            /* RFC max TS freq of 1kHz + 10% skew */
#define TS_MICROSECS    1000000         /* microseconds per second */

                timevalsub(&delta_ts, &src->scrub->pfss_last);
                tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
                tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);
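/*
 * Editor's worked example (not original code): with ts_fudge = 30s and a
 * packet arriving 10.5s after the last one,
 *
 *	tsval_from_last = (10 + 30) * 1100;			// 44000
 *	tsval_from_last += 500000 / (1000000 / 1100);		// + 550
 *
 * i.e. the peer's timestamp may advance at most ~44550 ticks: the
 * elapsed time at a 1kHz clock plus 10% skew, plus the fudge factor.
 */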
                if ((src->state >= TCPS_ESTABLISHED &&
                    dst->state >= TCPS_ESTABLISHED) &&
                    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
                    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
                    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
                    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
                        /* Bad RFC1323 implementation or an insertion attack.
                         *
                         * - Solaris 2.6 and 2.7 are known to send another ACK
                         *   after the FIN,FIN|ACK,ACK closing that carries

                        DPFPRINTF(("Timestamp failed %c%c%c%c\n",
                            SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
                            SEQ_GT(tsval, src->scrub->pfss_tsval +
                            tsval_from_last) ? '1' : ' ',
                            SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
                            SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
                        DPFPRINTF((" tsval: %u  tsecr: %u  +ticks: %u  "
                            "idle: %jus %lums\n",
                            tsval, tsecr, tsval_from_last,
                            (uintmax_t)delta_ts.tv_sec,
                            delta_ts.tv_usec / 1000));
                        DPFPRINTF((" src->tsval: %u  tsecr: %u\n",
                            src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
                        DPFPRINTF((" dst->tsval: %u  tsecr: %u  tsval0: %u"
                            "\n", dst->scrub->pfss_tsval,
                            dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
                        if (V_pf_status.debug >= PF_DEBUG_MISC) {
                                pf_print_state(state);
                                pf_print_flags(th->th_flags);

                        REASON_SET(reason, PFRES_TS);

                /* XXX I'd really like to require tsecr but it's optional */

        } else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
            ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
            || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
            src->scrub && dst->scrub &&
            (src->scrub->pfss_flags & PFSS_PAWS) &&
            (dst->scrub->pfss_flags & PFSS_PAWS)) {
                /* Didn't send a timestamp.  Timestamps aren't really useful
                 *
                 * - connection opening or closing (often not even sent),
                 *   but we must not let an attacker put a FIN on a
                 *   data packet to sneak it through our ESTABLISHED check.
                 * - on a TCP reset.  RFC suggests not even looking at TS.
                 * - on an empty ACK.  The TS will not be echoed so it will
                 *   probably not help keep the RTT calculation in sync and
                 *   there isn't as much danger when the sequence numbers
                 *   got wrapped.  So some stacks don't include TS on empty

                 * To minimize the disruption to mostly RFC1323 conformant
                 * stacks, we will only require timestamps on data packets.
                 *
                 * And what do ya know, we cannot require timestamps on data
                 * packets.  There appear to be devices that do legitimate
                 * TCP connection hijacking.  There are HTTP devices that allow
                 * a 3whs (with timestamps) and then buffer the HTTP request.
                 * If the intermediate device has the HTTP response cache, it
                 * will spoof the response but not bother timestamping its
                 * packets.  So we can look for the presence of a timestamp in
                 * the first data packet and if there, require it in all future

                if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
                        /*
                         * Hey!  Someone tried to sneak a packet in.  Or the
                         * stack changed its RFC1323 behavior?!?!
                         */
                        if (V_pf_status.debug >= PF_DEBUG_MISC) {
                                DPFPRINTF(("Did not receive expected RFC1323 "
                                pf_print_state(state);
                                pf_print_flags(th->th_flags);

                        REASON_SET(reason, PFRES_TS);

         * We will note if a host sends its data packets with or without
         * timestamps.  And require all data packets to contain a timestamp
         * if the first does.  PAWS implicitly requires that all data packets be
         * timestamped.  But I think there are middle-man devices that hijack
         * TCP streams immediately after the 3whs and don't timestamp their
         * packets (seen in a WWW accelerator or cache).

        if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
            (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {

                        src->scrub->pfss_flags |= PFSS_DATA_TS;

                        src->scrub->pfss_flags |= PFSS_DATA_NOTS;
                        if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
                            (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
                                /* Don't warn if other host rejected RFC1323 */
                                DPFPRINTF(("Broken RFC1323 stack did not "
                                    "timestamp data packet. Disabled PAWS "
                                pf_print_state(state);
                                pf_print_flags(th->th_flags);

         * Update PAWS values
        if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
            (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
                getmicrouptime(&src->scrub->pfss_last);
                if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
                    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
                        src->scrub->pfss_tsval = tsval;

                if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
                    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
                        src->scrub->pfss_tsecr = tsecr;

                if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
                    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
                    src->scrub->pfss_tsval0 == 0)) {
                        /* tsval0 MUST be the lowest timestamp */
                        src->scrub->pfss_tsval0 = tsval;

                /* Only fully initialized after a TS gets echoed */
                if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
                        src->scrub->pfss_flags |= PFSS_PAWS;

        /* I have a dream....  TCP segment reassembly.... */

pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
    int off, sa_family_t af)

        int              opt, cnt, optlen = 0;
        u_char           opts[TCP_MAXOLEN];
        u_char          *optp = opts;

        thoff = th->th_off << 2;
        cnt = thoff - sizeof(struct tcphdr);

        if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,

        for (; cnt > 0; cnt -= optlen, optp += optlen) {
                if (opt == TCPOPT_EOL)
                if (opt == TCPOPT_NOP)

                if (optlen < 2 || optlen > cnt)

                        mss = (u_int16_t *)(optp + 2);
                        if ((ntohs(*mss)) > r->max_mss) {
                                th->th_sum = pf_proto_cksum_fixup(m,
                                    th->th_sum, *mss, htons(r->max_mss), 0);
                                *mss = htons(r->max_mss);
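/*
 * Editor's note (sketch, not original code): e.g. with "scrub max-mss
 * 1440", a SYN advertising MSS 1460 is rewritten in place:
 *
 *	*mss = htons(1440);
 *
 * and th_sum is patched incrementally via pf_proto_cksum_fixup() rather
 * than being recomputed over the whole segment.
 */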
                m_copyback(m, off + sizeof(*th), thoff - sizeof(*th), opts);

pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)

        struct mbuf     *m = *m0;
        struct ip       *h = mtod(m, struct ip *);

        /* Clear IP_DF if no-df was requested */
        if (flags & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
                u_int16_t ip_off = h->ip_off;

                h->ip_off &= htons(~IP_DF);
                h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);

        /* Enforce a minimum ttl, may cause endless packet loops */
        if (min_ttl && h->ip_ttl < min_ttl) {
                u_int16_t ip_ttl = h->ip_ttl;

                h->ip_ttl = min_ttl;
                h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);

        if (flags & PFRULE_SET_TOS) {

                ov = *(u_int16_t *)h;
                nv = *(u_int16_t *)h;

                h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);

        /* random-id, but not for fragments */
        if (flags & PFRULE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
                u_int16_t ip_id = h->ip_id;

                h->ip_id = ip_randomid();
                h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);

pf_scrub_ip6(struct mbuf **m0, u_int8_t min_ttl)

        struct mbuf     *m = *m0;
        struct ip6_hdr  *h = mtod(m, struct ip6_hdr *);

        /* Enforce a minimum ttl, may cause endless packet loops */
        if (min_ttl && h->ip6_hlim < min_ttl)
                h->ip6_hlim = min_ttl;