2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the project nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * $KAME: frag6.c,v 1.33 2002/01/07 11:34:48 kjc Exp $
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
39 #include <sys/param.h>
40 #include <sys/systm.h>
42 #include <sys/malloc.h>
44 #include <sys/domain.h>
45 #include <sys/eventhandler.h>
46 #include <sys/kernel.h>
47 #include <sys/protosw.h>
48 #include <sys/socket.h>
49 #include <sys/errno.h>
52 #include <sys/syslog.h>
54 #include <machine/atomic.h>
57 #include <net/if_var.h>
58 #include <net/netisr.h>
59 #include <net/route.h>
62 #include <netinet/in.h>
63 #include <netinet/in_var.h>
64 #include <netinet/ip6.h>
65 #include <netinet6/ip6_var.h>
66 #include <netinet/icmp6.h>
67 #include <netinet/in_systm.h> /* for ECN definitions */
68 #include <netinet/ip.h> /* for ECN definitions */
70 #include <security/mac/mac_framework.h>
73 * Reassembly headers are stored in hash buckets.
75 #define IP6REASS_NHASH_LOG2 10
76 #define IP6REASS_NHASH (1 << IP6REASS_NHASH_LOG2)
77 #define IP6REASS_HMASK (IP6REASS_NHASH - 1)
79 static void frag6_enq(struct ip6asfrag *, struct ip6asfrag *,
80 uint32_t bucket __unused);
81 static void frag6_deq(struct ip6asfrag *, uint32_t bucket __unused);
82 static void frag6_insque_head(struct ip6q *, struct ip6q *,
84 static void frag6_remque(struct ip6q *, uint32_t bucket);
85 static void frag6_freef(struct ip6q *, uint32_t bucket);
93 VNET_DEFINE_STATIC(volatile u_int, frag6_nfragpackets);
94 volatile u_int frag6_nfrags = 0;
95 VNET_DEFINE_STATIC(struct ip6qbucket, ip6q[IP6REASS_NHASH]);
96 VNET_DEFINE_STATIC(uint32_t, ip6q_hashseed);
98 #define V_frag6_nfragpackets VNET(frag6_nfragpackets)
99 #define V_ip6q VNET(ip6q)
100 #define V_ip6q_hashseed VNET(ip6q_hashseed)
102 #define IP6Q_LOCK(i) mtx_lock(&V_ip6q[(i)].lock)
103 #define IP6Q_TRYLOCK(i) mtx_trylock(&V_ip6q[(i)].lock)
104 #define IP6Q_LOCK_ASSERT(i) mtx_assert(&V_ip6q[(i)].lock, MA_OWNED)
105 #define IP6Q_UNLOCK(i) mtx_unlock(&V_ip6q[(i)].lock)
106 #define IP6Q_HEAD(i) (&V_ip6q[(i)].ip6q)
108 static MALLOC_DEFINE(M_FTABLE, "fragment", "fragment reassembly header");
111 * By default, limit the number of IP6 fragments across all reassembly
112 * queues to 1/32 of the total number of mbuf clusters.
114 * Limit the total number of reassembly queues per VNET to the
115 * IP6 fragment limit, but ensure the limit will not allow any bucket
116 * to grow above 100 items. (The bucket limit is
117 * IP6_MAXFRAGPACKETS / (IP6REASS_NHASH / 2), so the 50 is the correct
118 * multiplier to reach a 100-item limit.)
119 * The 100-item limit was chosen as brief testing seems to show that
120 * this produces "reasonable" performance on some subset of systems
123 #define IP6_MAXFRAGS (nmbclusters / 32)
124 #define IP6_MAXFRAGPACKETS (imin(IP6_MAXFRAGS, IP6REASS_NHASH * 50))
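/*
 * Worked numbers for the defaults above: IP6REASS_NHASH_LOG2 == 10 gives
 * 1024 buckets, so IP6_MAXFRAGPACKETS caps at 1024 * 50 = 51200 and
 * frag6_set_bucketsize() yields 51200 / (1024 / 2) = 100 entries per bucket
 * (assuming nmbclusters / 32 does not impose a lower limit).
 */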
127 * Initialise reassembly queue and fragment identifier.
130 frag6_set_bucketsize()
134 if ((i = V_ip6_maxfragpackets) > 0)
135 V_ip6_maxfragbucketsize = imax(i / (IP6REASS_NHASH / 2), 1);
139 frag6_change(void *tag)
141 VNET_ITERATOR_DECL(vnet_iter);
143 ip6_maxfrags = IP6_MAXFRAGS;
144 VNET_LIST_RLOCK_NOSLEEP();
145 VNET_FOREACH(vnet_iter) {
146 CURVNET_SET(vnet_iter);
147 V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
148 frag6_set_bucketsize();
151 VNET_LIST_RUNLOCK_NOSLEEP();
160 V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
161 frag6_set_bucketsize();
162 for (i = 0; i < IP6REASS_NHASH; i++) {
164 q6->ip6q_next = q6->ip6q_prev = q6;
165 mtx_init(&V_ip6q[i].lock, "ip6qlock", NULL, MTX_DEF);
168 V_ip6q_hashseed = arc4random();
169 V_ip6_maxfragsperpacket = 64;
170 if (!IS_DEFAULT_VNET(curvnet))
173 ip6_maxfrags = IP6_MAXFRAGS;
174 EVENTHANDLER_REGISTER(nmbclusters_change,
175 frag6_change, NULL, EVENTHANDLER_PRI_ANY);
179 * In RFC 2460, the fragment and reassembly rules do not agree with each
180 * other in terms of Next Header field handling in the fragment header.
181 * While the sender will use the same value for all of the fragmented packets,
182 * the receiver is advised not to check them for consistency.
184 * fragment rule (p20):
185 * (2) A Fragment header containing:
186 * The Next Header value that identifies the first header of
187 * the Fragmentable Part of the original packet.
188 * -> next header field is same for all fragments
190 * reassembly rule (p21):
191 * The Next Header field of the last header of the Unfragmentable
192 * Part is obtained from the Next Header field of the first
193 * fragment's Fragment header.
194 * -> should grab it from the first fragment only
196 * The following note also contradicts the fragment rule - no one is going to
197 * send fragments of the same packet with different Next Header fields.
199 * additional note (p22):
200 * The Next Header values in the Fragment headers of different
201 * fragments of the same original packet may differ. Only the value
202 * from the Offset zero fragment packet is used for reassembly.
203 * -> should grab it from the first fragment only
205 * There is no explicit reason given in the RFC. Historical reason maybe?
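 *
 * The code below follows the reassembly rule: ip6q_nxt is taken only from
 * the offset-zero fragment (see frag6_input()).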
211 frag6_input(struct mbuf **mp, int *offp, int proto)
213 struct mbuf *m = *mp, *t;
215 struct ip6_frag *ip6f;
216 struct ip6q *head, *q6;
217 struct ip6asfrag *af6, *ip6af, *af6dwn;
218 struct in6_ifaddr *ia;
219 int offset = *offp, nxt, i, next;
221 int fragoff, frgpartlen; /* must be larger than u_int16_t */
222 uint32_t hashkey[(sizeof(struct in6_addr) * 2 +
223 sizeof(ip6f->ip6f_ident)) / sizeof(uint32_t)];
224 uint32_t hash, *hashkeyp;
225 struct ifnet *dstifp;
229 struct ip6_direct_ctx *ip6dc;
233 char ip6buf[INET6_ADDRSTRLEN];
236 ip6 = mtod(m, struct ip6_hdr *);
237 #ifndef PULLDOWN_TEST
238 IP6_EXTHDR_CHECK(m, offset, sizeof(struct ip6_frag), IPPROTO_DONE);
239 ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset);
241 IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
243 return (IPPROTO_DONE);
247 /* find the destination interface of the packet. */
248 ia = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
251 ifa_free(&ia->ia_ifa);
253 /* jumbo payload can't contain a fragment header */
254 if (ip6->ip6_plen == 0) {
255 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
256 in6_ifstat_inc(dstifp, ifs6_reass_fail);
261 * Check whether the fragment packet's fragment length is a
262 * multiple of 8 octets (required for every fragment except the last).
263 * sizeof(struct ip6_frag) == 8
264 * sizeof(struct ip6_hdr) == 40
266 if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
267 (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
268 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
269 offsetof(struct ip6_hdr, ip6_plen));
270 in6_ifstat_inc(dstifp, ifs6_reass_fail);
274 IP6STAT_INC(ip6s_fragments);
275 in6_ifstat_inc(dstifp, ifs6_reass_reqd);
277 /* offset now points to data portion */
278 offset += sizeof(struct ip6_frag);
281 * Handle "atomic" fragments (offset and m bit set to 0) upfront,
282 * unrelated to any reassembly (see RFC 6946 and section 4.5 of RFC
283 * 8200). Just skip the fragment header.
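 * Returning ip6f_nxt below simply resumes header processing at the value
 * carried in the skipped fragment header.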
285 if ((ip6f->ip6f_offlg & ~IP6F_RESERVED_MASK) == 0) {
286 IP6STAT_INC(ip6s_atomicfrags);
287 in6_ifstat_inc(dstifp, ifs6_reass_ok);
289 m->m_flags |= M_FRAGMENTED;
290 return (ip6f->ip6f_nxt);
293 /* Get fragment length and discard 0-byte fragments. */
294 frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
295 if (frgpartlen == 0) {
296 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
297 offsetof(struct ip6_hdr, ip6_plen));
298 in6_ifstat_inc(dstifp, ifs6_reass_fail);
299 IP6STAT_INC(ip6s_fragdropped);
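/*
 * Build the reassembly hash key from the source address, destination
 * address, and the fragment identification (see the hashkey[] layout
 * above), then reduce it to a bucket index.
 */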
304 memcpy(hashkeyp, &ip6->ip6_src, sizeof(struct in6_addr));
305 hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
306 memcpy(hashkeyp, &ip6->ip6_dst, sizeof(struct in6_addr));
307 hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
308 *hashkeyp = ip6f->ip6f_ident;
309 hash = jenkins_hash32(hashkey, nitems(hashkey), V_ip6q_hashseed);
310 hash &= IP6REASS_HMASK;
311 head = IP6Q_HEAD(hash);
315 * Enforce upper bound on number of fragments.
316 * If maxfrag is 0, never accept fragments.
317 * If maxfrag is -1, accept all fragments without limitation.
319 if (ip6_maxfrags < 0)
321 else if (atomic_load_int(&frag6_nfrags) >= (u_int)ip6_maxfrags)
324 for (q6 = head->ip6q_next; q6 != head; q6 = q6->ip6q_next)
325 if (ip6f->ip6f_ident == q6->ip6q_ident &&
326 IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
327 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst)
329 && mac_ip6q_match(m, q6)
336 * the first fragment to arrive, create a reassembly queue.
341 * Enforce upper bound on number of fragmented packets
342 * for which we attempt reassembly;
343 * If maxfragpackets is 0, never accept fragments.
344 * If maxfragpackets is -1, accept all fragments without
347 if (V_ip6_maxfragpackets < 0)
349 else if (V_ip6q[hash].count >= V_ip6_maxfragbucketsize ||
350 atomic_load_int(&V_frag6_nfragpackets) >=
351 (u_int)V_ip6_maxfragpackets)
353 atomic_add_int(&V_frag6_nfragpackets, 1);
354 q6 = (struct ip6q *)malloc(sizeof(struct ip6q), M_FTABLE,
358 bzero(q6, sizeof(*q6));
360 if (mac_ip6q_init(q6, M_NOWAIT) != 0) {
364 mac_ip6q_create(m, q6);
366 frag6_insque_head(q6, head, hash);
368 /* ip6q_nxt will be filled afterwards, from 1st fragment */
369 q6->ip6q_down = q6->ip6q_up = (struct ip6asfrag *)q6;
371 q6->ip6q_nxtp = (u_char *)nxtp;
373 q6->ip6q_ident = ip6f->ip6f_ident;
374 q6->ip6q_ttl = IPV6_FRAGTTL;
375 q6->ip6q_src = ip6->ip6_src;
376 q6->ip6q_dst = ip6->ip6_dst;
378 (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
379 q6->ip6q_unfrglen = -1; /* The 1st fragment has not arrived. */
385 * If it's the 1st fragment, record the length of the
386 * unfragmentable part and the next header of the fragment header.
388 fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
390 q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
391 sizeof(struct ip6_frag);
392 q6->ip6q_nxt = ip6f->ip6f_nxt;
396 * Check that the reassembled packet would not exceed 65535 bytes
398 * If it would exceed, discard the fragment and return an ICMP error.
400 if (q6->ip6q_unfrglen >= 0) {
401 /* The 1st fragment has already arrived. */
402 if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
403 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
404 offset - sizeof(struct ip6_frag) +
405 offsetof(struct ip6_frag, ip6f_offlg));
407 return (IPPROTO_DONE);
409 } else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
410 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
411 offset - sizeof(struct ip6_frag) +
412 offsetof(struct ip6_frag, ip6f_offlg));
414 return (IPPROTO_DONE);
417 * If it's the first fragment, do the above check for each
418 * fragment already stored in the reassembly queue.
421 for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
423 af6dwn = af6->ip6af_down;
425 if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen >
427 struct mbuf *merr = IP6_REASS_MBUF(af6);
428 struct ip6_hdr *ip6err;
429 int erroff = af6->ip6af_offset;
431 /* dequeue the fragment. */
432 frag6_deq(af6, hash);
435 /* adjust pointer. */
436 ip6err = mtod(merr, struct ip6_hdr *);
439 * Restore source and destination addresses
440 * in the erroneous IPv6 header.
442 ip6err->ip6_src = q6->ip6q_src;
443 ip6err->ip6_dst = q6->ip6q_dst;
445 icmp6_error(merr, ICMP6_PARAM_PROB,
446 ICMP6_PARAMPROB_HEADER,
447 erroff - sizeof(struct ip6_frag) +
448 offsetof(struct ip6_frag, ip6f_offlg));
453 ip6af = (struct ip6asfrag *)malloc(sizeof(struct ip6asfrag), M_FTABLE,
457 bzero(ip6af, sizeof(*ip6af));
458 ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG;
459 ip6af->ip6af_off = fragoff;
460 ip6af->ip6af_frglen = frgpartlen;
461 ip6af->ip6af_offset = offset;
462 IP6_REASS_MBUF(ip6af) = m;
465 af6 = (struct ip6asfrag *)q6;
470 * Handle ECN by comparing this segment with the first one;
471 * if CE is set, do not lose CE.
472 * Drop the fragment if CE and not-ECT are mixed for the same packet.
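 * (The ECN codepoint is the low two bits of the Traffic Class field, i.e.
 * bits 20-21 of the flow word after ntohl(), hence the shift and mask below.)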
474 ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
476 if (ecn == IPTOS_ECN_CE) {
477 if (ecn0 == IPTOS_ECN_NOTECT) {
478 free(ip6af, M_FTABLE);
481 if (ecn0 != IPTOS_ECN_CE)
482 q6->ip6q_ecn = IPTOS_ECN_CE;
484 if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
485 free(ip6af, M_FTABLE);
490 * Find a segment which begins after this one does.
492 for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
493 af6 = af6->ip6af_down)
494 if (af6->ip6af_off > ip6af->ip6af_off)
499 * If there is a preceding segment, it may provide some of
500 * our data already. If so, drop the data from the incoming
501 * segment. If it provides all of our data, drop us.
503 if (af6->ip6af_up != (struct ip6asfrag *)q6) {
504 i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
507 if (i >= ip6af->ip6af_frglen)
509 m_adj(IP6_REASS_MBUF(ip6af), i);
510 ip6af->ip6af_off += i;
511 ip6af->ip6af_frglen -= i;
516 * While we overlap succeeding segments, trim them or,
517 * if they are completely covered, dequeue them.
519 while (af6 != (struct ip6asfrag *)q6 &&
520 ip6af->ip6af_off + ip6af->ip6af_frglen > af6->ip6af_off) {
521 i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
522 if (i < af6->ip6af_frglen) {
523 af6->ip6af_frglen -= i;
525 m_adj(IP6_REASS_MBUF(af6), i);
528 af6 = af6->ip6af_down;
529 m_freem(IP6_REASS_MBUF(af6->ip6af_up));
530 frag6_deq(af6->ip6af_up, hash);
534 * If the incoming fragment overlaps some existing fragments in
535 * the reassembly queue, drop it, since it is dangerous to override
536 * existing fragments from a security point of view.
537 * We don't know which fragment is the bad guy - here we arbitrarily
538 * trust the fragment that arrived earlier.
540 * Note: due to changes after disabling this part, the mbuf passed to
541 * m_adj() below no longer meets the requirement.
543 if (af6->ip6af_up != (struct ip6asfrag *)q6) {
544 i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
547 #if 0 /* suppress the noisy log */
548 log(LOG_ERR, "%d bytes of a fragment from %s "
549 "overlaps the previous fragment\n",
550 i, ip6_sprintf(ip6buf, &q6->ip6q_src));
552 free(ip6af, M_FTABLE);
556 if (af6 != (struct ip6asfrag *)q6) {
557 i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
559 #if 0 /* suppress the noisy log */
560 log(LOG_ERR, "%d bytes of a fragment from %s "
561 "overlaps the succeeding fragment",
562 i, ip6_sprintf(ip6buf, &q6->ip6q_src));
564 free(ip6af, M_FTABLE);
573 mac_ip6q_update(m, q6);
577 * Stick new segment in its place;
578 * check for complete reassembly.
579 * If not complete, check fragment limit.
580 * Move to front of packet queue, as we are
581 * the most recently active fragmented packet.
583 frag6_enq(ip6af, af6->ip6af_up, hash);
584 atomic_add_int(&frag6_nfrags, 1);
587 if (q6 != head->ip6q_next) {
588 frag6_remque(q6, hash);
589 frag6_insque_head(q6, head, hash);
593 for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
594 af6 = af6->ip6af_down) {
595 if (af6->ip6af_off != next) {
596 if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
597 IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
598 frag6_freef(q6, hash);
603 next += af6->ip6af_frglen;
605 if (af6->ip6af_up->ip6af_mff) {
606 if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
607 IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
608 frag6_freef(q6, hash);
615 * Reassembly is complete; concatenate fragments.
617 ip6af = q6->ip6q_down;
618 t = m = IP6_REASS_MBUF(ip6af);
619 af6 = ip6af->ip6af_down;
620 frag6_deq(ip6af, hash);
621 while (af6 != (struct ip6asfrag *)q6) {
622 m->m_pkthdr.csum_flags &=
623 IP6_REASS_MBUF(af6)->m_pkthdr.csum_flags;
624 m->m_pkthdr.csum_data +=
625 IP6_REASS_MBUF(af6)->m_pkthdr.csum_data;
627 af6dwn = af6->ip6af_down;
628 frag6_deq(af6, hash);
631 m_adj(IP6_REASS_MBUF(af6), af6->ip6af_offset);
632 m_demote_pkthdr(IP6_REASS_MBUF(af6));
633 m_cat(t, IP6_REASS_MBUF(af6));
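/* Fold the accumulated 32-bit partial checksum back into 16 bits. */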
638 while (m->m_pkthdr.csum_data & 0xffff0000)
639 m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
640 (m->m_pkthdr.csum_data >> 16);
642 /* adjust offset to point where the original next header starts */
643 offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
644 free(ip6af, M_FTABLE);
645 ip6 = mtod(m, struct ip6_hdr *);
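/*
 * The new payload length covers the unfragmentable part plus the
 * reassembled data; the fragment header itself is deleted further below.
 */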
646 ip6->ip6_plen = htons((u_short)next + offset - sizeof(struct ip6_hdr));
647 if (q6->ip6q_ecn == IPTOS_ECN_CE)
648 ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);
651 *q6->ip6q_nxtp = (u_char)(nxt & 0xff);
654 if (ip6_deletefraghdr(m, offset, M_NOWAIT) != 0) {
655 frag6_remque(q6, hash);
656 atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
658 mac_ip6q_destroy(q6);
661 atomic_subtract_int(&V_frag6_nfragpackets, 1);
667 * Store NXT to the original.
669 m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t),
672 frag6_remque(q6, hash);
673 atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
675 mac_ip6q_reassemble(q6, m);
676 mac_ip6q_destroy(q6);
679 atomic_subtract_int(&V_frag6_nfragpackets, 1);
681 if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
683 for (t = m; t; t = t->m_next)
685 m->m_pkthdr.len = plen;
689 mtag = m_tag_alloc(MTAG_ABI_IPV6, IPV6_TAG_DIRECT, sizeof(*ip6dc),
694 ip6dc = (struct ip6_direct_ctx *)(mtag + 1);
695 ip6dc->ip6dc_nxt = nxt;
696 ip6dc->ip6dc_off = offset;
698 m_tag_prepend(m, mtag);
702 IP6STAT_INC(ip6s_reassembled);
703 in6_ifstat_inc(dstifp, ifs6_reass_ok);
707 * Queue/dispatch for reprocessing.
709 netisr_dispatch(NETISR_IPV6_DIRECT, m);
714 * Tell launch routine the next header
724 in6_ifstat_inc(dstifp, ifs6_reass_fail);
725 IP6STAT_INC(ip6s_fragdropped);
731 * Free a fragment reassembly header and all
732 * associated datagrams.
735 frag6_freef(struct ip6q *q6, uint32_t bucket)
737 struct ip6asfrag *af6, *down6;
739 IP6Q_LOCK_ASSERT(bucket);
741 for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
743 struct mbuf *m = IP6_REASS_MBUF(af6);
745 down6 = af6->ip6af_down;
746 frag6_deq(af6, bucket);
749 * Return ICMP time exceeded error for the 1st fragment.
750 * Just free other fragments.
752 if (af6->ip6af_off == 0) {
756 ip6 = mtod(m, struct ip6_hdr *);
758 /* restore source and destination addresses */
759 ip6->ip6_src = q6->ip6q_src;
760 ip6->ip6_dst = q6->ip6q_dst;
762 icmp6_error(m, ICMP6_TIME_EXCEEDED,
763 ICMP6_TIME_EXCEED_REASSEMBLY, 0);
768 frag6_remque(q6, bucket);
769 atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
771 mac_ip6q_destroy(q6);
774 atomic_subtract_int(&V_frag6_nfragpackets, 1);
778 * Put an IPv6 fragment on a reassembly chain.
779 * Like insque, but pointers in middle of structure.
782 frag6_enq(struct ip6asfrag *af6, struct ip6asfrag *up6,
783 uint32_t bucket __unused)
786 IP6Q_LOCK_ASSERT(bucket);
789 af6->ip6af_down = up6->ip6af_down;
790 up6->ip6af_down->ip6af_up = af6;
791 up6->ip6af_down = af6;
795 * To frag6_enq as remque is to insque.
798 frag6_deq(struct ip6asfrag *af6, uint32_t bucket __unused)
801 IP6Q_LOCK_ASSERT(bucket);
803 af6->ip6af_up->ip6af_down = af6->ip6af_down;
804 af6->ip6af_down->ip6af_up = af6->ip6af_up;
808 frag6_insque_head(struct ip6q *new, struct ip6q *old, uint32_t bucket)
811 IP6Q_LOCK_ASSERT(bucket);
812 KASSERT(IP6Q_HEAD(bucket) == old,
813 ("%s: attempt to insert at head of wrong bucket"
814 " (bucket=%u, old=%p)", __func__, bucket, old));
816 new->ip6q_prev = old;
817 new->ip6q_next = old->ip6q_next;
818 old->ip6q_next->ip6q_prev = new;
819 old->ip6q_next = new;
820 V_ip6q[bucket].count++;
824 frag6_remque(struct ip6q *p6, uint32_t bucket)
827 IP6Q_LOCK_ASSERT(bucket);
829 p6->ip6q_prev->ip6q_next = p6->ip6q_next;
830 p6->ip6q_next->ip6q_prev = p6->ip6q_prev;
831 V_ip6q[bucket].count--;
835 * IPv6 reassembly timer processing;
836 * if a timer expires on a reassembly
842 VNET_ITERATOR_DECL(vnet_iter);
843 struct ip6q *head, *q6;
846 VNET_LIST_RLOCK_NOSLEEP();
847 VNET_FOREACH(vnet_iter) {
848 CURVNET_SET(vnet_iter);
849 for (i = 0; i < IP6REASS_NHASH; i++) {
852 q6 = head->ip6q_next;
855 * XXXJTL: This should never happen. This
856 * should turn into an assertion.
864 if (q6->ip6q_prev->ip6q_ttl == 0) {
865 IP6STAT_ADD(ip6s_fragtimeout,
866 q6->ip6q_prev->ip6q_nfrag);
867 /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
868 frag6_freef(q6->ip6q_prev, i);
872 * If we are over the maximum number of fragments
873 * (due to the limit being lowered), drain off
874 * enough to get down to the new limit.
875 * Note that we drain all reassembly queues if
876 * maxfragpackets is 0 (fragmentation is disabled),
877 * and don't enforce a limit when maxfragpackets
880 while ((V_ip6_maxfragpackets == 0 ||
881 (V_ip6_maxfragpackets > 0 &&
882 V_ip6q[i].count > V_ip6_maxfragbucketsize)) &&
883 head->ip6q_prev != head) {
884 IP6STAT_ADD(ip6s_fragoverflow,
885 q6->ip6q_prev->ip6q_nfrag);
886 /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
887 frag6_freef(head->ip6q_prev, i);
892 * If we are still over the maximum number of fragmented
893 * packets, drain off enough to get down to the new limit.
896 while (V_ip6_maxfragpackets >= 0 &&
897 atomic_load_int(&V_frag6_nfragpackets) >
898 (u_int)V_ip6_maxfragpackets) {
901 if (head->ip6q_prev != head) {
902 IP6STAT_ADD(ip6s_fragoverflow,
903 q6->ip6q_prev->ip6q_nfrag);
904 /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
905 frag6_freef(head->ip6q_prev, i);
908 i = (i + 1) % IP6REASS_NHASH;
912 VNET_LIST_RUNLOCK_NOSLEEP();
916 * Drain off all datagram fragments.
921 VNET_ITERATOR_DECL(vnet_iter);
925 VNET_LIST_RLOCK_NOSLEEP();
926 VNET_FOREACH(vnet_iter) {
927 CURVNET_SET(vnet_iter);
928 for (i = 0; i < IP6REASS_NHASH; i++) {
929 if (IP6Q_TRYLOCK(i) == 0)
932 while (head->ip6q_next != head) {
933 IP6STAT_INC(ip6s_fragdropped);
934 /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
935 frag6_freef(head->ip6q_next, i);
941 VNET_LIST_RUNLOCK_NOSLEEP();
945 ip6_deletefraghdr(struct mbuf *m, int offset, int wait)
947 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
950 /* Delete frag6 header. */
951 if (m->m_len >= offset + sizeof(struct ip6_frag)) {
952 /* This is the only possible case with !PULLDOWN_TEST. */
953 bcopy(ip6, (char *)ip6 + sizeof(struct ip6_frag),
955 m->m_data += sizeof(struct ip6_frag);
956 m->m_len -= sizeof(struct ip6_frag);
958 /* This comes with no copy if the boundary is on a cluster. */
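/* Otherwise split at the fragment header and trim it from the tail chain. */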
959 if ((t = m_split(m, offset, wait)) == NULL)
961 m_adj(t, sizeof(struct ip6_frag));
965 m->m_flags |= M_FRAGMENTED;