/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $KAME: frag6.c,v 1.33 2002/01/07 11:34:48 kjc Exp $
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hash.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <machine/atomic.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet/in_systm.h>	/* for ECN definitions */
#include <netinet/ip.h>		/* for ECN definitions */

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
/*
 * Reassembly headers are stored in hash buckets.
 */
#define	IP6REASS_NHASH_LOG2	6
#define	IP6REASS_NHASH		(1 << IP6REASS_NHASH_LOG2)
#define	IP6REASS_HMASK		(IP6REASS_NHASH - 1)
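
/*
 * Worked example (illustrative): with IP6REASS_NHASH_LOG2 == 6 there are
 * 64 buckets and IP6REASS_HMASK == 0x3f, so a reassembly queue's bucket
 * is simply the low six bits of its Jenkins hash.
 */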

static void frag6_enq(struct ip6asfrag *, struct ip6asfrag *,
    uint32_t bucket __unused);
static void frag6_deq(struct ip6asfrag *, uint32_t bucket __unused);
static void frag6_insque_head(struct ip6q *, struct ip6q *,
    uint32_t bucket __unused);
static void frag6_remque(struct ip6q *, uint32_t bucket __unused);
static void frag6_freef(struct ip6q *, uint32_t bucket);

struct ip6qbucket {
	struct ip6q	ip6q;	/* Per-bucket queue head. */
	struct mtx	lock;	/* Protects this bucket's queue. */
};

VNET_DEFINE_STATIC(volatile u_int, frag6_nfragpackets);
VNET_DEFINE_STATIC(volatile u_int, frag6_nfrags);
VNET_DEFINE_STATIC(struct ip6qbucket, ip6q[IP6REASS_NHASH]);
VNET_DEFINE_STATIC(uint32_t, ip6q_hashseed);

#define	V_frag6_nfragpackets	VNET(frag6_nfragpackets)
#define	V_frag6_nfrags		VNET(frag6_nfrags)
#define	V_ip6q			VNET(ip6q)
#define	V_ip6q_hashseed		VNET(ip6q_hashseed)

#define	IP6Q_LOCK(i)		mtx_lock(&V_ip6q[(i)].lock)
#define	IP6Q_TRYLOCK(i)		mtx_trylock(&V_ip6q[(i)].lock)
#define	IP6Q_LOCK_ASSERT(i)	mtx_assert(&V_ip6q[(i)].lock, MA_OWNED)
#define	IP6Q_UNLOCK(i)		mtx_unlock(&V_ip6q[(i)].lock)
#define	IP6Q_HEAD(i)		(&V_ip6q[(i)].ip6q)

static MALLOC_DEFINE(M_FTABLE, "fragment", "fragment reassembly header");

/*
 * Initialise reassembly queue and fragment identifier.
 */
static void
frag6_change(void *tag)
{

    V_ip6_maxfragpackets = nmbclusters / 4;
    V_ip6_maxfrags = nmbclusters / 4;
}

void
frag6_init(void)
{
    struct ip6q *q6;
    int i;

    V_ip6_maxfragpackets = nmbclusters / 4;
    V_ip6_maxfrags = nmbclusters / 4;
    for (i = 0; i < IP6REASS_NHASH; i++) {
        q6 = IP6Q_HEAD(i);
        q6->ip6q_next = q6->ip6q_prev = q6;
        mtx_init(&V_ip6q[i].lock, "ip6qlock", NULL, MTX_DEF);
    }
    V_ip6q_hashseed = arc4random();
    if (!IS_DEFAULT_VNET(curvnet))
        return;

    EVENTHANDLER_REGISTER(nmbclusters_change,
        frag6_change, NULL, EVENTHANDLER_PRI_ANY);
}
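
/*
 * Illustrative sizing (numbers not from the source): with
 * nmbclusters == 32768, both ip6_maxfragpackets and ip6_maxfrags start
 * out at 8192, and the eventhandler above re-derives them whenever
 * nmbclusters changes.
 */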

/*
 * In RFC2460, the fragment and reassembly rules do not agree with each
 * other in terms of next header field handling in the fragment header.
 * While the sender will use the same value for all of the fragmented packets,
 * the receiver is advised not to check them for consistency.
 *
 * Fragment rule (p. 20):
 *	(2) A Fragment header containing:
 *	The Next Header value that identifies the first header of
 *	the Fragmentable Part of the original packet.
 *		-> next header field is same for all fragments
 *
 * Reassembly rule (p. 21):
 *	The Next Header field of the last header of the Unfragmentable
 *	Part is obtained from the Next Header field of the first
 *	fragment's Fragment header.
 *		-> should grab it from the first fragment only
 *
 * The following note also contradicts the fragment rule - no one is going
 * to send fragments of the same packet with different next header fields.
 *
 * Additional note (p. 22):
 *	The Next Header values in the Fragment headers of different
 *	fragments of the same original packet may differ.  Only the value
 *	from the Offset zero fragment packet is used for reassembly.
 *		-> should grab it from the first fragment only
 *
 * There is no explicit reason given in the RFC.  Historical reason maybe?
 */
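/*
 * Concrete example of the rules above: an original packet laid out as
 * IPv6 | Fragment | TCP is split so that every fragment's Fragment
 * header carries ip6f_nxt == IPPROTO_TCP; on reassembly only the value
 * seen in the offset-zero fragment is written back into the
 * unfragmentable part.
 */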
/*
 * Fragment input.
 */
int
frag6_input(struct mbuf **mp, int *offp, int proto)
{
    struct mbuf *m = *mp, *t;
    struct ip6_hdr *ip6;
    struct ip6_frag *ip6f;
    struct ip6q *head, *q6;
    struct ip6asfrag *af6, *ip6af, *af6dwn;
    struct in6_ifaddr *ia;
    int offset = *offp, nxt, i, next;
    int first_frag = 0;
    int fragoff, frgpartlen;	/* must be larger than u_int16_t */
    /* Key holds src + dst + ident, i.e. exactly nine 32-bit words. */
    uint32_t hash, *hashkeyp, hashkey[(sizeof(struct in6_addr) * 2 +
        sizeof(ip6f->ip6f_ident)) / sizeof(uint32_t)];
    struct ifnet *dstifp;
    u_int8_t ecn, ecn0;
#ifdef RSS
    struct m_tag *mtag;
    struct ip6_direct_ctx *ip6dc;
#endif
#if 0
    char ip6buf[INET6_ADDRSTRLEN];
#endif

    ip6 = mtod(m, struct ip6_hdr *);
#ifndef PULLDOWN_TEST
    IP6_EXTHDR_CHECK(m, offset, sizeof(struct ip6_frag), IPPROTO_DONE);
    ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset);
#else
    IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
    if (ip6f == NULL)
        return (IPPROTO_DONE);
#endif

    dstifp = NULL;
    /* find the destination interface of the packet. */
    ia = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
    if (ia != NULL) {
        dstifp = ia->ia_ifp;
        ifa_free(&ia->ia_ifa);
    }

    /* jumbo payload can't contain a fragment header */
    if (ip6->ip6_plen == 0) {
        icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
        in6_ifstat_inc(dstifp, ifs6_reass_fail);
        return (IPPROTO_DONE);
    }

    /*
     * Check whether the fragment packet's fragment length is a
     * multiple of 8 octets.
     * sizeof(struct ip6_frag) == 8
     * sizeof(struct ip6_hdr) == 40
     */
    if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
        (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
        icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
            offsetof(struct ip6_hdr, ip6_plen));
        in6_ifstat_inc(dstifp, ifs6_reass_fail);
        return (IPPROTO_DONE);
    }
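
    /*
     * Worked example (illustrative): a non-last fragment with
     * ip6_plen == 1448 and the fragment header at offset 40 carries
     * 1448 - 8 == 1440 bytes of data.  Because the 40-byte IPv6 header
     * and the 8-byte fragment header are both multiples of 8,
     * (1448 - 40) & 0x7 == 0 tests exactly the same condition as
     * checking the fragment data length itself.
     */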

    IP6STAT_INC(ip6s_fragments);
    in6_ifstat_inc(dstifp, ifs6_reass_reqd);

    /* offset now points to data portion */
    offset += sizeof(struct ip6_frag);

    /*
     * RFC 6946: Handle "atomic" fragments (offset and m bit set to 0)
     * upfront, unrelated to any reassembly.  Just skip the fragment header.
     */
    if ((ip6f->ip6f_offlg & ~IP6F_RESERVED_MASK) == 0) {
        /* XXX-BZ we want dedicated counters for this. */
        IP6STAT_INC(ip6s_reassembled);
        in6_ifstat_inc(dstifp, ifs6_reass_ok);
        *offp = offset;
        m->m_flags |= M_FRAGMENTED;
        return (ip6f->ip6f_nxt);
    }
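
    /*
     * Example (illustrative): an atomic fragment has both the offset
     * field and the M bit clear, so everything outside the reserved
     * bits of ip6f_offlg is zero; it is handed straight to the next
     * protocol without ever touching a reassembly queue.
     */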

    /* Hash the reassembly key and select the bucket. */
    hashkeyp = hashkey;
    memcpy(hashkeyp, &ip6->ip6_src, sizeof(struct in6_addr));
    hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
    memcpy(hashkeyp, &ip6->ip6_dst, sizeof(struct in6_addr));
    hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
    *hashkeyp = ip6f->ip6f_ident;
    hash = jenkins_hash32(hashkey, nitems(hashkey), V_ip6q_hashseed);
    hash &= IP6REASS_HMASK;
    head = IP6Q_HEAD(hash);
    IP6Q_LOCK(hash);
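
    /*
     * Sketch of the key hashed above: nine 32-bit words, with
     * hashkey[0..3] holding the source address, hashkey[4..7] the
     * destination address, and hashkey[8] the fragment identifier, so
     * all fragments of one packet land in the same bucket while the
     * per-VNET random seed keeps bucket collisions unpredictable to
     * remote senders.
     */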

    /*
     * Enforce upper bound on number of fragments.
     * If maxfrag is 0, never accept fragments.
     * If maxfrag is -1, accept all fragments without limitation.
     */
    if (V_ip6_maxfrags < 0)
        ;
    else if (atomic_load_int(&V_frag6_nfrags) >= (u_int)V_ip6_maxfrags)
        goto dropfrag;

    for (q6 = head->ip6q_next; q6 != head; q6 = q6->ip6q_next)
        if (ip6f->ip6f_ident == q6->ip6q_ident &&
            IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
            IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst)
#ifdef MAC
            && mac_ip6q_match(m, q6)
#endif
            )
            break;

    if (q6 == head) {
        /*
         * the first fragment to arrive, create a reassembly queue.
         */
        first_frag = 1;

        /*
         * Enforce upper bound on number of fragmented packets
         * for which we attempt reassembly;
         * If maxfragpackets is 0, never accept fragments.
         * If maxfragpackets is -1, accept all fragments without
         * limitation.
         */
        if (V_ip6_maxfragpackets < 0)
            ;
        else if (atomic_load_int(&V_frag6_nfragpackets) >=
            (u_int)V_ip6_maxfragpackets)
            goto dropfrag;
        atomic_add_int(&V_frag6_nfragpackets, 1);
        q6 = (struct ip6q *)malloc(sizeof(struct ip6q), M_FTABLE,
            M_NOWAIT);
        if (q6 == NULL)
            goto dropfrag;
        bzero(q6, sizeof(*q6));
#ifdef MAC
        if (mac_ip6q_init(q6, M_NOWAIT) != 0) {
            free(q6, M_FTABLE);
            goto dropfrag;
        }
        mac_ip6q_create(m, q6);
#endif
        frag6_insque_head(q6, head, hash);

        /* ip6q_nxt will be filled afterwards, from 1st fragment */
        q6->ip6q_down	= q6->ip6q_up = (struct ip6asfrag *)q6;
#ifdef notyet
        q6->ip6q_nxtp	= (u_char *)nxtp;
#endif
        q6->ip6q_ident	= ip6f->ip6f_ident;
        q6->ip6q_ttl	= IPV6_FRAGTTL;
        q6->ip6q_src	= ip6->ip6_src;
        q6->ip6q_dst	= ip6->ip6_dst;
        q6->ip6q_ecn	=
            (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
        q6->ip6q_unfrglen = -1;	/* The 1st fragment has not arrived. */

        q6->ip6q_nfrag = 0;
    }

    /*
     * If it's the 1st fragment, record the length of the
     * unfragmentable part and the next header of the fragment header.
     */
    fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
    if (fragoff == 0) {
        q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
            sizeof(struct ip6_frag);
        q6->ip6q_nxt = ip6f->ip6f_nxt;
    }

    /*
     * Check that the reassembled packet would not exceed 65535 bytes
     * in size.
     * If it would exceed, discard the fragment and return an ICMP error.
     */
    frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
    if (q6->ip6q_unfrglen >= 0) {
        /* The 1st fragment has already arrived. */
        if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
            icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
                offset - sizeof(struct ip6_frag) +
                offsetof(struct ip6_frag, ip6f_offlg));
            IP6Q_UNLOCK(hash);
            return (IPPROTO_DONE);
        }
    } else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
        icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
            offset - sizeof(struct ip6_frag) +
            offsetof(struct ip6_frag, ip6f_offlg));
        IP6Q_UNLOCK(hash);
        return (IPPROTO_DONE);
    }
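
    /*
     * Worked example (illustrative): a fragment with byte offset 65528
     * and a 16-byte payload would end at 65544 > IPV6_MAXPACKET (65535),
     * so it is rejected with a parameter problem pointing at the
     * offending ip6f_offlg field.
     */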

    /*
     * If it's the first fragment, do the above check for each
     * fragment already stored in the reassembly queue.
     */
    if (fragoff == 0) {
        for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
             af6 = af6dwn) {
            af6dwn = af6->ip6af_down;

            if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen >
                IPV6_MAXPACKET) {
                struct mbuf *merr = IP6_REASS_MBUF(af6);
                struct ip6_hdr *ip6err;
                int erroff = af6->ip6af_offset;

                /* dequeue the fragment. */
                frag6_deq(af6, hash);
                free(af6, M_FTABLE);

                /* adjust pointer. */
                ip6err = mtod(merr, struct ip6_hdr *);

                /*
                 * Restore source and destination addresses
                 * in the erroneous IPv6 header.
                 */
                ip6err->ip6_src = q6->ip6q_src;
                ip6err->ip6_dst = q6->ip6q_dst;

                icmp6_error(merr, ICMP6_PARAM_PROB,
                    ICMP6_PARAMPROB_HEADER,
                    erroff - sizeof(struct ip6_frag) +
                    offsetof(struct ip6_frag, ip6f_offlg));
            }
        }
    }

    ip6af = (struct ip6asfrag *)malloc(sizeof(struct ip6asfrag), M_FTABLE,
        M_NOWAIT);
    if (ip6af == NULL)
        goto dropfrag;
    bzero(ip6af, sizeof(*ip6af));
    ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG;
    ip6af->ip6af_off = fragoff;
    ip6af->ip6af_frglen = frgpartlen;
    ip6af->ip6af_offset = offset;
    IP6_REASS_MBUF(ip6af) = m;

    if (first_frag) {
        af6 = (struct ip6asfrag *)q6;
        goto insert;
    }

    /*
     * Handle ECN by comparing this segment with the first one;
     * if CE is set, do not lose CE.
     * Drop if CE and not-ECT are mixed for the same packet.
     */
    ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
    ecn0 = q6->ip6q_ecn;
    if (ecn == IPTOS_ECN_CE) {
        if (ecn0 == IPTOS_ECN_NOTECT) {
            free(ip6af, M_FTABLE);
            goto dropfrag;
        }
        if (ecn0 != IPTOS_ECN_CE)
            q6->ip6q_ecn = IPTOS_ECN_CE;
    }
    if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
        free(ip6af, M_FTABLE);
        goto dropfrag;
    }
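
    /*
     * Summary of the ECN combination rules implemented above (cf. RFC
     * 3168): CE arriving for a not-ECT queue drops the fragment, CE
     * arriving for an ECT queue upgrades the queue to CE, and a not-ECT
     * fragment for a queue that is ECT or CE is dropped as inconsistent.
     */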

    /*
     * Find a segment which begins after this one does.
     */
    for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
         af6 = af6->ip6af_down)
        if (af6->ip6af_off > ip6af->ip6af_off)
            break;

#if 0
    /*
     * If there is a preceding segment, it may provide some of
     * our data already.  If so, drop the data from the incoming
     * segment.  If it provides all of our data, drop us.
     */
    if (af6->ip6af_up != (struct ip6asfrag *)q6) {
        i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
            - ip6af->ip6af_off;
        if (i > 0) {
            if (i >= ip6af->ip6af_frglen)
                goto dropfrag;
            m_adj(IP6_REASS_MBUF(ip6af), i);
            ip6af->ip6af_off += i;
            ip6af->ip6af_frglen -= i;
        }
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    while (af6 != (struct ip6asfrag *)q6 &&
           ip6af->ip6af_off + ip6af->ip6af_frglen > af6->ip6af_off) {
        i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
        if (i < af6->ip6af_frglen) {
            af6->ip6af_frglen -= i;
            af6->ip6af_off += i;
            m_adj(IP6_REASS_MBUF(af6), i);
            break;
        }
        af6 = af6->ip6af_down;
        m_freem(IP6_REASS_MBUF(af6->ip6af_up));
        frag6_deq(af6->ip6af_up, hash);
    }
#else
    /*
     * If the incoming fragment overlaps some existing fragments in
     * the reassembly queue, drop it, since it is dangerous to override
     * existing fragments from a security point of view.
     * We don't know which fragment is the bad guy - here we trust
     * fragment that came in earlier, with no real reason.
     *
     * Note: due to changes after disabling this part, mbuf passed to
     * m_adj() below now does not meet the requirement.
     */
    if (af6->ip6af_up != (struct ip6asfrag *)q6) {
        i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
            - ip6af->ip6af_off;
        if (i > 0) {
#if 0				/* suppress the noisy log */
            log(LOG_ERR, "%d bytes of a fragment from %s "
                "overlaps the previous fragment\n",
                i, ip6_sprintf(ip6buf, &q6->ip6q_src));
#endif
            free(ip6af, M_FTABLE);
            goto dropfrag;
        }
    }
    if (af6 != (struct ip6asfrag *)q6) {
        i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
        if (i > 0) {
#if 0				/* suppress the noisy log */
            log(LOG_ERR, "%d bytes of a fragment from %s "
                "overlaps the succeeding fragment",
                i, ip6_sprintf(ip6buf, &q6->ip6q_src));
#endif
            free(ip6af, M_FTABLE);
            goto dropfrag;
        }
    }
#endif
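
    /*
     * Worked example (illustrative): with a queued fragment covering
     * bytes [0, 1232), an incoming fragment starting at offset 1224
     * yields i == 8 in the first check above, so the newcomer is
     * dropped rather than allowed to overwrite the 8 overlapping bytes.
     */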

insert:
#ifdef MAC
    if (!first_frag)
        mac_ip6q_update(m, q6);
#endif

    /*
     * Stick new segment in its place;
     * check for complete reassembly.
     * Move to front of packet queue, as we are
     * the most recently active fragmented packet.
     */
    frag6_enq(ip6af, af6->ip6af_up, hash);
    atomic_add_int(&V_frag6_nfrags, 1);
    q6->ip6q_nfrag++;
    if (q6 != head->ip6q_next) {
        frag6_remque(q6, hash);
        frag6_insque_head(q6, head, hash);
    }

    /*
     * Check for complete reassembly.
     */
    next = 0;
    for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
         af6 = af6->ip6af_down) {
        if (af6->ip6af_off != next) {
            IP6Q_UNLOCK(hash);
            return (IPPROTO_DONE);
        }
        next += af6->ip6af_frglen;
    }
    if (af6->ip6af_up->ip6af_mff) {
        IP6Q_UNLOCK(hash);
        return (IPPROTO_DONE);
    }

    /*
     * Reassembly is complete; concatenate fragments.
     */
    ip6af = q6->ip6q_down;
    t = m = IP6_REASS_MBUF(ip6af);
    af6 = ip6af->ip6af_down;
    frag6_deq(ip6af, hash);
    while (af6 != (struct ip6asfrag *)q6) {
        m->m_pkthdr.csum_flags &=
            IP6_REASS_MBUF(af6)->m_pkthdr.csum_flags;
        m->m_pkthdr.csum_data +=
            IP6_REASS_MBUF(af6)->m_pkthdr.csum_data;

        af6dwn = af6->ip6af_down;
        frag6_deq(af6, hash);
        while (t->m_next)
            t = t->m_next;
        m_adj(IP6_REASS_MBUF(af6), af6->ip6af_offset);
        m_demote_pkthdr(IP6_REASS_MBUF(af6));
        m_cat(t, IP6_REASS_MBUF(af6));
        free(af6, M_FTABLE);
        af6 = af6dwn;
    }

    while (m->m_pkthdr.csum_data & 0xffff0000)
        m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
            (m->m_pkthdr.csum_data >> 16);
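
    /*
     * Folding example (illustrative): csum_data == 0x0002ffff becomes
     * 0xffff + 0x0002 == 0x10001 on the first pass and
     * 0x0001 + 0x0001 == 0x0002 on the second, leaving a plain 16-bit
     * partial checksum.
     */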

    /* adjust offset to point where the original next header starts */
    offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
    free(ip6af, M_FTABLE);
    ip6 = mtod(m, struct ip6_hdr *);
    ip6->ip6_plen = htons((u_short)next + offset - sizeof(struct ip6_hdr));
    if (q6->ip6q_ecn == IPTOS_ECN_CE)
        ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);
    nxt = q6->ip6q_nxt;
#ifdef notyet
    *q6->ip6q_nxtp = (u_char)(nxt & 0xff);
#endif

    if (ip6_deletefraghdr(m, offset, M_NOWAIT) != 0) {
        frag6_remque(q6, hash);
        atomic_subtract_int(&V_frag6_nfrags, q6->ip6q_nfrag);
#ifdef MAC
        mac_ip6q_destroy(q6);
#endif
        free(q6, M_FTABLE);
        atomic_subtract_int(&V_frag6_nfragpackets, 1);

        goto dropfrag;
    }

    /*
     * Store NXT to the original.
     */
    m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t),
        (caddr_t)&nxt);

    frag6_remque(q6, hash);
    atomic_subtract_int(&V_frag6_nfrags, q6->ip6q_nfrag);
#ifdef MAC
    mac_ip6q_reassemble(q6, m);
    mac_ip6q_destroy(q6);
#endif
    free(q6, M_FTABLE);
    atomic_subtract_int(&V_frag6_nfragpackets, 1);

    if (m->m_flags & M_PKTHDR) {	/* Isn't it always true? */
        int plen = 0;

        for (t = m; t; t = t->m_next)
            plen += t->m_len;
        m->m_pkthdr.len = plen;
    }

#ifdef RSS
    mtag = m_tag_alloc(MTAG_ABI_IPV6, IPV6_TAG_DIRECT, sizeof(*ip6dc),
        M_NOWAIT);
    if (mtag == NULL)
        goto dropfrag;

    ip6dc = (struct ip6_direct_ctx *)(mtag + 1);
    ip6dc->ip6dc_nxt = nxt;
    ip6dc->ip6dc_off = offset;

    m_tag_prepend(m, mtag);
#endif

    IP6STAT_INC(ip6s_reassembled);
    in6_ifstat_inc(dstifp, ifs6_reass_ok);

#ifdef RSS
    /*
     * Queue/dispatch for reprocessing.
     */
    netisr_dispatch(NETISR_IPV6_DIRECT, m);
    return (IPPROTO_DONE);
#endif

    /*
     * Tell launch routine the next header.
     */
    *mp = m;
    *offp = offset;

    return (nxt);

 dropfrag:
    IP6Q_UNLOCK(hash);
    in6_ifstat_inc(dstifp, ifs6_reass_fail);
    IP6STAT_INC(ip6s_fragdropped);
    m_freem(m);
    return (IPPROTO_DONE);
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
static void
frag6_freef(struct ip6q *q6, uint32_t bucket)
{
    struct ip6asfrag *af6, *down6;

    IP6Q_LOCK_ASSERT(bucket);

    for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
         af6 = down6) {
        struct mbuf *m = IP6_REASS_MBUF(af6);

        down6 = af6->ip6af_down;
        frag6_deq(af6, bucket);

        /*
         * Return ICMP time exceeded error for the 1st fragment.
         * Just free other fragments.
         */
        if (af6->ip6af_off == 0) {
            struct ip6_hdr *ip6;

            /* adjust pointer. */
            ip6 = mtod(m, struct ip6_hdr *);

            /* restore source and destination addresses */
            ip6->ip6_src = q6->ip6q_src;
            ip6->ip6_dst = q6->ip6q_dst;

            icmp6_error(m, ICMP6_TIME_EXCEEDED,
                ICMP6_TIME_EXCEED_REASSEMBLY, 0);
        } else
            m_freem(m);
        free(af6, M_FTABLE);
    }
    frag6_remque(q6, bucket);
    atomic_subtract_int(&V_frag6_nfrags, q6->ip6q_nfrag);
#ifdef MAC
    mac_ip6q_destroy(q6);
#endif
    free(q6, M_FTABLE);
    atomic_subtract_int(&V_frag6_nfragpackets, 1);
}

/*
 * Put an ip fragment on a reassembly chain.
 * Like insque, but pointers in middle of structure.
 */
static void
frag6_enq(struct ip6asfrag *af6, struct ip6asfrag *up6,
    uint32_t bucket __unused)
{

    IP6Q_LOCK_ASSERT(bucket);

    af6->ip6af_up = up6;
    af6->ip6af_down = up6->ip6af_down;
    up6->ip6af_down->ip6af_up = af6;
    up6->ip6af_down = af6;
}
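
/*
 * Before/after sketch: inserting af6 behind up6 turns
 *	... <-> up6 <-> next <-> ...
 * into
 *	... <-> up6 <-> af6 <-> next <-> ...
 * with all four pointer updates done under the bucket lock.
 */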

/*
 * To frag6_enq as remque is to insque.
 */
static void
frag6_deq(struct ip6asfrag *af6, uint32_t bucket __unused)
{

    IP6Q_LOCK_ASSERT(bucket);

    af6->ip6af_up->ip6af_down = af6->ip6af_down;
    af6->ip6af_down->ip6af_up = af6->ip6af_up;
}

static void
frag6_insque_head(struct ip6q *new, struct ip6q *old, uint32_t bucket __unused)
{

    IP6Q_LOCK_ASSERT(bucket);
    KASSERT(IP6Q_HEAD(bucket) == old,
        ("%s: attempt to insert at head of wrong bucket"
        " (bucket=%u, old=%p)", __func__, bucket, old));

    new->ip6q_prev = old;
    new->ip6q_next = old->ip6q_next;
    old->ip6q_next->ip6q_prev = new;
    old->ip6q_next = new;
}

static void
frag6_remque(struct ip6q *p6, uint32_t bucket __unused)
{

    IP6Q_LOCK_ASSERT(bucket);

    p6->ip6q_prev->ip6q_next = p6->ip6q_next;
    p6->ip6q_next->ip6q_prev = p6->ip6q_prev;
}

/*
 * IPv6 reassembling timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
frag6_slowtimo(void)
{
    VNET_ITERATOR_DECL(vnet_iter);
    struct ip6q *head, *q6;
    int i;

    VNET_LIST_RLOCK_NOSLEEP();
    VNET_FOREACH(vnet_iter) {
        CURVNET_SET(vnet_iter);
        for (i = 0; i < IP6REASS_NHASH; i++) {
            IP6Q_LOCK(i);
            head = IP6Q_HEAD(i);
            q6 = head->ip6q_next;
            while (q6 != head) {
                --q6->ip6q_ttl;
                q6 = q6->ip6q_next;
                if (q6->ip6q_prev->ip6q_ttl == 0) {
                    IP6STAT_INC(ip6s_fragtimeout);
                    /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
                    frag6_freef(q6->ip6q_prev, i);
                }
            }
            /*
             * If we are over the maximum number of fragments
             * (due to the limit being lowered), drain off
             * enough to get down to the new limit.
             */
            while (atomic_load_int(&V_frag6_nfragpackets) >
                (u_int)V_ip6_maxfragpackets &&
                head->ip6q_prev != head) {
                IP6STAT_INC(ip6s_fragoverflow);
                /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
                frag6_freef(head->ip6q_prev, i);
            }
            IP6Q_UNLOCK(i);
        }
        CURVNET_RESTORE();
    }
    VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Drain off all datagram fragments.
 */
void
frag6_drain(void)
{
    VNET_ITERATOR_DECL(vnet_iter);
    struct ip6q *head;
    int i;

    VNET_LIST_RLOCK_NOSLEEP();
    VNET_FOREACH(vnet_iter) {
        CURVNET_SET(vnet_iter);
        for (i = 0; i < IP6REASS_NHASH; i++) {
            if (IP6Q_TRYLOCK(i) == 0)
                continue;
            head = IP6Q_HEAD(i);
            while (head->ip6q_next != head) {
                IP6STAT_INC(ip6s_fragdropped);
                /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
                frag6_freef(head->ip6q_next, i);
            }
            IP6Q_UNLOCK(i);
        }
        CURVNET_RESTORE();
    }
    VNET_LIST_RUNLOCK_NOSLEEP();
}

int
ip6_deletefraghdr(struct mbuf *m, int offset, int wait)
{
    struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
    struct mbuf *t;

    /* Delete frag6 header. */
    if (m->m_len >= offset + sizeof(struct ip6_frag)) {
        /* This is the only possible case with !PULLDOWN_TEST. */
        bcopy(ip6, (char *)ip6 + sizeof(struct ip6_frag),
            offset);
        m->m_data += sizeof(struct ip6_frag);
        m->m_len -= sizeof(struct ip6_frag);
    } else {
        /* This comes with no copy if the boundary is on cluster. */
        if ((t = m_split(m, offset, wait)) == NULL)
            return (ENOMEM);
        m_adj(t, sizeof(struct ip6_frag));
        m_cat(m, t);
    }

    m->m_flags |= M_FRAGMENTED;
    return (0);
}
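
/*
 * Worked example (illustrative): with the fragment header at
 * offset == 40 inside a single 88-byte mbuf, the bcopy path above
 * slides the 40 preceding bytes forward by 8 and shrinks the mbuf to
 * 80 bytes; had the header crossed an mbuf boundary, the m_split()
 * path would split at offset 40, trim the 8-byte fragment header, and
 * m_cat() the halves back together.
 */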