/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$KAME: frag6.c,v 1.33 2002/01/07 11:34:48 kjc Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/if_var.h>
#include <net/netisr.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet/in_systm.h>	/* For ECN definitions. */
#include <netinet/ip.h>		/* For ECN definitions. */

#include <security/mac/mac_framework.h>
/* Reassembly headers are stored in hash buckets. */
#define	IP6REASS_NHASH_LOG2	10
#define	IP6REASS_NHASH		(1 << IP6REASS_NHASH_LOG2)
#define	IP6REASS_HMASK		(IP6REASS_NHASH - 1)
static void frag6_enq(struct ip6asfrag *, struct ip6asfrag *,
    uint32_t bucket __unused);
static void frag6_deq(struct ip6asfrag *, uint32_t bucket __unused);
static void frag6_insque_head(struct ip6q *, struct ip6q *,
    uint32_t bucket);
static void frag6_remque(struct ip6q *, uint32_t bucket);
static void frag6_freef(struct ip6q *, uint32_t bucket);
struct ip6asfrag {
	struct ip6asfrag *ip6af_down;
	struct ip6asfrag *ip6af_up;
	struct mbuf	*ip6af_m;
	int		ip6af_offset;	/* offset in ip6af_m to next header */
	int		ip6af_frglen;	/* fragmentable part length */
	int		ip6af_off;	/* fragment offset */
	u_int16_t	ip6af_mff;	/* more fragment bit in frag off */
};

#define	IP6_REASS_MBUF(ip6af)	(*(struct mbuf **)&((ip6af)->ip6af_m))
static MALLOC_DEFINE(M_FRAG6, "frag6", "IPv6 fragment reassembly header");

/* System wide (global) maximum and count of packets in reassembly queues. */
static int ip6_maxfrags;
static volatile u_int frag6_nfrags = 0;

/* Maximum and current packets in per-VNET reassembly queue. */
VNET_DEFINE_STATIC(int, ip6_maxfragpackets);
VNET_DEFINE_STATIC(volatile u_int, frag6_nfragpackets);
#define	V_ip6_maxfragpackets	VNET(ip6_maxfragpackets)
#define	V_frag6_nfragpackets	VNET(frag6_nfragpackets)

/* Maximum per-VNET reassembly queues per bucket and fragments per packet. */
VNET_DEFINE_STATIC(int, ip6_maxfragbucketsize);
VNET_DEFINE_STATIC(int, ip6_maxfragsperpacket);
#define	V_ip6_maxfragbucketsize	VNET(ip6_maxfragbucketsize)
#define	V_ip6_maxfragsperpacket	VNET(ip6_maxfragsperpacket)

/* Per-VNET reassembly queue buckets. */
VNET_DEFINE_STATIC(struct ip6qbucket, ip6qb[IP6REASS_NHASH]);
VNET_DEFINE_STATIC(uint32_t, ip6qb_hashseed);
#define	V_ip6qb			VNET(ip6qb)
#define	V_ip6qb_hashseed	VNET(ip6qb_hashseed)

#define	IP6QB_LOCK(_b)		mtx_lock(&V_ip6qb[(_b)].lock)
#define	IP6QB_TRYLOCK(_b)	mtx_trylock(&V_ip6qb[(_b)].lock)
#define	IP6QB_LOCK_ASSERT(_b)	mtx_assert(&V_ip6qb[(_b)].lock, MA_OWNED)
#define	IP6QB_UNLOCK(_b)	mtx_unlock(&V_ip6qb[(_b)].lock)
#define	IP6QB_HEAD(_b)		(&V_ip6qb[(_b)].ip6q)
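
/*
 * Note: each bucket carries its own mutex (taken via the IP6QB_LOCK macros
 * above), so reassembly work on different buckets can proceed concurrently;
 * only reassembly queues that hash to the same bucket contend on a lock.
 */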
/*
 * By default, limit the number of IP6 fragments across all reassembly
 * queues to 1/32 of the total number of mbuf clusters.
 *
 * Limit the total number of reassembly queues per VNET to the
 * IP6 fragment limit, but ensure the limit will not allow any bucket
 * to grow above 100 items.  (The bucket limit is
 * IP6_MAXFRAGPACKETS / (IP6REASS_NHASH / 2), so the 50 is the correct
 * multiplier to reach a 100-item limit.)
 * The 100-item limit was chosen as brief testing seems to show that
 * this produces "reasonable" performance on some subset of systems
 * under DoS attack.
 */
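/*
 * Worked example with the defaults above: IP6REASS_NHASH_LOG2 is 10, so
 * IP6REASS_NHASH is 1024 buckets.  IP6_MAXFRAGPACKETS is then capped at
 * 1024 * 50 = 51200 packets, and frag6_set_bucketsize() yields a per-bucket
 * limit of 51200 / (1024 / 2) = 100 entries.
 */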
#define	IP6_MAXFRAGS		(nmbclusters / 32)
#define	IP6_MAXFRAGPACKETS	(imin(IP6_MAXFRAGS, IP6REASS_NHASH * 50))
/*
 * Sysctls and helper function.
 */
SYSCTL_DECL(_net_inet6_ip6);

static void
frag6_set_bucketsize(void)
{
	int i;

	if ((i = V_ip6_maxfragpackets) > 0)
		V_ip6_maxfragbucketsize = imax(i / (IP6REASS_NHASH / 2), 1);
}

SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGS, maxfrags,
	CTLFLAG_RW, &ip6_maxfrags, 0,
	"Maximum allowed number of outstanding IPv6 packet fragments. "
	"A value of 0 means no fragmented packets will be accepted, while "
	"a value of -1 means no limit");
static int
sysctl_ip6_maxfragpackets(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = V_ip6_maxfragpackets;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || !req->newptr)
		return (error);
	V_ip6_maxfragpackets = val;
	frag6_set_bucketsize();
	return (0);
}

SYSCTL_PROC(_net_inet6_ip6, IPV6CTL_MAXFRAGPACKETS, maxfragpackets,
	CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW, NULL, 0,
	sysctl_ip6_maxfragpackets, "I",
	"Default maximum number of outstanding fragmented IPv6 packets. "
	"A value of 0 means no fragmented packets will be accepted, while "
	"a value of -1 means no limit");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGSPERPACKET, maxfragsperpacket,
	CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragsperpacket), 0,
	"Maximum allowed number of fragments per packet");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGBUCKETSIZE, maxfragbucketsize,
	CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragbucketsize), 0,
	"Maximum number of reassembly queues per hash bucket");
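
/*
 * These knobs appear under net.inet6.ip6 in userland; for example,
 *	sysctl net.inet6.ip6.maxfragpackets=2048
 * (value chosen purely for illustration) goes through the handler above,
 * which also recomputes the per-bucket limit via frag6_set_bucketsize().
 */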
/*
 * Remove the IPv6 fragmentation header from the mbuf.
 */
int
ip6_deletefraghdr(struct mbuf *m, int offset, int wait)
{
	struct ip6_hdr *ip6;
	struct mbuf *t;

	/* Delete frag6 header. */
	if (m->m_len >= offset + sizeof(struct ip6_frag)) {

		/* This is the only possible case with !PULLDOWN_TEST. */
		ip6 = mtod(m, struct ip6_hdr *);
		bcopy(ip6, (char *)ip6 + sizeof(struct ip6_frag),
		    offset);
		m->m_data += sizeof(struct ip6_frag);
		m->m_len -= sizeof(struct ip6_frag);
	} else {
		/* This comes with no copy if the boundary is on a cluster. */
		if ((t = m_split(m, offset, wait)) == NULL)
			return (ENOMEM);
		m_adj(t, sizeof(struct ip6_frag));
		m_cat(m, t);
	}

	m->m_flags |= M_FRAGMENTED;
	return (0);
}
/*
 * Free a fragment reassembly header and all associated datagrams.
 */
static void
frag6_freef(struct ip6q *q6, uint32_t bucket)
{
	struct ip6_hdr *ip6;
	struct ip6asfrag *af6, *down6;
	struct mbuf *m;

	IP6QB_LOCK_ASSERT(bucket);

	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = down6) {

		m = IP6_REASS_MBUF(af6);
		down6 = af6->ip6af_down;
		frag6_deq(af6, bucket);

		/*
		 * Return ICMP time exceeded error for the 1st fragment.
		 * Just free other fragments.
		 */
		if (af6->ip6af_off == 0) {

			/* Adjust pointer. */
			ip6 = mtod(m, struct ip6_hdr *);

			/* Restore source and destination addresses. */
			ip6->ip6_src = q6->ip6q_src;
			ip6->ip6_dst = q6->ip6q_dst;

			icmp6_error(m, ICMP6_TIME_EXCEEDED,
			    ICMP6_TIME_EXCEED_REASSEMBLY, 0);
		} else
			m_freem(m);

		free(af6, M_FRAG6);
	}
	frag6_remque(q6, bucket);
	atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
#ifdef MAC
	mac_ip6q_destroy(q6);
#endif
	free(q6, M_FRAG6);
	atomic_subtract_int(&V_frag6_nfragpackets, 1);
}
/*
 * As in RFC 2460, the fragment and reassembly rules in RFC 8200 do not agree
 * with each other with respect to the next header field in the fragment
 * header.  While the sender will use the same value for all of the fragmented
 * packets, the receiver is advised not to check them for consistency.
 *
 * Fragment rules (p18,p19):
 *	(2)  A Fragment header containing:
 *	The Next Header value that identifies the first header
 *	after the Per-Fragment headers of the original packet.
 *		-> next header field is same for all fragments
 *
 * Reassembly rule (p20):
 *	The Next Header field of the last header of the Per-Fragment
 *	headers is obtained from the Next Header field of the first
 *	fragment's Fragment header.
 *		-> should grab it from the first fragment only
 *
 * The following note also contradicts the fragment rule - no one is going to
 * send fragments with differing next header fields.
 *
 * Additional note (p22) [not an error]:
 *	The Next Header values in the Fragment headers of different
 *	fragments of the same original packet may differ.  Only the value
 *	from the Offset zero fragment packet is used for reassembly.
 *		-> should grab it from the first fragment only
 *
 * There is no explicit reason given in the RFC.  Historical reason maybe?
 */
int
frag6_input(struct mbuf **mp, int *offp, int proto)
{
	struct ifnet *dstifp;
	struct in6_ifaddr *ia6;
	struct ip6_hdr *ip6;
	struct ip6_frag *ip6f;
	struct ip6q *head, *q6;
	struct ip6asfrag *af6, *af6dwn, *ip6af;
	struct mbuf *m, *t;
	uint32_t hashkey[(sizeof(struct in6_addr) * 2 +
		    sizeof(ip6f->ip6f_ident)) / sizeof(uint32_t)];
	uint32_t bucket, *hashkeyp;
	int fragoff, frgpartlen;	/* Must be larger than uint16_t. */
	int nxt, offset, plen;
	uint8_t ecn, ecn0;
#ifdef RSS
	struct m_tag *mtag;
	struct ip6_direct_ctx *ip6dc;
#endif

	m = *mp;
	offset = *offp;

	ip6 = mtod(m, struct ip6_hdr *);
#ifndef PULLDOWN_TEST
	IP6_EXTHDR_CHECK(m, offset, sizeof(struct ip6_frag), IPPROTO_DONE);
	ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset);
#else
	IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
	if (ip6f == NULL)
		return (IPPROTO_DONE);
#endif
	dstifp = NULL;
	/* Find the destination interface of the packet. */
	ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
	if (ia6 != NULL) {
		dstifp = ia6->ia_ifp;
		ifa_free(&ia6->ia_ifa);
	}

	/* Jumbo payload cannot contain a fragment header. */
	if (ip6->ip6_plen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		return (IPPROTO_DONE);
	}
	/*
	 * Check whether the fragment packet's fragment length is a
	 * multiple of 8 octets (unless it is the last one).
	 * sizeof(struct ip6_frag) == 8
	 * sizeof(struct ip6_hdr) == 40
	 */
	if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
	    (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		return (IPPROTO_DONE);
	}
	IP6STAT_INC(ip6s_fragments);
	in6_ifstat_inc(dstifp, ifs6_reass_reqd);

	/* Offset now points to data portion. */
	offset += sizeof(struct ip6_frag);
	/*
	 * Handle "atomic" fragments (offset and m bit set to 0) upfront,
	 * unrelated to any reassembly.  Still need to remove the frag hdr.
	 * See RFC 6946 and section 4.5 of RFC 8200.
	 */
	if ((ip6f->ip6f_offlg & ~IP6F_RESERVED_MASK) == 0) {
		IP6STAT_INC(ip6s_atomicfrags);
		/* XXX-BZ handle correctly. */
		in6_ifstat_inc(dstifp, ifs6_reass_ok);
		*offp = offset;
		m->m_flags |= M_FRAGMENTED;
		return (ip6f->ip6f_nxt);
	}
	/* Get fragment length and discard 0-byte fragments. */
	frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
	if (frgpartlen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		IP6STAT_INC(ip6s_fragdropped);
		return (IPPROTO_DONE);
	}
	/* Generate a hash value for fragment bucket selection. */
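	/*
	 * The key is laid out as (source address, destination address,
	 * fragment ID); mixing in the per-VNET random seed below keeps
	 * remote senders from deliberately colliding on a single bucket.
	 */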
	hashkeyp = hashkey;
	memcpy(hashkeyp, &ip6->ip6_src, sizeof(struct in6_addr));
	hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
	memcpy(hashkeyp, &ip6->ip6_dst, sizeof(struct in6_addr));
	hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
	*hashkeyp = ip6f->ip6f_ident;
	bucket = jenkins_hash32(hashkey, nitems(hashkey), V_ip6qb_hashseed);
	bucket &= IP6REASS_HMASK;
	head = IP6QB_HEAD(bucket);
	IP6QB_LOCK(bucket);
	/*
	 * Enforce upper bound on number of fragments for the entire system.
	 * If maxfrag is 0, never accept fragments.
	 * If maxfrag is -1, accept all fragments without limitation.
	 */
	if (ip6_maxfrags < 0)
		;
	else if (atomic_load_int(&frag6_nfrags) >= (u_int)ip6_maxfrags)
		goto dropfrag;
	for (q6 = head->ip6q_next; q6 != head; q6 = q6->ip6q_next)
		if (ip6f->ip6f_ident == q6->ip6q_ident &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst)
#ifdef MAC
		    && mac_ip6q_match(m, q6)
#endif
		    )
			break;
	/* A first fragment to arrive creates a reassembly queue. */
	if (q6 == head) {

		/*
		 * Enforce upper bound on number of fragmented packets
		 * for which we attempt reassembly;
		 * If maxfragpackets is 0, never accept fragments.
		 * If maxfragpackets is -1, accept all fragments without
		 * limitation.
		 */
		if (V_ip6_maxfragpackets < 0)
			;
		else if (V_ip6qb[bucket].count >= V_ip6_maxfragbucketsize ||
		    atomic_load_int(&V_frag6_nfragpackets) >=
		    (u_int)V_ip6_maxfragpackets)
			goto dropfrag;
		atomic_add_int(&V_frag6_nfragpackets, 1);

		/* Allocate IPv6 fragment packet queue entry. */
		q6 = (struct ip6q *)malloc(sizeof(struct ip6q), M_FRAG6,
		    M_NOWAIT | M_ZERO);
		if (q6 == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ip6q_init(q6, M_NOWAIT) != 0) {
			free(q6, M_FRAG6);
			goto dropfrag;
		}
		mac_ip6q_create(m, q6);
#endif
		frag6_insque_head(q6, head, bucket);

		/* ip6q_nxt will be filled afterwards, from 1st fragment. */
		q6->ip6q_down = q6->ip6q_up = (struct ip6asfrag *)q6;
#ifdef notyet
		q6->ip6q_nxtp = (u_char *)nxtp;
#endif
		q6->ip6q_ident = ip6f->ip6f_ident;
		q6->ip6q_ttl = IPV6_FRAGTTL;
		q6->ip6q_src = ip6->ip6_src;
		q6->ip6q_dst = ip6->ip6_dst;
		q6->ip6q_ecn =
		    (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
		q6->ip6q_unfrglen = -1;	/* The 1st fragment has not arrived. */

		q6->ip6q_nfrag = 0;
	}
	/*
	 * If it is the 1st fragment, record the length of the
	 * unfragmentable part and the next header of the fragment header.
	 */
	fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff == 0) {
		q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
		    sizeof(struct ip6_frag);
		q6->ip6q_nxt = ip6f->ip6f_nxt;
	}
	/*
	 * Check that the reassembled packet would not exceed 65535 bytes
	 * in size.
	 * If it would exceed, discard the fragment and return an ICMP error.
	 */
	if (q6->ip6q_unfrglen >= 0) {
		/* The 1st fragment has already arrived. */
		if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
			icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
			    offset - sizeof(struct ip6_frag) +
			    offsetof(struct ip6_frag, ip6f_offlg));
			IP6QB_UNLOCK(bucket);
			return (IPPROTO_DONE);
		}
	} else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offset - sizeof(struct ip6_frag) +
		    offsetof(struct ip6_frag, ip6f_offlg));
		IP6QB_UNLOCK(bucket);
		return (IPPROTO_DONE);
	}
	/*
	 * If it is the first fragment, do the above check for each
	 * fragment already stored in the reassembly queue.
	 */
	if (fragoff == 0) {
		for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
		     af6 = af6dwn) {
			af6dwn = af6->ip6af_down;

			if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen >
			    IPV6_MAXPACKET) {
				struct ip6_hdr *ip6err;
				struct mbuf *merr;
				int erroff;

				merr = IP6_REASS_MBUF(af6);
				erroff = af6->ip6af_offset;

				/* Dequeue the fragment. */
				frag6_deq(af6, bucket);
				free(af6, M_FRAG6);

				/* Adjust pointer. */
				ip6err = mtod(merr, struct ip6_hdr *);

				/*
				 * Restore source and destination addresses
				 * in the erroneous IPv6 header.
				 */
				ip6err->ip6_src = q6->ip6q_src;
				ip6err->ip6_dst = q6->ip6q_dst;

				icmp6_error(merr, ICMP6_PARAM_PROB,
				    ICMP6_PARAMPROB_HEADER,
				    erroff - sizeof(struct ip6_frag) +
				    offsetof(struct ip6_frag, ip6f_offlg));
			}
		}
	}
	/* Allocate an IPv6 fragment queue entry for this fragmented part. */
	ip6af = (struct ip6asfrag *)malloc(sizeof(struct ip6asfrag), M_FRAG6,
	    M_NOWAIT | M_ZERO);
	if (ip6af == NULL)
		goto dropfrag;
	ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG;
	ip6af->ip6af_off = fragoff;
	ip6af->ip6af_frglen = frgpartlen;
	ip6af->ip6af_offset = offset;
	IP6_REASS_MBUF(ip6af) = m;
		af6 = (struct ip6asfrag *)q6;
	/* Do duplicate, condition, and boundary checks. */

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop if CE and not-ECT are mixed for the same packet.
	 */
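	/*
	 * Note: the shift below extracts the two ECN bits from the Traffic
	 * Class field, which sits in bits 20-27 of the host-byte-order
	 * ip6_flow word; IPTOS_ECN_MASK then selects just those two bits.
	 */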
	ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
	ecn0 = q6->ip6q_ecn;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT) {
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
		if (ecn0 != IPTOS_ECN_CE)
			q6->ip6q_ecn = IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
		free(ip6af, M_FRAG6);
		goto dropfrag;
	}
	/* Find a fragmented part which begins after this one does. */
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = af6->ip6af_down)
		if (af6->ip6af_off > ip6af->ip6af_off)
			break;
	/*
	 * If the incoming fragment overlaps some existing fragments in
	 * the reassembly queue, drop both the new fragment and the
	 * entire reassembly queue.  However, if the new fragment
	 * is an exact duplicate of an existing fragment, only silently
	 * drop the existing fragment and leave the fragmentation queue
	 * unchanged, as allowed by the RFC.  (RFC 8200, 4.5)
	 */
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		if (af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen -
		    ip6af->ip6af_off > 0) {
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
	}
	if (af6 != (struct ip6asfrag *)q6) {
		if (ip6af->ip6af_off + ip6af->ip6af_frglen -
		    af6->ip6af_off > 0) {
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
	}
#ifdef MAC
	mac_ip6q_update(m, q6);
#endif
	/*
	 * Stick new segment in its place; check for complete reassembly.
	 * If not complete, check fragment limit.  Move to front of packet
	 * queue, as we are the most recently active fragmented packet.
	 */
	frag6_enq(ip6af, af6->ip6af_up, bucket);
	atomic_add_int(&frag6_nfrags, 1);
	q6->ip6q_nfrag++;
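
	/*
	 * Walk the fragment list in offset order; any gap (an entry whose
	 * offset does not equal the accumulated length so far) or a trailing
	 * fragment that still has the M bit set means reassembly is not yet
	 * complete.
	 */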
	plen = 0;
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = af6->ip6af_down) {
		if (af6->ip6af_off != plen) {
			if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
				IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
			return (IPPROTO_DONE);
		}
		plen += af6->ip6af_frglen;
	}
	if (af6->ip6af_up->ip6af_mff) {
		if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
			IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
			frag6_freef(q6, bucket);
		}
		IP6QB_UNLOCK(bucket);
		return (IPPROTO_DONE);
	}
	/* Reassembly is complete; concatenate fragments. */
	ip6af = q6->ip6q_down;
	t = m = IP6_REASS_MBUF(ip6af);
	af6 = ip6af->ip6af_down;
	frag6_deq(ip6af, bucket);
	while (af6 != (struct ip6asfrag *)q6) {
		m->m_pkthdr.csum_flags &=
		    IP6_REASS_MBUF(af6)->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data +=
		    IP6_REASS_MBUF(af6)->m_pkthdr.csum_data;

		af6dwn = af6->ip6af_down;
		frag6_deq(af6, bucket);
		while (t->m_next)
			t = t->m_next;
		m_adj(IP6_REASS_MBUF(af6), af6->ip6af_offset);
		m_demote_pkthdr(IP6_REASS_MBUF(af6));
		m_cat(t, IP6_REASS_MBUF(af6));
		free(af6, M_FRAG6);
		af6 = af6dwn;
	}
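
	/*
	 * The loop above accumulated the per-fragment partial checksums into
	 * csum_data; fold any carries back into the low 16 bits so it remains
	 * a valid 16-bit one's-complement partial sum.
	 */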
	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);
	/* Adjust offset to point where the original next header starts. */
	offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
	free(ip6af, M_FRAG6);
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons((u_short)plen + offset - sizeof(struct ip6_hdr));
	if (q6->ip6q_ecn == IPTOS_ECN_CE)
		ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);
	nxt = q6->ip6q_nxt;
	if (ip6_deletefraghdr(m, offset, M_NOWAIT) != 0) {
		frag6_remque(q6, bucket);
		atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
#ifdef MAC
		mac_ip6q_destroy(q6);
#endif
		free(q6, M_FRAG6);
		atomic_subtract_int(&V_frag6_nfragpackets, 1);
		goto dropfrag;
	}

	/* Set nxt(-hdr field value) to the original value. */
	m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t),
	    (caddr_t)&nxt);

	frag6_remque(q6, bucket);
	atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
#ifdef MAC
	mac_ip6q_reassemble(q6, m);
	mac_ip6q_destroy(q6);
#endif
	free(q6, M_FRAG6);
	atomic_subtract_int(&V_frag6_nfragpackets, 1);
	if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
		int plen = 0;

		for (t = m; t; t = t->m_next)
			plen += t->m_len;
		m->m_pkthdr.len = plen;
	}

#ifdef RSS
	mtag = m_tag_alloc(MTAG_ABI_IPV6, IPV6_TAG_DIRECT, sizeof(*ip6dc),
	    M_NOWAIT);
	if (mtag == NULL)
		goto dropfrag;

	ip6dc = (struct ip6_direct_ctx *)(mtag + 1);
	ip6dc->ip6dc_nxt = nxt;
	ip6dc->ip6dc_off = offset;

	m_tag_prepend(m, mtag);
#endif
	IP6QB_UNLOCK(bucket);
	IP6STAT_INC(ip6s_reassembled);
	in6_ifstat_inc(dstifp, ifs6_reass_ok);

#ifdef RSS
	/* Queue/dispatch for reprocessing. */
	netisr_dispatch(NETISR_IPV6_DIRECT, m);
	return (IPPROTO_DONE);
#endif

	/* Tell launch routine the next header. */
	*mp = m;
	*offp = offset;

	return (nxt);

dropfrag:
	IP6QB_UNLOCK(bucket);
	in6_ifstat_inc(dstifp, ifs6_reass_fail);
	IP6STAT_INC(ip6s_fragdropped);
	m_freem(m);
	return (IPPROTO_DONE);
}
/*
 * IPv6 reassembly timer processing;
 * if a timer expires on a reassembly queue, discard it.
 */
void
frag6_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ip6q *head, *q6;
	uint32_t bucket;

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
			IP6QB_LOCK(bucket);
			head = IP6QB_HEAD(bucket);
			q6 = head->ip6q_next;
			if (q6 == NULL) {
				/*
				 * XXXJTL: This should never happen.  This
				 * should turn into an assertion.
				 */
				IP6QB_UNLOCK(bucket);
				continue;
			}
			while (q6 != head) {
				--q6->ip6q_ttl;
				q6 = q6->ip6q_next;
				if (q6->ip6q_prev->ip6q_ttl == 0) {
					IP6STAT_ADD(ip6s_fragtimeout,
					    q6->ip6q_prev->ip6q_nfrag);
					/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
					frag6_freef(q6->ip6q_prev, bucket);
				}
			}
			/*
			 * If we are over the maximum number of fragments
			 * (due to the limit being lowered), drain off
			 * enough to get down to the new limit.
			 * Note that we drain all reassembly queues if
			 * maxfragpackets is 0 (fragmentation is disabled),
			 * and do not enforce a limit when maxfragpackets
			 * is negative.
			 */
			while ((V_ip6_maxfragpackets == 0 ||
			    (V_ip6_maxfragpackets > 0 &&
			    V_ip6qb[bucket].count > V_ip6_maxfragbucketsize)) &&
			    head->ip6q_prev != head) {
				IP6STAT_ADD(ip6s_fragoverflow,
				    q6->ip6q_prev->ip6q_nfrag);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(head->ip6q_prev, bucket);
			}
			IP6QB_UNLOCK(bucket);
		}
		/*
		 * If we are still over the maximum number of fragmented
		 * packets, drain off enough to get down to the new limit.
		 */
		bucket = 0;
		while (V_ip6_maxfragpackets >= 0 &&
		    atomic_load_int(&V_frag6_nfragpackets) >
		    (u_int)V_ip6_maxfragpackets) {
			IP6QB_LOCK(bucket);
			head = IP6QB_HEAD(bucket);
			if (head->ip6q_prev != head) {
				IP6STAT_ADD(ip6s_fragoverflow,
				    q6->ip6q_prev->ip6q_nfrag);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(head->ip6q_prev, bucket);
			}
			IP6QB_UNLOCK(bucket);
			bucket = (bucket + 1) % IP6REASS_NHASH;
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
/*
 * Eventhandler to adjust limits in case nmbclusters change.
 */
static void
frag6_change(void *tag)
{
	VNET_ITERATOR_DECL(vnet_iter);

	ip6_maxfrags = IP6_MAXFRAGS;
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
		frag6_set_bucketsize();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
/*
 * Initialise reassembly queue and fragment identifier.
 */
void
frag6_init(void)
{
	struct ip6q *q6;
	uint32_t bucket;

	V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
	frag6_set_bucketsize();
	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		q6 = IP6QB_HEAD(bucket);
		q6->ip6q_next = q6->ip6q_prev = q6;
		mtx_init(&V_ip6qb[bucket].lock, "ip6qlock", NULL, MTX_DEF);
		V_ip6qb[bucket].count = 0;
	}
	V_ip6qb_hashseed = arc4random();
	V_ip6_maxfragsperpacket = 64;
	if (!IS_DEFAULT_VNET(curvnet))
		return;

	ip6_maxfrags = IP6_MAXFRAGS;
	EVENTHANDLER_REGISTER(nmbclusters_change,
	    frag6_change, NULL, EVENTHANDLER_PRI_ANY);
}
/*
 * Drain off all datagram fragments.
 */
void
frag6_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ip6q *head;
	uint32_t bucket;

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
			if (IP6QB_TRYLOCK(bucket) == 0)
				continue;
			head = IP6QB_HEAD(bucket);
			while (head->ip6q_next != head) {
				IP6STAT_INC(ip6s_fragdropped);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(head->ip6q_next, bucket);
			}
			IP6QB_UNLOCK(bucket);
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
/*
 * Put an ip fragment on a reassembly chain.
 * Like insque, but pointers in middle of structure.
 */
static void
frag6_enq(struct ip6asfrag *af6, struct ip6asfrag *up6,
    uint32_t bucket __unused)
{

	IP6QB_LOCK_ASSERT(bucket);

	af6->ip6af_up = up6;
	af6->ip6af_down = up6->ip6af_down;
	up6->ip6af_down->ip6af_up = af6;
	up6->ip6af_down = af6;
}
/*
 * To frag6_enq as remque is to insque.
 */
static void
frag6_deq(struct ip6asfrag *af6, uint32_t bucket __unused)
{

	IP6QB_LOCK_ASSERT(bucket);

	af6->ip6af_up->ip6af_down = af6->ip6af_down;
	af6->ip6af_down->ip6af_up = af6->ip6af_up;
}
static void
frag6_insque_head(struct ip6q *new, struct ip6q *old, uint32_t bucket)
{

	IP6QB_LOCK_ASSERT(bucket);
	KASSERT(IP6QB_HEAD(bucket) == old,
	    ("%s: attempt to insert at head of wrong bucket"
	    " (bucket=%u, old=%p)", __func__, bucket, old));

	new->ip6q_prev = old;
	new->ip6q_next = old->ip6q_next;
	old->ip6q_next->ip6q_prev = new;
	old->ip6q_next = new;
	V_ip6qb[bucket].count++;
}
static void
frag6_remque(struct ip6q *p6, uint32_t bucket)
{

	IP6QB_LOCK_ASSERT(bucket);

	p6->ip6q_prev->ip6q_next = p6->ip6q_next;
	p6->ip6q_next->ip6q_prev = p6->ip6q_prev;
	V_ip6qb[bucket].count--;
}