/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * Copyright (c) 2019 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $KAME: frag6.c,v 1.33 2002/01/07 11:34:48 kjc Exp $
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet/in_systm.h>	/* For ECN definitions. */
#include <netinet/ip.h>		/* For ECN definitions. */

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
/*
 * A "big picture" of how IPv6 fragment queues are all linked together.
 *
 * struct ip6qbucket ip6qb[...];			hashed buckets
 * ||||||||
 * |
 * +--- TAILQ(struct ip6q, packets) *q6;		tailq entries holding
 *      ||||||||					fragmented packets
 *      |						(1 per original packet)
 *      |
 *      +--- TAILQ(struct ip6asfrag, ip6q_frags) *af6;	tailq entries of IPv6
 *           |	*ip6af;					fragment packets
 *           |						for one original packet
 *           + *mbuf
 */
/* Reassembly headers are stored in hash buckets. */
#define	IP6REASS_NHASH_LOG2	10
#define	IP6REASS_NHASH		(1 << IP6REASS_NHASH_LOG2)
#define	IP6REASS_HMASK		(IP6REASS_NHASH - 1)
TAILQ_HEAD(ip6qhead, ip6q);
struct ip6qbucket {
	struct ip6qhead	packets;
	struct mtx	lock;
	int		count;
};
struct ip6asfrag {
	TAILQ_ENTRY(ip6asfrag) ip6af_tq;
	struct mbuf	*ip6af_m;
	int		ip6af_offset;	/* Offset in ip6af_m to next header. */
	int		ip6af_frglen;	/* Fragmentable part length. */
	int		ip6af_off;	/* Fragment offset. */
	bool		ip6af_mff;	/* More fragment bit in frag off. */
};
static MALLOC_DEFINE(M_FRAG6, "frag6", "IPv6 fragment reassembly header");
#ifdef VIMAGE
/* A flag to indicate if IPv6 fragmentation is initialized. */
VNET_DEFINE_STATIC(bool, frag6_on);
#define	V_frag6_on	VNET(frag6_on)
#endif
/* System wide (global) maximum and count of packets in reassembly queues. */
static int ip6_maxfrags;
static volatile u_int frag6_nfrags = 0;

/* Maximum and current packets in per-VNET reassembly queue. */
VNET_DEFINE_STATIC(int, ip6_maxfragpackets);
VNET_DEFINE_STATIC(volatile u_int, frag6_nfragpackets);
#define	V_ip6_maxfragpackets	VNET(ip6_maxfragpackets)
#define	V_frag6_nfragpackets	VNET(frag6_nfragpackets)

/* Maximum per-VNET reassembly queues per bucket and fragments per packet. */
VNET_DEFINE_STATIC(int, ip6_maxfragbucketsize);
VNET_DEFINE_STATIC(int, ip6_maxfragsperpacket);
#define	V_ip6_maxfragbucketsize	VNET(ip6_maxfragbucketsize)
#define	V_ip6_maxfragsperpacket	VNET(ip6_maxfragsperpacket)

/* Per-VNET reassembly queue buckets. */
VNET_DEFINE_STATIC(struct ip6qbucket, ip6qb[IP6REASS_NHASH]);
VNET_DEFINE_STATIC(uint32_t, ip6qb_hashseed);
#define	V_ip6qb			VNET(ip6qb)
#define	V_ip6qb_hashseed	VNET(ip6qb_hashseed)

#define	IP6QB_LOCK(_b)		mtx_lock(&V_ip6qb[(_b)].lock)
#define	IP6QB_TRYLOCK(_b)	mtx_trylock(&V_ip6qb[(_b)].lock)
#define	IP6QB_LOCK_ASSERT(_b)	mtx_assert(&V_ip6qb[(_b)].lock, MA_OWNED)
#define	IP6QB_UNLOCK(_b)	mtx_unlock(&V_ip6qb[(_b)].lock)
#define	IP6QB_HEAD(_b)		(&V_ip6qb[(_b)].packets)
/*
 * By default, limit the number of IP6 fragments across all reassembly
 * queues to 1/32 of the total number of mbuf clusters.
 *
 * Limit the total number of reassembly queues per VNET to the
 * IP6 fragment limit, but ensure the limit will not allow any bucket
 * to grow above 100 items.  (The bucket limit is
 * IP6_MAXFRAGPACKETS / (IP6REASS_NHASH / 2), so the 50 is the correct
 * multiplier to reach a 100-item limit.)
 * The 100-item limit was chosen as brief testing seems to show that
 * this produces "reasonable" performance on some subset of systems.
 */
#define	IP6_MAXFRAGS		(nmbclusters / 32)
#define	IP6_MAXFRAGPACKETS	(imin(IP6_MAXFRAGS, IP6REASS_NHASH * 50))
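/*
 * For example, on a machine with enough mbuf clusters to hit the cap:
 * IP6REASS_NHASH == 1024, so IP6_MAXFRAGPACKETS tops out at
 * 1024 * 50 == 51200 queues, and frag6_set_bucketsize() below derives
 * 51200 / (1024 / 2) == 100 queues per bucket.
 */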
/*
 * Sysctls and helper function.
 */
SYSCTL_DECL(_net_inet6_ip6);

SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, frag6_nfrags,
    CTLFLAG_RD, __DEVOLATILE(u_int *, &frag6_nfrags), 0,
    "Global number of IPv6 fragments across all reassembly queues.");
static void
frag6_set_bucketsize(void)
{
	int i;

	if ((i = V_ip6_maxfragpackets) > 0)
		V_ip6_maxfragbucketsize = imax(i / (IP6REASS_NHASH / 2), 1);
}
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGS, maxfrags,
    CTLFLAG_RW, &ip6_maxfrags, 0,
    "Maximum allowed number of outstanding IPv6 packet fragments. "
    "A value of 0 means no fragmented packets will be accepted, while "
    "a value of -1 means no limit");
static int
sysctl_ip6_maxfragpackets(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = V_ip6_maxfragpackets;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || !req->newptr)
		return (error);
	V_ip6_maxfragpackets = val;
	frag6_set_bucketsize();
	return (0);
}
SYSCTL_PROC(_net_inet6_ip6, IPV6CTL_MAXFRAGPACKETS, maxfragpackets,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    NULL, 0, sysctl_ip6_maxfragpackets, "I",
    "Default maximum number of outstanding fragmented IPv6 packets. "
    "A value of 0 means no fragmented packets will be accepted, while "
    "a value of -1 means no limit");
SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, frag6_nfragpackets,
    CTLFLAG_VNET | CTLFLAG_RD,
    __DEVOLATILE(u_int *, &VNET_NAME(frag6_nfragpackets)), 0,
    "Per-VNET number of IPv6 fragments across all reassembly queues.");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGSPERPACKET, maxfragsperpacket,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragsperpacket), 0,
    "Maximum allowed number of fragments per packet");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGBUCKETSIZE, maxfragbucketsize,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragbucketsize), 0,
    "Maximum number of reassembly queues per hash bucket");
/*
 * Remove the IPv6 fragmentation header from the mbuf.
 */
int
ip6_deletefraghdr(struct mbuf *m, int offset, int wait __unused)
{
	struct ip6_hdr *ip6;

	KASSERT(m->m_len >= offset + sizeof(struct ip6_frag),
	    ("%s: ext headers not contiguous in mbuf %p m_len %d >= "
	    "offset %d + %zu\n", __func__, m, m->m_len, offset,
	    sizeof(struct ip6_frag)));

	/* Delete frag6 header. */
	ip6 = mtod(m, struct ip6_hdr *);
	bcopy(ip6, (char *)ip6 + sizeof(struct ip6_frag), offset);
	m->m_data += sizeof(struct ip6_frag);
	m->m_len -= sizeof(struct ip6_frag);
	m->m_flags |= M_FRAGMENTED;

	return (0);
}
/*
 * Free a fragment reassembly header and all associated datagrams.
 */
static void
frag6_freef(struct ip6q *q6, uint32_t bucket)
{
	struct ip6_hdr *ip6;
	struct ip6asfrag *af6;
	struct mbuf *m;

	IP6QB_LOCK_ASSERT(bucket);

	while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) {
		m = af6->ip6af_m;
		TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);

		/*
		 * Return ICMP time exceeded error for the 1st fragment.
		 * Just free other fragments.
		 */
		if (af6->ip6af_off == 0 && m->m_pkthdr.rcvif != NULL) {
			/* Adjust pointer. */
			ip6 = mtod(m, struct ip6_hdr *);

			/* Restore source and destination addresses. */
			ip6->ip6_src = q6->ip6q_src;
			ip6->ip6_dst = q6->ip6q_dst;

			icmp6_error(m, ICMP6_TIME_EXCEEDED,
			    ICMP6_TIME_EXCEED_REASSEMBLY, 0);
		} else
			m_freem(m);
		free(af6, M_FRAG6);
	}

	TAILQ_REMOVE(IP6QB_HEAD(bucket), q6, ip6q_tq);
	V_ip6qb[bucket].count--;
	atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
#ifdef MAC
	mac_ip6q_destroy(q6);
#endif
	free(q6, M_FRAG6);
	atomic_subtract_int(&V_frag6_nfragpackets, 1);
}
/*
 * Drain off all datagram fragments belonging to
 * the given network interface.
 */
static void
frag6_cleanup(void *arg __unused, struct ifnet *ifp)
{
	struct ip6qhead *head;
	struct ip6q *q6;
	struct ip6asfrag *af6;
	uint32_t bucket;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	CURVNET_SET_QUIET(ifp->if_vnet);
#ifdef VIMAGE
	/*
	 * Skip processing if IPv6 reassembly is not initialised or
	 * torn down by frag6_destroy().
	 */
	if (!V_frag6_on) {
		CURVNET_RESTORE();
		return;
	}
#endif

	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		IP6QB_LOCK(bucket);
		head = IP6QB_HEAD(bucket);
		/* Scan fragment list. */
		TAILQ_FOREACH(q6, head, ip6q_tq) {
			TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) {
				/* Clear no longer valid rcvif pointer. */
				if (af6->ip6af_m->m_pkthdr.rcvif == ifp)
					af6->ip6af_m->m_pkthdr.rcvif = NULL;
			}
		}
		IP6QB_UNLOCK(bucket);
	}
	CURVNET_RESTORE();
}
EVENTHANDLER_DEFINE(ifnet_departure_event, frag6_cleanup, NULL, 0);
/*
 * As in RFC 2460, the fragment and reassembly rules in RFC 8200 do not
 * agree with each other on the handling of the next header field in the
 * fragment header.  While the sender will use the same value for all of
 * the fragmented packets, the receiver is advised not to check them for
 * consistency.
 *
 * Fragment rules (p18,p19):
 *	(2)  A Fragment header containing:
 *	The Next Header value that identifies the first header
 *	after the Per-Fragment headers of the original packet.
 *		-> next header field is same for all fragments
 *
 * Reassembly rule (p20):
 *	The Next Header field of the last header of the Per-Fragment
 *	headers is obtained from the Next Header field of the first
 *	fragment's Fragment header.
 *		-> should grab it from the first fragment only
 *
 * The following note also contradicts the fragment rule; no one is going
 * to send fragments of the same packet with differing next header fields.
 *
 * Additional note (p22) [not an error]:
 *	The Next Header values in the Fragment headers of different
 *	fragments of the same original packet may differ.  Only the value
 *	from the Offset zero fragment packet is used for reassembly.
 *		-> should grab it from the first fragment only
 *
 * There is no explicit reason given in the RFC.  Historical reason maybe?
 */
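/*
 * Concretely: if the offset-zero fragment's ip6f_nxt is IPPROTO_TCP, the
 * reassembled packet is handed up as TCP even if a later fragment were to
 * carry a different value in its fragment header; the other fragments'
 * next header values are simply never looked at.
 */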
int
frag6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m, *t;
	struct ip6_hdr *ip6;
	struct ip6_frag *ip6f;
	struct ip6qhead *head;
	struct ip6q *q6;
	struct ip6asfrag *af6, *ip6af, *af6tmp;
	struct in6_ifaddr *ia6;
	struct ifnet *dstifp, *srcifp;
	uint32_t hashkey[(sizeof(struct in6_addr) * 2 +
		    sizeof(ip6f->ip6f_ident)) / sizeof(uint32_t)];
	uint32_t bucket, *hashkeyp;
	int fragoff, frgpartlen;	/* Must be larger than uint16_t. */
	int nxt, offset, plen;
	uint8_t ecn, ecn0;
	bool only_frag;
#ifdef RSS
	struct m_tag *mtag;
	struct ip6_direct_ctx *ip6dc;
#endif

	m = *mp;
	offset = *offp;
	if (m->m_len < offset + sizeof(struct ip6_frag)) {
		m = m_pullup(m, offset + sizeof(struct ip6_frag));
		if (m == NULL) {
			IP6STAT_INC(ip6s_exthdrtoolong);
			*mp = NULL;
			return (IPPROTO_DONE);
		}
	}
	ip6 = mtod(m, struct ip6_hdr *);
	dstifp = NULL;
	/* Find the destination interface of the packet. */
	ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
	if (ia6 != NULL) {
		dstifp = ia6->ia_ifp;
		ifa_free(&ia6->ia_ifa);
	}
	/* Jumbo payload cannot contain a fragment header. */
	if (ip6->ip6_plen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		*mp = NULL;
		return (IPPROTO_DONE);
	}
	/*
	 * Check whether fragment packet's fragment length is a
	 * multiple of 8 octets (unless it is the last one).
	 * sizeof(struct ip6_frag) == 8
	 * sizeof(struct ip6_hdr) == 40
	 */
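	/*
	 * The fragmentable part of this packet is
	 * sizeof(struct ip6_hdr) + plen - offset - sizeof(struct ip6_frag)
	 * == plen - offset + 32 octets, and since 32 is itself a multiple
	 * of 8, testing (plen - offset) & 0x7 below is sufficient.
	 */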
	ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset);
	if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
	    (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		*mp = NULL;
		return (IPPROTO_DONE);
	}
	IP6STAT_INC(ip6s_fragments);
	in6_ifstat_inc(dstifp, ifs6_reass_reqd);
	/*
	 * Handle "atomic" fragments (offset and m bit set to 0) upfront,
	 * unrelated to any reassembly.  We just need to remove the frag hdr.
	 * See RFC 6946 and section 4.5 of RFC 8200.
	 */
	if ((ip6f->ip6f_offlg & ~IP6F_RESERVED_MASK) == 0) {
		IP6STAT_INC(ip6s_atomicfrags);
		nxt = ip6f->ip6f_nxt;
		/*
		 * Set nxt(-hdr field value) to the original value.
		 * We cannot just set ip6->ip6_nxt as there might be
		 * an unfragmentable part with extension headers and
		 * we must update the last one.
		 */
		m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t),
		    (caddr_t)&nxt);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) -
		    sizeof(struct ip6_frag));
		if (ip6_deletefraghdr(m, offset, M_NOWAIT) != 0)
			goto dropfrag2;
		m->m_pkthdr.len -= sizeof(struct ip6_frag);
		in6_ifstat_inc(dstifp, ifs6_reass_ok);
		*offp = offset;
		return (ip6f->ip6f_nxt);
	}
	/* Offset now points to data portion. */
	offset += sizeof(struct ip6_frag);

	/* Get fragment length and discard 0-byte fragments. */
	frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
	if (frgpartlen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		IP6STAT_INC(ip6s_fragdropped);
		*mp = NULL;
		return (IPPROTO_DONE);
	}
	/*
	 * Enforce upper bound on number of fragments for the entire system.
	 * If maxfrag is 0, never accept fragments.
	 * If maxfrag is -1, accept all fragments without limitation.
	 */
	if (ip6_maxfrags < 0)
		;
	else if (atomic_load_int(&frag6_nfrags) >= (u_int)ip6_maxfrags)
		goto dropfrag2;
	/*
	 * Validate that a full header chain to the ULP is present in the
	 * packet containing the first fragment as per RFC 7112 and
	 * RFC 8200 pages 18,19:
	 * The first fragment packet is composed of:
	 * (3)  Extension headers, if any, and the Upper-Layer header.  These
	 *      headers must be in the first fragment.  ...
	 */
	fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
	/* XXX TODO.  thj has D16851 open for this. */
	/* Send ICMPv6 4,3 in case of violation. */

	/* Store receive network interface pointer for later. */
	srcifp = m->m_pkthdr.rcvif;
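	/*
	 * All fragments of one original packet carry the same
	 * (src, dst, ident) triple, so they always land in the same bucket,
	 * while the random per-VNET hash seed keeps the bucket distribution
	 * unpredictable to remote senders.
	 */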
	/* Generate a hash value for fragment bucket selection. */
	hashkeyp = hashkey;
	memcpy(hashkeyp, &ip6->ip6_src, sizeof(struct in6_addr));
	hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
	memcpy(hashkeyp, &ip6->ip6_dst, sizeof(struct in6_addr));
	hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
	*hashkeyp = ip6f->ip6f_ident;
	bucket = jenkins_hash32(hashkey, nitems(hashkey), V_ip6qb_hashseed);
	bucket &= IP6REASS_HMASK;
	IP6QB_LOCK(bucket);
	head = IP6QB_HEAD(bucket);
	TAILQ_FOREACH(q6, head, ip6q_tq)
		if (ip6f->ip6f_ident == q6->ip6q_ident &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst)
#ifdef MAC
		    && mac_ip6q_match(m, q6)
#endif
		    )
			break;
	only_frag = false;
	if (q6 == NULL) {
		/* A first fragment to arrive creates a reassembly queue. */
		only_frag = true;

		/*
		 * Enforce upper bound on number of fragmented packets
		 * for which we attempt reassembly;
		 * If maxfragpackets is 0, never accept fragments.
		 * If maxfragpackets is -1, accept all fragments without
		 * limitation.
		 */
		if (V_ip6_maxfragpackets < 0)
			;
		else if (V_ip6qb[bucket].count >= V_ip6_maxfragbucketsize ||
		    atomic_load_int(&V_frag6_nfragpackets) >=
		    (u_int)V_ip6_maxfragpackets)
			goto dropfrag;

		/* Allocate IPv6 fragment packet queue entry. */
		q6 = (struct ip6q *)malloc(sizeof(struct ip6q), M_FRAG6,
		    M_NOWAIT | M_ZERO);
		if (q6 == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ip6q_init(q6, M_NOWAIT) != 0) {
			free(q6, M_FRAG6);
			goto dropfrag;
		}
		mac_ip6q_create(m, q6);
#endif
		atomic_add_int(&V_frag6_nfragpackets, 1);

		/* ip6q_nxt will be filled afterwards, from 1st fragment. */
		TAILQ_INIT(&q6->ip6q_frags);
		q6->ip6q_ident = ip6f->ip6f_ident;
		q6->ip6q_ttl = IPV6_FRAGTTL;
		q6->ip6q_src = ip6->ip6_src;
		q6->ip6q_dst = ip6->ip6_dst;
		q6->ip6q_ecn =
		    (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
		q6->ip6q_unfrglen = -1;	/* The 1st fragment has not arrived. */

		/* Add the fragmented packet to the bucket. */
		TAILQ_INSERT_HEAD(head, q6, ip6q_tq);
		V_ip6qb[bucket].count++;
	}
	/*
	 * If it is the 1st fragment, record the length of the
	 * unfragmentable part and the next header of the fragment header.
	 * Assume the first fragment to arrive will be correct.
	 * We do not have any duplicate checks here yet so another packet
	 * with fragoff == 0 could come and overwrite the ip6q_unfrglen
	 * and worse, the next header, at any time.
	 */
	if (fragoff == 0 && q6->ip6q_unfrglen == -1) {
		q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
		    sizeof(struct ip6_frag);
		q6->ip6q_nxt = ip6f->ip6f_nxt;
	}
	/*
	 * Check that the reassembled packet would not exceed 65535 bytes
	 * in size.
	 * If it would exceed, discard the fragment and return an ICMP error.
	 */
	if (q6->ip6q_unfrglen >= 0) {
		/* The 1st fragment has already arrived. */
		if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
			if (only_frag) {
				TAILQ_REMOVE(head, q6, ip6q_tq);
				V_ip6qb[bucket].count--;
				atomic_subtract_int(&V_frag6_nfragpackets, 1);
#ifdef MAC
				mac_ip6q_destroy(q6);
#endif
				free(q6, M_FRAG6);
			}
			IP6QB_UNLOCK(bucket);
			icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
			    offset - sizeof(struct ip6_frag) +
			    offsetof(struct ip6_frag, ip6f_offlg));
			*mp = NULL;
			return (IPPROTO_DONE);
		}
	} else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
		if (only_frag) {
			TAILQ_REMOVE(head, q6, ip6q_tq);
			V_ip6qb[bucket].count--;
			atomic_subtract_int(&V_frag6_nfragpackets, 1);
#ifdef MAC
			mac_ip6q_destroy(q6);
#endif
			free(q6, M_FRAG6);
		}
		IP6QB_UNLOCK(bucket);
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offset - sizeof(struct ip6_frag) +
		    offsetof(struct ip6_frag, ip6f_offlg));
		*mp = NULL;
		return (IPPROTO_DONE);
	}
	/*
	 * If it is the first fragment, do the above check for each
	 * fragment already stored in the reassembly queue.
	 */
	if (fragoff == 0 && !only_frag) {
		TAILQ_FOREACH_SAFE(af6, &q6->ip6q_frags, ip6af_tq, af6tmp) {
			if (q6->ip6q_unfrglen + af6->ip6af_off +
			    af6->ip6af_frglen > IPV6_MAXPACKET) {
				struct ip6_hdr *ip6err;
				struct mbuf *merr;
				int erroff;

				merr = af6->ip6af_m;
				erroff = af6->ip6af_offset;

				/* Dequeue the fragment. */
				TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);
				q6->ip6q_nfrag--;
				atomic_subtract_int(&frag6_nfrags, 1);
				free(af6, M_FRAG6);

				/* Set a valid receive interface pointer. */
				merr->m_pkthdr.rcvif = srcifp;

				/* Adjust pointer. */
				ip6err = mtod(merr, struct ip6_hdr *);

				/*
				 * Restore source and destination addresses
				 * in the erroneous IPv6 header.
				 */
				ip6err->ip6_src = q6->ip6q_src;
				ip6err->ip6_dst = q6->ip6q_dst;

				icmp6_error(merr, ICMP6_PARAM_PROB,
				    ICMP6_PARAMPROB_HEADER,
				    erroff - sizeof(struct ip6_frag) +
				    offsetof(struct ip6_frag, ip6f_offlg));
			}
		}
	}
	/* Allocate an IPv6 fragment queue entry for this fragmented part. */
	ip6af = (struct ip6asfrag *)malloc(sizeof(struct ip6asfrag), M_FRAG6,
	    M_NOWAIT | M_ZERO);
	if (ip6af == NULL)
		goto dropfrag;
	ip6af->ip6af_mff = (ip6f->ip6f_offlg & IP6F_MORE_FRAG) ? true : false;
	ip6af->ip6af_off = fragoff;
	ip6af->ip6af_frglen = frgpartlen;
	ip6af->ip6af_offset = offset;
	ip6af->ip6af_m = m;

	if (only_frag) {
		/*
		 * Do a manual insert rather than a hard-to-understand cast
		 * to a different type relying on data structure order to work.
		 */
		TAILQ_INSERT_HEAD(&q6->ip6q_frags, ip6af, ip6af_tq);
		goto postinsert;
	}
	/* Do duplicate, condition, and boundary checks. */

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
	ecn0 = q6->ip6q_ecn;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT) {
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
		if (ecn0 != IPTOS_ECN_CE)
			q6->ip6q_ecn = IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
		free(ip6af, M_FRAG6);
		goto dropfrag;
	}
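	/*
	 * This mirrors the reassembly rules of RFC 3168, section 5.3: a CE
	 * mark on any fragment is preserved in the reassembled packet, while
	 * a mix of not-ECT and ECT/CE fragments is invalid and causes the
	 * arriving fragment to be dropped.
	 */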
	/* Find a fragmented part which begins after this one does. */
	TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq)
		if (af6->ip6af_off > ip6af->ip6af_off)
			break;
	/*
	 * If the incoming fragment overlaps some existing fragments in
	 * the reassembly queue, drop both the new fragment and the
	 * entire reassembly queue.  However, if the new fragment
	 * is an exact duplicate of an existing fragment, only silently
	 * drop the new fragment and leave the fragmentation queue
	 * unchanged, as allowed by the RFC.  (RFC 8200, 4.5)
	 */
	if (af6 != NULL)
		af6tmp = TAILQ_PREV(af6, ip6fraghead, ip6af_tq);
	else
		af6tmp = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead);
	if (af6tmp != NULL) {
		if (af6tmp->ip6af_off + af6tmp->ip6af_frglen -
		    ip6af->ip6af_off > 0) {
			if (af6tmp->ip6af_off != ip6af->ip6af_off ||
			    af6tmp->ip6af_frglen != ip6af->ip6af_frglen)
				frag6_freef(q6, bucket);
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
	}
	if (af6 != NULL) {
		if (ip6af->ip6af_off + ip6af->ip6af_frglen -
		    af6->ip6af_off > 0) {
			if (af6->ip6af_off != ip6af->ip6af_off ||
			    af6->ip6af_frglen != ip6af->ip6af_frglen)
				frag6_freef(q6, bucket);
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
	}
#ifdef MAC
	mac_ip6q_update(m, q6);
#endif
	/*
	 * Stick new segment in its place; check for complete reassembly.
	 * If not complete, check fragment limit.  Move to front of packet
	 * queue, as we are the most recently active fragmented packet.
	 */
	if (af6 != NULL)
		TAILQ_INSERT_BEFORE(af6, ip6af, ip6af_tq);
	else
		TAILQ_INSERT_TAIL(&q6->ip6q_frags, ip6af, ip6af_tq);
postinsert:
	atomic_add_int(&frag6_nfrags, 1);
	q6->ip6q_nfrag++;
	plen = 0;
	TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) {
		if (af6->ip6af_off != plen) {
			if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
				IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
			*mp = NULL;
			return (IPPROTO_DONE);
		}
		plen += af6->ip6af_frglen;
	}
	af6 = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead);
	if (af6->ip6af_mff) {
		if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
			IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
			frag6_freef(q6, bucket);
		}
		IP6QB_UNLOCK(bucket);
		*mp = NULL;
		return (IPPROTO_DONE);
	}
	/* Reassembly is complete; concatenate fragments. */
	ip6af = TAILQ_FIRST(&q6->ip6q_frags);
	t = m = ip6af->ip6af_m;
	TAILQ_REMOVE(&q6->ip6q_frags, ip6af, ip6af_tq);
	while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) {
		m->m_pkthdr.csum_flags &=
		    af6->ip6af_m->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data +=
		    af6->ip6af_m->m_pkthdr.csum_data;

		TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);
		t = m_last(t);
		m_adj(af6->ip6af_m, af6->ip6af_offset);
		m_demote_pkthdr(af6->ip6af_m);
		m_cat(t, af6->ip6af_m);
		free(af6, M_FRAG6);
	}
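	/*
	 * The csum_data additions above may have produced carries beyond
	 * 16 bits; fold them back in so the result is again a valid
	 * ones-complement partial checksum.
	 */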
	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);
	/* Adjust offset to point where the original next header starts. */
	offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
	free(ip6af, M_FRAG6);
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons((u_short)plen + offset - sizeof(struct ip6_hdr));
	if (q6->ip6q_ecn == IPTOS_ECN_CE)
		ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);
	nxt = q6->ip6q_nxt;

	TAILQ_REMOVE(head, q6, ip6q_tq);
	V_ip6qb[bucket].count--;
	atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);

	ip6_deletefraghdr(m, offset, M_NOWAIT);

	/* Set nxt(-hdr field value) to the original value. */
	m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t),
	    (caddr_t)&nxt);

#ifdef MAC
	mac_ip6q_reassemble(q6, m);
	mac_ip6q_destroy(q6);
#endif
	free(q6, M_FRAG6);
	atomic_subtract_int(&V_frag6_nfragpackets, 1);
	if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
		plen = 0;
		for (t = m; t; t = t->m_next)
			plen += t->m_len;
		m->m_pkthdr.len = plen;
		/* Set a valid receive interface pointer. */
		m->m_pkthdr.rcvif = srcifp;
	}
#ifdef RSS
	mtag = m_tag_alloc(MTAG_ABI_IPV6, IPV6_TAG_DIRECT, sizeof(*ip6dc),
	    M_NOWAIT);
	if (mtag == NULL)
		goto dropfrag;

	ip6dc = (struct ip6_direct_ctx *)(mtag + 1);
	ip6dc->ip6dc_nxt = nxt;
	ip6dc->ip6dc_off = offset;

	m_tag_prepend(m, mtag);
#endif

	IP6QB_UNLOCK(bucket);
	IP6STAT_INC(ip6s_reassembled);
	in6_ifstat_inc(dstifp, ifs6_reass_ok);

#ifdef RSS
	/* Queue/dispatch for reprocessing. */
	netisr_dispatch(NETISR_IPV6_DIRECT, m);
	*mp = NULL;
	return (IPPROTO_DONE);
#endif

	/* Tell launch routine the next header. */
	*mp = m;
	*offp = offset;
	return (nxt);

dropfrag:
	IP6QB_UNLOCK(bucket);
dropfrag2:
	in6_ifstat_inc(dstifp, ifs6_reass_fail);
	IP6STAT_INC(ip6s_fragdropped);
	m_freem(m);
	*mp = NULL;
	return (IPPROTO_DONE);
}
/*
 * IPv6 reassembling timer processing;
 * if a timer expires on a reassembly queue, discard it.
 */
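/*
 * Each queue is created with a budget of IPV6_FRAGTTL ticks; one tick is
 * deducted per slowtimo run, and an expired queue is discarded via
 * frag6_freef(), which also sends an ICMPv6 time-exceeded error for the
 * first fragment if that fragment has arrived.
 */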
void
frag6_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ip6qhead *head;
	struct ip6q *q6, *q6tmp;
	uint32_t bucket;

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
			IP6QB_LOCK(bucket);
			head = IP6QB_HEAD(bucket);
			TAILQ_FOREACH_SAFE(q6, head, ip6q_tq, q6tmp)
				if (--q6->ip6q_ttl == 0) {
					IP6STAT_ADD(ip6s_fragtimeout,
					    q6->ip6q_nfrag);
					/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
					frag6_freef(q6, bucket);
				}
			/*
			 * If we are over the maximum number of fragments
			 * (due to the limit being lowered), drain off
			 * enough to get down to the new limit.
			 * Note that we drain all reassembly queues if
			 * maxfragpackets is 0 (fragmentation is disabled),
			 * and do not enforce a limit when maxfragpackets
			 * is negative.
			 */
			while ((V_ip6_maxfragpackets == 0 ||
			    (V_ip6_maxfragpackets > 0 &&
			    V_ip6qb[bucket].count > V_ip6_maxfragbucketsize)) &&
			    (q6 = TAILQ_LAST(head, ip6qhead)) != NULL) {
				IP6STAT_ADD(ip6s_fragoverflow, q6->ip6q_nfrag);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
		}
		/*
		 * If we are still over the maximum number of fragmented
		 * packets, drain off enough to get down to the new limit.
		 */
		bucket = 0;
		while (V_ip6_maxfragpackets >= 0 &&
		    atomic_load_int(&V_frag6_nfragpackets) >
		    (u_int)V_ip6_maxfragpackets) {
			IP6QB_LOCK(bucket);
			q6 = TAILQ_LAST(IP6QB_HEAD(bucket), ip6qhead);
			if (q6 != NULL) {
				IP6STAT_ADD(ip6s_fragoverflow, q6->ip6q_nfrag);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
			bucket = (bucket + 1) % IP6REASS_NHASH;
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
/*
 * Eventhandler to adjust limits in case nmbclusters change.
 */
static void
frag6_change(void *tag)
{
	VNET_ITERATOR_DECL(vnet_iter);

	ip6_maxfrags = IP6_MAXFRAGS;
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
		frag6_set_bucketsize();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
/*
 * Initialise reassembly queue and fragment identifier.
 */
void
frag6_init(void)
{
	uint32_t bucket;

	V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
	frag6_set_bucketsize();
	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		TAILQ_INIT(IP6QB_HEAD(bucket));
		mtx_init(&V_ip6qb[bucket].lock, "ip6qb", NULL, MTX_DEF);
		V_ip6qb[bucket].count = 0;
	}
	V_ip6qb_hashseed = arc4random();
	V_ip6_maxfragsperpacket = 64;
#ifdef VIMAGE
	V_frag6_on = true;
#endif
	if (!IS_DEFAULT_VNET(curvnet))
		return;

	ip6_maxfrags = IP6_MAXFRAGS;
	EVENTHANDLER_REGISTER(nmbclusters_change,
	    frag6_change, NULL, EVENTHANDLER_PRI_ANY);
}
/*
 * Drain off all datagram fragments.
 */
static void
frag6_drain_one(void)
{
	struct ip6q *q6;
	uint32_t bucket;

	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		IP6QB_LOCK(bucket);
		while ((q6 = TAILQ_FIRST(IP6QB_HEAD(bucket))) != NULL) {
			IP6STAT_INC(ip6s_fragdropped);
			/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
			frag6_freef(q6, bucket);
		}
		IP6QB_UNLOCK(bucket);
	}
}
void
frag6_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		frag6_drain_one();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
#ifdef VIMAGE
/*
 * Clean up IPv6 reassembly structures.
 */
void
frag6_destroy(void)
{
	uint32_t bucket;

	frag6_drain_one();
	V_frag6_on = false;
	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		KASSERT(V_ip6qb[bucket].count == 0,
		    ("%s: V_ip6qb[%d] (%p) count not 0 (%d)", __func__,
		    bucket, &V_ip6qb[bucket], V_ip6qb[bucket].count));
		mtx_destroy(&V_ip6qb[bucket].lock);
	}
}
#endif /* VIMAGE */