/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 * Copyright (c) 2019 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $KAME: frag6.c,v 1.33 2002/01/07 11:34:48 kjc Exp $
 */
#include <sys/cdefs.h>

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/netisr.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet/in_systm.h>	/* For ECN definitions. */
#include <netinet/ip.h>		/* For ECN definitions. */

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
/*
 * A "big picture" of how IPv6 fragment queues are all linked together.
 *
 * struct ip6qbucket ip6qb[...];			hashed buckets
 * ||||||||
 * |
 * +--- TAILQ(struct ip6q, packets) *q6;		tailq entries holding
 *      ||||||||					fragmented packets
 *      |						(1 per original packet)
 *      |
 *      +--- TAILQ(struct ip6asfrag, ip6q_frags) *af6;	tailq entries of IPv6
 *           |						fragment packets
 *           |						for one original packet
 *           + *ip6af_m (mbuf)
 */
/* Reassembly headers are stored in hash buckets. */
#define	IP6REASS_NHASH_LOG2	10
#define	IP6REASS_NHASH		(1 << IP6REASS_NHASH_LOG2)
#define	IP6REASS_HMASK		(IP6REASS_NHASH - 1)
TAILQ_HEAD(ip6qhead, ip6q);
struct ip6qbucket {
	struct ip6qhead	packets;
	struct mtx	lock;
	int		count;
};
TAILQ_HEAD(ip6fraghead, ip6asfrag);
struct ip6asfrag {
	TAILQ_ENTRY(ip6asfrag) ip6af_tq;
	struct mbuf	*ip6af_m;
	int		ip6af_offset;	/* Offset in ip6af_m to next header. */
	int		ip6af_frglen;	/* Fragmentable part length. */
	int		ip6af_off;	/* Fragment offset. */
	bool		ip6af_mff;	/* More fragment bit in frag off. */
};
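/*
 * Illustrative traversal of the structures above (a sketch only, not part
 * of the reassembly path; assumes the relevant bucket lock is held):
 *
 *	struct ip6q *q6;
 *	struct ip6asfrag *af6;
 *
 *	TAILQ_FOREACH(q6, &V_ip6qb[bucket].packets, ip6q_tq)
 *		TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq)
 *			m = af6->ip6af_m;	/* one received fragment */
 */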
static MALLOC_DEFINE(M_FRAG6, "frag6", "IPv6 fragment reassembly header");

#ifdef VIMAGE
/* A flag to indicate if IPv6 fragmentation is initialized. */
VNET_DEFINE_STATIC(bool, frag6_on);
#define	V_frag6_on	VNET(frag6_on)
#endif

/* System wide (global) maximum and count of packets in reassembly queues. */
static int ip6_maxfrags;
static u_int __exclusive_cache_line frag6_nfrags;

/* Maximum and current packets in per-VNET reassembly queue. */
VNET_DEFINE_STATIC(int, ip6_maxfragpackets);
VNET_DEFINE_STATIC(volatile u_int, frag6_nfragpackets);
#define	V_ip6_maxfragpackets	VNET(ip6_maxfragpackets)
#define	V_frag6_nfragpackets	VNET(frag6_nfragpackets)

/* Maximum per-VNET reassembly timeout (milliseconds). */
VNET_DEFINE_STATIC(u_int, ip6_fraglifetime) = IPV6_DEFFRAGTTL;
#define	V_ip6_fraglifetime	VNET(ip6_fraglifetime)

/* Maximum per-VNET reassembly queues per bucket and fragments per packet. */
VNET_DEFINE_STATIC(int, ip6_maxfragbucketsize);
VNET_DEFINE_STATIC(int, ip6_maxfragsperpacket);
#define	V_ip6_maxfragbucketsize	VNET(ip6_maxfragbucketsize)
#define	V_ip6_maxfragsperpacket	VNET(ip6_maxfragsperpacket)

/* Per-VNET reassembly queue buckets. */
VNET_DEFINE_STATIC(struct ip6qbucket, ip6qb[IP6REASS_NHASH]);
VNET_DEFINE_STATIC(uint32_t, ip6qb_hashseed);
#define	V_ip6qb			VNET(ip6qb)
#define	V_ip6qb_hashseed	VNET(ip6qb_hashseed)

#define	IP6QB_LOCK(_b)		mtx_lock(&V_ip6qb[(_b)].lock)
#define	IP6QB_TRYLOCK(_b)	mtx_trylock(&V_ip6qb[(_b)].lock)
#define	IP6QB_LOCK_ASSERT(_b)	mtx_assert(&V_ip6qb[(_b)].lock, MA_OWNED)
#define	IP6QB_UNLOCK(_b)	mtx_unlock(&V_ip6qb[(_b)].lock)
#define	IP6QB_HEAD(_b)		(&V_ip6qb[(_b)].packets)
/*
 * By default, limit the number of IP6 fragments across all reassembly
 * queues to 1/32 of the total number of mbuf clusters.
 *
 * Limit the total number of reassembly queues per VNET to the
 * IP6 fragment limit, but ensure the limit will not allow any bucket
 * to grow above 100 items.  (The bucket limit is
 * IP6_MAXFRAGPACKETS / (IP6REASS_NHASH / 2), so the 50 is the correct
 * multiplier to reach a 100-item limit.)
 * The 100-item limit was chosen as brief testing seems to show that
 * this produces "reasonable" performance on some subset of systems
 * under DoS attack.
 */
#define	IP6_MAXFRAGS		(nmbclusters / 32)
#define	IP6_MAXFRAGPACKETS	(imin(IP6_MAXFRAGS, IP6REASS_NHASH * 50))
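/*
 * Worked example of the limits above (illustrative; assumes nmbclusters is
 * large enough that the hash-based clamp wins): IP6REASS_NHASH is 1 << 10 =
 * 1024, so IP6_MAXFRAGPACKETS caps at 1024 * 50 = 51200 queues, and
 * frag6_set_bucketsize() below then computes
 *
 *	51200 / (1024 / 2) = 100
 *
 * reassembly queues per bucket, the 100-item limit described above.
 */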
/* Interval between periodic reassembly queue inspections. */
#define	IP6_CALLOUT_INTERVAL_MS	500

/*
 * Sysctls and helper function.
 */
SYSCTL_DECL(_net_inet6_ip6);

SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, frag6_nfrags,
    CTLFLAG_RD, &frag6_nfrags, 0,
    "Global number of IPv6 fragments across all reassembly queues.");
static void
frag6_set_bucketsize(void)
{
	int i;

	if ((i = V_ip6_maxfragpackets) > 0)
		V_ip6_maxfragbucketsize = imax(i / (IP6REASS_NHASH / 2), 1);
}
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGS, maxfrags,
    CTLFLAG_RW, &ip6_maxfrags, 0,
    "Maximum allowed number of outstanding IPv6 packet fragments. "
    "A value of 0 means no fragmented packets will be accepted, while "
    "a value of -1 means no limit");
static int
sysctl_ip6_maxfragpackets(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = V_ip6_maxfragpackets;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || !req->newptr)
		return (error);
	V_ip6_maxfragpackets = val;
	frag6_set_bucketsize();
	return (0);
}
SYSCTL_PROC(_net_inet6_ip6, IPV6CTL_MAXFRAGPACKETS, maxfragpackets,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    NULL, 0, sysctl_ip6_maxfragpackets, "I",
    "Default maximum number of outstanding fragmented IPv6 packets. "
    "A value of 0 means no fragmented packets will be accepted, while a "
    "value of -1 means no limit");
SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, frag6_nfragpackets,
    CTLFLAG_VNET | CTLFLAG_RD,
    __DEVOLATILE(u_int *, &VNET_NAME(frag6_nfragpackets)), 0,
    "Per-VNET number of IPv6 fragments across all reassembly queues.");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGSPERPACKET, maxfragsperpacket,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragsperpacket), 0,
    "Maximum allowed number of fragments per packet");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGBUCKETSIZE, maxfragbucketsize,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragbucketsize), 0,
    "Maximum number of reassembly queues per hash bucket");
static int
frag6_milli_to_callout_ticks(int ms)
{
	return (ms / IP6_CALLOUT_INTERVAL_MS);
}

static int
frag6_callout_ticks_to_milli(int ticks)
{
	return (ticks * IP6_CALLOUT_INTERVAL_MS);
}

_Static_assert(sizeof(((struct ip6q *)NULL)->ip6q_ttl) >= 2,
    "ip6q_ttl field is not large enough");
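/*
 * Conversion example (illustrative numbers): a 60000 ms lifetime maps to
 * 60000 / 500 = 120 callout ticks, and the sysctl handler below clamps the
 * lifetime to 65535 ticks (about 9.1 hours), which is why the 16-bit
 * ip6q_ttl asserted above is sufficient.
 */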
static int
sysctl_ip6_fraglifetime(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = V_ip6_fraglifetime;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || !req->newptr)
		return (error);
	if (val <= 0)
		val = IPV6_DEFFRAGTTL;

	/* Clamp to the largest lifetime the 16-bit ip6q_ttl can hold. */
	if (frag6_milli_to_callout_ticks(val) >= 65536)
		val = frag6_callout_ticks_to_milli(65535);

	if (!IS_DEFAULT_VNET(curvnet)) {
		/* Do not let a VNET raise the lifetime above the host's. */
		CURVNET_SET(vnet0);
		int host_val = V_ip6_fraglifetime;
		CURVNET_RESTORE();

		if (val > host_val)
			val = host_val;
	}
	V_ip6_fraglifetime = val;
	return (0);
}
SYSCTL_PROC(_net_inet6_ip6, OID_AUTO, fraglifetime_ms,
    CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    NULL, 0, sysctl_ip6_fraglifetime, "I",
    "Fragment lifetime, in milliseconds");
/*
 * Remove the IPv6 fragmentation header from the mbuf.
 */
int
ip6_deletefraghdr(struct mbuf *m, int offset, int wait __unused)
{
	struct ip6_hdr *ip6;

	KASSERT(m->m_len >= offset + sizeof(struct ip6_frag),
	    ("%s: ext headers not contiguous in mbuf %p m_len %d >= "
	    "offset %d + %zu\n", __func__, m, m->m_len, offset,
	    sizeof(struct ip6_frag)));

	/* Delete frag6 header. */
	ip6 = mtod(m, struct ip6_hdr *);
	bcopy(ip6, (char *)ip6 + sizeof(struct ip6_frag), offset);
	m->m_data += sizeof(struct ip6_frag);
	m->m_len -= sizeof(struct ip6_frag);
	m->m_flags |= M_FRAGMENTED;

	return (0);
}
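/*
 * Byte-level sketch of the deletion above (illustrative; assumes the
 * fragment header directly follows the IPv6 header, so offset == 40):
 *
 *	before:	[IPv6 hdr (40)][frag hdr (8)][payload]
 *	bcopy:	the leading 40 bytes are copied forward by 8, overlaying
 *		the fragment header
 *	after:	[8 dead bytes][IPv6 hdr (40)][payload]
 *
 * Advancing m_data and shrinking m_len by 8 then drops the dead bytes
 * from the mbuf without copying the (potentially large) payload.
 */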
static void
frag6_rmqueue(struct ip6q *q6, uint32_t bucket)
{
	IP6QB_LOCK_ASSERT(bucket);

	TAILQ_REMOVE(IP6QB_HEAD(bucket), q6, ip6q_tq);
	V_ip6qb[bucket].count--;
#ifdef MAC
	mac_ip6q_destroy(q6);
#endif
	free(q6, M_FRAG6);
	atomic_subtract_int(&V_frag6_nfragpackets, 1);
}
/*
 * Free a fragment reassembly header and all associated datagrams.
 */
static void
frag6_freef(struct ip6q *q6, uint32_t bucket)
{
	struct ip6_hdr *ip6;
	struct ip6asfrag *af6;
	struct mbuf *m;

	IP6QB_LOCK_ASSERT(bucket);

	while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) {
		m = af6->ip6af_m;
		TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);

		/*
		 * Return ICMP time exceeded error for the 1st fragment.
		 * Just free other fragments.
		 */
		if (af6->ip6af_off == 0 && m->m_pkthdr.rcvif != NULL) {
			/* Adjust pointer. */
			ip6 = mtod(m, struct ip6_hdr *);

			/* Restore source and destination addresses. */
			ip6->ip6_src = q6->ip6q_src;
			ip6->ip6_dst = q6->ip6q_dst;

			icmp6_error(m, ICMP6_TIME_EXCEEDED,
			    ICMP6_TIME_EXCEED_REASSEMBLY, 0);
		} else
			m_freem(m);

		free(af6, M_FRAG6);
	}
	atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
	frag6_rmqueue(q6, bucket);
}
/*
 * Drain off all datagram fragments belonging to
 * the given network interface.
 */
static void
frag6_cleanup(void *arg __unused, struct ifnet *ifp)
{
	struct ip6qhead *head;
	struct ip6q *q6;
	struct ip6asfrag *af6;
	uint32_t bucket;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	CURVNET_SET_QUIET(ifp->if_vnet);
#ifdef VIMAGE
	/*
	 * Skip processing if IPv6 reassembly is not initialised or
	 * torn down by frag6_destroy().
	 */
	if (!V_frag6_on) {
		CURVNET_RESTORE();
		return;
	}
#endif

	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		IP6QB_LOCK(bucket);
		head = IP6QB_HEAD(bucket);
		/* Scan fragment list. */
		TAILQ_FOREACH(q6, head, ip6q_tq) {
			TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) {
				/* Clear no longer valid rcvif pointer. */
				if (af6->ip6af_m->m_pkthdr.rcvif == ifp)
					af6->ip6af_m->m_pkthdr.rcvif = NULL;
			}
		}
		IP6QB_UNLOCK(bucket);
	}
	CURVNET_RESTORE();
}
EVENTHANDLER_DEFINE(ifnet_departure_event, frag6_cleanup, NULL, 0);
/*
 * As in RFC 2460, the fragmentation and reassembly rules of RFC 8200 do not
 * agree with each other in terms of the next header field handling in the
 * fragment header.
 * While the sender will use the same value for all of the fragmented packets,
 * the receiver is advised not to check them for consistency.
 *
 * Fragment rules (p18,p19):
 *	(2)  A Fragment header containing:
 *	The Next Header value that identifies the first header
 *	after the Per-Fragment headers of the original packet.
 *		-> next header field is same for all fragments
 *
 * Reassembly rule (p20):
 *	The Next Header field of the last header of the Per-Fragment
 *	headers is obtained from the Next Header field of the first
 *	fragment's Fragment header.
 *		-> should grab it from the first fragment only
 *
 * The following note also contradicts the fragment rule: no one is going to
 * send fragments of the same packet with differing next header fields.
 *
 * Additional note (p22) [not an error]:
 *	The Next Header values in the Fragment headers of different
 *	fragments of the same original packet may differ.  Only the value
 *	from the Offset zero fragment packet is used for reassembly.
 *		-> should grab it from the first fragment only
 *
 * There is no explicit reason given in the RFC.  Historical reason maybe?
 */
/*
 * Fragment input.
 */
int
frag6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m, *t;
	struct ip6_hdr *ip6;
	struct ip6_frag *ip6f;
	struct ip6qhead *head;
	struct ip6q *q6;
	struct ip6asfrag *af6, *ip6af, *af6tmp;
	struct in6_ifaddr *ia6;
	struct ifnet *dstifp, *srcifp;
	uint32_t hashkey[(sizeof(struct in6_addr) * 2 +
	    sizeof(ip6f->ip6f_ident)) / sizeof(uint32_t)];
	uint32_t bucket, *hashkeyp;
	int fragoff, frgpartlen;	/* Must be larger than uint16_t. */
	int nxt, offset, plen;
	uint8_t ecn, ecn0;
	bool only_frag;
#ifdef RSS
	struct m_tag *mtag;
	struct ip6_direct_ctx *ip6dc;
#endif

	m = *mp;
	offset = *offp;

	M_ASSERTPKTHDR(m);

	if (m->m_len < offset + sizeof(struct ip6_frag)) {
		m = m_pullup(m, offset + sizeof(struct ip6_frag));
		if (m == NULL) {
			IP6STAT_INC(ip6s_exthdrtoolong);
			*mp = NULL;
			return (IPPROTO_DONE);
		}
	}
	ip6 = mtod(m, struct ip6_hdr *);

	dstifp = NULL;
	/* Find the destination interface of the packet. */
	ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false);
	if (ia6 != NULL)
		dstifp = ia6->ia_ifp;

	/* Jumbo payload cannot contain a fragment header. */
	if (ip6->ip6_plen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		*mp = NULL;
		return (IPPROTO_DONE);
	}
	/*
	 * Check whether fragment packet's fragment length is a
	 * multiple of 8 octets (unless it is the last one).
	 * sizeof(struct ip6_frag) == 8
	 * sizeof(struct ip6_hdr) == 40
	 */
	ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset);
	if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
	    (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		*mp = NULL;
		return (IPPROTO_DONE);
	}
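	/*
	 * Why the check above works (illustrative arithmetic): the fragment
	 * data length is
	 *
	 *	sizeof(struct ip6_hdr) + plen - offset - sizeof(struct ip6_frag)
	 *	    = plen - offset + 40 - 8
	 *
	 * and since 40 and 8 are both multiples of 8, (plen - offset) & 0x7
	 * tests the data length modulo 8.  E.g. plen = 1448 with offset = 40
	 * gives (1448 - 40) & 0x7 == 0: a valid 1440-byte non-last fragment.
	 */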
	IP6STAT_INC(ip6s_fragments);
	in6_ifstat_inc(dstifp, ifs6_reass_reqd);

	/*
	 * Handle "atomic" fragments (offset and m bit set to 0) upfront,
	 * unrelated to any reassembly.  We just need to remove the frag hdr.
	 * See RFC 6946 and section 4.5 of RFC 8200.
	 */
	if ((ip6f->ip6f_offlg & ~IP6F_RESERVED_MASK) == 0) {
		IP6STAT_INC(ip6s_atomicfrags);
		nxt = ip6f->ip6f_nxt;
		/*
		 * Set nxt(-hdr field value) to the original value.
		 * We cannot just set ip6->ip6_nxt as there might be
		 * an unfragmentable part with extension headers and
		 * we must update the last one.
		 */
		m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t),
		    (caddr_t)&nxt);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) -
		    sizeof(struct ip6_frag));
		if (ip6_deletefraghdr(m, offset, M_NOWAIT) != 0)
			goto dropfrag2;
		m->m_pkthdr.len -= sizeof(struct ip6_frag);
		in6_ifstat_inc(dstifp, ifs6_reass_ok);
		*offp = offset;
		return (ip6f->ip6f_nxt);
	}
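	/*
	 * Sketch of the ip6f_offlg test above (illustrative): ip6f_offlg
	 * packs the 13-bit fragment offset, two reserved bits, and the M
	 * (more fragments) bit, so with the reserved bits masked out,
	 *
	 *	(ip6f->ip6f_offlg & ~IP6F_RESERVED_MASK) == 0
	 *
	 * holds exactly when offset == 0 and M == 0, i.e. for an atomic
	 * fragment carrying the complete original packet.
	 */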
	/* Offset now points to data portion. */
	offset += sizeof(struct ip6_frag);

	/* Get fragment length and discard 0-byte fragments. */
	frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
	if (frgpartlen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		IP6STAT_INC(ip6s_fragdropped);
		*mp = NULL;
		return (IPPROTO_DONE);
	}
	/*
	 * Enforce upper bound on number of fragments for the entire system.
	 * If maxfrag is 0, never accept fragments.
	 * If maxfrag is -1, accept all fragments without limitation.
	 */
	if (ip6_maxfrags < 0)
		;
	else if (atomic_load_int(&frag6_nfrags) >= (u_int)ip6_maxfrags)
		goto dropfrag2;
	/*
	 * Validate that a full header chain to the ULP is present in the
	 * packet containing the first fragment as per RFC 7112 and
	 * RFC 8200 pages 18,19:
	 * The first fragment packet is composed of:
	 * (3)  Extension headers, if any, and the Upper-Layer header.  These
	 *      headers must be in the first fragment. ...
	 */
	fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
	/* XXX TODO.	thj has D16851 open for this. */
	/* Send ICMPv6 4,3 in case of violation. */

	/* Store receive network interface pointer for later. */
	srcifp = m->m_pkthdr.rcvif;
	/* Generate a hash value for fragment bucket selection. */
	hashkeyp = hashkey;
	memcpy(hashkeyp, &ip6->ip6_src, sizeof(struct in6_addr));
	hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
	memcpy(hashkeyp, &ip6->ip6_dst, sizeof(struct in6_addr));
	hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
	*hashkeyp = ip6f->ip6f_ident;
	bucket = jenkins_hash32(hashkey, nitems(hashkey), V_ip6qb_hashseed);
	bucket &= IP6REASS_HMASK;
	IP6QB_LOCK(bucket);
	head = IP6QB_HEAD(bucket);

	TAILQ_FOREACH(q6, head, ip6q_tq)
		if (ip6f->ip6f_ident == q6->ip6q_ident &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst)
#ifdef MAC
		    && mac_ip6q_match(m, q6)
#endif
		    )
			break;
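	/*
	 * Key layout sketch (illustrative): hashkey[] above is nine 32-bit
	 * words,
	 *
	 *	hashkey[0..3] = ip6_src,
	 *	hashkey[4..7] = ip6_dst,
	 *	hashkey[8]    = ip6f_ident (as received, network byte order)
	 *
	 * so all fragments of one original packet hash to the same bucket,
	 * while the random V_ip6qb_hashseed keeps the bucket choice
	 * unpredictable to off-path attackers.
	 */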
	only_frag = false;
	if (q6 == NULL) {
		/* A first fragment to arrive creates a reassembly queue. */
		only_frag = true;

		/*
		 * Enforce upper bound on number of fragmented packets
		 * for which we attempt reassembly;
		 * If maxfragpackets is 0, never accept fragments.
		 * If maxfragpackets is -1, accept all fragments without
		 * limitation.
		 */
		if (V_ip6_maxfragpackets < 0)
			;
		else if (V_ip6qb[bucket].count >= V_ip6_maxfragbucketsize ||
		    atomic_load_int(&V_frag6_nfragpackets) >=
		    (u_int)V_ip6_maxfragpackets)
			goto dropfrag;

		/* Allocate IPv6 fragment packet queue entry. */
		q6 = malloc(sizeof(struct ip6q), M_FRAG6, M_NOWAIT | M_ZERO);
		if (q6 == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ip6q_init(q6, M_NOWAIT) != 0) {
			free(q6, M_FRAG6);
			goto dropfrag;
		}
		mac_ip6q_create(m, q6);
#endif
		atomic_add_int(&V_frag6_nfragpackets, 1);

		/* ip6q_nxt will be filled afterwards, from 1st fragment. */
		TAILQ_INIT(&q6->ip6q_frags);
		q6->ip6q_ident = ip6f->ip6f_ident;
		q6->ip6q_ttl = frag6_milli_to_callout_ticks(V_ip6_fraglifetime);
		q6->ip6q_src = ip6->ip6_src;
		q6->ip6q_dst = ip6->ip6_dst;
		q6->ip6q_ecn = IPV6_ECN(ip6);
		q6->ip6q_unfrglen = -1;	/* The 1st fragment has not arrived. */

		/* Add the fragmented packet to the bucket. */
		TAILQ_INSERT_HEAD(head, q6, ip6q_tq);
		V_ip6qb[bucket].count++;
	}
	/*
	 * If it is the 1st fragment, record the length of the
	 * unfragmentable part and the next header of the fragment header.
	 * Assume the first 1st fragment to arrive will be correct.
	 * We do not have any duplicate checks here yet so another packet
	 * with fragoff == 0 could come and overwrite the ip6q_unfrglen
	 * and worse, the next header, at any time.
	 */
	if (fragoff == 0 && q6->ip6q_unfrglen == -1) {
		q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
		    sizeof(struct ip6_frag);
		q6->ip6q_nxt = ip6f->ip6f_nxt;
	}
	/*
	 * Check that the reassembled packet would not exceed 65535 bytes
	 * in size.
	 * If it would exceed, discard the fragment and return an ICMP error.
	 */
	if (q6->ip6q_unfrglen >= 0) {
		/* The 1st fragment has already arrived. */
		if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
			if (only_frag)
				frag6_rmqueue(q6, bucket);
			IP6QB_UNLOCK(bucket);
			icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
			    offset - sizeof(struct ip6_frag) +
			    offsetof(struct ip6_frag, ip6f_offlg));
			*mp = NULL;
			return (IPPROTO_DONE);
		}
	} else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
		if (only_frag)
			frag6_rmqueue(q6, bucket);
		IP6QB_UNLOCK(bucket);
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offset - sizeof(struct ip6_frag) +
		    offsetof(struct ip6_frag, ip6f_offlg));
		*mp = NULL;
		return (IPPROTO_DONE);
	}
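	/*
	 * Size-check sketch (illustrative numbers): with an 8-byte
	 * unfragmentable extension header area (ip6q_unfrglen = 8), a
	 * fragment at fragoff = 65280 carrying frgpartlen = 256 gives
	 * 8 + 65280 + 256 = 65544 > IPV6_MAXPACKET (65535), so the fragment
	 * is rejected even though each summand fits in 16 bits; this is why
	 * fragoff and frgpartlen are declared as int above.
	 */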
	/*
	 * If it is the first fragment, do the above check for each
	 * fragment already stored in the reassembly queue.
	 */
	if (fragoff == 0 && !only_frag) {
		TAILQ_FOREACH_SAFE(af6, &q6->ip6q_frags, ip6af_tq, af6tmp) {
			if (q6->ip6q_unfrglen + af6->ip6af_off +
			    af6->ip6af_frglen > IPV6_MAXPACKET) {
				struct ip6_hdr *ip6err;
				struct mbuf *merr;
				int erroff;

				merr = af6->ip6af_m;
				erroff = af6->ip6af_offset;

				/* Dequeue the fragment. */
				TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);
				q6->ip6q_nfrag--;
				atomic_subtract_int(&frag6_nfrags, 1);
				free(af6, M_FRAG6);

				/* Set a valid receive interface pointer. */
				merr->m_pkthdr.rcvif = srcifp;

				/* Adjust pointer. */
				ip6err = mtod(merr, struct ip6_hdr *);

				/*
				 * Restore source and destination addresses
				 * in the erroneous IPv6 header.
				 */
				ip6err->ip6_src = q6->ip6q_src;
				ip6err->ip6_dst = q6->ip6q_dst;

				icmp6_error(merr, ICMP6_PARAM_PROB,
				    ICMP6_PARAMPROB_HEADER,
				    erroff - sizeof(struct ip6_frag) +
				    offsetof(struct ip6_frag, ip6f_offlg));
			}
		}
	}
	/* Allocate an IPv6 fragment queue entry for this fragmented part. */
	ip6af = malloc(sizeof(struct ip6asfrag), M_FRAG6, M_NOWAIT | M_ZERO);
	if (ip6af == NULL)
		goto dropfrag;
	ip6af->ip6af_mff = (ip6f->ip6f_offlg & IP6F_MORE_FRAG) ? true : false;
	ip6af->ip6af_off = fragoff;
	ip6af->ip6af_frglen = frgpartlen;
	ip6af->ip6af_offset = offset;
	ip6af->ip6af_m = m;

	if (only_frag) {
		/*
		 * Do a manual insert rather than a hard-to-understand cast
		 * to a different type relying on data structure order to work.
		 */
		TAILQ_INSERT_HEAD(&q6->ip6q_frags, ip6af, ip6af_tq);
		goto postinsert;
	}
	/* Do duplicate, condition, and boundary checks. */
	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = IPV6_ECN(ip6);
	ecn0 = q6->ip6q_ecn;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT) {
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
		if (ecn0 != IPTOS_ECN_CE)
			q6->ip6q_ecn = IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
		free(ip6af, M_FRAG6);
		goto dropfrag;
	}
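	/*
	 * The ECN checks above, tabulated (ecn0 is the codepoint recorded
	 * from the first fragment seen for this packet):
	 *
	 *	ecn \ ecn0	Not-ECT	ECT(x)	CE
	 *	Not-ECT		accept	drop	drop
	 *	ECT(x)		accept	accept	accept
	 *	CE		drop	accept+	accept
	 *
	 * "+" marks the case where ip6q_ecn is upgraded to CE so that the
	 * congestion indication survives reassembly.
	 */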
	/* Find a fragmented part which begins after this one does. */
	TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq)
		if (af6->ip6af_off > ip6af->ip6af_off)
			break;

	/*
	 * If the incoming fragment overlaps some existing fragments in
	 * the reassembly queue, drop both the new fragment and the
	 * entire reassembly queue.  However, if the new fragment
	 * is an exact duplicate of an existing fragment, only silently
	 * drop the new fragment and leave the fragmentation queue
	 * unchanged, as allowed by the RFC.  (RFC 8200, 4.5)
	 */
	if (af6 != NULL)
		af6tmp = TAILQ_PREV(af6, ip6fraghead, ip6af_tq);
	else
		af6tmp = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead);
	if (af6tmp != NULL) {
		if (af6tmp->ip6af_off + af6tmp->ip6af_frglen -
		    ip6af->ip6af_off > 0) {
			if (af6tmp->ip6af_off != ip6af->ip6af_off ||
			    af6tmp->ip6af_frglen != ip6af->ip6af_frglen)
				frag6_freef(q6, bucket);
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
	}
	if (af6 != NULL) {
		if (ip6af->ip6af_off + ip6af->ip6af_frglen -
		    af6->ip6af_off > 0) {
			if (af6->ip6af_off != ip6af->ip6af_off ||
			    af6->ip6af_frglen != ip6af->ip6af_frglen)
				frag6_freef(q6, bucket);
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
	}
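	/*
	 * Overlap check sketch (illustrative offsets): with an existing
	 * fragment at off = 0, frglen = 1232, a new fragment at off = 1224
	 * overlaps because 0 + 1232 - 1224 = 8 > 0 and is not an exact
	 * duplicate, so the entire queue is dropped; a new fragment at
	 * off = 1232 abuts cleanly (1232 - 1232 = 0) and is kept.
	 */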
#ifdef MAC
	mac_ip6q_update(m, q6);
#endif

	/*
	 * Stick new segment in its place; check for complete reassembly.
	 * If not complete, check fragment limit.  Move to front of packet
	 * queue, as we are the most recently active fragmented packet.
	 */
	if (af6 != NULL)
		TAILQ_INSERT_BEFORE(af6, ip6af, ip6af_tq);
	else
		TAILQ_INSERT_TAIL(&q6->ip6q_frags, ip6af, ip6af_tq);
postinsert:
	atomic_add_int(&frag6_nfrags, 1);
	q6->ip6q_nfrag++;
	plen = 0;
	TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) {
		if (af6->ip6af_off != plen) {
			if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
				IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
			*mp = NULL;
			return (IPPROTO_DONE);
		}
		plen += af6->ip6af_frglen;
	}
	af6 = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead);
	if (af6->ip6af_mff) {
		if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
			IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
			frag6_freef(q6, bucket);
		}
		IP6QB_UNLOCK(bucket);
		*mp = NULL;
		return (IPPROTO_DONE);
	}
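	/*
	 * Completeness walk sketch (illustrative): the list is kept sorted
	 * by offset, so the running plen must match each ip6af_off in turn,
	 * e.g. offsets {0, 1232, 2464} with lengths {1232, 1232, 500} leave
	 * plen = 2964 and a cleared ip6af_mff on the last entry, i.e. a
	 * complete packet; any gap, or a set M bit, returns early instead.
	 */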
	/* Reassembly is complete; concatenate fragments. */
	ip6af = TAILQ_FIRST(&q6->ip6q_frags);
	t = m = ip6af->ip6af_m;
	TAILQ_REMOVE(&q6->ip6q_frags, ip6af, ip6af_tq);
	while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) {
		m->m_pkthdr.csum_flags &=
		    af6->ip6af_m->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data +=
		    af6->ip6af_m->m_pkthdr.csum_data;

		TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);
		t = m_last(t);
		m_adj(af6->ip6af_m, af6->ip6af_offset);
		m_demote_pkthdr(af6->ip6af_m);
		m_cat(t, af6->ip6af_m);
		free(af6, M_FRAG6);
	}

	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);
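	/*
	 * Checksum fold sketch (illustrative values): adding two partial
	 * sums such as 0xfffe + 0x0003 = 0x10001 leaves a carry in the
	 * upper 16 bits; one pass of the loop above folds it back in,
	 * 0x0001 + 0x0001 = 0x0002, matching the ones-complement sum the
	 * hardware would have produced over the concatenated data.
	 */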
	/* Adjust offset to point where the original next header starts. */
	offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
	free(ip6af, M_FRAG6);
	if ((u_int)plen + (u_int)offset - sizeof(struct ip6_hdr) >
	    IPV6_MAXPACKET) {
		frag6_freef(q6, bucket);
		goto dropfrag;
	}
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons((u_short)plen + offset - sizeof(struct ip6_hdr));
	if (q6->ip6q_ecn == IPTOS_ECN_CE)
		ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);
	nxt = q6->ip6q_nxt;

	ip6_deletefraghdr(m, offset, M_NOWAIT);

	/* Set nxt(-hdr field value) to the original value. */
	m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t),
	    (caddr_t)&nxt);

#ifdef MAC
	mac_ip6q_reassemble(q6, m);
#endif
	atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
	frag6_rmqueue(q6, bucket);
	if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
		plen = 0;
		for (t = m; t; t = t->m_next)
			plen += t->m_len;
		m->m_pkthdr.len = plen;
		/* Set a valid receive interface pointer. */
		m->m_pkthdr.rcvif = srcifp;
	}

#ifdef RSS
	mtag = m_tag_alloc(MTAG_ABI_IPV6, IPV6_TAG_DIRECT, sizeof(*ip6dc),
	    M_NOWAIT);
	if (mtag == NULL)
		goto dropfrag;

	ip6dc = (struct ip6_direct_ctx *)(mtag + 1);
	ip6dc->ip6dc_nxt = nxt;
	ip6dc->ip6dc_off = offset;

	m_tag_prepend(m, mtag);
#endif

	IP6QB_UNLOCK(bucket);
	IP6STAT_INC(ip6s_reassembled);
	in6_ifstat_inc(dstifp, ifs6_reass_ok);

#ifdef RSS
	/* Queue/dispatch for reprocessing. */
	netisr_dispatch(NETISR_IPV6_DIRECT, m);
	*mp = NULL;
	return (IPPROTO_DONE);
#endif
	/* Tell launch routine the next header. */
	*mp = m;
	*offp = offset;
	return (nxt);

dropfrag:
	IP6QB_UNLOCK(bucket);
dropfrag2:
	in6_ifstat_inc(dstifp, ifs6_reass_fail);
	IP6STAT_INC(ip6s_fragdropped);
	m_freem(m);
	*mp = NULL;
	return (IPPROTO_DONE);
}
/*
 * IPv6 reassembling timer processing;
 * if a timer expires on a reassembly queue, discard it.
 */
static struct callout frag6_callout;
static void
frag6_slowtimo(void *arg __unused)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ip6qhead *head;
	struct ip6q *q6, *q6tmp;
	uint32_t bucket;

	if (atomic_load_int(&frag6_nfrags) == 0)
		goto done;

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
			if (V_ip6qb[bucket].count == 0)
				continue;
			IP6QB_LOCK(bucket);
			head = IP6QB_HEAD(bucket);
			TAILQ_FOREACH_SAFE(q6, head, ip6q_tq, q6tmp)
				if (--q6->ip6q_ttl == 0) {
					IP6STAT_ADD(ip6s_fragtimeout,
					    q6->ip6q_nfrag);
					/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
					frag6_freef(q6, bucket);
				}
			/*
			 * If we are over the maximum number of fragments
			 * (due to the limit being lowered), drain off
			 * enough to get down to the new limit.
			 * Note that we drain all reassembly queues if
			 * maxfragpackets is 0 (fragmentation is disabled),
			 * and do not enforce a limit when maxfragpackets
			 * is negative.
			 */
			while ((V_ip6_maxfragpackets == 0 ||
			    (V_ip6_maxfragpackets > 0 &&
			    V_ip6qb[bucket].count > V_ip6_maxfragbucketsize)) &&
			    (q6 = TAILQ_LAST(head, ip6qhead)) != NULL) {
				IP6STAT_ADD(ip6s_fragoverflow, q6->ip6q_nfrag);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
		}
		/*
		 * If we are still over the maximum number of fragmented
		 * packets, drain off enough to get down to the new limit.
		 */
		bucket = 0;
		while (V_ip6_maxfragpackets >= 0 &&
		    atomic_load_int(&V_frag6_nfragpackets) >
		    (u_int)V_ip6_maxfragpackets) {
			IP6QB_LOCK(bucket);
			q6 = TAILQ_LAST(IP6QB_HEAD(bucket), ip6qhead);
			if (q6 != NULL) {
				IP6STAT_ADD(ip6s_fragoverflow, q6->ip6q_nfrag);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
			bucket = (bucket + 1) % IP6REASS_NHASH;
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
done:
	callout_reset_sbt(&frag6_callout, SBT_1MS * IP6_CALLOUT_INTERVAL_MS,
	    SBT_1MS * 10, frag6_slowtimo, NULL, 0);
}
static void
frag6_slowtimo_init(void *arg __unused)
{

	callout_init(&frag6_callout, 1);
	callout_reset_sbt(&frag6_callout, SBT_1MS * IP6_CALLOUT_INTERVAL_MS,
	    SBT_1MS * 10, frag6_slowtimo, NULL, 0);
}
SYSINIT(frag6, SI_SUB_VNET_DONE, SI_ORDER_ANY, frag6_slowtimo_init, NULL);
/*
 * Eventhandler to adjust limits in case nmbclusters change.
 */
static void
frag6_change(void *tag)
{
	VNET_ITERATOR_DECL(vnet_iter);

	ip6_maxfrags = IP6_MAXFRAGS;
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
		frag6_set_bucketsize();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
/*
 * Initialise reassembly queue and fragment identifier.
 */
void
frag6_init(void)
{
	uint32_t bucket;

	V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
	frag6_set_bucketsize();
	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		TAILQ_INIT(IP6QB_HEAD(bucket));
		mtx_init(&V_ip6qb[bucket].lock, "ip6qb", NULL, MTX_DEF);
		V_ip6qb[bucket].count = 0;
	}
	V_ip6qb_hashseed = arc4random();
	V_ip6_maxfragsperpacket = 64;
#ifdef VIMAGE
	V_frag6_on = true;
#endif
	if (!IS_DEFAULT_VNET(curvnet))
		return;

	ip6_maxfrags = IP6_MAXFRAGS;
	EVENTHANDLER_REGISTER(nmbclusters_change,
	    frag6_change, NULL, EVENTHANDLER_PRI_ANY);
}
/*
 * Drain off all datagram fragments.
 */
static void
frag6_drain_one(void)
{
	struct ip6q *q6;
	uint32_t bucket;

	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		IP6QB_LOCK(bucket);
		while ((q6 = TAILQ_FIRST(IP6QB_HEAD(bucket))) != NULL) {
			IP6STAT_INC(ip6s_fragdropped);
			/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
			frag6_freef(q6, bucket);
		}
		IP6QB_UNLOCK(bucket);
	}
}
void
frag6_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		frag6_drain_one();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
#ifdef VIMAGE
/*
 * Clear up IPv6 reassembly structures.
 */
void
frag6_destroy(void)
{
	uint32_t bucket;

	frag6_drain_one();
	V_frag6_on = false;
	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		KASSERT(V_ip6qb[bucket].count == 0,
		    ("%s: V_ip6qb[%d] (%p) count not 0 (%d)", __func__,
		    bucket, &V_ip6qb[bucket], V_ip6qb[bucket].count));
		mtx_destroy(&V_ip6qb[bucket].lock);
	}
}
#endif