/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 * Copyright (c) 2019 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $KAME: frag6.c,v 1.33 2002/01/07 11:34:48 kjc Exp $
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/netisr.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet/in_systm.h>	/* For ECN definitions. */
#include <netinet/ip.h>		/* For ECN definitions. */

#include <security/mac/mac_framework.h>
/*
 * A "big picture" of how IPv6 fragment queues are all linked together.
 *
 * struct ip6qbucket ip6qb[...];			hashed buckets
 * ||||||||
 * |
 * +--- TAILQ(struct ip6q, packets) *q6;		tailq entries holding
 *      ||||||||					fragmented packets
 *      |						(1 per original packet)
 *      |
 *      +--- TAILQ(struct ip6asfrag, ip6q_frags)	tailq entries of IPv6
 *      |	*af6, *ip6af;				fragment packets
 *      |						for one original packet
 */
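/*
 * Each bucket in ip6qb[] carries its own mutex, packet tailq, and queue
 * count, so reassembly state for unrelated flows can be manipulated
 * concurrently (see the IP6QB_* macros below).
 */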
/* Reassembly headers are stored in hash buckets. */
#define	IP6REASS_NHASH_LOG2	10
#define	IP6REASS_NHASH		(1 << IP6REASS_NHASH_LOG2)
#define	IP6REASS_HMASK		(IP6REASS_NHASH - 1)
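/*
 * With IP6REASS_NHASH_LOG2 == 10 there are 1024 buckets; because
 * IP6REASS_NHASH is a power of two, IP6REASS_HMASK (0x3ff) reduces a
 * 32-bit hash to a bucket index with a single mask operation.
 */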
TAILQ_HEAD(ip6qhead, ip6q);
struct ip6qbucket {
	struct ip6qhead	packets;
	struct mtx	lock;
	int		count;
};

struct ip6asfrag {
	TAILQ_ENTRY(ip6asfrag) ip6af_tq;
	struct mbuf	*ip6af_m;
	int		ip6af_offset;	/* Offset in ip6af_m to next header. */
	int		ip6af_frglen;	/* Fragmentable part length. */
	int		ip6af_off;	/* Fragment offset. */
	bool		ip6af_mff;	/* More fragment bit in frag off. */
};
static MALLOC_DEFINE(M_FRAG6, "frag6", "IPv6 fragment reassembly header");

/* A flag to indicate whether IPv6 fragment reassembly is initialized. */
VNET_DEFINE_STATIC(bool, frag6_on);
#define	V_frag6_on	VNET(frag6_on)
/* System wide (global) maximum and count of packets in reassembly queues. */
static int ip6_maxfrags;
static u_int __exclusive_cache_line frag6_nfrags;

/* Maximum and current packets in per-VNET reassembly queue. */
VNET_DEFINE_STATIC(int, ip6_maxfragpackets);
VNET_DEFINE_STATIC(volatile u_int, frag6_nfragpackets);
#define	V_ip6_maxfragpackets	VNET(ip6_maxfragpackets)
#define	V_frag6_nfragpackets	VNET(frag6_nfragpackets)

/* Maximum per-VNET reassembly timeout (milliseconds). */
VNET_DEFINE_STATIC(u_int, ip6_fraglifetime) = IPV6_DEFFRAGTTL;
#define	V_ip6_fraglifetime	VNET(ip6_fraglifetime)

/* Maximum per-VNET reassembly queues per bucket and fragments per packet. */
VNET_DEFINE_STATIC(int, ip6_maxfragbucketsize);
VNET_DEFINE_STATIC(int, ip6_maxfragsperpacket);
#define	V_ip6_maxfragbucketsize	VNET(ip6_maxfragbucketsize)
#define	V_ip6_maxfragsperpacket	VNET(ip6_maxfragsperpacket)

/* Per-VNET reassembly queue buckets. */
VNET_DEFINE_STATIC(struct ip6qbucket, ip6qb[IP6REASS_NHASH]);
VNET_DEFINE_STATIC(uint32_t, ip6qb_hashseed);
#define	V_ip6qb			VNET(ip6qb)
#define	V_ip6qb_hashseed	VNET(ip6qb_hashseed)
#define	IP6QB_LOCK(_b)		mtx_lock(&V_ip6qb[(_b)].lock)
#define	IP6QB_TRYLOCK(_b)	mtx_trylock(&V_ip6qb[(_b)].lock)
#define	IP6QB_LOCK_ASSERT(_b)	mtx_assert(&V_ip6qb[(_b)].lock, MA_OWNED)
#define	IP6QB_UNLOCK(_b)	mtx_unlock(&V_ip6qb[(_b)].lock)
#define	IP6QB_HEAD(_b)		(&V_ip6qb[(_b)].packets)
/*
 * By default, limit the number of IP6 fragments across all reassembly
 * queues to 1/32 of the total number of mbuf clusters.
 *
 * Limit the total number of reassembly queues per VNET to the
 * IP6 fragment limit, but ensure the limit will not allow any bucket
 * to grow above 100 items.  (The bucket limit is
 * IP6_MAXFRAGPACKETS / (IP6REASS_NHASH / 2), so the 50 is the correct
 * multiplier to reach a 100-item limit.)
 * The 100-item limit was chosen as brief testing seems to show that
 * this produces "reasonable" performance on some subset of systems
 * under DoS attack.
 */
#define	IP6_MAXFRAGS		(nmbclusters / 32)
#define	IP6_MAXFRAGPACKETS	(imin(IP6_MAXFRAGS, IP6REASS_NHASH * 50))
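/*
 * Example: with nmbclusters = 262144, IP6_MAXFRAGS is 8192 fragments
 * system-wide and IP6_MAXFRAGPACKETS is imin(8192, 51200) = 8192
 * reassembly queues per VNET, i.e. at most 8192 / (1024 / 2) = 16
 * queues per bucket once frag6_set_bucketsize() has run.
 */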
/* Interval between periodic reassembly queue inspections. */
#define	IP6_CALLOUT_INTERVAL_MS	500
/*
 * Sysctls and helper function.
 */
SYSCTL_DECL(_net_inet6_ip6);

SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, frag6_nfrags,
    CTLFLAG_RD, &frag6_nfrags, 0,
    "Global number of IPv6 fragments across all reassembly queues.");
static void
frag6_set_bucketsize(void)
{
	int i;

	if ((i = V_ip6_maxfragpackets) > 0)
		V_ip6_maxfragbucketsize = imax(i / (IP6REASS_NHASH / 2), 1);
}
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGS, maxfrags,
    CTLFLAG_RW, &ip6_maxfrags, 0,
    "Maximum allowed number of outstanding IPv6 packet fragments. "
    "A value of 0 means no fragmented packets will be accepted, while "
    "a value of -1 means no limit");
static int
sysctl_ip6_maxfragpackets(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = V_ip6_maxfragpackets;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || !req->newptr)
		return (error);
	V_ip6_maxfragpackets = val;
	frag6_set_bucketsize();
	return (0);
}
SYSCTL_PROC(_net_inet6_ip6, IPV6CTL_MAXFRAGPACKETS, maxfragpackets,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    NULL, 0, sysctl_ip6_maxfragpackets, "I",
    "Default maximum number of outstanding fragmented IPv6 packets. "
    "A value of 0 means no fragmented packets will be accepted, while "
    "a value of -1 means no limit");
SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, frag6_nfragpackets,
    CTLFLAG_VNET | CTLFLAG_RD,
    __DEVOLATILE(u_int *, &VNET_NAME(frag6_nfragpackets)), 0,
    "Per-VNET number of IPv6 fragments across all reassembly queues.");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGSPERPACKET, maxfragsperpacket,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragsperpacket), 0,
    "Maximum allowed number of fragments per packet");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGBUCKETSIZE, maxfragbucketsize,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragbucketsize), 0,
    "Maximum number of reassembly queues per hash bucket");
static int
frag6_milli_to_callout_ticks(int ms)
{
	return (ms / IP6_CALLOUT_INTERVAL_MS);
}

static int
frag6_callout_ticks_to_milli(int ticks)
{
	return (ticks * IP6_CALLOUT_INTERVAL_MS);
}
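/*
 * Example: a fraglifetime of 60000 ms maps to 60000 / 500 = 120 callout
 * ticks, and 120 ticks map back to 60000 ms.  Queue lifetimes are stored
 * in callout ticks in ip6q_ttl.
 */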
_Static_assert(sizeof(((struct ip6q *)NULL)->ip6q_ttl) >= 2,
    "ip6q_ttl field is not large enough");
static int
sysctl_ip6_fraglifetime(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = V_ip6_fraglifetime;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || !req->newptr)
		return (error);
	if (val <= 0)
		val = IPV6_DEFFRAGTTL;

	/* Keep the tick count within the 16-bit ip6q_ttl field. */
	if (frag6_milli_to_callout_ticks(val) >= 65536)
		val = frag6_callout_ticks_to_milli(65535);
	if (!IS_DEFAULT_VNET(curvnet)) {
		/* Do not let a jail exceed the host's (vnet0) setting. */
		CURVNET_SET(vnet0);
		int host_val = V_ip6_fraglifetime;
		CURVNET_RESTORE();

		if (val > host_val)
			val = host_val;
	}
	V_ip6_fraglifetime = val;
	return (0);
}
SYSCTL_PROC(_net_inet6_ip6, OID_AUTO, fraglifetime_ms,
    CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    NULL, 0, sysctl_ip6_fraglifetime, "I",
    "Fragment lifetime, in milliseconds");
/*
 * Remove the IPv6 fragmentation header from the mbuf.
 */
int
ip6_deletefraghdr(struct mbuf *m, int offset, int wait __unused)
{
	struct ip6_hdr *ip6;

	KASSERT(m->m_len >= offset + sizeof(struct ip6_frag),
	    ("%s: ext headers not contiguous in mbuf %p m_len %d >= "
	    "offset %d + %zu\n", __func__, m, m->m_len, offset,
	    sizeof(struct ip6_frag)));

	/* Delete frag6 header. */
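	/*
	 * The bcopy() below slides the IPv6 header and any unfragmentable
	 * extension headers (the first 'offset' bytes) forward over the
	 * 8-byte fragment header; m_data/m_len are then advanced past the
	 * now-unused leading bytes.
	 */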
	ip6 = mtod(m, struct ip6_hdr *);
	bcopy(ip6, (char *)ip6 + sizeof(struct ip6_frag), offset);
	m->m_data += sizeof(struct ip6_frag);
	m->m_len -= sizeof(struct ip6_frag);
	m->m_flags |= M_FRAGMENTED;

	return (0);
}
/*
 * Free a fragment reassembly header and all associated datagrams.
 */
static void
frag6_freef(struct ip6q *q6, uint32_t bucket)
{
	struct ip6_hdr *ip6;
	struct ip6asfrag *af6;
	struct mbuf *m;

	IP6QB_LOCK_ASSERT(bucket);

	while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) {
		m = af6->ip6af_m;
		TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);

		/*
		 * Return ICMP time exceeded error for the 1st fragment.
		 * Just free other fragments.
		 */
		if (af6->ip6af_off == 0 && m->m_pkthdr.rcvif != NULL) {
			/* Adjust pointer. */
			ip6 = mtod(m, struct ip6_hdr *);

			/* Restore source and destination addresses. */
			ip6->ip6_src = q6->ip6q_src;
			ip6->ip6_dst = q6->ip6q_dst;

			icmp6_error(m, ICMP6_TIME_EXCEEDED,
			    ICMP6_TIME_EXCEED_REASSEMBLY, 0);
		} else
			m_freem(m);

		free(af6, M_FRAG6);
	}

	TAILQ_REMOVE(IP6QB_HEAD(bucket), q6, ip6q_tq);
	V_ip6qb[bucket].count--;
	atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
#ifdef MAC
	mac_ip6q_destroy(q6);
#endif
	free(q6, M_FRAG6);
	atomic_subtract_int(&V_frag6_nfragpackets, 1);
}
340 * Drain off all datagram fragments belonging to
341 * the given network interface.
344 frag6_cleanup(void *arg __unused, struct ifnet *ifp)
346 struct ip6qhead *head;
348 struct ip6asfrag *af6;
351 KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));
353 CURVNET_SET_QUIET(ifp->if_vnet);
356 * Skip processing if IPv6 reassembly is not initialised or
357 * torn down by frag6_destroy().
365 for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
367 head = IP6QB_HEAD(bucket);
368 /* Scan fragment list. */
369 TAILQ_FOREACH(q6, head, ip6q_tq) {
370 TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) {
371 /* Clear no longer valid rcvif pointer. */
372 if (af6->ip6af_m->m_pkthdr.rcvif == ifp)
373 af6->ip6af_m->m_pkthdr.rcvif = NULL;
376 IP6QB_UNLOCK(bucket);
380 EVENTHANDLER_DEFINE(ifnet_departure_event, frag6_cleanup, NULL, 0);
/*
 * In RFC 8200, as in RFC 2460 before it, the fragmentation and reassembly
 * rules do not agree with each other in terms of the handling of the next
 * header field in the fragment header.  While the sender will use the same
 * value for all of the fragments of a packet, the receiver is advised not
 * to check the fragments for consistency.
 *
 * Fragment rules (p18,p19):
 *	(2)  A Fragment header containing:
 *	The Next Header value that identifies the first header
 *	after the Per-Fragment headers of the original packet.
 *		-> next header field is same for all fragments
 *
 * Reassembly rule (p20):
 *	The Next Header field of the last header of the Per-Fragment
 *	headers is obtained from the Next Header field of the first
 *	fragment's Fragment header.
 *		-> should grab it from the first fragment only
 *
 * The following note also contradicts the fragmentation rule: no one is
 * going to send fragments of the same packet with different next header
 * values.
 *
 * Additional note (p22) [not an error]:
 *	The Next Header values in the Fragment headers of different
 *	fragments of the same original packet may differ.  Only the value
 *	from the Offset zero fragment packet is used for reassembly.
 *		-> should grab it from the first fragment only
 *
 * There is no explicit reason given in the RFC.  Historical reason maybe?
 */
/*
 * Fragment input.
 */
int
frag6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m, *t;
	struct ip6_hdr *ip6;
	struct ip6_frag *ip6f;
	struct ip6qhead *head;
	struct ip6q *q6;
	struct ip6asfrag *af6, *ip6af, *af6tmp;
	struct in6_ifaddr *ia6;
	struct ifnet *dstifp, *srcifp;
	uint32_t hashkey[(sizeof(struct in6_addr) * 2 +
		    sizeof(ip6f->ip6f_ident)) / sizeof(uint32_t)];
	uint32_t bucket, *hashkeyp;
	int fragoff, frgpartlen;	/* Must be larger than uint16_t. */
	int nxt, offset, plen;
	uint8_t ecn, ecn0;
	bool only_frag;
#ifdef RSS
	struct ip6_direct_ctx *ip6dc;
	struct m_tag *mtag;
#endif

	m = *mp;
	offset = *offp;
	if (m->m_len < offset + sizeof(struct ip6_frag)) {
		m = m_pullup(m, offset + sizeof(struct ip6_frag));
		if (m == NULL) {
			IP6STAT_INC(ip6s_exthdrtoolong);
			*mp = NULL;
			return (IPPROTO_DONE);
		}
	}
	ip6 = mtod(m, struct ip6_hdr *);

	dstifp = NULL;
	/* Find the destination interface of the packet. */
	ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false);
	if (ia6 != NULL)
		dstifp = ia6->ia_ifp;

	/* Jumbo payload cannot contain a fragment header. */
	if (ip6->ip6_plen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		*mp = NULL;
		return (IPPROTO_DONE);
	}
	/*
	 * Check whether fragment packet's fragment length is a
	 * multiple of 8 octets (unless it is the last one).
	 * sizeof(struct ip6_frag) == 8
	 * sizeof(struct ip6_hdr) == 40
	 */
	ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset);
	if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
	    (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		*mp = NULL;
		return (IPPROTO_DONE);
	}

	IP6STAT_INC(ip6s_fragments);
	in6_ifstat_inc(dstifp, ifs6_reass_reqd);
	/*
	 * Handle "atomic" fragments (offset and m bit set to 0) upfront,
	 * unrelated to any reassembly.  We still need to remove the
	 * fragment header.
	 * See RFC 6946 and section 4.5 of RFC 8200.
	 */
	if ((ip6f->ip6f_offlg & ~IP6F_RESERVED_MASK) == 0) {
		IP6STAT_INC(ip6s_atomicfrags);
		nxt = ip6f->ip6f_nxt;
		/*
		 * Set nxt(-hdr field value) to the original value.
		 * We cannot just set ip6->ip6_nxt as there might be
		 * an unfragmentable part with extension headers and
		 * we must update the last one.
		 */
		m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t),
		    (caddr_t)&nxt);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) -
		    sizeof(struct ip6_frag));
		if (ip6_deletefraghdr(m, offset, M_NOWAIT) != 0)
			goto dropfrag2;
		m->m_pkthdr.len -= sizeof(struct ip6_frag);
		in6_ifstat_inc(dstifp, ifs6_reass_ok);
		*mp = m;
		return (ip6f->ip6f_nxt);
	}
	/* Offset now points to data portion. */
	offset += sizeof(struct ip6_frag);

	/* Get fragment length and discard 0-byte fragments. */
	frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
	if (frgpartlen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		IP6STAT_INC(ip6s_fragdropped);
		*mp = NULL;
		return (IPPROTO_DONE);
	}
	/*
	 * Enforce upper bound on number of fragments for the entire system.
	 * If maxfrag is 0, never accept fragments.
	 * If maxfrag is -1, accept all fragments without limitation.
	 */
	if (ip6_maxfrags < 0)
		;
	else if (atomic_load_int(&frag6_nfrags) >= (u_int)ip6_maxfrags)
		goto dropfrag2;

	/*
	 * Validate that a full header chain to the ULP is present in the
	 * packet containing the first fragment as per RFC 7112 and
	 * RFC 8200 pages 18,19:
	 * The first fragment packet is composed of:
	 * (3)  Extension headers, if any, and the Upper-Layer header.  These
	 *      headers must be in the first fragment.  ...
	 */
	fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
	/* XXX TODO.  thj has D16851 open for this. */
	/* Send ICMPv6 4,3 in case of violation. */

	/* Store receive network interface pointer for later. */
	srcifp = m->m_pkthdr.rcvif;
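	/*
	 * The hash key below is the 3-tuple that identifies a reassembly
	 * queue: source address, destination address, and fragment
	 * identification (9 32-bit words in total).  Hashing it with the
	 * per-VNET arc4random() seed keeps remote senders from predicting
	 * which bucket a given flow will land in.
	 */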
	/* Generate a hash value for fragment bucket selection. */
	hashkeyp = hashkey;
	memcpy(hashkeyp, &ip6->ip6_src, sizeof(struct in6_addr));
	hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
	memcpy(hashkeyp, &ip6->ip6_dst, sizeof(struct in6_addr));
	hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
	*hashkeyp = ip6f->ip6f_ident;
	bucket = jenkins_hash32(hashkey, nitems(hashkey), V_ip6qb_hashseed);
	bucket &= IP6REASS_HMASK;
	IP6QB_LOCK(bucket);
	head = IP6QB_HEAD(bucket);

	/* Look for a matching reassembly queue. */
	TAILQ_FOREACH(q6, head, ip6q_tq)
		if (ip6f->ip6f_ident == q6->ip6q_ident &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst)
#ifdef MAC
		    && mac_ip6q_match(m, q6)
#endif
		    )
			break;

	only_frag = false;

	/* A first fragment to arrive creates a reassembly queue. */
	if (q6 == NULL) {
		/*
		 * Enforce upper bound on number of fragmented packets
		 * for which we attempt reassembly;
		 * If maxfragpackets is 0, never accept fragments.
		 * If maxfragpackets is -1, accept all fragments without
		 * limitation.
		 */
		if (V_ip6_maxfragpackets < 0)
			;
		else if (V_ip6qb[bucket].count >= V_ip6_maxfragbucketsize ||
		    atomic_load_int(&V_frag6_nfragpackets) >=
		    (u_int)V_ip6_maxfragpackets)
			goto dropfrag;

		/* Allocate IPv6 fragment packet queue entry. */
		q6 = (struct ip6q *)malloc(sizeof(struct ip6q), M_FRAG6,
		    M_NOWAIT | M_ZERO);
		if (q6 == NULL)
			goto dropfrag;
		only_frag = true;
#ifdef MAC
		if (mac_ip6q_init(q6, M_NOWAIT) != 0) {
			free(q6, M_FRAG6);
			goto dropfrag;
		}
		mac_ip6q_create(m, q6);
#endif
		atomic_add_int(&V_frag6_nfragpackets, 1);

		/* ip6q_nxt will be filled afterwards, from 1st fragment. */
		TAILQ_INIT(&q6->ip6q_frags);
		q6->ip6q_ident = ip6f->ip6f_ident;
		q6->ip6q_ttl = frag6_milli_to_callout_ticks(V_ip6_fraglifetime);
		q6->ip6q_src = ip6->ip6_src;
		q6->ip6q_dst = ip6->ip6_dst;
		q6->ip6q_ecn = IPV6_ECN(ip6);
		q6->ip6q_unfrglen = -1;	/* The 1st fragment has not arrived. */

		/* Add the fragmented packet to the bucket. */
		TAILQ_INSERT_HEAD(head, q6, ip6q_tq);
		V_ip6qb[bucket].count++;
	}
	/*
	 * If it is the 1st fragment, record the length of the
	 * unfragmentable part and the next header of the fragment header.
	 * Assume the first fragment to arrive is correct.
	 * We do not have any duplicate checks here yet so another packet
	 * with fragoff == 0 could come and overwrite the ip6q_unfrglen
	 * and worse, the next header, at any time.
	 */
	if (fragoff == 0 && q6->ip6q_unfrglen == -1) {
		q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
		    sizeof(struct ip6_frag);
		q6->ip6q_nxt = ip6f->ip6f_nxt;
	}
	/*
	 * Check that the reassembled packet would not exceed 65535 bytes
	 * in size.
	 * If it would exceed, discard the fragment and return an ICMP error.
	 */
	if (q6->ip6q_unfrglen >= 0) {
		/* The 1st fragment has already arrived. */
		if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
			if (only_frag) {
				TAILQ_REMOVE(head, q6, ip6q_tq);
				V_ip6qb[bucket].count--;
				atomic_subtract_int(&V_frag6_nfragpackets, 1);
#ifdef MAC
				mac_ip6q_destroy(q6);
#endif
				free(q6, M_FRAG6);
			}
			IP6QB_UNLOCK(bucket);
			icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
			    offset - sizeof(struct ip6_frag) +
			    offsetof(struct ip6_frag, ip6f_offlg));
			*mp = NULL;
			return (IPPROTO_DONE);
		}
	} else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
		if (only_frag) {
			TAILQ_REMOVE(head, q6, ip6q_tq);
			V_ip6qb[bucket].count--;
			atomic_subtract_int(&V_frag6_nfragpackets, 1);
#ifdef MAC
			mac_ip6q_destroy(q6);
#endif
			free(q6, M_FRAG6);
		}
		IP6QB_UNLOCK(bucket);
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offset - sizeof(struct ip6_frag) +
		    offsetof(struct ip6_frag, ip6f_offlg));
		*mp = NULL;
		return (IPPROTO_DONE);
	}
	/*
	 * If it is the first fragment, do the above check for each
	 * fragment already stored in the reassembly queue.
	 */
	if (fragoff == 0 && !only_frag) {
		TAILQ_FOREACH_SAFE(af6, &q6->ip6q_frags, ip6af_tq, af6tmp) {
			if (q6->ip6q_unfrglen + af6->ip6af_off +
			    af6->ip6af_frglen > IPV6_MAXPACKET) {
				struct ip6_hdr *ip6err;
				struct mbuf *merr;
				int erroff;

				merr = af6->ip6af_m;
				erroff = af6->ip6af_offset;

				/* Dequeue the fragment. */
				TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);
				q6->ip6q_nfrag--;
				atomic_subtract_int(&frag6_nfrags, 1);
				free(af6, M_FRAG6);

				/* Set a valid receive interface pointer. */
				merr->m_pkthdr.rcvif = srcifp;

				/* Adjust pointer. */
				ip6err = mtod(merr, struct ip6_hdr *);

				/*
				 * Restore source and destination addresses
				 * in the erroneous IPv6 header.
				 */
				ip6err->ip6_src = q6->ip6q_src;
				ip6err->ip6_dst = q6->ip6q_dst;

				icmp6_error(merr, ICMP6_PARAM_PROB,
				    ICMP6_PARAMPROB_HEADER,
				    erroff - sizeof(struct ip6_frag) +
				    offsetof(struct ip6_frag, ip6f_offlg));
			}
		}
	}
	/* Allocate an IPv6 fragment queue entry for this fragmented part. */
	ip6af = (struct ip6asfrag *)malloc(sizeof(struct ip6asfrag), M_FRAG6,
	    M_NOWAIT | M_ZERO);
	if (ip6af == NULL)
		goto dropfrag;
	ip6af->ip6af_mff = (ip6f->ip6f_offlg & IP6F_MORE_FRAG) ? true : false;
	ip6af->ip6af_off = fragoff;
	ip6af->ip6af_frglen = frgpartlen;
	ip6af->ip6af_offset = offset;
	ip6af->ip6af_m = m;

	if (only_frag) {
		/*
		 * Do a manual insert rather than a hard-to-understand cast
		 * to a different type relying on data structure order to work.
		 */
		TAILQ_INSERT_HEAD(&q6->ip6q_frags, ip6af, ip6af_tq);
		goto postinsert;
	}
	/* Do duplicate, condition, and boundary checks. */

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = IPV6_ECN(ip6);
	ecn0 = q6->ip6q_ecn;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT) {
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
		if (ecn0 != IPTOS_ECN_CE)
			q6->ip6q_ecn = IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
		free(ip6af, M_FRAG6);
		goto dropfrag;
	}
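	/*
	 * The checks above implement the ECN reassembly rules of RFC 3168:
	 * CE seen on any fragment is preserved in the reassembled packet,
	 * while a mix of not-ECT and ECT/CE fragments invalidates the
	 * datagram, so it is dropped.
	 */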
	/* Find a fragmented part which begins after this one does. */
	TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq)
		if (af6->ip6af_off > ip6af->ip6af_off)
			break;

	/*
	 * If the incoming fragment overlaps some existing fragments in
	 * the reassembly queue, drop both the new fragment and the
	 * entire reassembly queue.  However, if the new fragment
	 * is an exact duplicate of an existing fragment, only silently
	 * drop the new fragment and leave the fragmentation queue
	 * unchanged, as allowed by the RFC.  (RFC 8200, 4.5)
	 */
	if (af6 != NULL)
		af6tmp = TAILQ_PREV(af6, ip6fraghead, ip6af_tq);
	else
		af6tmp = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead);
	if (af6tmp != NULL) {
		if (af6tmp->ip6af_off + af6tmp->ip6af_frglen -
		    ip6af->ip6af_off > 0) {
			if (af6tmp->ip6af_off != ip6af->ip6af_off ||
			    af6tmp->ip6af_frglen != ip6af->ip6af_frglen)
				frag6_freef(q6, bucket);
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
	}
	if (af6 != NULL) {
		if (ip6af->ip6af_off + ip6af->ip6af_frglen -
		    af6->ip6af_off > 0) {
			if (af6->ip6af_off != ip6af->ip6af_off ||
			    af6->ip6af_frglen != ip6af->ip6af_frglen)
				frag6_freef(q6, bucket);
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
	}
#ifdef MAC
	mac_ip6q_update(m, q6);
#endif

	/*
	 * Stick new segment in its place; check for complete reassembly.
	 * If not complete, check fragment limit.  Move to front of packet
	 * queue, as we are the most recently active fragmented packet.
	 */
	if (af6 != NULL)
		TAILQ_INSERT_BEFORE(af6, ip6af, ip6af_tq);
	else
		TAILQ_INSERT_TAIL(&q6->ip6q_frags, ip6af, ip6af_tq);
postinsert:
	atomic_add_int(&frag6_nfrags, 1);
	q6->ip6q_nfrag++;
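	/*
	 * Completeness check: the queue is kept sorted by fragment offset,
	 * so the packet is fully reassembled exactly when every fragment
	 * starts where the previous one ended (a running offset of plen
	 * bytes) and the last fragment has the more-fragments bit clear.
	 */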
	plen = 0;
	TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) {
		if (af6->ip6af_off != plen) {
			if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
				IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
			*mp = NULL;
			return (IPPROTO_DONE);
		}
		plen += af6->ip6af_frglen;
	}
	af6 = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead);
	if (af6->ip6af_mff) {
		if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
			IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
			frag6_freef(q6, bucket);
		}
		IP6QB_UNLOCK(bucket);
		*mp = NULL;
		return (IPPROTO_DONE);
	}
	/* Reassembly is complete; concatenate fragments. */
	ip6af = TAILQ_FIRST(&q6->ip6q_frags);
	t = m = ip6af->ip6af_m;
	TAILQ_REMOVE(&q6->ip6q_frags, ip6af, ip6af_tq);
	while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) {
		m->m_pkthdr.csum_flags &=
		    af6->ip6af_m->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data +=
		    af6->ip6af_m->m_pkthdr.csum_data;

		TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);
		t = m_last(t);
		m_adj(af6->ip6af_m, af6->ip6af_offset);
		m_demote_pkthdr(af6->ip6af_m);
		m_cat(t, af6->ip6af_m);
		free(af6, M_FRAG6);
	}

	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);
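	/*
	 * The loop above folds csum_data back into 16 bits: the fragments'
	 * partial checksums were accumulated into a 32-bit value, and each
	 * pass adds the carry bits back in ones-complement style (e.g.
	 * 0x1ffff -> 0xffff + 0x1 = 0x10000 -> 0x0 + 0x1 = 0x1).
	 */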
	/* Adjust offset to point where the original next header starts. */
	offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
	free(ip6af, M_FRAG6);
	if ((u_int)plen + (u_int)offset - sizeof(struct ip6_hdr) >
	    IPV6_MAXPACKET) {
		frag6_freef(q6, bucket);
		goto dropfrag;
	}
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons((u_short)plen + offset - sizeof(struct ip6_hdr));
	if (q6->ip6q_ecn == IPTOS_ECN_CE)
		ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);
	TAILQ_REMOVE(head, q6, ip6q_tq);
	V_ip6qb[bucket].count--;
	atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);

	ip6_deletefraghdr(m, offset, M_NOWAIT);

	/* Set nxt(-hdr field value) to the original value. */
	m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t),
	    (caddr_t)&q6->ip6q_nxt);
	nxt = q6->ip6q_nxt;

#ifdef MAC
	mac_ip6q_reassemble(q6, m);
	mac_ip6q_destroy(q6);
#endif
	free(q6, M_FRAG6);
	atomic_subtract_int(&V_frag6_nfragpackets, 1);
	if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
		plen = 0;
		for (t = m; t; t = t->m_next)
			plen += t->m_len;
		m->m_pkthdr.len = plen;
		/* Set a valid receive interface pointer. */
		m->m_pkthdr.rcvif = srcifp;
	}
#ifdef RSS
	mtag = m_tag_alloc(MTAG_ABI_IPV6, IPV6_TAG_DIRECT, sizeof(*ip6dc),
	    M_NOWAIT);
	if (mtag == NULL)
		goto dropfrag;

	ip6dc = (struct ip6_direct_ctx *)(mtag + 1);
	ip6dc->ip6dc_nxt = nxt;
	ip6dc->ip6dc_off = offset;

	m_tag_prepend(m, mtag);
#endif
	IP6QB_UNLOCK(bucket);
	IP6STAT_INC(ip6s_reassembled);
	in6_ifstat_inc(dstifp, ifs6_reass_ok);

#ifdef RSS
	/* Queue/dispatch for reprocessing. */
	netisr_dispatch(NETISR_IPV6_DIRECT, m);
	*mp = NULL;
	return (IPPROTO_DONE);
#endif

	/* Tell launch routine the next header. */
	*mp = m;
	*offp = offset;

	return (nxt);

dropfrag:
	IP6QB_UNLOCK(bucket);
dropfrag2:
	in6_ifstat_inc(dstifp, ifs6_reass_fail);
	IP6STAT_INC(ip6s_fragdropped);
	m_freem(m);
	*mp = NULL;
	return (IPPROTO_DONE);
}
/*
 * IPv6 reassembling timer processing;
 * if a timer expires on a reassembly queue, discard it.
 */
static struct callout frag6_callout;
static void
frag6_slowtimo(void *arg __unused)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ip6qhead *head;
	struct ip6q *q6, *q6tmp;
	uint32_t bucket;

	if (atomic_load_int(&frag6_nfrags) == 0)
		goto done;

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
			if (V_ip6qb[bucket].count == 0)
				continue;
			IP6QB_LOCK(bucket);
			head = IP6QB_HEAD(bucket);
			TAILQ_FOREACH_SAFE(q6, head, ip6q_tq, q6tmp)
				if (--q6->ip6q_ttl == 0) {
					IP6STAT_ADD(ip6s_fragtimeout,
					    q6->ip6q_nfrag);
					/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
					frag6_freef(q6, bucket);
				}
			/*
			 * If we are over the maximum number of fragments
			 * (due to the limit being lowered), drain off
			 * enough to get down to the new limit.
			 * Note that we drain all reassembly queues if
			 * maxfragpackets is 0 (fragmentation is disabled),
			 * and do not enforce a limit when maxfragpackets
			 * is negative.
			 */
			while ((V_ip6_maxfragpackets == 0 ||
			    (V_ip6_maxfragpackets > 0 &&
			    V_ip6qb[bucket].count > V_ip6_maxfragbucketsize)) &&
			    (q6 = TAILQ_LAST(head, ip6qhead)) != NULL) {
				IP6STAT_ADD(ip6s_fragoverflow, q6->ip6q_nfrag);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
		}
		/*
		 * If we are still over the maximum number of fragmented
		 * packets, drain off enough to get down to the new limit.
		 */
		bucket = 0;
		while (V_ip6_maxfragpackets >= 0 &&
		    atomic_load_int(&V_frag6_nfragpackets) >
		    (u_int)V_ip6_maxfragpackets) {
			IP6QB_LOCK(bucket);
			q6 = TAILQ_LAST(IP6QB_HEAD(bucket), ip6qhead);
			if (q6 != NULL) {
				IP6STAT_ADD(ip6s_fragoverflow, q6->ip6q_nfrag);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
			bucket = (bucket + 1) % IP6REASS_NHASH;
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
done:
	callout_reset_sbt(&frag6_callout, SBT_1MS * IP6_CALLOUT_INTERVAL_MS,
	    SBT_1MS * 10, frag6_slowtimo, NULL, 0);
}
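/*
 * The reassembly callout fires every IP6_CALLOUT_INTERVAL_MS (500 ms)
 * with 10 ms of tolerated slop (the precision argument above); each run
 * ages every queue by one tick, so an ip6q_ttl of N expires after
 * roughly N * 500 ms.
 */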
static void
frag6_slowtimo_init(void *arg __unused)
{

	callout_init(&frag6_callout, 1);
	callout_reset_sbt(&frag6_callout, SBT_1MS * IP6_CALLOUT_INTERVAL_MS,
	    SBT_1MS * 10, frag6_slowtimo, NULL, 0);
}
SYSINIT(frag6, SI_SUB_VNET_DONE, SI_ORDER_ANY, frag6_slowtimo_init, NULL);
/*
 * Eventhandler to adjust limits in case nmbclusters change.
 */
static void
frag6_change(void *tag)
{
	VNET_ITERATOR_DECL(vnet_iter);

	ip6_maxfrags = IP6_MAXFRAGS;
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
		frag6_set_bucketsize();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
/*
 * Initialise reassembly queue and fragment identifier.
 */
void
frag6_init(void)
{
	uint32_t bucket;

	V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
	frag6_set_bucketsize();
	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		TAILQ_INIT(IP6QB_HEAD(bucket));
		mtx_init(&V_ip6qb[bucket].lock, "ip6qb", NULL, MTX_DEF);
		V_ip6qb[bucket].count = 0;
	}
	V_ip6qb_hashseed = arc4random();
	V_ip6_maxfragsperpacket = 64;
	V_frag6_on = true;

	if (!IS_DEFAULT_VNET(curvnet))
		return;

	ip6_maxfrags = IP6_MAXFRAGS;
	EVENTHANDLER_REGISTER(nmbclusters_change,
	    frag6_change, NULL, EVENTHANDLER_PRI_ANY);
}
/*
 * Drain off all datagram fragments.
 */
static void
frag6_drain_one(void)
{
	struct ip6q *q6;
	uint32_t bucket;

	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		IP6QB_LOCK(bucket);
		while ((q6 = TAILQ_FIRST(IP6QB_HEAD(bucket))) != NULL) {
			IP6STAT_INC(ip6s_fragdropped);
			/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
			frag6_freef(q6, bucket);
		}
		IP6QB_UNLOCK(bucket);
	}
}
void
frag6_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		frag6_drain_one();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
/*
 * Clear up IPv6 reassembly structures.
 */
void
frag6_destroy(void)
{
	uint32_t bucket;

	frag6_drain_one();
	V_frag6_on = false;
	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		KASSERT(V_ip6qb[bucket].count == 0,
		    ("%s: V_ip6qb[%d] (%p) count not 0 (%d)", __func__,
		    bucket, &V_ip6qb[bucket], V_ip6qb[bucket].count));
		mtx_destroy(&V_ip6qb[bucket].lock);
	}
}