/*-
 * Copyright (c) 2015 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2015 Adrian Chadd <adrian@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/hash.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/rss_config.h>
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_rss.h>
#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
SYSCTL_DECL(_net_inet_ip);

/*
 * Reassembly headers are stored in hash buckets.
 */
#define	IPREASS_NHASH_LOG2	10
#define	IPREASS_NHASH		(1 << IPREASS_NHASH_LOG2)
#define	IPREASS_HMASK		(V_ipq_hashsize - 1)
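
/*
 * The hash table size defaults to IPREASS_NHASH and may be overridden with
 * the net.inet.ip.reass_hashsize tunable; the mask-based indexing through
 * IPREASS_HMASK assumes the size is a power of two.
 */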

struct ipqbucket {
	TAILQ_HEAD(ipqhead, ipq) head;
	struct mtx		 lock;
	struct callout		 timer;
#ifdef VIMAGE
	struct vnet		*vnet;
#endif
	int			 count;
};

VNET_DEFINE_STATIC(struct ipqbucket *, ipq);
#define	V_ipq		VNET(ipq)
VNET_DEFINE_STATIC(uint32_t, ipq_hashseed);
#define	V_ipq_hashseed	VNET(ipq_hashseed)
VNET_DEFINE_STATIC(uint32_t, ipq_hashsize);
#define	V_ipq_hashsize	VNET(ipq_hashsize)

#define	IPQ_LOCK(i)	mtx_lock(&V_ipq[i].lock)
#define	IPQ_TRYLOCK(i)	mtx_trylock(&V_ipq[i].lock)
#define	IPQ_UNLOCK(i)	mtx_unlock(&V_ipq[i].lock)
#define	IPQ_LOCK_ASSERT(i)	mtx_assert(&V_ipq[i].lock, MA_OWNED)
#define	IPQ_BUCKET_LOCK_ASSERT(b)	mtx_assert(&(b)->lock, MA_OWNED)
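
/*
 * Each hash bucket is protected by its own mutex, so different buckets can
 * be worked on in parallel.  Normally only a single bucket lock is held at
 * a time; ipq_reuse() is the exception, and it only trylocks the other
 * buckets (the locks are initialized MTX_DUPOK for this reason).
 */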

VNET_DEFINE_STATIC(int, ipreass_maxbucketsize);
#define	V_ipreass_maxbucketsize	VNET(ipreass_maxbucketsize)

void		ipreass_init(void);
void		ipreass_vnet_init(void);
#ifdef VIMAGE
void		ipreass_destroy(void);
#endif
static int	sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS);
static int	sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS);
static int	sysctl_fragttl(SYSCTL_HANDLER_ARGS);
static void	ipreass_zone_change(void *);
static void	ipreass_drain_tomax(void);
static void	ipq_free(struct ipqbucket *, struct ipq *);
static struct ipq *ipq_reuse(int);
static void	ipreass_callout(void *);
static void	ipreass_reschedule(struct ipqbucket *);

static void
ipq_timeout(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
	ipq_free(bucket, fp);
}

static void
ipq_drop(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
	ipq_free(bucket, fp);
	ipreass_reschedule(bucket);
}

/*
 * By default, limit the number of IP fragments across all reassembly
 * queues to 1/32 of the total number of mbuf clusters.
 *
 * Limit the total number of reassembly queues per VNET to the
 * IP fragment limit, but ensure the limit will not allow any bucket
 * to grow above 100 items.  (The bucket limit is
 * IP_MAXFRAGPACKETS / (V_ipq_hashsize / 2), so the 50 is the correct
 * multiplier to reach a 100-item limit.)
 * The 100-item limit was chosen as brief testing seems to show that
 * this produces "reasonable" performance on some subset of systems
 * under DoS attack.
 */
#define	IP_MAXFRAGS		(nmbclusters / 32)
#define	IP_MAXFRAGPACKETS	(imin(IP_MAXFRAGS, V_ipq_hashsize * 50))
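
/*
 * Worked example: with the default IPREASS_NHASH (1024) buckets, and
 * assuming IP_MAXFRAGS does not clamp the limit, the per-bucket cap
 * computed in ipreass_vnet_init() is (1024 * 50) / (1024 / 2) = 100.
 */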

static int		maxfrags;
static u_int __exclusive_cache_line nfrags;
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfrags, CTLFLAG_RW,
    &maxfrags, 0,
    "Maximum number of IPv4 fragments allowed across all reassembly queues");
SYSCTL_UINT(_net_inet_ip, OID_AUTO, curfrags, CTLFLAG_RD,
    &nfrags, 0,
    "Current number of IPv4 fragments across all reassembly queues");
VNET_DEFINE_STATIC(uma_zone_t, ipq_zone);
#define	V_ipq_zone	VNET(ipq_zone)

SYSCTL_UINT(_net_inet_ip, OID_AUTO, reass_hashsize,
    CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(ipq_hashsize), 0,
    "Size of IP fragment reassembly hashtable");

SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    NULL, 0, sysctl_maxfragpackets, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");
SYSCTL_UMA_CUR(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_VNET,
    &VNET_NAME(ipq_zone),
    "Current number of IPv4 fragment reassembly queue entries");

VNET_DEFINE_STATIC(int, noreass);
#define	V_noreass	VNET(noreass)

VNET_DEFINE_STATIC(int, maxfragsperpacket);
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragbucketsize,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
    sysctl_maxfragbucketsize, "I",
    "Maximum number of IPv4 fragment reassembly queue entries per bucket");

VNET_DEFINE_STATIC(u_int, ipfragttl) = 30;
#define	V_ipfragttl	VNET(ipfragttl)
SYSCTL_PROC(_net_inet_ip, OID_AUTO, fragttl, CTLTYPE_INT | CTLFLAG_RW |
    CTLFLAG_MPSAFE | CTLFLAG_VNET, NULL, 0, sysctl_fragttl, "IU",
    "IP fragment life time on reassembly queue (seconds)");

/*
 * Take an incoming datagram fragment and try to reassemble it into a
 * whole datagram.  If the argument is the first fragment or one in
 * between, the function will return NULL and store the mbuf in the
 * fragment chain.  If the argument is the last fragment, the packet
 * will be reassembled and the pointer to the new mbuf returned for
 * further processing.  Only m_tags attached to the first
 * packet/fragment are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
#define	M_IP_FRAG	M_PROTO9
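
/*
 * M_IP_FRAG aliases the generic M_PROTO9 mbuf flag; it caches the state
 * of the IP_MF bit, i.e. it is set on every fragment except the last one.
 */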
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp;
	struct ifnet *srcifp;
	struct ipqhead *head;
	int i, hlen, next, tmpmax;
	uint8_t ecn, ecn0;
	uint32_t hash, hashkey[3];
#ifdef	RSS
	uint32_t rss_hash, rss_type;
#endif

	/*
	 * If reassembly is disabled or maxfragsperpacket is 0,
	 * never accept fragments.
	 * Also, drop packet if it would exceed the maximum
	 * number of fragments.
	 */
	tmpmax = maxfrags;
	if (V_noreass == 1 || V_maxfragsperpacket == 0 ||
	    (tmpmax >= 0 && atomic_load_int(&nfrags) >= (u_int)tmpmax)) {
		IPSTAT_INC(ips_fragments);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	/*
	 * Adjust ip_len to not reflect header,
	 * convert offset of this to bytes.
	 */
	ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
	/*
	 * Make sure that fragments have a data length
	 * that's a non-zero multiple of 8 bytes, unless
	 * this is the last fragment.
	 */
	if (ip->ip_len == htons(0) ||
	    ((ip->ip_off & htons(IP_MF)) && (ntohs(ip->ip_len) & 0x7) != 0)) {
		IPSTAT_INC(ips_toosmall); /* XXX */
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}
	if (ip->ip_off & htons(IP_MF))
		m->m_flags |= M_IP_FRAG;
	else
		m->m_flags &= ~M_IP_FRAG;
	ip->ip_off = htons(ntohs(ip->ip_off) << 3);

	/*
	 * Make sure the fragment lies within a packet of valid size.
	 */
	if (ntohs(ip->ip_len) + ntohs(ip->ip_off) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	/*
	 * Store receive network interface pointer for later.
	 */
	srcifp = m->m_pkthdr.rcvif;

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	IPSTAT_INC(ips_fragments);
	m->m_pkthdr.PH_loc.ptr = ip;

	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;
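
	/*
	 * Key the reassembly queue hash on the flow identity of the
	 * fragment: source and destination address, protocol, and IP ID.
	 * The per-VNET random seed (set in ipreass_vnet_init()) keeps
	 * remote senders from precomputing bucket collisions.
	 */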
	hashkey[0] = ip->ip_src.s_addr;
	hashkey[1] = ip->ip_dst.s_addr;
	hashkey[2] = (uint32_t)ip->ip_p << 16;
	hashkey[2] += ip->ip_id;
	hash = jenkins_hash32(hashkey, nitems(hashkey), V_ipq_hashseed);
	hash &= IPREASS_HMASK;
	head = &V_ipq[hash].head;
	IPQ_LOCK(hash);

	/*
	 * Look for queue of fragments
	 * of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			break;

	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		if (V_ipq[hash].count < V_ipreass_maxbucketsize)
			fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		else
			fp = ipq_reuse(hash);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_ipq[hash].count++;
		fp->ipq_nfrags = 1;
		atomic_add_int(&nfrags, 1);
		fp->ipq_expire = time_uptime + V_ipfragttl;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		if (m->m_flags & M_IP_FRAG)
			fp->ipq_maxoff = -1;
		else
			fp->ipq_maxoff = ntohs(ip->ip_off) + ntohs(ip->ip_len);
		m->m_nextpkt = NULL;
		if (fp == TAILQ_LAST(head, ipqhead))
			callout_reset_sbt(&V_ipq[hash].timer,
			    SBT_1S * V_ipfragttl, SBT_1S, ipreass_callout,
			    &V_ipq[hash], 0);
		else
			MPASS(callout_active(&V_ipq[hash].timer));
		goto done;
	} else {
		/*
		 * If we already saw the last fragment, make sure
		 * this fragment's offset looks sane.  Otherwise, if
		 * this is the last fragment, record its endpoint.
		 */
		if (fp->ipq_maxoff > 0) {
			i = ntohs(ip->ip_off) + ntohs(ip->ip_len);
			if (((m->m_flags & M_IP_FRAG) && i >= fp->ipq_maxoff) ||
			    ((m->m_flags & M_IP_FRAG) == 0 &&
			    i != fp->ipq_maxoff)) {
				fp = NULL;
				goto dropfrag;
			}
		} else if ((m->m_flags & M_IP_FRAG) == 0)
			fp->ipq_maxoff = ntohs(ip->ip_off) + ntohs(ip->ip_len);
		fp->ipq_nfrags++;
		atomic_add_int(&nfrags, 1);
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}

#define GETIP(m)	((struct ip*)((m)->m_pkthdr.PH_loc.ptr))
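
	/*
	 * Each queued fragment keeps a pointer to its own IP header in
	 * m_pkthdr.PH_loc.ptr (stashed above, before m_data was advanced
	 * past the header); GETIP() recovers it.
	 */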

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) -
		    ntohs(ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ip->ip_len))
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off = htons(ntohs(ip->ip_off) + i);
			ip->ip_len = htons(ntohs(ip->ip_len) - i);
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) >
	    ntohs(GETIP(q)->ip_off); q = nq) {
		i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) -
		    ntohs(GETIP(q)->ip_off);
		if (i < ntohs(GETIP(q)->ip_len)) {
			GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i);
			GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i);
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		IPSTAT_INC(ips_fragdropped);
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
		m_freem(q);
	}

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored.  (n = maxfragsperpacket.)
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (ntohs(GETIP(q)->ip_off) != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket)
				ipq_drop(&V_ipq[hash], fp);
			goto done;
		}
		next += ntohs(GETIP(q)->ip_len);
	}
	/* Make sure the last packet didn't have the IP_MF flag */
	if (p->m_flags & M_IP_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket)
			ipq_drop(&V_ipq[hash], fp);
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		ipq_drop(&V_ipq[hash], fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_demote_pkthdr(q);
		m_cat(m, q);
	}

	/*
	 * In order to do checksumming faster we do 'end-around carry' here
	 * (and not in for{} loop), though it implies we are not going to
	 * reassemble more than 64k fragments.
	 */
	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);
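	/*
	 * Example: a partial-sum value of 0x12345678 folds to
	 * 0x5678 + 0x1234 == 0x68ac, preserving the ones-complement sum.
	 */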
	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create header for new ip packet by modifying header of first
	 * packet; dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = htons((ip->ip_hl << 2) + next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_ipq[hash].count--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR) {	/* XXX this should be done elsewhere */
		m_fixhdr(m);
		/* set valid receive interface pointer */
		m->m_pkthdr.rcvif = srcifp;
	}
	IPSTAT_INC(ips_reassembled);
	ipreass_reschedule(&V_ipq[hash]);
	IPQ_UNLOCK(hash);

#ifdef	RSS
	/*
	 * Query the RSS layer for the flowid / flowtype for the
	 * mbuf payload.
	 *
	 * For now, just assume we have to calculate a new one.
	 * Later on we should check to see if the assigned flowid matches
	 * what RSS wants for the given IP protocol and if so, just keep it.
	 *
	 * We then queue into the relevant netisr so it can be dispatched
	 * to the correct CPU.
	 *
	 * Note - this may return 1, which means the flowid in the mbuf
	 * is correct for the configured RSS hash types and can be used.
	 */
	if (rss_mbuf_software_hash_v4(m, 0, &rss_hash, &rss_type) == 0) {
		m->m_pkthdr.flowid = rss_hash;
		M_HASHTYPE_SET(m, rss_type);
	}

	/*
	 * Queue/dispatch for reprocessing.
	 *
	 * Note: this is much slower than just handling the frame in the
	 * current receive context.  It's likely worth investigating
	 * ways to avoid this.
	 */
	netisr_dispatch(NETISR_IP_DIRECT, m);
	return (NULL);
#endif
	return (m);

dropfrag:
	IPSTAT_INC(ips_fragdropped);
	if (fp != NULL) {
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
	}
	m_freem(m);
done:
	IPQ_UNLOCK(hash);
	return (NULL);
#undef GETIP
}

/*
 * Timer expired on a bucket.
 * There should be at least one ipq to be timed out.
 */
static void
ipreass_callout(void *arg)
{
	struct ipqbucket *bucket = arg;
	struct ipq *fp;

	IPQ_BUCKET_LOCK_ASSERT(bucket);
	MPASS(atomic_load_int(&nfrags) > 0);

	CURVNET_SET(bucket->vnet);
	fp = TAILQ_LAST(&bucket->head, ipqhead);
	KASSERT(fp != NULL && fp->ipq_expire <= time_uptime,
	    ("%s: stray callout on bucket %p, %ju < %ju", __func__, bucket,
	    fp ? (uintmax_t)fp->ipq_expire : 0, (uintmax_t)time_uptime));

	while (fp != NULL && fp->ipq_expire <= time_uptime) {
		ipq_timeout(bucket, fp);
		fp = TAILQ_LAST(&bucket->head, ipqhead);
	}
	ipreass_reschedule(bucket);
	CURVNET_RESTORE();
}
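
/*
 * Each bucket keeps a single callout armed for the expiry time of its
 * oldest entry (the TAILQ_LAST one, since new queues are inserted at the
 * head).  Whenever the tail changes, re-arm the callout accordingly, or
 * stop it if the bucket went empty.
 */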
static void
ipreass_reschedule(struct ipqbucket *bucket)
{
	struct ipq *fp;

	IPQ_BUCKET_LOCK_ASSERT(bucket);

	if ((fp = TAILQ_LAST(&bucket->head, ipqhead)) != NULL) {
		time_t t;

		/* Protect against time_uptime tick. */
		t = fp->ipq_expire - time_uptime;
		t = (t > 0) ? t : 1;
		callout_reset_sbt(&bucket->timer, SBT_1S * t, SBT_1S,
		    ipreass_callout, bucket, 0);
	} else
		callout_stop(&bucket->timer);
}

static void
ipreass_drain_vnet(void)
{
	u_int dropped = 0;

	for (int i = 0; i < V_ipq_hashsize; i++) {
		bool resched;

		IPQ_LOCK(i);
		resched = !TAILQ_EMPTY(&V_ipq[i].head);
		while (!TAILQ_EMPTY(&V_ipq[i].head)) {
			struct ipq *fp = TAILQ_FIRST(&V_ipq[i].head);

			dropped += fp->ipq_nfrags;
			ipq_free(&V_ipq[i], fp);
		}
		if (resched)
			ipreass_reschedule(&V_ipq[i]);
		KASSERT(V_ipq[i].count == 0,
		    ("%s: V_ipq[%d] count %d (V_ipq=%p)", __func__, i,
		    V_ipq[i].count, V_ipq));
		IPQ_UNLOCK(i);
	}
	IPSTAT_ADD(ips_fragdropped, dropped);
}

/*
 * Drain off all datagram fragments.
 */
static void
ipreass_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		ipreass_drain_vnet();
		CURVNET_RESTORE();
	}
}

/*
 * Initialize IP reassembly structures.
 */
MALLOC_DEFINE(M_IPREASS_HASH, "IP reass", "IP packet reassembly hash headers");
void
ipreass_vnet_init(void)
{
	int max;

	V_ipq_hashsize = IPREASS_NHASH;
	TUNABLE_INT_FETCH("net.inet.ip.reass_hashsize", &V_ipq_hashsize);
	V_ipq = malloc(sizeof(struct ipqbucket) * V_ipq_hashsize,
	    M_IPREASS_HASH, M_WAITOK);

	for (int i = 0; i < V_ipq_hashsize; i++) {
		TAILQ_INIT(&V_ipq[i].head);
		mtx_init(&V_ipq[i].lock, "IP reassembly", NULL,
		    MTX_DEF | MTX_DUPOK | MTX_NEW);
		callout_init_mtx(&V_ipq[i].timer, &V_ipq[i].lock, 0);
		V_ipq[i].count = 0;
#ifdef VIMAGE
		V_ipq[i].vnet = curvnet;
#endif
	}
	V_ipq_hashseed = arc4random();
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	max = IP_MAXFRAGPACKETS;
	max = uma_zone_set_max(V_ipq_zone, max);
	V_ipreass_maxbucketsize = imax(max / (V_ipq_hashsize / 2), 1);
}

void
ipreass_init(void)
{

	maxfrags = IP_MAXFRAGS;
	EVENTHANDLER_REGISTER(nmbclusters_change, ipreass_zone_change,
	    NULL, EVENTHANDLER_PRI_ANY);
	EVENTHANDLER_REGISTER(vm_lowmem, ipreass_drain, NULL,
	    LOWMEM_PRI_DEFAULT);
	EVENTHANDLER_REGISTER(mbuf_lowmem, ipreass_drain, NULL,
	    LOWMEM_PRI_DEFAULT);
}

/*
 * Drain off all datagram fragments belonging to
 * the given network interface.
 */
static void
ipreass_cleanup(void *arg __unused, struct ifnet *ifp)
{
	struct ipq *fp, *temp;
	struct mbuf *m;
	int i;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	CURVNET_SET_QUIET(ifp->if_vnet);

	/*
	 * Skip processing if IPv4 reassembly is not initialised or
	 * torn down by ipreass_destroy().
	 */
	if (V_ipq_zone == NULL) {
		CURVNET_RESTORE();
		return;
	}

	for (i = 0; i < V_ipq_hashsize; i++) {
		IPQ_LOCK(i);
		/* Scan fragment list. */
		TAILQ_FOREACH_SAFE(fp, &V_ipq[i].head, ipq_list, temp) {
			for (m = fp->ipq_frags; m != NULL; m = m->m_nextpkt) {
				/* clear no longer valid rcvif pointer */
				if (m->m_pkthdr.rcvif == ifp)
					m->m_pkthdr.rcvif = NULL;
			}
		}
		IPQ_UNLOCK(i);
	}
	CURVNET_RESTORE();
}
EVENTHANDLER_DEFINE(ifnet_departure_event, ipreass_cleanup, NULL, 0);

#ifdef VIMAGE
/*
 * Destroy IP reassembly structures.
 */
void
ipreass_destroy(void)
{

	ipreass_drain_vnet();
	uma_zdestroy(V_ipq_zone);
	V_ipq_zone = NULL;
	for (int i = 0; i < V_ipq_hashsize; i++)
		mtx_destroy(&V_ipq[i].lock);
	free(V_ipq, M_IPREASS_HASH);
}
#endif

/*
 * After the reassembly queue limit has been updated, propagate the change
 * to UMA.  The UMA zone max has slightly different semantics than the
 * sysctl, for historical reasons.
 */
static void
ipreass_drain_tomax(void)
{
	struct ipq *fp;
	int target;

	/*
	 * Make sure each bucket is under the new limit.  If
	 * necessary, drop enough of the oldest elements from
	 * each bucket to get under the new limit.
	 */
	for (int i = 0; i < V_ipq_hashsize; i++) {
		IPQ_LOCK(i);
		while (V_ipq[i].count > V_ipreass_maxbucketsize &&
		    (fp = TAILQ_LAST(&V_ipq[i].head, ipqhead)) != NULL)
			ipq_timeout(&V_ipq[i], fp);
		ipreass_reschedule(&V_ipq[i]);
		IPQ_UNLOCK(i);
	}

	/*
	 * If we are over the maximum number of fragments,
	 * drain off enough to get down to the new limit,
	 * stripping off last elements on queues.  Every
	 * run we strip the oldest element from each bucket.
	 */
	target = uma_zone_get_max(V_ipq_zone);
	while (uma_zone_get_cur(V_ipq_zone) > target) {
		for (int i = 0; i < V_ipq_hashsize; i++) {
			IPQ_LOCK(i);
			fp = TAILQ_LAST(&V_ipq[i].head, ipqhead);
			if (fp != NULL) {
				ipq_timeout(&V_ipq[i], fp);
				ipreass_reschedule(&V_ipq[i]);
			}
			IPQ_UNLOCK(i);
		}
	}
}

static void
ipreass_zone_change(void *tag)
{
	VNET_ITERATOR_DECL(vnet_iter);
	int max;

	maxfrags = IP_MAXFRAGS;
	max = IP_MAXFRAGPACKETS;
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (V_ipq_hashsize / 2), 1);
		ipreass_drain_tomax();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Change the limit on the UMA zone, or disable fragment allocation
 * entirely.  Since 0 and -1 are special values here, we need our own
 * handler, instead of sysctl_handle_uma_zone_max().
 */
static int
sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	if (V_noreass == 0) {
		max = uma_zone_get_max(V_ipq_zone);
		if (max == 0)
			max = -1;
	} else
		max = 0;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max > 0) {
		/*
		 * XXXRW: Might be a good idea to sanity check the argument
		 * and place an extreme upper bound.
		 */
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (V_ipq_hashsize / 2), 1);
		ipreass_drain_tomax();
		V_noreass = 0;
	} else if (max == 0) {
		V_noreass = 1;
		ipreass_drain();
	} else if (max == -1) {
		V_noreass = 0;
		uma_zone_set_max(V_ipq_zone, 0);
		V_ipreass_maxbucketsize = INT_MAX;
	} else
		return (EINVAL);
	return (0);
}

/*
 * Look for an old fragment queue header that can be reused.  Try to
 * reuse a header from the currently locked hash bucket first.
 */
static struct ipq *
ipq_reuse(int start)
{
	struct ipq *fp;
	int bucket, i;

	IPQ_LOCK_ASSERT(start);

	for (i = 0; i < V_ipq_hashsize; i++) {
		bucket = (start + i) % V_ipq_hashsize;
		if (bucket != start && IPQ_TRYLOCK(bucket) == 0)
			continue;
		fp = TAILQ_LAST(&V_ipq[bucket].head, ipqhead);
		if (fp) {
			struct mbuf *m;

			IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
			atomic_subtract_int(&nfrags, fp->ipq_nfrags);
			while (fp->ipq_frags) {
				m = fp->ipq_frags;
				fp->ipq_frags = m->m_nextpkt;
				m_freem(m);
			}
			TAILQ_REMOVE(&V_ipq[bucket].head, fp, ipq_list);
			V_ipq[bucket].count--;
			ipreass_reschedule(&V_ipq[bucket]);
			if (bucket != start)
				IPQ_UNLOCK(bucket);
			break;
		}
		if (bucket != start)
			IPQ_UNLOCK(bucket);
	}
	IPQ_LOCK_ASSERT(start);
	return (fp);
}

/*
 * Free a fragment reassembly header and all associated datagrams.
 */
static void
ipq_free(struct ipqbucket *bucket, struct ipq *fp)
{
	struct mbuf *q;

	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(&bucket->head, fp, ipq_list);
	bucket->count--;
	uma_zfree(V_ipq_zone, fp);
}

/*
 * Get or set the maximum number of reassembly queues per bucket.
 */
static int
sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	max = V_ipreass_maxbucketsize;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max <= 0)
		return (EINVAL);
	V_ipreass_maxbucketsize = max;
	ipreass_drain_tomax();
	return (0);
}

/*
 * Get or set the IP fragment time to live.
 */
static int
sysctl_fragttl(SYSCTL_HANDLER_ARGS)
{
	u_int ttl;
	int error;

	ttl = V_ipfragttl;
	error = sysctl_handle_int(oidp, &ttl, 0, req);
	if (error || !req->newptr)
		return (error);

	if (ttl < 1 || ttl > MAXTTL)
		return (EINVAL);

	atomic_store_int(&V_ipfragttl, ttl);
	return (0);
}