/*-
 * Copyright (c) 2015 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2015 Adrian Chadd <adrian@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/hash.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <net/rss_config.h>
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_rss.h>
#ifdef MAC
#include <security/mac/mac_framework.h>
#endif

SYSCTL_DECL(_net_inet_ip);

/*
 * Reassembly headers are stored in hash buckets.
 */
#define	IPREASS_NHASH_LOG2	10
#define	IPREASS_NHASH		(1 << IPREASS_NHASH_LOG2)
#define	IPREASS_HMASK		(IPREASS_NHASH - 1)
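
/*
 * Since IPREASS_NHASH is a power of two, reducing a 32-bit hash with the
 * IPREASS_HMASK bitmask is equivalent to (and cheaper than) taking it
 * modulo the number of buckets.
 */
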
struct ipqbucket {
    TAILQ_HEAD(ipqhead, ipq) head;
    struct mtx	lock;
    int		count;
};

VNET_DEFINE_STATIC(struct ipqbucket, ipq[IPREASS_NHASH]);
#define	V_ipq		VNET(ipq)
VNET_DEFINE_STATIC(uint32_t, ipq_hashseed);
#define	V_ipq_hashseed	VNET(ipq_hashseed)

#define	IPQ_LOCK(i)	mtx_lock(&V_ipq[i].lock)
#define	IPQ_TRYLOCK(i)	mtx_trylock(&V_ipq[i].lock)
#define	IPQ_UNLOCK(i)	mtx_unlock(&V_ipq[i].lock)
#define	IPQ_LOCK_ASSERT(i)	mtx_assert(&V_ipq[i].lock, MA_OWNED)

VNET_DEFINE_STATIC(int, ipreass_maxbucketsize);
#define	V_ipreass_maxbucketsize	VNET(ipreass_maxbucketsize)
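
/*
 * All reassembly state above is per-VNET: a hash table of TAILQs of
 * struct ipq, with each bucket guarded by its own mutex so that
 * fragments hashing to different buckets can be processed in parallel.
 */
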
void	ipreass_init(void);
void	ipreass_drain(void);
void	ipreass_slowtimo(void);
#ifdef VIMAGE
void	ipreass_destroy(void);
#endif

static int	sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS);
static int	sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS);
static void	ipreass_zone_change(void *);
static void	ipreass_drain_tomax(void);
static void	ipq_free(struct ipqbucket *, struct ipq *);
static struct ipq *ipq_reuse(int);

static void
ipq_timeout(struct ipqbucket *bucket, struct ipq *fp)
{

    IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
    ipq_free(bucket, fp);
}

static void
ipq_drop(struct ipqbucket *bucket, struct ipq *fp)
{

    IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
    ipq_free(bucket, fp);
}
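
/*
 * The two helpers above differ only in which statistic they bump
 * (ips_fragtimeout for expired queues, ips_fragdropped for policy
 * drops); the actual teardown is common to both and lives in ipq_free().
 */
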
/*
 * By default, limit the number of IP fragments across all reassembly
 * queues to 1/32 of the total number of mbuf clusters.
 *
 * Limit the total number of reassembly queues per VNET to the
 * IP fragment limit, but ensure the limit will not allow any bucket
 * to grow above 100 items.  (The bucket limit is
 * IP_MAXFRAGPACKETS / (IPREASS_NHASH / 2), so the 50 is the correct
 * multiplier to reach a 100-item limit.)
 * The 100-item limit was chosen as brief testing seems to show that
 * this produces "reasonable" performance on some subset of systems
 * under DoS attack.
 */
#define	IP_MAXFRAGS		(nmbclusters / 32)
#define	IP_MAXFRAGPACKETS	(imin(IP_MAXFRAGS, IPREASS_NHASH * 50))
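
/*
 * Checking the arithmetic: with IPREASS_NHASH == 1024, the queue cap is
 * at most 1024 * 50 == 51200 entries, and the per-bucket limit computed
 * in ipreass_init() is 51200 / (1024 / 2) == 100 entries, matching the
 * 100-item target described above.
 */
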
static int		maxfrags;
static volatile u_int	nfrags;
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfrags, CTLFLAG_RW,
    &maxfrags, 0,
    "Maximum number of IPv4 fragments allowed across all reassembly queues");
SYSCTL_UINT(_net_inet_ip, OID_AUTO, curfrags, CTLFLAG_RD,
    __DEVOLATILE(u_int *, &nfrags), 0,
    "Current number of IPv4 fragments across all reassembly queues");

VNET_DEFINE_STATIC(uma_zone_t, ipq_zone);
#define	V_ipq_zone	VNET(ipq_zone)
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLFLAG_VNET |
    CTLTYPE_INT | CTLFLAG_RW, NULL, 0, sysctl_maxfragpackets, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");
SYSCTL_UMA_CUR(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_VNET,
    &VNET_NAME(ipq_zone),
    "Current number of IPv4 fragment reassembly queue entries");

VNET_DEFINE_STATIC(int, noreass);
#define	V_noreass	VNET(noreass)

VNET_DEFINE_STATIC(int, maxfragsperpacket);
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragbucketsize,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
    sysctl_maxfragbucketsize, "I",
    "Maximum number of IPv4 fragment reassembly queue entries per bucket");

/*
 * Take an incoming datagram fragment and try to reassemble it into a
 * whole datagram.  If the argument is the first fragment or one
 * in between, the function will return NULL and store the mbuf
 * in the fragment chain.  If the argument is the last fragment,
 * the packet will be reassembled and the pointer to the new
 * mbuf returned for further processing.  Only m_tags attached
 * to the first packet/fragment are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
#define	M_IP_FRAG	M_PROTO9
struct mbuf *
ip_reass(struct mbuf *m)
{
    struct ip *ip;
    struct mbuf *p, *q, *nq, *t;
    struct ipq *fp;
    struct ipqhead *head;
    int i, hlen, next, tmpmax;
    uint8_t ecn, ecn0;
    uint32_t hash, hashkey[3];
#ifdef RSS
    uint32_t rss_hash, rss_type;
#endif

    /*
     * If reassembly is disabled or maxfragsperpacket is 0,
     * never accept fragments.
     * Also, drop the packet if it would exceed the maximum
     * number of fragments.
     */
    tmpmax = maxfrags;
    if (V_noreass == 1 || V_maxfragsperpacket == 0 ||
        (tmpmax >= 0 && atomic_load_int(&nfrags) >= (u_int)tmpmax)) {
        IPSTAT_INC(ips_fragments);
        IPSTAT_INC(ips_fragdropped);
        m_freem(m);
        return (NULL);
    }

    ip = mtod(m, struct ip *);
    hlen = ip->ip_hl << 2;

    /*
     * Adjust ip_len to not reflect the header,
     * convert the offset of this fragment to bytes.
     */
    ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
    /*
     * Make sure that fragments have a data length
     * that's a non-zero multiple of 8 bytes, unless
     * this is the last fragment.
     */
    if (ip->ip_len == htons(0) ||
        ((ip->ip_off & htons(IP_MF)) && (ntohs(ip->ip_len) & 0x7) != 0)) {
        IPSTAT_INC(ips_toosmall);	/* XXX */
        IPSTAT_INC(ips_fragdropped);
        m_freem(m);
        return (NULL);
    }
    if (ip->ip_off & htons(IP_MF))
        m->m_flags |= M_IP_FRAG;
    else
        m->m_flags &= ~M_IP_FRAG;
    ip->ip_off = htons(ntohs(ip->ip_off) << 3);

    /*
     * Make sure the fragment lies within a packet of valid size.
     */
    if (ntohs(ip->ip_len) + ntohs(ip->ip_off) > IP_MAXPACKET) {
        IPSTAT_INC(ips_toolong);
        IPSTAT_INC(ips_fragdropped);
        m_freem(m);
        return (NULL);
    }

    /*
     * Attempt reassembly; if it succeeds, proceed.
     * ip_reass() will return a different mbuf.
     */
    IPSTAT_INC(ips_fragments);
    m->m_pkthdr.PH_loc.ptr = ip;

    /*
     * The presence of header sizes in mbufs
     * would confuse code below.
     */
    m->m_data += hlen;
    m->m_len -= hlen;

    hashkey[0] = ip->ip_src.s_addr;
    hashkey[1] = ip->ip_dst.s_addr;
    hashkey[2] = (uint32_t)ip->ip_p << 16;
    hashkey[2] += ip->ip_id;
    hash = jenkins_hash32(hashkey, nitems(hashkey), V_ipq_hashseed);
    hash &= IPREASS_HMASK;
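    /*
     * Because the hash is seeded with a per-VNET arc4random() value
     * (set in ipreass_init()), remote senders cannot predict which
     * bucket a given <src, dst, proto, id> tuple maps to, and so
     * cannot aim all of their fragments at a single bucket.
     */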
    head = &V_ipq[hash].head;
    IPQ_LOCK(hash);

    /*
     * Look for a queue of fragments
     * of this datagram.
     */
    TAILQ_FOREACH(fp, head, ipq_list)
        if (ip->ip_id == fp->ipq_id &&
            ip->ip_src.s_addr == fp->ipq_src.s_addr &&
            ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
            mac_ipq_match(m, fp) &&
#endif
            ip->ip_p == fp->ipq_p)
            break;
    /*
     * If this is the first fragment to arrive, create a reassembly queue.
     */
    if (fp == NULL) {
        if (V_ipq[hash].count < V_ipreass_maxbucketsize)
            fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
        if (fp == NULL)
            fp = ipq_reuse(hash);
        if (fp == NULL)
            goto dropfrag;
#ifdef MAC
        if (mac_ipq_init(fp, M_NOWAIT) != 0) {
            uma_zfree(V_ipq_zone, fp);
            fp = NULL;
            goto dropfrag;
        }
        mac_ipq_create(m, fp);
#endif
        TAILQ_INSERT_HEAD(head, fp, ipq_list);
        V_ipq[hash].count++;
        fp->ipq_nfrags = 1;
        atomic_add_int(&nfrags, 1);
        fp->ipq_ttl = IPFRAGTTL;
        fp->ipq_p = ip->ip_p;
        fp->ipq_id = ip->ip_id;
        fp->ipq_src = ip->ip_src;
        fp->ipq_dst = ip->ip_dst;
        fp->ipq_frags = m;
        if (m->m_flags & M_IP_FRAG)
            fp->ipq_maxoff = -1;
        else
            fp->ipq_maxoff = ntohs(ip->ip_off) + ntohs(ip->ip_len);
        m->m_nextpkt = NULL;
        goto done;
    } else {
        /*
         * If we already saw the last fragment, make sure
         * this fragment's offset looks sane.  Otherwise, if
         * this is the last fragment, record its endpoint.
         */
        if (fp->ipq_maxoff > 0) {
            i = ntohs(ip->ip_off) + ntohs(ip->ip_len);
            if (((m->m_flags & M_IP_FRAG) && i >= fp->ipq_maxoff) ||
                ((m->m_flags & M_IP_FRAG) == 0 &&
                i != fp->ipq_maxoff)) {
                fp = NULL;
                goto dropfrag;
            }
        } else if ((m->m_flags & M_IP_FRAG) == 0)
            fp->ipq_maxoff = ntohs(ip->ip_off) + ntohs(ip->ip_len);
        fp->ipq_nfrags++;
        atomic_add_int(&nfrags, 1);
#ifdef MAC
        mac_ipq_update(m, fp);
#endif
    }

#define GETIP(m)	((struct ip *)((m)->m_pkthdr.PH_loc.ptr))
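
    /*
     * Each fragment's IP header pointer was stashed in
     * m_pkthdr.PH_loc.ptr above, before m_data was advanced past the
     * header, so GETIP() can still reach the header of any queued
     * fragment even though m_data/m_len now describe only the payload.
     */
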
    /*
     * Handle ECN by comparing this segment with the first one;
     * if CE is set, do not lose CE.
     * Drop if CE and not-ECT are mixed for the same packet.
     */
    ecn = ip->ip_tos & IPTOS_ECN_MASK;
    ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
    if (ecn == IPTOS_ECN_CE) {
        if (ecn0 == IPTOS_ECN_NOTECT)
            goto dropfrag;
        if (ecn0 != IPTOS_ECN_CE)
            GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
    }
    if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
        goto dropfrag;

    /*
     * Find a segment which begins after this one does.
     */
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
        if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off))
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already.  If so, drop the data from the incoming
     * segment.  If it provides all of our data, drop us, otherwise
     * stick the new segment in the proper place.
     *
     * If some of the data is dropped from the preceding
     * segment, then its checksum is invalidated.
     */
    if (p) {
        i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) -
            ntohs(ip->ip_off);
        if (i > 0) {
            if (i >= ntohs(ip->ip_len))
                goto dropfrag;
            m_adj(m, i);
            m->m_pkthdr.csum_flags = 0;
            ip->ip_off = htons(ntohs(ip->ip_off) + i);
            ip->ip_len = htons(ntohs(ip->ip_len) - i);
        }
        m->m_nextpkt = p->m_nextpkt;
        p->m_nextpkt = m;
    } else {
        m->m_nextpkt = fp->ipq_frags;
        fp->ipq_frags = m;
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) >
        ntohs(GETIP(q)->ip_off); q = nq) {
        i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) -
            ntohs(GETIP(q)->ip_off);
        if (i < ntohs(GETIP(q)->ip_len)) {
            GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i);
            GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i);
            m_adj(q, i);
            q->m_pkthdr.csum_flags = 0;
            break;
        }
        nq = q->m_nextpkt;
        m->m_nextpkt = nq;
        IPSTAT_INC(ips_fragdropped);
        fp->ipq_nfrags--;
        atomic_subtract_int(&nfrags, 1);
        m_freem(q);
    }
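
    /*
     * Trimming invalidates any hardware-verified checksum on the
     * trimmed mbuf (its csum_data no longer matches its payload),
     * which is why csum_flags is cleared above whenever m_adj() is
     * used to drop overlapping bytes.
     */
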
    /*
     * Check for complete reassembly and perform frag per packet
     * limiting.
     *
     * Frag limiting is performed here so that the nth frag has
     * a chance to complete the packet before we drop the packet.
     * As a result, n+1 frags are actually allowed per packet, but
     * only n will ever be stored.  (n = maxfragsperpacket.)
     */
    next = 0;
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
        if (ntohs(GETIP(q)->ip_off) != next) {
            if (fp->ipq_nfrags > V_maxfragsperpacket)
                ipq_drop(&V_ipq[hash], fp);
            goto done;
        }
        next += ntohs(GETIP(q)->ip_len);
    }
    /* Make sure the last packet didn't have the IP_MF flag. */
    if (p->m_flags & M_IP_FRAG) {
        if (fp->ipq_nfrags > V_maxfragsperpacket)
            ipq_drop(&V_ipq[hash], fp);
        goto done;
    }

    /*
     * Reassembly is complete.  Make sure the packet is a sane size.
     */
    q = fp->ipq_frags;
    ip = GETIP(q);
    if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
        IPSTAT_INC(ips_toolong);
        ipq_drop(&V_ipq[hash], fp);
        goto done;
    }

    /*
     * Concatenate fragments.
     */
    m = q;
    t = m->m_next;
    m->m_next = NULL;
    m_cat(m, t);
    nq = q->m_nextpkt;
    q->m_nextpkt = NULL;
    for (q = nq; q != NULL; q = nq) {
        nq = q->m_nextpkt;
        q->m_nextpkt = NULL;
        m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
        m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
        m_demote_pkthdr(q);
        m_cat(m, q);
    }
    /*
     * In order to do checksumming faster we do 'end-around carry' here
     * (and not in the for{} loop), though it implies we are not going to
     * reassemble more than 64k fragments.
     */
    while (m->m_pkthdr.csum_data & 0xffff0000)
        m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
            (m->m_pkthdr.csum_data >> 16);
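    /*
     * For example, an accumulated csum_data of 0x2fffe folds to
     * 0xfffe + 0x2 == 0x10000 on the first pass and to
     * 0x0000 + 0x1 == 0x1 on the second, which is why this is a loop
     * rather than a single fold.
     */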
    atomic_subtract_int(&nfrags, fp->ipq_nfrags);
#ifdef MAC
    mac_ipq_reassemble(fp, m);
    mac_ipq_destroy(fp);
#endif

    /*
     * Create the header for the new IP packet by modifying the header
     * of the first packet; dequeue and discard the fragment reassembly
     * header.  Make the header visible.
     */
    ip->ip_len = htons((ip->ip_hl << 2) + next);
    ip->ip_src = fp->ipq_src;
    ip->ip_dst = fp->ipq_dst;
    TAILQ_REMOVE(head, fp, ipq_list);
    V_ipq[hash].count--;
    uma_zfree(V_ipq_zone, fp);
    m->m_len += (ip->ip_hl << 2);
    m->m_data -= (ip->ip_hl << 2);
    /* Some debugging cruft by sklower, below, will go away soon. */
    if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
        m_fixhdr(m);
    IPSTAT_INC(ips_reassembled);
    IPQ_UNLOCK(hash);

#ifdef RSS
    /*
     * Query the RSS layer for the flowid / flowtype of the
     * reassembled payload.
     *
     * For now, just assume we have to calculate a new one.
     * Later on we should check to see if the assigned flowid matches
     * what RSS wants for the given IP protocol and if so, just keep it.
     *
     * We then queue into the relevant netisr so it can be dispatched
     * to the correct CPU.
     *
     * Note - this may return 1, which means the flowid in the mbuf
     * is correct for the configured RSS hash types and can be used.
     */
    if (rss_mbuf_software_hash_v4(m, 0, &rss_hash, &rss_type) == 0) {
        m->m_pkthdr.flowid = rss_hash;
        M_HASHTYPE_SET(m, rss_type);
    }

    /*
     * Queue/dispatch for reprocessing.
     *
     * Note: this is much slower than just handling the frame in the
     * current receive context.  It's likely worth investigating
     * why this is.
     */
    netisr_dispatch(NETISR_IP_DIRECT, m);
    return (NULL);
#endif

    /* Handle in-line. */
    return (m);

dropfrag:
    IPSTAT_INC(ips_fragdropped);
    if (fp != NULL) {
        fp->ipq_nfrags--;
        atomic_subtract_int(&nfrags, 1);
    }
    m_freem(m);
done:
    IPQ_UNLOCK(hash);
    return (NULL);
}

/*
 * Initialize IP reassembly structures.
 */
void
ipreass_init(void)
{
    int max;

    for (int i = 0; i < IPREASS_NHASH; i++) {
        TAILQ_INIT(&V_ipq[i].head);
        /*
         * MTX_DUPOK: ipq_reuse() may trylock a second bucket's lock
         * of this same class while already holding one.
         */
        mtx_init(&V_ipq[i].lock, "IP reassembly", NULL,
            MTX_DEF | MTX_DUPOK);
        V_ipq[i].count = 0;
    }
    V_ipq_hashseed = arc4random();
    V_maxfragsperpacket = 16;
    V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
        NULL, UMA_ALIGN_PTR, 0);
    max = IP_MAXFRAGPACKETS;
    max = uma_zone_set_max(V_ipq_zone, max);
    V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);

    if (IS_DEFAULT_VNET(curvnet)) {
        maxfrags = IP_MAXFRAGS;
        EVENTHANDLER_REGISTER(nmbclusters_change, ipreass_zone_change,
            NULL, EVENTHANDLER_PRI_ANY);
    }
}
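
/*
 * Note that maxfrags is a global, not a per-VNET variable, which is why
 * only the default VNET registers the nmbclusters_change handler;
 * ipreass_zone_change() then iterates over every VNET to rescale the
 * per-VNET zone and bucket limits.
 */
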
/*
 * If a timer expires on a reassembly queue, discard it.
 */
void
ipreass_slowtimo(void)
{
    struct ipq *fp, *tmp;

    for (int i = 0; i < IPREASS_NHASH; i++) {
        IPQ_LOCK(i);
        TAILQ_FOREACH_SAFE(fp, &V_ipq[i].head, ipq_list, tmp)
            if (--fp->ipq_ttl == 0)
                ipq_timeout(&V_ipq[i], fp);
        IPQ_UNLOCK(i);
    }
}

/*
 * Drain off all datagram fragments.
 */
void
ipreass_drain(void)
{

    for (int i = 0; i < IPREASS_NHASH; i++) {
        IPQ_LOCK(i);
        while (!TAILQ_EMPTY(&V_ipq[i].head))
            ipq_drop(&V_ipq[i], TAILQ_FIRST(&V_ipq[i].head));
        KASSERT(V_ipq[i].count == 0,
            ("%s: V_ipq[%d] count %d (V_ipq=%p)", __func__, i,
            V_ipq[i].count, V_ipq));
        IPQ_UNLOCK(i);
    }
}

#ifdef VIMAGE
/*
 * Destroy IP reassembly structures.
 */
void
ipreass_destroy(void)
{

    ipreass_drain();
    uma_zdestroy(V_ipq_zone);
    for (int i = 0; i < IPREASS_NHASH; i++)
        mtx_destroy(&V_ipq[i].lock);
}
#endif

/*
 * After the maximum number of reassembly queues has been updated,
 * propagate the change to UMA.  The UMA zone max has slightly different
 * semantics than the sysctl, for historical reasons.
 */
static void
ipreass_drain_tomax(void)
{
    struct ipq *fp;
    int target;

    /*
     * Make sure each bucket is under the new limit.  If
     * necessary, drop enough of the oldest elements from
     * each bucket to get under the new limit.
     */
    for (int i = 0; i < IPREASS_NHASH; i++) {
        IPQ_LOCK(i);
        while (V_ipq[i].count > V_ipreass_maxbucketsize &&
            (fp = TAILQ_LAST(&V_ipq[i].head, ipqhead)) != NULL)
            ipq_timeout(&V_ipq[i], fp);
        IPQ_UNLOCK(i);
    }

    /*
     * If we are over the maximum number of fragments,
     * drain off enough to get down to the new limit,
     * stripping off the last elements on queues.  Every
     * run we strip the oldest element from each bucket.
     */
    target = uma_zone_get_max(V_ipq_zone);
    while (uma_zone_get_cur(V_ipq_zone) > target) {
        for (int i = 0; i < IPREASS_NHASH; i++) {
            IPQ_LOCK(i);
            fp = TAILQ_LAST(&V_ipq[i].head, ipqhead);
            if (fp != NULL)
                ipq_timeout(&V_ipq[i], fp);
            IPQ_UNLOCK(i);
        }
    }
}

static void
ipreass_zone_change(void *tag)
{
    VNET_ITERATOR_DECL(vnet_iter);
    int max;

    maxfrags = IP_MAXFRAGS;
    max = IP_MAXFRAGPACKETS;
    VNET_LIST_RLOCK_NOSLEEP();
    VNET_FOREACH(vnet_iter) {
        CURVNET_SET(vnet_iter);
        max = uma_zone_set_max(V_ipq_zone, max);
        V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
        ipreass_drain_tomax();
        CURVNET_RESTORE();
    }
    VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Change the limit on the UMA zone, or disable fragment allocation
 * entirely.  Since 0 and -1 are special values here, we need our own
 * handler instead of sysctl_handle_uma_zone_max().
 */
static int
sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS)
{
    int error, max;

    if (V_noreass == 0) {
        max = uma_zone_get_max(V_ipq_zone);
        if (max == 0)
            max = -1;
    } else
        max = 0;
    error = sysctl_handle_int(oidp, &max, 0, req);
    if (error || !req->newptr)
        return (error);
    if (max > 0) {
        /*
         * XXXRW: Might be a good idea to sanity check the argument
         * and place an extreme upper bound.
         */
        max = uma_zone_set_max(V_ipq_zone, max);
        V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
        ipreass_drain_tomax();
        V_noreass = 0;
    } else if (max == 0) {
        V_noreass = 1;
        ipreass_drain();
    } else if (max == -1) {
        V_noreass = 0;
        uma_zone_set_max(V_ipq_zone, 0);
        V_ipreass_maxbucketsize = INT_MAX;
    } else
        return (EINVAL);
    return (0);
}

/*
 * Look for an old fragment queue header that can be reused, trying the
 * currently locked hash bucket first.
 */
static struct ipq *
ipq_reuse(int start)
{
    struct ipq *fp;
    struct mbuf *m;
    int bucket, i;

    IPQ_LOCK_ASSERT(start);
    fp = NULL;
    for (i = 0; i < IPREASS_NHASH; i++) {
        bucket = (start + i) % IPREASS_NHASH;
        if (bucket != start && IPQ_TRYLOCK(bucket) == 0)
            continue;
        fp = TAILQ_LAST(&V_ipq[bucket].head, ipqhead);
        if (fp != NULL) {
            IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
            atomic_subtract_int(&nfrags, fp->ipq_nfrags);
            while (fp->ipq_frags) {
                m = fp->ipq_frags;
                fp->ipq_frags = m->m_nextpkt;
                m_freem(m);
            }
            TAILQ_REMOVE(&V_ipq[bucket].head, fp, ipq_list);
            V_ipq[bucket].count--;
            if (bucket != start)
                IPQ_UNLOCK(bucket);
            break;
        }
        if (bucket != start)
            IPQ_UNLOCK(bucket);
    }
    IPQ_LOCK_ASSERT(start);
    return (fp);
}

/*
 * Free a fragment reassembly header and all associated datagrams.
 */
static void
ipq_free(struct ipqbucket *bucket, struct ipq *fp)
{
    struct mbuf *q;

    atomic_subtract_int(&nfrags, fp->ipq_nfrags);
    while (fp->ipq_frags) {
        q = fp->ipq_frags;
        fp->ipq_frags = q->m_nextpkt;
        m_freem(q);
    }
    TAILQ_REMOVE(&bucket->head, fp, ipq_list);
    bucket->count--;
    uma_zfree(V_ipq_zone, fp);
}

/*
 * Get or set the maximum number of reassembly queues per bucket.
 */
static int
sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS)
{
    int error, max;

    max = V_ipreass_maxbucketsize;
    error = sysctl_handle_int(oidp, &max, 0, req);
    if (error || !req->newptr)
        return (error);
    if (max <= 0)
        return (EINVAL);
    V_ipreass_maxbucketsize = max;
    ipreass_drain_tomax();
    return (0);
}
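
/*
 * Usage summary for the two handlers above: net.inet.ip.maxfragpackets
 * treats 0 (disable reassembly) and -1 (no limit) as special values,
 * while net.inet.ip.maxfragbucketsize accepts only positive values;
 * both trigger ipreass_drain_tomax() so a lowered limit takes effect
 * immediately.
 */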