/*-
 * Copyright (c) 2015 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2015 Adrian Chadd <adrian@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/hash.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <net/rss_config.h>
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_rss.h>
#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
SYSCTL_DECL(_net_inet_ip);

/*
 * Reassembly headers are stored in hash buckets.
 */
#define	IPREASS_NHASH_LOG2	10
#define	IPREASS_NHASH		(1 << IPREASS_NHASH_LOG2)
#define	IPREASS_HMASK		(IPREASS_NHASH - 1)

struct ipqbucket {
	TAILQ_HEAD(ipqhead, ipq) head;
	struct mtx		 lock;
	int			 count;
};

VNET_DEFINE_STATIC(struct ipqbucket, ipq[IPREASS_NHASH]);
#define	V_ipq		VNET(ipq)
VNET_DEFINE_STATIC(uint32_t, ipq_hashseed);
#define	V_ipq_hashseed	VNET(ipq_hashseed)

#define	IPQ_LOCK(i)	mtx_lock(&V_ipq[i].lock)
#define	IPQ_TRYLOCK(i)	mtx_trylock(&V_ipq[i].lock)
#define	IPQ_UNLOCK(i)	mtx_unlock(&V_ipq[i].lock)
#define	IPQ_LOCK_ASSERT(i)	mtx_assert(&V_ipq[i].lock, MA_OWNED)

VNET_DEFINE_STATIC(int, ipreass_maxbucketsize);
#define	V_ipreass_maxbucketsize	VNET(ipreass_maxbucketsize)
void		ipreass_init(void);
void		ipreass_drain(void);
void		ipreass_slowtimo(void);
#ifdef VIMAGE
void		ipreass_destroy(void);
#endif
static int	sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS);
static int	sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS);
static void	ipreass_zone_change(void *);
static void	ipreass_drain_tomax(void);
static void	ipq_free(struct ipqbucket *, struct ipq *);
static struct ipq *ipq_reuse(int);
static void
ipq_timeout(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
	ipq_free(bucket, fp);
}

static void
ipq_drop(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
	ipq_free(bucket, fp);
}
/*
 * By default, limit the number of IP fragments across all reassembly
 * queues to 1/32 of the total number of mbuf clusters.
 *
 * Limit the total number of reassembly queues per VNET to the
 * IP fragment limit, but ensure the limit will not allow any bucket
 * to grow above 100 items.  (The bucket limit is
 * IP_MAXFRAGPACKETS / (IPREASS_NHASH / 2), so 50 is the correct
 * multiplier to reach a 100-item limit.)
 * The 100-item limit was chosen because brief testing suggests that it
 * produces "reasonable" performance on some subset of systems under
 * denial-of-service attack.
 */
#define	IP_MAXFRAGS		(nmbclusters / 32)
#define	IP_MAXFRAGPACKETS	(imin(IP_MAXFRAGS, IPREASS_NHASH * 50))
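/*
 * Worked example of the arithmetic above (illustrative only): with
 * IPREASS_NHASH_LOG2 == 10 there are 1024 buckets, so the per-VNET
 * cap is at most 1024 * 50 = 51200 queue entries, and the per-bucket
 * limit derived in ipreass_init() is 51200 / (1024 / 2) = 100.
 */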
static int		maxfrags;
static volatile u_int	nfrags;
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfrags, CTLFLAG_RW,
    &maxfrags, 0,
    "Maximum number of IPv4 fragments allowed across all reassembly queues");
SYSCTL_UINT(_net_inet_ip, OID_AUTO, curfrags, CTLFLAG_RD,
    __DEVOLATILE(u_int *, &nfrags), 0,
    "Current number of IPv4 fragments across all reassembly queues");

VNET_DEFINE_STATIC(uma_zone_t, ipq_zone);
#define	V_ipq_zone	VNET(ipq_zone)

SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLFLAG_VNET |
    CTLTYPE_INT | CTLFLAG_RW, NULL, 0, sysctl_maxfragpackets, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");
SYSCTL_UMA_CUR(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_VNET,
    &VNET_NAME(ipq_zone),
    "Current number of IPv4 fragment reassembly queue entries");
VNET_DEFINE_STATIC(int, noreass);
#define	V_noreass	VNET(noreass)

VNET_DEFINE_STATIC(int, maxfragsperpacket);
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragbucketsize,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
    sysctl_maxfragbucketsize, "I",
    "Maximum number of IPv4 fragment reassembly queue entries per bucket");
/*
 * Take an incoming datagram fragment and try to reassemble it into a
 * whole datagram.  If the argument is the first fragment or one in
 * between, the function will return NULL and store the mbuf on the
 * fragment chain.  If the argument is the last fragment, the packet
 * will be reassembled and the pointer to the new mbuf returned for
 * further processing.  Only m_tags attached to the first
 * packet/fragment are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
#define	M_IP_FRAG	M_PROTO9
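/*
 * Illustrative caller sketch (not part of this file), modeled loosely
 * on ip_input(): once the mbuf is handed off, the caller may only
 * continue if a fully reassembled datagram comes back.
 *
 *	m = ip_reass(m);
 *	if (m == NULL)
 *		return;			(fragment was queued or dropped)
 *	ip = mtod(m, struct ip *);	(header of the reassembled packet)
 *	hlen = ip->ip_hl << 2;
 */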
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp;
	struct ipqhead *head;
	int i, hlen, next, tmpmax;
	uint8_t ecn, ecn0;
	uint32_t hash, hashkey[3];
#ifdef RSS
	uint32_t rss_hash, rss_type;
#endif
	/*
	 * If reassembly is disabled, or if maxfragsperpacket is 0,
	 * never accept fragments.
	 * Also, drop the packet if it would exceed the maximum
	 * number of fragments.
	 */
	tmpmax = maxfrags;
	if (V_noreass == 1 || V_maxfragsperpacket == 0 ||
	    (tmpmax >= 0 && atomic_load_int(&nfrags) >= (u_int)tmpmax)) {
		IPSTAT_INC(ips_fragments);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}
	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	/*
	 * Adjust ip_len to not reflect the header, and
	 * convert the offset of this fragment to bytes.
	 */
	ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
	/*
	 * Make sure that fragments have a data length
	 * that is a non-zero multiple of 8 bytes, unless
	 * this is the last fragment.
	 */
	if (ip->ip_len == htons(0) ||
	    ((ip->ip_off & htons(IP_MF)) && (ntohs(ip->ip_len) & 0x7) != 0)) {
		IPSTAT_INC(ips_toosmall);		/* XXX */
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}
	if (ip->ip_off & htons(IP_MF))
		m->m_flags |= M_IP_FRAG;
	else
		m->m_flags &= ~M_IP_FRAG;
	ip->ip_off = htons(ntohs(ip->ip_off) << 3);
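	/*
	 * Example (illustrative): the 13-bit fragment offset field counts
	 * 8-byte units, so a stored offset of 185 becomes 185 << 3 == 1480,
	 * i.e. this fragment starts at byte 1480 of the original datagram.
	 */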
	/*
	 * Make sure the fragment lies within a packet of valid size.
	 */
	if (ntohs(ip->ip_len) + ntohs(ip->ip_off) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	IPSTAT_INC(ips_fragments);
	m->m_pkthdr.PH_loc.ptr = ip;

	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	hashkey[0] = ip->ip_src.s_addr;
	hashkey[1] = ip->ip_dst.s_addr;
	hashkey[2] = (uint32_t)ip->ip_p << 16;
	hashkey[2] += ip->ip_id;
	hash = jenkins_hash32(hashkey, nitems(hashkey), V_ipq_hashseed);
	hash &= IPREASS_HMASK;
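	/*
	 * Note: per RFC 791, the tuple (source, destination, protocol,
	 * identification) identifies the datagram being reassembled, and
	 * the seeded Jenkins hash over exactly that tuple spreads
	 * datagrams across buckets while keeping bucket placement
	 * unpredictable to an attacker.
	 */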
	head = &V_ipq[hash].head;
	IPQ_LOCK(hash);

	/*
	 * Look for a queue of fragments of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			break;

	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		if (V_ipq[hash].count < V_ipreass_maxbucketsize)
			fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			fp = ipq_reuse(hash);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_ipq[hash].count++;
		fp->ipq_nfrags = 1;
		atomic_add_int(&nfrags, 1);
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		if (m->m_flags & M_IP_FRAG)
			fp->ipq_maxoff = -1;
		else
			fp->ipq_maxoff = ntohs(ip->ip_off) + ntohs(ip->ip_len);
		m->m_nextpkt = NULL;
		goto done;
	} else {
		/*
		 * If we already saw the last fragment, make sure
		 * this fragment's offset looks sane.  Otherwise, if
		 * this is the last fragment, record its endpoint.
		 */
		if (fp->ipq_maxoff > 0) {
			i = ntohs(ip->ip_off) + ntohs(ip->ip_len);
			if (((m->m_flags & M_IP_FRAG) && i >= fp->ipq_maxoff) ||
			    ((m->m_flags & M_IP_FRAG) == 0 &&
			    i != fp->ipq_maxoff)) {
				fp = NULL;
				goto dropfrag;
			}
		} else if ((m->m_flags & M_IP_FRAG) == 0)
			fp->ipq_maxoff = ntohs(ip->ip_off) + ntohs(ip->ip_len);
		fp->ipq_nfrags++;
		atomic_add_int(&nfrags, 1);
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}
#define	GETIP(m)	((struct ip *)((m)->m_pkthdr.PH_loc.ptr))

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;
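	/*
	 * Summary of the ECN checks above: a CE fragment arriving for a
	 * not-ECT datagram, or a not-ECT fragment arriving for an ECT/CE
	 * datagram, is dropped, since RFC 3168 forbids mixing not-ECT
	 * with ECT/CE within one datagram; a CE fragment arriving for an
	 * ECT datagram marks the whole datagram CE, because CE, once
	 * seen, must not be lost.
	 */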
	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us; otherwise
	 * stick the new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) -
		    ntohs(ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ip->ip_len))
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off = htons(ntohs(ip->ip_off) + i);
			ip->ip_len = htons(ntohs(ip->ip_len) - i);
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}
	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) >
	    ntohs(GETIP(q)->ip_off); q = nq) {
		i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) -
		    ntohs(GETIP(q)->ip_off);
		if (i < ntohs(GETIP(q)->ip_len)) {
			GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i);
			GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i);
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		IPSTAT_INC(ips_fragdropped);
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
		m_freem(q);
	}
	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored.  (n = maxfragsperpacket.)
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (ntohs(GETIP(q)->ip_off) != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket)
				ipq_drop(&V_ipq[hash], fp);
			goto done;
		}
		next += ntohs(GETIP(q)->ip_len);
	}
	/* Make sure the last packet didn't have the IP_MF flag. */
	if (p->m_flags & M_IP_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket)
			ipq_drop(&V_ipq[hash], fp);
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		ipq_drop(&V_ipq[hash], fp);
		goto done;
	}
	/*
	 * Concatenate fragments.
	 */
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_demote_pkthdr(q);
		m_cat(m, q);
	}

	/*
	 * In order to do checksumming faster we do 'end-around carry' here
	 * (and not in the for{} loop), though it implies we are not going
	 * to reassemble more than 64k fragments.
	 */
	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);
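	/*
	 * Worked example (illustrative): if the accumulated csum_data is
	 * 0x2fffd, one folding pass computes 0xfffd + 0x2 = 0xffff, a
	 * valid 16-bit one's complement partial sum, and the loop exits.
	 */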
	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create the header for the new IP packet by modifying the header
	 * of the first packet; dequeue and discard the fragment reassembly
	 * header.  Make the header visible.
	 */
	ip->ip_len = htons((ip->ip_hl << 2) + next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_ipq[hash].count--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* Some debugging cruft by sklower, below, will go away soon. */
	if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
		m_fixhdr(m);
	IPSTAT_INC(ips_reassembled);
	IPQ_UNLOCK(hash);
#ifdef RSS
	/*
	 * Query the RSS layer for the flowid / flowtype of the
	 * mbuf that was provided by the caller.
	 * For now, just assume we have to calculate a new one.
	 * Later on we should check whether the assigned flowid matches
	 * what RSS wants for the given IP protocol and, if so, keep it.
	 *
	 * We then queue into the relevant netisr so it can be dispatched
	 * to the correct CPU.
	 *
	 * Note - this may return 1, which means the flowid in the mbuf
	 * is correct for the configured RSS hash types and can be used.
	 */
	if (rss_mbuf_software_hash_v4(m, 0, &rss_hash, &rss_type) == 0) {
		m->m_pkthdr.flowid = rss_hash;
		M_HASHTYPE_SET(m, rss_type);
	}

	/*
	 * Queue/dispatch for reprocessing.
	 *
	 * Note: this is much slower than just handling the frame in the
	 * current receive context.  It's likely worth investigating
	 * why this is.
	 */
	netisr_dispatch(NETISR_IP_DIRECT, m);
	return (NULL);
#endif

	/* Handle in-line. */
	return (m);

dropfrag:
	IPSTAT_INC(ips_fragdropped);
	if (fp != NULL) {
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
	}
	m_freem(m);
done:
	IPQ_UNLOCK(hash);
	return (NULL);
}
/*
 * Initialize IP reassembly structures.
 */
void
ipreass_init(void)
{
	int max;

	for (int i = 0; i < IPREASS_NHASH; i++) {
		TAILQ_INIT(&V_ipq[i].head);
		mtx_init(&V_ipq[i].lock, "IP reassembly", NULL,
		    MTX_DEF | MTX_DUPOK);
		V_ipq[i].count = 0;
	}
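	/*
	 * Note on MTX_DUPOK above: ipq_reuse() may hold two bucket locks
	 * at once (the caller's bucket plus a victim bucket taken via
	 * IPQ_TRYLOCK()), so duplicate acquisition of locks of this type
	 * must be permitted.
	 */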
	V_ipq_hashseed = arc4random();
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	max = IP_MAXFRAGPACKETS;
	max = uma_zone_set_max(V_ipq_zone, max);
	V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);

	if (IS_DEFAULT_VNET(curvnet)) {
		maxfrags = IP_MAXFRAGS;
		EVENTHANDLER_REGISTER(nmbclusters_change, ipreass_zone_change,
		    NULL, EVENTHANDLER_PRI_ANY);
	}
}
/*
 * If a timer expires on a reassembly queue, discard it.
 */
void
ipreass_slowtimo(void)
{
	struct ipq *fp, *tmp;

	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		TAILQ_FOREACH_SAFE(fp, &V_ipq[i].head, ipq_list, tmp)
			if (--fp->ipq_ttl == 0)
				ipq_timeout(&V_ipq[i], fp);
		IPQ_UNLOCK(i);
	}
}
/*
 * Drain off all datagram fragments.
 */
void
ipreass_drain(void)
{

	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		while (!TAILQ_EMPTY(&V_ipq[i].head))
			ipq_drop(&V_ipq[i], TAILQ_FIRST(&V_ipq[i].head));
		KASSERT(V_ipq[i].count == 0,
		    ("%s: V_ipq[%d] count %d (V_ipq=%p)", __func__, i,
		    V_ipq[i].count, V_ipq));
		IPQ_UNLOCK(i);
	}
}
#ifdef VIMAGE
/*
 * Destroy IP reassembly structures.
 */
void
ipreass_destroy(void)
{

	ipreass_drain();
	uma_zdestroy(V_ipq_zone);
	for (int i = 0; i < IPREASS_NHASH; i++)
		mtx_destroy(&V_ipq[i].lock);
}
#endif
/*
 * After maxnipq has been updated, propagate the change to UMA.  The UMA zone
 * max has slightly different semantics than the sysctl, for historical
 * reasons.
 */
static void
ipreass_drain_tomax(void)
{
	struct ipq *fp;
	int target;

	/*
	 * Make sure each bucket is under the new limit.  If
	 * necessary, drop enough of the oldest elements from
	 * each bucket to get under the new limit.
	 */
	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		while (V_ipq[i].count > V_ipreass_maxbucketsize &&
		    (fp = TAILQ_LAST(&V_ipq[i].head, ipqhead)) != NULL)
			ipq_timeout(&V_ipq[i], fp);
		IPQ_UNLOCK(i);
	}

	/*
	 * If we are over the maximum number of fragments,
	 * drain off enough to get down to the new limit,
	 * stripping off the last elements on queues.  Every
	 * run we strip the oldest element from each bucket.
	 */
	target = uma_zone_get_max(V_ipq_zone);
	while (uma_zone_get_cur(V_ipq_zone) > target) {
		for (int i = 0; i < IPREASS_NHASH; i++) {
			IPQ_LOCK(i);
			fp = TAILQ_LAST(&V_ipq[i].head, ipqhead);
			if (fp != NULL)
				ipq_timeout(&V_ipq[i], fp);
			IPQ_UNLOCK(i);
		}
	}
}
static void
ipreass_zone_change(void *tag)
{
	VNET_ITERATOR_DECL(vnet_iter);
	int max;

	maxfrags = IP_MAXFRAGS;
	max = IP_MAXFRAGPACKETS;
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
		ipreass_drain_tomax();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
/*
 * Change the limit on the UMA zone, or disable fragment allocation
 * entirely.  Since 0 and -1 are special values here, we need our own
 * handler instead of sysctl_handle_uma_zone_max().
 */
static int
sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	if (V_noreass == 0) {
		max = uma_zone_get_max(V_ipq_zone);
		if (max == 0)
			max = -1;
	} else
		max = 0;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
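	/*
	 * Note: sysctl_handle_int() copies the current value out to
	 * userland and, on a set request, copies the new value in;
	 * req->newptr is NULL for a pure read, so in that case we are
	 * done once the old value has been reported.
	 */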
	if (max > 0) {
		/*
		 * XXXRW: Might be a good idea to sanity check the argument
		 * and place an extreme upper bound.
		 */
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
		ipreass_drain_tomax();
		V_noreass = 0;
	} else if (max == 0) {
		V_noreass = 1;
		ipreass_drain();
	} else if (max == -1) {
		V_noreass = 0;
		uma_zone_set_max(V_ipq_zone, 0);
		V_ipreass_maxbucketsize = INT_MAX;
	} else
		return (EINVAL);
	return (0);
}
/*
 * Seek an old fragment queue header that can be reused.  Try to
 * reuse a header from the currently locked hash bucket first.
 */
static struct ipq *
ipq_reuse(int start)
{
	struct ipq *fp;
	struct mbuf *m;
	int bucket, i;

	IPQ_LOCK_ASSERT(start);

	for (i = 0; i < IPREASS_NHASH; i++) {
		bucket = (start + i) % IPREASS_NHASH;
		if (bucket != start && IPQ_TRYLOCK(bucket) == 0)
			continue;
		fp = TAILQ_LAST(&V_ipq[bucket].head, ipqhead);
		if (fp) {
			IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
			atomic_subtract_int(&nfrags, fp->ipq_nfrags);
			while (fp->ipq_frags) {
				m = fp->ipq_frags;
				fp->ipq_frags = m->m_nextpkt;
				m_freem(m);
			}
			TAILQ_REMOVE(&V_ipq[bucket].head, fp, ipq_list);
			V_ipq[bucket].count--;
			if (bucket != start)
				IPQ_UNLOCK(bucket);
			break;
		}
		if (bucket != start)
			IPQ_UNLOCK(bucket);
	}
	IPQ_LOCK_ASSERT(start);
	return (fp);
}
/*
 * Free a fragment reassembly header and all associated datagrams.
 */
static void
ipq_free(struct ipqbucket *bucket, struct ipq *fp)
{
	struct mbuf *q;

	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(&bucket->head, fp, ipq_list);
	bucket->count--;
	uma_zfree(V_ipq_zone, fp);
}
/*
 * Get or set the maximum number of reassembly queues per bucket.
 */
static int
sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	max = V_ipreass_maxbucketsize;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max <= 0)
		return (EINVAL);
	V_ipreass_maxbucketsize = max;
	ipreass_drain_tomax();
	return (0);
}