/*-
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include "opt_mbuf_profiling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>

#include <vm/uma.h>

#ifdef MBUF_STRESS_TEST
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
int	m_defragrandomfailures;
#endif

/*
 * sysctl(8) exported objects
 */
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
    &max_linkhdr, 0, "Size of largest link layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
    &max_protohdr, 0, "Size of largest protocol layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
    &max_hdr, 0, "Size of largest link plus protocol header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
    &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
    &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
    &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
    &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
    &m_defragfailure, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
    &m_defragrandomfailures, 0, "");
#endif

/*
 * Ensure the correct size of various mbuf parameters.  It could be off due
 * to compiler-induced padding and alignment artifacts.
 */
CTASSERT(sizeof(struct mbuf) == MSIZE);
CTASSERT(MSIZE - offsetof(struct mbuf, m_dat) == MLEN);
CTASSERT(MSIZE - offsetof(struct mbuf, m_pktdat) == MHLEN);

/*
 * m_get2() allocates the minimum mbuf that would fit the "size" argument.
 */
struct mbuf *
m_get2(int size, int how, short type, int flags)
{
        struct mb_args args;
        struct mbuf *m, *n;

        args.flags = flags;
        args.type = type;

        if (size <= MHLEN || (size <= MLEN && (flags & M_PKTHDR) == 0))
                return (uma_zalloc_arg(zone_mbuf, &args, how));
        if (size <= MCLBYTES)
                return (uma_zalloc_arg(zone_pack, &args, how));

        if (size > MJUMPAGESIZE)
                return (NULL);

        m = uma_zalloc_arg(zone_mbuf, &args, how);
        if (m == NULL)
                return (NULL);

        n = uma_zalloc_arg(zone_jumbop, m, how);
        if (n == NULL) {
                uma_zfree(zone_mbuf, m);
                return (NULL);
        }

        return (m);
}

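/*
 * Example usage (an illustrative sketch added for exposition, not part of
 * the original file): allocating a packet-header mbuf sized for "plen"
 * payload bytes.  The EXAMPLE_USAGE guard and the function name are
 * hypothetical; the error handling shows a typical caller pattern.
 */
#ifdef EXAMPLE_USAGE
static struct mbuf *
example_m_get2(int plen)
{
        struct mbuf *m;

        /* Smallest mbuf/cluster that can hold plen bytes, or NULL. */
        m = m_get2(plen, M_NOWAIT, MT_DATA, M_PKTHDR);
        if (m == NULL)
                return (NULL);  /* also fails when plen > MJUMPAGESIZE */
        m->m_len = m->m_pkthdr.len = plen;
        return (m);
}
#endif /* EXAMPLE_USAGE */
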
/*
 * m_getjcl() returns an mbuf with a cluster of the specified size attached.
 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
 */
struct mbuf *
m_getjcl(int how, short type, int flags, int size)
{
        struct mb_args args;
        struct mbuf *m, *n;
        uma_zone_t zone;

        if (size == MCLBYTES)
                return m_getcl(how, type, flags);

        args.flags = flags;
        args.type = type;

        m = uma_zalloc_arg(zone_mbuf, &args, how);
        if (m == NULL)
                return (NULL);

        zone = m_getzone(size);
        n = uma_zalloc_arg(zone, m, how);
        if (n == NULL) {
                uma_zfree(zone_mbuf, m);
                return (NULL);
        }
        return (m);
}

/*
 * Allocate a given length worth of mbufs and/or clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If an
 * existing mbuf chain is provided, then we will append the new chain
 * to the existing one but still return the top of the newly allocated
 * chain.
 */
struct mbuf *
m_getm2(struct mbuf *m, int len, int how, short type, int flags)
{
        struct mbuf *mb, *nm = NULL, *mtail = NULL;

        KASSERT(len >= 0, ("%s: len is < 0", __func__));

        /* Validate flags. */
        flags &= (M_PKTHDR | M_EOR);

        /* Packet header mbuf must be first in chain. */
        if ((flags & M_PKTHDR) && m != NULL)
                flags &= ~M_PKTHDR;

        /* Loop and append maximum sized mbufs to the chain tail. */
        while (len > 0) {
                if (len > MCLBYTES)
                        mb = m_getjcl(how, type, (flags & M_PKTHDR),
                            MJUMPAGESIZE);
                else if (len >= MINCLSIZE)
                        mb = m_getcl(how, type, (flags & M_PKTHDR));
                else if (flags & M_PKTHDR)
                        mb = m_gethdr(how, type);
                else
                        mb = m_get(how, type);

                /* Fail the whole operation if one mbuf can't be allocated. */
                if (mb == NULL) {
                        if (nm != NULL)
                                m_freem(nm);
                        return (NULL);
                }

                /* Book keeping. */
                len -= (mb->m_flags & M_EXT) ? mb->m_ext.ext_size :
                    ((mb->m_flags & M_PKTHDR) ? MHLEN : MLEN);
                if (mtail != NULL)
                        mtail->m_next = mb;
                else
                        nm = mb;
                mtail = mb;
                flags &= ~M_PKTHDR;     /* Only valid on the first mbuf. */
        }
        if (flags & M_EOR)
                mtail->m_flags |= M_EOR;        /* Only valid on the last mbuf. */

        /* If mbuf was supplied, append new chain to the end of it. */
        if (m != NULL) {
                for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
                        ;
                mtail->m_next = nm;
                mtail->m_flags &= ~M_EOR;
        } else
                m = nm;

        return (m);
}

/*
 * Free an entire chain of mbufs and associated external buffers, if
 * applicable.
 */
void
m_freem(struct mbuf *mb)
{

        while (mb != NULL)
                mb = m_free(mb);
}

/*
 * Configure a provided mbuf to refer to the provided external storage
 * buffer and set up a reference count for said buffer.  If the setting
 * up of the reference count fails, the M_EXT bit will not be set.  If
 * successful, the M_EXT bit is set in the mbuf's flags.
 *
 * Arguments:
 *    mb     The existing mbuf to which to attach the provided buffer.
 *    buf    The address of the provided external storage buffer.
 *    size   The size of the provided buffer.
 *    freef  A pointer to a routine that is responsible for freeing the
 *           provided external storage buffer.
 *    arg1,  Pointers to arguments (of any type) passed unmodified to the
 *    arg2   provided freef routine (either may be NULL).
 *    flags  Any other flags to be passed to the provided mbuf.
 *    type   The type that the external storage buffer should be
 *           labeled with.
 *
 * Returns:
 *    0 on success, ENOMEM if the reference counter could not be allocated.
 */
int
m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
    void (*freef)(struct mbuf *, void *, void *), void *arg1, void *arg2,
    int flags, int type, int wait)
{
        KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));

        if (type != EXT_EXTREF)
                mb->m_ext.ext_cnt = uma_zalloc(zone_ext_refcnt, wait);

        if (mb->m_ext.ext_cnt == NULL)
                return (ENOMEM);

        *(mb->m_ext.ext_cnt) = 1;
        mb->m_flags |= (M_EXT | flags);
        mb->m_ext.ext_buf = buf;
        mb->m_data = mb->m_ext.ext_buf;
        mb->m_ext.ext_size = size;
        mb->m_ext.ext_free = freef;
        mb->m_ext.ext_arg1 = arg1;
        mb->m_ext.ext_arg2 = arg2;
        mb->m_ext.ext_type = type;
        mb->m_ext.ext_flags = 0;

        return (0);
}

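/*
 * Example usage (an illustrative sketch, not part of the original file):
 * attaching a driver-owned buffer to a fresh mbuf.  The EXAMPLE_USAGE
 * guard and function names are hypothetical; the sketch assumes the
 * buffer was allocated with malloc(9) under M_DEVBUF and uses the
 * module-private EXT_MOD_TYPE label.
 */
#ifdef EXAMPLE_USAGE
static void
example_ext_free(struct mbuf *m, void *arg1, void *arg2)
{

        free(arg1, M_DEVBUF);   /* arg1 is the buffer given to m_extadd() */
}

static struct mbuf *
example_m_extadd(caddr_t buf, u_int size, int how)
{
        struct mbuf *m;

        m = m_gethdr(how, MT_DATA);
        if (m == NULL)
                return (NULL);
        if (m_extadd(m, buf, size, example_ext_free, buf, NULL, 0,
            EXT_MOD_TYPE, how) != 0) {
                m_free(m);      /* refcount alloc failed; M_EXT is unset */
                return (NULL);
        }
        return (m);
}
#endif /* EXAMPLE_USAGE */
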
/*
 * Non-directly-exported function to clean up after mbufs with M_EXT
 * storage attached to them if the reference count hits 1.
 */
void
mb_free_ext(struct mbuf *m)
{
        int freembuf;

        KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));

        /*
         * Check if the header is embedded in the cluster.
         */
        freembuf = (m->m_flags & M_NOFREE) ? 0 : 1;

        switch (m->m_ext.ext_type) {
        case EXT_SFBUF:
                sf_ext_free(m->m_ext.ext_arg1, m->m_ext.ext_arg2);
                break;
        default:
                KASSERT(m->m_ext.ext_cnt != NULL,
                    ("%s: no refcounting pointer on %p", __func__, m));
                /*
                 * Free attached storage if this mbuf is the only
                 * reference to it.
                 */
                if (*(m->m_ext.ext_cnt) != 1) {
                        if (atomic_fetchadd_int(m->m_ext.ext_cnt, -1) != 1)
                                break;
                }

                switch (m->m_ext.ext_type) {
                case EXT_PACKET:        /* The packet zone is special. */
                        if (*(m->m_ext.ext_cnt) == 0)
                                *(m->m_ext.ext_cnt) = 1;
                        uma_zfree(zone_pack, m);
                        return;         /* Job done. */
                case EXT_CLUSTER:
                        uma_zfree(zone_clust, m->m_ext.ext_buf);
                        break;
                case EXT_JUMBOP:
                        uma_zfree(zone_jumbop, m->m_ext.ext_buf);
                        break;
                case EXT_JUMBO9:
                        uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
                        break;
                case EXT_JUMBO16:
                        uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
                        break;
                case EXT_NET_DRV:
                case EXT_MOD_TYPE:
                case EXT_DISPOSABLE:
                        *(m->m_ext.ext_cnt) = 0;
                        uma_zfree(zone_ext_refcnt, __DEVOLATILE(u_int *,
                            m->m_ext.ext_cnt));
                        /* FALLTHROUGH */
                case EXT_EXTREF:
                        KASSERT(m->m_ext.ext_free != NULL,
                            ("%s: ext_free not set", __func__));
                        (*(m->m_ext.ext_free))(m, m->m_ext.ext_arg1,
                            m->m_ext.ext_arg2);
                        break;
                default:
                        KASSERT(m->m_ext.ext_type == 0,
                            ("%s: unknown ext_type", __func__));
                }
        }

        if (freembuf)
                uma_zfree(zone_mbuf, m);
}

/*
 * Attach the cluster from *m to *n, set up m_ext in *n
 * and bump the refcount of the cluster.
 */
static void
mb_dupcl(struct mbuf *n, struct mbuf *m)
{

        KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));
        KASSERT(!(n->m_flags & M_EXT), ("%s: M_EXT set on %p", __func__, n));

        switch (m->m_ext.ext_type) {
        case EXT_SFBUF:
                sf_ext_ref(m->m_ext.ext_arg1, m->m_ext.ext_arg2);
                break;
        default:
                KASSERT(m->m_ext.ext_cnt != NULL,
                    ("%s: no refcounting pointer on %p", __func__, m));
                if (*(m->m_ext.ext_cnt) == 1)
                        *(m->m_ext.ext_cnt) += 1;
                else
                        atomic_add_int(m->m_ext.ext_cnt, 1);
        }

        n->m_ext = m->m_ext;
        n->m_flags |= M_EXT;
        n->m_flags |= m->m_flags & M_RDONLY;
}

/*
 * Clean up mbuf (chain) from any tags and packet headers.
 * If "all" is set then the first mbuf in the chain will be
 * cleaned too.
 */
void
m_demote(struct mbuf *m0, int all, int flags)
{
        struct mbuf *m;

        for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
                KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt in m %p, m0 %p",
                    __func__, m, m0));
                if (m->m_flags & M_PKTHDR) {
                        m_tag_delete_chain(m, NULL);
                        m->m_flags &= ~M_PKTHDR;
                        bzero(&m->m_pkthdr, sizeof(struct pkthdr));
                }
                m->m_flags = m->m_flags & (M_EXT | M_RDONLY | M_NOFREE | flags);
        }
}

/*
 * Sanity checks on mbuf (chain) for use in KASSERT() and general
 * debugging.
 * Returns 1 when all tests pass; otherwise returns 0 or panics.
 * sanitize: 0 to run M_SANITY_ACTION on failure, 1 to garble things
 * so they blow up later.
 */
int
m_sanity(struct mbuf *m0, int sanitize)
{
        struct mbuf *m;
        caddr_t a, b;
        int pktlen = 0;

#ifdef INVARIANTS
#define M_SANITY_ACTION(s)      panic("mbuf %p: " s, m)
#else
#define M_SANITY_ACTION(s)      printf("mbuf %p: " s, m)
#endif

        for (m = m0; m != NULL; m = m->m_next) {
                /*
                 * Basic pointer checks.  If any of these fails then some
                 * unrelated kernel memory before or after us is trashed.
                 * No way to recover from that.
                 */
                a = ((m->m_flags & M_EXT) ? m->m_ext.ext_buf :
                    ((m->m_flags & M_PKTHDR) ? (caddr_t)(&m->m_pktdat) :
                    (caddr_t)(&m->m_dat)));
                b = (caddr_t)(a + (m->m_flags & M_EXT ? m->m_ext.ext_size :
                    ((m->m_flags & M_PKTHDR) ? MHLEN : MLEN)));
                if ((caddr_t)m->m_data < a)
                        M_SANITY_ACTION("m_data outside mbuf data range left");
                if ((caddr_t)m->m_data > b)
                        M_SANITY_ACTION("m_data outside mbuf data range right");
                if ((caddr_t)m->m_data + m->m_len > b)
                        M_SANITY_ACTION("m_data + m_len exceeds mbuf space");

                /* m->m_nextpkt may only be set on first mbuf in chain. */
                if (m != m0 && m->m_nextpkt != NULL) {
                        if (sanitize) {
                                m_freem(m->m_nextpkt);
                                m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
                        } else
                                M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
                }

                /* packet length (not mbuf length!) calculation */
                if (m0->m_flags & M_PKTHDR)
                        pktlen += m->m_len;

                /* m_tags may only be attached to first mbuf in chain. */
                if (m != m0 && m->m_flags & M_PKTHDR &&
                    !SLIST_EMPTY(&m->m_pkthdr.tags)) {
                        if (sanitize) {
                                m_tag_delete_chain(m, NULL);
                                /* put in 0xDEADC0DE perhaps? */
                        } else
                                M_SANITY_ACTION("m_tags on in-chain mbuf");
                }

                /* M_PKTHDR may only be set on first mbuf in chain */
                if (m != m0 && m->m_flags & M_PKTHDR) {
                        if (sanitize) {
                                bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
                                m->m_flags &= ~M_PKTHDR;
                                /* put in 0xDEADC0DE and leave hdr flag in */
                        } else
                                M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
                }
        }
        m = m0;
        if (pktlen && pktlen != m->m_pkthdr.len) {
                if (sanitize)
                        m->m_pkthdr.len = 0;
                else
                        M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
        }
        return 1;

#undef  M_SANITY_ACTION
}

492 * "Move" mbuf pkthdr from "from" to "to".
493 * "from" must have M_PKTHDR set, and "to" must be empty.
496 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
500 /* see below for why these are not enabled */
502 /* Note: with MAC, this may not be a good assertion. */
503 KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
504 ("m_move_pkthdr: to has tags"));
508 * XXXMAC: It could be this should also occur for non-MAC?
510 if (to->m_flags & M_PKTHDR)
511 m_tag_delete_chain(to, NULL);
513 to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
514 if ((to->m_flags & M_EXT) == 0)
515 to->m_data = to->m_pktdat;
516 to->m_pkthdr = from->m_pkthdr; /* especially tags */
517 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
518 from->m_flags &= ~M_PKTHDR;
/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
{

#if 0
        /*
         * The mbuf allocator only initializes the pkthdr
         * when the mbuf is allocated with m_gethdr(). Many users
         * (e.g. m_copy*, m_prepend) use m_get() and then
         * smash the pkthdr as needed causing these
         * assertions to trip.  For now just disable them.
         */
        M_ASSERTPKTHDR(to);
        /* Note: with MAC, this may not be a good assertion. */
        KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
#endif
        MBUF_CHECKSLEEP(how);
#ifdef MAC
        if (to->m_flags & M_PKTHDR)
                m_tag_delete_chain(to, NULL);
#endif
        to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
        if ((to->m_flags & M_EXT) == 0)
                to->m_data = to->m_pktdat;
        to->m_pkthdr = from->m_pkthdr;
        SLIST_INIT(&to->m_pkthdr.tags);
        return (m_tag_copy_chain(to, from, how));
}

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
        struct mbuf *mn;

        if (m->m_flags & M_PKTHDR)
                mn = m_gethdr(how, m->m_type);
        else
                mn = m_get(how, m->m_type);
        if (mn == NULL) {
                m_freem(m);
                return (NULL);
        }
        if (m->m_flags & M_PKTHDR)
                m_move_pkthdr(mn, m);
        mn->m_next = m;
        m = mn;
        if (len < M_SIZE(m))
                M_ALIGN(m, len);
        m->m_len = len;
        return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
        struct mbuf *n, **np;
        int off = off0;
        struct mbuf *top;
        int copyhdr = 0;

        KASSERT(off >= 0, ("m_copym, negative off %d", off));
        KASSERT(len >= 0, ("m_copym, negative len %d", len));
        MBUF_CHECKSLEEP(wait);
        if (off == 0 && m->m_flags & M_PKTHDR)
                copyhdr = 1;
        while (off > 0) {
                KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        np = &top;
        top = 0;
        while (len > 0) {
                if (m == NULL) {
                        KASSERT(len == M_COPYALL,
                            ("m_copym, length > size of mbuf chain"));
                        break;
                }
                if (copyhdr)
                        n = m_gethdr(wait, m->m_type);
                else
                        n = m_get(wait, m->m_type);
                *np = n;
                if (n == NULL)
                        goto nospace;
                if (copyhdr) {
                        if (!m_dup_pkthdr(n, m, wait))
                                goto nospace;
                        if (len == M_COPYALL)
                                n->m_pkthdr.len -= off0;
                        else
                                n->m_pkthdr.len = len;
                        copyhdr = 0;
                }
                n->m_len = min(len, m->m_len - off);
                if (m->m_flags & M_EXT) {
                        n->m_data = m->m_data + off;
                        mb_dupcl(n, m);
                } else
                        bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
                            (u_int)n->m_len);
                if (len != M_COPYALL)
                        len -= n->m_len;
                off = 0;
                m = m->m_next;
                np = &n->m_next;
        }

        return (top);
nospace:
        m_freem(top);
        return (NULL);
}

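/*
 * Example usage (an illustrative sketch, not part of the original file):
 * taking a zero-copy reference to a whole packet, e.g. for a tap.  The
 * EXAMPLE_USAGE guard and function name are hypothetical.
 */
#ifdef EXAMPLE_USAGE
static struct mbuf *
example_m_copym(struct mbuf *m)
{

        /* Read-only copy: clusters are shared, not duplicated. */
        return (m_copym(m, 0, M_COPYALL, M_NOWAIT));
}
#endif /* EXAMPLE_USAGE */
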
/*
 * Returns mbuf chain with new head for the prepending case.
 * Copies from mbuf (chain) n from off for len to mbuf (chain) m
 * either prepending or appending the data.
 * The resulting mbuf (chain) m is fully writable.
 * m is destination (is made writable)
 * n is source, off is offset in source, len is len from offset
 * dir, 0 append, 1 prepend
 * how, wait or nowait
 */
static int
m_bcopyxxx(void *s, void *t, u_int len)
{
        bcopy(s, t, (size_t)len);
        return 0;
}

struct mbuf *
m_copymdata(struct mbuf *m, struct mbuf *n, int off, int len,
    int prep, int how)
{
        struct mbuf *mm, *x, *z, *prev = NULL;
        caddr_t p;
        int i, nlen = 0;
        caddr_t buf[MLEN];

        KASSERT(m != NULL && n != NULL, ("m_copymdata, no target or source"));
        KASSERT(off >= 0, ("m_copymdata, negative off %d", off));
        KASSERT(len >= 0, ("m_copymdata, negative len %d", len));
        KASSERT(prep == 0 || prep == 1, ("m_copymdata, unknown direction %d", prep));

        mm = m;
        if (!prep) {
                while (mm->m_next) {
                        prev = mm;
                        mm = mm->m_next;
                }
        }
        for (z = n; z != NULL; z = z->m_next)
                nlen += z->m_len;
        if (len == M_COPYALL)
                len = nlen - off;
        if (off + len > nlen || len < 1)
                return NULL;

        if (!M_WRITABLE(mm)) {
                /* XXX: Use proper m_xxx function instead. */
                x = m_getcl(how, MT_DATA, mm->m_flags);
                if (x == NULL)
                        return NULL;
                bcopy(mm->m_ext.ext_buf, x->m_ext.ext_buf, x->m_ext.ext_size);
                p = x->m_ext.ext_buf + (mm->m_data - mm->m_ext.ext_buf);
                x->m_data = p;
                mm->m_next = NULL;
                if (mm != m)
                        prev->m_next = x;
                m_free(mm);
                mm = x;
        }

        /*
         * Append/prepend the data, allocating mbufs as necessary.
         */
        /* Shortcut if enough free space in first/last mbuf. */
        if (!prep && M_TRAILINGSPACE(mm) >= len) {
                m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t) +
                    mm->m_len);
                mm->m_len += len;
                mm->m_pkthdr.len += len;
                return m;
        }
        if (prep && M_LEADINGSPACE(mm) >= len) {
                mm->m_data = mtod(mm, caddr_t) - len;
                m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t));
                mm->m_len += len;
                mm->m_pkthdr.len += len;
                return mm;
        }

        /* Expand first/last mbuf to cluster if possible. */
        if (!prep && !(mm->m_flags & M_EXT) && len > M_TRAILINGSPACE(mm)) {
                bcopy(mm->m_data, &buf, mm->m_len);
                m_clget(mm, how);
                if (!(mm->m_flags & M_EXT))
                        return NULL;
                bcopy(&buf, mm->m_ext.ext_buf, mm->m_len);
                mm->m_data = mm->m_ext.ext_buf;
        }
        if (prep && !(mm->m_flags & M_EXT) && len > M_LEADINGSPACE(mm)) {
                bcopy(mm->m_data, &buf, mm->m_len);
                m_clget(mm, how);
                if (!(mm->m_flags & M_EXT))
                        return NULL;
                bcopy(&buf, (caddr_t *)mm->m_ext.ext_buf +
                    mm->m_ext.ext_size - mm->m_len, mm->m_len);
                mm->m_data = (caddr_t)mm->m_ext.ext_buf +
                    mm->m_ext.ext_size - mm->m_len;
        }

        /* Append/prepend as many mbuf (clusters) as necessary to fit len. */
        if (!prep && len > M_TRAILINGSPACE(mm)) {
                if (!m_getm(mm, len - M_TRAILINGSPACE(mm), how, MT_DATA))
                        return NULL;
        }
        if (prep && len > M_LEADINGSPACE(mm)) {
                if (!(z = m_getm(NULL, len - M_LEADINGSPACE(mm), how, MT_DATA)))
                        return NULL;
                i = 0;
                for (x = z; x != NULL; x = x->m_next) {
                        i += x->m_flags & M_EXT ? x->m_ext.ext_size :
                            (x->m_flags & M_PKTHDR ? MHLEN : MLEN);
                        if (!x->m_next)
                                break;
                }
                z->m_data += i - len;
                m_move_pkthdr(mm, z);
                x->m_next = mm;
                mm = z;
        }

        /* Seek to start position in source mbuf. Optimization for long chains. */
        while (off > 0) {
                if (off < n->m_len)
                        break;
                off -= n->m_len;
                n = n->m_next;
        }

        /* Copy data into target mbuf. */
        z = mm;
        while (len > 0) {
                KASSERT(z != NULL, ("m_copymdata, falling off target edge"));
                i = M_TRAILINGSPACE(z);
                m_apply(n, off, i, m_bcopyxxx, mtod(z, caddr_t) + z->m_len);
                z->m_len += i;
                /* fixup pkthdr.len if necessary */
                if ((prep ? mm : m)->m_flags & M_PKTHDR)
                        (prep ? mm : m)->m_pkthdr.len += i;
                off += i;
                len -= i;
                z = z->m_next;
        }
        return (prep ? mm : m);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
        struct mbuf *top, *n, *o;

        MBUF_CHECKSLEEP(how);
        n = m_get(how, m->m_type);
        top = n;
        if (n == NULL)
                goto nospace;

        if (!m_dup_pkthdr(n, m, how))
                goto nospace;
        n->m_len = m->m_len;
        if (m->m_flags & M_EXT) {
                n->m_data = m->m_data;
                mb_dupcl(n, m);
        } else {
                n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
                bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
        }

        m = m->m_next;
        while (m) {
                o = m_get(how, m->m_type);
                if (o == NULL)
                        goto nospace;

                n->m_next = o;
                n = n->m_next;

                n->m_len = m->m_len;
                if (m->m_flags & M_EXT) {
                        n->m_data = m->m_data;
                        mb_dupcl(n, m);
                } else
                        bcopy(mtod(m, char *), mtod(n, char *), n->m_len);

                m = m->m_next;
        }
        return top;
nospace:
        m_freem(top);
        return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
        u_int count;

        KASSERT(off >= 0, ("m_copydata, negative off %d", off));
        KASSERT(len >= 0, ("m_copydata, negative len %d", len));
        while (off > 0) {
                KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        while (len > 0) {
                KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
                count = min(m->m_len - off, len);
                bcopy(mtod(m, caddr_t) + off, cp, count);
                len -= count;
                cp += count;
                off = 0;
                m = m->m_next;
        }
}

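/*
 * Example usage (an illustrative sketch, not part of the original file):
 * linearizing the first few bytes of a pkthdr chain into a stack buffer,
 * a common way to peek at a header without m_pullup().  The guard and
 * names are hypothetical.
 */
#ifdef EXAMPLE_USAGE
static int
example_m_copydata(const struct mbuf *m, u_char hdr[8])
{

        if (m->m_pkthdr.len < 8)
                return (0);     /* chain too short for the peek */
        m_copydata(m, 0, 8, (caddr_t)hdr);
        return (1);
}
#endif /* EXAMPLE_USAGE */
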
/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
        struct mbuf **p, *top = NULL;
        int remain, moff, nsize;

        MBUF_CHECKSLEEP(how);
        /* Sanity check */
        if (m == NULL)
                return (NULL);
        M_ASSERTPKTHDR(m);

        /* While there's more data, get a new mbuf, tack it on, and fill it */
        remain = m->m_pkthdr.len;
        moff = 0;
        p = &top;
        while (remain > 0 || top == NULL) {     /* allow m->m_pkthdr.len == 0 */
                struct mbuf *n;

                /* Get the next new mbuf */
                if (remain >= MINCLSIZE) {
                        n = m_getcl(how, m->m_type, 0);
                        nsize = MCLBYTES;
                } else {
                        n = m_get(how, m->m_type);
                        nsize = MLEN;
                }
                if (n == NULL)
                        goto nospace;

                if (top == NULL) {              /* First one, must be PKTHDR */
                        if (!m_dup_pkthdr(n, m, how)) {
                                m_free(n);
                                goto nospace;
                        }
                        if ((n->m_flags & M_EXT) == 0)
                                nsize = MHLEN;
                }
                n->m_len = 0;

                /* Link it into the new chain */
                *p = n;
                p = &n->m_next;

                /* Copy data from original mbuf(s) into new mbuf */
                while (n->m_len < nsize && m != NULL) {
                        int chunk = min(nsize - n->m_len, m->m_len - moff);

                        bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
                        moff += chunk;
                        n->m_len += chunk;
                        remain -= chunk;
                        if (moff == m->m_len) {
                                m = m->m_next;
                                moff = 0;
                        }
                }

                /* Check correct total mbuf length */
                KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
                    ("%s: bogus m_pkthdr.len", __func__));
        }
        return (top);

nospace:
        m_freem(top);
        return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{

        while (m->m_next)
                m = m->m_next;
        while (n) {
                if (!M_WRITABLE(m) ||
                    M_TRAILINGSPACE(m) < n->m_len) {
                        /* just join the two chains */
                        m->m_next = n;
                        return;
                }
                /* splat the data from one into the other */
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
                    (u_int)n->m_len);
                m->m_len += n->m_len;
                n = m_free(n);
        }
}

/*
 * Concatenate two pkthdr mbuf chains.
 */
void
m_catpkt(struct mbuf *m, struct mbuf *n)
{

        M_ASSERTPKTHDR(m);
        M_ASSERTPKTHDR(n);

        m->m_pkthdr.len += n->m_pkthdr.len;
        m_demote(n, 1, 0);

        m_cat(m, n);
}

void
m_adj(struct mbuf *mp, int req_len)
{
        int len = req_len;
        struct mbuf *m;
        int count;

        if ((m = mp) == NULL)
                return;
        if (len >= 0) {
                /*
                 * Trim from head.
                 */
                while (m != NULL && len > 0) {
                        if (m->m_len <= len) {
                                len -= m->m_len;
                                m->m_len = 0;
                                m = m->m_next;
                        } else {
                                m->m_len -= len;
                                m->m_data += len;
                                len = 0;
                        }
                }
                if (mp->m_flags & M_PKTHDR)
                        mp->m_pkthdr.len -= (req_len - len);
        } else {
                /*
                 * Trim from tail.  Scan the mbuf chain,
                 * calculating its length and finding the last mbuf.
                 * If the adjustment only affects this mbuf, then just
                 * adjust and return.  Otherwise, rescan and truncate
                 * after the remaining size.
                 */
                len = -len;
                count = 0;
                for (;;) {
                        count += m->m_len;
                        if (m->m_next == (struct mbuf *)0)
                                break;
                        m = m->m_next;
                }
                if (m->m_len >= len) {
                        m->m_len -= len;
                        if (mp->m_flags & M_PKTHDR)
                                mp->m_pkthdr.len -= len;
                        return;
                }
                count -= len;
                if (count < 0)
                        count = 0;
                /*
                 * Correct length for chain is "count".
                 * Find the mbuf with last data, adjust its length,
                 * and toss data from remaining mbufs on chain.
                 */
                m = mp;
                if (m->m_flags & M_PKTHDR)
                        m->m_pkthdr.len = count;
                for (; m; m = m->m_next) {
                        if (m->m_len >= count) {
                                m->m_len = count;
                                if (m->m_next != NULL) {
                                        m_freem(m->m_next);
                                        m->m_next = NULL;
                                }
                                break;
                        }
                        count -= m->m_len;
                }
        }
}

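/*
 * Example usage (an illustrative sketch, not part of the original file):
 * trimming a 14-byte link-layer header from the front of a packet and a
 * 4-byte trailer from the tail; a negative length trims from the tail.
 */
#ifdef EXAMPLE_USAGE
static void
example_m_adj(struct mbuf *m)
{

        m_adj(m, 14);   /* drop link-layer header from the head */
        m_adj(m, -4);   /* drop frame check sequence from the tail */
}
#endif /* EXAMPLE_USAGE */
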
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work
 * for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
        struct mbuf *m;
        int count;
        int space;

        /*
         * If first mbuf has no cluster, and has room for len bytes
         * without shifting current data, pullup into it,
         * otherwise allocate a new mbuf to prepend to the chain.
         */
        if ((n->m_flags & M_EXT) == 0 &&
            n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
                if (n->m_len >= len)
                        return (n);
                m = n;
                n = n->m_next;
                len -= m->m_len;
        } else {
                if (len > MHLEN)
                        goto bad;
                m = m_get(M_NOWAIT, n->m_type);
                if (m == NULL)
                        goto bad;
                if (n->m_flags & M_PKTHDR)
                        m_move_pkthdr(m, n);
        }
        space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
        do {
                count = min(min(max(len, max_protohdr), space), n->m_len);
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
                    (u_int)count);
                len -= count;
                m->m_len += count;
                n->m_len -= count;
                space -= count;
                if (n->m_len)
                        n->m_data += count;
                else
                        n = m_free(n);
        } while (len > 0 && n);
        if (len > 0) {
                (void) m_free(m);
                goto bad;
        }
        m->m_next = n;
        return (m);
bad:
        m_freem(n);
        return (NULL);
}

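/*
 * Example usage (an illustrative sketch, not part of the original file):
 * the canonical pattern before dereferencing a fixed-size header with
 * mtod().  "struct example_hdr" and the guard are hypothetical.
 */
#ifdef EXAMPLE_USAGE
struct example_hdr {
        uint32_t        h_field;
};

static struct mbuf *
example_m_pullup(struct mbuf *m)
{

        if (m->m_len < sizeof(struct example_hdr) &&
            (m = m_pullup(m, sizeof(struct example_hdr))) == NULL)
                return (NULL);  /* m_pullup() already freed the chain */
        /* mtod() is now safe for the header. */
        (void)mtod(m, struct example_hdr *);
        return (m);
}
#endif /* EXAMPLE_USAGE */
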
/*
 * Like m_pullup(), except a new mbuf is always allocated, and we allow
 * the amount of empty space before the data in the new mbuf to be specified
 * (in the event that the caller expects to prepend later).
 */
struct mbuf *
m_copyup(struct mbuf *n, int len, int dstoff)
{
        struct mbuf *m;
        int count, space;

        if (len > (MHLEN - dstoff))
                goto bad;
        m = m_get(M_NOWAIT, n->m_type);
        if (m == NULL)
                goto bad;
        if (n->m_flags & M_PKTHDR)
                m_move_pkthdr(m, n);
        m->m_data += dstoff;
        space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
        do {
                count = min(min(max(len, max_protohdr), space), n->m_len);
                memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
                    (unsigned)count);
                len -= count;
                m->m_len += count;
                n->m_len -= count;
                space -= count;
                if (n->m_len)
                        n->m_data += count;
                else
                        n = m_free(n);
        } while (len > 0 && n);
        if (len > 0) {
                (void) m_free(m);
                goto bad;
        }
        m->m_next = n;
        return (m);
bad:
        m_freem(n);
        return (NULL);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf. Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
        struct mbuf *m, *n;
        u_int len = len0, remain;

        MBUF_CHECKSLEEP(wait);
        for (m = m0; m && len > m->m_len; m = m->m_next)
                len -= m->m_len;
        if (m == NULL)
                return (NULL);
        remain = m->m_len - len;
        if (m0->m_flags & M_PKTHDR && remain == 0) {
                n = m_gethdr(wait, m0->m_type);
                if (n == NULL)
                        return (NULL);
                n->m_next = m->m_next;
                m->m_next = NULL;
                n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
                n->m_pkthdr.len = m0->m_pkthdr.len - len0;
                m0->m_pkthdr.len = len0;
                return (n);
        } else if (m0->m_flags & M_PKTHDR) {
                n = m_gethdr(wait, m0->m_type);
                if (n == NULL)
                        return (NULL);
                n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
                n->m_pkthdr.len = m0->m_pkthdr.len - len0;
                m0->m_pkthdr.len = len0;
                if (m->m_flags & M_EXT)
                        goto extpacket;
                if (remain > MHLEN) {
                        /* m can't be the lead packet */
                        M_ALIGN(n, 0);
                        n->m_next = m_split(m, len, wait);
                        if (n->m_next == NULL) {
                                (void) m_free(n);
                                return (NULL);
                        } else {
                                n->m_len = 0;
                                return (n);
                        }
                } else
                        M_ALIGN(n, remain);
        } else if (remain == 0) {
                n = m->m_next;
                m->m_next = NULL;
                return (n);
        } else {
                n = m_get(wait, m->m_type);
                if (n == NULL)
                        return (NULL);
                M_ALIGN(n, remain);
        }
extpacket:
        if (m->m_flags & M_EXT) {
                n->m_data = m->m_data + len;
                mb_dupcl(n, m);
        } else {
                bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
        }
        n->m_len = remain;
        m->m_len = len;
        n->m_next = m->m_next;
        m->m_next = NULL;
        return (n);
}

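/*
 * Example usage (an illustrative sketch, not part of the original file):
 * splitting a packet after 1500 bytes, as a fragmentation path might.
 * On failure the original chain is restored, so it can simply be kept.
 */
#ifdef EXAMPLE_USAGE
static struct mbuf *
example_m_split(struct mbuf *m)
{
        struct mbuf *tail;

        tail = m_split(m, 1500, M_NOWAIT);
        if (tail == NULL)
                return (NULL);  /* m is unchanged; caller keeps it whole */
        /* m now holds the first 1500 bytes, tail the remainder. */
        return (tail);
}
#endif /* EXAMPLE_USAGE */
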
/*
 * Routine to copy from device local memory into mbufs.
 * Note that `off' argument is offset into first mbuf of target chain from
 * which to begin copying the data to.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
        struct mbuf *m;
        struct mbuf *top = NULL, **mp = &top;
        int len;

        if (off < 0 || off > MHLEN)
                return (NULL);

        while (totlen > 0) {
                if (top == NULL) {      /* First one, must be PKTHDR */
                        if (totlen + off >= MINCLSIZE) {
                                m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
                                len = MCLBYTES;
                        } else {
                                m = m_gethdr(M_NOWAIT, MT_DATA);
                                len = MHLEN;

                                /* Place initial small packet/header at end of mbuf */
                                if (m && totlen + off + max_linkhdr <= MLEN) {
                                        m->m_data += max_linkhdr;
                                        len -= max_linkhdr;
                                }
                        }
                        if (m == NULL)
                                return (NULL);
                        m->m_pkthdr.rcvif = ifp;
                        m->m_pkthdr.len = totlen;
                } else {
                        if (totlen + off >= MINCLSIZE) {
                                m = m_getcl(M_NOWAIT, MT_DATA, 0);
                                len = MCLBYTES;
                        } else {
                                m = m_get(M_NOWAIT, MT_DATA);
                                len = MLEN;
                        }
                        if (m == NULL) {
                                m_freem(top);
                                return (NULL);
                        }
                }
                if (off) {
                        m->m_data += off;
                        len -= off;
                        off = 0;
                }
                m->m_len = len = min(totlen, len);
                if (copy)
                        copy(buf, mtod(m, caddr_t), (u_int)len);
                else
                        bcopy(buf, mtod(m, caddr_t), (u_int)len);
                buf += len;
                *mp = m;
                mp = &m->m_next;
                totlen -= len;
        }
        return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
{
        int mlen;
        struct mbuf *m = m0, *n;
        int totlen = 0;

        if (m0 == NULL)
                return;
        while (off > (mlen = m->m_len)) {
                off -= mlen;
                totlen += mlen;
                if (m->m_next == NULL) {
                        n = m_get(M_NOWAIT, m->m_type);
                        if (n == NULL)
                                goto out;
                        bzero(mtod(n, caddr_t), MLEN);
                        n->m_len = min(MLEN, len + off);
                        m->m_next = n;
                }
                m = m->m_next;
        }
        while (len > 0) {
                if (m->m_next == NULL && (len > m->m_len - off)) {
                        m->m_len += min(len - (m->m_len - off),
                            M_TRAILINGSPACE(m));
                }
                mlen = min(m->m_len - off, len);
                bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
                cp += mlen;
                len -= mlen;
                mlen += off;
                off = 0;
                totlen += mlen;
                if (len == 0)
                        break;
                if (m->m_next == NULL) {
                        n = m_get(M_NOWAIT, m->m_type);
                        if (n == NULL)
                                break;
                        n->m_len = min(MLEN, len);
                        m->m_next = n;
                }
                m = m->m_next;
        }
out:    if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
                m->m_pkthdr.len = totlen;
}

/*
 * Append the specified data to the indicated mbuf chain,
 * extending the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
        struct mbuf *m, *n;
        int remainder, space;

        for (m = m0; m->m_next != NULL; m = m->m_next)
                ;
        remainder = len;
        space = M_TRAILINGSPACE(m);
        if (space > 0) {
                /*
                 * Copy into available space.
                 */
                if (space > remainder)
                        space = remainder;
                bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
                m->m_len += space;
                cp += space, remainder -= space;
        }
        while (remainder > 0) {
                /*
                 * Allocate a new mbuf; could check space
                 * and allocate a cluster instead.
                 */
                n = m_get(M_NOWAIT, m->m_type);
                if (n == NULL)
                        break;
                n->m_len = min(MLEN, remainder);
                bcopy(cp, mtod(n, caddr_t), n->m_len);
                cp += n->m_len, remainder -= n->m_len;
                m->m_next = n;
                m = n;
        }
        if (m0->m_flags & M_PKTHDR)
                m0->m_pkthdr.len += len - remainder;
        return (remainder == 0);
}

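/*
 * Example usage (an illustrative sketch, not part of the original file):
 * appending a small trailer and checking the 0/1 result.  The guard and
 * function name are hypothetical.
 */
#ifdef EXAMPLE_USAGE
static int
example_m_append(struct mbuf *m)
{
        static const char trailer[4] = { 0, 0, 0, 0 };

        return (m_append(m, sizeof(trailer), trailer)); /* 1 on success */
}
#endif /* EXAMPLE_USAGE */
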
/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
        u_int count;
        int rval;

        KASSERT(off >= 0, ("m_apply, negative off %d", off));
        KASSERT(len >= 0, ("m_apply, negative len %d", len));
        while (off > 0) {
                KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        while (len > 0) {
                KASSERT(m != NULL, ("m_apply, length > size of mbuf chain"));
                count = min(m->m_len - off, len);
                rval = (*f)(arg, mtod(m, caddr_t) + off, count);
                if (rval)
                        return (rval);
                len -= count;
                off = 0;
                m = m->m_next;
        }
        return (0);
}

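/*
 * Example usage (an illustrative sketch, not part of the original file):
 * summing every data byte of a pkthdr chain without linearizing it.
 * The callback matches the f() signature above; names are hypothetical.
 */
#ifdef EXAMPLE_USAGE
static int
example_sum_cb(void *arg, void *data, u_int len)
{
        u_int *sum = arg;
        u_char *p = data;

        while (len-- > 0)
                *sum += *p++;
        return (0);     /* non-zero would abort the walk early */
}

static u_int
example_m_apply(struct mbuf *m)
{
        u_int sum = 0;

        m_apply(m, 0, m->m_pkthdr.len, example_sum_cb, &sum);
        return (sum);
}
#endif /* EXAMPLE_USAGE */
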
/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{

        while (loc >= 0) {
                /* Normal end of search. */
                if (m->m_len > loc) {
                        *off = loc;
                        return (m);
                } else {
                        loc -= m->m_len;
                        if (m->m_next == NULL) {
                                if (m->m_len == 0) {
                                        /* Point at the end of valid data. */
                                        *off = m->m_len;
                                        return (m);
                                }
                                return (NULL);
                        }
                        m = m->m_next;
                }
        }
        return (NULL);
}

void
m_print(const struct mbuf *m, int maxlen)
{
        int len;
        int pdata;
        const struct mbuf *m2;

        if (m == NULL) {
                printf("mbuf: %p\n", m);
                return;
        }

        if (m->m_flags & M_PKTHDR)
                len = m->m_pkthdr.len;
        else
                len = -1;
        m2 = m;
        while (m2 != NULL && (len == -1 || len)) {
                pdata = m2->m_len;
                if (maxlen != -1 && pdata > maxlen)
                        pdata = maxlen;
                printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
                    m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
                    "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
                    "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
                if (pdata)
                        printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
                len -= m2->m_len;
                m2 = m2->m_next;
        }
        if (len > 0)
                printf("%d bytes unaccounted for.\n", len);
        return;
}

u_int
m_fixhdr(struct mbuf *m0)
{
        u_int len;

        len = m_length(m0, NULL);
        m0->m_pkthdr.len = len;
        return (len);
}

u_int
m_length(struct mbuf *m0, struct mbuf **last)
{
        struct mbuf *m;
        u_int len;

        len = 0;
        for (m = m0; m != NULL; m = m->m_next) {
                len += m->m_len;
                if (m->m_next == NULL)
                        break;
        }
        if (last != NULL)
                *last = m;
        return (len);
}

/*
 * Defragment a mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
        struct mbuf *m_new = NULL, *m_final = NULL;
        int progress = 0, length;

        MBUF_CHECKSLEEP(how);
        if (!(m0->m_flags & M_PKTHDR))
                return (m0);

        m_fixhdr(m0); /* Needed sanity check */

#ifdef MBUF_STRESS_TEST
        if (m_defragrandomfailures) {
                int temp = arc4random() & 0xff;
                if (temp == 0xba)
                        goto nospace;
        }
#endif

        if (m0->m_pkthdr.len > MHLEN)
                m_final = m_getcl(how, MT_DATA, M_PKTHDR);
        else
                m_final = m_gethdr(how, MT_DATA);

        if (m_final == NULL)
                goto nospace;

        if (m_dup_pkthdr(m_final, m0, how) == 0)
                goto nospace;

        m_new = m_final;

        while (progress < m0->m_pkthdr.len) {
                length = m0->m_pkthdr.len - progress;
                if (length > MCLBYTES)
                        length = MCLBYTES;

                if (m_new == NULL) {
                        if (length > MLEN)
                                m_new = m_getcl(how, MT_DATA, 0);
                        else
                                m_new = m_get(how, MT_DATA);
                        if (m_new == NULL)
                                goto nospace;
                }

                m_copydata(m0, progress, length, mtod(m_new, caddr_t));
                progress += length;
                m_new->m_len = length;
                if (m_new != m_final)
                        m_cat(m_final, m_new);
                m_new = NULL;
        }
#ifdef MBUF_STRESS_TEST
        if (m0->m_next == NULL)
                m_defraguseless++;
#endif
        m_freem(m0);
        m0 = m_final;
#ifdef MBUF_STRESS_TEST
        m_defragpackets++;
        m_defragbytes += m0->m_pkthdr.len;
#endif
        return (m0);
nospace:
#ifdef MBUF_STRESS_TEST
        m_defragfailure++;
#endif
        if (m_final)
                m_freem(m_final);
        return (NULL);
}

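/*
 * Example usage (an illustrative sketch, not part of the original file):
 * a transmit path compacting an over-fragmented chain, as a driver might
 * when it runs out of DMA segments.  Names are hypothetical.
 */
#ifdef EXAMPLE_USAGE
static struct mbuf *
example_m_defrag(struct mbuf *m)
{
        struct mbuf *d;

        d = m_defrag(m, M_NOWAIT);
        if (d == NULL) {
                m_freem(m);     /* original chain is intact; drop it here */
                return (NULL);
        }
        return (d);     /* m itself was freed by m_defrag() on success */
}
#endif /* EXAMPLE_USAGE */
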
/*
 * Defragment an mbuf chain, returning at most maxfrags separate
 * mbufs+clusters.  If this is not possible NULL is returned and
 * the original mbuf chain is left in its present (potentially
 * modified) state.  We use two techniques: collapsing consecutive
 * mbufs and replacing consecutive mbufs by a cluster.
 *
 * NB: this should really be named m_defrag but that name is taken
 */
struct mbuf *
m_collapse(struct mbuf *m0, int how, int maxfrags)
{
        struct mbuf *m, *n, *n2, **prev;
        u_int curfrags;

        /*
         * Calculate the current number of frags.
         */
        curfrags = 0;
        for (m = m0; m != NULL; m = m->m_next)
                curfrags++;
        /*
         * First, try to collapse mbufs.  Note that we always collapse
         * towards the front so we don't need to deal with moving the
         * pkthdr.  This may be suboptimal if the first mbuf has much
         * less data than the following.
         */
        m = m0;
again:
        for (;;) {
                n = m->m_next;
                if (n == NULL)
                        break;
                if (M_WRITABLE(m) &&
                    n->m_len < M_TRAILINGSPACE(m)) {
                        bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
                            n->m_len);
                        m->m_len += n->m_len;
                        m->m_next = n->m_next;
                        m_free(n);
                        if (--curfrags <= maxfrags)
                                return m0;
                } else
                        m = n;
        }
        KASSERT(maxfrags > 1,
            ("maxfrags %u, but normal collapse failed", maxfrags));
        /*
         * Collapse consecutive mbufs to a cluster.
         */
        prev = &m0->m_next;             /* NB: not the first mbuf */
        while ((n = *prev) != NULL) {
                if ((n2 = n->m_next) != NULL &&
                    n->m_len + n2->m_len < MCLBYTES) {
                        m = m_getcl(how, MT_DATA, 0);
                        if (m == NULL)
                                goto bad;
                        bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
                        bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
                            n2->m_len);
                        m->m_len = n->m_len + n2->m_len;
                        m->m_next = n2->m_next;
                        *prev = m;
                        m_free(n);
                        m_free(n2);
                        if (--curfrags <= maxfrags)     /* +1 cl -2 mbufs */
                                return m0;
                        /*
                         * Still not there, try the normal collapse
                         * again before we allocate another cluster.
                         */
                        goto again;
                }
                prev = &n->m_next;
        }
        /*
         * No place where we can collapse to a cluster; punt.
         * This can occur if, for example, you request 2 frags
         * but the packet requires that both be clusters (we
         * never reallocate the first mbuf to avoid moving the
         * packet header).
         */
bad:
        return NULL;
}

#ifdef MBUF_STRESS_TEST

/*
 * Fragment an mbuf chain.  There's no reason you'd ever want to do
 * this in normal usage, but it's great for stress testing various
 * mbuf consumers.
 *
 * If fragmentation is not possible, the original chain will be
 * returned.
 *
 * Possible length values:
 * 0        no fragmentation will occur
 * > 0      each fragment will be of the specified length
 * -1       each fragment will be the same random value in length
 * -2       each fragment's length will be entirely random
 * (Random values range from 1 to 256)
 */
struct mbuf *
m_fragment(struct mbuf *m0, int how, int length)
{
        struct mbuf *m_new = NULL, *m_final = NULL;
        int progress = 0;

        if (!(m0->m_flags & M_PKTHDR))
                return (m0);

        if ((length == 0) || (length < -2))
                return (m0);

        m_fixhdr(m0); /* Needed sanity check */

        m_final = m_getcl(how, MT_DATA, M_PKTHDR);

        if (m_final == NULL)
                goto nospace;

        if (m_dup_pkthdr(m_final, m0, how) == 0)
                goto nospace;

        m_new = m_final;

        if (length == -1)
                length = 1 + (arc4random() & 255);

        while (progress < m0->m_pkthdr.len) {
                int fraglen;

                if (length > 0)
                        fraglen = length;
                else
                        fraglen = 1 + (arc4random() & 255);
                if (fraglen > m0->m_pkthdr.len - progress)
                        fraglen = m0->m_pkthdr.len - progress;

                if (fraglen > MCLBYTES)
                        fraglen = MCLBYTES;

                if (m_new == NULL) {
                        m_new = m_getcl(how, MT_DATA, 0);
                        if (m_new == NULL)
                                goto nospace;
                }

                m_copydata(m0, progress, fraglen, mtod(m_new, caddr_t));
                progress += fraglen;
                m_new->m_len = fraglen;
                if (m_new != m_final)
                        m_cat(m_final, m_new);
                m_new = NULL;
        }
        m_freem(m0);
        m0 = m_final;
        return (m0);
nospace:
        if (m_final)
                m_freem(m_final);
        /* Return the original chain on failure */
        return (m0);
}

#endif

/*
 * Copy the contents of uio into a properly sized mbuf chain.
 */
struct mbuf *
m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
{
        struct mbuf *m, *mb;
        int error, length;
        ssize_t total;
        int progress = 0;

        /*
         * len can be zero or an arbitrary large value bound by
         * the total data supplied by the uio.
         */
        if (len > 0)
                total = min(uio->uio_resid, len);
        else
                total = uio->uio_resid;

        /*
         * The smallest unit returned by m_getm2() is a single mbuf
         * with pkthdr.  We can't align past it.
         */
        if (align >= MHLEN)
                return (NULL);

        /*
         * Give us the full allocation or nothing.
         * If len is zero return the smallest empty mbuf.
         */
        m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
        if (m == NULL)
                return (NULL);
        m->m_data += align;

        /* Fill all mbufs with uio data and update header information. */
        for (mb = m; mb != NULL; mb = mb->m_next) {
                length = min(M_TRAILINGSPACE(mb), total - progress);

                error = uiomove(mtod(mb, void *), length, uio);
                if (error) {
                        m_freem(m);
                        return (NULL);
                }

                mb->m_len = length;
                progress += length;
                if (flags & M_PKTHDR)
                        m->m_pkthdr.len += length;
        }
        KASSERT(progress == total, ("%s: progress != total", __func__));

        return (m);
}

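/*
 * Example usage (an illustrative sketch, not part of the original file):
 * turning the remainder of a uio into a packet, as a socket send path
 * might.  A len of 0 means "all of uio_resid"; align 0 reserves no
 * leading space for headers.  Names are hypothetical.
 */
#ifdef EXAMPLE_USAGE
static struct mbuf *
example_m_uiotombuf(struct uio *uio)
{

        return (m_uiotombuf(uio, M_WAITOK, 0, 0, M_PKTHDR));
}
#endif /* EXAMPLE_USAGE */
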
/*
 * Copy an mbuf chain into a uio limited by len if set.
 */
int
m_mbuftouio(struct uio *uio, struct mbuf *m, int len)
{
        int error, length, total;
        int progress = 0;

        if (len > 0)
                total = min(uio->uio_resid, len);
        else
                total = uio->uio_resid;

        /* Fill the uio with data from the mbufs. */
        for (; m != NULL; m = m->m_next) {
                length = min(m->m_len, total - progress);

                error = uiomove(mtod(m, void *), length, uio);
                if (error)
                        return (error);

                progress += length;
        }

        return (0);
}

/*
 * Create a writable copy of the mbuf chain.  While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs.  The second mbuf in this chain is likely
 * to be a cluster.  The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc.  The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
 */
struct mbuf *
m_unshare(struct mbuf *m0, int how)
{
        struct mbuf *m, *mprev;
        struct mbuf *n, *mfirst, *mlast;
        int len, off;

        mprev = NULL;
        for (m = m0; m != NULL; m = mprev->m_next) {
                /*
                 * Regular mbufs are ignored unless there's a cluster
                 * in front of it that we can use to coalesce.  We do
                 * the latter mainly so later clusters can be coalesced
                 * also w/o having to handle them specially (i.e. convert
                 * mbuf+cluster -> cluster).  This optimization is heavily
                 * influenced by the assumption that we're running over
                 * Ethernet where MCLBYTES is large enough that the max
                 * packet size will permit lots of coalescing into a
                 * single cluster.  This in turn permits efficient
                 * crypto operations, especially when using hardware.
                 */
                if ((m->m_flags & M_EXT) == 0) {
                        if (mprev && (mprev->m_flags & M_EXT) &&
                            m->m_len <= M_TRAILINGSPACE(mprev)) {
                                /* XXX: this ignores mbuf types */
                                memcpy(mtod(mprev, caddr_t) + mprev->m_len,
                                    mtod(m, caddr_t), m->m_len);
                                mprev->m_len += m->m_len;
                                mprev->m_next = m->m_next;      /* unlink from chain */
                                m_free(m);                      /* reclaim mbuf */
#if 0
                                newipsecstat.ips_mbcoalesced++;
#endif
                        } else {
                                mprev = m;
                        }
                        continue;
                }
                /*
                 * Writable mbufs are left alone (for now).
                 */
                if (M_WRITABLE(m)) {
                        mprev = m;
                        continue;
                }

                /*
                 * Not writable, replace with a copy or coalesce with
                 * the previous mbuf if possible (since we have to copy
                 * it anyway, we try to reduce the number of mbufs and
                 * clusters so that future work is easier).
                 */
                KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
                /* NB: we only coalesce into a cluster or larger */
                if (mprev != NULL && (mprev->m_flags & M_EXT) &&
                    m->m_len <= M_TRAILINGSPACE(mprev)) {
                        /* XXX: this ignores mbuf types */
                        memcpy(mtod(mprev, caddr_t) + mprev->m_len,
                            mtod(m, caddr_t), m->m_len);
                        mprev->m_len += m->m_len;
                        mprev->m_next = m->m_next;      /* unlink from chain */
                        m_free(m);                      /* reclaim mbuf */
#if 0
                        newipsecstat.ips_clcoalesced++;
#endif
                        continue;
                }

                /*
                 * Allocate new space to hold the copy and copy the data.
                 * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
                 * splitting them into clusters.  We could just malloc a
                 * buffer and make it external but too many device drivers
                 * don't know how to break up the non-contiguous memory when
                 * doing DMA.
                 */
                n = m_getcl(how, m->m_type, m->m_flags);
                if (n == NULL) {
                        m_freem(m0);
                        return (NULL);
                }
                if (m->m_flags & M_PKTHDR) {
                        KASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR",
                            __func__, m0, m));
                        m_move_pkthdr(n, m);
                }
                len = m->m_len;
                off = 0;
                mfirst = n;
                mlast = NULL;
                for (;;) {
                        int cc = min(len, MCLBYTES);
                        memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
                        n->m_len = cc;
                        if (mlast != NULL)
                                mlast->m_next = n;
                        mlast = n;
#if 0
                        newipsecstat.ips_clcopied++;
#endif

                        len -= cc;
                        if (len <= 0)
                                break;
                        off += cc;

                        n = m_getcl(how, m->m_type, m->m_flags);
                        if (n == NULL) {
                                m_freem(mfirst);
                                m_freem(m0);
                                return (NULL);
                        }
                }
                n->m_next = m->m_next;
                if (mprev == NULL)
                        m0 = mfirst;            /* new head of chain */
                else
                        mprev->m_next = mfirst; /* replace old mbuf */
                m_free(m);                      /* release old mbuf */
                mprev = mfirst;
        }
        return (m0);
}

#ifdef MBUF_PROFILING

#define MP_BUCKETS 32 /* don't just change this as things may overflow. */
struct mbufprofile {
        uintmax_t wasted[MP_BUCKETS];
        uintmax_t used[MP_BUCKETS];
        uintmax_t segments[MP_BUCKETS];
} mbprof;

#define MP_MAXDIGITS 21 /* strlen("16,000,000,000,000,000,000") == 21 */
#define MP_NUMLINES 6
#define MP_NUMSPERLINE 16
#define MP_EXTRABYTES 64 /* > strlen("used:\nwasted:\nsegments:\n") */
/* work out max space needed and add a bit of spare space too */
#define MP_MAXLINE ((MP_MAXDIGITS+1) * MP_NUMSPERLINE)
#define MP_BUFSIZE ((MP_MAXLINE * MP_NUMLINES) + 1 + MP_EXTRABYTES)

char mbprofbuf[MP_BUFSIZE];

void
m_profile(struct mbuf *m)
{
        int segments = 0;
        int used = 0;
        int wasted = 0;

        while (m) {
                segments++;
                used += m->m_len;
                if (m->m_flags & M_EXT) {
                        wasted += MHLEN - sizeof(m->m_ext) +
                            m->m_ext.ext_size - m->m_len;
                } else {
                        if (m->m_flags & M_PKTHDR)
                                wasted += MHLEN - m->m_len;
                        else
                                wasted += MLEN - m->m_len;
                }
                m = m->m_next;
        }
        /* be paranoid.. it helps */
        if (segments > MP_BUCKETS - 1)
                segments = MP_BUCKETS - 1;
        if (used > 100000)
                used = 100000;
        if (wasted > 100000)
                wasted = 100000;
        /* store in the appropriate bucket */
        /* don't bother locking. if it's slightly off, so what? */
        mbprof.segments[segments]++;
        mbprof.used[fls(used)]++;
        mbprof.wasted[fls(wasted)]++;
}

static void
mbprof_textify(void)
{
        int offset;
        char *c;
        uintmax_t *p;

        p = &mbprof.wasted[0];
        c = mbprofbuf;
        offset = snprintf(c, MP_MAXLINE + 10,
            "wasted:\n"
            "%ju %ju %ju %ju %ju %ju %ju %ju "
            "%ju %ju %ju %ju %ju %ju %ju %ju\n",
            p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
            p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
        p = &mbprof.wasted[16];
        c += offset;
        offset = snprintf(c, MP_MAXLINE,
            "%ju %ju %ju %ju %ju %ju %ju %ju "
            "%ju %ju %ju %ju %ju %ju %ju %ju\n",
            p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
            p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
        p = &mbprof.used[0];
        c += offset;
        offset = snprintf(c, MP_MAXLINE + 10,
            "used:\n"
            "%ju %ju %ju %ju %ju %ju %ju %ju "
            "%ju %ju %ju %ju %ju %ju %ju %ju\n",
            p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
            p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
        p = &mbprof.used[16];
        c += offset;
        offset = snprintf(c, MP_MAXLINE,
            "%ju %ju %ju %ju %ju %ju %ju %ju "
            "%ju %ju %ju %ju %ju %ju %ju %ju\n",
            p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
            p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
        p = &mbprof.segments[0];
        c += offset;
        offset = snprintf(c, MP_MAXLINE + 10,
            "segments:\n"
            "%ju %ju %ju %ju %ju %ju %ju %ju "
            "%ju %ju %ju %ju %ju %ju %ju %ju\n",
            p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
            p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
        p = &mbprof.segments[16];
        c += offset;
        offset = snprintf(c, MP_MAXLINE,
            "%ju %ju %ju %ju %ju %ju %ju %ju "
            "%ju %ju %ju %ju %ju %ju %ju %ju",
            p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
            p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
}

static int
mbprof_handler(SYSCTL_HANDLER_ARGS)
{
        int error;

        mbprof_textify();
        error = SYSCTL_OUT(req, mbprofbuf, strlen(mbprofbuf) + 1);
        return (error);
}

static int
mbprof_clr_handler(SYSCTL_HANDLER_ARGS)
{
        int clear, error;

        clear = 0;
        error = sysctl_handle_int(oidp, &clear, 0, req);
        if (error || !req->newptr)
                return (error);

        if (clear) {
                bzero(&mbprof, sizeof(mbprof));
        }

        return (error);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofile, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, mbprof_handler, "A", "mbuf profiling statistics");

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofileclr, CTLTYPE_INT|CTLFLAG_RW,
    NULL, 0, mbprof_clr_handler, "I", "clear mbuf profiling statistics");

#endif