/*-
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include "opt_mbuf_profiling.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#ifdef MBUF_STRESS_TEST
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
int	m_defragrandomfailures;
#endif
/*
 * sysctl(8) exported objects
 */
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
    &max_linkhdr, 0, "Size of largest link layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
    &max_protohdr, 0, "Size of largest protocol layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
    &max_hdr, 0, "Size of largest link plus protocol header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
    &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
    &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
    &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
    &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
    &m_defragfailure, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
    &m_defragrandomfailures, 0, "");
#endif
/*
 * Ensure the correct size of various mbuf parameters.  It could be off due
 * to compiler-induced padding and alignment artifacts.
 */
CTASSERT(MSIZE - offsetof(struct mbuf, m_dat) == MLEN);
CTASSERT(MSIZE - offsetof(struct mbuf, m_pktdat) == MHLEN);

/*
 * mbuf data storage should be 64-bit aligned regardless of architectural
 * pointer size; check this is the case with and without a packet header.
 */
CTASSERT(offsetof(struct mbuf, m_dat) % 8 == 0);
CTASSERT(offsetof(struct mbuf, m_pktdat) % 8 == 0);
/*
 * While the specific values here don't matter too much (i.e., +/- a few
 * words), we do want to ensure that changes to these values are carefully
 * reasoned about and properly documented.  This is especially the case as
 * network-protocol and device-driver modules encode these layouts, and must
 * be recompiled if the structures change.  Check these values at compile time
 * against the ones documented in comments in mbuf.h.
 *
 * NB: Possibly they should be documented there via #define's and not just
 * comments.
 */
#if defined(__LP64__)
CTASSERT(offsetof(struct mbuf, m_dat) == 32);
CTASSERT(sizeof(struct pkthdr) == 56);
CTASSERT(sizeof(struct m_ext) == 48);
#else
CTASSERT(offsetof(struct mbuf, m_dat) == 24);
CTASSERT(sizeof(struct pkthdr) == 48);
CTASSERT(sizeof(struct m_ext) == 28);
#endif
/*
 * Assert that the queue(3) macros produce code of the same size as an old
 * plain pointer does.
 */
static struct mbuf m_assertbuf;
CTASSERT(sizeof(m_assertbuf.m_slist) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_stailq) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_slistpkt) == sizeof(m_assertbuf.m_nextpkt));
CTASSERT(sizeof(m_assertbuf.m_stailqpkt) == sizeof(m_assertbuf.m_nextpkt));
/*
 * m_get2() allocates the minimum-size mbuf that would fit the "size" argument.
 */
struct mbuf *
m_get2(int size, int how, short type, int flags)
{
	struct mb_args args;
	struct mbuf *m, *n;

	args.flags = flags;
	args.type = type;

	if (size <= MHLEN || (size <= MLEN && (flags & M_PKTHDR) == 0))
		return (uma_zalloc_arg(zone_mbuf, &args, how));
	if (size <= MCLBYTES)
		return (uma_zalloc_arg(zone_pack, &args, how));

	if (size > MJUMPAGESIZE)
		return (NULL);

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	n = uma_zalloc_arg(zone_jumbop, m, how);
	if (n == NULL) {
		uma_zfree(zone_mbuf, m);
		return (NULL);
	}

	return (m);
}
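/*
 * Example (editorial sketch, not part of the original source): a caller
 * that needs a buffer sized to an incoming frame can let m_get2() pick
 * the smallest suitable mbuf; "framelen" is a hypothetical variable.
 *
 *	struct mbuf *m;
 *
 *	m = m_get2(framelen, M_NOWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = framelen;
 */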
/*
 * m_getjcl() returns an mbuf with a cluster of the specified size attached.
 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
 */
struct mbuf *
m_getjcl(int how, short type, int flags, int size)
{
	struct mb_args args;
	struct mbuf *m, *n;
	uma_zone_t zone;

	if (size == MCLBYTES)
		return m_getcl(how, type, flags);

	args.flags = flags;
	args.type = type;

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	zone = m_getzone(size);
	n = uma_zalloc_arg(zone, m, how);
	if (n == NULL) {
		uma_zfree(zone_mbuf, m);
		return (NULL);
	}
	return (m);
}
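/*
 * Example (editorial sketch): a driver filling a receive ring with 9k
 * jumbo clusters might allocate each buffer like this; error handling
 * is abbreviated.
 *
 *	struct mbuf *m;
 *
 *	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
 */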
/*
 * Allocate a given length worth of mbufs and/or clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If an
 * existing mbuf chain is provided, then we will append the new chain
 * to the existing one but still return the top of the newly allocated
 * chain.
 */
struct mbuf *
m_getm2(struct mbuf *m, int len, int how, short type, int flags)
{
	struct mbuf *mb, *nm = NULL, *mtail = NULL;

	KASSERT(len >= 0, ("%s: len is < 0", __func__));

	/* Validate flags. */
	flags &= (M_PKTHDR | M_EOR);

	/* Packet header mbuf must be first in chain. */
	if ((flags & M_PKTHDR) && m != NULL)
		flags &= ~M_PKTHDR;

	/* Loop and append maximum sized mbufs to the chain tail. */
	while (len > 0) {
		if (len > MCLBYTES)
			mb = m_getjcl(how, type, (flags & M_PKTHDR),
			    MJUMPAGESIZE);
		else if (len >= MINCLSIZE)
			mb = m_getcl(how, type, (flags & M_PKTHDR));
		else if (flags & M_PKTHDR)
			mb = m_gethdr(how, type);
		else
			mb = m_get(how, type);

		/* Fail the whole operation if one mbuf can't be allocated. */
		if (mb == NULL) {
			if (nm != NULL)
				m_freem(nm);
			return (NULL);
		}

		/* Book keeping. */
		len -= M_SIZE(mb);
		if (mtail != NULL)
			mtail->m_next = mb;
		else
			nm = mb;
		mtail = mb;
		flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */
	}
	if (flags & M_EOR)
		mtail->m_flags |= M_EOR;	/* Only valid on the last mbuf. */

	/* If mbuf was supplied, append new chain to the end of it. */
	if (m != NULL) {
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
		mtail->m_next = nm;
		mtail->m_flags &= ~M_EOR;
	} else
		m = nm;

	return (m);
}
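/*
 * Example (editorial sketch): asking m_getm2() for "len" bytes worth of
 * storage up front rather than growing a chain piecemeal; with M_WAITOK
 * the underlying zone allocations sleep rather than fail.
 *
 *	struct mbuf *m;
 *
 *	m = m_getm2(NULL, len, M_WAITOK, MT_DATA, M_PKTHDR);
 */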
/*
 * Free an entire chain of mbufs and associated external buffers, if
 * applicable.
 */
void
m_freem(struct mbuf *mb)
{

	while (mb != NULL)
		mb = m_free(mb);
}
/*
 * Configure a provided mbuf to refer to the provided external storage
 * buffer and setup a reference count for said buffer.  If the setting
 * up of the reference count fails, the M_EXT bit will not be set.  If
 * successful, the M_EXT bit is set in the mbuf's flags.
 *
 * Arguments:
 *    mb     The existing mbuf to which to attach the provided buffer.
 *    buf    The address of the provided external storage buffer.
 *    size   The size of the provided buffer.
 *    freef  A pointer to a routine that is responsible for freeing the
 *           provided external storage buffer.
 *    arg1, arg2
 *           Pointers to arguments (of any type) to be passed to the
 *           provided freef routine (may be NULL).
 *    flags  Any other flags to be passed to the provided mbuf.
 *    type   The type that the external storage buffer should be
 *           labeled with.
 *
 * Returns:
 *    0 on success, ENOMEM if the reference count cannot be allocated.
 */
int
m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
    void (*freef)(struct mbuf *, void *, void *), void *arg1, void *arg2,
    int flags, int type, int wait)
{
	KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));

	if (type != EXT_EXTREF)
		mb->m_ext.ext_cnt = uma_zalloc(zone_ext_refcnt, wait);

	if (mb->m_ext.ext_cnt == NULL)
		return (ENOMEM);

	*(mb->m_ext.ext_cnt) = 1;
	mb->m_flags |= (M_EXT | flags);
	mb->m_ext.ext_buf = buf;
	mb->m_data = mb->m_ext.ext_buf;
	mb->m_ext.ext_size = size;
	mb->m_ext.ext_free = freef;
	mb->m_ext.ext_arg1 = arg1;
	mb->m_ext.ext_arg2 = arg2;
	mb->m_ext.ext_type = type;
	mb->m_ext.ext_flags = 0;

	return (0);
}
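/*
 * Example (editorial sketch): attaching a driver-owned DMA buffer to an
 * mbuf.  "my_buf", "MYBUFSIZE" and "my_free" are hypothetical; the free
 * routine runs when the last reference to the buffer goes away.
 *
 *	static void
 *	my_free(struct mbuf *m, void *arg1, void *arg2)
 *	{
 *		// return the buffer (arg1) to the driver's pool
 *	}
 *
 *	if (m_extadd(m, my_buf, MYBUFSIZE, my_free, my_buf, NULL,
 *	    0, EXT_NET_DRV, M_NOWAIT) != 0)
 *		return (ENOMEM);
 */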
/*
 * Non-directly-exported function to clean up after mbufs with M_EXT
 * storage attached to them if the reference count hits 1.
 */
void
mb_free_ext(struct mbuf *m)
{
	int freembuf;

	KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));

	/*
	 * Check if the header is embedded in the cluster.
	 */
	freembuf = (m->m_flags & M_NOFREE) ? 0 : 1;

	switch (m->m_ext.ext_type) {
	case EXT_SFBUF:
		sf_ext_free(m->m_ext.ext_arg1, m->m_ext.ext_arg2);
		break;
	default:
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		/*
		 * Free attached storage if this mbuf is the only
		 * reference to it.
		 */
		if (*(m->m_ext.ext_cnt) != 1) {
			if (atomic_fetchadd_int(m->m_ext.ext_cnt, -1) != 1)
				break;
		}

		switch (m->m_ext.ext_type) {
		case EXT_PACKET:	/* The packet zone is special. */
			if (*(m->m_ext.ext_cnt) == 0)
				*(m->m_ext.ext_cnt) = 1;
			uma_zfree(zone_pack, m);
			return;		/* Job done. */
		case EXT_CLUSTER:
			uma_zfree(zone_clust, m->m_ext.ext_buf);
			break;
		case EXT_JUMBOP:
			uma_zfree(zone_jumbop, m->m_ext.ext_buf);
			break;
		case EXT_JUMBO9:
			uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
			break;
		case EXT_JUMBO16:
			uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
			break;
		case EXT_NET_DRV:
		case EXT_MOD_TYPE:
		case EXT_DISPOSABLE:
			*(m->m_ext.ext_cnt) = 0;
			uma_zfree(zone_ext_refcnt, __DEVOLATILE(u_int *,
			    m->m_ext.ext_cnt));
			/* FALLTHROUGH */
		case EXT_EXTREF:
			KASSERT(m->m_ext.ext_free != NULL,
			    ("%s: ext_free not set", __func__));
			(*(m->m_ext.ext_free))(m, m->m_ext.ext_arg1,
			    m->m_ext.ext_arg2);
			break;
		default:
			KASSERT(m->m_ext.ext_type == 0,
			    ("%s: unknown ext_type", __func__));
		}
	}

	if (freembuf)
		uma_zfree(zone_mbuf, m);
}
/*
 * Attach the cluster from *m to *n, set up m_ext in *n
 * and bump the refcount of the cluster.
 */
void
mb_dupcl(struct mbuf *n, const struct mbuf *m)
{

	KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));
	KASSERT(!(n->m_flags & M_EXT), ("%s: M_EXT set on %p", __func__, n));

	switch (m->m_ext.ext_type) {
	case EXT_SFBUF:
		sf_ext_ref(m->m_ext.ext_arg1, m->m_ext.ext_arg2);
		break;
	default:
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		if (*(m->m_ext.ext_cnt) == 1)
			*(m->m_ext.ext_cnt) += 1;
		else
			atomic_add_int(m->m_ext.ext_cnt, 1);
	}

	n->m_ext = m->m_ext;
	n->m_flags |= M_EXT;
	n->m_flags |= m->m_flags & M_RDONLY;
}
void
m_demote_pkthdr(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);

	m_tag_delete_chain(m, NULL);
	m->m_flags &= ~M_PKTHDR;
	bzero(&m->m_pkthdr, sizeof(struct pkthdr));
}
/*
 * Clean up mbuf (chain) from any tags and packet headers.
 * If "all" is set then the first mbuf in the chain will be
 * cleaned too.
 */
void
m_demote(struct mbuf *m0, int all, int flags)
{
	struct mbuf *m;

	for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
		KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt in m %p, m0 %p",
		    __func__, m, m0));
		if (m->m_flags & M_PKTHDR)
			m_demote_pkthdr(m);
		m->m_flags = m->m_flags & (M_EXT | M_RDONLY | M_NOFREE | flags);
	}
}
/*
 * Sanity checks on mbuf (chain) for use in KASSERT() and general
 * debugging.
 * Returns 0 or panics when bad and 1 on all tests passed.
 * Sanitize, 0 to run M_SANITY_ACTION, 1 to garble things so they
 * blow up later.
 */
int
m_sanity(struct mbuf *m0, int sanitize)
{
	struct mbuf *m;
	caddr_t a, b;
	int pktlen = 0;

#ifdef INVARIANTS
#define	M_SANITY_ACTION(s)	panic("mbuf %p: " s, m)
#else
#define	M_SANITY_ACTION(s)	printf("mbuf %p: " s, m)
#endif

	for (m = m0; m != NULL; m = m->m_next) {
		/*
		 * Basic pointer checks.  If any of these fails then some
		 * unrelated kernel memory before or after us is trashed.
		 * No way to recover from that.
		 */
		a = M_START(m);
		b = a + M_SIZE(m);
		if ((caddr_t)m->m_data < a)
			M_SANITY_ACTION("m_data outside mbuf data range left");
		if ((caddr_t)m->m_data > b)
			M_SANITY_ACTION("m_data outside mbuf data range right");
		if ((caddr_t)m->m_data + m->m_len > b)
			M_SANITY_ACTION("m_data + m_len exceeds mbuf space");

		/* m->m_nextpkt may only be set on first mbuf in chain. */
		if (m != m0 && m->m_nextpkt != NULL) {
			if (sanitize) {
				m_freem(m->m_nextpkt);
				m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
			} else
				M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
		}

		/* packet length (not mbuf length!) calculation */
		if (m0->m_flags & M_PKTHDR)
			pktlen += m->m_len;

		/* m_tags may only be attached to first mbuf in chain. */
		if (m != m0 && m->m_flags & M_PKTHDR &&
		    !SLIST_EMPTY(&m->m_pkthdr.tags)) {
			if (sanitize) {
				m_tag_delete_chain(m, NULL);
				/* put in 0xDEADC0DE perhaps? */
			} else
				M_SANITY_ACTION("m_tags on in-chain mbuf");
		}

		/* M_PKTHDR may only be set on first mbuf in chain */
		if (m != m0 && m->m_flags & M_PKTHDR) {
			if (sanitize) {
				bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
				m->m_flags &= ~M_PKTHDR;
				/* put in 0xDEADCODE and leave hdr flag in */
			} else
				M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
		}
	}
	m = m0;
	if (pktlen && pktlen != m->m_pkthdr.len) {
		if (sanitize)
			m->m_pkthdr.len = 0;
		else
			M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
	}
	return 1;

#undef	M_SANITY_ACTION
}
/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{

#if 0
	/* see below for why these are not enabled */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
	    ("m_move_pkthdr: to has tags"));
#endif
#ifdef MAC
	/*
	 * XXXMAC: It could be this should also occur for non-MAC?
	 */
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}
/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{

#if 0
	/*
	 * The mbuf allocator only initializes the pkthdr
	 * when the mbuf is allocated with m_gethdr(). Many users
	 * (e.g. m_copy*, m_prepend) use m_get() and then
	 * smash the pkthdr as needed causing these
	 * assertions to trip.  For now just disable them.
	 */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
#endif
	MBUF_CHECKSLEEP(how);
#ifdef MAC
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}
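/*
 * Example (editorial sketch): giving a freshly allocated mbuf the header
 * and tags of an existing packet before filling in new payload:
 *
 *	n = m_gethdr(M_NOWAIT, MT_DATA);
 *	if (n == NULL)
 *		return (ENOBUFS);
 *	if (!m_dup_pkthdr(n, m, M_NOWAIT)) {
 *		m_free(n);
 *		return (ENOBUFS);
 *	}
 */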
/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		m_move_pkthdr(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < M_SIZE(m))
		M_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(const struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	MBUF_CHECKSLEEP(wait);
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		if (copyhdr)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mb_dupcl(n, m);
		} else
			bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
			    (u_int)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}

	return (top);
nospace:
	m_freem(top);
	return (NULL);
}
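/*
 * Example (editorial sketch): taking a reference-counted, read-only copy
 * of a whole packet, e.g. for retransmission, leaving the original
 * untouched:
 *
 *	struct mbuf *copy;
 *
 *	copy = m_copym(m, 0, M_COPYALL, M_NOWAIT);
 *	if (copy == NULL)
 *		return (ENOBUFS);
 */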
/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MBUF_CHECKSLEEP(how);
	n = m_get(how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		mb_dupcl(n, m);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		o = m_get(how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			mb_dupcl(n, m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (NULL);
}
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	u_int count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
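/*
 * Example (editorial sketch): pulling a protocol header out of a chain
 * into a stack buffer regardless of how the chain is fragmented;
 * "hdroff" (the header's offset in the chain) is a hypothetical variable.
 *
 *	struct udphdr uh;
 *
 *	m_copydata(m, hdroff, sizeof(uh), (caddr_t)&uh);
 */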
/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(const struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	MBUF_CHECKSLEEP(how);
	/* Sanity check */
	if (m == NULL)
		return (NULL);
	M_ASSERTPKTHDR(m);

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		if (remain >= MINCLSIZE) {
			n = m_getcl(how, m->m_type, 0);
			nsize = MCLBYTES;
		} else {
			n = m_get(how, m->m_type);
			nsize = MLEN;
		}
		if (n == NULL)
			goto nospace;

		if (top == NULL) {		/* First one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how)) {
				m_free(n);
				goto nospace;
			}
			if ((n->m_flags & M_EXT) == 0)
				nsize = MHLEN;
			n->m_flags &= ~M_RDONLY;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
	return (NULL);
}
/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{

	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (!M_WRITABLE(m) ||
		    M_TRAILINGSPACE(m) < n->m_len) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
/*
 * Concatenate two pkthdr mbuf chains.
 */
void
m_catpkt(struct mbuf *m, struct mbuf *n)
{

	M_ASSERTPKTHDR(m);
	M_ASSERTPKTHDR(n);

	m->m_pkthdr.len += n->m_pkthdr.len;
	m_demote(n, 1, 0);

	m_cat(m, n);
}
void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		if (mp->m_flags & M_PKTHDR)
			mp->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				if (m->m_next != NULL) {
					m_freem(m->m_next);
					m->m_next = NULL;
				}
				break;
			}
			count -= m->m_len;
		}
	}
}
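/*
 * Example (editorial sketch): m_adj() trims from the front with a
 * positive count and from the back with a negative one, e.g. stripping
 * a link-level header and a trailing checksum:
 *
 *	m_adj(m, ETHER_HDR_LEN);	// drop leading Ethernet header
 *	m_adj(m, -ETHER_CRC_LEN);	// drop trailing CRC
 */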
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work
 * for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		m = m_get(M_NOWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		if (n->m_flags & M_PKTHDR)
			m_move_pkthdr(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (NULL);
}
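/*
 * Example (editorial sketch): the classic m_pullup() idiom before
 * dereferencing a header with mtod(); on failure the chain has already
 * been freed:
 *
 *	struct ip *ip;
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 */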
/*
 * Like m_pullup(), except a new mbuf is always allocated, and we allow
 * the amount of empty space before the data in the new mbuf to be specified
 * (in the event that the caller expects to prepend later).
 */
struct mbuf *
m_copyup(struct mbuf *n, int len, int dstoff)
{
	struct mbuf *m;
	int count, space;

	if (len > (MHLEN - dstoff))
		goto bad;
	m = m_get(M_NOWAIT, n->m_type);
	if (m == NULL)
		goto bad;
	if (n->m_flags & M_PKTHDR)
		m_move_pkthdr(m, n);
	m->m_data += dstoff;
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (NULL);
}
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf. Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	u_int len = len0, remain;

	MBUF_CHECKSLEEP(wait);
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR && remain == 0) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_next = m->m_next;
		m->m_next = NULL;
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		return (n);
	} else if (m0->m_flags & M_PKTHDR) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			M_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			M_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		n = m_get(wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + len;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
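/*
 * Example (editorial sketch): splitting a packet after "mtu" bytes; on
 * success "tail" holds the remainder and m0 is truncated to the first
 * piece ("mtu" is a hypothetical variable):
 *
 *	struct mbuf *tail;
 *
 *	tail = m_split(m0, mtu, M_NOWAIT);
 *	if (tail == NULL)
 *		return (ENOBUFS);
 */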
/*
 * Routine to copy from device local memory into mbufs.
 * Note that the `off' argument is the offset into the first mbuf of the
 * target chain at which to begin copying the data.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = NULL, **mp = &top;
	int len;

	if (off < 0 || off > MHLEN)
		return (NULL);

	while (totlen > 0) {
		if (top == NULL) {	/* First one, must be PKTHDR */
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
				len = MCLBYTES;
			} else {
				m = m_gethdr(M_NOWAIT, MT_DATA);
				len = MHLEN;

				/* Place initial small packet/header at end of mbuf */
				if (m && totlen + off + max_linkhdr <= MLEN) {
					m->m_data += max_linkhdr;
					len -= max_linkhdr;
				}
			}
			if (m == NULL)
				return NULL;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = totlen;
		} else {
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_NOWAIT, MT_DATA, 0);
				len = MCLBYTES;
			} else {
				m = m_get(M_NOWAIT, MT_DATA);
				len = MLEN;
			}
			if (m == NULL) {
				m_freem(top);
				return NULL;
			}
		}
		if (off) {
			m->m_data += off;
			len -= off;
			off = 0;
		}
		m->m_len = len = min(totlen, len);
		if (copy)
			copy(buf, mtod(m, caddr_t), (u_int)len);
		else
			bcopy(buf, mtod(m, caddr_t), (u_int)len);
		buf += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
	return (top);
}
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				goto out;
			bzero(mtod(n, caddr_t), MLEN);
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		if (m->m_next == NULL && (len > m->m_len - off)) {
			m->m_len += min(len - (m->m_len - off),
			    M_TRAILINGSPACE(m));
		}
		mlen = min (m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
/*
 * Append the specified data to the indicated mbuf chain,
 * extending the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space, remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(M_NOWAIT, m->m_type);
		if (n == NULL)
			break;
		n->m_len = min(MLEN, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len, remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
}
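/*
 * Example (editorial sketch): appending a small trailer to a packet,
 * letting m_append() extend the chain if the last mbuf is full;
 * "trailer" is a hypothetical structure supplied by the caller.
 *
 *	if (!m_append(m0, sizeof(trailer), (c_caddr_t)&trailer))
 *		return (ENOBUFS);
 */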
/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		count = min(m->m_len - off, len);
		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}
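/*
 * Example (editorial sketch): m_apply() visits each contiguous region
 * once, so a sum can be folded over a fragmented chain without copying
 * it; "struct mysum" is a hypothetical accumulator.
 *
 *	static int
 *	sum_cb(void *arg, void *data, u_int len)
 *	{
 *		struct mysum *ms = arg;
 *		u_char *p = data;
 *
 *		while (len-- > 0)
 *			ms->sum += *p++;
 *		return (0);	// nonzero would abort the walk
 *	}
 *
 *	m_apply(m, 0, m->m_pkthdr.len, sum_cb, &ms);
 */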
/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{

	while (loc >= 0) {
		/* Normal end of search. */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;
			if (m->m_next == NULL) {
				if (m->m_len == loc) {
					/* Point at the end of valid data. */
					*off = m->m_len;
					return (m);
				}
				return (NULL);
			}
			m = m->m_next;
		}
	}

	return (NULL);
}
void
m_print(const struct mbuf *m, int maxlen)
{
	int len;
	int pdata;
	const struct mbuf *m2;

	if (m == NULL) {
		printf("mbuf: %p\n", m);
		return;
	}

	if (m->m_flags & M_PKTHDR)
		len = m->m_pkthdr.len;
	else
		len = -1;
	m2 = m;
	while (m2 != NULL && (len == -1 || len)) {
		pdata = m2->m_len;
		if (maxlen != -1 && pdata > maxlen)
			pdata = maxlen;
		printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
		    m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
		    "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
		    "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
		if (pdata)
			printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
		if (len != -1)
			len -= m2->m_len;
		m2 = m2->m_next;
	}
	if (len > 0)
		printf("%d bytes unaccounted for.\n", len);
	return;
}
u_int
m_fixhdr(struct mbuf *m0)
{
	u_int len;

	len = m_length(m0, NULL);
	m0->m_pkthdr.len = len;
	return (len);
}

u_int
m_length(struct mbuf *m0, struct mbuf **last)
{
	struct mbuf *m;
	u_int len;

	len = 0;
	for (m = m0; m != NULL; m = m->m_next) {
		len += m->m_len;
		if (m->m_next == NULL)
			break;
	}
	if (last != NULL)
		*last = m;
	return (len);
}
/*
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length;

	MBUF_CHECKSLEEP(how);
	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	m_fixhdr(m0); /* Needed sanity check */

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = arc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	if (m0->m_pkthdr.len > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m_final = m_gethdr(how, MT_DATA);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			if (length > MLEN)
				m_new = m_getcl(how, MT_DATA, 0);
			else
				m_new = m_get(how, MT_DATA);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
#ifdef MBUF_STRESS_TEST
	if (m0->m_next == NULL)
		m_defraguseless++;
#endif
	m_freem(m0);
	m0 = m_final;
#ifdef MBUF_STRESS_TEST
	m_defragpackets++;
	m_defragbytes += m0->m_pkthdr.len;
#endif
	return (m0);
nospace:
#ifdef MBUF_STRESS_TEST
	m_defragfailure++;
#endif
	if (m_final)
		m_freem(m_final);
	return (NULL);
}
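/*
 * Example (editorial sketch): a transmit path that exceeds its DMA
 * segment budget commonly retries with a defragmented chain:
 *
 *	struct mbuf *d;
 *
 *	d = m_defrag(m0, M_NOWAIT);
 *	if (d == NULL)
 *		return (ENOBUFS);	// m0 is still intact here
 *	m0 = d;				// old chain was freed by m_defrag()
 */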
/*
 * Defragment an mbuf chain, returning at most maxfrags separate
 * mbufs+clusters.  If this is not possible NULL is returned and
 * the original mbuf chain is left in its present (potentially
 * modified) state.  We use two techniques: collapsing consecutive
 * mbufs and replacing consecutive mbufs by a cluster.
 *
 * NB: this should really be named m_defrag but that name is taken
 */
struct mbuf *
m_collapse(struct mbuf *m0, int how, int maxfrags)
{
	struct mbuf *m, *n, *n2, **prev;
	u_int curfrags;

	/*
	 * Calculate the current number of frags.
	 */
	curfrags = 0;
	for (m = m0; m != NULL; m = m->m_next)
		curfrags++;
	/*
	 * First, try to collapse mbufs.  Note that we always collapse
	 * towards the front so we don't need to deal with moving the
	 * pkthdr.  This may be suboptimal if the first mbuf has much
	 * less data than the following.
	 */
	m = m0;
again:
	for (;;) {
		n = m->m_next;
		if (n == NULL)
			break;
		if (M_WRITABLE(m) &&
		    n->m_len < M_TRAILINGSPACE(m)) {
			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
			    n->m_len);
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			m_free(n);
			if (--curfrags <= maxfrags)
				return m0;
		} else
			m = n;
	}
	KASSERT(maxfrags > 1,
	    ("maxfrags %u, but normal collapse failed", maxfrags));
	/*
	 * Collapse consecutive mbufs to a cluster.
	 */
	prev = &m0->m_next;		/* NB: not the first mbuf */
	while ((n = *prev) != NULL) {
		if ((n2 = n->m_next) != NULL &&
		    n->m_len + n2->m_len < MCLBYTES) {
			m = m_getcl(how, MT_DATA, 0);
			if (m == NULL)
				goto bad;
			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
			    n2->m_len);
			m->m_len = n->m_len + n2->m_len;
			m->m_next = n2->m_next;
			*prev = m;
			m_free(n);
			m_free(n2);
			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
				return m0;
			/*
			 * Still not there, try the normal collapse
			 * again before we allocate another cluster.
			 */
			goto again;
		}
		prev = &n->m_next;
	}
	/*
	 * No place where we can collapse to a cluster; punt.
	 * This can occur if, for example, you request 2 frags
	 * but the packet requires that both be clusters (we
	 * never reallocate the first mbuf to avoid moving the
	 * packet header).
	 */
bad:
	return NULL;
}
#ifdef MBUF_STRESS_TEST
/*
 * Fragment an mbuf chain.  There's no reason you'd ever want to do
 * this in normal usage, but it's great for stress testing various
 * mbuf consumers.
 *
 * If fragmentation is not possible, the original chain will be
 * returned.
 *
 * Possible length values:
 * 0	no fragmentation will occur
 * > 0	each fragment will be of the specified length
 * -1	each fragment will be the same random value in length
 * -2	each fragment's length will be entirely random
 * (Random values range from 1 to 256)
 */
struct mbuf *
m_fragment(struct mbuf *m0, int how, int length)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	if ((length == 0) || (length < -2))
		return (m0);

	m_fixhdr(m0); /* Needed sanity check */

	m_final = m_getcl(how, MT_DATA, M_PKTHDR);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	if (length == -1)
		length = 1 + (arc4random() & 255);

	while (progress < m0->m_pkthdr.len) {
		int fraglen;

		if (length > 0)
			fraglen = length;
		else
			fraglen = 1 + (arc4random() & 255);
		if (fraglen > m0->m_pkthdr.len - progress)
			fraglen = m0->m_pkthdr.len - progress;

		if (fraglen > MCLBYTES)
			fraglen = MCLBYTES;

		if (m_new == NULL) {
			m_new = m_getcl(how, MT_DATA, 0);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, fraglen, mtod(m_new, caddr_t));
		progress += fraglen;
		m_new->m_len = fraglen;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	m_freem(m0);
	m0 = m_final;
	return (m0);
nospace:
	if (m_final)
		m_freem(m_final);
	/* Return the original chain on failure */
	return (m0);
}

#endif
/*
 * Copy the contents of uio into a properly sized mbuf chain.
 */
struct mbuf *
m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
{
	struct mbuf *m, *mb;
	int error, length;
	ssize_t total;
	int progress = 0;

	/*
	 * len can be zero or an arbitrary large value bound by
	 * the total data supplied by the uio.
	 */
	if (len > 0)
		total = min(uio->uio_resid, len);
	else
		total = uio->uio_resid;

	/*
	 * The smallest unit returned by m_getm2() is a single mbuf
	 * with pkthdr.  We can't align past it.
	 */
	if (align >= MHLEN)
		return (NULL);

	/*
	 * Give us the full allocation or nothing.
	 * If len is zero return the smallest empty mbuf.
	 */
	m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
	if (m == NULL)
		return (NULL);
	m->m_data += align;

	/* Fill all mbufs with uio data and update header information. */
	for (mb = m; mb != NULL; mb = mb->m_next) {
		length = min(M_TRAILINGSPACE(mb), total - progress);

		error = uiomove(mtod(mb, void *), length, uio);
		if (error) {
			m_freem(m);
			return (NULL);
		}

		mb->m_len = length;
		progress += length;
		if (flags & M_PKTHDR)
			m->m_pkthdr.len += length;
	}
	KASSERT(progress == total, ("%s: progress != total", __func__));

	return (m);
}
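/*
 * Example (editorial sketch): turning a write(2)-style uio into a packet
 * chain, reserving room for a link-layer header in front:
 *
 *	m = m_uiotombuf(uio, M_WAITOK, 0, max_linkhdr, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */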
/*
 * Copy an mbuf chain into a uio limited by len if set.
 */
int
m_mbuftouio(struct uio *uio, struct mbuf *m, int len)
{
	int error, length, total;
	int progress = 0;

	if (len > 0)
		total = min(uio->uio_resid, len);
	else
		total = uio->uio_resid;

	/* Fill the uio with data from the mbufs. */
	for (; m != NULL; m = m->m_next) {
		length = min(m->m_len, total - progress);

		error = uiomove(mtod(m, void *), length, uio);
		if (error)
			return (error);

		progress += length;
	}

	return (0);
}
/*
 * Create a writable copy of the mbuf chain.  While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs.  The second mbuf in this chain is likely
 * to be a cluster.  The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc.  The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
 */
struct mbuf *
m_unshare(struct mbuf *m0, int how)
{
	struct mbuf *m, *mprev;
	struct mbuf *n, *mfirst, *mlast;
	int len, off;

	mprev = NULL;
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of it that we can use to coalesce.  We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster).  This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCLBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster.  This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
				    mtod(m, caddr_t), m->m_len);
				mprev->m_len += m->m_len;
				mprev->m_next = m->m_next;	/* unlink from chain */
				m_free(m);			/* reclaim mbuf */
#if 0
				newipsecstat.ips_mbcoalesced++;
#endif
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Writable mbufs are left alone (for now).
		 */
		if (M_WRITABLE(m)) {
			mprev = m;
			continue;
		}

		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
		 */
		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster or larger */
		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
		    m->m_len <= M_TRAILINGSPACE(mprev)) {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
			    mtod(m, caddr_t), m->m_len);
			mprev->m_len += m->m_len;
			mprev->m_next = m->m_next;	/* unlink from chain */
			m_free(m);			/* reclaim mbuf */
#if 0
			newipsecstat.ips_clcoalesced++;
#endif
			continue;
		}

		/*
		 * Allocate new space to hold the copy and copy the data.
		 * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
		 * splitting them into clusters.  We could just malloc a
		 * buffer and make it external but too many device drivers
		 * don't know how to break up the non-contiguous memory when
		 * doing DMA.
		 */
		n = m_getcl(how, m->m_type, m->m_flags);
		if (n == NULL) {
			m_freem(m0);
			return (NULL);
		}
		if (m->m_flags & M_PKTHDR) {
			KASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR",
			    __func__, m0, m));
			m_move_pkthdr(n, m);
		}
		len = m->m_len;
		off = 0;
		mfirst = n;
		mlast = NULL;
		for (;;) {
			int cc = min(len, MCLBYTES);
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
			n->m_len = cc;
			if (mlast != NULL)
				mlast->m_next = n;
			mlast = n;
#if 0
			newipsecstat.ips_clcopied++;
#endif

			len -= cc;
			if (len <= 0)
				break;
			off += cc;

			n = m_getcl(how, m->m_type, m->m_flags);
			if (n == NULL) {
				m_freem(mfirst);
				m_freem(m0);
				return (NULL);
			}
		}
		n->m_next = m->m_next;
		if (mprev == NULL)
			m0 = mfirst;		/* new head of chain */
		else
			mprev->m_next = mfirst;	/* replace old mbuf */
		m_free(m);			/* release old mbuf */
		mprev = mfirst;
	}
	return (m0);
}
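/*
 * Example (editorial sketch): IPsec-style use, obtaining a writable,
 * compacted chain before encrypting in place:
 *
 *	m0 = m_unshare(m0, M_NOWAIT);
 *	if (m0 == NULL)
 *		return (ENOBUFS);	// original chain already freed
 */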
#ifdef MBUF_PROFILING

#define MP_BUCKETS 32 /* don't just change this as things may overflow. */
struct mbufprofile {
	uintmax_t wasted[MP_BUCKETS];
	uintmax_t used[MP_BUCKETS];
	uintmax_t segments[MP_BUCKETS];
} mbprof;

#define MP_MAXDIGITS 21	/* strlen("16,000,000,000,000,000,000") == 21 */
#define MP_NUMLINES 6
#define MP_NUMSPERLINE 16
#define MP_EXTRABYTES 64	/* > strlen("used:\nwasted:\nsegments:\n") */
/* work out max space needed and add a bit of spare space too */
#define MP_MAXLINE ((MP_MAXDIGITS+1) * MP_NUMSPERLINE)
#define MP_BUFSIZE ((MP_MAXLINE * MP_NUMLINES) + 1 + MP_EXTRABYTES)

char mbprofbuf[MP_BUFSIZE];
void
m_profile(struct mbuf *m)
{
	int segments = 0;
	int used = 0;
	int wasted = 0;

	while (m) {
		segments++;
		used += m->m_len;
		if (m->m_flags & M_EXT) {
			wasted += MHLEN - sizeof(m->m_ext) +
			    m->m_ext.ext_size - m->m_len;
		} else {
			if (m->m_flags & M_PKTHDR)
				wasted += MHLEN - m->m_len;
			else
				wasted += MLEN - m->m_len;
		}
		m = m->m_next;
	}
	/* be paranoid.. it helps */
	if (segments > MP_BUCKETS - 1)
		segments = MP_BUCKETS - 1;
	if (used > 100000)
		used = 100000;
	if (wasted > 100000)
		wasted = 100000;
	/* store in the appropriate bucket */
	/* don't bother locking. if it's slightly off, so what? */
	mbprof.segments[segments]++;
	mbprof.used[fls(used)]++;
	mbprof.wasted[fls(wasted)]++;
}
static void
mbprof_textify(void)
{
	int offset;
	char *c;
	uintmax_t *p;

	p = &mbprof.wasted[0];
	c = mbprofbuf;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "wasted:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
	p = &mbprof.wasted[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
	p = &mbprof.used[0];
	c += offset;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "used:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
	p = &mbprof.used[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
	p = &mbprof.segments[0];
	c += offset;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "segments:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
	p = &mbprof.segments[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
}
static int
mbprof_handler(SYSCTL_HANDLER_ARGS)
{
	int error;

	mbprof_textify();
	error = SYSCTL_OUT(req, mbprofbuf, strlen(mbprofbuf) + 1);
	return (error);
}
static int
mbprof_clr_handler(SYSCTL_HANDLER_ARGS)
{
	int clear, error;

	clear = 0;
	error = sysctl_handle_int(oidp, &clear, 0, req);
	if (error || !req->newptr)
		return (error);

	if (clear) {
		bzero(&mbprof, sizeof(mbprof));
	}

	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofile, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, mbprof_handler, "A", "mbuf profiling statistics");

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofileclr, CTLTYPE_INT|CTLFLAG_RW,
    NULL, 0, mbprof_clr_handler, "I", "clear mbuf profiling statistics");

#endif