/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include "opt_mbuf_profiling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/sdt.h>

#include <vm/vm.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>

SDT_PROBE_DEFINE5_XLATE(sdt, , , m__init,
    "struct mbuf *", "mbufinfo_t *",
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__gethdr,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__get,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE4_XLATE(sdt, , , m__getcl,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "uint32_t", "uint32_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__clget,
    "struct mbuf *", "mbufinfo_t *",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t");

SDT_PROBE_DEFINE4_XLATE(sdt, , , m__cljget,
    "struct mbuf *", "mbufinfo_t *",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t",
    "void*", "void*");

SDT_PROBE_DEFINE(sdt, , , m__cljset);

SDT_PROBE_DEFINE1_XLATE(sdt, , , m__free,
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE1_XLATE(sdt, , , m__freem,
    "struct mbuf *", "mbufinfo_t *");

#include <security/mac/mac_framework.h>

int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
#ifdef MBUF_STRESS_TEST
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
int	m_defragrandomfailures;
#endif

/*
 * sysctl(8) exported objects
 */
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
    &max_linkhdr, 0, "Size of largest link layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
    &max_protohdr, 0, "Size of largest protocol layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
    &max_hdr, 0, "Size of largest link plus protocol header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
    &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
    &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
    &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
    &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
    &m_defragfailure, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
    &m_defragrandomfailures, 0, "");
#endif

/*
 * Ensure the correct size of various mbuf parameters.  It could be off due
 * to compiler-induced padding and alignment artifacts.
 */
CTASSERT(MSIZE - offsetof(struct mbuf, m_dat) == MLEN);
CTASSERT(MSIZE - offsetof(struct mbuf, m_pktdat) == MHLEN);

/*
 * mbuf data storage should be 64-bit aligned regardless of architectural
 * pointer size; check this is the case with and without a packet header.
 */
CTASSERT(offsetof(struct mbuf, m_dat) % 8 == 0);
CTASSERT(offsetof(struct mbuf, m_pktdat) % 8 == 0);

/*
 * While the specific values here don't matter too much (i.e., +/- a few
 * words), we do want to ensure that changes to these values are carefully
 * reasoned about and properly documented.  This is especially the case as
 * network-protocol and device-driver modules encode these layouts, and must
 * be recompiled if the structures change.  Check these values at compile time
 * against the ones documented in comments in mbuf.h.
 *
 * NB: Possibly they should be documented there via #define's and not just
 * comments.
 */
#if defined(__LP64__)
CTASSERT(offsetof(struct mbuf, m_dat) == 32);
CTASSERT(sizeof(struct pkthdr) == 56);
CTASSERT(sizeof(struct m_ext) == 48);
#else
CTASSERT(offsetof(struct mbuf, m_dat) == 24);
CTASSERT(sizeof(struct pkthdr) == 48);
CTASSERT(sizeof(struct m_ext) == 28);
#endif

/*
 * Assert that the queue(3) macros produce code of the same size as an old
 * plain pointer does.
 */
#ifdef INVARIANTS
static struct mbuf __used m_assertbuf;
CTASSERT(sizeof(m_assertbuf.m_slist) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_stailq) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_slistpkt) == sizeof(m_assertbuf.m_nextpkt));
CTASSERT(sizeof(m_assertbuf.m_stailqpkt) == sizeof(m_assertbuf.m_nextpkt));
#endif

/*
 * Attach the cluster from *m to *n, set up m_ext in *n
 * and bump the refcount of the cluster.
 */
mb_dupcl(struct mbuf *n, struct mbuf *m)
{
	volatile u_int *refcnt;

	KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));
	KASSERT(!(n->m_flags & M_EXT), ("%s: M_EXT set on %p", __func__, n));

	/*
	 * Cache access optimization.  For most kinds of external
	 * storage we don't need a full copy of m_ext, since the
	 * holder of the 'ext_count' is responsible for carrying the
	 * free routine and its arguments.  The exception is EXT_EXTREF,
	 * where 'ext_cnt' doesn't point into the mbuf at all.
	 */
	if (m->m_ext.ext_type == EXT_EXTREF)
		bcopy(&m->m_ext, &n->m_ext, sizeof(struct m_ext));
	else
		bcopy(&m->m_ext, &n->m_ext, m_ext_copylen);
	n->m_flags |= m->m_flags & (M_RDONLY | M_NOMAP);

	/* See if this is the mbuf that holds the embedded refcount. */
	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = n->m_ext.ext_cnt = &m->m_ext.ext_count;
		n->m_ext.ext_flags &= ~EXT_FLAG_EMBREF;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
	}

	atomic_add_int(refcnt, 1);
}

m_demote_pkthdr(struct mbuf *m)
{

	m_tag_delete_chain(m, NULL);
	m->m_flags &= ~M_PKTHDR;
	bzero(&m->m_pkthdr, sizeof(struct pkthdr));
}

/*
 * Clean up mbuf (chain) from any tags and packet headers.
 * If "all" is set then the first mbuf in the chain will be
 * cleaned too.
 */
m_demote(struct mbuf *m0, int all, int flags)
{
	struct mbuf *m;

	for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
		KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt in m %p, m0 %p",
		    __func__, m, m0));
		if (m->m_flags & M_PKTHDR)
			m_demote_pkthdr(m);
		m->m_flags = m->m_flags & (M_EXT | M_RDONLY | M_NOFREE |
		    M_NOMAP | flags);
	}
}
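
/*
 * Usage sketch (hypothetical caller, not part of this file): strip
 * headers and tags from every mbuf in a chain, including the first,
 * before recycling the chain as plain storage:
 *
 *	m_demote(m, 1, 0);
 */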

/*
 * Sanity checks on mbuf (chain) for use in KASSERT() and general
 * debugging.
 * Returns 0 or panics when bad and 1 on all tests passed.
 * Sanitize, 0 to run M_SANITY_ACTION, 1 to garble things so they
 * blow up later.
 */
m_sanity(struct mbuf *m0, int sanitize)
{
	struct mbuf *m;
	char *a, *b;
	int pktlen = 0;

#ifdef INVARIANTS
#define	M_SANITY_ACTION(s)	panic("mbuf %p: " s, m)
#else
#define	M_SANITY_ACTION(s)	printf("mbuf %p: " s, m)
#endif

	for (m = m0; m != NULL; m = m->m_next) {
		/*
		 * Basic pointer checks.  If any of these fails then some
		 * unrelated kernel memory before or after us is trashed.
		 * No way to recover from that.
		 */
		a = M_START(m);
		b = a + M_SIZE(m);
		if ((caddr_t)m->m_data < a)
			M_SANITY_ACTION("m_data outside mbuf data range left");
		if ((caddr_t)m->m_data > b)
			M_SANITY_ACTION("m_data outside mbuf data range right");
		if ((caddr_t)m->m_data + m->m_len > b)
			M_SANITY_ACTION("m_data + m_len exceeds mbuf space");

		/* m->m_nextpkt may only be set on first mbuf in chain. */
		if (m != m0 && m->m_nextpkt != NULL) {
			if (sanitize) {
				m_freem(m->m_nextpkt);
				m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
			} else
				M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
		}

		/* packet length (not mbuf length!) calculation */
		if (m0->m_flags & M_PKTHDR)
			pktlen += m->m_len;

		/* m_tags may only be attached to first mbuf in chain. */
		if (m != m0 && m->m_flags & M_PKTHDR &&
		    !SLIST_EMPTY(&m->m_pkthdr.tags)) {
			if (sanitize) {
				m_tag_delete_chain(m, NULL);
				/* put in 0xDEADC0DE perhaps? */
			} else
				M_SANITY_ACTION("m_tags on in-chain mbuf");
		}

		/* M_PKTHDR may only be set on first mbuf in chain */
		if (m != m0 && m->m_flags & M_PKTHDR) {
			if (sanitize) {
				bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
				m->m_flags &= ~M_PKTHDR;
				/* put in 0xDEADC0DE and leave hdr flag in */
			} else
				M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
		}
	}
	m = m0;
	if (pktlen && pktlen != m->m_pkthdr.len) {
		if (sanitize)
			m->m_pkthdr.len = 0;
		else
			M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
	}
	return 1;

#undef	M_SANITY_ACTION
}
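
/*
 * A hedged usage sketch (hypothetical caller): in development this is
 * typically wrapped in an assertion, e.g.
 *
 *	KASSERT(m_sanity(m, 0), ("%s: insane mbuf chain", __func__));
 */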

/*
 * Non-inlined part of m_init().
 */
m_pkthdr_init(struct mbuf *m, int how)
{
	int error;

	m->m_data = m->m_pktdat;
	bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
#ifdef NUMA
	m->m_pkthdr.numa_domain = M_NODOM;
#endif
#ifdef MAC
	/* If the label init fails, fail the alloc */
	error = mac_mbuf_init(m, how);
	if (error)
		return (error);
#endif

	return (0);
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{

#if 0
	/* see below for why these are not enabled */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
	    ("m_move_pkthdr: to has tags"));
#endif
#ifdef MAC
	/*
	 * XXXMAC: It could be this should also occur for non-MAC?
	 */
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) |
	    (to->m_flags & (M_EXT | M_NOMAP));
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
	if (from->m_pkthdr.csum_flags & CSUM_SND_TAG) {
		from->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
		from->m_pkthdr.snd_tag = NULL;
	}
}
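
/*
 * Usage sketch (hypothetical caller): transplant the header from the
 * old first mbuf onto a freshly allocated one when prepending, as
 * m_prepend() below does:
 *
 *	if (m->m_flags & M_PKTHDR)
 *		m_move_pkthdr(mn, m);
 */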

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{

#if 0
	/*
	 * The mbuf allocator only initializes the pkthdr
	 * when the mbuf is allocated with m_gethdr(). Many users
	 * (e.g. m_copy*, m_prepend) use m_get() and then
	 * smash the pkthdr as needed causing these
	 * assertions to trip.  For now just disable them.
	 */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
#endif
	MBUF_CHECKSLEEP(how);
#ifdef MAC
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) |
	    (to->m_flags & (M_EXT | M_NOMAP));
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	if (from->m_pkthdr.csum_flags & CSUM_SND_TAG)
		m_snd_tag_ref(from->m_pkthdr.snd_tag);
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		m_move_pkthdr(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < M_SIZE(m))
		M_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
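
/*
 * Callers normally reach this through the M_PREPEND() macro, which
 * uses existing leading space when it can.  A hedged sketch of making
 * room for an IP header in front of "m":
 *
 *	M_PREPEND(m, sizeof(struct ip), M_NOWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */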

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	MBUF_CHECKSLEEP(wait);
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		if (copyhdr)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mb_dupcl(n, m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (u_int)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}

	return (top);
nospace:
	m_freem(top);
	return (NULL);
}
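
/*
 * Example (hypothetical caller): take a read-only reference copy of a
 * whole chain; clusters are shared, not duplicated:
 *
 *	struct mbuf *n;
 *
 *	n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);
 */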

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MBUF_CHECKSLEEP(how);
	n = m_get(how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		mb_dupcl(n, m);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		o = m_get(how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			mb_dupcl(n, m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (NULL);
}

m_copyfromunmapped(const struct mbuf *m, int off, int len, caddr_t cp)
{
	struct iovec iov;
	struct uio uio;
	int error;

	KASSERT(off >= 0, ("m_copyfromunmapped: negative off %d", off));
	KASSERT(len >= 0, ("m_copyfromunmapped: negative len %d", len));
	KASSERT(off < m->m_len,
	    ("m_copyfromunmapped: off exceeds mbuf length"));
	iov.iov_base = cp;
	iov.iov_len = len;
	uio.uio_resid = len;
	uio.uio_iov = &iov;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_rw = UIO_READ;
	error = m_unmappedtouio(m, off, &uio, len);
	KASSERT(error == 0, ("m_unmappedtouio failed: off %d, len %d", off,
	    len));
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	u_int count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		if ((m->m_flags & M_NOMAP) != 0)
			m_copyfromunmapped(m, off, count, cp);
		else
			bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
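
/*
 * Example (hypothetical caller): linearize a possibly split TCP
 * header into a stack buffer:
 *
 *	struct tcphdr th;
 *
 *	m_copydata(m, off, sizeof(th), (caddr_t)&th);
 */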

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
m_dup(const struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	MBUF_CHECKSLEEP(how);
	/* Sanity check */
	if (m == NULL)
		return (NULL);
	M_ASSERTPKTHDR(m);

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		if (remain >= MINCLSIZE) {
			n = m_getcl(how, m->m_type, 0);
			nsize = MCLBYTES;
		} else {
			n = m_get(how, m->m_type);
			nsize = MLEN;
		}
		if (n == NULL)
			goto nospace;

		if (top == NULL) {		/* First one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how)) {
				m_free(n);
				goto nospace;
			}
			if ((n->m_flags & M_EXT) == 0)
				nsize = MHLEN;
			n->m_flags &= ~M_RDONLY;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
	return (NULL);
}
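
/*
 * Example (hypothetical caller): deep-copy a packet before modifying
 * bytes that may live in shared, read-only clusters:
 *
 *	struct mbuf *w;
 *
 *	w = m_dup(m, M_NOWAIT);
 *	if (w == NULL)
 *		return (ENOBUFS);
 */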

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
m_cat(struct mbuf *m, struct mbuf *n)
{

	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (!M_WRITABLE(m) ||
		    (n->m_flags & M_NOMAP) != 0 ||
		    M_TRAILINGSPACE(m) < n->m_len) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Concatenate two pkthdr mbuf chains.
 */
m_catpkt(struct mbuf *m, struct mbuf *n)
{

	M_ASSERTPKTHDR(m);
	M_ASSERTPKTHDR(n);

	m->m_pkthdr.len += n->m_pkthdr.len;
	m_demote(n, 1, 0);

	m_cat(m, n);
}
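
/*
 * Sketch (hypothetical reassembly path): fold chain "n" into "m".
 * Use m_cat() for raw data chains, or m_catpkt() when both carry
 * packet headers, since the latter also merges the packet length:
 *
 *	m_catpkt(m, n);		(n is consumed)
 */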

m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		if (mp->m_flags & M_PKTHDR)
			mp->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				if (m->m_next != NULL) {
					m_freem(m->m_next);
					m->m_next = NULL;
				}
				break;
			}
			count -= m->m_len;
		}
	}
}
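
/*
 * Example (hypothetical Ethernet input path): strip the link header
 * from the front and a 4-byte FCS from the tail:
 *
 *	m_adj(m, ETHER_HDR_LEN);	(positive: trim from head)
 *	m_adj(m, -ETHER_CRC_LEN);	(negative: trim from tail)
 */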

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work
 * for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns NULL on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	KASSERT((n->m_flags & M_NOMAP) == 0,
	    ("%s: unmapped mbuf %p", __func__, n));

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		m = m_get(M_NOWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		if (n->m_flags & M_PKTHDR)
			m_move_pkthdr(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (NULL);
}
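
/*
 * The canonical use (hypothetical caller): make a protocol header
 * contiguous before casting; the chain head may be replaced:
 *
 *	if ((m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;		(chain was freed on failure)
 *	ip = mtod(m, struct ip *);
 */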

/*
 * Like m_pullup(), except a new mbuf is always allocated, and we allow
 * the amount of empty space before the data in the new mbuf to be specified
 * (in the event that the caller expects to prepend later).
 */
m_copyup(struct mbuf *n, int len, int dstoff)
{
	struct mbuf *m;
	int count, space;

	if (len > (MHLEN - dstoff))
		goto bad;
	m = m_get(M_NOWAIT, n->m_type);
	if (m == NULL)
		goto bad;
	if (n->m_flags & M_PKTHDR)
		m_move_pkthdr(m, n);
	m->m_data += dstoff;
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (NULL);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	u_int len = len0, remain;

	MBUF_CHECKSLEEP(wait);
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR && remain == 0) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_next = m->m_next;
		m->m_next = NULL;
		if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) {
			n->m_pkthdr.snd_tag =
			    m_snd_tag_ref(m0->m_pkthdr.snd_tag);
			n->m_pkthdr.csum_flags |= CSUM_SND_TAG;
		} else
			n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		return (n);
	} else if (m0->m_flags & M_PKTHDR) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) {
			n->m_pkthdr.snd_tag =
			    m_snd_tag_ref(m0->m_pkthdr.snd_tag);
			n->m_pkthdr.csum_flags |= CSUM_SND_TAG;
		} else
			n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			M_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			M_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		n = m_get(wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + len;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
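
/*
 * Example (hypothetical fragmentation path): split off everything
 * past "mtu" bytes; on failure the original chain is unchanged:
 *
 *	struct mbuf *tail;
 *
 *	tail = m_split(m, mtu, M_NOWAIT);
 *	if (tail == NULL)
 *		return (ENOBUFS);
 */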

/*
 * Routine to copy from device local memory into mbufs.
 * Note that the `off' argument is the offset into the first mbuf of
 * the target chain at which to begin copying the data.
 */
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = NULL, **mp = &top;
	int len;

	if (off < 0 || off > MHLEN)
		return (NULL);

	while (totlen > 0) {
		if (top == NULL) {	/* First one, must be PKTHDR */
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
				len = MCLBYTES;
			} else {
				m = m_gethdr(M_NOWAIT, MT_DATA);
				len = MHLEN;

				/* Place initial small packet/header at end of mbuf */
				if (m && totlen + off + max_linkhdr <= MHLEN) {
					m->m_data += max_linkhdr;
					len -= max_linkhdr;
				}
			}
			if (m == NULL)
				return NULL;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = totlen;
		} else {
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_NOWAIT, MT_DATA, 0);
				len = MCLBYTES;
			} else {
				m = m_get(M_NOWAIT, MT_DATA);
				len = MLEN;
			}
			if (m == NULL) {
				m_freem(top);
				return NULL;
			}
		}
		if (off) {
			m->m_data += off;
			len -= off;
			off = 0;
		}
		m->m_len = len = min(totlen, len);
		if (copy)
			copy(buf, mtod(m, caddr_t), (u_int)len);
		else
			bcopy(buf, mtod(m, caddr_t), (u_int)len);
		buf += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				goto out;
			bzero(mtod(n, caddr_t), MLEN);
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		if (m->m_next == NULL && (len > m->m_len - off)) {
			m->m_len += min(len - (m->m_len - off),
			    M_TRAILINGSPACE(m));
		}
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
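
/*
 * Example (hypothetical caller): rewrite a 16-bit checksum field in
 * place, letting m_copyback() find the right mbuf ("csum_off" is an
 * assumed offset):
 *
 *	uint16_t sum = 0;
 *
 *	m_copyback(m, csum_off, sizeof(sum), (c_caddr_t)&sum);
 */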

/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space, remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(M_NOWAIT, m->m_type);
		if (n == NULL)
			break;
		n->m_len = min(MLEN, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len, remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
}
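
/*
 * Example (hypothetical caller): append a small trailer, honoring the
 * 1-on-success convention ("trailer" is an assumed local struct):
 *
 *	if (!m_append(m, sizeof(trailer), (c_caddr_t)&trailer))
 *		return (ENOBUFS);
 */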

/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		count = min(m->m_len - off, len);
		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}
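
/*
 * Sketch of the callback API (the helper "sum_cb" is hypothetical):
 * m_apply() hands each contiguous region to f and stops at the first
 * nonzero return value.
 *
 *	static int
 *	sum_cb(void *arg, void *data, u_int len)
 *	{
 *		uint32_t *sum = arg;
 *		u_char *p = data;
 *
 *		while (len-- > 0)
 *			*sum += *p++;
 *		return (0);
 *	}
 *
 *	uint32_t sum = 0;
 *	(void)m_apply(m, off, len, sum_cb, &sum);
 */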

/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
m_getptr(struct mbuf *m, int loc, int *off)
{

	while (loc >= 0) {
		/* Normal end of search. */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;
			if (m->m_next == NULL) {
				if (m->m_len == 0) {
					/* Point at the end of valid data. */
					*off = m->m_len;
					return (m);
				}
				return (NULL);
			}
			m = m->m_next;
		}
	}
	return (NULL);
}

m_print(const struct mbuf *m, int maxlen)
{
	int len;
	int pdata;
	const struct mbuf *m2;

	if (m == NULL) {
		printf("mbuf: %p\n", m);
		return;
	}

	if (m->m_flags & M_PKTHDR)
		len = m->m_pkthdr.len;
	else
		len = -1;
	m2 = m;
	while (m2 != NULL && (len == -1 || len)) {
		pdata = m2->m_len;
		if (maxlen != -1 && pdata > maxlen)
			pdata = maxlen;
		printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
		    m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
		    "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
		    "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
		if (pdata)
			printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	if (len > 0)
		printf("%d bytes unaccounted for.\n", len);
	return;
}

m_fixhdr(struct mbuf *m0)
{
	u_int len;

	len = m_length(m0, NULL);
	m0->m_pkthdr.len = len;
	return (len);
}

m_length(struct mbuf *m0, struct mbuf **last)
{
	struct mbuf *m;
	u_int len;

	len = 0;
	for (m = m0; m != NULL; m = m->m_next) {
		len += m->m_len;
		if (m->m_next == NULL)
			break;
	}
	if (last != NULL)
		*last = m;
	return (len);
}

/*
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed-in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a chain without a packet header is passed in, the original
 * chain will be returned unharmed.
 */
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length;

	MBUF_CHECKSLEEP(how);
	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	m_fixhdr(m0); /* Needed sanity check */

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = arc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	if (m0->m_pkthdr.len > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m_final = m_gethdr(how, MT_DATA);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			if (length > MLEN)
				m_new = m_getcl(how, MT_DATA, 0);
			else
				m_new = m_get(how, MT_DATA);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
#ifdef MBUF_STRESS_TEST
	if (m0->m_next == NULL)
		m_defraguseless++;
#endif
	m_freem(m0);
	m0 = m_final;
#ifdef MBUF_STRESS_TEST
	m_defragpackets++;
	m_defragbytes += m0->m_pkthdr.len;
#endif
	return (m0);
nospace:
#ifdef MBUF_STRESS_TEST
	m_defragfailure++;
#endif
	if (m_final)
		m_freem(m_final);
	return (NULL);
}
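
/*
 * Example (hypothetical driver transmit path): compact a long chain
 * to satisfy a DMA segment limit; keep the original on failure:
 *
 *	struct mbuf *d;
 *
 *	d = m_defrag(m, M_NOWAIT);
 *	if (d != NULL)
 *		m = d;		(m_defrag freed the old chain)
 */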

/*
 * Return the number of fragments an mbuf will use.  This is usually
 * used as a proxy for the number of scatter/gather elements needed by
 * a DMA engine to access an mbuf.  In general mapped mbufs are
 * assumed to be backed by physically contiguous buffers that only
 * need a single fragment.  Unmapped mbufs, on the other hand, can
 * span disjoint physical pages.
 */
frags_per_mbuf(struct mbuf *m)
{
	struct mbuf_ext_pgs *ext_pgs;
	int frags;

	if ((m->m_flags & M_NOMAP) == 0)
		return (1);

	/*
	 * The header and trailer are counted as a single fragment
	 * each when present.
	 *
	 * XXX: This overestimates the number of fragments by assuming
	 * all the backing physical pages are disjoint.
	 */
	ext_pgs = m->m_ext.ext_pgs;
	frags = 0;
	if (ext_pgs->hdr_len != 0)
		frags++;
	frags += ext_pgs->npgs;
	if (ext_pgs->trail_len != 0)
		frags++;

	return (frags);
}

/*
 * Defragment an mbuf chain, returning at most maxfrags separate
 * mbufs+clusters.  If this is not possible NULL is returned and
 * the original mbuf chain is left in its present (potentially
 * modified) state.  We use two techniques: collapsing consecutive
 * mbufs and replacing consecutive mbufs by a cluster.
 *
 * NB: this should really be named m_defrag but that name is taken
 */
m_collapse(struct mbuf *m0, int how, int maxfrags)
{
	struct mbuf *m, *n, *n2, **prev;
	int curfrags;

	/*
	 * Calculate the current number of frags.
	 */
	curfrags = 0;
	for (m = m0; m != NULL; m = m->m_next)
		curfrags += frags_per_mbuf(m);
	/*
	 * First, try to collapse mbufs.  Note that we always collapse
	 * towards the front so we don't need to deal with moving the
	 * pkthdr.  This may be suboptimal if the first mbuf has much
	 * less data than the following.
	 */
	m = m0;
again:
	for (;;) {
		n = m->m_next;
		if (n == NULL)
			break;
		if (M_WRITABLE(m) &&
		    n->m_len < M_TRAILINGSPACE(m)) {
			m_copydata(n, 0, n->m_len,
			    mtod(m, char *) + m->m_len);
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			curfrags -= frags_per_mbuf(n);
			m_free(n);
			if (curfrags <= maxfrags)
				return (m0);
		} else
			m = n;
	}
	KASSERT(maxfrags > 1,
	    ("maxfrags %u, but normal collapse failed", maxfrags));
	/*
	 * Collapse consecutive mbufs to a cluster.
	 */
	prev = &m0->m_next;		/* NB: not the first mbuf */
	while ((n = *prev) != NULL) {
		if ((n2 = n->m_next) != NULL &&
		    n->m_len + n2->m_len < MCLBYTES) {
			m = m_getcl(how, MT_DATA, 0);
			if (m == NULL)
				goto bad;
			m_copydata(n, 0, n->m_len, mtod(m, char *));
			m_copydata(n2, 0, n2->m_len,
			    mtod(m, char *) + n->m_len);
			m->m_len = n->m_len + n2->m_len;
			m->m_next = n2->m_next;
			*prev = m;
			curfrags += 1;	/* For the new cluster */
			curfrags -= frags_per_mbuf(n);
			curfrags -= frags_per_mbuf(n2);
			m_free(n);
			m_free(n2);
			if (curfrags <= maxfrags)
				return (m0);
			/*
			 * Still not there, try the normal collapse
			 * again before we allocate another cluster.
			 */
			goto again;
		} else
			prev = &n->m_next;
	}
	/*
	 * No place where we can collapse to a cluster; punt.
	 * This can occur if, for example, you request 2 frags
	 * but the packet requires that both be clusters (we
	 * never reallocate the first mbuf to avoid moving the
	 * data around).
	 */
bad:
	return (NULL);
}

#ifdef MBUF_STRESS_TEST

/*
 * Fragment an mbuf chain.  There's no reason you'd ever want to do
 * this in normal usage, but it's great for stress testing various
 * mbuf consumers.
 *
 * If fragmentation is not possible, the original chain will be
 * returned.
 *
 * Possible length values:
 * 0	 no fragmentation will occur
 * > 0	each fragment will be of the specified length
 * -1	each fragment will be the same random value in length
 * -2	each fragment's length will be entirely random
 *	(Random values range from 1 to 256)
 */
m_fragment(struct mbuf *m0, int how, int length)
{
	struct mbuf *m_first, *m_last;
	int divisor = 255, progress = 0, fraglen;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	if (length == 0 || length < -2)
		return (m0);
	if (length > MCLBYTES)
		length = MCLBYTES;
	if (length < 0 && divisor > MCLBYTES)
		divisor = MCLBYTES;
	if (length == -1)
		length = 1 + (arc4random() % divisor);
	if (length > 0)
		fraglen = length;

	m_fixhdr(m0); /* Needed sanity check */

	m_first = m_getcl(how, MT_DATA, M_PKTHDR);
	if (m_first == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_first, m0, how) == 0)
		goto nospace;

	m_last = m_first;

	while (progress < m0->m_pkthdr.len) {
		if (length == -2)
			fraglen = 1 + (arc4random() % divisor);
		if (fraglen > m0->m_pkthdr.len - progress)
			fraglen = m0->m_pkthdr.len - progress;

		if (progress != 0) {
			struct mbuf *m_new = m_getcl(how, MT_DATA, 0);
			if (m_new == NULL)
				goto nospace;

			m_last->m_next = m_new;
			m_last = m_new;
		}

		m_copydata(m0, progress, fraglen, mtod(m_last, caddr_t));
		progress += fraglen;
		m_last->m_len = fraglen;
	}
	m_freem(m0);
	m0 = m_first;
	return (m0);
nospace:
	if (m_first)
		m_freem(m_first);
	/* Return the original chain on failure */
	return (m0);
}

#endif

/*
 * Free pages from mbuf_ext_pgs, assuming they were allocated via
 * vm_page_alloc() and aren't associated with any object.  The
 * complement to the allocator in m_uiotombuf_nomap().
 */
mb_free_mext_pgs(struct mbuf *m)
{
	struct mbuf_ext_pgs *ext_pgs;
	vm_page_t pg;
	int wire_adj = 0;

	MBUF_EXT_PGS_ASSERT(m);
	ext_pgs = m->m_ext.ext_pgs;
	for (int i = 0; i < ext_pgs->npgs; i++) {
		pg = PHYS_TO_VM_PAGE(ext_pgs->pa[i]);
		/*
		 * Note: page is not locked, as it has no
		 * object and is not on any queues.
		 */
		vm_page_free_toq(pg);
		wire_adj++;
	}
	if (wire_adj)
		vm_wire_sub(wire_adj);
}

static struct mbuf *
m_uiotombuf_nomap(struct uio *uio, int how, int len, int maxseg, int flags)
{
	struct mbuf *m, *mb, *prev;
	struct mbuf_ext_pgs *pgs;
	vm_page_t pg_array[MBUF_PEXT_MAX_PGS];
	int error, length, i, needed, wire_adj = 0;
	ssize_t total;
	int pflags = malloc2vm_flags(how) | VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP;

	/*
	 * len can be zero or an arbitrarily large value bound by
	 * the total data supplied by the uio.
	 */
	if (len > 0)
		total = MIN(uio->uio_resid, len);
	else
		total = uio->uio_resid;

	if (maxseg == 0)
		maxseg = MBUF_PEXT_MAX_PGS * PAGE_SIZE;

	/*
	 * Allocate the pages
	 */
	m = NULL;
	while (total > 0) {
		mb = mb_alloc_ext_pgs(how, (flags & M_PKTHDR),
		    mb_free_mext_pgs);
		if (mb == NULL)
			goto failed;
		if (m == NULL)
			m = mb;
		else
			prev->m_next = mb;
		prev = mb;
		pgs = mb->m_ext.ext_pgs;
		needed = length = MIN(maxseg, total);
		for (i = 0; needed > 0; i++, needed -= PAGE_SIZE) {
retry_page:
			pg_array[i] = vm_page_alloc(NULL, 0, pflags);
			if (pg_array[i] == NULL) {
				if (wire_adj)
					vm_wire_add(wire_adj);
				wire_adj = 0;
				if (how & M_NOWAIT) {
					goto failed;
				} else {
					vm_wait(NULL);
					goto retry_page;
				}
			}
			wire_adj++;
			pg_array[i]->flags &= ~PG_ZERO;
			pgs->pa[i] = VM_PAGE_TO_PHYS(pg_array[i]);
			pgs->npgs++;
		}
		pgs->last_pg_len = length - PAGE_SIZE * (pgs->npgs - 1);
		MBUF_EXT_PGS_ASSERT_SANITY(pgs);
		vm_wire_add(wire_adj);
		wire_adj = 0;
		total -= length;
		error = uiomove_fromphys(pg_array, 0, length, uio);
		if (error != 0)
			goto failed;
		mb->m_len = length;
		mb->m_ext.ext_size += PAGE_SIZE * pgs->npgs;
		if (flags & M_PKTHDR)
			m->m_pkthdr.len += length;
	}
	return (m);

failed:
	m_freem(m);
	return (NULL);
}

/*
 * Copy the contents of uio into a properly sized mbuf chain.
 */
m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
{
	struct mbuf *m, *mb;
	int error, length;
	ssize_t total;
	int progress = 0;

	if (flags & M_NOMAP)
		return (m_uiotombuf_nomap(uio, how, len, align, flags));

	/*
	 * len can be zero or an arbitrarily large value bound by
	 * the total data supplied by the uio.
	 */
	if (len > 0)
		total = (uio->uio_resid < len) ? uio->uio_resid : len;
	else
		total = uio->uio_resid;

	/*
	 * The smallest unit returned by m_getm2() is a single mbuf
	 * with pkthdr.  We can't align past it.
	 */
	if (align >= MHLEN)
		return (NULL);

	/*
	 * Give us the full allocation or nothing.
	 * If len is zero return the smallest empty mbuf.
	 */
	m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
	if (m == NULL)
		return (NULL);
	m->m_data += align;

	/* Fill all mbufs with uio data and update header information. */
	for (mb = m; mb != NULL; mb = mb->m_next) {
		length = min(M_TRAILINGSPACE(mb), total - progress);

		error = uiomove(mtod(mb, void *), length, uio);
		if (error) {
			m_freem(m);
			return (NULL);
		}

		mb->m_len = length;
		progress += length;
		if (flags & M_PKTHDR)
			m->m_pkthdr.len += length;
	}
	KASSERT(progress == total, ("%s: progress != total", __func__));

	return (m);
}
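
/*
 * Example (hypothetical socket send path): turn the user data in a
 * uio into a packet with room reserved up front for link headers:
 *
 *	struct mbuf *m;
 *
 *	m = m_uiotombuf(uio, M_WAITOK, 0, max_linkhdr, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */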

/*
 * Copy data from an unmapped mbuf into a uio limited by len if set.
 */
m_unmappedtouio(const struct mbuf *m, int m_off, struct uio *uio, int len)
{
	struct mbuf_ext_pgs *ext_pgs;
	vm_page_t pg;
	int error, i, off, pglen, pgoff, seglen, segoff;

	MBUF_EXT_PGS_ASSERT(m);
	ext_pgs = m->m_ext.ext_pgs;
	error = 0;

	/* Skip over any data removed from the front. */
	off = mtod(m, vm_offset_t);
	off += m_off;
	if (ext_pgs->hdr_len != 0) {
		if (off >= ext_pgs->hdr_len) {
			off -= ext_pgs->hdr_len;
		} else {
			seglen = ext_pgs->hdr_len - off;
			segoff = off;
			seglen = min(seglen, len);
			off = 0;
			len -= seglen;
			error = uiomove(&ext_pgs->hdr[segoff], seglen, uio);
		}
	}
	pgoff = ext_pgs->first_pg_off;
	for (i = 0; i < ext_pgs->npgs && error == 0 && len > 0; i++) {
		pglen = mbuf_ext_pg_len(ext_pgs, i, pgoff);
		if (off >= pglen) {
			off -= pglen;
			pgoff = 0;
			continue;
		}
		seglen = pglen - off;
		segoff = pgoff + off;
		off = 0;
		seglen = min(seglen, len);
		len -= seglen;
		pg = PHYS_TO_VM_PAGE(ext_pgs->pa[i]);
		error = uiomove_fromphys(&pg, segoff, seglen, uio);
		pgoff = 0;
	}
	if (len != 0 && error == 0) {
		KASSERT((off + len) <= ext_pgs->trail_len,
		    ("off + len > trail (%d + %d > %d, m_off = %d)", off, len,
		    ext_pgs->trail_len, m_off));
		error = uiomove(&ext_pgs->trail[off], len, uio);
	}
	return (error);
}

/*
 * Copy an mbuf chain into a uio limited by len if set.
 */
m_mbuftouio(struct uio *uio, const struct mbuf *m, int len)
{
	int error, length, total;
	int progress = 0;

	if (len > 0)
		total = min(uio->uio_resid, len);
	else
		total = uio->uio_resid;

	/* Fill the uio with data from the mbufs. */
	for (; m != NULL; m = m->m_next) {
		length = min(m->m_len, total - progress);

		if ((m->m_flags & M_NOMAP) != 0)
			error = m_unmappedtouio(m, 0, uio, length);
		else
			error = uiomove(mtod(m, void *), length, uio);
		if (error)
			return (error);

		progress += length;
	}

	return (0);
}

/*
 * Create a writable copy of the mbuf chain.  While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs.  The second mbuf in this chain is likely
 * to be a cluster.  The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc.  The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
 */
m_unshare(struct mbuf *m0, int how)
{
	struct mbuf *m, *mprev;
	struct mbuf *n, *mfirst, *mlast;
	int len, off;

	mprev = NULL;
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of it that we can use to coalesce.  We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster).  This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCLBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster.  This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
				    mtod(m, caddr_t), m->m_len);
				mprev->m_len += m->m_len;
				mprev->m_next = m->m_next;	/* unlink from chain */
				m_free(m);			/* reclaim mbuf */
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Writable mbufs are left alone (for now).
		 */
		if (M_WRITABLE(m)) {
			mprev = m;
			continue;
		}

		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
		 */
		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster or larger */
		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
		    m->m_len <= M_TRAILINGSPACE(mprev)) {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
			    mtod(m, caddr_t), m->m_len);
			mprev->m_len += m->m_len;
			mprev->m_next = m->m_next;	/* unlink from chain */
			m_free(m);			/* reclaim mbuf */
			continue;
		}

		/*
		 * Allocate new space to hold the copy and copy the data.
		 * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
		 * splitting them into clusters.  We could just malloc a
		 * buffer and make it external but too many device drivers
		 * don't know how to break up the non-contiguous memory when
		 * doing DMA.
		 */
		n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
		if (n == NULL) {
			m_freem(m0);
			return (NULL);
		}
		if (m->m_flags & M_PKTHDR) {
			KASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR",
			    __func__, m0, m));
			m_move_pkthdr(n, m);
		}
		len = m->m_len;
		off = 0;
		mfirst = n;
		mlast = NULL;
		for (;;) {
			int cc = min(len, MCLBYTES);
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
			n->m_len = cc;
			if (mlast != NULL)
				mlast->m_next = n;
			mlast = n;
#if 0
			newipsecstat.ips_clcopied++;
#endif

			len -= cc;
			if (len <= 0)
				break;
			off += cc;

			n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
			if (n == NULL) {
				m_freem(mfirst);
				m_freem(m0);
				return (NULL);
			}
		}
		n->m_next = m->m_next;
		if (mprev == NULL)
			m0 = mfirst;		/* new head of chain */
		else
			mprev->m_next = mfirst;	/* replace old mbuf */
		m_free(m);			/* release old mbuf */
		mprev = mfirst;
	}
	return (m0);
}

#ifdef MBUF_PROFILING

#define MP_BUCKETS 32	/* don't just change this as things may overflow. */
struct mbufprofile {
	uintmax_t wasted[MP_BUCKETS];
	uintmax_t used[MP_BUCKETS];
	uintmax_t segments[MP_BUCKETS];
} mbprof;

#define MP_MAXDIGITS 21	/* enough for a uintmax_t in decimal (20 digits) plus one */
#define MP_NUMLINES 6
#define MP_NUMSPERLINE 16
#define MP_EXTRABYTES 64	/* > strlen("used:\nwasted:\nsegments:\n") */
/* work out max space needed and add a bit of spare space too */
#define MP_MAXLINE ((MP_MAXDIGITS+1) * MP_NUMSPERLINE)
#define MP_BUFSIZE ((MP_MAXLINE * MP_NUMLINES) + 1 + MP_EXTRABYTES)

char mbprofbuf[MP_BUFSIZE];

m_profile(struct mbuf *m)
{
	int segments = 0;
	int used = 0;
	int wasted = 0;

	while (m) {
		segments++;
		used += m->m_len;
		if (m->m_flags & M_EXT) {
			wasted += MHLEN - sizeof(m->m_ext) +
			    m->m_ext.ext_size - m->m_len;
		} else {
			if (m->m_flags & M_PKTHDR)
				wasted += MHLEN - m->m_len;
			else
				wasted += MLEN - m->m_len;
		}
		m = m->m_next;
	}
	/* be paranoid.. it helps */
	if (segments > MP_BUCKETS - 1)
		segments = MP_BUCKETS - 1;
	if (used > 100000)
		used = 100000;
	if (wasted > 100000)
		wasted = 100000;
	/* store in the appropriate bucket */
	/* don't bother locking. if it's slightly off, so what? */
	mbprof.segments[segments]++;
	mbprof.used[fls(used)]++;
	mbprof.wasted[fls(wasted)]++;
}

mbprof_textify(void)
{
	int offset;
	char *c;
	uint64_t *p;

	p = &mbprof.wasted[0];
	c = mbprofbuf;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "wasted:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.wasted[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
	p = &mbprof.used[0];
	c += offset;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "used:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.used[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
	p = &mbprof.segments[0];
	c += offset;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "segments:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.segments[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
}

mbprof_handler(SYSCTL_HANDLER_ARGS)
{
	int error;

	mbprof_textify();
	error = SYSCTL_OUT(req, mbprofbuf, strlen(mbprofbuf) + 1);
	return (error);
}

mbprof_clr_handler(SYSCTL_HANDLER_ARGS)
{
	int clear, error;

	clear = 0;
	error = sysctl_handle_int(oidp, &clear, 0, req);
	if (error || !req->newptr)
		return (error);

	if (clear) {
		bzero(&mbprof, sizeof(mbprof));
	}
	return (error);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofile, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, mbprof_handler, "A", "mbuf profiling statistics");

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofileclr, CTLTYPE_INT|CTLFLAG_RW,
    NULL, 0, mbprof_clr_handler, "I", "clear mbuf profiling statistics");

#endif