/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include "opt_mbuf_profiling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/sdt.h>

#include <vm/vm.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>

SDT_PROBE_DEFINE5_XLATE(sdt, , , m__init,
    "struct mbuf *", "mbufinfo_t *",
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__gethdr_raw,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__gethdr,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__get_raw,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__get,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE4_XLATE(sdt, , , m__getcl,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "uint32_t", "uint32_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE5_XLATE(sdt, , , m__getjcl,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__clget,
    "struct mbuf *", "mbufinfo_t *",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t");

SDT_PROBE_DEFINE4_XLATE(sdt, , , m__cljget,
    "struct mbuf *", "mbufinfo_t *",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t",
    "void*", "void*");

SDT_PROBE_DEFINE(sdt, , , m__cljset);

SDT_PROBE_DEFINE1_XLATE(sdt, , , m__free,
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE1_XLATE(sdt, , , m__freem,
    "struct mbuf *", "mbufinfo_t *");

#include <security/mac/mac_framework.h>

/*
 * Provide minimum possible defaults for link and protocol header space,
 * assuming IPv4 over Ethernet.  Enabling IPv6, IEEE802.11 or some other
 * protocol may grow these values.
 */
u_int max_linkhdr = 16;
u_int max_protohdr = 40;
u_int max_hdr = 16 + 40;
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
    &max_linkhdr, 16, "Size of largest link layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
    &max_protohdr, 40, "Size of largest protocol layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
    &max_hdr, 16 + 40, "Size of largest link plus protocol header");

static void
max_hdr_grow(void)
{
        max_hdr = max_linkhdr + max_protohdr;
        MPASS(max_hdr <= MHLEN);
}

void
max_linkhdr_grow(u_int new)
{
        if (new > max_linkhdr) {
                max_linkhdr = new;
                max_hdr_grow();
        }
}

void
max_protohdr_grow(u_int new)
{
        if (new > max_protohdr) {
                max_protohdr = new;
                max_hdr_grow();
        }
}

#ifdef MBUF_STRESS_TEST
int m_defragpackets;
int m_defragbytes;
int m_defraguseless;
int m_defragfailure;
int m_defragrandomfailures;

SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
    &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
    &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
    &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
    &m_defragfailure, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
    &m_defragrandomfailures, 0, "");
#endif

/*
 * Ensure the correct size of various mbuf parameters.  It could be off due
 * to compiler-induced padding and alignment artifacts.
 */
CTASSERT(MSIZE - offsetof(struct mbuf, m_dat) == MLEN);
CTASSERT(MSIZE - offsetof(struct mbuf, m_pktdat) == MHLEN);

/*
 * mbuf data storage should be 64-bit aligned regardless of architectural
 * pointer size; check this is the case with and without a packet header.
 */
CTASSERT(offsetof(struct mbuf, m_dat) % 8 == 0);
CTASSERT(offsetof(struct mbuf, m_pktdat) % 8 == 0);

/*
 * While the specific values here don't matter too much (i.e., +/- a few
 * words), we do want to ensure that changes to these values are carefully
 * reasoned about and properly documented.  This is especially the case as
 * network-protocol and device-driver modules encode these layouts, and must
 * be recompiled if the structures change.  Check these values at compile time
 * against the ones documented in comments in mbuf.h.
 *
 * NB: Possibly they should be documented there via #define's and not just
 * comments.
 */
#if defined(__LP64__)
CTASSERT(offsetof(struct mbuf, m_dat) == 32);
CTASSERT(sizeof(struct pkthdr) == 64);
CTASSERT(sizeof(struct m_ext) == 160);
#else
CTASSERT(offsetof(struct mbuf, m_dat) == 24);
CTASSERT(sizeof(struct pkthdr) == 56);
#if defined(__powerpc__) && defined(BOOKE)
/* PowerPC booke has 64-bit physical pointers. */
CTASSERT(sizeof(struct m_ext) == 176);
#else
CTASSERT(sizeof(struct m_ext) == 172);
#endif
#endif

/*
 * Assert that the queue(3) macros produce code of the same size as an old
 * plain pointer does.
 */
#ifdef INVARIANTS
static struct mbuf __used m_assertbuf;
CTASSERT(sizeof(m_assertbuf.m_slist) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_stailq) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_slistpkt) == sizeof(m_assertbuf.m_nextpkt));
CTASSERT(sizeof(m_assertbuf.m_stailqpkt) == sizeof(m_assertbuf.m_nextpkt));
#endif

/*
 * Attach the cluster from *m to *n, set up m_ext in *n
 * and bump the refcount of the cluster.
 */
static void
mb_dupcl(struct mbuf *n, struct mbuf *m)
{
        volatile u_int *refcnt;

        KASSERT(m->m_flags & (M_EXT|M_EXTPG),
            ("%s: M_EXT|M_EXTPG not set on %p", __func__, m));
        KASSERT(!(n->m_flags & (M_EXT|M_EXTPG)),
            ("%s: M_EXT|M_EXTPG set on %p", __func__, n));

        /*
         * Cache access optimization.
         *
         * o Regular M_EXT storage doesn't need a full copy of m_ext, since
         *   the holder of the 'ext_count' is responsible for carrying the
         *   free routine and its arguments.
         * o M_EXTPG data is split between the main part of the mbuf and
         *   m_ext; the main part is copied in full, and the m_ext part is
         *   handled the same way as regular M_EXT.
         * o EXT_EXTREF, where 'ext_cnt' doesn't point into the mbuf at all,
         *   is special - it needs a full copy of m_ext into each mbuf, since
         *   any copy could end up as the last one to free.
         */
        if (m->m_flags & M_EXTPG) {
                bcopy(&m->m_epg_startcopy, &n->m_epg_startcopy,
                    __rangeof(struct mbuf, m_epg_startcopy, m_epg_endcopy));
                bcopy(&m->m_ext, &n->m_ext, m_epg_ext_copylen);
        } else if (m->m_ext.ext_type == EXT_EXTREF)
                bcopy(&m->m_ext, &n->m_ext, sizeof(struct m_ext));
        else
                bcopy(&m->m_ext, &n->m_ext, m_ext_copylen);

        n->m_flags |= m->m_flags & (M_RDONLY | M_EXT | M_EXTPG);

        /* See if this is the mbuf that holds the embedded refcount. */
        if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
                refcnt = n->m_ext.ext_cnt = &m->m_ext.ext_count;
                n->m_ext.ext_flags &= ~EXT_FLAG_EMBREF;
        } else {
                KASSERT(m->m_ext.ext_cnt != NULL,
                    ("%s: no refcounting pointer on %p", __func__, m));
                refcnt = m->m_ext.ext_cnt;
        }

        if (*refcnt == 1)
                *refcnt += 1;
        else
                atomic_add_int(refcnt, 1);
}

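/*
 * Illustrative sketch (not part of the original file): this is how a
 * cluster ends up shared.  A hypothetical caller in this file, much like
 * the copy routines below, borrows m's storage instead of copying it:
 *
 *	struct mbuf *n = m_get(M_NOWAIT, m->m_type);
 *	if (n != NULL) {
 *		n->m_len = m->m_len;
 *		n->m_data = m->m_data;	// point at the same bytes
 *		mb_dupcl(n, m);		// bump the shared refcount
 *	}
 *
 * Both chains must then be treated as read-only; see M_WRITABLE().
 */
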
void
m_demote_pkthdr(struct mbuf *m)
{
        M_ASSERTPKTHDR(m);
        M_ASSERT_NO_SND_TAG(m);

        m_tag_delete_chain(m, NULL);
        m->m_flags &= ~M_PKTHDR;
        bzero(&m->m_pkthdr, sizeof(struct pkthdr));
}

/*
 * Clean up mbuf (chain) from any tags and packet headers.
 * If "all" is set then the first mbuf in the chain will be
 * cleaned too.
 */
void
m_demote(struct mbuf *m0, int all, int flags)
{
        struct mbuf *m;

        flags |= M_DEMOTEFLAGS;

        for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
                KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt in m %p, m0 %p",
                    __func__, m, m0));
                if (m->m_flags & M_PKTHDR)
                        m_demote_pkthdr(m);
                m->m_flags &= flags;
        }
}

/*
 * Sanity checks on mbuf (chain) for use in KASSERT() and general
 * debugging.
 * Returns 1 when all tests pass; otherwise it returns 0 or panics.
 * sanitize: 0 to run M_SANITY_ACTION on the first defect, 1 to garble
 * things so they blow up later instead.
 */
int
m_sanity(struct mbuf *m0, int sanitize)
{
        struct mbuf *m;
        caddr_t a, b;
        int pktlen = 0;

#ifdef INVARIANTS
#define	M_SANITY_ACTION(s)	panic("mbuf %p: " s, m)
#else
#define	M_SANITY_ACTION(s)	printf("mbuf %p: " s, m)
#endif

        for (m = m0; m != NULL; m = m->m_next) {
                /*
                 * Basic pointer checks.  If any of these fails then some
                 * unrelated kernel memory before or after us is trashed.
                 * No way to recover from that.
                 */
                a = M_START(m);
                b = a + M_SIZE(m);
                if ((caddr_t)m->m_data < a)
                        M_SANITY_ACTION("m_data outside mbuf data range left");
                if ((caddr_t)m->m_data > b)
                        M_SANITY_ACTION("m_data outside mbuf data range right");
                if ((caddr_t)m->m_data + m->m_len > b)
                        M_SANITY_ACTION("m_data + m_len exceeds mbuf space");

                /* m->m_nextpkt may only be set on first mbuf in chain. */
                if (m != m0 && m->m_nextpkt != NULL) {
                        if (sanitize) {
                                m_freem(m->m_nextpkt);
                                m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
                        } else
                                M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
                }

                /* packet length (not mbuf length!) calculation */
                if (m0->m_flags & M_PKTHDR)
                        pktlen += m->m_len;

                /* m_tags may only be attached to first mbuf in chain. */
                if (m != m0 && m->m_flags & M_PKTHDR &&
                    !SLIST_EMPTY(&m->m_pkthdr.tags)) {
                        if (sanitize) {
                                m_tag_delete_chain(m, NULL);
                                /* put in 0xDEADC0DE perhaps? */
                        } else
                                M_SANITY_ACTION("m_tags on in-chain mbuf");
                }

                /* M_PKTHDR may only be set on first mbuf in chain */
                if (m != m0 && m->m_flags & M_PKTHDR) {
                        if (sanitize) {
                                bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
                                m->m_flags &= ~M_PKTHDR;
                                /* put in 0xDEADC0DE and leave hdr flag in */
                        } else
                                M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
                }
        }
        m = m0;
        if (pktlen && pktlen != m->m_pkthdr.len) {
                if (sanitize)
                        m->m_pkthdr.len = 0;
                else
                        M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
        }
        return (1);

#undef	M_SANITY_ACTION
}

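/*
 * Usage sketch (hypothetical, not from the original file): m_sanity()
 * is typically wrapped in an assertion at a trust boundary:
 *
 *	KASSERT(m_sanity(m0, 0),
 *	    ("%s: malformed mbuf chain %p", __func__, m0));
 *
 * With sanitize=0 the first defect fires M_SANITY_ACTION; with
 * sanitize=1 the chain is patched up (stray tags deleted, in-chain
 * pkthdrs cleared) so that problems surface later instead.
 */
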
/*
 * Non-inlined part of m_init().
 */
int
m_pkthdr_init(struct mbuf *m, int how)
{
#ifdef MAC
        int error;
#endif

        m->m_data = m->m_pktdat;
        bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
#ifdef NUMA
        m->m_pkthdr.numa_domain = M_NODOM;
#endif
#ifdef MAC
        /* If the label init fails, fail the alloc */
        error = mac_mbuf_init(m, how);
        if (error)
                return (error);
#endif

        return (0);
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{

#if 0
        /* see below for why these are not enabled */
        M_ASSERTPKTHDR(to);
        /* Note: with MAC, this may not be a good assertion. */
        KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
            ("m_move_pkthdr: to has tags"));
#endif
#ifdef MAC
        /*
         * XXXMAC: It could be this should also occur for non-MAC?
         */
        if (to->m_flags & M_PKTHDR)
                m_tag_delete_chain(to, NULL);
#endif
        to->m_flags = (from->m_flags & M_COPYFLAGS) |
            (to->m_flags & (M_EXT | M_EXTPG));
        if ((to->m_flags & M_EXT) == 0)
                to->m_data = to->m_pktdat;
        to->m_pkthdr = from->m_pkthdr;		/* especially tags */
        SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
        from->m_flags &= ~M_PKTHDR;
        if (from->m_pkthdr.csum_flags & CSUM_SND_TAG) {
                from->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
                from->m_pkthdr.snd_tag = NULL;
        }
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{

#if 0
        /*
         * The mbuf allocator only initializes the pkthdr
         * when the mbuf is allocated with m_gethdr().  Many users
         * (e.g. m_copy*, m_prepend) use m_get() and then
         * smash the pkthdr as needed, causing these
         * assertions to trip.  For now just disable them.
         */
        M_ASSERTPKTHDR(to);
        /* Note: with MAC, this may not be a good assertion. */
        KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
#endif
        MBUF_CHECKSLEEP(how);
#ifdef MAC
        if (to->m_flags & M_PKTHDR)
                m_tag_delete_chain(to, NULL);
#endif
        to->m_flags = (from->m_flags & M_COPYFLAGS) |
            (to->m_flags & (M_EXT | M_EXTPG));
        if ((to->m_flags & M_EXT) == 0)
                to->m_data = to->m_pktdat;
        to->m_pkthdr = from->m_pkthdr;
        if (from->m_pkthdr.csum_flags & CSUM_SND_TAG)
                m_snd_tag_ref(from->m_pkthdr.snd_tag);
        SLIST_INIT(&to->m_pkthdr.tags);
        return (m_tag_copy_chain(to, from, how));
}

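/*
 * Illustrative contrast between the two routines above (all names are
 * hypothetical): when the old head mbuf is being replaced, move the
 * header; when both chains live on, duplicate it:
 *
 *	m_move_pkthdr(new_head, old_head);	// old_head loses M_PKTHDR
 *
 *	if (m_dup_pkthdr(copy, orig, M_NOWAIT) == 0)
 *		goto fail;	// the deep tag copy can fail under M_NOWAIT
 */
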
/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
        struct mbuf *mn;

        if (m->m_flags & M_PKTHDR)
                mn = m_gethdr(how, m->m_type);
        else
                mn = m_get(how, m->m_type);
        if (mn == NULL) {
                m_freem(m);
                return (NULL);
        }
        if (m->m_flags & M_PKTHDR)
                m_move_pkthdr(mn, m);
        mn->m_next = m;
        m = mn;
        if (len < M_SIZE(m))
                M_ALIGN(m, len);
        m->m_len = len;
        return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
        struct mbuf *n, **np;
        int off = off0;
        struct mbuf *top;
        int copyhdr = 0;

        KASSERT(off >= 0, ("m_copym, negative off %d", off));
        KASSERT(len >= 0, ("m_copym, negative len %d", len));
        MBUF_CHECKSLEEP(wait);
        if (off == 0 && m->m_flags & M_PKTHDR)
                copyhdr = 1;
        while (off > 0) {
                KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        np = &top;
        top = NULL;
        while (len > 0) {
                if (m == NULL) {
                        KASSERT(len == M_COPYALL,
                            ("m_copym, length > size of mbuf chain"));
                        break;
                }
                if (copyhdr)
                        n = m_gethdr(wait, m->m_type);
                else
                        n = m_get(wait, m->m_type);
                *np = n;
                if (n == NULL)
                        goto nospace;
                if (copyhdr) {
                        if (!m_dup_pkthdr(n, m, wait))
                                goto nospace;
                        if (len == M_COPYALL)
                                n->m_pkthdr.len -= off0;
                        else
                                n->m_pkthdr.len = len;
                        copyhdr = 0;
                }
                n->m_len = min(len, m->m_len - off);
                if (m->m_flags & (M_EXT|M_EXTPG)) {
                        n->m_data = m->m_data + off;
                        mb_dupcl(n, m);
                } else
                        bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
                            (u_int)n->m_len);
                if (len != M_COPYALL)
                        len -= n->m_len;
                off = 0;
                m = m->m_next;
                np = &n->m_next;
        }
        return (top);
nospace:
        m_freem(top);
        return (NULL);
}

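/*
 * Usage sketch (hypothetical caller): take a reference-counted copy of
 * an entire packet, e.g. before handing the original to a consumer:
 *
 *	struct mbuf *copy;
 *
 *	copy = m_copym(m, 0, M_COPYALL, M_NOWAIT);
 *	if (copy == NULL)
 *		return (ENOBUFS);
 *
 * The copy shares clusters with "m", so neither chain may be written
 * without first checking M_WRITABLE().
 */
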
/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
        struct mbuf *top, *n, *o;

        MBUF_CHECKSLEEP(how);
        n = m_get(how, m->m_type);
        top = n;
        if (n == NULL)
                goto nospace;

        if (!m_dup_pkthdr(n, m, how))
                goto nospace;
        n->m_len = m->m_len;
        if (m->m_flags & (M_EXT|M_EXTPG)) {
                n->m_data = m->m_data;
                mb_dupcl(n, m);
        } else {
                n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
                bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
        }

        m = m->m_next;
        while (m) {
                o = m_get(how, m->m_type);
                if (o == NULL)
                        goto nospace;

                n->m_next = o;
                n = n->m_next;

                n->m_len = m->m_len;
                if (m->m_flags & (M_EXT|M_EXTPG)) {
                        n->m_data = m->m_data;
                        mb_dupcl(n, m);
                } else {
                        bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
                }

                m = m->m_next;
        }
        return (top);
nospace:
        m_freem(top);
        return (NULL);
}

static void
m_copyfromunmapped(const struct mbuf *m, int off, int len, caddr_t cp)
{
        struct iovec iov;
        struct uio uio;
        int error __diagused;

        KASSERT(off >= 0, ("m_copyfromunmapped: negative off %d", off));
        KASSERT(len >= 0, ("m_copyfromunmapped: negative len %d", len));
        KASSERT(off < m->m_len,
            ("m_copyfromunmapped: off exceeds mbuf length"));
        iov.iov_base = cp;
        iov.iov_len = len;
        uio.uio_resid = len;
        uio.uio_iov = &iov;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_iovcnt = 1;
        uio.uio_offset = 0;
        uio.uio_rw = UIO_READ;
        error = m_unmapped_uiomove(m, off, &uio, len);
        KASSERT(error == 0, ("m_unmapped_uiomove failed: off %d, len %d", off,
            len));
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
        u_int count;

        KASSERT(off >= 0, ("m_copydata, negative off %d", off));
        KASSERT(len >= 0, ("m_copydata, negative len %d", len));
        while (off > 0) {
                KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        while (len > 0) {
                KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
                count = min(m->m_len - off, len);
                if ((m->m_flags & M_EXTPG) != 0)
                        m_copyfromunmapped(m, off, count, cp);
                else
                        bcopy(mtod(m, caddr_t) + off, cp, count);
                len -= count;
                cp += count;
                off = 0;
                m = m->m_next;
        }
}

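/*
 * Common pattern (illustrative): linearize a fixed-size header from a
 * possibly fragmented chain into local storage before parsing it:
 *
 *	struct ip ip;
 *
 *	if (m->m_pkthdr.len < sizeof(ip))
 *		return (EINVAL);
 *	m_copydata(m, 0, sizeof(ip), (caddr_t)&ip);
 */
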
/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(const struct mbuf *m, int how)
{
        struct mbuf **p, *top = NULL;
        int remain, moff, nsize;

        MBUF_CHECKSLEEP(how);
        /* Sanity check */
        if (m == NULL)
                return (NULL);
        M_ASSERTPKTHDR(m);

        /* While there's more data, get a new mbuf, tack it on, and fill it */
        remain = m->m_pkthdr.len;
        moff = 0;
        p = &top;
        while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
                struct mbuf *n;

                /* Get the next new mbuf */
                if (remain >= MINCLSIZE) {
                        n = m_getcl(how, m->m_type, 0);
                        nsize = MCLBYTES;
                } else {
                        n = m_get(how, m->m_type);
                        nsize = MLEN;
                }
                if (n == NULL)
                        goto nospace;

                if (top == NULL) {		/* First one, must be PKTHDR */
                        if (!m_dup_pkthdr(n, m, how)) {
                                m_free(n);
                                goto nospace;
                        }
                        if ((n->m_flags & M_EXT) == 0)
                                nsize = MHLEN;
                        n->m_flags &= ~M_RDONLY;
                }
                n->m_len = 0;

                /* Link it into the new chain */
                *p = n;
                p = &n->m_next;

                /* Copy data from original mbuf(s) into new mbuf */
                while (n->m_len < nsize && m != NULL) {
                        int chunk = min(nsize - n->m_len, m->m_len - moff);

                        m_copydata(m, moff, chunk, n->m_data + n->m_len);
                        moff += chunk;
                        n->m_len += chunk;
                        remain -= chunk;
                        if (moff == m->m_len) {
                                m = m->m_next;
                                moff = 0;
                        }
                }

                /* Check correct total mbuf length */
                KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
                    ("%s: bogus m_pkthdr.len", __func__));
        }
        return (top);

nospace:
        m_freem(top);
        return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
        while (m->m_next)
                m = m->m_next;
        while (n) {
                if (!M_WRITABLE(m) ||
                    (n->m_flags & M_EXTPG) != 0 ||
                    M_TRAILINGSPACE(m) < n->m_len) {
                        /* just join the two chains */
                        m->m_next = n;
                        return;
                }
                /* splat the data from one into the other */
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
                    (u_int)n->m_len);
                m->m_len += n->m_len;
                n = m_free(n);
        }
}

/*
 * Concatenate two pkthdr mbuf chains.
 */
void
m_catpkt(struct mbuf *m, struct mbuf *n)
{
        M_ASSERTPKTHDR(m);
        M_ASSERTPKTHDR(n);

        m->m_pkthdr.len += n->m_pkthdr.len;
        m_demote(n, 1, 0);

        m_cat(m, n);
}

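/*
 * Illustrative note: m_cat() never touches packet headers, so a caller
 * joining two complete packets (say, reassembled fragments; the names
 * here are hypothetical) uses m_catpkt(), which folds n's length into
 * m's header and demotes n first:
 *
 *	m_catpkt(head, frag);	// frag is absorbed into head
 */
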
void
m_adj(struct mbuf *mp, int req_len)
{
        int len = req_len;
        struct mbuf *m;
        int count;

        if ((m = mp) == NULL)
                return;
        if (len >= 0) {
                /*
                 * Trim from head.
                 */
                while (m != NULL && len > 0) {
                        if (m->m_len <= len) {
                                len -= m->m_len;
                                m->m_len = 0;
                                m = m->m_next;
                        } else {
                                m->m_len -= len;
                                m->m_data += len;
                                len = 0;
                        }
                }
                if (mp->m_flags & M_PKTHDR)
                        mp->m_pkthdr.len -= (req_len - len);
        } else {
                /*
                 * Trim from tail.  Scan the mbuf chain,
                 * calculating its length and finding the last mbuf.
                 * If the adjustment only affects this mbuf, then just
                 * adjust and return.  Otherwise, rescan and truncate
                 * after the remaining size.
                 */
                len = -len;
                count = 0;
                for (;;) {
                        count += m->m_len;
                        if (m->m_next == (struct mbuf *)0)
                                break;
                        m = m->m_next;
                }
                if (m->m_len >= len) {
                        m->m_len -= len;
                        if (mp->m_flags & M_PKTHDR)
                                mp->m_pkthdr.len -= len;
                        return;
                }
                count -= len;
                if (count < 0)
                        count = 0;
                /*
                 * Correct length for chain is "count".
                 * Find the mbuf with last data, adjust its length,
                 * and toss data from remaining mbufs on chain.
                 */
                m = mp;
                if (m->m_flags & M_PKTHDR)
                        m->m_pkthdr.len = count;
                for (; m; m = m->m_next) {
                        if (m->m_len >= count) {
                                m->m_len = count;
                                if (m->m_next != NULL) {
                                        m_freem(m->m_next);
                                        m->m_next = NULL;
                                }
                                break;
                        }
                        count -= m->m_len;
                }
        }
}

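/*
 * Example (sketch): a receive path stripping link-layer framing trims
 * the header from the front and the FCS from the tail:
 *
 *	m_adj(m, ETHER_HDR_LEN);	// positive len: trim from head
 *	m_adj(m, -ETHER_CRC_LEN);	// negative len: trim from tail
 */
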
void
m_adj_decap(struct mbuf *mp, int len)
{
        uint8_t rsstype;

        m_adj(mp, len);
        if ((mp->m_flags & M_PKTHDR) != 0) {
                /*
                 * If flowid was calculated by card from the inner
                 * headers, move flowid to the decapsulated mbuf
                 * chain, otherwise clear.  This depends on the
                 * internals of m_adj, which keeps pkthdr as is, in
                 * particular not changing rsstype and flowid.
                 */
                rsstype = mp->m_pkthdr.rsstype;
                if ((rsstype & M_HASHTYPE_INNER) != 0) {
                        M_HASHTYPE_SET(mp, rsstype & ~M_HASHTYPE_INNER);
                } else {
                        M_HASHTYPE_CLEAR(mp);
                }
        }
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work
 * for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
        struct mbuf *m;
        int count;
        int space;

        KASSERT((n->m_flags & M_EXTPG) == 0,
            ("%s: unmapped mbuf %p", __func__, n));

        /*
         * If first mbuf has no cluster, and has room for len bytes
         * without shifting current data, pullup into it,
         * otherwise allocate a new mbuf to prepend to the chain.
         */
        if ((n->m_flags & M_EXT) == 0 &&
            n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
                if (n->m_len >= len)
                        return (n);
                m = n;
                n = n->m_next;
                len -= m->m_len;
        } else {
                if (len > MHLEN)
                        goto bad;
                m = m_get(M_NOWAIT, n->m_type);
                if (m == NULL)
                        goto bad;
                if (n->m_flags & M_PKTHDR)
                        m_move_pkthdr(m, n);
        }
        space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
        do {
                count = min(min(max(len, max_protohdr), space), n->m_len);
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
                    (u_int)count);
                len -= count;
                m->m_len += count;
                n->m_len -= count;
                space -= count;
                if (n->m_len)
                        n->m_data += count;
                else
                        n = m_free(n);
        } while (len > 0 && n);
        if (len > 0) {
                (void) m_free(m);
                goto bad;
        }
        m->m_next = n;
        return (m);
bad:
        m_freem(n);
        return (NULL);
}

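/*
 * Canonical use (illustrative): make the IP header contiguous before
 * casting via mtod(); on failure the chain has already been freed:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;		// chain gone, nothing left to free
 *	ip = mtod(m, struct ip *);
 */
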
/*
 * Like m_pullup(), except a new mbuf is always allocated, and we allow
 * the amount of empty space before the data in the new mbuf to be specified
 * (in the event that the caller expects to prepend later).
 */
struct mbuf *
m_copyup(struct mbuf *n, int len, int dstoff)
{
        struct mbuf *m;
        int count, space;

        if (len > (MHLEN - dstoff))
                goto bad;
        m = m_get(M_NOWAIT, n->m_type);
        if (m == NULL)
                goto bad;
        if (n->m_flags & M_PKTHDR)
                m_move_pkthdr(m, n);
        m->m_data += dstoff;
        space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
        do {
                count = min(min(max(len, max_protohdr), space), n->m_len);
                memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
                    (unsigned)count);
                len -= count;
                m->m_len += count;
                n->m_len -= count;
                space -= count;
                if (n->m_len)
                        n->m_data += count;
                else
                        n = m_free(n);
        } while (len > 0 && n);
        if (len > 0) {
                (void) m_free(m);
                goto bad;
        }
        m->m_next = n;
        return (m);
bad:
        m_freem(n);
        return (NULL);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
        struct mbuf *m, *n;
        u_int len = len0, remain;

        MBUF_CHECKSLEEP(wait);
        for (m = m0; m && len > m->m_len; m = m->m_next)
                len -= m->m_len;
        if (m == NULL)
                return (NULL);
        remain = m->m_len - len;
        if (m0->m_flags & M_PKTHDR && remain == 0) {
                n = m_gethdr(wait, m0->m_type);
                if (n == NULL)
                        return (NULL);
                n->m_next = m->m_next;
                m->m_next = NULL;
                if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) {
                        n->m_pkthdr.snd_tag =
                            m_snd_tag_ref(m0->m_pkthdr.snd_tag);
                        n->m_pkthdr.csum_flags |= CSUM_SND_TAG;
                } else
                        n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
                n->m_pkthdr.len = m0->m_pkthdr.len - len0;
                m0->m_pkthdr.len = len0;
                return (n);
        } else if (m0->m_flags & M_PKTHDR) {
                n = m_gethdr(wait, m0->m_type);
                if (n == NULL)
                        return (NULL);
                if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) {
                        n->m_pkthdr.snd_tag =
                            m_snd_tag_ref(m0->m_pkthdr.snd_tag);
                        n->m_pkthdr.csum_flags |= CSUM_SND_TAG;
                } else
                        n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
                n->m_pkthdr.len = m0->m_pkthdr.len - len0;
                m0->m_pkthdr.len = len0;
                if (m->m_flags & (M_EXT|M_EXTPG))
                        goto extpacket;
                if (remain > MHLEN) {
                        /* m can't be the lead packet */
                        M_ALIGN(n, 0);
                        n->m_next = m_split(m, len, wait);
                        if (n->m_next == NULL) {
                                (void) m_free(n);
                                return (NULL);
                        } else {
                                n->m_len = 0;
                                return (n);
                        }
                } else
                        M_ALIGN(n, remain);
        } else if (remain == 0) {
                n = m->m_next;
                m->m_next = NULL;
                return (n);
        } else {
                n = m_get(wait, m->m_type);
                if (n == NULL)
                        return (NULL);
                M_ALIGN(n, remain);
        }
extpacket:
        if (m->m_flags & (M_EXT|M_EXTPG)) {
                n->m_data = m->m_data + len;
                mb_dupcl(n, m);
        } else {
                bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
        }
        n->m_len = remain;
        m->m_len = len;
        n->m_next = m->m_next;
        m->m_next = NULL;
        return (n);
}

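/*
 * Sketch (hypothetical caller): split a packet after "hdrlen" bytes;
 * m0 keeps the head and the return value is the tail:
 *
 *	struct mbuf *tail;
 *
 *	tail = m_split(m0, hdrlen, M_NOWAIT);
 *	if (tail == NULL)
 *		return (ENOBUFS);	// m0 left as it was
 */
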
/*
 * Routine to copy from device local memory into mbufs.
 * Note that the `off' argument is the offset into the first mbuf of the
 * target chain at which to begin copying the data.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
        struct mbuf *m;
        struct mbuf *top = NULL, **mp = &top;
        int len;

        if (off < 0 || off > MHLEN)
                return (NULL);

        while (totlen > 0) {
                if (top == NULL) {	/* First one, must be PKTHDR */
                        if (totlen + off >= MINCLSIZE) {
                                m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
                                len = MCLBYTES;
                        } else {
                                m = m_gethdr(M_NOWAIT, MT_DATA);
                                len = MHLEN;

                                /* Place initial small packet/header at end of mbuf */
                                if (m && totlen + off + max_linkhdr <= MHLEN) {
                                        m->m_data += max_linkhdr;
                                        len -= max_linkhdr;
                                }
                        }
                        if (m == NULL)
                                return (NULL);
                        m->m_pkthdr.rcvif = ifp;
                        m->m_pkthdr.len = totlen;
                } else {
                        if (totlen + off >= MINCLSIZE) {
                                m = m_getcl(M_NOWAIT, MT_DATA, 0);
                                len = MCLBYTES;
                        } else {
                                m = m_get(M_NOWAIT, MT_DATA);
                                len = MLEN;
                        }
                        if (m == NULL) {
                                m_freem(top);
                                return (NULL);
                        }
                }
                if (off) {
                        m->m_data += off;
                        len -= off;
                        off = 0;
                }
                m->m_len = len = min(totlen, len);
                if (copy)
                        copy(buf, mtod(m, caddr_t), (u_int)len);
                else
                        bcopy(buf, mtod(m, caddr_t), (u_int)len);
                buf += len;
                *mp = m;
                mp = &m->m_next;
                totlen -= len;
        }
        return (top);
}

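/*
 * Hypothetical driver receive path (all names illustrative): copy a
 * frame out of device-local memory and hand the chain to the stack:
 *
 *	m = m_devget(sc->rxbuf, framelen, 0, sc->ifp, NULL);
 *	if (m != NULL)
 *		(*sc->ifp->if_input)(sc->ifp, m);
 *
 * Passing a NULL "copy" callback falls back to bcopy() as above.
 */
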
static void
m_copytounmapped(const struct mbuf *m, int off, int len, c_caddr_t cp)
{
        struct iovec iov;
        struct uio uio;
        int error __diagused;

        KASSERT(off >= 0, ("m_copytounmapped: negative off %d", off));
        KASSERT(len >= 0, ("m_copytounmapped: negative len %d", len));
        KASSERT(off < m->m_len, ("m_copytounmapped: off exceeds mbuf length"));
        iov.iov_base = __DECONST(caddr_t, cp);
        iov.iov_len = len;
        uio.uio_resid = len;
        uio.uio_iov = &iov;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_iovcnt = 1;
        uio.uio_offset = 0;
        uio.uio_rw = UIO_WRITE;
        error = m_unmapped_uiomove(m, off, &uio, len);
        KASSERT(error == 0, ("m_unmapped_uiomove failed: off %d, len %d", off,
            len));
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
{
        int mlen;
        struct mbuf *m = m0, *n;
        int totlen = 0;

        if (m0 == NULL)
                return;
        while (off > (mlen = m->m_len)) {
                off -= mlen;
                totlen += mlen;
                if (m->m_next == NULL) {
                        n = m_get(M_NOWAIT, m->m_type);
                        if (n == NULL)
                                goto out;
                        bzero(mtod(n, caddr_t), MLEN);
                        n->m_len = min(MLEN, len + off);
                        m->m_next = n;
                }
                m = m->m_next;
        }
        while (len > 0) {
                if (m->m_next == NULL && (len > m->m_len - off)) {
                        m->m_len += min(len - (m->m_len - off),
                            M_TRAILINGSPACE(m));
                }
                mlen = min(m->m_len - off, len);
                if ((m->m_flags & M_EXTPG) != 0)
                        m_copytounmapped(m, off, mlen, cp);
                else
                        bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
                cp += mlen;
                len -= mlen;
                mlen += off;
                off = 0;
                totlen += mlen;
                if (len == 0)
                        break;
                if (m->m_next == NULL) {
                        n = m_get(M_NOWAIT, m->m_type);
                        if (n == NULL)
                                break;
                        n->m_len = min(MLEN, len);
                        m->m_next = n;
                }
                m = m->m_next;
        }
out:    if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
                m->m_pkthdr.len = totlen;
}

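/*
 * Illustrative use: patch a field in place at a known offset, letting
 * the routine extend the chain if the offset lies beyond the current
 * data ("csumoff" is a hypothetical offset):
 *
 *	uint16_t csum = 0;
 *
 *	m_copyback(m, csumoff, sizeof(csum), (c_caddr_t)&csum);
 */
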
/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
        struct mbuf *m, *n;
        int remainder, space;

        for (m = m0; m->m_next != NULL; m = m->m_next)
                ;
        remainder = len;
        space = M_TRAILINGSPACE(m);
        if (space > 0) {
                /*
                 * Copy into available space.
                 */
                if (space > remainder)
                        space = remainder;
                bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
                m->m_len += space;
                cp += space, remainder -= space;
        }
        while (remainder > 0) {
                /*
                 * Allocate a new mbuf; could check space
                 * and allocate a cluster instead.
                 */
                n = m_get(M_NOWAIT, m->m_type);
                if (n == NULL)
                        break;
                n->m_len = min(MLEN, remainder);
                bcopy(cp, mtod(n, caddr_t), n->m_len);
                cp += n->m_len, remainder -= n->m_len;
                m->m_next = n;
                m = n;
        }
        if (m0->m_flags & M_PKTHDR)
                m0->m_pkthdr.len += len - remainder;
        return (remainder == 0);
}

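/*
 * Sketch: append a trailer and check the boolean result, since the
 * M_NOWAIT allocation inside may fail ("trailer" and "drop" are
 * hypothetical):
 *
 *	if (!m_append(m0, sizeof(trailer), (c_caddr_t)&trailer))
 *		goto drop;	// chain was extended only partially
 */
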
/* Apply function f to the data in an unmapped mbuf. */
static int
m_apply_extpg_one(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
        void *p;
        u_int i, count, pgoff, pglen;
        int rval;

        KASSERT(PMAP_HAS_DMAP,
            ("m_apply_extpg_one does not support unmapped mbufs"));
        off += mtod(m, vm_offset_t);
        if (off < m->m_epg_hdrlen) {
                count = min(m->m_epg_hdrlen - off, len);
                rval = f(arg, m->m_epg_hdr + off, count);
                if (rval)
                        return (rval);
                len -= count;
                off = 0;
        } else
                off -= m->m_epg_hdrlen;
        pgoff = m->m_epg_1st_off;
        for (i = 0; i < m->m_epg_npgs && len > 0; i++) {
                pglen = m_epg_pagelen(m, i, pgoff);
                if (off < pglen) {
                        count = min(pglen - off, len);
                        p = (void *)PHYS_TO_DMAP(m->m_epg_pa[i] + pgoff + off);
                        rval = f(arg, p, count);
                        if (rval)
                                return (rval);
                        len -= count;
                        off = 0;
                } else
                        off -= pglen;
                pgoff = 0;
        }
        if (len > 0) {
                KASSERT(off < m->m_epg_trllen,
                    ("m_apply_extpg_one: offset beyond trailer"));
                KASSERT(len <= m->m_epg_trllen - off,
                    ("m_apply_extpg_one: length beyond trailer"));
                return (f(arg, m->m_epg_trail + off, len));
        }
        return (0);
}

/* Apply function f to the data in a single mbuf. */
static int
m_apply_one(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
        if ((m->m_flags & M_EXTPG) != 0)
                return (m_apply_extpg_one(m, off, len, f, arg));
        else
                return (f(arg, mtod(m, caddr_t) + off, len));
}

/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
        u_int count;
        int rval;

        KASSERT(off >= 0, ("m_apply, negative off %d", off));
        KASSERT(len >= 0, ("m_apply, negative len %d", len));
        while (off > 0) {
                KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        while (len > 0) {
                KASSERT(m != NULL, ("m_apply, length > size of mbuf chain"));
                count = min(m->m_len - off, len);
                rval = m_apply_one(m, off, count, f, arg);
                if (rval)
                        return (rval);
                len -= count;
                off = 0;
                m = m->m_next;
        }
        return (0);
}

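/*
 * Illustrative callback use: fold a function over every contiguous
 * region of a chain, e.g. to checksum or hash it.  "sumstep" is a
 * hypothetical helper matching the int (*)(void *, void *, u_int)
 * signature m_apply() expects:
 *
 *	error = m_apply(m, off, len, sumstep, &ctx);
 *	if (error != 0)
 *		return (error);	// first nonzero callback return
 */
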
/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{
        while (loc >= 0) {
                /* Normal end of search. */
                if (m->m_len > loc) {
                        *off = loc;
                        return (m);
                } else {
                        loc -= m->m_len;
                        if (m->m_next == NULL) {
                                if (m->m_len == loc) {
                                        /* Point at the end of valid data. */
                                        *off = m->m_len;
                                        return (m);
                                }
                                return (NULL);
                        }
                        m = m->m_next;
                }
        }
        return (NULL);
}

void
m_print(const struct mbuf *m, int maxlen)
{
        int len;
        int pdata;
        const struct mbuf *m2;

        if (m == NULL) {
                printf("mbuf: %p\n", m);
                return;
        }

        if (m->m_flags & M_PKTHDR)
                len = m->m_pkthdr.len;
        else
                len = -1;
        m2 = m;
        while (m2 != NULL && (len == -1 || len)) {
                pdata = m2->m_len;
                if (maxlen != -1 && pdata > maxlen)
                        pdata = maxlen;
                printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
                    m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
                    "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
                    "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
                if (pdata)
                        printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
                len -= m2->m_len;
                m2 = m2->m_next;
        }
        if (len > 0)
                printf("%d bytes unaccounted for.\n", len);
        return;
}

u_int
m_fixhdr(struct mbuf *m0)
{
        u_int len;

        len = m_length(m0, NULL);
        m0->m_pkthdr.len = len;
        return (len);
}

u_int
m_length(struct mbuf *m0, struct mbuf **last)
{
        struct mbuf *m;
        u_int len;

        len = 0;
        for (m = m0; m != NULL; m = m->m_next) {
                len += m->m_len;
                if (m->m_next == NULL)
                        break;
        }
        if (last != NULL)
                *last = m;
        return (len);
}

/*
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a chain without a packet header is passed in, the original
 * chain will be returned unharmed.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
        struct mbuf *m_new = NULL, *m_final = NULL;
        int progress = 0, length;

        MBUF_CHECKSLEEP(how);
        if (!(m0->m_flags & M_PKTHDR))
                return (m0);

        m_fixhdr(m0); /* Needed sanity check */

#ifdef MBUF_STRESS_TEST
        if (m_defragrandomfailures) {
                int temp = arc4random() & 0xff;
                if (temp == 0xba)
                        goto nospace;
        }
#endif

        if (m0->m_pkthdr.len > MHLEN)
                m_final = m_getcl(how, MT_DATA, M_PKTHDR);
        else
                m_final = m_gethdr(how, MT_DATA);

        if (m_final == NULL)
                goto nospace;

        if (m_dup_pkthdr(m_final, m0, how) == 0)
                goto nospace;

        m_new = m_final;

        while (progress < m0->m_pkthdr.len) {
                length = m0->m_pkthdr.len - progress;
                if (length > MCLBYTES)
                        length = MCLBYTES;

                if (m_new == NULL) {
                        if (length > MLEN)
                                m_new = m_getcl(how, MT_DATA, 0);
                        else
                                m_new = m_get(how, MT_DATA);
                        if (m_new == NULL)
                                goto nospace;
                }

                m_copydata(m0, progress, length, mtod(m_new, caddr_t));
                progress += length;
                m_new->m_len = length;
                if (m_new != m_final)
                        m_cat(m_final, m_new);
                m_new = NULL;
        }
#ifdef MBUF_STRESS_TEST
        if (m0->m_next == NULL)
                m_defraguseless++;
#endif
        m_freem(m0);
        m0 = m_final;
#ifdef MBUF_STRESS_TEST
        m_defragpackets++;
        m_defragbytes += m0->m_pkthdr.len;
#endif
        return (m0);
nospace:
#ifdef MBUF_STRESS_TEST
        m_defragfailure++;
#endif
        if (m_final)
                m_freem(m_final);
        return (NULL);
}

/*
 * Return the number of fragments an mbuf will use.  This is usually
 * used as a proxy for the number of scatter/gather elements needed by
 * a DMA engine to access an mbuf.  In general mapped mbufs are
 * assumed to be backed by physically contiguous buffers that only
 * need a single fragment.  Unmapped mbufs, on the other hand, can
 * span disjoint physical pages.
 */
static int
frags_per_mbuf(struct mbuf *m)
{
        int frags;

        if ((m->m_flags & M_EXTPG) == 0)
                return (1);

        /*
         * The header and trailer are counted as a single fragment
         * each when present.
         *
         * XXX: This overestimates the number of fragments by assuming
         * all the backing physical pages are disjoint.
         */
        frags = 0;
        if (m->m_epg_hdrlen != 0)
                frags++;
        frags += m->m_epg_npgs;
        if (m->m_epg_trllen != 0)
                frags++;

        return (frags);
}

/*
 * Defragment an mbuf chain, returning at most maxfrags separate
 * mbufs+clusters.  If this is not possible NULL is returned and
 * the original mbuf chain is left in its present (potentially
 * modified) state.  We use two techniques: collapsing consecutive
 * mbufs and replacing consecutive mbufs by a cluster.
 *
 * NB: this should really be named m_defrag but that name is taken
 */
struct mbuf *
m_collapse(struct mbuf *m0, int how, int maxfrags)
{
        struct mbuf *m, *n, *n2, **prev;
        int curfrags;

        /*
         * Calculate the current number of frags.
         */
        curfrags = 0;
        for (m = m0; m != NULL; m = m->m_next)
                curfrags += frags_per_mbuf(m);
        /*
         * First, try to collapse mbufs.  Note that we always collapse
         * towards the front so we don't need to deal with moving the
         * pkthdr.  This may be suboptimal if the first mbuf has much
         * less data than the following.
         */
        m = m0;
again:
        for (;;) {
                n = m->m_next;
                if (n == NULL)
                        break;
                if (M_WRITABLE(m) &&
                    n->m_len < M_TRAILINGSPACE(m)) {
                        m_copydata(n, 0, n->m_len,
                            mtod(m, char *) + m->m_len);
                        m->m_len += n->m_len;
                        m->m_next = n->m_next;
                        curfrags -= frags_per_mbuf(n);
                        m_free(n);
                        if (curfrags <= maxfrags)
                                return (m0);
                } else
                        m = n;
        }
        KASSERT(maxfrags > 1,
            ("maxfrags %u, but normal collapse failed", maxfrags));
        /*
         * Collapse consecutive mbufs to a cluster.
         */
        prev = &m0->m_next;		/* NB: not the first mbuf */
        while ((n = *prev) != NULL) {
                if ((n2 = n->m_next) != NULL &&
                    n->m_len + n2->m_len < MCLBYTES) {
                        m = m_getcl(how, MT_DATA, 0);
                        if (m == NULL)
                                goto bad;
                        m_copydata(n, 0, n->m_len, mtod(m, char *));
                        m_copydata(n2, 0, n2->m_len,
                            mtod(m, char *) + n->m_len);
                        m->m_len = n->m_len + n2->m_len;
                        m->m_next = n2->m_next;
                        *prev = m;
                        curfrags += 1;  /* For the new cluster */
                        curfrags -= frags_per_mbuf(n);
                        curfrags -= frags_per_mbuf(n2);
                        m_free(n);
                        m_free(n2);
                        if (curfrags <= maxfrags)
                                return (m0);
                        /*
                         * Still not there, try the normal collapse
                         * again before we allocate another cluster.
                         */
                        goto again;
                }
                prev = &n->m_next;
        }
        /*
         * No place where we can collapse to a cluster; punt.
         * This can occur if, for example, you request 2 frags
         * but the packet requires that both be clusters (we
         * never reallocate the first mbuf to avoid moving the
         * MAC header).
         */
bad:
        return (NULL);
}

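/*
 * Sketch (hypothetical caller): bound the scatter/gather list for a
 * DMA engine that handles, say, eight segments per packet:
 *
 *	struct mbuf *n;
 *
 *	n = m_collapse(m, M_NOWAIT, 8);
 *	if (n == NULL) {
 *		m_freem(m);	// original, possibly modified, chain
 *		return (EFBIG);
 *	}
 *	m = n;
 */
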
#ifdef MBUF_STRESS_TEST

/*
 * Fragment an mbuf chain.  There's no reason you'd ever want to do
 * this in normal usage, but it's great for stress testing various
 * mbuf consumers.
 *
 * If fragmentation is not possible, the original chain will be
 * returned.
 *
 * Possible length values:
 * 0	 no fragmentation will occur
 * > 0	each fragment will be of the specified length
 * -1	each fragment will be the same random value in length
 * -2	each fragment's length will be entirely random
 * (Random values range from 1 to 256)
 */
struct mbuf *
m_fragment(struct mbuf *m0, int how, int length)
{
        struct mbuf *m_first, *m_last;
        int divisor = 255, progress = 0, fraglen;

        if (!(m0->m_flags & M_PKTHDR))
                return (m0);

        if (length == 0 || length < -2)
                return (m0);
        if (length > MCLBYTES)
                length = MCLBYTES;
        if (length < 0 && divisor > MCLBYTES)
                divisor = MCLBYTES;
        if (length == -1)
                length = 1 + (arc4random() % divisor);
        if (length > 0)
                fraglen = length;

        m_fixhdr(m0); /* Needed sanity check */

        m_first = m_getcl(how, MT_DATA, M_PKTHDR);
        if (m_first == NULL)
                goto nospace;

        if (m_dup_pkthdr(m_first, m0, how) == 0)
                goto nospace;

        m_last = m_first;

        while (progress < m0->m_pkthdr.len) {
                if (length == -2)
                        fraglen = 1 + (arc4random() % divisor);
                if (fraglen > m0->m_pkthdr.len - progress)
                        fraglen = m0->m_pkthdr.len - progress;

                if (progress != 0) {
                        struct mbuf *m_new = m_getcl(how, MT_DATA, 0);
                        if (m_new == NULL)
                                goto nospace;

                        m_last->m_next = m_new;
                        m_last = m_new;
                }

                m_copydata(m0, progress, fraglen, mtod(m_last, caddr_t));
                progress += fraglen;
                m_last->m_len = fraglen;
        }
        m_freem(m0);
        m0 = m_first;
        return (m0);
nospace:
        if (m_first)
                m_freem(m_first);
        /* Return the original chain on failure */
        return (m0);
}

#endif

/*
 * Free pages from mbuf_ext_pgs, assuming they were allocated via
 * vm_page_alloc() and aren't associated with any object.  Complement
 * to allocator from m_uiotombuf_nomap().
 */
void
mb_free_mext_pgs(struct mbuf *m)
{
        vm_page_t pg;

        M_ASSERTEXTPG(m);
        for (int i = 0; i < m->m_epg_npgs; i++) {
                pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
                vm_page_unwire_noq(pg);
                vm_page_free(pg);
        }
}

static struct mbuf *
m_uiotombuf_nomap(struct uio *uio, int how, int len, int maxseg, int flags)
{
        struct mbuf *m, *mb, *prev;
        vm_page_t pg_array[MBUF_PEXT_MAX_PGS];
        int error, length, i, needed;
        ssize_t total;
        int pflags = malloc2vm_flags(how) | VM_ALLOC_NODUMP | VM_ALLOC_WIRED;

        MPASS((flags & M_PKTHDR) == 0);
        MPASS((how & M_ZERO) == 0);

        /*
         * len can be zero or an arbitrarily large value bound by
         * the total data supplied by the uio.
         */
        if (len > 0)
                total = MIN(uio->uio_resid, len);
        else
                total = uio->uio_resid;

        if (maxseg == 0)
                maxseg = MBUF_PEXT_MAX_PGS * PAGE_SIZE;

        /*
         * If total is zero, return an empty mbuf.  This can occur
         * for TLS 1.0 connections which send empty fragments as
         * a countermeasure against the known-IV weakness in CBC
         * ciphersuites.
         */
        if (__predict_false(total == 0)) {
                mb = mb_alloc_ext_pgs(how, mb_free_mext_pgs);
                if (mb == NULL)
                        return (NULL);
                mb->m_epg_flags = EPG_FLAG_ANON;
                return (mb);
        }

        /*
         * Allocate the pages
         */
        m = NULL;
        while (total > 0) {
                mb = mb_alloc_ext_pgs(how, mb_free_mext_pgs);
                if (mb == NULL)
                        goto failed;
                if (m == NULL)
                        m = mb;
                else
                        prev->m_next = mb;
                prev = mb;
                mb->m_epg_flags = EPG_FLAG_ANON;
                needed = length = MIN(maxseg, total);
                for (i = 0; needed > 0; i++, needed -= PAGE_SIZE) {
retry_page:
                        pg_array[i] = vm_page_alloc_noobj(pflags);
                        if (pg_array[i] == NULL) {
                                if (how & M_NOWAIT) {
                                        goto failed;
                                } else {
                                        vm_wait(NULL);
                                        goto retry_page;
                                }
                        }
                        mb->m_epg_pa[i] = VM_PAGE_TO_PHYS(pg_array[i]);
                        mb->m_epg_npgs++;
                }
                mb->m_epg_last_len = length - PAGE_SIZE * (mb->m_epg_npgs - 1);
                MBUF_EXT_PGS_ASSERT_SANITY(mb);
                total -= length;
                error = uiomove_fromphys(pg_array, 0, length, uio);
                if (error != 0)
                        goto failed;
                mb->m_len = length;
                mb->m_ext.ext_size += PAGE_SIZE * mb->m_epg_npgs;
                if (flags & M_PKTHDR)
                        m->m_pkthdr.len += length;
        }
        return (m);

failed:
        m_freem(m);
        return (NULL);
}

/*
 * Copy the contents of uio into a properly sized mbuf chain.
 */
struct mbuf *
m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
{
        struct mbuf *m, *mb;
        int error, length;
        ssize_t total;
        int progress = 0;

        if (flags & M_EXTPG)
                return (m_uiotombuf_nomap(uio, how, len, align, flags));

        /*
         * len can be zero or an arbitrarily large value bound by
         * the total data supplied by the uio.
         */
        if (len > 0)
                total = (uio->uio_resid < len) ? uio->uio_resid : len;
        else
                total = uio->uio_resid;

        /*
         * The smallest unit returned by m_getm2() is a single mbuf
         * with pkthdr.  We can't align past it.
         */
        if (align >= MHLEN)
                return (NULL);

        /*
         * Give us the full allocation or nothing.
         * If len is zero return the smallest empty mbuf.
         */
        m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
        if (m == NULL)
                return (NULL);
        m->m_data += align;

        /* Fill all mbufs with uio data and update header information. */
        for (mb = m; mb != NULL; mb = mb->m_next) {
                length = min(M_TRAILINGSPACE(mb), total - progress);

                error = uiomove(mtod(mb, void *), length, uio);
                if (error) {
                        m_freem(m);
                        return (NULL);
                }

                mb->m_len = length;
                progress += length;
                if (flags & M_PKTHDR) {
                        m->m_pkthdr.len += length;
                        m->m_pkthdr.memlen += MSIZE;
                        if (mb->m_flags & M_EXT)
                                m->m_pkthdr.memlen += mb->m_ext.ext_size;
                }
        }
        KASSERT(progress == total, ("%s: progress != total", __func__));

        return (m);
}

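/*
 * Illustrative socket-send style use: turn the user data described by
 * a uio into a packet chain, leaving room to prepend headers later:
 *
 *	// len == 0 requests all of uio->uio_resid
 *	m = m_uiotombuf(uio, M_WAITOK, 0, max_linkhdr, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */
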
/*
 * Copy data to/from an unmapped mbuf and a uio, limited by len if it is set.
 */
int
m_unmapped_uiomove(const struct mbuf *m, int m_off, struct uio *uio, int len)
{
        vm_page_t pg;
        int error, i, off, pglen, pgoff, seglen, segoff;

        M_ASSERTEXTPG(m);
        error = 0;

        /* Skip over any data removed from the front. */
        off = mtod(m, vm_offset_t);
        off += m_off;

        if (m->m_epg_hdrlen != 0) {
                if (off >= m->m_epg_hdrlen) {
                        off -= m->m_epg_hdrlen;
                } else {
                        seglen = m->m_epg_hdrlen - off;
                        segoff = off;
                        seglen = min(seglen, len);
                        off = 0;
                        len -= seglen;
                        error = uiomove(__DECONST(void *,
                            &m->m_epg_hdr[segoff]), seglen, uio);
                }
        }
        pgoff = m->m_epg_1st_off;
        for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
                pglen = m_epg_pagelen(m, i, pgoff);
                if (off >= pglen) {
                        off -= pglen;
                        pgoff = 0;
                        continue;
                }
                seglen = pglen - off;
                segoff = pgoff + off;
                off = 0;
                seglen = min(seglen, len);
                len -= seglen;
                pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
                error = uiomove_fromphys(&pg, segoff, seglen, uio);
                pgoff = 0;
        }
        if (len != 0 && error == 0) {
                KASSERT((off + len) <= m->m_epg_trllen,
                    ("off + len > trail (%d + %d > %d, m_off = %d)", off, len,
                    m->m_epg_trllen, m_off));
                error = uiomove(__DECONST(void *, &m->m_epg_trail[off]),
                    len, uio);
        }
        return (error);
}

/*
 * Copy an mbuf chain into a uio, limited by len if it is set.
 */
int
m_mbuftouio(struct uio *uio, const struct mbuf *m, int len)
{
        int error, length, total;
        int progress = 0;

        if (len > 0)
                total = min(uio->uio_resid, len);
        else
                total = uio->uio_resid;

        /* Fill the uio with data from the mbufs. */
        for (; m != NULL; m = m->m_next) {
                length = min(m->m_len, total - progress);

                if ((m->m_flags & M_EXTPG) != 0)
                        error = m_unmapped_uiomove(m, 0, uio, length);
                else
                        error = uiomove(mtod(m, void *), length, uio);
                if (error)
                        return (error);

                progress += length;
        }

        return (0);
}

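/*
 * Sketch (hypothetical receive path): copy a whole chain out to the
 * user buffer described by "uio"; a len of 0 disables the length cap:
 *
 *	error = m_mbuftouio(uio, m, 0);
 *	if (error != 0)
 *		return (error);
 */
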
/*
 * Create a writable copy of the mbuf chain.  While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs.  The second mbuf in this chain is likely
 * to be a cluster.  The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc.  The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
 */
struct mbuf *
m_unshare(struct mbuf *m0, int how)
{
        struct mbuf *m, *mprev;
        struct mbuf *n, *mfirst, *mlast;
        int len, off;

        mprev = NULL;
        for (m = m0; m != NULL; m = mprev->m_next) {
                /*
                 * Regular mbufs are ignored unless there's a cluster
                 * in front of it that we can use to coalesce.  We do
                 * the latter mainly so later clusters can be coalesced
                 * also w/o having to handle them specially (i.e. convert
                 * mbuf+cluster -> cluster).  This optimization is heavily
                 * influenced by the assumption that we're running over
                 * Ethernet where MCLBYTES is large enough that the max
                 * packet size will permit lots of coalescing into a
                 * single cluster.  This in turn permits efficient
                 * crypto operations, especially when using hardware.
                 */
                if ((m->m_flags & M_EXT) == 0) {
                        if (mprev && (mprev->m_flags & M_EXT) &&
                            m->m_len <= M_TRAILINGSPACE(mprev)) {
                                /* XXX: this ignores mbuf types */
                                memcpy(mtod(mprev, caddr_t) + mprev->m_len,
                                    mtod(m, caddr_t), m->m_len);
                                mprev->m_len += m->m_len;
                                mprev->m_next = m->m_next;	/* unlink from chain */
                                m_free(m);			/* reclaim mbuf */
                        } else {
                                mprev = m;
                        }
                        continue;
                }
                /*
                 * Writable mbufs are left alone (for now).
                 */
                if (M_WRITABLE(m)) {
                        mprev = m;
                        continue;
                }

                /*
                 * Not writable, replace with a copy or coalesce with
                 * the previous mbuf if possible (since we have to copy
                 * it anyway, we try to reduce the number of mbufs and
                 * clusters so that future work is easier).
                 */
                KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
                /* NB: we only coalesce into a cluster or larger */
                if (mprev != NULL && (mprev->m_flags & M_EXT) &&
                    m->m_len <= M_TRAILINGSPACE(mprev)) {
                        /* XXX: this ignores mbuf types */
                        memcpy(mtod(mprev, caddr_t) + mprev->m_len,
                            mtod(m, caddr_t), m->m_len);
                        mprev->m_len += m->m_len;
                        mprev->m_next = m->m_next;	/* unlink from chain */
                        m_free(m);			/* reclaim mbuf */
                        continue;
                }

                /*
                 * Allocate new space to hold the copy and copy the data.
                 * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
                 * splitting them into clusters.  We could just malloc a
                 * buffer and make it external but too many device drivers
                 * don't know how to break up the non-contiguous memory when
                 * doing DMA.
                 */
                n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
                if (n == NULL) {
                        m_freem(m0);
                        return (NULL);
                }
                if (m->m_flags & M_PKTHDR) {
                        KASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR",
                            __func__, m0, m));
                        m_move_pkthdr(n, m);
                }
                len = m->m_len;
                off = 0;
                mfirst = n;
                mlast = NULL;
                for (;;) {
                        int cc = min(len, MCLBYTES);
                        memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
                        n->m_len = cc;
                        if (mlast != NULL)
                                mlast->m_next = n;
                        mlast = n;
#if 0
                        newipsecstat.ips_clcopied++;
#endif

                        len -= cc;
                        if (len <= 0)
                                break;
                        off += cc;

                        n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
                        if (n == NULL) {
                                m_freem(mfirst);
                                m_freem(m0);
                                return (NULL);
                        }
                }
                n->m_next = m->m_next;
                if (mprev == NULL)
                        m0 = mfirst;		/* new head of chain */
                else
                        mprev->m_next = mfirst;	/* replace old mbuf */
                m_free(m);			/* release old mbuf */
                mprev = mfirst;
        }
        return (m0);
}

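/*
 * Illustrative IPsec-style use: guarantee a writable, compacted chain
 * before encrypting the payload in place:
 *
 *	m = m_unshare(m, M_NOWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);	// original chain already freed
 */
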
#ifdef MBUF_PROFILING

#define	MP_BUCKETS 32 /* don't just change this as things may overflow. */
struct mbufprofile {
        uintmax_t wasted[MP_BUCKETS];
        uintmax_t used[MP_BUCKETS];
        uintmax_t segments[MP_BUCKETS];
} mbprof;

void
m_profile(struct mbuf *m)
{
        int segments = 0;
        int used = 0;
        int wasted = 0;

        while (m) {
                segments++;
                used += m->m_len;
                if (m->m_flags & M_EXT) {
                        wasted += MHLEN - sizeof(m->m_ext) +
                            m->m_ext.ext_size - m->m_len;
                } else {
                        if (m->m_flags & M_PKTHDR)
                                wasted += MHLEN - m->m_len;
                        else
                                wasted += MLEN - m->m_len;
                }
                m = m->m_next;
        }
        /* be paranoid.. it helps */
        if (segments > MP_BUCKETS - 1)
                segments = MP_BUCKETS - 1;
        if (used > 100000)
                used = 100000;
        if (wasted > 100000)
                wasted = 100000;
        /* store in the appropriate bucket */
        /* don't bother locking. if it's slightly off, so what? */
        mbprof.segments[segments]++;
        mbprof.used[fls(used)]++;
        mbprof.wasted[fls(wasted)]++;
}

static int
mbprof_handler(SYSCTL_HANDLER_ARGS)
{
        char buf[256];
        struct sbuf sb;
        int error;
        uint64_t *p;

        sbuf_new_for_sysctl(&sb, buf, sizeof(buf), req);

        p = &mbprof.wasted[0];
        sbuf_printf(&sb,
            "wasted:\n"
            "%ju %ju %ju %ju %ju %ju %ju %ju "
            "%ju %ju %ju %ju %ju %ju %ju %ju\n",
            p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
            p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
        p = &mbprof.wasted[16];
        sbuf_printf(&sb,
            "%ju %ju %ju %ju %ju %ju %ju %ju "
            "%ju %ju %ju %ju %ju %ju %ju %ju\n",
            p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
            p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
        p = &mbprof.used[0];
        sbuf_printf(&sb,
            "used:\n"
            "%ju %ju %ju %ju %ju %ju %ju %ju "
            "%ju %ju %ju %ju %ju %ju %ju %ju\n",
            p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
            p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
        p = &mbprof.used[16];
        sbuf_printf(&sb,
            "%ju %ju %ju %ju %ju %ju %ju %ju "
            "%ju %ju %ju %ju %ju %ju %ju %ju\n",
            p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
            p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
        p = &mbprof.segments[0];
        sbuf_printf(&sb,
            "segments:\n"
            "%ju %ju %ju %ju %ju %ju %ju %ju "
            "%ju %ju %ju %ju %ju %ju %ju %ju\n",
            p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
            p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
        p = &mbprof.segments[16];
        sbuf_printf(&sb,
            "%ju %ju %ju %ju %ju %ju %ju %ju "
            "%ju %ju %ju %ju %ju %ju %ju %ju\n",
            p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
            p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif

        error = sbuf_finish(&sb);
        sbuf_delete(&sb);
        return (error);
}

static int
mbprof_clr_handler(SYSCTL_HANDLER_ARGS)
{
        int clear, error;

        clear = 0;
        error = sysctl_handle_int(oidp, &clear, 0, req);
        if (error || !req->newptr)
                return (error);

        if (clear) {
                bzero(&mbprof, sizeof(mbprof));
        }

        return (error);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofile,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    mbprof_handler, "A",
    "mbuf profiling statistics");

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofileclr,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
    mbprof_clr_handler, "I",
    "clear mbuf profiling statistics");

#endif