/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#ifdef MBUF_STRESS_TEST
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
int	m_defragrandomfailures;
#endif
/*
 * sysctl(8) exported objects
 */
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	   &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	   &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	   &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	   &m_defragfailure, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	   &m_defragrandomfailures, 0, "");
#endif
/*
 * Malloc-type for external ext_buf ref counts.
 */
static MALLOC_DEFINE(M_MBUF, "mbextcnt", "mbuf external ref counts");
/*
 * Allocate a given length worth of mbufs and/or clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If an
 * existing mbuf chain is provided, then we will append the new chain
 * to the existing one but still return the top of the newly allocated
 * chain.
 */
m_getm(struct mbuf *m, int len, int how, short type)

	struct mbuf *mb, *top, *cur, *mtail;

	KASSERT(len >= 0, ("m_getm(): len is < 0"));

	/* If m != NULL, we will append to the end of that chain. */
	for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);

	/*
	 * Calculate how many mbufs+clusters ("packets") we need and how much
	 * leftover there is after that and allocate the first mbuf+cluster
	 * if required.
	 */
	num = len / MCLBYTES;
	rem = len % MCLBYTES;

	if ((top = cur = m_getcl(how, type, 0)) == NULL)

	for (i = 0; i < num; i++) {
		mb = m_getcl(how, type, 0);
		cur = (cur->m_next = mb);
	}
	mb = (rem > MINCLSIZE) ?
	    m_getcl(how, type, 0) : m_get(how, type);
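/*
 * Usage sketch for m_getm() (illustrative only; the lengths and the
 * example_ function name are hypothetical).  It pre-allocates room for
 * a payload and later grows the same chain, handling the NULL return
 * that M_DONTWAIT allocations can produce.
 */
#if 0
static int
example_getm(void)
{
	struct mbuf *chain;

	/* Allocate mbufs/clusters for 9000 bytes of data. */
	chain = m_getm(NULL, 9000, M_DONTWAIT, MT_DATA);
	if (chain == NULL)
		return (ENOBUFS);
	/* Append room for another 2000 bytes to the same chain. */
	if (m_getm(chain, 2000, M_DONTWAIT, MT_DATA) == NULL) {
		m_freem(chain);
		return (ENOBUFS);
	}
	m_freem(chain);
	return (0);
}
#endif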
/*
 * Free an entire chain of mbufs and associated external buffers, if
 * any.
 */
m_freem(struct mbuf *mb)
/*
 * Configure a provided mbuf to refer to the provided external storage
 * buffer and set up a reference count for said buffer.  If the setting
 * up of the reference count fails, the M_EXT bit will not be set.  If
 * successful, the M_EXT bit is set in the mbuf's flags.
 *
 * Arguments:
 *    mb     The existing mbuf to which to attach the provided buffer.
 *    buf    The address of the provided external storage buffer.
 *    size   The size of the provided buffer.
 *    freef  A pointer to a routine that is responsible for freeing the
 *           provided external storage buffer.
 *    args   A pointer to an argument structure (of any type) to be passed
 *           to the provided freef routine (may be NULL).
 *    flags  Any other flags to be passed to the provided mbuf.
 *    type   The type that the external storage buffer should be
 *           labeled with.
 */
m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
    void (*freef)(void *, void *), void *args, int flags, int type)

	u_int *ref_cnt = NULL;

	/* XXX Shouldn't be adding EXT_CLUSTER with this API */
	if (type == EXT_CLUSTER)
		ref_cnt = (u_int *)uma_find_refcnt(zone_clust,
	else if (type == EXT_EXTREF)
		ref_cnt = __DEVOLATILE(u_int *, mb->m_ext.ref_cnt);
	mb->m_ext.ref_cnt = (ref_cnt == NULL) ?
	    malloc(sizeof(u_int), M_MBUF, M_NOWAIT) : (u_int *)ref_cnt;
	if (mb->m_ext.ref_cnt != NULL) {
		*(mb->m_ext.ref_cnt) = 1;
		mb->m_flags |= (M_EXT | flags);
		mb->m_ext.ext_buf = buf;
		mb->m_data = mb->m_ext.ext_buf;
		mb->m_ext.ext_size = size;
		mb->m_ext.ext_free = freef;
		mb->m_ext.ext_args = args;
		mb->m_ext.ext_type = type;
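/*
 * Sketch of attaching caller-owned external storage with m_extadd().
 * The buffer, free routine, and example_ names are hypothetical; the
 * point is the call shape and the mandatory M_EXT check afterwards,
 * since the reference-count allocation above can fail.
 */
#if 0
static void
example_ext_free(void *buf, void *args)
{
	/* Return the buffer to whatever allocator it came from. */
	free(buf, M_DEVBUF);
}

static int
example_extadd(struct mbuf *m, caddr_t buf, u_int size, int type)
{
	m_extadd(m, buf, size, example_ext_free, NULL, 0, type);
	if ((m->m_flags & M_EXT) == 0)
		return (ENOBUFS);	/* ref count could not be set up */
	return (0);
}
#endif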
/*
 * Non-directly-exported function to clean up after mbufs with M_EXT
 * storage attached to them if the reference count hits 0.
 */
mb_free_ext(struct mbuf *m)

	/* Account for lazy ref count assign. */
	if (m->m_ext.ref_cnt == NULL)

	/*
	 * This is tricky.  We need to make sure to decrement the
	 * refcount in a safe way but to also clean up if we're the
	 * last reference.  This method seems to do it without race.
	 */
	while (dofree == 0) {
		cnt = *(m->m_ext.ref_cnt);
		if (atomic_cmpset_int(m->m_ext.ref_cnt, cnt, cnt - 1)) {

	/*
	 * Do the free, should be safe.
	 */
	if (m->m_ext.ext_type == EXT_PACKET) {
		uma_zfree(zone_pack, m);
	} else if (m->m_ext.ext_type == EXT_CLUSTER) {
		uma_zfree(zone_clust, m->m_ext.ext_buf);
		m->m_ext.ext_buf = NULL;
		(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
		if (m->m_ext.ext_type != EXT_EXTREF) {
			if (m->m_ext.ref_cnt != NULL)
				free(__DEVOLATILE(u_int *,
				    m->m_ext.ref_cnt), M_MBUF);
			m->m_ext.ref_cnt = NULL;
		m->m_ext.ext_buf = NULL;
	uma_zfree(zone_mbuf, m);
273 * "Move" mbuf pkthdr from "from" to "to".
274 * "from" must have M_PKTHDR set, and "to" must be empty.
277 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
281 /* see below for why these are not enabled */
283 /* Note: with MAC, this may not be a good assertion. */
284 KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
285 ("m_move_pkthdr: to has tags"));
289 * XXXMAC: It could be this should also occur for non-MAC?
291 if (to->m_flags & M_PKTHDR)
292 m_tag_delete_chain(to, NULL);
294 to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
295 if ((to->m_flags & M_EXT) == 0)
296 to->m_data = to->m_pktdat;
297 to->m_pkthdr = from->m_pkthdr; /* especially tags */
298 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
299 from->m_flags &= ~M_PKTHDR;
/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)

	/*
	 * The mbuf allocator only initializes the pkthdr
	 * when the mbuf is allocated with MGETHDR.  Many users
	 * (e.g. m_copy*, m_prepend) use MGET and then
	 * smash the pkthdr as needed causing these
	 * assertions to trip.  For now just disable them.
	 */
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
	MBUF_CHECKSLEEP(how);
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, MBTOM(how)));
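/*
 * Sketch of the usual pkthdr hand-off: allocate a fresh header mbuf and
 * give it the packet header (and tag chain) of an existing packet.  The
 * example_ name is hypothetical; m_dup_pkthdr() deep-copies the tags
 * and can fail under M_DONTWAIT, so the result is checked.
 */
#if 0
static struct mbuf *
example_dup_pkthdr(struct mbuf *pkt)
{
	struct mbuf *hdr;

	MGETHDR(hdr, M_DONTWAIT, MT_DATA);
	if (hdr == NULL)
		return (NULL);
	if (!m_dup_pkthdr(hdr, pkt, M_DONTWAIT)) {
		m_freem(hdr);
		return (NULL);
	}
	hdr->m_len = 0;
	return (hdr);
}
#endif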
/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
m_prepend(struct mbuf *m, int len, int how)

	if (m->m_flags & M_PKTHDR)
		MGETHDR(mn, how, m->m_type);
		MGET(mn, how, m->m_type);

	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
m_copym(struct mbuf *m, int off0, int len, int wait)

	struct mbuf *n, **np;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	MBUF_CHECKSLEEP(wait);
	if (off == 0 && m->m_flags & M_PKTHDR)
	KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		KASSERT(len == M_COPYALL,
		    ("m_copym, length > size of mbuf chain"));
		MGETHDR(n, wait, m->m_type);
		MGET(n, wait, m->m_type);
		if (!m_dup_pkthdr(n, m, wait))
		if (len == M_COPYALL)
			n->m_pkthdr.len -= off0;
			n->m_pkthdr.len = len;
	n->m_len = min(len, m->m_len - off);
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + off;
		n->m_ext.ref_cnt = m->m_ext.ref_cnt;
		bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
	if (len != M_COPYALL)
		mbstat.m_mcfail++;	/* XXX: No consistency. */
	mbstat.m_mcfail++;	/* XXX: No consistency. */
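/*
 * Sketch: take a read-only, reference-counted copy of the first few
 * bytes of a packet, e.g. before handing the original down a path that
 * may consume it.  "hdrlen" and the example_ name are illustrative.
 */
#if 0
static struct mbuf *
example_copym(struct mbuf *pkt, int hdrlen)
{
	struct mbuf *copy;

	copy = m_copym(pkt, 0, hdrlen, M_DONTWAIT);
	if (copy == NULL)
		return (NULL);		/* allocation failed */
	/* "copy" shares clusters with "pkt"; treat it as read-only. */
	return (copy);
}
#endif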
/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
m_copypacket(struct mbuf *m, int how)

	struct mbuf *top, *n, *o;

	MBUF_CHECKSLEEP(how);
	MGET(n, how, m->m_type);
	if (!m_dup_pkthdr(n, m, how))
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext.ref_cnt = m->m_ext.ref_cnt;
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);

	MGET(o, how, m->m_type);
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext.ref_cnt = m->m_ext.ref_cnt;
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);

	mbstat.m_mcfail++;	/* XXX: No consistency. */
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
	KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
	count = min(m->m_len - off, len);
	bcopy(mtod(m, caddr_t) + off, cp, count);
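/*
 * Sketch: linearize a small header out of a chain into a local
 * structure without modifying the chain.  The struct example_hdr type
 * and example_ name are hypothetical.
 */
#if 0
static void
example_copydata(const struct mbuf *m, int off)
{
	struct example_hdr {
		uint32_t field;
	} hdr;

	/* Caller must ensure the chain holds off + sizeof(hdr) bytes. */
	m_copydata(m, off, sizeof(hdr), (caddr_t)&hdr);
}
#endif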
/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
m_dup(struct mbuf *m, int how)

	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	MBUF_CHECKSLEEP(how);

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		/* Get the next new mbuf */
		if (remain >= MINCLSIZE) {
			n = m_getcl(how, m->m_type, 0);
			n = m_get(how, m->m_type);
		if (top == NULL) {		/* First one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how)) {

		/* Link it into the new chain */

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			if (moff == m->m_len) {

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));

	mbstat.m_mcfail++;	/* XXX: No consistency. */
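/*
 * Sketch: obtain a writable deep copy of a packet before modifying its
 * payload in place.  The example_ name is illustrative; unlike
 * m_copypacket(), the clusters themselves are copied here.
 */
#if 0
static struct mbuf *
example_dup(struct mbuf *pkt)
{
	struct mbuf *writable;

	writable = m_dup(pkt, M_DONTWAIT);
	if (writable == NULL)
		return (NULL);
	/* Clusters were copied, so mtod(writable, ...) may be written. */
	return (writable);
}
#endif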
/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
m_cat(struct mbuf *m, struct mbuf *n)

	if (m->m_flags & M_EXT ||
	    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
		/* just join the two chains */
	/* splat the data from one into the other */
	bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
	m->m_len += n->m_len;
m_adj(struct mbuf *mp, int req_len)

	if ((m = mp) == NULL)

	while (m != NULL && len > 0) {
		if (m->m_len <= len) {
	if (mp->m_flags & M_PKTHDR)
		m->m_pkthdr.len -= (req_len - len);

	/*
	 * Trim from tail.  Scan the mbuf chain,
	 * calculating its length and finding the last mbuf.
	 * If the adjustment only affects this mbuf, then just
	 * adjust and return.  Otherwise, rescan and truncate
	 * after the remaining size.
	 */
	if (m->m_next == (struct mbuf *)0)
	if (m->m_len >= len) {
		if (mp->m_flags & M_PKTHDR)
			mp->m_pkthdr.len -= len;

	/*
	 * Correct length for chain is "count".
	 * Find the mbuf with last data, adjust its length,
	 * and toss data from remaining mbufs on chain.
	 */
	if (m->m_flags & M_PKTHDR)
		m->m_pkthdr.len = count;
	for (; m; m = m->m_next) {
		if (m->m_len >= count) {
			if (m->m_next != NULL) {
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
m_pullup(struct mbuf *n, int len)

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
	MGET(m, M_DONTWAIT, n->m_type);
	if (n->m_flags & M_PKTHDR)
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
	} while (len > 0 && n);
	mbstat.m_mpfail++;	/* XXX: No consistency. */
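/*
 * Sketch of the classic m_pullup() idiom: make sure the first mbuf
 * holds a full header before dereferencing it with mtod().  The
 * example_ name and hdrlen parameter are illustrative.  Note that
 * m_pullup() frees the chain and returns NULL on failure.
 */
#if 0
static caddr_t
example_pullup(struct mbuf **mp, int hdrlen)
{
	if ((*mp)->m_len < hdrlen) {
		*mp = m_pullup(*mp, hdrlen);
		if (*mp == NULL)
			return (NULL);	/* chain was freed */
	}
	return (mtod(*mp, caddr_t));
}
#endif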
/*
 * Like m_pullup(), except a new mbuf is always allocated, and we allow
 * the amount of empty space before the data in the new mbuf to be specified
 * (in the event that the caller expects to prepend later).
 */
m_copyup(struct mbuf *n, int len, int dstoff)

	if (len > (MHLEN - dstoff))
	MGET(m, M_DONTWAIT, n->m_type);
	if (n->m_flags & M_PKTHDR)
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
	} while (len > 0 && n);
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
m_split(struct mbuf *m0, int len0, int wait)

	u_int len = len0, remain;

	MBUF_CHECKSLEEP(wait);
	for (m = m0; m && len > m->m_len; m = m->m_next)
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
	} else if (remain == 0) {
	MGET(n, wait, m->m_type);
	if (m->m_flags & M_EXT) {
		n->m_ext.ref_cnt = m->m_ext.ref_cnt;
		n->m_data = m->m_data + len;
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	n->m_next = m->m_next;
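/*
 * Sketch: split a packet after "firstlen" bytes, leaving the head in
 * place and returning the tail as a separate chain (or NULL, with the
 * original chain restored, on failure).  Names are illustrative.
 */
#if 0
static struct mbuf *
example_split(struct mbuf *pkt, int firstlen)
{
	struct mbuf *tail;

	tail = m_split(pkt, firstlen, M_DONTWAIT);
	if (tail == NULL)
		return (NULL);		/* "pkt" is still intact */
	/* "pkt" now holds the first firstlen bytes, "tail" the rest. */
	return (tail);
}
#endif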
/*
 * Routine to copy from device local memory into mbufs.
 * Note that `off' argument is offset into first mbuf of target chain from
 * which to begin copying the data to.
 */
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))

	struct mbuf *top = NULL, **mp = &top;

	if (off < 0 || off > MHLEN)
	if (top == NULL) {	/* First one, must be PKTHDR */
		if (totlen + off >= MINCLSIZE) {
			m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
			m = m_gethdr(M_DONTWAIT, MT_DATA);

			/* Place initial small packet/header at end of mbuf */
			if (m && totlen + off + max_linkhdr <= MLEN) {
				m->m_data += max_linkhdr;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = totlen;
		if (totlen + off >= MINCLSIZE) {
			m = m_getcl(M_DONTWAIT, MT_DATA, 0);
			m = m_get(M_DONTWAIT, MT_DATA);
	m->m_len = len = min(totlen, len);
	copy(buf, mtod(m, caddr_t), (u_int)len);
	bcopy(buf, mtod(m, caddr_t), (u_int)len);
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)

	struct mbuf *m = m0, *n;

	while (off > (mlen = m->m_len)) {
		if (m->m_next == NULL) {
			n = m_get(M_DONTWAIT, m->m_type);
			bzero(mtod(n, caddr_t), MLEN);
			n->m_len = min(MLEN, len + off);
	mlen = min(m->m_len - off, len);
	bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
	if (m->m_next == NULL) {
		n = m_get(M_DONTWAIT, m->m_type);
		n->m_len = min(MLEN, len);
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
/*
 * Append the specified data to the indicated mbuf chain,
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
m_append(struct mbuf *m0, int len, c_caddr_t cp)

	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
	space = M_TRAILINGSPACE(m);
	/*
	 * Copy into available space.
	 */
	if (space > remainder)
	bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
	cp += space, remainder -= space;
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(M_DONTWAIT, m->m_type);
		n->m_len = min(MLEN, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len, remainder -= n->m_len;
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
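/*
 * Sketch: append a small trailer to a packet with m_append(), which
 * extends the chain with new mbufs when the existing space is not
 * enough and returns 0 only if that allocation fails.  Names are
 * illustrative.
 */
#if 0
static int
example_append(struct mbuf *pkt, const char *trailer, int tlen)
{
	if (!m_append(pkt, tlen, trailer))
		return (ENOBUFS);
	return (0);
}
#endif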
/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
	KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
	count = min(m->m_len - off, len);
	rval = (*f)(arg, mtod(m, caddr_t) + off, count);
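/*
 * Sketch: walk a region of a chain with m_apply() and a trivial
 * callback.  The accumulator here is illustrative; the callback
 * contract is "return 0 to continue, nonzero to abort the walk".
 */
#if 0
static int
example_apply_cb(void *arg, void *data, u_int len)
{
	u_int *total = arg;

	*total += len;		/* e.g. accumulate over the segments */
	return (0);
}

static int
example_apply(struct mbuf *m, int off, int len)
{
	u_int total = 0;

	return (m_apply(m, off, len, example_apply_cb, &total));
}
#endif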
/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
m_getptr(struct mbuf *m, int loc, int *off)

	/* Normal end of search. */
	if (m->m_len > loc) {
	if (m->m_next == NULL) {
		/* Point at the end of valid data. */
m_print(const struct mbuf *m, int maxlen)

	const struct mbuf *m2;

	if (m->m_flags & M_PKTHDR)
		len = m->m_pkthdr.len;
	while (m2 != NULL && (len == -1 || len)) {
		if (maxlen != -1 && pdata > maxlen)
		printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
		    m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
		    "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
		    "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
		printf(", %*D\n", m2->m_len, (u_char *)m2->m_data, "-");
	printf("%d bytes unaccounted for.\n", len);
m_fixhdr(struct mbuf *m0)

	len = m_length(m0, NULL);
	m0->m_pkthdr.len = len;

m_length(struct mbuf *m0, struct mbuf **last)

	for (m = m0; m != NULL; m = m->m_next) {
		if (m->m_next == NULL)
/*
 * Defragment a mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 */
m_defrag(struct mbuf *m0, int how)

	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length;

	MBUF_CHECKSLEEP(how);
	if (!(m0->m_flags & M_PKTHDR))

	m_fixhdr(m0); /* Needed sanity check */

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = arc4random() & 0xff;

	if (m0->m_pkthdr.len > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
		m_final = m_gethdr(how, MT_DATA);

	if (m_final == NULL)

	if (m_dup_pkthdr(m_final, m0, how) == 0)

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)

		if (m_new == NULL) {
			m_new = m_getcl(how, MT_DATA, 0);
			m_new = m_get(how, MT_DATA);

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
	}
#ifdef MBUF_STRESS_TEST
	if (m0->m_next == NULL)
		m_defraguseless++;
#endif
#ifdef MBUF_STRESS_TEST
	m_defragpackets++;
	m_defragbytes += m0->m_pkthdr.len;
#endif
#ifdef MBUF_STRESS_TEST
	m_defragfailure++;
#endif
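/*
 * Sketch of a transmit-path style use of m_defrag(): collapse a long
 * chain into the fewest mbufs/clusters, falling back to the original
 * chain (which m_defrag() leaves untouched) on failure.  The example_
 * name is illustrative.
 */
#if 0
static struct mbuf *
example_defrag(struct mbuf *pkt)
{
	struct mbuf *collapsed;

	collapsed = m_defrag(pkt, M_DONTWAIT);
	if (collapsed == NULL)
		return (pkt);		/* failed: original is unchanged */
	return (collapsed);		/* success: original was freed */
}
#endif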
#ifdef MBUF_STRESS_TEST

/*
 * Fragment an mbuf chain.  There's no reason you'd ever want to do
 * this in normal usage, but it's great for stress testing various
 * mbuf consumers.
 *
 * If fragmentation is not possible, the original chain will be
 * returned.
 *
 * Possible length values:
 *  0	 no fragmentation will occur
 *  > 0	 each fragment will be of the specified length
 *  -1	 each fragment will be the same random value in length
 *  -2	 each fragment's length will be entirely random
 * (Random values range from 1 to 256)
 */
m_fragment(struct mbuf *m0, int how, int length)

	struct mbuf *m_new = NULL, *m_final = NULL;

	if (!(m0->m_flags & M_PKTHDR))
	if ((length == 0) || (length < -2))

	m_fixhdr(m0); /* Needed sanity check */

	m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	if (m_final == NULL)

	if (m_dup_pkthdr(m_final, m0, how) == 0)

	length = 1 + (arc4random() & 255);

	while (progress < m0->m_pkthdr.len) {
		fraglen = 1 + (arc4random() & 255);
		if (fraglen > m0->m_pkthdr.len - progress)
			fraglen = m0->m_pkthdr.len - progress;

		if (fraglen > MCLBYTES)

		if (m_new == NULL) {
			m_new = m_getcl(how, MT_DATA, 0);

		m_copydata(m0, progress, fraglen, mtod(m_new, caddr_t));
		progress += fraglen;
		m_new->m_len = fraglen;
		if (m_new != m_final)
			m_cat(m_final, m_new);

	/* Return the original chain on failure */
m_uiotombuf(struct uio *uio, int how, int len)

	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, error = 0, length, total;

	total = min(uio->uio_resid, len);
	total = uio->uio_resid;
	m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	m_final = m_gethdr(how, MT_DATA);
	if (m_final == NULL)
	while (progress < total) {
		length = total - progress;
		if (length > MCLBYTES)
		if (m_new == NULL) {
			m_new = m_getcl(how, MT_DATA, 0);
			m_new = m_get(how, MT_DATA);
		error = uiomove(mtod(m_new, void *), length, uio);
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
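/*
 * Sketch: gather the remainder of a user I/O request into a new mbuf
 * chain, as a socket send path might.  Passing uio_resid as the length
 * simply caps the copy at everything that is left; the example_ name
 * is illustrative.
 */
#if 0
static struct mbuf *
example_uiotombuf(struct uio *uio)
{
	return (m_uiotombuf(uio, M_DONTWAIT, uio->uio_resid));
}
#endif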