2 * Copyright (c) 1982, 1986, 1988, 1993
3 * The Regents of the University of California. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * @(#)mbuf.h 8.5 (Berkeley) 2/19/95
40 #include <sys/mutex.h> /* XXX */
43 * Mbufs are of a single size, MSIZE (machine/param.h), which
44 * includes overhead. An mbuf may add a single "mbuf cluster" of size
45 * MCLBYTES (also in machine/param.h), which has no additional overhead
46 * and is used instead of the internal data area; this is done when
47 * at least MINCLSIZE of data must be stored.
50 #define MLEN (MSIZE - sizeof(struct m_hdr)) /* normal data len */
51 #define MHLEN (MLEN - sizeof(struct pkthdr)) /* data len w/pkthdr */
/* NOTE(review): MINCLSIZE is one byte more than fits in a header mbuf, so a
 * cluster is taken only when the data cannot fit in internal storage (see
 * the file's intro comment about MINCLSIZE). */
53 #define MINCLSIZE (MHLEN + 1) /* smallest amount to put in cluster */
54 #define M_MAXCOMPRESS (MHLEN / 2) /* max amount to copy for compression */
57 * Maximum number of allocatable counters for external buffers. This
58 * ensures enough VM address space for the allocation of counters
59 * in the extreme case where all possible external buffers are allocated.
61 * Note: When new types of external storage are allocated, EXT_COUNTERS
62 * must be tuned accordingly. Practically, this isn't a big deal
63 * as each counter is only a word long, so we can fit
64 * (PAGE_SIZE / length of word) counters in a single page.
66 * XXX: Must increase this if using any of if_ti, if_wb, if_sk drivers,
67 * or any other drivers which may manage their own buffers and
68 * eventually attach them to mbufs.
/* one counter per possible cluster plus one per sendfile(2) sf_buf */
70 #define EXT_COUNTERS (nmbclusters + nsfbufs)
73 * Macros for type conversion
74 * mtod(m, t) - convert mbuf pointer to data pointer of correct type
75 * dtom(x) - convert data pointer within mbuf to mbuf pointer (XXX)
77 #define mtod(m, t) ((t)((m)->m_data))
/* NOTE(review): dtom() recovers the mbuf by masking the pointer down to an
 * MSIZE boundary; this assumes the data lives inside an MSIZE-aligned mbuf
 * and presumably cannot work for external (M_EXT) cluster data -- hence the
 * XXX above.  Confirm before relying on it. */
78 #define dtom(x) ((struct mbuf *)((intptr_t)(x) & ~(MSIZE-1)))
80 /* header at beginning of each mbuf: */
/* NOTE(review): struct m_hdr members follow; the "struct m_hdr {" opener and
 * the closing brace are not visible in this listing. */
82 struct mbuf *mh_next; /* next buffer in chain */
83 struct mbuf *mh_nextpkt; /* next chain in queue/record */
84 caddr_t mh_data; /* location of data */
85 int mh_len; /* amount of data in this mbuf */
/* mh_type takes the MT_* constants below; mh_flags takes the M_* bits below */
86 short mh_type; /* type of data in this mbuf */
87 short mh_flags; /* flags; see below */
90 /* record/packet header in first mbuf of chain; valid if M_PKTHDR set */
/* NOTE(review): struct pkthdr members follow (opener not visible). */
92 struct ifnet *rcvif; /* rcv interface */
93 int len; /* total packet length */
94 /* variables for ip and tcp reassembly */
95 void *header; /* pointer to packet header */
96 /* variables for hardware checksum */
97 int csum_flags; /* flags regarding checksum */
98 int csum_data; /* data field used by csum routines */
99 struct mbuf *aux; /* extra data buffer; ipsec/others */
102 /* description of external storage mapped into mbuf, valid if M_EXT set */
/* NOTE(review): struct m_ext members follow; ref_cnt lets several mbufs
 * share one external buffer (see the MEXT_* macros below). */
104 caddr_t ext_buf; /* start of buffer */
105 void (*ext_free) /* free routine if not the usual */
106 __P((caddr_t, void *));
107 void *ext_args; /* optional argument pointer */
108 u_int ext_size; /* size of buffer, for ext_free */
109 union mext_refcnt *ref_cnt; /* pointer to ref count info */
110 int ext_type; /* type of external storage */
/* NOTE(review): the mbuf body is a union: with M_PKTHDR, a pkthdr plus
 * either an m_ext descriptor or MHLEN bytes of data; otherwise MLEN bytes
 * of inline data.  The enclosing union/struct braces are not visible here. */
117 struct pkthdr MH_pkthdr; /* M_PKTHDR set */
119 struct m_ext MH_ext; /* M_EXT set */
120 char MH_databuf[MHLEN];
123 char M_databuf[MLEN]; /* !M_PKTHDR, !M_EXT */
/* convenience accessors hiding the m_hdr/M_dat union nesting */
126 #define m_next m_hdr.mh_next
127 #define m_len m_hdr.mh_len
128 #define m_data m_hdr.mh_data
129 #define m_type m_hdr.mh_type
130 #define m_flags m_hdr.mh_flags
131 #define m_nextpkt m_hdr.mh_nextpkt
/* m_act is a historical alias for m_nextpkt */
132 #define m_act m_nextpkt
133 #define m_pkthdr M_dat.MH.MH_pkthdr
134 #define m_ext M_dat.MH.MH_dat.MH_ext
135 #define m_pktdat M_dat.MH.MH_dat.MH_databuf
136 #define m_dat M_dat.M_databuf
/*
 * mbuf flags, stored in m_flags.
 */
#define	M_EXT		0x0001	/* has associated external storage */
#define	M_PKTHDR	0x0002	/* start of record */
#define	M_EOR		0x0004	/* end of record */
#define	M_RDONLY	0x0008	/* associated data is marked read-only */
#define	M_PROTO1	0x0010	/* protocol-specific */
#define	M_PROTO2	0x0020	/* protocol-specific */
#define	M_PROTO3	0x0040	/* protocol-specific */
#define	M_PROTO4	0x0080	/* protocol-specific */
#define	M_PROTO5	0x0100	/* protocol-specific */

/* mbuf pkthdr flags, also in m_flags */
#define	M_BCAST		0x0200	/* send/received as link-level broadcast */
#define	M_MCAST		0x0400	/* send/received as link-level multicast */
#define	M_FRAG		0x0800	/* packet is a fragment of a larger packet */
#define	M_FIRSTFRAG	0x1000	/* packet is first fragment */
#define	M_LASTFRAG	0x2000	/* packet is last fragment */

/* external buffer types: identify ext_buf type */
#define	EXT_CLUSTER	1	/* mbuf cluster */
#define	EXT_SFBUF	2	/* sendfile(2)'s sf_bufs */
#define	EXT_NET_DRV	100	/* custom ext_buf provided by net driver(s) */
#define	EXT_MOD_TYPE	200	/* custom module's ext_buf type */

/*
 * Flags copied when copying m_pkthdr (see M_COPY_PKTHDR below).
 *
 * Fix: the original listed M_PROTO1 twice ("M_PROTO1|M_PROTO1|M_PROTO2").
 * Harmless at runtime since bitwise OR is idempotent, but clearly a typo.
 * Note that M_EXT, M_FIRSTFRAG and M_LASTFRAG are not in this mask and so
 * are not propagated by a pkthdr copy.
 */
#define	M_COPYFLAGS	(M_PKTHDR|M_EOR|M_PROTO1|M_PROTO2|M_PROTO3 | \
			    M_PROTO4|M_PROTO5|M_BCAST|M_MCAST|M_FRAG|M_RDONLY)
166 /* flags indicating hw checksum support and sw checksum requirements */
/* "will csum" group: requests to the hardware for outbound packets */
167 #define CSUM_IP 0x0001 /* will csum IP */
168 #define CSUM_TCP 0x0002 /* will csum TCP */
169 #define CSUM_UDP 0x0004 /* will csum UDP */
170 #define CSUM_IP_FRAGS 0x0008 /* will csum IP fragments */
171 #define CSUM_FRAGMENT 0x0010 /* will do IP fragmentation */
/* "did csum" group: reports from the hardware for inbound packets */
173 #define CSUM_IP_CHECKED 0x0100 /* did csum IP */
174 #define CSUM_IP_VALID 0x0200 /* ... the csum is valid */
175 #define CSUM_DATA_VALID 0x0400 /* csum_data field is valid */
176 #define CSUM_PSEUDO_HDR 0x0800 /* csum_data has pseudo hdr */
/* aggregate masks for checksums that can be delayed until output time */
178 #define CSUM_DELAY_DATA (CSUM_TCP | CSUM_UDP)
179 #define CSUM_DELAY_IP (CSUM_IP) /* XXX add ipv6 here too? */
/* mbuf types, stored in mh_type and used to index mbtypes[] */
182 #define MT_FREE 0 /* should be on free list */
183 #define MT_DATA 1 /* dynamic (data) allocation */
184 #define MT_HEADER 2 /* packet header */
186 #define MT_SOCKET 3 /* socket structure */
187 #define MT_PCB 4 /* protocol control block */
188 #define MT_RTABLE 5 /* routing tables */
189 #define MT_HTABLE 6 /* IMP host tables */
190 #define MT_ATABLE 7 /* address resolution tables */
192 #define MT_SONAME 8 /* socket name */
194 #define MT_SOOPTS 10 /* socket options */
196 #define MT_FTABLE 11 /* fragment reassembly header */
198 #define MT_RIGHTS 12 /* access rights */
199 #define MT_IFADDR 13 /* interface address */
201 #define MT_CONTROL 14 /* extra-data protocol message */
202 #define MT_OOBDATA 15 /* expedited data */
/* NOTE(review): value 9 is skipped between MT_SONAME and MT_SOOPTS; the
 * intervening listing line is not visible here -- possibly a retired type. */
204 #define MT_NTYPES 16 /* number of mbuf types for mbtypes[] */
/* NOTE(review): struct mbstat members follow (opener not visible); these
 * statistics are exported via the mbstat global declared below. */
210 u_long m_mbufs; /* # mbufs obtained from page pool */
211 u_long m_clusters; /* # clusters obtained from page pool */
212 u_long m_clfree; /* # clusters on freelist (cache) */
213 u_long m_refcnt; /* # ref counters obtained from page pool */
214 u_long m_refree; /* # ref counters on freelist (cache) */
215 u_long m_spare; /* spare field */
216 u_long m_drops; /* times failed to find space */
217 u_long m_wait; /* times waited for space */
218 u_long m_drain; /* times drained protocols for space */
219 u_long m_mcfail; /* times m_copym failed */
220 u_long m_mpfail; /* times m_pullup failed */
221 u_long m_msize; /* length of an mbuf */
222 u_long m_mclbytes; /* length of an mbuf cluster */
223 u_long m_minclsize; /* min length of data to allocate a cluster */
224 u_long m_mlen; /* length of data in an mbuf */
225 u_long m_mhlen; /* length of data in a header mbuf */
228 /* flags to m_get/MGET */
/* M_WAIT is a deprecated alias kept for source compatibility */
231 #define M_WAIT M_TRYWAIT /* XXX: Deprecated. */
234 * Normal mbuf clusters are normally treated as character arrays
235 * after allocation, but use the first word of the buffer as a free list
236 * pointer while on the free list.
239 union mcluster *mcl_next;
240 char mcl_buf[MCLBYTES];
244 * The m_ext object reference counter structure.
247 union mext_refcnt *next_ref;
252 * free list header definitions: mbffree_lst, mclfree_lst, mcntfree_lst
260 union mcluster *m_head;
264 struct mcntfree_lst {
265 union mext_refcnt *m_head;
270 * Wake up the next instance (if any) of a sleeping allocation - which is
271 * waiting for a {cluster, mbuf} to be freed.
273 * Must be called with the appropriate mutex held.
275 #define MBWAKEUP(m_wid) do { \
277 wakeup_one(&(m_wid)); \
281 * mbuf external reference count management macros:
283 * MEXT_IS_REF(m): true if (m) is not the only mbuf referencing
284 * the external buffer ext_buf
285 * MEXT_REM_REF(m): remove reference to m_ext object
286 * MEXT_ADD_REF(m): add reference to m_ext object already
288 * MEXT_INIT_REF(m): allocate and initialize an external
289 * object reference counter for (m)
/* true iff the external buffer is shared with at least one other mbuf */
291 #define MEXT_IS_REF(m) ((m)->m_ext.ref_cnt->refcnt > 1)
/* drop one reference; the KASSERT catches underflow in debug kernels */
293 #define MEXT_REM_REF(m) do { \
294 KASSERT((m)->m_ext.ref_cnt->refcnt > 0, ("m_ext refcnt < 0")); \
295 atomic_subtract_int(&((m)->m_ext.ref_cnt->refcnt), 1); \
298 #define MEXT_ADD_REF(m) atomic_add_int(&((m)->m_ext.ref_cnt->refcnt), 1)
/* pop a free ref counter, replenishing the freelist via m_alloc_ref() if
 * empty; takes and releases mcntfree.m_mtx internally */
300 #define _MEXT_ALLOC_CNT(m_cnt, how) do { \
301 union mext_refcnt *__mcnt; \
303 mtx_lock(&mcntfree.m_mtx); \
304 if (mcntfree.m_head == NULL) \
305 m_alloc_ref(1, (how)); \
306 __mcnt = mcntfree.m_head; \
307 if (__mcnt != NULL) { \
308 mcntfree.m_head = __mcnt->next_ref; \
310 __mcnt->refcnt = 0; \
312 mtx_unlock(&mcntfree.m_mtx); \
316 #define _MEXT_DEALLOC_CNT(m_cnt) do { \
317 union mext_refcnt *__mcnt = (m_cnt); \
319 mtx_lock(&mcntfree.m_mtx); \
320 __mcnt->next_ref = mcntfree.m_head; \
321 mcntfree.m_head = __mcnt; \
323 mtx_unlock(&mcntfree.m_mtx); \
326 #define MEXT_INIT_REF(m, how) do { \
327 struct mbuf *__mmm = (m); \
329 _MEXT_ALLOC_CNT(__mmm->m_ext.ref_cnt, (how)); \
330 if (__mmm->m_ext.ref_cnt != NULL) \
331 MEXT_ADD_REF(__mmm); \
335 * mbuf allocation/deallocation macros:
337 * MGET(struct mbuf *m, int how, int type)
338 * allocates an mbuf and initializes it to contain internal data.
340 * MGETHDR(struct mbuf *m, int how, int type)
341 * allocates an mbuf and initializes it to contain a packet header
345 * Lower-level macros for MGET(HDR)... Not to be used outside the
346 * subsystem ("non-exportable" macro names are prepended with "_").
/* initialize a freshly allocated mbuf for internal (non-pkthdr) data */
348 #define _MGET_SETUP(m_set, m_set_type) do { \
349 (m_set)->m_type = (m_set_type); \
350 (m_set)->m_next = NULL; \
351 (m_set)->m_nextpkt = NULL; \
352 (m_set)->m_data = (m_set)->m_dat; \
353 (m_set)->m_flags = 0; \
356 #define _MGET(m_mget, m_get_how) do { \
357 if (mmbfree.m_head == NULL) \
358 m_mballoc(1, (m_get_how)); \
359 (m_mget) = mmbfree.m_head; \
360 if ((m_mget) != NULL) { \
361 mmbfree.m_head = (m_mget)->m_next; \
362 mbtypes[MT_FREE]--; \
364 if ((m_get_how) == M_TRYWAIT) \
365 (m_mget) = m_mballoc_wait(); \
369 #define MGET(m, how, type) do { \
372 int _mtype = (type); \
374 mtx_lock(&mmbfree.m_mtx); \
378 mtx_unlock(&mmbfree.m_mtx); \
379 _MGET_SETUP(_mm, _mtype); \
381 mtx_unlock(&mmbfree.m_mtx); \
385 #define _MGETHDR_SETUP(m_set, m_set_type) do { \
386 (m_set)->m_type = (m_set_type); \
387 (m_set)->m_next = NULL; \
388 (m_set)->m_nextpkt = NULL; \
389 (m_set)->m_data = (m_set)->m_pktdat; \
390 (m_set)->m_flags = M_PKTHDR; \
391 (m_set)->m_pkthdr.rcvif = NULL; \
392 (m_set)->m_pkthdr.csum_flags = 0; \
393 (m_set)->m_pkthdr.aux = NULL; \
396 #define MGETHDR(m, how, type) do { \
399 int _mtype = (type); \
401 mtx_lock(&mmbfree.m_mtx); \
405 mtx_unlock(&mmbfree.m_mtx); \
406 _MGETHDR_SETUP(_mm, _mtype); \
408 mtx_unlock(&mmbfree.m_mtx); \
413 * mbuf external storage macros:
415 * MCLGET allocates and refers an mcluster to an mbuf
416 * MEXTADD sets up pre-allocated external storage and refers to mbuf
417 * MEXTFREE removes reference to external object and frees it if
/* pop a cluster off mclfree; caller holds mclfree.m_mtx (see MCLGET) */
420 #define _MCLALLOC(p, how) do { \
424 if (mclfree.m_head == NULL) \
425 m_clalloc(1, _mhow); \
426 _mp = (caddr_t)mclfree.m_head; \
429 mclfree.m_head = ((union mcluster *)_mp)->mcl_next; \
431 if (_mhow == M_TRYWAIT) \
432 _mp = m_clalloc_wait(); \
437 #define MCLGET(m, how) do { \
438 struct mbuf *_mm = (m); \
440 mtx_lock(&mclfree.m_mtx); \
441 _MCLALLOC(_mm->m_ext.ext_buf, (how)); \
442 mtx_unlock(&mclfree.m_mtx); \
443 if (_mm->m_ext.ext_buf != NULL) { \
444 MEXT_INIT_REF(_mm, (how)); \
445 if (_mm->m_ext.ref_cnt == NULL) { \
446 _MCLFREE(_mm->m_ext.ext_buf); \
447 _mm->m_ext.ext_buf = NULL; \
449 _mm->m_data = _mm->m_ext.ext_buf; \
450 _mm->m_flags |= M_EXT; \
451 _mm->m_ext.ext_free = NULL; \
452 _mm->m_ext.ext_args = NULL; \
453 _mm->m_ext.ext_size = MCLBYTES; \
454 _mm->m_ext.ext_type = EXT_CLUSTER; \
459 #define MEXTADD(m, buf, size, free, args, flags, type) do { \
460 struct mbuf *_mm = (m); \
462 MEXT_INIT_REF(_mm, M_TRYWAIT); \
463 if (_mm->m_ext.ref_cnt != NULL) { \
464 _mm->m_flags |= (M_EXT | (flags)); \
465 _mm->m_ext.ext_buf = (caddr_t)(buf); \
466 _mm->m_data = _mm->m_ext.ext_buf; \
467 _mm->m_ext.ext_size = (size); \
468 _mm->m_ext.ext_free = (free); \
469 _mm->m_ext.ext_args = (args); \
470 _mm->m_ext.ext_type = (type); \
474 #define _MCLFREE(p) do { \
475 union mcluster *_mp = (union mcluster *)(p); \
477 mtx_lock(&mclfree.m_mtx); \
478 _mp->mcl_next = mclfree.m_head; \
479 mclfree.m_head = _mp; \
481 MBWAKEUP(m_clalloc_wid); \
482 mtx_unlock(&mclfree.m_mtx); \
486 * If the atomic_cmpset_int() returns 0, then we effectively do nothing
487 * in terms of "cleaning up" (freeing the ext buf and ref. counter) as
488 * this means that either there are still references, or another thread
489 * is taking care of the clean-up.
/* drop our reference; the winner of the 0->1 cmpset frees the buffer
 * (custom ext_free for non-cluster types, _MCLFREE for clusters) */
491 #define MEXTFREE(m) do { \
492 struct mbuf *_mmm = (m); \
494 MEXT_REM_REF(_mmm); \
495 if (atomic_cmpset_int(&_mmm->m_ext.ref_cnt->refcnt, 0, 1)) { \
496 if (_mmm->m_ext.ext_type != EXT_CLUSTER) { \
497 (*(_mmm->m_ext.ext_free))(_mmm->m_ext.ext_buf, \
498 _mmm->m_ext.ext_args); \
500 _MCLFREE(_mmm->m_ext.ext_buf); \
501 _MEXT_DEALLOC_CNT(_mmm->m_ext.ref_cnt); \
503 _mmm->m_flags &= ~M_EXT; \
507 * MFREE(struct mbuf *m, struct mbuf *n)
508 * Free a single mbuf and associated external storage.
509 * Place the successor, if any, in n.
/* return one mbuf to mmbfree, updating the per-type counters */
511 #define MFREE(m, n) do { \
512 struct mbuf *_mm = (m); \
514 KASSERT(_mm->m_type != MT_FREE, ("freeing free mbuf")); \
515 if (_mm->m_flags & M_EXT) \
517 mtx_lock(&mmbfree.m_mtx); \
518 mbtypes[_mm->m_type]--; \
519 _mm->m_type = MT_FREE; \
520 mbtypes[MT_FREE]++; \
522 _mm->m_next = mmbfree.m_head; \
523 mmbfree.m_head = _mm; \
524 MBWAKEUP(m_mballoc_wid); \
525 mtx_unlock(&mmbfree.m_mtx); \
530 * Evaluate TRUE if it's safe to write to the mbuf m's data region (this
531 * can be both the local data payload, or an external buffer area,
532 * depending on whether M_EXT is set).
534 #define M_WRITABLE(m) (!((m)->m_flags & M_RDONLY) && (!((m)->m_flags \
535 & M_EXT) || !MEXT_IS_REF(m)))
538 * Copy mbuf pkthdr from "from" to "to".
539 * from must have M_PKTHDR set, and to must be empty.
540 * aux pointer will be moved to `to'.
/* note: the aux chain is MOVED, not copied -- "from" loses it */
542 #define M_COPY_PKTHDR(to, from) do { \
543 struct mbuf *_mfrom = (from); \
544 struct mbuf *_mto = (to); \
546 _mto->m_data = _mto->m_pktdat; \
547 _mto->m_flags = _mfrom->m_flags & M_COPYFLAGS; \
548 _mto->m_pkthdr = _mfrom->m_pkthdr; \
549 _mfrom->m_pkthdr.aux = (struct mbuf *)NULL; \
553 * Set the m_data pointer of a newly-allocated mbuf (m_get/MGET) to place
554 * an object of the specified size at the end of the mbuf, longword aligned.
556 #define M_ALIGN(m, len) do { \
557 (m)->m_data += (MLEN - (len)) & ~(sizeof(long) - 1); \
561 * As above, for mbufs allocated with m_gethdr/MGETHDR
562 * or initialized by M_COPY_PKTHDR.
564 #define MH_ALIGN(m, len) do { \
565 (m)->m_data += (MHLEN - (len)) & ~(sizeof(long) - 1); \
569 * Compute the amount of space available
570 * before the current start of data in an mbuf.
/* NOTE(review): for M_EXT mbufs this deliberately yields 0 (the real
 * distance is the commented-out expression) -- presumably because leading
 * space in a possibly-shared cluster is not safe to use; confirm. */
572 #define M_LEADINGSPACE(m) \
573 ((m)->m_flags & M_EXT ? \
574 /* (m)->m_data - (m)->m_ext.ext_buf */ 0 : \
575 (m)->m_flags & M_PKTHDR ? (m)->m_data - (m)->m_pktdat : \
576 (m)->m_data - (m)->m_dat)
579 * Compute the amount of space available
580 * after the end of data in an mbuf.
/* distance from the end of data to the end of the backing buffer */
582 #define M_TRAILINGSPACE(m) \
583 ((m)->m_flags & M_EXT ? (m)->m_ext.ext_buf + \
584 (m)->m_ext.ext_size - ((m)->m_data + (m)->m_len) : \
585 &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len))
588 * Arrange to prepend space of size plen to mbuf m.
589 * If a new mbuf must be allocated, how specifies whether to wait.
590 * If the allocation fails, the original mbuf chain is freed and m is
/* grow in place when leading space allows, else go through m_prepend() */
593 #define M_PREPEND(m, plen, how) do { \
594 struct mbuf **_mmp = &(m); \
595 struct mbuf *_mm = *_mmp; \
596 int _mplen = (plen); \
597 int __mhow = (how); \
599 if (M_LEADINGSPACE(_mm) >= _mplen) { \
600 _mm->m_data -= _mplen; \
601 _mm->m_len += _mplen; \
603 _mm = m_prepend(_mm, _mplen, __mhow); \
604 if (_mm != NULL && _mm->m_flags & M_PKTHDR) \
605 _mm->m_pkthdr.len += _mplen; \
610 * change mbuf to new type
/* retype an mbuf while keeping the mbtypes[] counters consistent */
612 #define MCHTYPE(m, t) do { \
613 struct mbuf *_mm = (m); \
616 atomic_subtract_long(&mbtypes[_mm->m_type], 1); \
617 atomic_add_long(&mbtypes[_mt], 1); \
618 _mm->m_type = (_mt); \
621 /* length to m_copy to copy all */
/* sentinel: larger than any real packet length */
622 #define M_COPYALL 1000000000
624 /* compatibility with 4.3 */
625 #define m_copy(m, o, l) m_copym((m), (o), (l), M_DONTWAIT)
628 * pkthdr.aux type tags.
/* globals defined by the mbuf subsystem (allocator state and tunables) */
636 extern u_long m_clalloc_wid; /* mbuf cluster wait count */
637 extern u_long m_mballoc_wid; /* mbuf wait count */
638 extern int max_linkhdr; /* largest link-level header */
639 extern int max_protohdr; /* largest protocol header */
640 extern int max_hdr; /* largest link+protocol header */
641 extern int max_datalen; /* MHLEN - max_hdr */
642 extern struct mbstat mbstat;
643 extern u_long mbtypes[MT_NTYPES]; /* per-type mbuf allocations */
644 extern int mbuf_wait; /* mbuf sleep time */
645 extern struct mbuf *mbutl; /* virtual address of mclusters */
646 extern struct mclfree_lst mclfree;
647 extern struct mbffree_lst mmbfree;
648 extern struct mcntfree_lst mcntfree;
649 extern int nmbclusters;
/* mbuf manipulation routines; __P() wrappers keep K&R compatibility */
653 void m_adj __P((struct mbuf *, int));
654 int m_alloc_ref __P((u_int, int));
655 void m_cat __P((struct mbuf *,struct mbuf *));
656 int m_clalloc __P((int, int));
657 caddr_t m_clalloc_wait __P((void));
658 void m_copyback __P((struct mbuf *, int, int, caddr_t));
659 void m_copydata __P((struct mbuf *,int,int,caddr_t));
660 struct mbuf *m_copym __P((struct mbuf *, int, int, int));
661 struct mbuf *m_copypacket __P((struct mbuf *, int));
662 struct mbuf *m_devget __P((char *, int, int, struct ifnet *,
663 void (*copy)(char *, caddr_t, u_int)));
664 struct mbuf *m_dup __P((struct mbuf *, int));
665 struct mbuf *m_free __P((struct mbuf *));
666 void m_freem __P((struct mbuf *));
667 struct mbuf *m_get __P((int, int));
668 struct mbuf *m_getclr __P((int, int));
669 struct mbuf *m_gethdr __P((int, int));
670 int m_mballoc __P((int, int));
671 struct mbuf *m_mballoc_wait __P((void));
672 struct mbuf *m_prepend __P((struct mbuf *,int,int));
673 struct mbuf *m_pulldown __P((struct mbuf *, int, int, int *));
674 void m_print __P((const struct mbuf *m));
675 struct mbuf *m_pullup __P((struct mbuf *, int));
676 struct mbuf *m_split __P((struct mbuf *,int,int));
677 struct mbuf *m_aux_add __P((struct mbuf *, int, int));
678 struct mbuf *m_aux_find __P((struct mbuf *, int, int));
679 void m_aux_delete __P((struct mbuf *, struct mbuf *));
682 #endif /* !_SYS_MBUF_H_ */