/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004, 2005,
 *	Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"
#include "opt_kern_tls.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/refcount.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <vm/uma_dbg.h>

/*
 * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
 * Zones.
 *
 * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
 * Zone.  The Zone can be capped at kern.ipc.nmbclusters, if the
 * administrator so desires.
 *
 * Mbufs are allocated from a UMA Master Zone called the Mbuf
 * Zone.
 *
 * Additionally, FreeBSD provides a Packet Zone, which it
 * configures as a Secondary Zone to the Mbuf Master Zone,
 * thus sharing backend Slab kegs with the Mbuf Master Zone.
 *
 * Thus common-case allocations and locking are simplified:
 *
 *  m_clget()                m_getcl()
 *   |                         |
 *   |   .------------>[(Packet Cache)]    m_get(), m_gethdr()
 *   |   |             [     Packet   ]            |
 * [(Cluster Cache)]   [    Secondary ]   [ (Mbuf Cache)     ]
 * [ Cluster Zone  ]   [     Zone     ]   [ Mbuf Master Zone ]
 *        |                       \________         |
 *  [ Cluster Keg   ]                      \       /
 *        |                              [ Mbuf Keg   ]
 *  [ Cluster Slabs ]                         |
 *        |                              [ Mbuf Slabs ]
 *         \____________(VM)_________________/
 *
 * Whenever an object is allocated with uma_zalloc() out of
 * one of the Zones its _ctor_ function is executed.  The same
 * for any deallocation through uma_zfree() the _dtor_ function
 * is executed.
 *
 * Caches are per-CPU and are filled from the Master Zone.
 *
 * Whenever an object is allocated from the underlying global
 * memory pool it gets pre-initialized with the _zinit_ functions.
 * When the Kegs are overfull, objects get decommissioned with
 * _zfini_ functions and freed back to the global memory pool.
 */
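
/*
 * Illustrative sketch (editor's addition, not part of the allocator):
 * the common case described above is a single call into the Packet
 * Zone, which hands back an mbuf with a 2K cluster already attached.
 * m_getcl() and m_freem() are the real mbuf(9) KPI; the surrounding
 * error handling is an assumed caller convention.
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);	(per-CPU cache and zone exhausted)
 *	...
 *	m_freem(m);	(mbuf and cluster go back to the Packet Zone)
 */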

int nmbufs;			/* limits number of mbufs */
int nmbclusters;		/* limits number of mbuf clusters */
int nmbjumbop;			/* limits number of page size jumbo clusters */
int nmbjumbo9;			/* limits number of 9k jumbo clusters */
int nmbjumbo16;			/* limits number of 16k jumbo clusters */

bool mb_use_ext_pgs;		/* use EXT_PGS mbufs for sendfile & TLS */
SYSCTL_BOOL(_kern_ipc, OID_AUTO, mb_use_ext_pgs, CTLFLAG_RWTUN,
    &mb_use_ext_pgs, 0,
    "Use unmapped mbufs for sendfile(2) and TLS offload");

static quad_t maxmbufmem;	/* overall real memory limit for all mbufs */

SYSCTL_QUAD(_kern_ipc, OID_AUTO, maxmbufmem, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &maxmbufmem, 0, "Maximum real memory allocatable to various mbuf types");

static counter_u64_t snd_tag_count;
SYSCTL_COUNTER_U64(_kern_ipc, OID_AUTO, num_snd_tags, CTLFLAG_RW,
    &snd_tag_count, "# of active mbuf send tags");

/*
 * tunable_mbinit() has to be run before any mbuf allocations are done.
 */
static void
tunable_mbinit(void *dummy)
{
	quad_t realmem;

	/*
	 * The default limit for all mbuf related memory is 1/2 of all
	 * available kernel memory (physical or kmem).
	 * At most it can be 3/4 of available kernel memory.
	 */
	realmem = qmin((quad_t)physmem * PAGE_SIZE, vm_kmem_size);
	maxmbufmem = realmem / 2;
	TUNABLE_QUAD_FETCH("kern.ipc.maxmbufmem", &maxmbufmem);
	if (maxmbufmem > realmem / 4 * 3)
		maxmbufmem = realmem / 4 * 3;

	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	if (nmbclusters == 0)
		nmbclusters = maxmbufmem / MCLBYTES / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
	if (nmbjumbop == 0)
		nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
	if (nmbjumbo9 == 0)
		nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
	if (nmbjumbo16 == 0)
		nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6;

	/*
	 * We need at least as many mbufs as we have clusters of
	 * the various types added together.
	 */
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16)
		nmbufs = lmax(maxmbufmem / MSIZE / 5,
		    nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16);
}
SYSINIT(tunable_mbinit, SI_SUB_KMEM, SI_ORDER_MIDDLE, tunable_mbinit, NULL);
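
/*
 * Worked example (editor's addition): on a machine where the kernel can
 * use 8 GB (realmem == 8 GB), the defaults above come out as
 * maxmbufmem = 4 GB, nmbclusters = 4G / 2048 / 4 = 524288, and, with
 * MSIZE == 256, nmbufs = lmax(4G / 256 / 5, sum of cluster limits)
 * = 3355443.  Each limit can instead be pinned from loader.conf, e.g.:
 *
 *	kern.ipc.nmbclusters="1048576"
 *	kern.ipc.maxmbufmem="2147483648"
 *
 * The exact figures depend on PAGE_SIZE, MSIZE and which tunables are set.
 */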

static int
sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbclusters;

	newnmbclusters = nmbclusters;
	error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
	if (error == 0 && req->newptr && newnmbclusters != nmbclusters) {
		if (newnmbclusters > nmbclusters &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbclusters = newnmbclusters;
			nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
			EVENTHANDLER_INVOKE(nmbclusters_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW,
    &nmbclusters, 0, sysctl_nmbclusters, "IU",
    "Maximum number of mbuf clusters allowed");

static int
sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbop;

	newnmbjumbop = nmbjumbop;
	error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
	if (error == 0 && req->newptr && newnmbjumbop != nmbjumbop) {
		if (newnmbjumbop > nmbjumbop &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbop = newnmbjumbop;
			nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbop, 0, sysctl_nmbjumbop, "IU",
    "Maximum number of mbuf page size jumbo clusters allowed");

static int
sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo9;

	newnmbjumbo9 = nmbjumbo9;
	error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
	if (error == 0 && req->newptr && newnmbjumbo9 != nmbjumbo9) {
		if (newnmbjumbo9 > nmbjumbo9 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo9 = newnmbjumbo9;
			nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo9, 0, sysctl_nmbjumbo9, "IU",
    "Maximum number of mbuf 9k jumbo clusters allowed");

static int
sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo16;

	newnmbjumbo16 = nmbjumbo16;
	error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
	if (error == 0 && req->newptr && newnmbjumbo16 != nmbjumbo16) {
		if (newnmbjumbo16 > nmbjumbo16 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo16 = newnmbjumbo16;
			nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo16, 0, sysctl_nmbjumbo16, "IU",
    "Maximum number of mbuf 16k jumbo clusters allowed");

static int
sysctl_nmbufs(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbufs;

	newnmbufs = nmbufs;
	error = sysctl_handle_int(oidp, &newnmbufs, 0, req);
	if (error == 0 && req->newptr && newnmbufs != nmbufs) {
		if (newnmbufs > nmbufs) {
			nmbufs = newnmbufs;
			nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
			EVENTHANDLER_INVOKE(nmbufs_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs, CTLTYPE_INT|CTLFLAG_RW,
    &nmbufs, 0, sysctl_nmbufs, "IU",
    "Maximum number of mbufs allowed");

/*
 * Zones from which we allocate.
 */
uma_zone_t	zone_mbuf;
uma_zone_t	zone_clust;
uma_zone_t	zone_pack;
uma_zone_t	zone_jumbop;
uma_zone_t	zone_jumbo9;
uma_zone_t	zone_jumbo16;
uma_zone_t	zone_extpgs;

/*
 * Local prototypes.
 */
static int	mb_ctor_mbuf(void *, int, void *, int);
static int	mb_ctor_clust(void *, int, void *, int);
static int	mb_ctor_pack(void *, int, void *, int);
static void	mb_dtor_mbuf(void *, int, void *);
static void	mb_dtor_pack(void *, int, void *);
static int	mb_zinit_pack(void *, int, int);
static void	mb_zfini_pack(void *, int);
static void	mb_reclaim(uma_zone_t, int);
static void	*mbuf_jumbo_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);

/* Ensure that MSIZE is a power of 2. */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);

_Static_assert(sizeof(struct mbuf_ext_pgs) == 256,
    "mbuf_ext_pgs size mismatch");

/*
 * Initialize FreeBSD Network buffer allocation.
 */
static void
mbuf_init(void *dummy)
{

	/*
	 * Configure UMA zones for Mbufs, Clusters, and Packets.
	 */
	zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
	    mb_ctor_mbuf, mb_dtor_mbuf,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    MSIZE - 1, UMA_ZONE_MAXBUCKET);
	if (nmbufs > 0)
		nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
	uma_zone_set_warning(zone_mbuf, "kern.ipc.nmbufs limit reached");
	uma_zone_set_maxaction(zone_mbuf, mb_reclaim);

	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
	    mb_ctor_clust,
#ifdef INVARIANTS
	    trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, 0);
	if (nmbclusters > 0)
		nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
	uma_zone_set_warning(zone_clust, "kern.ipc.nmbclusters limit reached");
	uma_zone_set_maxaction(zone_clust, mb_reclaim);

	zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
	    mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);

	/* Make jumbo frame zones too.  Page size, 9k and 16k. */
	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
	    mb_ctor_clust,
#ifdef INVARIANTS
	    trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, 0);
	if (nmbjumbop > 0)
		nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
	uma_zone_set_warning(zone_jumbop, "kern.ipc.nmbjumbop limit reached");
	uma_zone_set_maxaction(zone_jumbop, mb_reclaim);

	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
	    mb_ctor_clust,
#ifdef INVARIANTS
	    trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, 0);
	uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);
	if (nmbjumbo9 > 0)
		nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
	uma_zone_set_warning(zone_jumbo9, "kern.ipc.nmbjumbo9 limit reached");
	uma_zone_set_maxaction(zone_jumbo9, mb_reclaim);

	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
	    mb_ctor_clust,
#ifdef INVARIANTS
	    trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, 0);
	uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);
	if (nmbjumbo16 > 0)
		nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
	uma_zone_set_warning(zone_jumbo16, "kern.ipc.nmbjumbo16 limit reached");
	uma_zone_set_maxaction(zone_jumbo16, mb_reclaim);

	zone_extpgs = uma_zcreate(MBUF_EXTPGS_MEM_NAME,
	    sizeof(struct mbuf_ext_pgs),
#ifdef INVARIANTS
	    trash_ctor, trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL, NULL,
#endif
	    UMA_ALIGN_CACHE, 0);

	/*
	 * Hook event handler for low-memory situation, used to
	 * drain protocols and push data back to the caches (UMA
	 * later pushes it back to VM).
	 */
	EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
	    EVENTHANDLER_PRI_FIRST);

	snd_tag_count = counter_u64_alloc(M_WAITOK);
}
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);

#ifdef DEBUGNET
/*
 * debugnet makes use of a pre-allocated pool of mbufs and clusters.  When
 * debugnet is configured, we initialize a set of UMA cache zones which return
 * items from this pool.  At panic-time, the regular UMA zone pointers are
 * overwritten with those of the cache zones so that drivers may allocate and
 * free mbufs and clusters without attempting to allocate physical memory.
 *
 * We keep mbufs and clusters in a pair of mbuf queues.  In particular, for
 * the purpose of caching clusters, we treat them as mbufs.
 */
static struct mbufq dn_mbufq =
    { STAILQ_HEAD_INITIALIZER(dn_mbufq.mq_head), 0, INT_MAX };
static struct mbufq dn_clustq =
    { STAILQ_HEAD_INITIALIZER(dn_clustq.mq_head), 0, INT_MAX };

static int dn_clsize;
static uma_zone_t dn_zone_mbuf;
static uma_zone_t dn_zone_clust;
static uma_zone_t dn_zone_pack;

static struct debugnet_saved_zones {
	uma_zone_t dsz_mbuf;
	uma_zone_t dsz_clust;
	uma_zone_t dsz_pack;
	uma_zone_t dsz_jumbop;
	uma_zone_t dsz_jumbo9;
	uma_zone_t dsz_jumbo16;
	bool dsz_debugnet_zones_enabled;
} dn_saved_zones;

static int
dn_buf_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct mbufq *q;
	struct mbuf *m;
	int i;

	q = arg;

	for (i = 0; i < count; i++) {
		m = mbufq_dequeue(q);
		if (m == NULL)
			break;
		trash_init(m, q == &dn_mbufq ? MSIZE : dn_clsize, flags);
		store[i] = m;
	}
	KASSERT((flags & M_WAITOK) == 0 || i == count,
	    ("%s: ran out of pre-allocated mbufs", __func__));
	return (i);
}

static void
dn_buf_release(void *arg, void **store, int count)
{
	struct mbufq *q;
	struct mbuf *m;
	int i;

	q = arg;

	for (i = 0; i < count; i++) {
		m = store[i];
		(void)mbufq_enqueue(q, m);
	}
}

static int
dn_pack_import(void *arg __unused, void **store, int count,
    int domain __unused, int flags)
{
	struct mbuf *m;
	void *clust;
	int i;

	for (i = 0; i < count; i++) {
		m = m_get(M_NOWAIT, MT_DATA);
		if (m == NULL)
			break;
		clust = uma_zalloc(dn_zone_clust, M_NOWAIT);
		if (clust == NULL) {
			m_free(m);
			break;
		}
		mb_ctor_clust(clust, dn_clsize, m, 0);
		store[i] = m;
	}
	KASSERT((flags & M_WAITOK) == 0 || i == count,
	    ("%s: ran out of pre-allocated mbufs", __func__));
	return (i);
}

static void
dn_pack_release(void *arg __unused, void **store, int count)
{
	struct mbuf *m;
	void *clust;
	int i;

	for (i = 0; i < count; i++) {
		m = store[i];
		clust = m->m_ext.ext_buf;
		uma_zfree(dn_zone_clust, clust);
		uma_zfree(dn_zone_mbuf, m);
	}
}

/*
 * Free the pre-allocated mbufs and clusters reserved for debugnet, and destroy
 * the corresponding UMA cache zones.
 */
void
debugnet_mbuf_drain(void)
{
	struct mbuf *m;
	void *item;

	if (dn_zone_mbuf != NULL) {
		uma_zdestroy(dn_zone_mbuf);
		dn_zone_mbuf = NULL;
	}
	if (dn_zone_clust != NULL) {
		uma_zdestroy(dn_zone_clust);
		dn_zone_clust = NULL;
	}
	if (dn_zone_pack != NULL) {
		uma_zdestroy(dn_zone_pack);
		dn_zone_pack = NULL;
	}

	while ((m = mbufq_dequeue(&dn_mbufq)) != NULL)
		m_free(m);
	while ((item = mbufq_dequeue(&dn_clustq)) != NULL)
		uma_zfree(m_getzone(dn_clsize), item);
}

/*
 * Callback invoked immediately prior to starting a debugnet connection.
 */
void
debugnet_mbuf_start(void)
{

	MPASS(!dn_saved_zones.dsz_debugnet_zones_enabled);

	/* Save the old zone pointers to restore when debugnet is closed. */
	dn_saved_zones = (struct debugnet_saved_zones) {
		.dsz_debugnet_zones_enabled = true,
		.dsz_mbuf = zone_mbuf,
		.dsz_clust = zone_clust,
		.dsz_pack = zone_pack,
		.dsz_jumbop = zone_jumbop,
		.dsz_jumbo9 = zone_jumbo9,
		.dsz_jumbo16 = zone_jumbo16,
	};

	/*
	 * All cluster zones return buffers of the size requested by the
	 * drivers.  It's up to the driver to reinitialize the zones if the
	 * MTU of a debugnet-enabled interface changes.
	 */
	printf("debugnet: overwriting mbuf zone pointers\n");
	zone_mbuf = dn_zone_mbuf;
	zone_clust = dn_zone_clust;
	zone_pack = dn_zone_pack;
	zone_jumbop = dn_zone_clust;
	zone_jumbo9 = dn_zone_clust;
	zone_jumbo16 = dn_zone_clust;
}

/*
 * Callback invoked when a debugnet connection is closed/finished.
 */
void
debugnet_mbuf_finish(void)
{

	MPASS(dn_saved_zones.dsz_debugnet_zones_enabled);

	printf("debugnet: restoring mbuf zone pointers\n");
	zone_mbuf = dn_saved_zones.dsz_mbuf;
	zone_clust = dn_saved_zones.dsz_clust;
	zone_pack = dn_saved_zones.dsz_pack;
	zone_jumbop = dn_saved_zones.dsz_jumbop;
	zone_jumbo9 = dn_saved_zones.dsz_jumbo9;
	zone_jumbo16 = dn_saved_zones.dsz_jumbo16;

	memset(&dn_saved_zones, 0, sizeof(dn_saved_zones));
}

/*
 * Reinitialize the debugnet mbuf+cluster pool and cache zones.
 */
void
debugnet_mbuf_reinit(int nmbuf, int nclust, int clsize)
{
	struct mbuf *m;
	void *item;

	debugnet_mbuf_drain();

	dn_clsize = clsize;

	dn_zone_mbuf = uma_zcache_create("debugnet_" MBUF_MEM_NAME,
	    MSIZE, mb_ctor_mbuf, mb_dtor_mbuf,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    dn_buf_import, dn_buf_release,
	    &dn_mbufq, UMA_ZONE_NOBUCKET);

	dn_zone_clust = uma_zcache_create("debugnet_" MBUF_CLUSTER_MEM_NAME,
	    clsize, mb_ctor_clust,
#ifdef INVARIANTS
	    trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL,
#endif
	    dn_buf_import, dn_buf_release,
	    &dn_clustq, UMA_ZONE_NOBUCKET);

	dn_zone_pack = uma_zcache_create("debugnet_" MBUF_PACKET_MEM_NAME,
	    MCLBYTES, mb_ctor_pack, mb_dtor_pack, NULL, NULL,
	    dn_pack_import, dn_pack_release,
	    NULL, UMA_ZONE_NOBUCKET);

	while (nmbuf-- > 0) {
		m = m_get(M_WAITOK, MT_DATA);
		uma_zfree(dn_zone_mbuf, m);
	}
	while (nclust-- > 0) {
		item = uma_zalloc(m_getzone(dn_clsize), M_WAITOK);
		uma_zfree(dn_zone_clust, item);
	}
}
#endif /* DEBUGNET */

/*
 * UMA backend page allocator for the jumbo frame zones.
 *
 * Allocates kernel virtual memory that is backed by contiguous physical
 * pages.
 */
static void *
mbuf_jumbo_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
    int wait)
{

	/* Inform UMA that this allocator uses kernel_map/object. */
	*flags = UMA_SLAB_KERNEL;
	return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain),
	    bytes, wait, (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0,
	    VM_MEMATTR_DEFAULT));
}

/*
 * Constructor for Mbuf master zone.
 *
 * The 'arg' pointer points to a mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API.  See mbuf.h.
 */
static int
mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
	int error;
	int flags;
	short type;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	args = (struct mb_args *)arg;
	type = args->type;

	/*
	 * The mbuf is initialized later.  The caller has the
	 * responsibility to set up any MAC labels too.
	 */
	if (type == MT_NOINIT)
		return (0);

	m = (struct mbuf *)mem;
	flags = args->flags;
	MPASS((flags & M_NOFREE) == 0);

	error = m_init(m, how, type, flags);

	return (error);
}

/*
 * The Mbuf master zone destructor.
 */
static void
mb_dtor_mbuf(void *mem, int size, void *arg)
{
	struct mbuf *m;
	unsigned long flags;

	m = (struct mbuf *)mem;
	flags = (unsigned long)arg;

	KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
	if (!(flags & MB_DTOR_SKIP) && (m->m_flags & M_PKTHDR) &&
	    !SLIST_EMPTY(&m->m_pkthdr.tags))
		m_tag_delete_chain(m, NULL);
#ifdef INVARIANTS
	trash_dtor(mem, size, arg);
#endif
}

/*
 * The Mbuf Packet zone destructor.
 */
static void
mb_dtor_pack(void *mem, int size, void *arg)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);

	/* Make sure we've got a clean cluster back. */
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
	KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
	KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__));
	KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
	KASSERT(m->m_ext.ext_size == MCLBYTES,
	    ("%s: ext_size != MCLBYTES", __func__));
	KASSERT(m->m_ext.ext_type == EXT_PACKET,
	    ("%s: ext_type != EXT_PACKET", __func__));
#ifdef INVARIANTS
	trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
#endif
	/*
	 * If there are processes blocked on zone_clust, waiting for pages
	 * to be freed up, cause them to be woken up by draining the
	 * packet zone.  We are exposed to a race here (in the check for
	 * the UMA_ZFLAG_FULL) where we might miss the flag set, but that
	 * is deliberate.  We don't want to acquire the zone lock for every
	 * mbuf free.
	 */
	if (uma_zone_exhausted_nolock(zone_clust))
		uma_zone_reclaim(zone_pack, UMA_RECLAIM_DRAIN);
}

/*
 * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.  If 'arg' is
 * empty we allocate just the cluster without setting
 * the mbuf to it.  See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	m = (struct mbuf *)arg;
	if (m != NULL) {
		m->m_ext.ext_buf = (char *)mem;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_arg1 = NULL;
		m->m_ext.ext_arg2 = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = m_gettype(size);
		m->m_ext.ext_flags = EXT_FLAG_EMBREF;
		m->m_ext.ext_count = 1;
	}

	return (0);
}

/*
 * The Packet secondary zone's init routine, executed on the
 * object's transition from mbuf keg slab to zone cache.
 */
static int
mb_zinit_pack(void *mem, int size, int how)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;		/* m is virgin. */
	if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
	    m->m_ext.ext_buf == NULL)
		return (ENOMEM);
	m->m_ext.ext_type = EXT_PACKET;	/* Override. */
#ifdef INVARIANTS
	trash_init(m->m_ext.ext_buf, MCLBYTES, how);
#endif
	return (0);
}

/*
 * The Packet secondary zone's fini routine, executed on the
 * object's transition from zone cache to keg slab.
 */
static void
mb_zfini_pack(void *mem, int size)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
#ifdef INVARIANTS
	trash_fini(m->m_ext.ext_buf, MCLBYTES);
#endif
	uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
#ifdef INVARIANTS
	trash_dtor(mem, size, NULL);
#endif
}

/*
 * The "packet" keg constructor.
 */
static int
mb_ctor_pack(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
	int error, flags;
	short type;

	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;
	MPASS((flags & M_NOFREE) == 0);

#ifdef INVARIANTS
	trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
#endif

	error = m_init(m, how, type, flags);

	/* m_ext is already initialized. */
	m->m_data = m->m_ext.ext_buf;
	m->m_flags = (flags | M_EXT);

	return (error);
}

/*
 * This is the protocol drain routine.  Called by UMA whenever any of the
 * mbuf zones is close to its limit.
 *
 * No locks should be held when this is called.  The drain routines have to
 * presently acquire some locks which raises the possibility of lock order
 * reversal.
 */
static void
mb_reclaim(uma_zone_t zone __unused, int pending __unused)
{
	struct domain *dp;
	struct protosw *pr;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL, __func__);

	for (dp = domains; dp != NULL; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain != NULL)
				(*pr->pr_drain)();
}

/*
 * Free "count" units of I/O from an mbuf chain.  They could be held
 * in EXT_PGS or just as a normal mbuf.  This code is intended to be
 * called in an error path (I/O error, closed connection, etc).
 */
void
mb_free_notready(struct mbuf *m, int count)
{
	int i;

	for (i = 0; i < count && m != NULL; i++) {
		if ((m->m_flags & M_EXT) != 0 &&
		    m->m_ext.ext_type == EXT_PGS) {
			m->m_ext.ext_pgs->nrdy--;
			if (m->m_ext.ext_pgs->nrdy != 0)
				continue;
		}
		m = m_free(m);
	}
	KASSERT(i == count, ("Removed only %d items from %p", i, m));
}

/*
 * Compress an unmapped mbuf into a simple mbuf when it holds a small
 * amount of data.  This is used as a DoS defense to avoid having
 * small packets tie up wired pages, an ext_pgs structure, and an
 * mbuf.  Since this converts the existing mbuf in place, it can only
 * be used if there are no other references to 'm'.
 */
int
mb_unmapped_compress(struct mbuf *m)
{
	volatile u_int *refcnt;
	struct mbuf m_temp;

	/*
	 * Assert that 'm' does not have a packet header.  If 'm' had
	 * a packet header, it would only be able to hold MHLEN bytes
	 * and m_data would have to be initialized differently.
	 */
	KASSERT((m->m_flags & M_PKTHDR) == 0 && (m->m_flags & M_EXT) &&
	    m->m_ext.ext_type == EXT_PGS,
	    ("%s: m %p !M_EXT or !EXT_PGS or M_PKTHDR", __func__, m));
	KASSERT(m->m_len <= MLEN, ("m_len too large %p", m));

	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = &m->m_ext.ext_count;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
	}

	if (*refcnt != 1)
		return (EBUSY);

	/*
	 * Copy mbuf header and m_ext portion of 'm' to 'm_temp' to
	 * create a "fake" EXT_PGS mbuf that can be used with
	 * m_copydata() as well as the ext_free callback.
	 */
	memcpy(&m_temp, m, offsetof(struct mbuf, m_ext) + sizeof(m->m_ext));
	m_temp.m_next = NULL;
	m_temp.m_nextpkt = NULL;

	/* Turn 'm' into a "normal" mbuf. */
	m->m_flags &= ~(M_EXT | M_RDONLY | M_NOMAP);
	m->m_data = m->m_dat;

	/* Copy data from template's ext_pgs. */
	m_copydata(&m_temp, 0, m_temp.m_len, mtod(m, caddr_t));

	/* Free the backing pages. */
	m_temp.m_ext.ext_free(&m_temp);

	/* Finally, free the ext_pgs struct. */
	uma_zfree(zone_extpgs, m_temp.m_ext.ext_pgs);
	return (0);
}

/*
 * These next few routines are used to permit downgrading an unmapped
 * mbuf to a chain of mapped mbufs.  This is used when an interface
 * doesn't support unmapped mbufs or if checksums need to be
 * computed in software.
 *
 * Each unmapped mbuf is converted to a chain of mbufs.  First, any
 * TLS header data is stored in a regular mbuf.  Second, each page of
 * unmapped data is stored in an mbuf with an EXT_SFBUF external
 * cluster.  These mbufs use an sf_buf to provide a valid KVA for the
 * associated physical page.  They also hold a reference on the
 * original EXT_PGS mbuf to ensure the physical page doesn't go away.
 * Finally, any TLS trailer data is stored in a regular mbuf.
 *
 * mb_unmapped_free_mext() is the ext_free handler for the EXT_SFBUF
 * mbufs.  It frees the associated sf_buf and releases its reference
 * on the original EXT_PGS mbuf.
 *
 * _mb_unmapped_to_ext() is a helper function that converts a single
 * unmapped mbuf into a chain of mbufs.
 *
 * mb_unmapped_to_ext() is the public function that walks an mbuf
 * chain converting any unmapped mbufs to mapped mbufs.  It returns
 * the new chain of mapped mbufs on success.  On failure it frees
 * the original mbuf chain and returns NULL.
 */
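
/*
 * Illustrative sketch (editor's addition): a transmit path that cannot
 * handle unmapped mbufs would downgrade the chain before handing it to
 * the hardware.  The capability test shown is an assumption about the
 * caller; mb_unmapped_to_ext() itself is the function defined below.
 *
 *	if (!device_can_do_unmapped) {
 *		m = mb_unmapped_to_ext(m);
 *		if (m == NULL)
 *			return (ENOBUFS);	(original chain already freed)
 *	}
 */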

static void
mb_unmapped_free_mext(struct mbuf *m)
{
	struct sf_buf *sf;
	struct mbuf *old_m;

	sf = m->m_ext.ext_arg1;
	sf_buf_free(sf);

	/* Drop the reference on the backing EXT_PGS mbuf. */
	old_m = m->m_ext.ext_arg2;
	mb_free_ext(old_m);
}

static struct mbuf *
_mb_unmapped_to_ext(struct mbuf *m)
{
	struct mbuf_ext_pgs *ext_pgs;
	struct mbuf *m_new, *top, *prev, *mref;
	struct sf_buf *sf;
	vm_page_t pg;
	int i, len, off, pglen, pgoff, seglen, segoff;
	volatile u_int *refcnt;
	u_int ref_inc = 0;

	MBUF_EXT_PGS_ASSERT(m);
	ext_pgs = m->m_ext.ext_pgs;
	len = m->m_len;
	KASSERT(ext_pgs->tls == NULL, ("%s: can't convert TLS mbuf %p",
	    __func__, m));

	/* See if this is the mbuf that holds the embedded refcount. */
	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = &m->m_ext.ext_count;
		mref = m;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
		mref = __containerof(refcnt, struct mbuf, m_ext.ext_count);
	}

	/* Skip over any data removed from the front. */
	off = mtod(m, vm_offset_t);

	top = NULL;
	if (ext_pgs->hdr_len != 0) {
		if (off >= ext_pgs->hdr_len) {
			off -= ext_pgs->hdr_len;
		} else {
			seglen = ext_pgs->hdr_len - off;
			segoff = off;
			seglen = min(seglen, len);
			off = 0;
			len -= seglen;
			m_new = m_get(M_NOWAIT, MT_DATA);
			if (m_new == NULL)
				goto fail;
			m_new->m_len = seglen;
			prev = top = m_new;
			memcpy(mtod(m_new, void *), &ext_pgs->hdr[segoff],
			    seglen);
		}
	}
	pgoff = ext_pgs->first_pg_off;
	for (i = 0; i < ext_pgs->npgs && len > 0; i++) {
		pglen = mbuf_ext_pg_len(ext_pgs, i, pgoff);
		if (off >= pglen) {
			off -= pglen;
			pgoff = 0;
			continue;
		}
		seglen = pglen - off;
		segoff = pgoff + off;
		off = 0;
		seglen = min(seglen, len);
		len -= seglen;

		pg = PHYS_TO_VM_PAGE(ext_pgs->pa[i]);
		m_new = m_get(M_NOWAIT, MT_DATA);
		if (m_new == NULL)
			goto fail;
		if (top == NULL) {
			top = prev = m_new;
		} else {
			prev->m_next = m_new;
			prev = m_new;
		}
		sf = sf_buf_alloc(pg, SFB_NOWAIT);
		if (sf == NULL)
			goto fail;

		ref_inc++;
		m_extadd(m_new, (char *)sf_buf_kva(sf), PAGE_SIZE,
		    mb_unmapped_free_mext, sf, mref, M_RDONLY, EXT_SFBUF);
		m_new->m_data += segoff;
		m_new->m_len = seglen;

		pgoff = 0;
	}
	if (len != 0) {
		KASSERT((off + len) <= ext_pgs->trail_len,
		    ("off + len > trail (%d + %d > %d)", off, len,
		    ext_pgs->trail_len));
		m_new = m_get(M_NOWAIT, MT_DATA);
		if (m_new == NULL)
			goto fail;
		if (top == NULL)
			top = m_new;
		else
			prev->m_next = m_new;
		m_new->m_len = len;
		memcpy(mtod(m_new, void *), &ext_pgs->trail[off], len);
	}

	if (ref_inc != 0) {
		/*
		 * Obtain an additional reference on the old mbuf for
		 * each created EXT_SFBUF mbuf.  They will be dropped
		 * in mb_unmapped_free_mext().
		 */
		if (*refcnt == 1)
			*refcnt += ref_inc;
		else
			atomic_add_int(refcnt, ref_inc);
	}
	m_free(m);
	return (top);

fail:
	if (ref_inc != 0) {
		/*
		 * Obtain an additional reference on the old mbuf for
		 * each created EXT_SFBUF mbuf.  They will be
		 * immediately dropped when these mbufs are freed
		 * below.
		 */
		if (*refcnt == 1)
			*refcnt += ref_inc;
		else
			atomic_add_int(refcnt, ref_inc);
	}
	m_free(m);
	m_freem(top);
	return (NULL);
}

struct mbuf *
mb_unmapped_to_ext(struct mbuf *top)
{
	struct mbuf *m, *next, *prev = NULL;

	for (m = top; m != NULL; m = next) {
		/* m might be freed, so cache the next pointer. */
		next = m->m_next;
		if (m->m_flags & M_NOMAP) {
			if (prev != NULL) {
				/*
				 * Remove 'm' from the new chain so
				 * that the 'top' chain terminates
				 * before 'm' in case 'top' is freed
				 * due to an error.
				 */
				prev->m_next = NULL;
			}
			m = _mb_unmapped_to_ext(m);
			if (m == NULL) {
				m_freem(top);
				m_freem(next);
				return (NULL);
			}
			if (prev == NULL)
				top = m;
			else
				prev->m_next = m;

			/*
			 * Replaced one mbuf with a chain, so we must
			 * find the end of chain.
			 */
			prev = m_last(m);
		} else {
			if (prev != NULL)
				prev->m_next = m;
			prev = m;
		}
	}
	return (top);
}

/*
 * Allocate an empty EXT_PGS mbuf.  The ext_free routine is
 * responsible for freeing any pages backing this mbuf when it is
 * freed.
 */
struct mbuf *
mb_alloc_ext_pgs(int how, bool pkthdr, m_ext_free_t ext_free)
{
	struct mbuf *m;
	struct mbuf_ext_pgs *ext_pgs;

	if (pkthdr)
		m = m_gethdr(how, MT_DATA);
	else
		m = m_get(how, MT_DATA);
	if (m == NULL)
		return (NULL);

	ext_pgs = uma_zalloc(zone_extpgs, how);
	if (ext_pgs == NULL) {
		m_free(m);
		return (NULL);
	}
	ext_pgs->npgs = 0;
	ext_pgs->nrdy = 0;
	ext_pgs->first_pg_off = 0;
	ext_pgs->last_pg_len = 0;
	ext_pgs->flags = 0;
	ext_pgs->hdr_len = 0;
	ext_pgs->trail_len = 0;
	ext_pgs->tls = NULL;
	ext_pgs->so = NULL;
	m->m_data = NULL;
	m->m_flags |= (M_EXT | M_RDONLY | M_NOMAP);
	m->m_ext.ext_type = EXT_PGS;
	m->m_ext.ext_flags = EXT_FLAG_EMBREF;
	m->m_ext.ext_count = 1;
	m->m_ext.ext_pgs = ext_pgs;
	m->m_ext.ext_size = 0;
	m->m_ext.ext_free = ext_free;
	return (m);
}
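
/*
 * Illustrative sketch (editor's addition): a producer fills in the page
 * array after allocation.  'my_pages_free' and 'pg' are hypothetical;
 * the field assignments mirror how this file initializes
 * struct mbuf_ext_pgs.
 *
 *	struct mbuf *m;
 *
 *	m = mb_alloc_ext_pgs(M_WAITOK, false, my_pages_free);
 *	m->m_ext.ext_pgs->pa[0] = VM_PAGE_TO_PHYS(pg);
 *	m->m_ext.ext_pgs->npgs = 1;
 *	m->m_ext.ext_pgs->last_pg_len = PAGE_SIZE;
 *	m->m_len = PAGE_SIZE;
 */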

#ifdef INVARIANT_SUPPORT
void
mb_ext_pgs_check(struct mbuf_ext_pgs *ext_pgs)
{

	/*
	 * NB: This expects a non-empty buffer (npgs > 0 and
	 * last_pg_len > 0).
	 */
	KASSERT(ext_pgs->npgs > 0,
	    ("ext_pgs with no valid pages: %p", ext_pgs));
	KASSERT(ext_pgs->npgs <= nitems(ext_pgs->pa),
	    ("ext_pgs with too many pages: %p", ext_pgs));
	KASSERT(ext_pgs->nrdy <= ext_pgs->npgs,
	    ("ext_pgs with too many ready pages: %p", ext_pgs));
	KASSERT(ext_pgs->first_pg_off < PAGE_SIZE,
	    ("ext_pgs with too large page offset: %p", ext_pgs));
	KASSERT(ext_pgs->last_pg_len > 0,
	    ("ext_pgs with zero last page length: %p", ext_pgs));
	KASSERT(ext_pgs->last_pg_len <= PAGE_SIZE,
	    ("ext_pgs with too large last page length: %p", ext_pgs));
	if (ext_pgs->npgs == 1) {
		KASSERT(ext_pgs->first_pg_off + ext_pgs->last_pg_len <=
		    PAGE_SIZE, ("ext_pgs with single page too large: %p",
		    ext_pgs));
	}
	KASSERT(ext_pgs->hdr_len <= sizeof(ext_pgs->hdr),
	    ("ext_pgs with too large header length: %p", ext_pgs));
	KASSERT(ext_pgs->trail_len <= sizeof(ext_pgs->trail),
	    ("ext_pgs with too large trailer length: %p", ext_pgs));
}
#endif

/*
 * Clean up after mbufs with M_EXT storage attached to them if the
 * reference count hits 1.
 */
void
mb_free_ext(struct mbuf *m)
{
	volatile u_int *refcnt;
	struct mbuf *mref;
	int freembuf;

	KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));

	/* See if this is the mbuf that holds the embedded refcount. */
	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = &m->m_ext.ext_count;
		mref = m;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
		mref = __containerof(refcnt, struct mbuf, m_ext.ext_count);
	}

	/*
	 * Check if the header is embedded in the cluster.  It is
	 * important that we can't touch any of the mbuf fields
	 * after we have freed the external storage, since mbuf
	 * could have been embedded in it.  For now, the mbufs
	 * embedded into the cluster are always of type EXT_EXTREF,
	 * and for this type we won't free the mref.
	 */
	if (m->m_flags & M_NOFREE) {
		freembuf = 0;
		KASSERT(m->m_ext.ext_type == EXT_EXTREF ||
		    m->m_ext.ext_type == EXT_RXRING,
		    ("%s: no-free mbuf %p has wrong type", __func__, m));
	} else
		freembuf = 1;

	/* Free attached storage if this mbuf is the only reference to it. */
	if (*refcnt == 1 || atomic_fetchadd_int(refcnt, -1) == 1) {
		switch (m->m_ext.ext_type) {
		case EXT_PACKET:
			/* The packet zone is special. */
			if (*refcnt == 0)
				*refcnt = 1;
			uma_zfree(zone_pack, mref);
			break;
		case EXT_CLUSTER:
			uma_zfree(zone_clust, m->m_ext.ext_buf);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_JUMBOP:
			uma_zfree(zone_jumbop, m->m_ext.ext_buf);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_JUMBO9:
			uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_JUMBO16:
			uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_PGS: {
#ifdef KERN_TLS
			struct mbuf_ext_pgs *pgs;
			struct ktls_session *tls;
#endif

			KASSERT(mref->m_ext.ext_free != NULL,
			    ("%s: ext_free not set", __func__));
			mref->m_ext.ext_free(mref);
#ifdef KERN_TLS
			pgs = mref->m_ext.ext_pgs;
			tls = pgs->tls;
			if (tls != NULL &&
			    !refcount_release_if_not_last(&tls->refcount))
				ktls_enqueue_to_free(pgs);
			else
#endif
				uma_zfree(zone_extpgs, mref->m_ext.ext_pgs);
			uma_zfree(zone_mbuf, mref);
			break;
		}
		case EXT_SFBUF:
		case EXT_NET_DRV:
		case EXT_MOD_TYPE:
		case EXT_DISPOSABLE:
			KASSERT(mref->m_ext.ext_free != NULL,
			    ("%s: ext_free not set", __func__));
			mref->m_ext.ext_free(mref);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_EXTREF:
			KASSERT(m->m_ext.ext_free != NULL,
			    ("%s: ext_free not set", __func__));
			m->m_ext.ext_free(m);
			break;
		case EXT_RXRING:
			KASSERT(m->m_ext.ext_free == NULL,
			    ("%s: ext_free is set", __func__));
			break;
		default:
			KASSERT(m->m_ext.ext_type == 0,
			    ("%s: unknown ext_type", __func__));
		}
	}

	if (freembuf && m != mref)
		uma_zfree(zone_mbuf, m);
}

/*
 * Official mbuf(9) allocation KPI for stack and drivers:
 *
 * m_get()	- a single mbuf without any attachments, sys/mbuf.h.
 * m_gethdr()	- a single mbuf initialized as M_PKTHDR, sys/mbuf.h.
 * m_getcl()	- an mbuf + 2k cluster, sys/mbuf.h.
 * m_clget()	- attach cluster to already allocated mbuf.
 * m_cljget()	- attach jumbo cluster to already allocated mbuf.
 * m_get2()	- allocate minimum mbuf that would fit size argument.
 * m_getm2()	- allocate a chain of mbufs/clusters.
 * m_extadd()	- attach external cluster to mbuf.
 *
 * m_free()	- free single mbuf with its tags and ext, sys/mbuf.h.
 * m_freem()	- free chain of mbufs.
 */
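
/*
 * Illustrative sketch (editor's addition): two of the KPI entry points
 * above in a typical caller.  Sizes and error handling are assumptions.
 *
 *	struct mbuf *m, *chain;
 *
 *	(smallest mbuf that can hold a 1200 byte payload)
 *	m = m_get2(1200, M_NOWAIT, MT_DATA, M_PKTHDR);
 *
 *	(a chain big enough for 64K, appended to nothing)
 *	chain = m_getm2(NULL, 65536, M_WAITOK, MT_DATA, 0);
 *
 *	m_freem(m);
 *	m_freem(chain);
 */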

int
m_clget(struct mbuf *m, int how)
{

	KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
	    __func__, m));
	m->m_ext.ext_buf = (char *)NULL;
	uma_zalloc_arg(zone_clust, m, how);
	/*
	 * On a cluster allocation failure, drain the packet zone and retry,
	 * we might be able to loosen a few clusters up on the drain.
	 */
	if ((how & M_NOWAIT) && (m->m_ext.ext_buf == NULL)) {
		uma_zone_reclaim(zone_pack, UMA_RECLAIM_DRAIN);
		uma_zalloc_arg(zone_clust, m, how);
	}
	MBUF_PROBE2(m__clget, m, how);
	return (m->m_flags & M_EXT);
}

/*
 * m_cljget() is different from m_clget() as it can allocate clusters without
 * attaching them to an mbuf.  In that case the return value is the pointer
 * to the cluster of the requested size.  If an mbuf was specified, it gets
 * the cluster attached to it and the return value can be safely ignored.
 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
 */
void *
m_cljget(struct mbuf *m, int how, int size)
{
	uma_zone_t zone;
	void *retval;

	if (m != NULL) {
		KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
		    __func__, m));
		m->m_ext.ext_buf = NULL;
	}

	zone = m_getzone(size);
	retval = uma_zalloc_arg(zone, m, how);

	MBUF_PROBE4(m__cljget, m, how, size, retval);

	return (retval);
}

/*
 * m_get2() allocates minimum mbuf that would fit "size" argument.
 */
struct mbuf *
m_get2(int size, int how, short type, int flags)
{
	struct mb_args args;
	struct mbuf *m, *n;

	args.flags = flags;
	args.type = type;

	if (size <= MHLEN || (size <= MLEN && (flags & M_PKTHDR) == 0))
		return (uma_zalloc_arg(zone_mbuf, &args, how));
	if (size <= MCLBYTES)
		return (uma_zalloc_arg(zone_pack, &args, how));

	if (size > MJUMPAGESIZE)
		return (NULL);

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	n = uma_zalloc_arg(zone_jumbop, m, how);
	if (n == NULL) {
		uma_zfree(zone_mbuf, m);
		return (NULL);
	}

	return (m);
}

/*
 * m_getjcl() returns an mbuf with a cluster of the specified size attached.
 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
 */
struct mbuf *
m_getjcl(int how, short type, int flags, int size)
{
	struct mb_args args;
	struct mbuf *m, *n;
	uma_zone_t zone;

	if (size == MCLBYTES)
		return (m_getcl(how, type, flags));

	args.flags = flags;
	args.type = type;

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	zone = m_getzone(size);
	n = uma_zalloc_arg(zone, m, how);
	if (n == NULL) {
		uma_zfree(zone_mbuf, m);
		return (NULL);
	}
	return (m);
}

/*
 * Allocate a given length worth of mbufs and/or clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If an
 * existing mbuf chain is provided, then we will append the new chain
 * to the existing one and return a pointer to the provided mbuf.
 */
struct mbuf *
m_getm2(struct mbuf *m, int len, int how, short type, int flags)
{
	struct mbuf *mb, *nm = NULL, *mtail = NULL;

	KASSERT(len >= 0, ("%s: len is < 0", __func__));

	/* Validate flags. */
	flags &= (M_PKTHDR | M_EOR);

	/* Packet header mbuf must be first in chain. */
	if ((flags & M_PKTHDR) && m != NULL)
		flags &= ~M_PKTHDR;

	/* Loop and append maximum sized mbufs to the chain tail. */
	while (len > 0) {
		if (len > MCLBYTES)
			mb = m_getjcl(how, type, (flags & M_PKTHDR),
			    MJUMPAGESIZE);
		else if (len >= MINCLSIZE)
			mb = m_getcl(how, type, (flags & M_PKTHDR));
		else if (flags & M_PKTHDR)
			mb = m_gethdr(how, type);
		else
			mb = m_get(how, type);

		/* Fail the whole operation if one mbuf can't be allocated. */
		if (mb == NULL) {
			if (nm != NULL)
				m_freem(nm);
			return (NULL);
		}

		/* Book keeping. */
		len -= M_SIZE(mb);
		if (mtail != NULL)
			mtail->m_next = mb;
		else
			nm = mb;
		mtail = mb;
		flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */
	}
	if (flags & M_EOR)
		mtail->m_flags |= M_EOR;	/* Only valid on the last mbuf. */

	/* If mbuf was supplied, append new chain to the end of it. */
	if (m != NULL) {
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
		mtail->m_next = nm;
		mtail->m_flags &= ~M_EOR;
	} else
		m = nm;

	return (m);
}

/*-
 * Configure a provided mbuf to refer to the provided external storage
 * buffer and setup a reference count for said buffer.
 *
 * Arguments:
 *    mb     The existing mbuf to which to attach the provided buffer.
 *    buf    The address of the provided external storage buffer.
 *    size   The size of the provided buffer.
 *    freef  A pointer to a routine that is responsible for freeing the
 *           provided external storage buffer.
 *    args   A pointer to an argument structure (of any type) to be passed
 *           to the provided freef routine (may be NULL).
 *    flags  Any other flags to be passed to the provided mbuf.
 *    type   The type that the external storage buffer should be
 *           labeled with.
 *
 * Returns:
 *    Nothing.
 */
void
m_extadd(struct mbuf *mb, char *buf, u_int size, m_ext_free_t freef,
    void *arg1, void *arg2, int flags, int type)
{

	KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed",
	    __func__));

	mb->m_flags |= (M_EXT | flags);
	mb->m_ext.ext_buf = buf;
	mb->m_data = mb->m_ext.ext_buf;
	mb->m_ext.ext_size = size;
	mb->m_ext.ext_free = freef;
	mb->m_ext.ext_arg1 = arg1;
	mb->m_ext.ext_arg2 = arg2;
	mb->m_ext.ext_type = type;

	if (type != EXT_EXTREF) {
		mb->m_ext.ext_count = 1;
		mb->m_ext.ext_flags = EXT_FLAG_EMBREF;
	} else
		mb->m_ext.ext_flags = 0;
}
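
/*
 * Illustrative sketch (editor's addition): a driver attaching its own
 * buffer to an mbuf, with a callback invoked when the last reference
 * goes away.  'my_buf_free' and its use of ext_arg1 are hypothetical.
 *
 *	static void
 *	my_buf_free(struct mbuf *m)
 *	{
 *		free(m->m_ext.ext_arg1, M_DEVBUF);
 *	}
 *
 *	m_extadd(m, buf, bufsize, my_buf_free, buf, NULL,
 *	    M_RDONLY, EXT_NET_DRV);
 */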

/*
 * Free an entire chain of mbufs and associated external buffers, if
 * applicable.
 */
void
m_freem(struct mbuf *mb)
{

	MBUF_PROBE1(m__freem, mb);
	while (mb != NULL)
		mb = m_free(mb);
}

void
m_snd_tag_init(struct m_snd_tag *mst, struct ifnet *ifp)
{

	if_ref(ifp);
	mst->ifp = ifp;
	refcount_init(&mst->refcount, 1);
	counter_u64_add(snd_tag_count, 1);
}

void
m_snd_tag_destroy(struct m_snd_tag *mst)
{
	struct ifnet *ifp;

	ifp = mst->ifp;
	ifp->if_snd_tag_free(mst);
	if_rele(ifp);
	counter_u64_add(snd_tag_count, -1);
}