/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004, 2005,
 *	Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"
#include "opt_kern_tls.h"

#include <sys/param.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/sf_buf.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if_var.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/uma_dbg.h>

_Static_assert(MJUMPAGESIZE > MCLBYTES,
    "Cluster must be smaller than a jumbo page");
/*
 * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
 * Zones.
 *
 * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
 * Zone.  The Zone can be capped at kern.ipc.nmbclusters, if the
 * administrator so desires.
 *
 * Mbufs are allocated from a UMA Primary Zone called the Mbuf
 * Zone.
 *
 * Additionally, FreeBSD provides a Packet Zone, which it
 * configures as a Secondary Zone to the Mbuf Primary Zone,
 * thus sharing backend Slab kegs with the Mbuf Primary Zone.
 *
 * Thus common-case allocations and locking are simplified:
 *
 *  m_clget()                m_getcl()
 *    |                          |
 *    |   .------------>[(Packet Cache)]    m_get(), m_gethdr()
 *    |   |                      |                   |
 *  [(Cluster Cache)]   [   Secondary   ]   [ (Mbuf Cache)      ]
 *  [ Cluster Zone  ]   [     Zone      ]   [ Mbuf Primary Zone ]
 *          |                                        |
 *           \____________(VM)_________________/
 *
 * Whenever an object is allocated with uma_zalloc() out of
 * one of the Zones its _ctor_ function is executed.  Likewise,
 * on any deallocation through uma_zfree() the _dtor_ function
 * is executed.
 *
 * Caches are per-CPU and are filled from the Primary Zone.
 *
 * Whenever an object is allocated from the underlying global
 * memory pool it gets pre-initialized with the _zinit_ functions.
 * When the Kegs are overfull, objects get decommissioned with
 * _zfini_ functions and freed back to the global memory pool.
 */
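/*
 * Illustrative sketch (not part of the allocator): the mbuf(9) allocation
 * KPI in sys/mbuf.h reduces to uma_zalloc_arg() calls against the zones
 * above, with a struct mb_args carrying the constructor parameters,
 * roughly:
 *
 *	struct mb_args args;
 *
 *	args.flags = M_PKTHDR;
 *	args.type = MT_DATA;
 *	m = uma_zalloc_arg(zone_mbuf, &args, M_NOWAIT);   - m_gethdr()
 *	m = uma_zalloc_arg(zone_pack, &args, M_NOWAIT);   - m_getcl()
 */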
int nmbufs;			/* limits number of mbufs */
int nmbclusters;		/* limits number of mbuf clusters */
int nmbjumbop;			/* limits number of page size jumbo clusters */
int nmbjumbo9;			/* limits number of 9k jumbo clusters */
int nmbjumbo16;			/* limits number of 16k jumbo clusters */

bool mb_use_ext_pgs = false;	/* use M_EXTPG mbufs for sendfile & TLS */
static int
sysctl_mb_use_ext_pgs(SYSCTL_HANDLER_ARGS)
{
	int error, extpg;

	extpg = mb_use_ext_pgs;
	error = sysctl_handle_int(oidp, &extpg, 0, req);
	if (error == 0 && req->newptr != NULL) {
		if (extpg != 0 && !PMAP_HAS_DMAP)
			error = EOPNOTSUPP;
		else
			mb_use_ext_pgs = extpg != 0;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_use_ext_pgs, CTLTYPE_INT | CTLFLAG_RW,
    &mb_use_ext_pgs, 0,
    sysctl_mb_use_ext_pgs, "IU",
    "Use unmapped mbufs for sendfile(2) and TLS offload");
static quad_t maxmbufmem;	/* overall real memory limit for all mbufs */

SYSCTL_QUAD(_kern_ipc, OID_AUTO, maxmbufmem, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &maxmbufmem, 0, "Maximum real memory allocatable to various mbuf types");

static counter_u64_t snd_tag_count;
SYSCTL_COUNTER_U64(_kern_ipc, OID_AUTO, num_snd_tags, CTLFLAG_RW,
    &snd_tag_count, "# of active mbuf send tags");
/*
 * tunable_mbinit() has to be run before any mbuf allocations are done.
 */
static void
tunable_mbinit(void *dummy)
{
	quad_t realmem;
	int extpg;

	/*
	 * The default limit for all mbuf related memory is 1/2 of all
	 * available kernel memory (physical or kmem).
	 * At most it can be 3/4 of available kernel memory.
	 */
	realmem = qmin((quad_t)physmem * PAGE_SIZE, vm_kmem_size);
	maxmbufmem = realmem / 2;
	TUNABLE_QUAD_FETCH("kern.ipc.maxmbufmem", &maxmbufmem);
	if (maxmbufmem > realmem / 4 * 3)
		maxmbufmem = realmem / 4 * 3;

	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	if (nmbclusters == 0)
		nmbclusters = maxmbufmem / MCLBYTES / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
	if (nmbjumbop == 0)
		nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
	if (nmbjumbo9 == 0)
		nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
	if (nmbjumbo16 == 0)
		nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6;

	/*
	 * We need at least as many mbufs as we have clusters of
	 * the various types added together.
	 */
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16)
		nmbufs = lmax(maxmbufmem / MSIZE / 5,
		    nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16);

	/*
	 * Unmapped mbufs can only safely be used on platforms with a direct
	 * map.
	 */
	if (PMAP_HAS_DMAP) {
		extpg = mb_use_ext_pgs;
		TUNABLE_INT_FETCH("kern.ipc.mb_use_ext_pgs", &extpg);
		mb_use_ext_pgs = extpg != 0;
	}
}
SYSINIT(tunable_mbinit, SI_SUB_KMEM, SI_ORDER_MIDDLE, tunable_mbinit, NULL);
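/*
 * The limits above are boot-time tunables; a sketch of overriding them
 * from loader.conf(5) (values are examples only):
 *
 *	kern.ipc.maxmbufmem="1073741824"
 *	kern.ipc.nmbclusters="262144"
 *	kern.ipc.mb_use_ext_pgs="1"
 */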
static int
sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbclusters;

	newnmbclusters = nmbclusters;
	error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
	if (error == 0 && req->newptr && newnmbclusters != nmbclusters) {
		if (newnmbclusters > nmbclusters &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbclusters = newnmbclusters;
			nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
			EVENTHANDLER_INVOKE(nmbclusters_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &nmbclusters, 0,
    sysctl_nmbclusters, "IU",
    "Maximum number of mbuf clusters allowed");

static int
sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbop;

	newnmbjumbop = nmbjumbop;
	error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
	if (error == 0 && req->newptr && newnmbjumbop != nmbjumbop) {
		if (newnmbjumbop > nmbjumbop &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbop = newnmbjumbop;
			nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &nmbjumbop, 0,
    sysctl_nmbjumbop, "IU",
    "Maximum number of mbuf page size jumbo clusters allowed");

static int
sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo9;

	newnmbjumbo9 = nmbjumbo9;
	error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
	if (error == 0 && req->newptr && newnmbjumbo9 != nmbjumbo9) {
		if (newnmbjumbo9 > nmbjumbo9 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo9 = newnmbjumbo9;
			nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &nmbjumbo9, 0,
    sysctl_nmbjumbo9, "IU",
    "Maximum number of mbuf 9k jumbo clusters allowed");

static int
sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo16;

	newnmbjumbo16 = nmbjumbo16;
	error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
	if (error == 0 && req->newptr && newnmbjumbo16 != nmbjumbo16) {
		if (newnmbjumbo16 > nmbjumbo16 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo16 = newnmbjumbo16;
			nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &nmbjumbo16, 0,
    sysctl_nmbjumbo16, "IU",
    "Maximum number of mbuf 16k jumbo clusters allowed");

static int
sysctl_nmbufs(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbufs;

	newnmbufs = nmbufs;
	error = sysctl_handle_int(oidp, &newnmbufs, 0, req);
	if (error == 0 && req->newptr && newnmbufs != nmbufs) {
		if (newnmbufs > nmbufs) {
			nmbufs = newnmbufs;
			nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
			EVENTHANDLER_INVOKE(nmbufs_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &nmbufs, 0, sysctl_nmbufs, "IU",
    "Maximum number of mbufs allowed");
/*
 * Zones from which we allocate.
 */
uma_zone_t	zone_mbuf;
uma_zone_t	zone_clust;
uma_zone_t	zone_pack;
uma_zone_t	zone_jumbop;
uma_zone_t	zone_jumbo9;
uma_zone_t	zone_jumbo16;

/*
 * Local prototypes.
 */
static int	mb_ctor_mbuf(void *, int, void *, int);
static int	mb_ctor_clust(void *, int, void *, int);
static int	mb_ctor_pack(void *, int, void *, int);
static void	mb_dtor_mbuf(void *, int, void *);
static void	mb_dtor_pack(void *, int, void *);
static int	mb_zinit_pack(void *, int, int);
static void	mb_zfini_pack(void *, int);
static void	mb_reclaim(uma_zone_t, int);

/* Ensure that MSIZE is a power of 2. */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);

_Static_assert(sizeof(struct mbuf) <= MSIZE,
    "size of mbuf exceeds MSIZE");
/*
 * Initialize FreeBSD Network buffer allocation.
 */
static void
mbuf_init(void *dummy)
{

	/*
	 * Configure UMA zones for Mbufs, Clusters, and Packets.
	 */
	zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
	    mb_ctor_mbuf, mb_dtor_mbuf, NULL, NULL,
	    MSIZE - 1, UMA_ZONE_CONTIG | UMA_ZONE_MAXBUCKET);
	if (nmbufs > 0)
		nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
	uma_zone_set_warning(zone_mbuf, "kern.ipc.nmbufs limit reached");
	uma_zone_set_maxaction(zone_mbuf, mb_reclaim);

	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
	    mb_ctor_clust, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_CONTIG);
	if (nmbclusters > 0)
		nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
	uma_zone_set_warning(zone_clust, "kern.ipc.nmbclusters limit reached");
	uma_zone_set_maxaction(zone_clust, mb_reclaim);

	zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
	    mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);

	/* Make jumbo frame zone too.  Page size, 9k and 16k. */
	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
	    mb_ctor_clust, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_CONTIG);
	if (nmbjumbop > 0)
		nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
	uma_zone_set_warning(zone_jumbop, "kern.ipc.nmbjumbop limit reached");
	uma_zone_set_maxaction(zone_jumbop, mb_reclaim);

	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
	    mb_ctor_clust, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_CONTIG);
	if (nmbjumbo9 > 0)
		nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
	uma_zone_set_warning(zone_jumbo9, "kern.ipc.nmbjumbo9 limit reached");
	uma_zone_set_maxaction(zone_jumbo9, mb_reclaim);

	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
	    mb_ctor_clust, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_CONTIG);
	if (nmbjumbo16 > 0)
		nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
	uma_zone_set_warning(zone_jumbo16, "kern.ipc.nmbjumbo16 limit reached");
	uma_zone_set_maxaction(zone_jumbo16, mb_reclaim);

	snd_tag_count = counter_u64_alloc(M_WAITOK);
}
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);
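/*
 * Zone usage against the limits configured above can be inspected from
 * userland, e.g. with `vmstat -z | grep -E "mbuf|cluster"` or via the
 * kern.ipc.nmb* sysctls.
 */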
#ifdef DEBUGNET
/*
 * debugnet makes use of a pre-allocated pool of mbufs and clusters.  When
 * debugnet is configured, we initialize a set of UMA cache zones which return
 * items from this pool.  At panic-time, the regular UMA zone pointers are
 * overwritten with those of the cache zones so that drivers may allocate and
 * free mbufs and clusters without attempting to allocate physical memory.
 *
 * We keep mbufs and clusters in a pair of mbuf queues.  In particular, for
 * the purpose of caching clusters, we treat them as mbufs.
 */
static struct mbufq dn_mbufq =
    { STAILQ_HEAD_INITIALIZER(dn_mbufq.mq_head), 0, INT_MAX };
static struct mbufq dn_clustq =
    { STAILQ_HEAD_INITIALIZER(dn_clustq.mq_head), 0, INT_MAX };

static int dn_clsize;
static uma_zone_t dn_zone_mbuf;
static uma_zone_t dn_zone_clust;
static uma_zone_t dn_zone_pack;

static struct debugnet_saved_zones {
	uma_zone_t dsz_mbuf;
	uma_zone_t dsz_clust;
	uma_zone_t dsz_pack;
	uma_zone_t dsz_jumbop;
	uma_zone_t dsz_jumbo9;
	uma_zone_t dsz_jumbo16;
	bool dsz_debugnet_zones_enabled;
} dn_saved_zones;
static int
dn_buf_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct mbufq *q;
	struct mbuf *m;
	int i;

	q = arg;

	for (i = 0; i < count; i++) {
		m = mbufq_dequeue(q);
		if (m == NULL)
			break;
		trash_init(m, q == &dn_mbufq ? MSIZE : dn_clsize, flags);
		store[i] = m;
	}
	KASSERT((flags & M_WAITOK) == 0 || i == count,
	    ("%s: ran out of pre-allocated mbufs", __func__));
	return (i);
}

static void
dn_buf_release(void *arg, void **store, int count)
{
	struct mbufq *q;
	struct mbuf *m;
	int i;

	q = arg;

	for (i = 0; i < count; i++) {
		m = store[i];
		(void)mbufq_enqueue(q, m);
	}
}

static int
dn_pack_import(void *arg __unused, void **store, int count, int domain __unused,
    int flags)
{
	struct mbuf *m;
	void *clust;
	int i;

	for (i = 0; i < count; i++) {
		m = m_get(M_NOWAIT, MT_DATA);
		if (m == NULL)
			break;
		clust = uma_zalloc(dn_zone_clust, M_NOWAIT);
		if (clust == NULL) {
			m_free(m);
			break;
		}
		mb_ctor_clust(clust, dn_clsize, m, 0);
		store[i] = m;
	}
	KASSERT((flags & M_WAITOK) == 0 || i == count,
	    ("%s: ran out of pre-allocated mbufs", __func__));
	return (i);
}

static void
dn_pack_release(void *arg __unused, void **store, int count)
{
	struct mbuf *m;
	void *clust;
	int i;

	for (i = 0; i < count; i++) {
		m = store[i];
		clust = m->m_ext.ext_buf;
		uma_zfree(dn_zone_clust, clust);
		uma_zfree(dn_zone_mbuf, m);
	}
}
/*
 * Free the pre-allocated mbufs and clusters reserved for debugnet, and destroy
 * the corresponding UMA cache zones.
 */
void
debugnet_mbuf_drain(void)
{
	struct mbuf *m;
	void *item;

	if (dn_zone_mbuf != NULL) {
		uma_zdestroy(dn_zone_mbuf);
		dn_zone_mbuf = NULL;
	}
	if (dn_zone_clust != NULL) {
		uma_zdestroy(dn_zone_clust);
		dn_zone_clust = NULL;
	}
	if (dn_zone_pack != NULL) {
		uma_zdestroy(dn_zone_pack);
		dn_zone_pack = NULL;
	}

	while ((m = mbufq_dequeue(&dn_mbufq)) != NULL)
		m_free(m);
	while ((item = mbufq_dequeue(&dn_clustq)) != NULL)
		uma_zfree(m_getzone(dn_clsize), item);
}
/*
 * Callback invoked immediately prior to starting a debugnet connection.
 */
void
debugnet_mbuf_start(void)
{

	MPASS(!dn_saved_zones.dsz_debugnet_zones_enabled);

	/* Save the old zone pointers to restore when debugnet is closed. */
	dn_saved_zones = (struct debugnet_saved_zones) {
		.dsz_debugnet_zones_enabled = true,
		.dsz_mbuf = zone_mbuf,
		.dsz_clust = zone_clust,
		.dsz_pack = zone_pack,
		.dsz_jumbop = zone_jumbop,
		.dsz_jumbo9 = zone_jumbo9,
		.dsz_jumbo16 = zone_jumbo16,
	};

	/*
	 * All cluster zones return buffers of the size requested by the
	 * drivers.  It's up to the driver to reinitialize the zones if the
	 * MTU of a debugnet-enabled interface changes.
	 */
	printf("debugnet: overwriting mbuf zone pointers\n");
	zone_mbuf = dn_zone_mbuf;
	zone_clust = dn_zone_clust;
	zone_pack = dn_zone_pack;
	zone_jumbop = dn_zone_clust;
	zone_jumbo9 = dn_zone_clust;
	zone_jumbo16 = dn_zone_clust;
}
/*
 * Callback invoked when a debugnet connection is closed/finished.
 */
void
debugnet_mbuf_finish(void)
{

	MPASS(dn_saved_zones.dsz_debugnet_zones_enabled);

	printf("debugnet: restoring mbuf zone pointers\n");
	zone_mbuf = dn_saved_zones.dsz_mbuf;
	zone_clust = dn_saved_zones.dsz_clust;
	zone_pack = dn_saved_zones.dsz_pack;
	zone_jumbop = dn_saved_zones.dsz_jumbop;
	zone_jumbo9 = dn_saved_zones.dsz_jumbo9;
	zone_jumbo16 = dn_saved_zones.dsz_jumbo16;

	memset(&dn_saved_zones, 0, sizeof(dn_saved_zones));
}
/*
 * Reinitialize the debugnet mbuf+cluster pool and cache zones.
 */
void
debugnet_mbuf_reinit(int nmbuf, int nclust, int clsize)
{
	struct mbuf *m;
	void *item;

	debugnet_mbuf_drain();

	dn_clsize = clsize;

	dn_zone_mbuf = uma_zcache_create("debugnet_" MBUF_MEM_NAME,
	    MSIZE, mb_ctor_mbuf, mb_dtor_mbuf, NULL, NULL,
	    dn_buf_import, dn_buf_release,
	    &dn_mbufq, UMA_ZONE_NOBUCKET);

	dn_zone_clust = uma_zcache_create("debugnet_" MBUF_CLUSTER_MEM_NAME,
	    clsize, mb_ctor_clust, NULL, NULL, NULL,
	    dn_buf_import, dn_buf_release,
	    &dn_clustq, UMA_ZONE_NOBUCKET);

	dn_zone_pack = uma_zcache_create("debugnet_" MBUF_PACKET_MEM_NAME,
	    MCLBYTES, mb_ctor_pack, mb_dtor_pack, NULL, NULL,
	    dn_pack_import, dn_pack_release,
	    NULL, UMA_ZONE_NOBUCKET);

	while (nmbuf-- > 0) {
		m = m_get(M_WAITOK, MT_DATA);
		uma_zfree(dn_zone_mbuf, m);
	}
	while (nclust-- > 0) {
		item = uma_zalloc(m_getzone(dn_clsize), M_WAITOK);
		uma_zfree(dn_zone_clust, item);
	}
}
#endif /* DEBUGNET */
/*
 * Constructor for Mbuf primary zone.
 *
 * The 'arg' pointer points to a mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API.  See mbuf.h.
 */
static int
mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
	int error;
	int flags;
	short type;

	args = (struct mb_args *)arg;
	type = args->type;

	/*
	 * The mbuf is initialized later.  The caller has the
	 * responsibility to set up any MAC labels too.
	 */
	if (type == MT_NOINIT)
		return (0);

	m = (struct mbuf *)mem;
	flags = args->flags;
	MPASS((flags & M_NOFREE) == 0);

	error = m_init(m, how, type, flags);

	return (error);
}
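/*
 * For reference, the mb_args structure passed through uma_zalloc_arg() is
 * declared in sys/mbuf.h and carries just the allocation flags and mbuf
 * type, roughly:
 *
 *	struct mb_args {
 *		int	flags;	- M_PKTHDR, M_EOR, ...
 *		short	type;	- MT_DATA, MT_CONTROL, ...
 *	};
 */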
/*
 * The Mbuf primary zone destructor.
 */
static void
mb_dtor_mbuf(void *mem, int size, void *arg)
{
	struct mbuf *m;
	unsigned long flags __diagused;

	m = (struct mbuf *)mem;
	flags = (unsigned long)arg;

	KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
	KASSERT((flags & 0x1) == 0,
	    ("%s: obsolete MB_DTOR_SKIP passed", __func__));
	if ((m->m_flags & M_PKTHDR) && !SLIST_EMPTY(&m->m_pkthdr.tags))
		m_tag_delete_chain(m, NULL);
}
/*
 * The Mbuf Packet zone destructor.
 */
static void
mb_dtor_pack(void *mem, int size, void *arg)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);

	/* Make sure we've got a clean cluster back. */
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
	KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
	KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__));
	KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
	KASSERT(m->m_ext.ext_size == MCLBYTES,
	    ("%s: ext_size != MCLBYTES", __func__));
	KASSERT(m->m_ext.ext_type == EXT_PACKET,
	    ("%s: ext_type != EXT_PACKET", __func__));
#if defined(INVARIANTS) && !defined(KMSAN)
	trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
#endif

	/*
	 * If there are processes blocked on zone_clust, waiting for pages
	 * to be freed up, cause them to be woken up by draining the
	 * packet zone.  We are exposed to a race here (in the check for
	 * the UMA_ZFLAG_FULL) where we might miss the flag set, but that
	 * is deliberate.  We don't want to acquire the zone lock for every
	 * mbuf free.
	 */
	if (uma_zone_exhausted(zone_clust))
		uma_zone_reclaim(zone_pack, UMA_RECLAIM_DRAIN);
}
/*
 * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.  If 'arg' is
 * empty we allocate just the cluster without setting
 * the mbuf to it.  See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;

	m = (struct mbuf *)arg;
	if (m != NULL) {
		m->m_ext.ext_buf = (char *)mem;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_arg1 = NULL;
		m->m_ext.ext_arg2 = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = m_gettype(size);
		m->m_ext.ext_flags = EXT_FLAG_EMBREF;
		m->m_ext.ext_count = 1;
	}

	return (0);
}
/*
 * The Packet secondary zone's init routine, executed on the
 * object's transition from mbuf keg slab to zone cache.
 */
static int
mb_zinit_pack(void *mem, int size, int how)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;		/* m is virgin. */
	if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
	    m->m_ext.ext_buf == NULL)
		return (ENOMEM);
	m->m_ext.ext_type = EXT_PACKET;	/* Override. */
#if defined(INVARIANTS) && !defined(KMSAN)
	trash_init(m->m_ext.ext_buf, MCLBYTES, how);
#endif
	return (0);
}
/*
 * The Packet secondary zone's fini routine, executed on the
 * object's transition from zone cache to keg slab.
 */
static void
mb_zfini_pack(void *mem, int size)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
#if defined(INVARIANTS) && !defined(KMSAN)
	trash_fini(m->m_ext.ext_buf, MCLBYTES);
#endif
	uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
#if defined(INVARIANTS) && !defined(KMSAN)
	trash_dtor(mem, size, NULL);
#endif
}
792 * The "packet" keg constructor.
795 mb_ctor_pack(void *mem, int size, void *arg, int how)
798 struct mb_args *args;
802 m = (struct mbuf *)mem;
803 args = (struct mb_args *)arg;
806 MPASS((flags & M_NOFREE) == 0);
808 #if defined(INVARIANTS) && !defined(KMSAN)
809 trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
812 error = m_init(m, how, type, flags);
814 /* m_ext is already initialized. */
815 m->m_data = m->m_ext.ext_buf;
816 m->m_flags = (flags | M_EXT);
/*
 * This is the protocol drain routine.  Called by UMA whenever any of the
 * mbuf zones is close to its limit.
 */
static void
mb_reclaim(uma_zone_t zone __unused, int pending __unused)
{

	EVENTHANDLER_INVOKE(mbuf_lowmem, VM_LOW_MBUFS);
}
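/*
 * A hedged sketch of how a consumer might hook the low-memory event invoked
 * above; the handler name is hypothetical and its signature assumes the
 * vm_lowmem-style (void *, int) handler type:
 *
 *	static void
 *	myproto_lowmem(void *arg __unused, int flags __unused)
 *	{
 *		- drain protocol caches here -
 *	}
 *	EVENTHANDLER_DEFINE(mbuf_lowmem, myproto_lowmem, NULL,
 *	    EVENTHANDLER_PRI_ANY);
 */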
833 * Free "count" units of I/O from an mbuf chain. They could be held
834 * in M_EXTPG or just as a normal mbuf. This code is intended to be
835 * called in an error path (I/O error, closed connection, etc).
838 mb_free_notready(struct mbuf *m, int count)
842 for (i = 0; i < count && m != NULL; i++) {
843 if ((m->m_flags & M_EXTPG) != 0) {
845 if (m->m_epg_nrdy != 0)
850 KASSERT(i == count, ("Removed only %d items from %p", i, m));
/*
 * Compress an unmapped mbuf into a simple mbuf when it holds a small
 * amount of data.  This is used as a DoS defense to avoid having
 * small packets tie up wired pages, an ext_pgs structure, and an
 * mbuf.  Since this converts the existing mbuf in place, it can only
 * be used if there are no other references to 'm'.
 */
int
mb_unmapped_compress(struct mbuf *m)
{
	volatile u_int *refcnt;
	char buf[MLEN];

	/*
	 * Assert that 'm' does not have a packet header.  If 'm' had
	 * a packet header, it would only be able to hold MHLEN bytes
	 * and m_data would have to be initialized differently.
	 */
	KASSERT((m->m_flags & M_PKTHDR) == 0 && (m->m_flags & M_EXTPG),
	    ("%s: m %p !M_EXTPG or M_PKTHDR", __func__, m));
	KASSERT(m->m_len <= MLEN, ("m_len too large %p", m));

	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = &m->m_ext.ext_count;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
	}

	if (*refcnt != 1)
		return (EBUSY);

	m_copydata(m, 0, m->m_len, buf);

	/* Free the backing pages. */
	m->m_ext.ext_free(m);

	/* Turn 'm' into a "normal" mbuf. */
	m->m_flags &= ~(M_EXT | M_RDONLY | M_EXTPG);
	m->m_data = m->m_dat;

	/* Copy data back into m. */
	bcopy(buf, mtod(m, char *), m->m_len);

	return (0);
}
/*
 * These next few routines are used to permit downgrading an unmapped
 * mbuf to a chain of mapped mbufs.  This is used when an interface
 * doesn't support unmapped mbufs or if checksums need to be
 * computed in software.
 *
 * Each unmapped mbuf is converted to a chain of mbufs.  First, any
 * TLS header data is stored in a regular mbuf.  Second, each page of
 * unmapped data is stored in an mbuf with an EXT_SFBUF external
 * cluster.  These mbufs use an sf_buf to provide a valid KVA for the
 * associated physical page.  They also hold a reference on the
 * original M_EXTPG mbuf to ensure the physical page doesn't go away.
 * Finally, any TLS trailer data is stored in a regular mbuf.
 *
 * mb_unmapped_free_mext() is the ext_free handler for the EXT_SFBUF
 * mbufs.  It frees the associated sf_buf and releases its reference
 * on the original M_EXTPG mbuf.
 *
 * _mb_unmapped_to_ext() is a helper function that converts a single
 * unmapped mbuf into a chain of mbufs.
 *
 * mb_unmapped_to_ext() is the public function that walks an mbuf
 * chain converting any unmapped mbufs to mapped mbufs.  It returns
 * the new chain of mapped mbufs on success.  On failure it frees
 * the original mbuf chain and returns NULL.
 */
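/*
 * Illustrative use from a transmit path (hedged sketch; the surrounding
 * driver logic is hypothetical): an interface without M_EXTPG support can
 * downgrade a chain before DMA mapping:
 *
 *	if (m->m_flags & M_EXTPG) {
 *		m = mb_unmapped_to_ext(m);
 *		if (m == NULL)
 *			return (ENOBUFS);
 *	}
 */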
static void
mb_unmapped_free_mext(struct mbuf *m)
{
	struct sf_buf *sf;
	struct mbuf *old_m;

	sf = m->m_ext.ext_arg1;
	sf_buf_free(sf);

	/* Drop the reference on the backing M_EXTPG mbuf. */
	old_m = m->m_ext.ext_arg2;
	mb_free_extpg(old_m);
}
static struct mbuf *
_mb_unmapped_to_ext(struct mbuf *m)
{
	struct mbuf *m_new, *top, *prev, *mref;
	struct sf_buf *sf;
	vm_page_t pg;
	int i, len, off, pglen, pgoff, seglen, segoff;
	volatile u_int *refcnt;
	u_int ref_inc = 0;

	M_ASSERTEXTPG(m);

	KASSERT(m->m_epg_tls == NULL, ("%s: can't convert TLS mbuf %p",
	    __func__, m));
	len = m->m_len;

	/* See if this is the mbuf that holds the embedded refcount. */
	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = &m->m_ext.ext_count;
		mref = m;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
		mref = __containerof(refcnt, struct mbuf, m_ext.ext_count);
	}

	/* Skip over any data removed from the front. */
	off = mtod(m, vm_offset_t);

	top = NULL;
	if (m->m_epg_hdrlen != 0) {
		if (off >= m->m_epg_hdrlen) {
			off -= m->m_epg_hdrlen;
		} else {
			seglen = m->m_epg_hdrlen - off;
			segoff = off;
			seglen = min(seglen, len);
			off = 0;
			len -= seglen;
			m_new = m_get(M_NOWAIT, MT_DATA);
			if (m_new == NULL)
				goto fail;
			m_new->m_len = seglen;
			prev = top = m_new;
			memcpy(mtod(m_new, void *), &m->m_epg_hdr[segoff],
			    seglen);
		}
	}
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs && len > 0; i++) {
		pglen = m_epg_pagelen(m, i, pgoff);
		if (off >= pglen) {
			off -= pglen;
			pgoff = 0;
			continue;
		}
		seglen = pglen - off;
		segoff = pgoff + off;
		off = 0;
		seglen = min(seglen, len);
		len -= seglen;

		pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
		m_new = m_get(M_NOWAIT, MT_DATA);
		if (m_new == NULL)
			goto fail;
		if (top == NULL) {
			top = prev = m_new;
		} else {
			prev->m_next = m_new;
			prev = m_new;
		}
		sf = sf_buf_alloc(pg, SFB_NOWAIT);
		if (sf == NULL)
			goto fail;

		ref_inc++;
		m_extadd(m_new, (char *)sf_buf_kva(sf), PAGE_SIZE,
		    mb_unmapped_free_mext, sf, mref, M_RDONLY, EXT_SFBUF);
		m_new->m_data += segoff;
		m_new->m_len = seglen;

		pgoff = 0;
	};
	if (len != 0) {
		KASSERT((off + len) <= m->m_epg_trllen,
		    ("off + len > trail (%d + %d > %d)", off, len,
		    m->m_epg_trllen));
		m_new = m_get(M_NOWAIT, MT_DATA);
		if (m_new == NULL)
			goto fail;
		if (top == NULL)
			top = m_new;
		else
			prev->m_next = m_new;
		m_new->m_len = len;
		memcpy(mtod(m_new, void *), &m->m_epg_trail[off], len);
	}

	if (ref_inc != 0) {
		/*
		 * Obtain an additional reference on the old mbuf for
		 * each created EXT_SFBUF mbuf.  They will be dropped
		 * in mb_unmapped_free_mext().
		 */
		if (*refcnt == 1)
			*refcnt += ref_inc;
		else
			atomic_add_int(refcnt, ref_inc);
	}
	m_free(m);
	return (top);

fail:
	if (ref_inc != 0) {
		/*
		 * Obtain an additional reference on the old mbuf for
		 * each created EXT_SFBUF mbuf.  They will be
		 * immediately dropped when these mbufs are freed
		 * below.
		 */
		if (*refcnt == 1)
			*refcnt += ref_inc;
		else
			atomic_add_int(refcnt, ref_inc);
	}
	m_free(m);
	m_freem(top);
	return (NULL);
}
struct mbuf *
mb_unmapped_to_ext(struct mbuf *top)
{
	struct mbuf *m, *next, *prev = NULL;

	for (m = top; m != NULL; m = next) {
		/* m might be freed, so cache the next pointer. */
		next = m->m_next;
		if (m->m_flags & M_EXTPG) {
			if (prev != NULL) {
				/*
				 * Remove 'm' from the new chain so
				 * that the 'top' chain terminates
				 * before 'm' in case 'top' is freed
				 * due to an error.
				 */
				prev->m_next = NULL;
			}
			m = _mb_unmapped_to_ext(m);
			if (m == NULL) {
				m_freem(top);
				m_freem(next);
				return (NULL);
			}
			if (prev == NULL) {
				top = m;
			} else {
				prev->m_next = m;
			}

			/*
			 * Replaced one mbuf with a chain, so we must
			 * find the end of chain.
			 */
			prev = m_last(m);
		} else {
			if (prev != NULL)
				prev->m_next = m;
			prev = m;
		}
	}
	return (top);
}
/*
 * Allocate an empty M_EXTPG mbuf.  The ext_free routine is
 * responsible for freeing any pages backing this mbuf when it is
 * freed.
 */
struct mbuf *
mb_alloc_ext_pgs(int how, m_ext_free_t ext_free)
{
	struct mbuf *m;

	m = m_get(how, MT_DATA);
	if (m == NULL)
		return (NULL);

	m->m_epg_npgs = 0;
	m->m_epg_nrdy = 0;
	m->m_epg_1st_off = 0;
	m->m_epg_last_len = 0;
	m->m_epg_flags = 0;
	m->m_epg_hdrlen = 0;
	m->m_epg_trllen = 0;
	m->m_epg_tls = NULL;
	m->m_data = NULL;
	m->m_flags |= (M_EXT | M_RDONLY | M_EXTPG);
	m->m_ext.ext_flags = EXT_FLAG_EMBREF;
	m->m_ext.ext_count = 1;
	m->m_ext.ext_size = 0;
	m->m_ext.ext_free = ext_free;
	return (m);
}
/*
 * Clean up after mbufs with M_EXT storage attached to them if the
 * reference count hits 1.
 */
void
mb_free_ext(struct mbuf *m)
{
	volatile u_int *refcnt;
	struct mbuf *mref;
	int freembuf;

	KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));

	/* See if this is the mbuf that holds the embedded refcount. */
	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = &m->m_ext.ext_count;
		mref = m;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
		mref = __containerof(refcnt, struct mbuf, m_ext.ext_count);
	}

	/*
	 * Check if the header is embedded in the cluster.  It is
	 * important that we can't touch any of the mbuf fields
	 * after we have freed the external storage, since mbuf
	 * could have been embedded in it.  For now, the mbufs
	 * embedded into the cluster are always of type EXT_EXTREF,
	 * and for this type we won't free the mref.
	 */
	if (m->m_flags & M_NOFREE) {
		freembuf = 0;
		KASSERT(m->m_ext.ext_type == EXT_EXTREF ||
		    m->m_ext.ext_type == EXT_RXRING,
		    ("%s: no-free mbuf %p has wrong type", __func__, m));
	} else
		freembuf = 1;

	/* Free attached storage if this mbuf is the only reference to it. */
	if (*refcnt == 1 || atomic_fetchadd_int(refcnt, -1) == 1) {
		switch (m->m_ext.ext_type) {
		case EXT_PACKET:
			/* The packet zone is special. */
			if (*refcnt == 0)
				*refcnt = 1;
			uma_zfree(zone_pack, mref);
			break;
		case EXT_CLUSTER:
			uma_zfree(zone_clust, m->m_ext.ext_buf);
			m_free_raw(mref);
			break;
		case EXT_JUMBOP:
			uma_zfree(zone_jumbop, m->m_ext.ext_buf);
			m_free_raw(mref);
			break;
		case EXT_JUMBO9:
			uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
			m_free_raw(mref);
			break;
		case EXT_JUMBO16:
			uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
			m_free_raw(mref);
			break;
		case EXT_SFBUF:
		case EXT_NET_DRV:
		case EXT_MOD_TYPE:
		case EXT_DISPOSABLE:
			KASSERT(mref->m_ext.ext_free != NULL,
			    ("%s: ext_free not set", __func__));
			mref->m_ext.ext_free(mref);
			m_free_raw(mref);
			break;
		case EXT_EXTREF:
			KASSERT(m->m_ext.ext_free != NULL,
			    ("%s: ext_free not set", __func__));
			m->m_ext.ext_free(m);
			break;
		case EXT_RXRING:
			KASSERT(m->m_ext.ext_free == NULL,
			    ("%s: ext_free is set", __func__));
			break;
		default:
			KASSERT(m->m_ext.ext_type == 0,
			    ("%s: unknown ext_type", __func__));
		}
	}

	if (freembuf && m != mref)
		m_free_raw(m);
}
/*
 * Clean up after mbufs with M_EXTPG storage attached to them if the
 * reference count hits 1.
 */
void
mb_free_extpg(struct mbuf *m)
{
	volatile u_int *refcnt;
	struct mbuf *mref;

	M_ASSERTEXTPG(m);

	/* See if this is the mbuf that holds the embedded refcount. */
	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = &m->m_ext.ext_count;
		mref = m;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
		mref = __containerof(refcnt, struct mbuf, m_ext.ext_count);
	}

	/* Free attached storage if this mbuf is the only reference to it. */
	if (*refcnt == 1 || atomic_fetchadd_int(refcnt, -1) == 1) {
		KASSERT(mref->m_ext.ext_free != NULL,
		    ("%s: ext_free not set", __func__));

		mref->m_ext.ext_free(mref);
#ifdef KERN_TLS
		if (mref->m_epg_tls != NULL &&
		    !refcount_release_if_not_last(&mref->m_epg_tls->refcount))
			ktls_enqueue_to_free(mref);
		else
#endif
			m_free_raw(mref);
	}

	if (m != mref)
		m_free_raw(m);
}
/*
 * Official mbuf(9) allocation KPI for stack and drivers:
 *
 * m_get()	- a single mbuf without any attachments, sys/mbuf.h.
 * m_gethdr()	- a single mbuf initialized as M_PKTHDR, sys/mbuf.h.
 * m_getcl()	- an mbuf + 2k cluster, sys/mbuf.h.
 * m_clget()	- attach cluster to already allocated mbuf.
 * m_cljget()	- attach jumbo cluster to already allocated mbuf.
 * m_get2()	- allocate minimum mbuf that would fit size argument.
 * m_getm2()	- allocate a chain of mbufs/clusters.
 * m_extadd()	- attach external cluster to mbuf.
 *
 * m_free()	- free single mbuf with its tags and ext, sys/mbuf.h.
 * m_freem()	- free chain of mbufs.
 */
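/*
 * A minimal sketch of the common allocation pattern using this KPI:
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);	- mbuf + 2k cluster
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	...
 *	m_freem(m);			- frees the mbuf and its cluster
 */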
int
m_clget(struct mbuf *m, int how)
{

	KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
	    __func__, m));
	m->m_ext.ext_buf = (char *)NULL;
	uma_zalloc_arg(zone_clust, m, how);
	/*
	 * On a cluster allocation failure, drain the packet zone and retry,
	 * we might be able to loosen a few clusters up on the drain.
	 */
	if ((how & M_NOWAIT) && (m->m_ext.ext_buf == NULL)) {
		uma_zone_reclaim(zone_pack, UMA_RECLAIM_DRAIN);
		uma_zalloc_arg(zone_clust, m, how);
	}
	MBUF_PROBE2(m__clget, m, how);
	return (m->m_flags & M_EXT);
}
/*
 * m_cljget() is different from m_clget() as it can allocate clusters without
 * attaching them to an mbuf.  In that case the return value is the pointer
 * to the cluster of the requested size.  If an mbuf was specified, it gets
 * the cluster attached to it and the return value can be safely ignored.
 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
 */
void *
m_cljget(struct mbuf *m, int how, int size)
{
	uma_zone_t zone;
	void *retval;

	if (m != NULL) {
		KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
		    __func__, m));
		m->m_ext.ext_buf = NULL;
	}

	zone = m_getzone(size);
	retval = uma_zalloc_arg(zone, m, how);

	MBUF_PROBE4(m__cljget, m, how, size, retval);

	return (retval);
}
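/*
 * Hedged usage sketch: attach a page-size jumbo cluster to a fresh mbuf;
 * the return value may be ignored when an mbuf is passed:
 *
 *	m = m_gethdr(M_NOWAIT, MT_DATA);
 *	if (m == NULL || m_cljget(m, M_NOWAIT, MJUMPAGESIZE) == NULL) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 */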
/*
 * m_get2() allocates minimum mbuf that would fit "size" argument.
 */
struct mbuf *
m_get2(int size, int how, short type, int flags)
{
	struct mb_args args;
	struct mbuf *m, *n;

	args.flags = flags;
	args.type = type;

	if (size <= MHLEN || (size <= MLEN && (flags & M_PKTHDR) == 0))
		return (uma_zalloc_arg(zone_mbuf, &args, how));
	if (size <= MCLBYTES)
		return (uma_zalloc_arg(zone_pack, &args, how));

	if (size > MJUMPAGESIZE)
		return (NULL);

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	n = uma_zalloc_arg(zone_jumbop, m, how);
	if (n == NULL) {
		m_free_raw(m);
		return (NULL);
	}

	return (m);
}
/*
 * m_get3() allocates minimum mbuf that would fit "size" argument.
 * Unlike m_get2() it can allocate clusters up to MJUM16BYTES.
 */
struct mbuf *
m_get3(int size, int how, short type, int flags)
{
	struct mb_args args;
	struct mbuf *m, *n;
	uma_zone_t zone;

	if (size <= MJUMPAGESIZE)
		return (m_get2(size, how, type, flags));

	if (size > MJUM16BYTES)
		return (NULL);

	args.flags = flags;
	args.type = type;

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	if (size <= MJUM9BYTES)
		zone = zone_jumbo9;
	else
		zone = zone_jumbo16;

	n = uma_zalloc_arg(zone, m, how);
	if (n == NULL) {
		m_free_raw(m);
		return (NULL);
	}

	return (m);
}
/*
 * m_getjcl() returns an mbuf with a cluster of the specified size attached.
 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
 */
struct mbuf *
m_getjcl(int how, short type, int flags, int size)
{
	struct mb_args args;
	struct mbuf *m, *n;
	uma_zone_t zone;

	if (size == MCLBYTES)
		return (m_getcl(how, type, flags));

	args.flags = flags;
	args.type = type;

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	zone = m_getzone(size);
	n = uma_zalloc_arg(zone, m, how);
	if (n == NULL) {
		m_free_raw(m);
		return (NULL);
	}
	MBUF_PROBE5(m__getjcl, how, type, flags, size, m);
	return (m);
}
/*
 * Allocate a given length worth of mbufs and/or clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If an
 * existing mbuf chain is provided, then we will append the new chain
 * to the existing one and return a pointer to the provided mbuf.
 */
struct mbuf *
m_getm2(struct mbuf *m, int len, int how, short type, int flags)
{
	struct mbuf *mb, *nm = NULL, *mtail = NULL;

	KASSERT(len >= 0, ("%s: len is < 0", __func__));

	/* Validate flags. */
	flags &= (M_PKTHDR | M_EOR);

	/* Packet header mbuf must be first in chain. */
	if ((flags & M_PKTHDR) && m != NULL)
		flags &= ~M_PKTHDR;

	/* Loop and append maximum sized mbufs to the chain tail. */
	while (len > 0) {
		mb = NULL;
		if (len > MCLBYTES) {
			mb = m_getjcl(M_NOWAIT, type, (flags & M_PKTHDR),
			    MJUMPAGESIZE);
		}
		if (mb == NULL) {
			if (len >= MINCLSIZE)
				mb = m_getcl(how, type, (flags & M_PKTHDR));
			else if (flags & M_PKTHDR)
				mb = m_gethdr(how, type);
			else
				mb = m_get(how, type);

			/*
			 * Fail the whole operation if one mbuf can't be
			 * allocated.
			 */
			if (mb == NULL) {
				m_freem(nm);
				return (NULL);
			}
		}

		/* Book keeping. */
		len -= M_SIZE(mb);
		if (mtail != NULL)
			mtail->m_next = mb;
		else
			nm = mb;
		mtail = mb;
		flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */
	}
	if (flags & M_EOR)
		mtail->m_flags |= M_EOR;  /* Only valid on the last mbuf. */

	/* If mbuf was supplied, append new chain to the end of it. */
	if (m != NULL) {
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
		mtail->m_next = nm;
		mtail->m_flags &= ~M_EOR;
	} else
		m = nm;

	return (m);
}
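/*
 * Hedged example of building a chain to cover a payload length (the length
 * here is arbitrary):
 *
 *	struct mbuf *chain;
 *
 *	chain = m_getm2(NULL, 64 * 1024, M_NOWAIT, MT_DATA, M_PKTHDR);
 *	if (chain == NULL)
 *		return (ENOBUFS);
 */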
/*-
 * Configure a provided mbuf to refer to the provided external storage
 * buffer and setup a reference count for said buffer.
 *
 * Arguments:
 *    mb     The existing mbuf to which to attach the provided buffer.
 *    buf    The address of the provided external storage buffer.
 *    size   The size of the provided buffer.
 *    freef  A pointer to a routine that is responsible for freeing the
 *           provided external storage buffer.
 *    args   A pointer to an argument structure (of any type) to be passed
 *           to the provided freef routine (may be NULL).
 *    flags  Any other flags to be passed to the provided mbuf.
 *    type   The type that the external storage buffer should be
 *           labeled with.
 *
 * Returns:
 *    Nothing.
 */
void
m_extadd(struct mbuf *mb, char *buf, u_int size, m_ext_free_t freef,
    void *arg1, void *arg2, int flags, int type)
{

	KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));

	mb->m_flags |= (M_EXT | flags);
	mb->m_ext.ext_buf = buf;
	mb->m_data = mb->m_ext.ext_buf;
	mb->m_ext.ext_size = size;
	mb->m_ext.ext_free = freef;
	mb->m_ext.ext_arg1 = arg1;
	mb->m_ext.ext_arg2 = arg2;
	mb->m_ext.ext_type = type;

	if (type != EXT_EXTREF) {
		mb->m_ext.ext_count = 1;
		mb->m_ext.ext_flags = EXT_FLAG_EMBREF;
	} else
		mb->m_ext.ext_flags = 0;
}
/*
 * Free an entire chain of mbufs and associated external buffers, if
 * applicable.
 */
void
m_freem(struct mbuf *mb)
{

	MBUF_PROBE1(m__freem, mb);
	while (mb != NULL)
		mb = m_free(mb);
}

/*
 * Temporary primitive to allow freeing without going through m_free.
 */
void
m_free_raw(struct mbuf *mb)
{

	uma_zfree(zone_mbuf, mb);
}
int
m_snd_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
    struct m_snd_tag **mstp)
{

	return (if_snd_tag_alloc(ifp, params, mstp));
}

void
m_snd_tag_init(struct m_snd_tag *mst, struct ifnet *ifp,
    const struct if_snd_tag_sw *sw)
{

	if_ref(ifp);
	mst->ifp = ifp;
	refcount_init(&mst->refcount, 1);
	mst->sw = sw;
	counter_u64_add(snd_tag_count, 1);
}

void
m_snd_tag_destroy(struct m_snd_tag *mst)
{
	struct ifnet *ifp;

	ifp = mst->ifp;
	mst->sw->snd_tag_free(mst);
	if_rele(ifp);
	counter_u64_add(snd_tag_count, -1);
}
void
m_rcvif_serialize(struct mbuf *m)
{
	u_short idx, gen;

	M_ASSERTPKTHDR(m);
	idx = if_getindex(m->m_pkthdr.rcvif);
	gen = if_getidxgen(m->m_pkthdr.rcvif);
	m->m_pkthdr.rcvidx = idx;
	m->m_pkthdr.rcvgen = gen;
	if (__predict_false(m->m_pkthdr.leaf_rcvif != NULL)) {
		idx = if_getindex(m->m_pkthdr.leaf_rcvif);
		gen = if_getidxgen(m->m_pkthdr.leaf_rcvif);
	} else {
		idx = -1;
		gen = 0;
	}
	m->m_pkthdr.leaf_rcvidx = idx;
	m->m_pkthdr.leaf_rcvgen = gen;
}
struct ifnet *
m_rcvif_restore(struct mbuf *m)
{
	struct ifnet *ifp, *leaf_ifp;

	M_ASSERTPKTHDR(m);
	NET_EPOCH_ASSERT();

	ifp = ifnet_byindexgen(m->m_pkthdr.rcvidx, m->m_pkthdr.rcvgen);
	if (ifp == NULL || (if_getflags(ifp) & IFF_DYING))
		return (NULL);

	if (__predict_true(m->m_pkthdr.leaf_rcvidx == (u_short)-1)) {
		leaf_ifp = NULL;
	} else {
		leaf_ifp = ifnet_byindexgen(m->m_pkthdr.leaf_rcvidx,
		    m->m_pkthdr.leaf_rcvgen);
		if (__predict_false(leaf_ifp != NULL &&
		    (if_getflags(leaf_ifp) & IFF_DYING)))
			leaf_ifp = NULL;
	}

	m->m_pkthdr.leaf_rcvif = leaf_ifp;
	m->m_pkthdr.rcvif = ifp;

	return (ifp);
}
/*
 * Allocate an mbuf with anonymous external pages.
 */
struct mbuf *
mb_alloc_ext_plus_pages(int len, int how)
{
	struct mbuf *m;
	vm_page_t pg;
	int i, npgs;

	m = mb_alloc_ext_pgs(how, mb_free_mext_pgs);
	if (m == NULL)
		return (NULL);
	m->m_epg_flags |= EPG_FLAG_ANON;
	npgs = howmany(len, PAGE_SIZE);
	for (i = 0; i < npgs; i++) {
		do {
			pg = vm_page_alloc_noobj(VM_ALLOC_NODUMP |
			    VM_ALLOC_WIRED);
			if (pg == NULL) {
				if (how == M_NOWAIT) {
					m->m_epg_npgs = i;
					m_free(m);
					return (NULL);
				}
				vm_wait(NULL);
			}
		} while (pg == NULL);
		m->m_epg_pa[i] = VM_PAGE_TO_PHYS(pg);
	}
	m->m_epg_npgs = npgs;
	return (m);
}
/*
 * Copy the data in the mbuf chain to a chain of mbufs with anonymous external
 * pages.
 *
 * len is the length of data in the input mbuf chain.
 * mlen is the maximum number of bytes put into each ext_page mbuf.
 */
struct mbuf *
mb_mapped_to_unmapped(struct mbuf *mp, int len, int mlen, int how,
    struct mbuf **mlast)
{
	struct mbuf *m, *mout;
	char *pgpos, *mbpos;
	int i, mblen, mbufsiz, pglen, xfer;

	if (len == 0)
		return (NULL);
	mbufsiz = min(mlen, len);
	m = mout = mb_alloc_ext_plus_pages(mbufsiz, how);
	if (m == NULL)
		return (m);
	pgpos = (char *)(void *)PHYS_TO_DMAP(m->m_epg_pa[0]);
	pglen = PAGE_SIZE;
	mblen = 0;
	i = 0;
	do {
		if (pglen == 0) {
			if (++i == m->m_epg_npgs) {
				m->m_epg_last_len = PAGE_SIZE;
				mbufsiz = min(mlen, len);
				m->m_next = mb_alloc_ext_plus_pages(mbufsiz,
				    how);
				m = m->m_next;
				if (m == NULL) {
					m_freem(mout);
					return (m);
				}
				i = 0;
			}
			pgpos = (char *)(void *)PHYS_TO_DMAP(m->m_epg_pa[i]);
			pglen = PAGE_SIZE;
		}
		while (mblen == 0) {
			if (mp == NULL) {
				m_freem(mout);
				return (NULL);
			}
			KASSERT((mp->m_flags & M_EXTPG) == 0,
			    ("mb_copym_ext_pgs: ext_pgs input mbuf"));
			mbpos = mtod(mp, char *);
			mblen = mp->m_len;
			mp = mp->m_next;
		}
		xfer = min(mblen, pglen);
		memcpy(pgpos, mbpos, xfer);
		pgpos += xfer;
		mbpos += xfer;
		pglen -= xfer;
		mblen -= xfer;
		len -= xfer;
		m->m_len += xfer;
	} while (len > 0);
	m->m_epg_last_len = PAGE_SIZE - pglen;