/*-
 * Copyright (c) 2004, 2005,
 *	Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>
/*
 * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
 * Zones.
 *
 * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
 * Zone.  The Zone can be capped at kern.ipc.nmbclusters, if the
 * administrator so desires.
 *
 * Mbufs are allocated from a UMA Master Zone called the Mbuf
 * Zone.
 *
 * Additionally, FreeBSD provides a Packet Zone, which it
 * configures as a Secondary Zone to the Mbuf Master Zone,
 * thus sharing backend Slab kegs with the Mbuf Master Zone.
 *
 * Thus common-case allocations and locking are simplified:
 *
 *  m_clget()                m_getcl()
 *   |                         |
 *   |   .------------>[(Packet Cache)]    m_get(), m_gethdr()
 *   |   |              [    Packet   ]            |
 *  [(Cluster Cache)]   [  Secondary  ]   [ (Mbuf Cache)     ]
 *  [ Cluster Zone  ]   [    Zone     ]   [ Mbuf Master Zone ]
 *         |                   \_________         |
 *  [ Cluster Keg   ]                    \       /
 *         |                            [ Mbuf Keg ]
 *  [ Cluster Slabs ]                        |
 *         |                            [ Mbuf Slabs ]
 *          \____________(VM)_______________/
 *
 * Whenever an object is allocated with uma_zalloc() out of
 * one of the Zones its _ctor_ function is executed.  Likewise,
 * any deallocation through uma_zfree() runs the _dtor_ function.
 *
 * Caches are per-CPU and are filled from the Master Zone.
 *
 * Whenever an object is allocated from the underlying global
 * memory pool it gets pre-initialized with the _zinit_ functions.
 * When the Kegs are overfull, objects get decommissioned with
 * _zfini_ functions and freed back to the global memory pool.
 */
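
/*
 * Illustrative sketch (not part of the allocator itself): the common
 * consumer path obtains an mbuf with a cluster already attached from
 * the Packet Zone through the m_getcl() wrapper, which resolves to a
 * single uma_zalloc_arg() on zone_pack:
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	...
 *	m_freem(m);
 *
 * A single allocation fills in both pieces; freeing returns the pair
 * intact to the per-CPU Packet cache.
 */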
int nmbclusters;		/* limits number of mbuf clusters */
int nmbjumbop;			/* limits number of page size jumbo clusters */
int nmbjumbo9;			/* limits number of 9k jumbo clusters */
int nmbjumbo16;			/* limits number of 16k jumbo clusters */
struct mbstat mbstat;
/*
 * tunable_mbinit() has to be run before init_maxsockets(), thus
 * the SYSINIT order below is SI_ORDER_MIDDLE while init_maxsockets()
 * runs at SI_ORDER_ANY.
 */
static void
tunable_mbinit(void *dummy)
{

	/* This has to be done before VM init. */
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	if (nmbclusters == 0)
		nmbclusters = 1024 + maxusers * 64;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
	if (nmbjumbop == 0)
		nmbjumbop = nmbclusters / 2;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
	if (nmbjumbo9 == 0)
		nmbjumbo9 = nmbclusters / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
	if (nmbjumbo16 == 0)
		nmbjumbo16 = nmbclusters / 8;
}
SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_MIDDLE, tunable_mbinit, NULL);
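
/*
 * Example (administrator-facing, not code in this file): since these
 * are boot-time tunables fetched at SI_SUB_TUNABLES, the defaults are
 * typically overridden from /boot/loader.conf, e.g.:
 *
 *	kern.ipc.nmbclusters="131072"
 *	kern.ipc.nmbjumbop="65536"
 */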
static int
sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbclusters;

	newnmbclusters = nmbclusters;
	error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbclusters > nmbclusters) {
			nmbclusters = newnmbclusters;
			uma_zone_set_max(zone_clust, nmbclusters);
			EVENTHANDLER_INVOKE(nmbclusters_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW,
    &nmbclusters, 0, sysctl_nmbclusters, "IU",
    "Maximum number of mbuf clusters allowed");
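
/*
 * Example: since the handler above only accepts increases, an
 * administrator can raise (but never lower) the cap at runtime:
 *
 *	# sysctl kern.ipc.nmbclusters=262144
 */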
static int
sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbop;

	newnmbjumbop = nmbjumbop;
	error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbop > nmbjumbop) {
			nmbjumbop = newnmbjumbop;
			uma_zone_set_max(zone_jumbop, nmbjumbop);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbop, 0, sysctl_nmbjumbop, "IU",
    "Maximum number of mbuf page size jumbo clusters allowed");
static int
sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo9;

	newnmbjumbo9 = nmbjumbo9;
	error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbo9 > nmbjumbo9) {
			nmbjumbo9 = newnmbjumbo9;
			uma_zone_set_max(zone_jumbo9, nmbjumbo9);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo9, 0, sysctl_nmbjumbo9, "IU",
    "Maximum number of mbuf 9k jumbo clusters allowed");
static int
sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo16;

	newnmbjumbo16 = nmbjumbo16;
	error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbo16 > nmbjumbo16) {
			nmbjumbo16 = newnmbjumbo16;
			uma_zone_set_max(zone_jumbo16, nmbjumbo16);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo16, 0, sysctl_nmbjumbo16, "IU",
    "Maximum number of mbuf 16k jumbo clusters allowed");
SYSCTL_STRUCT(_kern_ipc, OID_AUTO, mbstat, CTLFLAG_RD, &mbstat, mbstat,
    "Mbuf general information and statistics");
/*
 * Zones from which we allocate.
 */
uma_zone_t	zone_mbuf;
uma_zone_t	zone_clust;
uma_zone_t	zone_pack;
uma_zone_t	zone_jumbop;
uma_zone_t	zone_jumbo9;
uma_zone_t	zone_jumbo16;
uma_zone_t	zone_ext_refcnt;
/*
 * Local prototypes.
 */
static int	mb_ctor_mbuf(void *, int, void *, int);
static int	mb_ctor_clust(void *, int, void *, int);
static int	mb_ctor_pack(void *, int, void *, int);
static void	mb_dtor_mbuf(void *, int, void *);
static void	mb_dtor_clust(void *, int, void *);
static void	mb_dtor_pack(void *, int, void *);
static int	mb_zinit_pack(void *, int, int);
static void	mb_zfini_pack(void *, int);

static void	mb_reclaim(void *);
static void	mbuf_init(void *);
static void    *mbuf_jumbo_alloc(uma_zone_t, int, uint8_t *, int);
/* Ensure that MSIZE doesn't break dtom() - it must be a power of 2. */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
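
/*
 * Worked example of the identity above: for MSIZE = 256,
 * ((255 ^ 256) + 1) >> 1 == (511 + 1) >> 1 == 256 == MSIZE, so the
 * assertion holds.  For a non-power-of-2 such as 320,
 * ((319 ^ 320) + 1) >> 1 == 64 != 320, and the build fails.
 */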
/*
 * Initialize FreeBSD Network buffer allocation.
 */
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);

static void
mbuf_init(void *dummy)
{

	/*
	 * Configure UMA zones for Mbufs, Clusters, and Packets.
	 */
	zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
	    mb_ctor_mbuf, mb_dtor_mbuf,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    MSIZE - 1, UMA_ZONE_MAXBUCKET);

	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbclusters > 0)
		uma_zone_set_max(zone_clust, nmbclusters);

	zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
	    mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);

	/* Make jumbo frame zones too.  Page size, 9k and 16k. */
	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbop > 0)
		uma_zone_set_max(zone_jumbop, nmbjumbop);

	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbo9 > 0)
		uma_zone_set_max(zone_jumbo9, nmbjumbo9);
	uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);

	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbo16 > 0)
		uma_zone_set_max(zone_jumbo16, nmbjumbo16);
	uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);

	zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int),
	    NULL, NULL,
	    NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	/* uma_prealloc() goes here... */

	/*
	 * Hook event handler for low-memory situation, used to
	 * drain protocols and push data back to the caches (UMA
	 * later pushes it back to VM).
	 */
	EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
	    EVENTHANDLER_PRI_FIRST);

	/*
	 * [Re]set counters and local statistics knobs.
	 * XXX Some of these should go and be replaced, but UMA stat
	 * gathering needs to be revised.
	 */
	mbstat.m_mbufs = 0;
	mbstat.m_mclusts = 0;
	mbstat.m_drain = 0;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;
	mbstat.m_numtypes = MT_NTYPES;

	mbstat.m_mcfail = mbstat.m_mpfail = 0;
	mbstat.sf_iocnt = 0;
	mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
}
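
/*
 * A note on the secondary zone created above: zone_pack caches
 * mbuf+cluster pairs, but its mbufs come from the same keg as
 * zone_mbuf, so memory migrates freely between the m_get()/m_gethdr()
 * and m_getcl() paths rather than being partitioned into two
 * independent pools.
 */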
/*
 * UMA backend page allocator for the jumbo frame zones.
 *
 * Allocates kernel virtual memory that is backed by contiguous physical
 * pages.
 */
static void *
mbuf_jumbo_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
{

	/* Inform UMA that this allocator uses kernel_map/object. */
	*flags = UMA_SLAB_KERNEL;
	return ((void *)kmem_alloc_contig(kernel_map, bytes, wait,
	    (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
}
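
/*
 * The argument choices above request any physical address range
 * (low 0, high ~0), byte alignment of 1, and no boundary-crossing
 * restriction: the jumbo zones only require that the pages backing
 * each buffer be physically contiguous, since drivers commonly DMA
 * an entire jumbo frame into one contiguous buffer.
 */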
/*
 * Constructor for Mbuf master zone.
 *
 * The 'arg' pointer points to a mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API.  See mbuf.h.
 */
static int
mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
#ifdef MAC
	int error;
#endif
	int flags;
	short type;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;

	/*
	 * The mbuf is initialized later.  The caller has the
	 * responsibility to set up any MAC labels too.
	 */
	if (type == MT_NOINIT)
		return (0);

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_len = 0;
	m->m_flags = flags;
	m->m_type = type;
	if (flags & M_PKTHDR) {
		m->m_data = m->m_pktdat;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		m->m_pkthdr.flowid = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
		/* If the label init fails, fail the alloc */
		error = mac_mbuf_init(m, how);
		if (error)
			return (error);
#endif
	} else
		m->m_data = m->m_dat;
	return (0);
}
/*
 * The Mbuf master zone destructor.
 */
static void
mb_dtor_mbuf(void *mem, int size, void *arg)
{
	struct mbuf *m;
	unsigned long flags;

	m = (struct mbuf *)mem;
	flags = (unsigned long)arg;

	if ((flags & MB_NOTAGS) == 0 && (m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);
	KASSERT((m->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
	KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set",
	    __func__));
#ifdef INVARIANTS
	trash_dtor(mem, size, arg);
#endif
}
/*
 * The Mbuf Packet zone destructor.
 */
static void
mb_dtor_pack(void *mem, int size, void *arg)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);

	/* Make sure we've got a clean cluster back. */
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
	KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
	KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__));
	KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
	KASSERT(m->m_ext.ext_size == MCLBYTES,
	    ("%s: ext_size != MCLBYTES", __func__));
	KASSERT(m->m_ext.ext_type == EXT_PACKET,
	    ("%s: ext_type != EXT_PACKET", __func__));
	KASSERT(*m->m_ext.ref_cnt == 1, ("%s: ref_cnt != 1", __func__));
#ifdef INVARIANTS
	trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
#endif
	/*
	 * If there are processes blocked on zone_clust, waiting for pages
	 * to be freed up, cause them to be woken up by draining the packet
	 * zone.  We are exposed to a race here (in the check for the
	 * UMA_ZFLAG_FULL) where we might miss the flag set, but that is
	 * deliberate.  We don't want to acquire the zone lock for every
	 * mbuf free.
	 */
	if (uma_zone_exhausted_nolock(zone_clust))
		zone_drain(zone_pack);
}
/*
 * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.  If 'arg' is
 * empty we allocate just the cluster without setting
 * the mbuf to it.  See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	u_int *refcnt;
	int type;
	uma_zone_t zone;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	switch (size) {
	case MCLBYTES:
		type = EXT_CLUSTER;
		zone = zone_clust;
		break;
#if MJUMPAGESIZE != MCLBYTES
	case MJUMPAGESIZE:
		type = EXT_JUMBOP;
		zone = zone_jumbop;
		break;
#endif
	case MJUM9BYTES:
		type = EXT_JUMBO9;
		zone = zone_jumbo9;
		break;
	case MJUM16BYTES:
		type = EXT_JUMBO16;
		zone = zone_jumbo16;
		break;
	default:
		panic("unknown cluster size");
		break;
	}

	m = (struct mbuf *)arg;
	refcnt = uma_find_refcnt(zone, mem);
	*refcnt = 1;
	if (m != NULL) {
		m->m_ext.ext_buf = (caddr_t)mem;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_arg1 = NULL;
		m->m_ext.ext_arg2 = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = type;
		m->m_ext.ref_cnt = refcnt;
	}

	return (0);
}
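
/*
 * Illustrative caller (sketch): m_clget(m, how) attaches a standard 2K
 * cluster to an existing mbuf by calling uma_zalloc_arg(zone_clust, m,
 * how), which enters the constructor above with 'arg' pointing at the
 * mbuf; with a NULL 'arg' only the bare cluster is set up.
 */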
/*
 * The Mbuf Cluster zone destructor.
 */
static void
mb_dtor_clust(void *mem, int size, void *arg)
{
#ifdef INVARIANTS
	uma_zone_t zone;

	zone = m_getzone(size);
	KASSERT(*(uma_find_refcnt(zone, mem)) <= 1,
	    ("%s: refcnt incorrect %u", __func__,
	    *(uma_find_refcnt(zone, mem))));

	trash_dtor(mem, size, arg);
#endif
}
/*
 * The Packet secondary zone's init routine, executed on the
 * object's transition from mbuf keg slab to zone cache.
 */
static int
mb_zinit_pack(void *mem, int size, int how)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;		/* m is virgin. */
	if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
	    m->m_ext.ext_buf == NULL)
		return (ENOMEM);
	m->m_ext.ext_type = EXT_PACKET;	/* Override. */
#ifdef INVARIANTS
	trash_init(m->m_ext.ext_buf, MCLBYTES, how);
#endif
	return (0);
}
/*
 * The Packet secondary zone's fini routine, executed on the
 * object's transition from zone cache to keg slab.
 */
static void
mb_zfini_pack(void *mem, int size)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
#ifdef INVARIANTS
	trash_fini(m->m_ext.ext_buf, MCLBYTES);
#endif
	uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
#ifdef INVARIANTS
	trash_dtor(mem, size, NULL);
#endif
}
/*
 * The "packet" keg constructor.
 */
static int
mb_ctor_pack(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
#ifdef MAC
	int error;
#endif
	int flags;
	short type;

	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;

#ifdef INVARIANTS
	trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
#endif
	/* Initialize some mbuf fields. */
	m->m_data = m->m_ext.ext_buf;
	m->m_len = 0;
	m->m_flags = (flags | M_EXT);
	m->m_type = type;

	if (flags & M_PKTHDR) {
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		m->m_pkthdr.flowid = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
		/* If the label init fails, fail the alloc */
		error = mac_mbuf_init(m, how);
		if (error)
			return (error);
#endif
	}
	/* m_ext is already initialized. */
	m->m_next = NULL;
	m->m_nextpkt = NULL;

	return (0);
}
int
m_pkthdr_init(struct mbuf *m, int how)
{
#ifdef MAC
	int error;
#endif
	m->m_data = m->m_pktdat;
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.rcvif = NULL;
	m->m_pkthdr.header = NULL;
	m->m_pkthdr.len = 0;
	m->m_pkthdr.flowid = 0;
	m->m_pkthdr.csum_flags = 0;
	m->m_pkthdr.csum_data = 0;
	m->m_pkthdr.tso_segsz = 0;
	m->m_pkthdr.ether_vtag = 0;
#ifdef MAC
	/* If the label init fails, fail the alloc */
	error = mac_mbuf_init(m, how);
	if (error)
		return (error);
#endif

	return (0);
}
/*
 * This is the protocol drain routine.
 *
 * No locks should be held when this is called.  The drain routines have to
 * presently acquire some locks which raises the possibility of lock order
 * reversal.
 */
static void
mb_reclaim(void *junk)
{
	struct domain *dp;
	struct protosw *pr;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
	    "mb_reclaim()");

	for (dp = domains; dp != NULL; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain != NULL)
				(*pr->pr_drain)();
}