/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2005-2009 Robert N. M. Watson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  Back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets is used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and comments in malloc.h for a detailed
 * description.
 */
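
/*
 * Illustrative usage sketch (not part of this file; M_EXAMPLE and
 * example_softc are hypothetical): a consumer declares its own memory
 * type and allocates against it, and the per-type statistics then show
 * up in vmstat -m output.
 *
 *	MALLOC_DEFINE(M_EXAMPLE, "example", "example driver buffers");
 *
 *	struct example_softc *sc;
 *
 *	sc = malloc(sizeof(*sc), M_EXAMPLE, M_WAITOK | M_ZERO);
 *	...
 *	free(sc, M_EXAMPLE);
 */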

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

dtrace_malloc_probe_func_t	dtrace_malloc_probe;
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory.  'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
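
/*
 * Worked example (hypothetical sizes): with REALLOC_FRACTION == 1,
 * shrinking a 1024-byte block to 600 bytes reuses the existing block,
 * since 600 > 1024 >> 1; shrinking it to 512 bytes or fewer instead
 * allocates a new, smaller block and copies the contents over.
 */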

/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

static struct malloc_type *kmemstatistics;
static int kmemcount;

#define	KMEM_ZSHIFT	4
#define	KMEM_ZBASE	16
#define	KMEM_ZMASK	(KMEM_ZBASE - 1)

#define	KMEM_ZMAX	65536
#define	KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static uint8_t kmemsize[KMEM_ZSIZE + 1];
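
/*
 * Example of the size-to-bucket mapping performed in malloc() below
 * (hypothetical request size): a 100-byte allocation is first rounded up
 * to the next multiple of KMEM_ZBASE (112), and kmemsize[112 >> KMEM_ZSHIFT]
 * then yields the index of the smallest bucket zone that fits it.
 */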

#ifndef MALLOC_DEBUG_MAXZONES
#define	MALLOC_DEBUG_MAXZONES	1
#endif
static int numzones = MALLOC_DEBUG_MAXZONES;

/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
} kmemzones[] = {
	{16, "16", },
	{32, "32", },
	{48, "48", },
	{64, "64", },
	{80, "80", },
	{96, "96", },
	{112, "112", },
	{128, "128", },
	{160, "160", },
	{192, "192", },
	{224, "224", },
	{256, "256", },
	{384, "384", },
	{512, "512", },
	{768, "768", },
	{1024, "1024", },
	{2048, "2048", },
	{4096, "4096", },
	{8192, "8192", },
	{16384, "16384", },
	{32768, "32768", },
	{65536, "65536", },
	{0, NULL},
};

/*
 * Zone to allocate malloc type descriptions from.  For ABI reasons, memory
 * types are described by a data structure passed by the declaring code, but
 * the malloc(9) implementation has its own data structure describing the
 * type and statistics.  This permits the malloc(9)-internal data structures
 * to be modified without breaking binary-compiled kernel modules that
 * declare malloc types.
 */
static uma_zone_t mt_zone;

u_long vm_kmem_size;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long kmem_zmax = KMEM_ZMAX;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
    "Maximum allocation size that malloc(9) would use UMA as backend");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_size, "LU", "Current kmem allocation size");

static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_free, "LU", "Free space in kmem");
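
/*
 * Example (userland, not this file): the knobs and counters above are
 * visible through sysctl(8), e.g.:
 *
 *	$ sysctl vm.kmem_size vm.kmem_map_size vm.kmem_map_free
 */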

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

/*
 * time_uptime of the last malloc(9) failure (induced or real).
 */
static time_t t_malloc_fail;

#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");
#endif

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

static int
sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = vmem_size(kmem_arena, VMEM_ALLOC);
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = vmem_size(kmem_arena, VMEM_FREE);
	return (sysctl_handle_long(oidp, &size, 0, req));
}

/*
 * malloc(9) uma zone separation -- sub-page buffer overruns in one
 * malloc type will affect only a subset of other malloc types.
 */
#if MALLOC_DEBUG_MAXZONES > 1
static void
tunable_set_numzones(void)
{

	TUNABLE_INT_FETCH("debug.malloc.numzones",
	    &numzones);

	/* Sanity check the number of malloc uma zones. */
	if (numzones <= 0)
		numzones = 1;
	if (numzones > MALLOC_DEBUG_MAXZONES)
		numzones = MALLOC_DEBUG_MAXZONES;
}
SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &numzones, 0, "Number of malloc uma subzones");

/*
 * Any number that changes regularly is an okay choice for the
 * offset.  Build numbers are pretty good if you have them.
 */
static u_int zone_offset = __FreeBSD_version;
TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
    &zone_offset, 0, "Separate malloc types by examining the "
    "Nth character in the malloc type short description.");

static u_int
mtp_get_subzone(const char *desc)
{
	size_t len;
	u_int val;

	if (desc == NULL || (len = strlen(desc)) == 0)
		return (0);
	val = desc[zone_offset % len];
	return (val % numzones);
}
#elif MALLOC_DEBUG_MAXZONES == 0
#error "MALLOC_DEBUG_MAXZONES must be positive."
#else
static inline u_int
mtp_get_subzone(const char *desc)
{

	return (0);
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
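
/*
 * Example (hypothetical values): with numzones == 8 and zone_offset == 3,
 * a type described as "devbuf" selects desc[3 % strlen("devbuf")] == 'b',
 * so it lands in subzone 'b' % 8.  Types that map to different subzones
 * are backed by distinct UMA zones, so a sub-page overrun in one type
 * cannot corrupt buffers belonging to the other.
 */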

int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}

/*
 * An allocation has succeeded -- update malloc type statistics for the
 * amount of bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

#ifdef KDTRACE_HOOKS
	if (dtrace_malloc_probe != NULL) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
#endif

	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * amount of the bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

#ifdef KDTRACE_HOOKS
	if (dtrace_malloc_probe != NULL) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
#endif

	critical_exit();
}

/*
 *	contigmalloc:
 *
 *	Allocate a block of physically contiguous memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
void *
contigmalloc(unsigned long size, struct malloc_type *type, int flags,
    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
    vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig(kernel_arena, size, flags, low, high,
	    alignment, boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}

/*
 *	contigfree:
 *
 *	Free a block of memory allocated by contigmalloc.
 *
 *	This routine may not block.
 */
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{

	kmem_free(kernel_arena, (vm_offset_t)addr, size);
	malloc_type_freed(type, round_page(size));
}

/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
void *
malloc(unsigned long size, struct malloc_type *mtp, int flags)
{
	int indx;
	struct malloc_type_internal *mtip;
	caddr_t va;
	uma_zone_t zone;
#if defined(DIAGNOSTIC) || defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

#ifdef INVARIANTS
	KASSERT(mtp->ks_magic == M_MAGIC, ("malloc: bad malloc type magic"));
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static	struct timeval lasterr;
		static	int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("malloc: called with spinlock or critical section held"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_mtp(mtp, size)) {
		va = memguard_alloc(size, flags);
		if (va != NULL)
			return (va);
		/* This is unfortunate but should not be fatal. */
	}
#endif

#ifdef DEBUG_REDZONE
	size = redzone_size_ntor(size);
#endif

	if (size <= kmem_zmax) {
		mtip = mtp->ks_handle;
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		KASSERT(mtip->mti_zone < numzones,
		    ("mti_zone %u out of range %d",
		    mtip->mti_zone, numzones));
		zone = kmemzones[indx].kz_zone[mtip->mti_zone];
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = zone->uz_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		va = uma_large_malloc(size, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}

/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_slab_t slab;
	u_long size;

	KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("free: called with spinlock or critical section held"));

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return;
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	addr = redzone_addr_ntor(addr);
#endif

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtpp = addr;
#endif
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently freed
		 * this memory here.  This way we know who is most likely to
		 * have stepped on it later.
		 *
		 * This code assumes that size is a multiple of 8 bytes for
		 * 64 bit machines
		 */
		mtpp = (struct malloc_type **)
		    ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
		mtpp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtpp = mtp;
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(mtp, size);
}
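
/*
 * Debugging note (a sketch, not code from this file): under INVARIANTS,
 * the last pointer-sized slot of a freed small block holds the
 * malloc_type pointer stored above.  For a hypothetical 64-byte block
 * at p, that slot is at (char *)p + 64 - sizeof(struct malloc_type *),
 * which lets a post-mortem inspection of a corrupted block attribute
 * the most recent free().
 */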

/*
 *	realloc: change the size of a memory block
 */
void *
realloc(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("realloc: bad malloc type magic"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("realloc: called with spinlock or critical section held"));

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 * per-CPU stats.
	 */

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr))
		return (memguard_realloc(addr, size, mtp, flags));
#endif

#ifdef DEBUG_REDZONE
	slab = NULL;
	alloc = redzone_get_size(addr);
#else
	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!(slab->us_flags & UMA_SLAB_MALLOC))
		alloc = slab->us_keg->uk_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);
#endif /* !DEBUG_REDZONE */

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}

/*
 *	reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}

/*
 * Wake the uma reclamation pagedaemon thread when we exhaust KVA.  It
 * will call the lowmem handler and uma_reclaim() callbacks in a
 * context that is safe.
 */
static void
kmem_reclaim(vmem_t *vm, int flags)
{

	uma_reclaim_wakeup();
	pagedaemon_wakeup();
}

CTASSERT(VM_KMEM_SIZE_SCALE >= 1);

/*
 * Initialize the kernel memory (kmem) arena.
 */
void
kmeminit(void)
{
	u_long mem_size;
	u_long tmp;

#ifdef VM_KMEM_SIZE
	if (vm_kmem_size == 0)
		vm_kmem_size = VM_KMEM_SIZE;
#endif
#ifdef VM_KMEM_SIZE_MIN
	if (vm_kmem_size_min == 0)
		vm_kmem_size_min = VM_KMEM_SIZE_MIN;
#endif
#ifdef VM_KMEM_SIZE_MAX
	if (vm_kmem_size_max == 0)
		vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif

	/*
	 * Calculate the amount of kernel virtual address (KVA) space that is
	 * preallocated to the kmem arena.  In order to support a wide range
	 * of machines, it is a function of the physical memory size,
	 * specifically,
	 *
	 *	min(max(physical memory size / VM_KMEM_SIZE_SCALE,
	 *	    VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
	 *
	 * Every architecture must define an integral value for
	 * VM_KMEM_SIZE_SCALE.  However, the definitions of VM_KMEM_SIZE_MIN
	 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
	 * ceiling on this preallocation, are optional.  Typically,
	 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
	 * a given architecture.
	 */
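
	/*
	 * Worked example (hypothetical machine): with 4GB of physical
	 * memory, 4KB pages, and VM_KMEM_SIZE_SCALE == 3, the default is
	 * (1048576 pages / 3) * PAGE_SIZE, roughly 1.33GB of KVA, before
	 * the VM_KMEM_SIZE_MIN/VM_KMEM_SIZE_MAX clamps below are applied.
	 */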
	mem_size = vm_cnt.v_page_count;
	if (mem_size <= 32768) /* delphij XXX 128MB */
		kmem_zmax = PAGE_SIZE;

	if (vm_kmem_size_scale < 1)
		vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;

	/*
	 * Check if we should use defaults for the "vm_kmem_size"
	 * variable:
	 */
	if (vm_kmem_size == 0) {
		vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE;

		if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
			vm_kmem_size = vm_kmem_size_min;
		if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
			vm_kmem_size = vm_kmem_size_max;
	}

	/*
	 * The amount of KVA space that is preallocated to the
	 * kmem arena can be set statically at compile-time or manually
	 * through the kernel environment.  However, it is still limited to
	 * twice the physical memory size, which has been sufficient to handle
	 * the most severe cases of external fragmentation in the kmem arena.
	 */
	if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
		vm_kmem_size = 2 * mem_size * PAGE_SIZE;

	vm_kmem_size = round_page(vm_kmem_size);
#ifdef DEBUG_MEMGUARD
	tmp = memguard_fudge(vm_kmem_size, kernel_map);
#else
	tmp = vm_kmem_size;
#endif
	vmem_init(kmem_arena, "kmem arena", kva_alloc(tmp), tmp, PAGE_SIZE,
	    0, 0);
	vmem_set_reclaim(kmem_arena, kmem_reclaim);

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	memguard_init(kmem_arena);
#endif
}

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
mallocinit(void *dummy)
{
	int i;
	uint8_t indx;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	kmeminit();

	uma_startup2();

	if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
		kmem_zmax = KMEM_ZMAX;

	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
#ifdef INVARIANTS
	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
	    NULL, NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;
		int subzone;

		for (subzone = 0; subzone < numzones; subzone++) {
			kmemzones[indx].kz_zone[subzone] =
			    uma_zcreate(name, size,
#ifdef INVARIANTS
			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
			    NULL, NULL, NULL, NULL,
#endif
			    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
		}
		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;
	}
}
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);

void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init"));

	mtp = data;
	if (mtp->ks_magic != M_MAGIC)
		panic("malloc_init: bad malloc type magic");

	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
	mtp->ks_handle = mtip;
	mtip->mti_zone = mtp_get_subzone(mtp->ks_shortdesc);

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	uma_slab_t slab;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("malloc_uninit: bad malloc type magic"));
	KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL"));

	mtx_lock(&malloc_mtx);
	mtip = mtp->ks_handle;
	mtp->ks_handle = NULL;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp) {
				temp->ks_next = mtp->ks_next;
				break;
			}
		}
		KASSERT(temp,
		    ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i < MAXCPU; i++) {
		mtsp = &mtip->mti_stats[i];
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);
	}

	slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
	uma_zfree_arg(mt_zone, mtip, slab);
}

struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}

static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int error, i;
	struct sbuf sbuf;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
	mtx_lock(&malloc_mtx);

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	(void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		(void)sbuf_bcat(&sbuf, &mth, sizeof(mth));

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i < MAXCPU; i++) {
			(void)sbuf_bcat(&sbuf, &mtip->mti_stats[i],
			    sizeof(mtip->mti_stats[i]));
		}
	}
	mtx_unlock(&malloc_mtx);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");

SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");

void
malloc_type_list(malloc_type_list_func_t *func, void *arg)
{
	struct malloc_type *mtp, **bufmtp;
	size_t buflen;
	int count, i;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);

	buflen = sizeof(struct malloc_type *) * count;
	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);

	mtx_lock(&malloc_mtx);

	if (count < kmemcount) {
		free(bufmtp, M_TEMP);
		goto restart;
	}

	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
		bufmtp[i] = mtp;

	mtx_unlock(&malloc_mtx);

	for (i = 0; i < count; i++)
		(func)(bufmtp[i], arg);

	free(bufmtp, M_TEMP);
}

#ifdef DDB
DB_SHOW_COMMAND(malloc, db_show_malloc)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	uint64_t allocs, frees;
	uint64_t alloced, freed;
	int i;

	db_printf("%18s %12s %12s %12s\n", "Type", "InUse", "MemUse",
	    "Requests");
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;
		allocs = 0;
		frees = 0;
		alloced = 0;
		freed = 0;
		for (i = 0; i < MAXCPU; i++) {
			allocs += mtip->mti_stats[i].mts_numallocs;
			frees += mtip->mti_stats[i].mts_numfrees;
			alloced += mtip->mti_stats[i].mts_memalloced;
			freed += mtip->mti_stats[i].mts_memfreed;
		}
		db_printf("%18s %12ju %12juK %12ju\n",
		    mtp->ks_shortdesc, allocs - frees,
		    (alloced - freed + 1023) / 1024, allocs);
		if (db_pager_quit)
			break;
	}
}

#if MALLOC_DEBUG_MAXZONES > 1
DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int subzone;

	if (!have_addr) {
		db_printf("Usage: show multizone_matches <malloc type/addr>\n");
		return;
	}
	mtp = (void *)addr;
	if (mtp->ks_magic != M_MAGIC) {
		db_printf("Magic %lx does not match expected %x\n",
		    mtp->ks_magic, M_MAGIC);
		return;
	}

	mtip = mtp->ks_handle;
	subzone = mtip->mti_zone;

	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = mtp->ks_handle;
		if (mtip->mti_zone != subzone)
			continue;
		db_printf("%s\n", mtp->ks_shortdesc);
		if (db_pager_quit)
			break;
	}
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
#endif /* DDB */

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int error;
	int rsize;
	int size;
	int i;

	waste = 0;
	mem = 0;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf,
	    "\n  Size                    Requests  Real Size\n");
	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = (long long unsigned)krequests[i];

		sbuf_printf(&sbuf, "%6d%28llu%11d\n", size,
		    (unsigned long long)count, rsize);

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}
	sbuf_printf(&sbuf,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */