2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1987, 1991, 1993
5 * The Regents of the University of California.
6 * Copyright (c) 2005-2009 Robert N. M. Watson
7 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> (mallocarray)
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * @(#)kern_malloc.c 8.3 (Berkeley) 1/4/94
38 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
39 * based on memory types. Back end is implemented using the UMA(9) zone
40 * allocator. A set of fixed-size buckets is used for smaller allocations,
41 * and a special UMA allocation interface is used for larger allocations.
42 * Callers declare memory types, and statistics are maintained independently
43 * for each memory type. Statistics are maintained per-CPU for performance
44 * reasons. See malloc(9) and comments in malloc.h for a detailed description.
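/*
 * Illustrative sketch (not part of this file): a typical consumer declares
 * its own malloc type and allocates against it.  The type name M_EXAMPLE
 * below is hypothetical.
 *
 *	MALLOC_DECLARE(M_EXAMPLE);			(in a header)
 *	MALLOC_DEFINE(M_EXAMPLE, "example", "example buffers");
 *
 *	p = malloc(len, M_EXAMPLE, M_WAITOK | M_ZERO);
 *	...
 *	free(p, M_EXAMPLE);
 *
 * The per-type statistics accumulated here can then be inspected with
 * vmstat -m, which consumes the kern.malloc_stats sysctl exported near the
 * end of this file.
 */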
48 #include <sys/cdefs.h>
49 __FBSDID("$FreeBSD$");
54 #include <sys/param.h>
55 #include <sys/systm.h>
58 #include <sys/kernel.h>
60 #include <sys/malloc.h>
62 #include <sys/mutex.h>
63 #include <sys/vmmeter.h>
65 #include <sys/queue.h>
68 #include <sys/sysctl.h>
72 #include <sys/epoch.h>
77 #include <vm/vm_domainset.h>
78 #include <vm/vm_pageout.h>
79 #include <vm/vm_param.h>
80 #include <vm/vm_kern.h>
81 #include <vm/vm_extern.h>
82 #include <vm/vm_map.h>
83 #include <vm/vm_page.h>
84 #include <vm/vm_phys.h>
85 #include <vm/vm_pagequeue.h>
87 #include <vm/uma_int.h>
88 #include <vm/uma_dbg.h>
91 #include <vm/memguard.h>
94 #include <vm/redzone.h>
97 #if defined(INVARIANTS) && defined(__i386__)
98 #include <machine/cpu.h>
104 #include <sys/dtrace_bsd.h>
106 bool __read_frequently dtrace_malloc_enabled;
107 dtrace_malloc_probe_func_t __read_mostly dtrace_malloc_probe;
110 #if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) || \
111 defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
112 #define MALLOC_DEBUG 1
115 #if defined(KASAN) || defined(DEBUG_REDZONE)
116 #define DEBUG_REDZONE_ARG_DEF , unsigned long osize
117 #define DEBUG_REDZONE_ARG , osize
119 #define DEBUG_REDZONE_ARG_DEF
120 #define DEBUG_REDZONE_ARG
124 * When realloc() is called, if the new size is sufficiently smaller than
125 * the old size, realloc() will allocate a new, smaller block to avoid
126 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
127 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
129 #ifndef REALLOC_FRACTION
130 #define REALLOC_FRACTION 1 /* new block if <= half the size */
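/*
 * Worked example (illustrative): with REALLOC_FRACTION == 1, shrinking a
 * 1024-byte block behaves as follows -- any new size in (512, 1024] keeps
 * the existing block, while a new size of 512 bytes or less causes
 * realloc() to allocate a fresh, smaller block and free the old one
 * (unless the old block is already the minimum allocation size).
 */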
134 * Centrally define some common malloc types.
136 MALLOC_DEFINE(M_CACHE, "cache", "Various dynamically allocated caches");
137 MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
138 MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
140 static struct malloc_type *kmemstatistics;
141 static int kmemcount;
143 #define KMEM_ZSHIFT 4
144 #define KMEM_ZBASE 16
145 #define KMEM_ZMASK (KMEM_ZBASE - 1)
147 #define KMEM_ZMAX 65536
148 #define KMEM_ZSIZE (KMEM_ZMAX >> KMEM_ZSHIFT)
149 static uint8_t kmemsize[KMEM_ZSIZE + 1];
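/*
 * Worked example of the size-to-bucket mapping (illustrative, assuming the
 * bucket table below): a 200-byte request is first rounded up to the next
 * multiple of KMEM_ZBASE, (200 & ~KMEM_ZMASK) + KMEM_ZBASE == 208, and
 * kmemsize[208 >> KMEM_ZSHIFT] == kmemsize[13] then selects the smallest
 * bucket of at least that size, malloc-256.  The caller therefore receives
 * a 256-byte allocation, which is also what malloc_usable_size() reports.
 */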
151 #ifndef MALLOC_DEBUG_MAXZONES
152 #define MALLOC_DEBUG_MAXZONES 1
154 static int numzones = MALLOC_DEBUG_MAXZONES;
157 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
160 * Warning: the layout of the struct is duplicated in libmemstat for KVM support.
162 * XXX: The comment here used to read "These won't be powers of two for
163 * long." It's possible that a significant amount of wasted memory could be
164 * recovered by tuning the sizes of these buckets.
169 uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
174 {128, "malloc-128", },
175 {256, "malloc-256", },
176 {384, "malloc-384", },
177 {512, "malloc-512", },
178 {1024, "malloc-1024", },
179 {2048, "malloc-2048", },
180 {4096, "malloc-4096", },
181 {8192, "malloc-8192", },
182 {16384, "malloc-16384", },
183 {32768, "malloc-32768", },
184 {65536, "malloc-65536", },
189 SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
190 "Size of kernel memory");
192 static u_long kmem_zmax = KMEM_ZMAX;
193 SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
194 "Maximum allocation size for which malloc(9) uses UMA as the backend");
196 static u_long vm_kmem_size_min;
197 SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
198 "Minimum size of kernel memory");
200 static u_long vm_kmem_size_max;
201 SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
202 "Maximum size of kernel memory");
204 static u_int vm_kmem_size_scale;
205 SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
206 "Scale factor for kernel memory size");
208 static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
209 SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
210 CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
211 sysctl_kmem_map_size, "LU", "Current kmem allocation size");
213 static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
214 SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
215 CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
216 sysctl_kmem_map_free, "LU", "Free space in kmem");
218 static SYSCTL_NODE(_vm, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
219 "Malloc information");
221 static u_int vm_malloc_zone_count = nitems(kmemzones);
222 SYSCTL_UINT(_vm_malloc, OID_AUTO, zone_count,
223 CTLFLAG_RD, &vm_malloc_zone_count, 0,
224 "Number of malloc zones");
226 static int sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS);
227 SYSCTL_PROC(_vm_malloc, OID_AUTO, zone_sizes,
228 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, NULL, 0,
229 sysctl_vm_malloc_zone_sizes, "S", "Zone sizes used by malloc");
232 * The malloc_mtx protects the kmemstatistics linked list.
234 struct mtx malloc_mtx;
236 static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);
238 #if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
239 static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
240 "Kernel malloc debugging options");
244 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
245 * the caller specifies M_NOWAIT. If set to 0, no failures are caused.
247 #ifdef MALLOC_MAKE_FAILURES
248 static int malloc_failure_rate;
249 static int malloc_nowait_count;
250 static int malloc_failure_count;
251 SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
252 &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
253 SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
254 &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
258 sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
263 return (sysctl_handle_long(oidp, &size, 0, req));
267 sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
271 /* The sysctl is unsigned, implement as a saturation value. */
278 return (sysctl_handle_long(oidp, &size, 0, req));
282 sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS)
284 int sizes[nitems(kmemzones)];
287 for (i = 0; i < nitems(kmemzones); i++) {
288 sizes[i] = kmemzones[i].kz_size;
291 return (SYSCTL_OUT(req, &sizes, sizeof(sizes)));
295 * malloc(9) uma zone separation -- sub-page buffer overruns in one
296 * malloc type will affect only a subset of other malloc types.
298 #if MALLOC_DEBUG_MAXZONES > 1
300 tunable_set_numzones(void)
303 TUNABLE_INT_FETCH("debug.malloc.numzones",
306 /* Sanity check the number of malloc uma zones. */
309 if (numzones > MALLOC_DEBUG_MAXZONES)
310 numzones = MALLOC_DEBUG_MAXZONES;
312 SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
313 SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
314 &numzones, 0, "Number of malloc uma subzones");
317 * Any number that changes regularly is an okay choice for the
318 * offset. Build numbers are pretty good if you have them.
320 static u_int zone_offset = __FreeBSD_version;
321 TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
322 SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
323 &zone_offset, 0, "Separate malloc types by examining the "
324 "Nth character in the malloc type short description.");
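/*
 * Worked example (illustrative, hypothetical numbers): with
 * debug.malloc.numzones set to 8 and a zone_offset of 1400000, the type
 * whose short description is "devbuf" (length 6) is hashed as follows --
 * 1400000 % 6 == 2 selects desc[2] == 'v' (ASCII 118), and 118 % 8 == 6
 * places the type in subzone 6.  Types that hash to the same subzone share
 * buckets and can therefore still corrupt each other; the DDB command
 * "show multizone_matches" lists such collisions.
 */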
327 mtp_set_subzone(struct malloc_type *mtp)
329 struct malloc_type_internal *mtip;
335 desc = mtp->ks_shortdesc;
336 if (desc == NULL || (len = strlen(desc)) == 0)
339 val = desc[zone_offset % len];
340 mtip->mti_zone = (val % numzones);
344 mtp_get_subzone(struct malloc_type *mtp)
346 struct malloc_type_internal *mtip;
350 KASSERT(mtip->mti_zone < numzones,
351 ("mti_zone %u out of range %d",
352 mtip->mti_zone, numzones));
353 return (mtip->mti_zone);
355 #elif MALLOC_DEBUG_MAXZONES == 0
356 #error "MALLOC_DEBUG_MAXZONES must be positive."
359 mtp_set_subzone(struct malloc_type *mtp)
361 struct malloc_type_internal *mtip;
368 mtp_get_subzone(struct malloc_type *mtp)
373 #endif /* MALLOC_DEBUG_MAXZONES > 1 */
376 * An allocation has succeeded -- update malloc type statistics for the
377 * amount of bucket size. Occurs within a critical section so that the
378 * thread isn't preempted and doesn't migrate while updating per-CPU
382 malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
385 struct malloc_type_internal *mtip;
386 struct malloc_type_stats *mtsp;
390 mtsp = zpcpu_get(mtip->mti_stats);
392 mtsp->mts_memalloced += size;
393 mtsp->mts_numallocs++;
396 mtsp->mts_size |= 1 << zindx;
399 if (__predict_false(dtrace_malloc_enabled)) {
400 uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
402 (dtrace_malloc_probe)(probe_id,
403 (uintptr_t) mtp, (uintptr_t) mtip,
404 (uintptr_t) mtsp, size, zindx);
412 malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
416 malloc_type_zone_allocated(mtp, size, -1);
420 * A free operation has occurred -- update malloc type statistics for the
421 * amount of the bucket size. Occurs within a critical section so that the
422 * thread isn't preempted and doesn't migrate while updating per-CPU
426 malloc_type_freed(struct malloc_type *mtp, unsigned long size)
428 struct malloc_type_internal *mtip;
429 struct malloc_type_stats *mtsp;
433 mtsp = zpcpu_get(mtip->mti_stats);
434 mtsp->mts_memfreed += size;
435 mtsp->mts_numfrees++;
438 if (__predict_false(dtrace_malloc_enabled)) {
439 uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
441 (dtrace_malloc_probe)(probe_id,
442 (uintptr_t) mtp, (uintptr_t) mtip,
443 (uintptr_t) mtsp, size, 0);
453 * Allocate a block of physically contiguous memory.
455 * If M_NOWAIT is set, this routine will not block and will instead
456 * return NULL if the allocation fails.
459 contigmalloc(unsigned long size, struct malloc_type *type, int flags,
460 vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
465 ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
466 boundary, VM_MEMATTR_DEFAULT);
468 malloc_type_allocated(type, round_page(size));
473 contigmalloc_domainset(unsigned long size, struct malloc_type *type,
474 struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
475 unsigned long alignment, vm_paddr_t boundary)
479 ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
480 alignment, boundary, VM_MEMATTR_DEFAULT);
482 malloc_type_allocated(type, round_page(size));
489 * Free a block of memory allocated by contigmalloc.
491 * This routine may not block.
494 contigfree(void *addr, unsigned long size, struct malloc_type *type)
497 kmem_free((vm_offset_t)addr, size);
498 malloc_type_freed(type, round_page(size));
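/*
 * Illustrative usage (not part of this file): a driver that needs a
 * physically contiguous 64 KB DMA buffer below 4 GB, page aligned and with
 * no boundary restriction, might do the following.  Unlike plain
 * malloc(M_WAITOK), contigmalloc() may still return NULL when no suitable
 * contiguous range exists.
 *
 *	buf = contigmalloc(65536, M_DEVBUF, M_WAITOK, 0, 0xffffffffUL,
 *	    PAGE_SIZE, 0);
 *	if (buf == NULL)
 *		return (ENOMEM);
 *	...
 *	contigfree(buf, 65536, M_DEVBUF);
 */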
503 malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,
509 KASSERT(mtp->ks_version == M_VERSION, ("malloc: bad malloc type version"));
511 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
513 indx = flags & (M_WAITOK | M_NOWAIT);
514 if (indx != M_NOWAIT && indx != M_WAITOK) {
515 static struct timeval lasterr;
516 static int curerr, once;
517 if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
518 printf("Bad malloc flags: %x\n", indx);
525 #ifdef MALLOC_MAKE_FAILURES
526 if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
527 atomic_add_int(&malloc_nowait_count, 1);
528 if ((malloc_nowait_count % malloc_failure_rate) == 0) {
529 atomic_add_int(&malloc_failure_count, 1);
531 return (EJUSTRETURN);
535 if (flags & M_WAITOK) {
536 KASSERT(curthread->td_intr_nesting_level == 0,
537 ("malloc(M_WAITOK) in interrupt context"));
538 if (__predict_false(!THREAD_CAN_SLEEP())) {
540 epoch_trace_list(curthread);
543 ("malloc(M_WAITOK) with sleeping prohibited"));
546 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
547 ("malloc: called with spinlock or critical section held"));
549 #ifdef DEBUG_MEMGUARD
550 if (memguard_cmp_mtp(mtp, *sizep)) {
551 *vap = memguard_alloc(*sizep, flags);
553 return (EJUSTRETURN);
554 /* This is unfortunate but should not be fatal. */
559 *sizep = redzone_size_ntor(*sizep);
567 * Handle large allocations and frees by using kmem_malloc directly.
570 malloc_large_slab(uma_slab_t slab)
574 va = (uintptr_t)slab;
575 return ((va & 1) != 0);
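/*
 * Illustrative encoding (hypothetical numbers): a 12288-byte (3-page)
 * malloc_large() allocation stores (12288 << 1) | 1 == 0x6001 in the slot
 * that normally holds the slab pointer.  Real slab pointers are
 * pointer-aligned and therefore even, so the low bit unambiguously marks
 * large allocations, and shifting the tag right by one recovers the size.
 */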
579 malloc_large_size(uma_slab_t slab)
583 va = (uintptr_t)slab;
587 static caddr_t __noinline
588 malloc_large(size_t size, struct malloc_type *mtp, struct domainset *policy,
589 int flags DEBUG_REDZONE_ARG_DEF)
594 size = roundup(size, PAGE_SIZE);
595 kva = kmem_malloc_domainset(policy, size, flags);
597 /* The low bit is unused for slab pointers. */
598 vsetzoneslab(kva, NULL, (void *)((size << 1) | 1));
602 malloc_type_allocated(mtp, va == NULL ? 0 : size);
603 if (__predict_false(va == NULL)) {
604 KASSERT((flags & M_WAITOK) == 0,
605 ("malloc(M_WAITOK) returned NULL"));
608 va = redzone_setup(va, osize);
610 kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
616 free_large(void *addr, size_t size)
619 kmem_free((vm_offset_t)addr, size);
626 * Allocate a block of memory.
628 * If M_NOWAIT is set, this routine will not block and will instead
629 * return NULL if the allocation fails.
632 (malloc)(size_t size, struct malloc_type *mtp, int flags)
637 #if defined(DEBUG_REDZONE) || defined(KASAN)
638 unsigned long osize = size;
641 MPASS((flags & M_EXEC) == 0);
645 if (malloc_dbg(&va, &size, mtp, flags) != 0)
649 if (__predict_false(size > kmem_zmax))
650 return (malloc_large(size, mtp, DOMAINSET_RR(), flags
653 if (size & KMEM_ZMASK)
654 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
655 indx = kmemsize[size >> KMEM_ZSHIFT];
656 zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
657 va = uma_zalloc(zone, flags);
659 size = zone->uz_size;
660 if ((flags & M_ZERO) == 0) {
661 kmsan_mark(va, size, KMSAN_STATE_UNINIT);
662 kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
665 malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
666 if (__predict_false(va == NULL)) {
667 KASSERT((flags & M_WAITOK) == 0,
668 ("malloc(M_WAITOK) returned NULL"));
672 va = redzone_setup(va, osize);
676 kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
678 return ((void *) va);
682 malloc_domain(size_t *sizep, int *indxp, struct malloc_type *mtp, int domain,
691 KASSERT(size <= kmem_zmax && (flags & M_EXEC) == 0,
692 ("malloc_domain: Called with bad flag / size combination."));
693 if (size & KMEM_ZMASK)
694 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
695 indx = kmemsize[size >> KMEM_ZSHIFT];
696 zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
697 va = uma_zalloc_domain(zone, NULL, domain, flags);
699 *sizep = zone->uz_size;
705 malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
708 struct vm_domainset_iter di;
712 #if defined(KASAN) || defined(DEBUG_REDZONE)
713 unsigned long osize = size;
716 MPASS((flags & M_EXEC) == 0);
720 if (malloc_dbg(&va, &size, mtp, flags) != 0)
724 if (__predict_false(size > kmem_zmax))
725 return (malloc_large(size, mtp, DOMAINSET_RR(), flags
728 vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
730 va = malloc_domain(&size, &indx, mtp, domain, flags);
731 } while (va == NULL && vm_domainset_iter_policy(&di, &domain) == 0);
732 malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
733 if (__predict_false(va == NULL)) {
734 KASSERT((flags & M_WAITOK) == 0,
735 ("malloc(M_WAITOK) returned NULL"));
739 va = redzone_setup(va, osize);
743 kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
746 if ((flags & M_ZERO) == 0) {
747 kmsan_mark(va, size, KMSAN_STATE_UNINIT);
748 kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
755 * Allocate an executable area.
758 malloc_exec(size_t size, struct malloc_type *mtp, int flags)
761 return (malloc_domainset_exec(size, mtp, DOMAINSET_RR(), flags));
765 malloc_domainset_exec(size_t size, struct malloc_type *mtp, struct domainset *ds,
768 #if defined(DEBUG_REDZONE) || defined(KASAN)
769 unsigned long osize = size;
779 if (malloc_dbg(&va, &size, mtp, flags) != 0)
783 return (malloc_large(size, mtp, ds, flags DEBUG_REDZONE_ARG));
787 malloc_aligned(size_t size, size_t align, struct malloc_type *type, int flags)
789 return (malloc_domainset_aligned(size, align, type, DOMAINSET_RR(),
794 malloc_domainset_aligned(size_t size, size_t align,
795 struct malloc_type *mtp, struct domainset *ds, int flags)
800 KASSERT(powerof2(align),
801 ("malloc_domainset_aligned: wrong align %#zx size %#zx",
803 KASSERT(align <= PAGE_SIZE,
804 ("malloc_domainset_aligned: align %#zx (size %#zx) too large",
808 * Round the allocation size up to the next power of 2,
809 * because we can only guarantee alignment for
810 * power-of-2-sized allocations. Further increase the
811 * allocation size to align if the rounded size is less than
812 * align, since malloc zones provide alignment equal to their
817 asize = size <= align ? align : 1UL << flsl(size - 1);
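	/*
	 * Worked example (illustrative): for size 100 and align 64, the
	 * request is rounded up to asize == 1 << flsl(99) == 128 and served
	 * from the malloc-128 bucket, which mallocinit() created with
	 * 128-byte alignment; for size 40 and align 64, asize == 64.
	 */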
819 res = malloc_domainset(asize, mtp, ds, flags);
820 KASSERT(res == NULL || ((uintptr_t)res & (align - 1)) == 0,
821 ("malloc_domainset_aligned: result not aligned %p size %#zx "
822 "allocsize %#zx align %#zx", res, size, asize, align));
827 mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
830 if (WOULD_OVERFLOW(nmemb, size))
831 panic("mallocarray: %zu * %zu overflowed", nmemb, size);
833 return (malloc(size * nmemb, type, flags));
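/*
 * Illustrative usage (hypothetical type and variable names): allocating an
 * array of caller-supplied length without risking an overflowed size
 * computation.
 *
 *	ring = mallocarray(nentries, sizeof(*ring), M_EXAMPLE,
 *	    M_WAITOK | M_ZERO);
 *
 * If nentries * sizeof(*ring) would overflow a size_t, the kernel panics
 * instead of quietly allocating a short buffer.
 */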
837 mallocarray_domainset(size_t nmemb, size_t size, struct malloc_type *type,
838 struct domainset *ds, int flags)
841 if (WOULD_OVERFLOW(nmemb, size))
842 panic("mallocarray_domainset: %zu * %zu overflowed", nmemb, size);
844 return (malloc_domainset(size * nmemb, type, ds, flags));
847 #if defined(INVARIANTS) && !defined(KASAN)
849 free_save_type(void *addr, struct malloc_type *mtp, u_long size)
851 struct malloc_type **mtpp = addr;
854 * Cache a pointer to the malloc_type that most recently freed
855 * this memory here. This way we know who is most likely to
856 * have stepped on it later.
858 * This code assumes that size is a multiple of 8 bytes for
861 mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
862 mtpp += (size - sizeof(struct malloc_type *)) /
863 sizeof(struct malloc_type *);
870 free_dbg(void **addrp, struct malloc_type *mtp)
875 KASSERT(mtp->ks_version == M_VERSION, ("free: bad malloc type version"));
876 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
877 ("free: called with spinlock or critical section held"));
879 /* free(NULL, ...) does nothing */
881 return (EJUSTRETURN);
883 #ifdef DEBUG_MEMGUARD
884 if (is_memguard_addr(addr)) {
886 return (EJUSTRETURN);
892 *addrp = redzone_addr_ntor(addr);
902 * Free a block of memory allocated by malloc.
904 * This routine may not block.
907 free(void *addr, struct malloc_type *mtp)
914 if (free_dbg(&addr, mtp) != 0)
917 /* free(NULL, ...) does nothing */
921 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
923 panic("free: address %p(%p) has not been allocated.\n",
924 addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
926 if (__predict_true(!malloc_large_slab(slab))) {
927 size = zone->uz_size;
928 #if defined(INVARIANTS) && !defined(KASAN)
929 free_save_type(addr, mtp, size);
931 uma_zfree_arg(zone, addr, slab);
933 size = malloc_large_size(slab);
934 free_large(addr, size);
936 malloc_type_freed(mtp, size);
942 * Zero then free a block of memory allocated by malloc.
944 * This routine may not block.
947 zfree(void *addr, struct malloc_type *mtp)
954 if (free_dbg(&addr, mtp) != 0)
957 /* free(NULL, ...) does nothing */
961 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
963 panic("free: address %p(%p) has not been allocated.\n",
964 addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
966 if (__predict_true(!malloc_large_slab(slab))) {
967 size = zone->uz_size;
968 #if defined(INVARIANTS) && !defined(KASAN)
969 free_save_type(addr, mtp, size);
971 kasan_mark(addr, size, size, 0);
972 explicit_bzero(addr, size);
973 uma_zfree_arg(zone, addr, slab);
975 size = malloc_large_size(slab);
976 kasan_mark(addr, size, size, 0);
977 explicit_bzero(addr, size);
978 free_large(addr, size);
980 malloc_type_freed(mtp, size);
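/*
 * Illustrative usage (hypothetical names): zfree() is intended for buffers
 * holding sensitive data, e.g. key material, so that the contents are
 * scrubbed before the memory is recycled.
 *
 *	key = malloc(keylen, M_EXAMPLE, M_WAITOK);
 *	...
 *	zfree(key, M_EXAMPLE);
 */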
984 * realloc: change the size of a memory block
987 realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
989 #ifndef DEBUG_REDZONE
996 KASSERT(mtp->ks_version == M_VERSION,
997 ("realloc: bad malloc type version"));
998 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
999 ("realloc: called with spinlock or critical section held"));
1001 /* realloc(NULL, ...) is equivalent to malloc(...) */
1003 return (malloc(size, mtp, flags));
1006 * XXX: Should report free of old memory and alloc of new memory to
1010 #ifdef DEBUG_MEMGUARD
1011 if (is_memguard_addr(addr))
1012 return (memguard_realloc(addr, size, mtp, flags));
1015 #ifdef DEBUG_REDZONE
1016 alloc = redzone_get_size(addr);
1018 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
1021 KASSERT(slab != NULL,
1022 ("realloc: address %p out of range", (void *)addr));
1024 /* Get the size of the original block */
1025 if (!malloc_large_slab(slab))
1026 alloc = zone->uz_size;
1028 alloc = malloc_large_size(slab);
1030 /* Reuse the original block if appropriate */
1031 if (size <= alloc &&
1032 (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) {
1033 kasan_mark((void *)addr, size, alloc, KASAN_MALLOC_REDZONE);
1036 #endif /* !DEBUG_REDZONE */
1038 /* Allocate a new, bigger (or smaller) block */
1039 if ((newaddr = malloc(size, mtp, flags)) == NULL)
1043 * Copy over original contents. For KASAN, the redzone must be marked
1044 * valid before performing the copy.
1046 kasan_mark(addr, alloc, alloc, 0);
1047 bcopy(addr, newaddr, min(size, alloc));
1053 * reallocf: same as realloc() but free memory on failure.
1056 reallocf(void *addr, size_t size, struct malloc_type *mtp, int flags)
1060 if ((mem = realloc(addr, size, mtp, flags)) == NULL)
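/*
 * Illustrative usage (hypothetical names): reallocf() avoids the classic
 * leak that occurs when the result of realloc() overwrites the only copy
 * of the original pointer.
 *
 *	if ((buf = reallocf(buf, newlen, M_EXAMPLE, M_NOWAIT)) == NULL)
 *		return (ENOMEM);	(the old buffer has already been freed)
 */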
1066 * malloc_size: returns the number of bytes allocated for a request of the
1070 malloc_size(size_t size)
1074 if (size > kmem_zmax)
1076 if (size & KMEM_ZMASK)
1077 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
1078 indx = kmemsize[size >> KMEM_ZSHIFT];
1079 return (kmemzones[indx].kz_size);
1083 * malloc_usable_size: returns the usable size of the allocation.
1086 malloc_usable_size(const void *addr)
1088 #ifndef DEBUG_REDZONE
1097 #ifdef DEBUG_MEMGUARD
1098 if (is_memguard_addr(__DECONST(void *, addr)))
1099 return (memguard_get_req_size(addr));
1102 #ifdef DEBUG_REDZONE
1103 size = redzone_get_size(__DECONST(void *, addr));
1105 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
1107 panic("malloc_usable_size: address %p(%p) is not allocated.\n",
1108 addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
1110 if (!malloc_large_slab(slab))
1111 size = zone->uz_size;
1113 size = malloc_large_size(slab);
1117 * Unmark the redzone to avoid reports from consumers who are
1118 * (presumably) about to use the full allocation size.
1120 kasan_mark(addr, size, size, 0);
1125 CTASSERT(VM_KMEM_SIZE_SCALE >= 1);
1128 * Initialize the kernel memory (kmem) arena.
1137 if (vm_kmem_size == 0)
1138 vm_kmem_size = VM_KMEM_SIZE;
1140 #ifdef VM_KMEM_SIZE_MIN
1141 if (vm_kmem_size_min == 0)
1142 vm_kmem_size_min = VM_KMEM_SIZE_MIN;
1144 #ifdef VM_KMEM_SIZE_MAX
1145 if (vm_kmem_size_max == 0)
1146 vm_kmem_size_max = VM_KMEM_SIZE_MAX;
1149 * Calculate the amount of kernel virtual address (KVA) space that is
1150 * preallocated to the kmem arena. In order to support a wide range
1151 * of machines, it is a function of the physical memory size,
1154 * min(max(physical memory size / VM_KMEM_SIZE_SCALE,
1155 * VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
1157 * Every architecture must define an integral value for
1158 * VM_KMEM_SIZE_SCALE. However, the definitions of VM_KMEM_SIZE_MIN
1159 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
1160 * ceiling on this preallocation, are optional. Typically,
1161 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
1162 * a given architecture.
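	/*
	 * Worked example (illustrative, hypothetical scale factor): a machine
	 * with 8 GB of RAM and 4 KB pages has mem_size == 2097152 pages; with
	 * VM_KMEM_SIZE_SCALE == 3 the default vm_kmem_size would be
	 * (2097152 / 3) * PAGE_SIZE, roughly 2.7 GB, clamped to the optional
	 * VM_KMEM_SIZE_MIN/MAX bounds and finally limited to twice the
	 * physical memory size below.
	 */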
1164 mem_size = vm_cnt.v_page_count;
1165 if (mem_size <= 32768) /* delphij XXX 128MB */
1166 kmem_zmax = PAGE_SIZE;
1168 if (vm_kmem_size_scale < 1)
1169 vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
1172 * Check if we should use defaults for the "vm_kmem_size"
1175 if (vm_kmem_size == 0) {
1176 vm_kmem_size = mem_size / vm_kmem_size_scale;
1177 vm_kmem_size = vm_kmem_size * PAGE_SIZE < vm_kmem_size ?
1178 vm_kmem_size_max : vm_kmem_size * PAGE_SIZE;
1179 if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
1180 vm_kmem_size = vm_kmem_size_min;
1181 if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
1182 vm_kmem_size = vm_kmem_size_max;
1184 if (vm_kmem_size == 0)
1185 panic("Tune VM_KMEM_SIZE_* for the platform");
1188 * The amount of KVA space that is preallocated to the
1189 * kmem arena can be set statically at compile-time or manually
1190 * through the kernel environment. However, it is still limited to
1191 * twice the physical memory size, which has been sufficient to handle
1192 * the most severe cases of external fragmentation in the kmem arena.
1194 if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
1195 vm_kmem_size = 2 * mem_size * PAGE_SIZE;
1197 vm_kmem_size = round_page(vm_kmem_size);
1200 * With KASAN or KMSAN enabled, dynamically allocated kernel memory is
1201 * shadowed. Account for this when setting the UMA limit.
1204 vm_kmem_size = (vm_kmem_size * KASAN_SHADOW_SCALE) /
1205 (KASAN_SHADOW_SCALE + 1);
1206 #elif defined(KMSAN)
1210 #ifdef DEBUG_MEMGUARD
1211 tmp = memguard_fudge(vm_kmem_size, kernel_map);
1217 #ifdef DEBUG_MEMGUARD
1219 * Initialize MemGuard if support compiled in. MemGuard is a
1220 * replacement allocator used for detecting tamper-after-free
1221 * scenarios as they occur. It is only used for debugging.
1223 memguard_init(kernel_arena);
1228 * Initialize the kernel memory allocator
1232 mallocinit(void *dummy)
1237 mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);
1241 if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
1242 kmem_zmax = KMEM_ZMAX;
1244 for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
1245 int size = kmemzones[indx].kz_size;
1246 const char *name = kmemzones[indx].kz_name;
1250 align = UMA_ALIGN_PTR;
1251 if (powerof2(size) && size > sizeof(void *))
1252 align = MIN(size, PAGE_SIZE) - 1;
1253 for (subzone = 0; subzone < numzones; subzone++) {
1254 kmemzones[indx].kz_zone[subzone] =
1255 uma_zcreate(name, size,
1256 #if defined(INVARIANTS) && !defined(KASAN) && !defined(KMSAN)
1257 mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
1259 NULL, NULL, NULL, NULL,
1261 align, UMA_ZONE_MALLOC);
1263 for (;i <= size; i+= KMEM_ZBASE)
1264 kmemsize[i >> KMEM_ZSHIFT] = indx;
1267 SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);
1270 malloc_init(void *data)
1272 struct malloc_type_internal *mtip;
1273 struct malloc_type *mtp;
1275 KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init"));
1278 if (mtp->ks_version != M_VERSION)
1279 panic("malloc_init: type %s with unsupported version %lu",
1280 mtp->ks_shortdesc, mtp->ks_version);
1282 mtip = &mtp->ks_mti;
1283 mtip->mti_stats = uma_zalloc_pcpu(pcpu_zone_64, M_WAITOK | M_ZERO);
1284 mtp_set_subzone(mtp);
1286 mtx_lock(&malloc_mtx);
1287 mtp->ks_next = kmemstatistics;
1288 kmemstatistics = mtp;
1290 mtx_unlock(&malloc_mtx);
1294 malloc_uninit(void *data)
1296 struct malloc_type_internal *mtip;
1297 struct malloc_type_stats *mtsp;
1298 struct malloc_type *mtp, *temp;
1299 long temp_allocs, temp_bytes;
1303 KASSERT(mtp->ks_version == M_VERSION,
1304 ("malloc_uninit: bad malloc type version"));
1306 mtx_lock(&malloc_mtx);
1307 mtip = &mtp->ks_mti;
1308 if (mtp != kmemstatistics) {
1309 for (temp = kmemstatistics; temp != NULL;
1310 temp = temp->ks_next) {
1311 if (temp->ks_next == mtp) {
1312 temp->ks_next = mtp->ks_next;
1317 ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
1319 kmemstatistics = mtp->ks_next;
1321 mtx_unlock(&malloc_mtx);
1324 * Look for memory leaks.
1326 temp_allocs = temp_bytes = 0;
1327 for (i = 0; i <= mp_maxid; i++) {
1328 mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
1329 temp_allocs += mtsp->mts_numallocs;
1330 temp_allocs -= mtsp->mts_numfrees;
1331 temp_bytes += mtsp->mts_memalloced;
1332 temp_bytes -= mtsp->mts_memfreed;
1334 if (temp_allocs > 0 || temp_bytes > 0) {
1335 printf("Warning: memory type %s leaked memory on destroy "
1336 "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
1337 temp_allocs, temp_bytes);
1340 uma_zfree_pcpu(pcpu_zone_64, mtip->mti_stats);
1343 struct malloc_type *
1344 malloc_desc2type(const char *desc)
1346 struct malloc_type *mtp;
1348 mtx_assert(&malloc_mtx, MA_OWNED);
1349 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1350 if (strcmp(mtp->ks_shortdesc, desc) == 0)
1357 sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
1359 struct malloc_type_stream_header mtsh;
1360 struct malloc_type_internal *mtip;
1361 struct malloc_type_stats *mtsp, zeromts;
1362 struct malloc_type_header mth;
1363 struct malloc_type *mtp;
1367 error = sysctl_wire_old_buffer(req, 0);
1370 sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
1371 sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
1372 mtx_lock(&malloc_mtx);
1374 bzero(&zeromts, sizeof(zeromts));
1377 * Insert stream header.
1379 bzero(&mtsh, sizeof(mtsh));
1380 mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
1381 mtsh.mtsh_maxcpus = MAXCPU;
1382 mtsh.mtsh_count = kmemcount;
1383 (void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));
1386 * Insert alternating sequence of type headers and type statistics.
1388 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1389 mtip = &mtp->ks_mti;
1392 * Insert type header.
1394 bzero(&mth, sizeof(mth));
1395 strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
1396 (void)sbuf_bcat(&sbuf, &mth, sizeof(mth));
1399 * Insert type statistics for each CPU.
1401 for (i = 0; i <= mp_maxid; i++) {
1402 mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
1403 (void)sbuf_bcat(&sbuf, mtsp, sizeof(*mtsp));
1406 * Fill in the missing CPUs.
1408 for (; i < MAXCPU; i++) {
1409 (void)sbuf_bcat(&sbuf, &zeromts, sizeof(zeromts));
1412 mtx_unlock(&malloc_mtx);
1413 error = sbuf_finish(&sbuf);
1418 SYSCTL_PROC(_kern, OID_AUTO, malloc_stats,
1419 CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_MPSAFE, 0, 0,
1420 sysctl_kern_malloc_stats, "s,malloc_type_ustats",
1421 "Return malloc types");
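/*
 * Illustrative layout of the stream returned by kern.malloc_stats, as
 * assembled above and consumed by libmemstat (e.g. via vmstat -m):
 *
 *	struct malloc_type_stream_header	(version, MAXCPU, type count)
 *	for each registered type:
 *		struct malloc_type_header	(type name)
 *		MAXCPU x struct malloc_type_stats (per-CPU stats, zero-filled
 *						   for absent CPUs)
 */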
1423 SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
1424 "Count of kernel malloc types");
1427 malloc_type_list(malloc_type_list_func_t *func, void *arg)
1429 struct malloc_type *mtp, **bufmtp;
1433 mtx_lock(&malloc_mtx);
1435 mtx_assert(&malloc_mtx, MA_OWNED);
1437 mtx_unlock(&malloc_mtx);
1439 buflen = sizeof(struct malloc_type *) * count;
1440 bufmtp = malloc(buflen, M_TEMP, M_WAITOK);
1442 mtx_lock(&malloc_mtx);
1444 if (count < kmemcount) {
1445 free(bufmtp, M_TEMP);
1449 for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
1452 mtx_unlock(&malloc_mtx);
1454 for (i = 0; i < count; i++)
1455 (func)(bufmtp[i], arg);
1457 free(bufmtp, M_TEMP);
1462 get_malloc_stats(const struct malloc_type_internal *mtip, uint64_t *allocs,
1465 const struct malloc_type_stats *mtsp;
1466 uint64_t frees, alloced, freed;
1473 for (i = 0; i <= mp_maxid; i++) {
1474 mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
1476 *allocs += mtsp->mts_numallocs;
1477 frees += mtsp->mts_numfrees;
1478 alloced += mtsp->mts_memalloced;
1479 freed += mtsp->mts_memfreed;
1481 *inuse = *allocs - frees;
1482 return (alloced - freed);
1485 DB_SHOW_COMMAND_FLAGS(malloc, db_show_malloc, DB_CMD_MEMSAFE)
1487 const char *fmt_hdr, *fmt_entry;
1488 struct malloc_type *mtp;
1489 uint64_t allocs, inuse;
1491 /* variables for sorting */
1492 struct malloc_type *last_mtype, *cur_mtype;
1493 int64_t cur_size, last_size;
1496 if (modif[0] == 'i') {
1497 fmt_hdr = "%s,%s,%s,%s\n";
1498 fmt_entry = "\"%s\",%ju,%jdK,%ju\n";
1500 fmt_hdr = "%18s %12s %12s %12s\n";
1501 fmt_entry = "%18s %12ju %12jdK %12ju\n";
1504 db_printf(fmt_hdr, "Type", "InUse", "MemUse", "Requests");
1506 /* Select sort, largest size first. */
1508 last_size = INT64_MAX;
1514 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1516 * In the case of size ties, print out mtypes
1517 * in the order they are encountered. That is,
1518 * when we encounter the most recently output
1519 * mtype, we have already printed all preceding
1520 * ties, and we must print all following ties.
1522 if (mtp == last_mtype) {
1526 size = get_malloc_stats(&mtp->ks_mti, &allocs,
1528 if (size > cur_size && size < last_size + ties) {
1533 if (cur_mtype == NULL)
1536 size = get_malloc_stats(&cur_mtype->ks_mti, &allocs, &inuse);
1537 db_printf(fmt_entry, cur_mtype->ks_shortdesc, inuse,
1538 howmany(size, 1024), allocs);
1543 last_mtype = cur_mtype;
1544 last_size = cur_size;
1548 #if MALLOC_DEBUG_MAXZONES > 1
1549 DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
1551 struct malloc_type_internal *mtip;
1552 struct malloc_type *mtp;
1556 db_printf("Usage: show multizone_matches <malloc type/addr>\n");
1560 if (mtp->ks_version != M_VERSION) {
1561 db_printf("Version %lx does not match expected %x\n",
1562 mtp->ks_version, M_VERSION);
1566 mtip = &mtp->ks_mti;
1567 subzone = mtip->mti_zone;
1569 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1570 mtip = &mtp->ks_mti;
1571 if (mtip->mti_zone != subzone)
1573 db_printf("%s\n", mtp->ks_shortdesc);
1578 #endif /* MALLOC_DEBUG_MAXZONES > 1 */