/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005-2009 Robert N. M. Watson
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> (mallocarray)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */
/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  Back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets is used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and comments in malloc.h for a detailed
 * description.
 */
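/*
 * Illustrative usage (not part of this file): a kernel consumer declares
 * its own malloc type and allocates against it, so that the per-type
 * statistics maintained below are attributed to it.  The type and function
 * names here are hypothetical.
 *
 *	MALLOC_DEFINE(M_EXAMPLE, "example", "example driver buffers");
 *
 *	static void
 *	example_use(void)
 *	{
 *		void *p;
 *
 *		p = malloc(128, M_EXAMPLE, M_WAITOK | M_ZERO);
 *		free(p, M_EXAMPLE);
 *	}
 */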
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/epoch.h>

#include <vm/vm_domainset.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#include <vm/memguard.h>
#include <vm/redzone.h>
#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <sys/dtrace_bsd.h>

bool	__read_frequently			dtrace_malloc_enabled;
dtrace_malloc_probe_func_t	__read_mostly	dtrace_malloc_probe;
#if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) ||		\
    defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
#define	MALLOC_DEBUG	1
#endif
/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
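/*
 * Worked example with the default REALLOC_FRACTION of 1: shrinking an
 * existing 1024-byte block to 512 bytes or less (newsize <= 1024 >> 1)
 * makes realloc() allocate a fresh, smaller block and copy the data over,
 * while a request for 513..1024 bytes simply reuses the original block
 * (see the size check in realloc() below).
 */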
/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
static struct malloc_type *kmemstatistics;
static int kmemcount;

#define	KMEM_ZSHIFT	4
#define	KMEM_ZBASE	16
#define	KMEM_ZMASK	(KMEM_ZBASE - 1)

#define	KMEM_ZMAX	65536
#define	KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static uint8_t kmemsize[KMEM_ZSIZE + 1];
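/*
 * Illustration of the lookup table above, mirroring the arithmetic used in
 * malloc() below: a request is rounded up to a multiple of KMEM_ZBASE and
 * the result, divided by 2^KMEM_ZSHIFT, indexes kmemsize[] to find the
 * smallest bucket that fits.  For a 100-byte request:
 *
 *	size = (100 & ~KMEM_ZMASK) + KMEM_ZBASE;	rounds up to 112
 *	indx = kmemsize[112 >> KMEM_ZSHIFT];		kmemsize[7]
 *
 * which, with the usual power-of-two bucket sizes created in mallocinit(),
 * selects the 128-byte zone.
 */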
#ifndef MALLOC_DEBUG_MAXZONES
#define	MALLOC_DEBUG_MAXZONES	1
#endif
static int numzones = MALLOC_DEBUG_MAXZONES;
/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
	uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
/*
 * Zone to allocate malloc type descriptions from.  For ABI reasons, memory
 * types are described by a data structure passed by the declaring code, but
 * the malloc(9) implementation has its own data structure describing the
 * type and statistics.  This permits the malloc(9)-internal data structures
 * to be modified without breaking binary-compiled kernel modules that
 * declare malloc types.
 */
static uma_zone_t mt_zone;
static uma_zone_t mt_stats_zone;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long kmem_zmax = KMEM_ZMAX;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
    "Maximum allocation size that malloc(9) would use UMA as backend");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_size, "LU", "Current kmem allocation size");

static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_free, "LU", "Free space in kmem");
/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;
#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);
/*
 * time_uptime of the last malloc(9) failure (induced or real).
 */
static time_t t_malloc_fail;
#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");
#endif
/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif
sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)

	return (sysctl_handle_long(oidp, &size, 0, req));

sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)

	/* The sysctl is unsigned, implement as a saturation value. */

	return (sysctl_handle_long(oidp, &size, 0, req));
/*
 * malloc(9) uma zone separation -- sub-page buffer overruns in one
 * malloc type will affect only a subset of other malloc types.
 */
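/*
 * The number of sub-zones is a compile-time option; a kernel configuration
 * wanting this isolation would typically carry something like the line
 * below (see sys/conf/NOTES for the canonical spelling), and the count
 * actually used can then be lowered at boot via debug.malloc.numzones:
 *
 *	options	MALLOC_DEBUG_MAXZONES=8
 */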
#if MALLOC_DEBUG_MAXZONES > 1
tunable_set_numzones(void)

	TUNABLE_INT_FETCH("debug.malloc.numzones",

	/* Sanity check the number of malloc uma zones. */
	if (numzones > MALLOC_DEBUG_MAXZONES)
		numzones = MALLOC_DEBUG_MAXZONES;

SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &numzones, 0, "Number of malloc uma subzones");

/*
 * Any number that changes regularly is an okay choice for the
 * offset.  Build numbers are pretty good if you have them.
 */
static u_int zone_offset = __FreeBSD_version;
TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
    &zone_offset, 0, "Separate malloc types by examining the "
    "Nth character in the malloc type short description.");
mtp_set_subzone(struct malloc_type *mtp)

	struct malloc_type_internal *mtip;

	mtip = mtp->ks_handle;
	desc = mtp->ks_shortdesc;
	if (desc == NULL || (len = strlen(desc)) == 0)

	val = desc[zone_offset % len];
	mtip->mti_zone = (val % numzones);

mtp_get_subzone(struct malloc_type *mtp)

	struct malloc_type_internal *mtip;

	mtip = mtp->ks_handle;

	KASSERT(mtip->mti_zone < numzones,
	    ("mti_zone %u out of range %d",
	    mtip->mti_zone, numzones));
	return (mtip->mti_zone);

#elif MALLOC_DEBUG_MAXZONES == 0
#error "MALLOC_DEBUG_MAXZONES must be positive."

mtp_set_subzone(struct malloc_type *mtp)

	struct malloc_type_internal *mtip;

	mtip = mtp->ks_handle;

mtp_get_subzone(struct malloc_type *mtp)

#endif /* MALLOC_DEBUG_MAXZONES > 1 */

malloc_last_fail(void)

	return (time_uptime - t_malloc_fail);
/*
 * An allocation has succeeded -- update malloc type statistics for the
 * amount of bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,

	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	mtip = mtp->ks_handle;
	mtsp = zpcpu_get(mtip->mti_stats);

	mtsp->mts_memalloced += size;
	mtsp->mts_numallocs++;

		mtsp->mts_size |= 1 << zindx;

	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];

		(dtrace_malloc_probe)(probe_id,
		    (uintptr_t) mtp, (uintptr_t) mtip,
		    (uintptr_t) mtsp, size, zindx);

malloc_type_allocated(struct malloc_type *mtp, unsigned long size)

	malloc_type_zone_allocated(mtp, size, -1);

/*
 * A free operation has occurred -- update malloc type statistics for the
 * amount of the bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
malloc_type_freed(struct malloc_type *mtp, unsigned long size)

	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	mtip = mtp->ks_handle;
	mtsp = zpcpu_get(mtip->mti_stats);
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];

		(dtrace_malloc_probe)(probe_id,
		    (uintptr_t) mtp, (uintptr_t) mtip,
		    (uintptr_t) mtsp, size, 0);
/*
 * Allocate a block of physically contiguous memory.
 *
 * If M_NOWAIT is set, this routine will not block and return NULL if
 * the allocation fails.
 */
contigmalloc(unsigned long size, struct malloc_type *type, int flags,
    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,

	ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
	    boundary, VM_MEMATTR_DEFAULT);

		malloc_type_allocated(type, round_page(size));

contigmalloc_domainset(unsigned long size, struct malloc_type *type,
    struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
    unsigned long alignment, vm_paddr_t boundary)

	ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
	    alignment, boundary, VM_MEMATTR_DEFAULT);

		malloc_type_allocated(type, round_page(size));

/*
 * Free a block of memory allocated by contigmalloc.
 *
 * This routine may not block.
 */
contigfree(void *addr, unsigned long size, struct malloc_type *type)

	kmem_free((vm_offset_t)addr, size);
	malloc_type_freed(type, round_page(size));
malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,

	KASSERT(mtp->ks_magic == M_MAGIC, ("malloc: bad malloc type magic"));

	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static struct timeval lasterr;
		static int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);

#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;

			return (EJUSTRETURN);

	if (flags & M_WAITOK) {
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));
		if (__predict_false(!THREAD_CAN_SLEEP())) {

			epoch_trace_list(curthread);

			    ("malloc(M_WAITOK) with sleeping prohibited"));

	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("malloc: called with spinlock or critical section held"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_mtp(mtp, *sizep)) {
		*vap = memguard_alloc(*sizep, flags);

			return (EJUSTRETURN);
		/* This is unfortunate but should not be fatal. */

	*sizep = redzone_size_ntor(*sizep);
/*
 * Allocate a block of memory.
 *
 * If M_NOWAIT is set, this routine will not block and return NULL if
 * the allocation fails.
 */
(malloc)(size_t size, struct malloc_type *mtp, int flags)

#if defined(DEBUG_REDZONE)
	unsigned long osize = size;

	if (malloc_dbg(&va, &size, mtp, flags) != 0)

	if (size <= kmem_zmax && (flags & M_EXEC) == 0) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;

		va = uma_zalloc(zone, flags);

			size = zone->uz_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);

		size = roundup(size, PAGE_SIZE);

		va = uma_large_malloc(size, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);

	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));

		t_malloc_fail = time_uptime;

	va = redzone_setup(va, osize);

	return ((void *) va);
malloc_domain(size_t size, struct malloc_type *mtp, int domain, int flags)

#if defined(DEBUG_REDZONE)
	unsigned long osize = size;

	if (malloc_dbg(&va, &size, mtp, flags) != 0)

	if (size <= kmem_zmax && (flags & M_EXEC) == 0) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;

		va = uma_zalloc_domain(zone, NULL, domain, flags);

			size = zone->uz_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);

		size = roundup(size, PAGE_SIZE);

		va = uma_large_malloc_domain(size, domain, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);

	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));

		t_malloc_fail = time_uptime;

	va = redzone_setup(va, osize);

	return ((void *) va);

malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,

	struct vm_domainset_iter di;

	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);

		ret = malloc_domain(size, mtp, domain, flags);

	} while (vm_domainset_iter_policy(&di, &domain) == 0);
mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray: %zu * %zu overflowed", nmemb, size);

	return (malloc(size * nmemb, type, flags));
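/*
 * The overflow test above follows the OpenBSD reallocarray() approach:
 * nmemb * size can only wrap when SIZE_MAX / nmemb < size, and that
 * division is skipped entirely when both operands are below
 * sqrt(SIZE_MAX + 1).  A sketch of the idea (the real WOULD_OVERFLOW()
 * macro may differ in detail):
 *
 *	#define MUL_NO_OVERFLOW	((size_t)1 << (sizeof(size_t) * 8 / 2))
 *	#define WOULD_OVERFLOW(nmemb, size)				\
 *		(((nmemb) >= MUL_NO_OVERFLOW || (size) >= MUL_NO_OVERFLOW) && \
 *		 (nmemb) > 0 && SIZE_MAX / (nmemb) < (size))
 *
 * For example, mallocarray(SIZE_MAX / 2, 4, ...) panics instead of
 * silently wrapping around to a small allocation.
 */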
free_save_type(void *addr, struct malloc_type *mtp, u_long size)

	struct malloc_type **mtpp = addr;

	/*
	 * Cache a pointer to the malloc_type that most recently freed
	 * this memory here.  This way we know who is most likely to
	 * have stepped on it later.
	 *
	 * This code assumes that size is a multiple of 8 bytes for
	 * 64-bit machines.
	 */
	mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
	mtpp += (size - sizeof(struct malloc_type *)) /
	    sizeof(struct malloc_type *);

free_dbg(void **addrp, struct malloc_type *mtp)

	KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("free: called with spinlock or critical section held"));

	/* free(NULL, ...) does nothing */

		return (EJUSTRETURN);

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {

		return (EJUSTRETURN);

	*addrp = redzone_addr_ntor(addr);
/*
 * Free a block of memory allocated by malloc.
 *
 * This routine may not block.
 */
free(void *addr, struct malloc_type *mtp)

	if (free_dbg(&addr, mtp) != 0)

	/* free(NULL, ...) does nothing */

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
		size = slab->us_keg->uk_size;

		free_save_type(addr, mtp, size);

		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);

		size = slab->us_size;
		uma_large_free(slab);

	malloc_type_freed(mtp, size);

free_domain(void *addr, struct malloc_type *mtp)

	if (free_dbg(&addr, mtp) != 0)

	/* free(NULL, ...) does nothing */

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

		panic("free_domain: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
		size = slab->us_keg->uk_size;

		free_save_type(addr, mtp, size);

		uma_zfree_domain(LIST_FIRST(&slab->us_keg->uk_zones),

		size = slab->us_size;
		uma_large_free(slab);

	malloc_type_freed(mtp, size);
/*
 * realloc: change the size of a memory block
 */
realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)

	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("realloc: bad malloc type magic"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("realloc: called with spinlock or critical section held"));

	/* realloc(NULL, ...) is equivalent to malloc(...) */

		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 */

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr))
		return (memguard_realloc(addr, size, mtp, flags));

	alloc = redzone_get_size(addr);

	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!(slab->us_flags & UMA_SLAB_MALLOC))
		alloc = slab->us_keg->uk_size;

		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))

#endif /* !DEBUG_REDZONE */

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));

/*
 * reallocf: same as realloc() but free memory on failure.
 */
reallocf(void *addr, size_t size, struct malloc_type *mtp, int flags)

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
CTASSERT(VM_KMEM_SIZE_SCALE >= 1);

/*
 * Initialize the kernel memory (kmem) arena.
 */

	if (vm_kmem_size == 0)
		vm_kmem_size = VM_KMEM_SIZE;

#ifdef VM_KMEM_SIZE_MIN
	if (vm_kmem_size_min == 0)
		vm_kmem_size_min = VM_KMEM_SIZE_MIN;
#endif
#ifdef VM_KMEM_SIZE_MAX
	if (vm_kmem_size_max == 0)
		vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif

	/*
	 * Calculate the amount of kernel virtual address (KVA) space that is
	 * preallocated to the kmem arena.  In order to support a wide range
	 * of machines, it is a function of the physical memory size,
	 * specifically,
	 *
	 *	min(max(physical memory size / VM_KMEM_SIZE_SCALE,
	 *	    VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
	 *
	 * Every architecture must define an integral value for
	 * VM_KMEM_SIZE_SCALE.  However, the definitions of VM_KMEM_SIZE_MIN
	 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
	 * ceiling on this preallocation, are optional.  Typically,
	 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
	 * a given architecture.
	 */
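	/*
	 * Worked example of the formula above (illustrative, assuming the
	 * amd64 defaults of VM_KMEM_SIZE_SCALE == 1 and a 4 KB page): with
	 * 8 GB of RAM, mem_size is 2097152 pages, so the scaled value is
	 * 2097152 / 1 * 4096 bytes == 8 GB of KVA, which is then clamped by
	 * VM_KMEM_SIZE_MIN/VM_KMEM_SIZE_MAX and finally limited to twice
	 * physical memory further below.
	 */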
	mem_size = vm_cnt.v_page_count;
	if (mem_size <= 32768) /* delphij XXX 128MB */
		kmem_zmax = PAGE_SIZE;

	if (vm_kmem_size_scale < 1)
		vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;

	/*
	 * Check if we should use defaults for the "vm_kmem_size"
	 * variable:
	 */
	if (vm_kmem_size == 0) {
		vm_kmem_size = mem_size / vm_kmem_size_scale;
		vm_kmem_size = vm_kmem_size * PAGE_SIZE < vm_kmem_size ?
		    vm_kmem_size_max : vm_kmem_size * PAGE_SIZE;
		if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
			vm_kmem_size = vm_kmem_size_min;
		if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
			vm_kmem_size = vm_kmem_size_max;

	if (vm_kmem_size == 0)
		panic("Tune VM_KMEM_SIZE_* for the platform");

	/*
	 * The amount of KVA space that is preallocated to the
	 * kmem arena can be set statically at compile-time or manually
	 * through the kernel environment.  However, it is still limited to
	 * twice the physical memory size, which has been sufficient to handle
	 * the most severe cases of external fragmentation in the kmem arena.
	 */
	if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
		vm_kmem_size = 2 * mem_size * PAGE_SIZE;

	vm_kmem_size = round_page(vm_kmem_size);
#ifdef DEBUG_MEMGUARD
	tmp = memguard_fudge(vm_kmem_size, kernel_map);

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	memguard_init(kernel_arena);
/*
 * Initialize the kernel memory allocator
 */
mallocinit(void *dummy)

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
		kmem_zmax = KMEM_ZMAX;

	mt_stats_zone = uma_zcreate("mt_stats_zone",
	    sizeof(struct malloc_type_stats), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;

		for (subzone = 0; subzone < numzones; subzone++) {
			kmemzones[indx].kz_zone[subzone] =
			    uma_zcreate(name, size,
			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
			    NULL, NULL, NULL, NULL,
			    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);

		for (;i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;

SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);
malloc_init(void *data)

	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init"));

	if (mtp->ks_magic != M_MAGIC)
		panic("malloc_init: bad malloc type magic");

	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
	mtip->mti_stats = uma_zalloc_pcpu(mt_stats_zone, M_WAITOK | M_ZERO);
	mtp->ks_handle = mtip;
	mtp_set_subzone(mtp);

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;

	mtx_unlock(&malloc_mtx);

malloc_uninit(void *data)

	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;

	long temp_allocs, temp_bytes;

	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("malloc_uninit: bad malloc type magic"));
	KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL"));

	mtx_lock(&malloc_mtx);
	mtip = mtp->ks_handle;
	mtp->ks_handle = NULL;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp) {
				temp->ks_next = mtp->ks_next;

		    ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));

		kmemstatistics = mtp->ks_next;

	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i <= mp_maxid; i++) {
		mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;

	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);

	slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
	uma_zfree_pcpu(mt_stats_zone, mtip->mti_stats);
	uma_zfree_arg(mt_zone, mtip, slab);
struct malloc_type *
malloc_desc2type(const char *desc)

	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)

	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp, zeromts;
	struct malloc_type_header mth;
	struct malloc_type *mtp;

	error = sysctl_wire_old_buffer(req, 0);

	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
	mtx_lock(&malloc_mtx);

	bzero(&zeromts, sizeof(zeromts));

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	(void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		(void)sbuf_bcat(&sbuf, &mth, sizeof(mth));

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i <= mp_maxid; i++) {
			mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
			(void)sbuf_bcat(&sbuf, mtsp, sizeof(*mtsp));

		/*
		 * Fill in the missing CPUs.
		 */
		for (; i < MAXCPU; i++) {
			(void)sbuf_bcat(&sbuf, &zeromts, sizeof(zeromts));

	mtx_unlock(&malloc_mtx);
	error = sbuf_finish(&sbuf);

SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");
SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");

malloc_type_list(malloc_type_list_func_t *func, void *arg)

	struct malloc_type *mtp, **bufmtp;

	mtx_lock(&malloc_mtx);

	mtx_assert(&malloc_mtx, MA_OWNED);

	mtx_unlock(&malloc_mtx);

	buflen = sizeof(struct malloc_type *) * count;
	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);

	mtx_lock(&malloc_mtx);

	if (count < kmemcount) {
		free(bufmtp, M_TEMP);

	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)

	mtx_unlock(&malloc_mtx);

	for (i = 0; i < count; i++)
		(func)(bufmtp[i], arg);

	free(bufmtp, M_TEMP);
get_malloc_stats(const struct malloc_type_internal *mtip, uint64_t *allocs,

	const struct malloc_type_stats *mtsp;
	uint64_t frees, alloced, freed;

	for (i = 0; i <= mp_maxid; i++) {
		mtsp = zpcpu_get_cpu(mtip->mti_stats, i);

		*allocs += mtsp->mts_numallocs;
		frees += mtsp->mts_numfrees;
		alloced += mtsp->mts_memalloced;
		freed += mtsp->mts_memfreed;

	*inuse = *allocs - frees;
	return (alloced - freed);

DB_SHOW_COMMAND(malloc, db_show_malloc)

	const char *fmt_hdr, *fmt_entry;
	struct malloc_type *mtp;
	uint64_t allocs, inuse;

	/* variables for sorting */
	struct malloc_type *last_mtype, *cur_mtype;
	int64_t cur_size, last_size;

	if (modif[0] == 'i') {
		fmt_hdr = "%s,%s,%s,%s\n";
		fmt_entry = "\"%s\",%ju,%jdK,%ju\n";

		fmt_hdr = "%18s %12s %12s %12s\n";
		fmt_entry = "%18s %12ju %12jdK %12ju\n";

	db_printf(fmt_hdr, "Type", "InUse", "MemUse", "Requests");

	/* Select sort, largest size first. */

	last_size = INT64_MAX;

		for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {

			/*
			 * In the case of size ties, print out mtypes
			 * in the order they are encountered.  That is,
			 * when we encounter the most recently output
			 * mtype, we have already printed all preceding
			 * ties, and we must print all following ties.
			 */
			if (mtp == last_mtype) {

			size = get_malloc_stats(mtp->ks_handle, &allocs,

			if (size > cur_size && size < last_size + ties) {

		if (cur_mtype == NULL)

		size = get_malloc_stats(cur_mtype->ks_handle, &allocs, &inuse);
		db_printf(fmt_entry, cur_mtype->ks_shortdesc, inuse,
		    howmany(size, 1024), allocs);

		last_mtype = cur_mtype;
		last_size = cur_size;
#if MALLOC_DEBUG_MAXZONES > 1
DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)

	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

		db_printf("Usage: show multizone_matches <malloc type/addr>\n");

	if (mtp->ks_magic != M_MAGIC) {
		db_printf("Magic %lx does not match expected %x\n",
		    mtp->ks_magic, M_MAGIC);

	mtip = mtp->ks_handle;
	subzone = mtip->mti_zone;

	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = mtp->ks_handle;
		if (mtip->mti_zone != subzone)

		db_printf("%s\n", mtp->ks_shortdesc);

#endif /* MALLOC_DEBUG_MAXZONES > 1 */
#ifdef MALLOC_PROFILE

sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)

	error = sysctl_wire_old_buffer(req, 0);

	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);

	    "\n Size Requests Real Size\n");
	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = (long long unsigned)krequests[i];

		sbuf_printf(&sbuf, "%6d%28llu%11d\n", size,
		    (unsigned long long)count, rsize);

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);

	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	error = sbuf_finish(&sbuf);

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */