/*-
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2005-2009 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */
/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  Back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets are used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and comments in malloc.h for a detailed
 * description.
 */
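
/*
 * Illustrative caller-side sketch (not part of this file's compiled code):
 * a consumer declares a memory type and then allocates and frees against
 * it.  The M_FOO type and the foo_softc structure are hypothetical.
 *
 *	MALLOC_DEFINE(M_FOO, "foo", "foo driver state");
 *
 *	struct foo_softc *sc;
 *
 *	sc = malloc(sizeof(*sc), M_FOO, M_WAITOK | M_ZERO);
 *	...
 *	free(sc, M_FOO);
 */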
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif
#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

dtrace_malloc_probe_func_t	dtrace_malloc_probe;
#endif
/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory.  'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
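
/*
 * Worked example (hypothetical sizes): with REALLOC_FRACTION = 1, shrinking
 * a 1024-byte block triggers a fresh, smaller allocation only when the new
 * size is <= 1024 / 2^1 = 512 bytes; a shrink to, say, 600 bytes reuses the
 * original block instead.
 */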
/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
static void kmeminit(void *);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL);
static struct malloc_type *kmemstatistics;
static vm_offset_t kmembase;
static vm_offset_t kmemlimit;
static int kmemcount;
#define KMEM_ZSHIFT	4
#define KMEM_ZBASE	16
#define KMEM_ZMASK	(KMEM_ZBASE - 1)

#define KMEM_ZMAX	PAGE_SIZE
#define KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static u_int8_t kmemsize[KMEM_ZSIZE + 1];
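
/*
 * Bucket-lookup arithmetic, worked through for a hypothetical 100-byte
 * request: 100 has low bits set (100 & KMEM_ZMASK != 0), so it is rounded
 * up to (100 & ~15) + 16 = 112, and kmemsize[112 >> KMEM_ZSHIFT] =
 * kmemsize[7] then names the smallest bucket zone (the 128-byte bucket in
 * the table below) that can hold the request.
 */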
/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone;
} kmemzones[] = {
	{16, "16", NULL},
	{32, "32", NULL},
	{64, "64", NULL},
	{128, "128", NULL},
	{256, "256", NULL},
	{512, "512", NULL},
	{1024, "1024", NULL},
	{2048, "2048", NULL},
	{4096, "4096", NULL},
#if PAGE_SIZE > 4096
	{8192, "8192", NULL},
#if PAGE_SIZE > 8192
	{16384, "16384", NULL},
#if PAGE_SIZE > 16384
	{32768, "32768", NULL},
#if PAGE_SIZE > 32768
	{65536, "65536", NULL},
#if PAGE_SIZE > 65536
#error	"Unsupported PAGE_SIZE"
#endif	/* 65536 */
#endif	/* 32768 */
#endif	/* 16384 */
#endif	/* 8192 */
#endif	/* 4096 */
	{0, NULL},
};
/*
 * Zone to allocate malloc type descriptions from.  For ABI reasons, memory
 * types are described by a data structure passed by the declaring code, but
 * the malloc(9) implementation has its own data structure describing the
 * type and statistics.  This permits the malloc(9)-internal data structures
 * to be modified without breaking binary-compiled kernel modules that
 * declare malloc types.
 */
static uma_zone_t mt_zone;

u_long vm_kmem_size;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");
static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_size, "LU", "Current kmem_map allocation size");

static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_free, "LU", "Largest contiguous free range in kmem_map");
/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;
#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];
static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);
/*
 * time_uptime of the last malloc(9) failure (induced or real).
 */
static time_t t_malloc_fail;
/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");

static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif
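
/*
 * Example (requires a kernel built with MALLOC_MAKE_FAILURES): fail roughly
 * one in every hundred M_NOWAIT allocations from userland with
 *
 *	sysctl debug.malloc.failure_rate=100
 *
 * and read back the number of induced failures via
 * debug.malloc.failure_count.
 */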
static int
sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = kmem_map->size;
	return (sysctl_handle_long(oidp, &size, 0, req));
}
static int
sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	vm_map_lock_read(kmem_map);
	size = kmem_map->root != NULL ? kmem_map->root->max_free :
	    kmem_map->max_offset - kmem_map->min_offset;
	vm_map_unlock_read(kmem_map);
	return (sysctl_handle_long(oidp, &size, 0, req));
}
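
/*
 * Both values are exported read-only and can be inspected from userland,
 * e.g. "sysctl vm.kmem_map_size vm.kmem_map_free".
 */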
int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}
/*
 * An allocation has succeeded -- update malloc type statistics for the
 * amount of bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

#ifdef KDTRACE_HOOKS
	if (dtrace_malloc_probe != NULL) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
#endif

	critical_exit();
}
void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	malloc_type_zone_allocated(mtp, size, -1);
}
/*
 * A free operation has occurred -- update malloc type statistics for the
 * amount of the bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

#ifdef KDTRACE_HOOKS
	if (dtrace_malloc_probe != NULL) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
#endif

	critical_exit();
}
/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
void *
malloc(unsigned long size, struct malloc_type *mtp, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
#if defined(DIAGNOSTIC) || defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

#ifdef INVARIANTS
	KASSERT(mtp->ks_magic == M_MAGIC, ("malloc: bad malloc type magic"));
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static	struct timeval lasterr;
		static	int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp(mtp, size)) {
		va = memguard_alloc(size, flags);
		if (va != NULL)
			return (va);
		/* This is unfortunate but should not be fatal. */
	}
#endif

#ifdef DEBUG_REDZONE
	size = redzone_size_ntor(size);
#endif

	if (size <= KMEM_ZMAX) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone;
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = zone->uz_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		va = uma_large_malloc(size, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}
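
/*
 * Illustrative caller-side sketch (hypothetical type M_FOO and length len):
 * with M_NOWAIT the return value must be checked, since the allocation may
 * fail instead of sleeping.
 *
 *	p = malloc(len, M_FOO, M_NOWAIT);
 *	if (p == NULL)
 *		return (ENOMEM);
 */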
/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_slab_t slab;
	u_long size;

	KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic"));

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return;
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	addr = redzone_addr_ntor(addr);
#endif

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtpp = addr;
#endif
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently freed
		 * this memory here.  This way we know who is most likely to
		 * have stepped on it later.
		 *
		 * This code assumes that size is a multiple of 8 bytes for
		 * 64 bit machines
		 */
		mtpp = (struct malloc_type **)
		    ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
		mtpp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtpp = mtp;
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(mtp, size);
}
/*
 *	realloc: change the size of a memory block
 */
void *
realloc(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("realloc: bad malloc type magic"));

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 * per-CPU stats.
	 */

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr))
		return (memguard_realloc(addr, size, mtp, flags));
#endif

#ifdef DEBUG_REDZONE
	slab = NULL;
	alloc = redzone_get_size(addr);
#else
	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!(slab->us_flags & UMA_SLAB_MALLOC))
		alloc = slab->us_keg->uk_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);
#endif /* !DEBUG_REDZONE */

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}
/*
 *	reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}
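
/*
 * Hypothetical usage sketch (type M_FOO is illustrative): reallocf() avoids
 * the classic leak where the old pointer is overwritten and lost on a failed
 * resize, because the old block has already been freed for the caller.
 *
 *	if ((buf = reallocf(buf, newlen, M_FOO, M_NOWAIT)) == NULL)
 *		return (ENOMEM);
 */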
/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(void *dummy)
{
	u_int8_t indx;
	u_long mem_size, tmp;
	int i;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);
	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * more applicable for a wider range of machine sizes.  The
	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
	 * available.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
	vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE;
	mem_size = cnt.v_page_count;
#if defined(VM_KMEM_SIZE_SCALE)
	vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
#endif
	TUNABLE_INT_FETCH("vm.kmem_size_scale", &vm_kmem_size_scale);
	if (vm_kmem_size_scale > 0 &&
	    (mem_size / vm_kmem_size_scale) > (vm_kmem_size / PAGE_SIZE))
		vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_MIN)
	vm_kmem_size_min = VM_KMEM_SIZE_MIN;
#endif
	TUNABLE_ULONG_FETCH("vm.kmem_size_min", &vm_kmem_size_min);
	if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min) {
		vm_kmem_size = vm_kmem_size_min;
	}

#if defined(VM_KMEM_SIZE_MAX)
	vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	TUNABLE_ULONG_FETCH("vm.kmem_size_max", &vm_kmem_size_max);
	if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
		vm_kmem_size = vm_kmem_size_max;
	/* Allow final override from the kernel environment */
	TUNABLE_ULONG_FETCH("vm.kmem_size", &vm_kmem_size);

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane.  Be careful to not overflow the 32bit
	 * ints while doing the check or the adjustment.
	 */
	if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
		vm_kmem_size = 2 * mem_size * PAGE_SIZE;
	/*
	 * Tune settings based on the kmem map's size at this time.
	 */
	init_param3(vm_kmem_size / PAGE_SIZE);

#ifdef DEBUG_MEMGUARD
	tmp = memguard_fudge(vm_kmem_size, kernel_map);
#else
	tmp = vm_kmem_size;
#endif
	kmem_map = kmem_suballoc(kernel_map, &kmembase, &kmemlimit,
	    tmp, TRUE);
	kmem_map->system_map = 1;

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	memguard_init(kmem_map);
#endif
	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
#ifdef INVARIANTS
	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
	    NULL, NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;

		kmemzones[indx].kz_zone = uma_zcreate(name, size,
#ifdef INVARIANTS
		    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
		    NULL, NULL, NULL, NULL,
#endif
		    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);

		for (;i <= size; i+= KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;

	}
}
void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(cnt.v_page_count != 0, ("malloc_register before vm_init"));

	mtp = data;
	if (mtp->ks_magic != M_MAGIC)
		panic("malloc_init: bad malloc type magic");

	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
	mtp->ks_handle = mtip;

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}
void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	uma_slab_t slab;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("malloc_uninit: bad malloc type magic"));
	KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL"));

	mtx_lock(&malloc_mtx);
	mtip = mtp->ks_handle;
	mtp->ks_handle = NULL;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp) {
				temp->ks_next = mtp->ks_next;
				break;
			}
		}
		KASSERT(temp,
		    ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i < MAXCPU; i++) {
		mtsp = &mtip->mti_stats[i];
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);
	}

	slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
	uma_zfree_arg(mt_zone, mtip, slab);
}
struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}
static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int buflen, count, error, i;
	struct sbuf sbuf;
	char *buffer;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);
	buflen = sizeof(mtsh) + count * (sizeof(mth) +
	    sizeof(struct malloc_type_stats) * MAXCPU) + 1;
	buffer = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
	mtx_lock(&malloc_mtx);
	if (count < kmemcount) {
		free(buffer, M_TEMP);
		goto restart;
	}

	sbuf_new(&sbuf, buffer, buflen, SBUF_FIXEDLEN);

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	if (sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh)) < 0) {
		mtx_unlock(&malloc_mtx);
		error = ENOMEM;
		goto out;
	}

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		if (sbuf_bcat(&sbuf, &mth, sizeof(mth)) < 0) {
			mtx_unlock(&malloc_mtx);
			error = ENOMEM;
			goto out;
		}

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i < MAXCPU; i++) {
			if (sbuf_bcat(&sbuf, &mtip->mti_stats[i],
			    sizeof(mtip->mti_stats[i])) < 0) {
				mtx_unlock(&malloc_mtx);
				error = ENOMEM;
				goto out;
			}
		}
	}
	mtx_unlock(&malloc_mtx);
	sbuf_finish(&sbuf);
	error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
out:
	sbuf_delete(&sbuf);
	free(buffer, M_TEMP);
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");

SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");
void
malloc_type_list(malloc_type_list_func_t *func, void *arg)
{
	struct malloc_type *mtp, **bufmtp;
	int count, i;
	size_t buflen;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);

	buflen = sizeof(struct malloc_type *) * count;
	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);

	mtx_lock(&malloc_mtx);

	if (count < kmemcount) {
		free(bufmtp, M_TEMP);
		goto restart;
	}

	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
		bufmtp[i] = mtp;

	mtx_unlock(&malloc_mtx);

	for (i = 0; i < count; i++)
		(func)(bufmtp[i], arg);

	free(bufmtp, M_TEMP);
}
#ifdef DDB
DB_SHOW_COMMAND(malloc, db_show_malloc)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int64_t allocs, frees;
	u_int64_t alloced, freed;
	int i;

	db_printf("%18s %12s %12s %12s\n", "Type", "InUse", "MemUse",
	    "Requests");
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;
		allocs = 0;
		frees = 0;
		alloced = 0;
		freed = 0;
		for (i = 0; i < MAXCPU; i++) {
			allocs += mtip->mti_stats[i].mts_numallocs;
			frees += mtip->mti_stats[i].mts_numfrees;
			alloced += mtip->mti_stats[i].mts_memalloced;
			freed += mtip->mti_stats[i].mts_memfreed;
		}
		db_printf("%18s %12ju %12juK %12ju\n",
		    mtp->ks_shortdesc, allocs - frees,
		    (alloced - freed + 1023) / 1024, allocs);
	}
}
#endif /* DDB */
#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	int linesize = 64;
	struct sbuf sbuf;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int bufsize;
	int error;
	char *buf;
	int rsize;
	int size;
	int i;

	bufsize = linesize * (KMEM_ZSIZE + 1);
	bufsize += 128;		/* For the stats line */
	bufsize += 128;		/* For the banner line */
	waste = 0;
	mem = 0;

	buf = malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	sbuf_new(&sbuf, buf, bufsize, SBUF_FIXEDLEN);
	sbuf_printf(&sbuf,
	    "\n  Size                    Requests  Real Size\n");
	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = (long long unsigned)krequests[i];

		sbuf_printf(&sbuf, "%6d%28llu%11d\n", size,
		    (unsigned long long)count, rsize);

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}
	sbuf_printf(&sbuf,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	sbuf_finish(&sbuf);

	error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));

	sbuf_delete(&sbuf);
	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */