2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2005 Robert N. M. Watson
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <sys/cdefs.h>
32 #include <sys/param.h>
33 #include <sys/malloc.h>
34 #include <sys/sysctl.h>
45 #include "memstat_internal.h"
/* Cached copy of the kernel's malloc zone count (vm.malloc.zone_count). */
47 static int memstat_malloc_zone_count;
/* Per-zone allocation sizes; fixed capacity, bounds-checked before use below. */
48 static int memstat_malloc_zone_sizes[32];
/* Populate the two caches above from a live kernel via sysctl(3)... */
50 static int memstat_malloc_zone_init(void);
/* ...or from a kernel image / crash dump via kvm(3). */
51 static int memstat_malloc_zone_init_kvm(kvm_t *kvm);
/*
 * Kernel symbols resolved via kvm_nlist(3).  The X_* index macros must
 * match each entry's position in this array.
 * NOTE(review): this view is a sampled listing -- the X_KMEMZONES and
 * X_NUMZONES defines (indices 1 and 2, used later in this file) and the
 * array terminator are elided here.
 */
53 static struct nlist namelist[] = {
54 #define X_KMEMSTATISTICS 0
55 { .n_name = "_kmemstatistics" },
57 { .n_name = "_kmemzones" },
59 { .n_name = "_numzones" },
60 #define X_VM_MALLOC_ZONE_COUNT 3
61 { .n_name = "_vm_malloc_zone_count" },
62 #define X_MP_MAXCPUS 4
63 { .n_name = "_mp_maxcpus" },
68 * Extract malloc(9) statistics from the running kernel, and store all memory
69 * type information in the passed list. For each type, check the list for an
70 * existing entry with the right name/allocator -- if present, update that
71 * entry. Otherwise, add a new entry. On error, the entire list will be
72 * cleared, as entries will be in an inconsistent state.
74 * To reduce the level of work for a list that starts empty, we keep around a
75 * hint as to whether it was empty when we began, so we can avoid searching
76 * the list for entries to update. Updates are O(n^2) due to searching for
77 * each entry before adding it.
/*
 * NOTE(review): this listing is a sampled view; intervening lines (local
 * declarations of size/buffer/p, braces, gotos, frees, returns) are elided.
 */
80 memstat_sysctl_malloc(struct memory_type_list *list, int flags)
82 struct malloc_type_stream_header *mtshp;
83 struct malloc_type_header *mthp;
84 struct malloc_type_stats *mtsp;
85 struct memory_type *mtp;
86 int count, hint_dontsearch, i, j, maxcpus;
/* Remember whether the list started empty so lookups below can be skipped. */
90 hint_dontsearch = LIST_EMPTY(&list->mtl_list);
93 * Query the number of CPUs, number of malloc types so that we can
94 * guess an initial buffer size. We loop until we succeed or really
95 * fail. Note that the value of maxcpus we query using sysctl is not
96 * the version we use when processing the real data -- that is read
100 size = sizeof(maxcpus);
101 if (sysctlbyname("kern.smp.maxcpus", &maxcpus, &size, NULL, 0) < 0) {
102 if (errno == EACCES || errno == EPERM)
103 list->mtl_error = MEMSTAT_ERROR_PERMISSION;
105 list->mtl_error = MEMSTAT_ERROR_DATAERROR;
/* A size mismatch means the kernel's integer width differs from ours. */
108 if (size != sizeof(maxcpus)) {
109 list->mtl_error = MEMSTAT_ERROR_DATAERROR;
113 size = sizeof(count);
114 if (sysctlbyname("kern.malloc_count", &count, &size, NULL, 0) < 0) {
115 if (errno == EACCES || errno == EPERM)
116 list->mtl_error = MEMSTAT_ERROR_PERMISSION;
118 list->mtl_error = MEMSTAT_ERROR_VERSION;
121 if (size != sizeof(count)) {
122 list->mtl_error = MEMSTAT_ERROR_DATAERROR;
/* Cache the malloc zone count/sizes before parsing the stats stream. */
126 if (memstat_malloc_zone_init() == -1) {
127 list->mtl_error = MEMSTAT_ERROR_VERSION;
/*
 * Size the buffer for one stream header plus, per malloc type, a type
 * header and per-CPU stats records.
 */
131 size = sizeof(*mthp) + count * (sizeof(*mthp) + sizeof(*mtsp) *
134 buffer = malloc(size);
135 if (buffer == NULL) {
136 list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
140 if (sysctlbyname("kern.malloc_stats", buffer, &size, NULL, 0) < 0) {
142 * XXXRW: ENOMEM is an ambiguous return, we should bound the
143 * number of loops, perhaps.
145 if (errno == ENOMEM) {
149 if (errno == EACCES || errno == EPERM)
150 list->mtl_error = MEMSTAT_ERROR_PERMISSION;
152 list->mtl_error = MEMSTAT_ERROR_VERSION;
/* Need at least a complete stream header before touching its fields. */
162 if (size < sizeof(*mtshp)) {
163 list->mtl_error = MEMSTAT_ERROR_VERSION;
168 mtshp = (struct malloc_type_stream_header *)p;
171 if (mtshp->mtsh_version != MALLOC_TYPE_STREAM_VERSION) {
172 list->mtl_error = MEMSTAT_ERROR_VERSION;
178 * For the remainder of this function, we are quite trusting about
179 * the layout of structures and sizes, since we've determined we have
180 * a matching version and acceptable CPU count.
182 maxcpus = mtshp->mtsh_maxcpus;
183 count = mtshp->mtsh_count;
184 for (i = 0; i < count; i++) {
185 mthp = (struct malloc_type_header *)p;
/* Reuse an existing list entry when possible; otherwise allocate one. */
188 if (hint_dontsearch == 0) {
189 mtp = memstat_mtl_find(list, ALLOCATOR_MALLOC,
194 mtp = _memstat_mt_allocate(list, ALLOCATOR_MALLOC,
195 mthp->mth_name, maxcpus);
/* Allocation failure leaves the list inconsistent -- empty it. */
197 _memstat_mtl_empty(list);
199 list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
204 * Reset the statistics on a current node.
206 _memstat_mt_reset_stats(mtp, maxcpus);
208 for (j = 0; j < maxcpus; j++) {
209 mtsp = (struct malloc_type_stats *)p;
213 * Summarize raw statistics across CPUs into coalesced
216 mtp->mt_memalloced += mtsp->mts_memalloced;
217 mtp->mt_memfreed += mtsp->mts_memfreed;
218 mtp->mt_numallocs += mtsp->mts_numallocs;
219 mtp->mt_numfrees += mtsp->mts_numfrees;
220 mtp->mt_sizemask |= mtsp->mts_size;
223 * Copies of per-CPU statistics.
225 mtp->mt_percpu_alloc[j].mtp_memalloced =
226 mtsp->mts_memalloced;
227 mtp->mt_percpu_alloc[j].mtp_memfreed =
229 mtp->mt_percpu_alloc[j].mtp_numallocs =
231 mtp->mt_percpu_alloc[j].mtp_numfrees =
233 mtp->mt_percpu_alloc[j].mtp_sizemask =
238 * Derived cross-CPU statistics.
240 mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
241 mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
/*
 * kread(): bulk read of kernel memory at kvm_pointer + offset into address.
 * Returns MEMSTAT_ERROR_KVM on read failure and MEMSTAT_ERROR_KVM_SHORTREAD
 * on a partial read; the success path is elided from this view.
 */
250 kread(kvm_t *kvm, void *kvm_pointer, void *address, size_t size,
255 ret = kvm_read(kvm, (unsigned long)kvm_pointer + offset, address,
258 return (MEMSTAT_ERROR_KVM);
259 if ((size_t)ret != size)
260 return (MEMSTAT_ERROR_KVM_SHORTREAD);
/*
 * kread_string(): copy a NUL-terminated string out of kernel memory one
 * byte at a time, stopping at the terminator or after buflen bytes.
 * NOTE(review): the overflow/termination handling past the loop is elided
 * from this view.
 */
265 kread_string(kvm_t *kvm, const void *kvm_pointer, char *buffer, int buflen)
270 for (i = 0; i < buflen; i++) {
271 ret = kvm_read(kvm, __DECONST(unsigned long, kvm_pointer) +
272 i, &(buffer[i]), sizeof(char));
274 return (MEMSTAT_ERROR_KVM);
275 if ((size_t)ret != sizeof(char))
276 return (MEMSTAT_ERROR_KVM_SHORTREAD);
277 if (buffer[i] == '\0')
/*
 * kread_symbol(): read kernel memory at namelist[index].n_value + offset;
 * the symbol must already have been resolved by kvm_nlist(3).
 */
286 kread_symbol(kvm_t *kvm, int index, void *address, size_t size,
291 ret = kvm_read(kvm, namelist[index].n_value + offset, address, size);
293 return (MEMSTAT_ERROR_KVM);
294 if ((size_t)ret != size)
295 return (MEMSTAT_ERROR_KVM_SHORTREAD);
/*
 * kread_zpcpu(): read one CPU's slot of a per-CPU (zpcpu) kernel
 * allocation via kvm_read_zpcpu(3).
 */
300 kread_zpcpu(kvm_t *kvm, u_long base, void *buf, size_t size, int cpu)
304 ret = kvm_read_zpcpu(kvm, base, buf, size, cpu);
306 return (MEMSTAT_ERROR_KVM);
307 if ((size_t)ret != size)
308 return (MEMSTAT_ERROR_KVM_SHORTREAD);
/*
 * memstat_kvm_malloc(): kvm(3) analogue of memstat_sysctl_malloc() --
 * walk the kernel's kmemstatistics linked list via a kernel image or
 * crash dump, accumulating per-CPU malloc(9) stats into the list.
 * NOTE(review): sampled view; some lines (kvm_t declaration, error
 * checks, returns, mtip initialization) are elided here.
 */
313 memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
315 struct memory_type *mtp;
316 void *kmemstatistics;
317 int hint_dontsearch, j, mp_maxcpus, mp_ncpus, ret;
318 char name[MEMTYPE_MAXNAME];
319 struct malloc_type_stats mts;
320 struct malloc_type_internal *mtip;
321 struct malloc_type type, *typep;
324 kvm = (kvm_t *)kvm_handle;
326 hint_dontsearch = LIST_EMPTY(&list->mtl_list);
/* Resolve all symbols in namelist[] in one pass. */
328 if (kvm_nlist(kvm, namelist) != 0) {
329 list->mtl_error = MEMSTAT_ERROR_KVM;
333 if (namelist[X_KMEMSTATISTICS].n_type == 0 ||
334 namelist[X_KMEMSTATISTICS].n_value == 0) {
335 list->mtl_error = MEMSTAT_ERROR_KVM_NOSYMBOL;
339 ret = kread_symbol(kvm, X_MP_MAXCPUS, &mp_maxcpus,
340 sizeof(mp_maxcpus), 0);
342 list->mtl_error = ret;
/* Head pointer of the kernel's linked list of malloc types. */
346 ret = kread_symbol(kvm, X_KMEMSTATISTICS, &kmemstatistics,
347 sizeof(kmemstatistics), 0);
349 list->mtl_error = ret;
353 ret = memstat_malloc_zone_init_kvm(kvm);
355 list->mtl_error = ret;
359 mp_ncpus = kvm_getncpus(kvm);
/* Walk the ks_next-linked list, copying each malloc_type locally. */
361 for (typep = kmemstatistics; typep != NULL; typep = type.ks_next) {
362 ret = kread(kvm, typep, &type, sizeof(type), 0);
364 _memstat_mtl_empty(list);
365 list->mtl_error = ret;
368 ret = kread_string(kvm, (void *)type.ks_shortdesc, name,
371 _memstat_mtl_empty(list);
372 list->mtl_error = ret;
/* Skip malloc types whose ABI version doesn't match ours. */
375 if (type.ks_version != M_VERSION) {
376 warnx("type %s with unsupported version %lu; skipped",
377 name, type.ks_version);
382 * Since our compile-time value for MAXCPU may differ from the
383 * kernel's, we populate our own array.
387 if (hint_dontsearch == 0) {
388 mtp = memstat_mtl_find(list, ALLOCATOR_MALLOC, name);
392 mtp = _memstat_mt_allocate(list, ALLOCATOR_MALLOC,
395 _memstat_mtl_empty(list);
396 list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
401 * This logic is replicated from kern_malloc.c, and should
404 _memstat_mt_reset_stats(mtp, mp_maxcpus);
/* Accumulate per-CPU stats; mtip's assignment is elided in this view. */
405 for (j = 0; j < mp_ncpus; j++) {
406 ret = kread_zpcpu(kvm, (u_long)mtip->mti_stats, &mts,
409 _memstat_mtl_empty(list);
410 list->mtl_error = ret;
413 mtp->mt_memalloced += mts.mts_memalloced;
414 mtp->mt_memfreed += mts.mts_memfreed;
415 mtp->mt_numallocs += mts.mts_numallocs;
416 mtp->mt_numfrees += mts.mts_numfrees;
417 mtp->mt_sizemask |= mts.mts_size;
419 mtp->mt_percpu_alloc[j].mtp_memalloced =
421 mtp->mt_percpu_alloc[j].mtp_memfreed =
423 mtp->mt_percpu_alloc[j].mtp_numallocs =
425 mtp->mt_percpu_alloc[j].mtp_numfrees =
427 mtp->mt_percpu_alloc[j].mtp_sizemask =
/* Zero per-CPU slots for CPUs beyond those actually present. */
430 for (; j < mp_maxcpus; j++) {
431 bzero(&mtp->mt_percpu_alloc[j],
432 sizeof(mtp->mt_percpu_alloc[0]));
435 mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
436 mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
/*
 * memstat_malloc_zone_init(): cache vm.malloc.zone_count and
 * vm.malloc.zone_sizes from a live kernel via sysctl(3).
 * NOTE(review): return statements are elided in this view; callers treat
 * -1 as failure.
 */
443 memstat_malloc_zone_init(void)
447 size = sizeof(memstat_malloc_zone_count);
448 if (sysctlbyname("vm.malloc.zone_count", &memstat_malloc_zone_count,
449 &size, NULL, 0) < 0) {
/* Guard against a kernel reporting more zones than our fixed array holds. */
453 if (memstat_malloc_zone_count > (int)nitems(memstat_malloc_zone_sizes)) {
457 size = sizeof(memstat_malloc_zone_sizes);
458 if (sysctlbyname("vm.malloc.zone_sizes", &memstat_malloc_zone_sizes,
459 &size, NULL, 0) < 0) {
467 * Copied from kern_malloc.c
469 * kz_zone is an array sized at compilation time, the size is exported in
470 * "numzones". Below we need to iterate kz_size.
/* NOTE(review): the struct's member declarations are elided in this view. */
472 struct memstat_kmemzone {
/*
 * memstat_malloc_zone_init_kvm(): kvm(3) analogue of
 * memstat_malloc_zone_init() -- read the zone count and the variably
 * sized kmemzones[] array directly from kernel memory.
 */
479 memstat_malloc_zone_init_kvm(kvm_t *kvm)
481 struct memstat_kmemzone *kmemzones, *kz;
482 int numzones, objsize, allocsize, ret;
485 ret = kread_symbol(kvm, X_VM_MALLOC_ZONE_COUNT,
486 &memstat_malloc_zone_count, sizeof(memstat_malloc_zone_count), 0);
491 ret = kread_symbol(kvm, X_NUMZONES, &numzones, sizeof(numzones), 0);
/* Each kernel kmemzone object embeds numzones trailing zone pointers. */
496 objsize = __offsetof(struct memstat_kmemzone, kz_zone) +
497 sizeof(void *) * numzones;
499 allocsize = objsize * memstat_malloc_zone_count;
500 kmemzones = malloc(allocsize);
501 if (kmemzones == NULL) {
502 return (MEMSTAT_ERROR_NOMEMORY);
504 ret = kread_symbol(kvm, X_KMEMZONES, kmemzones, allocsize, 0);
/* Record each zone's kz_size, stepping by the kernel's object stride. */
511 for (i = 0; i < (int)nitems(memstat_malloc_zone_sizes); i++) {
512 memstat_malloc_zone_sizes[i] = kz->kz_size;
513 kz = (struct memstat_kmemzone *)((char *)kz + objsize);
/* Accessor: number of malloc zones cached by the init routines above. */
521 memstat_malloc_zone_get_count(void)
524 return (memstat_malloc_zone_count);
/*
 * Accessor: allocation size of zone n; the out-of-range return value is
 * elided in this view.
 */
528 memstat_malloc_zone_get_size(size_t n)
531 if (n >= nitems(memstat_malloc_zone_sizes)) {
535 return (memstat_malloc_zone_sizes[n]);
539 memstat_malloc_zone_used(const struct memory_type *mtp, size_t n)
542 if (memstat_get_sizemask(mtp) & (1 << n))