/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/counter.h>
#include <sys/cpuset.h>
#include <sys/sysctl.h>

#include <vm/uma.h>
#include <vm/uma_int.h>

#include <err.h>
#include <errno.h>
#include <kvm.h>
#include <nlist.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "memstat.h"
#include "memstat_internal.h"

static struct nlist namelist[] = {
#define	X_UMA_KEGS	0
	{ .n_name = "_uma_kegs" },
#define	X_MP_MAXID	1
	{ .n_name = "_mp_maxid" },
#define	X_ALL_CPUS	2
	{ .n_name = "_all_cpus" },
#define	X_VM_NDOMAINS	3
	{ .n_name = "_vm_ndomains" },
	{ .n_name = "" },
};
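
/*
 * Example usage (a sketch, not part of this file): a consumer such as
 * vmstat(8) might drive the sysctl path below roughly as follows.  All
 * functions named here are part of the public libmemstat(3) API.
 *
 *	struct memory_type_list *mtlp;
 *	struct memory_type *mtp;
 *
 *	mtlp = memstat_mtl_alloc();
 *	if (mtlp == NULL)
 *		err(1, "memstat_mtl_alloc");
 *	if (memstat_sysctl_uma(mtlp, 0) < 0)
 *		errx(1, "memstat_sysctl_uma: %s",
 *		    memstat_strerror(memstat_mtl_geterror(mtlp)));
 *	for (mtp = memstat_mtl_first(mtlp); mtp != NULL;
 *	    mtp = memstat_mtl_next(mtp))
 *		printf("%s\n", memstat_get_name(mtp));
 *	memstat_mtl_free(mtlp);
 */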

/*
 * Extract uma(9) statistics from the running kernel, and store all memory
 * type information in the passed list.  For each type, check the list for an
 * existing entry with the right name/allocator -- if present, update that
 * entry.  Otherwise, add a new entry.  On error, the entire list will be
 * cleared, as entries will be in an inconsistent state.
 *
 * To reduce the level of work for a list that starts empty, we keep around a
 * hint as to whether it was empty when we began, so we can avoid searching
 * the list for entries to update.  Updates are O(n^2) due to searching for
 * each entry before adding it.
 */
int
memstat_sysctl_uma(struct memory_type_list *list, int flags)
{
	struct uma_stream_header *ushp;
	struct uma_type_header *uthp;
	struct uma_percpu_stat *upsp;
	struct memory_type *mtp;
	int count, hint_dontsearch, i, j, maxcpus, maxid;
	char *buffer, *p;
	size_t size;

	hint_dontsearch = LIST_EMPTY(&list->mtl_list);

	/*
	 * Query the number of CPUs and the number of malloc types so that we
	 * can guess an initial buffer size.  We loop until we succeed or
	 * really fail.  Note that the value of maxcpus we query using sysctl
	 * is not the version we use when processing the real data -- that is
	 * read from the header.
	 */
retry:
	size = sizeof(maxid);
	if (sysctlbyname("kern.smp.maxid", &maxid, &size, NULL, 0) < 0) {
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}
	if (size != sizeof(maxid)) {
		list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}

	size = sizeof(count);
	if (sysctlbyname("vm.zone_count", &count, &size, NULL, 0) < 0) {
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_VERSION;
		return (-1);
	}
	if (size != sizeof(count)) {
		list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}

	/*
	 * Initial buffer size guess: one stream header plus, for each of
	 * the "count" zones, a type header and one per-CPU record for each
	 * possible CPU.
	 */
	size = sizeof(*ushp) + count * (sizeof(*uthp) + sizeof(*upsp) *
	    (maxid + 1));

	buffer = malloc(size);
	if (buffer == NULL) {
		list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
		return (-1);
	}

	if (sysctlbyname("vm.zone_stats", buffer, &size, NULL, 0) < 0) {
		/*
		 * XXXRW: ENOMEM is an ambiguous return; we should perhaps
		 * bound the number of retries.
		 */
		if (errno == ENOMEM) {
			free(buffer);
			goto retry;
		}
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}

	if (size == 0) {
		free(buffer);
		return (0);
	}

	if (size < sizeof(*ushp)) {
		list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}
	p = buffer;
	ushp = (struct uma_stream_header *)p;
	p += sizeof(*ushp);

	if (ushp->ush_version != UMA_STREAM_VERSION) {
		list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}

	/*
	 * For the remainder of this function, we are quite trusting about
	 * the layout of structures and sizes, since we've determined we have
	 * a matching version and acceptable CPU count.
	 */
	maxcpus = ushp->ush_maxcpus;
	count = ushp->ush_count;
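
	/*
	 * Stream layout, as parsed below: a single uma_stream_header,
	 * followed by "count" records, each consisting of a uma_type_header
	 * and then "maxcpus" uma_percpu_stat structures.
	 */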
	for (i = 0; i < count; i++) {
		uthp = (struct uma_type_header *)p;
		p += sizeof(*uthp);

		if (hint_dontsearch == 0) {
			mtp = memstat_mtl_find(list, ALLOCATOR_UMA,
			    uthp->uth_name);
		} else
			mtp = NULL;
		if (mtp == NULL)
			mtp = _memstat_mt_allocate(list, ALLOCATOR_UMA,
			    uthp->uth_name, maxid + 1);
		if (mtp == NULL) {
			_memstat_mtl_empty(list);
			free(buffer);
			list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
			return (-1);
		}

		/*
		 * Reset the statistics on an existing node before
		 * accumulating fresh values.
		 */
		_memstat_mt_reset_stats(mtp, maxid + 1);

		mtp->mt_numallocs = uthp->uth_allocs;
		mtp->mt_numfrees = uthp->uth_frees;
		mtp->mt_failures = uthp->uth_fails;
		mtp->mt_sleeps = uthp->uth_sleeps;

		/* Fold in the per-CPU cache statistics. */
		for (j = 0; j < maxcpus; j++) {
			upsp = (struct uma_percpu_stat *)p;
			p += sizeof(*upsp);

			mtp->mt_percpu_cache[j].mtp_free =
			    upsp->ups_cache_free;
			mtp->mt_free += upsp->ups_cache_free;
			mtp->mt_numallocs += upsp->ups_allocs;
			mtp->mt_numfrees += upsp->ups_frees;
		}

		mtp->mt_size = uthp->uth_size;
		mtp->mt_rsize = uthp->uth_rsize;
		mtp->mt_memalloced = mtp->mt_numallocs * uthp->uth_size;
		mtp->mt_memfreed = mtp->mt_numfrees * uthp->uth_size;
		mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
		mtp->mt_countlimit = uthp->uth_limit;
		mtp->mt_byteslimit = uthp->uth_limit * uthp->uth_size;

		mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
		mtp->mt_zonefree = uthp->uth_zone_free;

		/*
		 * UMA secondary zones share a keg with the primary zone.  To
		 * avoid double-reporting of free items, report keg free
		 * items only in the primary zone.
		 */
		if (!(uthp->uth_zone_flags & UTH_ZONE_SECONDARY)) {
			mtp->mt_kegfree = uthp->uth_keg_free;
			mtp->mt_free += mtp->mt_kegfree;
		}
		mtp->mt_free += mtp->mt_zonefree;
	}

	free(buffer);

	return (0);
}
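
/*
 * Read "size" bytes from the kernel address "kvm_pointer" plus "offset"
 * into the local buffer at "address".
 */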
static int
kread(kvm_t *kvm, void *kvm_pointer, void *address, size_t size,
    size_t offset)
{
	ssize_t ret;

	ret = kvm_read(kvm, (unsigned long)kvm_pointer + offset, address,
	    size);
	if (ret < 0)
		return (MEMSTAT_ERROR_KVM);
	if ((size_t)ret != size)
		return (MEMSTAT_ERROR_KVM_SHORTREAD);
	return (0);
}
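
/*
 * Read a NUL-terminated string of at most "buflen" bytes from the kernel
 * address "kvm_pointer", one byte at a time; NUL-terminate the local copy
 * if the buffer fills before a terminator is found.
 */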
static int
kread_string(kvm_t *kvm, const void *kvm_pointer, char *buffer, int buflen)
{
	ssize_t ret;
	int i;

	for (i = 0; i < buflen; i++) {
		ret = kvm_read(kvm, (unsigned long)kvm_pointer + i,
		    &(buffer[i]), sizeof(char));
		if (ret < 0)
			return (MEMSTAT_ERROR_KVM);
		if ((size_t)ret != sizeof(char))
			return (MEMSTAT_ERROR_KVM_SHORTREAD);
		if (buffer[i] == '\0')
			return (0);
	}
	/* Truncate if no NUL was found within buflen bytes. */
	buffer[i - 1] = '\0';
	return (0);
}
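
/*
 * Read "size" bytes into "address" from "offset" bytes past the kernel
 * address of the namelist symbol identified by "index".
 */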
static int
kread_symbol(kvm_t *kvm, int index, void *address, size_t size,
    size_t offset)
{
	ssize_t ret;

	ret = kvm_read(kvm, namelist[index].n_value + offset, address, size);
	if (ret < 0)
		return (MEMSTAT_ERROR_KVM);
	if ((size_t)ret != size)
		return (MEMSTAT_ERROR_KVM_SHORTREAD);
	return (0);
}
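
/*
 * Example usage (a sketch, not part of this file): the kvm path is driven
 * the same way as the sysctl path, except that the caller first opens a
 * kernel memory handle with kvm_open(3):
 *
 *	kvm_t *kvm;
 *	struct memory_type_list *mtlp;
 *
 *	kvm = kvm_open(NULL, "/var/crash/vmcore.0", NULL, O_RDONLY,
 *	    "example");
 *	if (kvm == NULL)
 *		errx(1, "kvm_open failed");
 *	mtlp = memstat_mtl_alloc();
 *	if (mtlp == NULL)
 *		err(1, "memstat_mtl_alloc");
 *	if (memstat_kvm_uma(mtlp, kvm) < 0)
 *		errx(1, "memstat_kvm_uma: %s",
 *		    memstat_strerror(memstat_mtl_geterror(mtlp)));
 */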

/*
 * memstat_kvm_uma() is similar to memstat_sysctl_uma(), only it extracts
 * UMA(9) statistics from a kernel core/memory file.
 */
int
memstat_kvm_uma(struct memory_type_list *list, void *kvm_handle)
{
	LIST_HEAD(, uma_keg) uma_kegs;
	struct memory_type *mtp;
	struct uma_zone_domain uzd;
	struct uma_bucket *ubp, ub;
	struct uma_cache *ucp, *ucp_array;
	struct uma_zone *uzp, uz;
	struct uma_keg *kzp, kz;
	int hint_dontsearch, i, mp_maxid, ndomains, ret;
	char name[MEMTYPE_MAXNAME];
	cpuset_t all_cpus;
	long cpusetsize;
	kvm_t *kvm;

	kvm = (kvm_t *)kvm_handle;
	hint_dontsearch = LIST_EMPTY(&list->mtl_list);
	if (kvm_nlist(kvm, namelist) != 0) {
		list->mtl_error = MEMSTAT_ERROR_KVM;
		return (-1);
	}
	if (namelist[X_UMA_KEGS].n_type == 0 ||
	    namelist[X_UMA_KEGS].n_value == 0) {
		list->mtl_error = MEMSTAT_ERROR_KVM_NOSYMBOL;
		return (-1);
	}
	ret = kread_symbol(kvm, X_MP_MAXID, &mp_maxid, sizeof(mp_maxid), 0);
	if (ret != 0) {
		list->mtl_error = ret;
		return (-1);
	}
	ret = kread_symbol(kvm, X_VM_NDOMAINS, &ndomains,
	    sizeof(ndomains), 0);
	if (ret != 0) {
		list->mtl_error = ret;
		return (-1);
	}
	ret = kread_symbol(kvm, X_UMA_KEGS, &uma_kegs, sizeof(uma_kegs), 0);
	if (ret != 0) {
		list->mtl_error = ret;
		return (-1);
	}
	cpusetsize = sysconf(_SC_CPUSET_SIZE);
	if (cpusetsize == -1 || (u_long)cpusetsize > sizeof(cpuset_t)) {
		list->mtl_error = MEMSTAT_ERROR_KVM_NOSYMBOL;
		return (-1);
	}
	CPU_ZERO(&all_cpus);
	ret = kread_symbol(kvm, X_ALL_CPUS, &all_cpus, cpusetsize, 0);
	if (ret != 0) {
		list->mtl_error = ret;
		return (-1);
	}
	ucp_array = malloc(sizeof(struct uma_cache) * (mp_maxid + 1));
	if (ucp_array == NULL) {
		list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
		return (-1);
	}
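
	/*
	 * Walk the kernel's list of kegs and, within each keg, its zones.
	 * Note that LIST_NEXT() is applied to the local copies (kz, uz)
	 * just read, since the link pointers they contain are the kernel
	 * addresses of the next elements.
	 */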
	for (kzp = LIST_FIRST(&uma_kegs); kzp != NULL; kzp =
	    LIST_NEXT(&kz, uk_link)) {
		ret = kread(kvm, kzp, &kz, sizeof(kz), 0);
		if (ret != 0) {
			free(ucp_array);
			_memstat_mtl_empty(list);
			list->mtl_error = ret;
			return (-1);
		}
		for (uzp = LIST_FIRST(&kz.uk_zones); uzp != NULL; uzp =
		    LIST_NEXT(&uz, uz_link)) {
			ret = kread(kvm, uzp, &uz, sizeof(uz), 0);
			if (ret != 0) {
				free(ucp_array);
				_memstat_mtl_empty(list);
				list->mtl_error = ret;
				return (-1);
			}
			ret = kread(kvm, uzp, ucp_array,
			    sizeof(struct uma_cache) * (mp_maxid + 1),
			    offsetof(struct uma_zone, uz_cpu[0]));
			if (ret != 0) {
				free(ucp_array);
				_memstat_mtl_empty(list);
				list->mtl_error = ret;
				return (-1);
			}
			ret = kread_string(kvm, uz.uz_name, name,
			    MEMTYPE_MAXNAME);
			if (ret != 0) {
				free(ucp_array);
				_memstat_mtl_empty(list);
				list->mtl_error = ret;
				return (-1);
			}
			if (hint_dontsearch == 0) {
				mtp = memstat_mtl_find(list, ALLOCATOR_UMA,
				    name);
			} else
				mtp = NULL;
			if (mtp == NULL)
				mtp = _memstat_mt_allocate(list,
				    ALLOCATOR_UMA, name, mp_maxid + 1);
			if (mtp == NULL) {
				free(ucp_array);
				_memstat_mtl_empty(list);
				list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
				return (-1);
			}

			/*
			 * Reset the statistics on an existing node before
			 * accumulating fresh values.
			 */
			_memstat_mt_reset_stats(mtp, mp_maxid + 1);
			mtp->mt_numallocs = kvm_counter_u64_fetch(kvm,
			    (unsigned long)uz.uz_allocs);
			mtp->mt_numfrees = kvm_counter_u64_fetch(kvm,
			    (unsigned long)uz.uz_frees);
			mtp->mt_failures = kvm_counter_u64_fetch(kvm,
			    (unsigned long)uz.uz_fails);
			mtp->mt_sleeps = uz.uz_sleeps;
			/* UMA-internal zones do not use per-CPU caches. */
			if (kz.uk_flags & UMA_ZFLAG_INTERNAL)
				goto skip_percpu;
			for (i = 0; i < mp_maxid + 1; i++) {
				if (!CPU_ISSET(i, &all_cpus))
					continue;
				ucp = &ucp_array[i];
				mtp->mt_numallocs += ucp->uc_allocs;
				mtp->mt_numfrees += ucp->uc_frees;

				if (ucp->uc_allocbucket != NULL) {
					ret = kread(kvm, ucp->uc_allocbucket,
					    &ub, sizeof(ub), 0);
					if (ret != 0) {
						free(ucp_array);
						_memstat_mtl_empty(list);
						list->mtl_error = ret;
						return (-1);
					}
					mtp->mt_free += ub.ub_cnt;
				}
				if (ucp->uc_freebucket != NULL) {
					ret = kread(kvm, ucp->uc_freebucket,
					    &ub, sizeof(ub), 0);
					if (ret != 0) {
						free(ucp_array);
						_memstat_mtl_empty(list);
						list->mtl_error = ret;
						return (-1);
					}
					mtp->mt_free += ub.ub_cnt;
				}
			}
skip_percpu:
			mtp->mt_size = kz.uk_size;
			mtp->mt_rsize = kz.uk_rsize;
			mtp->mt_memalloced = mtp->mt_numallocs * mtp->mt_size;
			mtp->mt_memfreed = mtp->mt_numfrees * mtp->mt_size;
			mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
			mtp->mt_countlimit = uz.uz_max_items;
			mtp->mt_byteslimit = mtp->mt_countlimit * mtp->mt_size;
			mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
			/*
			 * Walk the per-domain bucket lists; treat a failed
			 * read as the end of the walk rather than a fatal
			 * error.
			 */
			for (i = 0; i < ndomains; i++) {
				ret = kread(kvm, &uz.uz_domain[i], &uzd,
				    sizeof(uzd), 0);
				if (ret != 0)
					break;
				for (ubp = LIST_FIRST(&uzd.uzd_buckets);
				    ubp != NULL;
				    ubp = LIST_NEXT(&ub, ub_link)) {
					ret = kread(kvm, ubp, &ub,
					    sizeof(ub), 0);
					if (ret != 0)
						break;
					mtp->mt_zonefree += ub.ub_cnt;
				}
			}
			/*
			 * A secondary zone shares a keg with the primary
			 * zone; count keg free items only once, in the
			 * primary zone.
			 */
			if (!((kz.uk_flags & UMA_ZONE_SECONDARY) &&
			    LIST_FIRST(&kz.uk_zones) != uzp)) {
				mtp->mt_kegfree = kz.uk_free;
				mtp->mt_free += mtp->mt_kegfree;
			}
			mtp->mt_free += mtp->mt_zonefree;
		}
	}
	free(ucp_array);
	return (0);
}