/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#define _WANT_FREEBSD_BITSET

#include <sys/param.h>
#include <sys/counter.h>
#include <sys/cpuset.h>
#include <sys/sysctl.h>

#include <vm/uma.h>
#include <vm/uma_int.h>

#include <errno.h>
#include <kvm.h>
#include <nlist.h>
#include <stddef.h>
#include <stdlib.h>
#include <unistd.h>

#include "memstat.h"
#include "memstat_internal.h"
static struct nlist namelist[] = {
#define X_UMA_KEGS      0
        { .n_name = "_uma_kegs" },
#define X_MP_MAXID      1
        { .n_name = "_mp_maxid" },
#define X_ALL_CPUS      2
        { .n_name = "_all_cpus" },
#define X_VM_NDOMAINS   3
        { .n_name = "_vm_ndomains" },
        { .n_name = "" },
};
/*
 * Extract uma(9) statistics from the running kernel, and store all memory
 * type information in the passed list.  For each type, check the list for an
 * existing entry with the right name/allocator -- if present, update that
 * entry.  Otherwise, add a new entry.  On error, the entire list will be
 * cleared, as entries will be in an inconsistent state.
 *
 * To reduce the level of work for a list that starts empty, we keep around a
 * hint as to whether it was empty when we began, so we can avoid searching
 * the list for entries to update.  Updates are O(n^2) due to searching for
 * each entry before adding it.
 */
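/*
 * Example of a typical caller (a sketch only; these are the public
 * libmemstat(3) accessors, not part of this file):
 *
 *      struct memory_type_list *mtlp;
 *      struct memory_type *mtp;
 *
 *      mtlp = memstat_mtl_alloc();
 *      if (mtlp == NULL)
 *              err(1, "memstat_mtl_alloc");
 *      if (memstat_sysctl_uma(mtlp, 0) < 0)
 *              errx(1, "memstat_sysctl_uma: %d",
 *                  memstat_mtl_geterror(mtlp));
 *      for (mtp = memstat_mtl_first(mtlp); mtp != NULL;
 *          mtp = memstat_mtl_next(mtp))
 *              printf("%s: %ju allocs\n", memstat_get_name(mtp),
 *                  (uintmax_t)memstat_get_numallocs(mtp));
 *      memstat_mtl_free(mtlp);
 */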
int
memstat_sysctl_uma(struct memory_type_list *list, int flags)
{
        struct uma_stream_header *ushp;
        struct uma_type_header *uthp;
        struct uma_percpu_stat *upsp;
        struct memory_type *mtp;
        int count, hint_dontsearch, i, j, maxcpus, maxid;
        char *buffer, *p;
        size_t size;

        hint_dontsearch = LIST_EMPTY(&list->mtl_list);
        /*
         * Query the number of CPUs and the number of malloc types so that
         * we can guess an initial buffer size.  We loop until we succeed or
         * really fail.  Note that the value of maxcpus we query using sysctl
         * is not the version we use when processing the real data -- that is
         * read from the stream header.
         */
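        /*
         * A sizing alternative (sketch): sysctl(3) reports the required
         * buffer size when passed a NULL buffer, e.g.:
         *
         *      size_t sz = 0;
         *      if (sysctlbyname("vm.zone_stats", NULL, &sz, NULL, 0) == 0)
         *              buffer = malloc(sz);
         *
         * The estimate-and-retry approach below is used instead because the
         * zone list can grow between the probe and the real read.
         */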
retry:
        size = sizeof(maxid);
        if (sysctlbyname("kern.smp.maxid", &maxid, &size, NULL, 0) < 0) {
                if (errno == EACCES || errno == EPERM)
                        list->mtl_error = MEMSTAT_ERROR_PERMISSION;
                else
                        list->mtl_error = MEMSTAT_ERROR_DATAERROR;
                return (-1);
        }
        if (size != sizeof(maxid)) {
                list->mtl_error = MEMSTAT_ERROR_DATAERROR;
                return (-1);
        }
        size = sizeof(count);
        if (sysctlbyname("vm.zone_count", &count, &size, NULL, 0) < 0) {
                if (errno == EACCES || errno == EPERM)
                        list->mtl_error = MEMSTAT_ERROR_PERMISSION;
                else
                        list->mtl_error = MEMSTAT_ERROR_VERSION;
                return (-1);
        }
        if (size != sizeof(count)) {
                list->mtl_error = MEMSTAT_ERROR_DATAERROR;
                return (-1);
        }
        /*
         * The stream is one stream header followed by, for each zone, a
         * type header and one per-CPU stat record per possible CPU.
         */
        size = sizeof(*ushp) + count * (sizeof(*uthp) + sizeof(*upsp) *
            (maxid + 1));

        buffer = malloc(size);
        if (buffer == NULL) {
                list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
                return (-1);
        }
        if (sysctlbyname("vm.zone_stats", buffer, &size, NULL, 0) < 0) {
                /*
                 * XXXRW: ENOMEM is an ambiguous return; we should perhaps
                 * bound the number of loops.
                 */
                if (errno == ENOMEM) {
                        free(buffer);
                        goto retry;
                }
                if (errno == EACCES || errno == EPERM)
                        list->mtl_error = MEMSTAT_ERROR_PERMISSION;
                else
                        list->mtl_error = MEMSTAT_ERROR_VERSION;
                free(buffer);
                return (-1);
        }
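        /*
         * A bounded-retry variant addressing the XXXRW note (a sketch only,
         * with an assumed limit of four attempts):
         *
         *      for (tries = 0; tries < 4; tries++) {
         *              if (sysctlbyname("vm.zone_stats", buffer, &size,
         *                  NULL, 0) == 0 || errno != ENOMEM)
         *                      break;
         *              free(buffer);
         *              size *= 2;
         *              if ((buffer = malloc(size)) == NULL)
         *                      break;
         *      }
         */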
        if (size < sizeof(*ushp)) {
                list->mtl_error = MEMSTAT_ERROR_VERSION;
                free(buffer);
                return (-1);
        }
        p = buffer;
        ushp = (struct uma_stream_header *)p;
        p += sizeof(*ushp);

        if (ushp->ush_version != UMA_STREAM_VERSION) {
                list->mtl_error = MEMSTAT_ERROR_VERSION;
                free(buffer);
                return (-1);
        }
        /*
         * For the remainder of this function, we are quite trusting about
         * the layout of structures and sizes, since we've determined we have
         * a matching version and acceptable CPU count.
         */
        maxcpus = ushp->ush_maxcpus;
        count = ushp->ush_count;
        for (i = 0; i < count; i++) {
                uthp = (struct uma_type_header *)p;
                p += sizeof(*uthp);

                if (hint_dontsearch == 0) {
                        mtp = memstat_mtl_find(list, ALLOCATOR_UMA,
                            uthp->uth_name);
                } else
                        mtp = NULL;
                if (mtp == NULL)
                        mtp = _memstat_mt_allocate(list, ALLOCATOR_UMA,
                            uthp->uth_name, maxid + 1);
                if (mtp == NULL) {
                        _memstat_mtl_empty(list);
                        free(buffer);
                        list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
                        return (-1);
                }
                /*
                 * Reset the statistics on the current node.
                 */
                _memstat_mt_reset_stats(mtp, maxid + 1);

                mtp->mt_numallocs = uthp->uth_allocs;
                mtp->mt_numfrees = uthp->uth_frees;
                mtp->mt_failures = uthp->uth_fails;
                mtp->mt_sleeps = uthp->uth_sleeps;
                mtp->mt_xdomain = uthp->uth_xdomain;
                for (j = 0; j < maxcpus; j++) {
                        upsp = (struct uma_percpu_stat *)p;
                        p += sizeof(*upsp);

                        mtp->mt_percpu_cache[j].mtp_free =
                            upsp->ups_cache_free;
                        mtp->mt_free += upsp->ups_cache_free;
                        mtp->mt_numallocs += upsp->ups_allocs;
                        mtp->mt_numfrees += upsp->ups_frees;
                }
                /*
                 * Values for uth_allocs and uth_frees are snapshots, so the
                 * kernel may report more frees than allocs; see counter(9)
                 * for details.
                 */
                if (mtp->mt_numallocs < mtp->mt_numfrees)
                        mtp->mt_numallocs = mtp->mt_numfrees;
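                /*
                 * Concrete illustration (hypothetical numbers): if one CPU's
                 * alloc counter is fetched as 100 before another CPU's free
                 * counter advances past it, the totals can read allocs = 100
                 * and frees = 102.  The clamp above keeps derived values
                 * such as mt_count and mt_bytes non-negative.
                 */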
                mtp->mt_size = uthp->uth_size;
                mtp->mt_rsize = uthp->uth_rsize;
                mtp->mt_memalloced = mtp->mt_numallocs * uthp->uth_size;
                mtp->mt_memfreed = mtp->mt_numfrees * uthp->uth_size;
                mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
                mtp->mt_countlimit = uthp->uth_limit;
                mtp->mt_byteslimit = uthp->uth_limit * uthp->uth_size;

                mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
                mtp->mt_zonefree = uthp->uth_zone_free;
                /*
                 * UMA secondary zones share a keg with the primary zone.  To
                 * avoid double-reporting of free items, report keg free
                 * items only in the primary zone.
                 */
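                /*
                 * For example, a zone created with uma_zsecond_create()
                 * allocates its items from the keg of the primary zone it
                 * was derived from; counting keg free items under both
                 * zones would double-count them.
                 */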
                if (!(uthp->uth_zone_flags & UTH_ZONE_SECONDARY)) {
                        mtp->mt_kegfree = uthp->uth_keg_free;
                        mtp->mt_free += mtp->mt_kegfree;
                }
                mtp->mt_free += mtp->mt_zonefree;
        }
        free(buffer);
        return (0);
}
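/*
 * kread() copies a fixed-size object from kernel address kvm_pointer +
 * offset into a local buffer, mapping kvm(3) failures and short reads onto
 * MEMSTAT_ERROR_* values.
 */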
static int
kread(kvm_t *kvm, void *kvm_pointer, void *address, size_t size,
    size_t offset)
{
        ssize_t ret;

        ret = kvm_read(kvm, (unsigned long)kvm_pointer + offset, address,
            size);
        if (ret < 0)
                return (MEMSTAT_ERROR_KVM);
        if ((size_t)ret != size)
                return (MEMSTAT_ERROR_KVM_SHORTREAD);
        return (0);
}
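/*
 * kread_string() copies a nul-terminated kernel string byte by byte,
 * stopping at the terminator or when the local buffer fills, in which case
 * the string is truncated.
 */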
static int
kread_string(kvm_t *kvm, const void *kvm_pointer, char *buffer, int buflen)
{
        ssize_t ret;
        int i;

        for (i = 0; i < buflen; i++) {
                ret = kvm_read(kvm, (unsigned long)kvm_pointer + i,
                    &(buffer[i]), sizeof(char));
                if (ret < 0)
                        return (MEMSTAT_ERROR_KVM);
                if ((size_t)ret != sizeof(char))
                        return (MEMSTAT_ERROR_KVM_SHORTREAD);
                if (buffer[i] == '\0')
                        return (0);
        }
        /* Truncate. */
        buffer[i - 1] = '\0';
        return (0);
}
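/*
 * kread_symbol() is like kread(), but the source address is taken from a
 * previously resolved namelist[] entry rather than a kernel pointer.
 */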
static int
kread_symbol(kvm_t *kvm, int index, void *address, size_t size,
    size_t offset)
{
        ssize_t ret;

        ret = kvm_read(kvm, namelist[index].n_value + offset, address, size);
        if (ret < 0)
                return (MEMSTAT_ERROR_KVM);
        if ((size_t)ret != size)
                return (MEMSTAT_ERROR_KVM_SHORTREAD);
        return (0);
}
/*
 * memstat_kvm_uma() is similar to memstat_sysctl_uma(), only it extracts
 * UMA(9) statistics from a kernel core/memory file.
 */
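/*
 * Example of driving this against a crash dump (a sketch; the vmcore path
 * is illustrative only):
 *
 *      kvm_t *kvm;
 *      char errbuf[_POSIX2_LINE_MAX];
 *
 *      kvm = kvm_openfiles(NULL, "/var/crash/vmcore.0", NULL, O_RDONLY,
 *          errbuf);
 *      if (kvm == NULL)
 *              errx(1, "kvm_openfiles: %s", errbuf);
 *      if (memstat_kvm_uma(mtlp, kvm) < 0)
 *              errx(1, "memstat_kvm_uma: %d", memstat_mtl_geterror(mtlp));
 */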
int
memstat_kvm_uma(struct memory_type_list *list, void *kvm_handle)
{
        LIST_HEAD(, uma_keg) uma_kegs;
        struct memory_type *mtp;
        struct uma_zone_domain uzd;
        struct uma_domain ukd;
        struct uma_bucket *ubp, ub;
        struct uma_cache *ucp, *ucp_array;
        struct uma_zone *uzp, uz;
        struct uma_keg *kzp, kz;
        uint64_t kegfree;
        int hint_dontsearch, i, mp_maxid, ndomains, ret;
        char name[MEMTYPE_MAXNAME];
        cpuset_t all_cpus;
        long cpusetsize;
        kvm_t *kvm;

        kvm = (kvm_t *)kvm_handle;
        hint_dontsearch = LIST_EMPTY(&list->mtl_list);
        if (kvm_nlist(kvm, namelist) != 0) {
                list->mtl_error = MEMSTAT_ERROR_KVM;
                return (-1);
        }
        if (namelist[X_UMA_KEGS].n_type == 0 ||
            namelist[X_UMA_KEGS].n_value == 0) {
                list->mtl_error = MEMSTAT_ERROR_KVM_NOSYMBOL;
                return (-1);
        }
        ret = kread_symbol(kvm, X_MP_MAXID, &mp_maxid, sizeof(mp_maxid), 0);
        if (ret != 0) {
                list->mtl_error = ret;
                return (-1);
        }
        ret = kread_symbol(kvm, X_VM_NDOMAINS, &ndomains,
            sizeof(ndomains), 0);
        if (ret != 0) {
                list->mtl_error = ret;
                return (-1);
        }
        ret = kread_symbol(kvm, X_UMA_KEGS, &uma_kegs, sizeof(uma_kegs), 0);
        if (ret != 0) {
                list->mtl_error = ret;
                return (-1);
        }
        cpusetsize = sysconf(_SC_CPUSET_SIZE);
        if (cpusetsize == -1 || (u_long)cpusetsize > sizeof(cpuset_t)) {
                list->mtl_error = MEMSTAT_ERROR_KVM_NOSYMBOL;
                return (-1);
        }
        CPU_ZERO(&all_cpus);
        ret = kread_symbol(kvm, X_ALL_CPUS, &all_cpus, cpusetsize, 0);
        if (ret != 0) {
                list->mtl_error = ret;
                return (-1);
        }
        ucp_array = malloc(sizeof(struct uma_cache) * (mp_maxid + 1));
        if (ucp_array == NULL) {
                list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
                return (-1);
        }
        /*
         * Walk the kernel's list of kegs.  kzp is a kernel pointer, so each
         * keg is copied into the local kz before use; the iterator must
         * therefore read the link field from the local copy.
         */
        for (kzp = LIST_FIRST(&uma_kegs); kzp != NULL; kzp =
            LIST_NEXT(&kz, uk_link)) {
                ret = kread(kvm, kzp, &kz, sizeof(kz), 0);
                if (ret != 0) {
                        free(ucp_array);
                        _memstat_mtl_empty(list);
                        list->mtl_error = ret;
                        return (-1);
                }
                for (uzp = LIST_FIRST(&kz.uk_zones); uzp != NULL; uzp =
                    LIST_NEXT(&uz, uz_link)) {
                        ret = kread(kvm, uzp, &uz, sizeof(uz), 0);
                        if (ret != 0) {
                                free(ucp_array);
                                _memstat_mtl_empty(list);
                                list->mtl_error = ret;
                                return (-1);
                        }
                        ret = kread(kvm, uzp, ucp_array,
                            sizeof(struct uma_cache) * (mp_maxid + 1),
                            offsetof(struct uma_zone, uz_cpu[0]));
                        if (ret != 0) {
                                free(ucp_array);
                                _memstat_mtl_empty(list);
                                list->mtl_error = ret;
                                return (-1);
                        }
                        ret = kread_string(kvm, uz.uz_name, name,
                            MEMTYPE_MAXNAME);
                        if (ret != 0) {
                                free(ucp_array);
                                _memstat_mtl_empty(list);
                                list->mtl_error = ret;
                                return (-1);
                        }
                        if (hint_dontsearch == 0) {
                                mtp = memstat_mtl_find(list, ALLOCATOR_UMA,
                                    name);
                        } else
                                mtp = NULL;
                        if (mtp == NULL)
                                mtp = _memstat_mt_allocate(list,
                                    ALLOCATOR_UMA, name, mp_maxid + 1);
                        if (mtp == NULL) {
                                free(ucp_array);
                                _memstat_mtl_empty(list);
                                list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
                                return (-1);
                        }
                        /*
                         * Reset the statistics on the current node.
                         */
                        _memstat_mt_reset_stats(mtp, mp_maxid + 1);
                        mtp->mt_numallocs = kvm_counter_u64_fetch(kvm,
                            (unsigned long)uz.uz_allocs);
                        mtp->mt_numfrees = kvm_counter_u64_fetch(kvm,
                            (unsigned long)uz.uz_frees);
                        mtp->mt_failures = kvm_counter_u64_fetch(kvm,
                            (unsigned long)uz.uz_fails);
                        mtp->mt_xdomain = kvm_counter_u64_fetch(kvm,
                            (unsigned long)uz.uz_xdomain);
                        mtp->mt_sleeps = uz.uz_sleeps;
                        /* See comment above in memstat_sysctl_uma(). */
                        if (mtp->mt_numallocs < mtp->mt_numfrees)
                                mtp->mt_numallocs = mtp->mt_numfrees;
                        /*
                         * Internal zones have no per-CPU caches to scan.
                         */
                        if (kz.uk_flags & UMA_ZFLAG_INTERNAL)
                                goto skip_percpu;
                        for (i = 0; i < mp_maxid + 1; i++) {
                                if (!CPU_ISSET(i, &all_cpus))
                                        continue;
                                ucp = &ucp_array[i];
                                mtp->mt_numallocs += ucp->uc_allocs;
                                mtp->mt_numfrees += ucp->uc_frees;

                                mtp->mt_free += ucp->uc_allocbucket.ucb_cnt;
                                mtp->mt_free += ucp->uc_freebucket.ucb_cnt;
                                mtp->mt_free += ucp->uc_crossbucket.ucb_cnt;
                        }
skip_percpu:
                        mtp->mt_size = kz.uk_size;
                        mtp->mt_rsize = kz.uk_rsize;
                        mtp->mt_memalloced = mtp->mt_numallocs * mtp->mt_size;
                        mtp->mt_memfreed = mtp->mt_numfrees * mtp->mt_size;
                        mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
                        mtp->mt_countlimit = uz.uz_max_items;
                        mtp->mt_byteslimit = mtp->mt_countlimit * mtp->mt_size;
                        mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
                        for (i = 0; i < ndomains; i++) {
                                ret = kread(kvm, ZDOM_GET(uzp, i), &uzd,
                                    sizeof(uzd), 0);
                                if (ret != 0)
                                        continue;
                                for (ubp =
                                    STAILQ_FIRST(&uzd.uzd_buckets);
                                    ubp != NULL;
                                    ubp = STAILQ_NEXT(&ub, ub_link)) {
                                        ret = kread(kvm, ubp, &ub,
                                            sizeof(ub), 0);
                                        if (ret != 0)
                                                break;
                                        mtp->mt_zonefree += ub.ub_cnt;
                                }
                        }
                        /*
                         * UMA secondary zones share a keg with the primary
                         * zone; see the comment in memstat_sysctl_uma().
                         * Only sum keg free items for the primary zone.
                         */
                        if (!((kz.uk_flags & UMA_ZONE_SECONDARY) &&
                            LIST_FIRST(&kz.uk_zones) != uzp)) {
                                kegfree = 0;
                                for (i = 0; i < ndomains; i++) {
                                        ret = kread(kvm, &kzp->uk_domain[i],
                                            &ukd, sizeof(ukd), 0);
                                        if (ret == 0)
                                                kegfree += ukd.ud_free_items;
                                }
                                mtp->mt_kegfree = kegfree;
                                mtp->mt_free += mtp->mt_kegfree;
                        }
                        mtp->mt_free += mtp->mt_zonefree;
                }
        }
        free(ucp_array);
        return (0);
}