/*-
 * Copyright (c) 2005-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <err.h>
#include <errno.h>
#include <kvm.h>
#include <nlist.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "memstat.h"
#include "memstat_internal.h"
static struct nlist namelist[] = {
#define	X_UMA_KEGS	0
	{ .n_name = "_uma_kegs" },
#define	X_MP_MAXID	1
	{ .n_name = "_mp_maxid" },
#define	X_ALL_CPUS	2
	{ .n_name = "_all_cpus" },
	{ .n_name = "" },
};

/*
 * Extract uma(9) statistics from the running kernel, and store all memory
 * type information in the passed list.  For each type, check the list for an
 * existing entry with the right name/allocator -- if present, update that
 * entry.  Otherwise, add a new entry.  On error, the entire list will be
 * cleared, as entries will be in an inconsistent state.
 *
 * To reduce the amount of work for a list that starts empty, we keep around
 * a hint as to whether it was empty when we began, so we can avoid searching
 * the list for entries to update.  Updates are O(n^2) due to searching for
 * each entry before adding it.
 */
int
memstat_sysctl_uma(struct memory_type_list *list, int flags)
{
	struct uma_stream_header *ushp;
	struct uma_type_header *uthp;
	struct uma_percpu_stat *upsp;
	struct memory_type *mtp;
	int count, hint_dontsearch, i, j, maxcpus, maxid;
	char *buffer, *p;
	size_t size;

	hint_dontsearch = LIST_EMPTY(&list->mtl_list);

	/*
	 * Query the number of CPUs and the number of zones so that we can
	 * guess an initial buffer size.  We loop until we succeed or really
	 * fail.  Note that the value of maxcpus we query using sysctl is not
	 * the version we use when processing the real data -- that is read
	 * from the header.
	 */
retry:
	size = sizeof(maxid);
	if (sysctlbyname("kern.smp.maxid", &maxid, &size, NULL, 0) < 0) {
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}
	if (size != sizeof(maxid)) {
		list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}

	size = sizeof(count);
	if (sysctlbyname("vm.zone_count", &count, &size, NULL, 0) < 0) {
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_VERSION;
		return (-1);
	}
	if (size != sizeof(count)) {
		list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}
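
	/*
	 * The vm.zone_stats stream is a uma_stream_header followed, for
	 * each zone, by a uma_type_header and one uma_percpu_stat per CPU.
	 * The computed size is only an initial guess: if the kernel reports
	 * ENOMEM below, we retry with a fresh size.
	 */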
	size = sizeof(*uthp) + count * (sizeof(*uthp) + sizeof(*upsp) *
	    (maxid + 1));

	buffer = malloc(size);
	if (buffer == NULL) {
		list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
		return (-1);
	}

	if (sysctlbyname("vm.zone_stats", buffer, &size, NULL, 0) < 0) {
		/*
		 * XXXRW: ENOMEM is an ambiguous return, we should bound the
		 * number of loops, perhaps.
		 */
		if (errno == ENOMEM) {
			free(buffer);
			goto retry;
		}
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}

	if (size == 0) {
		free(buffer);
		return (0);
	}

	if (size < sizeof(*ushp)) {
		list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}
	p = buffer;
	ushp = (struct uma_stream_header *)p;
	p += sizeof(*ushp);

	if (ushp->ush_version != UMA_STREAM_VERSION) {
		list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}

	/*
	 * For the remainder of this function, we are quite trusting about
	 * the layout of structures and sizes, since we've determined we have
	 * a matching version and acceptable CPU count.
	 */
	maxcpus = ushp->ush_maxcpus;
	count = ushp->ush_count;
	for (i = 0; i < count; i++) {
		uthp = (struct uma_type_header *)p;
		p += sizeof(*uthp);

		if (hint_dontsearch == 0) {
			mtp = memstat_mtl_find(list, ALLOCATOR_UMA,
			    uthp->uth_name);
		} else
			mtp = NULL;
		if (mtp == NULL)
			mtp = _memstat_mt_allocate(list, ALLOCATOR_UMA,
			    uthp->uth_name, maxid + 1);
		if (mtp == NULL) {
			_memstat_mtl_empty(list);
			free(buffer);
			list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
			return (-1);
		}

		/*
		 * Reset the statistics on a current node.
		 */
		_memstat_mt_reset_stats(mtp, maxid + 1);

		mtp->mt_numallocs = uthp->uth_allocs;
		mtp->mt_numfrees = uthp->uth_frees;
		mtp->mt_failures = uthp->uth_fails;
		mtp->mt_sleeps = uthp->uth_sleeps;
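
		/*
		 * Fold per-CPU cache statistics into the zone totals:
		 * cached free items count toward mt_free, and per-CPU
		 * allocation/free counters are added to the zone counters.
		 */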
		for (j = 0; j < maxcpus; j++) {
			upsp = (struct uma_percpu_stat *)p;
			p += sizeof(*upsp);

			mtp->mt_percpu_cache[j].mtp_free =
			    upsp->ups_cache_free;
			mtp->mt_free += upsp->ups_cache_free;
			mtp->mt_numallocs += upsp->ups_allocs;
			mtp->mt_numfrees += upsp->ups_frees;
		}

		mtp->mt_size = uthp->uth_size;
		mtp->mt_rsize = uthp->uth_rsize;
		mtp->mt_memalloced = mtp->mt_numallocs * uthp->uth_size;
		mtp->mt_memfreed = mtp->mt_numfrees * uthp->uth_size;
		mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
		mtp->mt_countlimit = uthp->uth_limit;
		mtp->mt_byteslimit = uthp->uth_limit * uthp->uth_size;

		mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
		mtp->mt_zonefree = uthp->uth_zone_free;

		/*
		 * UMA secondary zones share a keg with the primary zone.  To
		 * avoid double-reporting of free items, report keg free
		 * items only in the primary zone.
		 */
		if (!(uthp->uth_zone_flags & UTH_ZONE_SECONDARY)) {
			mtp->mt_kegfree = uthp->uth_keg_free;
			mtp->mt_free += mtp->mt_kegfree;
		}
		mtp->mt_free += mtp->mt_zonefree;
	}

	free(buffer);
	return (0);
}
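
/*
 * Example (a hypothetical sketch, not part of the original file): a typical
 * consumer of the interface above, using only public accessors from
 * memstat.h.  The function name is illustrative; link against -lmemstat.
 */
static void __unused
uma_zone_report_example(void)
{
	struct memory_type_list *mtlp;
	struct memory_type *mtp;

	mtlp = memstat_mtl_alloc();		/* Allocate an empty list. */
	if (mtlp == NULL)
		return;
	if (memstat_sysctl_uma(mtlp, 0) < 0) {	/* Populate it via sysctl. */
		memstat_mtl_free(mtlp);
		return;
	}
	for (mtp = memstat_mtl_first(mtlp); mtp != NULL;
	    mtp = memstat_mtl_next(mtp))
		printf("%s: %llu allocs, %llu frees\n",
		    memstat_get_name(mtp),
		    (unsigned long long)memstat_get_numallocs(mtp),
		    (unsigned long long)memstat_get_numfrees(mtp));
	memstat_mtl_free(mtlp);
}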
240 kread(kvm_t *kvm, void *kvm_pointer, void *address, size_t size,
245 ret = kvm_read(kvm, (unsigned long)kvm_pointer + offset, address,
248 return (MEMSTAT_ERROR_KVM);
249 if ((size_t)ret != size)
250 return (MEMSTAT_ERROR_KVM_SHORTREAD);
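
/*
 * Read a NUL-terminated string from the kernel, one byte at a time, into a
 * local buffer; the result is truncated if it does not fit.
 */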
255 kread_string(kvm_t *kvm, const void *kvm_pointer, char *buffer, int buflen)
260 for (i = 0; i < buflen; i++) {
261 ret = kvm_read(kvm, (unsigned long)kvm_pointer + i,
262 &(buffer[i]), sizeof(char));
264 return (MEMSTAT_ERROR_KVM);
265 if ((size_t)ret != sizeof(char))
266 return (MEMSTAT_ERROR_KVM_SHORTREAD);
267 if (buffer[i] == '\0')
276 kread_symbol(kvm_t *kvm, int index, void *address, size_t size,
281 ret = kvm_read(kvm, namelist[index].n_value + offset, address, size);
283 return (MEMSTAT_ERROR_KVM);
284 if ((size_t)ret != size)
285 return (MEMSTAT_ERROR_KVM_SHORTREAD);

/*
 * memstat_kvm_uma() is similar to memstat_sysctl_uma(), only it extracts
 * UMA(9) statistics from a kernel core/memory file.
 */
int
memstat_kvm_uma(struct memory_type_list *list, void *kvm_handle)
{
	LIST_HEAD(, uma_keg) uma_kegs;
	struct memory_type *mtp;
	struct uma_bucket *ubp, ub;
	struct uma_cache *ucp, *ucp_array;
	struct uma_zone *uzp, uz;
	struct uma_keg *kzp, kz;
	int hint_dontsearch, i, mp_maxid, ret;
	char name[MEMTYPE_MAXNAME];
	cpuset_t all_cpus;
	long cpusetsize;
	kvm_t *kvm;

	kvm = (kvm_t *)kvm_handle;
	hint_dontsearch = LIST_EMPTY(&list->mtl_list);
	if (kvm_nlist(kvm, namelist) != 0) {
		list->mtl_error = MEMSTAT_ERROR_KVM;
		return (-1);
	}
	if (namelist[X_UMA_KEGS].n_type == 0 ||
	    namelist[X_UMA_KEGS].n_value == 0) {
		list->mtl_error = MEMSTAT_ERROR_KVM_NOSYMBOL;
		return (-1);
	}
	ret = kread_symbol(kvm, X_MP_MAXID, &mp_maxid, sizeof(mp_maxid), 0);
	if (ret != 0) {
		list->mtl_error = ret;
		return (-1);
	}
	ret = kread_symbol(kvm, X_UMA_KEGS, &uma_kegs, sizeof(uma_kegs), 0);
	if (ret != 0) {
		list->mtl_error = ret;
		return (-1);
	}
	cpusetsize = sysconf(_SC_CPUSET_SIZE);
	if (cpusetsize == -1 || (u_long)cpusetsize > sizeof(cpuset_t)) {
		list->mtl_error = MEMSTAT_ERROR_KVM_NOSYMBOL;
		return (-1);
	}
	CPU_ZERO(&all_cpus);
	ret = kread_symbol(kvm, X_ALL_CPUS, &all_cpus, cpusetsize, 0);
	if (ret != 0) {
		list->mtl_error = ret;
		return (-1);
	}
	ucp_array = malloc(sizeof(struct uma_cache) * (mp_maxid + 1));
	if (ucp_array == NULL) {
		list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
		return (-1);
	}
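
	/*
	 * Walk the kernel's list of kegs and, within each keg, its zones,
	 * copying each structure into user space before inspecting it.
	 */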
	for (kzp = LIST_FIRST(&uma_kegs); kzp != NULL; kzp =
	    LIST_NEXT(&kz, uk_link)) {
		ret = kread(kvm, kzp, &kz, sizeof(kz), 0);
		if (ret != 0) {
			free(ucp_array);
			_memstat_mtl_empty(list);
			list->mtl_error = ret;
			return (-1);
		}
		for (uzp = LIST_FIRST(&kz.uk_zones); uzp != NULL; uzp =
		    LIST_NEXT(&uz, uz_link)) {
			ret = kread(kvm, uzp, &uz, sizeof(uz), 0);
			if (ret != 0) {
				free(ucp_array);
				_memstat_mtl_empty(list);
				list->mtl_error = ret;
				return (-1);
			}
			ret = kread(kvm, uzp, ucp_array,
			    sizeof(struct uma_cache) * (mp_maxid + 1),
			    offsetof(struct uma_zone, uz_cpu[0]));
			if (ret != 0) {
				free(ucp_array);
				_memstat_mtl_empty(list);
				list->mtl_error = ret;
				return (-1);
			}
			ret = kread_string(kvm, uz.uz_name, name,
			    MEMTYPE_MAXNAME);
			if (ret != 0) {
				free(ucp_array);
				_memstat_mtl_empty(list);
				list->mtl_error = ret;
				return (-1);
			}
			if (hint_dontsearch == 0) {
				mtp = memstat_mtl_find(list, ALLOCATOR_UMA,
				    name);
			} else
				mtp = NULL;
			if (mtp == NULL)
				mtp = _memstat_mt_allocate(list,
				    ALLOCATOR_UMA, name, mp_maxid + 1);
			if (mtp == NULL) {
				free(ucp_array);
				_memstat_mtl_empty(list);
				list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
				return (-1);
			}

			/*
			 * Reset the statistics on a current node.
			 */
			_memstat_mt_reset_stats(mtp, mp_maxid + 1);
			mtp->mt_numallocs = uz.uz_allocs;
			mtp->mt_numfrees = uz.uz_frees;
			mtp->mt_failures = uz.uz_fails;
			mtp->mt_sleeps = uz.uz_sleeps;
			/* Internal zones have no per-CPU caches to scan. */
			if (kz.uk_flags & UMA_ZFLAG_INTERNAL)
				goto skip_percpu;
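
			/*
			 * Add in per-CPU cache counters, plus any items
			 * sitting in each CPU's alloc and free buckets.
			 */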
			for (i = 0; i < mp_maxid + 1; i++) {
				if (!CPU_ISSET(i, &all_cpus))
					continue;
				ucp = &ucp_array[i];
				mtp->mt_numallocs += ucp->uc_allocs;
				mtp->mt_numfrees += ucp->uc_frees;

				if (ucp->uc_allocbucket != NULL) {
					ret = kread(kvm, ucp->uc_allocbucket,
					    &ub, sizeof(ub), 0);
					if (ret != 0) {
						free(ucp_array);
						_memstat_mtl_empty(list);
						list->mtl_error = ret;
						return (-1);
					}
					mtp->mt_free += ub.ub_cnt;
				}
				if (ucp->uc_freebucket != NULL) {
					ret = kread(kvm, ucp->uc_freebucket,
					    &ub, sizeof(ub), 0);
					if (ret != 0) {
						free(ucp_array);
						_memstat_mtl_empty(list);
						list->mtl_error = ret;
						return (-1);
					}
					mtp->mt_free += ub.ub_cnt;
				}
			}
skip_percpu:
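
			/*
			 * Sizes and limits live in the keg, which may be
			 * shared by several zones.
			 */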
			mtp->mt_size = kz.uk_size;
			mtp->mt_rsize = kz.uk_rsize;
			mtp->mt_memalloced = mtp->mt_numallocs * mtp->mt_size;
			mtp->mt_memfreed = mtp->mt_numfrees * mtp->mt_size;
			mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
			if (kz.uk_ppera > 1)
				mtp->mt_countlimit = kz.uk_maxpages /
				    kz.uk_ipers;
			else
				mtp->mt_countlimit = kz.uk_maxpages *
				    kz.uk_ipers;
			mtp->mt_byteslimit = mtp->mt_countlimit * mtp->mt_size;
			mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
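
			/*
			 * Count free items cached in the zone's full-bucket
			 * list.
			 */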
			for (ubp = LIST_FIRST(&uz.uz_buckets); ubp !=
			    NULL; ubp = LIST_NEXT(&ub, ub_link)) {
				ret = kread(kvm, ubp, &ub, sizeof(ub), 0);
				if (ret != 0) {
					free(ucp_array);
					_memstat_mtl_empty(list);
					list->mtl_error = ret;
					return (-1);
				}
				mtp->mt_zonefree += ub.ub_cnt;
			}

			/*
			 * Report keg free items only once, in the keg's
			 * primary zone.
			 */
			if (!((kz.uk_flags & UMA_ZONE_SECONDARY) &&
			    LIST_FIRST(&kz.uk_zones) != uzp)) {
				mtp->mt_kegfree = kz.uk_free;
				mtp->mt_free += mtp->mt_kegfree;
			}
			mtp->mt_free += mtp->mt_zonefree;
		}
	}
	free(ucp_array);
	return (0);
}
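
/*
 * Example (a hypothetical sketch, not part of the original file): the
 * core-file variant takes an already-open kvm(3) handle, e.g. one returned
 * by kvm_openfiles(3); on failure, the list's error code describes why.
 * The function name is illustrative.
 */
static int __unused
uma_kvm_report_example(struct memory_type_list *mtlp, kvm_t *kvm)
{

	if (memstat_kvm_uma(mtlp, kvm) < 0)
		return (memstat_mtl_geterror(mtlp));
	return (0);
}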