 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
29 #include <sys/param.h>
31 #define LIBMEMSTAT /* Cause vm_page.h not to include opt_vmpage.h */
33 #include <vm/vm_page.h>
36 #include <vm/uma_int.h>
44 static struct nlist namelist[] = {
46 { .n_name = "_uma_kegs" },
47 #define X_MP_MAXCPUS 1
48 { .n_name = "_mp_maxcpus" },
50 { .n_name = "_mp_maxid" },
52 { .n_name = "_all_cpus" },
60 fprintf(stderr, "umastat\n");
65 kread(kvm_t *kvm, void *kvm_pointer, void *address, size_t size,
70 ret = kvm_read(kvm, (unsigned long)kvm_pointer + offset, address,
73 return (MEMSTAT_ERROR_KVM);
74 if ((size_t)ret != size)
75 return (MEMSTAT_ERROR_KVM_SHORTREAD);
80 kread_string(kvm_t *kvm, void *kvm_pointer, char *buffer, int buflen)
85 for (i = 0; i < buflen; i++) {
86 ret = kvm_read(kvm, (unsigned long)kvm_pointer + i,
87 &(buffer[i]), sizeof(char));
89 return (MEMSTAT_ERROR_KVM);
90 if ((size_t)ret != sizeof(char))
91 return (MEMSTAT_ERROR_KVM_SHORTREAD);
92 if (buffer[i] == '\0')
101 kread_symbol(kvm_t *kvm, int index, void *address, size_t size,
106 ret = kvm_read(kvm, namelist[index].n_value + offset, address, size);
108 return (MEMSTAT_ERROR_KVM);
109 if ((size_t)ret != size)
110 return (MEMSTAT_ERROR_KVM_SHORTREAD);
114 static const struct flaginfo {
118 { UMA_ZFLAG_PRIVALLOC, "privalloc" },
119 { UMA_ZFLAG_INTERNAL, "internal" },
120 { UMA_ZFLAG_FULL, "full" },
121 { UMA_ZFLAG_CACHEONLY, "cacheonly" },
122 { UMA_ZONE_PAGEABLE, "pageable" },
123 { UMA_ZONE_ZINIT, "zinit" },
124 { UMA_ZONE_STATIC, "static" },
125 { UMA_ZONE_OFFPAGE, "offpage" },
126 { UMA_ZONE_MALLOC, "malloc" },
127 { UMA_ZONE_NOFREE, "nofree" },
128 { UMA_ZONE_MTXCLASS, "mtxclass" },
129 { UMA_ZONE_VM, "vm" },
130 { UMA_ZONE_HASH, "hash" },
131 { UMA_ZONE_SECONDARY, "secondary" },
132 { UMA_ZONE_REFCNT, "refcnt" },
133 { UMA_ZONE_MAXBUCKET, "maxbucket" },
135 static const int flaginfo_count = sizeof(flaginfo) / sizeof(struct flaginfo);
138 uma_print_keg_flags(struct uma_keg *ukp, const char *spaces)
142 if (!ukp->uk_flags) {
143 printf("%suk_flags = 0;\n", spaces);
147 printf("%suk_flags = ", spaces);
148 for (i = 0, count = 0; i < flaginfo_count; i++) {
149 if (ukp->uk_flags & flaginfo[i].fi_flag) {
152 printf(flaginfo[i].fi_name);
160 uma_print_keg_align(struct uma_keg *ukp, const char *spaces)
163 switch(ukp->uk_align) {
165 printf("%suk_align = UMA_ALIGN_PTR;\n", spaces);
170 printf("%suk_align = UMA_ALIGN_LONG;\n", spaces);
174 printf("%suk_align = UMA_ALIGN_INT;\n", spaces);
178 case UMA_ALIGN_SHORT:
179 printf("%suk_align = UMA_ALIGN_SHORT;\n", spaces);
183 printf("%suk_align = UMA_ALIGN_CHAR;\n", spaces);
186 case UMA_ALIGN_CACHE:
187 printf("%suk_align = UMA_ALIGN_CACHE;\n", spaces);
191 printf("%suk_align = %d\n", spaces, ukp->uk_align);
195 LIST_HEAD(bucketlist, uma_bucket);
198 uma_print_bucket(struct uma_bucket *ubp, const char *spaces)
201 printf("{ ub_cnt = %d, ub_entries = %d }", ubp->ub_cnt,
206 uma_print_bucketlist(kvm_t *kvm, struct bucketlist *bucketlist,
207 const char *name, const char *spaces)
209 struct uma_bucket *ubp, ub;
210 uint64_t total_entries, total_cnt;
213 printf("%s%s {", spaces, name);
215 total_entries = total_cnt = 0;
217 for (ubp = LIST_FIRST(bucketlist); ubp != NULL; ubp =
218 LIST_NEXT(&ub, ub_link)) {
219 ret = kread(kvm, ubp, &ub, sizeof(ub), 0);
221 errx(-1, "uma_print_bucketlist: %s", kvm_geterr(kvm));
223 printf("\n%s ", spaces);
224 uma_print_bucket(&ub, "");
226 total_entries += ub.ub_entries;
227 total_cnt += ub.ub_cnt;
232 printf("%s}; // total cnt %llu, total entries %llu\n", spaces,
233 total_cnt, total_entries);
237 uma_print_cache(kvm_t *kvm, struct uma_cache *cache, const char *name,
238 int cpu, const char *spaces, int *ub_cnt_add, int *ub_entries_add)
240 struct uma_bucket ub;
243 printf("%s%s[%d] = {\n", spaces, name, cpu);
244 printf("%s uc_frees = %llu;\n", spaces, cache->uc_frees);
245 printf("%s uc_allocs = %llu;\n", spaces, cache->uc_allocs);
247 if (cache->uc_freebucket != NULL) {
248 ret = kread(kvm, cache->uc_freebucket, &ub, sizeof(ub), 0);
250 errx(-1, "uma_print_cache: %s", kvm_geterr(kvm));
251 printf("%s uc_freebucket ", spaces);
252 uma_print_bucket(&ub, spaces);
254 if (ub_cnt_add != NULL)
255 *ub_cnt_add += ub.ub_cnt;
256 if (ub_entries_add != NULL)
257 *ub_entries_add += ub.ub_entries;
259 printf("%s uc_freebucket = NULL;\n", spaces);
260 if (cache->uc_allocbucket != NULL) {
261 ret = kread(kvm, cache->uc_allocbucket, &ub, sizeof(ub), 0);
263 errx(-1, "uma_print_cache: %s", kvm_geterr(kvm));
264 printf("%s uc_allocbucket ", spaces);
265 uma_print_bucket(&ub, spaces);
267 if (ub_cnt_add != NULL)
268 *ub_cnt_add += ub.ub_cnt;
269 if (ub_entries_add != NULL)
270 *ub_entries_add += ub.ub_entries;
272 printf("%s uc_allocbucket = NULL;\n", spaces);
273 printf("%s};\n", spaces);
277 main(int argc, char *argv[])
279 LIST_HEAD(, uma_keg) uma_kegs;
280 char name[MEMTYPE_MAXNAME];
281 struct uma_keg *kzp, kz;
282 struct uma_zone *uzp, *uzp_userspace;
284 int all_cpus, cpu, mp_maxcpus, mp_maxid, ret, ub_cnt, ub_entries;
285 size_t uzp_userspace_len;
290 kvm = kvm_open(NULL, NULL, NULL, 0, "umastat");
294 if (kvm_nlist(kvm, namelist) != 0)
295 err(-1, "kvm_nlist");
297 if (namelist[X_UMA_KEGS].n_type == 0 ||
298 namelist[X_UMA_KEGS].n_value == 0)
299 errx(-1, "kvm_nlist return");
301 ret = kread_symbol(kvm, X_MP_MAXCPUS, &mp_maxcpus, sizeof(mp_maxcpus),
304 errx(-1, "kread_symbol: %s", kvm_geterr(kvm));
306 printf("mp_maxcpus = %d\n", mp_maxcpus);
308 ret = kread_symbol(kvm, X_MP_MAXID, &mp_maxid, sizeof(mp_maxid), 0);
310 errx(-1, "kread_symbol: %s", kvm_geterr(kvm));
312 printf("mp_maxid = %d\n", mp_maxid);
314 ret = kread_symbol(kvm, X_ALLCPU, &all_cpus, sizeof(all_cpus), 0);
316 errx(-1, "kread_symbol: %s", kvm_geterr(kvm));
318 printf("all_cpus = %x\n", all_cpus);
320 ret = kread_symbol(kvm, X_UMA_KEGS, &uma_kegs, sizeof(uma_kegs), 0);
322 errx(-1, "kread_symbol: %s", kvm_geterr(kvm));
325 * uma_zone_t ends in an array of mp_maxid cache entries. However,
326 * it is statically declared as an array of size 1, so we need to
327 * provide additional space.
329 uzp_userspace_len = sizeof(struct uma_zone) + mp_maxid *
330 sizeof(struct uma_cache);
331 uzp_userspace = malloc(uzp_userspace_len);
332 if (uzp_userspace == NULL)
335 for (kzp = LIST_FIRST(&uma_kegs); kzp != NULL; kzp =
336 LIST_NEXT(&kz, uk_link)) {
337 ret = kread(kvm, kzp, &kz, sizeof(kz), 0);
340 errx(-1, "kread: %s", kvm_geterr(kvm));
344 printf(" uk_recurse = %d\n", kz.uk_recurse);
345 uma_print_keg_align(&kz, " ");
346 printf(" uk_pages = %d\n", kz.uk_pages);
347 printf(" uk_free = %d\n", kz.uk_free);
348 printf(" uk_size = %d\n", kz.uk_size);
349 printf(" uk_rsize = %d\n", kz.uk_rsize);
350 printf(" uk_maxpages = %d\n", kz.uk_maxpages);
352 printf(" uk_pgoff = %d\n", kz.uk_pgoff);
353 printf(" uk_ppera = %d\n", kz.uk_ppera);
354 printf(" uk_ipers = %d\n", kz.uk_ipers);
355 uma_print_keg_flags(&kz, " ");
357 if (LIST_FIRST(&kz.uk_zones) == NULL) {
358 printf("; No zones.\n");
362 for (uzp = LIST_FIRST(&kz.uk_zones); uzp != NULL; uzp =
363 LIST_NEXT(uzp_userspace, uz_link)) {
365 * We actually copy in twice: once with the base
366 * structure, so that we can then decide if we also
367 * need to copy in the caches. This prevents us
368 * from reading past the end of the base UMA zones,
369 * which is unlikely to cause problems but could.
371 ret = kread(kvm, uzp, uzp_userspace,
372 sizeof(struct uma_zone), 0);
375 errx(-1, "kread: %s", kvm_geterr(kvm));
377 if (!(kz.uk_flags & UMA_ZFLAG_INTERNAL)) {
378 ret = kread(kvm, uzp, uzp_userspace,
379 uzp_userspace_len, 0);
382 errx(-1, "kread: %s",
386 ret = kread_string(kvm, uzp_userspace->uz_name, name,
390 errx(-1, "kread_string: %s", kvm_geterr(kvm));
393 printf(" uz_name = \"%s\";\n", name);
394 printf(" uz_allocs = %llu;\n",
395 uzp_userspace->uz_allocs);
396 printf(" uz_frees = %llu;\n",
397 uzp_userspace->uz_frees);
398 printf(" uz_fails = %llu;\n",
399 uzp_userspace->uz_fails);
400 printf(" uz_fills = %u;\n",
401 uzp_userspace->uz_fills);
402 printf(" uz_count = %u;\n",
403 uzp_userspace->uz_count);
404 uma_print_bucketlist(kvm, (struct bucketlist *)
405 &uzp_userspace->uz_full_bucket, "uz_full_bucket",
407 uma_print_bucketlist(kvm, (struct bucketlist *)
408 &uzp_userspace->uz_free_bucket, "uz_free_bucket",
411 if (!(kz.uk_flags & UMA_ZFLAG_INTERNAL)) {
412 ub_cnt = ub_entries = 0;
413 for (cpu = 0; cpu <= mp_maxid; cpu++) {
414 /* if (CPU_ABSENT(cpu)) */
415 if ((all_cpus & (1 << cpu)) == 0)
418 &uzp_userspace->uz_cpu[cpu],
419 "uc_cache", cpu, " ", &ub_cnt,
422 printf(" // %d cache total cnt, %d total "
423 "entries\n", ub_cnt, ub_entries);