/*-
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/queue.h>

#define	LIBMEMSTAT	/* Cause vm_page.h not to be included. */
#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <err.h>
#include <inttypes.h>
#include <kvm.h>
#include <limits.h>
#include <memstat.h>
#include <nlist.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
42 static struct nlist namelist[] = {
44 { .n_name = "_uma_kegs" },
45 #define X_MP_MAXCPUS 1
46 { .n_name = "_mp_maxcpus" },
48 { .n_name = "_mp_maxid" },
50 { .n_name = "_all_cpus" },
58 fprintf(stderr, "umastat [-M core [-N system]]\n");
63 kread(kvm_t *kvm, void *kvm_pointer, void *address, size_t size,
68 ret = kvm_read(kvm, (unsigned long)kvm_pointer + offset, address,
71 return (MEMSTAT_ERROR_KVM);
72 if ((size_t)ret != size)
73 return (MEMSTAT_ERROR_KVM_SHORTREAD);
78 kread_string(kvm_t *kvm, const void *kvm_pointer, char *buffer, int buflen)
83 for (i = 0; i < buflen; i++) {
84 ret = kvm_read(kvm, (unsigned long)kvm_pointer + i,
85 &(buffer[i]), sizeof(char));
87 return (MEMSTAT_ERROR_KVM);
88 if ((size_t)ret != sizeof(char))
89 return (MEMSTAT_ERROR_KVM_SHORTREAD);
90 if (buffer[i] == '\0')
99 kread_symbol(kvm_t *kvm, int index, void *address, size_t size,
104 ret = kvm_read(kvm, namelist[index].n_value + offset, address, size);
106 return (MEMSTAT_ERROR_KVM);
107 if ((size_t)ret != size)
108 return (MEMSTAT_ERROR_KVM_SHORTREAD);
112 static const struct flaginfo {
116 { UMA_ZFLAG_MULTI, "multi" },
117 { UMA_ZFLAG_DRAINING, "draining" },
118 { UMA_ZFLAG_BUCKET, "bucket" },
119 { UMA_ZFLAG_INTERNAL, "internal" },
120 { UMA_ZFLAG_FULL, "full" },
121 { UMA_ZFLAG_CACHEONLY, "cacheonly" },
122 { UMA_ZONE_PAGEABLE, "pageable" },
123 { UMA_ZONE_ZINIT, "zinit" },
124 { UMA_ZONE_STATIC, "static" },
125 { UMA_ZONE_OFFPAGE, "offpage" },
126 { UMA_ZONE_MALLOC, "malloc" },
127 { UMA_ZONE_NOFREE, "nofree" },
128 { UMA_ZONE_MTXCLASS, "mtxclass" },
129 { UMA_ZONE_VM, "vm" },
130 { UMA_ZONE_HASH, "hash" },
131 { UMA_ZONE_SECONDARY, "secondary" },
132 { UMA_ZONE_MAXBUCKET, "maxbucket" },
133 { UMA_ZONE_CACHESPREAD, "cachespread" },
134 { UMA_ZONE_VTOSLAB, "vtoslab" },
135 { UMA_ZONE_NODUMP, "nodump" },
136 { UMA_ZONE_PCPU, "pcpu" },
138 static const int flaginfo_count = sizeof(flaginfo) / sizeof(struct flaginfo);
141 uma_print_keg_flags(struct uma_keg *ukp, const char *spaces)
145 if (!ukp->uk_flags) {
146 printf("%suk_flags = 0;\n", spaces);
150 printf("%suk_flags = ", spaces);
151 for (i = 0, count = 0; i < flaginfo_count; i++) {
152 if (ukp->uk_flags & flaginfo[i].fi_flag) {
155 printf("%s", flaginfo[i].fi_name);
163 uma_print_keg_align(struct uma_keg *ukp, const char *spaces)
166 switch(ukp->uk_align) {
168 printf("%suk_align = UMA_ALIGN_PTR;\n", spaces);
173 printf("%suk_align = UMA_ALIGN_LONG;\n", spaces);
177 printf("%suk_align = UMA_ALIGN_INT;\n", spaces);
181 case UMA_ALIGN_SHORT:
182 printf("%suk_align = UMA_ALIGN_SHORT;\n", spaces);
186 printf("%suk_align = UMA_ALIGN_CHAR;\n", spaces);
189 case UMA_ALIGN_CACHE:
190 printf("%suk_align = UMA_ALIGN_CACHE;\n", spaces);
194 printf("%suk_align = %d\n", spaces, ukp->uk_align);
198 LIST_HEAD(bucketlist, uma_bucket);
201 uma_print_bucket(struct uma_bucket *ubp, const char *spaces __unused)
204 printf("{ ub_cnt = %d, ub_entries = %d }", ubp->ub_cnt,
209 uma_print_bucketlist(kvm_t *kvm, struct bucketlist *bucketlist,
210 const char *name, const char *spaces)
212 struct uma_bucket *ubp, ub;
213 uint64_t total_entries, total_cnt;
216 printf("%s%s {", spaces, name);
218 total_entries = total_cnt = 0;
220 for (ubp = LIST_FIRST(bucketlist); ubp != NULL; ubp =
221 LIST_NEXT(&ub, ub_link)) {
222 ret = kread(kvm, ubp, &ub, sizeof(ub), 0);
224 errx(-1, "uma_print_bucketlist: %s", kvm_geterr(kvm));
226 printf("\n%s ", spaces);
227 uma_print_bucket(&ub, "");
229 total_entries += ub.ub_entries;
230 total_cnt += ub.ub_cnt;
235 printf("%s}; // total cnt %ju, total entries %ju\n", spaces,
236 total_cnt, total_entries);
240 uma_print_cache(kvm_t *kvm, struct uma_cache *cache, const char *name,
241 int cpu, const char *spaces, int *ub_cnt_add, int *ub_entries_add)
243 struct uma_bucket ub;
246 printf("%s%s[%d] = {\n", spaces, name, cpu);
247 printf("%s uc_frees = %ju;\n", spaces, cache->uc_frees);
248 printf("%s uc_allocs = %ju;\n", spaces, cache->uc_allocs);
250 if (cache->uc_freebucket != NULL) {
251 ret = kread(kvm, cache->uc_freebucket, &ub, sizeof(ub), 0);
253 errx(-1, "uma_print_cache: %s", kvm_geterr(kvm));
254 printf("%s uc_freebucket ", spaces);
255 uma_print_bucket(&ub, spaces);
257 if (ub_cnt_add != NULL)
258 *ub_cnt_add += ub.ub_cnt;
259 if (ub_entries_add != NULL)
260 *ub_entries_add += ub.ub_entries;
262 printf("%s uc_freebucket = NULL;\n", spaces);
263 if (cache->uc_allocbucket != NULL) {
264 ret = kread(kvm, cache->uc_allocbucket, &ub, sizeof(ub), 0);
266 errx(-1, "uma_print_cache: %s", kvm_geterr(kvm));
267 printf("%s uc_allocbucket ", spaces);
268 uma_print_bucket(&ub, spaces);
270 if (ub_cnt_add != NULL)
271 *ub_cnt_add += ub.ub_cnt;
272 if (ub_entries_add != NULL)
273 *ub_entries_add += ub.ub_entries;
275 printf("%s uc_allocbucket = NULL;\n", spaces);
276 printf("%s};\n", spaces);
280 main(int argc, char *argv[])
282 LIST_HEAD(, uma_keg) uma_kegs;
283 char name[MEMTYPE_MAXNAME];
284 struct uma_keg *kzp, kz;
285 struct uma_zone *uzp, *uzp_userspace;
287 int all_cpus, cpu, mp_maxcpus, mp_maxid, ret, ub_cnt, ub_entries;
288 size_t uzp_userspace_len;
291 char errbuf[_POSIX2_LINE_MAX];
293 memf = nlistf = NULL;
294 while ((ch = getopt(argc, argv, "M:N:")) != -1) {
311 if (nlistf != NULL && memf == NULL)
314 kvm = kvm_openfiles(nlistf, memf, NULL, 0, errbuf);
316 errx(-1, "kvm_openfiles: %s", errbuf);
318 if (kvm_nlist(kvm, namelist) != 0)
319 err(-1, "kvm_nlist");
321 if (namelist[X_UMA_KEGS].n_type == 0 ||
322 namelist[X_UMA_KEGS].n_value == 0)
323 errx(-1, "kvm_nlist return");
325 ret = kread_symbol(kvm, X_MP_MAXCPUS, &mp_maxcpus, sizeof(mp_maxcpus),
328 errx(-1, "kread_symbol: %s", kvm_geterr(kvm));
330 printf("mp_maxcpus = %d\n", mp_maxcpus);
332 ret = kread_symbol(kvm, X_MP_MAXID, &mp_maxid, sizeof(mp_maxid), 0);
334 errx(-1, "kread_symbol: %s", kvm_geterr(kvm));
336 printf("mp_maxid = %d\n", mp_maxid);
338 ret = kread_symbol(kvm, X_ALLCPU, &all_cpus, sizeof(all_cpus), 0);
340 errx(-1, "kread_symbol: %s", kvm_geterr(kvm));
342 printf("all_cpus = %x\n", all_cpus);
344 ret = kread_symbol(kvm, X_UMA_KEGS, &uma_kegs, sizeof(uma_kegs), 0);
346 errx(-1, "kread_symbol: %s", kvm_geterr(kvm));
349 * uma_zone_t ends in an array of mp_maxid cache entries. However,
350 * it is statically declared as an array of size 1, so we need to
351 * provide additional space.
353 uzp_userspace_len = sizeof(struct uma_zone) + mp_maxid *
354 sizeof(struct uma_cache);
355 uzp_userspace = malloc(uzp_userspace_len);
356 if (uzp_userspace == NULL)
359 for (kzp = LIST_FIRST(&uma_kegs); kzp != NULL; kzp =
360 LIST_NEXT(&kz, uk_link)) {
361 ret = kread(kvm, kzp, &kz, sizeof(kz), 0);
364 errx(-1, "kread: %s", kvm_geterr(kvm));
368 uma_print_keg_align(&kz, " ");
369 printf(" uk_pages = %d\n", kz.uk_pages);
370 printf(" uk_free = %d\n", kz.uk_free);
371 printf(" uk_reserve = %d\n", kz.uk_reserve);
372 printf(" uk_size = %d\n", kz.uk_size);
373 printf(" uk_rsize = %d\n", kz.uk_rsize);
374 printf(" uk_maxpages = %d\n", kz.uk_maxpages);
376 printf(" uk_pgoff = %d\n", kz.uk_pgoff);
377 printf(" uk_ppera = %d\n", kz.uk_ppera);
378 printf(" uk_ipers = %d\n", kz.uk_ipers);
379 uma_print_keg_flags(&kz, " ");
381 if (LIST_FIRST(&kz.uk_zones) == NULL) {
382 printf("; No zones.\n");
386 for (uzp = LIST_FIRST(&kz.uk_zones); uzp != NULL; uzp =
387 LIST_NEXT(uzp_userspace, uz_link)) {
389 * We actually copy in twice: once with the base
390 * structure, so that we can then decide if we also
391 * need to copy in the caches. This prevents us
392 * from reading past the end of the base UMA zones,
393 * which is unlikely to cause problems but could.
395 ret = kread(kvm, uzp, uzp_userspace,
396 sizeof(struct uma_zone), 0);
399 errx(-1, "kread: %s", kvm_geterr(kvm));
401 if (!(kz.uk_flags & UMA_ZFLAG_INTERNAL)) {
402 ret = kread(kvm, uzp, uzp_userspace,
403 uzp_userspace_len, 0);
406 errx(-1, "kread: %s",
410 ret = kread_string(kvm, uzp_userspace->uz_name, name,
414 errx(-1, "kread_string: %s", kvm_geterr(kvm));
417 printf(" uz_name = \"%s\";\n", name);
418 printf(" uz_allocs = %lu;\n",
419 uzp_userspace->uz_allocs);
420 printf(" uz_frees = %lu;\n",
421 uzp_userspace->uz_frees);
422 printf(" uz_fails = %lu;\n",
423 uzp_userspace->uz_fails);
424 printf(" uz_sleeps = %ju;\n",
425 uzp_userspace->uz_sleeps);
426 printf(" uz_count = %u;\n",
427 uzp_userspace->uz_count);
428 uma_print_bucketlist(kvm, (void *)
429 &uzp_userspace->uz_buckets, "uz_buckets",
432 if (!(kz.uk_flags & UMA_ZFLAG_INTERNAL)) {
433 ub_cnt = ub_entries = 0;
434 for (cpu = 0; cpu <= mp_maxid; cpu++) {
435 /* if (CPU_ABSENT(cpu)) */
436 if ((all_cpus & (1 << cpu)) == 0)
439 &uzp_userspace->uz_cpu[cpu],
440 "uc_cache", cpu, " ", &ub_cnt,
443 printf(" // %d cache total cnt, %d total "
444 "entries\n", ub_cnt, ub_entries);