2 * Copyright (c) 2005 Robert N. M. Watson
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/param.h>
31 #define LIBMEMSTAT /* Cause vm_page.h not to include opt_vmpage.h */
33 #include <vm/vm_page.h>
36 #include <vm/uma_int.h>
/*
 * Kernel symbols resolved via kvm_nlist(); the X_* macros interleaved in
 * this table name the index of the entry that follows them.
 */
static struct nlist namelist[] = {
	{ .n_name = "_uma_kegs" },	/* Head of the kernel keg list. */
#define X_MP_MAXCPUS 1
	{ .n_name = "_mp_maxcpus" },	/* Compile-time CPU limit. */
	{ .n_name = "_mp_maxid" },	/* Highest valid CPU ID. */
	{ .n_name = "_all_cpus" },	/* Bitmask of present CPUs. */
62 fprintf(stderr, "umastat [-M core [-N system]]\n");
/*
 * Copy "size" bytes from kernel address "kvm_pointer" + offset into the
 * userspace buffer "address" using kvm_read().  Returns 0 on success,
 * MEMSTAT_ERROR_KVM on a read error, or MEMSTAT_ERROR_KVM_SHORTREAD when
 * fewer bytes than requested were copied.
 */
kread(kvm_t *kvm, void *kvm_pointer, void *address, size_t size,
	ret = kvm_read(kvm, (unsigned long)kvm_pointer + offset, address,
		return (MEMSTAT_ERROR_KVM);
	/* Partial copy: the kernel object was not fully read. */
	if ((size_t)ret != size)
		return (MEMSTAT_ERROR_KVM_SHORTREAD);
/*
 * Copy a NUL-terminated string from kernel address "kvm_pointer" into
 * "buffer", stopping at the terminating '\0' or after "buflen" bytes.
 * Returns the same MEMSTAT_ERROR_* codes as kread().
 */
kread_string(kvm_t *kvm, void *kvm_pointer, char *buffer, int buflen)
	for (i = 0; i < buflen; i++) {
		/* One byte per kvm_read() so we can stop at the NUL. */
		ret = kvm_read(kvm, (unsigned long)kvm_pointer + i,
		&(buffer[i]), sizeof(char));
			return (MEMSTAT_ERROR_KVM);
		if ((size_t)ret != sizeof(char))
			return (MEMSTAT_ERROR_KVM_SHORTREAD);
		/* Stop copying once the terminator has been read. */
		if (buffer[i] == '\0')
/*
 * Like kread(), but the source address comes from the resolved nlist
 * symbol at namelist[index] (plus "offset") rather than a raw pointer.
 */
kread_symbol(kvm_t *kvm, int index, void *address, size_t size,
	ret = kvm_read(kvm, namelist[index].n_value + offset, address, size);
		return (MEMSTAT_ERROR_KVM);
	if ((size_t)ret != size)
		return (MEMSTAT_ERROR_KVM_SHORTREAD);
/*
 * Mapping of UMA keg/zone flag bits to printable names; consumed by
 * uma_print_keg_flags() below.
 */
static const struct flaginfo {
	{ UMA_ZFLAG_PRIVALLOC, "privalloc" },
	{ UMA_ZFLAG_INTERNAL, "internal" },
	{ UMA_ZFLAG_FULL, "full" },
	{ UMA_ZFLAG_CACHEONLY, "cacheonly" },
	{ UMA_ZONE_PAGEABLE, "pageable" },
	{ UMA_ZONE_ZINIT, "zinit" },
	{ UMA_ZONE_STATIC, "static" },
	{ UMA_ZONE_OFFPAGE, "offpage" },
	{ UMA_ZONE_MALLOC, "malloc" },
	{ UMA_ZONE_NOFREE, "nofree" },
	{ UMA_ZONE_MTXCLASS, "mtxclass" },
	{ UMA_ZONE_VM, "vm" },
	{ UMA_ZONE_HASH, "hash" },
	{ UMA_ZONE_SECONDARY, "secondary" },
	{ UMA_ZONE_REFCNT, "refcnt" },
	{ UMA_ZONE_MAXBUCKET, "maxbucket" },
/* Number of entries in the flaginfo table. */
static const int flaginfo_count = sizeof(flaginfo) / sizeof(struct flaginfo);
/*
 * Print the keg's uk_flags field symbolically using the flaginfo table;
 * a keg with no flags set prints as "uk_flags = 0;".
 */
uma_print_keg_flags(struct uma_keg *ukp, const char *spaces)
	if (!ukp->uk_flags) {
		printf("%suk_flags = 0;\n", spaces);

	printf("%suk_flags = ", spaces);
	for (i = 0, count = 0; i < flaginfo_count; i++) {
		if (ukp->uk_flags & flaginfo[i].fi_flag) {
			/*
			 * NOTE(review): non-literal printf format string.
			 * fi_name values above contain no '%', but
			 * printf("%s", flaginfo[i].fi_name) would be the
			 * safer idiom — confirm and change.
			 */
			printf(flaginfo[i].fi_name);
/*
 * Print the keg's uk_align field using the symbolic UMA_ALIGN_* name when
 * it matches a known constant, or the raw numeric value otherwise.
 */
uma_print_keg_align(struct uma_keg *ukp, const char *spaces)
	switch(ukp->uk_align) {
		printf("%suk_align = UMA_ALIGN_PTR;\n", spaces);
		printf("%suk_align = UMA_ALIGN_LONG;\n", spaces);
		printf("%suk_align = UMA_ALIGN_INT;\n", spaces);
	case UMA_ALIGN_SHORT:
		printf("%suk_align = UMA_ALIGN_SHORT;\n", spaces);
		printf("%suk_align = UMA_ALIGN_CHAR;\n", spaces);
	case UMA_ALIGN_CACHE:
		printf("%suk_align = UMA_ALIGN_CACHE;\n", spaces);
		/* Unknown alignment: fall back to the raw value. */
		printf("%suk_align = %d\n", spaces, ukp->uk_align);
197 LIST_HEAD(bucketlist, uma_bucket);
/* Print the fill count and capacity of one (userspace copy of a) bucket. */
uma_print_bucket(struct uma_bucket *ubp, const char *spaces __unused)
	printf("{ ub_cnt = %d, ub_entries = %d }", ubp->ub_cnt,
/*
 * Walk a kernel-resident bucket list, printing each bucket and a summary
 * of the totals.  Exits via errx() on any kvm read failure.
 */
uma_print_bucketlist(kvm_t *kvm, struct bucketlist *bucketlist,
    const char *name, const char *spaces)
	struct uma_bucket *ubp, ub;
	uint64_t total_entries, total_cnt;

	printf("%s%s {", spaces, name);

	total_entries = total_cnt = 0;

	/*
	 * Copy-and-chase idiom: "ubp" holds kernel addresses, each element
	 * is copied into the local "ub", and LIST_NEXT() on the local copy
	 * yields the kernel address of the next element.
	 */
	for (ubp = LIST_FIRST(bucketlist); ubp != NULL; ubp =
	    LIST_NEXT(&ub, ub_link)) {
		ret = kread(kvm, ubp, &ub, sizeof(ub), 0);
			errx(-1, "uma_print_bucketlist: %s", kvm_geterr(kvm));
		printf("\n%s ", spaces);
		uma_print_bucket(&ub, "");
		total_entries += ub.ub_entries;
		total_cnt += ub.ub_cnt;

	printf("%s}; // total cnt %ju, total entries %ju\n", spaces,
	total_cnt, total_entries);
/*
 * Print one per-CPU UMA cache: alloc/free counters plus the free and alloc
 * buckets (each copied in from the kernel if present).  When non-NULL,
 * *ub_cnt_add and *ub_entries_add accumulate the bucket fill counts and
 * capacities so the caller can report per-zone cache totals.
 */
uma_print_cache(kvm_t *kvm, struct uma_cache *cache, const char *name,
    int cpu, const char *spaces, int *ub_cnt_add, int *ub_entries_add)
	struct uma_bucket ub;

	printf("%s%s[%d] = {\n", spaces, name, cpu);
	printf("%s uc_frees = %ju;\n", spaces, cache->uc_frees);
	printf("%s uc_allocs = %ju;\n", spaces, cache->uc_allocs);

	/* Free bucket: copy from the kernel and fold into the totals. */
	if (cache->uc_freebucket != NULL) {
		ret = kread(kvm, cache->uc_freebucket, &ub, sizeof(ub), 0);
			errx(-1, "uma_print_cache: %s", kvm_geterr(kvm));
		printf("%s uc_freebucket ", spaces);
		uma_print_bucket(&ub, spaces);
		if (ub_cnt_add != NULL)
			*ub_cnt_add += ub.ub_cnt;
		if (ub_entries_add != NULL)
			*ub_entries_add += ub.ub_entries;
		printf("%s uc_freebucket = NULL;\n", spaces);
	/* Alloc bucket: same treatment as the free bucket above. */
	if (cache->uc_allocbucket != NULL) {
		ret = kread(kvm, cache->uc_allocbucket, &ub, sizeof(ub), 0);
			errx(-1, "uma_print_cache: %s", kvm_geterr(kvm));
		printf("%s uc_allocbucket ", spaces);
		uma_print_bucket(&ub, spaces);
		if (ub_cnt_add != NULL)
			*ub_cnt_add += ub.ub_cnt;
		if (ub_entries_add != NULL)
			*ub_entries_add += ub.ub_entries;
		printf("%s uc_allocbucket = NULL;\n", spaces);
	printf("%s};\n", spaces);
main(int argc, char *argv[])
	/* Userspace copy of the head of the kernel's keg list. */
	LIST_HEAD(, uma_keg) uma_kegs;
	char name[MEMTYPE_MAXNAME];
	struct uma_keg *kzp, kz;
	struct uma_zone *uzp, *uzp_userspace;
	int all_cpus, cpu, mp_maxcpus, mp_maxid, ret, ub_cnt, ub_entries;
	size_t uzp_userspace_len;
	char errbuf[_POSIX2_LINE_MAX];

	memf = nlistf = NULL;
	/* -M: core/memory image; -N: kernel image for symbol resolution. */
	while ((ch = getopt(argc, argv, "M:N:")) != -1) {
	/* A kernel image (-N) without a core (-M) is not a usable pair. */
	if (nlistf != NULL && memf == NULL)

	/* Open the live kernel or the specified crash dump. */
	kvm = kvm_openfiles(nlistf, memf, NULL, 0, errbuf);
		errx(-1, "kvm_openfiles: %s", errbuf);

	/* Resolve the addresses of the symbols listed in namelist[]. */
	if (kvm_nlist(kvm, namelist) != 0)
		err(-1, "kvm_nlist");

	/* The keg list head must have resolved or nothing else works. */
	if (namelist[X_UMA_KEGS].n_type == 0 ||
	    namelist[X_UMA_KEGS].n_value == 0)
		errx(-1, "kvm_nlist return");

	/* Pull the CPU topology constants out of the kernel. */
	ret = kread_symbol(kvm, X_MP_MAXCPUS, &mp_maxcpus, sizeof(mp_maxcpus),
		errx(-1, "kread_symbol: %s", kvm_geterr(kvm));

	printf("mp_maxcpus = %d\n", mp_maxcpus);

	ret = kread_symbol(kvm, X_MP_MAXID, &mp_maxid, sizeof(mp_maxid), 0);
		errx(-1, "kread_symbol: %s", kvm_geterr(kvm));

	printf("mp_maxid = %d\n", mp_maxid);

	ret = kread_symbol(kvm, X_ALLCPU, &all_cpus, sizeof(all_cpus), 0);
		errx(-1, "kread_symbol: %s", kvm_geterr(kvm));

	printf("all_cpus = %x\n", all_cpus);

	ret = kread_symbol(kvm, X_UMA_KEGS, &uma_kegs, sizeof(uma_kegs), 0);
		errx(-1, "kread_symbol: %s", kvm_geterr(kvm));

	 * uma_zone_t ends in an array of mp_maxid cache entries. However,
	 * it is statically declared as an array of size 1, so we need to
	 * provide additional space.
	uzp_userspace_len = sizeof(struct uma_zone) + mp_maxid *
	    sizeof(struct uma_cache);
	uzp_userspace = malloc(uzp_userspace_len);
	if (uzp_userspace == NULL)

	/*
	 * Keg loop.  Copy-and-chase idiom: "kzp" holds kernel addresses,
	 * each keg is copied into "kz", and LIST_NEXT() on the local copy
	 * yields the kernel address of the next keg.
	 */
	for (kzp = LIST_FIRST(&uma_kegs); kzp != NULL; kzp =
	    LIST_NEXT(&kz, uk_link)) {
		ret = kread(kvm, kzp, &kz, sizeof(kz), 0);
			errx(-1, "kread: %s", kvm_geterr(kvm));
		printf(" uk_recurse = %d\n", kz.uk_recurse);
		uma_print_keg_align(&kz, " ");
		printf(" uk_pages = %d\n", kz.uk_pages);
		printf(" uk_free = %d\n", kz.uk_free);
		printf(" uk_size = %d\n", kz.uk_size);
		printf(" uk_rsize = %d\n", kz.uk_rsize);
		printf(" uk_maxpages = %d\n", kz.uk_maxpages);
		printf(" uk_pgoff = %d\n", kz.uk_pgoff);
		printf(" uk_ppera = %d\n", kz.uk_ppera);
		printf(" uk_ipers = %d\n", kz.uk_ipers);
		uma_print_keg_flags(&kz, " ");

		if (LIST_FIRST(&kz.uk_zones) == NULL) {
			printf("; No zones.\n");

		/*
		 * Zone loop: same copy-and-chase idiom, using the
		 * heap-allocated userspace zone buffer as the local copy.
		 */
		for (uzp = LIST_FIRST(&kz.uk_zones); uzp != NULL; uzp =
		    LIST_NEXT(uzp_userspace, uz_link)) {
			 * We actually copy in twice: once with the base
			 * structure, so that we can then decide if we also
			 * need to copy in the caches. This prevents us
			 * from reading past the end of the base UMA zones,
			 * which is unlikely to cause problems but could.
			ret = kread(kvm, uzp, uzp_userspace,
			    sizeof(struct uma_zone), 0);
				errx(-1, "kread: %s", kvm_geterr(kvm));
			/* Internal zones have no per-CPU caches to copy. */
			if (!(kz.uk_flags & UMA_ZFLAG_INTERNAL)) {
				ret = kread(kvm, uzp, uzp_userspace,
				    uzp_userspace_len, 0);
					errx(-1, "kread: %s",
			/* Zone name lives at a separate kernel address. */
			ret = kread_string(kvm, uzp_userspace->uz_name, name,
				errx(-1, "kread_string: %s", kvm_geterr(kvm));
			printf(" uz_name = \"%s\";\n", name);
			printf(" uz_allocs = %ju;\n",
			    uzp_userspace->uz_allocs);
			printf(" uz_frees = %ju;\n",
			    uzp_userspace->uz_frees);
			printf(" uz_fails = %ju;\n",
			    uzp_userspace->uz_fails);
			printf(" uz_fills = %u;\n",
			    uzp_userspace->uz_fills);
			printf(" uz_count = %u;\n",
			    uzp_userspace->uz_count);
			uma_print_bucketlist(kvm, (void *)
			    &uzp_userspace->uz_full_bucket, "uz_full_bucket",
			uma_print_bucketlist(kvm, (void *)
			    &uzp_userspace->uz_free_bucket, "uz_free_bucket",

			/* Per-CPU caches, accumulated into zone totals. */
			if (!(kz.uk_flags & UMA_ZFLAG_INTERNAL)) {
				ub_cnt = ub_entries = 0;
				for (cpu = 0; cpu <= mp_maxid; cpu++) {
					/* if (CPU_ABSENT(cpu)) */
					/*
					 * NOTE(review): 1 << cpu is
					 * undefined for cpu >= the bit
					 * width of int; assumes the CPU
					 * mask fits in "int" — confirm.
					 */
					if ((all_cpus & (1 << cpu)) == 0)
					    &uzp_userspace->uz_cpu[cpu],
					    "uc_cache", cpu, " ", &ub_cnt,
				printf(" // %d cache total cnt, %d total "
				    "entries\n", ub_cnt, ub_entries);