/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/sysctl.h>

#include <stdlib.h>
#include <string.h>
#include <strings.h>

#include "memstat.h"
#include "memstat_internal.h"
44 memstat_strerror(int error)
48 case MEMSTAT_ERROR_NOMEMORY:
49 return ("Cannot allocate memory");
50 case MEMSTAT_ERROR_VERSION:
51 return ("Version mismatch");
52 case MEMSTAT_ERROR_PERMISSION:
53 return ("Permission denied");
54 case MEMSTAT_ERROR_DATAERROR:
55 return ("Data format error");
56 case MEMSTAT_ERROR_KVM:
58 case MEMSTAT_ERROR_KVM_NOSYMBOL:
59 return ("KVM unable to find symbol");
60 case MEMSTAT_ERROR_KVM_SHORTREAD:
61 return ("KVM short read");
62 case MEMSTAT_ERROR_UNDEFINED:
64 return ("Unknown error");
68 struct memory_type_list *
69 memstat_mtl_alloc(void)
71 struct memory_type_list *mtlp;
73 mtlp = malloc(sizeof(*mtlp));
77 LIST_INIT(&mtlp->mtl_list);
78 mtlp->mtl_error = MEMSTAT_ERROR_UNDEFINED;
83 memstat_mtl_first(struct memory_type_list *list)
86 return (LIST_FIRST(&list->mtl_list));
90 memstat_mtl_next(struct memory_type *mtp)
93 return (LIST_NEXT(mtp, mt_list));
97 _memstat_mtl_empty(struct memory_type_list *list)
99 struct memory_type *mtp;
101 while ((mtp = LIST_FIRST(&list->mtl_list))) {
102 free(mtp->mt_percpu_alloc);
103 free(mtp->mt_percpu_cache);
104 LIST_REMOVE(mtp, mt_list);
/*
 * Release a memory_type list allocated by memstat_mtl_alloc(): free every
 * entry, then the list head itself.  The pointer is invalid afterwards.
 */
void
memstat_mtl_free(struct memory_type_list *list)
{

	_memstat_mtl_empty(list);
	free(list);
}
118 memstat_mtl_geterror(struct memory_type_list *list)
121 return (list->mtl_error);
125 * Look for an existing memory_type entry in a memory_type list, based on the
126 * allocator and name of the type. If not found, return NULL. No errno or
130 memstat_mtl_find(struct memory_type_list *list, int allocator,
133 struct memory_type *mtp;
135 LIST_FOREACH(mtp, &list->mtl_list, mt_list) {
136 if ((mtp->mt_allocator == allocator ||
137 allocator == ALLOCATOR_ANY) &&
138 strcmp(mtp->mt_name, name) == 0)
145 * Allocate a new memory_type with the specificed allocator type and name,
146 * then insert into the list. The structure will be zero'd.
148 * libmemstat(3) internal function.
151 _memstat_mt_allocate(struct memory_type_list *list, int allocator,
152 const char *name, int maxcpus)
154 struct memory_type *mtp;
156 mtp = malloc(sizeof(*mtp));
160 bzero(mtp, sizeof(*mtp));
162 mtp->mt_allocator = allocator;
163 mtp->mt_percpu_alloc = malloc(sizeof(struct mt_percpu_alloc_s) *
165 mtp->mt_percpu_cache = malloc(sizeof(struct mt_percpu_cache_s) *
167 strlcpy(mtp->mt_name, name, MEMTYPE_MAXNAME);
168 LIST_INSERT_HEAD(&list->mtl_list, mtp, mt_list);
173 * Reset any libmemstat(3)-owned statistics in a memory_type record so that
174 * it can be reused without incremental addition problems. Caller-owned
175 * memory is left "as-is", and must be updated by the caller if desired.
177 * libmemstat(3) internal function.
180 _memstat_mt_reset_stats(struct memory_type *mtp, int maxcpus)
184 mtp->mt_countlimit = 0;
185 mtp->mt_byteslimit = 0;
186 mtp->mt_sizemask = 0;
189 mtp->mt_memalloced = 0;
190 mtp->mt_memfreed = 0;
191 mtp->mt_numallocs = 0;
192 mtp->mt_numfrees = 0;
196 mtp->mt_failures = 0;
199 mtp->mt_zonefree = 0;
202 for (i = 0; i < maxcpus; i++) {
203 mtp->mt_percpu_alloc[i].mtp_memalloced = 0;
204 mtp->mt_percpu_alloc[i].mtp_memfreed = 0;
205 mtp->mt_percpu_alloc[i].mtp_numallocs = 0;
206 mtp->mt_percpu_alloc[i].mtp_numfrees = 0;
207 mtp->mt_percpu_alloc[i].mtp_sizemask = 0;
208 mtp->mt_percpu_cache[i].mtp_free = 0;
213 * Accessor methods for struct memory_type. Avoids encoding the structure
214 * ABI into the application.
217 memstat_get_name(const struct memory_type *mtp)
220 return (mtp->mt_name);
224 memstat_get_allocator(const struct memory_type *mtp)
227 return (mtp->mt_allocator);
231 memstat_get_countlimit(const struct memory_type *mtp)
234 return (mtp->mt_countlimit);
238 memstat_get_byteslimit(const struct memory_type *mtp)
241 return (mtp->mt_byteslimit);
245 memstat_get_sizemask(const struct memory_type *mtp)
248 return (mtp->mt_sizemask);
252 memstat_get_size(const struct memory_type *mtp)
255 return (mtp->mt_size);
259 memstat_get_rsize(const struct memory_type *mtp)
262 return (mtp->mt_rsize);
266 memstat_get_memalloced(const struct memory_type *mtp)
269 return (mtp->mt_memalloced);
273 memstat_get_memfreed(const struct memory_type *mtp)
276 return (mtp->mt_memfreed);
280 memstat_get_numallocs(const struct memory_type *mtp)
283 return (mtp->mt_numallocs);
287 memstat_get_numfrees(const struct memory_type *mtp)
290 return (mtp->mt_numfrees);
294 memstat_get_bytes(const struct memory_type *mtp)
297 return (mtp->mt_bytes);
301 memstat_get_count(const struct memory_type *mtp)
304 return (mtp->mt_count);
308 memstat_get_free(const struct memory_type *mtp)
311 return (mtp->mt_free);
315 memstat_get_failures(const struct memory_type *mtp)
318 return (mtp->mt_failures);
322 memstat_get_sleeps(const struct memory_type *mtp)
325 return (mtp->mt_sleeps);
329 memstat_get_caller_pointer(const struct memory_type *mtp, int index)
332 return (mtp->mt_caller_pointer[index]);
336 memstat_set_caller_pointer(struct memory_type *mtp, int index, void *value)
339 mtp->mt_caller_pointer[index] = value;
343 memstat_get_caller_uint64(const struct memory_type *mtp, int index)
346 return (mtp->mt_caller_uint64[index]);
350 memstat_set_caller_uint64(struct memory_type *mtp, int index, uint64_t value)
353 mtp->mt_caller_uint64[index] = value;
357 memstat_get_zonefree(const struct memory_type *mtp)
360 return (mtp->mt_zonefree);
364 memstat_get_kegfree(const struct memory_type *mtp)
367 return (mtp->mt_kegfree);
371 memstat_get_percpu_memalloced(const struct memory_type *mtp, int cpu)
374 return (mtp->mt_percpu_alloc[cpu].mtp_memalloced);
378 memstat_get_percpu_memfreed(const struct memory_type *mtp, int cpu)
381 return (mtp->mt_percpu_alloc[cpu].mtp_memfreed);
385 memstat_get_percpu_numallocs(const struct memory_type *mtp, int cpu)
388 return (mtp->mt_percpu_alloc[cpu].mtp_numallocs);
392 memstat_get_percpu_numfrees(const struct memory_type *mtp, int cpu)
395 return (mtp->mt_percpu_alloc[cpu].mtp_numfrees);
399 memstat_get_percpu_sizemask(const struct memory_type *mtp, int cpu)
402 return (mtp->mt_percpu_alloc[cpu].mtp_sizemask);
406 memstat_get_percpu_caller_pointer(const struct memory_type *mtp, int cpu,
410 return (mtp->mt_percpu_alloc[cpu].mtp_caller_pointer[index]);
414 memstat_set_percpu_caller_pointer(struct memory_type *mtp, int cpu,
415 int index, void *value)
418 mtp->mt_percpu_alloc[cpu].mtp_caller_pointer[index] = value;
422 memstat_get_percpu_caller_uint64(const struct memory_type *mtp, int cpu,
426 return (mtp->mt_percpu_alloc[cpu].mtp_caller_uint64[index]);
430 memstat_set_percpu_caller_uint64(struct memory_type *mtp, int cpu, int index,
434 mtp->mt_percpu_alloc[cpu].mtp_caller_uint64[index] = value;
438 memstat_get_percpu_free(const struct memory_type *mtp, int cpu)
441 return (mtp->mt_percpu_cache[cpu].mtp_free);