2 * Copyright (C) 2004-2006 Internet Systems Consortium, Inc. ("ISC")
3 * Copyright (C) 1997-2003 Internet Software Consortium.
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
10 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
11 * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
12 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
13 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
14 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
15 * PERFORMANCE OF THIS SOFTWARE.
18 /* $Id: mem.c,v 1.116.18.12 2006/12/08 05:07:59 marka Exp $ */
30 #include <isc/magic.h>
34 #include <isc/ondestroy.h>
35 #include <isc/string.h>
37 #include <isc/mutex.h>
/*
 * Lock/unlock the context's mutex unless the context was created with
 * ISC_MEMFLAG_NOLOCK.  Wrapped in do { } while (0) so that the embedded
 * "if" cannot capture an "else" at the call site (dangling-else hazard
 * of bare statement macros).
 */
#define MCTXLOCK(m, l) \
	do { \
		if (((m)->flags & ISC_MEMFLAG_NOLOCK) == 0) \
			LOCK(l); \
	} while (0)
#define MCTXUNLOCK(m, l) \
	do { \
		if (((m)->flags & ISC_MEMFLAG_NOLOCK) == 0) \
			UNLOCK(l); \
	} while (0)
43 #ifndef ISC_MEM_DEBUGGING
44 #define ISC_MEM_DEBUGGING 0
46 LIBISC_EXTERNAL_DATA unsigned int isc_mem_debugging = ISC_MEM_DEBUGGING;
52 #define DEF_MAX_SIZE 1100
53 #define DEF_MEM_TARGET 4096
54 #define ALIGNMENT_SIZE 8 /*%< must be a power of 2 */
55 #define NUM_BASIC_BLOCKS 64 /*%< must be > 1 */
56 #define TABLE_INCREMENT 1024
57 #define DEBUGLIST_COUNT 1024
62 #if ISC_MEM_TRACKLINES
63 typedef struct debuglink debuglink_t;
65 ISC_LINK(debuglink_t) link;
66 const void *ptr[DEBUGLIST_COUNT];
67 unsigned int size[DEBUGLIST_COUNT];
68 const char *file[DEBUGLIST_COUNT];
69 unsigned int line[DEBUGLIST_COUNT];
73 #define FLARG_PASS , file, line
74 #define FLARG , const char *file, int line
80 typedef struct element element;
87 * This structure must be ALIGNMENT_SIZE bytes.
92 char bytes[ALIGNMENT_SIZE];
98 unsigned long totalgets;
100 unsigned long freefrags;
103 #define MEM_MAGIC ISC_MAGIC('M', 'e', 'm', 'C')
104 #define VALID_CONTEXT(c) ISC_MAGIC_VALID(c, MEM_MAGIC)
106 #if ISC_MEM_TRACKLINES
107 typedef ISC_LIST(debuglink_t) debuglist_t;
110 /* List of all active memory contexts. */
112 static ISC_LIST(isc_mem_t) contexts;
113 static isc_once_t once = ISC_ONCE_INIT;
114 static isc_mutex_t lock;
118 isc_ondestroy_t ondestroy;
121 isc_memalloc_t memalloc;
122 isc_memfree_t memfree;
125 isc_boolean_t checkfree;
126 struct stats * stats;
127 unsigned int references;
134 isc_boolean_t hi_called;
135 isc_mem_water_t water;
137 ISC_LIST(isc_mempool_t) pools;
139 /* ISC_MEMFLAG_INTERNAL */
141 element ** freelists;
142 element * basic_blocks;
143 unsigned char ** basic_table;
144 unsigned int basic_table_count;
145 unsigned int basic_table_size;
146 unsigned char * lowest;
147 unsigned char * highest;
149 #if ISC_MEM_TRACKLINES
150 debuglist_t * debuglist;
153 unsigned int memalloc_failures;
154 ISC_LINK(isc_mem_t) link;
157 #define MEMPOOL_MAGIC ISC_MAGIC('M', 'E', 'M', 'p')
158 #define VALID_MEMPOOL(c) ISC_MAGIC_VALID(c, MEMPOOL_MAGIC)
161 /* always unlocked */
162 unsigned int magic; /*%< magic number */
163 isc_mutex_t *lock; /*%< optional lock */
164 isc_mem_t *mctx; /*%< our memory context */
165 /*%< locked via the memory context's lock */
166 ISC_LINK(isc_mempool_t) link; /*%< next pool in this mem context */
167 /*%< optionally locked from here down */
168 element *items; /*%< low water item list */
169 size_t size; /*%< size of each item on this pool */
170 unsigned int maxalloc; /*%< max number of items allowed */
171 unsigned int allocated; /*%< # of items currently given out */
172 unsigned int freecount; /*%< # of items on reserved list */
173 unsigned int freemax; /*%< # of items allowed on free list */
174 unsigned int fillcount; /*%< # of items to fetch on each fill */
176 unsigned int gets; /*%< # of requests to this pool */
177 /*%< Debugging only. */
178 #if ISC_MEMPOOL_NAMES
179 char name[16]; /*%< printed name in stats reports */
184 * Private Inline-able.
187 #if ! ISC_MEM_TRACKLINES
188 #define ADD_TRACE(a, b, c, d, e)
189 #define DELETE_TRACE(a, b, c, d, e)
191 #define ADD_TRACE(a, b, c, d, e) \
193 if ((isc_mem_debugging & (ISC_MEM_DEBUGTRACE | \
194 ISC_MEM_DEBUGRECORD)) != 0 && \
196 add_trace_entry(a, b, c, d, e); \
198 #define DELETE_TRACE(a, b, c, d, e) delete_trace_entry(a, b, c, d, e)
201 print_active(isc_mem_t *ctx, FILE *out);
204 * mctx must be locked.
207 add_trace_entry(isc_mem_t *mctx, const void *ptr, unsigned int size
213 if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0)
214 fprintf(stderr, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
217 "file %s line %u mctx %p\n"),
218 ptr, size, file, line, mctx);
220 if (mctx->debuglist == NULL)
223 if (size > mctx->max_size)
224 size = mctx->max_size;
226 dl = ISC_LIST_HEAD(mctx->debuglist[size]);
228 if (dl->count == DEBUGLIST_COUNT)
230 for (i = 0; i < DEBUGLIST_COUNT; i++) {
231 if (dl->ptr[i] == NULL) {
241 dl = ISC_LIST_NEXT(dl, link);
244 dl = malloc(sizeof(debuglink_t));
247 ISC_LINK_INIT(dl, link);
248 for (i = 1; i < DEBUGLIST_COUNT; i++) {
261 ISC_LIST_PREPEND(mctx->debuglist[size], dl, link);
265 delete_trace_entry(isc_mem_t *mctx, const void *ptr, unsigned int size,
266 const char *file, unsigned int line)
271 if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0)
272 fprintf(stderr, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
275 "file %s line %u mctx %p\n"),
276 ptr, size, file, line, mctx);
278 if (mctx->debuglist == NULL)
281 if (size > mctx->max_size)
282 size = mctx->max_size;
284 dl = ISC_LIST_HEAD(mctx->debuglist[size]);
286 for (i = 0; i < DEBUGLIST_COUNT; i++) {
287 if (dl->ptr[i] == ptr) {
293 INSIST(dl->count > 0);
295 if (dl->count == 0) {
296 ISC_LIST_UNLINK(mctx->debuglist[size],
303 dl = ISC_LIST_NEXT(dl, link);
307 * If we get here, we didn't find the item on the list. We're
312 #endif /* ISC_MEM_TRACKLINES */
315 rmsize(size_t size) {
317 * round down to ALIGNMENT_SIZE
319 return (size & (~(ALIGNMENT_SIZE - 1)));
323 quantize(size_t size) {
325 * Round up the result in order to get a size big
326 * enough to satisfy the request and be aligned on ALIGNMENT_SIZE
331 return (ALIGNMENT_SIZE);
332 return ((size + ALIGNMENT_SIZE - 1) & (~(ALIGNMENT_SIZE - 1)));
335 static inline isc_boolean_t
336 more_basic_blocks(isc_mem_t *ctx) {
338 unsigned char *curr, *next;
339 unsigned char *first, *last;
340 unsigned char **table;
341 unsigned int table_size;
345 /* Require: we hold the context lock. */
348 * Did we hit the quota for this context?
350 increment = NUM_BASIC_BLOCKS * ctx->mem_target;
351 if (ctx->quota != 0U && ctx->total + increment > ctx->quota)
354 INSIST(ctx->basic_table_count <= ctx->basic_table_size);
355 if (ctx->basic_table_count == ctx->basic_table_size) {
356 table_size = ctx->basic_table_size + TABLE_INCREMENT;
357 table = (ctx->memalloc)(ctx->arg,
358 table_size * sizeof(unsigned char *));
360 ctx->memalloc_failures++;
363 if (ctx->basic_table_size != 0) {
364 memcpy(table, ctx->basic_table,
365 ctx->basic_table_size *
366 sizeof(unsigned char *));
367 (ctx->memfree)(ctx->arg, ctx->basic_table);
369 ctx->basic_table = table;
370 ctx->basic_table_size = table_size;
373 new = (ctx->memalloc)(ctx->arg, NUM_BASIC_BLOCKS * ctx->mem_target);
375 ctx->memalloc_failures++;
378 ctx->total += increment;
379 ctx->basic_table[ctx->basic_table_count] = new;
380 ctx->basic_table_count++;
383 next = curr + ctx->mem_target;
384 for (i = 0; i < (NUM_BASIC_BLOCKS - 1); i++) {
385 ((element *)curr)->next = (element *)next;
387 next += ctx->mem_target;
390 * curr is now pointing at the last block in the
393 ((element *)curr)->next = NULL;
395 last = first + NUM_BASIC_BLOCKS * ctx->mem_target - 1;
396 if (first < ctx->lowest || ctx->lowest == NULL)
398 if (last > ctx->highest)
400 ctx->basic_blocks = new;
405 static inline isc_boolean_t
406 more_frags(isc_mem_t *ctx, size_t new_size) {
410 unsigned char *curr, *next;
413 * Try to get more fragments by chopping up a basic block.
416 if (ctx->basic_blocks == NULL) {
417 if (!more_basic_blocks(ctx)) {
419 * We can't get more memory from the OS, or we've
420 * hit the quota for this context.
423 * XXXRTH "At quota" notification here.
429 total_size = ctx->mem_target;
430 new = ctx->basic_blocks;
431 ctx->basic_blocks = ctx->basic_blocks->next;
432 frags = total_size / new_size;
433 ctx->stats[new_size].blocks++;
434 ctx->stats[new_size].freefrags += frags;
436 * Set up a linked-list of blocks of size
440 next = curr + new_size;
441 total_size -= new_size;
442 for (i = 0; i < (frags - 1); i++) {
443 ((element *)curr)->next = (element *)next;
446 total_size -= new_size;
449 * Add the remaining fragment of the basic block to a free list.
451 total_size = rmsize(total_size);
452 if (total_size > 0U) {
453 ((element *)next)->next = ctx->freelists[total_size];
454 ctx->freelists[total_size] = (element *)next;
455 ctx->stats[total_size].freefrags++;
458 * curr is now pointing at the last block in the
461 ((element *)curr)->next = NULL;
462 ctx->freelists[new_size] = new;
468 mem_getunlocked(isc_mem_t *ctx, size_t size) {
469 size_t new_size = quantize(size);
472 if (size >= ctx->max_size || new_size >= ctx->max_size) {
474 * memget() was called on something beyond our upper limit.
476 if (ctx->quota != 0U && ctx->total + size > ctx->quota) {
480 ret = (ctx->memalloc)(ctx->arg, size);
482 ctx->memalloc_failures++;
487 ctx->stats[ctx->max_size].gets++;
488 ctx->stats[ctx->max_size].totalgets++;
490 * If we don't set new_size to size, then the
491 * ISC_MEM_FILL code might write over bytes we
499 * If there are no blocks in the free list for this size, get a chunk
500 * of memory and then break it up into "new_size"-sized blocks, adding
501 * them to the free list.
503 if (ctx->freelists[new_size] == NULL && !more_frags(ctx, new_size))
507 * The free list uses the "rounded-up" size "new_size".
509 ret = ctx->freelists[new_size];
510 ctx->freelists[new_size] = ctx->freelists[new_size]->next;
513 * The stats[] uses the _actual_ "size" requested by the
514 * caller, with the caveat (in the code above) that "size" >= the
515 * max. size (max_size) ends up getting recorded as a call to
518 ctx->stats[size].gets++;
519 ctx->stats[size].totalgets++;
520 ctx->stats[new_size].freefrags--;
521 ctx->inuse += new_size;
527 memset(ret, 0xbe, new_size); /* Mnemonic for "beef". */
#if ISC_MEM_FILL && ISC_MEM_CHECKOVERRUN
/*
 * Verify that the fill pattern (0xbe, written by mem_getunlocked()) is
 * still intact in the slack space between the caller-requested "size"
 * and the quantized allocation size "new_size".  A mismatch means the
 * caller wrote past the end of its requested region.
 */
static inline void
check_overrun(void *mem, size_t size, size_t new_size) {
	unsigned char *cp;

	cp = (unsigned char *)mem;
	cp += size;

	while (size < new_size) {
		INSIST(*cp == 0xbe);
		cp++;
		size++;
	}
}
#endif
549 mem_putunlocked(isc_mem_t *ctx, void *mem, size_t size) {
550 size_t new_size = quantize(size);
552 if (size == ctx->max_size || new_size >= ctx->max_size) {
554 * memput() called on something beyond our upper limit.
557 memset(mem, 0xde, size); /* Mnemonic for "dead". */
559 (ctx->memfree)(ctx->arg, mem);
560 INSIST(ctx->stats[ctx->max_size].gets != 0U);
561 ctx->stats[ctx->max_size].gets--;
562 INSIST(size <= ctx->total);
569 #if ISC_MEM_CHECKOVERRUN
570 check_overrun(mem, size, new_size);
572 memset(mem, 0xde, new_size); /* Mnemonic for "dead". */
576 * The free list uses the "rounded-up" size "new_size".
578 ((element *)mem)->next = ctx->freelists[new_size];
579 ctx->freelists[new_size] = (element *)mem;
582 * The stats[] uses the _actual_ "size" requested by the
583 * caller, with the caveat (in the code above) that "size" >= the
584 * max. size (max_size) ends up getting recorded as a call to
587 INSIST(ctx->stats[size].gets != 0U);
588 ctx->stats[size].gets--;
589 ctx->stats[new_size].freefrags++;
590 ctx->inuse -= new_size;
594 * Perform a malloc, doing memory filling and overrun detection as necessary.
597 mem_get(isc_mem_t *ctx, size_t size) {
600 #if ISC_MEM_CHECKOVERRUN
604 ret = (ctx->memalloc)(ctx->arg, size);
606 ctx->memalloc_failures++;
610 memset(ret, 0xbe, size); /* Mnemonic for "beef". */
612 # if ISC_MEM_CHECKOVERRUN
622 * Perform a free, doing memory filling and overrun detection as necessary.
625 mem_put(isc_mem_t *ctx, void *mem, size_t size) {
626 #if ISC_MEM_CHECKOVERRUN
627 INSIST(((unsigned char *)mem)[size] == 0xbe);
630 memset(mem, 0xde, size); /* Mnemonic for "dead". */
634 (ctx->memfree)(ctx->arg, mem);
638 * Update internal counters after a memory get.
641 mem_getstats(isc_mem_t *ctx, size_t size) {
645 if (size > ctx->max_size) {
646 ctx->stats[ctx->max_size].gets++;
647 ctx->stats[ctx->max_size].totalgets++;
649 ctx->stats[size].gets++;
650 ctx->stats[size].totalgets++;
655 * Update internal counters after a memory put.
658 mem_putstats(isc_mem_t *ctx, void *ptr, size_t size) {
661 INSIST(ctx->inuse >= size);
664 if (size > ctx->max_size) {
665 INSIST(ctx->stats[ctx->max_size].gets > 0U);
666 ctx->stats[ctx->max_size].gets--;
668 INSIST(ctx->stats[size].gets > 0U);
669 ctx->stats[size].gets--;
/*
 * Default low-level allocator: a thin wrapper around malloc().
 * "arg" is the opaque allocator argument required by the isc_memalloc_t
 * signature and is unused here.  A zero-byte request is promoted to one
 * byte so that success always yields a usable, non-NULL pointer
 * (malloc(0) may legally return NULL).
 */
static void *
default_memalloc(void *arg, size_t size) {
	(void)arg;	/* unused */
	if (size == 0U)
		size = 1;
	return (malloc(size));
}
/*
 * Default low-level deallocator: a thin wrapper around free(),
 * matching the isc_memfree_t signature.  "arg" is unused.
 */
static void
default_memfree(void *arg, void *ptr) {
	(void)arg;	/* unused */
	free(ptr);
}
692 initialize_action(void) {
693 RUNTIME_CHECK(isc_mutex_init(&lock) == ISC_R_SUCCESS);
701 isc_mem_createx(size_t init_max_size, size_t target_size,
702 isc_memalloc_t memalloc, isc_memfree_t memfree, void *arg,
705 return (isc_mem_createx2(init_max_size, target_size, memalloc, memfree,
706 arg, ctxp, ISC_MEMFLAG_DEFAULT));
711 isc_mem_createx2(size_t init_max_size, size_t target_size,
712 isc_memalloc_t memalloc, isc_memfree_t memfree, void *arg,
713 isc_mem_t **ctxp, unsigned int flags)
718 REQUIRE(ctxp != NULL && *ctxp == NULL);
719 REQUIRE(memalloc != NULL);
720 REQUIRE(memfree != NULL);
722 INSIST((ALIGNMENT_SIZE & (ALIGNMENT_SIZE - 1)) == 0);
724 RUNTIME_CHECK(isc_once_do(&once, initialize_action) == ISC_R_SUCCESS);
726 ctx = (memalloc)(arg, sizeof(*ctx));
728 return (ISC_R_NOMEMORY);
730 if ((flags & ISC_MEMFLAG_NOLOCK) == 0) {
731 result = isc_mutex_init(&ctx->lock);
732 if (result != ISC_R_SUCCESS) {
738 if (init_max_size == 0U)
739 ctx->max_size = DEF_MAX_SIZE;
741 ctx->max_size = init_max_size;
750 ctx->hi_called = ISC_FALSE;
752 ctx->water_arg = NULL;
753 ctx->magic = MEM_MAGIC;
754 isc_ondestroy_init(&ctx->ondestroy);
755 ctx->memalloc = memalloc;
756 ctx->memfree = memfree;
759 ctx->checkfree = ISC_TRUE;
760 #if ISC_MEM_TRACKLINES
761 ctx->debuglist = NULL;
763 ISC_LIST_INIT(ctx->pools);
764 ctx->freelists = NULL;
765 ctx->basic_blocks = NULL;
766 ctx->basic_table = NULL;
767 ctx->basic_table_count = 0;
768 ctx->basic_table_size = 0;
772 ctx->stats = (memalloc)(arg,
773 (ctx->max_size+1) * sizeof(struct stats));
774 if (ctx->stats == NULL) {
775 result = ISC_R_NOMEMORY;
778 memset(ctx->stats, 0, (ctx->max_size + 1) * sizeof(struct stats));
780 if ((flags & ISC_MEMFLAG_INTERNAL) != 0) {
781 if (target_size == 0U)
782 ctx->mem_target = DEF_MEM_TARGET;
784 ctx->mem_target = target_size;
785 ctx->freelists = (memalloc)(arg, ctx->max_size *
787 if (ctx->freelists == NULL) {
788 result = ISC_R_NOMEMORY;
791 memset(ctx->freelists, 0,
792 ctx->max_size * sizeof(element *));
795 #if ISC_MEM_TRACKLINES
796 if ((isc_mem_debugging & ISC_MEM_DEBUGRECORD) != 0) {
799 ctx->debuglist = (memalloc)(arg,
800 (ctx->max_size+1) * sizeof(debuglist_t));
801 if (ctx->debuglist == NULL) {
802 result = ISC_R_NOMEMORY;
805 for (i = 0; i <= ctx->max_size; i++)
806 ISC_LIST_INIT(ctx->debuglist[i]);
810 ctx->memalloc_failures = 0;
813 ISC_LIST_INITANDAPPEND(contexts, ctx, link);
817 return (ISC_R_SUCCESS);
821 if (ctx->stats != NULL)
822 (memfree)(arg, ctx->stats);
823 if (ctx->freelists != NULL)
824 (memfree)(arg, ctx->freelists);
825 #if ISC_MEM_TRACKLINES
826 if (ctx->debuglist != NULL)
827 (ctx->memfree)(ctx->arg, ctx->debuglist);
828 #endif /* ISC_MEM_TRACKLINES */
829 if ((ctx->flags & ISC_MEMFLAG_NOLOCK) == 0)
830 DESTROYLOCK(&ctx->lock);
838 isc_mem_create(size_t init_max_size, size_t target_size,
841 return (isc_mem_createx2(init_max_size, target_size,
842 default_memalloc, default_memfree, NULL,
843 ctxp, ISC_MEMFLAG_DEFAULT));
847 isc_mem_create2(size_t init_max_size, size_t target_size,
848 isc_mem_t **ctxp, unsigned int flags)
850 return (isc_mem_createx2(init_max_size, target_size,
851 default_memalloc, default_memfree, NULL,
856 destroy(isc_mem_t *ctx) {
858 isc_ondestroy_t ondest;
863 ISC_LIST_UNLINK(contexts, ctx, link);
866 INSIST(ISC_LIST_EMPTY(ctx->pools));
868 #if ISC_MEM_TRACKLINES
869 if (ctx->debuglist != NULL) {
870 if (ctx->checkfree) {
871 for (i = 0; i <= ctx->max_size; i++) {
872 if (!ISC_LIST_EMPTY(ctx->debuglist[i]))
873 print_active(ctx, stderr);
874 INSIST(ISC_LIST_EMPTY(ctx->debuglist[i]));
879 for (i = 0; i <= ctx->max_size; i++)
880 for (dl = ISC_LIST_HEAD(ctx->debuglist[i]);
882 dl = ISC_LIST_HEAD(ctx->debuglist[i])) {
883 ISC_LIST_UNLINK(ctx->debuglist[i],
888 (ctx->memfree)(ctx->arg, ctx->debuglist);
891 INSIST(ctx->references == 0);
893 if (ctx->checkfree) {
894 for (i = 0; i <= ctx->max_size; i++) {
895 #if ISC_MEM_TRACKLINES
896 if (ctx->stats[i].gets != 0U)
897 print_active(ctx, stderr);
899 INSIST(ctx->stats[i].gets == 0U);
903 (ctx->memfree)(ctx->arg, ctx->stats);
905 if ((ctx->flags & ISC_MEMFLAG_INTERNAL) != 0) {
906 for (i = 0; i < ctx->basic_table_count; i++)
907 (ctx->memfree)(ctx->arg, ctx->basic_table[i]);
908 (ctx->memfree)(ctx->arg, ctx->freelists);
909 (ctx->memfree)(ctx->arg, ctx->basic_table);
912 ondest = ctx->ondestroy;
914 if ((ctx->flags & ISC_MEMFLAG_NOLOCK) == 0)
915 DESTROYLOCK(&ctx->lock);
916 (ctx->memfree)(ctx->arg, ctx);
918 isc_ondestroy_notify(&ondest, ctx);
922 isc_mem_attach(isc_mem_t *source, isc_mem_t **targetp) {
923 REQUIRE(VALID_CONTEXT(source));
924 REQUIRE(targetp != NULL && *targetp == NULL);
926 MCTXLOCK(source, &source->lock);
927 source->references++;
928 MCTXUNLOCK(source, &source->lock);
934 isc_mem_detach(isc_mem_t **ctxp) {
936 isc_boolean_t want_destroy = ISC_FALSE;
938 REQUIRE(ctxp != NULL);
940 REQUIRE(VALID_CONTEXT(ctx));
942 MCTXLOCK(ctx, &ctx->lock);
943 INSIST(ctx->references > 0);
945 if (ctx->references == 0)
946 want_destroy = ISC_TRUE;
947 MCTXUNLOCK(ctx, &ctx->lock);
956 * isc_mem_putanddetach() is the equivalent of:
959 * isc_mem_attach(ptr->mctx, &mctx);
960 * isc_mem_detach(&ptr->mctx);
961 * isc_mem_put(mctx, ptr, sizeof(*ptr);
962 * isc_mem_detach(&mctx);
966 isc__mem_putanddetach(isc_mem_t **ctxp, void *ptr, size_t size FLARG) {
968 isc_boolean_t want_destroy = ISC_FALSE;
972 REQUIRE(ctxp != NULL);
974 REQUIRE(VALID_CONTEXT(ctx));
975 REQUIRE(ptr != NULL);
978 * Must be before mem_putunlocked() as ctxp is usually within
983 if ((isc_mem_debugging & (ISC_MEM_DEBUGSIZE|ISC_MEM_DEBUGCTX)) != 0) {
984 if ((isc_mem_debugging & ISC_MEM_DEBUGSIZE) != 0) {
985 si = &(((size_info *)ptr)[-1]);
986 oldsize = si->u.size - ALIGNMENT_SIZE;
987 if ((isc_mem_debugging & ISC_MEM_DEBUGCTX) != 0)
988 oldsize -= ALIGNMENT_SIZE;
989 INSIST(oldsize == size);
991 isc__mem_free(ctx, ptr FLARG_PASS);
993 MCTXLOCK(ctx, &ctx->lock);
995 if (ctx->references == 0)
996 want_destroy = ISC_TRUE;
997 MCTXUNLOCK(ctx, &ctx->lock);
1004 if ((ctx->flags & ISC_MEMFLAG_INTERNAL) != 0) {
1005 MCTXLOCK(ctx, &ctx->lock);
1006 mem_putunlocked(ctx, ptr, size);
1008 mem_put(ctx, ptr, size);
1009 MCTXLOCK(ctx, &ctx->lock);
1010 mem_putstats(ctx, ptr, size);
1013 DELETE_TRACE(ctx, ptr, size, file, line);
1014 INSIST(ctx->references > 0);
1016 if (ctx->references == 0)
1017 want_destroy = ISC_TRUE;
1019 MCTXUNLOCK(ctx, &ctx->lock);
1026 isc_mem_destroy(isc_mem_t **ctxp) {
1030 * This routine provides legacy support for callers who use mctxs
1031 * without attaching/detaching.
1034 REQUIRE(ctxp != NULL);
1036 REQUIRE(VALID_CONTEXT(ctx));
1038 MCTXLOCK(ctx, &ctx->lock);
1039 #if ISC_MEM_TRACKLINES
1040 if (ctx->references != 1)
1041 print_active(ctx, stderr);
1043 REQUIRE(ctx->references == 1);
1045 MCTXUNLOCK(ctx, &ctx->lock);
1053 isc_mem_ondestroy(isc_mem_t *ctx, isc_task_t *task, isc_event_t **event) {
1056 MCTXLOCK(ctx, &ctx->lock);
1057 res = isc_ondestroy_register(&ctx->ondestroy, task, event);
1058 MCTXUNLOCK(ctx, &ctx->lock);
1065 isc__mem_get(isc_mem_t *ctx, size_t size FLARG) {
1067 isc_boolean_t call_water = ISC_FALSE;
1069 REQUIRE(VALID_CONTEXT(ctx));
1071 if ((isc_mem_debugging & (ISC_MEM_DEBUGSIZE|ISC_MEM_DEBUGCTX)) != 0)
1072 return (isc__mem_allocate(ctx, size FLARG_PASS));
1074 if ((ctx->flags & ISC_MEMFLAG_INTERNAL) != 0) {
1075 MCTXLOCK(ctx, &ctx->lock);
1076 ptr = mem_getunlocked(ctx, size);
1078 ptr = mem_get(ctx, size);
1079 MCTXLOCK(ctx, &ctx->lock);
1081 mem_getstats(ctx, size);
1084 ADD_TRACE(ctx, ptr, size, file, line);
1085 if (ctx->hi_water != 0U && !ctx->hi_called &&
1086 ctx->inuse > ctx->hi_water) {
1087 ctx->hi_called = ISC_TRUE;
1088 call_water = ISC_TRUE;
1090 if (ctx->inuse > ctx->maxinuse) {
1091 ctx->maxinuse = ctx->inuse;
1092 if (ctx->hi_water != 0U && ctx->inuse > ctx->hi_water &&
1093 (isc_mem_debugging & ISC_MEM_DEBUGUSAGE) != 0)
1094 fprintf(stderr, "maxinuse = %lu\n",
1095 (unsigned long)ctx->inuse);
1097 MCTXUNLOCK(ctx, &ctx->lock);
1100 (ctx->water)(ctx->water_arg, ISC_MEM_HIWATER);
1106 isc__mem_put(isc_mem_t *ctx, void *ptr, size_t size FLARG)
1108 isc_boolean_t call_water = ISC_FALSE;
1112 REQUIRE(VALID_CONTEXT(ctx));
1113 REQUIRE(ptr != NULL);
1115 if ((isc_mem_debugging & (ISC_MEM_DEBUGSIZE|ISC_MEM_DEBUGCTX)) != 0) {
1116 if ((isc_mem_debugging & ISC_MEM_DEBUGSIZE) != 0) {
1117 si = &(((size_info *)ptr)[-1]);
1118 oldsize = si->u.size - ALIGNMENT_SIZE;
1119 if ((isc_mem_debugging & ISC_MEM_DEBUGCTX) != 0)
1120 oldsize -= ALIGNMENT_SIZE;
1121 INSIST(oldsize == size);
1123 isc__mem_free(ctx, ptr FLARG_PASS);
1127 if ((ctx->flags & ISC_MEMFLAG_INTERNAL) != 0) {
1128 MCTXLOCK(ctx, &ctx->lock);
1129 mem_putunlocked(ctx, ptr, size);
1131 mem_put(ctx, ptr, size);
1132 MCTXLOCK(ctx, &ctx->lock);
1133 mem_putstats(ctx, ptr, size);
1136 DELETE_TRACE(ctx, ptr, size, file, line);
1139 * The check against ctx->lo_water == 0 is for the condition
1140 * when the context was pushed over hi_water but then had
1141 * isc_mem_setwater() called with 0 for hi_water and lo_water.
1143 if (ctx->hi_called &&
1144 (ctx->inuse < ctx->lo_water || ctx->lo_water == 0U)) {
1145 ctx->hi_called = ISC_FALSE;
1147 if (ctx->water != NULL)
1148 call_water = ISC_TRUE;
1150 MCTXUNLOCK(ctx, &ctx->lock);
1153 (ctx->water)(ctx->water_arg, ISC_MEM_LOWATER);
1156 #if ISC_MEM_TRACKLINES
1158 print_active(isc_mem_t *mctx, FILE *out) {
1159 if (mctx->debuglist != NULL) {
1163 isc_boolean_t found;
1165 fprintf(out, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1167 "Dump of all outstanding "
1168 "memory allocations:\n"));
1170 format = isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1171 ISC_MSG_PTRFILELINE,
1172 "\tptr %p size %u file %s line %u\n");
1173 for (i = 0; i <= mctx->max_size; i++) {
1174 dl = ISC_LIST_HEAD(mctx->debuglist[i]);
1179 while (dl != NULL) {
1180 for (j = 0; j < DEBUGLIST_COUNT; j++)
1181 if (dl->ptr[j] != NULL)
1182 fprintf(out, format,
1187 dl = ISC_LIST_NEXT(dl, link);
1191 fprintf(out, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1192 ISC_MSG_NONE, "\tNone.\n"));
1198 * Print the stats[] on the stream "out" with suitable formatting.
1201 isc_mem_stats(isc_mem_t *ctx, FILE *out) {
1203 const struct stats *s;
1204 const isc_mempool_t *pool;
1206 REQUIRE(VALID_CONTEXT(ctx));
1207 MCTXLOCK(ctx, &ctx->lock);
1209 for (i = 0; i <= ctx->max_size; i++) {
1212 if (s->totalgets == 0U && s->gets == 0U)
1214 fprintf(out, "%s%5lu: %11lu gets, %11lu rem",
1215 (i == ctx->max_size) ? ">=" : " ",
1216 (unsigned long) i, s->totalgets, s->gets);
1217 if ((ctx->flags & ISC_MEMFLAG_INTERNAL) != 0 &&
1218 (s->blocks != 0U || s->freefrags != 0U))
1219 fprintf(out, " (%lu bl, %lu ff)",
1220 s->blocks, s->freefrags);
1225 * Note that since a pool can be locked now, these stats might be
1226 * somewhat off if the pool is in active use at the time the stats
1227 * are dumped. The link fields are protected by the isc_mem_t's
1228 * lock, however, so walking this list and extracting integers from
1229 * stats fields is always safe.
1231 pool = ISC_LIST_HEAD(ctx->pools);
1233 fprintf(out, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1235 "[Pool statistics]\n"));
1236 fprintf(out, "%15s %10s %10s %10s %10s %10s %10s %10s %1s\n",
1237 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1238 ISC_MSG_POOLNAME, "name"),
1239 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1240 ISC_MSG_POOLSIZE, "size"),
1241 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1242 ISC_MSG_POOLMAXALLOC, "maxalloc"),
1243 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1244 ISC_MSG_POOLALLOCATED, "allocated"),
1245 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1246 ISC_MSG_POOLFREECOUNT, "freecount"),
1247 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1248 ISC_MSG_POOLFREEMAX, "freemax"),
1249 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1250 ISC_MSG_POOLFILLCOUNT, "fillcount"),
1251 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1252 ISC_MSG_POOLGETS, "gets"),
1255 while (pool != NULL) {
1256 fprintf(out, "%15s %10lu %10u %10u %10u %10u %10u %10u %s\n",
1257 pool->name, (unsigned long) pool->size, pool->maxalloc,
1258 pool->allocated, pool->freecount, pool->freemax,
1259 pool->fillcount, pool->gets,
1260 (pool->lock == NULL ? "N" : "Y"));
1261 pool = ISC_LIST_NEXT(pool, link);
1264 #if ISC_MEM_TRACKLINES
1265 print_active(ctx, out);
1268 MCTXUNLOCK(ctx, &ctx->lock);
1272 * Replacements for malloc() and free() -- they implicitly remember the
1273 * size of the object allocated (with some additional overhead).
1277 isc__mem_allocateunlocked(isc_mem_t *ctx, size_t size) {
1280 size += ALIGNMENT_SIZE;
1281 if ((isc_mem_debugging & ISC_MEM_DEBUGCTX) != 0)
1282 size += ALIGNMENT_SIZE;
1284 if ((ctx->flags & ISC_MEMFLAG_INTERNAL) != 0)
1285 si = mem_getunlocked(ctx, size);
1287 si = mem_get(ctx, size);
1291 if ((isc_mem_debugging & ISC_MEM_DEBUGCTX) != 0) {
1300 isc__mem_allocate(isc_mem_t *ctx, size_t size FLARG) {
1302 isc_boolean_t call_water = ISC_FALSE;
1304 REQUIRE(VALID_CONTEXT(ctx));
1306 if ((ctx->flags & ISC_MEMFLAG_INTERNAL) != 0) {
1307 MCTXLOCK(ctx, &ctx->lock);
1308 si = isc__mem_allocateunlocked(ctx, size);
1310 si = isc__mem_allocateunlocked(ctx, size);
1311 MCTXLOCK(ctx, &ctx->lock);
1313 mem_getstats(ctx, si[-1].u.size);
1316 #if ISC_MEM_TRACKLINES
1317 ADD_TRACE(ctx, si, si[-1].u.size, file, line);
1319 if (ctx->hi_water != 0U && !ctx->hi_called &&
1320 ctx->inuse > ctx->hi_water) {
1321 ctx->hi_called = ISC_TRUE;
1322 call_water = ISC_TRUE;
1324 if (ctx->inuse > ctx->maxinuse) {
1325 ctx->maxinuse = ctx->inuse;
1326 if (ctx->hi_water != 0U && ctx->inuse > ctx->hi_water &&
1327 (isc_mem_debugging & ISC_MEM_DEBUGUSAGE) != 0)
1328 fprintf(stderr, "maxinuse = %lu\n",
1329 (unsigned long)ctx->inuse);
1331 MCTXUNLOCK(ctx, &ctx->lock);
1334 (ctx->water)(ctx->water_arg, ISC_MEM_HIWATER);
1340 isc__mem_free(isc_mem_t *ctx, void *ptr FLARG) {
1343 isc_boolean_t call_water= ISC_FALSE;
1345 REQUIRE(VALID_CONTEXT(ctx));
1346 REQUIRE(ptr != NULL);
1348 if ((isc_mem_debugging & ISC_MEM_DEBUGCTX) != 0) {
1349 si = &(((size_info *)ptr)[-2]);
1350 REQUIRE(si->u.ctx == ctx);
1351 size = si[1].u.size;
1353 si = &(((size_info *)ptr)[-1]);
1357 if ((ctx->flags & ISC_MEMFLAG_INTERNAL) != 0) {
1358 MCTXLOCK(ctx, &ctx->lock);
1359 mem_putunlocked(ctx, si, size);
1361 mem_put(ctx, si, size);
1362 MCTXLOCK(ctx, &ctx->lock);
1363 mem_putstats(ctx, si, size);
1366 DELETE_TRACE(ctx, ptr, size, file, line);
1369 * The check against ctx->lo_water == 0 is for the condition
1370 * when the context was pushed over hi_water but then had
1371 * isc_mem_setwater() called with 0 for hi_water and lo_water.
1373 if (ctx->hi_called &&
1374 (ctx->inuse < ctx->lo_water || ctx->lo_water == 0U)) {
1375 ctx->hi_called = ISC_FALSE;
1377 if (ctx->water != NULL)
1378 call_water = ISC_TRUE;
1380 MCTXUNLOCK(ctx, &ctx->lock);
1383 (ctx->water)(ctx->water_arg, ISC_MEM_LOWATER);
1388 * Other useful things.
1392 isc__mem_strdup(isc_mem_t *mctx, const char *s FLARG) {
1396 REQUIRE(VALID_CONTEXT(mctx));
1401 ns = isc__mem_allocate(mctx, len + 1 FLARG_PASS);
1404 strncpy(ns, s, len + 1);
1410 isc_mem_setdestroycheck(isc_mem_t *ctx, isc_boolean_t flag) {
1411 REQUIRE(VALID_CONTEXT(ctx));
1412 MCTXLOCK(ctx, &ctx->lock);
1414 ctx->checkfree = flag;
1416 MCTXUNLOCK(ctx, &ctx->lock);
1424 isc_mem_setquota(isc_mem_t *ctx, size_t quota) {
1425 REQUIRE(VALID_CONTEXT(ctx));
1426 MCTXLOCK(ctx, &ctx->lock);
1430 MCTXUNLOCK(ctx, &ctx->lock);
1434 isc_mem_getquota(isc_mem_t *ctx) {
1437 REQUIRE(VALID_CONTEXT(ctx));
1438 MCTXLOCK(ctx, &ctx->lock);
1442 MCTXUNLOCK(ctx, &ctx->lock);
1448 isc_mem_inuse(isc_mem_t *ctx) {
1451 REQUIRE(VALID_CONTEXT(ctx));
1452 MCTXLOCK(ctx, &ctx->lock);
1456 MCTXUNLOCK(ctx, &ctx->lock);
1462 isc_mem_setwater(isc_mem_t *ctx, isc_mem_water_t water, void *water_arg,
1463 size_t hiwater, size_t lowater)
1465 isc_boolean_t callwater = ISC_FALSE;
1466 isc_mem_water_t oldwater;
1469 REQUIRE(VALID_CONTEXT(ctx));
1470 REQUIRE(hiwater >= lowater);
1472 MCTXLOCK(ctx, &ctx->lock);
1473 oldwater = ctx->water;
1474 oldwater_arg = ctx->water_arg;
1475 if (water == NULL) {
1476 callwater = ctx->hi_called;
1478 ctx->water_arg = NULL;
1481 ctx->hi_called = ISC_FALSE;
1483 if (ctx->hi_called &&
1484 (ctx->water != water || ctx->water_arg != water_arg ||
1485 ctx->inuse < lowater || lowater == 0U))
1486 callwater = ISC_TRUE;
1488 ctx->water_arg = water_arg;
1489 ctx->hi_water = hiwater;
1490 ctx->lo_water = lowater;
1491 ctx->hi_called = ISC_FALSE;
1493 MCTXUNLOCK(ctx, &ctx->lock);
1495 if (callwater && oldwater != NULL)
1496 (oldwater)(oldwater_arg, ISC_MEM_LOWATER);
1504 isc_mempool_create(isc_mem_t *mctx, size_t size, isc_mempool_t **mpctxp) {
1505 isc_mempool_t *mpctx;
1507 REQUIRE(VALID_CONTEXT(mctx));
1509 REQUIRE(mpctxp != NULL && *mpctxp == NULL);
1512 * Allocate space for this pool, initialize values, and if all works
1513 * well, attach to the memory context.
1515 mpctx = isc_mem_get(mctx, sizeof(isc_mempool_t));
1517 return (ISC_R_NOMEMORY);
1519 mpctx->magic = MEMPOOL_MAGIC;
1523 mpctx->maxalloc = UINT_MAX;
1524 mpctx->allocated = 0;
1525 mpctx->freecount = 0;
1527 mpctx->fillcount = 1;
1529 #if ISC_MEMPOOL_NAMES
1532 mpctx->items = NULL;
1536 MCTXLOCK(mctx, &mctx->lock);
1537 ISC_LIST_INITANDAPPEND(mctx->pools, mpctx, link);
1538 MCTXUNLOCK(mctx, &mctx->lock);
1540 return (ISC_R_SUCCESS);
1544 isc_mempool_setname(isc_mempool_t *mpctx, const char *name) {
1545 REQUIRE(name != NULL);
1547 #if ISC_MEMPOOL_NAMES
1548 if (mpctx->lock != NULL)
1551 strncpy(mpctx->name, name, sizeof(mpctx->name) - 1);
1552 mpctx->name[sizeof(mpctx->name) - 1] = '\0';
1554 if (mpctx->lock != NULL)
1555 UNLOCK(mpctx->lock);
1563 isc_mempool_destroy(isc_mempool_t **mpctxp) {
1564 isc_mempool_t *mpctx;
1569 REQUIRE(mpctxp != NULL);
1571 REQUIRE(VALID_MEMPOOL(mpctx));
1572 #if ISC_MEMPOOL_NAMES
1573 if (mpctx->allocated > 0)
1574 UNEXPECTED_ERROR(__FILE__, __LINE__,
1575 "isc_mempool_destroy(): mempool %s "
1579 REQUIRE(mpctx->allocated == 0);
1589 * Return any items on the free list
1591 MCTXLOCK(mctx, &mctx->lock);
1592 while (mpctx->items != NULL) {
1593 INSIST(mpctx->freecount > 0);
1595 item = mpctx->items;
1596 mpctx->items = item->next;
1598 if ((mctx->flags & ISC_MEMFLAG_INTERNAL) != 0) {
1599 mem_putunlocked(mctx, item, mpctx->size);
1601 mem_put(mctx, item, mpctx->size);
1602 mem_putstats(mctx, item, mpctx->size);
1605 MCTXUNLOCK(mctx, &mctx->lock);
1608 * Remove our linked list entry from the memory context.
1610 MCTXLOCK(mctx, &mctx->lock);
1611 ISC_LIST_UNLINK(mctx->pools, mpctx, link);
1612 MCTXUNLOCK(mctx, &mctx->lock);
1616 isc_mem_put(mpctx->mctx, mpctx, sizeof(isc_mempool_t));
1625 isc_mempool_associatelock(isc_mempool_t *mpctx, isc_mutex_t *lock) {
1626 REQUIRE(VALID_MEMPOOL(mpctx));
1627 REQUIRE(mpctx->lock == NULL);
1628 REQUIRE(lock != NULL);
/*
 * isc__mempool_get(): allocate one fixed-size (mpctx->size) item from
 * the pool.  Items are served from the pool's free list when possible;
 * otherwise the free list is refilled -- up to 'fillcount' items at a
 * time -- from the owning memory context.  Returns NULL when the pool
 * is at its maxalloc quota or the context cannot supply memory.
 * FLARG carries the caller's file/line when line tracking is enabled.
 */
1634 isc__mempool_get(isc_mempool_t *mpctx FLARG) {
1639 REQUIRE(VALID_MEMPOOL(mpctx));
/* Take the pool's external lock, if one was associated. */
1643 if (mpctx->lock != NULL)
1647 * Don't let the caller go over quota
1649 if (mpctx->allocated >= mpctx->maxalloc) {
1655 * if we have a free list item, return the first here
1657 item = mpctx->items;
1659 mpctx->items = item->next;
1660 INSIST(mpctx->freecount > 0);
1668 * We need to dip into the well. Lock the memory context here and
1669 * fill up our free list.
1671 MCTXLOCK(mctx, &mctx->lock);
1672 for (i = 0; i < mpctx->fillcount; i++) {
/* Internal contexts use the already-locked allocator path. */
1673 if ((mctx->flags & ISC_MEMFLAG_INTERNAL) != 0) {
1674 item = mem_getunlocked(mctx, mpctx->size);
1676 item = mem_get(mctx, mpctx->size);
1678 mem_getstats(mctx, mpctx->size);
/* Push the freshly obtained item onto the pool's free list. */
1682 item->next = mpctx->items;
1683 mpctx->items = item;
1686 MCTXUNLOCK(mctx, &mctx->lock);
1689 * If we didn't get any items, return NULL.
1691 item = mpctx->items;
1695 mpctx->items = item->next;
1701 if (mpctx->lock != NULL)
1702 UNLOCK(mpctx->lock);
1704 #if ISC_MEM_TRACKLINES
/* Record the allocation site (file/line) for leak tracking. */
1706 MCTXLOCK(mctx, &mctx->lock);
1707 ADD_TRACE(mctx, item, mpctx->size, file, line);
1708 MCTXUNLOCK(mctx, &mctx->lock);
1710 #endif /* ISC_MEM_TRACKLINES */
/*
 * isc__mempool_put(): return one item to the pool.  The item is cached
 * on the pool's free list unless the list already holds 'freemax'
 * items, in which case it is returned directly to the owning memory
 * context.  FLARG carries the caller's file/line when line tracking is
 * enabled.
 */
1716 isc__mempool_put(isc_mempool_t *mpctx, void *mem FLARG) {
1720 REQUIRE(VALID_MEMPOOL(mpctx));
1721 REQUIRE(mem != NULL);
/* Take the pool's external lock, if one was associated. */
1725 if (mpctx->lock != NULL)
1728 INSIST(mpctx->allocated > 0);
1731 #if ISC_MEM_TRACKLINES
/* Drop the allocation-site record kept for leak tracking. */
1732 MCTXLOCK(mctx, &mctx->lock);
1733 DELETE_TRACE(mctx, mem, mpctx->size, file, line);
1734 MCTXUNLOCK(mctx, &mctx->lock);
1735 #endif /* ISC_MEM_TRACKLINES */
1738 * If our free list is full, return this to the mctx directly.
1740 if (mpctx->freecount >= mpctx->freemax) {
/*
 * Internally-managed memory takes the context lock around the
 * unlocked allocator call; otherwise mem_put() is used and the
 * statistics update is done under the context lock.
 */
1741 if ((mctx->flags & ISC_MEMFLAG_INTERNAL) != 0) {
1742 MCTXLOCK(mctx, &mctx->lock);
1743 mem_putunlocked(mctx, mem, mpctx->size);
1744 MCTXUNLOCK(mctx, &mctx->lock);
1746 mem_put(mctx, mem, mpctx->size);
1747 MCTXLOCK(mctx, &mctx->lock);
1748 mem_putstats(mctx, mem, mpctx->size);
1749 MCTXUNLOCK(mctx, &mctx->lock);
1751 if (mpctx->lock != NULL)
1752 UNLOCK(mpctx->lock);
1757 * Otherwise, attach it to our free list and bump the counter.
/* Reinterpret the returned block as a free-list element and push it. */
1760 item = (element *)mem;
1761 item->next = mpctx->items;
1762 mpctx->items = item;
1764 if (mpctx->lock != NULL)
1765 UNLOCK(mpctx->lock);
/*
 * isc_mempool_setfreemax(): set the maximum number of items the pool
 * will keep cached on its free list before returning surplus items to
 * the memory context.  Locks the pool's associated mutex, if any.
 */
1773 isc_mempool_setfreemax(isc_mempool_t *mpctx, unsigned int limit) {
1774 REQUIRE(VALID_MEMPOOL(mpctx));
1776 if (mpctx->lock != NULL)
1779 mpctx->freemax = limit;
1781 if (mpctx->lock != NULL)
1782 UNLOCK(mpctx->lock);
/*
 * isc_mempool_getfreemax(): return the pool's free-list size limit.
 * The read is done under the pool's associated mutex, if any.
 */
1786 isc_mempool_getfreemax(isc_mempool_t *mpctx) {
1787 unsigned int freemax;
1789 REQUIRE(VALID_MEMPOOL(mpctx));
1791 if (mpctx->lock != NULL)
1794 freemax = mpctx->freemax;
1796 if (mpctx->lock != NULL)
1797 UNLOCK(mpctx->lock);
/*
 * isc_mempool_getfreecount(): return the number of items currently
 * cached on the pool's free list.  The read is done under the pool's
 * associated mutex, if any.
 */
1803 isc_mempool_getfreecount(isc_mempool_t *mpctx) {
1804 unsigned int freecount;
1806 REQUIRE(VALID_MEMPOOL(mpctx));
1808 if (mpctx->lock != NULL)
1811 freecount = mpctx->freecount;
1813 if (mpctx->lock != NULL)
1814 UNLOCK(mpctx->lock);
/*
 * isc_mempool_setmaxalloc(): set the quota on simultaneously allocated
 * items; isc__mempool_get() fails once 'allocated' reaches this limit.
 * Locks the pool's associated mutex, if any.
 */
1820 isc_mempool_setmaxalloc(isc_mempool_t *mpctx, unsigned int limit) {
1823 REQUIRE(VALID_MEMPOOL(mpctx));
1825 if (mpctx->lock != NULL)
1828 mpctx->maxalloc = limit;
1830 if (mpctx->lock != NULL)
1831 UNLOCK(mpctx->lock);
/*
 * isc_mempool_getmaxalloc(): return the pool's allocation quota.
 * The read is done under the pool's associated mutex, if any.
 */
1835 isc_mempool_getmaxalloc(isc_mempool_t *mpctx) {
1836 unsigned int maxalloc;
1838 REQUIRE(VALID_MEMPOOL(mpctx));
1840 if (mpctx->lock != NULL)
1843 maxalloc = mpctx->maxalloc;
1845 if (mpctx->lock != NULL)
1846 UNLOCK(mpctx->lock);
/*
 * isc_mempool_getallocated(): return the number of items currently
 * checked out of the pool.  The read is done under the pool's
 * associated mutex, if any.
 */
1852 isc_mempool_getallocated(isc_mempool_t *mpctx) {
1853 unsigned int allocated;
1855 REQUIRE(VALID_MEMPOOL(mpctx));
1857 if (mpctx->lock != NULL)
1860 allocated = mpctx->allocated;
1862 if (mpctx->lock != NULL)
1863 UNLOCK(mpctx->lock);
/*
 * isc_mempool_setfillcount(): set how many items isc__mempool_get()
 * fetches from the memory context in one batch when the pool's free
 * list is empty.  Locks the pool's associated mutex, if any.
 */
1869 isc_mempool_setfillcount(isc_mempool_t *mpctx, unsigned int limit) {
1871 REQUIRE(VALID_MEMPOOL(mpctx));
1873 if (mpctx->lock != NULL)
1876 mpctx->fillcount = limit;
1878 if (mpctx->lock != NULL)
1879 UNLOCK(mpctx->lock);
/*
 * isc_mempool_getfillcount(): return the pool's batch refill size.
 * The read is done under the pool's associated mutex, if any.
 */
1883 isc_mempool_getfillcount(isc_mempool_t *mpctx) {
1884 unsigned int fillcount;
1886 REQUIRE(VALID_MEMPOOL(mpctx));
1888 if (mpctx->lock != NULL)
1891 fillcount = mpctx->fillcount;
1893 if (mpctx->lock != NULL)
1894 UNLOCK(mpctx->lock);
/*
 * isc_mem_printactive(): write a report of the context's still-active
 * (allocated but not freed) blocks to 'file'.  Effectively a no-op
 * unless ISC_MEM_TRACKLINES was compiled in.
 */
1900 isc_mem_printactive(isc_mem_t *ctx, FILE *file) {
1902 REQUIRE(VALID_CONTEXT(ctx));
1903 REQUIRE(file != NULL);
1905 #if !ISC_MEM_TRACKLINES
1909 print_active(ctx, file);
/*
 * isc_mem_printallactive(): dump the active allocations of every
 * memory context on the global 'contexts' list to 'file'.  A no-op
 * unless ISC_MEM_TRACKLINES was compiled in.
 */
1914 isc_mem_printallactive(FILE *file) {
1915 #if !ISC_MEM_TRACKLINES
/* Ensure the global context list (and its lock) is initialized. */
1920 RUNTIME_CHECK(isc_once_do(&once, initialize_action) == ISC_R_SUCCESS);
1923 for (ctx = ISC_LIST_HEAD(contexts);
1925 ctx = ISC_LIST_NEXT(ctx, link)) {
1926 fprintf(file, "context: %p\n", ctx);
1927 print_active(ctx, file);
1934 isc_mem_checkdestroyed(FILE *file) {
1936 RUNTIME_CHECK(isc_once_do(&once, initialize_action) == ISC_R_SUCCESS);
1939 if (!ISC_LIST_EMPTY(contexts)) {
1940 #if ISC_MEM_TRACKLINES
1943 for (ctx = ISC_LIST_HEAD(contexts);
1945 ctx = ISC_LIST_NEXT(ctx, link)) {
1946 fprintf(file, "context: %p\n", ctx);
1947 print_active(ctx, file);