2 * Copyright (C) 2004-2007 Internet Systems Consortium, Inc. ("ISC")
3 * Copyright (C) 1997-2003 Internet Software Consortium.
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
10 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
11 * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
12 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
13 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
14 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
15 * PERFORMANCE OF THIS SOFTWARE.
18 /* $Id: mem.c,v 1.98.2.7.2.12 2007/11/26 23:45:51 tbox Exp $ */
28 #include <isc/magic.h>
31 #include <isc/ondestroy.h>
32 #include <isc/string.h>
34 #include <isc/mutex.h>
37 #ifndef ISC_MEM_DEBUGGING
38 #define ISC_MEM_DEBUGGING 0
40 LIBISC_EXTERNAL_DATA unsigned int isc_mem_debugging = ISC_MEM_DEBUGGING;
43 * Define ISC_MEM_USE_INTERNAL_MALLOC=1 to use the internal malloc()
44 * implementation in preference to the system one. The internal malloc()
45 * is very space-efficient, and quite fast on uniprocessor systems. It
46 * performs poorly on multiprocessor machines.
48 #ifndef ISC_MEM_USE_INTERNAL_MALLOC
49 #define ISC_MEM_USE_INTERNAL_MALLOC 0
56 #define DEF_MAX_SIZE 1100
57 #define DEF_MEM_TARGET 4096
58 #define ALIGNMENT_SIZE 8 /* must be a power of 2 */
59 #define NUM_BASIC_BLOCKS 64 /* must be > 1 */
60 #define TABLE_INCREMENT 1024
61 #define DEBUGLIST_COUNT 1024
66 #if ISC_MEM_TRACKLINES
67 typedef struct debuglink debuglink_t;
69 ISC_LINK(debuglink_t) link;
70 const void *ptr[DEBUGLIST_COUNT];
71 unsigned int size[DEBUGLIST_COUNT];
72 const char *file[DEBUGLIST_COUNT];
73 unsigned int line[DEBUGLIST_COUNT];
77 #define FLARG_PASS , file, line
78 #define FLARG , const char *file, int line
84 typedef struct element element;
91 * This structure must be ALIGNMENT_SIZE bytes.
95 char bytes[ALIGNMENT_SIZE];
101 unsigned long totalgets;
102 #if ISC_MEM_USE_INTERNAL_MALLOC
103 unsigned long blocks;
104 unsigned long freefrags;
105 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
108 #define MEM_MAGIC ISC_MAGIC('M', 'e', 'm', 'C')
109 #define VALID_CONTEXT(c) ISC_MAGIC_VALID(c, MEM_MAGIC)
111 #if ISC_MEM_TRACKLINES
112 typedef ISC_LIST(debuglink_t) debuglist_t;
117 isc_ondestroy_t ondestroy;
119 isc_memalloc_t memalloc;
120 isc_memfree_t memfree;
123 isc_boolean_t checkfree;
124 struct stats * stats;
125 unsigned int references;
132 isc_boolean_t hi_called;
133 isc_mem_water_t water;
135 ISC_LIST(isc_mempool_t) pools;
137 #if ISC_MEM_USE_INTERNAL_MALLOC
139 element ** freelists;
140 element * basic_blocks;
141 unsigned char ** basic_table;
142 unsigned int basic_table_count;
143 unsigned int basic_table_size;
144 unsigned char * lowest;
145 unsigned char * highest;
146 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
148 #if ISC_MEM_TRACKLINES
149 debuglist_t * debuglist;
152 unsigned int memalloc_failures;
155 #define MEMPOOL_MAGIC ISC_MAGIC('M', 'E', 'M', 'p')
156 #define VALID_MEMPOOL(c) ISC_MAGIC_VALID(c, MEMPOOL_MAGIC)
159 /* always unlocked */
160 unsigned int magic; /* magic number */
161 isc_mutex_t *lock; /* optional lock */
162 isc_mem_t *mctx; /* our memory context */
163 /* locked via the memory context's lock */
164 ISC_LINK(isc_mempool_t) link; /* next pool in this mem context */
165 /* optionally locked from here down */
166 element *items; /* low water item list */
167 size_t size; /* size of each item on this pool */
168 unsigned int maxalloc; /* max number of items allowed */
169 unsigned int allocated; /* # of items currently given out */
170 unsigned int freecount; /* # of items on reserved list */
171 unsigned int freemax; /* # of items allowed on free list */
172 unsigned int fillcount; /* # of items to fetch on each fill */
174 unsigned int gets; /* # of requests to this pool */
175 /* Debugging only. */
176 #if ISC_MEMPOOL_NAMES
177 char name[16]; /* printed name in stats reports */
182 * Private Inline-able.
185 #if ! ISC_MEM_TRACKLINES
186 #define ADD_TRACE(a, b, c, d, e)
187 #define DELETE_TRACE(a, b, c, d, e)
189 #define ADD_TRACE(a, b, c, d, e) \
191 if ((isc_mem_debugging & (ISC_MEM_DEBUGTRACE | \
192 ISC_MEM_DEBUGRECORD)) != 0 && \
194 add_trace_entry(a, b, c, d, e); \
196 #define DELETE_TRACE(a, b, c, d, e) delete_trace_entry(a, b, c, d, e)
199 print_active(isc_mem_t *ctx, FILE *out);
202 * mctx must be locked.
/*
 * add_trace_entry(): record an allocation (ptr, size, file, line) in the
 * context's per-size debuglist so leaks can be dumped by print_active().
 * NOTE(review): this dump is line-sampled — source lines are missing between
 * consecutive statements (note the jump 205 -> 211 etc.); do not infer the
 * complete control flow from this excerpt.
 */
205 add_trace_entry(isc_mem_t *mctx, const void *ptr, unsigned int size
/* Optionally trace each get to stderr when ISC_MEM_DEBUGTRACE is set. */
211 if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0)
212 fprintf(stderr, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
215 "file %s line %u mctx %p\n"),
216 ptr, size, file, line, mctx);
/* Nothing to record unless ISC_MEM_DEBUGRECORD allocated a debuglist. */
218 if (mctx->debuglist == NULL)
/* Oversized requests are all binned in the max_size slot. */
221 if (size > mctx->max_size)
222 size = mctx->max_size;
/* Scan existing debuglink entries for a free ptr[] slot. */
224 dl = ISC_LIST_HEAD(mctx->debuglist[size]);
226 if (dl->count == DEBUGLIST_COUNT)
228 for (i = 0; i < DEBUGLIST_COUNT; i++) {
229 if (dl->ptr[i] == NULL) {
239 dl = ISC_LIST_NEXT(dl, link);
/* No free slot: allocate a new debuglink with raw malloc() (not mctx,
 * to avoid recursing into the allocator being debugged). */
242 dl = malloc(sizeof(debuglink_t));
245 ISC_LINK_INIT(dl, link);
246 for (i = 1; i < DEBUGLIST_COUNT; i++) {
259 ISC_LIST_PREPEND(mctx->debuglist[size], dl, link);
/*
 * delete_trace_entry(): remove the debuglist record made by add_trace_entry()
 * when the matching memory is freed.  Caller must hold the mctx lock
 * (per the "mctx must be locked" comment preceding these helpers).
 * NOTE(review): fragmentary excerpt — interior lines are missing.
 */
263 delete_trace_entry(isc_mem_t *mctx, const void *ptr, unsigned int size,
264 const char *file, unsigned int line)
/* Optional stderr trace of each put. */
269 if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0)
270 fprintf(stderr, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
273 "file %s line %u mctx %p\n"),
274 ptr, size, file, line, mctx);
276 if (mctx->debuglist == NULL)
/* Same max_size binning as add_trace_entry(). */
279 if (size > mctx->max_size)
280 size = mctx->max_size;
/* Walk the per-size list looking for the slot holding this ptr. */
282 dl = ISC_LIST_HEAD(mctx->debuglist[size]);
284 for (i = 0; i < DEBUGLIST_COUNT; i++) {
285 if (dl->ptr[i] == ptr) {
291 INSIST(dl->count > 0);
/* Last entry in this debuglink: unlink and (presumably) free it. */
293 if (dl->count == 0) {
294 ISC_LIST_UNLINK(mctx->debuglist[size],
301 dl = ISC_LIST_NEXT(dl, link);
305 * If we get here, we didn't find the item on the list. We're
310 #endif /* ISC_MEM_TRACKLINES */
312 #if ISC_MEM_USE_INTERNAL_MALLOC
/* rmsize(): mask size down to the previous ALIGNMENT_SIZE multiple.
 * Relies on ALIGNMENT_SIZE being a power of 2 (asserted in isc_mem_createx). */
314 rmsize(size_t size) {
316 * round down to ALIGNMENT_SIZE
318 return (size & (~(ALIGNMENT_SIZE - 1)));
/* quantize(): round size up to the next ALIGNMENT_SIZE multiple; the
 * internal-malloc free lists are keyed by this rounded size. */
322 quantize(size_t size) {
324 * Round up the result in order to get a size big
325 * enough to satisfy the request and be aligned on ALIGNMENT_SIZE
/* NOTE(review): a missing line here presumably handles size == 0. */
330 return (ALIGNMENT_SIZE);
331 return ((size + ALIGNMENT_SIZE - 1) & (~(ALIGNMENT_SIZE - 1)));
/*
 * more_basic_blocks(): grow the internal allocator by fetching
 * NUM_BASIC_BLOCKS * mem_target bytes from the underlying memalloc,
 * chaining them as a free list of basic blocks and tracking the
 * allocation in basic_table so destroy() can release it.
 * Returns an isc_boolean_t success flag.  Caller holds the context lock.
 * NOTE(review): fragmentary excerpt — interior lines are missing.
 */
334 static inline isc_boolean_t
335 more_basic_blocks(isc_mem_t *ctx) {
337 unsigned char *curr, *next;
338 unsigned char *first, *last;
339 unsigned char **table;
340 unsigned int table_size;
344 /* Require: we hold the context lock. */
347 * Did we hit the quota for this context?
349 increment = NUM_BASIC_BLOCKS * ctx->mem_target;
350 if (ctx->quota != 0 && ctx->total + increment > ctx->quota)
/* Grow basic_table (by TABLE_INCREMENT entries) when full; the old
 * table contents are copied over and the old table freed. */
353 INSIST(ctx->basic_table_count <= ctx->basic_table_size);
354 if (ctx->basic_table_count == ctx->basic_table_size) {
355 table_size = ctx->basic_table_size + TABLE_INCREMENT;
356 table = (ctx->memalloc)(ctx->arg,
357 table_size * sizeof(unsigned char *));
359 ctx->memalloc_failures++;
362 if (ctx->basic_table_size != 0) {
363 memcpy(table, ctx->basic_table,
364 ctx->basic_table_size *
365 sizeof(unsigned char *));
366 (ctx->memfree)(ctx->arg, ctx->basic_table);
368 ctx->basic_table = table;
369 ctx->basic_table_size = table_size;
/* Fetch the actual block of memory from the system allocator. */
372 new = (ctx->memalloc)(ctx->arg, NUM_BASIC_BLOCKS * ctx->mem_target);
374 ctx->memalloc_failures++;
377 ctx->total += increment;
378 ctx->basic_table[ctx->basic_table_count] = new;
379 ctx->basic_table_count++;
/* Thread the new memory into a singly linked list of mem_target-sized
 * basic blocks, terminated with NULL. */
382 next = curr + ctx->mem_target;
383 for (i = 0; i < (NUM_BASIC_BLOCKS - 1); i++) {
384 ((element *)curr)->next = (element *)next;
386 next += ctx->mem_target;
389 * curr is now pointing at the last block in the
392 ((element *)curr)->next = NULL;
/* Track the lowest/highest addresses ever handed out, used elsewhere
 * (not visible here) to sanity-check pointers on free. */
394 last = first + NUM_BASIC_BLOCKS * ctx->mem_target - 1;
395 if (first < ctx->lowest || ctx->lowest == NULL)
397 if (last > ctx->highest)
399 ctx->basic_blocks = new;
/*
 * more_frags(): refill the free list for new_size by carving one basic
 * block into new_size-sized fragments; any sub-new_size remainder is
 * donated to the free list matching its (rounded-down) size.
 * NOTE(review): fragmentary excerpt — interior lines are missing.
 */
404 static inline isc_boolean_t
405 more_frags(isc_mem_t *ctx, size_t new_size) {
409 unsigned char *curr, *next;
412 * Try to get more fragments by chopping up a basic block.
415 if (ctx->basic_blocks == NULL) {
416 if (!more_basic_blocks(ctx)) {
418 * We can't get more memory from the OS, or we've
419 * hit the quota for this context.
422 * XXXRTH "At quota" notification here.
/* Pop one basic block and account for the fragments it yields. */
428 total_size = ctx->mem_target;
429 new = ctx->basic_blocks;
430 ctx->basic_blocks = ctx->basic_blocks->next;
431 frags = total_size / new_size;
432 ctx->stats[new_size].blocks++;
433 ctx->stats[new_size].freefrags += frags;
435 * Set up a linked-list of blocks of size
439 next = curr + new_size;
440 total_size -= new_size;
441 for (i = 0; i < (frags - 1); i++) {
442 ((element *)curr)->next = (element *)next;
445 total_size -= new_size;
448 * Add the remaining fragment of the basic block to a free list.
450 total_size = rmsize(total_size);
451 if (total_size > 0) {
452 ((element *)next)->next = ctx->freelists[total_size];
453 ctx->freelists[total_size] = (element *)next;
454 ctx->stats[total_size].freefrags++;
457 * curr is now pointing at the last block in the
460 ((element *)curr)->next = NULL;
461 ctx->freelists[new_size] = new;
/*
 * mem_getunlocked(): internal-malloc allocation path; caller holds the
 * context lock.  Sizes >= max_size bypass the free lists and go straight
 * to the underlying memalloc; otherwise the request is quantize()d and
 * served from (or replenished into) the per-size free list.
 * NOTE(review): fragmentary excerpt — interior lines are missing.
 */
467 mem_getunlocked(isc_mem_t *ctx, size_t size) {
468 size_t new_size = quantize(size);
471 if (size >= ctx->max_size || new_size >= ctx->max_size) {
473 * memget() was called on something beyond our upper limit.
475 if (ctx->quota != 0 && ctx->total + size > ctx->quota) {
479 ret = (ctx->memalloc)(ctx->arg, size);
481 ctx->memalloc_failures++;
/* Oversized gets are all accounted in the max_size stats bucket. */
486 ctx->stats[ctx->max_size].gets++;
487 ctx->stats[ctx->max_size].totalgets++;
489 * If we don't set new_size to size, then the
490 * ISC_MEM_FILL code might write over bytes we
498 * If there are no blocks in the free list for this size, get a chunk
499 * of memory and then break it up into "new_size"-sized blocks, adding
500 * them to the free list.
502 if (ctx->freelists[new_size] == NULL && !more_frags(ctx, new_size))
506 * The free list uses the "rounded-up" size "new_size".
508 ret = ctx->freelists[new_size];
509 ctx->freelists[new_size] = ctx->freelists[new_size]->next;
512 * The stats[] uses the _actual_ "size" requested by the
513 * caller, with the caveat (in the code above) that "size" >= the
514 * max. size (max_size) ends up getting recorded as a call to
517 ctx->stats[size].gets++;
518 ctx->stats[size].totalgets++;
519 ctx->stats[new_size].freefrags--;
520 ctx->inuse += new_size;
/* ISC_MEM_FILL poison pattern so use of uninitialized memory is visible. */
526 memset(ret, 0xbe, new_size); /* Mnemonic for "beef". */
532 #if ISC_MEM_FILL && ISC_MEM_CHECKOVERRUN
/* check_overrun(): verify the fill pattern in the slack between the
 * requested size and the quantized size is intact (overrun detector). */
534 check_overrun(void *mem, size_t size, size_t new_size) {
537 cp = (unsigned char *)mem;
539 while (size < new_size) {
/*
 * mem_putunlocked(): internal-malloc free path; caller holds the context
 * lock.  Mirrors mem_getunlocked(): oversized frees go back to memfree,
 * everything else is poisoned and pushed on the per-size free list.
 * NOTE(review): fragmentary excerpt — interior lines are missing.
 */
548 mem_putunlocked(isc_mem_t *ctx, void *mem, size_t size) {
549 size_t new_size = quantize(size);
551 if (size == ctx->max_size || new_size >= ctx->max_size) {
553 * memput() called on something beyond our upper limit.
/* Poison freed memory so use-after-free is detectable. */
556 memset(mem, 0xde, size); /* Mnemonic for "dead". */
558 (ctx->memfree)(ctx->arg, mem);
559 INSIST(ctx->stats[ctx->max_size].gets != 0);
560 ctx->stats[ctx->max_size].gets--;
561 INSIST(size <= ctx->total);
568 #if ISC_MEM_CHECKOVERRUN
569 check_overrun(mem, size, new_size);
571 memset(mem, 0xde, new_size); /* Mnemonic for "dead". */
575 * The free list uses the "rounded-up" size "new_size".
577 ((element *)mem)->next = ctx->freelists[new_size];
578 ctx->freelists[new_size] = (element *)mem;
581 * The stats[] uses the _actual_ "size" requested by the
582 * caller, with the caveat (in the code above) that "size" >= the
583 * max. size (max_size) ends up getting recorded as a call to
586 INSIST(ctx->stats[size].gets != 0);
587 ctx->stats[size].gets--;
588 ctx->stats[new_size].freefrags++;
589 ctx->inuse -= new_size;
592 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
595 * Perform a malloc, doing memory filling and overrun detection as necessary.
/* mem_get(): system-malloc path.  With ISC_MEM_CHECKOVERRUN a guard byte
 * is (per line 628 below) placed at mem[size] and verified on free. */
598 mem_get(isc_mem_t *ctx, size_t size) {
601 #if ISC_MEM_CHECKOVERRUN
605 ret = (ctx->memalloc)(ctx->arg, size);
607 ctx->memalloc_failures++;
611 memset(ret, 0xbe, size); /* Mnemonic for "beef". */
613 # if ISC_MEM_CHECKOVERRUN
623 * Perform a free, doing memory filling and overrun detection as necessary.
/* mem_put(): system-malloc free path; checks the guard byte then poisons. */
626 mem_put(isc_mem_t *ctx, void *mem, size_t size) {
627 #if ISC_MEM_CHECKOVERRUN
628 INSIST(((unsigned char *)mem)[size] == 0xbe);
631 memset(mem, 0xde, size); /* Mnemonic for "dead". */
635 (ctx->memfree)(ctx->arg, mem);
639 * Update internal counters after a memory get.
/* mem_getstats(): bump gets/totalgets, binning oversized requests into
 * the max_size bucket (same convention as the internal-malloc path). */
642 mem_getstats(isc_mem_t *ctx, size_t size) {
646 if (size > ctx->max_size) {
647 ctx->stats[ctx->max_size].gets++;
648 ctx->stats[ctx->max_size].totalgets++;
650 ctx->stats[size].gets++;
651 ctx->stats[size].totalgets++;
656 * Update internal counters after a memory put.
/* mem_putstats(): inverse of mem_getstats(); INSISTs guard underflow. */
659 mem_putstats(isc_mem_t *ctx, void *ptr, size_t size) {
662 INSIST(ctx->inuse >= size);
665 if (size > ctx->max_size) {
666 INSIST(ctx->stats[ctx->max_size].gets > 0U);
667 ctx->stats[ctx->max_size].gets--;
669 INSIST(ctx->stats[size].gets > 0U);
670 ctx->stats[size].gets--;
674 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
674 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
/* Default memalloc/memfree callbacks used by isc_mem_create(): thin
 * wrappers over malloc()/free(); arg is unused. */
681 default_memalloc(void *arg, size_t size) {
685 return (malloc(size));
689 default_memfree(void *arg, void *ptr) {
/*
 * isc_mem_createx(): create a memory context using the supplied
 * memalloc/memfree callbacks and opaque arg.  init_max_size selects the
 * largest size served from internal free lists (0 => DEF_MAX_SIZE);
 * target_size sets the basic-block size for the internal allocator
 * (0 => DEF_MEM_TARGET).  Returns ISC_R_SUCCESS / ISC_R_NOMEMORY /
 * ISC_R_UNEXPECTED, cleaning up partial state on failure.
 * NOTE(review): fragmentary excerpt — interior lines are missing.
 */
699 isc_mem_createx(size_t init_max_size, size_t target_size,
700 isc_memalloc_t memalloc, isc_memfree_t memfree, void *arg,
706 REQUIRE(ctxp != NULL && *ctxp == NULL);
707 REQUIRE(memalloc != NULL);
708 REQUIRE(memfree != NULL);
/* ALIGNMENT_SIZE must be a power of 2 for rmsize()/quantize(). */
710 INSIST((ALIGNMENT_SIZE & (ALIGNMENT_SIZE - 1)) == 0);
712 #if !ISC_MEM_USE_INTERNAL_MALLOC
/* The context itself comes from the caller-supplied allocator. */
716 ctx = (memalloc)(arg, sizeof(*ctx));
718 return (ISC_R_NOMEMORY);
720 if (isc_mutex_init(&ctx->lock) != ISC_R_SUCCESS) {
721 UNEXPECTED_ERROR(__FILE__, __LINE__,
722 "isc_mutex_init() %s",
723 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
724 ISC_MSG_FAILED, "failed"));
726 return (ISC_R_UNEXPECTED);
729 if (init_max_size == 0U)
730 ctx->max_size = DEF_MAX_SIZE;
732 ctx->max_size = init_max_size;
/* Field initialization (several assignments elided in this dump). */
740 ctx->hi_called = ISC_FALSE;
742 ctx->water_arg = NULL;
743 ctx->magic = MEM_MAGIC;
744 isc_ondestroy_init(&ctx->ondestroy);
745 ctx->memalloc = memalloc;
746 ctx->memfree = memfree;
749 ctx->checkfree = ISC_TRUE;
750 #if ISC_MEM_TRACKLINES
751 ctx->debuglist = NULL;
753 ISC_LIST_INIT(ctx->pools);
755 #if ISC_MEM_USE_INTERNAL_MALLOC
756 ctx->freelists = NULL;
757 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
/* stats[] has max_size+1 entries: one per size plus the ">= max" bucket. */
759 ctx->stats = (memalloc)(arg,
760 (ctx->max_size+1) * sizeof(struct stats));
761 if (ctx->stats == NULL) {
762 result = ISC_R_NOMEMORY;
765 memset(ctx->stats, 0, (ctx->max_size + 1) * sizeof(struct stats));
767 #if ISC_MEM_USE_INTERNAL_MALLOC
768 if (target_size == 0)
769 ctx->mem_target = DEF_MEM_TARGET;
771 ctx->mem_target = target_size;
772 ctx->freelists = (memalloc)(arg, ctx->max_size * sizeof(element *));
773 if (ctx->freelists == NULL) {
774 result = ISC_R_NOMEMORY;
777 memset(ctx->freelists, 0,
778 ctx->max_size * sizeof(element *));
779 ctx->basic_blocks = NULL;
780 ctx->basic_table = NULL;
781 ctx->basic_table_count = 0;
782 ctx->basic_table_size = 0;
785 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
787 #if ISC_MEM_TRACKLINES
/* Allocation tracking lists only when ISC_MEM_DEBUGRECORD is enabled. */
788 if ((isc_mem_debugging & ISC_MEM_DEBUGRECORD) != 0) {
791 ctx->debuglist = (memalloc)(arg,
792 (ctx->max_size+1) * sizeof(debuglist_t));
793 if (ctx->debuglist == NULL) {
794 result = ISC_R_NOMEMORY;
797 for (i = 0; i <= ctx->max_size; i++)
798 ISC_LIST_INIT(ctx->debuglist[i]);
802 ctx->memalloc_failures = 0;
805 return (ISC_R_SUCCESS);
/* Error path: release everything allocated so far (stats, freelists,
 * debuglist, lock) before returning the failure result. */
809 if (ctx->stats != NULL)
810 (memfree)(arg, ctx->stats);
811 #if ISC_MEM_USE_INTERNAL_MALLOC
812 if (ctx->freelists != NULL)
813 (memfree)(arg, ctx->freelists);
814 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
815 #if ISC_MEM_TRACKLINES
816 if (ctx->debuglist != NULL)
817 (ctx->memfree)(ctx->arg, ctx->debuglist);
818 #endif /* ISC_MEM_TRACKLINES */
819 DESTROYLOCK(&ctx->lock);
/* isc_mem_create(): convenience wrapper around isc_mem_createx() using
 * the default malloc()/free()-based callbacks and a NULL arg. */
827 isc_mem_create(size_t init_max_size, size_t target_size,
830 return (isc_mem_createx(init_max_size, target_size,
831 default_memalloc, default_memfree, NULL,
/*
 * destroy(): tear down a memory context once its reference count hits
 * zero.  When checkfree is set, leaks are reported (print_active) and
 * INSISTed upon before the context's own storage is released via the
 * original memfree callback.  Fires the ondestroy notification last.
 * NOTE(review): fragmentary excerpt — interior lines are missing.
 */
836 destroy(isc_mem_t *ctx) {
838 isc_ondestroy_t ondest;
842 #if ISC_MEM_USE_INTERNAL_MALLOC
843 INSIST(ISC_LIST_EMPTY(ctx->pools));
844 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
846 #if ISC_MEM_TRACKLINES
847 if (ctx->debuglist != NULL) {
848 if (ctx->checkfree) {
849 for (i = 0; i <= ctx->max_size; i++) {
850 if (!ISC_LIST_EMPTY(ctx->debuglist[i]))
851 print_active(ctx, stderr);
852 INSIST(ISC_LIST_EMPTY(ctx->debuglist[i]));
/* Drain and free every remaining debuglink (allocated with malloc()). */
857 for (i = 0; i <= ctx->max_size; i++)
858 for (dl = ISC_LIST_HEAD(ctx->debuglist[i]);
860 dl = ISC_LIST_HEAD(ctx->debuglist[i])) {
861 ISC_LIST_UNLINK(ctx->debuglist[i],
866 (ctx->memfree)(ctx->arg, ctx->debuglist);
869 INSIST(ctx->references == 0);
/* Leak check via the stats gets counters as well. */
871 if (ctx->checkfree) {
872 for (i = 0; i <= ctx->max_size; i++) {
873 #if ISC_MEM_TRACKLINES
874 if (ctx->stats[i].gets != 0U)
875 print_active(ctx, stderr);
877 INSIST(ctx->stats[i].gets == 0U);
881 (ctx->memfree)(ctx->arg, ctx->stats);
883 #if ISC_MEM_USE_INTERNAL_MALLOC
884 for (i = 0; i < ctx->basic_table_count; i++)
885 (ctx->memfree)(ctx->arg, ctx->basic_table[i]);
886 (ctx->memfree)(ctx->arg, ctx->freelists);
887 if (ctx->basic_table != NULL)
888 (ctx->memfree)(ctx->arg, ctx->basic_table);
889 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
/* Copy ondestroy before freeing ctx so we can notify afterwards. */
891 ondest = ctx->ondestroy;
893 DESTROYLOCK(&ctx->lock);
894 (ctx->memfree)(ctx->arg, ctx);
896 isc_ondestroy_notify(&ondest, ctx);
/* isc_mem_attach(): take a new reference on source and store it in
 * *targetp (the assignment itself is in an elided line). */
900 isc_mem_attach(isc_mem_t *source, isc_mem_t **targetp) {
901 REQUIRE(VALID_CONTEXT(source));
902 REQUIRE(targetp != NULL && *targetp == NULL);
/* Reference count is mutated under the context lock. */
905 source->references++;
906 UNLOCK(&source->lock);
/* isc_mem_detach(): drop a reference; the last reference triggers
 * destroy() (outside the lock, via want_destroy). */
912 isc_mem_detach(isc_mem_t **ctxp) {
914 isc_boolean_t want_destroy = ISC_FALSE;
916 REQUIRE(ctxp != NULL);
918 REQUIRE(VALID_CONTEXT(ctx));
921 INSIST(ctx->references > 0);
923 if (ctx->references == 0)
924 want_destroy = ISC_TRUE;
934 * isc_mem_putanddetach() is the equivalent of:
937 * isc_mem_attach(ptr->mctx, &mctx);
938 * isc_mem_detach(&ptr->mctx);
939 * isc_mem_put(mctx, ptr, sizeof(*ptr);
940 * isc_mem_detach(&mctx);
/*
 * isc__mem_putanddetach(): free ptr and drop the context reference in one
 * locked operation — needed because ctxp typically lives inside the
 * memory being freed (see comment at original line 954).
 * NOTE(review): fragmentary excerpt — interior lines are missing.
 */
944 isc__mem_putanddetach(isc_mem_t **ctxp, void *ptr, size_t size FLARG) {
946 isc_boolean_t want_destroy = ISC_FALSE;
948 REQUIRE(ctxp != NULL);
950 REQUIRE(VALID_CONTEXT(ctx));
951 REQUIRE(ptr != NULL);
954 * Must be before mem_putunlocked() as ctxp is usually within
/* Free via whichever allocator variant this build uses. */
959 #if ISC_MEM_USE_INTERNAL_MALLOC
961 mem_putunlocked(ctx, ptr, size);
962 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
963 mem_put(ctx, ptr, size);
965 mem_putstats(ctx, ptr, size);
966 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
968 DELETE_TRACE(ctx, ptr, size, file, line);
969 INSIST(ctx->references > 0);
971 if (ctx->references == 0)
972 want_destroy = ISC_TRUE;
/* isc_mem_destroy(): legacy single-owner teardown; requires exactly one
 * outstanding reference (leaks dumped first when tracking is on). */
981 isc_mem_destroy(isc_mem_t **ctxp) {
985 * This routine provides legacy support for callers who use mctxs
986 * without attaching/detaching.
989 REQUIRE(ctxp != NULL);
991 REQUIRE(VALID_CONTEXT(ctx));
994 #if ISC_MEM_TRACKLINES
995 if (ctx->references != 1)
996 print_active(ctx, stderr);
998 REQUIRE(ctx->references == 1);
/* isc_mem_ondestroy(): register a task event to fire when the context
 * is finally destroyed (delegates to isc_ondestroy_register). */
1008 isc_mem_ondestroy(isc_mem_t *ctx, isc_task_t *task, isc_event_t **event) {
1012 res = isc_ondestroy_register(&ctx->ondestroy, task, event);
/*
 * isc__mem_get(): public allocation entry point (isc_mem_get maps here).
 * Allocates, records stats/trace, and triggers the hi_water callback
 * (outside the lock) the first time usage crosses hi_water.
 * NOTE(review): fragmentary excerpt — interior lines are missing.
 */
1020 isc__mem_get(isc_mem_t *ctx, size_t size FLARG) {
1022 isc_boolean_t call_water = ISC_FALSE;
1024 REQUIRE(VALID_CONTEXT(ctx));
1026 #if ISC_MEM_USE_INTERNAL_MALLOC
1028 ptr = mem_getunlocked(ctx, size);
1029 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1030 ptr = mem_get(ctx, size);
1033 mem_getstats(ctx, size);
1034 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1036 ADD_TRACE(ctx, ptr, size, file, line);
/* Arm the high-water callback once per crossing (hi_called latches). */
1037 if (ctx->hi_water != 0U && !ctx->hi_called &&
1038 ctx->inuse > ctx->hi_water) {
1039 ctx->hi_called = ISC_TRUE;
1040 call_water = ISC_TRUE;
1042 if (ctx->inuse > ctx->maxinuse) {
1043 ctx->maxinuse = ctx->inuse;
1044 if (ctx->hi_water != 0U && ctx->inuse > ctx->hi_water &&
1045 (isc_mem_debugging & ISC_MEM_DEBUGUSAGE) != 0)
1046 fprintf(stderr, "maxinuse = %lu\n",
1047 (unsigned long)ctx->inuse);
/* Callback invoked after unlocking (unlock elided in this dump). */
1052 (ctx->water)(ctx->water_arg, ISC_MEM_HIWATER);
/*
 * isc__mem_put(): public free entry point (isc_mem_put maps here).
 * Frees, updates stats/trace, and fires the lo_water callback when usage
 * drops below lo_water after a hi_water crossing.
 * NOTE(review): fragmentary excerpt — interior lines are missing.
 */
1058 isc__mem_put(isc_mem_t *ctx, void *ptr, size_t size FLARG)
1060 isc_boolean_t call_water = ISC_FALSE;
1062 REQUIRE(VALID_CONTEXT(ctx));
1063 REQUIRE(ptr != NULL);
1065 #if ISC_MEM_USE_INTERNAL_MALLOC
1067 mem_putunlocked(ctx, ptr, size);
1068 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1069 mem_put(ctx, ptr, size);
1071 mem_putstats(ctx, ptr, size);
1072 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1074 DELETE_TRACE(ctx, ptr, size, file, line);
1077 * The check against ctx->lo_water == 0 is for the condition
1078 * when the context was pushed over hi_water but then had
1079 * isc_mem_setwater() called with 0 for hi_water and lo_water.
1081 if (ctx->hi_called &&
1082 (ctx->inuse < ctx->lo_water || ctx->lo_water == 0U)) {
1083 ctx->hi_called = ISC_FALSE;
1085 if (ctx->water != NULL)
1086 call_water = ISC_TRUE;
/* Callback invoked after unlocking (unlock elided in this dump). */
1091 (ctx->water)(ctx->water_arg, ISC_MEM_LOWATER);
1094 #if ISC_MEM_TRACKLINES
/*
 * print_active(): dump every live allocation recorded in the debuglist
 * (ptr, size, file, line) to the given stream; prints "None." when the
 * lists are all empty.  NOTE(review): fragmentary excerpt.
 */
1096 print_active(isc_mem_t *mctx, FILE *out) {
1097 if (mctx->debuglist != NULL) {
1101 isc_boolean_t found;
1103 fprintf(out, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1105 "Dump of all outstanding "
1106 "memory allocations:\n"));
1108 format = isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1109 ISC_MSG_PTRFILELINE,
1110 "\tptr %p size %u file %s line %u\n");
/* One debuglist per size class, 0 .. max_size inclusive. */
1111 for (i = 0; i <= mctx->max_size; i++) {
1112 dl = ISC_LIST_HEAD(mctx->debuglist[i]);
1117 while (dl != NULL) {
1118 for (j = 0; j < DEBUGLIST_COUNT; j++)
1119 if (dl->ptr[j] != NULL)
1120 fprintf(out, format,
1125 dl = ISC_LIST_NEXT(dl, link);
1129 fprintf(out, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1130 ISC_MSG_NONE, "\tNone.\n"));
1136 * Print the stats[] on the stream "out" with suitable formatting.
/*
 * isc_mem_stats(): print per-size get counters, internal-malloc block/
 * fragment counts, per-pool statistics, and (with tracking) the active
 * allocation dump.  NOTE(review): fragmentary excerpt.
 */
1139 isc_mem_stats(isc_mem_t *ctx, FILE *out) {
1141 const struct stats *s;
1142 const isc_mempool_t *pool;
1144 REQUIRE(VALID_CONTEXT(ctx));
1147 for (i = 0; i <= ctx->max_size; i++) {
/* Skip size classes that were never used. */
1150 if (s->totalgets == 0U && s->gets == 0U)
1152 fprintf(out, "%s%5lu: %11lu gets, %11lu rem",
1153 (i == ctx->max_size) ? ">=" : " ",
1154 (unsigned long) i, s->totalgets, s->gets);
1155 #if ISC_MEM_USE_INTERNAL_MALLOC
1156 if (s->blocks != 0 || s->freefrags != 0)
1157 fprintf(out, " (%lu bl, %lu ff)",
1158 s->blocks, s->freefrags);
1159 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1164 * Note that since a pool can be locked now, these stats might be
1165 * somewhat off if the pool is in active use at the time the stats
1166 * are dumped. The link fields are protected by the isc_mem_t's
1167 * lock, however, so walking this list and extracting integers from
1168 * stats fields is always safe.
1170 pool = ISC_LIST_HEAD(ctx->pools);
1172 fprintf(out, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1174 "[Pool statistics]\n"));
/* Column headers (localizable via isc_msgcat). */
1175 fprintf(out, "%15s %10s %10s %10s %10s %10s %10s %10s %1s\n",
1176 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1177 ISC_MSG_POOLNAME, "name"),
1178 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1179 ISC_MSG_POOLSIZE, "size"),
1180 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1181 ISC_MSG_POOLMAXALLOC, "maxalloc"),
1182 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1183 ISC_MSG_POOLALLOCATED, "allocated"),
1184 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1185 ISC_MSG_POOLFREECOUNT, "freecount"),
1186 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1187 ISC_MSG_POOLFREEMAX, "freemax"),
1188 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1189 ISC_MSG_POOLFILLCOUNT, "fillcount"),
1190 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1191 ISC_MSG_POOLGETS, "gets"),
1194 while (pool != NULL) {
1195 fprintf(out, "%15s %10lu %10u %10u %10u %10u %10u %10u %s\n",
1196 pool->name, (unsigned long) pool->size, pool->maxalloc,
1197 pool->allocated, pool->freecount, pool->freemax,
1198 pool->fillcount, pool->gets,
1199 (pool->lock == NULL ? "N" : "Y"));
1200 pool = ISC_LIST_NEXT(pool, link);
1203 #if ISC_MEM_TRACKLINES
1204 print_active(ctx, out);
1211 * Replacements for malloc() and free() -- they implicitly remember the
1212 * size of the object allocated (with some additional overhead).
/* isc__mem_allocateunlocked(): allocate size + ALIGNMENT_SIZE bytes so the
 * size can be stashed in a size_info header before the returned pointer
 * (the header store itself is in elided lines). */
1216 isc__mem_allocateunlocked(isc_mem_t *ctx, size_t size) {
1219 size += ALIGNMENT_SIZE;
1220 #if ISC_MEM_USE_INTERNAL_MALLOC
1221 si = mem_getunlocked(ctx, size);
1222 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1223 si = mem_get(ctx, size);
1224 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
/* isc__mem_allocate(): public wrapper adding locking, stats, and trace;
 * the stored size is read back from si[-1].u.size. */
1232 isc__mem_allocate(isc_mem_t *ctx, size_t size FLARG) {
1235 REQUIRE(VALID_CONTEXT(ctx));
1237 #if ISC_MEM_USE_INTERNAL_MALLOC
1239 si = isc__mem_allocateunlocked(ctx, size);
1240 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1241 si = isc__mem_allocateunlocked(ctx, size);
1244 mem_getstats(ctx, si[-1].u.size);
1245 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1247 #if ISC_MEM_TRACKLINES
1248 ADD_TRACE(ctx, si, si[-1].u.size, file, line);
/* isc__mem_free(): release memory from isc__mem_allocate(); recovers the
 * true allocation size from the size_info header at ptr[-1]. */
1257 isc__mem_free(isc_mem_t *ctx, void *ptr FLARG) {
1261 REQUIRE(VALID_CONTEXT(ctx));
1262 REQUIRE(ptr != NULL);
/* Step back over the hidden size header. */
1264 si = &(((size_info *)ptr)[-1]);
1267 #if ISC_MEM_USE_INTERNAL_MALLOC
1269 mem_putunlocked(ctx, si, size);
1270 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1271 mem_put(ctx, si, size);
1273 mem_putstats(ctx, si, size);
1274 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1276 DELETE_TRACE(ctx, ptr, size, file, line);
1283 * Other useful things.
/* isc__mem_strdup(): duplicate s into mctx-owned memory (len+1 bytes);
 * caller frees with isc_mem_free(). */
1287 isc__mem_strdup(isc_mem_t *mctx, const char *s FLARG) {
1291 REQUIRE(VALID_CONTEXT(mctx));
1296 ns = isc__mem_allocate(mctx, len + 1 FLARG_PASS);
1299 strncpy(ns, s, len + 1);
/* isc_mem_setdestroycheck(): toggle leak checking in destroy(). */
1305 isc_mem_setdestroycheck(isc_mem_t *ctx, isc_boolean_t flag) {
1306 REQUIRE(VALID_CONTEXT(ctx));
1309 ctx->checkfree = flag;
/* Quota and usage accessors (bodies largely elided in this dump). */
1319 isc_mem_setquota(isc_mem_t *ctx, size_t quota) {
1320 REQUIRE(VALID_CONTEXT(ctx));
1329 isc_mem_getquota(isc_mem_t *ctx) {
1332 REQUIRE(VALID_CONTEXT(ctx));
1343 isc_mem_inuse(isc_mem_t *ctx) {
1346 REQUIRE(VALID_CONTEXT(ctx));
/*
 * isc_mem_setwater(): install (or clear, with water == NULL) the
 * high/low water callback.  If the context is currently above hi_water,
 * the *old* callback gets a final ISC_MEM_LOWATER call so callers can
 * rebalance.  NOTE(review): fragmentary excerpt.
 */
1357 isc_mem_setwater(isc_mem_t *ctx, isc_mem_water_t water, void *water_arg,
1358 size_t hiwater, size_t lowater)
1360 isc_boolean_t callwater = ISC_FALSE;
1361 isc_mem_water_t oldwater;
1364 REQUIRE(VALID_CONTEXT(ctx));
1365 REQUIRE(hiwater >= lowater);
1368 oldwater = ctx->water;
1369 oldwater_arg = ctx->water_arg;
1370 if (water == NULL) {
/* Clearing: remember whether we owed a LOWATER call. */
1371 callwater = ctx->hi_called;
1373 ctx->water_arg = NULL;
1376 ctx->hi_called = ISC_FALSE;
1378 if (ctx->hi_called &&
1379 (ctx->water != water || ctx->water_arg != water_arg ||
1380 ctx->inuse < lowater || lowater == 0U))
1381 callwater = ISC_TRUE;
1383 ctx->water_arg = water_arg;
1384 ctx->hi_water = hiwater;
1385 ctx->lo_water = lowater;
1386 ctx->hi_called = ISC_FALSE;
/* Old callback invoked outside the lock (unlock elided). */
1390 if (callwater && oldwater != NULL)
1391 (oldwater)(oldwater_arg, ISC_MEM_LOWATER);
/*
 * isc_mempool_create(): allocate a fixed-size-object pool from mctx,
 * initialize its counters, and link it into the context's pool list.
 * NOTE(review): fragmentary excerpt — interior lines are missing.
 */
1399 isc_mempool_create(isc_mem_t *mctx, size_t size, isc_mempool_t **mpctxp) {
1400 isc_mempool_t *mpctx;
1402 REQUIRE(VALID_CONTEXT(mctx));
1404 REQUIRE(mpctxp != NULL && *mpctxp == NULL);
1407 * Allocate space for this pool, initialize values, and if all works
1408 * well, attach to the memory context.
1410 mpctx = isc_mem_get(mctx, sizeof(isc_mempool_t));
1412 return (ISC_R_NOMEMORY);
1414 mpctx->magic = MEMPOOL_MAGIC;
/* Defaults: unlimited maxalloc, empty free list, fill one at a time. */
1418 mpctx->maxalloc = UINT_MAX;
1419 mpctx->allocated = 0;
1420 mpctx->freecount = 0;
1422 mpctx->fillcount = 1;
1424 #if ISC_MEMPOOL_NAMES
1427 mpctx->items = NULL;
/* Registration in mctx->pools happens under the context lock. */
1432 ISC_LIST_INITANDAPPEND(mctx->pools, mpctx, link);
1433 UNLOCK(&mctx->lock);
1435 return (ISC_R_SUCCESS);
/* isc_mempool_setname(): copy a (truncated, NUL-terminated) display name
 * used by isc_mem_stats(); only compiled with ISC_MEMPOOL_NAMES. */
1439 isc_mempool_setname(isc_mempool_t *mpctx, const char *name) {
1440 REQUIRE(name != NULL);
1442 #if ISC_MEMPOOL_NAMES
1443 if (mpctx->lock != NULL)
1446 strncpy(mpctx->name, name, sizeof(mpctx->name) - 1);
1447 mpctx->name[sizeof(mpctx->name) - 1] = '\0';
1449 if (mpctx->lock != NULL)
1450 UNLOCK(mpctx->lock);
/*
 * isc_mempool_destroy(): require all items returned (allocated == 0),
 * release the free list back to the mctx, unlink the pool from the
 * context, and free the pool structure itself.
 * NOTE(review): fragmentary excerpt — interior lines are missing.
 */
1458 isc_mempool_destroy(isc_mempool_t **mpctxp) {
1459 isc_mempool_t *mpctx;
1464 REQUIRE(mpctxp != NULL);
1466 REQUIRE(VALID_MEMPOOL(mpctx));
1467 #if ISC_MEMPOOL_NAMES
/* Name the leaking pool in the error before the REQUIRE fires. */
1468 if (mpctx->allocated > 0)
1469 UNEXPECTED_ERROR(__FILE__, __LINE__,
1470 "isc_mempool_destroy(): mempool %s "
1474 REQUIRE(mpctx->allocated == 0);
1484 * Return any items on the free list
1487 while (mpctx->items != NULL) {
1488 INSIST(mpctx->freecount > 0);
1490 item = mpctx->items;
1491 mpctx->items = item->next;
1493 #if ISC_MEM_USE_INTERNAL_MALLOC
1494 mem_putunlocked(mctx, item, mpctx->size);
1495 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1496 mem_put(mctx, item, mpctx->size);
1497 mem_putstats(mctx, item, mpctx->size);
1498 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1500 UNLOCK(&mctx->lock);
1503 * Remove our linked list entry from the memory context.
1506 ISC_LIST_UNLINK(mctx->pools, mpctx, link);
1507 UNLOCK(&mctx->lock);
1511 isc_mem_put(mpctx->mctx, mpctx, sizeof(isc_mempool_t));
/* isc_mempool_associatelock(): attach an optional per-pool lock; must be
 * done once, before the pool is used concurrently. */
1520 isc_mempool_associatelock(isc_mempool_t *mpctx, isc_mutex_t *lock) {
1521 REQUIRE(VALID_MEMPOOL(mpctx));
1522 REQUIRE(mpctx->lock == NULL);
1523 REQUIRE(lock != NULL);
/*
 * isc__mempool_get(): hand out one item.  Fast path pops the pool's free
 * list; otherwise up to fillcount items are fetched from the mctx under
 * the context lock.  Returns NULL at maxalloc quota or on allocation
 * failure.  NOTE(review): fragmentary excerpt.
 */
1529 isc__mempool_get(isc_mempool_t *mpctx FLARG) {
1534 REQUIRE(VALID_MEMPOOL(mpctx));
/* Optional per-pool lock taken first (lock call elided). */
1538 if (mpctx->lock != NULL)
1542 * Don't let the caller go over quota
1544 if (mpctx->allocated >= mpctx->maxalloc) {
1550 * if we have a free list item, return the first here
1552 item = mpctx->items;
1554 mpctx->items = item->next;
1555 INSIST(mpctx->freecount > 0);
1563 * We need to dip into the well. Lock the memory context here and
1564 * fill up our free list.
1567 for (i = 0; i < mpctx->fillcount; i++) {
1568 #if ISC_MEM_USE_INTERNAL_MALLOC
1569 item = mem_getunlocked(mctx, mpctx->size);
1570 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1571 item = mem_get(mctx, mpctx->size);
1573 mem_getstats(mctx, mpctx->size);
1574 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1577 item->next = mpctx->items;
1578 mpctx->items = item;
1581 UNLOCK(&mctx->lock);
1584 * If we didn't get any items, return NULL.
1586 item = mpctx->items;
1590 mpctx->items = item->next;
1596 if (mpctx->lock != NULL)
1597 UNLOCK(mpctx->lock);
/* Trace recording needs the mctx lock again. */
1599 #if ISC_MEM_TRACKLINES
1602 ADD_TRACE(mctx, item, mpctx->size, file, line);
1603 UNLOCK(&mctx->lock);
1605 #endif /* ISC_MEM_TRACKLINES */
/*
 * isc__mempool_put(): return one item.  If the pool's free list is at
 * freemax the item goes back to the mctx; otherwise it is pushed onto
 * the pool's free list.  NOTE(review): fragmentary excerpt.
 */
1611 isc__mempool_put(isc_mempool_t *mpctx, void *mem FLARG) {
1615 REQUIRE(VALID_MEMPOOL(mpctx));
1616 REQUIRE(mem != NULL);
1620 if (mpctx->lock != NULL)
1623 INSIST(mpctx->allocated > 0);
1626 #if ISC_MEM_TRACKLINES
1628 DELETE_TRACE(mctx, mem, mpctx->size, file, line);
1629 UNLOCK(&mctx->lock);
1630 #endif /* ISC_MEM_TRACKLINES */
1633 * If our free list is full, return this to the mctx directly.
1635 if (mpctx->freecount >= mpctx->freemax) {
1636 #if ISC_MEM_USE_INTERNAL_MALLOC
1638 mem_putunlocked(mctx, mem, mpctx->size);
1639 UNLOCK(&mctx->lock);
1640 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1641 mem_put(mctx, mem, mpctx->size);
1643 mem_putstats(mctx, mem, mpctx->size);
1644 UNLOCK(&mctx->lock);
1645 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1646 if (mpctx->lock != NULL)
1647 UNLOCK(mpctx->lock);
1652 * Otherwise, attach it to our free list and bump the counter.
1655 item = (element *)mem;
1656 item->next = mpctx->items;
1657 mpctx->items = item;
1659 if (mpctx->lock != NULL)
1660 UNLOCK(mpctx->lock);
/*
 * Pool accessors: each takes the optional per-pool lock (when set),
 * reads or writes a single field, and unlocks.  All follow the same
 * pattern; LOCK calls are elided in this line-sampled dump.
 */
/* setfreemax: cap on the pool's free list length. */
1668 isc_mempool_setfreemax(isc_mempool_t *mpctx, unsigned int limit) {
1669 REQUIRE(VALID_MEMPOOL(mpctx));
1671 if (mpctx->lock != NULL)
1674 mpctx->freemax = limit;
1676 if (mpctx->lock != NULL)
1677 UNLOCK(mpctx->lock);
1681 isc_mempool_getfreemax(isc_mempool_t *mpctx) {
1682 unsigned int freemax;
1684 REQUIRE(VALID_MEMPOOL(mpctx));
1686 if (mpctx->lock != NULL)
1689 freemax = mpctx->freemax;
1691 if (mpctx->lock != NULL)
1692 UNLOCK(mpctx->lock);
/* getfreecount: current number of items on the free list. */
1698 isc_mempool_getfreecount(isc_mempool_t *mpctx) {
1699 unsigned int freecount;
1701 REQUIRE(VALID_MEMPOOL(mpctx));
1703 if (mpctx->lock != NULL)
1706 freecount = mpctx->freecount;
1708 if (mpctx->lock != NULL)
1709 UNLOCK(mpctx->lock);
/* setmaxalloc/getmaxalloc: cap on simultaneously-outstanding items. */
1715 isc_mempool_setmaxalloc(isc_mempool_t *mpctx, unsigned int limit) {
1718 REQUIRE(VALID_MEMPOOL(mpctx));
1720 if (mpctx->lock != NULL)
1723 mpctx->maxalloc = limit;
1725 if (mpctx->lock != NULL)
1726 UNLOCK(mpctx->lock);
1730 isc_mempool_getmaxalloc(isc_mempool_t *mpctx) {
1731 unsigned int maxalloc;
1733 REQUIRE(VALID_MEMPOOL(mpctx));
1735 if (mpctx->lock != NULL)
1738 maxalloc = mpctx->maxalloc;
1740 if (mpctx->lock != NULL)
1741 UNLOCK(mpctx->lock);
/* getallocated: number of items currently handed out. */
1747 isc_mempool_getallocated(isc_mempool_t *mpctx) {
1748 unsigned int allocated;
1750 REQUIRE(VALID_MEMPOOL(mpctx));
1752 if (mpctx->lock != NULL)
1755 allocated = mpctx->allocated;
1757 if (mpctx->lock != NULL)
1758 UNLOCK(mpctx->lock);
/* setfillcount/getfillcount: batch size for refilling the free list. */
1764 isc_mempool_setfillcount(isc_mempool_t *mpctx, unsigned int limit) {
1766 REQUIRE(VALID_MEMPOOL(mpctx));
1768 if (mpctx->lock != NULL)
1771 mpctx->fillcount = limit;
1773 if (mpctx->lock != NULL)
1774 UNLOCK(mpctx->lock);
1778 isc_mempool_getfillcount(isc_mempool_t *mpctx) {
1779 unsigned int fillcount;
1781 REQUIRE(VALID_MEMPOOL(mpctx));
1783 if (mpctx->lock != NULL)
1786 fillcount = mpctx->fillcount;
1788 if (mpctx->lock != NULL)
1789 UNLOCK(mpctx->lock);