2 * Copyright (C) 2004 Internet Systems Consortium, Inc. ("ISC")
3 * Copyright (C) 1997-2003 Internet Software Consortium.
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
10 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
11 * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
12 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
13 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
14 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
15 * PERFORMANCE OF THIS SOFTWARE.
18 /* $Id: mem.c,v 1.98.2.7.2.5 2004/03/16 05:50:24 marka Exp $ */
28 #include <isc/magic.h>
31 #include <isc/ondestroy.h>
32 #include <isc/string.h>
34 #include <isc/mutex.h>
37 #ifndef ISC_MEM_DEBUGGING
38 #define ISC_MEM_DEBUGGING 0
40 LIBISC_EXTERNAL_DATA unsigned int isc_mem_debugging = ISC_MEM_DEBUGGING;
43 * Define ISC_MEM_USE_INTERNAL_MALLOC=1 to use the internal malloc()
44 * implementation in preference to the system one. The internal malloc()
45 * is very space-efficient, and quite fast on uniprocessor systems. It
46 * performs poorly on multiprocessor machines.
48 #ifndef ISC_MEM_USE_INTERNAL_MALLOC
49 #define ISC_MEM_USE_INTERNAL_MALLOC 0
56 #define DEF_MAX_SIZE 1100
57 #define DEF_MEM_TARGET 4096
58 #define ALIGNMENT_SIZE 8 /* must be a power of 2 */
59 #define NUM_BASIC_BLOCKS 64 /* must be > 1 */
60 #define TABLE_INCREMENT 1024
61 #define DEBUGLIST_COUNT 1024
/*
 * NOTE(review): this listing is elided — struct keywords, closing braces and
 * several members are missing between the numbered lines.  Confirm against
 * the full source (BIND 9 lib/isc/mem.c) before relying on any detail here.
 *
 * debuglink_t: one node of the per-size allocation-tracking list used when
 * ISC_MEM_TRACKLINES is enabled.  Each node records up to DEBUGLIST_COUNT
 * outstanding allocations (pointer, size, and the file/line of the caller).
 */
66 #if ISC_MEM_TRACKLINES
67 typedef struct debuglink debuglink_t;
69 ISC_LINK(debuglink_t) link;
70 const void *ptr[DEBUGLIST_COUNT];
71 unsigned int size[DEBUGLIST_COUNT];
72 const char *file[DEBUGLIST_COUNT];
73 unsigned int line[DEBUGLIST_COUNT];
/*
 * FLARG / FLARG_PASS splice the caller's file/line into function signatures
 * and call sites when line tracking is compiled in.
 */
77 #define FLARG_PASS , file, line
78 #define FLARG , const char *file, int line
/*
 * element: free-list node overlaid on freed memory (full definition elided
 * here).  The union below pads it so it is exactly ALIGNMENT_SIZE bytes.
 */
84 typedef struct element element;
91 * This structure must be ALIGNMENT_SIZE bytes.
95 char bytes[ALIGNMENT_SIZE];
/*
 * struct stats (opening elided): per-size counters; blocks/freefrags exist
 * only for the internal-malloc build.
 */
101 unsigned long totalgets;
102 #if ISC_MEM_USE_INTERNAL_MALLOC
103 unsigned long blocks;
104 unsigned long freefrags;
105 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
/* Magic-number validation for isc_mem_t ("MemC"). */
108 #define MEM_MAGIC ISC_MAGIC('M', 'e', 'm', 'C')
109 #define VALID_CONTEXT(c) ISC_MAGIC_VALID(c, MEM_MAGIC)
/*
 * NOTE(review): field list of struct isc_mem (struct header and several
 * members elided in this listing).  A memory context bundles the allocator
 * callbacks, statistics, hi/lo water callbacks, attached pools, and — for
 * the internal-malloc build — the free lists and basic-block table.
 */
111 #if ISC_MEM_TRACKLINES
112 typedef ISC_LIST(debuglink_t) debuglist_t;
117 isc_ondestroy_t ondestroy;
/* User-supplied raw allocator pair (see isc_mem_createx). */
119 isc_memalloc_t memalloc;
120 isc_memfree_t memfree;
123 isc_boolean_t checkfree;
124 struct stats * stats;
125 unsigned int references;
132 isc_boolean_t hi_called;
133 isc_mem_water_t water;
135 ISC_LIST(isc_mempool_t) pools;
137 #if ISC_MEM_USE_INTERNAL_MALLOC
/* Internal-malloc state: per-size free lists plus the table of
 * NUM_BASIC_BLOCKS-sized slabs handed out by the OS allocator. */
139 element ** freelists;
140 element * basic_blocks;
141 unsigned char ** basic_table;
142 unsigned int basic_table_count;
143 unsigned int basic_table_size;
144 unsigned char * lowest;
145 unsigned char * highest;
146 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
148 #if ISC_MEM_TRACKLINES
149 debuglist_t * debuglist;
152 unsigned int memalloc_failures;
/* Magic-number validation for isc_mempool_t ("MEMp"). */
155 #define MEMPOOL_MAGIC ISC_MAGIC('M', 'E', 'M', 'p')
156 #define VALID_MEMPOOL(c) ISC_MAGIC_VALID(c, MEMPOOL_MAGIC)
/*
 * struct isc_mempool fields (struct header elided).  The comments on each
 * field below are original; locking discipline is annotated inline.
 */
159 /* always unlocked */
160 unsigned int magic; /* magic number */
161 isc_mutex_t *lock; /* optional lock */
162 isc_mem_t *mctx; /* our memory context */
163 /* locked via the memory context's lock */
164 ISC_LINK(isc_mempool_t) link; /* next pool in this mem context */
165 /* optionally locked from here down */
166 element *items; /* low water item list */
167 size_t size; /* size of each item on this pool */
168 unsigned int maxalloc; /* max number of items allowed */
169 unsigned int allocated; /* # of items currently given out */
170 unsigned int freecount; /* # of items on reserved list */
171 unsigned int freemax; /* # of items allowed on free list */
172 unsigned int fillcount; /* # of items to fetch on each fill */
174 unsigned int gets; /* # of requests to this pool */
175 /* Debugging only. */
176 #if ISC_MEMPOOL_NAMES
177 char name[16]; /* printed name in stats reports */
/*
 * NOTE(review): trace machinery (ISC_MEM_TRACKLINES).  Several lines of each
 * function are elided here; verify control flow against the full source.
 */
182 * Private Inline-able.
185 #if ! ISC_MEM_TRACKLINES
/* Tracking disabled: macros compile away to nothing. */
186 #define ADD_TRACE(a, b, c, d, e)
187 #define DELETE_TRACE(a, b, c, d, e)
189 #define ADD_TRACE(a, b, c, d, e) \
191 if ((isc_mem_debugging & (ISC_MEM_DEBUGTRACE | \
192 ISC_MEM_DEBUGRECORD)) != 0 && \
194 add_trace_entry(a, b, c, d, e); \
196 #define DELETE_TRACE(a, b, c, d, e) delete_trace_entry(a, b, c, d, e)
199 print_active(isc_mem_t *ctx, FILE *out);
/*
 * add_trace_entry: record an allocation (ptr/size/file/line) in the
 * context's per-size debuglist.  Caller must hold the context lock
 * (per the original comment at line 202).
 */
202 * mctx must be locked.
205 add_trace_entry(isc_mem_t *mctx, const void *ptr, unsigned int size
/* Optionally echo the event to stderr when DEBUGTRACE is set. */
211 if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0)
212 fprintf(stderr, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
215 "file %s line %u mctx %p\n"),
216 ptr, size, file, line, mctx);
/* No debuglist means recording is off; nothing to do. */
218 if (mctx->debuglist == NULL)
/* Oversized allocations are bucketed at max_size. */
221 if (size > mctx->max_size)
222 size = mctx->max_size;
/* Find a node with a free slot; a full node (count == DEBUGLIST_COUNT)
 * is skipped — presumably the elided code scans ptr[] for a NULL slot. */
224 dl = ISC_LIST_HEAD(mctx->debuglist[size]);
226 if (dl->count == DEBUGLIST_COUNT)
228 for (i = 0; i < DEBUGLIST_COUNT; i++) {
229 if (dl->ptr[i] == NULL) {
239 dl = ISC_LIST_NEXT(dl, link);
/* No node had room: allocate a fresh debuglink with raw malloc()
 * (deliberately not the mctx, to avoid recursion). */
242 dl = malloc(sizeof(debuglink_t));
245 ISC_LINK_INIT(dl, link);
246 for (i = 1; i < DEBUGLIST_COUNT; i++) {
259 ISC_LIST_PREPEND(mctx->debuglist[size], dl, link);
/*
 * delete_trace_entry: remove a previously recorded allocation; inverse of
 * add_trace_entry.  mctx must be locked (by analogy — confirm in source).
 */
263 delete_trace_entry(isc_mem_t *mctx, const void *ptr, unsigned int size,
264 const char *file, unsigned int line)
269 if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0)
270 fprintf(stderr, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
273 "file %s line %u mctx %p\n"),
274 ptr, size, file, line, mctx);
276 if (mctx->debuglist == NULL)
279 if (size > mctx->max_size)
280 size = mctx->max_size;
282 dl = ISC_LIST_HEAD(mctx->debuglist[size]);
284 for (i = 0; i < DEBUGLIST_COUNT; i++) {
285 if (dl->ptr[i] == ptr) {
291 INSIST(dl->count > 0);
/* Empty nodes are unlinked (and, presumably, freed — elided). */
293 if (dl->count == 0) {
294 ISC_LIST_UNLINK(mctx->debuglist[size],
301 dl = ISC_LIST_NEXT(dl, link);
305 * If we get here, we didn't find the item on the list. We're
310 #endif /* ISC_MEM_TRACKLINES */
/*
 * NOTE(review): internal-malloc helpers; lines elided throughout.
 */
312 #if ISC_MEM_USE_INTERNAL_MALLOC
/* rmsize: round size DOWN to a multiple of ALIGNMENT_SIZE. */
314 rmsize(size_t size) {
316 * round down to ALIGNMENT_SIZE
318 return (size & (~(ALIGNMENT_SIZE - 1)));
/* quantize: round size UP to a multiple of ALIGNMENT_SIZE (minimum one
 * ALIGNMENT_SIZE unit), so a freed chunk can always hold an element. */
322 quantize(size_t size) {
324 * Round up the result in order to get a size big
325 * enough to satisfy the request and be aligned on ALIGNMENT_SIZE
330 return (ALIGNMENT_SIZE);
331 return ((size + ALIGNMENT_SIZE - 1) & (~(ALIGNMENT_SIZE - 1)));
/*
 * more_basic_blocks: grab NUM_BASIC_BLOCKS * mem_target bytes from the raw
 * allocator, thread them into ctx->basic_blocks as a singly linked list,
 * and remember the slab in basic_table (growing it by TABLE_INCREMENT as
 * needed).  Returns a boolean; FALSE paths (quota hit, allocation failure)
 * are elided here.  Requires the context lock (original comment line 344).
 */
334 static inline isc_boolean_t
335 more_basic_blocks(isc_mem_t *ctx) {
337 unsigned char *curr, *next;
338 unsigned char *first, *last;
339 unsigned char **table;
340 unsigned int table_size;
344 /* Require: we hold the context lock. */
347 * Did we hit the quota for this context?
349 increment = NUM_BASIC_BLOCKS * ctx->mem_target;
350 if (ctx->quota != 0 && ctx->total + increment > ctx->quota)
/* Grow the slab table when full; old table contents are copied over
 * and the old table freed. */
353 INSIST(ctx->basic_table_count <= ctx->basic_table_size);
354 if (ctx->basic_table_count == ctx->basic_table_size) {
355 table_size = ctx->basic_table_size + TABLE_INCREMENT;
356 table = (ctx->memalloc)(ctx->arg,
357 table_size * sizeof(unsigned char *));
359 ctx->memalloc_failures++;
362 if (ctx->basic_table_size != 0) {
363 memcpy(table, ctx->basic_table,
364 ctx->basic_table_size *
365 sizeof(unsigned char *));
366 (ctx->memfree)(ctx->arg, ctx->basic_table);
368 ctx->basic_table = table;
369 ctx->basic_table_size = table_size;
372 new = (ctx->memalloc)(ctx->arg, NUM_BASIC_BLOCKS * ctx->mem_target);
374 ctx->memalloc_failures++;
377 ctx->total += increment;
378 ctx->basic_table[ctx->basic_table_count] = new;
379 ctx->basic_table_count++;
/* Chain the NUM_BASIC_BLOCKS blocks of the new slab together. */
382 next = curr + ctx->mem_target;
383 for (i = 0; i < (NUM_BASIC_BLOCKS - 1); i++) {
384 ((element *)curr)->next = (element *)next;
386 next += ctx->mem_target;
389 * curr is now pointing at the last block in the
392 ((element *)curr)->next = NULL;
/* Track the address span covered by slabs (lowest/highest), presumably
 * for later pointer-validity checks — confirm in full source. */
394 last = first + NUM_BASIC_BLOCKS * ctx->mem_target - 1;
395 if (first < ctx->lowest || ctx->lowest == NULL)
397 if (last > ctx->highest)
399 ctx->basic_blocks = new;
/*
 * more_frags: carve one basic block into frags of new_size bytes and put
 * them on ctx->freelists[new_size].  The tail remainder (rounded down by
 * rmsize) goes on the free list of its own size.  NOTE(review): lines are
 * elided; return statements and some bookkeeping are not visible.
 */
404 static inline isc_boolean_t
405 more_frags(isc_mem_t *ctx, size_t new_size) {
409 unsigned char *curr, *next;
412 * Try to get more fragments by chopping up a basic block.
415 if (ctx->basic_blocks == NULL) {
416 if (!more_basic_blocks(ctx)) {
418 * We can't get more memory from the OS, or we've
419 * hit the quota for this context.
422 * XXXRTH "At quota" notification here.
428 total_size = ctx->mem_target;
429 new = ctx->basic_blocks;
430 ctx->basic_blocks = ctx->basic_blocks->next;
431 frags = total_size / new_size;
432 ctx->stats[new_size].blocks++;
433 ctx->stats[new_size].freefrags += frags;
435 * Set up a linked-list of blocks of size
439 next = curr + new_size;
440 total_size -= new_size;
441 for (i = 0; i < (frags - 1); i++) {
442 ((element *)curr)->next = (element *)next;
445 total_size -= new_size;
448 * Add the remaining fragment of the basic block to a free list.
450 total_size = rmsize(total_size);
451 if (total_size > 0) {
452 ((element *)next)->next = ctx->freelists[total_size];
453 ctx->freelists[total_size] = (element *)next;
454 ctx->stats[total_size].freefrags++;
457 * curr is now pointing at the last block in the
460 ((element *)curr)->next = NULL;
461 ctx->freelists[new_size] = new;
/*
 * mem_getunlocked: internal-malloc allocation path.  Sizes >= max_size
 * bypass the free lists and go straight to the raw allocator (recorded in
 * the stats[max_size] bucket); smaller sizes are served from the quantized
 * free list, refilled via more_frags() on demand.  Caller must hold the
 * context lock (implied by the name — confirm in full source).
 */
467 mem_getunlocked(isc_mem_t *ctx, size_t size) {
468 size_t new_size = quantize(size);
471 if (size >= ctx->max_size || new_size >= ctx->max_size) {
473 * memget() was called on something beyond our upper limit.
475 if (ctx->quota != 0 && ctx->total + size > ctx->quota) {
479 ret = (ctx->memalloc)(ctx->arg, size);
481 ctx->memalloc_failures++;
486 ctx->stats[ctx->max_size].gets++;
487 ctx->stats[ctx->max_size].totalgets++;
489 * If we don't set new_size to size, then the
490 * ISC_MEM_FILL code might write over bytes we
498 * If there are no blocks in the free list for this size, get a chunk
499 * of memory and then break it up into "new_size"-sized blocks, adding
500 * them to the free list.
502 if (ctx->freelists[new_size] == NULL && !more_frags(ctx, new_size))
506 * The free list uses the "rounded-up" size "new_size".
508 ret = ctx->freelists[new_size];
509 ctx->freelists[new_size] = ctx->freelists[new_size]->next;
512 * The stats[] uses the _actual_ "size" requested by the
513 * caller, with the caveat (in the code above) that "size" >= the
514 * max. size (max_size) ends up getting recorded as a call to
517 ctx->stats[size].gets++;
518 ctx->stats[size].totalgets++;
519 ctx->stats[new_size].freefrags--;
520 ctx->inuse += new_size;
/* ISC_MEM_FILL poisoning of fresh memory with 0xbe ("beef"). */
526 memset(ret, 0xbe, new_size); /* Mnemonic for "beef". */
/*
 * check_overrun: verify the 0xbe fill pattern in the slack between the
 * requested size and the quantized size is intact (overrun detection).
 * Failure action is elided — presumably INSIST; confirm in full source.
 */
532 #if ISC_MEM_FILL && ISC_MEM_CHECKOVERRUN
534 check_overrun(void *mem, size_t size, size_t new_size) {
537 cp = (unsigned char *)mem;
539 while (size < new_size) {
/*
 * mem_putunlocked: internal-malloc free path, mirror of mem_getunlocked.
 * Oversized chunks go back to the raw free routine; others are poisoned
 * (0xde, "dead") and pushed on the quantized free list.  NOTE(review):
 * lines elided; caller presumably holds the context lock.
 */
548 mem_putunlocked(isc_mem_t *ctx, void *mem, size_t size) {
549 size_t new_size = quantize(size);
551 if (size == ctx->max_size || new_size >= ctx->max_size) {
553 * memput() called on something beyond our upper limit.
556 memset(mem, 0xde, size); /* Mnemonic for "dead". */
558 (ctx->memfree)(ctx->arg, mem);
559 INSIST(ctx->stats[ctx->max_size].gets != 0);
560 ctx->stats[ctx->max_size].gets--;
561 INSIST(size <= ctx->total);
568 #if ISC_MEM_CHECKOVERRUN
569 check_overrun(mem, size, new_size);
571 memset(mem, 0xde, new_size); /* Mnemonic for "dead". */
575 * The free list uses the "rounded-up" size "new_size".
577 ((element *)mem)->next = ctx->freelists[new_size];
578 ctx->freelists[new_size] = (element *)mem;
581 * The stats[] uses the _actual_ "size" requested by the
582 * caller, with the caveat (in the code above) that "size" >= the
583 * max. size (max_size) ends up getting recorded as a call to
586 INSIST(ctx->stats[size].gets != 0);
587 ctx->stats[size].gets--;
588 ctx->stats[new_size].freefrags++;
589 ctx->inuse -= new_size;
592 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
/*
 * System-malloc build: thin wrappers with optional fill/overrun checking,
 * plus separate stats-update helpers (stats updates are split out because
 * the allocation itself need not hold the lock — confirm in full source).
 */
595 * Perform a malloc, doing memory filling and overrun detection as necessary.
598 mem_get(isc_mem_t *ctx, size_t size) {
601 #if ISC_MEM_CHECKOVERRUN
605 ret = (ctx->memalloc)(ctx->arg, size);
607 ctx->memalloc_failures++;
611 memset(ret, 0xbe, size); /* Mnemonic for "beef". */
/* CHECKOVERRUN presumably plants a 0xbe guard byte past the end
 * (checked in mem_put at line 628) — guard setup elided here. */
613 # if ISC_MEM_CHECKOVERRUN
623 * Perform a free, doing memory filling and overrun detection as necessary.
626 mem_put(isc_mem_t *ctx, void *mem, size_t size) {
627 #if ISC_MEM_CHECKOVERRUN
628 INSIST(((unsigned char *)mem)[size] == 0xbe);
631 memset(mem, 0xde, size); /* Mnemonic for "dead". */
635 (ctx->memfree)(ctx->arg, mem);
639 * Update internal counters after a memory get.
642 mem_getstats(isc_mem_t *ctx, size_t size) {
/* Requests above max_size are pooled into the stats[max_size] bucket. */
646 if (size > ctx->max_size) {
647 ctx->stats[ctx->max_size].gets++;
648 ctx->stats[ctx->max_size].totalgets++;
650 ctx->stats[size].gets++;
651 ctx->stats[size].totalgets++;
656 * Update internal counters after a memory put.
659 mem_putstats(isc_mem_t *ctx, void *ptr, size_t size) {
662 INSIST(ctx->inuse >= size);
665 if (size > ctx->max_size) {
666 INSIST(ctx->stats[ctx->max_size].gets > 0U);
667 ctx->stats[ctx->max_size].gets--;
669 INSIST(ctx->stats[size].gets > 0U);
670 ctx->stats[size].gets--;
674 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
/* Default raw allocator pair: plain malloc()/free(); 'arg' is unused. */
681 default_memalloc(void *arg, size_t size) {
685 return (malloc(size));
689 default_memfree(void *arg, void *ptr) {
/*
 * isc_mem_createx: build a memory context around a caller-supplied
 * allocator pair.  Allocates the context and its stats table, initializes
 * the mutex, and (conditionally) the internal-malloc free lists and the
 * TRACKLINES debuglist.  Error paths jump to cleanup labels (elided) that
 * unwind partial allocations.  NOTE(review): many lines elided — the
 * *ctxp assignment and several field initializations are not visible.
 */
699 isc_mem_createx(size_t init_max_size, size_t target_size,
700 isc_memalloc_t memalloc, isc_memfree_t memfree, void *arg,
706 REQUIRE(ctxp != NULL && *ctxp == NULL);
707 REQUIRE(memalloc != NULL);
708 REQUIRE(memfree != NULL);
710 INSIST((ALIGNMENT_SIZE & (ALIGNMENT_SIZE - 1)) == 0);
712 #if !ISC_MEM_USE_INTERNAL_MALLOC
716 ctx = (memalloc)(arg, sizeof(*ctx));
718 return (ISC_R_NOMEMORY);
/* 0 means "use the default ceiling". */
720 if (init_max_size == 0U)
721 ctx->max_size = DEF_MAX_SIZE;
723 ctx->max_size = init_max_size;
731 ctx->hi_called = ISC_FALSE;
733 ctx->water_arg = NULL;
734 ctx->magic = MEM_MAGIC;
735 isc_ondestroy_init(&ctx->ondestroy);
736 ctx->memalloc = memalloc;
737 ctx->memfree = memfree;
740 ctx->checkfree = ISC_TRUE;
741 #if ISC_MEM_TRACKLINES
742 ctx->debuglist = NULL;
744 ISC_LIST_INIT(ctx->pools);
746 #if ISC_MEM_USE_INTERNAL_MALLOC
747 ctx->freelists = NULL;
748 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
/* stats[] has max_size+1 slots; slot max_size aggregates oversized gets. */
750 ctx->stats = (memalloc)(arg,
751 (ctx->max_size+1) * sizeof(struct stats));
752 if (ctx->stats == NULL) {
753 result = ISC_R_NOMEMORY;
756 memset(ctx->stats, 0, (ctx->max_size + 1) * sizeof(struct stats));
758 #if ISC_MEM_USE_INTERNAL_MALLOC
759 if (target_size == 0)
760 ctx->mem_target = DEF_MEM_TARGET;
762 ctx->mem_target = target_size;
763 ctx->freelists = (memalloc)(arg, ctx->max_size * sizeof(element *));
764 if (ctx->freelists == NULL) {
765 result = ISC_R_NOMEMORY;
768 memset(ctx->freelists, 0,
769 ctx->max_size * sizeof(element *));
770 ctx->basic_blocks = NULL;
771 ctx->basic_table = NULL;
772 ctx->basic_table_count = 0;
773 ctx->basic_table_size = 0;
776 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
778 if (isc_mutex_init(&ctx->lock) != ISC_R_SUCCESS) {
779 UNEXPECTED_ERROR(__FILE__, __LINE__,
780 "isc_mutex_init() %s",
781 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
782 ISC_MSG_FAILED, "failed"));
783 result = ISC_R_UNEXPECTED;
787 #if ISC_MEM_TRACKLINES
/* Allocation recording is opt-in via the ISC_MEM_DEBUGRECORD flag. */
788 if ((isc_mem_debugging & ISC_MEM_DEBUGRECORD) != 0) {
791 ctx->debuglist = (memalloc)(arg,
792 (ctx->max_size+1) * sizeof(debuglist_t));
793 if (ctx->debuglist == NULL) {
794 result = ISC_R_NOMEMORY;
797 for (i = 0; i <= ctx->max_size; i++)
798 ISC_LIST_INIT(ctx->debuglist[i]);
802 ctx->memalloc_failures = 0;
805 return (ISC_R_SUCCESS);
/* Error-unwind tail (labels elided): free whatever was allocated,
 * using the raw memfree since the context is not yet operational. */
810 (memfree)(arg, ctx->stats);
811 #if ISC_MEM_USE_INTERNAL_MALLOC
813 (memfree)(arg, ctx->freelists);
814 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
815 #if ISC_MEM_TRACKLINES
817 (ctx->memfree)(ctx->arg, ctx->debuglist);
818 #endif /* ISC_MEM_TRACKLINES */
/* isc_mem_create: convenience wrapper using the malloc/free defaults. */
826 isc_mem_create(size_t init_max_size, size_t target_size,
829 return (isc_mem_createx(init_max_size, target_size,
830 default_memalloc, default_memfree, NULL,
/*
 * destroy: tear down a memory context once its reference count reaches
 * zero.  With checkfree enabled it INSISTs that nothing is still
 * outstanding (dumping the leak list first when TRACKLINES is on), then
 * frees the debuglist, stats, internal-malloc tables, the lock, and the
 * context itself, finally firing any ondestroy notification.
 * NOTE(review): lines elided, including the free() of debuglink nodes.
 */
835 destroy(isc_mem_t *ctx) {
837 isc_ondestroy_t ondest;
841 #if ISC_MEM_USE_INTERNAL_MALLOC
842 INSIST(ISC_LIST_EMPTY(ctx->pools));
843 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
845 #if ISC_MEM_TRACKLINES
846 if (ctx->debuglist != NULL) {
847 if (ctx->checkfree) {
/* Leak check: any surviving entry is printed, then asserted away. */
848 for (i = 0; i <= ctx->max_size; i++) {
849 if (!ISC_LIST_EMPTY(ctx->debuglist[i]))
850 print_active(ctx, stderr);
851 INSIST(ISC_LIST_EMPTY(ctx->debuglist[i]));
856 for (i = 0; i <= ctx->max_size; i++)
857 for (dl = ISC_LIST_HEAD(ctx->debuglist[i]);
859 dl = ISC_LIST_HEAD(ctx->debuglist[i])) {
860 ISC_LIST_UNLINK(ctx->debuglist[i],
865 (ctx->memfree)(ctx->arg, ctx->debuglist);
868 INSIST(ctx->references == 0);
870 if (ctx->checkfree) {
871 for (i = 0; i <= ctx->max_size; i++) {
872 #if ISC_MEM_TRACKLINES
873 if (ctx->stats[i].gets != 0U)
874 print_active(ctx, stderr);
876 INSIST(ctx->stats[i].gets == 0U);
880 (ctx->memfree)(ctx->arg, ctx->stats);
882 #if ISC_MEM_USE_INTERNAL_MALLOC
883 for (i = 0; i < ctx->basic_table_count; i++)
884 (ctx->memfree)(ctx->arg, ctx->basic_table[i]);
885 (ctx->memfree)(ctx->arg, ctx->freelists);
886 (ctx->memfree)(ctx->arg, ctx->basic_table);
887 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
/* Copy the ondestroy handle first: notify runs after ctx is freed. */
889 ondest = ctx->ondestroy;
891 DESTROYLOCK(&ctx->lock);
892 (ctx->memfree)(ctx->arg, ctx);
894 isc_ondestroy_notify(&ondest, ctx);
/*
 * Reference-counting entry points.  NOTE(review): lines elided; the
 * *targetp / *ctxp assignments and the destroy() calls on want_destroy
 * are among the missing lines — confirm against full source.
 */
898 isc_mem_attach(isc_mem_t *source, isc_mem_t **targetp) {
899 REQUIRE(VALID_CONTEXT(source));
900 REQUIRE(targetp != NULL && *targetp == NULL);
/* Bump refcount under the context lock. */
903 source->references++;
904 UNLOCK(&source->lock);
/* isc_mem_detach: drop a reference; last one out destroys the context
 * (outside the lock, via want_destroy). */
910 isc_mem_detach(isc_mem_t **ctxp) {
912 isc_boolean_t want_destroy = ISC_FALSE;
914 REQUIRE(ctxp != NULL);
916 REQUIRE(VALID_CONTEXT(ctx));
919 INSIST(ctx->references > 0);
921 if (ctx->references == 0)
922 want_destroy = ISC_TRUE;
932 * isc_mem_putanddetach() is the equivalent of:
935 * isc_mem_attach(ptr->mctx, &mctx);
936 * isc_mem_detach(&ptr->mctx);
937 * isc_mem_put(mctx, ptr, sizeof(*ptr);
938 * isc_mem_detach(&mctx);
/* Combined put+detach: frees 'ptr' and drops the reference atomically
 * with respect to the context lock, because ptr usually contains ctxp. */
942 isc__mem_putanddetach(isc_mem_t **ctxp, void *ptr, size_t size FLARG) {
944 isc_boolean_t want_destroy = ISC_FALSE;
946 REQUIRE(ctxp != NULL);
948 REQUIRE(VALID_CONTEXT(ctx));
949 REQUIRE(ptr != NULL);
952 * Must be before mem_putunlocked() as ctxp is usually within
957 #if ISC_MEM_USE_INTERNAL_MALLOC
959 mem_putunlocked(ctx, ptr, size);
960 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
961 mem_put(ctx, ptr, size);
963 mem_putstats(ctx, ptr, size);
964 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
966 DELETE_TRACE(ctx, ptr, size, file, line);
967 INSIST(ctx->references > 0);
969 if (ctx->references == 0)
970 want_destroy = ISC_TRUE;
/* isc_mem_destroy: legacy single-owner teardown — requires exactly one
 * reference (dumping the leak list first when TRACKLINES is on). */
979 isc_mem_destroy(isc_mem_t **ctxp) {
983 * This routine provides legacy support for callers who use mctxs
984 * without attaching/detaching.
987 REQUIRE(ctxp != NULL);
989 REQUIRE(VALID_CONTEXT(ctx));
992 #if ISC_MEM_TRACKLINES
993 if (ctx->references != 1)
994 print_active(ctx, stderr);
996 REQUIRE(ctx->references == 1);
/* isc_mem_ondestroy: register a task event fired when ctx is destroyed. */
1006 isc_mem_ondestroy(isc_mem_t *ctx, isc_task_t *task, isc_event_t **event) {
1010 res = isc_ondestroy_register(&ctx->ondestroy, task, event);
/*
 * isc__mem_get: public allocation entry (behind the isc_mem_get macro).
 * Dispatches to the internal- or system-malloc path, records the trace
 * entry, and fires the hi-water callback once when inuse first crosses
 * hi_water.  NOTE(review): lock/unlock lines and the return are elided;
 * the water callback at line 1050 is presumably invoked outside the lock.
 */
1018 isc__mem_get(isc_mem_t *ctx, size_t size FLARG) {
1020 isc_boolean_t call_water = ISC_FALSE;
1022 REQUIRE(VALID_CONTEXT(ctx));
1024 #if ISC_MEM_USE_INTERNAL_MALLOC
1026 ptr = mem_getunlocked(ctx, size);
1027 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1028 ptr = mem_get(ctx, size);
1031 mem_getstats(ctx, size);
1032 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1034 ADD_TRACE(ctx, ptr, size, file, line);
/* hi_called latches so the callback fires once per crossing. */
1035 if (ctx->hi_water != 0U && !ctx->hi_called &&
1036 ctx->inuse > ctx->hi_water) {
1037 ctx->hi_called = ISC_TRUE;
1038 call_water = ISC_TRUE;
1040 if (ctx->inuse > ctx->maxinuse) {
1041 ctx->maxinuse = ctx->inuse;
1042 if (ctx->hi_water != 0U && ctx->inuse > ctx->hi_water &&
1043 (isc_mem_debugging & ISC_MEM_DEBUGUSAGE) != 0)
1044 fprintf(stderr, "maxinuse = %lu\n",
1045 (unsigned long)ctx->inuse);
1050 (ctx->water)(ctx->water_arg, ISC_MEM_HIWATER);
/*
 * isc__mem_put: public free entry; mirror of isc__mem_get, firing the
 * lo-water callback when usage drops below lo_water (or when lo_water
 * was reset to 0 while hi_called was latched — see original comment).
 */
1056 isc__mem_put(isc_mem_t *ctx, void *ptr, size_t size FLARG)
1058 isc_boolean_t call_water = ISC_FALSE;
1060 REQUIRE(VALID_CONTEXT(ctx));
1061 REQUIRE(ptr != NULL);
1063 #if ISC_MEM_USE_INTERNAL_MALLOC
1065 mem_putunlocked(ctx, ptr, size);
1066 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1067 mem_put(ctx, ptr, size);
1069 mem_putstats(ctx, ptr, size);
1070 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1072 DELETE_TRACE(ctx, ptr, size, file, line);
1075 * The check against ctx->lo_water == 0 is for the condition
1076 * when the context was pushed over hi_water but then had
1077 * isc_mem_setwater() called with 0 for hi_water and lo_water.
1079 if (ctx->hi_called &&
1080 (ctx->inuse < ctx->lo_water || ctx->lo_water == 0U)) {
1081 ctx->hi_called = ISC_FALSE;
1083 if (ctx->water != NULL)
1084 call_water = ISC_TRUE;
1089 (ctx->water)(ctx->water_arg, ISC_MEM_LOWATER);
/*
 * print_active: dump every outstanding allocation recorded in the
 * debuglist to 'out' (used for leak reports at destroy time).
 * NOTE(review): lines elided, including the fprintf argument list and
 * the 'found' bookkeeping.
 */
1092 #if ISC_MEM_TRACKLINES
1094 print_active(isc_mem_t *mctx, FILE *out) {
1095 if (mctx->debuglist != NULL) {
1099 isc_boolean_t found;
1101 fprintf(out, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1103 "Dump of all outstanding "
1104 "memory allocations:\n"));
1106 format = isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1107 ISC_MSG_PTRFILELINE,
1108 "\tptr %p size %u file %s line %u\n");
1109 for (i = 0; i <= mctx->max_size; i++) {
1110 dl = ISC_LIST_HEAD(mctx->debuglist[i]);
1115 while (dl != NULL) {
1116 for (j = 0; j < DEBUGLIST_COUNT; j++)
1117 if (dl->ptr[j] != NULL)
1118 fprintf(out, format,
1123 dl = ISC_LIST_NEXT(dl, link);
1127 fprintf(out, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1128 ISC_MSG_NONE, "\tNone.\n"));
/*
 * isc_mem_stats: print the per-size gets/rem counters, the per-pool
 * statistics table, and (with TRACKLINES) the active-allocation dump.
 */
1134 * Print the stats[] on the stream "out" with suitable formatting.
1137 isc_mem_stats(isc_mem_t *ctx, FILE *out) {
1139 const struct stats *s;
1140 const isc_mempool_t *pool;
1142 REQUIRE(VALID_CONTEXT(ctx));
1145 for (i = 0; i <= ctx->max_size; i++) {
/* Skip size buckets that were never used (continue elided). */
1148 if (s->totalgets == 0U && s->gets == 0U)
1150 fprintf(out, "%s%5lu: %11lu gets, %11lu rem",
1151 (i == ctx->max_size) ? ">=" : " ",
1152 (unsigned long) i, s->totalgets, s->gets);
1153 #if ISC_MEM_USE_INTERNAL_MALLOC
1154 if (s->blocks != 0 || s->freefrags != 0)
1155 fprintf(out, " (%lu bl, %lu ff)",
1156 s->blocks, s->freefrags);
1157 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1162 * Note that since a pool can be locked now, these stats might be
1163 * somewhat off if the pool is in active use at the time the stats
1164 * are dumped. The link fields are protected by the isc_mem_t's
1165 * lock, however, so walking this list and extracting integers from
1166 * stats fields is always safe.
1168 pool = ISC_LIST_HEAD(ctx->pools);
1170 fprintf(out, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1172 "[Pool statistics]\n"));
1173 fprintf(out, "%15s %10s %10s %10s %10s %10s %10s %10s %1s\n",
1174 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1175 ISC_MSG_POOLNAME, "name"),
1176 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1177 ISC_MSG_POOLSIZE, "size"),
1178 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1179 ISC_MSG_POOLMAXALLOC, "maxalloc"),
1180 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1181 ISC_MSG_POOLALLOCATED, "allocated"),
1182 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1183 ISC_MSG_POOLFREECOUNT, "freecount"),
1184 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1185 ISC_MSG_POOLFREEMAX, "freemax"),
1186 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1187 ISC_MSG_POOLFILLCOUNT, "fillcount"),
1188 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1189 ISC_MSG_POOLGETS, "gets"),
1192 while (pool != NULL) {
1193 fprintf(out, "%15s %10lu %10u %10u %10u %10u %10u %10u %s\n",
1194 pool->name, (unsigned long) pool->size, pool->maxalloc,
1195 pool->allocated, pool->freecount, pool->freemax,
1196 pool->fillcount, pool->gets,
1197 (pool->lock == NULL ? "N" : "Y"));
1198 pool = ISC_LIST_NEXT(pool, link);
1201 #if ISC_MEM_TRACKLINES
1202 print_active(ctx, out);
/*
 * malloc()/free()-style interface: the requested size is stored in a
 * size_info header placed ALIGNMENT_SIZE bytes before the returned
 * pointer, so isc__mem_free can recover it.  NOTE(review): the size_info
 * store, NULL checks, and return statements are elided in this listing.
 */
1209 * Replacements for malloc() and free() -- they implicitly remember the
1210 * size of the object allocated (with some additional overhead).
1214 isc__mem_allocateunlocked(isc_mem_t *ctx, size_t size) {
/* Reserve room for the hidden size header. */
1217 size += ALIGNMENT_SIZE;
1218 #if ISC_MEM_USE_INTERNAL_MALLOC
1219 si = mem_getunlocked(ctx, size);
1220 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1221 si = mem_get(ctx, size);
1222 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
/* isc__mem_allocate: locked wrapper that also updates stats and trace. */
1230 isc__mem_allocate(isc_mem_t *ctx, size_t size FLARG) {
1233 REQUIRE(VALID_CONTEXT(ctx));
1235 #if ISC_MEM_USE_INTERNAL_MALLOC
1237 si = isc__mem_allocateunlocked(ctx, size);
1238 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1239 si = isc__mem_allocateunlocked(ctx, size);
/* si[-1].u.size holds the true (header-inclusive) allocation size. */
1242 mem_getstats(ctx, si[-1].u.size);
1243 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1245 #if ISC_MEM_TRACKLINES
1246 ADD_TRACE(ctx, si, si[-1].u.size, file, line);
/* isc__mem_free: look up the stored size and release via the matching
 * put path; note DELETE_TRACE uses 'ptr' while the put uses 'si' —
 * mirroring the ADD_TRACE above, which recorded 'si'.  Asymmetry is as
 * in the original; confirm against full source. */
1255 isc__mem_free(isc_mem_t *ctx, void *ptr FLARG) {
1259 REQUIRE(VALID_CONTEXT(ctx));
1260 REQUIRE(ptr != NULL);
1262 si = &(((size_info *)ptr)[-1]);
1265 #if ISC_MEM_USE_INTERNAL_MALLOC
1267 mem_putunlocked(ctx, si, size);
1268 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1269 mem_put(ctx, si, size);
1271 mem_putstats(ctx, si, size);
1272 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1274 DELETE_TRACE(ctx, ptr, size, file, line);
1281 * Other useful things.
/* isc__mem_strdup: duplicate a NUL-terminated string into the context.
 * NOTE(review): strncpy with len+1 is safe only because the source length
 * was just measured — the copy always fits and is NUL-terminated. */
1285 isc__mem_strdup(isc_mem_t *mctx, const char *s FLARG) {
1289 REQUIRE(VALID_CONTEXT(mctx));
1294 ns = isc__mem_allocate(mctx, len + 1 FLARG_PASS);
1297 strncpy(ns, s, len + 1);
/* Simple locked setters/getters for context policy knobs. */
1303 isc_mem_setdestroycheck(isc_mem_t *ctx, isc_boolean_t flag) {
1304 REQUIRE(VALID_CONTEXT(ctx));
1307 ctx->checkfree = flag;
1317 isc_mem_setquota(isc_mem_t *ctx, size_t quota) {
1318 REQUIRE(VALID_CONTEXT(ctx));
1327 isc_mem_getquota(isc_mem_t *ctx) {
1330 REQUIRE(VALID_CONTEXT(ctx));
1341 isc_mem_inuse(isc_mem_t *ctx) {
1344 REQUIRE(VALID_CONTEXT(ctx));
/* isc_mem_setwater: install (or clear, when water == NULL) the hi/lo
 * water callback; resets the hi_called latch either way. */
1355 isc_mem_setwater(isc_mem_t *ctx, isc_mem_water_t water, void *water_arg,
1356 size_t hiwater, size_t lowater)
1358 REQUIRE(VALID_CONTEXT(ctx));
1359 REQUIRE(hiwater >= lowater);
1362 if (water == NULL) {
1364 ctx->water_arg = NULL;
1367 ctx->hi_called = ISC_FALSE;
1370 ctx->water_arg = water_arg;
1371 ctx->hi_water = hiwater;
1372 ctx->lo_water = lowater;
1373 ctx->hi_called = ISC_FALSE;
/*
 * isc_mempool_create: allocate a fixed-size-object pool out of 'mctx' and
 * link it onto the context's pool list.  NOTE(review): several field
 * initializations (size, mctx, lock, freemax, gets) are elided here.
 */
1383 isc_mempool_create(isc_mem_t *mctx, size_t size, isc_mempool_t **mpctxp) {
1384 isc_mempool_t *mpctx;
1386 REQUIRE(VALID_CONTEXT(mctx));
1388 REQUIRE(mpctxp != NULL && *mpctxp == NULL);
1391 * Allocate space for this pool, initialize values, and if all works
1392 * well, attach to the memory context.
1394 mpctx = isc_mem_get(mctx, sizeof(isc_mempool_t));
1396 return (ISC_R_NOMEMORY);
1398 mpctx->magic = MEMPOOL_MAGIC;
/* Defaults: unlimited allocations, empty free list, refill one at a time. */
1402 mpctx->maxalloc = UINT_MAX;
1403 mpctx->allocated = 0;
1404 mpctx->freecount = 0;
1406 mpctx->fillcount = 1;
1408 #if ISC_MEMPOOL_NAMES
1411 mpctx->items = NULL;
1416 ISC_LIST_INITANDAPPEND(mctx->pools, mpctx, link);
1417 UNLOCK(&mctx->lock);
1419 return (ISC_R_SUCCESS);
/* isc_mempool_setname: copy a truncated, NUL-terminated display name
 * (compiled in only with ISC_MEMPOOL_NAMES), under the optional lock. */
1423 isc_mempool_setname(isc_mempool_t *mpctx, const char *name) {
1424 REQUIRE(name != NULL);
1426 #if ISC_MEMPOOL_NAMES
1427 if (mpctx->lock != NULL)
1430 strncpy(mpctx->name, name, sizeof(mpctx->name) - 1);
1431 mpctx->name[sizeof(mpctx->name) - 1] = '\0';
1433 if (mpctx->lock != NULL)
1434 UNLOCK(mpctx->lock);
/*
 * isc_mempool_destroy: require every item returned (allocated == 0),
 * drain the free list back to the mctx, unlink from the context's pool
 * list, and free the pool structure itself.
 */
1442 isc_mempool_destroy(isc_mempool_t **mpctxp) {
1443 isc_mempool_t *mpctx;
1448 REQUIRE(mpctxp != NULL);
1450 REQUIRE(VALID_MEMPOOL(mpctx));
1451 #if ISC_MEMPOOL_NAMES
/* Leak diagnostics: name the pool before the REQUIRE fires. */
1452 if (mpctx->allocated > 0)
1453 UNEXPECTED_ERROR(__FILE__, __LINE__,
1454 "isc_mempool_destroy(): mempool %s "
1458 REQUIRE(mpctx->allocated == 0);
1468 * Return any items on the free list
1471 while (mpctx->items != NULL) {
1472 INSIST(mpctx->freecount > 0);
1474 item = mpctx->items;
1475 mpctx->items = item->next;
1477 #if ISC_MEM_USE_INTERNAL_MALLOC
1478 mem_putunlocked(mctx, item, mpctx->size);
1479 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1480 mem_put(mctx, item, mpctx->size);
1481 mem_putstats(mctx, item, mpctx->size);
1482 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1484 UNLOCK(&mctx->lock);
1487 * Remove our linked list entry from the memory context.
1490 ISC_LIST_UNLINK(mctx->pools, mpctx, link);
1491 UNLOCK(&mctx->lock);
1495 isc_mem_put(mpctx->mctx, mpctx, sizeof(isc_mempool_t));
/*
 * isc_mempool_associatelock: attach an external mutex so the pool can be
 * used from multiple threads; must be called before the lock is needed.
 */
1504 isc_mempool_associatelock(isc_mempool_t *mpctx, isc_mutex_t *lock) {
1505 REQUIRE(VALID_MEMPOOL(mpctx));
1506 REQUIRE(mpctx->lock == NULL);
1507 REQUIRE(lock != NULL);
/*
 * isc__mempool_get: hand out one item.  Fast path pops the pool's free
 * list; slow path locks the mctx and fetches up to fillcount fresh items.
 * NOTE(review): quota-exceeded return, counter updates, and the final
 * return are elided in this listing.
 */
1513 isc__mempool_get(isc_mempool_t *mpctx FLARG) {
1518 REQUIRE(VALID_MEMPOOL(mpctx));
1522 if (mpctx->lock != NULL)
1526 * Don't let the caller go over quota
1528 if (mpctx->allocated >= mpctx->maxalloc) {
1534 * if we have a free list item, return the first here
1536 item = mpctx->items;
1538 mpctx->items = item->next;
1539 INSIST(mpctx->freecount > 0);
1547 * We need to dip into the well. Lock the memory context here and
1548 * fill up our free list.
1551 for (i = 0; i < mpctx->fillcount; i++) {
1552 #if ISC_MEM_USE_INTERNAL_MALLOC
1553 item = mem_getunlocked(mctx, mpctx->size);
1554 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1555 item = mem_get(mctx, mpctx->size);
1557 mem_getstats(mctx, mpctx->size);
1558 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1561 item->next = mpctx->items;
1562 mpctx->items = item;
1565 UNLOCK(&mctx->lock);
1568 * If we didn't get any items, return NULL.
1570 item = mpctx->items;
1574 mpctx->items = item->next;
1580 if (mpctx->lock != NULL)
1581 UNLOCK(mpctx->lock);
/* Trace recording takes the mctx lock separately. */
1583 #if ISC_MEM_TRACKLINES
1586 ADD_TRACE(mctx, item, mpctx->size, file, line);
1587 UNLOCK(&mctx->lock);
1589 #endif /* ISC_MEM_TRACKLINES */
/*
 * isc__mempool_put: return one item.  If the free list is at freemax the
 * item goes back to the mctx; otherwise it is pushed on the pool's list.
 */
1595 isc__mempool_put(isc_mempool_t *mpctx, void *mem FLARG) {
1599 REQUIRE(VALID_MEMPOOL(mpctx));
1600 REQUIRE(mem != NULL);
1604 if (mpctx->lock != NULL)
1607 INSIST(mpctx->allocated > 0);
1610 #if ISC_MEM_TRACKLINES
1612 DELETE_TRACE(mctx, mem, mpctx->size, file, line);
1613 UNLOCK(&mctx->lock);
1614 #endif /* ISC_MEM_TRACKLINES */
1617 * If our free list is full, return this to the mctx directly.
1619 if (mpctx->freecount >= mpctx->freemax) {
1620 #if ISC_MEM_USE_INTERNAL_MALLOC
1622 mem_putunlocked(mctx, mem, mpctx->size);
1623 UNLOCK(&mctx->lock);
1624 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1625 mem_put(mctx, mem, mpctx->size);
1627 mem_putstats(mctx, mem, mpctx->size);
1628 UNLOCK(&mctx->lock);
1629 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1630 if (mpctx->lock != NULL)
1631 UNLOCK(mpctx->lock);
1636 * Otherwise, attach it to our free list and bump the counter.
1639 item = (element *)mem;
1640 item->next = mpctx->items;
1641 mpctx->items = item;
1643 if (mpctx->lock != NULL)
1644 UNLOCK(mpctx->lock);
/*
 * Pool accessors: each takes the optional pool lock, reads or writes one
 * field, and unlocks.  Return statements are elided in this listing.
 */
1652 isc_mempool_setfreemax(isc_mempool_t *mpctx, unsigned int limit) {
1653 REQUIRE(VALID_MEMPOOL(mpctx));
1655 if (mpctx->lock != NULL)
1658 mpctx->freemax = limit;
1660 if (mpctx->lock != NULL)
1661 UNLOCK(mpctx->lock);
1665 isc_mempool_getfreemax(isc_mempool_t *mpctx) {
1666 unsigned int freemax;
1668 REQUIRE(VALID_MEMPOOL(mpctx));
1670 if (mpctx->lock != NULL)
1673 freemax = mpctx->freemax;
1675 if (mpctx->lock != NULL)
1676 UNLOCK(mpctx->lock);
1682 isc_mempool_getfreecount(isc_mempool_t *mpctx) {
1683 unsigned int freecount;
1685 REQUIRE(VALID_MEMPOOL(mpctx));
1687 if (mpctx->lock != NULL)
1690 freecount = mpctx->freecount;
1692 if (mpctx->lock != NULL)
1693 UNLOCK(mpctx->lock);
1699 isc_mempool_setmaxalloc(isc_mempool_t *mpctx, unsigned int limit) {
1702 REQUIRE(VALID_MEMPOOL(mpctx));
1704 if (mpctx->lock != NULL)
1707 mpctx->maxalloc = limit;
1709 if (mpctx->lock != NULL)
1710 UNLOCK(mpctx->lock);
1714 isc_mempool_getmaxalloc(isc_mempool_t *mpctx) {
1715 unsigned int maxalloc;
1717 REQUIRE(VALID_MEMPOOL(mpctx));
1719 if (mpctx->lock != NULL)
1722 maxalloc = mpctx->maxalloc;
1724 if (mpctx->lock != NULL)
1725 UNLOCK(mpctx->lock);
1731 isc_mempool_getallocated(isc_mempool_t *mpctx) {
1732 unsigned int allocated;
1734 REQUIRE(VALID_MEMPOOL(mpctx));
1736 if (mpctx->lock != NULL)
1739 allocated = mpctx->allocated;
1741 if (mpctx->lock != NULL)
1742 UNLOCK(mpctx->lock);
1748 isc_mempool_setfillcount(isc_mempool_t *mpctx, unsigned int limit) {
1750 REQUIRE(VALID_MEMPOOL(mpctx));
1752 if (mpctx->lock != NULL)
1755 mpctx->fillcount = limit;
1757 if (mpctx->lock != NULL)
1758 UNLOCK(mpctx->lock);
1762 isc_mempool_getfillcount(isc_mempool_t *mpctx) {
1763 unsigned int fillcount;
1765 REQUIRE(VALID_MEMPOOL(mpctx));
1767 if (mpctx->lock != NULL)
1770 fillcount = mpctx->fillcount;
1772 if (mpctx->lock != NULL)
1773 UNLOCK(mpctx->lock);