2 * ----------------------------------------------------------------------------
3 * "THE BEER-WARE LICENSE" (Revision 42):
4 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
5 * can do whatever you want with this stuff. If we meet some day, and you think
6 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
7 * ----------------------------------------------------------------------------
11 #include <sys/cdefs.h>
12 __FBSDID("$FreeBSD$");
15 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
16 * to internal conditions and consistency in malloc.c. This has a
17 * noticeable runtime performance hit, and generally will not do you
18 * any good unless you fiddle with the internals of malloc or want
19 * to catch random pointer corruption as early as possible.
/* Extra internal consistency checks are compiled out by default. */
21 #undef MALLOC_EXTRA_SANITY
24  * What to use for Junk. This is the byte value we use to fill with
25  * when the 'J' option is enabled.
/* 0xd0 is a recognizable non-zero pattern for spotting use of junked memory. */
27 #define SOME_JUNK 0xd0 /* as in "Duh" :-) */
30 * The basic parameters you can tweak.
32 * malloc_pageshift pagesize = 1 << malloc_pageshift
33 * It's probably best if this is the native
34 * page size, but it doesn't have to be.
36 * malloc_minsize minimum size of an allocation in bytes.
37 * If this is too small it's too much work
38 * to manage them. This is also the smallest
39 * unit of alignment used for the storage
40 * returned by malloc/realloc.
/*
 * Per-platform tuning: page shift and minimum allocation size for each
 * supported architecture, followed by libc thread-locking glue and a
 * SunOS/sparc fallback that emulates MAP_ANON via /dev/zero.
 */
44 #include "namespace.h"
45 #if defined(__FreeBSD__)
46 # if defined(__i386__)
47 # define malloc_pageshift 12U
48 # define malloc_minsize 16U
50 # if defined(__ia64__)
51 # define malloc_pageshift 13U
52 # define malloc_minsize 16U
54 # if defined(__alpha__)
55 # define malloc_pageshift 13U
56 # define malloc_minsize 16U
58 # if defined(__sparc64__)
59 # define malloc_pageshift 13U
60 # define malloc_minsize 16U
62 # if defined(__amd64__)
63 # define malloc_pageshift 12U
64 # define malloc_minsize 16U
67 # define malloc_pageshift 12U
68 # define malloc_minsize 16U
72 * Make malloc/free/realloc thread-safe in libc for use with
/* Locking is a no-op until __isthreaded is set by the threading library. */
75 # include "libc_private.h"
76 # include "spinlock.h"
77 static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
78 spinlock_t *__malloc_lock = &thread_lock;
79 # define _MALLOC_LOCK() if (__isthreaded) _SPINLOCK(&thread_lock);
80 # define _MALLOC_UNLOCK() if (__isthreaded) _SPINUNLOCK(&thread_lock);
81 #endif /* __FreeBSD__ */
83 #if defined(__sparc__) && defined(sun)
84 # define malloc_pageshift 12U
85 # define malloc_minsize 16U
/* No MAP_ANON here: map /dev/zero instead and alias MADV_FREE to DONTNEED. */
88 # define MMAP_FD fdzero
89 # define INIT_MMAP() \
90 { if ((fdzero = _open(_PATH_DEVZERO, O_RDWR, 0000)) == -1) \
91 wrterror("open of /dev/zero"); }
92 # define MADV_FREE MADV_DONTNEED
93 #endif /* __sparc__ */
95 /* Insert your combination here... */
96 #if defined(__FOOCPU__) && defined(__BAROS__)
97 # define malloc_pageshift 12U
98 # define malloc_minsize 16U
99 #endif /* __FOOCPU__ && __BAROS__ */
/*
 * Non-NULL sentinel returned for malloc(0): inside an unmapped low page,
 * so any dereference faults, yet it is distinguishable from NULL.
 */
102 #define ZEROSIZEPTR ((void *)(uintptr_t)(1 << (malloc_pageshift - 1)))
106 * No user serviceable parts behind this point.
108 #include <sys/types.h>
109 #include <sys/mman.h>
118 #include "un-namespace.h"
121 * This structure describes a page worth of chunks.
/* One pginfo exists per chunk-carrying page; bits[] grows past the struct
 * to hold one free-bit per chunk (see malloc_make_chunks). */
125 struct pginfo *next; /* next on the free list */
126 void *page; /* Pointer to the page */
127 u_short size; /* size of this page's chunks */
128 u_short shift; /* How far to shift for this size chunks */
129 u_short free; /* How many free chunks */
130 u_short total; /* How many chunk */
131 u_int bits[1]; /* Which chunks are free */
135 * This structure describes a number of free pages.
/* pgfree nodes form the doubly linked, address-ordered free_list. */
139 struct pgfree *next; /* next run of free pages */
140 struct pgfree *prev; /* prev run of free pages */
141 void *page; /* pointer to free pages */
142 void *end; /* pointer to end of free pages */
143 size_t size; /* number of bytes free */
147 * How many bits per u_int in the bitmap.
148 * Change only if not 8 bits/byte
150 #define MALLOC_BITS (8*sizeof(u_int))
153 * Magic values to put in the page_directory
/*
 * page_dir[] entries are either one of these small sentinel "pointers"
 * (< MALLOC_MAGIC) describing whole-page state, or a real struct pginfo*
 * (>= MALLOC_MAGIC) for a page carved into chunks.
 */
155 #define MALLOC_NOT_MINE ((struct pginfo*) 0)
156 #define MALLOC_FREE ((struct pginfo*) 1)
157 #define MALLOC_FIRST ((struct pginfo*) 2)
158 #define MALLOC_FOLLOW ((struct pginfo*) 3)
159 #define MALLOC_MAGIC ((struct pginfo*) 4)
/* Fallback defaults for platforms that defined nothing above. */
161 #ifndef malloc_pageshift
162 #define malloc_pageshift 12U
165 #ifndef malloc_minsize
166 #define malloc_minsize 16U
169 #if !defined(malloc_pagesize)
170 #define malloc_pagesize (1UL<<malloc_pageshift)
/* Compile-time cross-check: pagesize must equal 1<<pageshift. */
173 #if ((1<<malloc_pageshift) != malloc_pagesize)
174 #error "(1<<malloc_pageshift) != malloc_pagesize"
/* Requests above half a page go to the whole-page allocator. */
177 #ifndef malloc_maxsize
178 #define malloc_maxsize ((malloc_pagesize)>>1)
181 /* A mask for the offset inside a page. */
182 #define malloc_pagemask ((malloc_pagesize)-1)
/* pageround: round a byte count up to a page multiple.
 * ptr2index: map an address to its slot in page_dir (offset by malloc_origo). */
184 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
185 #define ptr2index(foo) (((u_long)(foo) >> malloc_pageshift)-malloc_origo)
/* Single-threaded builds: locking compiles to nothing. */
188 #define _MALLOC_LOCK()
191 #ifndef _MALLOC_UNLOCK
192 #define _MALLOC_UNLOCK()
/*
 * Global allocator state and the runtime option flags set from
 * MALLOC_OPTIONS / /etc/malloc.conf (see the option parser below).
 * malloc_cache starts as a page count; it is converted to bytes during
 * initialization (shifted by malloc_pageshift).
 */
203 /* Number of free pages we cache */
204 static unsigned malloc_cache = 16;
206 /* The offset from pagenumber to index into the page directory */
207 static u_long malloc_origo;
209 /* The last index in the page directory we care about */
210 static u_long last_index;
212 /* Pointer to page directory. Allocated "as if with" malloc */
213 static struct pginfo **page_dir;
215 /* How many slots in the page directory */
216 static unsigned malloc_ninfo;
218 /* Free pages line up here */
219 static struct pgfree free_list;
221 /* Abort(), user doesn't handle problems. */
222 static int malloc_abort = 0;
224 /* Are we trying to die ? */
227 /* always realloc ? */
228 static int malloc_realloc;
230 #if defined(MADV_FREE)
231 /* pass the kernel a hint on free pages ? */
232 static int malloc_hint = 0;
235 /* xmalloc behaviour ? */
236 static int malloc_xmalloc;
238 /* sysv behaviour for malloc(0) ? */
239 static int malloc_sysv;
/* zero-fill ('Z') and junk-fill ('J') options */
242 static int malloc_zero;
245 static int malloc_junk = 0;
/* utrace(2) tracing of every malloc/realloc/free when enabled ('U'). */
250 static int malloc_utrace;
252 struct ut { void *p; size_t s; void *r; };
254 void utrace(struct ut *, int);
256 #define UTRACE(a, b, c) \
258 {struct ut u; u.p=a; u.s = b; u.r=c; utrace(&u, sizeof u);}
259 #else /* !HAS_UTRACE */
260 #define UTRACE(a,b,c)
261 #endif /* HAS_UTRACE */
/* Top of the sbrk()-grown arena, the one-slot pgfree cache, and the
 * option string exported for introspection. */
264 static void *malloc_brk;
266 /* one location cache for free-list holders */
267 static struct pgfree *px;
269 /* compile-time options */
270 const char *_malloc_options;
272 /* Name of the current public function */
273 static const char *malloc_func;
/* MMAP(): anonymous, private, read-write mapping of `size` bytes. */
277 mmap(NULL, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
281 * Necessary function declarations
283 static int extend_pgdir(u_long index);
284 static void *imalloc(size_t size);
285 static void ifree(void *ptr);
286 static void *irealloc(void *ptr, size_t size);
/*
 * Default diagnostic sink: write the four message parts to stderr with
 * raw _write(2) -- no stdio, so it is safe inside the allocator itself.
 */
289 wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
292 _write(STDERR_FILENO, p1, strlen(p1));
293 _write(STDERR_FILENO, p2, strlen(p2));
294 _write(STDERR_FILENO, p3, strlen(p3));
295 _write(STDERR_FILENO, p4, strlen(p4));
/* Hook applications may replace to capture allocator diagnostics. */
298 void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
299 const char *p4) = wrtmessage;
/* wrterror(): report a fatal allocator error (then presumably aborts --
 * the termination path is not visible here; confirm in full source). */
302 wrterror(char const *p)
306 _malloc_message(_getprogname(), malloc_func, " error: ", p);
/*
 * wrtwarning(): report a recoverable problem.  Privileged or setugid
 * processes escalate warnings to hard errors, since heap corruption
 * there is a security matter.
 */
315 * Sensitive processes, somewhat arbitrarily defined here as setuid,
316 * setgid, root and wheel cannot afford to have malloc mistakes.
318 if (malloc_abort || issetugid() || getuid() == 0 || getgid() == 0)
320 _malloc_message(_getprogname(), malloc_func, " warning: ", p);
324 * Allocate a number of pages from the OS
/*
 * map_pages(): grow the arena with sbrk(2) by `pages` pages, starting at
 * the current break rounded up to a page boundary, and widen the page
 * directory if the new last_index no longer fits.
 */
327 map_pages(size_t pages)
329 caddr_t result, tail;
331 result = (caddr_t)pageround((u_long)sbrk(0));
/* NOTE(review): pages<<malloc_pageshift can overflow for huge requests --
 * confirm the elided caller-side size checks cover this. */
332 tail = result + (pages << malloc_pageshift);
337 #ifdef MALLOC_EXTRA_SANITY
338 wrterror("(ES): map_pages fails\n");
339 #endif /* MALLOC_EXTRA_SANITY */
343 last_index = ptr2index(tail) - 1;
/* Make sure page_dir has a slot for every page up to last_index. */
346 if ((last_index+1) >= malloc_ninfo && !extend_pgdir(last_index))
353 * Extend page directory
/*
 * extend_pgdir(): replace page_dir with a larger anonymous mapping that
 * can index at least `index` pages, copying the old directory contents.
 */
356 extend_pgdir(u_long index)
358 struct pginfo **new, **old;
361 /* Make it this many pages */
362 i = index * sizeof *page_dir;
363 i /= malloc_pagesize;
366 /* remember the old mapping size */
367 oldlen = malloc_ninfo * sizeof *page_dir;
370 * NOTE: we allocate new pages and copy the directory rather than tempt
371 * fate by trying to "grow" the region.. There is nothing to prevent
372 * us from accidently re-mapping space that's been allocated by our caller
373 * via dlopen() or other mmap().
375 * The copy problem is not too bad, as there is 4K of page index per
376 * 4MB of malloc arena.
378 * We can totally avoid the copy if we open a file descriptor to associate
379 * the anon mappings with. Then, when we remap the pages at the new
380 * address, the old pages will be "magically" remapped.. But this means
381 * keeping open a "secret" file descriptor.....
385 new = (struct pginfo**) MMAP(i * malloc_pagesize);
386 if (new == MAP_FAILED)
389 /* Copy the old stuff */
390 memcpy(new, page_dir,
391 malloc_ninfo * sizeof *page_dir);
393 /* register the new size */
394 malloc_ninfo = i * malloc_pagesize / sizeof *page_dir;
396 /* swap the pointers */
400 /* Now free the old stuff */
406 * Initialize the world
/*
 * malloc_init(): one-time setup.  Reads option flags from three sources
 * in order -- /etc/malloc.conf (a symlink whose target is the option
 * string), the MALLOC_OPTIONS environment variable (skipped for setugid
 * programs), and presumably the compiled-in _malloc_options -- then maps
 * the initial page directory and primes the pgfree cache.
 * errno is saved on entry so initialization cannot clobber it.
 */
414 int save_errno = errno;
418 #ifdef MALLOC_EXTRA_SANITY
420 #endif /* MALLOC_EXTRA_SANITY */
422 for (i = 0; i < 3; i++) {
424 j = readlink("/etc/malloc.conf", b, sizeof b - 1);
429 } else if (i == 1 && issetugid() == 0) {
430 p = getenv("MALLOC_OPTIONS");
/* Lowercase letter clears an option, uppercase sets it. */
436 for (; p != NULL && *p != '\0'; p++) {
438 case '>': malloc_cache <<= 1; break;
439 case '<': malloc_cache >>= 1; break;
440 case 'a': malloc_abort = 0; break;
441 case 'A': malloc_abort = 1; break;
442 #if defined(MADV_FREE)
443 case 'h': malloc_hint = 0; break;
444 case 'H': malloc_hint = 1; break;
446 case 'r': malloc_realloc = 0; break;
447 case 'R': malloc_realloc = 1; break;
448 case 'j': malloc_junk = 0; break;
449 case 'J': malloc_junk = 1; break;
451 case 'u': malloc_utrace = 0; break;
452 case 'U': malloc_utrace = 1; break;
454 case 'v': malloc_sysv = 0; break;
455 case 'V': malloc_sysv = 1; break;
456 case 'x': malloc_xmalloc = 0; break;
457 case 'X': malloc_xmalloc = 1; break;
458 case 'z': malloc_zero = 0; break;
459 case 'Z': malloc_zero = 1; break;
461 _malloc_message(_getprogname(), malloc_func,
462 " warning: ", "unknown char in MALLOC_OPTIONS\n");
472 * We want junk in the entire allocation, and zero only in the part
473 * the user asked for.
478 /* Allocate one page for the page directory */
479 page_dir = (struct pginfo **) MMAP(malloc_pagesize);
481 if (page_dir == MAP_FAILED)
482 wrterror("mmap(2) failed, check limits\n")\
;
485 * We need a maximum of malloc_pageshift buckets, steal these from the
486 * front of the page_directory;
/* Bias the origin so the first malloc_pageshift slots double as the
 * chunk-bucket heads page_dir[0..malloc_pageshift-1]. */
488 malloc_origo = ((u_long)pageround((u_long)sbrk(0))) >> malloc_pageshift;
489 malloc_origo -= malloc_pageshift;
491 malloc_ninfo = malloc_pagesize / sizeof *page_dir;
493 /* Recalculate the cache size in bytes, and make sure it's nonzero */
498 malloc_cache <<= malloc_pageshift;
501 * This is a nice hack from Kaleb Keithly (kaleb@x.org).
502 * We can sbrk(2) further back when we keep this on a low address.
504 px = (struct pgfree *) imalloc (sizeof *px);
509 * Allocate a number of complete pages
/*
 * malloc_pages(): satisfy a request of one-or-more whole pages.  First
 * scans free_list for an exact or first-fit run (carving the front off a
 * larger run), otherwise falls back to growing the arena (elided path,
 * presumably via map_pages).  Marks the pages MALLOC_FIRST/MALLOC_FOLLOW
 * in page_dir and junk-fills when 'J' is set.
 */
512 malloc_pages(size_t size)
514 void *p, *delay_free = NULL;
519 size = pageround(size);
523 /* Look for free pages before asking for more */
524 for(pf = free_list.next; pf; pf = pf->next) {
526 #ifdef MALLOC_EXTRA_SANITY
527 if (pf->size & malloc_pagemask)
528 wrterror("(ES): junk length entry on free_list\n");
530 wrterror("(ES): zero length entry on free_list\n");
531 if (pf->page == pf->end)
532 wrterror("(ES): zero entry on free_list\n");
533 if (pf->page > pf->end)
534 wrterror("(ES): sick entry on free_list\n");
535 if ((void*)pf->page >= (void*)sbrk(0))
536 wrterror("(ES): entry on free_list past brk\n");
537 if (page_dir[ptr2index(pf->page)] != MALLOC_FREE)
538 wrterror("(ES): non-free first page on free-list\n");
539 if (page_dir[ptr2index(pf->end)-1] != MALLOC_FREE)
540 wrterror("(ES): non-free last page on free-list\n");
541 #endif /* MALLOC_EXTRA_SANITY */
/* Exact fit: unlink the whole run from the doubly linked free list. */
546 if (pf->size == size) {
548 if (pf->next != NULL)
549 pf->next->prev = pf->prev;
550 pf->prev->next = pf->next;
/* Larger run: take `size` bytes off the front, keep the remainder. */
556 pf->page = (char *)pf->page + size;
561 #ifdef MALLOC_EXTRA_SANITY
562 if (p != NULL && page_dir[ptr2index(p)] != MALLOC_FREE)
563 wrterror("(ES): allocated non-free page on free-list\n");
564 #endif /* MALLOC_EXTRA_SANITY */
/* From here `size` is a page count, not bytes. */
566 size >>= malloc_pageshift;
574 index = ptr2index(p);
575 page_dir[index] = MALLOC_FIRST;
577 page_dir[index+i] = MALLOC_FOLLOW;
580 memset(p, SOME_JUNK, size << malloc_pageshift);
594 * Allocate a page of fragments
/*
 * malloc_make_chunks(): carve a fresh page into 2^bits-byte chunks.
 * The pginfo header lives inside the page itself when the chunk size is
 * large enough that it would waste less than two chunks, otherwise it is
 * allocated separately with imalloc().  The new page is pushed onto the
 * bucket list page_dir[bits].
 */
598 malloc_make_chunks(int bits)
604 /* Allocate a new bucket */
605 pp = malloc_pages(malloc_pagesize);
609 /* Find length of admin structure */
610 l = offsetof(struct pginfo, bits[0]);
611 l += sizeof bp->bits[0] *
612 (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);
614 /* Don't waste more than two chunks on this */
615 if ((1<<(bits)) <= l+l) {
616 bp = (struct pginfo *)pp;
618 bp = (struct pginfo *)imalloc(l);
625 bp->size = (1<<bits);
627 bp->total = bp->free = malloc_pagesize >> bits;
630 /* set all valid bits in the bitmap */
634 /* Do a bunch at a time */
635 for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
636 bp->bits[i / MALLOC_BITS] = ~0;
639 bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
/* Header stored in-page: clear the free bits for the chunks it occupies. */
641 if (bp == bp->page) {
642 /* Mark the ones we stole for ourselves */
644 bp->bits[i/MALLOC_BITS] &= ~(1<<(i%MALLOC_BITS));
653 page_dir[ptr2index(pp)] = bp;
655 bp->next = page_dir[bits];
664 * Allocate a fragment
/*
 * malloc_bytes(): sub-page allocation.  Rounds the request up to
 * malloc_minsize, finds the power-of-two bucket j, takes the first free
 * chunk from the first page on page_dir[j]'s list (making a new chunk
 * page if the bucket is empty), clears its bitmap bit, and junk-fills
 * when 'J' is set.
 */
667 malloc_bytes(size_t size)
675 /* Don't bother with anything less than this */
676 if (size < malloc_minsize)
677 size = malloc_minsize;
679 /* Find the right bucket */
685 /* If it's empty, make a page more of that size chunks */
686 if (page_dir[j] == NULL && !malloc_make_chunks(j))
691 /* Find first word of bitmap which isn't empty */
692 for (lp = bp->bits; !*lp; lp++)
695 /* Find that bit, and tweak it */
704 /* If there are no more free, remove from free-list */
706 page_dir[j] = bp->next;
710 /* Adjust to the real offset of that chunk */
711 k += (lp-bp->bits)*MALLOC_BITS;
715 memset((u_char *)bp->page + k, SOME_JUNK, bp->size);
717 return ((u_char *)bp->page + k);
721 * Allocate a piece of memory
/*
 * imalloc(): internal dispatcher.  Rejects requests that would overflow
 * when padded by a page, or that are implausibly huge (>= the page_dir
 * address), then routes to the chunk or whole-page allocator.  Zero-fills
 * the result when the 'Z' option is set.
 */
731 if ((size + malloc_pagesize) < size) /* Check for overflow */
733 else if ((size + malloc_pagesize) >= (uintptr_t)page_dir)
735 else if (size <= malloc_maxsize)
736 result = malloc_bytes(size);
738 result = malloc_pages(size);
740 if (malloc_zero && result != NULL)
741 memset(result, 0, size);
747 * Change the size of an allocation.
/*
 * irealloc(): validate `ptr` against the page directory, compute the old
 * allocation's size (page run or chunk), keep the block in place when the
 * new size still fits the same storage class (unless the 'R' option
 * forces reallocation), otherwise allocate fresh storage, copy
 * min(osize, size) bytes, and free the old block.
 */
750 irealloc(void *ptr, size_t size)
760 index = ptr2index(ptr);
/* Indexes below malloc_pageshift are the bucket heads, never user data. */
762 if (index < malloc_pageshift) {
763 wrtwarning("junk pointer, too low to make sense\n");
767 if (index > last_index) {
768 wrtwarning("junk pointer, too high to make sense\n");
772 mp = &page_dir[index];
774 if (*mp == MALLOC_FIRST) { /* Page allocation */
776 /* Check the pointer */
777 if ((u_long)ptr & malloc_pagemask) {
778 wrtwarning("modified (page-) pointer\n");
782 /* Find the size in bytes */
783 for (osize = malloc_pagesize; *(++mp) == MALLOC_FOLLOW;)
784 osize += malloc_pagesize;
786 if (!malloc_realloc && /* Unless we have to, */
787 size <= osize && /* .. or are too small, */
788 size > (osize - malloc_pagesize)) { /* .. or can free a page, */
/* Same page run still fits: junk the now-unused tail and return as-is. */
790 memset((u_char *)ptr + size, SOME_JUNK, osize-size);
791 return (ptr); /* ..don't do anything else. */
794 } else if (*mp >= MALLOC_MAGIC) { /* Chunk allocation */
796 /* Check the pointer for sane values */
797 if (((u_long)ptr & ((*mp)->size-1))) {
798 wrtwarning("modified (chunk-) pointer\n");
802 /* Find the chunk index in the page */
803 i = ((u_long)ptr & malloc_pagemask) >> (*mp)->shift;
805 /* Verify that it isn't a free chunk already */
806 if ((*mp)->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
807 wrtwarning("chunk is already free\n");
813 if (!malloc_realloc && /* Unless we have to, */
814 size <= osize && /* ..or are too small, */
815 (size > osize/2 || /* ..or could use a smaller size, */
816 osize == malloc_minsize)) { /* ..(if there is one) */
818 memset((u_char *)ptr + size, SOME_JUNK, osize-size);
819 return (ptr); /* ..don't do anything else. */
823 wrtwarning("pointer to wrong page\n");
830 /* copy the lesser of the two sizes, and free the old one */
833 else if (osize < size)
834 memcpy(p, ptr, osize);
836 memcpy(p, ptr, size);
843 * Free a sequence of pages
/*
 * free_pages(): return a MALLOC_FIRST page run to the free list.  Marks
 * every page MALLOC_FREE, junk-fills ('J') and madvise()s ('H') them,
 * then inserts the run into the address-ordered free_list, coalescing
 * with adjacent runs.  Finally, if the cache has grown past malloc_cache
 * and the tail of the list abuts the current brk, the excess is handed
 * back to the OS and page_dir trimmed to match.
 */
847 free_pages(void *ptr, u_long index, struct pginfo const *info)
850 struct pgfree *pf, *pt=NULL;
854 if (info == MALLOC_FREE) {
855 wrtwarning("page is already free\n");
859 if (info != MALLOC_FIRST) {
860 wrtwarning("pointer to wrong page\n");
864 if ((u_long)ptr & malloc_pagemask) {
865 wrtwarning("modified (page-) pointer\n");
869 /* Count how many pages and mark them free at the same time */
870 page_dir[index] = MALLOC_FREE;
871 for (i = 1; page_dir[index+i] == MALLOC_FOLLOW; i++)
872 page_dir[index + i] = MALLOC_FREE;
874 l = i << malloc_pageshift;
877 memset(ptr, SOME_JUNK, l);
879 #if defined(MADV_FREE)
881 madvise(ptr, l, MADV_FREE);
884 tail = (char *)ptr+l;
886 /* add to free-list */
/* px was pre-allocated (malloc_init / a prior pass) exactly so this
 * imalloc cannot recurse into an empty cache here. */
888 px = imalloc(sizeof *px); /* This cannot fail... */
892 if (free_list.next == NULL) {
894 /* Nothing on free list, put this at head */
895 px->next = free_list.next;
896 px->prev = &free_list;
903 /* Find the right spot, leave pf pointing to the modified entry. */
904 tail = (char *)ptr+l;
906 for(pf = free_list.next; pf->end < ptr && pf->next != NULL;
908 ; /* Race ahead here */
910 if (pf->page > tail) {
911 /* Insert before entry */
918 } else if (pf->end == ptr ) {
919 /* Append to the previous entry */
920 pf->end = (char *)pf->end + l;
922 if (pf->next != NULL && pf->end == pf->next->page ) {
923 /* And collapse the next too. */
926 pf->size += pt->size;
928 if (pf->next != NULL)
931 } else if (pf->page == tail) {
932 /* Prepend to entry */
935 } else if (pf->next == NULL) {
936 /* Append at tail of chain */
943 wrterror("freelist is destroyed\n");
947 /* Return something to OS ? */
948 if (pf->next == NULL && /* If we're the last one, */
949 pf->size > malloc_cache && /* ..and the cache is full, */
950 pf->end == malloc_brk && /* ..and none behind us, */
951 malloc_brk == sbrk(0)) { /* ..and it's OK to do... */
954 * Keep the cache intact. Notice that the '>' above guarantees that
955 * the pf will always have at least one page afterwards.
957 pf->end = (char *)pf->page + malloc_cache;
958 pf->size = malloc_cache;
961 malloc_brk = pf->end;
963 index = ptr2index(pf->end);
/* Pages past the new brk are no longer ours: disown them in page_dir. */
965 for(i=index;i <= last_index;)
966 page_dir[i++] = MALLOC_NOT_MINE;
968 last_index = index - 1;
970 /* XXX: We could realloc/shrink the pagedir here I guess. */
977 * Free a chunk, and possibly the page it's on, if the page becomes empty.
/*
 * free_bytes(): release one chunk.  Validates alignment against the
 * chunk size and the bitmap (double-free detection), junk-fills ('J'),
 * sets the chunk's free bit, and maintains the bucket list: a page going
 * from full to non-full is re-inserted in address order; a page whose
 * chunks are all free is unlinked and its page (and out-of-page pginfo,
 * if any) returned to the page allocator.
 */
981 free_bytes(void *ptr, u_long index, struct pginfo *info)
987 /* Find the chunk number on the page */
988 i = ((u_long)ptr & malloc_pagemask) >> info->shift;
990 if (((u_long)ptr & (info->size-1))) {
991 wrtwarning("modified (chunk-) pointer\n");
995 if (info->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
996 wrtwarning("chunk is already free\n");
1001 memset(ptr, SOME_JUNK, info->size);
1003 info->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
/* info->shift doubles as the bucket number: page_dir[shift] heads the
 * list of chunk pages of this size. */
1006 mp = page_dir + info->shift;
1008 if (info->free == 1) {
1010 /* Page became non-full */
1012 mp = page_dir + info->shift;
1013 /* Insert in address order */
1014 while (*mp && (*mp)->next && (*mp)->next->page < info->page)
1021 if (info->free != info->total)
1024 /* Find & remove this page in the queue */
1025 while (*mp != info) {
1026 mp = &((*mp)->next);
1027 #ifdef MALLOC_EXTRA_SANITY
1029 wrterror("(ES): Not on queue\n");
1030 #endif /* MALLOC_EXTRA_SANITY */
1034 /* Free the page & the info structure if need be */
1035 page_dir[ptr2index(info->page)] = MALLOC_FIRST;
1036 vp = info->page; /* Order is important ! */
1037 if(vp != (void*)info)
/*
 * ifree(): internal free.  Looks the pointer up in page_dir and
 * dispatches: sentinel entries (< MALLOC_MAGIC) mean a whole-page run,
 * a real pginfo pointer means a chunk.  Junk pointers outside the arena
 * only warn -- they are ignored rather than corrupting state.
 */
1045 struct pginfo *info;
1052 /* If we're already sinking, don't make matters any worse. */
1056 index = ptr2index(ptr);
1058 if (index < malloc_pageshift) {
1059 wrtwarning("junk pointer, too low to make sense\n");
1063 if (index > last_index) {
1064 wrtwarning("junk pointer, too high to make sense\n");
1068 info = page_dir[index];
1070 if (info < MALLOC_MAGIC)
1071 free_pages(ptr, index, info);
1073 free_bytes(ptr, index, info);
/*
 * pubrealloc(): the single serialized entry point behind malloc, free
 * and realloc.  Handles locking, one-time initialization, recursion
 * detection (e.g. from a signal handler re-entering malloc), the SysV
 * and ZEROSIZEPTR malloc(0) conventions, utrace logging, and the
 * 'X' (xmalloc: abort on OOM) option.  `func` names the public caller
 * for diagnostics.
 */
1078 pubrealloc(void *ptr, size_t size, const char *func)
1082 static int malloc_active; /* Recusion flag for public interface. */
1083 static unsigned malloc_started; /* Set when initialization has been done */
1086 * If a thread is inside our code with a functional lock held, and then
1087 * catches a signal which calls us again, we would get a deadlock if the
1088 * lock is not of a recursive type.
1092 if (malloc_active > 0) {
1093 if (malloc_active == 1) {
1094 wrtwarning("recursive call\n");
1103 if (!malloc_started) {
1105 wrtwarning("malloc() has never been called\n");
/* ZEROSIZEPTR was handed out for a zero-size request; treat as no-op. */
1115 if (ptr == ZEROSIZEPTR)
1117 if (malloc_sysv && !size) {
1125 } else if (ptr == NULL) {
1129 r = irealloc(ptr, size);
1132 UTRACE(ptr, size, r);
1135 if (malloc_xmalloc && err)
1136 wrterror("out of memory\n");
1143 * These are the public exported interface routines.
/* All three public entry points funnel through pubrealloc(), which owns
 * locking and initialization; the string argument labels diagnostics. */
1150 return (pubrealloc(NULL, size, " in malloc():"));
1157 pubrealloc(ptr, 0, " in free():");
1161 realloc(void *ptr, size_t size)
1164 return (pubrealloc(ptr, size, " in realloc():"));