/*
  Copyright (c) 2001 Wolfram Gloger
  Copyright (c) 2006 Cavium networks

  Permission to use, copy, modify, distribute, and sell this software
  and its documentation for any purpose is hereby granted without fee,
  provided that (i) the above copyright notices and this permission
  notice appear in all copies of the software and related documentation,
  and (ii) the name of Wolfram Gloger may not be used in any advertising
  or publicity relating to the software.

  THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
  EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
  WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.

  IN NO EVENT SHALL WOLFRAM GLOGER BE LIABLE FOR ANY SPECIAL,
  INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY
  DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
  WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY
  OF LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  PERFORMANCE OF THIS SOFTWARE.
*/
/* $Id: arena.c 30481 2007-12-05 21:46:59Z rfranz $ */

/* Compile-time constants. */
#define HEAP_MIN_SIZE (4096) /* Must leave room for struct malloc_state, arena ptrs, etc., which total about 2400 bytes */
#define THREAD_STATS 0
/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed. */

/***************************************************************************/
// made static to avoid conflicts with newlib
static mstate _int_new_arena __MALLOC_P ((size_t __ini_size));

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)
/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  Not used unless compiling with USE_ARENAS. */
typedef struct _heap_info {
  mstate ar_ptr;           /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;             /* Current size in bytes. */
  size_t pad;              /* Make sure the following data is properly aligned. */
} heap_info;
/* Thread specific data */

static tsd_key_t arena_key;            // one per PP (thread)
static CVMX_SHARED mutex_t list_lock;  // shared...
#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif
/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int CVMX_SHARED cvmx__malloc_initialized = -1;

/**************************************************************************/

/* find the heap and corresponding arena for a given ptr */

#define arena_for_chunk(ptr) ((ptr)->arena_ptr)
#define set_arena_for_chunk(ptr, arena) (ptr)->arena_ptr = (arena)
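/*
 * Illustrative sketch (not part of the original source): because every chunk
 * carries a pointer to its owning arena, a free path can find and lock the
 * right arena directly instead of scanning heaps.  mem2chunk(), _int_free()
 * and the mutex calls are the surrounding allocator's names; the wrapper
 * function below is hypothetical and kept out of the build.
 */
#if 0
static void example_free_via_chunk_arena(void *mem)
{
    mchunkptr p = mem2chunk(mem);         /* user pointer -> chunk header */
    mstate ar_ptr = arena_for_chunk(p);   /* arena pointer stored in the chunk */

    (void)mutex_lock(&ar_ptr->mutex);
    _int_free(ar_ptr, mem);
    (void)mutex_unlock(&ar_ptr->mutex);
}
#endif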
#endif /* USE_ARENAS */
/**************************************************************************/

static __malloc_ptr_t (*save_malloc_hook) __MALLOC_P ((size_t __size,
                                                       __const __malloc_ptr_t));
static void (*save_free_hook) __MALLOC_P ((__malloc_ptr_t __ptr,
                                           __const __malloc_ptr_t));
static Void_t* save_arena;
/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use. */

#define ATFORK_ARENA_PTR ((Void_t*)-1)
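/* Note (added for clarity): in the ptmalloc code this file derives from, the
   fork-preparation handler acquires every arena mutex and stores
   ATFORK_ARENA_PTR in the forking thread's arena TSD slot.  free_atfork()
   below therefore skips the lock/unlock when it sees this value, since the
   relevant mutex is presumably already held by that thread. */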
/* The following hooks are used while the `atfork' handling mechanism
   is active. */
static Void_t*
malloc_atfork(size_t sz, const Void_t *caller)
static void
free_atfork(Void_t* mem, const Void_t *caller)
{
  Void_t *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);         /* do not bother to replicate free_check here */

#if HAVE_MMAP
  if (chunk_is_mmapped(p))                       /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }
#endif

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_lock(&ar_ptr->mutex);
  _int_free(ar_ptr, mem);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_unlock(&ar_ptr->mutex);
}
#ifdef __linux__
#error __linux__ defined!
#endif

#endif /* !defined NO_THREADS */
/* Initialization routine. */
#ifdef _LIBC
#error _LIBC is defined, and should not be
#endif
static CVMX_SHARED cvmx_spinlock_t malloc_init_spin_lock;

/* Managing heaps and arenas (for concurrent threads) */
#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */
static void
#if __STD_C
dump_heap(heap_info *heap)
#else
dump_heap(heap) heap_info *heap;
#endif
{
  char *ptr;
  mchunkptr p;
  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}
#endif /* MALLOC_DEBUG > 1 */
static mstate cvmx_new_arena(void *addr, size_t size)
{
    heap_info *h;
    mstate a;
    char *ptr;
    unsigned long misalign;
    int page_mask = malloc_getpagesize - 1;
    debug_printf("cvmx_new_arena called, addr: %p, size %lu\n", addr, (unsigned long)size);
    debug_printf("heapinfo size: %lu, mstate size: %lu\n",
                 (unsigned long)sizeof(heap_info), (unsigned long)sizeof(struct malloc_state));
    if (!addr || (size < HEAP_MIN_SIZE))
        return NULL;
    /* We must zero out the arena as the malloc code assumes this. */
    memset(addr, 0, size);
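    /*
     * Layout of the region built below (illustrative summary of this code):
     *
     *   addr -> +----------------------+
     *           | heap_info (h)        |
     *           +----------------------+
     *           | struct malloc_state  |  the arena, a == (mstate)(h + 1)
     *           +----------------------+
     *           | top chunk ...        |  aligned, runs to addr + size
     *           +----------------------+
     */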
    h = (heap_info *)addr;
    h->size = size;
    a = h->ar_ptr = (mstate)(h+1);
    malloc_init_state(a);

    a->system_mem = a->max_system_mem = h->size;
    arena_mem += h->size;
    /* Set up the top chunk, with proper alignment. */
    ptr = (char *)(a + 1);
    misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
    if (misalign > 0)
        ptr += MALLOC_ALIGNMENT - misalign;
    top(a) = (mchunkptr)ptr;
    set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);
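    /*
     * Illustrative note (added): chunk2mem(ptr) is the user-visible address
     * for a chunk header placed at ptr, and misalign is how far that address
     * sits past the previous MALLOC_ALIGNMENT boundary.  With an alignment of,
     * say, 16 and a payload address ending in 0x8, misalign is 8 and ptr is
     * advanced by 16 - 8 = 8 bytes.  The top chunk then covers everything from
     * ptr to the end of the region ((char*)h + h->size), with PREV_INUSE set
     * so it is never coalesced backwards into the arena metadata.
     */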
    return a;
}

int cvmx_add_arena(cvmx_arena_list_t *arena_list, void *ptr, size_t size)
{
    mstate a;
    /* Enforce required alignment, and adjust size */
    int misaligned = ((size_t)ptr) & (MALLOC_ALIGNMENT - 1);
    if (misaligned)
    {
        ptr = (char*)ptr + MALLOC_ALIGNMENT - misaligned;
        size -= MALLOC_ALIGNMENT - misaligned;
    }
    debug_printf("Adding arena at addr: %p, size %lu\n", ptr, (unsigned long)size);
    a = cvmx_new_arena(ptr, size); /* checks ptr and size */
    debug_printf("cvmx_add_arena - arena_list: %p, *arena_list: %p\n", arena_list, *arena_list);
    debug_printf("cvmx_add_arena - list: %p, new: %p\n", *arena_list, a);
    mutex_init(&a->mutex);
    mutex_lock(&a->mutex);
    if (*arena_list)
    {
        mstate ar_ptr = *arena_list;
        (void)mutex_lock(&ar_ptr->mutex);
        a->next = ar_ptr->next; // lock held on a and ar_ptr
        ar_ptr->next = a;
        (void)mutex_unlock(&ar_ptr->mutex);
    }
    else
    {
        *arena_list = a;
        a->next = a;
    }

    debug_printf("cvmx_add_arena - list: %p, list->next: %p\n", *arena_list, ((mstate)*arena_list)->next);
    // unlock, since it is not going to be used immediately
    (void)mutex_unlock(&a->mutex);

    return 0;
}
#endif /* USE_ARENAS */
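/*
 * Usage sketch (added illustration, not part of the original file): a caller
 * donates a private memory region to the allocator by adding it to an arena
 * list.  The list and buffer names below are hypothetical, and cvmx_malloc()
 * / cvmx_free() are companion entry points assumed to be declared elsewhere
 * in cvmx-malloc; only cvmx_add_arena() is defined in this file.
 */
#if 0
static CVMX_SHARED cvmx_arena_list_t example_arena_list = NULL;
static CVMX_SHARED char example_heap[64 * 1024];

static void example_setup(void)
{
    /* cvmx_add_arena() aligns the pointer, zeroes the region, lays out the
       heap_info/malloc_state headers, and links the new arena into the list. */
    if (cvmx_add_arena(&example_arena_list, example_heap, sizeof(example_heap)) != 0)
        return;                              /* region rejected (e.g. too small) */

    void *buf = cvmx_malloc(example_arena_list, 256);   /* assumed companion API */
    if (buf)
        cvmx_free(buf);                                  /* assumed companion API */
}
#endif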