#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

static malloc_mutex_t base_mtx;
static extent_tree_t base_avail_szad;
static extent_node_t *base_nodes;
static size_t base_allocated;
static size_t base_resident;
static size_t base_mapped;
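
/*
 * base_allocated counts bytes handed out by base_alloc() (after cacheline
 * rounding), base_resident counts the pages those allocations touch, and
 * base_mapped counts the total bytes of chunks mapped for base use; hence
 * base_allocated <= base_resident <= base_mapped, as asserted in
 * base_stats_get().
 */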

/******************************************************************************/

/* base_mtx must be held. */
static extent_node_t *
base_node_try_alloc(void)
{
	extent_node_t *node;

	if (base_nodes == NULL)
		return (NULL);
	node = base_nodes;
	base_nodes = *(extent_node_t **)node;
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	return (node);
}
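
/*
 * Recycled extent nodes form an intrusive singly linked free list headed by
 * base_nodes: the link pointer is stored in the first word of each node, which
 * is why base_node_try_alloc() and base_node_dalloc() cast the node to
 * extent_node_t **.
 */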

/* base_mtx must be held. */
static void
base_node_dalloc(extent_node_t *node)
{

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
}

/* base_mtx must be held. */
static extent_node_t *
base_chunk_alloc(size_t minsize)
{
	extent_node_t *node;
	size_t csize, nsize;
	void *addr;

	assert(minsize != 0);
	node = base_node_try_alloc();
	/* Allocate enough space to also carve a node out if necessary. */
	nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
	csize = CHUNK_CEILING(minsize + nsize);
	addr = chunk_alloc_base(csize);
	if (addr == NULL) {
		if (node != NULL)
			base_node_dalloc(node);
		return (NULL);
	}
	base_mapped += csize;
	if (node == NULL) {
		node = (extent_node_t *)addr;
		addr = (void *)((uintptr_t)addr + nsize);
		csize -= nsize;
		if (config_stats) {
			base_allocated += nsize;
			base_resident += PAGE_CEILING(nsize);
		}
	}
	extent_node_init(node, NULL, addr, csize, true, true);
	return (node);
}
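
/*
 * Worked example for the carve-out above (numbers are illustrative, assuming a
 * 2 MiB chunk size): base_chunk_alloc(100) with no recycled node available
 * computes nsize = CACHELINE_CEILING(sizeof(extent_node_t)) and csize =
 * CHUNK_CEILING(100 + nsize) == 2 MiB, uses the first nsize bytes of the new
 * chunk for the node itself, and initializes that node to describe the
 * remaining (2 MiB - nsize) bytes starting at addr.
 */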

/*
 * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
 * sparse data structures such as radix tree nodes efficient with respect to
 * physical memory usage.
 */
void *
base_alloc(size_t size)
{
	void *ret;
	size_t csize, usize;
	extent_node_t *node;
	extent_node_t key;

	/*
	 * Round size up to nearest multiple of the cacheline size, so that
	 * there is no chance of false cache line sharing.
	 */
	csize = CACHELINE_CEILING(size);

	usize = s2u(csize);
	extent_node_init(&key, NULL, NULL, usize, false, false);
	malloc_mutex_lock(&base_mtx);
	node = extent_tree_szad_nsearch(&base_avail_szad, &key);
	if (node != NULL) {
		/* Use existing space. */
		extent_tree_szad_remove(&base_avail_szad, node);
	} else {
		/* Try to allocate more space. */
		node = base_chunk_alloc(csize);
	}
	if (node == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = extent_node_addr_get(node);
	if (extent_node_size_get(node) > csize) {
		extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
		extent_node_size_set(node, extent_node_size_get(node) - csize);
		extent_tree_szad_insert(&base_avail_szad, node);
	} else
		base_node_dalloc(node);
	if (config_stats) {
		base_allocated += csize;
		/*
		 * Add one PAGE to base_resident for every page boundary that
		 * is crossed by the new allocation.
		 */
		base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
		    PAGE_CEILING((uintptr_t)ret);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
	malloc_mutex_unlock(&base_mtx);
	return (ret);
}
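
/*
 * Illustrative use (not part of this file): internal metadata consumers such
 * as the chunk radix tree obtain long-lived, never-freed memory along these
 * lines:
 *
 *	rtree_node_elm_t *elms = (rtree_node_elm_t *)base_alloc(nelms *
 *	    sizeof(rtree_node_elm_t));
 *	if (elms == NULL)
 *		return (NULL);
 *
 * base_alloc() has no matching free routine here; leftover space within a base
 * chunk is instead tracked in base_avail_szad and reused by later calls.
 */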

void
base_stats_get(size_t *allocated, size_t *resident, size_t *mapped)
{

	malloc_mutex_lock(&base_mtx);
	assert(base_allocated <= base_resident);
	assert(base_resident <= base_mapped);
	*allocated = base_allocated;
	*resident = base_resident;
	*mapped = base_mapped;
	malloc_mutex_unlock(&base_mtx);
}
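
/*
 * The snapshot above is taken under base_mtx so the three values are mutually
 * consistent; the stats machinery (e.g. the "stats.metadata" mallctl) is the
 * expected consumer.
 */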

bool
base_boot(void)
{

	if (malloc_mutex_init(&base_mtx))
		return (true);
	extent_tree_szad_new(&base_avail_szad);
	base_nodes = NULL;

	return (false);
}

void
base_prefork(void)
{

	malloc_mutex_prefork(&base_mtx);
}

void
base_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&base_mtx);
}

void
base_postfork_child(void)
{

	malloc_mutex_postfork_child(&base_mtx);
}