#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
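/*
 * All of the following is protected by base_mtx: base_avail_szad tracks
 * unused trailing space in base chunks, base_nodes is a free list of spare
 * extent nodes, and the three counters back base_stats_get().
 */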
static malloc_mutex_t	base_mtx;
static extent_tree_t	base_avail_szad;
static extent_node_t	*base_nodes;
static size_t		base_allocated;
static size_t		base_resident;
static size_t		base_mapped;
/******************************************************************************/
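/*
 * Pop a spare extent_node_t off the base_nodes free list, or return NULL if
 * the list is empty.  The first word of each spare node holds the link to
 * the next one.
 */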
static extent_node_t *
base_node_try_alloc(tsdn_t *tsdn)
{
	extent_node_t *node;

	malloc_mutex_assert_owner(tsdn, &base_mtx);

	if (base_nodes == NULL)
		return (NULL);
	node = base_nodes;
	base_nodes = *(extent_node_t **)node;
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	return (node);
}
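/* Push a node onto the base_nodes free list for later reuse. */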
static void
base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
{

	malloc_mutex_assert_owner(tsdn, &base_mtx);

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
}
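/*
 * Map a new chunk of at least minsize bytes with chunk_alloc_base().  If no
 * spare node is available to describe it, carve one out of the chunk's own
 * base, and return a node describing the remaining usable space.
 */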
static extent_node_t *
base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
{
	extent_node_t *node;
	size_t csize, nsize;
	void *addr;

	malloc_mutex_assert_owner(tsdn, &base_mtx);
	assert(minsize != 0);
	node = base_node_try_alloc(tsdn);
	/* Allocate enough space to also carve a node out if necessary. */
	nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
	csize = CHUNK_CEILING(minsize + nsize);
	addr = chunk_alloc_base(csize);
	if (addr == NULL) {
		if (node != NULL)
			base_node_dalloc(tsdn, node);
		return (NULL);
	}
	base_mapped += csize;
	if (node == NULL) {
		node = (extent_node_t *)addr;
		addr = (void *)((uintptr_t)addr + nsize);
		csize -= nsize;
		if (config_stats) {
			base_allocated += nsize;
			base_resident += PAGE_CEILING(nsize);
		}
	}
	extent_node_init(node, NULL, addr, csize, true, true);
	return (node);
}
/*
 * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
 * sparse data structures such as radix tree nodes efficient with respect to
 * physical memory usage.
 */
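/*
 * A sketch of a typical caller (modeled on the rtree node allocator; names
 * are illustrative): a node spanning several pages costs physical memory
 * only for the pages actually written.
 *
 *	rtree_node_elm_t *elms = (rtree_node_elm_t *)base_alloc(tsdn,
 *	    nelms * sizeof(rtree_node_elm_t));
 */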
void *
base_alloc(tsdn_t *tsdn, size_t size)
{
	void *ret;
	size_t csize, usize;
	extent_node_t *node;
	extent_node_t key;

	/*
	 * Round size up to nearest multiple of the cacheline size, so that
	 * there is no chance of false cache line sharing.
	 */
	csize = CACHELINE_CEILING(size);

	usize = s2u(csize);
	extent_node_init(&key, NULL, NULL, usize, false, false);
	malloc_mutex_lock(tsdn, &base_mtx);
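	/*
	 * Search the size/address-ordered tree for the smallest node that can
	 * satisfy the request (ties broken by lowest address).
	 */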
	node = extent_tree_szad_nsearch(&base_avail_szad, &key);
	if (node != NULL) {
		/* Use existing space. */
		extent_tree_szad_remove(&base_avail_szad, node);
	} else {
		/* Try to allocate more space. */
		node = base_chunk_alloc(tsdn, csize);
	}
	if (node == NULL) {
		ret = NULL;
		goto label_return;
	}
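	/*
	 * Carve the allocation from the front of the node.  If space is left
	 * over, shrink the node to describe the remainder and reinsert it;
	 * otherwise recycle the node structure itself.
	 */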
	ret = extent_node_addr_get(node);
	if (extent_node_size_get(node) > csize) {
		extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
		extent_node_size_set(node, extent_node_size_get(node) - csize);
		extent_tree_szad_insert(&base_avail_szad, node);
	} else
		base_node_dalloc(tsdn, node);
	if (config_stats) {
		base_allocated += csize;
		/*
		 * Add one PAGE to base_resident for every page boundary that
		 * is crossed by the new allocation.
		 */
		base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
		    PAGE_CEILING((uintptr_t)ret);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
	malloc_mutex_unlock(tsdn, &base_mtx);
	return (ret);
}
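/*
 * Copy the current counters out under base_mtx so that callers observe a
 * mutually consistent snapshot.
 */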
void
base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
    size_t *mapped)
{

	malloc_mutex_lock(tsdn, &base_mtx);
	assert(base_allocated <= base_resident);
	assert(base_resident <= base_mapped);
	*allocated = base_allocated;
	*resident = base_resident;
	*mapped = base_mapped;
	malloc_mutex_unlock(tsdn, &base_mtx);
}
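/*
 * Bootstrap the base allocator: initialize base_mtx and the tree of
 * available space.  Must run before the first call to base_alloc().
 */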
bool
base_boot(void)
{

	if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
		return (true);
	extent_tree_szad_new(&base_avail_szad);
	base_nodes = NULL;

	return (false);
}
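/*
 * Fork hooks: base_mtx is acquired before fork() and released afterward so
 * that the child does not inherit a mutex held by another thread.
 */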
void
base_prefork(tsdn_t *tsdn)
{

	malloc_mutex_prefork(tsdn, &base_mtx);
}
void
base_postfork_parent(tsdn_t *tsdn)
{

	malloc_mutex_postfork_parent(tsdn, &base_mtx);
}
void
base_postfork_child(tsdn_t *tsdn)
{

	malloc_mutex_postfork_child(tsdn, &base_mtx);
}