#define JEMALLOC_EXTENT_DSS_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/spin.h"

/******************************************************************************/
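/*
 * Data.  opt_dss holds the value of the dss option; dss_prec_names maps
 * dss_prec_t values to the strings used by the mallctl interface.
 */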
const char *opt_dss = DSS_DEFAULT;

const char *dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};
/*
 * Current dss precedence default, used when creating new arenas.  NB: This is
 * stored as unsigned rather than dss_prec_t because in principle there's no
 * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
 * atomic operations to synchronize the setting.
 */
static atomic_u_t dss_prec_default = ATOMIC_INIT(
    (unsigned)DSS_PREC_DEFAULT);
/* Base address of the DSS. */
static void *dss_base;
/* Atomic boolean indicating whether a thread is currently extending DSS. */
static atomic_b_t dss_extending;
/* Atomic boolean indicating whether the DSS is exhausted. */
static atomic_b_t dss_exhausted;
/* Atomic current upper limit on DSS addresses. */
static atomic_p_t dss_max;
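
/*
 * The DSS is the "data storage segment": the brk()-managed region that sbrk()
 * grows and shrinks.  The state above is shared process-wide, so all updates
 * go through atomics, and extension is serialized via the dss_extending flag.
 */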

/******************************************************************************/

static void *
extent_dss_sbrk(intptr_t increment) {
	return sbrk(increment);
}

dss_prec_t
extent_dss_prec_get(void) {
	dss_prec_t ret;

	if (!have_dss) {
		return dss_prec_disabled;
	}
	ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE);
	return ret;
}

bool
extent_dss_prec_set(dss_prec_t dss_prec) {
	if (!have_dss) {
		return (dss_prec != dss_prec_disabled);
	}
	atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE);
	return false;
}
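
/*
 * extent_dss_extending_start() / extent_dss_extending_finish() bracket every
 * attempt to grow the DSS: the weak CAS on dss_extending is effectively a
 * spinlock, so only one thread extends the DSS at a time.
 */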
static void
extent_dss_extending_start(void) {
	spin_t spinner = SPIN_INITIALIZER;
	while (true) {
		bool expected = false;
		if (atomic_compare_exchange_weak_b(&dss_extending, &expected,
		    true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
			break;
		}
		spin_adaptive(&spinner);
	}
}

static void
extent_dss_extending_finish(void) {
	assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED));

	atomic_store_b(&dss_extending, false, ATOMIC_RELEASE);
}
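
/*
 * Return the current end of the DSS, or NULL if the DSS cannot be used: either
 * sbrk(0) failed, or a fixed new_addr was requested that is not exactly the
 * current DSS end.
 */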
static void *
extent_dss_max_update(void *new_addr) {
	/*
	 * Get the current end of the DSS as max_cur and ensure that dss_max is
	 * up to date.
	 */
	void *max_cur = extent_dss_sbrk(0);
	if (max_cur == (void *)-1) {
		return NULL;
	}
	atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE);
	/* Fixed new_addr can only be supported if it is at the edge of DSS. */
	if (new_addr != NULL && max_cur != new_addr) {
		return NULL;
	}
	return max_cur;
}
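
/*
 * Allocate a size-byte extent from the DSS with the requested alignment.  Any
 * page-aligned space skipped over to satisfy alignment is handed back to the
 * arena as a recyclable "gap" extent instead of being leaked.
 */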
void *
extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit) {
	extent_t *gap;

	cassert(have_dss);
	assert(size > 0);
	assert(alignment == ALIGNMENT_CEILING(alignment, PAGE));

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a large allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0) {
		return NULL;
	}

	gap = extent_alloc(tsdn, arena);
	if (gap == NULL) {
		return NULL;
	}
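
	/*
	 * Everything below runs with DSS extension serialized; the gap
	 * extent's metadata was reserved above so that a metadata allocation
	 * failure never requires undoing an sbrk() call.
	 */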
	extent_dss_extending_start();
	if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) {
		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		while (true) {
			void *max_cur = extent_dss_max_update(new_addr);
			if (max_cur == NULL) {
				goto label_oom;
			}

			/*
			 * Compute how much page-aligned gap space (if any) is
			 * necessary to satisfy alignment.  This space can be
			 * recycled for later use.
			 */
			void *gap_addr_page = (void *)(PAGE_CEILING(
			    (uintptr_t)max_cur));
			void *ret = (void *)ALIGNMENT_CEILING(
			    (uintptr_t)gap_addr_page, alignment);
			size_t gap_size_page = (uintptr_t)ret -
			    (uintptr_t)gap_addr_page;
			if (gap_size_page != 0) {
				extent_init(gap, arena, gap_addr_page,
				    gap_size_page, false, SC_NSIZES,
				    arena_extent_sn_next(arena),
				    extent_state_active, false, true, true,
				    EXTENT_NOT_HEAD);
			}
			/*
			 * Compute the address just past the end of the desired
			 * allocation space.
			 */
			void *dss_next = (void *)((uintptr_t)ret + size);
			if ((uintptr_t)ret < (uintptr_t)max_cur ||
			    (uintptr_t)dss_next < (uintptr_t)max_cur) {
				goto label_oom; /* Wrap-around. */
			}
			/* Compute the increment, including subpage bytes. */
			void *gap_addr_subpage = max_cur;
			size_t gap_size_subpage = (uintptr_t)ret -
			    (uintptr_t)gap_addr_subpage;
			intptr_t incr = gap_size_subpage + size;

			assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
			    size);
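
			/*
			 * sbrk() returns the previous break on success, so a
			 * return value equal to max_cur means no raw sbrk()
			 * call from outside the allocator raced with this
			 * extension.
			 */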
			/* Try to allocate. */
			void *dss_prev = extent_dss_sbrk(incr);
			if (dss_prev == max_cur) {
				/* Success. */
				atomic_store_p(&dss_max, dss_next,
				    ATOMIC_RELEASE);
				extent_dss_extending_finish();

				if (gap_size_page != 0) {
					extent_dalloc_gap(tsdn, arena, gap);
				} else {
					extent_dalloc(tsdn, arena, gap);
				}
				if (!*commit) {
					*commit = pages_decommit(ret, size);
				}
				if (*zero && *commit) {
					extent_hooks_t *extent_hooks =
					    EXTENT_HOOKS_INITIALIZER;
					extent_t extent;

					extent_init(&extent, arena, ret, size,
					    size, false, SC_NSIZES,
					    extent_state_active, false, true,
					    true, EXTENT_NOT_HEAD);
					if (extent_purge_forced_wrapper(tsdn,
					    arena, &extent_hooks, &extent, 0,
					    size)) {
						memset(ret, 0, size);
					}
				}
				return ret;
			}

			/*
			 * Failure, whether due to OOM or a race with a raw
			 * sbrk() call from outside the allocator.
			 */
			if (dss_prev == (void *)-1) {
				/* OOM. */
				atomic_store_b(&dss_exhausted, true,
				    ATOMIC_RELEASE);
				goto label_oom;
			}
		}
	}
label_oom:
	extent_dss_extending_finish();
	extent_dalloc(tsdn, arena, gap);
	return NULL;
}
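
/*
 * An address is "in the DSS" if it falls in [dss_base, max), where max is the
 * caller's snapshot of dss_max.
 */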
static bool
extent_in_dss_helper(void *addr, void *max) {
	return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr <
	    (uintptr_t)max);
}

bool
extent_in_dss(void *addr) {
	cassert(have_dss);

	return extent_in_dss_helper(addr, atomic_load_p(&dss_max,
	    ATOMIC_ACQUIRE));
}
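
/*
 * Extents may only merge if they lie on the same side of the DSS: two
 * addresses entirely below dss_base are trivially compatible; otherwise both
 * must agree on whether they fall within [dss_base, dss_max).
 */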
bool
extent_dss_mergeable(void *addr_a, void *addr_b) {
	void *max;

	cassert(have_dss);

	if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <
	    (uintptr_t)dss_base) {
		return true;
	}

	max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE);
	return (extent_in_dss_helper(addr_a, max) ==
	    extent_in_dss_helper(addr_b, max));
}
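
/*
 * Record the initial break as the DSS base and reset the shared state.  This
 * runs during jemalloc bootstrap, before the DSS is used for allocation.
 */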
void
extent_dss_boot(void) {
	cassert(have_dss);

	dss_base = extent_dss_sbrk(0);
	atomic_store_b(&dss_extending, false, ATOMIC_RELAXED);
	atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED);
	atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED);
}

/******************************************************************************/