#define	JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

const char	*dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};

/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t	dss_prec_default = DSS_PREC_DEFAULT;

/*
 * Protects sbrk() calls.  This avoids malloc races among threads, though it
 * does not protect against races with threads that call sbrk() directly.
 */
static malloc_mutex_t	dss_mtx;

/* Base address of the DSS. */
static void		*dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void		*dss_prev;
/* Current upper limit on DSS addresses. */
static void		*dss_max;

/******************************************************************************/
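/*
 * Layout sketch (assuming the DSS grows upward, as on typical Unix systems):
 * dss_base records the break as first observed by chunk_dss_boot(), dss_prev
 * caches the result of the most recent sbrk() call, and [dss_base, dss_max)
 * is the address range that chunk_in_dss() attributes to the DSS.
 */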
static void *
chunk_dss_sbrk(intptr_t increment)
{

#ifdef JEMALLOC_DSS
	return (sbrk(increment));
#else
	not_implemented();
	return (NULL);
#endif
}
dss_prec_t
chunk_dss_prec_get(void)
{
	dss_prec_t ret;

	if (!have_dss)
		return (dss_prec_disabled);
	malloc_mutex_lock(&dss_mtx);
	ret = dss_prec_default;
	malloc_mutex_unlock(&dss_mtx);
	return (ret);
}
bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{

	if (!have_dss)
		return (dss_prec != dss_prec_disabled);
	malloc_mutex_lock(&dss_mtx);
	dss_prec_default = dss_prec;
	malloc_mutex_unlock(&dss_mtx);
	return (false);
}
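/*
 * The precedence stored here is what the "dss" mallctls expose.  A minimal
 * sketch of how an application might flip it at run time, assuming jemalloc's
 * public mallctl() API and arena index 0:
 *
 *	const char *dss = "primary";
 *	mallctl("arena.0.dss", NULL, NULL, (void *)&dss, sizeof(dss));
 */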
void *
chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit)
{
	void *ret;

	cassert(have_dss);
	assert(size > 0 && (size & chunksize_mask) == 0);
	assert(alignment > 0 && (alignment & chunksize_mask) == 0);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a huge allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0)
		return (NULL);
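	/*
	 * For example, on a 64-bit system a bogus request of 2^63 bytes has
	 * its sign bit set once cast to intptr_t, so the check above rejects
	 * it rather than shrinking the DSS with a negative increment.
	 */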
	malloc_mutex_lock(&dss_mtx);
	if (dss_prev != (void *)-1) {
		size_t gap_size, cpad_size;
		void *cpad, *dss_next;
		intptr_t incr;

		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		do {
			/* Avoid an unnecessary system call. */
			if (new_addr != NULL && dss_max != new_addr)
				break;

			/* Get the current end of the DSS. */
			dss_max = chunk_dss_sbrk(0);

			/* Make sure the earlier condition still holds. */
			if (new_addr != NULL && dss_max != new_addr)
				break;

			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.
			 */
			gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
			    chunksize_mask;
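			/*
			 * For example, with 2 MiB chunks (lg_chunk == 21) and
			 * dss_max == 0x7f3a00001000, gap_size ==
			 * (0x200000 - 0x1000) & 0x1fffff == 0x1ff000, so the
			 * gap ends at the next chunk boundary,
			 * 0x7f3a00200000.
			 */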
			/*
			 * Compute how much chunk-aligned pad space (if any) is
			 * necessary to satisfy alignment.  This space can be
			 * recycled for later use.
			 */
			cpad = (void *)((uintptr_t)dss_max + gap_size);
			ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
			    alignment);
			cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
			dss_next = (void *)((uintptr_t)ret + size);
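			/*
			 * Continuing the example above: with alignment ==
			 * 0x400000 (4 MiB), ret ==
			 * ALIGNMENT_CEILING(0x7f3a00001000, 0x400000) ==
			 * 0x7f3a00400000, so cpad_size == 0x200000 and the
			 * one chunk of pad space between cpad and ret gets
			 * recycled below.
			 */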
			if ((uintptr_t)ret < (uintptr_t)dss_max ||
			    (uintptr_t)dss_next < (uintptr_t)dss_max) {
				/* Wrap-around. */
				malloc_mutex_unlock(&dss_mtx);
				return (NULL);
			}
			incr = gap_size + cpad_size + size;
			dss_prev = chunk_dss_sbrk(incr);
			if (dss_prev == dss_max) {
				/* Success. */
				dss_max = dss_next;
				malloc_mutex_unlock(&dss_mtx);
				if (cpad_size != 0) {
					chunk_hooks_t chunk_hooks =
					    CHUNK_HOOKS_INITIALIZER;
					chunk_dalloc_wrapper(arena,
					    &chunk_hooks, cpad, cpad_size,
					    false, true);
				}
				if (*zero) {
					JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
					    ret, size);
					memset(ret, 0, size);
				}
				if (!*commit)
					*commit = pages_decommit(ret, size);
				return (ret);
			}
		} while (dss_prev != (void *)-1);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (NULL);
}
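/*
 * A minimal caller sketch (hypothetical; the real callers are jemalloc's
 * internal chunk allocation paths): request one zeroed, committed chunk at no
 * particular address:
 *
 *	bool zero = true, commit = true;
 *	void *chunk = chunk_alloc_dss(arena, NULL, chunksize, chunksize,
 *	    &zero, &commit);
 *
 * A NULL return means the DSS is exhausted, disabled, or lost a race, and
 * the caller is expected to fall back to another source such as mmap().
 */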
bool
chunk_in_dss(void *chunk)
{
	bool ret;

	cassert(have_dss);

	malloc_mutex_lock(&dss_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)dss_base
	    && (uintptr_t)chunk < (uintptr_t)dss_max)
		ret = true;
	else
		ret = false;
	malloc_mutex_unlock(&dss_mtx);

	return (ret);
}
bool
chunk_dss_boot(void)
{

	cassert(have_dss);

	if (malloc_mutex_init(&dss_mtx))
		return (true);
	dss_base = chunk_dss_sbrk(0);
	dss_prev = dss_base;
	dss_max = dss_base;

	return (false);
}
void
chunk_dss_prefork(void)
{

	if (have_dss)
		malloc_mutex_prefork(&dss_mtx);
}

void
chunk_dss_postfork_parent(void)
{

	if (have_dss)
		malloc_mutex_postfork_parent(&dss_mtx);
}

void
chunk_dss_postfork_child(void)
{

	if (have_dss)
		malloc_mutex_postfork_child(&dss_mtx);
}
/******************************************************************************/