/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;

/*
 * tcache pointers close to NULL are used to encode state information that is
 * used for two purposes: preventing thread caching on a per-thread basis and
 * cleaning up during thread shutdown.
 */
#define TCACHE_STATE_DISABLED		((tcache_t *)(uintptr_t)1)
#define TCACHE_STATE_REINCARNATED	((tcache_t *)(uintptr_t)2)
#define TCACHE_STATE_PURGATORY		((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX		TCACHE_STATE_PURGATORY
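
/*
 * Illustrative sketch (not part of the original header): because every state
 * tag compares <= TCACHE_STATE_MAX, a single comparison distinguishes the
 * sentinel values (NULL, disabled, reincarnated, purgatory) from pointers to
 * fully initialized caches:
 *
 *	tcache_t *tcache = *tcache_tsd_get();
 *	if ((uintptr_t)tcache > (uintptr_t)TCACHE_STATE_MAX)
 *		tcache_event(tcache);	(safe: a real tcache_t pointer)
 */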

/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache.  This is an additional constraint beyond the default limit, which is
 * twice the number of regions per run for the size class.
 *
 * This constant must be an even number.
 */
#define TCACHE_NSLOTS_SMALL_MAX		200
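
/*
 * Worked example (hypothetical region counts): for a size class whose runs
 * hold 64 regions, the per-run rule yields 2 * 64 = 128 slots, below this
 * cap; for a class whose runs hold 512 regions, 2 * 512 = 1024 is clamped to
 * TCACHE_NSLOTS_SMALL_MAX, i.e. 200 slots.
 */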

/* Number of cache slots for large size classes. */
#define TCACHE_NSLOTS_LARGE		20

/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT	15
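
/*
 * Worked example: with the default value of 15, (1U << 15) == 32768, so
 * tcache_maxclass defaults to 32 KiB (assuming that value falls within the
 * bounds enforced during bootstrapping).
 */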

/*
 * TCACHE_GC_SWEEP is the approximate number of allocation events between
 * full GC sweeps.  Integer rounding may cause the actual number to be
 * slightly higher, since GC is performed incrementally.
 */
#define TCACHE_GC_SWEEP			8192

/* Number of tcache allocation/deallocation events between incremental GCs. */
#define TCACHE_GC_INCR							\
    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
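
/*
 * Worked example (hypothetical NBINS == 28): 8192 / 28 truncates to 292, the
 * quotient is nonzero so 1 is added, and TCACHE_GC_INCR == 293.  A full sweep
 * of all 28 bins therefore spans 28 * 293 = 8204 events, slightly more than
 * TCACHE_GC_SWEEP, as the comment above anticipates.
 */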

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

typedef enum {
    tcache_enabled_false   = 0, /* Enable cast to/from bool. */
    tcache_enabled_true    = 1,
    tcache_enabled_default = 2
} tcache_enabled_t;

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
struct tcache_bin_info_s {
    unsigned	ncached_max;	/* Upper limit on ncached. */
};

struct tcache_bin_s {
    tcache_bin_stats_t	tstats;
    int			low_water;	/* Min # cached since last GC. */
    unsigned		lg_fill_div;	/* Fill (ncached_max >> lg_fill_div). */
    unsigned		ncached;	/* # of cached objects. */
    void		**avail;	/* Stack of available objects. */
};

struct tcache_s {
    ql_elm(tcache_t)	link;		/* Used for aggregating stats. */
    uint64_t		prof_accumbytes;/* Cleared after arena_prof_accum(). */
    arena_t		*arena;		/* This thread's arena. */
    unsigned		ev_cnt;		/* Event count since incremental GC. */
    unsigned		next_gc_bin;	/* Next bin to GC. */
    tcache_bin_t	tbins[1];	/* Dynamically sized. */
    /*
     * The pointer stacks associated with tbins follow as a contiguous
     * array.  During tcache initialization, the avail pointer in each
     * element of tbins is initialized to point to the proper offset within
     * this array.
     */
};
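
/*
 * Illustrative sizing sketch (an assumption, not the verbatim bootstrap
 * code): the single allocation backing a tcache_t must cover the header, the
 * dynamically sized tbins array, and every avail stack:
 *
 *	size_t size = offsetof(tcache_t, tbins) +
 *	    sizeof(tcache_bin_t) * nhbins;
 *	for (unsigned i = 0; i < nhbins; i++)
 *		size += tcache_bin_info[i].ncached_max * sizeof(void *);
 *
 * Each tbins[i].avail is then pointed at its slice of the trailing region.
 */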

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool	opt_tcache;
extern ssize_t	opt_lg_tcache_max;

extern tcache_bin_info_t	*tcache_bin_info;

/*
 * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
 * large-object bins.
 */
extern size_t	nhbins;

/* Maximum cached size class. */
extern size_t	tcache_maxclass;
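
/*
 * Illustrative relationship (an assumption about the bootstrap logic): with
 * page-multiple large size classes up to tcache_maxclass,
 *
 *	nhbins == NBINS + (tcache_maxclass >> LG_PAGE)
 *
 * e.g. 4 KiB pages and a 32 KiB tcache_maxclass add 8 large bins to the NBINS
 * small bins.
 */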

size_t	tcache_salloc(const void *ptr);
void	tcache_event_hard(tcache_t *tcache);
void	*tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
    size_t binind);
void	tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
void	tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
void	tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void	tcache_arena_dissociate(tcache_t *tcache);
tcache_t *tcache_create(arena_t *arena);
void	tcache_destroy(tcache_t *tcache);
void	tcache_thread_cleanup(void *arg);
void	tcache_stats_merge(tcache_t *tcache, arena_t *arena);
bool	tcache_boot0(void);
bool	tcache_boot1(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tcache_t *)
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t)

void	tcache_event(tcache_t *tcache);
void	tcache_flush(void);
bool	tcache_enabled_get(void);
tcache_t *tcache_get(bool create);
void	tcache_enabled_set(bool enabled);
void	*tcache_alloc_easy(tcache_bin_t *tbin);
void	*tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
void	*tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
void	tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind);
void	tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
/* Map of thread-specific caches. */
malloc_tsd_externs(tcache, tcache_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache, tcache_t *, NULL,
    tcache_thread_cleanup)
/* Per thread flag that allows thread caches to be disabled. */
malloc_tsd_externs(tcache_enabled, tcache_enabled_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t,
    tcache_enabled_default, malloc_tsd_no_cleanup)

JEMALLOC_INLINE void
tcache_flush(void)
{
    tcache_t *tcache;

    cassert(config_tcache);

    tcache = *tcache_tsd_get();
    if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
        return;
    tcache_destroy(tcache);
    tcache = NULL;
    tcache_tsd_set(&tcache);
}

JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
    tcache_enabled_t tcache_enabled;

    cassert(config_tcache);

    tcache_enabled = *tcache_enabled_tsd_get();
    if (tcache_enabled == tcache_enabled_default) {
        tcache_enabled = (tcache_enabled_t)opt_tcache;
        tcache_enabled_tsd_set(&tcache_enabled);
    }

    return ((bool)tcache_enabled);
}

JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
    tcache_enabled_t tcache_enabled;
    tcache_t *tcache;

    cassert(config_tcache);

    tcache_enabled = (tcache_enabled_t)enabled;
    tcache_enabled_tsd_set(&tcache_enabled);
    tcache = *tcache_tsd_get();
    if (enabled) {
        if (tcache == TCACHE_STATE_DISABLED) {
            tcache = NULL;
            tcache_tsd_set(&tcache);
        }
    } else /* disabled */ {
        if (tcache > TCACHE_STATE_MAX) {
            tcache_destroy(tcache);
            tcache = NULL;
        }
        if (tcache == NULL) {
            tcache = TCACHE_STATE_DISABLED;
            tcache_tsd_set(&tcache);
        }
    }
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(bool create)
{
    tcache_t *tcache;

    if (config_tcache == false)
        return (NULL);
    if (config_lazy_lock && isthreaded == false)
        return (NULL);

    tcache = *tcache_tsd_get();
    if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
        if (tcache == TCACHE_STATE_DISABLED)
            return (NULL);
        if (tcache == NULL) {
            if (create == false) {
                /*
                 * Creating a tcache here would cause
                 * allocation as a side effect of free().
                 * Ordinarily that would be okay since
                 * tcache_create() failure is a soft failure
                 * that doesn't propagate.  However, if TLS
                 * data are freed via free() as in glibc,
                 * subtle corruption could result from setting
                 * a TLS variable after its backing memory is
                 * freed, so don't risk it.
                 */
                return (NULL);
            }
            if (tcache_enabled_get() == false) {
                tcache_enabled_set(false); /* Memoize. */
                return (NULL);
            }
            return (tcache_create(choose_arena(NULL)));
        }
        if (tcache == TCACHE_STATE_PURGATORY) {
            /*
             * Make a note that an allocator function was called
             * after tcache_thread_cleanup() was called.
             */
            tcache = TCACHE_STATE_REINCARNATED;
            tcache_tsd_set(&tcache);
            return (NULL);
        }
        if (tcache == TCACHE_STATE_REINCARNATED)
            return (NULL);
        not_reached();
    }

    return (tcache);
}
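
/*
 * Illustrative usage sketch (not part of the original header): callers on the
 * allocation fast path combine tcache_get() with the inline allocators below,
 * falling back to the arena when no cache is available:
 *
 *	tcache_t *tcache = tcache_get(true);
 *	if (tcache != NULL && size <= SMALL_MAXCLASS)
 *		return (tcache_alloc_small(tcache, size, zero));
 *	... otherwise allocate directly from the arena (hypothetical fallback
 *	shown only for shape).
 */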

JEMALLOC_ALWAYS_INLINE void
tcache_event(tcache_t *tcache)
{

    if (TCACHE_GC_INCR == 0)
        return;

    tcache->ev_cnt++;
    assert(tcache->ev_cnt <= TCACHE_GC_INCR);
    if (tcache->ev_cnt == TCACHE_GC_INCR)
        tcache_event_hard(tcache);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
    void *ret;

    if (tbin->ncached == 0) {
        tbin->low_water = -1;
        return (NULL);
    }
    tbin->ncached--;
    if ((int)tbin->ncached < tbin->low_water)
        tbin->low_water = tbin->ncached;
    ret = tbin->avail[tbin->ncached];
    return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
    void *ret;
    size_t binind;
    tcache_bin_t *tbin;

    binind = SMALL_SIZE2BIN(size);
    assert(binind < NBINS);
    tbin = &tcache->tbins[binind];
    ret = tcache_alloc_easy(tbin);
    if (ret == NULL) {
        ret = tcache_alloc_small_hard(tcache, tbin, binind);
        if (ret == NULL)
            return (NULL);
    }
    assert(tcache_salloc(ret) == arena_bin_info[binind].reg_size);
    if (zero == false) {
        if (config_fill) {
            if (opt_junk) {
                arena_alloc_junk_small(ret, &arena_bin_info[binind], false);
            } else if (opt_zero)
                memset(ret, 0, size);
        }
        VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
    } else {
        if (config_fill && opt_junk)
            arena_alloc_junk_small(ret, &arena_bin_info[binind], true);
        VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
        memset(ret, 0, size);
    }
    if (config_stats)
        tbin->tstats.nrequests++;
    if (config_prof)
        tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
    tcache_event(tcache);
    return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
    void *ret;
    size_t binind;
    tcache_bin_t *tbin;

    size = PAGE_CEILING(size);
    assert(size <= tcache_maxclass);
    binind = NBINS + (size >> LG_PAGE) - 1;
    assert(binind < nhbins);
    tbin = &tcache->tbins[binind];
    ret = tcache_alloc_easy(tbin);
    if (ret == NULL) {
        /*
         * Only allocate one large object at a time, because it's quite
         * expensive to create one and not use it.
         */
        ret = arena_malloc_large(tcache->arena, size, zero);
        if (ret == NULL)
            return (NULL);
    } else {
        if (config_prof && prof_promote && size == PAGE) {
            arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
            size_t pageind = ((uintptr_t)ret - (uintptr_t)chunk) >> LG_PAGE;
            arena_mapbits_large_binind_set(chunk, pageind, BININD_INVALID);
        }
        if (zero == false) {
            if (config_fill) {
                if (opt_junk)
                    memset(ret, 0xa5, size);
                else if (opt_zero)
                    memset(ret, 0, size);
            }
            VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
        } else {
            VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
            memset(ret, 0, size);
        }
        if (config_stats)
            tbin->tstats.nrequests++;
        if (config_prof)
            tcache->prof_accumbytes += size;
    }

    tcache_event(tcache);
    return (ret);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
{
    tcache_bin_t *tbin;
    tcache_bin_info_t *tbin_info;

    assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);

    if (config_fill && opt_junk)
        arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);

    tbin = &tcache->tbins[binind];
    tbin_info = &tcache_bin_info[binind];
    if (tbin->ncached == tbin_info->ncached_max) {
        /* Bin full: flush half of the cached objects back to the arena. */
        tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >> 1),
            tcache);
    }
    assert(tbin->ncached < tbin_info->ncached_max);
    tbin->avail[tbin->ncached] = ptr;
    tbin->ncached++;

    tcache_event(tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
    size_t binind;
    tcache_bin_t *tbin;
    tcache_bin_info_t *tbin_info;

    assert((size & PAGE_MASK) == 0);
    assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
    assert(tcache_salloc(ptr) <= tcache_maxclass);

    binind = NBINS + (size >> LG_PAGE) - 1;

    if (config_fill && opt_junk)
        memset(ptr, 0x5a, size);

    tbin = &tcache->tbins[binind];
    tbin_info = &tcache_bin_info[binind];
    if (tbin->ncached == tbin_info->ncached_max) {
        /* Bin full: flush half of the cached objects back to the arena. */
        tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >> 1),
            tcache);
    }
    assert(tbin->ncached < tbin_info->ncached_max);
    tbin->avail[tbin->ncached] = ptr;
    tbin->ncached++;

    tcache_event(tcache);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/