#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
#define JEMALLOC_INTERNAL_TCACHE_INLINES_H

#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"
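
/*
 * Return whether this thread's tcache is enabled, as recorded in thread
 * specific data (TSD).
 */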
static inline bool
tcache_enabled_get(tsd_t *tsd) {
	return tsd_tcache_enabled_get(tsd);
}
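
/*
 * Enable or disable this thread's tcache, lazily initializing or tearing
 * down its cached data as needed.
 */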
static inline void
tcache_enabled_set(tsd_t *tsd, bool enabled) {
	bool was_enabled = tsd_tcache_enabled_get(tsd);

	if (!was_enabled && enabled) {
		tsd_tcache_data_init(tsd);
	} else if (was_enabled && !enabled) {
		tcache_cleanup(tsd);
	}
	/* Commit the state last.  Above calls check current state. */
	tsd_tcache_enabled_set(tsd, enabled);
	tsd_slow_update(tsd);
}
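
/*
 * Periodic bookkeeping hook, called on every tcache allocation and
 * deallocation; a no-op when TCACHE_GC_INCR is zero, otherwise runs
 * tcache_event_hard() whenever the GC ticker fires.
 */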
JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache) {
	if (TCACHE_GC_INCR == 0) {
		return;
	}

	if (unlikely(ticker_tick(&tcache->gc_ticker))) {
		tcache_event_hard(tsd, tcache);
	}
}
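
/*
 * Allocate a small object, preferring the thread cache: pop from the cache
 * bin on the fast path, fall back to the arena on a miss.
 */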
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    UNUSED size_t size, szind_t binind, bool zero, bool slow_path) {
	void *ret;
	cache_bin_t *bin;
	bool tcache_success;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	assert(binind < NBINS);
	bin = tcache_small_bin_get(tcache, binind);
	ret = cache_bin_alloc_easy(bin, &tcache_success);
	assert(tcache_success == (ret != NULL));
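	/*
	 * Cache miss: take the slow path, which refills the bin from the
	 * chosen arena and returns one object.
	 */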
	if (unlikely(!tcache_success)) {
		bool tcache_hard_success;
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL)) {
			return NULL;
		}

		ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
		    bin, binind, &tcache_hard_success);
		if (!tcache_hard_success) {
			return NULL;
		}
	}

	assert(ret);
	/*
	 * Only compute usize if required.  The checks in the following if
	 * statement are all static.
	 */
	if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
		usize = sz_index2size(binind);
		assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
	}

	if (likely(!zero)) {
		if (slow_path && config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret, &bin_infos[binind],
				    false);
			} else if (unlikely(opt_zero)) {
				memset(ret, 0, usize);
			}
		}
	} else {
		if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &bin_infos[binind], true);
		}
		memset(ret, 0, usize);
	}

	if (config_stats) {
		bin->tstats.nrequests++;
	}
	if (config_prof) {
		tcache->prof_accumbytes += usize;
	}
	tcache_event(tsd, tcache);
	return ret;
}
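
/*
 * Allocate a large object, preferring the thread cache.  On a miss, allocate
 * directly from the arena rather than refilling the bin.
 */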
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path) {
	void *ret;
	cache_bin_t *bin;
	bool tcache_success;

	assert(binind >= NBINS && binind < nhbins);
	bin = tcache_large_bin_get(tcache, binind);
	ret = cache_bin_alloc_easy(bin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL)) {
			return NULL;
		}

		ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
		if (ret == NULL) {
			return NULL;
		}
	} else {
		size_t usize JEMALLOC_CC_SILENCE_INIT(0);

		/* Only compute usize on demand */
		if (config_prof || (slow_path && config_fill) ||
		    unlikely(zero)) {
			usize = sz_index2size(binind);
			assert(usize <= tcache_maxclass);
		}

		if (likely(!zero)) {
			if (slow_path && config_fill) {
				if (unlikely(opt_junk_alloc)) {
					memset(ret, JEMALLOC_ALLOC_JUNK,
					    usize);
				} else if (unlikely(opt_zero)) {
					memset(ret, 0, usize);
				}
			}
		} else {
			memset(ret, 0, usize);
		}

		if (config_stats) {
			bin->tstats.nrequests++;
		}
		if (config_prof) {
			tcache->prof_accumbytes += usize;
		}
	}

	tcache_event(tsd, tcache);
	return ret;
}
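
/*
 * Return a small object to the thread cache, flushing half of the bin back
 * to the arena first if the bin is full.
 */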
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
	cache_bin_t *bin;
	cache_bin_info_t *bin_info;

	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);

	if (slow_path && config_fill && unlikely(opt_junk_free)) {
		arena_dalloc_junk_small(ptr, &bin_infos[binind]);
	}

	bin = tcache_small_bin_get(tcache, binind);
	bin_info = &tcache_bin_info[binind];
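	/*
	 * If the bin is full, flush half of its cached objects to make room,
	 * then push ptr onto the bin's stack of available objects.
	 */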
	if (unlikely(bin->ncached == bin_info->ncached_max)) {
		tcache_bin_flush_small(tsd, tcache, bin, binind,
		    (bin_info->ncached_max >> 1));
	}
	assert(bin->ncached < bin_info->ncached_max);
	bin->ncached++;
	*(bin->avail - bin->ncached) = ptr;

	tcache_event(tsd, tcache);
}
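
/*
 * Return a large object to the thread cache; same structure as
 * tcache_dalloc_small(), but flushes via tcache_bin_flush_large().
 */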
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
	cache_bin_t *bin;
	cache_bin_info_t *bin_info;

	assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);

	if (slow_path && config_fill && unlikely(opt_junk_free)) {
		large_dalloc_junk(ptr, sz_index2size(binind));
	}

	bin = tcache_large_bin_get(tcache, binind);
	bin_info = &tcache_bin_info[binind];
	if (unlikely(bin->ncached == bin_info->ncached_max)) {
		tcache_bin_flush_large(tsd, bin, binind,
		    (bin_info->ncached_max >> 1), tcache);
	}
	assert(bin->ncached < bin_info->ncached_max);
	bin->ncached++;
	*(bin->avail - bin->ncached) = ptr;

	tcache_event(tsd, tcache);
}
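
/*
 * Look up the explicit tcache at index ind in the global tcaches array,
 * lazily creating it on first use.
 */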
JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind) {
	tcaches_t *elm = &tcaches[ind];
	if (unlikely(elm->tcache == NULL)) {
		elm->tcache = tcache_create_explicit(tsd);
	}
	return elm->tcache;
}

#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */