#ifndef JEMALLOC_INTERNAL_INLINES_A_H
#define JEMALLOC_INTERNAL_INLINES_A_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/ticker.h"

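/* Return the cpu on which the calling thread is currently running. */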
JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
malloc_getcpu(void) {
	assert(have_percpu_arena);
#if defined(JEMALLOC_HAVE_SCHED_GETCPU)
	return (malloc_cpuid_t)sched_getcpu();
#else
	not_reached();
	return -1;
#endif
}

/* Return the chosen arena index based on current cpu. */
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_choose(void) {
	assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena));

	malloc_cpuid_t cpuid = malloc_getcpu();
	assert(cpuid >= 0);

	unsigned arena_ind;
	if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus /
	    2)) {
		arena_ind = cpuid;
	} else {
		assert(opt_percpu_arena == per_phycpu_arena);
		/* Hyper threads on the same physical CPU share arena. */
		arena_ind = cpuid - ncpus / 2;
	}

	return arena_ind;
}

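/*
 * Worked example (hypothetical topology in which hyperthread siblings are
 * numbered cpuid and cpuid + ncpus / 2): with ncpus == 8, percpu_arena maps
 * cpuid 6 to arena 6, while per_phycpu_arena maps both cpuid 2 and cpuid 6
 * to arena 2.
 */
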
/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_ind_limit(percpu_arena_mode_t mode) {
	assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode));
	if (mode == per_phycpu_arena && ncpus > 1) {
		if (ncpus % 2) {
			/* An odd ncpus likely means a misconfig. */
			return ncpus / 2 + 1;
		}
		return ncpus / 2;
	} else {
		return ncpus;
	}
}

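/*
 * Worked example (same hypothetical 8-cpu topology as above):
 * percpu_arena_ind_limit(percpu_arena) == 8 and
 * percpu_arena_ind_limit(per_phycpu_arena) == 4; with ncpus == 7 the
 * per_phycpu limit rounds up to 4 so the unpaired cpu still falls in range.
 */

/*
 * Fetch the calling thread's cached tdata for arena ind.  Falls back to the
 * slow path (arena_tdata_get_hard) when the tdata array is uninitialized,
 * and, if refresh_if_missing is true, when the array is stale or the entry
 * is missing.
 */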
static inline arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
	arena_tdata_t *tdata;
	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);

	if (unlikely(arenas_tdata == NULL)) {
		/* arenas_tdata hasn't been initialized yet. */
		return arena_tdata_get_hard(tsd, ind);
	}
	if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
		/*
		 * ind is invalid, the cache is stale (too small), or the
		 * tdata is yet to be initialized.
		 */
		return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
		    NULL);
	}

	tdata = &arenas_tdata[ind];
	if (likely(tdata != NULL) || !refresh_if_missing) {
		return tdata;
	}
	return arena_tdata_get_hard(tsd, ind);
}

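/*
 * Return arena ind from the global arenas array, initializing it with the
 * default extent hooks on first use when init_if_missing is true.  The
 * ACQUIRE load is presumed to synchronize with the release store that
 * publishes a newly created arena.
 */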
static inline arena_t *
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
	arena_t *ret;

	assert(ind < MALLOCX_ARENA_LIMIT);

	ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
	if (unlikely(ret == NULL)) {
		if (init_if_missing) {
			ret = arena_init(tsdn, ind,
			    (extent_hooks_t *)&extent_hooks_default);
		}
	}
	return ret;
}

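/*
 * Return the decay ticker for arena ind from the calling thread's tdata, or
 * NULL if the tdata cannot be (re)created.
 */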
static inline ticker_t *
decay_ticker_get(tsd_t *tsd, unsigned ind) {
	arena_tdata_t *tdata;

	tdata = arena_tdata_get(tsd, ind, true);
	if (unlikely(tdata == NULL)) {
		return NULL;
	}
	return &tdata->decay_ticker;
}

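/*
 * Small and large cache bins live in separate arrays, both indexed by the
 * global size-class index binind; large lookups therefore subtract NBINS.
 */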
JEMALLOC_ALWAYS_INLINE tcache_bin_t *
tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
	assert(binind < NBINS);
	return &tcache->tbins_small[binind];
}

JEMALLOC_ALWAYS_INLINE tcache_bin_t *
tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
	assert(binind >= NBINS && binind < nhbins);
	return &tcache->tbins_large[binind - NBINS];
}

JEMALLOC_ALWAYS_INLINE bool
tcache_available(tsd_t *tsd) {
	/*
	 * Thread-specific auto tcache might be unavailable if: 1) it is
	 * currently being initialized, or 2) it has been disabled via the
	 * thread.tcache.enabled mallctl or config options.  This check
	 * covers both cases.
	 */
	if (likely(tsd_tcache_enabled_get(tsd))) {
		/* Associated arena == NULL implies tcache init in progress. */
		assert(tsd_tcachep_get(tsd)->arena == NULL ||
		    tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail !=
		    NULL);
		return true;
	}

	return false;
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(tsd_t *tsd) {
	if (!tcache_available(tsd)) {
		return NULL;
	}

	return tsd_tcachep_get(tsd);
}

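/*
 * Hypothetical caller sketch (not a verbatim call site): check the result
 * for NULL before taking the tcache fast path.
 *
 *	tcache_t *tcache = tcache_get(tsd);
 *	if (tcache != NULL) {
 *		... allocate through the cached bins ...
 *	}
 */

/*
 * pre_reentrancy/post_reentrancy bracket code that may reenter the
 * allocator: they bump the per-thread reentrancy level and keep tsd on the
 * slow path while the level is nonzero.
 */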
static inline void
pre_reentrancy(tsd_t *tsd, arena_t *arena) {
	/* arena is the current context. Reentry from a0 is not allowed. */
	assert(arena != arena_get(tsd_tsdn(tsd), 0, false));

	bool fast = tsd_fast(tsd);
	++*tsd_reentrancy_levelp_get(tsd);
	if (fast) {
		/* Prepare slow path for reentrancy. */
		tsd_slow_update(tsd);
		assert(tsd->state == tsd_state_nominal_slow);
	}
}

static inline void
post_reentrancy(tsd_t *tsd) {
	int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
	assert(*reentrancy_level > 0);
	if (--*reentrancy_level == 0) {
		tsd_slow_update(tsd);
	}
}

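/*
 * Sketch of the intended pairing (hypothetical caller):
 *
 *	pre_reentrancy(tsd, arena);
 *	... code that may reenter the allocator ...
 *	post_reentrancy(tsd);
 */
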
#endif /* JEMALLOC_INTERNAL_INLINES_A_H */