1 #ifndef JEMALLOC_INTERNAL_ARENA_STATS_H
2 #define JEMALLOC_INTERNAL_ARENA_STATS_H
4 #include "jemalloc/internal/atomic.h"
5 #include "jemalloc/internal/mutex.h"
6 #include "jemalloc/internal/mutex_prof.h"
7 #include "jemalloc/internal/size_classes.h"
/*
 * In those architectures that support 64-bit atomics, we use atomic updates for
 * our 64-bit values.  Otherwise, we use a plain uint64_t and synchronize
 * access using a mutex.
 */
#ifdef JEMALLOC_ATOMIC_U64
typedef atomic_u64_t arena_stats_u64_t;
#else
/* Must hold the arena stats mutex while reading atomically. */
typedef uint64_t arena_stats_u64_t;
#endif
21 typedef struct arena_stats_large_s arena_stats_large_t;
22 struct arena_stats_large_s {
24 * Total number of allocation/deallocation requests served directly by
27 arena_stats_u64_t nmalloc;
28 arena_stats_u64_t ndalloc;
31 * Number of allocation requests that correspond to this size class.
32 * This includes requests served by tcache, though tcache only
33 * periodically merges into this counter.
35 arena_stats_u64_t nrequests; /* Partially derived. */
37 /* Current number of allocations of this size class. */
38 size_t curlextents; /* Derived. */
41 typedef struct arena_stats_decay_s arena_stats_decay_t;
42 struct arena_stats_decay_s {
43 /* Total number of purge sweeps. */
44 arena_stats_u64_t npurge;
45 /* Total number of madvise calls made. */
46 arena_stats_u64_t nmadvise;
47 /* Total number of pages purged. */
48 arena_stats_u64_t purged;
/*
 * Arena stats.  Note that fields marked "derived" are not directly maintained
 * within the arena code; rather their values are derived during stats merge
 * requests.
 */
56 typedef struct arena_stats_s arena_stats_t;
57 struct arena_stats_s {
58 #ifndef JEMALLOC_ATOMIC_U64
62 /* Number of bytes currently mapped, excluding retained memory. */
63 atomic_zu_t mapped; /* Partially derived. */
66 * Number of unused virtual memory bytes currently retained. Retained
67 * bytes are technically mapped (though always decommitted or purged),
68 * but they are excluded from the mapped statistic (above).
70 atomic_zu_t retained; /* Derived. */
72 arena_stats_decay_t decay_dirty;
73 arena_stats_decay_t decay_muzzy;
75 atomic_zu_t base; /* Derived. */
77 atomic_zu_t resident; /* Derived. */
78 atomic_zu_t metadata_thp;
80 atomic_zu_t allocated_large; /* Derived. */
81 arena_stats_u64_t nmalloc_large; /* Derived. */
82 arena_stats_u64_t ndalloc_large; /* Derived. */
83 arena_stats_u64_t nrequests_large; /* Derived. */
85 /* Number of bytes cached in tcache associated with this arena. */
86 atomic_zu_t tcache_bytes; /* Derived. */
88 mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
90 /* One element for each large size class. */
91 arena_stats_large_t lstats[NSIZES - NBINS];
98 arena_stats_init(UNUSED tsdn_t *tsdn, arena_stats_t *arena_stats) {
100 for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
101 assert(((char *)arena_stats)[i] == 0);
104 #ifndef JEMALLOC_ATOMIC_U64
105 if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
106 WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
110 /* Memory is zeroed, so there is no need to clear stats. */
115 arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
116 #ifndef JEMALLOC_ATOMIC_U64
117 malloc_mutex_lock(tsdn, &arena_stats->mtx);
122 arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
123 #ifndef JEMALLOC_ATOMIC_U64
124 malloc_mutex_unlock(tsdn, &arena_stats->mtx);
128 static inline uint64_t
129 arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
130 arena_stats_u64_t *p) {
131 #ifdef JEMALLOC_ATOMIC_U64
132 return atomic_load_u64(p, ATOMIC_RELAXED);
134 malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
140 arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
141 arena_stats_u64_t *p, uint64_t x) {
142 #ifdef JEMALLOC_ATOMIC_U64
143 atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
145 malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
150 UNUSED static inline void
151 arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
152 arena_stats_u64_t *p, uint64_t x) {
153 #ifdef JEMALLOC_ATOMIC_U64
154 UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
157 malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
159 assert(*p + x >= *p);
164 * Non-atomically sets *dst += src. *dst needs external synchronization.
165 * This lets us avoid the cost of a fetch_add when its unnecessary (note that
166 * the types here are atomic).
169 arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
170 #ifdef JEMALLOC_ATOMIC_U64
171 uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
172 atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
179 arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
180 #ifdef JEMALLOC_ATOMIC_U64
181 return atomic_load_zu(p, ATOMIC_RELAXED);
183 malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
184 return atomic_load_zu(p, ATOMIC_RELAXED);
189 arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
191 #ifdef JEMALLOC_ATOMIC_U64
192 atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
194 malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
195 size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
196 atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
201 arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
203 #ifdef JEMALLOC_ATOMIC_U64
204 UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
207 malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
208 size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
209 atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
213 /* Like the _u64 variant, needs an externally synchronized *dst. */
215 arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
216 size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
217 atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
221 arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
222 szind_t szind, uint64_t nrequests) {
223 arena_stats_lock(tsdn, arena_stats);
224 arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind -
225 NBINS].nrequests, nrequests);
226 arena_stats_unlock(tsdn, arena_stats);
230 arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
231 arena_stats_lock(tsdn, arena_stats);
232 arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
233 arena_stats_unlock(tsdn, arena_stats);
237 #endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */