#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H

#include "jemalloc/internal/arena_stats.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/ticker.h"

struct arena_decay_s {
	/* Synchronizes all non-atomic fields. */
	malloc_mutex_t		mtx;
	/*
	 * True if a thread is currently purging the extents associated with
	 * this decay structure.
	 */
	bool			purging;
	/*
	 * Approximate time in milliseconds from the creation of a set of unused
	 * dirty pages until an equivalent set of unused dirty pages is purged
	 * and/or reused.
	 */
	atomic_zd_t		time_ms;
	/* time / SMOOTHSTEP_NSTEPS. */
	nstime_t		interval;
	/*
	 * Time at which the current decay interval logically started. We do
	 * not actually advance to a new epoch until sometime after it starts
	 * because of scheduling and computation delays, and it is even possible
	 * to completely skip epochs. In all cases, during epoch advancement we
	 * merge all relevant activity into the most recently recorded epoch.
	 */
	nstime_t		epoch;
	/* Deadline randomness generator. */
	uint64_t		jitter_state;
	/*
	 * Deadline for current epoch. This is the sum of interval and per
	 * epoch jitter which is a uniform random variable in [0..interval).
	 * Epochs always advance by precise multiples of interval, but we
	 * randomize the deadline to reduce the likelihood of arenas purging in
	 * lockstep.
	 */
	nstime_t		deadline;
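	/*
	 * Rough sketch of how the deadline is typically derived (helper names
	 * come from nstime.h and prng.h; the authoritative logic lives in
	 * arena.c and may differ in detail), with jitter a local nstime_t:
	 *
	 *   nstime_copy(&decay->deadline, &decay->epoch);
	 *   nstime_add(&decay->deadline, &decay->interval);
	 *   nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
	 *       nstime_ns(&decay->interval)));
	 *   nstime_add(&decay->deadline, &jitter);
	 */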
	/*
	 * Number of unpurged pages at beginning of current epoch. During epoch
	 * advancement we use the delta between arena->decay_*.nunpurged and
	 * extents_npages_get(&arena->extents_*) to determine how many dirty
	 * pages, if any, were generated.
	 */
	size_t			nunpurged;
	/*
	 * Trailing log of how many unused dirty pages were generated during
	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
	 * element is the most recent epoch. Corresponding epoch times are
	 * relative to epoch.
	 */
	size_t			backlog[SMOOTHSTEP_NSTEPS];
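	/*
	 * Illustrative sketch only (the authoritative computation is in
	 * arena.c): on epoch advance the backlog is weighted by the
	 * fixed-point smoothstep table to derive the current bound on
	 * unpurged pages, approximately:
	 *
	 *   uint64_t sum = 0;
	 *   for (unsigned i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
	 *       sum += decay->backlog[i] * h_steps[i];
	 *   }
	 *   size_t npages_limit = (size_t)(sum >> SMOOTHSTEP_BFP);
	 *
	 * h_steps is assumed to be a table generated from the SMOOTHSTEP
	 * macro in smoothstep.h; pages beyond npages_limit are candidates
	 * for purging.
	 */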
	/*
	 * Pointer to associated stats. These stats are embedded directly in
	 * the arena's stats due to how stats structures are shared between the
	 * arena and ctl code.
	 *
	 * Synchronization: Same as associated arena's stats field.
	 */
	arena_stats_decay_t	*stats;
	/* Peak number of pages in associated extents. Used for debug only. */
	uint64_t		ceil_npages;
};

struct arena_s {
	/*
	 * Number of threads currently assigned to this arena. Each thread has
	 * two distinct assignments, one for application-serving allocation, and
	 * the other for internal metadata allocation. Internal metadata must
	 * not be allocated from arenas explicitly created via the arenas.create
	 * mallctl, because the arena.<i>.reset mallctl indiscriminately
	 * discards all allocations for the affected arena.
	 *
	 *   0: Application allocation.
	 *   1: Internal metadata allocation.
	 *
	 * Synchronization: atomic.
	 */
	atomic_u_t		nthreads[2];
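	/*
	 * For example (a sketch, not part of this header): the number of
	 * application threads attached to an arena can be read with a relaxed
	 * atomic load,
	 *
	 *   unsigned n = atomic_load_u(&arena->nthreads[0], ATOMIC_RELAXED);
	 *
	 * while index 1 gives the internal-metadata assignment count.
	 */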
	/*
	 * When percpu_arena is enabled, to amortize the cost of reading /
	 * updating the current CPU id, track the most recent thread accessing
	 * this arena, and only read CPU if there is a mismatch.
	 */
	tsdn_t			*last_thd;
	/* Synchronization: internal. */
	arena_stats_t		stats;
	/*
	 * Lists of tcaches and cache_bin_array_descriptors for extant threads
	 * associated with this arena. Stats from these are merged
	 * incrementally, and at exit if opt_stats_print is enabled.
	 *
	 * Synchronization: tcache_ql_mtx.
	 */
	ql_head(tcache_t)			tcache_ql;
	ql_head(cache_bin_array_descriptor_t)	cache_bin_array_descriptor_ql;
	malloc_mutex_t				tcache_ql_mtx;
	/* Synchronization: internal. */
	prof_accum_t		prof_accum;
	uint64_t		prof_accumbytes;
	/*
	 * PRNG state for cache index randomization of large allocation base
	 * pointers.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		offset_state;
	/*
	 * Extent serial number generator state.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		extent_sn_next;
	/*
	 * Represents a dss_prec_t, but atomically.
	 *
	 * Synchronization: atomic.
	 */
	atomic_u_t		dss_prec;
	/*
	 * Number of pages in active extents.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		nactive;
	/*
	 * Extant large allocations.
	 *
	 * Synchronization: large_mtx.
	 */
	extent_list_t		large;
	/* Synchronizes all large allocation/update/deallocation. */
	malloc_mutex_t		large_mtx;
	/*
	 * Collections of extents that were previously allocated. These are
	 * used when allocating extents, in an attempt to re-use address space.
	 *
	 * Synchronization: internal.
	 */
	extents_t		extents_dirty;
	extents_t		extents_muzzy;
	extents_t		extents_retained;
	/*
	 * Decay-based purging state, responsible for scheduling extent state
	 * transitions.
	 *
	 * Synchronization: internal.
	 */
	arena_decay_t		decay_dirty; /* dirty --> muzzy */
	arena_decay_t		decay_muzzy; /* muzzy --> retained */
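	/*
	 * Summary note (not verbatim from upstream): decay_dirty schedules the
	 * dirty --> muzzy transition and decay_muzzy the muzzy --> retained
	 * transition, so unused pages flow dirty -> muzzy -> retained over
	 * time; the corresponding decay times are presumably the ones exposed
	 * through the dirty_decay_ms and muzzy_decay_ms options.
	 */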
	/*
	 * Next extent size class in a growing series to use when satisfying a
	 * request via the extent hooks (only if opt_retain). This limits the
	 * number of disjoint virtual memory ranges so that extent merging can
	 * be effective even if multiple arenas' extent allocation requests are
	 * highly interleaved.
	 *
	 * retain_grow_limit is the max allowed size ind to expand (unless the
	 * required size is greater). Default is no limit, and controlled
	 * through mallctl only.
	 *
	 * Synchronization: extent_grow_mtx
	 */
	pszind_t		extent_grow_next;
	pszind_t		retain_grow_limit;
	malloc_mutex_t		extent_grow_mtx;
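	/*
	 * Intended usage (an assumption based on the comment above, not a
	 * statement of the exact upstream behavior): each time the arena must
	 * map new memory through the extent hooks it requests at least the
	 * size class indexed by extent_grow_next and then advances it, so
	 * successive mappings grow roughly geometrically, capped at
	 * retain_grow_limit (presumably settable via the
	 * arena.<i>.retain_grow_limit mallctl).
	 */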
	/*
	 * Available extent structures that were allocated via
	 * base_alloc_extent().
	 *
	 * Synchronization: extent_avail_mtx.
	 */
	extent_tree_t		extent_avail;
	malloc_mutex_t		extent_avail_mtx;
	/*
	 * bins is used to store heaps of free regions.
	 *
	 * Synchronization: internal.
	 */
	bin_t			bins[NBINS];
	/*
	 * Base allocator, from which arena metadata are allocated.
	 *
	 * Synchronization: internal.
	 */
	base_t			*base;
	/* Used to determine uptime. Read-only after initialization. */
	nstime_t		create_time;
};

/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
	ticker_t		decay_ticker;
};

/* Used to pass rtree lookup context down the path. */
struct alloc_ctx_s {
	szind_t			szind;
	bool			slab;
};

#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */