1 #ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
2 #define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
4 #include "jemalloc/internal/atomic.h"
5 #include "jemalloc/internal/bitmap.h"
6 #include "jemalloc/internal/mutex.h"
7 #include "jemalloc/internal/ql.h"
8 #include "jemalloc/internal/ph.h"
9 #include "jemalloc/internal/size_classes.h"
	/*
	 * Extent lifecycle states, stored in the e_bits "state" field (see
	 * EXTENT_BITS_STATE_*).  Ordering matters: values must fit in
	 * EXTENT_BITS_STATE_WIDTH (2) bits.
	 */
	extent_state_active = 0,
	extent_state_dirty = 1,
	extent_state_muzzy = 2,
	extent_state_retained = 3
18 /* Extent (span of pages). Use accessor functions for e_* fields. */
21 * Bitfield containing several fields:
33 * nnnnnnnn ... nnnnffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa
35 * arena_ind: Arena from which this extent came, or all 1 bits if
38 * slab: The slab flag indicates whether the extent is used for a slab
39 * of small regions. This helps differentiate small size classes,
40 * and it indicates whether interior pointers can be looked up via
43 * committed: The committed flag indicates whether physical memory is
44 * committed to the extent, whether explicitly or implicitly
45 * as on a system that overcommits and satisfies physical
46 * memory needs on demand via soft page faults.
48 * dumpable: The dumpable flag indicates whether or not we've set the
49 * memory in question to be dumpable. Note that this
50 * interacts somewhat subtly with user-specified extent hooks,
51 * since we don't know if *they* are fiddling with
52 * dumpability (in which case, we don't want to undo whatever
53 * they're doing). To deal with this scenario, we:
54 * - Make dumpable false only for memory allocated with the
56 * - Only allow memory to go from non-dumpable to dumpable,
58 * - Never make the OS call to allow dumping when the
59 * dumpable bit is already set.
60 * These three constraints mean that we will never
61 * accidentally dump user memory that the user meant to set
62 * nondumpable with their extent hooks.
65 * zeroed: The zeroed flag is used by extent recycling code to track
66 * whether memory is zero-filled.
68 * state: The state flag is an extent_state_t.
70 * szind: The szind flag indicates usable size class index for
71 * allocations residing in this extent, regardless of whether the
72 * extent is a slab. Extent size and usable size often differ
73 * even for non-slabs, either due to sz_large_pad or promotion of
74 * sampled small regions.
76 * nfree: Number of free regions in slab.
78 * sn: Serial number (potentially non-unique).
80 * Serial numbers may wrap around if !opt_retain, but as long as
81 * comparison functions fall back on address comparison for equal
82 * serial numbers, stable (if imperfect) ordering is maintained.
84 * Serial numbers may not be unique even in the absence of
85 * wrap-around, e.g. when splitting an extent and assigning the same
86 * serial number to both resulting adjacent extents.
/*
 * Construct a 64-bit mask of `width` consecutive one bits starting at bit
 * position `shift`.  Used below to derive each EXTENT_BITS_*_MASK from its
 * field's width and shift.
 */
#define MASK(width, shift) ((((uint64_t)1U << (width)) - 1) << (shift))
/*
 * Width/shift/mask triple for each field packed into the e_bits bitfield.
 * Each field's shift is defined as the previous field's width + shift, so
 * the fields occupy contiguous, non-overlapping bit ranges from bit 0
 * upward, in the order shown in the layout diagram above: arena_ind, slab,
 * committed, dumpable, zeroed, state, szind, nfree, sn.
 */
#define EXTENT_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
#define EXTENT_BITS_ARENA_SHIFT 0
#define EXTENT_BITS_ARENA_MASK MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT)

#define EXTENT_BITS_SLAB_WIDTH 1
#define EXTENT_BITS_SLAB_SHIFT (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT)
#define EXTENT_BITS_SLAB_MASK MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT)

#define EXTENT_BITS_COMMITTED_WIDTH 1
#define EXTENT_BITS_COMMITTED_SHIFT (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT)
#define EXTENT_BITS_COMMITTED_MASK MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT)

#define EXTENT_BITS_DUMPABLE_WIDTH 1
#define EXTENT_BITS_DUMPABLE_SHIFT (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT)
#define EXTENT_BITS_DUMPABLE_MASK MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT)

#define EXTENT_BITS_ZEROED_WIDTH 1
#define EXTENT_BITS_ZEROED_SHIFT (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT)
#define EXTENT_BITS_ZEROED_MASK MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT)

/* 2 bits: must hold the four extent_state_t values. */
#define EXTENT_BITS_STATE_WIDTH 2
#define EXTENT_BITS_STATE_SHIFT (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT)
#define EXTENT_BITS_STATE_MASK MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT)

#define EXTENT_BITS_SZIND_WIDTH LG_CEIL_NSIZES
#define EXTENT_BITS_SZIND_SHIFT (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT)
#define EXTENT_BITS_SZIND_MASK MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT)

/* +1 so the count can represent SLAB_MAXREGS itself (0..SLAB_MAXREGS). */
#define EXTENT_BITS_NFREE_WIDTH (LG_SLAB_MAXREGS + 1)
#define EXTENT_BITS_NFREE_SHIFT (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT)
#define EXTENT_BITS_NFREE_MASK MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT)

/*
 * The serial number has no fixed width; it takes all remaining high bits,
 * so its mask is every bit at or above its shift.
 */
#define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT)
126 /* Pointer to the extent that this structure is responsible for. */
131 * Extent size and serial number associated with the extent
132 * structure (different than the serial number for the extent at
135 * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
/*
 * Size and esn share one word (see the s/n layout diagram above): the
 * size occupies the high bits and is always a multiple of PAGE, while
 * the extent serial number (esn) lives in the low lg(PAGE) bits.
 */
#define EXTENT_SIZE_MASK ((size_t)~(PAGE-1))
#define EXTENT_ESN_MASK ((size_t)PAGE-1)
140 /* Base extent size, which may not be a multiple of PAGE. */
145 * List linkage, used by a variety of lists:
146 * - bin_t's slabs_full
148 * - stashed dirty extents
149 * - arena's large allocations
151 ql_elm(extent_t) ql_link;
154 * Linkage for per size class sn/address-ordered heaps, and
157 phn(extent_t) ph_link;
160 /* Small region slab metadata. */
161 arena_slab_data_t e_slab_data;
164 * Profile counters, used for large objects. Points to a
167 atomic_p_t e_prof_tctx;
/* Linked list of extents, threaded through the ql_link field. */
typedef ql_head(extent_t) extent_list_t;
/*
 * Heap-ordered extent collections built on the ph machinery (see ph.h),
 * threaded through the ph_link field.  Tree and heap share the same
 * underlying representation.
 */
typedef ph(extent_t) extent_tree_t;
typedef ph(extent_t) extent_heap_t;
174 /* Quantized collection of extents, with built-in LRU queue. */
179 * Quantized per size class heaps of extents.
181 * Synchronization: mtx.
183 extent_heap_t heaps[NPSIZES+1];
186 * Bitmap for which set bits correspond to non-empty heaps.
188 * Synchronization: mtx.
190 bitmap_t bitmap[BITMAP_GROUPS(NPSIZES+1)];
193 * LRU of all extents in heaps.
195 * Synchronization: mtx.
200 * Page sum for all extents in heaps.
202 * The synchronization here is a little tricky. Modifications to npages
203 * must hold mtx, but reads need not (though, a reader who sees npages
204 * without holding the mutex can't assume anything about the rest of the
205 * state of the extents_t).
209 /* All stored extents must be in the same state. */
210 extent_state_t state;
213 * If true, delay coalescing until eviction; otherwise coalesce during
219 #endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */