1 #ifndef JEMALLOC_INTERNAL_TYPES_H
2 #define JEMALLOC_INTERNAL_TYPES_H
/*
 * Internal scalar types and flag/alignment macros shared across jemalloc.
 * NOTE(review): every line of this file carries a stray leading numeric
 * token (e.g. "1 #ifndef ...") that looks like a numbered-listing paste
 * artifact; no preprocessor directive is valid with it present — confirm
 * against the canonical file in the repository and strip if spurious.
 */
4 /* Page size index type. */
5 typedef unsigned pszind_t;
7 /* Size class index type. */
8 typedef unsigned szind_t;
10 /* Processor / core id type. */
11 typedef int malloc_cpuid_t; /* Signed — presumably so a negative value can mean "no cpu"; TODO confirm against callers. */
22 * aaaaaaaa aaaatttt tttttttt 0znnnnnn
/*
 * Widths (in bits) of the arena-index, tcache-index, and lg(alignment)
 * fields packed into the MALLOCX_*() flags word (layout pictured above).
 */
24 #define MALLOCX_ARENA_BITS 12
25 #define MALLOCX_TCACHE_BITS 12
26 #define MALLOCX_LG_ALIGN_BITS 6
/* Bit offsets of the arena and tcache fields within the flags word. */
27 #define MALLOCX_ARENA_SHIFT 20
28 #define MALLOCX_TCACHE_SHIFT 8
29 #define MALLOCX_ARENA_MASK \
30 (((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
31 /* NB: Arena index bias decreases the maximum number of arenas by 1. */
32 #define MALLOCX_ARENA_LIMIT ((1 << MALLOCX_ARENA_BITS) - 1)
33 #define MALLOCX_TCACHE_MASK \
34 (((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
/*
 * Two tcache-field encodings are reserved, hence "- 3" rather than "- 1";
 * see the -2 bias applied by MALLOCX_TCACHE_GET() below.
 */
35 #define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3)
36 #define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1)
37 /* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
38 #define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
39 (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
/*
 * Masking with (SIZE_T_MAX-1) maps an unspecified lg-align field of 0
 * (2^0 == 1) to alignment 0 rather than 1; every larger power of two
 * passes through unchanged.  ZU() is presumably the project's size_t
 * constant-literal macro — defined elsewhere, verify.
 */
40 #define MALLOCX_ALIGN_GET(flags) \
41 (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
42 #define MALLOCX_ZERO_GET(flags) \
43 ((bool)(flags & MALLOCX_ZERO))
/* Decode the tcache field; the -2 undoes the encoding bias noted above. */
45 #define MALLOCX_TCACHE_GET(flags) \
46 (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
/* Decode the arena field; the -1 undoes the arena index bias (see NB above). */
47 #define MALLOCX_ARENA_GET(flags) \
48 (((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)
50 /* Smallest size class to support. */
/* LG_TINY_MIN is supplied by the build configuration, not defined in this header. */
51 #define TINY_MIN (1U << LG_TINY_MIN)
54 * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
58 # if (defined(__i386__) || defined(_M_IX86)) /* 32-bit x86. */
67 # if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__)) /* SPARC v9. */
70 # if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) /* x86-64. */
101 # define LG_QUANTUM 4 /* 2^4 = 16-byte minimum alignment. */
104 # define LG_QUANTUM 4 /* 2^4 = 16-byte minimum alignment. */
107 # error "Unknown minimum alignment for architecture; specify via "
112 #define QUANTUM ((size_t)(1U << LG_QUANTUM))
/* Low-order bits below the quantum boundary. */
113 #define QUANTUM_MASK (QUANTUM - 1)
115 /* Return the smallest quantum multiple that is >= a. */
116 #define QUANTUM_CEILING(a) \
117 (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
/* sizeof(long) as a power of two; LG_SIZEOF_LONG comes from configuration. */
119 #define LONG ((size_t)(1U << LG_SIZEOF_LONG))
120 #define LONG_MASK (LONG - 1)
122 /* Return the smallest long multiple that is >= a. */
123 #define LONG_CEILING(a) \
124 (((a) + LONG_MASK) & ~LONG_MASK)
/* sizeof(void *) derived from the configured LG_SIZEOF_PTR. */
126 #define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
127 #define PTR_MASK (SIZEOF_PTR - 1)
129 /* Return the smallest (void *) multiple that is >= a. */
130 #define PTR_CEILING(a) \
131 (((a) + PTR_MASK) & ~PTR_MASK)
134 * Maximum size of L1 cache line. This is used to avoid cache line aliasing.
135 * In addition, this controls the spacing of cacheline-spaced size classes.
137 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
138 * only handle raw constants.
140 #define LG_CACHELINE 6 /* 2^6 = 64-byte cache lines. */
/* CACHELINE itself must stay a raw constant (see the __declspec note above). */
142 #define CACHELINE_MASK (CACHELINE - 1)
144 /* Return the smallest cacheline multiple that is >= s. */
145 #define CACHELINE_CEILING(s) \
146 (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
148 /* Return the nearest aligned address at or below a. */
/*
 * ((~(alignment)) + 1) is two's-complement -alignment; this mask arithmetic
 * is only meaningful when alignment is a power of two.
 */
149 #define ALIGNMENT_ADDR2BASE(a, alignment) \
150 ((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
152 /* Return the offset between a and the nearest aligned address at or below a. */
153 #define ALIGNMENT_ADDR2OFFSET(a, alignment) \
154 ((size_t)((uintptr_t)(a) & (alignment - 1)))
156 /* Return the smallest alignment multiple that is >= s. */
157 #define ALIGNMENT_CEILING(s, alignment) \
158 (((s) + (alignment - 1)) & ((~(alignment)) + 1))
160 /* Declare a variable-length array. */
161 #if __STDC_VERSION__ < 199901L /* Pre-C99: no VLAs; emulate with alloca(). */
164 # define alloca _alloca /* MSVC spells alloca() with a leading underscore. */
166 # ifdef JEMALLOC_HAS_ALLOCA_H
/*
 * NOTE(review): alloca() storage lives until function (not block) exit, so
 * this emulation's lifetime differs from a true VLA — acceptable only for
 * the usage patterns in this project; verify at call sites.
 */
172 # define VARIABLE_ARRAY(type, name, count) \
173 type *name = alloca(sizeof(type) * (count))
175 # define VARIABLE_ARRAY(type, name, count) type name[(count)] /* C99+: a real VLA. */
178 #endif /* JEMALLOC_INTERNAL_TYPES_H */