/*
 * This radix tree implementation is tailored to the singular purpose of
 * tracking which chunks are currently owned by jemalloc.  This functionality
 * is mandatory for OS X, where jemalloc must be able to respond to object
 * ownership queries.
 *
 *******************************************************************************
 */
#ifdef JEMALLOC_H_TYPES

typedef struct rtree_s rtree_t;

/*
 * Size of each radix tree node (must be a power of 2).  This impacts tree
 * depth.
 */
#if (LG_SIZEOF_PTR == 2)
#  define RTREE_NODESIZE (1U << 14)
#else
/* On LP64 a full 16 KiB node per level would be wasteful; use one cacheline. */
#  define RTREE_NODESIZE CACHELINE
#endif

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/*
 * Radix tree root.  The mutex serializes writers (and readers in the locked
 * lookup path); level2bits[i] gives the number of key bits consumed by level
 * i, and the array is allocated with one slot per level at rtree_new() time.
 */
struct rtree_s {
	malloc_mutex_t	mutex;
	void		**root;
	unsigned	height;
	unsigned	level2bits[1]; /* Dynamically sized. */
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

/* Create a tree that maps keys of the given total bit width; NULL on OOM. */
rtree_t	*rtree_new(unsigned bits);
/* fork(2) protection: acquire/release the tree mutex around fork. */
void	rtree_prefork(rtree_t *rtree);
void	rtree_postfork_parent(rtree_t *rtree);
void	rtree_postfork_child(rtree_t *rtree);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
45 #ifdef JEMALLOC_H_INLINES
47 #ifndef JEMALLOC_ENABLE_INLINE
48 #ifndef JEMALLOC_DEBUG
49 void *rtree_get_locked(rtree_t *rtree, uintptr_t key);
51 void *rtree_get(rtree_t *rtree, uintptr_t key);
52 bool rtree_set(rtree_t *rtree, uintptr_t key, void *val);
55 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
56 #define RTREE_GET_GENERATE(f) \
57 /* The least significant bits of the key are ignored. */ \
58 JEMALLOC_INLINE void * \
59 f(rtree_t *rtree, uintptr_t key) \
63 unsigned i, lshift, height, bits; \
64 void **node, **child; \
66 RTREE_LOCK(&rtree->mutex); \
67 for (i = lshift = 0, height = rtree->height, node = rtree->root;\
69 i++, lshift += bits, node = child) { \
70 bits = rtree->level2bits[i]; \
71 subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
73 child = (void**)node[subkey]; \
74 if (child == NULL) { \
75 RTREE_UNLOCK(&rtree->mutex); \
81 * node is a leaf, so it contains values rather than node \
84 bits = rtree->level2bits[i]; \
85 subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
88 RTREE_UNLOCK(&rtree->mutex); \
95 # define RTREE_LOCK(l) malloc_mutex_lock(l)
96 # define RTREE_UNLOCK(l) malloc_mutex_unlock(l)
97 # define RTREE_GET_VALIDATE
98 RTREE_GET_GENERATE(rtree_get_locked)
101 # undef RTREE_GET_VALIDATE
104 #define RTREE_LOCK(l)
105 #define RTREE_UNLOCK(l)
106 #ifdef JEMALLOC_DEBUG
108 * Suppose that it were possible for a jemalloc-allocated chunk to be
109 * munmap()ped, followed by a different allocator in another thread re-using
110 * overlapping virtual memory, all without invalidating the cached rtree
111 * value. The result would be a false positive (the rtree would claim that
112 * jemalloc owns memory that it had actually discarded). This scenario
113 * seems impossible, but the following assertion is a prudent sanity check.
115 # define RTREE_GET_VALIDATE \
116 assert(rtree_get_locked(rtree, key) == ret);
118 # define RTREE_GET_VALIDATE
120 RTREE_GET_GENERATE(rtree_get)
123 #undef RTREE_GET_VALIDATE
126 rtree_set(rtree_t *rtree, uintptr_t key, void *val)
129 unsigned i, lshift, height, bits;
130 void **node, **child;
132 malloc_mutex_lock(&rtree->mutex);
133 for (i = lshift = 0, height = rtree->height, node = rtree->root;
135 i++, lshift += bits, node = child) {
136 bits = rtree->level2bits[i];
137 subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
139 child = (void**)node[subkey];
141 child = (void**)base_alloc(sizeof(void *) <<
142 rtree->level2bits[i+1]);
144 malloc_mutex_unlock(&rtree->mutex);
147 memset(child, 0, sizeof(void *) <<
148 rtree->level2bits[i+1]);
149 node[subkey] = child;
153 /* node is a leaf, so it contains values rather than node pointers. */
154 bits = rtree->level2bits[i];
155 subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
157 malloc_mutex_unlock(&rtree->mutex);
163 #endif /* JEMALLOC_H_INLINES */
164 /******************************************************************************/