#define JEMALLOC_RTREE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/mutex.h"

/*
 * Only the most significant bits of keys passed to rtree_{read,write}() are
 * used.
 */
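/*
 * Illustrative sketch (an assumption for illustration, not part of the
 * build): with RTREE_HEIGHT == 2, a key is consumed most-significant-bits
 * first, one rtree_levels[level].bits-wide subkey per level:
 *
 *	key: [level 0 subkey][level 1 subkey][insignificant low bits]
 *
 * rtree_leafkey(key) masks off everything below the leaf level, so all keys
 * that land in the same leaf share a leafkey; the rtree_ctx caches compare
 * against that value. The exact widths are configuration dependent (see
 * rtree_levels).
 */
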
bool
rtree_new(rtree_t *rtree, bool zeroed) {
#ifdef JEMALLOC_JET
	if (!zeroed) {
		memset(rtree, 0, sizeof(rtree_t)); /* Clear root. */
	}
#else
	assert(zeroed);
#endif

	if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}

	return false;
}

static rtree_node_elm_t *
rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
	return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms *
	    sizeof(rtree_node_elm_t), CACHELINE);
}
rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc = rtree_node_alloc_impl;

static void
rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) {
	/* Nodes are never deleted during normal operation. */
	not_reached();
}
UNUSED rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc =
    rtree_node_dalloc_impl;

static rtree_leaf_elm_t *
rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
	return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms *
	    sizeof(rtree_leaf_elm_t), CACHELINE);
}
rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc = rtree_leaf_alloc_impl;

static void
rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) {
	/* Leaves are never deleted during normal operation. */
	not_reached();
}
UNUSED rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc =
    rtree_leaf_dalloc_impl;
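
/*
 * The JET_MUTABLE function pointers above let test code interpose the
 * allocation hooks; outside of JEMALLOC_JET builds, JET_MUTABLE expands to
 * const, so the compiler can resolve the calls statically. A hypothetical
 * test that simulates allocation failure might do:
 *
 *	static rtree_node_elm_t *
 *	node_alloc_fail(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
 *		return NULL;
 *	}
 *	...
 *	rtree_node_alloc = node_alloc_fail;
 */
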
#ifdef JEMALLOC_JET
#  if RTREE_HEIGHT > 1
static void
rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree,
    unsigned level) {
	size_t nchildren = ZU(1) << rtree_levels[level].bits;
	if (level + 2 < RTREE_HEIGHT) {
		for (size_t i = 0; i < nchildren; i++) {
			rtree_node_elm_t *node =
			    (rtree_node_elm_t *)atomic_load_p(&subtree[i].child,
			    ATOMIC_RELAXED);
			if (node != NULL) {
				rtree_delete_subtree(tsdn, rtree, node, level +
				    1);
			}
		}
	} else {
		for (size_t i = 0; i < nchildren; i++) {
			rtree_leaf_elm_t *leaf =
			    (rtree_leaf_elm_t *)atomic_load_p(&subtree[i].child,
			    ATOMIC_RELAXED);
			if (leaf != NULL) {
				rtree_leaf_dalloc(tsdn, rtree, leaf);
			}
		}
	}

	if (subtree != rtree->root) {
		rtree_node_dalloc(tsdn, rtree, subtree);
	}
}
#  endif

void
rtree_delete(tsdn_t *tsdn, rtree_t *rtree) {
#  if RTREE_HEIGHT > 1
	rtree_delete_subtree(tsdn, rtree, rtree->root, 0);
#  endif
}
#endif

static rtree_node_elm_t *
rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
    atomic_p_t *elmp) {
	malloc_mutex_lock(tsdn, &rtree->init_lock);
	/*
	 * If *elmp is non-null, then it was initialized with the init lock
	 * held, so we can get by with 'relaxed' here.
	 */
	rtree_node_elm_t *node = atomic_load_p(elmp, ATOMIC_RELAXED);
	if (node == NULL) {
		node = rtree_node_alloc(tsdn, rtree, ZU(1) <<
		    rtree_levels[level].bits);
		if (node == NULL) {
			malloc_mutex_unlock(tsdn, &rtree->init_lock);
			return NULL;
		}
		/*
		 * Even though we hold the lock, a later reader might not; we
		 * need release semantics.
		 */
		atomic_store_p(elmp, node, ATOMIC_RELEASE);
	}
	malloc_mutex_unlock(tsdn, &rtree->init_lock);

	return node;
}
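
/*
 * The pattern above is double-checked initialization: readers that never take
 * init_lock observe the child via an acquire load (rtree_child_*_tryread),
 * which pairs with the release store here so the freshly zeroed node contents
 * become visible no later than the pointer itself. A minimal sketch of the
 * same idea, assuming C11 atomics rather than jemalloc's wrappers:
 *
 *	mtx_lock(&lock);
 *	void *p = atomic_load_explicit(&slot, memory_order_relaxed);
 *	if (p == NULL) {
 *		p = alloc_zeroed();
 *		if (p != NULL) {
 *			atomic_store_explicit(&slot, p, memory_order_release);
 *		}
 *	}
 *	mtx_unlock(&lock);
 */
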
static rtree_leaf_elm_t *
rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) {
	malloc_mutex_lock(tsdn, &rtree->init_lock);
	/*
	 * If *elmp is non-null, then it was initialized with the init lock
	 * held, so we can get by with 'relaxed' here.
	 */
	rtree_leaf_elm_t *leaf = atomic_load_p(elmp, ATOMIC_RELAXED);
	if (leaf == NULL) {
		leaf = rtree_leaf_alloc(tsdn, rtree, ZU(1) <<
		    rtree_levels[RTREE_HEIGHT-1].bits);
		if (leaf == NULL) {
			malloc_mutex_unlock(tsdn, &rtree->init_lock);
			return NULL;
		}
		/*
		 * Even though we hold the lock, a later reader might not; we
		 * need release semantics.
		 */
		atomic_store_p(elmp, leaf, ATOMIC_RELEASE);
	}
	malloc_mutex_unlock(tsdn, &rtree->init_lock);

	return leaf;
}

static bool
rtree_node_valid(rtree_node_elm_t *node) {
	return ((uintptr_t)node != (uintptr_t)0);
}

static bool
rtree_leaf_valid(rtree_leaf_elm_t *leaf) {
	return ((uintptr_t)leaf != (uintptr_t)0);
}

static rtree_node_elm_t *
rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) {
	rtree_node_elm_t *node;

	if (dependent) {
		node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
		    ATOMIC_RELAXED);
	} else {
		node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
		    ATOMIC_ACQUIRE);
	}

	assert(!dependent || node != NULL);
	return node;
}
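
/*
 * "dependent" encodes a caller guarantee: the child pointer was already
 * published by a previous rtree write for this key, so the subsequent
 * dereference forms a data dependency on the load and ATOMIC_RELAXED
 * suffices. Non-dependent readers need ATOMIC_ACQUIRE to pair with the
 * ATOMIC_RELEASE store in rtree_{node,leaf}_init.
 */
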
static rtree_node_elm_t *
rtree_child_node_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm,
    unsigned level, bool dependent) {
	rtree_node_elm_t *node;

	node = rtree_child_node_tryread(elm, dependent);
	if (!dependent && unlikely(!rtree_node_valid(node))) {
		node = rtree_node_init(tsdn, rtree, level + 1, &elm->child);
	}
	assert(!dependent || node != NULL);
	return node;
}

static rtree_leaf_elm_t *
rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) {
	rtree_leaf_elm_t *leaf;

	if (dependent) {
		leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
		    ATOMIC_RELAXED);
	} else {
		leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
		    ATOMIC_ACQUIRE);
	}

	assert(!dependent || leaf != NULL);
	return leaf;
}

static rtree_leaf_elm_t *
rtree_child_leaf_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm,
    unsigned level, bool dependent) {
	rtree_leaf_elm_t *leaf;

	leaf = rtree_child_leaf_tryread(elm, dependent);
	if (!dependent && unlikely(!rtree_leaf_valid(leaf))) {
		leaf = rtree_leaf_init(tsdn, rtree, &elm->child);
	}
	assert(!dependent || leaf != NULL);
	return leaf;
}

rtree_leaf_elm_t *
rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, bool init_missing) {
	rtree_node_elm_t *node;
	rtree_leaf_elm_t *leaf;
#if RTREE_HEIGHT > 1
	node = rtree->root;
#else
	leaf = rtree->root;
#endif

	if (config_debug) {
		uintptr_t leafkey = rtree_leafkey(key);
		for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
			assert(rtree_ctx->cache[i].leafkey != leafkey);
		}
		for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
			assert(rtree_ctx->l2_cache[i].leafkey != leafkey);
		}
	}

#define RTREE_GET_CHILD(level) {					\
		assert(level < RTREE_HEIGHT-1);				\
		if (level != 0 && !dependent &&				\
		    unlikely(!rtree_node_valid(node))) {		\
			return NULL;					\
		}							\
		uintptr_t subkey = rtree_subkey(key, level);		\
		if (level + 2 < RTREE_HEIGHT) {				\
			node = init_missing ?				\
			    rtree_child_node_read(tsdn, rtree,		\
			    &node[subkey], level, dependent) :		\
			    rtree_child_node_tryread(&node[subkey],	\
			    dependent);					\
		} else {						\
			leaf = init_missing ?				\
			    rtree_child_leaf_read(tsdn, rtree,		\
			    &node[subkey], level, dependent) :		\
			    rtree_child_leaf_tryread(&node[subkey],	\
			    dependent);					\
		}							\
	}
	/*
	 * Cache replacement upon hard lookup (i.e. L1 & L2 rtree cache miss):
	 * (1) evict the last entry in the L2 cache; (2) move the colliding L1
	 * slot down to L2; and (3) fill the L1 slot.
	 */
#define RTREE_GET_LEAF(level) {						\
		assert(level == RTREE_HEIGHT-1);			\
		if (!dependent && unlikely(!rtree_leaf_valid(leaf))) {	\
			return NULL;					\
		}							\
		if (RTREE_CTX_NCACHE_L2 > 1) {				\
			memmove(&rtree_ctx->l2_cache[1],		\
			    &rtree_ctx->l2_cache[0],			\
			    sizeof(rtree_ctx_cache_elm_t) *		\
			    (RTREE_CTX_NCACHE_L2 - 1));			\
		}							\
		size_t slot = rtree_cache_direct_map(key);		\
		rtree_ctx->l2_cache[0].leafkey =			\
		    rtree_ctx->cache[slot].leafkey;			\
		rtree_ctx->l2_cache[0].leaf =				\
		    rtree_ctx->cache[slot].leaf;			\
		uintptr_t leafkey = rtree_leafkey(key);			\
		rtree_ctx->cache[slot].leafkey = leafkey;		\
		rtree_ctx->cache[slot].leaf = leaf;			\
		uintptr_t subkey = rtree_subkey(key, level);		\
		return &leaf[subkey];					\
	}
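	/*
	 * Sketch of the cache motion RTREE_GET_LEAF performs (sizes are
	 * configuration dependent; a 4-entry L2 is shown for illustration
	 * only):
	 *
	 *	L2: [e0 e1 e2 e3] -> [L1[slot] e0 e1 e2]   (e3 evicted)
	 *	L1[slot] <- (leafkey, leaf)                (new entry)
	 *
	 * The L1 cache is direct mapped via rtree_cache_direct_map(key), so
	 * the displaced slot is exactly the one this key maps to.
	 */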
	if (RTREE_HEIGHT > 1) {
		RTREE_GET_CHILD(0)
	}
	if (RTREE_HEIGHT > 2) {
		RTREE_GET_CHILD(1)
	}
	if (RTREE_HEIGHT > 3) {
		for (unsigned i = 2; i < RTREE_HEIGHT-1; i++) {
			RTREE_GET_CHILD(i)
		}
	}
	RTREE_GET_LEAF(RTREE_HEIGHT-1)
#undef RTREE_GET_CHILD
#undef RTREE_GET_LEAF
	not_reached();
}
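
/*
 * Because RTREE_HEIGHT is a compile-time constant, the branches above fold
 * away: e.g. with RTREE_HEIGHT == 2 the walk reduces to RTREE_GET_CHILD(0)
 * followed by RTREE_GET_LEAF(1). RTREE_GET_LEAF returns on every reachable
 * path (the element pointer, or NULL on a non-dependent miss), so control
 * never falls through to not_reached().
 */
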
void
rtree_ctx_data_init(rtree_ctx_t *ctx) {
	for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
		rtree_ctx_cache_elm_t *cache = &ctx->cache[i];
		cache->leafkey = RTREE_LEAFKEY_INVALID;
		cache->leaf = NULL;
	}
	for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
		rtree_ctx_cache_elm_t *cache = &ctx->l2_cache[i];
		cache->leafkey = RTREE_LEAFKEY_INVALID;
		cache->leaf = NULL;
	}
}
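
/*
 * Hypothetical usage sketch (tsdn and addr are placeholders, not defined in
 * this file); note that rtree_new() returns true on error:
 *
 *	rtree_t rt;
 *	rtree_ctx_t ctx;
 *	if (!rtree_new(&rt, true)) {
 *		rtree_ctx_data_init(&ctx);
 *		rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup_hard(tsdn,
 *		    &rt, &ctx, (uintptr_t)addr, false, true);
 *	}
 */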