/*
 * This radix tree implementation is tailored to the singular purpose of
 * tracking which chunks are currently owned by jemalloc.  This functionality
 * is mandatory for OS X, where jemalloc must be able to respond to object
 * ownership queries.
 *
 *******************************************************************************
 */
#ifdef JEMALLOC_H_TYPES

typedef struct rtree_s rtree_t;

/*
 * Size of each radix tree node (must be a power of 2).  This impacts tree
 * depth.
 */
#if (LG_SIZEOF_PTR == 2)
#  define RTREE_NODESIZE (1U << 14)
#else
#  define RTREE_NODESIZE CACHELINE
#endif
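
/*
 * Illustrative arithmetic: each node is an array of void * slots, so the
 * number of key bits consumed per level is lg(RTREE_NODESIZE / sizeof(void *)).
 * For example, with 32-bit pointers (LG_SIZEOF_PTR == 2) a node of 1U << 14
 * bytes holds 4096 slots, i.e. 12 key bits per level; with 64-bit pointers
 * and a typical 64-byte CACHELINE, a node holds 8 slots, i.e. 3 key bits per
 * level.  rtree_new() presumably divides the requested key width across
 * levels on this basis.
 */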

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct rtree_s {
        malloc_mutex_t  mutex;
        void            **root;
        unsigned        height;
        unsigned        level2bits[1]; /* Dynamically sized. */
};
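
/*
 * A minimal sketch of how a trailing dynamically sized array like this is
 * typically allocated (illustrative only; the real construction is in
 * rtree_new(), defined in rtree.c):
 *
 *     rtree_t *rtree = (rtree_t *)base_alloc(offsetof(rtree_t, level2bits) +
 *         (sizeof(unsigned) * height));
 *
 * The single declared element of level2bits stands in for `height' entries,
 * one bit count per tree level, in the pre-C99 flexible-array idiom.
 */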

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

rtree_t *rtree_new(unsigned bits);
void    rtree_prefork(rtree_t *rtree);
void    rtree_postfork_parent(rtree_t *rtree);
void    rtree_postfork_child(rtree_t *rtree);
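
/*
 * A hedged usage sketch: the chunk management code is expected to drive this
 * API roughly as follows (`chunks_rtree', `lg_chunk', `chunk' and `addr' are
 * illustrative names, not identifiers defined in this file):
 *
 *     rtree_t *chunks_rtree;
 *
 *     chunks_rtree = rtree_new((unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
 *         lg_chunk));
 *     // Record ownership when a chunk is mapped ...
 *     if (rtree_set(chunks_rtree, (uintptr_t)chunk, chunk))
 *         ; // interior node allocation failed
 *     // ... and answer "does jemalloc own this address?" queries later.
 *     bool owned = (rtree_get(chunks_rtree, (uintptr_t)addr) != NULL);
 *
 * The prefork/postfork hooks exist so that rtree->mutex can be acquired
 * before fork(2) and released (or reinitialized) in the parent and child,
 * keeping the tree consistent across fork.
 */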

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
#ifndef JEMALLOC_DEBUG
void    *rtree_get_locked(rtree_t *rtree, uintptr_t key);
#endif
void    *rtree_get(rtree_t *rtree, uintptr_t key);
bool    rtree_set(rtree_t *rtree, uintptr_t key, void *val);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
#define RTREE_GET_GENERATE(f)                                           \
/* The least significant bits of the key are ignored. */                \
JEMALLOC_INLINE void *                                                  \
f(rtree_t *rtree, uintptr_t key)                                        \
{                                                                       \
        void *ret;                                                      \
        uintptr_t subkey;                                               \
        unsigned i, lshift, height, bits;                               \
        void **node, **child;                                           \
                                                                        \
        RTREE_LOCK(&rtree->mutex);                                      \
        for (i = lshift = 0, height = rtree->height, node = rtree->root;\
            i < height - 1;                                             \
            i++, lshift += bits, node = child) {                        \
                bits = rtree->level2bits[i];                            \
                subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
                    3)) - bits);                                        \
                child = (void**)node[subkey];                           \
                if (child == NULL) {                                    \
                        RTREE_UNLOCK(&rtree->mutex);                    \
                        return (NULL);                                  \
                }                                                       \
        }                                                               \
                                                                        \
        /*                                                              \
         * node is a leaf, so it contains values rather than node       \
         * pointers.                                                    \
         */                                                             \
        bits = rtree->level2bits[i];                                    \
        subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -     \
            bits);                                                      \
        ret = node[subkey];                                             \
        RTREE_UNLOCK(&rtree->mutex);                                    \
                                                                        \
        RTREE_GET_VALIDATE                                              \
        return (ret);                                                   \
}
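
/*
 * Worked example of the subkey computation above, with illustrative values:
 * (ZU(1) << (LG_SIZEOF_PTR+3)) is the pointer width in bits (64 on a system
 * where LG_SIZEOF_PTR == 3).  If the levels above level i have consumed
 * lshift == 16 bits and this level consumes bits == 4, then (key << 16)
 * discards the 16 already-consumed high bits and >> (64 - 4) keeps only the
 * next 4 most significant bits, yielding a subkey in [0, 15] that indexes
 * this level's 16-slot node.
 */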

#ifdef JEMALLOC_DEBUG
#  define RTREE_LOCK(l)         malloc_mutex_lock(l)
#  define RTREE_UNLOCK(l)       malloc_mutex_unlock(l)
#  define RTREE_GET_VALIDATE
RTREE_GET_GENERATE(rtree_get_locked)
#  undef RTREE_LOCK
#  undef RTREE_UNLOCK
#  undef RTREE_GET_VALIDATE
#endif

#define RTREE_LOCK(l)
#define RTREE_UNLOCK(l)
#ifdef JEMALLOC_DEBUG
   /*
    * Suppose that it were possible for a jemalloc-allocated chunk to be
    * munmap()ped, followed by a different allocator in another thread re-using
    * overlapping virtual memory, all without invalidating the cached rtree
    * value.  The result would be a false positive (the rtree would claim that
    * jemalloc owns memory that it had actually discarded).  This scenario
    * seems impossible, but the following assertion is a prudent sanity check.
    */
#  define RTREE_GET_VALIDATE                                            \
        assert(rtree_get_locked(rtree, key) == ret);
#else
#  define RTREE_GET_VALIDATE
#endif
RTREE_GET_GENERATE(rtree_get)
#undef RTREE_LOCK
#undef RTREE_UNLOCK
#undef RTREE_GET_VALIDATE

JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, void *val)
{
        uintptr_t subkey;
        unsigned i, lshift, height, bits;
        void **node, **child;

        malloc_mutex_lock(&rtree->mutex);
        for (i = lshift = 0, height = rtree->height, node = rtree->root;
            i < height - 1;
            i++, lshift += bits, node = child) {
                bits = rtree->level2bits[i];
                subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
                    bits);
                child = (void**)node[subkey];
                if (child == NULL) {
                        child = (void**)base_alloc(sizeof(void *) <<
                            rtree->level2bits[i+1]);
                        if (child == NULL) {
                                malloc_mutex_unlock(&rtree->mutex);
                                return (true);
                        }
                        memset(child, 0, sizeof(void *) <<
                            rtree->level2bits[i+1]);
                        node[subkey] = child;
                }
        }

        /* node is a leaf, so it contains values rather than node pointers. */
        bits = rtree->level2bits[i];
        subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
        node[subkey] = val;
        malloc_mutex_unlock(&rtree->mutex);

        return (false);
}
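
/*
 * Note on the return convention used above: rtree_set() returns false on
 * success and true on failure (failure to allocate an interior node via
 * base_alloc()).  A caller that records and later forgets a chunk would look
 * roughly like this (`chunk' is an illustrative name):
 *
 *     if (rtree_set(rtree, (uintptr_t)chunk, chunk))
 *         ; // out of memory; ownership of chunk was not recorded
 *     ...
 *     rtree_set(rtree, (uintptr_t)chunk, NULL); // forget chunk
 */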
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/