#include "jemalloc/internal/jemalloc_preamble.h"

#include "jemalloc/internal/hook.h"

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/seq.h"

typedef struct hooks_internal_s hooks_internal_t;
struct hooks_internal_s {
	hooks_t hooks;
	bool in_use;
};

seq_define(hooks_internal_t, hooks)

static atomic_u_t nhooks = ATOMIC_INIT(0);
static seq_hooks_t hooks[HOOK_MAX];
static malloc_mutex_t hooks_mu;

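/*
 * Writers (hook_install/hook_remove) serialize on hooks_mu; readers in
 * the hook_invoke_* paths load each slot locklessly via the seq_*
 * protocol.  nhooks counts installed hooks so that the common
 * no-hooks-installed case can bail out on a single relaxed atomic load
 * (see HOOK_PROLOGUE below).
 */
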
bool
hook_boot() {
	return malloc_mutex_init(&hooks_mu, "hooks", WITNESS_RANK_HOOK,
	    malloc_mutex_rank_exclusive);
}

static void *
hook_install_locked(hooks_t *to_install) {
	hooks_internal_t hooks_internal;
	for (int i = 0; i < HOOK_MAX; i++) {
		bool success = seq_try_load_hooks(&hooks_internal, &hooks[i]);
		/* We hold mu; no concurrent access. */
		assert(success);
		if (!hooks_internal.in_use) {
			hooks_internal.hooks = *to_install;
			hooks_internal.in_use = true;
			seq_store_hooks(&hooks[i], &hooks_internal);
			atomic_store_u(&nhooks,
			    atomic_load_u(&nhooks, ATOMIC_RELAXED) + 1,
			    ATOMIC_RELAXED);
			return &hooks[i];
		}
	}
	return NULL;
}

void *
hook_install(tsdn_t *tsdn, hooks_t *to_install) {
	malloc_mutex_lock(tsdn, &hooks_mu);
	void *ret = hook_install_locked(to_install);
	if (ret != NULL) {
		tsd_global_slow_inc(tsdn);
	}
	malloc_mutex_unlock(tsdn, &hooks_mu);
	return ret;
}

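/*
 * Sketch of intended usage from a caller's perspective (my_alloc_hook
 * is hypothetical; its signature mirrors the dispatch in
 * hook_invoke_alloc() below, and the hooks_t field names are the ones
 * read by the invoke functions).  Externally, installation is driven
 * by the experimental.hooks.install / experimental.hooks.remove
 * mallctls.
 *
 *	static void
 *	my_alloc_hook(void *extra, hook_alloc_t type, void *result,
 *	    uintptr_t result_raw, uintptr_t args_raw[3]) {
 *		...record the allocation...
 *	}
 *
 *	hooks_t h = {.alloc_hook = &my_alloc_hook};
 *	void *handle = hook_install(tsdn, &h);
 *	if (handle == NULL) {
 *		...all HOOK_MAX slots were in use...
 *	}
 *	...
 *	hook_remove(tsdn, handle);
 */
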
static void
hook_remove_locked(seq_hooks_t *to_remove) {
	hooks_internal_t hooks_internal;
	bool success = seq_try_load_hooks(&hooks_internal, to_remove);
	/* We hold mu; no concurrent access. */
	assert(success);
	/* Should only remove hooks that were added. */
	assert(hooks_internal.in_use);
	hooks_internal.in_use = false;
	seq_store_hooks(to_remove, &hooks_internal);
	atomic_store_u(&nhooks, atomic_load_u(&nhooks, ATOMIC_RELAXED) - 1,
	    ATOMIC_RELAXED);
}

void
hook_remove(tsdn_t *tsdn, void *opaque) {
	if (config_debug) {
		/* The handle must point at one of our slots. */
		char *hooks_begin = (char *)&hooks[0];
		char *hooks_end = (char *)&hooks[HOOK_MAX];
		char *hook = (char *)opaque;
		assert(hooks_begin <= hook && hook < hooks_end
		    && (hook - hooks_begin) % sizeof(seq_hooks_t) == 0);
	}
	malloc_mutex_lock(tsdn, &hooks_mu);
	hook_remove_locked((seq_hooks_t *)opaque);
	tsd_global_slow_dec(tsdn);
	malloc_mutex_unlock(tsdn, &hooks_mu);
}

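/*
 * Iteration helpers for the lock-free read side: the body placed
 * between FOR_EACH_HOOK_BEGIN and FOR_EACH_HOOK_END runs once per
 * installed hook, against a consistent seq-protected snapshot of that
 * hook's state.  Slots that fail the seq load (a writer was mid-store)
 * or are unused are skipped.
 */
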
#define FOR_EACH_HOOK_BEGIN(hooks_internal_ptr)			\
for (int for_each_hook_counter = 0;				\
    for_each_hook_counter < HOOK_MAX;				\
    for_each_hook_counter++) {					\
	bool for_each_hook_success = seq_try_load_hooks(	\
	    (hooks_internal_ptr), &hooks[for_each_hook_counter]); \
	if (!for_each_hook_success) {				\
		continue;					\
	}							\
	if (!(hooks_internal_ptr)->in_use) {			\
		continue;					\
	}

#define FOR_EACH_HOOK_END					\
}

static bool *
hook_reentrantp() {
	/*
	 * We prevent user reentrancy within hooks.  This is basically just a
	 * thread-local bool that triggers an early-exit.
	 *
	 * We don't fold in_hook into reentrancy.  There are two reasons for
	 * this:
	 * - Right now, we turn on reentrancy during things like extent hook
	 *   execution.  Allocating during extent hooks is not officially
	 *   supported, but we don't want to break it for the time being.
	 *   These sorts of allocations should probably still be hooked,
	 *   though.
	 * - If a hook allocates, we may want it to be relatively fast (after
	 *   all, it executes on every allocator operation).  Turning on
	 *   reentrant mode is fairly heavyweight (disabling tcache,
	 *   redirecting to arena 0, etc.).  It's possible we may one day
	 *   want to turn on reentrant mode here, if it proves too difficult
	 *   to keep this working.  But that's fairly easy for us to see;
	 *   OTOH, people not using hooks because they're too slow is easy
	 *   for us to miss.
	 *
	 * The tricky part is that this code might get invoked even if we
	 * don't have access to tsd.  This function mimics getting a pointer
	 * to thread-local data, except that it might secretly return a
	 * pointer to some global data if we know that the caller will take
	 * the early-exit path.  If we return a bool that indicates that we
	 * are reentrant, then the caller will go down the early-exit path,
	 * leaving the global untouched.
	 */
	static bool in_hook_global = true;
	tsdn_t *tsdn = tsdn_fetch();
	tcache_t *tcache = tsdn_tcachep_get(tsdn);
	if (tcache != NULL) {
		return &tcache->in_hook;
	}
	return &in_hook_global;
}

#define HOOK_PROLOGUE						\
	if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) { \
		return;						\
	}							\
	bool *in_hook = hook_reentrantp();			\
	if (*in_hook) {						\
		return;						\
	}							\
	*in_hook = true;

#define HOOK_EPILOGUE						\
	*in_hook = false;

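/*
 * Every hook_invoke_* function below is bracketed as:
 *
 *	HOOK_PROLOGUE
 *	...dispatch to each installed hook...
 *	HOOK_EPILOGUE
 *
 * HOOK_PROLOGUE early-exits if no hooks are installed or if this
 * thread is already inside a hook; otherwise it sets *in_hook.  Note
 * that it declares the in_hook local that HOOK_EPILOGUE resets, so the
 * pair must appear in the same scope, and the bare returns restrict
 * its use to void functions.
 */
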
void
hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
    uintptr_t args_raw[3]) {
	HOOK_PROLOGUE

	hooks_internal_t hook;
	FOR_EACH_HOOK_BEGIN(&hook)
		hook_alloc h = hook.hooks.alloc_hook;
		if (h != NULL) {
			h(hook.hooks.extra, type, result, result_raw,
			    args_raw);
		}
	FOR_EACH_HOOK_END

	HOOK_EPILOGUE
}

void
hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]) {
	HOOK_PROLOGUE

	hooks_internal_t hook;
	FOR_EACH_HOOK_BEGIN(&hook)
		hook_dalloc h = hook.hooks.dalloc_hook;
		if (h != NULL) {
			h(hook.hooks.extra, type, address, args_raw);
		}
	FOR_EACH_HOOK_END

	HOOK_EPILOGUE
}

void
hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
    size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]) {
	HOOK_PROLOGUE

	hooks_internal_t hook;
	FOR_EACH_HOOK_BEGIN(&hook)
		hook_expand h = hook.hooks.expand_hook;
		if (h != NULL) {
			h(hook.hooks.extra, type, address, old_usize,
			    new_usize, result_raw, args_raw);
		}
	FOR_EACH_HOOK_END

	HOOK_EPILOGUE
}