]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - contrib/jemalloc/src/ctl.c
MFV: r333378
[FreeBSD/FreeBSD.git] / contrib / jemalloc / src / ctl.c
1 #define JEMALLOC_CTL_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
4
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/ctl.h"
7 #include "jemalloc/internal/extent_dss.h"
8 #include "jemalloc/internal/extent_mmap.h"
9 #include "jemalloc/internal/mutex.h"
10 #include "jemalloc/internal/nstime.h"
11 #include "jemalloc/internal/size_classes.h"
12 #include "jemalloc/internal/util.h"
13
/******************************************************************************/
/* Data. */

/*
 * ctl_mtx protects the following:
 * - ctl_stats->*
 */
static malloc_mutex_t	ctl_mtx;
/* NOTE(review): presumably set on first successful ctl bootstrap — confirm. */
static bool		ctl_initialized;
/* Merged global statistics snapshot (guarded by ctl_mtx). */
static ctl_stats_t	*ctl_stats;
/* Per-arena ctl bookkeeping; indexed via arenas_i2a_impl() below. */
static ctl_arenas_t	*ctl_arenas;
25
26 /******************************************************************************/
27 /* Helpers for named and indexed nodes. */
28
29 static const ctl_named_node_t *
30 ctl_named_node(const ctl_node_t *node) {
31         return ((node->named) ? (const ctl_named_node_t *)node : NULL);
32 }
33
34 static const ctl_named_node_t *
35 ctl_named_children(const ctl_named_node_t *node, size_t index) {
36         const ctl_named_node_t *children = ctl_named_node(node->children);
37
38         return (children ? &children[index] : NULL);
39 }
40
41 static const ctl_indexed_node_t *
42 ctl_indexed_node(const ctl_node_t *node) {
43         return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
44 }
45
46 /******************************************************************************/
47 /* Function prototypes for non-inline static functions. */
48
/*
 * Forward-declare a leaf mallctl handler: node n's callback is named
 * n##_ctl and follows the standard ctl read/write signature.
 */
#define CTL_PROTO(n)                                                    \
static int      n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,   \
    void *oldp, size_t *oldlenp, void *newp, size_t newlen);

/*
 * Forward-declare an indexed-node lookup function (n##_index), used for
 * mib components that are numeric indices (e.g. arena.<i>).
 */
#define INDEX_PROTO(n)                                                  \
static const ctl_named_node_t   *n##_index(tsdn_t *tsdn,                \
    const size_t *mib, size_t miblen, size_t i);
56
57 CTL_PROTO(version)
58 CTL_PROTO(epoch)
59 CTL_PROTO(background_thread)
60 CTL_PROTO(thread_tcache_enabled)
61 CTL_PROTO(thread_tcache_flush)
62 CTL_PROTO(thread_prof_name)
63 CTL_PROTO(thread_prof_active)
64 CTL_PROTO(thread_arena)
65 CTL_PROTO(thread_allocated)
66 CTL_PROTO(thread_allocatedp)
67 CTL_PROTO(thread_deallocated)
68 CTL_PROTO(thread_deallocatedp)
69 CTL_PROTO(config_cache_oblivious)
70 CTL_PROTO(config_debug)
71 CTL_PROTO(config_fill)
72 CTL_PROTO(config_lazy_lock)
73 CTL_PROTO(config_malloc_conf)
74 CTL_PROTO(config_prof)
75 CTL_PROTO(config_prof_libgcc)
76 CTL_PROTO(config_prof_libunwind)
77 CTL_PROTO(config_stats)
78 CTL_PROTO(config_thp)
79 CTL_PROTO(config_utrace)
80 CTL_PROTO(config_xmalloc)
81 CTL_PROTO(opt_abort)
82 CTL_PROTO(opt_abort_conf)
83 CTL_PROTO(opt_retain)
84 CTL_PROTO(opt_dss)
85 CTL_PROTO(opt_narenas)
86 CTL_PROTO(opt_percpu_arena)
87 CTL_PROTO(opt_background_thread)
88 CTL_PROTO(opt_dirty_decay_ms)
89 CTL_PROTO(opt_muzzy_decay_ms)
90 CTL_PROTO(opt_stats_print)
91 CTL_PROTO(opt_stats_print_opts)
92 CTL_PROTO(opt_junk)
93 CTL_PROTO(opt_zero)
94 CTL_PROTO(opt_utrace)
95 CTL_PROTO(opt_xmalloc)
96 CTL_PROTO(opt_tcache)
97 CTL_PROTO(opt_lg_tcache_max)
98 CTL_PROTO(opt_prof)
99 CTL_PROTO(opt_prof_prefix)
100 CTL_PROTO(opt_prof_active)
101 CTL_PROTO(opt_prof_thread_active_init)
102 CTL_PROTO(opt_lg_prof_sample)
103 CTL_PROTO(opt_lg_prof_interval)
104 CTL_PROTO(opt_prof_gdump)
105 CTL_PROTO(opt_prof_final)
106 CTL_PROTO(opt_prof_leak)
107 CTL_PROTO(opt_prof_accum)
108 CTL_PROTO(tcache_create)
109 CTL_PROTO(tcache_flush)
110 CTL_PROTO(tcache_destroy)
111 CTL_PROTO(arena_i_initialized)
112 CTL_PROTO(arena_i_decay)
113 CTL_PROTO(arena_i_purge)
114 CTL_PROTO(arena_i_reset)
115 CTL_PROTO(arena_i_destroy)
116 CTL_PROTO(arena_i_dss)
117 CTL_PROTO(arena_i_dirty_decay_ms)
118 CTL_PROTO(arena_i_muzzy_decay_ms)
119 CTL_PROTO(arena_i_extent_hooks)
120 INDEX_PROTO(arena_i)
121 CTL_PROTO(arenas_bin_i_size)
122 CTL_PROTO(arenas_bin_i_nregs)
123 CTL_PROTO(arenas_bin_i_slab_size)
124 INDEX_PROTO(arenas_bin_i)
125 CTL_PROTO(arenas_lextent_i_size)
126 INDEX_PROTO(arenas_lextent_i)
127 CTL_PROTO(arenas_narenas)
128 CTL_PROTO(arenas_dirty_decay_ms)
129 CTL_PROTO(arenas_muzzy_decay_ms)
130 CTL_PROTO(arenas_quantum)
131 CTL_PROTO(arenas_page)
132 CTL_PROTO(arenas_tcache_max)
133 CTL_PROTO(arenas_nbins)
134 CTL_PROTO(arenas_nhbins)
135 CTL_PROTO(arenas_nlextents)
136 CTL_PROTO(arenas_create)
137 CTL_PROTO(prof_thread_active_init)
138 CTL_PROTO(prof_active)
139 CTL_PROTO(prof_dump)
140 CTL_PROTO(prof_gdump)
141 CTL_PROTO(prof_reset)
142 CTL_PROTO(prof_interval)
143 CTL_PROTO(lg_prof_sample)
144 CTL_PROTO(stats_arenas_i_small_allocated)
145 CTL_PROTO(stats_arenas_i_small_nmalloc)
146 CTL_PROTO(stats_arenas_i_small_ndalloc)
147 CTL_PROTO(stats_arenas_i_small_nrequests)
148 CTL_PROTO(stats_arenas_i_large_allocated)
149 CTL_PROTO(stats_arenas_i_large_nmalloc)
150 CTL_PROTO(stats_arenas_i_large_ndalloc)
151 CTL_PROTO(stats_arenas_i_large_nrequests)
152 CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
153 CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
154 CTL_PROTO(stats_arenas_i_bins_j_nrequests)
155 CTL_PROTO(stats_arenas_i_bins_j_curregs)
156 CTL_PROTO(stats_arenas_i_bins_j_nfills)
157 CTL_PROTO(stats_arenas_i_bins_j_nflushes)
158 CTL_PROTO(stats_arenas_i_bins_j_nslabs)
159 CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
160 CTL_PROTO(stats_arenas_i_bins_j_curslabs)
161 INDEX_PROTO(stats_arenas_i_bins_j)
162 CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
163 CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
164 CTL_PROTO(stats_arenas_i_lextents_j_nrequests)
165 CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
166 INDEX_PROTO(stats_arenas_i_lextents_j)
167 CTL_PROTO(stats_arenas_i_nthreads)
168 CTL_PROTO(stats_arenas_i_uptime)
169 CTL_PROTO(stats_arenas_i_dss)
170 CTL_PROTO(stats_arenas_i_dirty_decay_ms)
171 CTL_PROTO(stats_arenas_i_muzzy_decay_ms)
172 CTL_PROTO(stats_arenas_i_pactive)
173 CTL_PROTO(stats_arenas_i_pdirty)
174 CTL_PROTO(stats_arenas_i_pmuzzy)
175 CTL_PROTO(stats_arenas_i_mapped)
176 CTL_PROTO(stats_arenas_i_retained)
177 CTL_PROTO(stats_arenas_i_dirty_npurge)
178 CTL_PROTO(stats_arenas_i_dirty_nmadvise)
179 CTL_PROTO(stats_arenas_i_dirty_purged)
180 CTL_PROTO(stats_arenas_i_muzzy_npurge)
181 CTL_PROTO(stats_arenas_i_muzzy_nmadvise)
182 CTL_PROTO(stats_arenas_i_muzzy_purged)
183 CTL_PROTO(stats_arenas_i_base)
184 CTL_PROTO(stats_arenas_i_internal)
185 CTL_PROTO(stats_arenas_i_tcache_bytes)
186 CTL_PROTO(stats_arenas_i_resident)
187 INDEX_PROTO(stats_arenas_i)
188 CTL_PROTO(stats_allocated)
189 CTL_PROTO(stats_active)
190 CTL_PROTO(stats_background_thread_num_threads)
191 CTL_PROTO(stats_background_thread_num_runs)
192 CTL_PROTO(stats_background_thread_run_interval)
193 CTL_PROTO(stats_metadata)
194 CTL_PROTO(stats_resident)
195 CTL_PROTO(stats_mapped)
196 CTL_PROTO(stats_retained)
197
/*
 * Generate the full set of mutex-profiling stat handler prototypes for
 * mutex n: one CTL_PROTO per counter exposed under the stats.* namespace.
 */
#define MUTEX_STATS_CTL_PROTO_GEN(n)                                    \
CTL_PROTO(stats_##n##_num_ops)                                          \
CTL_PROTO(stats_##n##_num_wait)                                         \
CTL_PROTO(stats_##n##_num_spin_acq)                                     \
CTL_PROTO(stats_##n##_num_owner_switch)                                 \
CTL_PROTO(stats_##n##_total_wait_time)                                  \
CTL_PROTO(stats_##n##_max_wait_time)                                    \
CTL_PROTO(stats_##n##_max_num_thds)

/* Global mutexes. */
#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx)
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP

/* Per arena mutexes. */
#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx)
MUTEX_PROF_ARENA_MUTEXES
#undef OP

/* Arena bin mutexes. */
MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex)
#undef MUTEX_STATS_CTL_PROTO_GEN

CTL_PROTO(stats_mutexes_reset)
222
223 /******************************************************************************/
224 /* mallctl tree. */
225
/*
 * Helpers for building the static mallctl tree below.  NAME() initializes a
 * named node header ({true} == node->named) plus its component name;
 * CHILD() points a node at a child array (t selects named vs. indexed node
 * type for the element-count computation); CTL() installs a leaf handler.
 */
#define NAME(n) {true}, n
#define CHILD(t, c)                                                     \
	sizeof(c##_node) / sizeof(ctl_##t##_node_t),                    \
	(ctl_node_t *)c##_node,                                         \
	NULL
#define CTL(c)	0, NULL, c##_ctl

/*
 * Only handles internal indexed nodes, since there are currently no external
 * ones.
 */
#define INDEX(i)	{false},	i##_index
238
static const ctl_named_node_t	thread_tcache_node[] = {
	{NAME("enabled"),	CTL(thread_tcache_enabled)},
	{NAME("flush"),		CTL(thread_tcache_flush)}
};

static const ctl_named_node_t	thread_prof_node[] = {
	{NAME("name"),		CTL(thread_prof_name)},
	{NAME("active"),	CTL(thread_prof_active)}
};

static const ctl_named_node_t	thread_node[] = {
	{NAME("arena"),		CTL(thread_arena)},
	{NAME("allocated"),	CTL(thread_allocated)},
	{NAME("allocatedp"),	CTL(thread_allocatedp)},
	{NAME("deallocated"),	CTL(thread_deallocated)},
	{NAME("deallocatedp"),	CTL(thread_deallocatedp)},
	{NAME("tcache"),	CHILD(named, thread_tcache)},
	{NAME("prof"),		CHILD(named, thread_prof)}
};

static const ctl_named_node_t	config_node[] = {
	{NAME("cache_oblivious"), CTL(config_cache_oblivious)},
	{NAME("debug"),		CTL(config_debug)},
	{NAME("fill"),		CTL(config_fill)},
	{NAME("lazy_lock"),	CTL(config_lazy_lock)},
	{NAME("malloc_conf"),	CTL(config_malloc_conf)},
	{NAME("prof"),		CTL(config_prof)},
	{NAME("prof_libgcc"),	CTL(config_prof_libgcc)},
	{NAME("prof_libunwind"), CTL(config_prof_libunwind)},
	{NAME("stats"),		CTL(config_stats)},
	{NAME("thp"),		CTL(config_thp)},
	{NAME("utrace"),	CTL(config_utrace)},
	{NAME("xmalloc"),	CTL(config_xmalloc)}
};

static const ctl_named_node_t opt_node[] = {
	{NAME("abort"),		CTL(opt_abort)},
	{NAME("abort_conf"),	CTL(opt_abort_conf)},
	{NAME("retain"),	CTL(opt_retain)},
	{NAME("dss"),		CTL(opt_dss)},
	{NAME("narenas"),	CTL(opt_narenas)},
	{NAME("percpu_arena"),	CTL(opt_percpu_arena)},
	{NAME("background_thread"),	CTL(opt_background_thread)},
	{NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
	{NAME("stats_print"),	CTL(opt_stats_print)},
	{NAME("stats_print_opts"),	CTL(opt_stats_print_opts)},
	{NAME("junk"),		CTL(opt_junk)},
	{NAME("zero"),		CTL(opt_zero)},
	{NAME("utrace"),	CTL(opt_utrace)},
	{NAME("xmalloc"),	CTL(opt_xmalloc)},
	{NAME("tcache"),	CTL(opt_tcache)},
	{NAME("lg_tcache_max"),	CTL(opt_lg_tcache_max)},
	{NAME("prof"),		CTL(opt_prof)},
	{NAME("prof_prefix"),	CTL(opt_prof_prefix)},
	{NAME("prof_active"),	CTL(opt_prof_active)},
	{NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
	{NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
	{NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
	{NAME("prof_gdump"),	CTL(opt_prof_gdump)},
	{NAME("prof_final"),	CTL(opt_prof_final)},
	{NAME("prof_leak"),	CTL(opt_prof_leak)},
	{NAME("prof_accum"),	CTL(opt_prof_accum)}
};

static const ctl_named_node_t	tcache_node[] = {
	{NAME("create"),	CTL(tcache_create)},
	{NAME("flush"),		CTL(tcache_flush)},
	{NAME("destroy"),	CTL(tcache_destroy)}
};

static const ctl_named_node_t arena_i_node[] = {
	{NAME("initialized"),	CTL(arena_i_initialized)},
	{NAME("decay"),		CTL(arena_i_decay)},
	{NAME("purge"),		CTL(arena_i_purge)},
	{NAME("reset"),		CTL(arena_i_reset)},
	{NAME("destroy"),	CTL(arena_i_destroy)},
	{NAME("dss"),		CTL(arena_i_dss)},
	{NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
	{NAME("extent_hooks"),	CTL(arena_i_extent_hooks)}
};
/* Anonymous wrapper returned by the corresponding _index() lookup. */
static const ctl_named_node_t super_arena_i_node[] = {
	{NAME(""),		CHILD(named, arena_i)}
};

static const ctl_indexed_node_t arena_node[] = {
	{INDEX(arena_i)}
};

static const ctl_named_node_t arenas_bin_i_node[] = {
	{NAME("size"),		CTL(arenas_bin_i_size)},
	{NAME("nregs"),		CTL(arenas_bin_i_nregs)},
	{NAME("slab_size"),	CTL(arenas_bin_i_slab_size)}
};
static const ctl_named_node_t super_arenas_bin_i_node[] = {
	{NAME(""),		CHILD(named, arenas_bin_i)}
};

static const ctl_indexed_node_t arenas_bin_node[] = {
	{INDEX(arenas_bin_i)}
};

static const ctl_named_node_t arenas_lextent_i_node[] = {
	{NAME("size"),		CTL(arenas_lextent_i_size)}
};
static const ctl_named_node_t super_arenas_lextent_i_node[] = {
	{NAME(""),		CHILD(named, arenas_lextent_i)}
};

static const ctl_indexed_node_t arenas_lextent_node[] = {
	{INDEX(arenas_lextent_i)}
};

static const ctl_named_node_t arenas_node[] = {
	{NAME("narenas"),	CTL(arenas_narenas)},
	{NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)},
	{NAME("quantum"),	CTL(arenas_quantum)},
	{NAME("page"),		CTL(arenas_page)},
	{NAME("tcache_max"),	CTL(arenas_tcache_max)},
	{NAME("nbins"),		CTL(arenas_nbins)},
	{NAME("nhbins"),	CTL(arenas_nhbins)},
	{NAME("bin"),		CHILD(indexed, arenas_bin)},
	{NAME("nlextents"),	CTL(arenas_nlextents)},
	{NAME("lextent"),	CHILD(indexed, arenas_lextent)},
	{NAME("create"),	CTL(arenas_create)}
};

static const ctl_named_node_t	prof_node[] = {
	{NAME("thread_active_init"), CTL(prof_thread_active_init)},
	{NAME("active"),	CTL(prof_active)},
	{NAME("dump"),		CTL(prof_dump)},
	{NAME("gdump"),		CTL(prof_gdump)},
	{NAME("reset"),		CTL(prof_reset)},
	{NAME("interval"),	CTL(prof_interval)},
	{NAME("lg_sample"),	CTL(lg_prof_sample)}
};

static const ctl_named_node_t stats_arenas_i_small_node[] = {
	{NAME("allocated"),	CTL(stats_arenas_i_small_allocated)},
	{NAME("nmalloc"),	CTL(stats_arenas_i_small_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_small_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_small_nrequests)}
};

static const ctl_named_node_t stats_arenas_i_large_node[] = {
	{NAME("allocated"),	CTL(stats_arenas_i_large_allocated)},
	{NAME("nmalloc"),	CTL(stats_arenas_i_large_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_large_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_large_nrequests)}
};

/* Emit the named-node table exposing a profiled mutex's counters. */
#define MUTEX_PROF_DATA_NODE(prefix)                                    \
static const ctl_named_node_t stats_##prefix##_node[] = {               \
	{NAME("num_ops"),                                               \
	 CTL(stats_##prefix##_num_ops)},                                \
	{NAME("num_wait"),                                              \
	 CTL(stats_##prefix##_num_wait)},                               \
	{NAME("num_spin_acq"),                                          \
	 CTL(stats_##prefix##_num_spin_acq)},                           \
	{NAME("num_owner_switch"),                                      \
	 CTL(stats_##prefix##_num_owner_switch)},                       \
	{NAME("total_wait_time"),                                       \
	 CTL(stats_##prefix##_total_wait_time)},                        \
	{NAME("max_wait_time"),                                         \
	 CTL(stats_##prefix##_max_wait_time)},                          \
	{NAME("max_num_thds"),                                          \
	 CTL(stats_##prefix##_max_num_thds)}                            \
	/* Note that # of current waiting thread not provided. */       \
};

MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex)

static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
	{NAME("nmalloc"),	CTL(stats_arenas_i_bins_j_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_bins_j_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_bins_j_nrequests)},
	{NAME("curregs"),	CTL(stats_arenas_i_bins_j_curregs)},
	{NAME("nfills"),	CTL(stats_arenas_i_bins_j_nfills)},
	{NAME("nflushes"),	CTL(stats_arenas_i_bins_j_nflushes)},
	{NAME("nslabs"),	CTL(stats_arenas_i_bins_j_nslabs)},
	{NAME("nreslabs"),	CTL(stats_arenas_i_bins_j_nreslabs)},
	{NAME("curslabs"),	CTL(stats_arenas_i_bins_j_curslabs)},
	{NAME("mutex"),		CHILD(named, stats_arenas_i_bins_j_mutex)}
};

static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i_bins_j)}
};

static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
	{INDEX(stats_arenas_i_bins_j)}
};

static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = {
	{NAME("nmalloc"),	CTL(stats_arenas_i_lextents_j_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_lextents_j_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_lextents_j_nrequests)},
	{NAME("curlextents"),	CTL(stats_arenas_i_lextents_j_curlextents)}
};
static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i_lextents_j)}
};

static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
	{INDEX(stats_arenas_i_lextents_j)}
};

/* One counter table per profiled arena mutex. */
#define OP(mtx)  MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx)
MUTEX_PROF_ARENA_MUTEXES
#undef OP

static const ctl_named_node_t stats_arenas_i_mutexes_node[] = {
#define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)},
MUTEX_PROF_ARENA_MUTEXES
#undef OP
};

static const ctl_named_node_t stats_arenas_i_node[] = {
	{NAME("nthreads"),	CTL(stats_arenas_i_nthreads)},
	{NAME("uptime"),	CTL(stats_arenas_i_uptime)},
	{NAME("dss"),		CTL(stats_arenas_i_dss)},
	{NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)},
	{NAME("pactive"),	CTL(stats_arenas_i_pactive)},
	{NAME("pdirty"),	CTL(stats_arenas_i_pdirty)},
	{NAME("pmuzzy"),	CTL(stats_arenas_i_pmuzzy)},
	{NAME("mapped"),	CTL(stats_arenas_i_mapped)},
	{NAME("retained"),	CTL(stats_arenas_i_retained)},
	{NAME("dirty_npurge"),	CTL(stats_arenas_i_dirty_npurge)},
	{NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)},
	{NAME("dirty_purged"),	CTL(stats_arenas_i_dirty_purged)},
	{NAME("muzzy_npurge"),	CTL(stats_arenas_i_muzzy_npurge)},
	{NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)},
	{NAME("muzzy_purged"),	CTL(stats_arenas_i_muzzy_purged)},
	{NAME("base"),		CTL(stats_arenas_i_base)},
	{NAME("internal"),	CTL(stats_arenas_i_internal)},
	{NAME("tcache_bytes"),	CTL(stats_arenas_i_tcache_bytes)},
	{NAME("resident"),	CTL(stats_arenas_i_resident)},
	{NAME("small"),		CHILD(named, stats_arenas_i_small)},
	{NAME("large"),		CHILD(named, stats_arenas_i_large)},
	{NAME("bins"),		CHILD(indexed, stats_arenas_i_bins)},
	{NAME("lextents"),	CHILD(indexed, stats_arenas_i_lextents)},
	{NAME("mutexes"),	CHILD(named, stats_arenas_i_mutexes)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i)}
};

static const ctl_indexed_node_t stats_arenas_node[] = {
	{INDEX(stats_arenas_i)}
};

static const ctl_named_node_t stats_background_thread_node[] = {
	{NAME("num_threads"),	CTL(stats_background_thread_num_threads)},
	{NAME("num_runs"),	CTL(stats_background_thread_num_runs)},
	{NAME("run_interval"),	CTL(stats_background_thread_run_interval)}
};

/* One counter table per profiled global mutex. */
#define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx)
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP

static const ctl_named_node_t stats_mutexes_node[] = {
#define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)},
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
	{NAME("reset"),		CTL(stats_mutexes_reset)}
};
#undef MUTEX_PROF_DATA_NODE

static const ctl_named_node_t stats_node[] = {
	{NAME("allocated"),	CTL(stats_allocated)},
	{NAME("active"),	CTL(stats_active)},
	{NAME("metadata"),	CTL(stats_metadata)},
	{NAME("resident"),	CTL(stats_resident)},
	{NAME("mapped"),	CTL(stats_mapped)},
	{NAME("retained"),	CTL(stats_retained)},
	{NAME("background_thread"),
	 CHILD(named, stats_background_thread)},
	{NAME("mutexes"),	CHILD(named, stats_mutexes)},
	{NAME("arenas"),	CHILD(indexed, stats_arenas)}
};

/* Top level of the mallctl namespace. */
static const ctl_named_node_t	root_node[] = {
	{NAME("version"),	CTL(version)},
	{NAME("epoch"),		CTL(epoch)},
	{NAME("background_thread"),	CTL(background_thread)},
	{NAME("thread"),	CHILD(named, thread)},
	{NAME("config"),	CHILD(named, config)},
	{NAME("opt"),		CHILD(named, opt)},
	{NAME("tcache"),	CHILD(named, tcache)},
	{NAME("arena"),		CHILD(indexed, arena)},
	{NAME("arenas"),	CHILD(named, arenas)},
	{NAME("prof"),		CHILD(named, prof)},
	{NAME("stats"),		CHILD(named, stats)}
};
static const ctl_named_node_t super_root_node[] = {
	{NAME(""),		CHILD(named, root)}
};

#undef NAME
#undef CHILD
#undef CTL
#undef INDEX
545
546 /******************************************************************************/
547
548 /*
549  * Sets *dst + *src non-atomically.  This is safe, since everything is
550  * synchronized by the ctl mutex.
551  */
552 static void
553 accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
554 #ifdef JEMALLOC_ATOMIC_U64
555         uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
556         uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED);
557         atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED);
558 #else
559         *dst += *src;
560 #endif
561 }
562
/* Likewise: with ctl mutex synchronization, reading is simple. */
static uint64_t
arena_stats_read_u64(arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	/* Relaxed load suffices; the ctl mutex orders all accesses. */
	return atomic_load_u64(p, ATOMIC_RELAXED);
#else
	return *p;
#endif
}
572
573 static void accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
574         size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
575         size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
576         atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
577 }
578
579 /******************************************************************************/
580
581 static unsigned
582 arenas_i2a_impl(size_t i, bool compat, bool validate) {
583         unsigned a;
584
585         switch (i) {
586         case MALLCTL_ARENAS_ALL:
587                 a = 0;
588                 break;
589         case MALLCTL_ARENAS_DESTROYED:
590                 a = 1;
591                 break;
592         default:
593                 if (compat && i == ctl_arenas->narenas) {
594                         /*
595                          * Provide deprecated backward compatibility for
596                          * accessing the merged stats at index narenas rather
597                          * than via MALLCTL_ARENAS_ALL.  This is scheduled for
598                          * removal in 6.0.0.
599                          */
600                         a = 0;
601                 } else if (validate && i >= ctl_arenas->narenas) {
602                         a = UINT_MAX;
603                 } else {
604                         /*
605                          * This function should never be called for an index
606                          * more than one past the range of indices that have
607                          * initialized ctl data.
608                          */
609                         assert(i < ctl_arenas->narenas || (!validate && i ==
610                             ctl_arenas->narenas));
611                         a = (unsigned)i + 2;
612                 }
613                 break;
614         }
615
616         return a;
617 }
618
/* Common-case wrapper: compat index mapping, no range validation. */
static unsigned
arenas_i2a(size_t i) {
	return arenas_i2a_impl(i, /* compat */ true, /* validate */ false);
}
623
/*
 * Look up (and, when init is true, lazily allocate) the ctl_arena_t for
 * public index i.  Storage comes from the b0 base allocator; with stats
 * enabled the ctl_arena_t and its ctl_arena_stats_t are allocated together
 * in a single container so one base_alloc() call covers both.  Returns
 * NULL only on allocation failure.
 */
static ctl_arena_t *
arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) {
	ctl_arena_t *ret;

	/* Lazy initialization is only meaningful for non-compat lookups. */
	assert(!compat || !init);

	ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)];
	if (init && ret == NULL) {
		if (config_stats) {
			/* Co-allocate the arena record and its stats. */
			struct container_s {
				ctl_arena_t		ctl_arena;
				ctl_arena_stats_t	astats;
			};
			struct container_s *cont =
			    (struct container_s *)base_alloc(tsd_tsdn(tsd),
			    b0get(), sizeof(struct container_s), QUANTUM);
			if (cont == NULL) {
				return NULL;
			}
			ret = &cont->ctl_arena;
			ret->astats = &cont->astats;
		} else {
			ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(),
			    sizeof(ctl_arena_t), QUANTUM);
			if (ret == NULL) {
				return NULL;
			}
		}
		ret->arena_ind = (unsigned)i;
		ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret;
	}

	/* The stored record must map back to the same internal slot. */
	assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i));
	return ret;
}
659
660 static ctl_arena_t *
661 arenas_i(size_t i) {
662         ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false);
663         assert(ret != NULL);
664         return ret;
665 }
666
667 static void
668 ctl_arena_clear(ctl_arena_t *ctl_arena) {
669         ctl_arena->nthreads = 0;
670         ctl_arena->dss = dss_prec_names[dss_prec_limit];
671         ctl_arena->dirty_decay_ms = -1;
672         ctl_arena->muzzy_decay_ms = -1;
673         ctl_arena->pactive = 0;
674         ctl_arena->pdirty = 0;
675         ctl_arena->pmuzzy = 0;
676         if (config_stats) {
677                 memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t));
678                 ctl_arena->astats->allocated_small = 0;
679                 ctl_arena->astats->nmalloc_small = 0;
680                 ctl_arena->astats->ndalloc_small = 0;
681                 ctl_arena->astats->nrequests_small = 0;
682                 memset(ctl_arena->astats->bstats, 0, NBINS *
683                     sizeof(malloc_bin_stats_t));
684                 memset(ctl_arena->astats->lstats, 0, (NSIZES - NBINS) *
685                     sizeof(malloc_large_stats_t));
686         }
687 }
688
689 static void
690 ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
691         unsigned i;
692
693         if (config_stats) {
694                 arena_stats_merge(tsdn, arena, &ctl_arena->nthreads,
695                     &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
696                     &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
697                     &ctl_arena->pdirty, &ctl_arena->pmuzzy,
698                     &ctl_arena->astats->astats, ctl_arena->astats->bstats,
699                     ctl_arena->astats->lstats);
700
701                 for (i = 0; i < NBINS; i++) {
702                         ctl_arena->astats->allocated_small +=
703                             ctl_arena->astats->bstats[i].curregs *
704                             sz_index2size(i);
705                         ctl_arena->astats->nmalloc_small +=
706                             ctl_arena->astats->bstats[i].nmalloc;
707                         ctl_arena->astats->ndalloc_small +=
708                             ctl_arena->astats->bstats[i].ndalloc;
709                         ctl_arena->astats->nrequests_small +=
710                             ctl_arena->astats->bstats[i].nrequests;
711                 }
712         } else {
713                 arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
714                     &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
715                     &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
716                     &ctl_arena->pdirty, &ctl_arena->pmuzzy);
717         }
718 }
719
/*
 * Merge the stats cached in ctl_arena into the summary pseudo-arena
 * ctl_sdarena (e.g. MALLCTL_ARENAS_ALL).  When destroyed is true the source
 * arena has been destroyed: cumulative counters are still folded in, but
 * "current" gauges (nthreads, pactive, curregs, curslabs, curlextents, ...)
 * must already be zero and are asserted rather than accumulated.
 */
static void
ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
    bool destroyed) {
	unsigned i;

	if (!destroyed) {
		ctl_sdarena->nthreads += ctl_arena->nthreads;
		ctl_sdarena->pactive += ctl_arena->pactive;
		ctl_sdarena->pdirty += ctl_arena->pdirty;
		ctl_sdarena->pmuzzy += ctl_arena->pmuzzy;
	} else {
		assert(ctl_arena->nthreads == 0);
		assert(ctl_arena->pactive == 0);
		assert(ctl_arena->pdirty == 0);
		assert(ctl_arena->pmuzzy == 0);
	}

	if (config_stats) {
		ctl_arena_stats_t *sdstats = ctl_sdarena->astats;
		ctl_arena_stats_t *astats = ctl_arena->astats;

		/* mapped/retained are gauges; skip them for destroyed arenas. */
		if (!destroyed) {
			accum_atomic_zu(&sdstats->astats.mapped,
			    &astats->astats.mapped);
			accum_atomic_zu(&sdstats->astats.retained,
			    &astats->astats.retained);
		}

		/* Dirty/muzzy decay counters are cumulative; always merge. */
		accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
		    &astats->astats.decay_dirty.npurge);
		accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
		    &astats->astats.decay_dirty.nmadvise);
		accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
		    &astats->astats.decay_dirty.purged);

		accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
		    &astats->astats.decay_muzzy.npurge);
		accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
		    &astats->astats.decay_muzzy.nmadvise);
		accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
		    &astats->astats.decay_muzzy.purged);

/*
 * Expand one malloc_mutex_prof_merge() call per arena mutex listed in
 * MUTEX_PROF_ARENA_MUTEXES.
 */
#define OP(mtx) malloc_mutex_prof_merge(				\
		    &(sdstats->astats.mutex_prof_data[			\
			arena_prof_mutex_##mtx]),			\
		    &(astats->astats.mutex_prof_data[			\
			arena_prof_mutex_##mtx]));
MUTEX_PROF_ARENA_MUTEXES
#undef OP
		if (!destroyed) {
			accum_atomic_zu(&sdstats->astats.base,
			    &astats->astats.base);
			accum_atomic_zu(&sdstats->astats.internal,
			    &astats->astats.internal);
			accum_atomic_zu(&sdstats->astats.resident,
			    &astats->astats.resident);
		} else {
			assert(atomic_load_zu(
			    &astats->astats.internal, ATOMIC_RELAXED) == 0);
		}

		if (!destroyed) {
			sdstats->allocated_small += astats->allocated_small;
		} else {
			assert(astats->allocated_small == 0);
		}
		sdstats->nmalloc_small += astats->nmalloc_small;
		sdstats->ndalloc_small += astats->ndalloc_small;
		sdstats->nrequests_small += astats->nrequests_small;

		if (!destroyed) {
			accum_atomic_zu(&sdstats->astats.allocated_large,
			    &astats->astats.allocated_large);
		} else {
			assert(atomic_load_zu(&astats->astats.allocated_large,
			    ATOMIC_RELAXED) == 0);
		}
		accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
		    &astats->astats.nmalloc_large);
		accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
		    &astats->astats.ndalloc_large);
		accum_arena_stats_u64(&sdstats->astats.nrequests_large,
		    &astats->astats.nrequests_large);

		accum_atomic_zu(&sdstats->astats.tcache_bytes,
		    &astats->astats.tcache_bytes);

		/* Arena 0's uptime stands in for the summary's uptime. */
		if (ctl_arena->arena_ind == 0) {
			sdstats->astats.uptime = astats->astats.uptime;
		}

		/* Per-size-class small (bin) stats. */
		for (i = 0; i < NBINS; i++) {
			sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
			sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
			sdstats->bstats[i].nrequests +=
			    astats->bstats[i].nrequests;
			if (!destroyed) {
				sdstats->bstats[i].curregs +=
				    astats->bstats[i].curregs;
			} else {
				assert(astats->bstats[i].curregs == 0);
			}
			sdstats->bstats[i].nfills += astats->bstats[i].nfills;
			sdstats->bstats[i].nflushes +=
			    astats->bstats[i].nflushes;
			sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
			sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
			if (!destroyed) {
				sdstats->bstats[i].curslabs +=
				    astats->bstats[i].curslabs;
			} else {
				assert(astats->bstats[i].curslabs == 0);
			}
			malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
			    &astats->bstats[i].mutex_data);
		}

		/* Per-size-class large extent stats. */
		for (i = 0; i < NSIZES - NBINS; i++) {
			accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
			    &astats->lstats[i].nmalloc);
			accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
			    &astats->lstats[i].ndalloc);
			accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
			    &astats->lstats[i].nrequests);
			if (!destroyed) {
				sdstats->lstats[i].curlextents +=
				    astats->lstats[i].curlextents;
			} else {
				assert(astats->lstats[i].curlextents == 0);
			}
		}
	}
}
853
854 static void
855 ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
856     unsigned i, bool destroyed) {
857         ctl_arena_t *ctl_arena = arenas_i(i);
858
859         ctl_arena_clear(ctl_arena);
860         ctl_arena_stats_amerge(tsdn, ctl_arena, arena);
861         /* Merge into sum stats as well. */
862         ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed);
863 }
864
865 static unsigned
866 ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
867         unsigned arena_ind;
868         ctl_arena_t *ctl_arena;
869
870         if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) !=
871             NULL) {
872                 ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
873                 arena_ind = ctl_arena->arena_ind;
874         } else {
875                 arena_ind = ctl_arenas->narenas;
876         }
877
878         /* Trigger stats allocation. */
879         if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) {
880                 return UINT_MAX;
881         }
882
883         /* Initialize new arena. */
884         if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) {
885                 return UINT_MAX;
886         }
887
888         if (arena_ind == ctl_arenas->narenas) {
889                 ctl_arenas->narenas++;
890         }
891
892         return arena_ind;
893 }
894
895 static void
896 ctl_background_thread_stats_read(tsdn_t *tsdn) {
897         background_thread_stats_t *stats = &ctl_stats->background_thread;
898         if (!have_background_thread ||
899             background_thread_stats_read(tsdn, stats)) {
900                 memset(stats, 0, sizeof(background_thread_stats_t));
901                 nstime_init(&stats->run_interval, 0);
902         }
903 }
904
/*
 * Re-read stats for every arena, rebuild the MALLCTL_ARENAS_ALL summary,
 * and recompute the global totals in ctl_stats.  Caller must hold ctl_mtx.
 * Bumps ctl_arenas->epoch so readers can detect staleness.
 */
static void
ctl_refresh(tsdn_t *tsdn) {
	unsigned i;
	ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
	VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);

	/*
	 * Clear sum stats, since they will be merged into by
	 * ctl_arena_refresh().
	 */
	ctl_arena_clear(ctl_sarena);

	for (i = 0; i < ctl_arenas->narenas; i++) {
		tarenas[i] = arena_get(tsdn, i, false);
	}

	for (i = 0; i < ctl_arenas->narenas; i++) {
		ctl_arena_t *ctl_arena = arenas_i(i);
		bool initialized = (tarenas[i] != NULL);

		ctl_arena->initialized = initialized;
		if (initialized) {
			ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i,
			    false);
		}
	}

	if (config_stats) {
		/* Global totals derive from the summary pseudo-arena. */
		ctl_stats->allocated = ctl_sarena->astats->allocated_small +
		    atomic_load_zu(&ctl_sarena->astats->astats.allocated_large,
			ATOMIC_RELAXED);
		ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
		/* metadata == base allocator usage + internal allocations. */
		ctl_stats->metadata = atomic_load_zu(
		    &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) +
		    atomic_load_zu(&ctl_sarena->astats->astats.internal,
			ATOMIC_RELAXED);
		ctl_stats->resident = atomic_load_zu(
		    &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
		ctl_stats->mapped = atomic_load_zu(
		    &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED);
		ctl_stats->retained = atomic_load_zu(
		    &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED);

		ctl_background_thread_stats_read(tsdn);

/* Read one global mutex's prof data under that mutex's own lock. */
#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx)				\
    malloc_mutex_lock(tsdn, &mtx);					\
    malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \
    malloc_mutex_unlock(tsdn, &mtx);

		if (config_prof && opt_prof) {
			READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof,
			    bt2gctx_mtx);
		}
		if (have_background_thread) {
			READ_GLOBAL_MUTEX_PROF_DATA(
			    global_prof_mutex_background_thread,
			    background_thread_lock);
		} else {
			memset(&ctl_stats->mutex_prof_data[
			    global_prof_mutex_background_thread], 0,
			    sizeof(mutex_prof_data_t));
		}
		/* We own ctl mutex already. */
		malloc_mutex_prof_read(tsdn,
		    &ctl_stats->mutex_prof_data[global_prof_mutex_ctl],
		    &ctl_mtx);
#undef READ_GLOBAL_MUTEX_PROF_DATA
	}
	ctl_arenas->epoch++;
}
976
/*
 * Lazily initialize the mallctl subsystem under ctl_mtx: allocate the
 * ctl_arenas/ctl_stats containers, create the MALLCTL_ARENAS_ALL summary
 * and MALLCTL_ARENAS_DESTROYED pseudo-arenas, pre-allocate per-arena stats
 * for all current arenas, and take an initial stats snapshot.
 * Returns true on error (OOM), false on success.
 */
static bool
ctl_init(tsd_t *tsd) {
	bool ret;
	tsdn_t *tsdn = tsd_tsdn(tsd);

	malloc_mutex_lock(tsdn, &ctl_mtx);
	if (!ctl_initialized) {
		ctl_arena_t *ctl_sarena, *ctl_darena;
		unsigned i;

		/*
		 * Allocate demand-zeroed space for pointers to the full
		 * range of supported arena indices.
		 */
		if (ctl_arenas == NULL) {
			ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn,
			    b0get(), sizeof(ctl_arenas_t), QUANTUM);
			if (ctl_arenas == NULL) {
				ret = true;
				goto label_return;
			}
		}

		if (config_stats && ctl_stats == NULL) {
			ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(),
			    sizeof(ctl_stats_t), QUANTUM);
			if (ctl_stats == NULL) {
				ret = true;
				goto label_return;
			}
		}

		/*
		 * Allocate space for the current full range of arenas
		 * here rather than doing it lazily elsewhere, in order
		 * to limit when OOM-caused errors can occur.
		 */
		if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false,
		    true)) == NULL) {
			ret = true;
			goto label_return;
		}
		ctl_sarena->initialized = true;

		if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED,
		    false, true)) == NULL) {
			ret = true;
			goto label_return;
		}
		ctl_arena_clear(ctl_darena);
		/*
		 * Don't toggle ctl_darena to initialized until an arena is
		 * actually destroyed, so that arena.<i>.initialized can be used
		 * to query whether the stats are relevant.
		 */

		ctl_arenas->narenas = narenas_total_get();
		for (i = 0; i < ctl_arenas->narenas; i++) {
			if (arenas_i_impl(tsd, i, false, true) == NULL) {
				ret = true;
				goto label_return;
			}
		}

		ql_new(&ctl_arenas->destroyed);
		ctl_refresh(tsdn);

		ctl_initialized = true;
	}

	ret = false;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return ret;
}
1052
/*
 * Translate a dotted mallctl name (e.g. "arenas.narenas") into a MIB.
 * On input *depthp is the capacity of mibp (and of nodesp, if non-NULL);
 * on success it is set to the depth actually consumed.  nodesp, when
 * non-NULL, receives the node visited at each level.  Returns 0 on
 * success, ENOENT if the name does not resolve to a terminal node.
 */
static int
ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
    size_t *mibp, size_t *depthp) {
	int ret;
	const char *elm, *tdot, *dot;
	size_t elen, i, j;
	const ctl_named_node_t *node;

	elm = name;
	/* Equivalent to strchrnul(). */
	dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
	elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	if (elen == 0) {
		/* Empty name component (e.g. leading dot or empty name). */
		ret = ENOENT;
		goto label_return;
	}
	node = super_root_node;
	for (i = 0; i < *depthp; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			const ctl_named_node_t *pnode = node;

			/* Children are named. */
			for (j = 0; j < node->nchildren; j++) {
				const ctl_named_node_t *child =
				    ctl_named_children(node, j);
				if (strlen(child->name) == elen &&
				    strncmp(elm, child->name, elen) == 0) {
					node = child;
					if (nodesp != NULL) {
						nodesp[i] =
						    (const ctl_node_t *)node;
					}
					mibp[i] = j;
					break;
				}
			}
			/* node unchanged means no child matched. */
			if (node == pnode) {
				ret = ENOENT;
				goto label_return;
			}
		} else {
			uintmax_t index;
			const ctl_indexed_node_t *inode;

			/* Children are indexed. */
			index = malloc_strtoumax(elm, NULL, 10);
			if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
				ret = ENOENT;
				goto label_return;
			}

			inode = ctl_indexed_node(node->children);
			node = inode->index(tsdn, mibp, *depthp, (size_t)index);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}

			if (nodesp != NULL) {
				nodesp[i] = (const ctl_node_t *)node;
			}
			mibp[i] = (size_t)index;
		}

		if (node->ctl != NULL) {
			/* Terminal node. */
			if (*dot != '\0') {
				/*
				 * The name contains more elements than are
				 * in this path through the tree.
				 */
				ret = ENOENT;
				goto label_return;
			}
			/* Complete lookup successful. */
			*depthp = i + 1;
			break;
		}

		/* Update elm. */
		if (*dot == '\0') {
			/* No more elements. */
			ret = ENOENT;
			goto label_return;
		}
		elm = &dot[1];
		dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
		    strchr(elm, '\0');
		elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	}

	ret = 0;
label_return:
	return ret;
}
1150
1151 int
1152 ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
1153     void *newp, size_t newlen) {
1154         int ret;
1155         size_t depth;
1156         ctl_node_t const *nodes[CTL_MAX_DEPTH];
1157         size_t mib[CTL_MAX_DEPTH];
1158         const ctl_named_node_t *node;
1159
1160         if (!ctl_initialized && ctl_init(tsd)) {
1161                 ret = EAGAIN;
1162                 goto label_return;
1163         }
1164
1165         depth = CTL_MAX_DEPTH;
1166         ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
1167         if (ret != 0) {
1168                 goto label_return;
1169         }
1170
1171         node = ctl_named_node(nodes[depth-1]);
1172         if (node != NULL && node->ctl) {
1173                 ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
1174         } else {
1175                 /* The name refers to a partial path through the ctl tree. */
1176                 ret = ENOENT;
1177         }
1178
1179 label_return:
1180         return(ret);
1181 }
1182
1183 int
1184 ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
1185         int ret;
1186
1187         if (!ctl_initialized && ctl_init(tsd)) {
1188                 ret = EAGAIN;
1189                 goto label_return;
1190         }
1191
1192         ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp);
1193 label_return:
1194         return(ret);
1195 }
1196
1197 int
1198 ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
1199     size_t *oldlenp, void *newp, size_t newlen) {
1200         int ret;
1201         const ctl_named_node_t *node;
1202         size_t i;
1203
1204         if (!ctl_initialized && ctl_init(tsd)) {
1205                 ret = EAGAIN;
1206                 goto label_return;
1207         }
1208
1209         /* Iterate down the tree. */
1210         node = super_root_node;
1211         for (i = 0; i < miblen; i++) {
1212                 assert(node);
1213                 assert(node->nchildren > 0);
1214                 if (ctl_named_node(node->children) != NULL) {
1215                         /* Children are named. */
1216                         if (node->nchildren <= mib[i]) {
1217                                 ret = ENOENT;
1218                                 goto label_return;
1219                         }
1220                         node = ctl_named_children(node, mib[i]);
1221                 } else {
1222                         const ctl_indexed_node_t *inode;
1223
1224                         /* Indexed element. */
1225                         inode = ctl_indexed_node(node->children);
1226                         node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
1227                         if (node == NULL) {
1228                                 ret = ENOENT;
1229                                 goto label_return;
1230                         }
1231                 }
1232         }
1233
1234         /* Call the ctl function. */
1235         if (node && node->ctl) {
1236                 ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
1237         } else {
1238                 /* Partial MIB. */
1239                 ret = ENOENT;
1240         }
1241
1242 label_return:
1243         return(ret);
1244 }
1245
1246 bool
1247 ctl_boot(void) {
1248         if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
1249             malloc_mutex_rank_exclusive)) {
1250                 return true;
1251         }
1252
1253         ctl_initialized = false;
1254
1255         return false;
1256 }
1257
/* Acquire ctl_mtx in preparation for fork(2). */
void
ctl_prefork(tsdn_t *tsdn) {
	malloc_mutex_prefork(tsdn, &ctl_mtx);
}
1262
/* Release ctl_mtx in the parent after fork(2). */
void
ctl_postfork_parent(tsdn_t *tsdn) {
	malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
}
1267
/* Reinitialize/release ctl_mtx in the child after fork(2). */
void
ctl_postfork_child(tsdn_t *tsdn) {
	malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}
1272
1273 /******************************************************************************/
1274 /* *_ctl() functions. */
1275
/*
 * Guard/copy macros shared by the *_ctl() handlers below.  Each assumes the
 * enclosing function declares `int ret`, a `label_return` label, and the
 * standard (oldp, oldlenp, newp, newlen) parameters.
 */

/* Fail with EPERM if the caller attempted a write (newp/newlen set). */
#define READONLY()	do {						\
	if (newp != NULL || newlen != 0) {				\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

/* Fail with EPERM if the caller attempted a read (oldp/oldlenp set). */
#define WRITEONLY()	do {						\
	if (oldp != NULL || oldlenp != NULL) {				\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

/* Fail with EPERM if the caller attempted both a read and a write. */
#define READ_XOR_WRITE()	do {					\
	if ((oldp != NULL && oldlenp != NULL) && (newp != NULL ||	\
	    newlen != 0)) {						\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

/*
 * Copy value v (of type t) out to the caller.  On a size mismatch, copy as
 * many bytes as fit and fail with EINVAL.
 */
#define READ(v, t)	do {						\
	if (oldp != NULL && oldlenp != NULL) {				\
		if (*oldlenp != sizeof(t)) {				\
			size_t	copylen = (sizeof(t) <= *oldlenp)	\
			    ? sizeof(t) : *oldlenp;			\
			memcpy(oldp, (void *)&(v), copylen);		\
			ret = EINVAL;					\
			goto label_return;				\
		}							\
		*(t *)oldp = (v);					\
	}								\
} while (0)

/* Copy the caller-provided new value into v; EINVAL on size mismatch. */
#define WRITE(v, t)	do {						\
	if (newp != NULL) {						\
		if (newlen != sizeof(t)) {				\
			ret = EINVAL;					\
			goto label_return;				\
		}							\
		(v) = *(t *)newp;					\
	}								\
} while (0)

/* Extract MIB component i into unsigned v; EFAULT if it doesn't fit. */
#define MIB_UNSIGNED(v, i) do {						\
	if (mib[i] > UINT_MAX) {					\
		ret = EFAULT;						\
		goto label_return;					\
	}								\
	v = (unsigned)mib[i];						\
} while (0)
1328
/*
 * There's a lot of code duplication in the following macros due to limitations
 * in how nested cpp macros are expanded.
 */
/*
 * Generate read-only handler n##_ctl, compiled out (ENOENT) unless condition
 * c holds, taking ctl_mtx around the read only when l is true.  Reads value
 * expression v as type t.
 */
#define CTL_RO_CLGEN(c, l, n, v, t)					\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen) {			\
	int ret;							\
	t oldval;							\
									\
	if (!(c)) {							\
		return ENOENT;						\
	}								\
	if (l) {							\
		malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);		\
	}								\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	if (l) {							\
		malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);		\
	}								\
	return ret;							\
}
1357
/*
 * Generate read-only handler n##_ctl, available only when condition c holds,
 * always reading value expression v (as type t) under ctl_mtx.
 */
#define CTL_RO_CGEN(c, n, v, t)						\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen) {			\
	int ret;							\
	t oldval;							\
									\
	if (!(c)) {							\
		return ENOENT;						\
	}								\
	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);			\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);			\
	return ret;							\
}
1378
/*
 * Generate an unconditional read-only handler n##_ctl, reading value
 * expression v (as type t) under ctl_mtx.
 */
#define CTL_RO_GEN(n, v, t)						\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen) {			\
	int ret;							\
	t oldval;							\
									\
	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);			\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);			\
	return ret;							\
}
1396
/*
 * ctl_mtx is not acquired, under the assumption that no pertinent data will
 * mutate during the call.
 */
/*
 * Lock-free variant of CTL_RO_CGEN: available only when condition c holds,
 * reading value expression v as type t without taking ctl_mtx.
 */
#define CTL_RO_NL_CGEN(c, n, v, t)					\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen) {			\
	int ret;							\
	t oldval;							\
									\
	if (!(c)) {							\
		return ENOENT;						\
	}								\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	return ret;							\
}
1419
/*
 * CTL_RO_NL_GEN(n, v, t): unconditional variant of CTL_RO_NL_CGEN -- a
 * read-only handler that copies expression v out as type t, without taking
 * ctl_mtx.
 */
#define CTL_RO_NL_GEN(n, v, t)						\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen) {			\
	int ret;							\
	t oldval;							\
									\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	return ret;							\
}
1435
/*
 * CTL_TSD_RO_NL_CGEN(c, n, m, t): like CTL_RO_NL_CGEN, except the value is
 * obtained by calling accessor m(tsd) rather than by evaluating a plain
 * expression (used for per-thread TSD-backed statistics).
 */
#define CTL_TSD_RO_NL_CGEN(c, n, m, t)					\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen) {			\
	int ret;							\
	t oldval;							\
									\
	if (!(c)) {							\
		return ENOENT;						\
	}								\
	READONLY();							\
	oldval = (m(tsd));						\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	return ret;							\
}
1454
/*
 * CTL_RO_CONFIG_GEN(n, t): read-only handler exposing the compile-time
 * configuration constant n itself (e.g. config_stats) as type t.
 */
#define CTL_RO_CONFIG_GEN(n, t)						\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen) {			\
	int ret;							\
	t oldval;							\
									\
	READONLY();							\
	oldval = n;							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	return ret;							\
}
1470
1471 /******************************************************************************/
1472
/* mallctl "version": the jemalloc version string. */
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
1474
/*
 * mallctl "epoch": writing any uint64_t value refreshes the cached statistics
 * snapshot via ctl_refresh(); reading returns the current epoch counter.
 * ctl_mtx protects ctl_arenas/ctl_stats for the duration of the refresh.
 */
static int
epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	UNUSED uint64_t newval;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	/* The written value is ignored; only the act of writing matters. */
	WRITE(newval, uint64_t);
	if (newp != NULL) {
		ctl_refresh(tsd_tsdn(tsd));
	}
	READ(ctl_arenas->epoch, uint64_t);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
1493
/*
 * mallctl "background_thread": read and/or toggle the global background
 * thread state.  Both ctl_mtx and background_thread_lock are held across the
 * state transition; lock order is ctl_mtx before background_thread_lock.
 */
static int
background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	bool oldval;

	if (!have_background_thread) {
		return ENOENT;
	}
	background_thread_ctl_init(tsd_tsdn(tsd));

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
	if (newp == NULL) {
		/* Pure read: report the current enabled state. */
		oldval = background_thread_enabled();
		READ(oldval, bool);
	} else {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = background_thread_enabled();
		READ(oldval, bool);

		bool newval = *(bool *)newp;
		if (newval == oldval) {
			/* No state change requested; nothing to do. */
			ret = 0;
			goto label_return;
		}

		background_thread_enabled_set(tsd_tsdn(tsd), newval);
		if (newval) {
			/*
			 * Enabling requires that pthread_create was resolved
			 * via dlsym(RTLD_NEXT, ...) at startup.
			 */
			if (!can_enable_background_thread) {
				malloc_printf("<jemalloc>: Error in dlsym("
				    "RTLD_NEXT, \"pthread_create\"). Cannot "
				    "enable background_thread\n");
				ret = EFAULT;
				goto label_return;
			}
			if (background_threads_enable(tsd)) {
				ret = EFAULT;
				goto label_return;
			}
		} else {
			if (background_threads_disable(tsd)) {
				ret = EFAULT;
				goto label_return;
			}
		}
	}
	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);

	return ret;
}
1551
1552 /******************************************************************************/
1553
/* mallctl "config.*": read-only compile-time configuration flags. */
CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
CTL_RO_CONFIG_GEN(config_debug, bool)
CTL_RO_CONFIG_GEN(config_fill, bool)
CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
CTL_RO_CONFIG_GEN(config_prof, bool)
CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
CTL_RO_CONFIG_GEN(config_stats, bool)
CTL_RO_CONFIG_GEN(config_thp, bool)
CTL_RO_CONFIG_GEN(config_utrace, bool)
CTL_RO_CONFIG_GEN(config_xmalloc, bool)
1566
1567 /******************************************************************************/
1568
/*
 * mallctl "opt.*": read-only views of run-time option values.  Options tied
 * to an optional feature (fill, utrace, xmalloc, prof) use the conditional
 * generator and return ENOENT when the feature is compiled out.
 */
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
    const char *)
CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
    opt_prof_thread_active_init, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
1598
1599 /******************************************************************************/
1600
/*
 * mallctl "thread.arena": read the calling thread's arena index, and/or
 * migrate the thread (and its tcache) to another arena.  Migration to an
 * auto-managed arena is rejected when per-CPU arenas are active.
 */
static int
thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	arena_t *oldarena;
	unsigned newind, oldind;

	oldarena = arena_choose(tsd, NULL);
	if (oldarena == NULL) {
		return EAGAIN;
	}
	newind = oldind = arena_ind_get(oldarena);
	WRITE(newind, unsigned);
	READ(oldind, unsigned);

	if (newind != oldind) {
		arena_t *newarena;

		if (newind >= narenas_total_get()) {
			/* New arena index is out of range. */
			ret = EFAULT;
			goto label_return;
		}

		if (have_percpu_arena &&
		    PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
			if (newind < percpu_arena_ind_limit(opt_percpu_arena)) {
				/*
				 * If perCPU arena is enabled, thread_arena
				 * control is not allowed for the auto arena
				 * range.
				 */
				ret = EPERM;
				goto label_return;
			}
		}

		/* Initialize arena if necessary. */
		newarena = arena_get(tsd_tsdn(tsd), newind, true);
		if (newarena == NULL) {
			ret = EAGAIN;
			goto label_return;
		}
		/* Set new arena/tcache associations. */
		arena_migrate(tsd, oldind, newind);
		if (tcache_available(tsd)) {
			tcache_arena_reassociate(tsd_tsdn(tsd),
			    tsd_tcachep_get(tsd), newarena);
		}
	}

	ret = 0;
label_return:
	return ret;
}
1656
/*
 * mallctl "thread.allocated{,p}" / "thread.deallocated{,p}": per-thread
 * byte counters read from TSD; the *p variants expose a pointer to the
 * counter itself.  Only available when stats are compiled in.
 */
CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
    uint64_t)
CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
    uint64_t *)
CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
    uint64_t)
CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
    tsd_thread_deallocatedp_get, uint64_t *)
1665
/*
 * mallctl "thread.tcache.enabled": read and/or set whether the calling
 * thread's tcache is enabled.  On write, the previous state is still
 * reported through *oldp.
 */
static int
thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	bool oldval;

	oldval = tcache_enabled_get(tsd);
	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		tcache_enabled_set(tsd, *(bool *)newp);
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return ret;
}
1686
/*
 * mallctl "thread.tcache.flush": void-typed control that flushes the calling
 * thread's tcache.  READONLY()+WRITEONLY() together enforce that neither old
 * nor new data is supplied.  EFAULT if the thread has no tcache available.
 */
static int
thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	if (!tcache_available(tsd)) {
		ret = EFAULT;
		goto label_return;
	}

	READONLY();
	WRITEONLY();

	tcache_flush(tsd);

	ret = 0;
label_return:
	return ret;
}
1706
/*
 * mallctl "thread.prof.name": read or set the calling thread's profiling
 * name.  Read and write are mutually exclusive (READ_XOR_WRITE).  ENOENT
 * when profiling is compiled out.
 */
static int
thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	if (!config_prof) {
		return ENOENT;
	}

	READ_XOR_WRITE();

	if (newp != NULL) {
		if (newlen != sizeof(const char *)) {
			ret = EINVAL;
			goto label_return;
		}

		/* prof_thread_name_set() returns an errno-style code. */
		if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
		    0) {
			goto label_return;
		}
	} else {
		const char *oldname = prof_thread_name_get(tsd);
		READ(oldname, const char *);
	}

	ret = 0;
label_return:
	return ret;
}
1737
/*
 * mallctl "thread.prof.active": read and/or toggle profiling sampling for the
 * calling thread.  ENOENT when profiling is compiled out.
 */
static int
thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	bool oldval;

	if (!config_prof) {
		return ENOENT;
	}

	oldval = prof_thread_active_get(tsd);
	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		if (prof_thread_active_set(tsd, *(bool *)newp)) {
			ret = EAGAIN;
			goto label_return;
		}
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return ret;
}
1765
1766 /******************************************************************************/
1767
/*
 * mallctl "tcache.create": create an explicit tcache and return its index
 * through *oldp.  EFAULT on allocation failure.
 */
static int
tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned tcache_ind;

	READONLY();
	if (tcaches_create(tsd, &tcache_ind)) {
		ret = EFAULT;
		goto label_return;
	}
	READ(tcache_ind, unsigned);

	ret = 0;
label_return:
	return ret;
}
1785
/*
 * mallctl "tcache.flush": flush the explicit tcache whose index is written
 * via *newp.  UINT_MAX serves as the "no index written" sentinel, yielding
 * EFAULT.
 */
static int
tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned tcache_ind;

	WRITEONLY();
	tcache_ind = UINT_MAX;
	WRITE(tcache_ind, unsigned);
	if (tcache_ind == UINT_MAX) {
		ret = EFAULT;
		goto label_return;
	}
	tcaches_flush(tsd, tcache_ind);

	ret = 0;
label_return:
	return ret;
}
1805
/*
 * mallctl "tcache.destroy": destroy the explicit tcache whose index is
 * written via *newp.  Mirrors tcache_flush_ctl(), including the UINT_MAX
 * "no index written" sentinel.
 */
static int
tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned tcache_ind;

	WRITEONLY();
	tcache_ind = UINT_MAX;
	WRITE(tcache_ind, unsigned);
	if (tcache_ind == UINT_MAX) {
		ret = EFAULT;
		goto label_return;
	}
	tcaches_destroy(tsd, tcache_ind);

	ret = 0;
label_return:
	return ret;
}
1825
1826 /******************************************************************************/
1827
/*
 * mallctl "arena.<i>.initialized": report whether arena <i> has been
 * initialized.  The arena index is extracted from mib position 1; ctl_mtx
 * guards the ctl_arenas lookup.
 */
static int
arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	tsdn_t *tsdn = tsd_tsdn(tsd);
	unsigned arena_ind;
	bool initialized;

	READONLY();
	MIB_UNSIGNED(arena_ind, 1);

	malloc_mutex_lock(tsdn, &ctl_mtx);
	initialized = arenas_i(arena_ind)->initialized;
	malloc_mutex_unlock(tsdn, &ctl_mtx);

	READ(initialized, bool);

	ret = 0;
label_return:
	return ret;
}
1849
/*
 * Decay (release unused pages of) a single arena, or every arena when
 * arena_ind is MALLCTL_ARENAS_ALL (or the deprecated index narenas).
 * all == true forces full purge semantics; false applies normal decay.
 * ctl_mtx is dropped before the (potentially slow) decay work.
 */
static void
arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) {
	malloc_mutex_lock(tsdn, &ctl_mtx);
	{
		unsigned narenas = ctl_arenas->narenas;

		/*
		 * Access via index narenas is deprecated, and scheduled for
		 * removal in 6.0.0.
		 */
		if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) {
			unsigned i;
			VARIABLE_ARRAY(arena_t *, tarenas, narenas);

			for (i = 0; i < narenas; i++) {
				tarenas[i] = arena_get(tsdn, i, false);
			}

			/*
			 * No further need to hold ctl_mtx, since narenas and
			 * tarenas contain everything needed below.
			 */
			malloc_mutex_unlock(tsdn, &ctl_mtx);

			for (i = 0; i < narenas; i++) {
				if (tarenas[i] != NULL) {
					arena_decay(tsdn, tarenas[i], false,
					    all);
				}
			}
		} else {
			arena_t *tarena;

			assert(arena_ind < narenas);

			tarena = arena_get(tsdn, arena_ind, false);

			/* No further need to hold ctl_mtx. */
			malloc_mutex_unlock(tsdn, &ctl_mtx);

			if (tarena != NULL) {
				arena_decay(tsdn, tarena, false, all);
			}
		}
	}
}
1896
/*
 * mallctl "arena.<i>.decay": void-typed trigger for normal decay of arena
 * <i> (or all arenas via MALLCTL_ARENAS_ALL).
 */
static int
arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;

	READONLY();
	WRITEONLY();
	MIB_UNSIGNED(arena_ind, 1);
	arena_i_decay(tsd_tsdn(tsd), arena_ind, false);

	ret = 0;
label_return:
	return ret;
}
1912
/*
 * mallctl "arena.<i>.purge": void-typed trigger that forces decay of all
 * unused pages in arena <i> (all == true), unlike arena_i_decay_ctl().
 */
static int
arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;

	READONLY();
	WRITEONLY();
	MIB_UNSIGNED(arena_ind, 1);
	arena_i_decay(tsd_tsdn(tsd), arena_ind, true);

	ret = 0;
label_return:
	return ret;
}
1928
/*
 * Shared validation for arena.<i>.{reset,destroy}: extract the arena index
 * from the mib and look up the arena, rejecting (EFAULT) nonexistent arenas
 * and auto-managed arenas, which may not be reset or destroyed.
 */
static int
arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind,
    arena_t **arena) {
	int ret;

	READONLY();
	WRITEONLY();
	MIB_UNSIGNED(*arena_ind, 1);

	*arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
	if (*arena == NULL || arena_is_auto(*arena)) {
		ret = EFAULT;
		goto label_return;
	}

	ret = 0;
label_return:
	return ret;
}
1949
/*
 * Pause the background thread responsible for this arena before a reset.
 * NOTE: background_thread_lock is deliberately left held on return; it is
 * released by arena_reset_finish_background_thread().
 */
static void
arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) {
	/* Temporarily disable the background thread during arena reset. */
	if (have_background_thread) {
		malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
		if (background_thread_enabled()) {
			/* Background threads are sharded by CPU count. */
			unsigned ind = arena_ind % ncpus;
			background_thread_info_t *info =
			    &background_thread_info[ind];
			assert(info->state == background_thread_started);
			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
			info->state = background_thread_paused;
			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		}
	}
}
1966
/*
 * Resume the background thread paused by
 * arena_reset_prepare_background_thread(), and release background_thread_lock
 * acquired there.
 */
static void
arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) {
	if (have_background_thread) {
		if (background_thread_enabled()) {
			unsigned ind = arena_ind % ncpus;
			background_thread_info_t *info =
			    &background_thread_info[ind];
			assert(info->state == background_thread_paused);
			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
			info->state = background_thread_started;
			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
	}
}
1982
/*
 * mallctl "arena.<i>.reset": discard all allocations in arena <i>.  The
 * arena's background thread is paused for the duration of the reset.
 */
static int
arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;

	ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
	    newp, newlen, &arena_ind, &arena);
	if (ret != 0) {
		return ret;
	}

	arena_reset_prepare_background_thread(tsd, arena_ind);
	arena_reset(tsd, arena);
	arena_reset_finish_background_thread(tsd, arena_ind);

	return ret;
}
2002
/*
 * mallctl "arena.<i>.destroy": reset, purge, and destroy arena <i>.  The
 * arena's stats are merged into the MALLCTL_ARENAS_DESTROYED pseudo-arena,
 * and the index is queued for recycling by arenas.create.  EFAULT if any
 * threads are still associated with the arena.
 */
static int
arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;
	ctl_arena_t *ctl_darena, *ctl_arena;

	ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
	    newp, newlen, &arena_ind, &arena);
	if (ret != 0) {
		goto label_return;
	}

	if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena,
	    true) != 0) {
		ret = EFAULT;
		goto label_return;
	}

	arena_reset_prepare_background_thread(tsd, arena_ind);
	/* Merge stats after resetting and purging arena. */
	arena_reset(tsd, arena);
	arena_decay(tsd_tsdn(tsd), arena, false, true);
	ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED);
	ctl_darena->initialized = true;
	ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true);
	/* Destroy arena. */
	arena_destroy(tsd, arena);
	ctl_arena = arenas_i(arena_ind);
	ctl_arena->initialized = false;
	/* Record arena index for later recycling via arenas.create. */
	ql_elm_new(ctl_arena, destroyed_link);
	ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
	arena_reset_finish_background_thread(tsd, arena_ind);

	/* ret is 0 from the helper on every path that reaches here. */
	assert(ret == 0);
label_return:
	return ret;
}
2043
/*
 * mallctl "arena.<i>.dss": read and/or set the dss (sbrk) precedence for
 * arena <i>, or the global default when <i> is MALLCTL_ARENAS_ALL (or the
 * deprecated index narenas).  Values are exchanged as precedence-name
 * strings; EINVAL for an unrecognized name.
 */
static int
arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	const char *dss = NULL;
	unsigned arena_ind;
	dss_prec_t dss_prec_old = dss_prec_limit;
	dss_prec_t dss_prec = dss_prec_limit;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	WRITE(dss, const char *);
	MIB_UNSIGNED(arena_ind, 1);
	if (dss != NULL) {
		/* Translate the precedence name into a dss_prec_t. */
		int i;
		bool match = false;

		for (i = 0; i < dss_prec_limit; i++) {
			if (strcmp(dss_prec_names[i], dss) == 0) {
				dss_prec = i;
				match = true;
				break;
			}
		}

		if (!match) {
			ret = EINVAL;
			goto label_return;
		}
	}

	/*
	 * Access via index narenas is deprecated, and scheduled for removal in
	 * 6.0.0.
	 */
	if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind ==
	    ctl_arenas->narenas) {
		if (dss_prec != dss_prec_limit &&
		    extent_dss_prec_set(dss_prec)) {
			ret = EFAULT;
			goto label_return;
		}
		dss_prec_old = extent_dss_prec_get();
	} else {
		arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
		if (arena == NULL || (dss_prec != dss_prec_limit &&
		    arena_dss_prec_set(arena, dss_prec))) {
			ret = EFAULT;
			goto label_return;
		}
		dss_prec_old = arena_dss_prec_get(arena);
	}

	/* Report the previous setting as a name string. */
	dss = dss_prec_names[dss_prec_old];
	READ(dss, const char *);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
2104
2105 static int
2106 arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
2107     void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
2108         int ret;
2109         unsigned arena_ind;
2110         arena_t *arena;
2111
2112         MIB_UNSIGNED(arena_ind, 1);
2113         arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
2114         if (arena == NULL) {
2115                 ret = EFAULT;
2116                 goto label_return;
2117         }
2118
2119         if (oldp != NULL && oldlenp != NULL) {
2120                 size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) :
2121                     arena_muzzy_decay_ms_get(arena);
2122                 READ(oldval, ssize_t);
2123         }
2124         if (newp != NULL) {
2125                 if (newlen != sizeof(ssize_t)) {
2126                         ret = EINVAL;
2127                         goto label_return;
2128                 }
2129                 if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena,
2130                     *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd),
2131                     arena, *(ssize_t *)newp)) {
2132                         ret = EFAULT;
2133                         goto label_return;
2134                 }
2135         }
2136
2137         ret = 0;
2138 label_return:
2139         return ret;
2140 }
2141
2142 static int
2143 arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2144     void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2145         return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
2146             newlen, true);
2147 }
2148
2149 static int
2150 arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2151     void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2152         return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
2153             newlen, false);
2154 }
2155
/*
 * mallctl "arena.<i>.extent_hooks": read and/or replace the extent hook
 * table for arena <i>.  On write, the previous hook pointer is returned
 * through *oldp.  EFAULT for an out-of-range or uninitialized arena.
 */
static int
arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	MIB_UNSIGNED(arena_ind, 1);
	if (arena_ind < narenas_total_get() && (arena =
	    arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
		if (newp != NULL) {
			/* Swap in the new hooks, reporting the old ones. */
			extent_hooks_t *old_extent_hooks;
			extent_hooks_t *new_extent_hooks
			    JEMALLOC_CC_SILENCE_INIT(NULL);
			WRITE(new_extent_hooks, extent_hooks_t *);
			old_extent_hooks = extent_hooks_set(tsd, arena,
			    new_extent_hooks);
			READ(old_extent_hooks, extent_hooks_t *);
		} else {
			extent_hooks_t *old_extent_hooks =
			    extent_hooks_get(arena);
			READ(old_extent_hooks, extent_hooks_t *);
		}
	} else {
		ret = EFAULT;
		goto label_return;
	}
	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
2189
/*
 * Indexed-node resolver for the "arena.<i>" subtree: validate index i and
 * return the shared arena.<i> node, or NULL for an out-of-range index.  The
 * pseudo-indices MALLCTL_ARENAS_ALL/MALLCTL_ARENAS_DESTROYED are always
 * accepted, as is (deprecated) index narenas.
 */
static const ctl_named_node_t *
arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
	const ctl_named_node_t *ret;

	malloc_mutex_lock(tsdn, &ctl_mtx);
	switch (i) {
	case MALLCTL_ARENAS_ALL:
	case MALLCTL_ARENAS_DESTROYED:
		break;
	default:
		if (i > ctl_arenas->narenas) {
			ret = NULL;
			goto label_return;
		}
		break;
	}

	ret = super_arena_i_node;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return ret;
}
2212
2213 /******************************************************************************/
2214
/*
 * mallctl "arenas.narenas": read the current number of arenas.  The size of
 * *oldp must be exactly sizeof(unsigned); ctl_mtx guards ctl_arenas.
 */
static int
arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned narenas;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	READONLY();
	if (*oldlenp != sizeof(unsigned)) {
		ret = EINVAL;
		goto label_return;
	}
	narenas = ctl_arenas->narenas;
	READ(narenas, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
2235
2236 static int
2237 arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
2238     void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
2239         int ret;
2240
2241         if (oldp != NULL && oldlenp != NULL) {
2242                 size_t oldval = (dirty ? arena_dirty_decay_ms_default_get() :
2243                     arena_muzzy_decay_ms_default_get());
2244                 READ(oldval, ssize_t);
2245         }
2246         if (newp != NULL) {
2247                 if (newlen != sizeof(ssize_t)) {
2248                         ret = EINVAL;
2249                         goto label_return;
2250                 }
2251                 if (dirty ?  arena_dirty_decay_ms_default_set(*(ssize_t *)newp)
2252                     : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) {
2253                         ret = EFAULT;
2254                         goto label_return;
2255                 }
2256         }
2257
2258         ret = 0;
2259 label_return:
2260         return ret;
2261 }
2262
2263 static int
2264 arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2265     void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2266         return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
2267             newlen, true);
2268 }
2269
2270 static int
2271 arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2272     void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2273         return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
2274             newlen, false);
2275 }
2276
/*
 * Read-only, no-lock mallctl leaves describing the global size-class layout
 * and per-bin metadata (arenas.quantum, arenas.page, arenas.tcache_max,
 * arenas.nbins, arenas.nhbins, arenas.bin.<i>.{size,nregs,slab_size}).
 */
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_slab_size, arena_bin_info[mib[2]].slab_size, size_t)
2285 static const ctl_named_node_t *
2286 arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
2287         if (i > NBINS) {
2288                 return NULL;
2289         }
2290         return super_arenas_bin_i_node;
2291 }
2292
/*
 * Read-only, no-lock leaves for the large size classes: their count and the
 * usable size of class <i> (offset past the small-bin classes).
 */
CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned)
CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(NBINS+(szind_t)mib[2]),
    size_t)
2296 static const ctl_named_node_t *
2297 arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
2298     size_t i) {
2299         if (i > NSIZES - NBINS) {
2300                 return NULL;
2301         }
2302         return super_arenas_lextent_i_node;
2303 }
2304
2305 static int
2306 arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2307     size_t *oldlenp, void *newp, size_t newlen) {
2308         int ret;
2309         extent_hooks_t *extent_hooks;
2310         unsigned arena_ind;
2311
2312         malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
2313
2314         extent_hooks = (extent_hooks_t *)&extent_hooks_default;
2315         WRITE(extent_hooks, extent_hooks_t *);
2316         if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) {
2317                 ret = EAGAIN;
2318                 goto label_return;
2319         }
2320         READ(arena_ind, unsigned);
2321
2322         ret = 0;
2323 label_return:
2324         malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
2325         return ret;
2326 }
2327
2328 /******************************************************************************/
2329
2330 static int
2331 prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2332     void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2333         int ret;
2334         bool oldval;
2335
2336         if (!config_prof) {
2337                 return ENOENT;
2338         }
2339
2340         if (newp != NULL) {
2341                 if (newlen != sizeof(bool)) {
2342                         ret = EINVAL;
2343                         goto label_return;
2344                 }
2345                 oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
2346                     *(bool *)newp);
2347         } else {
2348                 oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
2349         }
2350         READ(oldval, bool);
2351
2352         ret = 0;
2353 label_return:
2354         return ret;
2355 }
2356
2357 static int
2358 prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2359     size_t *oldlenp, void *newp, size_t newlen) {
2360         int ret;
2361         bool oldval;
2362
2363         if (!config_prof) {
2364                 return ENOENT;
2365         }
2366
2367         if (newp != NULL) {
2368                 if (newlen != sizeof(bool)) {
2369                         ret = EINVAL;
2370                         goto label_return;
2371                 }
2372                 oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
2373         } else {
2374                 oldval = prof_active_get(tsd_tsdn(tsd));
2375         }
2376         READ(oldval, bool);
2377
2378         ret = 0;
2379 label_return:
2380         return ret;
2381 }
2382
2383 static int
2384 prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2385     size_t *oldlenp, void *newp, size_t newlen) {
2386         int ret;
2387         const char *filename = NULL;
2388
2389         if (!config_prof) {
2390                 return ENOENT;
2391         }
2392
2393         WRITEONLY();
2394         WRITE(filename, const char *);
2395
2396         if (prof_mdump(tsd, filename)) {
2397                 ret = EFAULT;
2398                 goto label_return;
2399         }
2400
2401         ret = 0;
2402 label_return:
2403         return ret;
2404 }
2405
2406 static int
2407 prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2408     size_t *oldlenp, void *newp, size_t newlen) {
2409         int ret;
2410         bool oldval;
2411
2412         if (!config_prof) {
2413                 return ENOENT;
2414         }
2415
2416         if (newp != NULL) {
2417                 if (newlen != sizeof(bool)) {
2418                         ret = EINVAL;
2419                         goto label_return;
2420                 }
2421                 oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
2422         } else {
2423                 oldval = prof_gdump_get(tsd_tsdn(tsd));
2424         }
2425         READ(oldval, bool);
2426
2427         ret = 0;
2428 label_return:
2429         return ret;
2430 }
2431
2432 static int
2433 prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2434     size_t *oldlenp, void *newp, size_t newlen) {
2435         int ret;
2436         size_t lg_sample = lg_prof_sample;
2437
2438         if (!config_prof) {
2439                 return ENOENT;
2440         }
2441
2442         WRITEONLY();
2443         WRITE(lg_sample, size_t);
2444         if (lg_sample >= (sizeof(uint64_t) << 3)) {
2445                 lg_sample = (sizeof(uint64_t) << 3) - 1;
2446         }
2447
2448         prof_reset(tsd, lg_sample);
2449
2450         ret = 0;
2451 label_return:
2452         return ret;
2453 }
2454
/*
 * Read-only profiling leaves, present only when config_prof: the dump
 * interval and the current lg-sample setting.
 */
CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
2457
2458 /******************************************************************************/
2459
/* Process-wide totals, served from the ctl_stats snapshot. */
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t)
CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t)
CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t)
CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t)

/* Background-thread counters from the snapshot. */
CTL_RO_CGEN(config_stats, stats_background_thread_num_threads,
    ctl_stats->background_thread.num_threads, size_t)
CTL_RO_CGEN(config_stats, stats_background_thread_num_runs,
    ctl_stats->background_thread.num_runs, uint64_t)
CTL_RO_CGEN(config_stats, stats_background_thread_run_interval,
    nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t)

/* Per-arena configuration and page counts (stats.arenas.<i>.*). */
CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms,
    ssize_t)
CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms,
    ssize_t)
CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_uptime,
    nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), uint64_t)
CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
    size_t)

/* Dirty-page decay counters. */
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
    arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_dirty.npurge),
    uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
    arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
    arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_dirty.purged),
    uint64_t)

/* Muzzy-page decay counters. */
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
    arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_muzzy.npurge),
    uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
    arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
    arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_muzzy.purged),
    uint64_t)

/* Metadata / cache footprints (relaxed atomic reads of the snapshot). */
CTL_RO_CGEN(config_stats, stats_arenas_i_base,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes,
    ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED),
    size_t)

/* Small- and large-class allocation counters. */
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
    arenas_i(mib[2])->astats->allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
    arenas_i(mib[2])->astats->nmalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
    arenas_i(mib[2])->astats->ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
    arenas_i(mib[2])->astats->nrequests_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large,
    ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
    arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.nmalloc_large),
    uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
    arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.ndalloc_large),
    uint64_t)
/*
 * stats.arenas.<i>.large.nrequests deliberately reports nmalloc_large (see
 * the original "Intentional" marker): presumably each large allocation counts
 * as exactly one request, so no separate counter is kept — confirm against
 * upstream jemalloc before relying on a distinction.
 */
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
    arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.nmalloc_large),
    uint64_t) /* Intentional. */
2545
/*
 * Lock profiling related APIs below.  RO_MUTEX_CTL_GEN(n, l) stamps out one
 * read-only stats leaf per field of the mutex-profiling record l, under the
 * name prefix stats.<n>.*.
 */
#define RO_MUTEX_CTL_GEN(n, l)                                          \
CTL_RO_CGEN(config_stats, stats_##n##_num_ops,                          \
    l.n_lock_ops, uint64_t)                                             \
CTL_RO_CGEN(config_stats, stats_##n##_num_wait,                         \
    l.n_wait_times, uint64_t)                                           \
CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq,                     \
    l.n_spin_acquired, uint64_t)                                        \
CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch,                 \
    l.n_owner_switches, uint64_t)                                       \
CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time,                  \
    nstime_ns(&l.tot_wait_time), uint64_t)                              \
CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time,                    \
    nstime_ns(&l.max_wait_time), uint64_t)                              \
CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds,                     \
    l.max_n_thds, uint32_t)

/* Global mutexes: one leaf set per entry of MUTEX_PROF_GLOBAL_MUTEXES. */
#define OP(mtx)                                                         \
    RO_MUTEX_CTL_GEN(mutexes_##mtx,                                     \
        ctl_stats->mutex_prof_data[global_prof_mutex_##mtx])
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP

/* Per arena mutexes: one leaf set per entry of MUTEX_PROF_ARENA_MUTEXES. */
#define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx,                \
    arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx])
MUTEX_PROF_ARENA_MUTEXES
#undef OP

/* tcache bin mutex */
RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex,
    arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data)
#undef RO_MUTEX_CTL_GEN
2580
2581 /* Resets all mutex stats, including global, arena and bin mutexes. */
2582 static int
2583 stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2584     void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2585         if (!config_stats) {
2586                 return ENOENT;
2587         }
2588
2589         tsdn_t *tsdn = tsd_tsdn(tsd);
2590
2591 #define MUTEX_PROF_RESET(mtx)                                           \
2592     malloc_mutex_lock(tsdn, &mtx);                                      \
2593     malloc_mutex_prof_data_reset(tsdn, &mtx);                           \
2594     malloc_mutex_unlock(tsdn, &mtx);
2595
2596         /* Global mutexes: ctl and prof. */
2597         MUTEX_PROF_RESET(ctl_mtx);
2598         if (have_background_thread) {
2599                 MUTEX_PROF_RESET(background_thread_lock);
2600         }
2601         if (config_prof && opt_prof) {
2602                 MUTEX_PROF_RESET(bt2gctx_mtx);
2603         }
2604
2605
2606         /* Per arena mutexes. */
2607         unsigned n = narenas_total_get();
2608
2609         for (unsigned i = 0; i < n; i++) {
2610                 arena_t *arena = arena_get(tsdn, i, false);
2611                 if (!arena) {
2612                         continue;
2613                 }
2614                 MUTEX_PROF_RESET(arena->large_mtx);
2615                 MUTEX_PROF_RESET(arena->extent_avail_mtx);
2616                 MUTEX_PROF_RESET(arena->extents_dirty.mtx);
2617                 MUTEX_PROF_RESET(arena->extents_muzzy.mtx);
2618                 MUTEX_PROF_RESET(arena->extents_retained.mtx);
2619                 MUTEX_PROF_RESET(arena->decay_dirty.mtx);
2620                 MUTEX_PROF_RESET(arena->decay_muzzy.mtx);
2621                 MUTEX_PROF_RESET(arena->tcache_ql_mtx);
2622                 MUTEX_PROF_RESET(arena->base->mtx);
2623
2624                 for (szind_t i = 0; i < NBINS; i++) {
2625                         arena_bin_t *bin = &arena->bins[i];
2626                         MUTEX_PROF_RESET(bin->lock);
2627                 }
2628         }
2629 #undef MUTEX_PROF_RESET
2630         return 0;
2631 }
2632
/* Per-bin counters (stats.arenas.<i>.bins.<j>.*), from the stats snapshot. */
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
    arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
    arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
    arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
    arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
    arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
    arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)
2651
2652 static const ctl_named_node_t *
2653 stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
2654     size_t j) {
2655         if (j > NBINS) {
2656                 return NULL;
2657         }
2658         return super_stats_arenas_i_bins_j_node;
2659 }
2660
/* Per-large-class counters (stats.arenas.<i>.lextents.<j>.*). */
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
    arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc),
    uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
    arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc),
    uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
    arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].nrequests),
    uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
    arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
2672
2673 static const ctl_named_node_t *
2674 stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
2675     size_t j) {
2676         if (j > NSIZES - NBINS) {
2677                 return NULL;
2678         }
2679         return super_stats_arenas_i_lextents_j_node;
2680 }
2681
2682 static const ctl_named_node_t *
2683 stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
2684         const ctl_named_node_t *ret;
2685         size_t a;
2686
2687         malloc_mutex_lock(tsdn, &ctl_mtx);
2688         a = arenas_i2a_impl(i, true, true);
2689         if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
2690                 ret = NULL;
2691                 goto label_return;
2692         }
2693
2694         ret = super_stats_arenas_i_node;
2695 label_return:
2696         malloc_mutex_unlock(tsdn, &ctl_mtx);
2697         return ret;
2698 }