#define JEMALLOC_BACKGROUND_THREAD_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"

/******************************************************************************/
/* Data. */

/* This option should be opt-in only. */
#define BACKGROUND_THREAD_DEFAULT false
/* Read-only after initialization. */
bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;

/* Used for thread creation, termination and stats. */
malloc_mutex_t background_thread_lock;
/* Indicates global state.  Atomic because decay reads this without locking. */
atomic_b_t background_thread_enabled_state;
size_t n_background_threads;
/* Thread info per-index. */
background_thread_info_t *background_thread_info;

/* False if the necessary runtime support is missing. */
bool can_enable_background_thread;

/******************************************************************************/
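/*
 * Usage sketch (illustrative only, not part of this file): background threads
 * are normally toggled at runtime via the "background_thread" mallctl, e.g.
 *
 *	bool enable = true;
 *	mallctl("background_thread", NULL, NULL, &enable, sizeof(bool));
 *
 * which ends up in background_threads_enable() below, with
 * background_thread_lock held by the ctl machinery.
 */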
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
#include <dlfcn.h>

static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
    void *(*)(void *), void *__restrict);
static pthread_once_t once_control = PTHREAD_ONCE_INIT;

static void
pthread_create_wrapper_once(void) {
#ifdef JEMALLOC_LAZY_LOCK
	isthreaded = true;
#endif
}

int
pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *__restrict arg) {
	pthread_once(&once_control, pthread_create_wrapper_once);

	return pthread_create_fptr(thread, attr, start_routine, arg);
}
#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */
#ifndef JEMALLOC_BACKGROUND_THREAD
#define NOT_REACHED { not_reached(); }
bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
bool background_threads_enable(tsd_t *tsd) NOT_REACHED
bool background_threads_disable(tsd_t *tsd) NOT_REACHED
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, size_t npages_new) NOT_REACHED
void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED
bool background_thread_stats_read(tsdn_t *tsdn,
    background_thread_stats_t *stats) NOT_REACHED
void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED
#undef NOT_REACHED
#else
static bool background_thread_enabled_at_fork;

static void
background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
	background_thread_wakeup_time_set(tsdn, info, 0);
	info->npages_to_purge_new = 0;
	if (config_stats) {
		info->tot_n_runs = 0;
		nstime_init(&info->tot_sleep_time, 0);
	}
}
static inline bool
set_current_thread_affinity(UNUSED int cpu) {
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
	cpu_set_t cpuset;
	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);
	int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
	return (ret != 0);
#else
	return false;
#endif
}
/* Threshold for determining when to wake up the background thread. */
#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024)
#define BILLION UINT64_C(1000000000)
/* Minimal sleep interval 100 ms. */
#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)
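/*
 * Illustrative arithmetic, assuming the defaults (dirty_decay_ms of 10
 * seconds and SMOOTHSTEP_NSTEPS of 200): one decay epoch lasts 10000ms /
 * 200 == 50ms, so the 100ms minimal sleep interval above covers two decay
 * epochs.  This is also why the interval computation below clamps its lower
 * bound to 2 epochs.
 */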
static inline size_t
decay_npurge_after_interval(arena_decay_t *decay, size_t interval) {
	size_t i;
	uint64_t sum = 0;
	for (i = 0; i < interval; i++) {
		sum += decay->backlog[i] * h_steps[i];
	}
	for (; i < SMOOTHSTEP_NSTEPS; i++) {
		sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]);
	}
	return (size_t)(sum >> SMOOTHSTEP_BFP);
}
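/*
 * In effect, with B[i] the decay backlog and h[i] the fixed-point smoothstep
 * coefficients (so that 1.0 == 1 << SMOOTHSTEP_BFP), the estimated number of
 * pages becoming purgeable within `interval` epochs is
 *
 *	npurge = (sum_{i <  interval} B[i] * h[i] +
 *	          sum_{i >= interval} B[i] * (h[i] - h[i - interval]))
 *	         >> SMOOTHSTEP_BFP
 *
 * The oldest entries (i < interval) will have decayed completely by then;
 * newer ones contribute only the fraction that decays in the meantime.
 */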
static uint64_t
arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
    extents_t *extents) {
	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		/* Use minimal interval if decay is contended. */
		return BACKGROUND_THREAD_MIN_INTERVAL_NS;
	}

	uint64_t interval;
	ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
	if (decay_time <= 0) {
		/* Purging is done eagerly, or is currently disabled. */
		interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
		goto label_done;
	}

	uint64_t decay_interval_ns = nstime_ns(&decay->interval);
	assert(decay_interval_ns > 0);
	size_t npages = extents_npages_get(extents);
	if (npages == 0) {
		unsigned i;
		for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
			if (decay->backlog[i] > 0) {
				break;
			}
		}
		if (i == SMOOTHSTEP_NSTEPS) {
			/* No dirty pages recorded.  Sleep indefinitely. */
			interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
			goto label_done;
		}
	}
	if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		/* Use max interval. */
		interval = decay_interval_ns * SMOOTHSTEP_NSTEPS;
		goto label_done;
	}

	size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns;
	size_t ub = SMOOTHSTEP_NSTEPS;
	/* Minimal 2 intervals to ensure reaching next epoch deadline. */
	lb = (lb < 2) ? 2 : lb;
	if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) ||
	    (lb + 2 > ub)) {
		interval = BACKGROUND_THREAD_MIN_INTERVAL_NS;
		goto label_done;
	}

	assert(lb + 2 <= ub);
	size_t npurge_lb, npurge_ub;
	npurge_lb = decay_npurge_after_interval(decay, lb);
	if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		interval = decay_interval_ns * lb;
		goto label_done;
	}
	npurge_ub = decay_npurge_after_interval(decay, ub);
	if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		interval = decay_interval_ns * ub;
		goto label_done;
	}

	/* Bisect for an interval that purges roughly the threshold. */
	unsigned n_search = 0;
	size_t target, npurge;
	while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub)
	    && (lb + 2 < ub)) {
		target = (lb + ub) / 2;
		npurge = decay_npurge_after_interval(decay, target);
		if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
			ub = target;
			npurge_ub = npurge;
		} else {
			lb = target;
			npurge_lb = npurge;
		}
		assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
	}
	interval = decay_interval_ns * (ub + lb) / 2;
label_done:
	interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ?
	    BACKGROUND_THREAD_MIN_INTERVAL_NS : interval;
	malloc_mutex_unlock(tsdn, &decay->mtx);

	return interval;
}
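/*
 * The loop above is a plain bisection on the npurge estimate (monotonic in
 * the interval), so it terminates within lg(SMOOTHSTEP_NSTEPS) + 1 steps;
 * the n_search assert checks exactly that bound.
 */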
/* Compute purge interval for background threads. */
static uint64_t
arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
	uint64_t i1, i2;
	i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
	    &arena->extents_dirty);
	if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
		return i1;
	}
	i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
	    &arena->extents_muzzy);

	return i1 < i2 ? i1 : i2;
}
static void
background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
    uint64_t interval) {
	if (config_stats) {
		info->tot_n_runs++;
	}
	info->npages_to_purge_new = 0;

	struct timeval tv;
	/* Specific clock required by timedwait. */
	gettimeofday(&tv, NULL);
	nstime_t before_sleep;
	nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000);

	int ret;
	if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
		assert(background_thread_indefinite_sleep(info));
		ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
		assert(ret == 0);
	} else {
		assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS &&
		    interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
		/* We need the malloc clock (which can differ from tv). */
		nstime_t next_wakeup;
		nstime_init(&next_wakeup, 0);
		nstime_update(&next_wakeup);
		nstime_iadd(&next_wakeup, interval);
		assert(nstime_ns(&next_wakeup) <
		    BACKGROUND_THREAD_INDEFINITE_SLEEP);
		background_thread_wakeup_time_set(tsdn, info,
		    nstime_ns(&next_wakeup));

		nstime_t ts_wakeup;
		nstime_copy(&ts_wakeup, &before_sleep);
		nstime_iadd(&ts_wakeup, interval);
		struct timespec ts;
		ts.tv_sec = (size_t)nstime_sec(&ts_wakeup);
		ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup);

		assert(!background_thread_indefinite_sleep(info));
		ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);
		assert(ret == ETIMEDOUT || ret == 0);
		background_thread_wakeup_time_set(tsdn, info,
		    BACKGROUND_THREAD_INDEFINITE_SLEEP);
	}
	if (config_stats) {
		gettimeofday(&tv, NULL);
		nstime_t after_sleep;
		nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000);
		if (nstime_compare(&after_sleep, &before_sleep) > 0) {
			nstime_subtract(&after_sleep, &before_sleep);
			nstime_add(&info->tot_sleep_time, &after_sleep);
		}
	}
}
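/*
 * Note the two clocks in play above: pthread_cond_timedwait() expects an
 * absolute deadline on the condvar's clock, which is CLOCK_REALTIME unless
 * changed via pthread_condattr_setclock(), hence the gettimeofday()-based
 * ts_wakeup.  The recorded wakeup time, in contrast, uses nstime_update()'s
 * internal clock, which may differ.
 */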
static bool
background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
	if (unlikely(info->state == background_thread_paused)) {
		malloc_mutex_unlock(tsdn, &info->mtx);
		/* Wait on global lock to update status. */
		malloc_mutex_lock(tsdn, &background_thread_lock);
		malloc_mutex_unlock(tsdn, &background_thread_lock);
		malloc_mutex_lock(tsdn, &info->mtx);
		return true;
	}

	return false;
}
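/*
 * The lock/unlock pair on background_thread_lock above presumably works as a
 * barrier: whoever paused the thread holds the global lock for the duration
 * of the pause, so acquiring it simply blocks until the pause is lifted,
 * after which info->state is re-examined under info->mtx.
 */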
static inline void
background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info,
    unsigned ind) {
	uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
	unsigned narenas = narenas_total_get();

	for (unsigned i = ind; i < narenas; i += ncpus) {
		arena_t *arena = arena_get(tsdn, i, false);
		if (!arena) {
			continue;
		}
		arena_decay(tsdn, arena, true, false);
		if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
			/* Min interval will be used. */
			continue;
		}
		uint64_t interval = arena_decay_compute_purge_interval(tsdn,
		    arena);
		assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS);
		if (min_interval > interval) {
			min_interval = interval;
		}
	}
	background_thread_sleep(tsdn, info, min_interval);
}
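/*
 * Arenas are statically sharded across background threads: thread `ind`
 * covers arenas ind, ind + ncpus, ind + 2 * ncpus, ...  E.g. with ncpus == 4,
 * thread 1 handles arenas 1, 5, 9, and so on, and then sleeps for the
 * shortest interval any of its arenas requested.
 */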
static bool
background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {
	if (info == &background_thread_info[0]) {
		malloc_mutex_assert_owner(tsd_tsdn(tsd),
		    &background_thread_lock);
	} else {
		malloc_mutex_assert_not_owner(tsd_tsdn(tsd),
		    &background_thread_lock);
	}

	pre_reentrancy(tsd, NULL);
	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	bool has_thread;
	assert(info->state != background_thread_paused);
	if (info->state == background_thread_started) {
		has_thread = true;
		info->state = background_thread_stopped;
		pthread_cond_signal(&info->cond);
	} else {
		has_thread = false;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);

	if (!has_thread) {
		post_reentrancy(tsd);
		return false;
	}
	void *ret;
	if (pthread_join(info->thread, &ret)) {
		post_reentrancy(tsd);
		return true;
	}
	assert(ret == NULL);
	n_background_threads--;
	post_reentrancy(tsd);
	return false;
}
static void *background_thread_entry(void *ind_arg);

static int
background_thread_create_signals_masked(pthread_t *thread,
    const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) {
	/*
	 * Mask signals during thread creation so that the thread inherits
	 * an empty signal set.
	 */
	sigset_t set;
	sigfillset(&set);
	sigset_t oldset;
	int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
	if (mask_err != 0) {
		return mask_err;
	}
	int create_err = pthread_create_wrapper(thread, attr, start_routine,
	    arg);
	/*
	 * Restore the signal mask.  Failure to restore the signal mask here
	 * changes program behavior.
	 */
	int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
	if (restore_err != 0) {
		malloc_printf("<jemalloc>: background thread creation "
		    "failed (%d), and signal mask restoration failed "
		    "(%d)\n", create_err, restore_err);
		if (opt_abort) {
			abort();
		}
	}
	return create_err;
}
static void
check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
    bool *created_threads) {
	if (likely(*n_created == n_background_threads)) {
		return;
	}

	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_info[0].mtx);
label_restart:
	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
	for (unsigned i = 1; i < ncpus; i++) {
		if (created_threads[i]) {
			continue;
		}
		background_thread_info_t *info = &background_thread_info[i];
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
		assert(info->state != background_thread_paused);
		bool create = (info->state == background_thread_started);
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		if (!create) {
			continue;
		}

		/*
		 * To avoid deadlock with prefork handlers (which wait for the
		 * mutex held here), unlock before calling pthread_create().
		 */
		malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
		pre_reentrancy(tsd, NULL);
		int err = background_thread_create_signals_masked(&info->thread,
		    NULL, background_thread_entry, (void *)(uintptr_t)i);
		post_reentrancy(tsd);

		if (err == 0) {
			(*n_created)++;
			created_threads[i] = true;
		} else {
			malloc_printf("<jemalloc>: background thread "
			    "creation failed (%d)\n", err);
			if (opt_abort) {
				abort();
			}
		}
		/* Restart since we unlocked. */
		goto label_restart;
	}
	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_info[0].mtx);
	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
}
static void
background_thread0_work(tsd_t *tsd) {
	/* Thread0 is also responsible for launching / terminating threads. */
	VARIABLE_ARRAY(bool, created_threads, ncpus);
	unsigned i;
	for (i = 1; i < ncpus; i++) {
		created_threads[i] = false;
	}
	/* Start working, and create more threads when asked. */
	unsigned n_created = 1;
	while (background_thread_info[0].state != background_thread_stopped) {
		if (background_thread_pause_check(tsd_tsdn(tsd),
		    &background_thread_info[0])) {
			continue;
		}
		check_background_thread_creation(tsd, &n_created,
		    (bool *)&created_threads);
		background_work_sleep_once(tsd_tsdn(tsd),
		    &background_thread_info[0], 0);
	}

	/*
	 * Shut down other threads at exit.  Note that the ctl thread is
	 * holding the global background_thread mutex (and is waiting for us).
	 */
	assert(!background_thread_enabled());
	for (i = 1; i < ncpus; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		assert(info->state != background_thread_paused);
		if (created_threads[i]) {
			background_threads_disable_single(tsd, info);
		} else {
			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
			/* Clear in case the thread wasn't created. */
			info->state = background_thread_stopped;
			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		}
	}
	background_thread_info[0].state = background_thread_stopped;
	assert(n_background_threads == 1);
}
static void
background_work(tsd_t *tsd, unsigned ind) {
	background_thread_info_t *info = &background_thread_info[ind];

	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	background_thread_wakeup_time_set(tsd_tsdn(tsd), info,
	    BACKGROUND_THREAD_INDEFINITE_SLEEP);
	if (ind == 0) {
		background_thread0_work(tsd);
	} else {
		while (info->state != background_thread_stopped) {
			if (background_thread_pause_check(tsd_tsdn(tsd),
			    info)) {
				continue;
			}
			background_work_sleep_once(tsd_tsdn(tsd), info, ind);
		}
	}
	assert(info->state == background_thread_stopped);
	background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0);
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}
static void *
background_thread_entry(void *ind_arg) {
	unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
	assert(thread_ind < ncpus);
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
	pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
#endif
	if (opt_percpu_arena != percpu_arena_disabled) {
		set_current_thread_affinity((int)thread_ind);
	}
	/*
	 * Start periodic background work.  We use internal tsd, which avoids
	 * side effects, for example triggering new arena creation (which in
	 * turn triggers another background thread creation).
	 */
	background_work(tsd_internal_fetch(), thread_ind);
	assert(pthread_equal(pthread_self(),
	    background_thread_info[thread_ind].thread));

	return NULL;
}
static void
background_thread_init(tsd_t *tsd, background_thread_info_t *info) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
	info->state = background_thread_started;
	background_thread_info_init(tsd_tsdn(tsd), info);
	n_background_threads++;
}
/* Create a new background thread if needed. */
bool
background_thread_create(tsd_t *tsd, unsigned arena_ind) {
	assert(have_background_thread);
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

	/* We create at most NCPUs threads. */
	size_t thread_ind = arena_ind % ncpus;
	background_thread_info_t *info = &background_thread_info[thread_ind];

	bool need_new_thread;
	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	need_new_thread = background_thread_enabled() &&
	    (info->state == background_thread_stopped);
	if (need_new_thread) {
		background_thread_init(tsd, info);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
	if (!need_new_thread) {
		return false;
	}
	if (arena_ind != 0) {
		/* Threads are created asynchronously by Thread 0. */
		background_thread_info_t *t0 = &background_thread_info[0];
		malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx);
		assert(t0->state == background_thread_started);
		pthread_cond_signal(&t0->cond);
		malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx);

		return false;
	}

	pre_reentrancy(tsd, NULL);
	/*
	 * To avoid complications (besides reentrancy), create internal
	 * background threads with the underlying pthread_create.
	 */
	int err = background_thread_create_signals_masked(&info->thread, NULL,
	    background_thread_entry, (void *)thread_ind);
	post_reentrancy(tsd);

	if (err != 0) {
		malloc_printf("<jemalloc>: arena 0 background thread creation "
		    "failed (%d)\n", err);
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
		info->state = background_thread_stopped;
		n_background_threads--;
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);

		return true;
	}

	return false;
}
bool
background_threads_enable(tsd_t *tsd) {
	assert(n_background_threads == 0);
	assert(background_thread_enabled());
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

	VARIABLE_ARRAY(bool, marked, ncpus);
	unsigned i, nmarked;
	for (i = 0; i < ncpus; i++) {
		marked[i] = false;
	}
	nmarked = 0;
	/* Mark the threads we need to create for thread 0. */
	unsigned n = narenas_total_get();
	for (i = 1; i < n; i++) {
		if (marked[i % ncpus] ||
		    arena_get(tsd_tsdn(tsd), i, false) == NULL) {
			continue;
		}
		background_thread_info_t *info = &background_thread_info[
		    i % ncpus];
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
		assert(info->state == background_thread_stopped);
		background_thread_init(tsd, info);
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		marked[i % ncpus] = true;
		if (++nmarked == ncpus) {
			break;
		}
	}

	return background_thread_create(tsd, 0);
}
bool
background_threads_disable(tsd_t *tsd) {
	assert(!background_thread_enabled());
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

	/* Thread 0 will be responsible for terminating other threads. */
	if (background_threads_disable_single(tsd,
	    &background_thread_info[0])) {
		return true;
	}
	assert(n_background_threads == 0);

	return false;
}
/* Check if we need to signal the background thread early. */
void
background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, size_t npages_new) {
	background_thread_info_t *info = arena_background_thread_info_get(
	    arena);
	if (malloc_mutex_trylock(tsdn, &info->mtx)) {
		/*
		 * The background thread may hold this mutex for a long period
		 * of time.  We'd like to avoid adding that variance to
		 * application threads, so keep this non-blocking and leave
		 * the work to a future epoch.
		 */
		return;
	}

	if (info->state != background_thread_started) {
		goto label_done;
	}
	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		goto label_done;
	}

	ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
	if (decay_time <= 0) {
		/* Purging is done eagerly, or is currently disabled. */
		goto label_done_unlock2;
	}
	uint64_t decay_interval_ns = nstime_ns(&decay->interval);
	assert(decay_interval_ns > 0);

	nstime_t diff;
	nstime_init(&diff, background_thread_wakeup_time_get(info));
	if (nstime_compare(&diff, &decay->epoch) <= 0) {
		goto label_done_unlock2;
	}
	nstime_subtract(&diff, &decay->epoch);
	if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) {
		goto label_done_unlock2;
	}

	if (npages_new > 0) {
		size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns);
		/*
		 * Compute how many new pages we would need to purge by the
		 * next wakeup, which is used to determine if we should signal
		 * the background thread.
		 */
		uint64_t npurge_new;
		if (n_epoch >= SMOOTHSTEP_NSTEPS) {
			npurge_new = npages_new;
		} else {
			uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
			assert(h_steps_max >=
			    h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
			npurge_new = npages_new * (h_steps_max -
			    h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
			npurge_new >>= SMOOTHSTEP_BFP;
		}
		info->npages_to_purge_new += npurge_new;
	}

	bool should_signal;
	if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		should_signal = true;
	} else if (unlikely(background_thread_indefinite_sleep(info)) &&
	    (extents_npages_get(&arena->extents_dirty) > 0 ||
	    extents_npages_get(&arena->extents_muzzy) > 0 ||
	    info->npages_to_purge_new > 0)) {
		should_signal = true;
	} else {
		should_signal = false;
	}

	if (should_signal) {
		info->npages_to_purge_new = 0;
		pthread_cond_signal(&info->cond);
	}
label_done_unlock2:
	malloc_mutex_unlock(tsdn, &decay->mtx);
label_done:
	malloc_mutex_unlock(tsdn, &info->mtx);
}
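/*
 * The npurge_new estimate above mirrors decay_npurge_after_interval():
 * after n_epoch further epochs, npages_new contributes
 *
 *	npages_new * (h[NSTEPS - 1] - h[NSTEPS - 1 - n_epoch]) >> SMOOTHSTEP_BFP
 *
 * purgeable pages, and all of npages_new once n_epoch >= SMOOTHSTEP_NSTEPS.
 */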
void
background_thread_prefork0(tsdn_t *tsdn) {
	malloc_mutex_prefork(tsdn, &background_thread_lock);
	background_thread_enabled_at_fork = background_thread_enabled();
}

void
background_thread_prefork1(tsdn_t *tsdn) {
	for (unsigned i = 0; i < ncpus; i++) {
		malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx);
	}
}
void
background_thread_postfork_parent(tsdn_t *tsdn) {
	for (unsigned i = 0; i < ncpus; i++) {
		malloc_mutex_postfork_parent(tsdn,
		    &background_thread_info[i].mtx);
	}
	malloc_mutex_postfork_parent(tsdn, &background_thread_lock);
}

void
background_thread_postfork_child(tsdn_t *tsdn) {
	for (unsigned i = 0; i < ncpus; i++) {
		malloc_mutex_postfork_child(tsdn,
		    &background_thread_info[i].mtx);
	}
	malloc_mutex_postfork_child(tsdn, &background_thread_lock);
	if (!background_thread_enabled_at_fork) {
		return;
	}

	/* Clear background_thread state (reset to disabled for child). */
	malloc_mutex_lock(tsdn, &background_thread_lock);
	n_background_threads = 0;
	background_thread_enabled_set(tsdn, false);
	for (unsigned i = 0; i < ncpus; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		malloc_mutex_lock(tsdn, &info->mtx);
		info->state = background_thread_stopped;
		int ret = pthread_cond_init(&info->cond, NULL);
		assert(ret == 0);
		background_thread_info_init(tsdn, info);
		malloc_mutex_unlock(tsdn, &info->mtx);
	}
	malloc_mutex_unlock(tsdn, &background_thread_lock);
}
bool
background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
	assert(config_stats);
	malloc_mutex_lock(tsdn, &background_thread_lock);
	if (!background_thread_enabled()) {
		malloc_mutex_unlock(tsdn, &background_thread_lock);
		return true;
	}

	stats->num_threads = n_background_threads;
	uint64_t num_runs = 0;
	nstime_init(&stats->run_interval, 0);
	for (unsigned i = 0; i < ncpus; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		malloc_mutex_lock(tsdn, &info->mtx);
		if (info->state != background_thread_stopped) {
			num_runs += info->tot_n_runs;
			nstime_add(&stats->run_interval, &info->tot_sleep_time);
		}
		malloc_mutex_unlock(tsdn, &info->mtx);
	}
	stats->num_runs = num_runs;
	if (num_runs > 0) {
		nstime_idivide(&stats->run_interval, num_runs);
	}
	malloc_mutex_unlock(tsdn, &background_thread_lock);

	return false;
}
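/*
 * run_interval thus reports the average sleep time per run, aggregated over
 * all live background threads (total sleep time divided by the total number
 * of runs).
 */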
#undef BACKGROUND_THREAD_NPAGES_THRESHOLD
#undef BILLION
#undef BACKGROUND_THREAD_MIN_INTERVAL_NS
/*
 * When lazy lock is enabled, we need to make sure isthreaded is set before
 * taking any background_thread locks.  This is called early in ctl (rather
 * than waiting for the pthread_create calls to trigger it) because the mutex
 * is required before creating background threads.
 */
void
background_thread_ctl_init(tsdn_t *tsdn) {
	malloc_mutex_assert_not_owner(tsdn, &background_thread_lock);
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
	pthread_once(&once_control, pthread_create_wrapper_once);
#endif
}

#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */
bool
background_thread_boot0(void) {
	if (!have_background_thread && opt_background_thread) {
		malloc_printf("<jemalloc>: option background_thread currently "
		    "supports pthread only\n");
		return true;
	}

#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
	pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
	if (pthread_create_fptr == NULL) {
		can_enable_background_thread = false;
		if (config_lazy_lock || opt_background_thread) {
			malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
			    "\"pthread_create\")\n");
			abort();
		}
	} else {
		can_enable_background_thread = true;
	}
#endif
	return false;
}
bool
background_thread_boot1(tsdn_t *tsdn) {
#ifdef JEMALLOC_BACKGROUND_THREAD
	assert(have_background_thread);
	assert(narenas_total_get() > 0);

	background_thread_enabled_set(tsdn, opt_background_thread);
	if (malloc_mutex_init(&background_thread_lock,
	    "background_thread_global",
	    WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	if (opt_background_thread) {
		background_thread_ctl_init(tsdn);
	}

	background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
	    b0get(), ncpus * sizeof(background_thread_info_t), CACHELINE);
	if (background_thread_info == NULL) {
		return true;
	}

	for (unsigned i = 0; i < ncpus; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		/* Thread mutex is rank_inclusive because of thread0. */
		if (malloc_mutex_init(&info->mtx, "background_thread",
		    WITNESS_RANK_BACKGROUND_THREAD,
		    malloc_mutex_address_ordered)) {
			return true;
		}
		if (pthread_cond_init(&info->cond, NULL)) {
			return true;
		}
		malloc_mutex_lock(tsdn, &info->mtx);
		info->state = background_thread_stopped;
		background_thread_info_init(tsdn, info);
		malloc_mutex_unlock(tsdn, &info->mtx);
	}
#endif

	return false;
}