#define JEMALLOC_BACKGROUND_THREAD_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"

/******************************************************************************/
/* Data. */
/* This option should be opt-in only. */
#define BACKGROUND_THREAD_DEFAULT false
/* Read-only after initialization. */
bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;
size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT;
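
/*
 * Beyond the opt.* settings above, background purging is toggled at run time
 * through the "background_thread" mallctl, which reaches
 * background_threads_enable()/_disable() below.  Illustrative sketch only,
 * not part of this file:
 *
 *	bool enable = true;
 *	mallctl("background_thread", NULL, NULL, &enable, sizeof(bool));
 */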
/* Used for thread creation, termination and stats. */
malloc_mutex_t background_thread_lock;
/* Indicates global state.  Atomic because decay reads this w/o locking. */
atomic_b_t background_thread_enabled_state;
size_t n_background_threads;
size_t max_background_threads;
/* Thread info per-index. */
background_thread_info_t *background_thread_info;

/* False when the necessary runtime support is missing. */
bool can_enable_background_thread;

/******************************************************************************/
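
/*
 * pthread_create is interposed (and resolved lazily via
 * dlsym(RTLD_NEXT, ...) in pthread_create_fptr_init() below) so that
 * creating any thread can first flip isthreaded for lazy locking.
 */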
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
#include <dlfcn.h>

static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
    void *(*)(void *), void *__restrict);

static void
pthread_create_wrapper_init(void) {
#ifdef JEMALLOC_LAZY_LOCK
	if (!isthreaded) {
		isthreaded = true;
	}
#endif
}

int
pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *__restrict arg) {
	pthread_create_wrapper_init();

	return pthread_create_fptr(thread, attr, start_routine, arg);
}
#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */
#ifndef JEMALLOC_BACKGROUND_THREAD
#define NOT_REACHED { not_reached(); }
bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
bool background_threads_enable(tsd_t *tsd) NOT_REACHED
bool background_threads_disable(tsd_t *tsd) NOT_REACHED
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, size_t npages_new) NOT_REACHED
void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED
bool background_thread_stats_read(tsdn_t *tsdn,
    background_thread_stats_t *stats) NOT_REACHED
void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED
#undef NOT_REACHED
#else

static bool background_thread_enabled_at_fork;
static void
background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
	background_thread_wakeup_time_set(tsdn, info, 0);
	info->npages_to_purge_new = 0;
	if (config_stats) {
		info->tot_n_runs = 0;
		nstime_init(&info->tot_sleep_time, 0);
	}
}

static inline bool
set_current_thread_affinity(UNUSED int cpu) {
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
	cpu_set_t cpuset;
	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);
	int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);

	return (ret != 0);
#else
	return false;
#endif
}
/* Threshold for determining when to wake up the background thread. */
#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024)
#define BILLION UINT64_C(1000000000)
/* Minimal sleep interval 100 ms. */
#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)
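
/*
 * Estimate the number of pages that would be purged if the thread slept for
 * `interval` decay epochs: each backlog entry contributes however much its
 * smoothstep allowance shrinks over that many epochs (entries that expire
 * entirely contribute their full h_steps[] weight).  The sum is kept in
 * SMOOTHSTEP_BFP fixed point, hence the final shift.
 */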
static inline size_t
decay_npurge_after_interval(arena_decay_t *decay, size_t interval) {
	size_t i;
	uint64_t sum = 0;
	for (i = 0; i < interval; i++) {
		sum += decay->backlog[i] * h_steps[i];
	}
	for (; i < SMOOTHSTEP_NSTEPS; i++) {
		sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]);
	}

	return (size_t)(sum >> SMOOTHSTEP_BFP);
}
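
/*
 * Decide how long one background thread may sleep for a given decay state.
 * Fast paths first: contended decay mutex, decay disabled, an empty backlog,
 * or few enough dirty pages that the maximum interval is safe.  Otherwise,
 * binary search for the shortest interval whose estimated purge count
 * reaches BACKGROUND_THREAD_NPAGES_THRESHOLD.  (For intuition: assuming the
 * default 10 s dirty decay time and SMOOTHSTEP_NSTEPS == 200, one epoch is
 * 50 ms; these numbers are illustrative, nothing below depends on them.)
 */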
static uint64_t
arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
    extents_t *extents) {
	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		/* Use minimal interval if decay is contended. */
		return BACKGROUND_THREAD_MIN_INTERVAL_NS;
	}

	uint64_t interval;
	ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
	if (decay_time <= 0) {
		/* Purging is eagerly done or disabled currently. */
		interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
		goto label_done;
	}

	uint64_t decay_interval_ns = nstime_ns(&decay->interval);
	assert(decay_interval_ns > 0);
	size_t npages = extents_npages_get(extents);
	if (npages == 0) {
		unsigned i;
		for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
			if (decay->backlog[i] > 0) {
				break;
			}
		}
		if (i == SMOOTHSTEP_NSTEPS) {
			/* No dirty pages recorded.  Sleep indefinitely. */
			interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
			goto label_done;
		}
	}
	if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		/* Use max interval. */
		interval = decay_interval_ns * SMOOTHSTEP_NSTEPS;
		goto label_done;
	}

	size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns;
	size_t ub = SMOOTHSTEP_NSTEPS;
	/* Minimal 2 intervals to ensure reaching next epoch deadline. */
	lb = (lb < 2) ? 2 : lb;
	if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) ||
	    (lb + 2 > ub)) {
		interval = BACKGROUND_THREAD_MIN_INTERVAL_NS;
		goto label_done;
	}

	assert(lb + 2 <= ub);
	size_t npurge_lb, npurge_ub;
	npurge_lb = decay_npurge_after_interval(decay, lb);
	if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		interval = decay_interval_ns * lb;
		goto label_done;
	}
	npurge_ub = decay_npurge_after_interval(decay, ub);
	if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		interval = decay_interval_ns * ub;
		goto label_done;
	}

	unsigned n_search = 0;
	size_t target, npurge;
	while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub)
	    && (lb + 2 < ub)) {
		target = (lb + ub) / 2;
		npurge = decay_npurge_after_interval(decay, target);
		if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
			ub = target;
			npurge_ub = npurge;
		} else {
			lb = target;
			npurge_lb = npurge;
		}
		assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
	}
	interval = decay_interval_ns * (ub + lb) / 2;
label_done:
	interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ?
	    BACKGROUND_THREAD_MIN_INTERVAL_NS : interval;
	malloc_mutex_unlock(tsdn, &decay->mtx);

	return interval;
}
/* Compute purge interval for background threads. */
static uint64_t
arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
	uint64_t i1, i2;
	i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
	    &arena->extents_dirty);
	if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
		return i1;
	}
	i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
	    &arena->extents_muzzy);

	return i1 < i2 ? i1 : i2;
}
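
/*
 * Sleep with info->mtx held.  Two clocks are in play: gettimeofday() feeds
 * pthread_cond_timedwait(), while the wakeup-time bookkeeping uses the
 * malloc clock (nstime_update()), which may differ from the former.
 */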
static void
background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
    uint64_t interval) {
	if (config_stats) {
		info->tot_n_runs++;
	}
	info->npages_to_purge_new = 0;

	struct timeval tv;
	/* Specific clock required by timedwait. */
	gettimeofday(&tv, NULL);
	nstime_t before_sleep;
	nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000);

	int ret;
	if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
		assert(background_thread_indefinite_sleep(info));
		ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
		assert(ret == 0);
	} else {
		assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS &&
		    interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
		/* We need malloc clock (can be different from tv). */
		nstime_t next_wakeup;
		nstime_init(&next_wakeup, 0);
		nstime_update(&next_wakeup);
		nstime_iadd(&next_wakeup, interval);
		assert(nstime_ns(&next_wakeup) <
		    BACKGROUND_THREAD_INDEFINITE_SLEEP);
		background_thread_wakeup_time_set(tsdn, info,
		    nstime_ns(&next_wakeup));

		nstime_t ts_wakeup;
		nstime_copy(&ts_wakeup, &before_sleep);
		nstime_iadd(&ts_wakeup, interval);
		struct timespec ts;
		ts.tv_sec = (size_t)nstime_sec(&ts_wakeup);
		ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup);

		assert(!background_thread_indefinite_sleep(info));
		ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);
		assert(ret == ETIMEDOUT || ret == 0);
		background_thread_wakeup_time_set(tsdn, info,
		    BACKGROUND_THREAD_INDEFINITE_SLEEP);
	}
	if (config_stats) {
		gettimeofday(&tv, NULL);
		nstime_t after_sleep;
		nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000);
		if (nstime_compare(&after_sleep, &before_sleep) > 0) {
			nstime_subtract(&after_sleep, &before_sleep);
			nstime_add(&info->tot_sleep_time, &after_sleep);
		}
	}
}
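
/*
 * The paused state is set (under the global lock) while an arena reset is in
 * progress.  A paused thread drops its own mutex and then bounces off the
 * global lock, which the pauser holds until done; returning true tells the
 * caller to re-check its state.
 */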
static bool
background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
	if (unlikely(info->state == background_thread_paused)) {
		malloc_mutex_unlock(tsdn, &info->mtx);
		/* Wait on global lock to update status. */
		malloc_mutex_lock(tsdn, &background_thread_lock);
		malloc_mutex_unlock(tsdn, &background_thread_lock);
		malloc_mutex_lock(tsdn, &info->mtx);
		return true;
	}

	return false;
}
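
/*
 * One round of background work: thread `ind` decays every
 * max_background_threads-th arena starting at index ind, then sleeps for the
 * shortest purge interval any of its arenas requested.
 */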
static inline void
background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info,
    unsigned ind) {
	uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
	unsigned narenas = narenas_total_get();

	for (unsigned i = ind; i < narenas; i += max_background_threads) {
		arena_t *arena = arena_get(tsdn, i, false);
		if (!arena) {
			continue;
		}
		arena_decay(tsdn, arena, true, false);
		if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
			/* Min interval will be used. */
			continue;
		}
		uint64_t interval = arena_decay_compute_purge_interval(tsdn,
		    arena);
		assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS);
		if (min_interval > interval) {
			min_interval = interval;
		}
	}
	background_thread_sleep(tsdn, info, min_interval);
}
static bool
background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {
	if (info == &background_thread_info[0]) {
		malloc_mutex_assert_owner(tsd_tsdn(tsd),
		    &background_thread_lock);
	} else {
		malloc_mutex_assert_not_owner(tsd_tsdn(tsd),
		    &background_thread_lock);
	}

	pre_reentrancy(tsd, NULL);
	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	bool has_thread;
	assert(info->state != background_thread_paused);
	if (info->state == background_thread_started) {
		has_thread = true;
		info->state = background_thread_stopped;
		pthread_cond_signal(&info->cond);
	} else {
		has_thread = false;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);

	if (!has_thread) {
		post_reentrancy(tsd);
		return false;
	}
	void *ret;
	if (pthread_join(info->thread, &ret)) {
		post_reentrancy(tsd);
		return true;
	}
	assert(ret == NULL);
	n_background_threads--;
	post_reentrancy(tsd);
	return false;
}
static void *background_thread_entry(void *ind_arg);

static int
background_thread_create_signals_masked(pthread_t *thread,
    const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) {
	/*
	 * Mask signals during thread creation so that the thread inherits
	 * an empty signal set.
	 */
	sigset_t set;
	sigfillset(&set);
	sigset_t oldset;
	int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
	if (mask_err != 0) {
		return mask_err;
	}
	int create_err = pthread_create_wrapper(thread, attr, start_routine,
	    arg);
	/*
	 * Restore the signal mask.  Failure to restore the signal mask here
	 * changes program behavior.
	 */
	int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
	if (restore_err != 0) {
		malloc_printf("<jemalloc>: background thread creation "
		    "failed (%d), and signal mask restoration failed "
		    "(%d)\n", create_err, restore_err);
		if (opt_abort) {
			abort();
		}
	}
	return create_err;
}
static bool
check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
    bool *created_threads) {
	bool ret = false;
	if (likely(*n_created == n_background_threads)) {
		return ret;
	}

	tsdn_t *tsdn = tsd_tsdn(tsd);
	malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx);
	for (unsigned i = 1; i < max_background_threads; i++) {
		if (created_threads[i]) {
			continue;
		}
		background_thread_info_t *info = &background_thread_info[i];
		malloc_mutex_lock(tsdn, &info->mtx);
		/*
		 * In case of the background_thread_paused state because of
		 * arena reset, delay the creation.
		 */
		bool create = (info->state == background_thread_started);
		malloc_mutex_unlock(tsdn, &info->mtx);
		if (!create) {
			continue;
		}

		pre_reentrancy(tsd, NULL);
		int err = background_thread_create_signals_masked(&info->thread,
		    NULL, background_thread_entry, (void *)(uintptr_t)i);
		post_reentrancy(tsd);

		if (err == 0) {
			(*n_created)++;
			created_threads[i] = true;
		} else {
			malloc_printf("<jemalloc>: background thread "
			    "creation failed (%d)\n", err);
			if (opt_abort) {
				abort();
			}
		}
		/* Return to restart the loop since we unlocked. */
		ret = true;
		break;
	}
	malloc_mutex_lock(tsdn, &background_thread_info[0].mtx);
	return ret;
}
static void
background_thread0_work(tsd_t *tsd) {
	/* Thread0 is also responsible for launching / terminating threads. */
	VARIABLE_ARRAY(bool, created_threads, max_background_threads);
	unsigned i;
	for (i = 1; i < max_background_threads; i++) {
		created_threads[i] = false;
	}
	/* Start working, and create more threads when asked. */
	unsigned n_created = 1;
	while (background_thread_info[0].state != background_thread_stopped) {
		if (background_thread_pause_check(tsd_tsdn(tsd),
		    &background_thread_info[0])) {
			continue;
		}
		if (check_background_thread_creation(tsd, &n_created,
		    (bool *)&created_threads)) {
			continue;
		}
		background_work_sleep_once(tsd_tsdn(tsd),
		    &background_thread_info[0], 0);
	}

	/*
	 * Shut down other threads at exit.  Note that the ctl thread is
	 * holding the global background_thread mutex, waiting for us.
	 */
	assert(!background_thread_enabled());
	for (i = 1; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		assert(info->state != background_thread_paused);
		if (created_threads[i]) {
			background_threads_disable_single(tsd, info);
		} else {
			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
			if (info->state != background_thread_stopped) {
				/* The thread was not created. */
				assert(info->state ==
				    background_thread_started);
				n_background_threads--;
				info->state = background_thread_stopped;
			}
			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		}
	}
	background_thread_info[0].state = background_thread_stopped;
	assert(n_background_threads == 1);
}
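
/*
 * Per-thread main loop: info->mtx is held throughout and released only
 * inside the sleep / pause paths.  Thread 0 runs the extended loop above,
 * which also creates and tears down the other threads.
 */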
static void
background_work(tsd_t *tsd, unsigned ind) {
	background_thread_info_t *info = &background_thread_info[ind];

	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	background_thread_wakeup_time_set(tsd_tsdn(tsd), info,
	    BACKGROUND_THREAD_INDEFINITE_SLEEP);
	if (ind == 0) {
		background_thread0_work(tsd);
	} else {
		while (info->state != background_thread_stopped) {
			if (background_thread_pause_check(tsd_tsdn(tsd),
			    info)) {
				continue;
			}
			background_work_sleep_once(tsd_tsdn(tsd), info, ind);
		}
	}
	assert(info->state == background_thread_stopped);
	background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0);
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}
static void *
background_thread_entry(void *ind_arg) {
	unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
	assert(thread_ind < max_background_threads);
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
	pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
#endif
	if (opt_percpu_arena != percpu_arena_disabled) {
		set_current_thread_affinity((int)thread_ind);
	}
	/*
	 * Start periodic background work.  We use internal tsd which avoids
	 * side effects, for example triggering new arena creation (which in
	 * turn triggers another background thread creation).
	 */
	background_work(tsd_internal_fetch(), thread_ind);
	assert(pthread_equal(pthread_self(),
	    background_thread_info[thread_ind].thread));

	return NULL;
}
static void
background_thread_init(tsd_t *tsd, background_thread_info_t *info) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
	info->state = background_thread_started;
	background_thread_info_init(tsd_tsdn(tsd), info);
	n_background_threads++;
}
/* Create a new background thread if needed. */
bool
background_thread_create(tsd_t *tsd, unsigned arena_ind) {
	assert(have_background_thread);
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

	/* We create at most NCPUs threads. */
	size_t thread_ind = arena_ind % max_background_threads;
	background_thread_info_t *info = &background_thread_info[thread_ind];

	bool need_new_thread;
	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	need_new_thread = background_thread_enabled() &&
	    (info->state == background_thread_stopped);
	if (need_new_thread) {
		background_thread_init(tsd, info);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
	if (!need_new_thread) {
		return false;
	}
	if (arena_ind != 0) {
		/* Threads are created asynchronously by Thread 0. */
		background_thread_info_t *t0 = &background_thread_info[0];
		malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx);
		assert(t0->state == background_thread_started);
		pthread_cond_signal(&t0->cond);
		malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx);

		return false;
	}

	pre_reentrancy(tsd, NULL);
	/*
	 * To avoid complications (besides reentrancy), create internal
	 * background threads with the underlying pthread_create.
	 */
	int err = background_thread_create_signals_masked(&info->thread, NULL,
	    background_thread_entry, (void *)thread_ind);
	post_reentrancy(tsd);

	if (err != 0) {
		malloc_printf("<jemalloc>: arena 0 background thread creation "
		    "failed (%d)\n", err);
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
		info->state = background_thread_stopped;
		n_background_threads--;
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);

		return true;
	}

	return false;
}
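
/*
 * Enabling marks one background thread per live arena (arena index modulo
 * max_background_threads) and then creates thread 0 directly; thread 0
 * spawns the rest from its work loop (see check_background_thread_creation()).
 */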
bool
background_threads_enable(tsd_t *tsd) {
	assert(n_background_threads == 0);
	assert(background_thread_enabled());
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

	VARIABLE_ARRAY(bool, marked, max_background_threads);
	unsigned i, nmarked;
	for (i = 0; i < max_background_threads; i++) {
		marked[i] = false;
	}
	nmarked = 0;
	/* Thread 0 is required and created at the end. */
	marked[0] = true;
	/* Mark the threads we need to create for thread 0. */
	unsigned n = narenas_total_get();
	for (i = 1; i < n; i++) {
		if (marked[i % max_background_threads] ||
		    arena_get(tsd_tsdn(tsd), i, false) == NULL) {
			continue;
		}
		background_thread_info_t *info = &background_thread_info[
		    i % max_background_threads];
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
		assert(info->state == background_thread_stopped);
		background_thread_init(tsd, info);
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		marked[i % max_background_threads] = true;
		if (++nmarked == max_background_threads) {
			break;
		}
	}

	return background_thread_create(tsd, 0);
}
bool
background_threads_disable(tsd_t *tsd) {
	assert(!background_thread_enabled());
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

	/* Thread 0 will be responsible for terminating other threads. */
	if (background_threads_disable_single(tsd,
	    &background_thread_info[0])) {
		return true;
	}
	assert(n_background_threads == 0);

	return false;
}
/* Check if we need to signal the background thread early. */
void
background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, size_t npages_new) {
	background_thread_info_t *info = arena_background_thread_info_get(
	    arena);
	if (malloc_mutex_trylock(tsdn, &info->mtx)) {
		/*
		 * Background thread may hold the mutex for a long period of
		 * time.  We'd like to avoid the variance on application
		 * threads.  So keep this non-blocking, and leave the work to
		 * a future epoch.
		 */
		return;
	}

	if (info->state != background_thread_started) {
		goto label_done;
	}
	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		goto label_done;
	}

	ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
	if (decay_time <= 0) {
		/* Purging is eagerly done or disabled currently. */
		goto label_done_unlock2;
	}
	uint64_t decay_interval_ns = nstime_ns(&decay->interval);
	assert(decay_interval_ns > 0);

	nstime_t diff;
	nstime_init(&diff, background_thread_wakeup_time_get(info));
	if (nstime_compare(&diff, &decay->epoch) <= 0) {
		goto label_done_unlock2;
	}
	nstime_subtract(&diff, &decay->epoch);
	if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) {
		goto label_done_unlock2;
	}

	if (npages_new > 0) {
		size_t n_epoch = (size_t)(nstime_ns(&diff) /
		    decay_interval_ns);
		/*
		 * Compute how many new pages we would need to purge by the
		 * next wakeup, which is used to determine if we should signal
		 * the background thread.
		 */
		uint64_t npurge_new;
		if (n_epoch >= SMOOTHSTEP_NSTEPS) {
			npurge_new = npages_new;
		} else {
			uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
			assert(h_steps_max >=
			    h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
			npurge_new = npages_new * (h_steps_max -
			    h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
			npurge_new >>= SMOOTHSTEP_BFP;
		}
		info->npages_to_purge_new += npurge_new;
	}

	bool should_signal;
	if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		should_signal = true;
	} else if (unlikely(background_thread_indefinite_sleep(info)) &&
	    (extents_npages_get(&arena->extents_dirty) > 0 ||
	    extents_npages_get(&arena->extents_muzzy) > 0 ||
	    info->npages_to_purge_new > 0)) {
		should_signal = true;
	} else {
		should_signal = false;
	}

	if (should_signal) {
		info->npages_to_purge_new = 0;
		pthread_cond_signal(&info->cond);
	}
label_done_unlock2:
	malloc_mutex_unlock(tsdn, &decay->mtx);
label_done:
	malloc_mutex_unlock(tsdn, &info->mtx);
}
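
/*
 * Fork support: prefork0 takes the global lock before prefork1 takes the
 * per-thread mutexes, matching the usual lock order; the postfork hooks
 * release in reverse.  The child resets everything to disabled, since only
 * the forking thread survives fork().
 */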
void
background_thread_prefork0(tsdn_t *tsdn) {
	malloc_mutex_prefork(tsdn, &background_thread_lock);
	background_thread_enabled_at_fork = background_thread_enabled();
}

void
background_thread_prefork1(tsdn_t *tsdn) {
	for (unsigned i = 0; i < max_background_threads; i++) {
		malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx);
	}
}

void
background_thread_postfork_parent(tsdn_t *tsdn) {
	for (unsigned i = 0; i < max_background_threads; i++) {
		malloc_mutex_postfork_parent(tsdn,
		    &background_thread_info[i].mtx);
	}
	malloc_mutex_postfork_parent(tsdn, &background_thread_lock);
}
void
background_thread_postfork_child(tsdn_t *tsdn) {
	for (unsigned i = 0; i < max_background_threads; i++) {
		malloc_mutex_postfork_child(tsdn,
		    &background_thread_info[i].mtx);
	}
	malloc_mutex_postfork_child(tsdn, &background_thread_lock);
	if (!background_thread_enabled_at_fork) {
		return;
	}

	/* Clear background_thread state (reset to disabled for child). */
	malloc_mutex_lock(tsdn, &background_thread_lock);
	n_background_threads = 0;
	background_thread_enabled_set(tsdn, false);
	for (unsigned i = 0; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		malloc_mutex_lock(tsdn, &info->mtx);
		info->state = background_thread_stopped;
		int ret = pthread_cond_init(&info->cond, NULL);
		assert(ret == 0);
		background_thread_info_init(tsdn, info);
		malloc_mutex_unlock(tsdn, &info->mtx);
	}
	malloc_mutex_unlock(tsdn, &background_thread_lock);
}
bool
background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
	assert(config_stats);
	malloc_mutex_lock(tsdn, &background_thread_lock);
	if (!background_thread_enabled()) {
		malloc_mutex_unlock(tsdn, &background_thread_lock);
		return true;
	}

	stats->num_threads = n_background_threads;
	uint64_t num_runs = 0;
	nstime_init(&stats->run_interval, 0);
	for (unsigned i = 0; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		malloc_mutex_lock(tsdn, &info->mtx);
		if (info->state != background_thread_stopped) {
			num_runs += info->tot_n_runs;
			nstime_add(&stats->run_interval,
			    &info->tot_sleep_time);
		}
		malloc_mutex_unlock(tsdn, &info->mtx);
	}
	stats->num_runs = num_runs;
	if (num_runs > 0) {
		nstime_idivide(&stats->run_interval, num_runs);
	}
	malloc_mutex_unlock(tsdn, &background_thread_lock);

	return false;
}
#undef BACKGROUND_THREAD_NPAGES_THRESHOLD
#undef BILLION
#undef BACKGROUND_THREAD_MIN_INTERVAL_NS
static bool
pthread_create_fptr_init(void) {
	if (pthread_create_fptr != NULL) {
		return false;
	}
	pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
	if (pthread_create_fptr == NULL) {
		can_enable_background_thread = false;
		if (config_lazy_lock || opt_background_thread) {
			malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
			    "\"pthread_create\")\n");
			abort();
		}
	} else {
		can_enable_background_thread = true;
	}

	return false;
}
/*
 * When lazy lock is enabled, we need to make sure isthreaded is set before
 * taking any background_thread locks.  This is called early in ctl (instead
 * of waiting for the pthread_create calls to trigger) because the mutex is
 * required before creating background threads.
 */
void
background_thread_ctl_init(tsdn_t *tsdn) {
	malloc_mutex_assert_not_owner(tsdn, &background_thread_lock);
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
	pthread_create_fptr_init();
	pthread_create_wrapper_init();
#endif
}
#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */

bool
background_thread_boot0(void) {
	if (!have_background_thread && opt_background_thread) {
		malloc_printf("<jemalloc>: option background_thread currently "
		    "supports pthread only\n");
		return true;
	}
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
	if ((config_lazy_lock || opt_background_thread) &&
	    pthread_create_fptr_init()) {
		return true;
	}
#endif
	return false;
}
bool
background_thread_boot1(tsdn_t *tsdn) {
#ifdef JEMALLOC_BACKGROUND_THREAD
	assert(have_background_thread);
	assert(narenas_total_get() > 0);

	if (opt_max_background_threads == MAX_BACKGROUND_THREAD_LIMIT &&
	    ncpus < MAX_BACKGROUND_THREAD_LIMIT) {
		opt_max_background_threads = ncpus;
	}
	max_background_threads = opt_max_background_threads;

	background_thread_enabled_set(tsdn, opt_background_thread);
	if (malloc_mutex_init(&background_thread_lock,
	    "background_thread_global",
	    WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}

	background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
	    b0get(), opt_max_background_threads *
	    sizeof(background_thread_info_t), CACHELINE);
	if (background_thread_info == NULL) {
		return true;
	}

	for (unsigned i = 0; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		/* Thread mutex is rank_inclusive because of thread0. */
		if (malloc_mutex_init(&info->mtx, "background_thread",
		    WITNESS_RANK_BACKGROUND_THREAD,
		    malloc_mutex_address_ordered)) {
			return true;
		}
		if (pthread_cond_init(&info->cond, NULL)) {
			return true;
		}
		malloc_mutex_lock(tsdn, &info->mtx);
		info->state = background_thread_stopped;
		background_thread_info_init(tsdn, info);
		malloc_mutex_unlock(tsdn, &info->mtx);
	}
#endif

	return false;
}