/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");
#ifdef __amd64__
#define EPOCH_ALIGN (CACHE_LINE_SIZE * 2)
#else
#define EPOCH_ALIGN CACHE_LINE_SIZE
#endif
TAILQ_HEAD (epoch_tdlist, epoch_tracker);

typedef struct epoch_record {
        ck_epoch_record_t er_record;
        struct epoch_context er_drain_ctx;
        struct epoch *er_parent;
        volatile struct epoch_tdlist er_tdlist;
        volatile uint32_t er_gen;
        uint32_t er_cpuid;
} __aligned(EPOCH_ALIGN) *epoch_record_t;
struct epoch {
        struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
        epoch_record_t e_pcpu_record;
        int e_idx;
        int e_flags;
        struct sx e_drain_sx;
        struct mtx e_drain_mtx;
        volatile int e_drain_count;
};
/* arbitrary --- needs benchmarking */
#define MAX_ADAPTIVE_SPIN 100

CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));

SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");

static counter_u64_t block_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times a thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
static counter_u64_t epoch_call_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
    &epoch_call_count, "# of times a callback was deferred");
static counter_u64_t epoch_call_task_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
    &epoch_call_task_count, "# of times a callback task was run");
TAILQ_HEAD (threadlist, thread);

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

epoch_t allepochs[MAX_EPOCHS];

DPCPU_DEFINE(struct grouptask, epoch_cb_task);
DPCPU_DEFINE(int, epoch_cb_count);

static __read_mostly int inited;
static __read_mostly int epoch_count;
__read_mostly epoch_t global_epoch;
__read_mostly epoch_t global_epoch_preempt;

static void epoch_call_task(void *context __unused);
static uma_zone_t pcpu_zone_record;
epoch_init(void *arg __unused)

        block_count = counter_u64_alloc(M_WAITOK);
        migrate_count = counter_u64_alloc(M_WAITOK);
        turnstile_count = counter_u64_alloc(M_WAITOK);
        switch_count = counter_u64_alloc(M_WAITOK);
        epoch_call_count = counter_u64_alloc(M_WAITOK);
        epoch_call_task_count = counter_u64_alloc(M_WAITOK);

        pcpu_zone_record = uma_zcreate("epoch_record pcpu",
            sizeof(struct epoch_record), NULL, NULL, NULL, NULL,
            UMA_ALIGN_PTR, UMA_ZONE_PCPU);

        GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0,
            epoch_call_task, NULL);
        taskqgroup_attach_cpu(qgroup_softirq,
            DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, NULL, NULL,

        global_epoch = epoch_alloc(0);
        global_epoch_preempt = epoch_alloc(EPOCH_PREEMPT);

SYSINIT(epoch, SI_SUB_TASKQ + 1, SI_ORDER_FIRST, epoch_init, NULL);
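
/*
 * Illustrative sketch (not part of this file): a typical consumer of the
 * preemptible-epoch KPI built from the functions below.  The type
 * "struct foo", its list, and foo_free_cb() are hypothetical names used
 * only for this example.
 *
 *	static epoch_t foo_epoch;
 *	foo_epoch = epoch_alloc(EPOCH_PREEMPT);
 *
 *	// Reader: lockless list traversal inside an epoch section.
 *	struct epoch_tracker et;
 *	epoch_enter_preempt(foo_epoch, &et);
 *	CK_LIST_FOREACH(f, &foo_list, f_link)
 *		consume(f);
 *	epoch_exit_preempt(foo_epoch, &et);
 *
 *	// Writer: unlink, then defer the free until all readers have left.
 *	CK_LIST_REMOVE(f, f_link);
 *	epoch_call(foo_epoch, &f->f_epoch_ctx, foo_free_cb);
 */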
#if !defined(EARLY_AP_STARTUP)
epoch_init_smp(void *dummy __unused)

SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);

epoch_ctor(epoch_t epoch)

        epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
        er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
        bzero(er, sizeof(*er));
        ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
        TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
        er->er_parent = epoch;

epoch_adjust_prio(struct thread *td, u_char prio)

        sched_prio(td, prio);
epoch_alloc(int flags)

        if (__predict_false(!inited))
                panic("%s called too early in boot", __func__);
        epoch = malloc(sizeof(struct epoch), M_EPOCH, M_ZERO | M_WAITOK);
        ck_epoch_init(&epoch->e_epoch);
        MPASS(epoch_count < MAX_EPOCHS - 2);
        epoch->e_flags = flags;
        epoch->e_idx = epoch_count;
        sx_init(&epoch->e_drain_sx, "epoch-drain-sx");
        mtx_init(&epoch->e_drain_mtx, "epoch-drain-mtx", NULL, MTX_DEF);
        allepochs[epoch_count++] = epoch;
epoch_free(epoch_t epoch)

        epoch_drain_callbacks(epoch);
        allepochs[epoch->e_idx] = NULL;
        epoch_wait(global_epoch);
        uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
        mtx_destroy(&epoch->e_drain_mtx);
        sx_destroy(&epoch->e_drain_sx);
        free(epoch, M_EPOCH);

static epoch_record_t
epoch_currecord(epoch_t epoch)

        return (zpcpu_get_cpu(epoch->e_pcpu_record, curcpu));
#define INIT_CHECK(epoch)						\
        do {								\
                if (__predict_false((epoch) == NULL))			\
                        return;						\
        } while (0)
epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et)

        struct epoch_record *er;

        MPASS(cold || epoch != NULL);
        MPASS(epoch->e_flags & EPOCH_PREEMPT);
#ifdef EPOCH_TRACKER_DEBUG
        et->et_magic_pre = EPOCH_MAGIC0;
        et->et_magic_post = EPOCH_MAGIC1;

        td->td_pre_epoch_prio = td->td_priority;
        er = epoch_currecord(epoch);
        TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link);
        ck_epoch_begin(&er->er_record, &et->et_section);

epoch_enter(epoch_t epoch)

        MPASS(cold || epoch != NULL);
        er = epoch_currecord(epoch);
        ck_epoch_begin(&er->er_record, NULL);

epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et)

        struct epoch_record *er;

        MPASS(td->td_epochnest);
        er = epoch_currecord(epoch);
        MPASS(epoch->e_flags & EPOCH_PREEMPT);
        MPASS(et->et_td == td);
#ifdef EPOCH_TRACKER_DEBUG
        MPASS(et->et_magic_pre == EPOCH_MAGIC0);
        MPASS(et->et_magic_post == EPOCH_MAGIC1);
        et->et_magic_pre = 0;
        et->et_magic_post = 0;

        et->et_td = (void*)0xDEADBEEF;
        ck_epoch_end(&er->er_record, &et->et_section);
        TAILQ_REMOVE(&er->er_tdlist, et, et_link);
        if (__predict_false(td->td_pre_epoch_prio != td->td_priority))
                epoch_adjust_prio(td, td->td_pre_epoch_prio);

epoch_exit(epoch_t epoch)

        MPASS(td->td_epochnest);
        er = epoch_currecord(epoch);
        ck_epoch_end(&er->er_record, NULL);
/*
 * epoch_block_handler_preempt() is a callback from the CK code when another
 * thread is currently in an epoch section.
 */
epoch_block_handler_preempt(struct ck_epoch *global __unused,
    ck_epoch_record_t *cr, void *arg __unused)

        epoch_record_t record;
        struct thread *td, *owner, *curwaittd;
        struct epoch_tracker *tdwait;
        struct turnstile *ts;
        struct lock_object *lock;
        int locksheld __unused;

        record = __containerof(cr, struct epoch_record, er_record);
        locksheld = td->td_locks;
        counter_u64_add(block_count, 1);
        /*
         * We lost a race and there are no longer any threads
         * on the CPU in an epoch section.
         */
        if (TAILQ_EMPTY(&record->er_tdlist))

        if (record->er_cpuid != curcpu) {
                /*
                 * If the head of the list is running, we can wait for it
                 * to remove itself from the list and thus save us the
                 * overhead of a migration.
                 */
                gen = record->er_gen;
                /*
                 * We can't actually check if the waiting thread is running
                 * so we simply poll for it to exit before giving up and
                 * moving on.
                 */
                } while (!TAILQ_EMPTY(&record->er_tdlist) &&
                    gen == record->er_gen &&
                    spincount++ < MAX_ADAPTIVE_SPIN);
                /*
                 * If the generation has changed we can poll again;
                 * otherwise we need to migrate.
                 */
                if (gen != record->er_gen)
                /*
                 * Being on the same CPU as that of the record on which
                 * we need to wait allows us access to the thread
                 * list associated with that CPU. We can then examine the
                 * oldest thread in the queue and wait on its turnstile
                 * until it resumes and so on until a grace period
                 * elapses.
                 */
                counter_u64_add(migrate_count, 1);
                sched_bind(td, record->er_cpuid);
                /*
                 * At this point we need to return to the ck code
                 * to scan to see if a grace period has elapsed.
                 * We can't move on to check the thread list, because
                 * in the meantime new threads may have arrived that
                 * in fact belong to a different epoch.
                 */

        /*
         * Try to find a thread in an epoch section on this CPU
         * waiting on a turnstile. Otherwise find the lowest
         * priority thread (highest prio value) and drop our priority
         * to match, to allow it to run.
         */
        TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
                /*
                 * Propagate our priority to any other waiters to prevent us
                 * from starving them. They will have their original priority
                 * restored on exit from epoch_wait().
                 */
                curwaittd = tdwait->et_td;
                if (!TD_IS_INHIBITED(curwaittd) &&
                    curwaittd->td_priority > td->td_priority) {
                        thread_lock(curwaittd);
                        sched_prio(curwaittd, td->td_priority);
                        thread_unlock(curwaittd);

                if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
                    ((ts = curwaittd->td_blocked) != NULL)) {
                        /*
                         * We unlock td to allow turnstile_wait() to reacquire
                         * the thread lock. Before unlocking it we enter a
                         * critical section to prevent preemption after we
                         * re-enable interrupts by dropping the thread lock, in
                         * order to prevent curwaittd from getting to run.
                         */
                        if (turnstile_lock(ts, &lock, &owner)) {
                                if (ts == curwaittd->td_blocked) {
                                        MPASS(TD_IS_INHIBITED(curwaittd) &&
                                            TD_ON_LOCK(curwaittd));
                                        turnstile_wait(ts, owner,
                                            curwaittd->td_tsqueue);
                                        counter_u64_add(turnstile_count, 1);
                                turnstile_unlock(ts, lock);

        KASSERT(td->td_locks == locksheld,
            ("%d extra locks held", td->td_locks - locksheld));
        /*
         * We didn't find any threads actually blocked on a lock,
         * so we have nothing to do except context switch away.
         */
        counter_u64_add(switch_count, 1);
        mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

        /*
         * Release the thread lock while yielding to
         * allow other threads to acquire the lock
         * pointed to by TDQ_LOCKPTR(td). Else a
         * deadlock-like situation might happen. (HPS)
         */
epoch_wait_preempt(epoch_t epoch)

        MPASS(cold || epoch != NULL);
        locks = curthread->td_locks;
        MPASS(epoch->e_flags & EPOCH_PREEMPT);
        if ((epoch->e_flags & EPOCH_LOCKED) == 0)
                WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
                    "epoch_wait() can be long running");
        KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle "
            "of an epoch section of the same epoch"));

        old_cpu = PCPU_GET(cpuid);
        old_pinned = td->td_pinned;
        old_prio = td->td_priority;
        was_bound = sched_is_bound(td);

        sched_bind(td, old_cpu);

        ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt,

        /* restore CPU binding, if any */
        if (was_bound != 0) {
                sched_bind(td, old_cpu);

        /* get thread back to initial CPU, if any */
                sched_bind(td, old_cpu);

        /* restore pinned after bind */
        td->td_pinned = old_pinned;

        /* restore thread priority */
        sched_prio(td, old_prio);

        KASSERT(td->td_locks == locks,
            ("%d residual locks held", td->td_locks - locks));
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,

epoch_wait(epoch_t epoch)

        MPASS(cold || epoch != NULL);
        MPASS(epoch->e_flags == 0);
        ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);

epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t))

        ck_epoch_entry_t *cb;

        /* too early in boot to have epoch set up */
        if (__predict_false(epoch == NULL))
#if !defined(EARLY_AP_STARTUP)
        if (__predict_false(inited < 2))

        *DPCPU_PTR(epoch_cb_count) += 1;
        er = epoch_currecord(epoch);
        ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
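
/*
 * Illustrative sketch of an epoch_call() callback (the object type, its
 * context member name, and the malloc type are hypothetical, not part of
 * this file): the callback receives the epoch_context embedded in the
 * object and recovers the enclosing object with __containerof() before
 * freeing it.
 *
 *	static void
 *	foo_free_cb(epoch_context_t ctx)
 *	{
 *		struct foo *f = __containerof(ctx, struct foo, f_epoch_ctx);
 *
 *		free(f, M_FOO);
 *	}
 */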
epoch_call_task(void *arg __unused)

        ck_stack_entry_t *cursor, *head, *next;
        ck_epoch_record_t *record;
        int i, npending, total;

        ck_stack_init(&cb_stack);
        epoch_enter(global_epoch);
        for (total = i = 0; i < epoch_count; i++) {
                if (__predict_false((epoch = allepochs[i]) == NULL))

                er = epoch_currecord(epoch);
                record = &er->er_record;
                if ((npending = record->n_pending) == 0)

                ck_epoch_poll_deferred(record, &cb_stack);
                total += npending - record->n_pending;

        epoch_exit(global_epoch);
        *DPCPU_PTR(epoch_cb_count) -= total;

        counter_u64_add(epoch_call_count, total);
        counter_u64_add(epoch_call_task_count, 1);

        head = ck_stack_batch_pop_npsc(&cb_stack);
        for (cursor = head; cursor != NULL; cursor = next) {
                struct ck_epoch_entry *entry =
                    ck_epoch_entry_container(cursor);

                next = CK_STACK_NEXT(cursor);
                entry->function(entry);
in_epoch_verbose(epoch_t epoch, int dump_onfail)

        struct epoch_tracker *tdwait;

        if (td->td_epochnest == 0)

        if (__predict_false((epoch) == NULL))

        er = epoch_currecord(epoch);
        TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
                if (tdwait->et_td == td) {

        MPASS(td->td_pinned);
        printf("cpu: %d id: %d\n", curcpu, td->td_tid);
        TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
                printf("td_tid: %d ", tdwait->et_td->td_tid);

in_epoch(epoch_t epoch)

        return (in_epoch_verbose(epoch, 0));
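
/*
 * Illustrative sketch: in_epoch() is typically used in assertions to check
 * that a lockless lookup really is running inside the expected epoch
 * section, e.g.
 *
 *	MPASS(in_epoch(global_epoch_preempt));
 */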
epoch_drain_cb(struct epoch_context *ctx)

        struct epoch *epoch =
            __containerof(ctx, struct epoch_record, er_drain_ctx)->er_parent;

        if (atomic_fetchadd_int(&epoch->e_drain_count, -1) == 1) {
                mtx_lock(&epoch->e_drain_mtx);
                mtx_unlock(&epoch->e_drain_mtx);

epoch_drain_callbacks(epoch_t epoch)

        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
            "epoch_drain_callbacks() may sleep!");

        /* too early in boot to have epoch set up */
        if (__predict_false(epoch == NULL))
#if !defined(EARLY_AP_STARTUP)
        if (__predict_false(inited < 2))

        sx_xlock(&epoch->e_drain_sx);
        mtx_lock(&epoch->e_drain_mtx);

        old_cpu = PCPU_GET(cpuid);
        old_pinned = td->td_pinned;
        was_bound = sched_is_bound(td);

        epoch->e_drain_count++;
        er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
        epoch_call(epoch, &er->er_drain_ctx, &epoch_drain_cb);

        /* restore CPU binding, if any */
        if (was_bound != 0) {
                sched_bind(td, old_cpu);

        /* get thread back to initial CPU, if any */
                sched_bind(td, old_cpu);

        /* restore pinned after bind */
        td->td_pinned = old_pinned;

        while (epoch->e_drain_count != 0)
                msleep(epoch, &epoch->e_drain_mtx, PZERO, "EDRAIN", 0);

        mtx_unlock(&epoch->e_drain_mtx);
        sx_xunlock(&epoch->e_drain_sx);
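
/*
 * Illustrative note: a teardown path (e.g. module detach) would typically
 * call epoch_drain_callbacks(epoch), or simply epoch_free(epoch), which
 * drains internally (see epoch_free() above), before destroying the data
 * structures its deferred callbacks reference.
 */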
epoch_thread_init(struct thread *td)

        td->td_et = malloc(sizeof(struct epoch_tracker), M_EPOCH, M_WAITOK);

epoch_thread_fini(struct thread *td)

        free(td->td_et, M_EPOCH);