/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#ifdef EPOCH_TRACE
#include <machine/stdarg.h>
#include <sys/stack.h>
#include <sys/tree.h>
#endif
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#include <ck_epoch.h>
static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");

#ifdef __amd64__
#define EPOCH_ALIGN CACHE_LINE_SIZE*2
#else
#define EPOCH_ALIGN CACHE_LINE_SIZE
#endif

TAILQ_HEAD (epoch_tdlist, epoch_tracker);
typedef struct epoch_record {
	ck_epoch_record_t er_record;
	struct epoch_context er_drain_ctx;
	struct epoch *er_parent;
	volatile struct epoch_tdlist er_tdlist;
	volatile uint32_t er_gen;
	uint32_t er_cpuid;
} __aligned(EPOCH_ALIGN) *epoch_record_t;
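
/*
 * An epoch couples the global ck_epoch state with a per-CPU array of
 * records (e_pcpu_record) and the bookkeeping used to drain outstanding
 * deferred callbacks when the epoch is torn down.
 */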
struct epoch {
	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
	epoch_record_t e_pcpu_record;
	int	e_idx;
	int	e_flags;
	struct sx e_drain_sx;
	struct mtx e_drain_mtx;
	volatile int e_drain_count;
	const char *e_name;
};
/* arbitrary --- needs benchmarking */
#define MAX_ADAPTIVE_SPIN 100
#define MAX_EPOCHS 64

CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");
static counter_u64_t block_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
static counter_u64_t epoch_call_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
    &epoch_call_count, "# of times a callback was deferred");
static counter_u64_t epoch_call_task_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
    &epoch_call_task_count, "# of times a callback task was run");
TAILQ_HEAD (threadlist, thread);

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

epoch_t	allepochs[MAX_EPOCHS];

DPCPU_DEFINE(struct grouptask, epoch_cb_task);
DPCPU_DEFINE(int, epoch_cb_count);

static __read_mostly int inited;
static __read_mostly int epoch_count;
__read_mostly epoch_t global_epoch;
__read_mostly epoch_t global_epoch_preempt;

static void epoch_call_task(void *context __unused);
static uma_zone_t pcpu_zone_record;
#ifdef EPOCH_TRACE
struct stackentry {
	RB_ENTRY(stackentry) se_node;
	struct stack se_stack;
};

static int
stackentry_compare(struct stackentry *a, struct stackentry *b)
{

	if (a->se_stack.depth > b->se_stack.depth)
		return (1);
	if (a->se_stack.depth < b->se_stack.depth)
		return (-1);
	for (int i = 0; i < a->se_stack.depth; i++) {
		if (a->se_stack.pcs[i] > b->se_stack.pcs[i])
			return (1);
		if (a->se_stack.pcs[i] < b->se_stack.pcs[i])
			return (-1);
	}

	return (0);
}

RB_HEAD(stacktree, stackentry) epoch_stacks = RB_INITIALIZER(&epoch_stacks);
RB_GENERATE_STATIC(stacktree, stackentry, se_node, stackentry_compare);

static struct mtx epoch_stacks_lock;
MTX_SYSINIT(epochstacks, &epoch_stacks_lock, "epoch_stacks", MTX_DEF);
static void epoch_trace_report(const char *fmt, ...) __printflike(1, 2);
static inline void
epoch_trace_report(const char *fmt, ...)
{
	va_list ap;
	struct stackentry se, *new;

	stack_zero(&se.se_stack);	/* XXX: is it really needed? */
	stack_save(&se.se_stack);

	/* Tree is never reduced - go lockless. */
	if (RB_FIND(stacktree, &epoch_stacks, &se) != NULL)
		return;

	new = malloc(sizeof(*new), M_STACK, M_NOWAIT);
	if (new != NULL) {
		bcopy(&se.se_stack, &new->se_stack, sizeof(struct stack));

		mtx_lock(&epoch_stacks_lock);
		new = RB_INSERT(stacktree, &epoch_stacks, new);
		mtx_unlock(&epoch_stacks_lock);
		if (new != NULL)
			free(new, M_STACK);
	}

	va_start(ap, fmt);
	(void)vprintf(fmt, ap);
	va_end(ap);
	stack_print_ddb(&se.se_stack);
}
static inline void
epoch_trace_enter(struct thread *td, epoch_t epoch, epoch_tracker_t et,
    const char *file, int line)
{
	epoch_tracker_t iet;

	SLIST_FOREACH(iet, &td->td_epochs, et_tlink)
		if (iet->et_epoch == epoch)
			epoch_trace_report("Recursively entering epoch %s "
			    "previously entered at %s:%d\n",
			    epoch->e_name, iet->et_file, iet->et_line);
	et->et_epoch = epoch;
	et->et_file = file;
	et->et_line = line;
	SLIST_INSERT_HEAD(&td->td_epochs, et, et_tlink);
}
static inline void
epoch_trace_exit(struct thread *td, epoch_t epoch, epoch_tracker_t et,
    const char *file, int line)
{

	if (SLIST_FIRST(&td->td_epochs) != et) {
		epoch_trace_report("Exiting epoch %s in a not nested order. "
		    "Most recently entered %s at %s:%d\n",
		    epoch->e_name,
		    SLIST_FIRST(&td->td_epochs)->et_epoch->e_name,
		    SLIST_FIRST(&td->td_epochs)->et_file,
		    SLIST_FIRST(&td->td_epochs)->et_line);
		/* This will panic if et is not anywhere on td_epochs. */
		SLIST_REMOVE(&td->td_epochs, et, epoch_tracker, et_tlink);
	} else
		SLIST_REMOVE_HEAD(&td->td_epochs, et_tlink);
}
/* Used by assertions that check thread state before going to sleep. */
void
epoch_trace_list(struct thread *td)
{
	epoch_tracker_t iet;

	SLIST_FOREACH(iet, &td->td_epochs, et_tlink)
		printf("Epoch %s entered at %s:%d\n", iet->et_epoch->e_name,
		    iet->et_file, iet->et_line);
}
#endif /* EPOCH_TRACE */
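
/*
 * Boot-time initialization: allocate the statistics counters and the
 * per-CPU record zone, attach one callback grouptask per CPU, and create
 * the two global epochs used throughout the kernel.
 */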
static void
epoch_init(void *arg __unused)
{
	int cpu;

	block_count = counter_u64_alloc(M_WAITOK);
	migrate_count = counter_u64_alloc(M_WAITOK);
	turnstile_count = counter_u64_alloc(M_WAITOK);
	switch_count = counter_u64_alloc(M_WAITOK);
	epoch_call_count = counter_u64_alloc(M_WAITOK);
	epoch_call_task_count = counter_u64_alloc(M_WAITOK);

	pcpu_zone_record = uma_zcreate("epoch_record pcpu",
	    sizeof(struct epoch_record), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	CPU_FOREACH(cpu) {
		GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0,
		    epoch_call_task, NULL);
		taskqgroup_attach_cpu(qgroup_softirq,
		    DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, NULL, NULL,
		    "epoch call task");
	}
#ifdef EPOCH_TRACE
	SLIST_INIT(&thread0.td_epochs);
#endif
	inited = 1;
	global_epoch = epoch_alloc("Global", 0);
	global_epoch_preempt = epoch_alloc("Global preemptible", EPOCH_PREEMPT);
}
SYSINIT(epoch, SI_SUB_TASKQ + 1, SI_ORDER_FIRST, epoch_init, NULL);

#if !defined(EARLY_AP_STARTUP)
static void
epoch_init_smp(void *dummy __unused)
{
	inited = 2;
}
SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
#endif
static void
epoch_ctor(epoch_t epoch)
{
	epoch_record_t er;
	int cpu;

	epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		bzero(er, sizeof(*er));
		ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
		er->er_cpuid = cpu;
		er->er_parent = epoch;
	}
}

static void
epoch_adjust_prio(struct thread *td, u_char prio)
{

	thread_lock(td);
	sched_prio(td, prio);
	thread_unlock(td);
}

epoch_t
epoch_alloc(const char *name, int flags)
{
	epoch_t epoch;

	if (__predict_false(!inited))
		panic("%s called too early in boot", __func__);
	epoch = malloc(sizeof(struct epoch), M_EPOCH, M_ZERO | M_WAITOK);
	ck_epoch_init(&epoch->e_epoch);
	epoch_ctor(epoch);
	MPASS(epoch_count < MAX_EPOCHS - 2);
	epoch->e_flags = flags;
	epoch->e_idx = epoch_count;
	epoch->e_name = name;
	sx_init(&epoch->e_drain_sx, "epoch-drain-sx");
	mtx_init(&epoch->e_drain_mtx, "epoch-drain-mtx", NULL, MTX_DEF);
	allepochs[epoch_count++] = epoch;
	return (epoch);
}

void
epoch_free(epoch_t epoch)
{

	epoch_drain_callbacks(epoch);
	allepochs[epoch->e_idx] = NULL;
	epoch_wait(global_epoch);
	uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
	mtx_destroy(&epoch->e_drain_mtx);
	sx_destroy(&epoch->e_drain_sx);
	free(epoch, M_EPOCH);
}
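
/*
 * Illustrative sketch: a subsystem normally allocates its epoch once at
 * attach time and releases it on detach, e.g.
 *
 *	foo_epoch = epoch_alloc("foo", EPOCH_PREEMPT);
 *	...
 *	epoch_free(foo_epoch);
 *
 * "foo_epoch" is a placeholder name; epoch_free() drains any outstanding
 * callbacks via epoch_drain_callbacks() before the epoch is reclaimed.
 */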
static epoch_record_t
epoch_currecord(epoch_t epoch)
{

	return (zpcpu_get_cpu(epoch->e_pcpu_record, curcpu));
}

#define INIT_CHECK(epoch)					\
	do {							\
		if (__predict_false((epoch) == NULL))		\
			return;					\
	} while (0)

void
_epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
{
	struct epoch_record *er;
	struct thread *td;

	MPASS(cold || epoch != NULL);
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	td = curthread;
	MPASS((vm_offset_t)et >= td->td_kstack &&
	    (vm_offset_t)et + sizeof(struct epoch_tracker) <=
	    td->td_kstack + td->td_kstack_pages * PAGE_SIZE);

	INIT_CHECK(epoch);
#ifdef EPOCH_TRACE
	epoch_trace_enter(td, epoch, et, file, line);
#endif
	et->et_td = td;
	THREAD_NO_SLEEPING();
	critical_enter();
	sched_pin();
	td->td_pre_epoch_prio = td->td_priority;
	er = epoch_currecord(epoch);
	TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link);
	ck_epoch_begin(&er->er_record, &et->et_section);
	critical_exit();
}
void
epoch_enter(epoch_t epoch)
{
	epoch_record_t er;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	critical_enter();
	er = epoch_currecord(epoch);
	ck_epoch_begin(&er->er_record, NULL);
}

void
_epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
{
	struct epoch_record *er;
	struct thread *td;

	INIT_CHECK(epoch);
	td = curthread;
	critical_enter();
	sched_unpin();
	THREAD_SLEEPING_OK();
	er = epoch_currecord(epoch);
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	MPASS(et != NULL);
	MPASS(et->et_td == td);
#ifdef INVARIANTS
	et->et_td = (void*)0xDEADBEEF;
#endif
	ck_epoch_end(&er->er_record, &et->et_section);
	TAILQ_REMOVE(&er->er_tdlist, et, et_link);
	er->er_gen++;
	if (__predict_false(td->td_pre_epoch_prio != td->td_priority))
		epoch_adjust_prio(td, td->td_pre_epoch_prio);
	critical_exit();
#ifdef EPOCH_TRACE
	epoch_trace_exit(td, epoch, et, file, line);
#endif
}

void
epoch_exit(epoch_t epoch)
{
	epoch_record_t er;

	INIT_CHECK(epoch);
	er = epoch_currecord(epoch);
	ck_epoch_end(&er->er_record, NULL);
	critical_exit();
}
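
/*
 * Illustrative sketch of a preemptible read section: the caller supplies
 * an epoch_tracker that lives on its own stack for the duration of the
 * section, e.g.
 *
 *	struct epoch_tracker et;
 *
 *	epoch_enter_preempt(foo_epoch, &et);
 *	...walk epoch-protected structures; preemption is allowed,
 *	   sleeping is not...
 *	epoch_exit_preempt(foo_epoch, &et);
 *
 * "foo_epoch" stands for any epoch allocated with EPOCH_PREEMPT.
 */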
/*
 * epoch_block_handler_preempt() is a callback from the CK code when another
 * thread is currently in an epoch section.
 */
static void
epoch_block_handler_preempt(struct ck_epoch *global __unused,
    ck_epoch_record_t *cr, void *arg __unused)
{
	epoch_record_t record;
	struct thread *td, *owner, *curwaittd;
	struct epoch_tracker *tdwait;
	struct turnstile *ts;
	struct lock_object *lock;
	int spincount, gen;
	int locksheld __unused;

	record = __containerof(cr, struct epoch_record, er_record);
	td = curthread;
	locksheld = td->td_locks;
	spincount = 0;
	counter_u64_add(block_count, 1);
	/*
	 * We lost a race and there's no longer any threads
	 * on the CPU in an epoch section.
	 */
	if (TAILQ_EMPTY(&record->er_tdlist))
		return;

	if (record->er_cpuid != curcpu) {
		/*
		 * If the head of the list is running, we can wait for it
		 * to remove itself from the list and thus save us the
		 * overhead of a migration
		 */
		gen = record->er_gen;
		thread_unlock(td);
		/*
		 * We can't actually check if the waiting thread is running
		 * so we simply poll for it to exit before giving up and
		 * migrating.
		 */
		do {
			cpu_spinwait();
		} while (!TAILQ_EMPTY(&record->er_tdlist) &&
		    gen == record->er_gen &&
		    spincount++ < MAX_ADAPTIVE_SPIN);
		thread_lock(td);
		/*
		 * If the generation has changed we can poll again
		 * otherwise we need to migrate.
		 */
		if (gen != record->er_gen)
			return;
		/*
		 * Being on the same CPU as that of the record on which
		 * we need to wait allows us access to the thread
		 * list associated with that CPU. We can then examine the
		 * oldest thread in the queue and wait on its turnstile
		 * until it resumes and so on until a grace period
		 * elapses.
		 */
		counter_u64_add(migrate_count, 1);
		sched_bind(td, record->er_cpuid);
		/*
		 * At this point we need to return to the ck code
		 * to scan to see if a grace period has elapsed.
		 * We can't move on to check the thread list, because
		 * in the meantime new threads may have arrived that
		 * in fact belong to a different epoch.
		 */
		return;
	}
	/*
	 * Try to find a thread in an epoch section on this CPU
	 * waiting on a turnstile. Otherwise find the lowest
	 * priority thread (highest prio value) and drop our priority
	 * to match to allow it to run.
	 */
	TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
		/*
		 * Propagate our priority to any other waiters to prevent us
		 * from starving them. They will have their original priority
		 * restore on exit from epoch_wait().
		 */
		curwaittd = tdwait->et_td;
		if (!TD_IS_INHIBITED(curwaittd) && curwaittd->td_priority > td->td_priority) {
			critical_enter();
			thread_unlock(td);
			thread_lock(curwaittd);
			sched_prio(curwaittd, td->td_priority);
			thread_unlock(curwaittd);
			thread_lock(td);
			critical_exit();
		}
		if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
		    ((ts = curwaittd->td_blocked) != NULL)) {
			/*
			 * We unlock td to allow turnstile_wait to reacquire
			 * the thread lock. Before unlocking it we enter a
			 * critical section to prevent preemption after we
			 * reenable interrupts by dropping the thread lock in
			 * order to prevent curwaittd from getting to run.
			 */
			critical_enter();
			thread_unlock(td);

			if (turnstile_lock(ts, &lock, &owner)) {
				if (ts == curwaittd->td_blocked) {
					MPASS(TD_IS_INHIBITED(curwaittd) &&
					    TD_ON_LOCK(curwaittd));
					critical_exit();
					turnstile_wait(ts, owner,
					    curwaittd->td_tsqueue);
					counter_u64_add(turnstile_count, 1);
					thread_lock(td);
					return;
				}
				turnstile_unlock(ts, lock);
			}
			thread_lock(td);
			critical_exit();
			KASSERT(td->td_locks == locksheld,
			    ("%d extra locks held", td->td_locks - locksheld));
		}
	}
	/*
	 * We didn't find any threads actually blocked on a lock
	 * so we have nothing to do except context switch away.
	 */
	counter_u64_add(switch_count, 1);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

	/*
	 * Release the thread lock while yielding to
	 * allow other threads to acquire the lock
	 * pointed to by TDQ_LOCKPTR(td). Else a
	 * deadlock like situation might happen. (HPS)
	 */
	thread_unlock(td);
	thread_lock(td);
}
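
/*
 * epoch_wait_preempt() returns once every thread that was inside a section
 * of this preemptible epoch at the time of the call has exited it.  The
 * block handler above may migrate the waiter between CPUs and lend its
 * priority to section holders, so this can be a long-running operation.
 */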
void
epoch_wait_preempt(epoch_t epoch)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	int old_prio;
	int locks __unused;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	td = curthread;
#ifdef INVARIANTS
	locks = curthread->td_locks;
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	if ((epoch->e_flags & EPOCH_LOCKED) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "epoch_wait() can be long running");
	KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle "
	    "of an epoch section of the same epoch"));
#endif
	DROP_GIANT();
	thread_lock(td);

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt,
	    NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);
	PICKUP_GIANT();
	KASSERT(td->td_locks == locks,
	    ("%d residual locks held", td->td_locks - locks));
}
static void
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
    void *arg __unused)
{
	cpu_spinwait();
}

void
epoch_wait(epoch_t epoch)
{

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	MPASS(epoch->e_flags == 0);
	critical_enter();
	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
	critical_exit();
}
void
epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t))
{
	epoch_record_t er;
	ck_epoch_entry_t *cb;

	cb = (void *)ctx;

	MPASS(callback);
	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		goto boottime;
#if !defined(EARLY_AP_STARTUP)
	if (__predict_false(inited < 2))
		goto boottime;
#endif

	critical_enter();
	*DPCPU_PTR(epoch_cb_count) += 1;
	er = epoch_currecord(epoch);
	ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
	critical_exit();
	return;
boottime:
	callback(ctx);
}
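
/*
 * Illustrative sketch of deferred reclamation: the caller embeds a struct
 * epoch_context in the object and recovers the object in the callback, as
 * epoch_drain_cb() below does for its record:
 *
 *	struct foo {
 *		...
 *		struct epoch_context f_ctx;
 *	};
 *
 *	static void
 *	foo_free_cb(epoch_context_t ctx)
 *	{
 *		free(__containerof(ctx, struct foo, f_ctx), M_FOO);
 *	}
 *
 *	epoch_call(foo_epoch, &foo->f_ctx, foo_free_cb);
 *
 * "struct foo", "M_FOO" and "foo_epoch" are placeholders.
 */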
static void
epoch_call_task(void *arg __unused)
{
	ck_stack_entry_t *cursor, *head, *next;
	ck_epoch_record_t *record;
	epoch_record_t er;
	epoch_t epoch;
	ck_stack_t cb_stack;
	int i, npending, total;

	ck_stack_init(&cb_stack);
	critical_enter();
	epoch_enter(global_epoch);
	for (total = i = 0; i < epoch_count; i++) {
		if (__predict_false((epoch = allepochs[i]) == NULL))
			continue;
		er = epoch_currecord(epoch);
		record = &er->er_record;
		if ((npending = record->n_pending) == 0)
			continue;
		ck_epoch_poll_deferred(record, &cb_stack);
		total += npending - record->n_pending;
	}
	epoch_exit(global_epoch);
	*DPCPU_PTR(epoch_cb_count) -= total;
	critical_exit();

	counter_u64_add(epoch_call_count, total);
	counter_u64_add(epoch_call_task_count, 1);

	head = ck_stack_batch_pop_npsc(&cb_stack);
	for (cursor = head; cursor != NULL; cursor = next) {
		struct ck_epoch_entry *entry =
		    ck_epoch_entry_container(cursor);

		next = CK_STACK_NEXT(cursor);
		entry->function(entry);
	}
}
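
/*
 * Report whether curthread is currently inside a section of the given
 * preemptible epoch.  Intended for assertions, e.g. MPASS(in_epoch(foo_epoch))
 * with "foo_epoch" a placeholder; with dump_onfail set, the per-CPU tracker
 * list is printed when the check fails (INVARIANTS only).
 */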
int
in_epoch_verbose(epoch_t epoch, int dump_onfail)
{
	struct epoch_tracker *tdwait;
	struct thread *td;
	epoch_record_t er;

	td = curthread;
	if (THREAD_CAN_SLEEP())
		return (0);
	if (__predict_false((epoch) == NULL))
		return (0);
	critical_enter();
	er = epoch_currecord(epoch);
	TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
		if (tdwait->et_td == td) {
			critical_exit();
			return (1);
		}
#ifdef INVARIANTS
	if (dump_onfail) {
		MPASS(td->td_pinned);
		printf("cpu: %d id: %d\n", curcpu, td->td_tid);
		TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
			printf("td_tid: %d ", tdwait->et_td->td_tid);
		printf("\n");
	}
#endif
	critical_exit();
	return (0);
}

int
in_epoch(epoch_t epoch)
{
	return (in_epoch_verbose(epoch, 0));
}
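
/*
 * Draining schedules epoch_drain_cb() on every CPU's record and sleeps
 * until each callback has run and decremented e_drain_count to zero.
 */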
static void
epoch_drain_cb(struct epoch_context *ctx)
{
	struct epoch *epoch =
	    __containerof(ctx, struct epoch_record, er_drain_ctx)->er_parent;

	if (atomic_fetchadd_int(&epoch->e_drain_count, -1) == 1) {
		mtx_lock(&epoch->e_drain_mtx);
		wakeup(epoch);
		mtx_unlock(&epoch->e_drain_mtx);
	}
}
void
epoch_drain_callbacks(epoch_t epoch)
{
	epoch_record_t er;
	struct thread *td;
	int was_bound;
	int old_pinned;
	int old_cpu;
	int cpu;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "epoch_drain_callbacks() may sleep!");

	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		return;
#if !defined(EARLY_AP_STARTUP)
	if (__predict_false(inited < 2))
		return;
#endif
	DROP_GIANT();

	sx_xlock(&epoch->e_drain_sx);
	mtx_lock(&epoch->e_drain_mtx);

	td = curthread;
	thread_lock(td);
	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;

	CPU_FOREACH(cpu)
		epoch->e_drain_count++;
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		sched_bind(td, cpu);
		epoch_call(epoch, &er->er_drain_ctx, &epoch_drain_cb);
	}

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	thread_unlock(td);

	while (epoch->e_drain_count != 0)
		msleep(epoch, &epoch->e_drain_mtx, PZERO, "EDRAIN", 0);

	mtx_unlock(&epoch->e_drain_mtx);
	sx_xunlock(&epoch->e_drain_sx);

	PICKUP_GIANT();
}