/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#ifdef EPOCH_TRACE
#include <machine/stdarg.h>
#include <sys/stack.h>
#include <sys/tree.h>
#endif
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#include <machine/stack.h>

#include <ck_epoch.h>
#ifdef __amd64__
#define EPOCH_ALIGN CACHE_LINE_SIZE*2
#else
#define EPOCH_ALIGN CACHE_LINE_SIZE
#endif
TAILQ_HEAD (epoch_tdlist, epoch_tracker);
typedef struct epoch_record {
	ck_epoch_record_t er_record;
	struct epoch_context er_drain_ctx;
	struct epoch *er_parent;
	volatile struct epoch_tdlist er_tdlist;
	volatile uint32_t er_gen;
	uint32_t er_cpuid;
#ifdef INVARIANTS
	/* Used to verify record ownership for non-preemptible epochs. */
	struct thread *er_td;
#endif
} __aligned(EPOCH_ALIGN) *epoch_record_t;
struct epoch {
	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
	epoch_record_t e_pcpu_record;
	int e_in_use;
	int e_flags;
	struct sx e_drain_sx;
	struct mtx e_drain_mtx;
	volatile int e_drain_count;
	const char *e_name;
};
/* arbitrary --- needs benchmarking */
#define MAX_ADAPTIVE_SPIN 100
#define MAX_EPOCHS 64
CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "epoch stats");
static counter_u64_t block_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
static counter_u64_t epoch_call_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
    &epoch_call_count, "# of times a callback was deferred");
static counter_u64_t epoch_call_task_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
    &epoch_call_task_count, "# of times a callback task was run");
TAILQ_HEAD (threadlist, thread);

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

static struct epoch epoch_array[MAX_EPOCHS];

DPCPU_DEFINE(struct grouptask, epoch_cb_task);
DPCPU_DEFINE(int, epoch_cb_count);

static __read_mostly int inited;
__read_mostly epoch_t global_epoch;
__read_mostly epoch_t global_epoch_preempt;

static void epoch_call_task(void *context __unused);
static uma_zone_t pcpu_zone_record;

static struct sx epoch_sx;

#define EPOCH_LOCK() sx_xlock(&epoch_sx)
#define EPOCH_UNLOCK() sx_xunlock(&epoch_sx)
static epoch_record_t
epoch_currecord(epoch_t epoch)
{

	return (zpcpu_get(epoch->e_pcpu_record));
}
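
/*
 * Illustrative sketch (not part of the original file): epoch_currecord()
 * resolves the per-CPU record for the CPU the caller is currently running
 * on, so callers must first prevent migration.  A hypothetical caller:
 *
 *	critical_enter();
 *	er = epoch_currecord(epoch);	// record for curcpu, now stable
 *	// ... use er ...
 *	critical_exit();
 */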
#ifdef EPOCH_TRACE
struct stackentry {
	RB_ENTRY(stackentry) se_node;
	struct stack se_stack;
};
static int
stackentry_compare(struct stackentry *a, struct stackentry *b)
{

	if (a->se_stack.depth > b->se_stack.depth)
		return (1);
	if (a->se_stack.depth < b->se_stack.depth)
		return (-1);
	for (int i = 0; i < a->se_stack.depth; i++) {
		if (a->se_stack.pcs[i] > b->se_stack.pcs[i])
			return (1);
		if (a->se_stack.pcs[i] < b->se_stack.pcs[i])
			return (-1);
	}

	return (0);
}
RB_HEAD(stacktree, stackentry) epoch_stacks = RB_INITIALIZER(&epoch_stacks);
RB_GENERATE_STATIC(stacktree, stackentry, se_node, stackentry_compare);

static struct mtx epoch_stacks_lock;
MTX_SYSINIT(epochstacks, &epoch_stacks_lock, "epoch_stacks", MTX_DEF);

static bool epoch_trace_stack_print = true;
SYSCTL_BOOL(_kern_epoch, OID_AUTO, trace_stack_print, CTLFLAG_RWTUN,
    &epoch_trace_stack_print, 0, "Print stack traces on epoch reports");
static void epoch_trace_report(const char *fmt, ...) __printflike(1, 2);
static inline void
epoch_trace_report(const char *fmt, ...)
{
	va_list ap;
	struct stackentry se, *new;

	stack_save(&se.se_stack);

	/* Tree is never reduced - go lockless. */
	if (RB_FIND(stacktree, &epoch_stacks, &se) != NULL)
		return;

	new = malloc(sizeof(*new), M_STACK, M_NOWAIT);
	if (new != NULL) {
		bcopy(&se.se_stack, &new->se_stack, sizeof(struct stack));

		mtx_lock(&epoch_stacks_lock);
		new = RB_INSERT(stacktree, &epoch_stacks, new);
		mtx_unlock(&epoch_stacks_lock);
		if (new != NULL)
			free(new, M_STACK);
	}

	va_start(ap, fmt);
	(void)vprintf(fmt, ap);
	va_end(ap);
	if (epoch_trace_stack_print)
		stack_print_ddb(&se.se_stack);
}
static inline void
epoch_trace_enter(struct thread *td, epoch_t epoch, epoch_tracker_t et,
    const char *file, int line)
{
	epoch_tracker_t iet;

	SLIST_FOREACH(iet, &td->td_epochs, et_tlink) {
		if (iet->et_epoch != epoch)
			continue;
		epoch_trace_report("Recursively entering epoch %s "
		    "at %s:%d, previously entered at %s:%d\n",
		    epoch->e_name, file, line,
		    iet->et_file, iet->et_line);
	}
	et->et_epoch = epoch;
	et->et_file = file;
	et->et_line = line;
	et->et_flags = 0;
	SLIST_INSERT_HEAD(&td->td_epochs, et, et_tlink);
}
static inline void
epoch_trace_exit(struct thread *td, epoch_t epoch, epoch_tracker_t et,
    const char *file, int line)
{

	if (SLIST_FIRST(&td->td_epochs) != et) {
		epoch_trace_report("Exiting epoch %s in a non-nested order "
		    "at %s:%d. Most recently entered %s at %s:%d\n",
		    epoch->e_name, file, line,
		    SLIST_FIRST(&td->td_epochs)->et_epoch->e_name,
		    SLIST_FIRST(&td->td_epochs)->et_file,
		    SLIST_FIRST(&td->td_epochs)->et_line);
		/* This will panic if et is not anywhere on td_epochs. */
		SLIST_REMOVE(&td->td_epochs, et, epoch_tracker, et_tlink);
	} else
		SLIST_REMOVE_HEAD(&td->td_epochs, et_tlink);
	if (et->et_flags & ET_REPORT_EXIT)
		printf("Td %p exiting epoch %s at %s:%d\n", td, epoch->e_name,
		    file, line);
}
/* Used by assertions that check thread state before going to sleep. */
void
epoch_trace_list(struct thread *td)
{
	epoch_tracker_t iet;

	SLIST_FOREACH(iet, &td->td_epochs, et_tlink)
		printf("Epoch %s entered at %s:%d\n", iet->et_epoch->e_name,
		    iet->et_file, iet->et_line);
}
void
epoch_where_report(epoch_t epoch)
{
	epoch_record_t er;
	struct epoch_tracker *tdwait;

	MPASS(epoch != NULL);
	MPASS((epoch->e_flags & EPOCH_PREEMPT) != 0);
	MPASS(!THREAD_CAN_SLEEP());
	critical_enter();
	er = epoch_currecord(epoch);
	TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
		if (tdwait->et_td == curthread)
			break;
	critical_exit();
	if (tdwait != NULL) {
		tdwait->et_flags |= ET_REPORT_EXIT;
		printf("Td %p entered epoch %s at %s:%d\n", curthread,
		    epoch->e_name, tdwait->et_file, tdwait->et_line);
	}
}
#endif /* EPOCH_TRACE */
static void
epoch_init(void *arg __unused)
{
	int cpu;

	block_count = counter_u64_alloc(M_WAITOK);
	migrate_count = counter_u64_alloc(M_WAITOK);
	turnstile_count = counter_u64_alloc(M_WAITOK);
	switch_count = counter_u64_alloc(M_WAITOK);
	epoch_call_count = counter_u64_alloc(M_WAITOK);
	epoch_call_task_count = counter_u64_alloc(M_WAITOK);

	pcpu_zone_record = uma_zcreate("epoch_record pcpu",
	    sizeof(struct epoch_record), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	CPU_FOREACH(cpu) {
		GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0,
		    epoch_call_task, NULL);
		taskqgroup_attach_cpu(qgroup_softirq,
		    DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, NULL, NULL,
		    "epoch call task");
	}
#ifdef EPOCH_TRACE
	SLIST_INIT(&thread0.td_epochs);
#endif
	sx_init(&epoch_sx, "epoch-sx");
	inited = 1;
	global_epoch = epoch_alloc("Global", 0);
	global_epoch_preempt = epoch_alloc("Global preemptible", EPOCH_PREEMPT);
}
SYSINIT(epoch, SI_SUB_EPOCH, SI_ORDER_FIRST, epoch_init, NULL);

#if !defined(EARLY_AP_STARTUP)
static void
epoch_init_smp(void *dummy __unused)
{
	inited = 2;
}
SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
#endif
static void
epoch_ctor(epoch_t epoch)
{
	epoch_record_t er;
	int cpu;

	epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		bzero(er, sizeof(*er));
		ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
		er->er_cpuid = cpu;
		er->er_parent = epoch;
	}
}

static void
epoch_adjust_prio(struct thread *td, u_char prio)
{

	thread_lock(td);
	sched_prio(td, prio);
	thread_unlock(td);
}
epoch_t
epoch_alloc(const char *name, int flags)
{
	epoch_t epoch;
	int i;

	MPASS(name != NULL);

	if (__predict_false(!inited))
		panic("%s called too early in boot", __func__);

	EPOCH_LOCK();

	/*
	 * Find a free index in the epoch array. If no free index is
	 * found, try to use the index after the last one.
	 */
	for (i = 0;; i++) {
		/*
		 * If too many epochs are currently allocated,
		 * return NULL.
		 */
		if (i == MAX_EPOCHS) {
			epoch = NULL;
			goto done;
		}
		if (epoch_array[i].e_in_use == 0)
			break;
	}

	epoch = epoch_array + i;
	ck_epoch_init(&epoch->e_epoch);
	epoch_ctor(epoch);
	epoch->e_flags = flags;
	epoch->e_name = name;
	sx_init(&epoch->e_drain_sx, "epoch-drain-sx");
	mtx_init(&epoch->e_drain_mtx, "epoch-drain-mtx", NULL, MTX_DEF);

	/*
	 * Set e_in_use last, because when this field is set the
	 * epoch_call_task() function will start scanning this epoch
	 * structure.
	 */
	atomic_store_rel_int(&epoch->e_in_use, 1);
done:
	EPOCH_UNLOCK();
	return (epoch);
}
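
/*
 * Usage sketch (illustrative; "foo_epoch" and foo_init() are hypothetical
 * names, not part of this file): a subsystem typically allocates one
 * preemptible epoch at initialization and uses it for all of its
 * read-side sections:
 *
 *	static epoch_t foo_epoch;
 *
 *	static void
 *	foo_init(void *dummy __unused)
 *	{
 *		foo_epoch = epoch_alloc("foo", EPOCH_PREEMPT);
 *	}
 */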
void
epoch_free(epoch_t epoch)
{
#ifdef INVARIANTS
	int cpu;
#endif

	EPOCH_LOCK();

	MPASS(epoch->e_in_use != 0);

	epoch_drain_callbacks(epoch);

	atomic_store_rel_int(&epoch->e_in_use, 0);
	/*
	 * Make sure the epoch_call_task() function sees e_in_use equal
	 * to zero, by calling epoch_wait() on the global_epoch:
	 */
	epoch_wait(global_epoch);
#ifdef INVARIANTS
	CPU_FOREACH(cpu) {
		epoch_record_t er;

		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);

		/*
		 * Sanity check: none of the records should be in use anymore.
		 * We drained callbacks above and freeing the pcpu records is
		 * imminent.
		 */
		MPASS(er->er_td == NULL);
		MPASS(TAILQ_EMPTY(&er->er_tdlist));
	}
#endif
	uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
	mtx_destroy(&epoch->e_drain_mtx);
	sx_destroy(&epoch->e_drain_sx);
	memset(epoch, 0, sizeof(*epoch));

	EPOCH_UNLOCK();
}
#define INIT_CHECK(epoch)					\
	do {							\
		if (__predict_false((epoch) == NULL))		\
			return;					\
	} while (0)
void
_epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
{
	struct epoch_record *er;
	struct thread *td;

	MPASS(cold || epoch != NULL);
	td = curthread;
	MPASS(kstack_contains(td, (vm_offset_t)et, sizeof(*et)));

	INIT_CHECK(epoch);
	MPASS(epoch->e_flags & EPOCH_PREEMPT);

#ifdef EPOCH_TRACE
	epoch_trace_enter(td, epoch, et, file, line);
#endif
	et->et_td = td;
	THREAD_NO_SLEEPING();
	critical_enter();
	sched_pin();
	et->et_old_priority = td->td_priority;
	er = epoch_currecord(epoch);
	/* Record-level tracking is reserved for non-preemptible epochs. */
	MPASS(er->er_td == NULL);
	TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link);
	ck_epoch_begin(&er->er_record, &et->et_section);
	critical_exit();
}
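
/*
 * Usage sketch (illustrative, with the hypothetical foo_epoch from above):
 * preemptible read-side sections pass a caller-provided tracker that must
 * live on the thread's kstack, which the kstack_contains() assertion above
 * enforces:
 *
 *	struct epoch_tracker et;
 *
 *	epoch_enter_preempt(foo_epoch, &et);
 *	// read-side work; may be preempted but must not sleep
 *	epoch_exit_preempt(foo_epoch, &et);
 */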
void
epoch_enter(epoch_t epoch)
{
	epoch_record_t er;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	critical_enter();
	er = epoch_currecord(epoch);
#ifdef INVARIANTS
	if (er->er_record.active == 0) {
		MPASS(er->er_td == NULL);
		er->er_td = curthread;
	} else {
		/* We've recursed, just make sure our accounting isn't wrong. */
		MPASS(er->er_td == curthread);
	}
#endif
	ck_epoch_begin(&er->er_record, NULL);
}
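
/*
 * Usage sketch (illustrative): non-preemptible sections need no tracker;
 * the critical section entered above keeps the thread on its CPU for the
 * duration, so sections must be short and must not sleep:
 *
 *	epoch_enter(global_epoch);
 *	// short, non-sleeping read-side work
 *	epoch_exit(global_epoch);
 */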
void
_epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
{
	struct epoch_record *er;
	struct thread *td;

	INIT_CHECK(epoch);
	td = curthread;
	critical_enter();
	sched_unpin();
	THREAD_SLEEPING_OK();
	er = epoch_currecord(epoch);
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	MPASS(et != NULL);
	MPASS(et->et_td == td);
#ifdef INVARIANTS
	et->et_td = (void*)0xDEADBEEF;
	/* Record-level tracking is reserved for non-preemptible epochs. */
	MPASS(er->er_td == NULL);
#endif
	ck_epoch_end(&er->er_record, &et->et_section);
	TAILQ_REMOVE(&er->er_tdlist, et, et_link);
	er->er_gen++;
	if (__predict_false(et->et_old_priority != td->td_priority))
		epoch_adjust_prio(td, et->et_old_priority);
	critical_exit();
#ifdef EPOCH_TRACE
	epoch_trace_exit(td, epoch, et, file, line);
#endif
}
void
epoch_exit(epoch_t epoch)
{
	epoch_record_t er;

	INIT_CHECK(epoch);
	er = epoch_currecord(epoch);
	ck_epoch_end(&er->er_record, NULL);
#ifdef INVARIANTS
	MPASS(er->er_td == curthread);
	if (er->er_record.active == 0)
		er->er_td = NULL;
#endif
	critical_exit();
}
/*
 * epoch_block_handler_preempt() is a callback from the CK code when another
 * thread is currently in an epoch section.
 */
static void
epoch_block_handler_preempt(struct ck_epoch *global __unused,
    ck_epoch_record_t *cr, void *arg __unused)
{
	epoch_record_t record;
	struct thread *td, *owner, *curwaittd;
	struct epoch_tracker *tdwait;
	struct turnstile *ts;
	struct lock_object *lock;
	int spincount, gen;
	int locksheld __unused;

	record = __containerof(cr, struct epoch_record, er_record);
	td = curthread;
	locksheld = td->td_locks;
	spincount = 0;
	counter_u64_add(block_count, 1);
	/*
	 * We lost a race and there are no longer any threads
	 * on the CPU in an epoch section.
	 */
	if (TAILQ_EMPTY(&record->er_tdlist))
		return;
	if (record->er_cpuid != curcpu) {
		/*
		 * If the head of the list is running, we can wait for it
		 * to remove itself from the list and thus save us the
		 * overhead of a migration.
		 */
		gen = record->er_gen;
		thread_unlock(td);
		/*
		 * We can't actually check if the waiting thread is running
		 * so we simply poll for it to exit before giving up and
		 * migrating.
		 */
		do {
			cpu_spinwait();
		} while (!TAILQ_EMPTY(&record->er_tdlist) &&
		    gen == record->er_gen &&
		    spincount++ < MAX_ADAPTIVE_SPIN);
		thread_lock(td);
		/*
		 * If the generation has changed we can poll again;
		 * otherwise we need to migrate.
		 */
		if (gen != record->er_gen)
			return;
		/*
		 * Being on the same CPU as that of the record on which
		 * we need to wait allows us access to the thread
		 * list associated with that CPU. We can then examine the
		 * oldest thread in the queue and wait on its turnstile
		 * until it resumes and so on until a grace period
		 * elapses.
		 */
		counter_u64_add(migrate_count, 1);
		sched_bind(td, record->er_cpuid);
		/*
		 * At this point we need to return to the ck code
		 * to scan to see if a grace period has elapsed.
		 * We can't move on to check the thread list, because
		 * in the meantime new threads may have arrived that
		 * in fact belong to a different epoch.
		 */
		return;
	}
	/*
	 * Try to find a thread in an epoch section on this CPU
	 * waiting on a turnstile. Otherwise find the lowest
	 * priority thread (highest prio value) and drop our priority
	 * to match to allow it to run.
	 */
	TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
		/*
		 * Propagate our priority to any other waiters to prevent us
		 * from starving them. They will have their original priority
		 * restored on exit from epoch_wait().
		 */
		curwaittd = tdwait->et_td;
		if (!TD_IS_INHIBITED(curwaittd) &&
		    curwaittd->td_priority > td->td_priority) {
			critical_enter();
			thread_unlock(td);
			thread_lock(curwaittd);
			sched_prio(curwaittd, td->td_priority);
			thread_unlock(curwaittd);
			thread_lock(td);
			critical_exit();
		}
		if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
		    ((ts = curwaittd->td_blocked) != NULL)) {
			/*
			 * We unlock td to allow turnstile_wait to reacquire
			 * the thread lock. Before unlocking it we enter a
			 * critical section to prevent preemption after we
			 * reenable interrupts by dropping the thread lock in
			 * order to prevent curwaittd from getting to run.
			 */
			critical_enter();
			thread_unlock(td);

			if (turnstile_lock(ts, &lock, &owner)) {
				if (ts == curwaittd->td_blocked) {
					MPASS(TD_IS_INHIBITED(curwaittd) &&
					    TD_ON_LOCK(curwaittd));
					critical_exit();
					turnstile_wait(ts, owner,
					    curwaittd->td_tsqueue);
					counter_u64_add(turnstile_count, 1);
					thread_lock(td);
					return;
				}
				turnstile_unlock(ts, lock);
			}
			thread_lock(td);
			critical_exit();
			KASSERT(td->td_locks == locksheld,
			    ("%d extra locks held", td->td_locks - locksheld));
		}
	}
	/*
	 * We didn't find any threads actually blocked on a lock
	 * so we have nothing to do except context switch away.
	 */
	counter_u64_add(switch_count, 1);
	mi_switch(SW_VOL | SWT_RELINQUISH);
	/*
	 * It is important the thread lock is dropped while yielding
	 * to allow other threads to acquire the lock pointed to by
	 * TDQ_LOCKPTR(td). Currently mi_switch() will unlock the
	 * thread lock before returning. Otherwise a deadlock-like
	 * situation might happen.
	 */
	thread_lock(td);
}
void
epoch_wait_preempt(epoch_t epoch)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;
	int locks __unused;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	td = curthread;
#ifdef INVARIANTS
	locks = curthread->td_locks;
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	if ((epoch->e_flags & EPOCH_LOCKED) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "epoch_wait() can be long running");
	KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle "
	    "of an epoch section of the same epoch"));
#endif
	DROP_GIANT();
	thread_lock(td);

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt,
	    NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);
	PICKUP_GIANT();
	KASSERT(td->td_locks == locks,
	    ("%d residual locks held", td->td_locks - locks));
}
static void
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
    void *arg __unused)
{
	cpu_spinwait();
}

void
epoch_wait(epoch_t epoch)
{

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	MPASS(epoch->e_flags == 0);
	critical_enter();
	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
	critical_exit();
}
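
/*
 * Usage sketch (illustrative; "struct foo", foo_link, and M_FOO are
 * hypothetical): synchronous reclamation unlinks an object, waits for a
 * grace period, then frees, knowing no section can still observe it:
 *
 *	LIST_REMOVE(foo, foo_link);
 *	epoch_wait(global_epoch);	// all prior sections have exited
 *	free(foo, M_FOO);
 */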
void
epoch_call(epoch_t epoch, epoch_callback_t callback, epoch_context_t ctx)
{
	epoch_record_t er;
	ck_epoch_entry_t *cb;

	cb = (void *)ctx;

	MPASS(callback);
	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		goto boottime;
#if !defined(EARLY_AP_STARTUP)
	if (__predict_false(inited < 2))
		goto boottime;
#endif

	critical_enter();
	*DPCPU_PTR(epoch_cb_count) += 1;
	er = epoch_currecord(epoch);
	ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
	critical_exit();
	return;
boottime:
	callback(ctx);
}
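
/*
 * Usage sketch (illustrative; "struct foo", foo_free_cb(), and M_FOO are
 * hypothetical): deferred reclamation embeds a struct epoch_context in
 * the object and frees it from the callback, which runs after a grace
 * period has elapsed:
 *
 *	struct foo {
 *		struct epoch_context foo_ctx;
 *		// ... payload ...
 *	};
 *
 *	static void
 *	foo_free_cb(epoch_context_t ctx)
 *	{
 *		free(__containerof(ctx, struct foo, foo_ctx), M_FOO);
 *	}
 *
 *	LIST_REMOVE(foo, foo_link);
 *	epoch_call(foo_epoch, foo_free_cb, &foo->foo_ctx);
 */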
static void
epoch_call_task(void *arg __unused)
{
	ck_stack_entry_t *cursor, *head, *next;
	ck_epoch_record_t *record;
	epoch_record_t er;
	epoch_t epoch;
	ck_stack_t cb_stack;
	int i, npending, total;

	ck_stack_init(&cb_stack);
	critical_enter();
	epoch_enter(global_epoch);
	for (total = i = 0; i != MAX_EPOCHS; i++) {
		epoch = epoch_array + i;
		if (__predict_false(
		    atomic_load_acq_int(&epoch->e_in_use) == 0))
			continue;
		er = epoch_currecord(epoch);
		record = &er->er_record;
		if ((npending = record->n_pending) == 0)
			continue;
		ck_epoch_poll_deferred(record, &cb_stack);
		total += npending - record->n_pending;
	}
	epoch_exit(global_epoch);
	*DPCPU_PTR(epoch_cb_count) -= total;
	critical_exit();

	counter_u64_add(epoch_call_count, total);
	counter_u64_add(epoch_call_task_count, 1);

	head = ck_stack_batch_pop_npsc(&cb_stack);
	for (cursor = head; cursor != NULL; cursor = next) {
		struct ck_epoch_entry *entry =
		    ck_epoch_entry_container(cursor);

		next = CK_STACK_NEXT(cursor);
		entry->function(entry);
	}
}
static int
in_epoch_verbose_preempt(epoch_t epoch, int dump_onfail)
{
	epoch_record_t er;
	struct epoch_tracker *tdwait;
	struct thread *td;

	MPASS(epoch != NULL);
	MPASS((epoch->e_flags & EPOCH_PREEMPT) != 0);
	td = curthread;
	if (THREAD_CAN_SLEEP())
		return (0);
	critical_enter();
	er = epoch_currecord(epoch);
	TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
		if (tdwait->et_td == td) {
			critical_exit();
			return (1);
		}
#ifdef INVARIANTS
	if (dump_onfail) {
		MPASS(td->td_pinned);
		printf("cpu: %d id: %d\n", curcpu, td->td_tid);
		TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
			printf("td_tid: %d ", tdwait->et_td->td_tid);
		printf("\n");
	}
#endif
	critical_exit();
	return (0);
}
#ifdef INVARIANTS
static void
epoch_assert_nocpu(epoch_t epoch, struct thread *td)
{
	epoch_record_t er;
	int cpu;
	bool crit;

	crit = td->td_critnest > 0;

	/* Check for a critical section mishap. */
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		KASSERT(er->er_td != td,
		    ("%s critical section in epoch '%s', from cpu %d",
		    (crit ? "exited" : "re-entered"), epoch->e_name, cpu));
	}
}
#else
#define epoch_assert_nocpu(e, td) do {} while (0)
#endif
int
in_epoch_verbose(epoch_t epoch, int dump_onfail)
{
	epoch_record_t er;
	struct thread *td;

	if (__predict_false((epoch) == NULL))
		return (0);
	if ((epoch->e_flags & EPOCH_PREEMPT) != 0)
		return (in_epoch_verbose_preempt(epoch, dump_onfail));

	/*
	 * The thread being in a critical section is a necessary
	 * condition to be correctly inside a non-preemptible epoch,
	 * so it's definitely not in this epoch.
	 */
	td = curthread;
	if (td->td_critnest == 0) {
		epoch_assert_nocpu(epoch, td);
		return (0);
	}

	/*
	 * The current cpu is in a critical section, so the epoch record will be
	 * stable for the rest of this function. Knowing that the record is not
	 * active is sufficient for knowing whether we're in this epoch or not,
	 * since it's a pcpu record.
	 */
	er = epoch_currecord(epoch);
	if (er->er_record.active == 0) {
		epoch_assert_nocpu(epoch, td);
		return (0);
	}

	MPASS(er->er_td == td);
	return (1);
}

int
in_epoch(epoch_t epoch)
{
	return (in_epoch_verbose(epoch, 0));
}
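
/*
 * Usage sketch (illustrative, with the hypothetical foo_epoch): in_epoch()
 * is typically used in assertions to verify that the caller holds the
 * expected epoch before touching epoch-protected data:
 *
 *	MPASS(in_epoch(foo_epoch));	// caller must be in a section
 */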
static void
epoch_drain_cb(struct epoch_context *ctx)
{
	struct epoch *epoch =
	    __containerof(ctx, struct epoch_record, er_drain_ctx)->er_parent;

	if (atomic_fetchadd_int(&epoch->e_drain_count, -1) == 1) {
		mtx_lock(&epoch->e_drain_mtx);
		wakeup(epoch);
		mtx_unlock(&epoch->e_drain_mtx);
	}
}
void
epoch_drain_callbacks(epoch_t epoch)
{
	epoch_record_t er;
	struct thread *td;
	int was_bound;
	int old_pinned;
	int old_cpu;
	int cpu;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "epoch_drain_callbacks() may sleep!");

	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		return;
#if !defined(EARLY_AP_STARTUP)
	/* too early in boot to have SMP started */
	if (__predict_false(inited < 2))
		return;
#endif
	/* Make sure that epoch is not freed under us */
	sx_xlock(&epoch->e_drain_sx);
	mtx_lock(&epoch->e_drain_mtx);

	td = curthread;
	thread_lock(td);
	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;

	CPU_FOREACH(cpu)
		epoch->e_drain_count++;
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		sched_bind(td, cpu);
		epoch_call(epoch, &epoch_drain_cb, &er->er_drain_ctx);
	}

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	thread_unlock(td);

	while (epoch->e_drain_count != 0)
		msleep(epoch, &epoch->e_drain_mtx, PZERO, "EDRAIN", 0);

	mtx_unlock(&epoch->e_drain_mtx);
	sx_xunlock(&epoch->e_drain_sx);
}
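
/*
 * Usage sketch (illustrative, with the hypothetical foo_epoch): a module
 * tearing down a subsystem can drain outstanding callbacks before
 * unmapping any code they might reference; note that epoch_free() above
 * already performs this drain itself:
 *
 *	epoch_drain_callbacks(foo_epoch);	// wait for pending callbacks
 *	epoch_free(foo_epoch);
 */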