/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#include <ck_epoch.h>
#ifdef __amd64__
#define	EPOCH_ALIGN	CACHE_LINE_SIZE*2
#else
#define	EPOCH_ALIGN	CACHE_LINE_SIZE
#endif
TAILQ_HEAD (epoch_tdlist, epoch_tracker);
typedef struct epoch_record {
	ck_epoch_record_t er_record;
	volatile struct epoch_tdlist er_tdlist;
	volatile uint32_t er_gen;
	uint32_t er_cpuid;
	/* fields above are part of KBI and cannot be modified */
	struct epoch_context er_drain_ctx;
	struct epoch *er_parent;
} __aligned(EPOCH_ALIGN)     *epoch_record_t;
struct epoch {
	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
	epoch_record_t e_pcpu_record;
	int	e_in_use;
	int	e_flags;
	/* fields above are part of KBI and cannot be modified */
	struct sx e_drain_sx;
	struct mtx e_drain_mtx;
	volatile int e_drain_count;
};
/* arbitrary --- needs benchmarking */
#define	MAX_ADAPTIVE_SPIN 100
#define	MAX_EPOCHS	64
CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");

/* Stats. */
static counter_u64_t block_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
static counter_u64_t epoch_call_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
    &epoch_call_count, "# of times a callback was deferred");
static counter_u64_t epoch_call_task_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
    &epoch_call_task_count, "# of times a callback task was run");
TAILQ_HEAD (threadlist, thread);

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

static struct epoch epoch_array[MAX_EPOCHS];

DPCPU_DEFINE(struct grouptask, epoch_cb_task);
DPCPU_DEFINE(int, epoch_cb_count);

static __read_mostly int inited;
__read_mostly epoch_t global_epoch;
__read_mostly epoch_t global_epoch_preempt;

static void epoch_call_task(void *context __unused);
static 	uma_zone_t pcpu_zone_record;

static struct sx epoch_sx;

#define	EPOCH_LOCK() sx_xlock(&epoch_sx)
#define	EPOCH_UNLOCK() sx_xunlock(&epoch_sx)
static void
epoch_init(void *arg __unused)
{
	int cpu;

	block_count = counter_u64_alloc(M_WAITOK);
	migrate_count = counter_u64_alloc(M_WAITOK);
	turnstile_count = counter_u64_alloc(M_WAITOK);
	switch_count = counter_u64_alloc(M_WAITOK);
	epoch_call_count = counter_u64_alloc(M_WAITOK);
	epoch_call_task_count = counter_u64_alloc(M_WAITOK);

	pcpu_zone_record = uma_zcreate("epoch_record pcpu",
	    sizeof(struct epoch_record), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	CPU_FOREACH(cpu) {
		GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0,
		    epoch_call_task, NULL);
		taskqgroup_attach_cpu(qgroup_softirq,
		    DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, -1,
		    "epoch call task");
	}
	sx_init(&epoch_sx, "epoch-sx");
	inited = 1;
	global_epoch = epoch_alloc(0);
	global_epoch_preempt = epoch_alloc(EPOCH_PREEMPT);
}
SYSINIT(epoch, SI_SUB_TASKQ + 1, SI_ORDER_FIRST, epoch_init, NULL);
#if !defined(EARLY_AP_STARTUP)
static void
epoch_init_smp(void *dummy __unused)
{
	inited = 2;
}
SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
#endif
static void
epoch_ctor(epoch_t epoch)
{
	epoch_record_t er;
	int cpu;

	epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		bzero(er, sizeof(*er));
		ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
		er->er_cpuid = cpu;
		er->er_parent = epoch;
	}
}
static void
epoch_adjust_prio(struct thread *td, u_char prio)
{

	thread_lock(td);
	sched_prio(td, prio);
	thread_unlock(td);
}
epoch_t
epoch_alloc(int flags)
{
	epoch_t epoch;
	int i;

	if (__predict_false(!inited))
		panic("%s called too early in boot", __func__);

	EPOCH_LOCK();

	/*
	 * Find a free index in the epoch array. If no free index is
	 * found, try to use the index after the last one.
	 */
	for (i = 0;; i++) {
		/*
		 * If too many epochs are currently allocated,
		 * return NULL.
		 */
		if (i == MAX_EPOCHS) {
			epoch = NULL;
			goto done;
		}
		if (epoch_array[i].e_in_use == 0)
			break;
	}

	epoch = epoch_array + i;
	ck_epoch_init(&epoch->e_epoch);
	epoch_ctor(epoch);
	epoch->e_flags = flags;
	sx_init(&epoch->e_drain_sx, "epoch-drain-sx");
	mtx_init(&epoch->e_drain_mtx, "epoch-drain-mtx", NULL, MTX_DEF);

	/*
	 * Set e_in_use last, because when this field is set the
	 * epoch_call_task() function will start scanning this epoch
	 * structure.
	 */
	atomic_store_rel_int(&epoch->e_in_use, 1);
done:
	EPOCH_UNLOCK();
	return (epoch);
}
void
epoch_free(epoch_t epoch)
{

	EPOCH_LOCK();

	MPASS(epoch->e_in_use != 0);

	epoch_drain_callbacks(epoch);

	atomic_store_rel_int(&epoch->e_in_use, 0);
	/*
	 * Make sure the epoch_call_task() function sees e_in_use equal
	 * to zero, by calling epoch_wait() on the global_epoch:
	 */
	epoch_wait(global_epoch);
	uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
	mtx_destroy(&epoch->e_drain_mtx);
	sx_destroy(&epoch->e_drain_sx);
	memset(epoch, 0, sizeof(*epoch));

	EPOCH_UNLOCK();
}
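
/*
 * Usage sketch (illustrative only, not part of the original file; the
 * "foo" names are hypothetical): a consumer typically allocates an
 * epoch once at initialization time and frees it at teardown:
 *
 *	static epoch_t foo_epoch;
 *
 *	foo_epoch = epoch_alloc(EPOCH_PREEMPT);	// at init
 *	epoch_free(foo_epoch);				// at teardown
 */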
static epoch_record_t
epoch_currecord(epoch_t epoch)
{

	return (zpcpu_get_cpu(epoch->e_pcpu_record, curcpu));
}
#define	INIT_CHECK(epoch)					\
	do {							\
		if (__predict_false((epoch) == NULL))		\
			return;					\
	} while (0)
void
epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et)
{
	struct epoch_record *er;
	struct thread *td;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
#ifdef EPOCH_TRACKER_DEBUG
	et->et_magic_pre = EPOCH_MAGIC0;
	et->et_magic_post = EPOCH_MAGIC1;
#endif
	td = curthread;
	et->et_td = td;
	td->td_epochnest++;
	critical_enter();
	sched_pin();

	td->td_pre_epoch_prio = td->td_priority;
	er = epoch_currecord(epoch);
	TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link);
	ck_epoch_begin(&er->er_record, &et->et_section);
	critical_exit();
}
void
epoch_enter(epoch_t epoch)
{
	struct thread *td;
	epoch_record_t er;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	td = curthread;
	td->td_epochnest++;
	critical_enter();
	er = epoch_currecord(epoch);
	ck_epoch_begin(&er->er_record, NULL);
}
void
epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et)
{
	struct epoch_record *er;
	struct thread *td;

	INIT_CHECK(epoch);
	td = curthread;
	critical_enter();
	sched_unpin();
	MPASS(td->td_epochnest);
	td->td_epochnest--;
	er = epoch_currecord(epoch);
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	MPASS(et != NULL);
	MPASS(et->et_td == td);
#ifdef EPOCH_TRACKER_DEBUG
	MPASS(et->et_magic_pre == EPOCH_MAGIC0);
	MPASS(et->et_magic_post == EPOCH_MAGIC1);
	et->et_magic_pre = 0;
	et->et_magic_post = 0;
#endif
#ifdef INVARIANTS
	et->et_td = (void *)0xDEADBEEF;
#endif
	ck_epoch_end(&er->er_record, &et->et_section);
	TAILQ_REMOVE(&er->er_tdlist, et, et_link);
	er->er_gen++;
	if (__predict_false(td->td_pre_epoch_prio != td->td_priority))
		epoch_adjust_prio(td, td->td_pre_epoch_prio);
	critical_exit();
}
void
epoch_exit(epoch_t epoch)
{
	struct thread *td;
	epoch_record_t er;

	INIT_CHECK(epoch);
	td = curthread;
	MPASS(td->td_epochnest);
	td->td_epochnest--;
	er = epoch_currecord(epoch);
	ck_epoch_end(&er->er_record, NULL);
	critical_exit();
}
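
/*
 * Usage sketch (illustrative only, not part of the original file;
 * "foo_epoch" is hypothetical): a preemptible read-side section passes
 * a tracker, commonly allocated on the stack; a non-preemptible epoch
 * passes none:
 *
 *	struct epoch_tracker et;
 *
 *	epoch_enter_preempt(foo_epoch, &et);
 *	// lockless traversal of the epoch-protected structure
 *	epoch_exit_preempt(foo_epoch, &et);
 */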
/*
 * epoch_block_handler_preempt() is a callback from the CK code when another
 * thread is currently in an epoch section.
 */
static void
epoch_block_handler_preempt(struct ck_epoch *global __unused,
    ck_epoch_record_t *cr, void *arg __unused)
{
	epoch_record_t record;
	struct thread *td, *owner, *curwaittd;
	struct epoch_tracker *tdwait;
	struct turnstile *ts;
	struct lock_object *lock;
	int spincount, gen;
	int locksheld __unused;

	record = __containerof(cr, struct epoch_record, er_record);
	td = curthread;
	locksheld = td->td_locks;
	spincount = 0;
	counter_u64_add(block_count, 1);
	/*
	 * We lost a race and there are no longer any threads
	 * on the CPU in an epoch section.
	 */
	if (TAILQ_EMPTY(&record->er_tdlist))
		return;

	if (record->er_cpuid != curcpu) {
		/*
		 * If the head of the list is running, we can wait for it
		 * to remove itself from the list and thus save us the
		 * overhead of a migration.
		 */
		gen = record->er_gen;
		thread_unlock(td);
		/*
		 * We can't actually check if the waiting thread is running
		 * so we simply poll for it to exit before giving up and
		 * migrating.
		 */
		do {
			cpu_spinwait();
		} while (!TAILQ_EMPTY(&record->er_tdlist) &&
		    gen == record->er_gen &&
		    spincount++ < MAX_ADAPTIVE_SPIN);
		thread_lock(td);
		/*
		 * If the generation has changed we can poll again;
		 * otherwise we need to migrate.
		 */
		if (gen != record->er_gen)
			return;
		/*
		 * Being on the same CPU as that of the record on which
		 * we need to wait allows us access to the thread
		 * list associated with that CPU. We can then examine the
		 * oldest thread in the queue and wait on its turnstile
		 * until it resumes and so on until a grace period
		 * elapses.
		 */
		counter_u64_add(migrate_count, 1);
		sched_bind(td, record->er_cpuid);
		/*
		 * At this point we need to return to the CK code
		 * to scan to see if a grace period has elapsed.
		 * We can't move on to check the thread list, because
		 * in the meantime new threads may have arrived that
		 * in fact belong to a different epoch.
		 */
		return;
	}
	/*
	 * Try to find a thread in an epoch section on this CPU
	 * waiting on a turnstile. Otherwise find the lowest
	 * priority thread (highest prio value) and drop our priority
	 * to match to allow it to run.
	 */
	TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
		/*
		 * Propagate our priority to any other waiters to prevent us
		 * from starving them. They will have their original priority
		 * restored on exit from epoch_wait().
		 */
		curwaittd = tdwait->et_td;
		if (!TD_IS_INHIBITED(curwaittd) && curwaittd->td_priority > td->td_priority) {
			critical_enter();
			thread_unlock(td);
			thread_lock(curwaittd);
			sched_prio(curwaittd, td->td_priority);
			thread_unlock(curwaittd);
			thread_lock(td);
			critical_exit();
		}
		if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
		    ((ts = curwaittd->td_blocked) != NULL)) {
			/*
			 * We unlock td to allow turnstile_wait() to reacquire
			 * the thread lock. Before unlocking it we enter a
			 * critical section to prevent preemption after we
			 * reenable interrupts by dropping the thread lock in
			 * order to prevent curwaittd from getting to run.
			 */
			critical_enter();
			thread_unlock(td);

			if (turnstile_lock(ts, &lock, &owner)) {
				if (ts == curwaittd->td_blocked) {
					MPASS(TD_IS_INHIBITED(curwaittd) &&
					    TD_ON_LOCK(curwaittd));
					critical_exit();
					turnstile_wait(ts, owner,
					    curwaittd->td_tsqueue);
					counter_u64_add(turnstile_count, 1);
					thread_lock(td);
					return;
				}
				turnstile_unlock(ts, lock);
			}
			thread_lock(td);
			critical_exit();
			KASSERT(td->td_locks == locksheld,
			    ("%d extra locks held", td->td_locks - locksheld));
		}
	}
	/*
	 * We didn't find any threads actually blocked on a lock
	 * so we have nothing to do except context switch away.
	 */
	counter_u64_add(switch_count, 1);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

	/*
	 * Release the thread lock while yielding to
	 * allow other threads to acquire the lock
	 * pointed to by TDQ_LOCKPTR(td). Else a
	 * deadlock-like situation might happen. (HPS)
	 */
	thread_unlock(td);
	thread_lock(td);
}
void
epoch_wait_preempt(epoch_t epoch)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;
	int locks __unused;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	td = curthread;
#ifdef INVARIANTS
	locks = curthread->td_locks;
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	if ((epoch->e_flags & EPOCH_LOCKED) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "epoch_wait() can be long running");
	KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle "
	    "of an epoch section of the same epoch"));
#endif
	DROP_GIANT();
	thread_lock(td);

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt,
	    NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);
	PICKUP_GIANT();
	KASSERT(td->td_locks == locks,
	    ("%d residual locks held", td->td_locks - locks));
}
static void
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
    void *arg __unused)
{
	cpu_spinwait();
}
void
epoch_wait(epoch_t epoch)
{

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	MPASS(epoch->e_flags == 0);
	critical_enter();
	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
	critical_exit();
}
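
/*
 * Usage sketch (illustrative only, not part of the original file; the
 * "foo" names are hypothetical): the synchronous write-side pattern
 * removes the object so new readers cannot find it, waits out a grace
 * period, and only then frees:
 *
 *	mtx_lock(&foo_lock);
 *	LIST_REMOVE(obj, link);
 *	mtx_unlock(&foo_lock);
 *	epoch_wait_preempt(foo_epoch);	// epoch_wait() if non-preemptible
 *	free(obj, M_FOO);
 */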
void
epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t))
{
	epoch_record_t er;
	ck_epoch_entry_t *cb;

	cb = (void *)ctx;

	MPASS(callback);
	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		goto boottime;
#if !defined(EARLY_AP_STARTUP)
	if (__predict_false(inited < 2))
		goto boottime;
#endif

	critical_enter();
	*DPCPU_PTR(epoch_cb_count) += 1;
	er = epoch_currecord(epoch);
	ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
	critical_exit();
	return;
boottime:
	callback(ctx);
}
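
/*
 * Usage sketch (illustrative only, not part of the original file; the
 * "foo" names are hypothetical): the deferred write-side pattern embeds
 * an epoch_context in the object and recovers the object in the
 * callback with __containerof() once a grace period has elapsed:
 *
 *	struct foo {
 *		LIST_ENTRY(foo) link;
 *		struct epoch_context ctx;
 *	};
 *
 *	static void
 *	foo_destroy(epoch_context_t ctx)
 *	{
 *		struct foo *f = __containerof(ctx, struct foo, ctx);
 *
 *		free(f, M_FOO);
 *	}
 *
 *	epoch_call(foo_epoch, &f->ctx, foo_destroy);
 */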
static void
epoch_call_task(void *arg __unused)
{
	ck_stack_entry_t *cursor, *head, *next;
	ck_epoch_record_t *record;
	epoch_record_t er;
	epoch_t epoch;
	ck_stack_t cb_stack;
	int i, npending, total;

	ck_stack_init(&cb_stack);
	critical_enter();
	epoch_enter(global_epoch);
	for (total = i = 0; i != MAX_EPOCHS; i++) {
		epoch = epoch_array + i;
		if (__predict_false(
		    atomic_load_acq_int(&epoch->e_in_use) == 0))
			continue;
		er = epoch_currecord(epoch);
		record = &er->er_record;
		if ((npending = record->n_pending) == 0)
			continue;
		ck_epoch_poll_deferred(record, &cb_stack);
		total += npending - record->n_pending;
	}
	epoch_exit(global_epoch);
	*DPCPU_PTR(epoch_cb_count) -= total;
	critical_exit();

	counter_u64_add(epoch_call_count, total);
	counter_u64_add(epoch_call_task_count, 1);

	head = ck_stack_batch_pop_npsc(&cb_stack);
	for (cursor = head; cursor != NULL; cursor = next) {
		struct ck_epoch_entry *entry =
		    ck_epoch_entry_container(cursor);

		next = CK_STACK_NEXT(cursor);
		entry->function(entry);
	}
}
int
in_epoch_verbose(epoch_t epoch, int dump_onfail)
{
	struct epoch_tracker *tdwait;
	struct thread *td;
	epoch_record_t er;

	td = curthread;
	if (td->td_epochnest == 0)
		return (0);
	if (__predict_false((epoch) == NULL))
		return (0);
	critical_enter();
	er = epoch_currecord(epoch);
	TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
		if (tdwait->et_td == td) {
			critical_exit();
			return (1);
		}
#ifdef INVARIANTS
	if (dump_onfail) {
		MPASS(td->td_pinned);
		printf("cpu: %d id: %d\n", curcpu, td->td_tid);
		TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
			printf("td_tid: %d ", tdwait->et_td->td_tid);
		printf("\n");
	}
#endif
	critical_exit();
	return (0);
}

int
in_epoch(epoch_t epoch)
{
	return (in_epoch_verbose(epoch, 0));
}
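
/*
 * Usage sketch (illustrative only, not part of the original file;
 * "foo_epoch" is hypothetical): in_epoch() is typically used to assert
 * that a function runs inside a section of the epoch protecting the
 * data it touches:
 *
 *	MPASS(in_epoch(foo_epoch));
 */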
static void
epoch_drain_cb(struct epoch_context *ctx)
{
	struct epoch *epoch =
	    __containerof(ctx, struct epoch_record, er_drain_ctx)->er_parent;

	if (atomic_fetchadd_int(&epoch->e_drain_count, -1) == 1) {
		mtx_lock(&epoch->e_drain_mtx);
		wakeup(epoch);
		mtx_unlock(&epoch->e_drain_mtx);
	}
}
void
epoch_drain_callbacks(epoch_t epoch)
{
	epoch_record_t er;
	struct thread *td;
	int was_bound;
	int old_pinned;
	int old_cpu;
	int cpu;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "epoch_drain_callbacks() may sleep!");

	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		return;
#if !defined(EARLY_AP_STARTUP)
	if (__predict_false(inited < 2))
		return;
#endif
	DROP_GIANT();

	sx_xlock(&epoch->e_drain_sx);
	mtx_lock(&epoch->e_drain_mtx);

	td = curthread;
	thread_lock(td);
	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;

	CPU_FOREACH(cpu)
		epoch->e_drain_count++;
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		sched_bind(td, cpu);
		epoch_call(epoch, &er->er_drain_ctx, &epoch_drain_cb);
	}

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	thread_unlock(td);

	while (epoch->e_drain_count != 0)
		msleep(epoch, &epoch->e_drain_mtx, PZERO, "EDRAIN", 0);

	mtx_unlock(&epoch->e_drain_mtx);
	sx_xunlock(&epoch->e_drain_sx);

	PICKUP_GIANT();
}
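
/*
 * Usage sketch (illustrative only, not part of the original file;
 * "foo_epoch" is hypothetical): a module that scheduled callbacks with
 * epoch_call() must drain them before its code is unmapped, e.g. on
 * MOD_UNLOAD; note that epoch_free() also drains internally:
 *
 *	epoch_drain_callbacks(foo_epoch);
 *	epoch_free(foo_epoch);
 */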
/* for binary compatibility */

struct epoch_tracker_KBI {
	void *datap[3];
#ifdef EPOCH_TRACKER_DEBUG
	int datai[5];
#else
	int datai[1];
#endif
} __aligned(sizeof(void *));

CTASSERT(sizeof(struct epoch_tracker_KBI) >= sizeof(struct epoch_tracker));

void
epoch_enter_preempt_KBI(epoch_t epoch, epoch_tracker_t et)
{
	epoch_enter_preempt(epoch, et);
}

void
epoch_exit_preempt_KBI(epoch_t epoch, epoch_tracker_t et)
{
	epoch_exit_preempt(epoch, et);
}

void
epoch_enter_KBI(epoch_t epoch)
{
	epoch_enter(epoch);
}

void
epoch_exit_KBI(epoch_t epoch)
{
	epoch_exit(epoch);
}