/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#include <ck_epoch.h>
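/*
 * Overview (informal): this file implements epoch(9), epoch based
 * reclamation built on top of ConcurrencyKit's ck_epoch.  Readers bracket
 * lockless accesses with an epoch section; writers either block in
 * epoch_wait*() until every section that was active at the time of the
 * call has exited, or defer destruction with epoch_call().  A minimal
 * read-side sketch using the preemptible global epoch defined below:
 *
 *	struct epoch_tracker et;
 *
 *	epoch_enter_preempt(global_epoch_preempt, &et);
 *	// dereference epoch-protected pointers here
 *	epoch_exit_preempt(global_epoch_preempt, &et);
 */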
static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");
#ifdef __amd64__
#define EPOCH_ALIGN	CACHE_LINE_SIZE * 2
#else
#define EPOCH_ALIGN	CACHE_LINE_SIZE
#endif
TAILQ_HEAD (epoch_tdlist, epoch_tracker);
typedef struct epoch_record {
	ck_epoch_record_t er_record;
	volatile struct epoch_tdlist er_tdlist;
	volatile uint32_t er_gen;
	uint32_t er_cpuid;
} __aligned(EPOCH_ALIGN)	*epoch_record_t;
struct epoch {
	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
	epoch_record_t e_pcpu_record;
	int	e_idx;
	int	e_flags;
};
/* arbitrary --- needs benchmarking */
#define MAX_ADAPTIVE_SPIN 100
#define MAX_EPOCHS 64
CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");
static counter_u64_t block_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
static counter_u64_t epoch_call_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
    &epoch_call_count, "# of times a callback was deferred");
static counter_u64_t epoch_call_task_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
    &epoch_call_task_count, "# of times a callback task was run");
TAILQ_HEAD (threadlist, thread);

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)
epoch_t allepochs[MAX_EPOCHS];

DPCPU_DEFINE(struct grouptask, epoch_cb_task);
DPCPU_DEFINE(int, epoch_cb_count);

static __read_mostly int inited;
static __read_mostly int epoch_count;
__read_mostly epoch_t global_epoch;
__read_mostly epoch_t global_epoch_preempt;

static void epoch_call_task(void *context __unused);
static uma_zone_t pcpu_zone_record;
static void
epoch_init(void *arg __unused)
{
	int cpu;

	block_count = counter_u64_alloc(M_WAITOK);
	migrate_count = counter_u64_alloc(M_WAITOK);
	turnstile_count = counter_u64_alloc(M_WAITOK);
	switch_count = counter_u64_alloc(M_WAITOK);
	epoch_call_count = counter_u64_alloc(M_WAITOK);
	epoch_call_task_count = counter_u64_alloc(M_WAITOK);

	pcpu_zone_record = uma_zcreate("epoch_record pcpu",
	    sizeof(struct epoch_record), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	CPU_FOREACH(cpu) {
		GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0,
		    epoch_call_task, NULL);
		taskqgroup_attach_cpu(qgroup_softirq,
		    DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, NULL, NULL,
		    "epoch call task");
	}
	inited = 1;
	global_epoch = epoch_alloc(0);
	global_epoch_preempt = epoch_alloc(EPOCH_PREEMPT);
}
SYSINIT(epoch, SI_SUB_TASKQ + 1, SI_ORDER_FIRST, epoch_init, NULL);
#if !defined(EARLY_AP_STARTUP)
static void
epoch_init_smp(void *dummy __unused)
{
	inited = 2;
}
SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
#endif
static void
epoch_ctor(epoch_t epoch)
{
	epoch_record_t er;
	int cpu;

	epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		bzero(er, sizeof(*er));
		ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
		er->er_cpuid = cpu;
	}
}
static void
epoch_adjust_prio(struct thread *td, u_char prio)
{

	thread_lock(td);
	sched_prio(td, prio);
	thread_unlock(td);
}
epoch_t
epoch_alloc(int flags)
{
	epoch_t epoch;

	if (__predict_false(!inited))
		panic("%s called too early in boot", __func__);
	epoch = malloc(sizeof(struct epoch), M_EPOCH, M_ZERO | M_WAITOK);
	ck_epoch_init(&epoch->e_epoch);
	epoch_ctor(epoch);
	MPASS(epoch_count < MAX_EPOCHS - 2);
	epoch->e_flags = flags;
	epoch->e_idx = epoch_count;
	allepochs[epoch_count++] = epoch;
	return (epoch);
}
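/*
 * Informal usage note: consumers normally allocate a preemptible epoch once
 * at initialization time and reuse it for all read sections, e.g.:
 *
 *	epoch_t my_epoch;		(hypothetical name)
 *
 *	my_epoch = epoch_alloc(EPOCH_PREEMPT);
 *
 * Passing 0 for flags yields a non-preemptible epoch whose sections run
 * inside a critical section (see epoch_enter()/epoch_exit() below).
 */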
void
epoch_free(epoch_t epoch)
{
#ifdef INVARIANTS
	struct epoch_record *er;
	int cpu;

	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		MPASS(TAILQ_EMPTY(&er->er_tdlist));
	}
#endif
	allepochs[epoch->e_idx] = NULL;
	epoch_wait(global_epoch);
	uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
	free(epoch, M_EPOCH);
}
static epoch_record_t
epoch_currecord(epoch_t epoch)
{

	return (zpcpu_get_cpu(epoch->e_pcpu_record, curcpu));
}
#define INIT_CHECK(epoch)					\
	do {							\
		if (__predict_false((epoch) == NULL))		\
			return;					\
	} while (0)
void
epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et)
{
	struct epoch_record *er;
	struct thread *td;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
#ifdef EPOCH_TRACKER_DEBUG
	et->et_magic_pre = EPOCH_MAGIC0;
	et->et_magic_post = EPOCH_MAGIC1;
#endif
	td = curthread;
	et->et_td = td;
	td->td_epochnest++;
	critical_enter();
	sched_pin();
	td->td_pre_epoch_prio = td->td_priority;
	er = epoch_currecord(epoch);
	TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link);
	ck_epoch_begin(&er->er_record, &et->et_section);
	critical_exit();
}
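/*
 * Note (informal): the epoch_tracker passed to epoch_enter_preempt() links
 * this thread onto the per-CPU record list, so it must remain valid (it is
 * normally a stack variable in the caller) until the matching
 * epoch_exit_preempt() with the same tracker.  Sections may nest, but each
 * enter must be paired with an exit on the same thread.
 */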
void
epoch_enter(epoch_t epoch)
{
	struct thread *td;
	epoch_record_t er;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	td = curthread;
	td->td_epochnest++;
	critical_enter();
	er = epoch_currecord(epoch);
	ck_epoch_begin(&er->er_record, NULL);
}
void
epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et)
{
	struct epoch_record *er;
	struct thread *td;

	INIT_CHECK(epoch);
	td = curthread;
	critical_enter();
	sched_unpin();
	MPASS(td->td_epochnest);
	td->td_epochnest--;
	er = epoch_currecord(epoch);
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	MPASS(et != NULL);
	MPASS(et->et_td == td);
#ifdef EPOCH_TRACKER_DEBUG
	MPASS(et->et_magic_pre == EPOCH_MAGIC0);
	MPASS(et->et_magic_post == EPOCH_MAGIC1);
	et->et_magic_pre = 0;
	et->et_magic_post = 0;
#endif
#ifdef INVARIANTS
	et->et_td = (void *)0xDEADBEEF;
#endif
	ck_epoch_end(&er->er_record, &et->et_section);
	TAILQ_REMOVE(&er->er_tdlist, et, et_link);
	er->er_gen++;
	if (__predict_false(td->td_pre_epoch_prio != td->td_priority))
		epoch_adjust_prio(td, td->td_pre_epoch_prio);
	critical_exit();
}
void
epoch_exit(epoch_t epoch)
{
	struct thread *td;
	epoch_record_t er;

	INIT_CHECK(epoch);
	td = curthread;
	MPASS(td->td_epochnest);
	td->td_epochnest--;
	er = epoch_currecord(epoch);
	ck_epoch_end(&er->er_record, NULL);
	critical_exit();
}
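/*
 * Informal note: non-preemptible sections (epoch_enter()/epoch_exit()) run
 * entirely inside a critical section, so the code between them must be
 * short and must not sleep or acquire sleepable locks, e.g.:
 *
 *	epoch_enter(global_epoch);
 *	// short, non-sleeping lockless read of shared state
 *	epoch_exit(global_epoch);
 */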
/*
 * epoch_block_handler_preempt() is a callback from the CK code when another
 * thread is currently in an epoch section.
 */
static void
epoch_block_handler_preempt(struct ck_epoch *global __unused,
    ck_epoch_record_t *cr, void *arg __unused)
{
	epoch_record_t record;
	struct thread *td, *owner, *curwaittd;
	struct epoch_tracker *tdwait;
	struct turnstile *ts;
	struct lock_object *lock;
	int spincount, gen;
	int locksheld __unused;

	record = __containerof(cr, struct epoch_record, er_record);
	td = curthread;
	locksheld = td->td_locks;
	spincount = 0;
	counter_u64_add(block_count, 1);
	/*
	 * We lost a race and there's no longer any threads
	 * on the CPU in an epoch section.
	 */
	if (TAILQ_EMPTY(&record->er_tdlist))
		return;
	if (record->er_cpuid != curcpu) {
		/*
		 * If the head of the list is running, we can wait for it
		 * to remove itself from the list and thus save us the
		 * overhead of a migration.
		 */
		gen = record->er_gen;
		thread_unlock(td);
		/*
		 * We can't actually check if the waiting thread is running
		 * so we simply poll for it to exit before giving up and
		 * migrating.
		 */
		do {
			cpu_spinwait();
		} while (!TAILQ_EMPTY(&record->er_tdlist) &&
		    gen == record->er_gen &&
		    spincount++ < MAX_ADAPTIVE_SPIN);
		thread_lock(td);
		/*
		 * If the generation has changed we can poll again,
		 * otherwise we need to migrate.
		 */
		if (gen != record->er_gen)
			return;
		/*
		 * Being on the same CPU as that of the record on which
		 * we need to wait allows us access to the thread
		 * list associated with that CPU. We can then examine the
		 * oldest thread in the queue and wait on its turnstile
		 * until it resumes and so on until a grace period
		 * elapses.
		 */
		counter_u64_add(migrate_count, 1);
		sched_bind(td, record->er_cpuid);
		/*
		 * At this point we need to return to the ck code
		 * to scan to see if a grace period has elapsed.
		 * We can't move on to check the thread list, because
		 * in the meantime new threads may have arrived that
		 * in fact belong to a different epoch.
		 */
		return;
	}
	/*
	 * Try to find a thread in an epoch section on this CPU
	 * waiting on a turnstile. Otherwise find the lowest
	 * priority thread (highest prio value) and drop our priority
	 * to match to allow it to run.
	 */
	TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
		/*
		 * Propagate our priority to any other waiters to prevent us
		 * from starving them. They will have their original priority
		 * restored on exit from epoch_wait().
		 */
		curwaittd = tdwait->et_td;
		if (!TD_IS_INHIBITED(curwaittd) && curwaittd->td_priority > td->td_priority) {
			critical_enter();
			thread_unlock(td);
			thread_lock(curwaittd);
			sched_prio(curwaittd, td->td_priority);
			thread_unlock(curwaittd);
			thread_lock(td);
			critical_exit();
		}
		if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
		    ((ts = curwaittd->td_blocked) != NULL)) {
			/*
			 * We unlock td to allow turnstile_wait to reacquire
			 * the thread lock. Before unlocking it we enter a
			 * critical section to prevent preemption after we
			 * reenable interrupts by dropping the thread lock in
			 * order to prevent curwaittd from getting to run.
			 */
			critical_enter();
			thread_unlock(td);
			owner = turnstile_lock(ts, &lock);
			/*
			 * The owner pointer indicates that the lock succeeded.
			 * Only in case we hold the lock and the turnstile we
			 * locked is still the one that curwaittd is blocked on
			 * can we continue. Otherwise the turnstile pointer has
			 * been changed out from underneath us, as in the case
			 * where the lock holder has signalled curwaittd,
			 * and we need to continue.
			 */
			if (owner != NULL && ts == curwaittd->td_blocked) {
				MPASS(TD_IS_INHIBITED(curwaittd) &&
				    TD_ON_LOCK(curwaittd));
				critical_exit();
				turnstile_wait(ts, owner, curwaittd->td_tsqueue);
				counter_u64_add(turnstile_count, 1);
				thread_lock(td);
				return;
			} else if (owner != NULL)
				turnstile_unlock(ts, lock);
			thread_lock(td);
			critical_exit();
			KASSERT(td->td_locks == locksheld,
			    ("%d extra locks held", td->td_locks - locksheld));
		}
	}
	/*
	 * We didn't find any threads actually blocked on a lock
	 * so we have nothing to do except context switch away.
	 */
	counter_u64_add(switch_count, 1);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

	/*
	 * Release the thread lock while yielding to
	 * allow other threads to acquire the lock
	 * pointed to by TDQ_LOCKPTR(td). Else a
	 * deadlock like situation might happen. (HPS)
	 */
	thread_lock(td);
	thread_unlock(td);
}
void
epoch_wait_preempt(epoch_t epoch)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	int old_prio;
	int locks __unused;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	td = curthread;

	locks = curthread->td_locks;
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	if ((epoch->e_flags & EPOCH_LOCKED) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "epoch_wait() can be long running");
	KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle "
	    "of an epoch section of the same epoch"));

	DROP_GIANT();
	thread_lock(td);

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt,
	    NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);
	PICKUP_GIANT();
	KASSERT(td->td_locks == locks,
	    ("%d residual locks held", td->td_locks - locks));
}
static void
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
    void *arg __unused)
{

	cpu_spinwait();
}
void
epoch_wait(epoch_t epoch)
{

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	MPASS(epoch->e_flags == 0);
	critical_enter();
	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
	critical_exit();
}
void
epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t))
{
	epoch_record_t er;
	ck_epoch_entry_t *cb;

	cb = (void *)ctx;

	MPASS(callback);
	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		goto boottime;
#if !defined(EARLY_AP_STARTUP)
	if (__predict_false(inited < 2))
		goto boottime;
#endif

	critical_enter();
	*DPCPU_PTR(epoch_cb_count) += 1;
	er = epoch_currecord(epoch);
	ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
	critical_exit();
	return;
boottime:
	callback(ctx);
}
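/*
 * Informal deferred-free sketch: embed a struct epoch_context in the object
 * and recover the object in the callback (names below are hypothetical):
 *
 *	struct myobj {
 *		...
 *		struct epoch_context mo_ctx;
 *	};
 *
 *	static void
 *	myobj_free_cb(epoch_context_t ctx)
 *	{
 *		struct myobj *mo;
 *
 *		mo = __containerof(ctx, struct myobj, mo_ctx);
 *		free(mo, M_MYOBJ);
 *	}
 *
 *	epoch_call(my_epoch, &mo->mo_ctx, myobj_free_cb);
 */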
static void
epoch_call_task(void *arg __unused)
{
	ck_stack_entry_t *cursor, *head, *next;
	ck_epoch_record_t *record;
	epoch_record_t er;
	epoch_t epoch;
	ck_stack_t cb_stack;
	int i, npending, total;

	ck_stack_init(&cb_stack);
	critical_enter();
	epoch_enter(global_epoch);
	for (total = i = 0; i < epoch_count; i++) {
		if (__predict_false((epoch = allepochs[i]) == NULL))
			continue;
		er = epoch_currecord(epoch);
		record = &er->er_record;
		if ((npending = record->n_pending) == 0)
			continue;
		ck_epoch_poll_deferred(record, &cb_stack);
		total += npending - record->n_pending;
	}
	epoch_exit(global_epoch);
	*DPCPU_PTR(epoch_cb_count) -= total;
	critical_exit();

	counter_u64_add(epoch_call_count, total);
	counter_u64_add(epoch_call_task_count, 1);

	head = ck_stack_batch_pop_npsc(&cb_stack);
	for (cursor = head; cursor != NULL; cursor = next) {
		struct ck_epoch_entry *entry =
		    ck_epoch_entry_container(cursor);

		next = CK_STACK_NEXT(cursor);
		entry->function(entry);
	}
}
int
in_epoch_verbose(epoch_t epoch, int dump_onfail)
{
	struct epoch_tracker *tdwait;
	struct thread *td;
	epoch_record_t er;

	td = curthread;
	if (td->td_epochnest == 0)
		return (0);
	if (__predict_false((epoch) == NULL))
		return (0);
	critical_enter();
	er = epoch_currecord(epoch);
	TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
		if (tdwait->et_td == td) {
			critical_exit();
			return (1);
		}
#ifdef INVARIANTS
	if (dump_onfail) {
		MPASS(td->td_pinned);
		printf("cpu: %d id: %d\n", curcpu, td->td_tid);
		TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
			printf("td_tid: %d ", tdwait->et_td->td_tid);
		printf("\n");
	}
#endif
	critical_exit();
	return (0);
}
int
in_epoch(epoch_t epoch)
{
	return (in_epoch_verbose(epoch, 0));
}
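/*
 * Informal note: in_epoch() is primarily useful for assertions in code that
 * requires its caller to hold an epoch section, e.g.:
 *
 *	MPASS(in_epoch(my_epoch));	// my_epoch is a hypothetical epoch
 */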
void
epoch_thread_init(struct thread *td)
{

	td->td_et = malloc(sizeof(struct epoch_tracker), M_EPOCH, M_WAITOK);
}
void
epoch_thread_fini(struct thread *td)
{

	free(td->td_et, M_EPOCH);
}