/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#include <ck_epoch.h>

static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");

/* arbitrary --- needs benchmarking */
#define MAX_ADAPTIVE_SPIN 100
#define MAX_EPOCHS 64

CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");
static counter_u64_t block_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
static counter_u64_t epoch_call_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
    &epoch_call_count, "# of times a callback was deferred");
static counter_u64_t epoch_call_task_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
    &epoch_call_task_count, "# of times a callback task was run");

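/*
 * All of the counters above hang off the kern.epoch.stats sysctl node
 * declared earlier, so their current values can be inspected from
 * userland with "sysctl kern.epoch.stats".
 */
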
TAILQ_HEAD(threadlist, thread);

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

epoch_t allepochs[MAX_EPOCHS];

DPCPU_DEFINE(struct grouptask, epoch_cb_task);
DPCPU_DEFINE(int, epoch_cb_count);

static __read_mostly int inited;
static __read_mostly int epoch_count;
__read_mostly epoch_t global_epoch;
__read_mostly epoch_t global_epoch_preempt;

static void epoch_call_task(void *context __unused);
static uma_zone_t pcpu_zone_record;

static void
epoch_init(void *arg __unused)
{
	int cpu;

	block_count = counter_u64_alloc(M_WAITOK);
	migrate_count = counter_u64_alloc(M_WAITOK);
	turnstile_count = counter_u64_alloc(M_WAITOK);
	switch_count = counter_u64_alloc(M_WAITOK);
	epoch_call_count = counter_u64_alloc(M_WAITOK);
	epoch_call_task_count = counter_u64_alloc(M_WAITOK);

	pcpu_zone_record = uma_zcreate("epoch_record pcpu", sizeof(struct epoch_record),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	CPU_FOREACH(cpu) {
		GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0, epoch_call_task, NULL);
		taskqgroup_attach_cpu(qgroup_softirq, DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, -1, "epoch call task");
	}
	inited = 1;
	global_epoch = epoch_alloc(0);
	global_epoch_preempt = epoch_alloc(EPOCH_PREEMPT);
}
SYSINIT(epoch, SI_SUB_TASKQ + 1, SI_ORDER_FIRST, epoch_init, NULL);

#if !defined(EARLY_AP_STARTUP)
static void
epoch_init_smp(void *dummy __unused)
{
	inited = 2;
}
SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
#endif

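/*
 * Without EARLY_AP_STARTUP the application processors have not been
 * started when the SI_SUB_TASKQ SYSINIT above runs, so "inited" is only
 * bumped to 2 once the APs (and thus the per-CPU callback tasks) are
 * available; epoch_call() below checks for "inited < 2" before deferring
 * work to those tasks.
 */
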
static void
epoch_ctor(epoch_t epoch)
{
	struct epoch_record *er;
	int cpu;

	epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		bzero(er, sizeof(*er));
		ck_epoch_register(&epoch->e_epoch, &er->er_read_record, NULL);
		ck_epoch_register(&epoch->e_epoch, &er->er_write_record, NULL);
		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
		er->er_cpuid = cpu;
	}
}

epoch_t
epoch_alloc(int flags)
{
	epoch_t epoch;

	if (__predict_false(!inited))
		panic("%s called too early in boot", __func__);
	epoch = malloc(sizeof(struct epoch), M_EPOCH, M_ZERO | M_WAITOK);
	ck_epoch_init(&epoch->e_epoch);
	epoch_ctor(epoch);
	MPASS(epoch_count < MAX_EPOCHS - 2);
	epoch->e_flags = flags;
	epoch->e_idx = epoch_count;
	allepochs[epoch_count++] = epoch;
	return (epoch);
}

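/*
 * Illustrative consumer sketch (the "foo_epoch" name and foo_* helpers
 * are hypothetical, not part of this file): readers bracket access to
 * epoch-protected data with epoch_enter_preempt()/epoch_exit_preempt(),
 * while a writer unlinks an object and then calls epoch_wait_preempt()
 * before freeing it:
 *
 *	epoch_t foo_epoch = epoch_alloc(EPOCH_PREEMPT);
 *	struct epoch_tracker et;
 *
 *	epoch_enter_preempt(foo_epoch, &et);
 *	foo_lookup();			// read epoch-protected data
 *	epoch_exit_preempt(foo_epoch, &et);
 *
 *	foo_unlink();			// writer: remove the object
 *	epoch_wait_preempt(foo_epoch);	// wait out existing readers
 *	foo_destroy();			// now safe to free
 */
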
void
epoch_free(epoch_t epoch)
{
#ifdef INVARIANTS
	struct epoch_record *er;
	int cpu;

	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		MPASS(TAILQ_EMPTY(&er->er_tdlist));
	}
#endif
	allepochs[epoch->e_idx] = NULL;
	epoch_wait(global_epoch);
	uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
	free(epoch, M_EPOCH);
}

void
epoch_enter_preempt_KBI(epoch_t epoch, epoch_tracker_t et)
{

	epoch_enter_preempt(epoch, et);
}

void
epoch_exit_preempt_KBI(epoch_t epoch, epoch_tracker_t et)
{

	epoch_exit_preempt(epoch, et);
}

void
epoch_enter_KBI(epoch_t epoch)
{

	epoch_enter(epoch);
}

void
epoch_exit_KBI(epoch_t epoch)
{

	epoch_exit(epoch);
}

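/*
 * The _KBI wrappers above provide stable, out-of-line entry points for
 * the inline enter/exit operations, preserving the kernel binary
 * interface for modules even if the inline implementations change.
 */
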
/*
 * epoch_block_handler_preempt() is a callback from the ck code invoked
 * when another thread is currently in an epoch section.
 */
static void
epoch_block_handler_preempt(struct ck_epoch *global __unused, ck_epoch_record_t *cr,
    void *arg __unused)
{
	epoch_record_t record;
	struct thread *td, *owner, *curwaittd;
	struct epoch_thread *tdwait;
	struct turnstile *ts;
	struct lock_object *lock;
	int spincount, gen;
	int locksheld __unused;

	record = __containerof(cr, struct epoch_record, er_read_record);
	td = curthread;
	locksheld = td->td_locks;
	spincount = 0;
	counter_u64_add(block_count, 1);
	/*
	 * We lost a race and there are no longer any threads
	 * on the CPU in an epoch section.
	 */
	if (TAILQ_EMPTY(&record->er_tdlist))
		return;

	if (record->er_cpuid != curcpu) {
		/*
		 * If the head of the list is running, we can wait for it
		 * to remove itself from the list and thus save us the
		 * overhead of a migration.
		 */
		gen = record->er_gen;
		thread_unlock(td);
		/*
		 * We can't actually check if the waiting thread is running
		 * so we simply poll for it to exit before giving up and
		 * migrating.
		 */
		do {
			cpu_spinwait();
		} while (!TAILQ_EMPTY(&record->er_tdlist) &&
		    gen == record->er_gen &&
		    spincount++ < MAX_ADAPTIVE_SPIN);
		thread_lock(td);
		/*
		 * If the generation has changed we can poll again;
		 * otherwise we need to migrate.
		 */
		if (gen != record->er_gen)
			return;
		/*
		 * Being on the same CPU as that of the record on which
		 * we need to wait allows us access to the thread
		 * list associated with that CPU. We can then examine the
		 * oldest thread in the queue and wait on its turnstile
		 * until it resumes and so on until a grace period
		 * elapses.
		 */
		counter_u64_add(migrate_count, 1);
		sched_bind(td, record->er_cpuid);
		/*
		 * At this point we need to return to the ck code
		 * to scan to see if a grace period has elapsed.
		 * We can't move on to check the thread list, because
		 * in the meantime new threads may have arrived that
		 * in fact belong to a different epoch.
		 */
		return;
	}
	/*
	 * Try to find a thread in an epoch section on this CPU
	 * waiting on a turnstile. Otherwise find the lowest
	 * priority thread (highest prio value) and drop our priority
	 * to match to allow it to run.
	 */
	TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
		/*
		 * Propagate our priority to any other waiters to prevent us
		 * from starving them. They will have their original priority
		 * restored on exit from epoch_wait().
		 */
		curwaittd = tdwait->et_td;
		if (!TD_IS_INHIBITED(curwaittd) && curwaittd->td_priority > td->td_priority) {
			critical_enter();
			thread_unlock(td);
			thread_lock(curwaittd);
			sched_prio(curwaittd, td->td_priority);
			thread_unlock(curwaittd);
			thread_lock(td);
			critical_exit();
		}
		if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
		    ((ts = curwaittd->td_blocked) != NULL)) {
			/*
			 * We unlock td to allow turnstile_wait to reacquire
			 * the thread lock. Before unlocking it we enter a
			 * critical section to prevent preemption after we
			 * reenable interrupts by dropping the thread lock in
			 * order to prevent curwaittd from getting to run.
			 */
			critical_enter();
			thread_unlock(td);
			owner = turnstile_lock(ts, &lock);
			/*
			 * The owner pointer indicates that the lock succeeded.
			 * Only in case we hold the lock and the turnstile we
			 * locked is still the one that curwaittd is blocked on
			 * can we continue. Otherwise the turnstile pointer has
			 * been changed out from underneath us, as in the case
			 * where the lock holder has signalled curwaittd, and
			 * we need to continue.
			 */
			if (owner != NULL && ts == curwaittd->td_blocked) {
				MPASS(TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd));
				critical_exit();
				turnstile_wait(ts, owner, curwaittd->td_tsqueue);
				counter_u64_add(turnstile_count, 1);
				thread_lock(td);
				return;
			} else if (owner != NULL)
				turnstile_unlock(ts, lock);
			thread_lock(td);
			critical_exit();
			KASSERT(td->td_locks == locksheld,
			    ("%d extra locks held", td->td_locks - locksheld));
		}
	}
	/*
	 * We didn't find any threads actually blocked on a lock
	 * so we have nothing to do except context switch away.
	 */
	counter_u64_add(switch_count, 1);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

	/*
	 * Release the thread lock while yielding to
	 * allow other threads to acquire the lock
	 * pointed to by TDQ_LOCKPTR(td). Else a
	 * deadlock-like situation might happen. (HPS)
	 */
	thread_unlock(td);
	thread_lock(td);
}

void
epoch_wait_preempt(epoch_t epoch)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;
	int locks __unused;

	MPASS(cold || epoch != NULL);
	td = curthread;
#ifdef INVARIANTS
	locks = curthread->td_locks;
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	if ((epoch->e_flags & EPOCH_LOCKED) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "epoch_wait() can be long running");
	KASSERT(!in_epoch(epoch),
	    ("epoch_wait_preempt() called in the middle "
	    "of an epoch section of the same epoch"));
#endif

	DROP_GIANT();
	thread_lock(td);

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt, NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);
	PICKUP_GIANT();
	KASSERT(td->td_locks == locks,
	    ("%d residual locks held", td->td_locks - locks));
}

static void
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
    void *arg __unused)
{
	cpu_spinwait();
}

void
epoch_wait(epoch_t epoch)
{

	MPASS(cold || epoch != NULL);
	MPASS(epoch->e_flags == 0);
	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
}

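/*
 * Readers of a non-preemptible epoch run in a critical section and can
 * neither sleep nor be preempted, so unlike the preemptible variant above
 * there is no thread to lend priority to or block on: the handler simply
 * spins until the grace period has elapsed.
 */
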
void
epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t))
{
	epoch_record_t er;
	ck_epoch_entry_t *cb;

	cb = (void *)ctx;
	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		goto boottime;
#if !defined(EARLY_AP_STARTUP)
	if (__predict_false(inited < 2))
		goto boottime;
#endif

	critical_enter();
	*DPCPU_PTR(epoch_cb_count) += 1;
	er = epoch_currecord(epoch);
	ck_epoch_call(&er->er_write_record, cb, (ck_epoch_cb_t *)callback);
	critical_exit();
	return;
boottime:
	callback(ctx);
}

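/*
 * Deferred-free sketch for epoch_call() consumers (the "struct foo",
 * M_FOO and foo_epoch names are hypothetical): the object embeds an
 * epoch_context, and the callback recovers the object with
 * __containerof() once a grace period has elapsed.
 *
 *	struct foo {
 *		int f_data;
 *		struct epoch_context f_ctx;
 *	};
 *
 *	static void
 *	foo_free(epoch_context_t ctx)
 *	{
 *		struct foo *f;
 *
 *		f = __containerof(ctx, struct foo, f_ctx);
 *		free(f, M_FOO);
 *	}
 *
 *	epoch_call(foo_epoch, &f->f_ctx, foo_free);
 */
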
static void
epoch_call_task(void *arg __unused)
{
	ck_stack_entry_t *cursor, *head, *next;
	ck_epoch_record_t *record;
	epoch_record_t er;
	epoch_t epoch;
	ck_stack_t cb_stack;
	int i, npending, total;

	ck_stack_init(&cb_stack);
	critical_enter();
	epoch_enter(global_epoch);
	for (total = i = 0; i < epoch_count; i++) {
		if (__predict_false((epoch = allepochs[i]) == NULL))
			continue;
		er = epoch_currecord(epoch);
		record = &er->er_write_record;
		if ((npending = record->n_pending) == 0)
			continue;
		ck_epoch_poll_deferred(record, &cb_stack);
		total += npending - record->n_pending;
	}
	epoch_exit(global_epoch);
	*DPCPU_PTR(epoch_cb_count) -= total;
	critical_exit();

	counter_u64_add(epoch_call_count, total);
	counter_u64_add(epoch_call_task_count, 1);

	head = ck_stack_batch_pop_npsc(&cb_stack);
	for (cursor = head; cursor != NULL; cursor = next) {
		struct ck_epoch_entry *entry =
		    ck_epoch_entry_container(cursor);

		next = CK_STACK_NEXT(cursor);
		entry->function(entry);
	}
}

int
in_epoch_verbose(epoch_t epoch, int dump_onfail)
{
	struct epoch_thread *tdwait;
	struct thread *td;
	epoch_record_t er;

	td = curthread;
	if (td->td_epochnest == 0)
		return (0);
	if (__predict_false((epoch) == NULL))
		return (1);
	critical_enter();
	er = epoch_currecord(epoch);
	TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
		if (tdwait->et_td == td) {
			critical_exit();
			return (1);
		}
#ifdef INVARIANTS
	if (dump_onfail) {
		MPASS(td->td_pinned);
		printf("cpu: %d id: %d\n", curcpu, td->td_tid);
		TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
			printf("td_tid: %d ", tdwait->et_td->td_tid);
		printf("\n");
	}
#endif
	critical_exit();
	return (0);
}

int
in_epoch(epoch_t epoch)
{
	return (in_epoch_verbose(epoch, 0));
}

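/*
 * in_epoch() is chiefly useful for assertions; e.g. a consumer can check
 * that a lookup routine is always called inside its epoch (illustrative,
 * with a hypothetical "foo_epoch"):
 *
 *	MPASS(in_epoch(foo_epoch));
 */
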
void
epoch_adjust_prio(struct thread *td, u_char prio)
{

	thread_lock(td);
	sched_prio(td, prio);
	thread_unlock(td);
}