/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <ck_epoch.h>

static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");

/* arbitrary --- needs benchmarking */
#define MAX_ADAPTIVE_SPIN 1000

CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");

static counter_u64_t block_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
static counter_u64_t epoch_call_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
    &epoch_call_count, "# of times a callback was deferred");
static counter_u64_t epoch_call_task_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
    &epoch_call_task_count, "# of times a callback task was run");

TAILQ_HEAD (threadlist, thread);

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

epoch_t allepochs[MAX_EPOCHS];

DPCPU_DEFINE(struct grouptask, epoch_cb_task);
DPCPU_DEFINE(int, epoch_cb_count);

static __read_mostly int domcount[MAXMEMDOM];
static __read_mostly int domoffsets[MAXMEMDOM];
static __read_mostly int inited;
static __read_mostly int epoch_count;
__read_mostly epoch_t global_epoch;
__read_mostly epoch_t global_epoch_preempt;

static void epoch_call_task(void *context __unused);

#if defined(__powerpc64__) || defined(__powerpc__) || !defined(NUMA)
static bool usedomains = false;
#else
static bool usedomains = true;
#endif

static void
epoch_init(void *arg __unused)
{
	int domain, cpu;

	block_count = counter_u64_alloc(M_WAITOK);
	migrate_count = counter_u64_alloc(M_WAITOK);
	turnstile_count = counter_u64_alloc(M_WAITOK);
	switch_count = counter_u64_alloc(M_WAITOK);
	epoch_call_count = counter_u64_alloc(M_WAITOK);
	epoch_call_task_count = counter_u64_alloc(M_WAITOK);
	if (usedomains == false)
		goto done;
	for (domain = 0; domain < vm_ndomains; domain++) {
		domcount[domain] = CPU_COUNT(&cpuset_domain[domain]);
		if (bootverbose)
			printf("domcount[%d] %d\n", domain, domcount[domain]);
	}
	for (domain = 1; domain < vm_ndomains; domain++)
		domoffsets[domain] = domoffsets[domain - 1] + domcount[domain - 1];

	/* If any domain has no CPUs attached, fall back to the flat layout. */
	for (domain = 0; domain < vm_ndomains; domain++) {
		if (domcount[domain] == 0) {
			usedomains = false;
			break;
		}
	}
 done:
	CPU_FOREACH(cpu) {
		GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0, epoch_call_task, NULL);
		taskqgroup_attach_cpu(qgroup_softirq, DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, -1, "epoch call task");
	}
	inited = 1;
	global_epoch = epoch_alloc(0);
	global_epoch_preempt = epoch_alloc(EPOCH_PREEMPT);
}
SYSINIT(epoch, SI_SUB_TASKQ + 1, SI_ORDER_FIRST, epoch_init, NULL);

#if !defined(EARLY_AP_STARTUP)
static void
epoch_init_smp(void *dummy __unused)
{
	inited = 2;
}
SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
#endif

static void
epoch_init_numa(epoch_t epoch)
{
	int domain, cpu_offset;
	struct epoch_record *er;

	for (domain = 0; domain < vm_ndomains; domain++) {
		er = malloc_domain(sizeof(*er) * domcount[domain], M_EPOCH,
		    domain, M_ZERO | M_WAITOK);
		epoch->e_pcpu_dom[domain] = er;
		cpu_offset = domoffsets[domain];
		for (int i = 0; i < domcount[domain]; i++, er++) {
			epoch->e_pcpu[cpu_offset + i] = er;
			ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
			TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
			er->er_cpuid = cpu_offset + i;
		}
	}
}

static void
epoch_init_legacy(epoch_t epoch)
{
	struct epoch_record *er;

	er = malloc(sizeof(*er) * mp_ncpus, M_EPOCH, M_ZERO | M_WAITOK);
	epoch->e_pcpu_dom[0] = er;
	for (int i = 0; i < mp_ncpus; i++, er++) {
		epoch->e_pcpu[i] = er;
		ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
		er->er_cpuid = i;
	}
}

epoch_t
epoch_alloc(int flags)
{
	epoch_t epoch;

	if (__predict_false(!inited))
		panic("%s called too early in boot", __func__);
	epoch = malloc(sizeof(struct epoch) + mp_ncpus * sizeof(void *),
	    M_EPOCH, M_ZERO | M_WAITOK);
	ck_epoch_init(&epoch->e_epoch);
	if (usedomains)
		epoch_init_numa(epoch);
	else
		epoch_init_legacy(epoch);
	MPASS(epoch_count < MAX_EPOCHS - 2);
	epoch->e_flags = flags;
	epoch->e_idx = epoch_count;
	allepochs[epoch_count++] = epoch;
	return (epoch);
}
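
/*
 * Usage sketch (illustrative only, not part of the KPI): a consumer would
 * typically allocate a preemptible epoch once and bracket lockless read-side
 * walks with epoch_enter_preempt()/epoch_exit_preempt().  The identifiers
 * foo, foo_head, foo_example_init, and foo_epoch below are hypothetical.
 */
struct foo {
	struct foo *f_next;
	int f_value;
};
static struct foo *foo_head;
static epoch_t foo_epoch;

static __unused void
foo_example_init(void)
{
	foo_epoch = epoch_alloc(EPOCH_PREEMPT);
}

static __unused bool
foo_example_lookup(int value)
{
	struct epoch_tracker et;
	struct foo *f;
	bool found;

	found = false;
	epoch_enter_preempt(foo_epoch, &et);
	for (f = foo_head; f != NULL; f = f->f_next)
		if (f->f_value == value) {
			found = true;
			break;
		}
	epoch_exit_preempt(foo_epoch, &et);
	return (found);
}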

void
epoch_free(epoch_t epoch)
{
	struct epoch_record *er;
	int cpu, domain;

	CPU_FOREACH(cpu) {
		er = epoch->e_pcpu[cpu];
		MPASS(TAILQ_EMPTY(&er->er_tdlist));
	}
	allepochs[epoch->e_idx] = NULL;
	epoch_wait(global_epoch);
	if (usedomains)
		for (domain = 0; domain < vm_ndomains; domain++)
			free_domain(epoch->e_pcpu_dom[domain], M_EPOCH);
	else
		free(epoch->e_pcpu_dom[0], M_EPOCH);
	free(epoch, M_EPOCH);
}

void
epoch_enter_preempt_KBI(epoch_t epoch, epoch_tracker_t et)
{
	epoch_enter_preempt(epoch, et);
}

void
epoch_exit_preempt_KBI(epoch_t epoch, epoch_tracker_t et)
{
	epoch_exit_preempt(epoch, et);
}

void
epoch_enter_KBI(epoch_t epoch)
{
	epoch_enter(epoch);
}

void
epoch_exit_KBI(epoch_t epoch)
{
	epoch_exit(epoch);
}

/*
 * epoch_block_handler_preempt() is a callback from the CK code, invoked when
 * another thread is currently in an epoch section.
 */
static void
epoch_block_handler_preempt(struct ck_epoch *global __unused, ck_epoch_record_t *cr,
    void *arg __unused)
{
	epoch_record_t record;
	struct thread *td, *owner, *curwaittd;
	struct epoch_thread *tdwait;
	struct turnstile *ts;
	struct lock_object *lock;
	int spincount, gen;
	int locksheld __unused;

	record = __containerof(cr, struct epoch_record, er_record);
	td = curthread;
	locksheld = td->td_locks;
	spincount = 0;
	counter_u64_add(block_count, 1);
	if (record->er_cpuid != curcpu) {
		/*
		 * If the head of the list is running, we can wait for it
		 * to remove itself from the list and thus save us the
		 * overhead of a migration.
		 */
		if ((tdwait = TAILQ_FIRST(&record->er_tdlist)) != NULL &&
		    TD_IS_RUNNING(tdwait->et_td)) {
			gen = record->er_gen;
			thread_unlock(td);
			do {
				cpu_spinwait();
			} while (tdwait == TAILQ_FIRST(&record->er_tdlist) &&
			    gen == record->er_gen && TD_IS_RUNNING(tdwait->et_td) &&
			    spincount++ < MAX_ADAPTIVE_SPIN);
			thread_lock(td);
			return;
		}
		/*
		 * Being on the same CPU as that of the record on which
		 * we need to wait allows us access to the thread
		 * list associated with that CPU. We can then examine the
		 * oldest thread in the queue and wait on its turnstile
		 * until it resumes and so on until a grace period
		 * elapses.
		 */
		counter_u64_add(migrate_count, 1);
		sched_bind(td, record->er_cpuid);
		/*
		 * At this point we need to return to the ck code
		 * to scan to see if a grace period has elapsed.
		 * We can't move on to check the thread list, because
		 * in the meantime new threads may have arrived that
		 * in fact belong to a different epoch.
		 */
		return;
	}
	/*
	 * Try to find a thread in an epoch section on this CPU
	 * waiting on a turnstile. Otherwise find the lowest
	 * priority thread (highest prio value) and drop our priority
	 * to match to allow it to run.
	 */
	TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
		/*
		 * Propagate our priority to any other waiters to prevent us
		 * from starving them. They will have their original priority
		 * restored on exit from epoch_wait().
		 */
		curwaittd = tdwait->et_td;
		if (!TD_IS_INHIBITED(curwaittd) && curwaittd->td_priority > td->td_priority) {
			critical_enter();
			thread_unlock(td);
			thread_lock(curwaittd);
			sched_prio(curwaittd, td->td_priority);
			thread_unlock(curwaittd);
			thread_lock(td);
			critical_exit();
		}
		if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
		    ((ts = curwaittd->td_blocked) != NULL)) {
			/*
			 * We unlock td to allow turnstile_wait to reacquire
			 * the thread lock. Before unlocking it we enter a
			 * critical section to prevent preemption after we
			 * reenable interrupts by dropping the thread lock in
			 * order to prevent curwaittd from getting to run.
			 */
			critical_enter();
			thread_unlock(td);
			owner = turnstile_lock(ts, &lock);
			/*
			 * The owner pointer indicates that the lock succeeded.
			 * Only in the case where we hold the lock and the
			 * turnstile we locked is still the one that curwaittd
			 * is blocked on can we continue. Otherwise the
			 * turnstile pointer has been changed out from
			 * underneath us, as in the case where the lock holder
			 * has signalled curwaittd, and we need to continue.
			 */
			if (owner != NULL && ts == curwaittd->td_blocked) {
				MPASS(TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd));
				critical_exit();
				turnstile_wait(ts, owner, curwaittd->td_tsqueue);
				counter_u64_add(turnstile_count, 1);
				thread_lock(td);
				return;
			} else if (owner != NULL)
				turnstile_unlock(ts, lock);
			thread_lock(td);
			critical_exit();
			KASSERT(td->td_locks == locksheld,
			    ("%d extra locks held", td->td_locks - locksheld));
		}
	}
	/*
	 * We didn't find any threads actually blocked on a lock, so we have
	 * nothing to do except context switch away.
	 */
	counter_u64_add(switch_count, 1);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

	/*
	 * Release the thread lock while yielding to
	 * allow other threads to acquire the lock
	 * pointed to by TDQ_LOCKPTR(td). Else a
	 * deadlock like situation might happen. (HPS)
	 */
	thread_unlock(td);
	thread_lock(td);
}

void
epoch_wait_preempt(epoch_t epoch)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;
	int locks __unused;

	MPASS(cold || epoch != NULL);
	td = curthread;
	locks = curthread->td_locks;
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	if ((epoch->e_flags & EPOCH_LOCKED) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "epoch_wait() can be long running");
	KASSERT(td->td_epochnest == 0, ("epoch_wait() in the middle of an epoch section"));
	thread_lock(td);

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt, NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);
	KASSERT(td->td_locks == locks,
	    ("%d residual locks held", td->td_locks - locks));
}

static void
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
    void *arg __unused)
{
	cpu_spinwait();
}

void
epoch_wait(epoch_t epoch)
{
	MPASS(cold || epoch != NULL);
	MPASS(epoch->e_flags == 0);
	critical_enter();
	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
	critical_exit();
}
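
/*
 * Usage sketch (illustrative only): a writer can unlink an object under its
 * own lock, call epoch_wait() to let in-flight non-preemptible epoch sections
 * drain, and only then free the memory.  The identifiers bar, bar_head,
 * bar_epoch, and bar_mtx are hypothetical; bar_epoch is assumed to have been
 * allocated with epoch_alloc(0) and bar_mtx initialized elsewhere.
 */
struct bar {
	struct bar *b_next;
};
static struct bar *bar_head;
static epoch_t bar_epoch;
static struct mtx bar_mtx;

static __unused bool
bar_example_nonempty(void)
{
	bool ret;

	/* Read-side section paired with epoch_wait() below. */
	epoch_enter(bar_epoch);
	ret = (bar_head != NULL);
	epoch_exit(bar_epoch);
	return (ret);
}

static __unused void
bar_example_remove_head(void)
{
	struct bar *b;

	mtx_lock(&bar_mtx);
	b = bar_head;
	if (b != NULL)
		bar_head = b->b_next;	/* unpublish the element */
	mtx_unlock(&bar_mtx);
	if (b == NULL)
		return;
	/* Wait for all readers currently inside epoch sections. */
	epoch_wait(bar_epoch);
	free(b, M_TEMP);
}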

void
epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t))
{
	epoch_record_t er;
	ck_epoch_entry_t *cb;

	cb = (void *)ctx;
	MPASS(callback);
	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		goto boottime;
#if !defined(EARLY_AP_STARTUP)
	if (__predict_false(inited < 2))
		goto boottime;
#endif
	critical_enter();
	*DPCPU_PTR(epoch_cb_count) += 1;
	er = epoch->e_pcpu[curcpu];
	ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
	critical_exit();
	return;
 boottime:
	callback(ctx);
}
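
/*
 * Usage sketch (illustrative only): the usual deferred-reclamation pattern
 * embeds a struct epoch_context in the object, and the callback recovers the
 * object with __containerof() before freeing it.  The identifiers baz,
 * baz_epoch, and the M_TEMP malloc type used here are hypothetical choices
 * for the example.
 */
struct baz {
	int z_value;
	struct epoch_context z_ctx;
};
static epoch_t baz_epoch;

static void
baz_example_free_cb(epoch_context_t ctx)
{
	struct baz *z;

	z = __containerof(ctx, struct baz, z_ctx);
	free(z, M_TEMP);
}

static __unused void
baz_example_retire(struct baz *z)
{
	/* Safe to call from the unlink path; the free runs later. */
	epoch_call(baz_epoch, &z->z_ctx, baz_example_free_cb);
}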

static void
epoch_call_task(void *arg __unused)
{
	ck_stack_entry_t *cursor, *head, *next;
	ck_epoch_record_t *record;
	epoch_t epoch;
	ck_stack_t cb_stack;
	int i, npending, total;

	ck_stack_init(&cb_stack);
	critical_enter();
	epoch_enter(global_epoch);
	for (total = i = 0; i < epoch_count; i++) {
		if (__predict_false((epoch = allepochs[i]) == NULL))
			continue;
		record = &epoch->e_pcpu[curcpu]->er_record;
		if ((npending = record->n_pending) == 0)
			continue;
		ck_epoch_poll_deferred(record, &cb_stack);
		total += npending - record->n_pending;
	}
	epoch_exit(global_epoch);
	*DPCPU_PTR(epoch_cb_count) -= total;
	critical_exit();

	counter_u64_add(epoch_call_count, total);
	counter_u64_add(epoch_call_task_count, 1);

	head = ck_stack_batch_pop_npsc(&cb_stack);
	for (cursor = head; cursor != NULL; cursor = next) {
		struct ck_epoch_entry *entry =
		    ck_epoch_entry_container(cursor);

		next = CK_STACK_NEXT(cursor);
		entry->function(entry);
	}
}

int
in_epoch_verbose(epoch_t epoch, int dump_onfail)
{
	struct epoch_thread *tdwait;
	struct thread *td;
	epoch_record_t er;

	td = curthread;
	if (td->td_epochnest == 0)
		return (0);
	if (__predict_false((epoch) == NULL))
		return (0);
	critical_enter();
	er = epoch->e_pcpu[curcpu];
	TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
		if (tdwait->et_td == td) {
			critical_exit();
			return (1);
		}
	if (dump_onfail) {
		MPASS(td->td_pinned);
		printf("cpu: %d id: %d\n", curcpu, td->td_tid);
		TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
			printf("td_tid: %d ", tdwait->et_td->td_tid);
		printf("\n");
	}
	critical_exit();
	return (0);
}

int
in_epoch(epoch_t epoch)
{
	return (in_epoch_verbose(epoch, 0));
}
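
/*
 * Usage sketch (illustrative only): in_epoch() is handy for asserting that a
 * caller really is inside the expected epoch section before it touches
 * epoch-protected data.  qux_epoch and qux_requires_epoch are hypothetical.
 */
static epoch_t qux_epoch;

static __unused void
qux_requires_epoch(void)
{
	MPASS(in_epoch(qux_epoch));
	/* ... access qux_epoch-protected data here ... */
}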

void
epoch_adjust_prio(struct thread *td, u_char prio)
{
	thread_lock(td);
	sched_prio(td, prio);
	thread_unlock(td);
}