/*-
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Neither the name of Matthew Macy nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <ck_epoch.h>

MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");

/* arbitrary --- needs benchmarking */
#define MAX_ADAPTIVE_SPIN 5000
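/*
 * MAX_ADAPTIVE_SPIN bounds the busy-wait in epoch_block_handler() before a
 * waiter gives up spinning on a still-running epoch section and falls back
 * to migrating or blocking.
 */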

#define EPOCH_EXITING 0x1
#ifdef __amd64__
#define EPOCH_ALIGN CACHE_LINE_SIZE*2
#else
#define EPOCH_ALIGN CACHE_LINE_SIZE
#endif
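
/*
 * EPOCH_ALIGN pads the per-CPU state (and the hot fields of struct epoch)
 * out to separate cache lines; this is intended to avoid false sharing
 * between CPUs and with read-mostly data.
 */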

SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");

static int poll_intvl;
SYSCTL_INT(_kern_epoch, OID_AUTO, poll_intvl, CTLFLAG_RWTUN,
    &poll_intvl, 0, "# of ticks to wait between garbage collecting deferred frees");

static counter_u64_t block_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");

typedef struct epoch_cb {
	void (*ec_callback)(epoch_context_t);
	STAILQ_ENTRY(epoch_cb) ec_link;
} *epoch_cb_t;
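
/*
 * The epoch_context handed to epoch_call() is reused as the callback record
 * itself (see epoch_call() and epoch_call_task()), so callers embed it in
 * the object being reclaimed and no separate allocation is needed.
 */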

TAILQ_HEAD(threadlist, thread);

typedef struct epoch_record {
	ck_epoch_record_t er_record;
	volatile struct threadlist er_tdlist;
	volatile uint32_t er_gen;
	uint32_t er_cpuid;
} *epoch_record_t;

struct epoch_pcpu_state {
	struct epoch_record eps_record;
	STAILQ_HEAD(, epoch_cb) eps_cblist;
} __aligned(EPOCH_ALIGN);

struct epoch {
	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
	struct grouptask e_gtask;
	struct callout e_timer;
	struct mtx e_lock;
	int e_flags;
	/* make sure that immutable data doesn't overlap with the gtask, callout, and mutex */
	struct epoch_pcpu_state *e_pcpu_dom[MAXMEMDOM] __aligned(EPOCH_ALIGN);
	counter_u64_t e_frees;
	uint64_t e_free_last;
	struct epoch_pcpu_state *e_pcpu[0];
};

static __read_mostly int domcount[MAXMEMDOM];
static __read_mostly int domoffsets[MAXMEMDOM];
static __read_mostly int inited;

static void epoch_call_task(void *context);

#if defined(__powerpc64__) || defined(__powerpc__)
static bool usedomains = false;
#else
static bool usedomains = true;
#endif

static void
epoch_init(void *arg __unused)
{
	int domain;

	if (poll_intvl == 0)
		poll_intvl = hz;
	block_count = counter_u64_alloc(M_WAITOK);
	migrate_count = counter_u64_alloc(M_WAITOK);
	turnstile_count = counter_u64_alloc(M_WAITOK);
	switch_count = counter_u64_alloc(M_WAITOK);
	if (usedomains == false)
		goto done;
	for (domain = 0; domain < vm_ndomains; domain++) {
		domcount[domain] = CPU_COUNT(&cpuset_domain[domain]);
		printf("domcount[%d] %d\n", domain, domcount[domain]);
	}
	for (domain = 1; domain < vm_ndomains; domain++)
		domoffsets[domain] = domoffsets[domain-1] + domcount[domain-1];
	for (domain = 0; domain < vm_ndomains; domain++) {
		if (domcount[domain] == 0) {
			usedomains = false;
			break;
		}
	}
done:
	inited = 1;
}
SYSINIT(epoch, SI_SUB_CPU + 1, SI_ORDER_FIRST, epoch_init, NULL);
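
/*
 * epoch_init() runs at SI_SUB_CPU + 1, after CPUs and memory domains have
 * been enumerated; epoch_alloc() checks the "inited" flag and panics if it
 * is called earlier in boot.
 */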

static void
epoch_init_numa(epoch_t epoch)
{
	int domain, cpu_offset;
	struct epoch_pcpu_state *eps;
	epoch_record_t er;

	for (domain = 0; domain < vm_ndomains; domain++) {
		eps = malloc_domain(sizeof(*eps)*domcount[domain], M_EPOCH,
		    domain, M_ZERO|M_WAITOK);
		epoch->e_pcpu_dom[domain] = eps;
		cpu_offset = domoffsets[domain];
		for (int i = 0; i < domcount[domain]; i++, eps++) {
			epoch->e_pcpu[cpu_offset + i] = eps;
			er = &eps->eps_record;
			STAILQ_INIT(&eps->eps_cblist);
			ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
			TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
			er->er_cpuid = cpu_offset + i;
		}
	}
}

static void
epoch_init_legacy(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;
	epoch_record_t er;

	eps = malloc(sizeof(*eps)*mp_ncpus, M_EPOCH, M_ZERO|M_WAITOK);
	epoch->e_pcpu_dom[0] = eps;
	for (int i = 0; i < mp_ncpus; i++, eps++) {
		epoch->e_pcpu[i] = eps;
		er = &eps->eps_record;
		ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
		STAILQ_INIT(&eps->eps_cblist);
		er->er_cpuid = i;
	}
}

static void
epoch_callout(void *arg)
{
	epoch_t epoch;
	uint64_t frees;

	epoch = arg;
	frees = counter_u64_fetch(epoch->e_frees);
	/* pick some better value */
	if (frees - epoch->e_free_last > 10) {
		GROUPTASK_ENQUEUE(&epoch->e_gtask);
		epoch->e_free_last = frees;
	}
	if ((epoch->e_flags & EPOCH_EXITING) == 0)
		callout_reset(&epoch->e_timer, poll_intvl, epoch_callout, epoch);
}

epoch_t
epoch_alloc(void)
{
	epoch_t epoch;

	if (__predict_false(!inited))
		panic("%s called too early in boot", __func__);
	epoch = malloc(sizeof(struct epoch) + mp_ncpus*sizeof(void*),
	    M_EPOCH, M_ZERO|M_WAITOK);
	ck_epoch_init(&epoch->e_epoch);
	epoch->e_frees = counter_u64_alloc(M_WAITOK);
	mtx_init(&epoch->e_lock, "epoch callout", NULL, MTX_DEF);
	callout_init_mtx(&epoch->e_timer, &epoch->e_lock, 0);
	taskqgroup_config_gtask_init(epoch, &epoch->e_gtask, epoch_call_task, "epoch call task");
	if (usedomains)
		epoch_init_numa(epoch);
	else
		epoch_init_legacy(epoch);
	callout_reset(&epoch->e_timer, poll_intvl, epoch_callout, epoch);
	return (epoch);
}

void
epoch_free(epoch_t epoch)
{
	int domain;
#ifdef INVARIANTS
	struct epoch_pcpu_state *eps;
	int cpu;

	CPU_FOREACH(cpu) {
		eps = epoch->e_pcpu[cpu];
		MPASS(TAILQ_EMPTY(&eps->eps_record.er_tdlist));
	}
#endif
	mtx_lock(&epoch->e_lock);
	epoch->e_flags |= EPOCH_EXITING;
	mtx_unlock(&epoch->e_lock);
	/*
	 * Execute any lingering callbacks
	 */
	GROUPTASK_ENQUEUE(&epoch->e_gtask);
	gtaskqueue_drain(epoch->e_gtask.gt_taskqueue, &epoch->e_gtask.gt_task);
	callout_drain(&epoch->e_timer);
	mtx_destroy(&epoch->e_lock);
	counter_u64_free(epoch->e_frees);
	taskqgroup_config_gtask_deinit(&epoch->e_gtask);
	if (usedomains)
		for (domain = 0; domain < vm_ndomains; domain++)
			free_domain(epoch->e_pcpu_dom[domain], M_EPOCH);
	else
		free(epoch->e_pcpu_dom[0], M_EPOCH);
	free(epoch, M_EPOCH);
}

#define INIT_CHECK(epoch)					\
	do {							\
		if (__predict_false((epoch) == NULL))		\
			return;					\
	} while (0)
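
/*
 * A minimal usage sketch (the "foo" consumer below is hypothetical, not part
 * of this file): readers bracket lockless lookups with epoch_enter() and
 * epoch_exit(), while a writer unlinks the object and defers its free with
 * epoch_call(), or blocks in epoch_wait() before freeing synchronously.
 *
 *	struct foo {
 *		LIST_ENTRY(foo) f_link;
 *		struct epoch_context f_ctx;
 *	};
 *	static epoch_t foo_epoch;	(initialized with epoch_alloc())
 *
 *	Reader:
 *		epoch_enter(foo_epoch);
 *		LIST_FOREACH(foo, &foo_head, f_link)
 *			foo_inspect(foo);
 *		epoch_exit(foo_epoch);
 *
 *	Writer:
 *		LIST_REMOVE(foo, f_link);
 *		epoch_call(foo_epoch, &foo->f_ctx, foo_reclaim);
 *
 * where foo_reclaim() recovers foo from the context, e.g. with
 * __containerof(), and frees it.
 */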

void
epoch_enter(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;
	struct thread *td;

	INIT_CHECK(epoch);
	td = curthread;
	critical_enter();
	eps = epoch->e_pcpu[curcpu];
	td->td_epochnest++;
	MPASS(td->td_epochnest < UCHAR_MAX - 2);
	if (td->td_epochnest == 1)
		TAILQ_INSERT_TAIL(&eps->eps_record.er_tdlist, td, td_epochq);
#ifdef INVARIANTS
	if (td->td_epochnest > 1) {
		struct thread *curtd;
		int found = 0;

		TAILQ_FOREACH(curtd, &eps->eps_record.er_tdlist, td_epochq)
			if (curtd == td)
				found = 1;
		KASSERT(found, ("recursing on a second epoch"));
	}
#endif
	ck_epoch_begin(&eps->eps_record.er_record, NULL);
	critical_exit();
}

void
epoch_enter_nopreempt(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;

	INIT_CHECK(epoch);
	critical_enter();
	eps = epoch->e_pcpu[curcpu];
	curthread->td_epochnest++;
	MPASS(curthread->td_epochnest < UCHAR_MAX - 2);
	ck_epoch_begin(&eps->eps_record.er_record, NULL);
}

void
epoch_exit(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;
	struct thread *td;

	td = curthread;
	INIT_CHECK(epoch);
	critical_enter();
	eps = epoch->e_pcpu[curcpu];
	ck_epoch_end(&eps->eps_record.er_record, NULL);
	td->td_epochnest--;
	if (td->td_epochnest == 0)
		TAILQ_REMOVE(&eps->eps_record.er_tdlist, td, td_epochq);
	/* Let spinning waiters in epoch_block_handler() see that we left. */
	eps->eps_record.er_gen++;
	critical_exit();
}

void
epoch_exit_nopreempt(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;

	INIT_CHECK(epoch);
	MPASS(curthread->td_critnest);
	eps = epoch->e_pcpu[curcpu];
	ck_epoch_end(&eps->eps_record.er_record, NULL);
	curthread->td_epochnest--;
	critical_exit();
}

/*
 * epoch_block_handler is a callback from the ck code when another thread is
 * currently in an epoch section.
 */
static void
epoch_block_handler(struct ck_epoch *global __unused, ck_epoch_record_t *cr,
    void *arg __unused)
{
	epoch_record_t record;
	struct epoch_pcpu_state *eps;
	struct thread *td, *tdwait, *owner;
	struct turnstile *ts;
	struct lock_object *lock;
	int spincount, gen;

	record = __containerof(cr, struct epoch_record, er_record);
	td = curthread;
	spincount = 0;
	counter_u64_add(block_count, 1);
	if (record->er_cpuid != curcpu) {
		/*
		 * If the head of the list is running, we can wait for it
		 * to remove itself from the list and thus save us the
		 * overhead of a migration
		 */
		if ((tdwait = TAILQ_FIRST(&record->er_tdlist)) != NULL &&
		    TD_IS_RUNNING(tdwait)) {
			gen = record->er_gen;
			thread_unlock(td);
			do {
				cpu_spinwait();
			} while (tdwait == TAILQ_FIRST(&record->er_tdlist) &&
			    gen == record->er_gen && TD_IS_RUNNING(tdwait) &&
			    spincount++ < MAX_ADAPTIVE_SPIN);
			thread_lock(td);
			return;
		}

		/*
		 * Being on the same CPU as that of the record on which
		 * we need to wait allows us access to the thread
		 * list associated with that CPU. We can then examine the
		 * oldest thread in the queue and wait on its turnstile
		 * until it resumes and so on until a grace period
		 * elapses.
		 */
		counter_u64_add(migrate_count, 1);
		sched_bind(td, record->er_cpuid);
		/*
		 * At this point we need to return to the ck code
		 * to scan to see if a grace period has elapsed.
		 * We can't move on to check the thread list, because
		 * in the meantime new threads may have arrived that
		 * in fact belong to a different epoch.
		 */
		return;
	}
	/*
	 * Try to find a thread in an epoch section on this CPU
	 * waiting on a turnstile. Otherwise find the lowest
	 * priority thread (highest prio value) and drop our priority
	 * to match to allow it to run.
	 */
	TAILQ_FOREACH(tdwait, &record->er_tdlist, td_epochq) {
		/*
		 * Propagate our priority to any other waiters to prevent us
		 * from starving them. They will have their original priority
		 * restored on exit from epoch_wait().
		 */
		if (!TD_IS_INHIBITED(tdwait) && tdwait->td_priority > td->td_priority) {
			thread_lock(tdwait);
			sched_prio(tdwait, td->td_priority);
			thread_unlock(tdwait);
		}
		if (TD_IS_INHIBITED(tdwait) && TD_ON_LOCK(tdwait) &&
		    ((ts = tdwait->td_blocked) != NULL)) {
			/*
			 * We unlock td to allow turnstile_wait to reacquire
			 * the thread lock. Before unlocking it we enter a
			 * critical section to prevent preemption after we
			 * reenable interrupts by dropping the thread lock in
			 * order to prevent tdwait from getting to run.
			 */
			critical_enter();
			thread_unlock(td);
			owner = turnstile_lock(ts, &lock);
			/*
			 * The owner pointer indicates that the lock succeeded.
			 * Only in case we hold the lock and the turnstile we
			 * locked is still the one that tdwait is blocked on
			 * can we continue. Otherwise the turnstile pointer has
			 * been changed out from underneath us, as in the case
			 * where the lock holder has signalled tdwait, and we
			 * need to continue.
			 */
			if (owner != NULL && ts == tdwait->td_blocked) {
				MPASS(TD_IS_INHIBITED(tdwait) && TD_ON_LOCK(tdwait));
				critical_exit();
				turnstile_wait(ts, owner, tdwait->td_tsqueue);
				counter_u64_add(turnstile_count, 1);
				thread_lock(td);
				return;
			} else if (owner != NULL)
				turnstile_unlock(ts, lock);
			thread_lock(td);
			critical_exit();
			KASSERT(td->td_locks == 0,
			    ("%d locks held", td->td_locks));
		}
	}
	/*
	 * We didn't find any threads actually blocked on a lock
	 * so we have nothing to do except context switch away.
	 */
	counter_u64_add(switch_count, 1);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
	/*
	 * Release the thread lock while yielding to
	 * allow other threads to acquire the lock
	 * pointed to by TDQ_LOCKPTR(td). Else a
	 * deadlock like situation might happen. (HPS)
	 */
	thread_unlock(td);
	thread_lock(td);
}
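
/*
 * epoch_wait() blocks until every thread that was in an epoch section when
 * it was called has exited that section.  When a section is still active,
 * the waiter is bound to the record's CPU so epoch_block_handler() can
 * inspect that CPU's thread list directly.
 */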
void
epoch_wait(epoch_t epoch)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;

	INIT_CHECK(epoch);

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "epoch_wait() can sleep");

	td = curthread;
	KASSERT(td->td_epochnest == 0, ("epoch_wait() in the middle of an epoch section"));
	thread_lock(td);
	DROP_GIANT();

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;
	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);
	KASSERT(td->td_locks == 0,
	    ("%d locks held", td->td_locks));
	PICKUP_GIANT();
}

void
epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t))
{
	struct epoch_pcpu_state *eps;
	epoch_cb_t cb;

	cb = (void *)ctx;
	MPASS(cb->ec_callback == NULL);
	MPASS(cb->ec_link.stqe_next == NULL);

	critical_enter();
	cb->ec_callback = callback;
	counter_u64_add(epoch->e_frees, 1);
	eps = epoch->e_pcpu[curcpu];
	STAILQ_INSERT_HEAD(&eps->eps_cblist, cb, ec_link);
	critical_exit();
}
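
/*
 * Deferred callbacks are queued per CPU and drained by epoch_call_task(),
 * which the periodic callout schedules once enough frees have accumulated.
 * The task gathers every CPU's list, waits for a grace period with
 * epoch_wait(), and only then runs the callbacks.
 */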
static void
epoch_call_task(void *context)
{
	struct epoch_pcpu_state *eps;
	epoch_t epoch;
	epoch_cb_t cb;
	struct thread *td;
	int cpu;
	STAILQ_HEAD(, epoch_cb) tmp_head;

	epoch = context;
	STAILQ_INIT(&tmp_head);
	td = curthread;
	thread_lock(td);
	CPU_FOREACH(cpu) {
		sched_bind(td, cpu);
		eps = epoch->e_pcpu[cpu];
		if (!STAILQ_EMPTY(&eps->eps_cblist))
			STAILQ_CONCAT(&tmp_head, &eps->eps_cblist);
	}
	sched_unbind(td);
	thread_unlock(td);
	epoch_wait(epoch);

	while ((cb = STAILQ_FIRST(&tmp_head)) != NULL) {
		STAILQ_REMOVE_HEAD(&tmp_head, ec_link);
		cb->ec_callback((void*)cb);
	}
}

int
in_epoch(void)
{
	return (curthread->td_epochnest != 0);
}