/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#ifdef EPOCH_TRACE
#include <machine/stdarg.h>
#include <sys/stack.h>
#include <sys/tree.h>
#endif
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#include <ck_epoch.h>

static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");

#ifdef __amd64__
#define EPOCH_ALIGN     (CACHE_LINE_SIZE * 2)
#else
#define EPOCH_ALIGN     CACHE_LINE_SIZE
#endif

TAILQ_HEAD (epoch_tdlist, epoch_tracker);
typedef struct epoch_record {
        ck_epoch_record_t er_record;
        struct epoch_context er_drain_ctx;
        struct epoch *er_parent;
        volatile struct epoch_tdlist er_tdlist;
        volatile uint32_t er_gen;
        uint32_t er_cpuid;
} __aligned(EPOCH_ALIGN) *epoch_record_t;

struct epoch {
        struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
        epoch_record_t e_pcpu_record;
        int     e_idx;
        int     e_flags;
        struct sx e_drain_sx;
        struct mtx e_drain_mtx;
        volatile int e_drain_count;
        const char *e_name;
};

/* arbitrary --- needs benchmarking */
#define MAX_ADAPTIVE_SPIN 100
#define MAX_EPOCHS 64

CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");

/* Stats. */
static counter_u64_t block_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
static counter_u64_t epoch_call_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
    &epoch_call_count, "# of times a callback was deferred");
static counter_u64_t epoch_call_task_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
    &epoch_call_task_count, "# of times a callback task was run");

TAILQ_HEAD (threadlist, thread);

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

epoch_t allepochs[MAX_EPOCHS];

DPCPU_DEFINE(struct grouptask, epoch_cb_task);
DPCPU_DEFINE(int, epoch_cb_count);

static __read_mostly int inited;
static __read_mostly int epoch_count;
__read_mostly epoch_t global_epoch;
__read_mostly epoch_t global_epoch_preempt;

static void epoch_call_task(void *context __unused);
static uma_zone_t pcpu_zone_record;

#ifdef EPOCH_TRACE
struct stackentry {
        RB_ENTRY(stackentry) se_node;
        struct stack se_stack;
};

static int
stackentry_compare(struct stackentry *a, struct stackentry *b)
{

        if (a->se_stack.depth > b->se_stack.depth)
                return (1);
        if (a->se_stack.depth < b->se_stack.depth)
                return (-1);
        for (int i = 0; i < a->se_stack.depth; i++) {
                if (a->se_stack.pcs[i] > b->se_stack.pcs[i])
                        return (1);
                if (a->se_stack.pcs[i] < b->se_stack.pcs[i])
                        return (-1);
        }

        return (0);
}

RB_HEAD(stacktree, stackentry) epoch_stacks = RB_INITIALIZER(&epoch_stacks);
RB_GENERATE_STATIC(stacktree, stackentry, se_node, stackentry_compare);

static struct mtx epoch_stacks_lock;
MTX_SYSINIT(epochstacks, &epoch_stacks_lock, "epoch_stacks", MTX_DEF);

static void epoch_trace_report(const char *fmt, ...) __printflike(1, 2);
static inline void
epoch_trace_report(const char *fmt, ...)
{
        va_list ap;
        struct stackentry se, *new;

        stack_zero(&se.se_stack);       /* XXX: is it really needed? */
        stack_save(&se.se_stack);

        /* Tree is never reduced - go lockless. */
        if (RB_FIND(stacktree, &epoch_stacks, &se) != NULL)
                return;

        new = malloc(sizeof(*new), M_STACK, M_NOWAIT);
        if (new != NULL) {
                bcopy(&se.se_stack, &new->se_stack, sizeof(struct stack));

                mtx_lock(&epoch_stacks_lock);
                new = RB_INSERT(stacktree, &epoch_stacks, new);
                mtx_unlock(&epoch_stacks_lock);
                if (new != NULL)
                        free(new, M_STACK);
        }

        va_start(ap, fmt);
        (void)vprintf(fmt, ap);
        va_end(ap);
        stack_print_ddb(&se.se_stack);
}

static inline void
epoch_trace_enter(struct thread *td, epoch_t epoch, epoch_tracker_t et,
    const char *file, int line)
{
        epoch_tracker_t iet;

        SLIST_FOREACH(iet, &td->td_epochs, et_tlink)
                if (iet->et_epoch == epoch)
                        epoch_trace_report("Recursively entering epoch %s "
                            "previously entered at %s:%d\n",
                            epoch->e_name, iet->et_file, iet->et_line);
        et->et_epoch = epoch;
        et->et_file = file;
        et->et_line = line;
        SLIST_INSERT_HEAD(&td->td_epochs, et, et_tlink);
}

static inline void
epoch_trace_exit(struct thread *td, epoch_t epoch, epoch_tracker_t et,
    const char *file, int line)
{

        if (SLIST_FIRST(&td->td_epochs) != et) {
                epoch_trace_report("Exiting epoch %s in a not nested order. "
                    "Most recently entered %s at %s:%d\n",
                    epoch->e_name,
                    SLIST_FIRST(&td->td_epochs)->et_epoch->e_name,
                    SLIST_FIRST(&td->td_epochs)->et_file,
                    SLIST_FIRST(&td->td_epochs)->et_line);
                /* This will panic if et is not anywhere on td_epochs. */
                SLIST_REMOVE(&td->td_epochs, et, epoch_tracker, et_tlink);
        } else
                SLIST_REMOVE_HEAD(&td->td_epochs, et_tlink);
}
#endif /* EPOCH_TRACE */

static void
epoch_init(void *arg __unused)
{
        int cpu;

        block_count = counter_u64_alloc(M_WAITOK);
        migrate_count = counter_u64_alloc(M_WAITOK);
        turnstile_count = counter_u64_alloc(M_WAITOK);
        switch_count = counter_u64_alloc(M_WAITOK);
        epoch_call_count = counter_u64_alloc(M_WAITOK);
        epoch_call_task_count = counter_u64_alloc(M_WAITOK);

        pcpu_zone_record = uma_zcreate("epoch_record pcpu",
            sizeof(struct epoch_record), NULL, NULL, NULL, NULL,
            UMA_ALIGN_PTR, UMA_ZONE_PCPU);
        CPU_FOREACH(cpu) {
                GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0,
                    epoch_call_task, NULL);
                taskqgroup_attach_cpu(qgroup_softirq,
                    DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, NULL, NULL,
                    "epoch call task");
        }
        SLIST_INIT(&thread0.td_epochs);
        inited = 1;
        global_epoch = epoch_alloc("Global", 0);
        global_epoch_preempt = epoch_alloc("Global preemptible", EPOCH_PREEMPT);
}
SYSINIT(epoch, SI_SUB_TASKQ + 1, SI_ORDER_FIRST, epoch_init, NULL);

#if !defined(EARLY_AP_STARTUP)
static void
epoch_init_smp(void *dummy __unused)
{
        inited = 2;
}
SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
#endif

static void
epoch_ctor(epoch_t epoch)
{
        epoch_record_t er;
        int cpu;

        epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
        CPU_FOREACH(cpu) {
                er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
                bzero(er, sizeof(*er));
                ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
                TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
                er->er_cpuid = cpu;
                er->er_parent = epoch;
        }
}

static void
epoch_adjust_prio(struct thread *td, u_char prio)
{

        thread_lock(td);
        sched_prio(td, prio);
        thread_unlock(td);
}

epoch_t
epoch_alloc(const char *name, int flags)
{
        epoch_t epoch;

        if (__predict_false(!inited))
                panic("%s called too early in boot", __func__);
        epoch = malloc(sizeof(struct epoch), M_EPOCH, M_ZERO | M_WAITOK);
        ck_epoch_init(&epoch->e_epoch);
        epoch_ctor(epoch);
        MPASS(epoch_count < MAX_EPOCHS - 2);
        epoch->e_flags = flags;
        epoch->e_idx = epoch_count;
        epoch->e_name = name;
        sx_init(&epoch->e_drain_sx, "epoch-drain-sx");
        mtx_init(&epoch->e_drain_mtx, "epoch-drain-mtx", NULL, MTX_DEF);
        allepochs[epoch_count++] = epoch;
        return (epoch);
}

void
epoch_free(epoch_t epoch)
{

        epoch_drain_callbacks(epoch);
        allepochs[epoch->e_idx] = NULL;
        epoch_wait(global_epoch);
        uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
        mtx_destroy(&epoch->e_drain_mtx);
        sx_destroy(&epoch->e_drain_sx);
        free(epoch, M_EPOCH);
}
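
/*
 * A minimal sketch of the intended lifecycle, assuming a hypothetical
 * consumer epoch named "foo_epoch".  EPOCH_PREEMPT requests a preemptible
 * epoch; passing 0 requests a non-preemptible one.  epoch_free() drains
 * pending callbacks via epoch_drain_callbacks() before releasing the
 * per-CPU records, so it must be called from a context that can sleep:
 *
 *      epoch_t foo_epoch;
 *
 *      foo_epoch = epoch_alloc("foo", EPOCH_PREEMPT);
 *      ...             (readers and deferred frees use foo_epoch)
 *      epoch_free(foo_epoch);
 */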

static epoch_record_t
epoch_currecord(epoch_t epoch)
{

        return (zpcpu_get_cpu(epoch->e_pcpu_record, curcpu));
}

#define INIT_CHECK(epoch)                                       \
        do {                                                    \
                if (__predict_false((epoch) == NULL))           \
                        return;                                 \
        } while (0)

void
_epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
{
        struct epoch_record *er;
        struct thread *td;

        MPASS(cold || epoch != NULL);
        INIT_CHECK(epoch);
        MPASS(epoch->e_flags & EPOCH_PREEMPT);
        td = curthread;
#ifdef EPOCH_TRACE
        epoch_trace_enter(td, epoch, et, file, line);
#endif
        et->et_td = td;
        td->td_epochnest++;
        critical_enter();
        sched_pin();
        td->td_pre_epoch_prio = td->td_priority;
        er = epoch_currecord(epoch);
        TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link);
        ck_epoch_begin(&er->er_record, &et->et_section);
        critical_exit();
}

void
epoch_enter(epoch_t epoch)
{
        struct thread *td;
        epoch_record_t er;

        MPASS(cold || epoch != NULL);
        INIT_CHECK(epoch);
        td = curthread;
        td->td_epochnest++;
        critical_enter();
        er = epoch_currecord(epoch);
        ck_epoch_begin(&er->er_record, NULL);
}

void
_epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
{
        struct epoch_record *er;
        struct thread *td;

        INIT_CHECK(epoch);
        td = curthread;
        critical_enter();
        sched_unpin();
        MPASS(td->td_epochnest);
        td->td_epochnest--;
        er = epoch_currecord(epoch);
        MPASS(epoch->e_flags & EPOCH_PREEMPT);
        MPASS(et != NULL);
        MPASS(et->et_td == td);
#ifdef INVARIANTS
        et->et_td = (void *)0xDEADBEEF;
#endif
        ck_epoch_end(&er->er_record, &et->et_section);
        TAILQ_REMOVE(&er->er_tdlist, et, et_link);
        er->er_gen++;
        if (__predict_false(td->td_pre_epoch_prio != td->td_priority))
                epoch_adjust_prio(td, td->td_pre_epoch_prio);
        critical_exit();
#ifdef EPOCH_TRACE
        epoch_trace_exit(td, epoch, et, file, line);
#endif
}

void
epoch_exit(epoch_t epoch)
{
        struct thread *td;
        epoch_record_t er;

        INIT_CHECK(epoch);
        td = curthread;
        MPASS(td->td_epochnest);
        td->td_epochnest--;
        er = epoch_currecord(epoch);
        ck_epoch_end(&er->er_record, NULL);
        critical_exit();
}
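
/*
 * A minimal sketch of a read section, assuming the hypothetical
 * "foo_epoch" above.  The preemptible variants take a caller-supplied
 * tracker; the non-preemptible variants run inside a critical section
 * and therefore must not sleep or be preempted:
 *
 *      struct epoch_tracker et;
 *
 *      epoch_enter_preempt(foo_epoch, &et);
 *      (walk an epoch-protected structure; sleeping is not allowed)
 *      epoch_exit_preempt(foo_epoch, &et);
 */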

/*
 * epoch_block_handler_preempt() is a callback from the CK code when another
 * thread is currently in an epoch section.
 */
static void
epoch_block_handler_preempt(struct ck_epoch *global __unused,
    ck_epoch_record_t *cr, void *arg __unused)
{
        epoch_record_t record;
        struct thread *td, *owner, *curwaittd;
        struct epoch_tracker *tdwait;
        struct turnstile *ts;
        struct lock_object *lock;
        int spincount, gen;
        int locksheld __unused;

        record = __containerof(cr, struct epoch_record, er_record);
        td = curthread;
        locksheld = td->td_locks;
        spincount = 0;
        counter_u64_add(block_count, 1);
        /*
         * We lost a race and there are no longer any threads
         * on the CPU in an epoch section.
         */
        if (TAILQ_EMPTY(&record->er_tdlist))
                return;

        if (record->er_cpuid != curcpu) {
                /*
                 * If the head of the list is running, we can wait for it
                 * to remove itself from the list and thus save us the
                 * overhead of a migration.
                 */
                gen = record->er_gen;
                thread_unlock(td);
                /*
                 * We can't actually check whether the waiting thread is
                 * running, so we simply poll for it to exit before giving
                 * up and migrating.
                 */
                do {
                        cpu_spinwait();
                } while (!TAILQ_EMPTY(&record->er_tdlist) &&
                                 gen == record->er_gen &&
                                 spincount++ < MAX_ADAPTIVE_SPIN);
                thread_lock(td);
                /*
                 * If the generation has changed we can poll again;
                 * otherwise we need to migrate.
                 */
                if (gen != record->er_gen)
                        return;
                /*
                 * Being on the same CPU as that of the record on which
                 * we need to wait allows us access to the thread
                 * list associated with that CPU. We can then examine the
                 * oldest thread in the queue and wait on its turnstile
                 * until it resumes and so on until a grace period
                 * elapses.
                 */
                counter_u64_add(migrate_count, 1);
                sched_bind(td, record->er_cpuid);
                /*
                 * At this point we need to return to the ck code
                 * to scan to see if a grace period has elapsed.
                 * We can't move on to check the thread list, because
                 * in the meantime new threads may have arrived that
                 * in fact belong to a different epoch.
                 */
                return;
        }
        /*
         * Try to find a thread in an epoch section on this CPU
         * waiting on a turnstile. Otherwise find the lowest
         * priority thread (highest prio value) and drop our priority
         * to match to allow it to run.
         */
        TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
                /*
                 * Propagate our priority to any other waiters to prevent us
                 * from starving them. They will have their original priority
                 * restored on exit from epoch_wait().
                 */
                curwaittd = tdwait->et_td;
                if (!TD_IS_INHIBITED(curwaittd) &&
                    curwaittd->td_priority > td->td_priority) {
                        critical_enter();
                        thread_unlock(td);
                        thread_lock(curwaittd);
                        sched_prio(curwaittd, td->td_priority);
                        thread_unlock(curwaittd);
                        thread_lock(td);
                        critical_exit();
                }
                if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
                    ((ts = curwaittd->td_blocked) != NULL)) {
                        /*
                         * We unlock td to allow turnstile_wait() to
                         * reacquire the thread lock. Before unlocking it
                         * we enter a critical section so that we are not
                         * preempted once interrupts are re-enabled by
                         * dropping the thread lock, which would otherwise
                         * let curwaittd run.
                         */
                        critical_enter();
                        thread_unlock(td);

                        if (turnstile_lock(ts, &lock, &owner)) {
                                if (ts == curwaittd->td_blocked) {
                                        MPASS(TD_IS_INHIBITED(curwaittd) &&
                                            TD_ON_LOCK(curwaittd));
                                        critical_exit();
                                        turnstile_wait(ts, owner,
                                            curwaittd->td_tsqueue);
                                        counter_u64_add(turnstile_count, 1);
                                        thread_lock(td);
                                        return;
                                }
                                turnstile_unlock(ts, lock);
                        }
                        thread_lock(td);
                        critical_exit();
                        KASSERT(td->td_locks == locksheld,
                            ("%d extra locks held", td->td_locks - locksheld));
                }
        }
        /*
         * We didn't find any threads actually blocked on a lock,
         * so we have nothing to do except context switch away.
         */
        counter_u64_add(switch_count, 1);
        mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

        /*
         * Release the thread lock while yielding to
         * allow other threads to acquire the lock
         * pointed to by TDQ_LOCKPTR(td). Otherwise a
         * deadlock-like situation might occur. (HPS)
         */
        thread_unlock(td);
        thread_lock(td);
}

void
epoch_wait_preempt(epoch_t epoch)
{
        struct thread *td;
        int was_bound;
        int old_cpu;
        int old_pinned;
        u_char old_prio;
        int locks __unused;

        MPASS(cold || epoch != NULL);
        INIT_CHECK(epoch);
        td = curthread;
#ifdef INVARIANTS
        locks = curthread->td_locks;
        MPASS(epoch->e_flags & EPOCH_PREEMPT);
        if ((epoch->e_flags & EPOCH_LOCKED) == 0)
                WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
                    "epoch_wait() can be long running");
        KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle "
            "of an epoch section of the same epoch"));
#endif
        thread_lock(td);
        DROP_GIANT();

        old_cpu = PCPU_GET(cpuid);
        old_pinned = td->td_pinned;
        old_prio = td->td_priority;
        was_bound = sched_is_bound(td);
        sched_unbind(td);
        td->td_pinned = 0;
        sched_bind(td, old_cpu);

        ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt,
            NULL);

        /* restore CPU binding, if any */
        if (was_bound != 0) {
                sched_bind(td, old_cpu);
        } else {
                /* get thread back to initial CPU, if any */
                if (old_pinned != 0)
                        sched_bind(td, old_cpu);
                sched_unbind(td);
        }
        /* restore pinned after bind */
        td->td_pinned = old_pinned;

        /* restore thread priority */
        sched_prio(td, old_prio);
        thread_unlock(td);
        PICKUP_GIANT();
        KASSERT(td->td_locks == locks,
            ("%d residual locks held", td->td_locks - locks));
}

static void
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
    void *arg __unused)
{
        cpu_spinwait();
}

void
epoch_wait(epoch_t epoch)
{

        MPASS(cold || epoch != NULL);
        INIT_CHECK(epoch);
        MPASS(epoch->e_flags == 0);
        critical_enter();
        ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
        critical_exit();
}
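
/*
 * A minimal sketch of synchronous reclamation with epoch_wait_preempt(),
 * assuming a hypothetical epoch-protected list with entries of type
 * "struct foo" and a malloc type "M_FOO": unlink the object, wait for
 * every in-flight reader to leave its section, then free:
 *
 *      CK_LIST_REMOVE(p, foo_link);
 *      epoch_wait_preempt(foo_epoch);
 *      free(p, M_FOO);
 */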

void
epoch_call(epoch_t epoch, epoch_context_t ctx,
    void (*callback)(epoch_context_t))
{
        epoch_record_t er;
        ck_epoch_entry_t *cb;

        cb = (void *)ctx;

        MPASS(callback);
        /* too early in boot to have epoch set up */
        if (__predict_false(epoch == NULL))
                goto boottime;
#if !defined(EARLY_AP_STARTUP)
        if (__predict_false(inited < 2))
                goto boottime;
#endif

        critical_enter();
        *DPCPU_PTR(epoch_cb_count) += 1;
        er = epoch_currecord(epoch);
        ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
        critical_exit();
        return;
boottime:
        callback(ctx);
}
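
/*
 * A minimal sketch of deferred reclamation with epoch_call(), assuming a
 * hypothetical "struct foo" that embeds a struct epoch_context named
 * "foo_epoch_ctx".  Unlike epoch_wait(), this does not block; the callback
 * runs from the per-CPU epoch task once a grace period has elapsed:
 *
 *      static void
 *      foo_free_cb(epoch_context_t ctx)
 *      {
 *              struct foo *p;
 *
 *              p = __containerof(ctx, struct foo, foo_epoch_ctx);
 *              free(p, M_FOO);
 *      }
 *
 *      CK_LIST_REMOVE(p, foo_link);
 *      epoch_call(foo_epoch, &p->foo_epoch_ctx, foo_free_cb);
 */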

static void
epoch_call_task(void *arg __unused)
{
        ck_stack_entry_t *cursor, *head, *next;
        ck_epoch_record_t *record;
        epoch_record_t er;
        epoch_t epoch;
        ck_stack_t cb_stack;
        int i, npending, total;

        ck_stack_init(&cb_stack);
        critical_enter();
        epoch_enter(global_epoch);
        for (total = i = 0; i < epoch_count; i++) {
                if (__predict_false((epoch = allepochs[i]) == NULL))
                        continue;
                er = epoch_currecord(epoch);
                record = &er->er_record;
                if ((npending = record->n_pending) == 0)
                        continue;
                ck_epoch_poll_deferred(record, &cb_stack);
                total += npending - record->n_pending;
        }
        epoch_exit(global_epoch);
        *DPCPU_PTR(epoch_cb_count) -= total;
        critical_exit();

        counter_u64_add(epoch_call_count, total);
        counter_u64_add(epoch_call_task_count, 1);

        head = ck_stack_batch_pop_npsc(&cb_stack);
        for (cursor = head; cursor != NULL; cursor = next) {
                struct ck_epoch_entry *entry =
                    ck_epoch_entry_container(cursor);

                next = CK_STACK_NEXT(cursor);
                entry->function(entry);
        }
}

int
in_epoch_verbose(epoch_t epoch, int dump_onfail)
{
        struct epoch_tracker *tdwait;
        struct thread *td;
        epoch_record_t er;

        td = curthread;
        if (td->td_epochnest == 0)
                return (0);
        if (__predict_false((epoch) == NULL))
                return (0);
        critical_enter();
        er = epoch_currecord(epoch);
        TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
                if (tdwait->et_td == td) {
                        critical_exit();
                        return (1);
                }
#ifdef INVARIANTS
        if (dump_onfail) {
                MPASS(td->td_pinned);
                printf("cpu: %d id: %d\n", curcpu, td->td_tid);
                TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
                        printf("td_tid: %d ", tdwait->et_td->td_tid);
                printf("\n");
        }
#endif
        critical_exit();
        return (0);
}

int
in_epoch(epoch_t epoch)
{
        return (in_epoch_verbose(epoch, 0));
}
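
/*
 * A minimal sketch of how in_epoch() is typically used, assuming the
 * hypothetical "foo_epoch": asserting that a function walking an
 * epoch-protected structure was called from within a section of the
 * expected (preemptible) epoch:
 *
 *      MPASS(in_epoch(foo_epoch));
 */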

static void
epoch_drain_cb(struct epoch_context *ctx)
{
        struct epoch *epoch =
            __containerof(ctx, struct epoch_record, er_drain_ctx)->er_parent;

        if (atomic_fetchadd_int(&epoch->e_drain_count, -1) == 1) {
                mtx_lock(&epoch->e_drain_mtx);
                wakeup(epoch);
                mtx_unlock(&epoch->e_drain_mtx);
        }
}

void
epoch_drain_callbacks(epoch_t epoch)
{
        epoch_record_t er;
        struct thread *td;
        int was_bound;
        int old_pinned;
        int old_cpu;
        int cpu;

        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
            "epoch_drain_callbacks() may sleep!");

        /* too early in boot to have epoch set up */
        if (__predict_false(epoch == NULL))
                return;
#if !defined(EARLY_AP_STARTUP)
        if (__predict_false(inited < 2))
                return;
#endif
        DROP_GIANT();

        sx_xlock(&epoch->e_drain_sx);
        mtx_lock(&epoch->e_drain_mtx);

        td = curthread;
        thread_lock(td);
        old_cpu = PCPU_GET(cpuid);
        old_pinned = td->td_pinned;
        was_bound = sched_is_bound(td);
        sched_unbind(td);
        td->td_pinned = 0;

        CPU_FOREACH(cpu)
                epoch->e_drain_count++;
        CPU_FOREACH(cpu) {
                er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
                sched_bind(td, cpu);
                epoch_call(epoch, &er->er_drain_ctx, &epoch_drain_cb);
        }

        /* restore CPU binding, if any */
        if (was_bound != 0) {
                sched_bind(td, old_cpu);
        } else {
                /* get thread back to initial CPU, if any */
                if (old_pinned != 0)
                        sched_bind(td, old_cpu);
                sched_unbind(td);
        }
        /* restore pinned after bind */
        td->td_pinned = old_pinned;

        thread_unlock(td);

        while (epoch->e_drain_count != 0)
                msleep(epoch, &epoch->e_drain_mtx, PZERO, "EDRAIN", 0);

        mtx_unlock(&epoch->e_drain_mtx);
        sx_xunlock(&epoch->e_drain_sx);

        PICKUP_GIANT();
}

void
epoch_thread_init(struct thread *td)
{

        td->td_et = malloc(sizeof(struct epoch_tracker), M_EPOCH, M_WAITOK);
}

void
epoch_thread_fini(struct thread *td)
{

        free(td->td_et, M_EPOCH);
}