/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#ifdef EPOCH_TRACE
#include <machine/stdarg.h>
#include <sys/stack.h>
#include <sys/tree.h>
#endif
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#include <ck_epoch.h>

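/*
 * Note (added for clarity): on amd64 the per-CPU epoch record is padded to
 * two cache lines, presumably so that records land on separate aligned pairs
 * of lines and adjacent-line prefetching cannot cause false sharing between
 * CPUs; other architectures pad to a single cache line.
 */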
#ifdef __amd64__
#define EPOCH_ALIGN CACHE_LINE_SIZE*2
#else
#define EPOCH_ALIGN CACHE_LINE_SIZE
#endif

TAILQ_HEAD (epoch_tdlist, epoch_tracker);
typedef struct epoch_record {
	ck_epoch_record_t er_record;
	struct epoch_context er_drain_ctx;
	struct epoch *er_parent;
	volatile struct epoch_tdlist er_tdlist;
	volatile uint32_t er_gen;
	uint32_t er_cpuid;
#ifdef INVARIANTS
	/* Used to verify record ownership for non-preemptible epochs. */
	struct thread *er_td;
#endif
} __aligned(EPOCH_ALIGN)     *epoch_record_t;

struct epoch {
	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
	epoch_record_t e_pcpu_record;
	int	e_in_use;
	int	e_flags;
	struct sx e_drain_sx;
	struct mtx e_drain_mtx;
	volatile int e_drain_count;
	const char *e_name;
};

/* arbitrary --- needs benchmarking */
#define MAX_ADAPTIVE_SPIN 100
#define MAX_EPOCHS 64

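/*
 * Note (added for clarity): epoch_call() casts an epoch_context directly to
 * a ck_epoch_entry, so the two structures must be the same size; the
 * assertion below enforces that.
 */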
CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "epoch stats");

/* Stats. */
static counter_u64_t block_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
static counter_u64_t epoch_call_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
    &epoch_call_count, "# of times a callback was deferred");
static counter_u64_t epoch_call_task_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
    &epoch_call_task_count, "# of times a callback task was run");

TAILQ_HEAD (threadlist, thread);

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

static struct epoch epoch_array[MAX_EPOCHS];

DPCPU_DEFINE(struct grouptask, epoch_cb_task);
DPCPU_DEFINE(int, epoch_cb_count);

static __read_mostly int inited;
__read_mostly epoch_t global_epoch;
__read_mostly epoch_t global_epoch_preempt;

static void epoch_call_task(void *context __unused);
static	uma_zone_t pcpu_zone_record;

static struct sx epoch_sx;

#define EPOCH_LOCK() sx_xlock(&epoch_sx)
#define EPOCH_UNLOCK() sx_xunlock(&epoch_sx)

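/*
 * Return the calling CPU's record for the given epoch.  Callers are expected
 * to be in a critical section (every call site below enters one first), so
 * the CPU cannot change while the record is in use.
 */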
static epoch_record_t
epoch_currecord(epoch_t epoch)
{

	return (zpcpu_get(epoch->e_pcpu_record));
}

#ifdef EPOCH_TRACE
struct stackentry {
	RB_ENTRY(stackentry) se_node;
	struct stack se_stack;
};

static int
stackentry_compare(struct stackentry *a, struct stackentry *b)
{

	if (a->se_stack.depth > b->se_stack.depth)
		return (1);
	if (a->se_stack.depth < b->se_stack.depth)
		return (-1);
	for (int i = 0; i < a->se_stack.depth; i++) {
		if (a->se_stack.pcs[i] > b->se_stack.pcs[i])
			return (1);
		if (a->se_stack.pcs[i] < b->se_stack.pcs[i])
			return (-1);
	}

	return (0);
}

RB_HEAD(stacktree, stackentry) epoch_stacks = RB_INITIALIZER(&epoch_stacks);
RB_GENERATE_STATIC(stacktree, stackentry, se_node, stackentry_compare);

static struct mtx epoch_stacks_lock;
MTX_SYSINIT(epochstacks, &epoch_stacks_lock, "epoch_stacks", MTX_DEF);

static bool epoch_trace_stack_print = true;
SYSCTL_BOOL(_kern_epoch, OID_AUTO, trace_stack_print, CTLFLAG_RWTUN,
    &epoch_trace_stack_print, 0, "Print stack traces on epoch reports");

static void epoch_trace_report(const char *fmt, ...) __printflike(1, 2);
static inline void
epoch_trace_report(const char *fmt, ...)
{
	va_list ap;
	struct stackentry se, *new;

	stack_zero(&se.se_stack);	/* XXX: is it really needed? */
	stack_save(&se.se_stack);

	/* Tree is never reduced - go lockless. */
	if (RB_FIND(stacktree, &epoch_stacks, &se) != NULL)
		return;

	new = malloc(sizeof(*new), M_STACK, M_NOWAIT);
	if (new != NULL) {
		bcopy(&se.se_stack, &new->se_stack, sizeof(struct stack));

		mtx_lock(&epoch_stacks_lock);
		new = RB_INSERT(stacktree, &epoch_stacks, new);
		mtx_unlock(&epoch_stacks_lock);
		if (new != NULL)
			free(new, M_STACK);
	}

	va_start(ap, fmt);
	(void)vprintf(fmt, ap);
	va_end(ap);
	if (epoch_trace_stack_print)
		stack_print_ddb(&se.se_stack);
}

static inline void
epoch_trace_enter(struct thread *td, epoch_t epoch, epoch_tracker_t et,
    const char *file, int line)
{
	epoch_tracker_t iet;

	SLIST_FOREACH(iet, &td->td_epochs, et_tlink) {
		if (iet->et_epoch != epoch)
			continue;
		epoch_trace_report("Recursively entering epoch %s "
		    "at %s:%d, previously entered at %s:%d\n",
		    epoch->e_name, file, line,
		    iet->et_file, iet->et_line);
	}
	et->et_epoch = epoch;
	et->et_file = file;
	et->et_line = line;
	et->et_flags = 0;
	SLIST_INSERT_HEAD(&td->td_epochs, et, et_tlink);
}

static inline void
epoch_trace_exit(struct thread *td, epoch_t epoch, epoch_tracker_t et,
    const char *file, int line)
{

	if (SLIST_FIRST(&td->td_epochs) != et) {
		epoch_trace_report("Exiting epoch %s in a not nested order "
		    "at %s:%d. Most recently entered %s at %s:%d\n",
		    epoch->e_name,
		    file, line,
		    SLIST_FIRST(&td->td_epochs)->et_epoch->e_name,
		    SLIST_FIRST(&td->td_epochs)->et_file,
		    SLIST_FIRST(&td->td_epochs)->et_line);
		/* This will panic if et is not anywhere on td_epochs. */
		SLIST_REMOVE(&td->td_epochs, et, epoch_tracker, et_tlink);
	} else
		SLIST_REMOVE_HEAD(&td->td_epochs, et_tlink);
	if (et->et_flags & ET_REPORT_EXIT)
		printf("Td %p exiting epoch %s at %s:%d\n", td, epoch->e_name,
		    file, line);
}

/* Used by assertions that check thread state before going to sleep. */
void
epoch_trace_list(struct thread *td)
{
	epoch_tracker_t iet;

	SLIST_FOREACH(iet, &td->td_epochs, et_tlink)
		printf("Epoch %s entered at %s:%d\n", iet->et_epoch->e_name,
		    iet->et_file, iet->et_line);
}

void
epoch_where_report(epoch_t epoch)
{
	epoch_record_t er;
	struct epoch_tracker *tdwait;

	MPASS(epoch != NULL);
	MPASS((epoch->e_flags & EPOCH_PREEMPT) != 0);
	MPASS(!THREAD_CAN_SLEEP());
	critical_enter();
	er = epoch_currecord(epoch);
	TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
		if (tdwait->et_td == curthread)
			break;
	critical_exit();
	if (tdwait != NULL) {
		tdwait->et_flags |= ET_REPORT_EXIT;
		printf("Td %p entered epoch %s at %s:%d\n", curthread,
		    epoch->e_name, tdwait->et_file, tdwait->et_line);
	}
}
#endif /* EPOCH_TRACE */

static void
epoch_init(void *arg __unused)
{
	int cpu;

	block_count = counter_u64_alloc(M_WAITOK);
	migrate_count = counter_u64_alloc(M_WAITOK);
	turnstile_count = counter_u64_alloc(M_WAITOK);
	switch_count = counter_u64_alloc(M_WAITOK);
	epoch_call_count = counter_u64_alloc(M_WAITOK);
	epoch_call_task_count = counter_u64_alloc(M_WAITOK);

	pcpu_zone_record = uma_zcreate("epoch_record pcpu",
	    sizeof(struct epoch_record), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	CPU_FOREACH(cpu) {
		GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0,
		    epoch_call_task, NULL);
		taskqgroup_attach_cpu(qgroup_softirq,
		    DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, NULL, NULL,
		    "epoch call task");
	}
#ifdef EPOCH_TRACE
	SLIST_INIT(&thread0.td_epochs);
#endif
	sx_init(&epoch_sx, "epoch-sx");
	inited = 1;
	global_epoch = epoch_alloc("Global", 0);
	global_epoch_preempt = epoch_alloc("Global preemptible", EPOCH_PREEMPT);
}
SYSINIT(epoch, SI_SUB_EPOCH, SI_ORDER_FIRST, epoch_init, NULL);

#if !defined(EARLY_AP_STARTUP)
static void
epoch_init_smp(void *dummy __unused)
{
	inited = 2;
}
SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
#endif

static void
epoch_ctor(epoch_t epoch)
{
	epoch_record_t er;
	int cpu;

	epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		bzero(er, sizeof(*er));
		ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
		er->er_cpuid = cpu;
		er->er_parent = epoch;
	}
}

static void
epoch_adjust_prio(struct thread *td, u_char prio)
{

	thread_lock(td);
	sched_prio(td, prio);
	thread_unlock(td);
}

epoch_t
epoch_alloc(const char *name, int flags)
{
	epoch_t epoch;
	int i;

	MPASS(name != NULL);

	if (__predict_false(!inited))
		panic("%s called too early in boot", __func__);

	EPOCH_LOCK();

	/*
	 * Find a free index in the epoch array. If no free index is
	 * found, try to use the index after the last one.
	 */
	for (i = 0;; i++) {
		/*
		 * If too many epochs are currently allocated,
		 * return NULL.
		 */
		if (i == MAX_EPOCHS) {
			epoch = NULL;
			goto done;
		}
		if (epoch_array[i].e_in_use == 0)
			break;
	}

	epoch = epoch_array + i;
	ck_epoch_init(&epoch->e_epoch);
	epoch_ctor(epoch);
	epoch->e_flags = flags;
	epoch->e_name = name;
	sx_init(&epoch->e_drain_sx, "epoch-drain-sx");
	mtx_init(&epoch->e_drain_mtx, "epoch-drain-mtx", NULL, MTX_DEF);

	/*
	 * Set e_in_use last, because when this field is set the
	 * epoch_call_task() function will start scanning this epoch
	 * structure.
	 */
	atomic_store_rel_int(&epoch->e_in_use, 1);
done:
	EPOCH_UNLOCK();
	return (epoch);
}

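/*
 * Example (added sketch, not part of the original code): a typical consumer
 * allocates a preemptible epoch at module load and tears it down at unload.
 * The names foo_epoch and foo_modevent are hypothetical and only illustrate
 * the epoch_alloc()/epoch_free() lifecycle implemented here.
 *
 *	static epoch_t foo_epoch;
 *
 *	static int
 *	foo_modevent(module_t mod, int type, void *data)
 *	{
 *		switch (type) {
 *		case MOD_LOAD:
 *			foo_epoch = epoch_alloc("foo", EPOCH_PREEMPT);
 *			return (foo_epoch != NULL ? 0 : ENOMEM);
 *		case MOD_UNLOAD:
 *			// epoch_free() drains pending callbacks first.
 *			epoch_free(foo_epoch);
 *			return (0);
 *		default:
 *			return (EOPNOTSUPP);
 *		}
 *	}
 */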
void
epoch_free(epoch_t epoch)
{
#ifdef INVARIANTS
	int cpu;
#endif

	EPOCH_LOCK();

	MPASS(epoch->e_in_use != 0);

	epoch_drain_callbacks(epoch);

	atomic_store_rel_int(&epoch->e_in_use, 0);
	/*
	 * Make sure the epoch_call_task() function sees e_in_use equal
	 * to zero, by calling epoch_wait() on the global_epoch.
	 */
	epoch_wait(global_epoch);
#ifdef INVARIANTS
	CPU_FOREACH(cpu) {
		epoch_record_t er;

		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);

		/*
		 * Sanity check: none of the records should be in use anymore.
		 * We drained callbacks above and freeing the pcpu records is
		 * imminent.
		 */
		MPASS(er->er_td == NULL);
		MPASS(TAILQ_EMPTY(&er->er_tdlist));
	}
#endif
	uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
	mtx_destroy(&epoch->e_drain_mtx);
	sx_destroy(&epoch->e_drain_sx);
	memset(epoch, 0, sizeof(*epoch));

	EPOCH_UNLOCK();
}

#define INIT_CHECK(epoch)					\
	do {							\
		if (__predict_false((epoch) == NULL))		\
			return;					\
	} while (0)

void
_epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
{
	struct epoch_record *er;
	struct thread *td;

	MPASS(cold || epoch != NULL);
	td = curthread;
	MPASS((vm_offset_t)et >= td->td_kstack &&
	    (vm_offset_t)et + sizeof(struct epoch_tracker) <=
	    td->td_kstack + td->td_kstack_pages * PAGE_SIZE);

	INIT_CHECK(epoch);
	MPASS(epoch->e_flags & EPOCH_PREEMPT);

#ifdef EPOCH_TRACE
	epoch_trace_enter(td, epoch, et, file, line);
#endif
	et->et_td = td;
	THREAD_NO_SLEEPING();
	critical_enter();
	sched_pin();
	et->et_old_priority = td->td_priority;
	er = epoch_currecord(epoch);
	/* Record-level tracking is reserved for non-preemptible epochs. */
	MPASS(er->er_td == NULL);
	TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link);
	ck_epoch_begin(&er->er_record, &et->et_section);
	critical_exit();
}

void
epoch_enter(epoch_t epoch)
{
	epoch_record_t er;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	critical_enter();
	er = epoch_currecord(epoch);
#ifdef INVARIANTS
	if (er->er_record.active == 0) {
		MPASS(er->er_td == NULL);
		er->er_td = curthread;
	} else {
		/* We've recursed, just make sure our accounting isn't wrong. */
		MPASS(er->er_td == curthread);
	}
#endif
	ck_epoch_begin(&er->er_record, NULL);
}

void
_epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
{
	struct epoch_record *er;
	struct thread *td;

	INIT_CHECK(epoch);
	td = curthread;
	critical_enter();
	sched_unpin();
	THREAD_SLEEPING_OK();
	er = epoch_currecord(epoch);
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	MPASS(et != NULL);
	MPASS(et->et_td == td);
#ifdef INVARIANTS
	et->et_td = (void*)0xDEADBEEF;
	/* Record-level tracking is reserved for non-preemptible epochs. */
	MPASS(er->er_td == NULL);
#endif
	ck_epoch_end(&er->er_record, &et->et_section);
	TAILQ_REMOVE(&er->er_tdlist, et, et_link);
	er->er_gen++;
	if (__predict_false(et->et_old_priority != td->td_priority))
		epoch_adjust_prio(td, et->et_old_priority);
	critical_exit();
#ifdef EPOCH_TRACE
	epoch_trace_exit(td, epoch, et, file, line);
#endif
}

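/*
 * Example (added sketch): a preemptible read section.  The tracker must live
 * on the caller's kernel stack (the assertion in _epoch_enter_preempt()
 * checks this), and the section may be preempted but must not sleep.  The
 * epoch_enter_preempt()/epoch_exit_preempt() wrappers from sys/epoch.h are
 * assumed here; foo_epoch is hypothetical.
 *
 *	struct epoch_tracker et;
 *
 *	epoch_enter_preempt(foo_epoch, &et);
 *	... read epoch-protected data; may be preempted, must not sleep ...
 *	epoch_exit_preempt(foo_epoch, &et);
 */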
void
epoch_exit(epoch_t epoch)
{
	epoch_record_t er;

	INIT_CHECK(epoch);
	er = epoch_currecord(epoch);
	ck_epoch_end(&er->er_record, NULL);
#ifdef INVARIANTS
	MPASS(er->er_td == curthread);
	if (er->er_record.active == 0)
		er->er_td = NULL;
#endif
	critical_exit();
}

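/*
 * Example (added sketch): a non-preemptible read section.  epoch_enter()
 * keeps the thread in a critical section for the duration, so the body must
 * be short and must not sleep or acquire sleepable locks; foo_epoch is
 * hypothetical.
 *
 *	epoch_enter(foo_epoch);
 *	... read epoch-protected data; no sleeping, no long delays ...
 *	epoch_exit(foo_epoch);
 */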
/*
 * epoch_block_handler_preempt() is a callback from the CK code when another
 * thread is currently in an epoch section.
 */
static void
epoch_block_handler_preempt(struct ck_epoch *global __unused,
    ck_epoch_record_t *cr, void *arg __unused)
{
	epoch_record_t record;
	struct thread *td, *owner, *curwaittd;
	struct epoch_tracker *tdwait;
	struct turnstile *ts;
	struct lock_object *lock;
	int spincount, gen;
	int locksheld __unused;

	record = __containerof(cr, struct epoch_record, er_record);
	td = curthread;
	locksheld = td->td_locks;
	spincount = 0;
	counter_u64_add(block_count, 1);
	/*
	 * We lost a race and there are no longer any threads
	 * on the CPU in an epoch section.
	 */
	if (TAILQ_EMPTY(&record->er_tdlist))
		return;

	if (record->er_cpuid != curcpu) {
		/*
		 * If the head of the list is running, we can wait for it
		 * to remove itself from the list and thus save us the
		 * overhead of a migration
		 */
		gen = record->er_gen;
		thread_unlock(td);
		/*
		 * We can't actually check if the waiting thread is running
		 * so we simply poll for it to exit before giving up and
		 * migrating.
		 */
		do {
			cpu_spinwait();
		} while (!TAILQ_EMPTY(&record->er_tdlist) &&
				 gen == record->er_gen &&
				 spincount++ < MAX_ADAPTIVE_SPIN);
		thread_lock(td);
		/*
		 * If the generation has changed we can poll again;
		 * otherwise we need to migrate.
		 */
		if (gen != record->er_gen)
			return;
		/*
		 * Being on the same CPU as the record we need to wait on
		 * gives us access to the thread list associated with that
		 * CPU.  We can then examine the oldest thread in the queue
		 * and wait on its turnstile until it resumes, and so on,
		 * until a grace period elapses.
		 */
		counter_u64_add(migrate_count, 1);
		sched_bind(td, record->er_cpuid);
		/*
		 * At this point we need to return to the ck code
		 * to scan to see if a grace period has elapsed.
		 * We can't move on to check the thread list, because
		 * in the meantime new threads may have arrived that
		 * in fact belong to a different epoch.
		 */
		return;
	}
	/*
	 * Try to find a thread in an epoch section on this CPU
	 * waiting on a turnstile. Otherwise find the lowest
	 * priority thread (highest prio value) and drop our priority
	 * to match to allow it to run.
	 */
	TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
		/*
		 * Propagate our priority to any other waiters to prevent us
		 * from starving them.  They will have their original priority
		 * restored on exit from epoch_wait().
		 */
		curwaittd = tdwait->et_td;
		if (!TD_IS_INHIBITED(curwaittd) && curwaittd->td_priority > td->td_priority) {
			critical_enter();
			thread_unlock(td);
			thread_lock(curwaittd);
			sched_prio(curwaittd, td->td_priority);
			thread_unlock(curwaittd);
			thread_lock(td);
			critical_exit();
		}
		if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
		    ((ts = curwaittd->td_blocked) != NULL)) {
			/*
			 * We unlock td to allow turnstile_wait to reacquire
			 * the thread lock. Before unlocking it we enter a
			 * critical section to prevent preemption after we
			 * reenable interrupts by dropping the thread lock in
			 * order to prevent curwaittd from getting to run.
			 */
			critical_enter();
			thread_unlock(td);

			if (turnstile_lock(ts, &lock, &owner)) {
				if (ts == curwaittd->td_blocked) {
					MPASS(TD_IS_INHIBITED(curwaittd) &&
					    TD_ON_LOCK(curwaittd));
					critical_exit();
					turnstile_wait(ts, owner,
					    curwaittd->td_tsqueue);
					counter_u64_add(turnstile_count, 1);
					thread_lock(td);
					return;
				}
				turnstile_unlock(ts, lock);
			}
			thread_lock(td);
			critical_exit();
			KASSERT(td->td_locks == locksheld,
			    ("%d extra locks held", td->td_locks - locksheld));
		}
	}
	/*
	 * We didn't find any threads actually blocked on a lock
	 * so we have nothing to do except context switch away.
	 */
	counter_u64_add(switch_count, 1);
	mi_switch(SW_VOL | SWT_RELINQUISH);
	/*
	 * It is important that the thread lock is dropped while yielding
	 * to allow other threads to acquire the lock pointed to by
	 * TDQ_LOCKPTR(td).  Currently mi_switch() will unlock the
	 * thread lock before returning.  Otherwise a deadlock-like
	 * situation might arise.
	 */
	thread_lock(td);
}

void
epoch_wait_preempt(epoch_t epoch)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;
	int locks __unused;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	td = curthread;
#ifdef INVARIANTS
	locks = curthread->td_locks;
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	if ((epoch->e_flags & EPOCH_LOCKED) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "epoch_wait() can be long running");
	KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle "
	    "of an epoch section of the same epoch"));
#endif
	DROP_GIANT();
	thread_lock(td);

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt,
	    NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);
	PICKUP_GIANT();
	KASSERT(td->td_locks == locks,
	    ("%d residual locks held", td->td_locks - locks));
}

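/*
 * Example (added sketch): the usual writer-side pattern.  The updater
 * unlinks an object under its own serialization (a lock), waits for a grace
 * period, and only then frees the memory, so no concurrent reader can still
 * be looking at it.  The names foo_lock, foo_epoch and M_FOO are
 * hypothetical.
 *
 *	mtx_lock(&foo_lock);
 *	... unlink obj from the epoch-protected structure ...
 *	mtx_unlock(&foo_lock);
 *	epoch_wait_preempt(foo_epoch);
 *	free(obj, M_FOO);
 */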
static void
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
    void *arg __unused)
{
	cpu_spinwait();
}

void
epoch_wait(epoch_t epoch)
{

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	MPASS(epoch->e_flags == 0);
	critical_enter();
	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
	critical_exit();
}

void
epoch_call(epoch_t epoch, epoch_callback_t callback, epoch_context_t ctx)
{
	epoch_record_t er;
	ck_epoch_entry_t *cb;

	cb = (void *)ctx;

	MPASS(callback);
	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		goto boottime;
#if !defined(EARLY_AP_STARTUP)
	if (__predict_false(inited < 2))
		goto boottime;
#endif

	critical_enter();
	*DPCPU_PTR(epoch_cb_count) += 1;
	er = epoch_currecord(epoch);
	ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
	critical_exit();
	return;
boottime:
	callback(ctx);
}

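/*
 * Example (added sketch): deferring a free with epoch_call().  The object
 * embeds an epoch_context, and the callback recovers the object with
 * __containerof(), mirroring how epoch_drain_cb()/er_drain_ctx are used
 * below.  struct foo, foo_free_cb(), foo_epoch and M_FOO are hypothetical.
 *
 *	struct foo {
 *		...
 *		struct epoch_context f_epoch_ctx;
 *	};
 *
 *	static void
 *	foo_free_cb(epoch_context_t ctx)
 *	{
 *		struct foo *f;
 *
 *		f = __containerof(ctx, struct foo, f_epoch_ctx);
 *		free(f, M_FOO);
 *	}
 *
 *	... after unlinking f from all epoch-protected structures ...
 *	epoch_call(foo_epoch, foo_free_cb, &f->f_epoch_ctx);
 */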
static void
epoch_call_task(void *arg __unused)
{
	ck_stack_entry_t *cursor, *head, *next;
	ck_epoch_record_t *record;
	epoch_record_t er;
	epoch_t epoch;
	ck_stack_t cb_stack;
	int i, npending, total;

	ck_stack_init(&cb_stack);
	critical_enter();
	epoch_enter(global_epoch);
	for (total = i = 0; i != MAX_EPOCHS; i++) {
		epoch = epoch_array + i;
		if (__predict_false(
		    atomic_load_acq_int(&epoch->e_in_use) == 0))
			continue;
		er = epoch_currecord(epoch);
		record = &er->er_record;
		if ((npending = record->n_pending) == 0)
			continue;
		ck_epoch_poll_deferred(record, &cb_stack);
		total += npending - record->n_pending;
	}
	epoch_exit(global_epoch);
	*DPCPU_PTR(epoch_cb_count) -= total;
	critical_exit();

	counter_u64_add(epoch_call_count, total);
	counter_u64_add(epoch_call_task_count, 1);

	head = ck_stack_batch_pop_npsc(&cb_stack);
	for (cursor = head; cursor != NULL; cursor = next) {
		struct ck_epoch_entry *entry =
		    ck_epoch_entry_container(cursor);

		next = CK_STACK_NEXT(cursor);
		entry->function(entry);
	}
}

static int
in_epoch_verbose_preempt(epoch_t epoch, int dump_onfail)
{
	epoch_record_t er;
	struct epoch_tracker *tdwait;
	struct thread *td;

	MPASS(epoch != NULL);
	MPASS((epoch->e_flags & EPOCH_PREEMPT) != 0);
	td = curthread;
	if (THREAD_CAN_SLEEP())
		return (0);
	critical_enter();
	er = epoch_currecord(epoch);
	TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
		if (tdwait->et_td == td) {
			critical_exit();
			return (1);
		}
#ifdef INVARIANTS
	if (dump_onfail) {
		MPASS(td->td_pinned);
		printf("cpu: %d id: %d\n", curcpu, td->td_tid);
		TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
			printf("td_tid: %d ", tdwait->et_td->td_tid);
		printf("\n");
	}
#endif
	critical_exit();
	return (0);
}

#ifdef INVARIANTS
static void
epoch_assert_nocpu(epoch_t epoch, struct thread *td)
{
	epoch_record_t er;
	int cpu;
	bool crit;

	crit = td->td_critnest > 0;

	/* Check for a critical section mishap. */
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		KASSERT(er->er_td != td,
		    ("%s critical section in epoch '%s', from cpu %d",
		    (crit ? "exited" : "re-entered"), epoch->e_name, cpu));
	}
}
#else
#define epoch_assert_nocpu(e, td) do {} while (0)
#endif

int
in_epoch_verbose(epoch_t epoch, int dump_onfail)
{
	epoch_record_t er;
	struct thread *td;

	if (__predict_false((epoch) == NULL))
		return (0);
	if ((epoch->e_flags & EPOCH_PREEMPT) != 0)
		return (in_epoch_verbose_preempt(epoch, dump_onfail));

	/*
	 * Being in a critical section is a necessary condition for being
	 * correctly inside a non-preemptible epoch, so if the thread is not
	 * in one, it is definitely not in this epoch.
	 */
	td = curthread;
	if (td->td_critnest == 0) {
		epoch_assert_nocpu(epoch, td);
		return (0);
	}

	/*
	 * The current cpu is in a critical section, so the epoch record will be
	 * stable for the rest of this function.  Knowing that the record is not
	 * active is sufficient for knowing whether we're in this epoch or not,
	 * since it's a pcpu record.
	 */
	er = epoch_currecord(epoch);
	if (er->er_record.active == 0) {
		epoch_assert_nocpu(epoch, td);
		return (0);
	}

	MPASS(er->er_td == td);
	return (1);
}

int
in_epoch(epoch_t epoch)
{
	return (in_epoch_verbose(epoch, 0));
}

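/*
 * Example (added sketch): in_epoch() is typically used in assertions to
 * document and enforce that a function runs inside a given epoch section;
 * foo_epoch and foo_lookup() are hypothetical.
 *
 *	static struct foo *
 *	foo_lookup(uint32_t key)
 *	{
 *		MPASS(in_epoch(foo_epoch));
 *		... walk epoch-protected data and return a match ...
 *	}
 */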
static void
epoch_drain_cb(struct epoch_context *ctx)
{
	struct epoch *epoch =
	    __containerof(ctx, struct epoch_record, er_drain_ctx)->er_parent;

	if (atomic_fetchadd_int(&epoch->e_drain_count, -1) == 1) {
		mtx_lock(&epoch->e_drain_mtx);
		wakeup(epoch);
		mtx_unlock(&epoch->e_drain_mtx);
	}
}

void
epoch_drain_callbacks(epoch_t epoch)
{
	epoch_record_t er;
	struct thread *td;
	int was_bound;
	int old_pinned;
	int old_cpu;
	int cpu;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "epoch_drain_callbacks() may sleep!");

	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		return;
#if !defined(EARLY_AP_STARTUP)
	if (__predict_false(inited < 2))
		return;
#endif
	DROP_GIANT();

	sx_xlock(&epoch->e_drain_sx);
	mtx_lock(&epoch->e_drain_mtx);

	td = curthread;
	thread_lock(td);
	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;

	CPU_FOREACH(cpu)
		epoch->e_drain_count++;
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		sched_bind(td, cpu);
		epoch_call(epoch, &epoch_drain_cb, &er->er_drain_ctx);
	}

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	thread_unlock(td);

	while (epoch->e_drain_count != 0)
		msleep(epoch, &epoch->e_drain_mtx, PZERO, "EDRAIN", 0);

	mtx_unlock(&epoch->e_drain_mtx);
	sx_xunlock(&epoch->e_drain_sx);

	PICKUP_GIANT();
}
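
/*
 * Usage note (added for clarity): epoch_free() drains callbacks itself, but a
 * consumer that keeps its epoch alive while unloading other state, or whose
 * callbacks reference memory or code about to go away, typically calls
 * epoch_drain_callbacks() explicitly before that memory is released.
 */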