/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#include <ck_epoch.h>

static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");

#ifdef __amd64__
#define EPOCH_ALIGN CACHE_LINE_SIZE*2
#else
#define EPOCH_ALIGN CACHE_LINE_SIZE
#endif
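
/*
 * The doubled alignment on amd64 is presumably meant to avoid false
 * sharing caused by the adjacent cache line prefetcher; other
 * architectures align epoch structures to a single cache line.
 */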

TAILQ_HEAD (epoch_tdlist, epoch_tracker);
typedef struct epoch_record {
        ck_epoch_record_t er_record;
        volatile struct epoch_tdlist er_tdlist;
        volatile uint32_t er_gen;
        uint32_t er_cpuid;
} __aligned(EPOCH_ALIGN)     *epoch_record_t;

struct epoch {
        struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
        epoch_record_t e_pcpu_record;
        int     e_idx;
        int     e_flags;
};
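
/*
 * An epoch embeds the CK global state and points at a UMA-allocated
 * per-CPU array of epoch_record structures; epoch_currecord() below
 * resolves the record for the current CPU via zpcpu_get_cpu().
 */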

/* arbitrary --- needs benchmarking */
#define MAX_ADAPTIVE_SPIN 100
#define MAX_EPOCHS 64

CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");

/* Stats. */
static counter_u64_t block_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
static counter_u64_t epoch_call_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
    &epoch_call_count, "# of times a callback was deferred");
static counter_u64_t epoch_call_task_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
    &epoch_call_task_count, "# of times a callback task was run");

TAILQ_HEAD (threadlist, thread);

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

epoch_t allepochs[MAX_EPOCHS];

DPCPU_DEFINE(struct grouptask, epoch_cb_task);
DPCPU_DEFINE(int, epoch_cb_count);

static __read_mostly int inited;
static __read_mostly int epoch_count;
__read_mostly epoch_t global_epoch;
__read_mostly epoch_t global_epoch_preempt;

static void epoch_call_task(void *context __unused);
static  uma_zone_t pcpu_zone_record;

static void
epoch_init(void *arg __unused)
{
        int cpu;

        block_count = counter_u64_alloc(M_WAITOK);
        migrate_count = counter_u64_alloc(M_WAITOK);
        turnstile_count = counter_u64_alloc(M_WAITOK);
        switch_count = counter_u64_alloc(M_WAITOK);
        epoch_call_count = counter_u64_alloc(M_WAITOK);
        epoch_call_task_count = counter_u64_alloc(M_WAITOK);

        pcpu_zone_record = uma_zcreate("epoch_record pcpu",
            sizeof(struct epoch_record), NULL, NULL, NULL, NULL,
            UMA_ALIGN_PTR, UMA_ZONE_PCPU);
        CPU_FOREACH(cpu) {
                GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0,
                    epoch_call_task, NULL);
                taskqgroup_attach_cpu(qgroup_softirq,
                    DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, NULL, NULL,
                    "epoch call task");
        }
        inited = 1;
        global_epoch = epoch_alloc(0);
        global_epoch_preempt = epoch_alloc(EPOCH_PREEMPT);
}
SYSINIT(epoch, SI_SUB_TASKQ + 1, SI_ORDER_FIRST, epoch_init, NULL);

#if !defined(EARLY_AP_STARTUP)
static void
epoch_init_smp(void *dummy __unused)
{
        inited = 2;
}
SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
#endif
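
/*
 * In kernels without EARLY_AP_STARTUP, "inited" only reaches 2 once the
 * APs are running; until then epoch_call() below falls back to invoking
 * callbacks synchronously rather than deferring them.
 */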

static void
epoch_ctor(epoch_t epoch)
{
        epoch_record_t er;
        int cpu;

        epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
        CPU_FOREACH(cpu) {
                er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
                bzero(er, sizeof(*er));
                ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
                TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
                er->er_cpuid = cpu;
        }
}

static void
epoch_adjust_prio(struct thread *td, u_char prio)
{

        thread_lock(td);
        sched_prio(td, prio);
        thread_unlock(td);
}

epoch_t
epoch_alloc(int flags)
{
        epoch_t epoch;

        if (__predict_false(!inited))
                panic("%s called too early in boot", __func__);
        epoch = malloc(sizeof(struct epoch), M_EPOCH, M_ZERO | M_WAITOK);
        ck_epoch_init(&epoch->e_epoch);
        epoch_ctor(epoch);
        MPASS(epoch_count < MAX_EPOCHS - 2);
        epoch->e_flags = flags;
        epoch->e_idx = epoch_count;
        allepochs[epoch_count++] = epoch;
        return (epoch);
}

void
epoch_free(epoch_t epoch)
{
#ifdef INVARIANTS
        struct epoch_record *er;
        int cpu;

        CPU_FOREACH(cpu) {
                er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
                MPASS(TAILQ_EMPTY(&er->er_tdlist));
        }
#endif
        allepochs[epoch->e_idx] = NULL;
        epoch_wait(global_epoch);
        uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
        free(epoch, M_EPOCH);
}

static epoch_record_t
epoch_currecord(epoch_t epoch)
{

        return (zpcpu_get_cpu(epoch->e_pcpu_record, curcpu));
}

#define INIT_CHECK(epoch)                                       \
        do {                                                    \
                if (__predict_false((epoch) == NULL))           \
                        return;                                 \
        } while (0)

void
epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et)
{
        struct epoch_record *er;
        struct thread *td;

        MPASS(cold || epoch != NULL);
        INIT_CHECK(epoch);
        MPASS(epoch->e_flags & EPOCH_PREEMPT);
#ifdef EPOCH_TRACKER_DEBUG
        et->et_magic_pre = EPOCH_MAGIC0;
        et->et_magic_post = EPOCH_MAGIC1;
#endif
        td = curthread;
        et->et_td = td;
        td->td_epochnest++;
        critical_enter();
        sched_pin();

        td->td_pre_epoch_prio = td->td_priority;
        er = epoch_currecord(epoch);
        TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link);
        ck_epoch_begin(&er->er_record, &et->et_section);
        critical_exit();
}

void
epoch_enter(epoch_t epoch)
{
        struct thread *td;
        epoch_record_t er;

        MPASS(cold || epoch != NULL);
        INIT_CHECK(epoch);
        td = curthread;

        td->td_epochnest++;
        critical_enter();
        er = epoch_currecord(epoch);
        ck_epoch_begin(&er->er_record, NULL);
}

void
epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et)
{
        struct epoch_record *er;
        struct thread *td;

        INIT_CHECK(epoch);
        td = curthread;
        critical_enter();
        sched_unpin();
        MPASS(td->td_epochnest);
        td->td_epochnest--;
        er = epoch_currecord(epoch);
        MPASS(epoch->e_flags & EPOCH_PREEMPT);
        MPASS(et != NULL);
        MPASS(et->et_td == td);
#ifdef EPOCH_TRACKER_DEBUG
        MPASS(et->et_magic_pre == EPOCH_MAGIC0);
        MPASS(et->et_magic_post == EPOCH_MAGIC1);
        et->et_magic_pre = 0;
        et->et_magic_post = 0;
#endif
#ifdef INVARIANTS
        et->et_td = (void*)0xDEADBEEF;
#endif
        ck_epoch_end(&er->er_record, &et->et_section);
        TAILQ_REMOVE(&er->er_tdlist, et, et_link);
        er->er_gen++;
        if (__predict_false(td->td_pre_epoch_prio != td->td_priority))
                epoch_adjust_prio(td, td->td_pre_epoch_prio);
        critical_exit();
}

void
epoch_exit(epoch_t epoch)
{
        struct thread *td;
        epoch_record_t er;

        INIT_CHECK(epoch);
        td = curthread;
        MPASS(td->td_epochnest);
        td->td_epochnest--;
        er = epoch_currecord(epoch);
        ck_epoch_end(&er->er_record, NULL);
        critical_exit();
}
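
/*
 * Illustrative usage sketch (placeholder epoch name, not compiled here):
 * read-side sections bracket their lockless accesses with enter/exit.
 * A preemptible epoch requires a caller-supplied tracker:
 *
 *      struct epoch_tracker et;
 *
 *      epoch_enter_preempt(my_epoch, &et);
 *      // ... read epoch-protected data; preemption is allowed ...
 *      epoch_exit_preempt(my_epoch, &et);
 *
 * A non-preemptible epoch (allocated with flags == 0) runs the section
 * inside a critical section and needs no tracker:
 *
 *      epoch_enter(my_epoch);
 *      // ... short, non-sleeping read-side work ...
 *      epoch_exit(my_epoch);
 */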

/*
 * epoch_block_handler_preempt() is a callback invoked by the CK code when
 * another thread is currently in an epoch section.
 */
static void
epoch_block_handler_preempt(struct ck_epoch *global __unused,
    ck_epoch_record_t *cr, void *arg __unused)
{
        epoch_record_t record;
        struct thread *td, *owner, *curwaittd;
        struct epoch_tracker *tdwait;
        struct turnstile *ts;
        struct lock_object *lock;
        int spincount, gen;
        int locksheld __unused;

        record = __containerof(cr, struct epoch_record, er_record);
        td = curthread;
        locksheld = td->td_locks;
        spincount = 0;
        counter_u64_add(block_count, 1);
        /*
         * We lost a race and there are no longer any threads
         * on the CPU in an epoch section.
         */
        if (TAILQ_EMPTY(&record->er_tdlist))
                return;

        if (record->er_cpuid != curcpu) {
                /*
                 * If the head of the list is running, we can wait for it
                 * to remove itself from the list and thus save us the
                 * overhead of a migration.
                 */
                gen = record->er_gen;
                thread_unlock(td);
                /*
                 * We can't actually check if the waiting thread is running
                 * so we simply poll for it to exit before giving up and
                 * migrating.
                 */
                do {
                        cpu_spinwait();
                } while (!TAILQ_EMPTY(&record->er_tdlist) &&
                                 gen == record->er_gen &&
                                 spincount++ < MAX_ADAPTIVE_SPIN);
                thread_lock(td);
                /*
                 * If the generation has changed we can poll again;
                 * otherwise we need to migrate.
                 */
                if (gen != record->er_gen)
                        return;
                /*
                 * Being on the same CPU as the record we need to wait on
                 * gives us access to the thread list associated with that
                 * CPU. We can then examine the oldest thread in the queue
                 * and wait on its turnstile until it resumes, and so on,
                 * until a grace period elapses.
                 */
                counter_u64_add(migrate_count, 1);
                sched_bind(td, record->er_cpuid);
                /*
                 * At this point we need to return to the ck code
                 * to scan to see if a grace period has elapsed.
                 * We can't move on to check the thread list, because
                 * in the meantime new threads may have arrived that
                 * in fact belong to a different epoch.
                 */
                return;
        }
        /*
         * Try to find a thread in an epoch section on this CPU
         * waiting on a turnstile. Along the way, raise the priority
         * of any lower priority threads in the section to our own
         * so that they can run and exit the section.
         */
        TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
                /*
                 * Propagate our priority to any other waiters to prevent us
                 * from starving them. They will have their original priority
                 * restored on exit from epoch_wait().
                 */
                curwaittd = tdwait->et_td;
                if (!TD_IS_INHIBITED(curwaittd) && curwaittd->td_priority > td->td_priority) {
                        critical_enter();
                        thread_unlock(td);
                        thread_lock(curwaittd);
                        sched_prio(curwaittd, td->td_priority);
                        thread_unlock(curwaittd);
                        thread_lock(td);
                        critical_exit();
                }
                if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
                    ((ts = curwaittd->td_blocked) != NULL)) {
                        /*
                         * We unlock td to allow turnstile_wait() to reacquire
                         * the thread lock. Before unlocking it we enter a
                         * critical section so that dropping the thread lock
                         * (which re-enables interrupts) cannot result in a
                         * preemption that would let curwaittd run.
                         */
                        critical_enter();
                        thread_unlock(td);
                        owner = turnstile_lock(ts, &lock);
                        /*
                         * A non-NULL owner pointer indicates that the lock
                         * succeeded. We can only wait if we hold the lock
                         * and the turnstile we locked is still the one that
                         * curwaittd is blocked on. Otherwise the turnstile
                         * pointer has been changed out from underneath us,
                         * for example because the lock holder has signalled
                         * curwaittd, and we should not block on it.
                         */
                        if (owner != NULL && ts == curwaittd->td_blocked) {
                                MPASS(TD_IS_INHIBITED(curwaittd) &&
                                    TD_ON_LOCK(curwaittd));
                                critical_exit();
                                turnstile_wait(ts, owner, curwaittd->td_tsqueue);
                                counter_u64_add(turnstile_count, 1);
                                thread_lock(td);
                                return;
                        } else if (owner != NULL)
                                turnstile_unlock(ts, lock);
                        thread_lock(td);
                        critical_exit();
                        KASSERT(td->td_locks == locksheld,
                            ("%d extra locks held", td->td_locks - locksheld));
                }
        }
        /*
         * We didn't find any threads actually blocked on a lock,
         * so we have nothing to do except context switch away.
         */
        counter_u64_add(switch_count, 1);
        mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

        /*
         * Release the thread lock while yielding to
         * allow other threads to acquire the lock
         * pointed to by TDQ_LOCKPTR(td). Else a
         * deadlock-like situation might happen. (HPS)
         */
        thread_unlock(td);
        thread_lock(td);
}

void
epoch_wait_preempt(epoch_t epoch)
{
        struct thread *td;
        int was_bound;
        int old_cpu;
        int old_pinned;
        u_char old_prio;
        int locks __unused;

        MPASS(cold || epoch != NULL);
        INIT_CHECK(epoch);
        td = curthread;
#ifdef INVARIANTS
        locks = curthread->td_locks;
        MPASS(epoch->e_flags & EPOCH_PREEMPT);
        if ((epoch->e_flags & EPOCH_LOCKED) == 0)
                WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
                    "epoch_wait() can be long running");
        KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle "
            "of an epoch section of the same epoch"));
#endif
        thread_lock(td);
        DROP_GIANT();

        old_cpu = PCPU_GET(cpuid);
        old_pinned = td->td_pinned;
        old_prio = td->td_priority;
        was_bound = sched_is_bound(td);
        sched_unbind(td);
        td->td_pinned = 0;
        sched_bind(td, old_cpu);

        ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt,
            NULL);

        /* restore CPU binding, if any */
        if (was_bound != 0) {
                sched_bind(td, old_cpu);
        } else {
                /* get thread back to initial CPU, if any */
                if (old_pinned != 0)
                        sched_bind(td, old_cpu);
                sched_unbind(td);
        }
        /* restore pinned after bind */
        td->td_pinned = old_pinned;

        /* restore thread priority */
        sched_prio(td, old_prio);
        thread_unlock(td);
        PICKUP_GIANT();
        KASSERT(td->td_locks == locks,
            ("%d residual locks held", td->td_locks - locks));
}
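
/*
 * epoch_wait() below is the non-preemptible counterpart: its readers run
 * inside critical sections, so the block handler can simply spin until
 * they leave, whereas epoch_wait_preempt() above may block on turnstiles
 * and rebind across CPUs while waiting out a grace period.
 */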

static void
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
    void *arg __unused)
{
        cpu_spinwait();
}

void
epoch_wait(epoch_t epoch)
{

        MPASS(cold || epoch != NULL);
        INIT_CHECK(epoch);
        MPASS(epoch->e_flags == 0);
        critical_enter();
        ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
        critical_exit();
}

void
epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t))
{
        epoch_record_t er;
        ck_epoch_entry_t *cb;

        cb = (void *)ctx;

        MPASS(callback);
        /* too early in boot to have epoch set up */
        if (__predict_false(epoch == NULL))
                goto boottime;
#if !defined(EARLY_AP_STARTUP)
        if (__predict_false(inited < 2))
                goto boottime;
#endif

        critical_enter();
        *DPCPU_PTR(epoch_cb_count) += 1;
        er = epoch_currecord(epoch);
        ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
        critical_exit();
        return;
boottime:
        callback(ctx);
}
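
/*
 * Illustrative deferred-reclamation sketch (placeholder names such as
 * struct foo, M_FOO and my_epoch; not compiled here): the object embeds a
 * struct epoch_context, and the callback recovers it with __containerof()
 * once a grace period has elapsed.
 *
 *      struct foo {
 *              int                     f_value;
 *              struct epoch_context    f_epoch_ctx;
 *      };
 *
 *      static void
 *      foo_free_deferred(epoch_context_t ctx)
 *      {
 *              struct foo *f;
 *
 *              f = __containerof(ctx, struct foo, f_epoch_ctx);
 *              free(f, M_FOO);
 *      }
 *
 *      // after unlinking f so that no new readers can reach it:
 *      epoch_call(my_epoch, &f->f_epoch_ctx, foo_free_deferred);
 */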

static void
epoch_call_task(void *arg __unused)
{
        ck_stack_entry_t *cursor, *head, *next;
        ck_epoch_record_t *record;
        epoch_record_t er;
        epoch_t epoch;
        ck_stack_t cb_stack;
        int i, npending, total;

        ck_stack_init(&cb_stack);
        critical_enter();
        epoch_enter(global_epoch);
        for (total = i = 0; i < epoch_count; i++) {
                if (__predict_false((epoch = allepochs[i]) == NULL))
                        continue;
                er = epoch_currecord(epoch);
                record = &er->er_record;
                if ((npending = record->n_pending) == 0)
                        continue;
                ck_epoch_poll_deferred(record, &cb_stack);
                total += npending - record->n_pending;
        }
        epoch_exit(global_epoch);
        *DPCPU_PTR(epoch_cb_count) -= total;
        critical_exit();

        counter_u64_add(epoch_call_count, total);
        counter_u64_add(epoch_call_task_count, 1);

        head = ck_stack_batch_pop_npsc(&cb_stack);
        for (cursor = head; cursor != NULL; cursor = next) {
                struct ck_epoch_entry *entry =
                    ck_epoch_entry_container(cursor);

                next = CK_STACK_NEXT(cursor);
                entry->function(entry);
        }
}

int
in_epoch_verbose(epoch_t epoch, int dump_onfail)
{
        struct epoch_tracker *tdwait;
        struct thread *td;
        epoch_record_t er;

        td = curthread;
        if (td->td_epochnest == 0)
                return (0);
        if (__predict_false((epoch) == NULL))
                return (0);
        critical_enter();
        er = epoch_currecord(epoch);
        TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
                if (tdwait->et_td == td) {
                        critical_exit();
                        return (1);
                }
#ifdef INVARIANTS
        if (dump_onfail) {
                MPASS(td->td_pinned);
                printf("cpu: %d id: %d\n", curcpu, td->td_tid);
                TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
                        printf("td_tid: %d ", tdwait->et_td->td_tid);
                printf("\n");
        }
#endif
        critical_exit();
        return (0);
}

int
in_epoch(epoch_t epoch)
{
        return (in_epoch_verbose(epoch, 0));
}
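
/*
 * in_epoch() is typically used in assertions, e.g. with a placeholder
 * epoch name:
 *
 *      KASSERT(in_epoch(my_epoch), ("%s: not in epoch", __func__));
 *
 * Note that only sections entered via epoch_enter_preempt() are detected,
 * since only those place a tracker on the per-CPU thread list.
 */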

void
epoch_thread_init(struct thread *td)
{

        td->td_et = malloc(sizeof(struct epoch_tracker), M_EPOCH, M_WAITOK);
}

void
epoch_thread_fini(struct thread *td)
{

        free(td->td_et, M_EPOCH);
}