2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #ifndef _SYS_EPOCH_PRIVATE_H_
31 #define _SYS_EPOCH_PRIVATE_H_
33 #error "no user serviceable parts"
36 #include <sys/kpilite.h>
38 #include <sys/mutex.h>
40 extern void epoch_adjust_prio(struct thread *td, u_char prio);
42 extern void critical_exit_preempt(void);
46 #define EPOCH_ALIGN CACHE_LINE_SIZE*2
48 #define EPOCH_ALIGN CACHE_LINE_SIZE
52 * Standalone (_sa) routines for thread state manipulation
/*
 * Standalone critical-section enter, taking the thread as an argument.
 * NOTE(review): this view is fragmentary — the function body (opening
 * brace, assignment of td from tdarg, the nesting-count increment, and
 * the closing brace) is not visible here.  Presumably it bumps
 * td->td_critnest via the thread_lite shortcut — confirm against the
 * full header.
 */
55 critical_enter_sa(void *tdarg)
57 struct thread_lite *td;
/*
 * Standalone critical-section exit, taking the thread as an argument.
 * NOTE(review): interior lines are missing from this view (no opening
 * brace, no td = tdarg assignment, no nesting-count decrement).
 */
65 critical_exit_sa(void *tdarg)
67 struct thread_lite *td;
/* Caller must still be inside a critical section at this point. */
70 MPASS(td->td_critnest > 0);
/*
 * If a preemption was requested while we were in the critical section,
 * it was deferred; honor it now that we are leaving.
 */
74 if (__predict_false(td->td_owepreempt != 0))
75 critical_exit_preempt();
/*
 * Per-thread epoch-section tracker state.  The magic words bracket the
 * structure so that a clobbered or stale tracker can be caught: they
 * are set in epoch_enter_preempt() and asserted then cleared in
 * epoch_exit_preempt() (see below).
 * NOTE(review): some interior lines and the closing "} *epoch_thread_t;"
 * of this typedef are not visible in this fragment.
 */
78 typedef struct epoch_thread {
80 uint64_t et_magic_pre;
82 TAILQ_ENTRY(epoch_thread) et_link; /* Epoch queue. */
83 struct thread *et_td; /* pointer to thread in section */
84 ck_epoch_section_t et_section; /* epoch section object */
86 uint64_t et_magic_post;
/* Head type for the per-record list of threads currently in a section. */
89 TAILQ_HEAD (epoch_tdlist, epoch_thread);
/*
 * Per-CPU epoch record: wraps the Concurrency Kit record and tracks the
 * threads currently inside a preemptible section on this CPU.  Aligned
 * to EPOCH_ALIGN to keep records on separate cache lines.
 * NOTE(review): at least one interior field line is missing from this
 * fragment.
 */
91 typedef struct epoch_record {
92 ck_epoch_record_t er_record;
93 volatile struct epoch_tdlist er_tdlist;
94 volatile uint32_t er_gen;
96 } __aligned(EPOCH_ALIGN) *epoch_record_t;
/*
 * NOTE(review): fields of "struct epoch" — the struct's opening and
 * closing lines are not visible in this fragment.  e_pcpu_record is the
 * base of the per-CPU record array dereferenced by epoch_currecord().
 */
99 struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
100 epoch_record_t e_pcpu_record;
/*
 * Return the calling CPU's epoch record for @epoch, resolved through
 * the per-CPU record base with curcpu.  Caller is expected to be in a
 * critical section so curcpu cannot change underfoot (enforced by the
 * callers below).
 * NOTE(review): the function's braces are missing from this fragment.
 */
105 static epoch_record_t
106 epoch_currecord(epoch_t epoch)
108 return zpcpu_get_cpu(epoch->e_pcpu_record, curcpu);
/*
 * Bail out of the enclosing function when the epoch has not been
 * initialized yet (NULL), e.g. very early in boot.
 * NOTE(review): the macro's continuation lines (the "return" statement
 * and do/while wrapper, if any) are missing from this fragment.
 */
111 #define INIT_CHECK(epoch) \
113 if (__predict_false((epoch) == NULL)) \
/*
 * Enter a preemptible epoch section using caller-provided tracker @et.
 * Links the thread onto this CPU's record list and begins a ck_epoch
 * section; the surrounding critical_enter_sa/critical_exit_sa pair only
 * protects the per-CPU record manipulation — the section itself remains
 * preemptible.
 * NOTE(review): interior lines are missing from this fragment (return
 * type, braces, the INIT_CHECK call, the assignment of etd from et, and
 * presumably sched_pin_lite/td_epochnest updates — confirm against the
 * full header).
 */
118 epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et)
120 struct epoch_record *er;
121 struct epoch_thread *etd;
122 struct thread_lite *td;
/* A NULL epoch is tolerated only before initialization completes. */
124 MPASS(cold || epoch != NULL);
128 MPASS(epoch->e_flags & EPOCH_PREEMPT);
/* Arm the tracker's guard words; checked on exit to catch corruption. */
129 etd->et_magic_pre = EPOCH_MAGIC0;
130 etd->et_magic_post = EPOCH_MAGIC1;
132 td = (struct thread_lite *)curthread;
133 etd->et_td = (void*)td;
135 critical_enter_sa(td);
/* Remember priority so any in-section boost can be undone on exit. */
138 td->td_pre_epoch_prio = td->td_priority;
139 er = epoch_currecord(epoch);
140 TAILQ_INSERT_TAIL(&er->er_tdlist, etd, et_link);
141 ck_epoch_begin(&er->er_record, (ck_epoch_section_t *)&etd->et_section);
142 critical_exit_sa(td);
/*
 * Enter a non-preemptible epoch section: begin a ck_epoch section on
 * this CPU's record with no per-thread tracker (NULL section).  The
 * critical section entered here is what prevents preemption; note that
 * no matching critical_exit_sa is visible in this fragment — the exit
 * side is in epoch_exit() below.
 * NOTE(review): interior lines are missing (return type, braces, the
 * declaration of er, and presumably a td_epochnest update).
 */
146 epoch_enter(epoch_t epoch)
148 struct thread_lite *td;
151 MPASS(cold || epoch != NULL);
153 td = (struct thread_lite *)curthread;
156 critical_enter_sa(td);
157 er = epoch_currecord(epoch);
158 ck_epoch_begin(&er->er_record, NULL);
/*
 * Leave a preemptible epoch section previously entered with
 * epoch_enter_preempt() and the same tracker @et.  Validates the
 * tracker's guard words, ends the ck_epoch section, unlinks the thread
 * from the per-CPU record list, and restores the thread's pre-section
 * priority if it was boosted while inside.
 * NOTE(review): interior lines are missing from this fragment (return
 * type, braces, the assignment of etd from et, and presumably a
 * td_epochnest decrement — confirm against the full header).
 */
162 epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et)
164 struct epoch_record *er;
165 struct epoch_thread *etd;
166 struct thread_lite *td;
169 td = (struct thread_lite *)curthread;
/* Critical section protects the per-CPU record list manipulation. */
170 critical_enter_sa(td);
171 sched_unpin_lite(td);
172 MPASS(td->td_epochnest);
174 er = epoch_currecord(epoch);
175 MPASS(epoch->e_flags & EPOCH_PREEMPT);
/* The tracker must belong to this thread and be intact. */
179 MPASS(etd->et_td == (struct thread *)td);
180 MPASS(etd->et_magic_pre == EPOCH_MAGIC0);
181 MPASS(etd->et_magic_post == EPOCH_MAGIC1);
/* Poison the tracker so reuse without re-entering is caught. */
182 etd->et_magic_pre = 0;
183 etd->et_magic_post = 0;
184 etd->et_td = (void*)0xDEADBEEF;
186 ck_epoch_end(&er->er_record,
187 (ck_epoch_section_t *)&etd->et_section);
188 TAILQ_REMOVE(&er->er_tdlist, etd, et_link);
/* Undo any priority boost received while inside the section. */
190 if (__predict_false(td->td_pre_epoch_prio != td->td_priority))
191 epoch_adjust_prio((struct thread *)td, td->td_pre_epoch_prio);
192 critical_exit_sa(td);
/*
 * Leave a non-preemptible epoch section entered with epoch_enter():
 * end the trackerless (NULL) ck_epoch section on this CPU's record and
 * drop the critical section taken on entry.
 * NOTE(review): interior lines are missing from this fragment (return
 * type, braces, the declaration of er, and presumably a td_epochnest
 * decrement).
 */
196 epoch_exit(epoch_t epoch)
198 struct thread_lite *td;
202 td = (struct thread_lite *)curthread;
203 MPASS(td->td_epochnest);
205 er = epoch_currecord(epoch);
206 ck_epoch_end(&er->er_record, NULL);
207 critical_exit_sa(td);
210 #endif /* _SYS_EPOCH_PRIVATE_H_ */