/*-
 * Copyright (c) 2016 Matt Macy (mmacy@nextbsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/kdb.h>

#include <ck_epoch.h>

#include <linux/rcupdate.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/compat.h>

/*
 * By defining CONFIG_NO_RCU_SKIP LinuxKPI RCU locks and asserts will
 * not be skipped during panic().
 */
#ifdef CONFIG_NO_RCU_SKIP
#define	RCU_SKIP(void) 0
#else
#define	RCU_SKIP(void)	unlikely(SCHEDULER_STOPPED() || kdb_active)
#endif

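/*
 * Note: RCU_SKIP() evaluates true once the scheduler has been
 * stopped by panic(9) or while the kernel debugger is active; in
 * those contexts blocking on epoch sections could deadlock, so the
 * RCU entry points below become no-ops unless CONFIG_NO_RCU_SKIP is
 * defined.
 */
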
struct callback_head {
	STAILQ_ENTRY(callback_head) entry;
	rcu_callback_t func;
};

struct linux_epoch_head {
	STAILQ_HEAD(, callback_head) cb_head;
	struct mtx lock;
	struct task task;
} __aligned(CACHE_LINE_SIZE);

struct linux_epoch_record {
	ck_epoch_record_t epoch_record;
	TAILQ_HEAD(, task_struct) ts_head;
	int cpuid;
} __aligned(CACHE_LINE_SIZE);

/*
 * Verify that "struct rcu_head" is big enough to hold "struct
 * callback_head". This has been done to avoid having to add special
 * compile flags for including ck_epoch.h to all clients of the
 * LinuxKPI.
 */
CTASSERT(sizeof(struct rcu_head) == sizeof(struct callback_head));

/*
 * Verify that "epoch_record" is at beginning of "struct
 * linux_epoch_record":
 */
CTASSERT(offsetof(struct linux_epoch_record, epoch_record) == 0);

static ck_epoch_t linux_epoch;
static struct linux_epoch_head linux_epoch_head;
static DPCPU_DEFINE(struct linux_epoch_record, linux_epoch_record);

static void linux_rcu_cleaner_func(void *, int);

static void
linux_rcu_runtime_init(void *arg __unused)
{
	struct linux_epoch_head *head;
	int i;

	ck_epoch_init(&linux_epoch);

	head = &linux_epoch_head;

	mtx_init(&head->lock, "LRCU-HEAD", NULL, MTX_DEF);
	TASK_INIT(&head->task, 0, linux_rcu_cleaner_func, NULL);
	STAILQ_INIT(&head->cb_head);

	CPU_FOREACH(i) {
		struct linux_epoch_record *record;

		record = &DPCPU_ID_GET(i, linux_epoch_record);

		record->cpuid = i;
		ck_epoch_register(&linux_epoch, &record->epoch_record, NULL);
		TAILQ_INIT(&record->ts_head);
	}
}
SYSINIT(linux_rcu_runtime, SI_SUB_CPU, SI_ORDER_ANY, linux_rcu_runtime_init, NULL);

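/*
 * The per-CPU epoch records are registered at SI_SUB_CPU time, once
 * the DPCPU areas exist for every processor.  Each record is tagged
 * with its CPU number so that linux_synchronize_rcu_cb() can later
 * tell whether a blocking reader runs on the local core or on a
 * remote one.
 */
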
static void
linux_rcu_runtime_uninit(void *arg __unused)
{
	struct linux_epoch_head *head;

	head = &linux_epoch_head;

	/* destroy head lock */
	mtx_destroy(&head->lock);
}
SYSUNINIT(linux_rcu_runtime, SI_SUB_LOCK, SI_ORDER_SECOND, linux_rcu_runtime_uninit, NULL);

static void
linux_rcu_cleaner_func(void *context __unused, int pending __unused)
{
	struct linux_epoch_head *head;
	struct callback_head *rcu;
	STAILQ_HEAD(, callback_head) tmp_head;

	linux_set_current(curthread);

	head = &linux_epoch_head;

	/* move current callbacks into own queue */
	mtx_lock(&head->lock);
	STAILQ_INIT(&tmp_head);
	STAILQ_CONCAT(&tmp_head, &head->cb_head);
	mtx_unlock(&head->lock);

	/* wait for all readers to drain before dispatching */
	linux_synchronize_rcu();

	/* dispatch all callbacks, if any */
	while ((rcu = STAILQ_FIRST(&tmp_head)) != NULL) {
		uintptr_t offset;

		STAILQ_REMOVE_HEAD(&tmp_head, entry);

		offset = (uintptr_t)rcu->func;

		if (offset < LINUX_KFREE_RCU_OFFSET_MAX)
			kfree((char *)rcu - offset);
		else
			rcu->func((struct rcu_head *)rcu);
	}
}

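/*
 * The "func" pointer does double duty above: the LinuxKPI kfree_rcu()
 * macro stores the byte offset of the rcu_head within the enclosing
 * object instead of a real callback pointer.  Any value below
 * LINUX_KFREE_RCU_OFFSET_MAX is therefore treated as an offset and
 * the enclosing object is kfree'd directly.  Illustrative sketch
 * ("struct foo" is hypothetical):
 *
 *	struct foo {
 *		int value;
 *		struct rcu_head rcu;
 *	};
 *
 *	kfree_rcu(foo_ptr, rcu);  // queues offsetof(struct foo, rcu)
 */
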
void
linux_rcu_read_lock(void)
{
	struct linux_epoch_record *record;
	struct task_struct *ts;

	if (RCU_SKIP())
		return;

	/*
	 * Pin thread to current CPU so that the unlock code gets the
	 * same per-CPU epoch record:
	 */
	sched_pin();

	record = &DPCPU_GET(linux_epoch_record);
	ts = current;

	/*
	 * Use a critical section to prevent recursion inside
	 * ck_epoch_begin(). Else this function supports recursion.
	 */
	critical_enter();
	ck_epoch_begin(&record->epoch_record, NULL);
	ts->rcu_recurse++;
	if (ts->rcu_recurse == 1)
		TAILQ_INSERT_TAIL(&record->ts_head, ts, rcu_entry);
	critical_exit();
}

void
linux_rcu_read_unlock(void)
{
	struct linux_epoch_record *record;
	struct task_struct *ts;

	if (RCU_SKIP())
		return;

	record = &DPCPU_GET(linux_epoch_record);
	ts = current;

	/*
	 * Use a critical section to prevent recursion inside
	 * ck_epoch_end(). Else this function supports recursion.
	 */
	critical_enter();
	ck_epoch_end(&record->epoch_record, NULL);
	ts->rcu_recurse--;
	if (ts->rcu_recurse == 0)
		TAILQ_REMOVE(&record->ts_head, ts, rcu_entry);
	critical_exit();

	sched_unpin();
}

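/*
 * Typical reader pattern under this implementation (illustrative;
 * "foo_list" and use() are hypothetical):
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(foo_list);
 *	if (p != NULL)
 *		use(p);
 *	rcu_read_unlock();
 *
 * Read sections may nest; only the outermost lock/unlock pair inserts
 * and removes the task from the per-CPU reader list, and sched_pin()
 * keeps the task on one CPU for the whole section.
 */
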
static void
linux_synchronize_rcu_cb(ck_epoch_t *epoch __unused, ck_epoch_record_t *epoch_record, void *arg __unused)
{
	struct linux_epoch_record *record =
	    container_of(epoch_record, struct linux_epoch_record, epoch_record);
	struct thread *td = curthread;
	struct task_struct *ts;

	/* check if blocked on the current CPU */
	if (record->cpuid == PCPU_GET(cpuid)) {
		bool is_sleeping = 0;
		u_char prio = 0;

		/*
		 * Find the lowest priority or sleeping thread which
		 * is blocking synchronization on this CPU core. All
		 * the threads in the queue are CPU-pinned and cannot
		 * go anywhere while the current thread is locked.
		 */
		TAILQ_FOREACH(ts, &record->ts_head, rcu_entry) {
			if (ts->task_thread->td_priority > prio)
				prio = ts->task_thread->td_priority;
			is_sleeping |= (ts->task_thread->td_inhibitors != 0);
		}

		if (is_sleeping) {
			/*
			 * Release the thread lock while sleeping to
			 * allow the blocked reader thread to run:
			 */
			thread_unlock(td);
			pause("W", 1);
			thread_lock(td);
		} else {
			/* set new thread priority */
			sched_prio(td, prio);
			/* task switch */
			mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

			/*
			 * Release the thread lock while yielding to
			 * allow other threads to acquire the lock
			 * pointed to by TDQ_LOCKPTR(td). Else a
			 * deadlock like situation might happen.
			 */
			thread_unlock(td);
			thread_lock(td);
		}
	} else {
		/*
		 * To avoid spinning move execution to the other CPU
		 * which is blocking synchronization. Set highest
		 * thread priority so that code gets run. The thread
		 * priority will be restored later.
		 */
		sched_prio(td, 0);
		sched_bind(td, record->cpuid);
	}
}

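/*
 * Note: ck_epoch_synchronize_wait() invokes the callback above
 * whenever it has to wait on an epoch record that has not yet
 * observed the new epoch.  The strategy is priority based: on the
 * local CPU the waiter either sleeps (a reader is blocked) or lends
 * its priority and yields; for a remote CPU it binds itself there at
 * top priority so the blocking reader gets a chance to finish its
 * section.
 */
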
void
linux_synchronize_rcu(void)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;

	if (RCU_SKIP())
		return;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "linux_synchronize_rcu() can sleep");

	td = curthread;

	DROP_GIANT();

	/*
	 * Synchronizing RCU might change the CPU core this function
	 * is running on. Save current values:
	 */
	thread_lock(td);

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&linux_epoch,
	    &linux_synchronize_rcu_cb, NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);

	PICKUP_GIANT();
}

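/*
 * Typical writer pattern (illustrative; "foo_list" and "old" are
 * hypothetical):
 *
 *	old = foo_list;
 *	rcu_assign_pointer(foo_list, new);
 *	synchronize_rcu();	// all readers of "old" are done
 *	kfree(old);
 */
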
void
linux_rcu_barrier(void)
{
	struct linux_epoch_head *head;

	linux_synchronize_rcu();

	head = &linux_epoch_head;

	/* wait for callbacks to complete */
	taskqueue_drain(taskqueue_fast, &head->task);
}

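/*
 * Unlike synchronize_rcu(), the barrier also waits for all callbacks
 * queued by earlier call_rcu()/kfree_rcu() invocations to finish:
 * draining the fast taskqueue ensures any pending
 * linux_rcu_cleaner_func() run has dispatched its queue.
 */
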
void
linux_call_rcu(struct rcu_head *context, rcu_callback_t func)
{
	struct callback_head *rcu = (struct callback_head *)context;
	struct linux_epoch_head *head = &linux_epoch_head;

	mtx_lock(&head->lock);
	rcu->func = func;
	STAILQ_INSERT_TAIL(&head->cb_head, rcu, entry);
	taskqueue_enqueue(taskqueue_fast, &head->task);
	mtx_unlock(&head->lock);
}

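/*
 * Example callback-based deferred free (illustrative; "struct foo"
 * and foo_free_cb() are hypothetical):
 *
 *	static void
 *	foo_free_cb(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	call_rcu(&fp->rcu, &foo_free_cb);
 */
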
int
init_srcu_struct(struct srcu_struct *srcu)
{
	return (0);
}

void
cleanup_srcu_struct(struct srcu_struct *srcu)
{
}

int
srcu_read_lock(struct srcu_struct *srcu)
{
	linux_rcu_read_lock();
	return (0);
}

void
srcu_read_unlock(struct srcu_struct *srcu, int key __unused)
{
	linux_rcu_read_unlock();
}

void
synchronize_srcu(struct srcu_struct *srcu)
{
	linux_synchronize_rcu();
}

void
srcu_barrier(struct srcu_struct *srcu)
{
	linux_rcu_barrier();
}

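/*
 * Note: SRCU is emulated on top of the regular RCU implementation
 * above, so the srcu_struct argument carries no per-instance state.
 * Sleeping within an SRCU read section is tolerated because
 * linux_synchronize_rcu_cb() detects inhibited reader threads and
 * pauses instead of spinning on them.
 */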