/*-
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017-2020 Hans Petter Selasky (hselasky@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/kdb.h>

#include <ck_epoch.h>

#include <linux/rcupdate.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/compat.h>
/*
 * By defining CONFIG_NO_RCU_SKIP LinuxKPI RCU locks and asserts will
 * not be skipped during panic().
 */
#ifdef CONFIG_NO_RCU_SKIP
#define	RCU_SKIP(void) 0
#else
#define	RCU_SKIP(void)	unlikely(SCHEDULER_STOPPED() || kdb_active)
#endif
struct callback_head {
	STAILQ_ENTRY(callback_head) entry;
	rcu_callback_t func;
};
struct linux_epoch_head {
	STAILQ_HEAD(, callback_head) cb_head;
	struct mtx lock;
	struct task task;
} __aligned(CACHE_LINE_SIZE);
struct linux_epoch_record {
	ck_epoch_record_t epoch_record;
	TAILQ_HEAD(, task_struct) ts_head;
	int cpuid;
} __aligned(CACHE_LINE_SIZE);
/*
 * Verify that "struct rcu_head" is big enough to hold "struct
 * callback_head". This has been done to avoid having to add special
 * compile flags for including ck_epoch.h to all clients of the
 * LinuxKPI.
 */
CTASSERT(sizeof(struct rcu_head) == sizeof(struct callback_head));
/*
 * Verify that "epoch_record" is at beginning of "struct
 * linux_epoch_record":
 */
CTASSERT(offsetof(struct linux_epoch_record, epoch_record) == 0);
static ck_epoch_t linux_epoch[RCU_TYPE_MAX];
static struct linux_epoch_head linux_epoch_head[RCU_TYPE_MAX];
DPCPU_DEFINE_STATIC(struct linux_epoch_record, linux_epoch_record[RCU_TYPE_MAX]);

static void linux_rcu_cleaner_func(void *, int);
static void
linux_rcu_runtime_init(void *arg __unused)
{
	struct linux_epoch_head *head;
	int i;
	int j;

	for (j = 0; j != RCU_TYPE_MAX; j++) {
		ck_epoch_init(&linux_epoch[j]);

		head = &linux_epoch_head[j];

		mtx_init(&head->lock, "LRCU-HEAD", NULL, MTX_DEF);
		TASK_INIT(&head->task, 0, linux_rcu_cleaner_func, head);
		STAILQ_INIT(&head->cb_head);

		CPU_FOREACH(i) {
			struct linux_epoch_record *record;

			record = &DPCPU_ID_GET(i, linux_epoch_record[j]);

			record->cpuid = i;
			ck_epoch_register(&linux_epoch[j],
			    &record->epoch_record, NULL);
			TAILQ_INIT(&record->ts_head);
		}
	}
}
SYSINIT(linux_rcu_runtime, SI_SUB_CPU, SI_ORDER_ANY, linux_rcu_runtime_init, NULL);
static void
linux_rcu_runtime_uninit(void *arg __unused)
{
	struct linux_epoch_head *head;
	int j;

	for (j = 0; j != RCU_TYPE_MAX; j++) {
		head = &linux_epoch_head[j];
		mtx_destroy(&head->lock);
	}
}
SYSUNINIT(linux_rcu_runtime, SI_SUB_LOCK, SI_ORDER_SECOND, linux_rcu_runtime_uninit, NULL);
static void
linux_rcu_cleaner_func(void *context, int pending __unused)
{
	struct linux_epoch_head *head;
	struct callback_head *rcu;
	STAILQ_HEAD(, callback_head) tmp_head;
	uintptr_t offset;

	linux_set_current(curthread);

	head = context;

	/* move current callbacks into own queue */
	mtx_lock(&head->lock);
	STAILQ_INIT(&tmp_head);
	STAILQ_CONCAT(&tmp_head, &head->cb_head);
	mtx_unlock(&head->lock);

	/* wait for a grace period before dispatching */
	linux_synchronize_rcu(head - linux_epoch_head);

	/* dispatch all callbacks, if any */
	while ((rcu = STAILQ_FIRST(&tmp_head)) != NULL) {
		STAILQ_REMOVE_HEAD(&tmp_head, entry);

		offset = (uintptr_t)rcu->func;

		if (offset < LINUX_KFREE_RCU_OFFSET_MAX)
			kfree((char *)rcu - offset);
		else
			rcu->func((struct rcu_head *)rcu);
	}
}
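
/*
 * Illustrative note (the example types below are made up, not part of
 * this file): the offset dispatch above exists because kfree_rcu()
 * encodes the offset of the rcu_head member within its enclosing
 * object as the callback function pointer. A hypothetical consumer:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	kfree_rcu(fp, rcu);
 *
 * expands to roughly:
 *
 *	call_rcu(&fp->rcu, (rcu_callback_t)offsetof(struct foo, rcu));
 *
 * Because such offsets are below LINUX_KFREE_RCU_OFFSET_MAX, the
 * cleaner can tell them apart from real function pointers, recover
 * the base address by subtracting the offset and kfree() the object.
 */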
void
linux_rcu_read_lock(unsigned type)
{
	struct linux_epoch_record *record;
	struct task_struct *ts;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	/*
	 * Pin thread to current CPU so that the unlock code gets the
	 * same per-CPU epoch record:
	 */
	sched_pin();

	record = &DPCPU_GET(linux_epoch_record[type]);
	ts = current;

	/*
	 * Use a critical section to prevent recursion inside
	 * ck_epoch_begin(). Else this function supports recursion.
	 */
	critical_enter();
	ck_epoch_begin(&record->epoch_record, NULL);
	ts->rcu_recurse++;
	if (ts->rcu_recurse == 1)
		TAILQ_INSERT_TAIL(&record->ts_head, ts, rcu_entry);
	critical_exit();
}
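
/*
 * Illustrative read-side pattern (hypothetical consumer code;
 * "gbl_foo" is made up): readers bracket dereferences of an
 * RCU-protected pointer and must not sleep in between:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gbl_foo);
 *	if (p != NULL)
 *		use(p);
 *	rcu_read_unlock();
 */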
void
linux_rcu_read_unlock(unsigned type)
{
	struct linux_epoch_record *record;
	struct task_struct *ts;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	record = &DPCPU_GET(linux_epoch_record[type]);
	ts = current;

	/*
	 * Use a critical section to prevent recursion inside
	 * ck_epoch_end(). Else this function supports recursion.
	 */
	critical_enter();
	ck_epoch_end(&record->epoch_record, NULL);
	ts->rcu_recurse--;
	if (ts->rcu_recurse == 0)
		TAILQ_REMOVE(&record->ts_head, ts, rcu_entry);
	critical_exit();

	sched_unpin();
}
static void
linux_synchronize_rcu_cb(ck_epoch_t *epoch __unused, ck_epoch_record_t *epoch_record, void *arg __unused)
{
	struct linux_epoch_record *record =
	    container_of(epoch_record, struct linux_epoch_record, epoch_record);
	struct thread *td = curthread;
	struct task_struct *ts;

	/* check if blocked on the current CPU */
	if (record->cpuid == PCPU_GET(cpuid)) {
		bool is_sleeping = false;
		u_char prio = 0;

		/*
		 * Find the lowest priority or sleeping thread which
		 * is blocking synchronization on this CPU core. All
		 * the threads in the queue are CPU-pinned and cannot
		 * go anywhere while the current thread is locked.
		 */
		TAILQ_FOREACH(ts, &record->ts_head, rcu_entry) {
			if (ts->task_thread->td_priority > prio)
				prio = ts->task_thread->td_priority;
			is_sleeping |= (ts->task_thread->td_inhibitors != 0);
		}

		if (is_sleeping) {
			thread_unlock(td);
			pause("W", 1);
			thread_lock(td);
		} else {
			/* set new thread priority */
			sched_prio(td, prio);
			/* task switch */
			mi_switch(SW_VOL | SWT_RELINQUISH);
			/*
			 * It is important the thread lock is dropped
			 * while yielding to allow other threads to
			 * acquire the lock pointed to by
			 * TDQ_LOCKPTR(td). Currently mi_switch() will
			 * unlock the thread lock before returning.
			 * Else a deadlock-like situation might
			 * happen.
			 */
			thread_lock(td);
		}
	} else {
		/*
		 * To avoid spinning move execution to the other CPU
		 * which is blocking synchronization. Set highest
		 * thread priority so that code gets run. The thread
		 * priority will be restored later.
		 */
		sched_prio(td, 0);
		sched_bind(td, record->cpuid);
	}
}
void
linux_synchronize_rcu(unsigned type)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "linux_synchronize_rcu() can sleep");

	td = curthread;
	DROP_GIANT();

	/*
	 * Synchronizing RCU might change the CPU core this function
	 * is running on. Save current values:
	 */
	thread_lock(td);

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&linux_epoch[type],
	    &linux_synchronize_rcu_cb, NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);

	PICKUP_GIANT();
}
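
/*
 * Illustrative update-side pattern (hypothetical consumer code;
 * "gbl_foo" and the surrounding locking are made up): publish a new
 * version of an object, wait out a grace period, then free the old
 * version:
 *
 *	new = kmalloc(sizeof(*new), GFP_KERNEL);
 *	...
 *	old = gbl_foo;
 *	rcu_assign_pointer(gbl_foo, new);
 *	synchronize_rcu();
 *	kfree(old);
 */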
void
linux_rcu_barrier(unsigned type)
{
	struct linux_epoch_head *head;

	MPASS(type < RCU_TYPE_MAX);

	linux_synchronize_rcu(type);

	head = &linux_epoch_head[type];

	/* wait for callbacks to complete */
	taskqueue_drain(taskqueue_fast, &head->task);
}
void
linux_call_rcu(unsigned type, struct rcu_head *context, rcu_callback_t func)
{
	struct callback_head *rcu;
	struct linux_epoch_head *head;

	MPASS(type < RCU_TYPE_MAX);

	rcu = (struct callback_head *)context;
	head = &linux_epoch_head[type];

	mtx_lock(&head->lock);
	rcu->func = func;
	STAILQ_INSERT_TAIL(&head->cb_head, rcu, entry);
	taskqueue_enqueue(taskqueue_fast, &head->task);
	mtx_unlock(&head->lock);
}
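
/*
 * Illustrative asynchronous variant (hypothetical consumer code;
 * "foo_reclaim" and "struct foo" are made up): instead of sleeping in
 * synchronize_rcu(), queue a callback to run after the grace period:
 *
 *	static void
 *	foo_reclaim(struct rcu_head *rp)
 *	{
 *		struct foo *fp = container_of(rp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	call_rcu(&old->rcu, foo_reclaim);
 */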
int
init_srcu_struct(struct srcu_struct *srcu)
{
	return (0);
}

void
cleanup_srcu_struct(struct srcu_struct *srcu)
{
}

int
srcu_read_lock(struct srcu_struct *srcu)
{
	linux_rcu_read_lock(RCU_TYPE_SLEEPABLE);
	return (0);
}

void
srcu_read_unlock(struct srcu_struct *srcu, int key __unused)
{
	linux_rcu_read_unlock(RCU_TYPE_SLEEPABLE);
}

void
synchronize_srcu(struct srcu_struct *srcu)
{
	linux_synchronize_rcu(RCU_TYPE_SLEEPABLE);
}

void
srcu_barrier(struct srcu_struct *srcu)
{
	linux_rcu_barrier(RCU_TYPE_SLEEPABLE);
}
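
/*
 * Illustrative SRCU usage (hypothetical consumer code; "foo_srcu" is
 * made up): sleepable read-side sections may block, and updaters wait
 * for them via synchronize_srcu(). Note that this implementation maps
 * every srcu_struct onto the single global RCU_TYPE_SLEEPABLE domain,
 * so the "srcu" argument is effectively ignored:
 *
 *	static struct srcu_struct foo_srcu;
 *
 *	init_srcu_struct(&foo_srcu);
 *
 *	idx = srcu_read_lock(&foo_srcu);
 *	...	(sleeping is allowed here)
 *	srcu_read_unlock(&foo_srcu, idx);
 *
 *	synchronize_srcu(&foo_srcu);
 */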