/*-
 * Copyright (c) 2016 Matt Macy (mmacy@nextbsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>

#include <ck_epoch.h>

#include <linux/rcupdate.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/kernel.h>
49 struct callback_head {
50 ck_epoch_entry_t epoch_entry;
52 ck_epoch_record_t *epoch_record;
57 * Verify that "struct rcu_head" is big enough to hold "struct
58 * callback_head". This has been done to avoid having to add special
59 * compile flags for including ck_epoch.h to all clients of the
62 CTASSERT(sizeof(struct rcu_head) >= sizeof(struct callback_head));
64 static ck_epoch_t linux_epoch;
65 static MALLOC_DEFINE(M_LRCU, "lrcu", "Linux RCU");
66 static DPCPU_DEFINE(ck_epoch_record_t *, epoch_record);
69 linux_rcu_runtime_init(void *arg __unused)
71 ck_epoch_record_t **pcpu_record;
72 ck_epoch_record_t *record;
75 ck_epoch_init(&linux_epoch);
78 record = malloc(sizeof(*record), M_LRCU, M_WAITOK | M_ZERO);
79 ck_epoch_register(&linux_epoch, record);
80 pcpu_record = DPCPU_ID_PTR(i, epoch_record);
81 *pcpu_record = record;
85 * Populate the epoch with 5 * ncpus # of records
87 for (i = 0; i < 5 * mp_ncpus; i++) {
88 record = malloc(sizeof(*record), M_LRCU, M_WAITOK | M_ZERO);
89 ck_epoch_register(&linux_epoch, record);
90 ck_epoch_unregister(record);
93 SYSINIT(linux_rcu_runtime, SI_SUB_LOCK, SI_ORDER_SECOND, linux_rcu_runtime_init, NULL);
96 linux_rcu_runtime_uninit(void *arg __unused)
98 ck_epoch_record_t **pcpu_record;
99 ck_epoch_record_t *record;
102 while ((record = ck_epoch_recycle(&linux_epoch)) != NULL)
103 free(record, M_LRCU);
106 pcpu_record = DPCPU_ID_PTR(i, epoch_record);
107 record = *pcpu_record;
109 free(record, M_LRCU);
112 SYSUNINIT(linux_rcu_runtime, SI_SUB_LOCK, SI_ORDER_SECOND, linux_rcu_runtime_uninit, NULL);
114 static ck_epoch_record_t *
115 linux_rcu_get_record(int canblock)
117 ck_epoch_record_t *record;
119 if (__predict_true((record = ck_epoch_recycle(&linux_epoch)) != NULL))
121 if ((record = malloc(sizeof(*record), M_LRCU, M_NOWAIT | M_ZERO)) != NULL) {
122 ck_epoch_register(&linux_epoch, record);
124 } else if (!canblock)
127 record = malloc(sizeof(*record), M_LRCU, M_WAITOK | M_ZERO);
128 ck_epoch_register(&linux_epoch, record);
133 linux_rcu_destroy_object(ck_epoch_entry_t *e)
135 struct callback_head *rcu;
138 rcu = container_of(e, struct callback_head, epoch_entry);
140 offset = (uintptr_t)rcu->func;
142 MPASS(rcu->task.ta_pending == 0);
144 if (offset < LINUX_KFREE_RCU_OFFSET_MAX)
145 kfree((char *)rcu - offset);
147 rcu->func((struct rcu_head *)rcu);
151 linux_rcu_cleaner_func(void *context, int pending __unused)
153 struct callback_head *rcu = context;
154 ck_epoch_record_t *record = rcu->epoch_record;
156 ck_epoch_barrier(record);
157 ck_epoch_unregister(record);
161 linux_rcu_read_lock(void)
163 ck_epoch_record_t *record;
166 record = DPCPU_GET(epoch_record);
167 MPASS(record != NULL);
169 ck_epoch_begin(record, NULL);
173 linux_rcu_read_unlock(void)
175 ck_epoch_record_t *record;
177 record = DPCPU_GET(epoch_record);
178 ck_epoch_end(record, NULL);
183 linux_synchronize_rcu(void)
185 ck_epoch_record_t *record;
188 record = DPCPU_GET(epoch_record);
189 MPASS(record != NULL);
190 ck_epoch_synchronize(record);
195 linux_rcu_barrier(void)
197 ck_epoch_record_t *record;
199 record = linux_rcu_get_record(0);
200 ck_epoch_barrier(record);
201 ck_epoch_unregister(record);
205 linux_call_rcu(struct rcu_head *context, rcu_callback_t func)
207 struct callback_head *ptr = (struct callback_head *)context;
208 ck_epoch_record_t *record;
210 record = linux_rcu_get_record(0);
213 MPASS(record != NULL);
215 ptr->epoch_record = record;
216 ck_epoch_call(record, &ptr->epoch_entry, linux_rcu_destroy_object);
217 TASK_INIT(&ptr->task, 0, linux_rcu_cleaner_func, ptr);
218 taskqueue_enqueue(taskqueue_fast, &ptr->task);
223 init_srcu_struct(struct srcu_struct *srcu)
225 ck_epoch_record_t *record;
227 record = linux_rcu_get_record(0);
228 srcu->ss_epoch_record = record;
233 cleanup_srcu_struct(struct srcu_struct *srcu)
235 ck_epoch_record_t *record;
237 record = srcu->ss_epoch_record;
238 srcu->ss_epoch_record = NULL;
239 ck_epoch_unregister(record);
243 srcu_read_lock(struct srcu_struct *srcu)
245 ck_epoch_begin(srcu->ss_epoch_record, NULL);
250 srcu_read_unlock(struct srcu_struct *srcu, int key __unused)
252 ck_epoch_end(srcu->ss_epoch_record, NULL);
256 synchronize_srcu(struct srcu_struct *srcu)
258 ck_epoch_synchronize(srcu->ss_epoch_record);