/*
 * Copyright 2013-2015 Samy Al Bahra.
 * Copyright 2013 Brendon Scheinman.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This is an implementation of NUMA-aware reader-writer locks as described in:
 *     Calciu, I.; Dice, D.; Lev, Y.; Luchangco, V.; Marathe, V.; and Shavit, N. 2014.
 *     NUMA-Aware Reader-Writer Locks
 */
#ifndef CK_RWCOHORT_H
#define CK_RWCOHORT_H

#include <ck_cohort.h>
#include <ck_pr.h>
#include <ck_stddef.h>
/*
 * Naming and convenience wrappers for the writer-preference (WP) variant.
 * RW is a pointer to the rwcohort state, C to the underlying cohort lock,
 * GC/LC are the opaque global/local contexts forwarded to the cohort.
 */
#define CK_RWCOHORT_WP_NAME(N) ck_rwcohort_wp_##N
#define CK_RWCOHORT_WP_INSTANCE(N) struct CK_RWCOHORT_WP_NAME(N)
#define CK_RWCOHORT_WP_INIT(N, RW, WL) ck_rwcohort_wp_##N##_init(RW, WL)
#define CK_RWCOHORT_WP_READ_LOCK(N, RW, C, GC, LC) \
	ck_rwcohort_wp_##N##_read_lock(RW, C, GC, LC)
#define CK_RWCOHORT_WP_READ_UNLOCK(N, RW, C, GC, LC) \
	ck_rwcohort_wp_##N##_read_unlock(RW)
#define CK_RWCOHORT_WP_WRITE_LOCK(N, RW, C, GC, LC) \
	ck_rwcohort_wp_##N##_write_lock(RW, C, GC, LC)
#define CK_RWCOHORT_WP_WRITE_UNLOCK(N, RW, C, GC, LC) \
	ck_rwcohort_wp_##N##_write_unlock(RW, C, GC, LC)
/* Spins a starved reader tolerates before raising the write barrier. */
#define CK_RWCOHORT_WP_DEFAULT_WAIT_LIMIT 1000
/*
 * Writer-preference (WP) reader-writer cohort lock generator.  Writers take
 * the cohort lock and then drain active readers; a reader that has spun past
 * wait_limit raises write_barrier, which stalls newly arriving writers until
 * that reader gets through (starvation relief for readers).
 */
#define CK_RWCOHORT_WP_PROTOTYPE(N) \
	CK_RWCOHORT_WP_INSTANCE(N) { \
		unsigned int read_counter;	/* number of active readers */ \
		unsigned int write_barrier;	/* > 0: new writers must hold off */ \
		unsigned int wait_limit;	/* reader spins before raising barrier */ \
	}; \
	CK_CC_INLINE static void \
	ck_rwcohort_wp_##N##_init(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \
	    unsigned int wait_limit) \
	{ \
		rw_cohort->read_counter = 0; \
		rw_cohort->write_barrier = 0; \
		rw_cohort->wait_limit = wait_limit; \
		ck_pr_barrier(); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_wp_##N##_write_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
		/* Yield to starved readers that raised the barrier. */ \
		while (ck_pr_load_uint(&rw_cohort->write_barrier) > 0) \
			ck_pr_stall(); \
		CK_COHORT_LOCK(N, cohort, global_context, local_context); \
		/* Wait for in-flight readers to drain before writing. */ \
		while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) \
			ck_pr_stall(); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_wp_##N##_write_unlock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
		(void)rw_cohort; \
		CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_wp_##N##_read_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
		unsigned int wait_count = 0; \
		bool raised = false; \
		for (;;) { \
			/* Optimistically announce this reader, then re-check. */ \
			ck_pr_inc_uint(&rw_cohort->read_counter); \
			ck_pr_fence_atomic_load(); \
			if (CK_COHORT_LOCKED(N, cohort, global_context, \
			    local_context) == false) \
				break; \
			/* A writer holds the cohort: retract and spin. */ \
			ck_pr_dec_uint(&rw_cohort->read_counter); \
			while (CK_COHORT_LOCKED(N, cohort, global_context, \
			    local_context) == true) { \
				ck_pr_stall(); \
				if (++wait_count > rw_cohort->wait_limit && \
				    raised == false) { \
					/* Starved: stall incoming writers. */ \
					ck_pr_inc_uint(&rw_cohort->write_barrier); \
					raised = true; \
				} \
			} \
		} \
		if (raised == true) \
			ck_pr_dec_uint(&rw_cohort->write_barrier); \
		ck_pr_fence_load(); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_wp_##N##_read_unlock(CK_RWCOHORT_WP_INSTANCE(N) *cohort) \
	{ \
		ck_pr_fence_load_atomic(); \
		ck_pr_dec_uint(&cohort->read_counter); \
		return; \
	}
/*
 * Static initializer.  NOTE(review): leaves wait_limit at 0 (readers raise
 * the write barrier immediately); use CK_RWCOHORT_WP_INIT for a custom limit.
 */
#define CK_RWCOHORT_WP_INITIALIZER { \
	.read_counter = 0, \
	.write_barrier = 0, \
	.wait_limit = 0 \
}
/*
 * Naming and convenience wrappers for the reader-preference (RP) variant.
 * Same calling convention as the WP macros above.
 */
#define CK_RWCOHORT_RP_NAME(N) ck_rwcohort_rp_##N
#define CK_RWCOHORT_RP_INSTANCE(N) struct CK_RWCOHORT_RP_NAME(N)
#define CK_RWCOHORT_RP_INIT(N, RW, WL) ck_rwcohort_rp_##N##_init(RW, WL)
#define CK_RWCOHORT_RP_READ_LOCK(N, RW, C, GC, LC) \
	ck_rwcohort_rp_##N##_read_lock(RW, C, GC, LC)
#define CK_RWCOHORT_RP_READ_UNLOCK(N, RW, C, GC, LC) \
	ck_rwcohort_rp_##N##_read_unlock(RW)
#define CK_RWCOHORT_RP_WRITE_LOCK(N, RW, C, GC, LC) \
	ck_rwcohort_rp_##N##_write_lock(RW, C, GC, LC)
#define CK_RWCOHORT_RP_WRITE_UNLOCK(N, RW, C, GC, LC) \
	ck_rwcohort_rp_##N##_write_unlock(RW, C, GC, LC)
/* Spins a starved writer tolerates before raising the read barrier. */
#define CK_RWCOHORT_RP_DEFAULT_WAIT_LIMIT 1000
/*
 * Reader-preference (RP) reader-writer cohort lock generator.  Readers only
 * wait on the underlying cohort lock; a writer that has spun past wait_limit
 * raises read_barrier, which stalls newly arriving readers until that writer
 * gets through (starvation relief for writers).
 */
#define CK_RWCOHORT_RP_PROTOTYPE(N) \
	CK_RWCOHORT_RP_INSTANCE(N) { \
		unsigned int read_counter;	/* number of active readers */ \
		unsigned int read_barrier;	/* > 0: new readers must hold off */ \
		unsigned int wait_limit;	/* writer spins before raising barrier */ \
	}; \
	CK_CC_INLINE static void \
	ck_rwcohort_rp_##N##_init(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \
	    unsigned int wait_limit) \
	{ \
		rw_cohort->read_counter = 0; \
		rw_cohort->read_barrier = 0; \
		rw_cohort->wait_limit = wait_limit; \
		ck_pr_barrier(); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_rp_##N##_write_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
		unsigned int wait_count = 0; \
		bool raised = false; \
		for (;;) { \
			CK_COHORT_LOCK(N, cohort, global_context, local_context); \
			if (ck_pr_load_uint(&rw_cohort->read_counter) == 0) \
				break; \
			/* Readers are active: release the cohort and spin. */ \
			CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
			while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) { \
				ck_pr_stall(); \
				if (++wait_count > rw_cohort->wait_limit && \
				    raised == false) { \
					/* Starved: stall incoming readers. */ \
					ck_pr_inc_uint(&rw_cohort->read_barrier); \
					raised = true; \
				} \
			} \
		} \
		if (raised == true) \
			ck_pr_dec_uint(&rw_cohort->read_barrier); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_rp_##N##_write_unlock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, void *local_context) \
	{ \
		(void)rw_cohort; \
		CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_rp_##N##_read_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
		/* Yield to a starved writer that raised the barrier. */ \
		while (ck_pr_load_uint(&rw_cohort->read_barrier) > 0) \
			ck_pr_stall(); \
		ck_pr_inc_uint(&rw_cohort->read_counter); \
		ck_pr_fence_atomic_load(); \
		/* Wait out any writer currently holding the cohort. */ \
		while (CK_COHORT_LOCKED(N, cohort, global_context, \
		    local_context) == true) \
			ck_pr_stall(); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_rp_##N##_read_unlock(CK_RWCOHORT_RP_INSTANCE(N) *cohort) \
	{ \
		ck_pr_fence_load_atomic(); \
		ck_pr_dec_uint(&cohort->read_counter); \
		return; \
	}
/*
 * Static initializer.  NOTE(review): leaves wait_limit at 0 (writers raise
 * the read barrier immediately); use CK_RWCOHORT_RP_INIT for a custom limit.
 */
#define CK_RWCOHORT_RP_INITIALIZER { \
	.read_counter = 0, \
	.read_barrier = 0, \
	.wait_limit = 0 \
}
/*
 * Naming and convenience wrappers for the neutral variant, which gives no
 * preference to readers or writers and needs no wait limit.
 */
#define CK_RWCOHORT_NEUTRAL_NAME(N) ck_rwcohort_neutral_##N
#define CK_RWCOHORT_NEUTRAL_INSTANCE(N) struct CK_RWCOHORT_NEUTRAL_NAME(N)
#define CK_RWCOHORT_NEUTRAL_INIT(N, RW) ck_rwcohort_neutral_##N##_init(RW)
#define CK_RWCOHORT_NEUTRAL_READ_LOCK(N, RW, C, GC, LC) \
	ck_rwcohort_neutral_##N##_read_lock(RW, C, GC, LC)
#define CK_RWCOHORT_NEUTRAL_READ_UNLOCK(N, RW, C, GC, LC) \
	ck_rwcohort_neutral_##N##_read_unlock(RW)
#define CK_RWCOHORT_NEUTRAL_WRITE_LOCK(N, RW, C, GC, LC) \
	ck_rwcohort_neutral_##N##_write_lock(RW, C, GC, LC)
#define CK_RWCOHORT_NEUTRAL_WRITE_UNLOCK(N, RW, C, GC, LC) \
	ck_rwcohort_neutral_##N##_write_unlock(RW, C, GC, LC)
/* Kept for interface symmetry with the WP/RP variants; unused here. */
#define CK_RWCOHORT_NEUTRAL_DEFAULT_WAIT_LIMIT 1000
/*
 * Neutral reader-writer cohort lock generator.  Readers briefly acquire the
 * cohort lock to register themselves, so neither side can starve the other
 * beyond what the underlying cohort lock allows.
 */
#define CK_RWCOHORT_NEUTRAL_PROTOTYPE(N) \
	CK_RWCOHORT_NEUTRAL_INSTANCE(N) { \
		unsigned int read_counter;	/* number of active readers */ \
	}; \
	CK_CC_INLINE static void \
	ck_rwcohort_neutral_##N##_init(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort) \
	{ \
		rw_cohort->read_counter = 0; \
		ck_pr_barrier(); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_neutral_##N##_write_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort,\
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
		CK_COHORT_LOCK(N, cohort, global_context, local_context); \
		/* Holding the cohort blocks new readers; drain active ones. */ \
		while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) { \
			ck_pr_stall(); \
		} \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_neutral_##N##_write_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort,\
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, void *local_context) \
	{ \
		(void)rw_cohort; \
		CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_neutral_##N##_read_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
		/* Registering under the cohort lock excludes writers. */ \
		CK_COHORT_LOCK(N, cohort, global_context, local_context); \
		ck_pr_inc_uint(&rw_cohort->read_counter); \
		CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_neutral_##N##_read_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *cohort) \
	{ \
		ck_pr_fence_load_atomic(); \
		ck_pr_dec_uint(&cohort->read_counter); \
		return; \
	}
313 #define CK_RWCOHORT_NEUTRAL_INITIALIZER { \
317 #endif /* CK_RWCOHORT_H */