/*
 * Copyright 2011-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef CK_RWLOCK_H
#define CK_RWLOCK_H

#include <ck_elide.h>
#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>
/*
 * Raw reader-writer lock state.  Both fields are manipulated only through
 * ck_pr atomic operations by the lock routines below.
 */
struct ck_rwlock {
	unsigned int writer;	/* Non-zero while a writer owns the lock. */
	unsigned int n_readers;	/* Count of readers currently inside. */
};
typedef struct ck_rwlock ck_rwlock_t;

/* Static initializer: unlocked, no readers. */
#define CK_RWLOCK_INITIALIZER {0, 0}
43 CK_CC_INLINE static void
44 ck_rwlock_init(struct ck_rwlock *rw)
53 CK_CC_INLINE static void
54 ck_rwlock_write_unlock(ck_rwlock_t *rw)
58 ck_pr_store_uint(&rw->writer, 0);
62 CK_CC_INLINE static bool
63 ck_rwlock_locked_writer(ck_rwlock_t *rw)
67 r = ck_pr_load_uint(&rw->writer);
68 ck_pr_fence_acquire();
72 CK_CC_INLINE static void
73 ck_rwlock_write_downgrade(ck_rwlock_t *rw)
76 ck_pr_inc_uint(&rw->n_readers);
77 ck_rwlock_write_unlock(rw);
81 CK_CC_INLINE static bool
82 ck_rwlock_locked(ck_rwlock_t *rw)
86 l = ck_pr_load_uint(&rw->n_readers) |
87 ck_pr_load_uint(&rw->writer);
88 ck_pr_fence_acquire();
92 CK_CC_INLINE static bool
93 ck_rwlock_write_trylock(ck_rwlock_t *rw)
96 if (ck_pr_fas_uint(&rw->writer, 1) != 0)
99 ck_pr_fence_atomic_load();
101 if (ck_pr_load_uint(&rw->n_readers) != 0) {
102 ck_rwlock_write_unlock(rw);
110 CK_ELIDE_TRYLOCK_PROTOTYPE(ck_rwlock_write, ck_rwlock_t,
111 ck_rwlock_locked, ck_rwlock_write_trylock)
113 CK_CC_INLINE static void
114 ck_rwlock_write_lock(ck_rwlock_t *rw)
117 while (ck_pr_fas_uint(&rw->writer, 1) != 0)
120 ck_pr_fence_atomic_load();
122 while (ck_pr_load_uint(&rw->n_readers) != 0)
129 CK_ELIDE_PROTOTYPE(ck_rwlock_write, ck_rwlock_t,
130 ck_rwlock_locked, ck_rwlock_write_lock,
131 ck_rwlock_locked_writer, ck_rwlock_write_unlock)
133 CK_CC_INLINE static bool
134 ck_rwlock_read_trylock(ck_rwlock_t *rw)
137 if (ck_pr_load_uint(&rw->writer) != 0)
140 ck_pr_inc_uint(&rw->n_readers);
143 * Serialize with respect to concurrent write
146 ck_pr_fence_atomic_load();
148 if (ck_pr_load_uint(&rw->writer) == 0) {
153 ck_pr_dec_uint(&rw->n_readers);
157 CK_ELIDE_TRYLOCK_PROTOTYPE(ck_rwlock_read, ck_rwlock_t,
158 ck_rwlock_locked_writer, ck_rwlock_read_trylock)
160 CK_CC_INLINE static void
161 ck_rwlock_read_lock(ck_rwlock_t *rw)
165 while (ck_pr_load_uint(&rw->writer) != 0)
168 ck_pr_inc_uint(&rw->n_readers);
171 * Serialize with respect to concurrent write
174 ck_pr_fence_atomic_load();
176 if (ck_pr_load_uint(&rw->writer) == 0)
179 ck_pr_dec_uint(&rw->n_readers);
182 /* Acquire semantics are necessary. */
187 CK_CC_INLINE static bool
188 ck_rwlock_locked_reader(ck_rwlock_t *rw)
192 return ck_pr_load_uint(&rw->n_readers);
195 CK_CC_INLINE static void
196 ck_rwlock_read_unlock(ck_rwlock_t *rw)
199 ck_pr_fence_load_atomic();
200 ck_pr_dec_uint(&rw->n_readers);
204 CK_ELIDE_PROTOTYPE(ck_rwlock_read, ck_rwlock_t,
205 ck_rwlock_locked_writer, ck_rwlock_read_lock,
206 ck_rwlock_locked_reader, ck_rwlock_read_unlock)
209 * Recursive writer reader-writer lock implementation.
211 struct ck_rwlock_recursive {
215 typedef struct ck_rwlock_recursive ck_rwlock_recursive_t;
217 #define CK_RWLOCK_RECURSIVE_INITIALIZER {CK_RWLOCK_INITIALIZER, 0}
219 CK_CC_INLINE static void
220 ck_rwlock_recursive_write_lock(ck_rwlock_recursive_t *rw, unsigned int tid)
224 o = ck_pr_load_uint(&rw->rw.writer);
228 while (ck_pr_cas_uint(&rw->rw.writer, 0, tid) == false)
231 ck_pr_fence_atomic_load();
233 while (ck_pr_load_uint(&rw->rw.n_readers) != 0)
242 CK_CC_INLINE static bool
243 ck_rwlock_recursive_write_trylock(ck_rwlock_recursive_t *rw, unsigned int tid)
247 o = ck_pr_load_uint(&rw->rw.writer);
251 if (ck_pr_cas_uint(&rw->rw.writer, 0, tid) == false)
254 ck_pr_fence_atomic_load();
256 if (ck_pr_load_uint(&rw->rw.n_readers) != 0) {
257 ck_pr_store_uint(&rw->rw.writer, 0);
267 CK_CC_INLINE static void
268 ck_rwlock_recursive_write_unlock(ck_rwlock_recursive_t *rw)
272 ck_pr_fence_unlock();
273 ck_pr_store_uint(&rw->rw.writer, 0);
279 CK_CC_INLINE static void
280 ck_rwlock_recursive_read_lock(ck_rwlock_recursive_t *rw)
283 ck_rwlock_read_lock(&rw->rw);
287 CK_CC_INLINE static bool
288 ck_rwlock_recursive_read_trylock(ck_rwlock_recursive_t *rw)
291 return ck_rwlock_read_trylock(&rw->rw);
294 CK_CC_INLINE static void
295 ck_rwlock_recursive_read_unlock(ck_rwlock_recursive_t *rw)
298 ck_rwlock_read_unlock(&rw->rw);
302 #endif /* CK_RWLOCK_H */