/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <errno.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

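/*
 * Lazily initialize a statically initialized lock (THR_RWLOCK_INITIALIZER)
 * via init_static(), and fail with EINVAL if the lock has already been
 * destroyed; on success prwlock points at a usable lock object.
 */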
#define CHECK_AND_INIT_RWLOCK                                                  \
        if (__predict_false((prwlock = (*rwlock)) <= THR_RWLOCK_DESTROYED)) {  \
                if (prwlock == THR_RWLOCK_INITIALIZER) {                       \
                        int ret;                                               \
                        ret = init_static(_get_curthread(), rwlock);           \
                        if (ret)                                               \
                                return (ret);                                  \
                } else if (prwlock == THR_RWLOCK_DESTROYED) {                  \
                        return (EINVAL);                                       \
                }                                                              \
                prwlock = *rwlock;                                             \
        }

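/*
 * Allocate and zero a fresh lock object and publish it through *rwlock.
 */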
static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused)
{
        pthread_rwlock_t prwlock;

        prwlock = (pthread_rwlock_t)calloc(1, sizeof(struct pthread_rwlock));
        if (prwlock == NULL)
                return (ENOMEM);
        *rwlock = prwlock;
        return (0);
}

int
_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
        pthread_rwlock_t prwlock;
        int ret;

        prwlock = *rwlock;
        if (prwlock == THR_RWLOCK_INITIALIZER)
                ret = 0;
        else if (prwlock == THR_RWLOCK_DESTROYED)
                ret = EINVAL;
        else {
                *rwlock = THR_RWLOCK_DESTROYED;
                free(prwlock);
                ret = 0;
        }
        return (ret);
}

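/*
 * One-time initialization for statically initialized locks.  The global
 * _rwlock_static_lock serializes racing first users so the underlying lock
 * object is allocated exactly once.
 */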
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
        int ret;

        THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);
        if (*rwlock == THR_RWLOCK_INITIALIZER)
                ret = rwlock_init(rwlock, NULL);
        else
                ret = 0;
        THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

        return (ret);
}

int
_pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
        *rwlock = NULL;
        return (rwlock_init(rwlock, attr));
}

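/*
 * Common read-lock path: try the userland fast path first, then sleep in
 * the kernel, converting the absolute timeout (if any) into the relative
 * form the kernel interface expects.
 */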
static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        struct timespec ts, ts2, *tsp;
        int flags;
        int ret;

        CHECK_AND_INIT_RWLOCK

        if (curthread->rdlock_count) {
                /*
                 * To avoid having to track all the rdlocks held by
                 * a thread or all of the threads that hold a rdlock,
                 * we keep a simple count of all the rdlocks held by
                 * a thread.  If a thread holds any rdlocks it is
                 * possible that it is attempting to take a recursive
                 * rdlock.  If there are blocked writers and precedence
                 * is given to them, then that would result in the thread
                 * deadlocking.  So allowing a thread to take the rdlock
                 * when it already has one or more rdlocks avoids the
                 * deadlock.  I hope the reader can follow that logic ;-)
                 */
                flags = URWLOCK_PREFER_READER;
        } else {
                flags = 0;
        }

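        /*
         * Example of the deadlock avoided above (illustrative): thread A
         * already holds this lock for reading, thread B blocks waiting to
         * write, and A then calls rdlock() again.  If the second rdlock
         * deferred to the blocked writer, A would wait on B while B waits
         * on A; URWLOCK_PREFER_READER lets A's recursive rdlock proceed.
         */
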
        /*
         * POSIX said the validity of the abstimeout parameter need
         * not be checked if the lock can be immediately acquired.
         */
        ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
        if (ret == 0) {
                curthread->rdlock_count++;
                return (ret);
        }

        if (__predict_false(abstime &&
            (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
                return (EINVAL);

        for (;;) {
                if (abstime) {
                        clock_gettime(CLOCK_REALTIME, &ts);
                        TIMESPEC_SUB(&ts2, abstime, &ts);
                        if (ts2.tv_sec < 0 ||
                            (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
                                return (ETIMEDOUT);
                        tsp = &ts2;
                } else
                        tsp = NULL;

                /* goto kernel and lock it */
                ret = __thr_rwlock_rdlock(&prwlock->lock, flags, tsp);
                if (ret != EINTR)
                        break;

                /* if interrupted, try to lock it in userland again. */
                if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
                        ret = 0;
                        break;
                }
        }
        if (ret == 0)
                curthread->rdlock_count++;
        return (ret);
}

int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
        return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
        return (rwlock_rdlock_common(rwlock, abstime));
}

int
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int flags;
        int ret;

        CHECK_AND_INIT_RWLOCK

        if (curthread->rdlock_count) {
                /*
                 * To avoid having to track all the rdlocks held by
                 * a thread or all of the threads that hold a rdlock,
                 * we keep a simple count of all the rdlocks held by
                 * a thread.  If a thread holds any rdlocks it is
                 * possible that it is attempting to take a recursive
                 * rdlock.  If there are blocked writers and precedence
                 * is given to them, then that would result in the thread
                 * deadlocking.  So allowing a thread to take the rdlock
                 * when it already has one or more rdlocks avoids the
                 * deadlock.  I hope the reader can follow that logic ;-)
                 */
                flags = URWLOCK_PREFER_READER;
        } else {
                flags = 0;
        }

        ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
        if (ret == 0)
                curthread->rdlock_count++;
        return (ret);
}

int
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int ret;

        CHECK_AND_INIT_RWLOCK

        ret = _thr_rwlock_trywrlock(&prwlock->lock);
        if (ret == 0)
                prwlock->owner = curthread;
        return (ret);
}

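/*
 * Common write-lock path: try the userland fast path, then block in the
 * kernel.  The owning thread is recorded in prwlock->owner so that unlock
 * can verify write ownership.
 */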
static int
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        struct timespec ts, ts2, *tsp;
        int ret;

        CHECK_AND_INIT_RWLOCK

        /*
         * POSIX said the validity of the abstimeout parameter need
         * not be checked if the lock can be immediately acquired.
         */
        ret = _thr_rwlock_trywrlock(&prwlock->lock);
        if (ret == 0) {
                prwlock->owner = curthread;
                return (ret);
        }

        if (__predict_false(abstime &&
            (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
                return (EINVAL);

        for (;;) {
                if (abstime != NULL) {
                        clock_gettime(CLOCK_REALTIME, &ts);
                        TIMESPEC_SUB(&ts2, abstime, &ts);
                        if (ts2.tv_sec < 0 ||
                            (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
                                return (ETIMEDOUT);
                        tsp = &ts2;
                } else
                        tsp = NULL;

                /* goto kernel and lock it */
                ret = __thr_rwlock_wrlock(&prwlock->lock, tsp);
                if (ret == 0) {
                        prwlock->owner = curthread;
                        break;
                }

                if (ret != EINTR)
                        break;

                /* if interrupted, try to lock it in userland again. */
                if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
                        ret = 0;
                        prwlock->owner = curthread;
                        break;
                }
        }
        return (ret);
}

int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
        return (rwlock_wrlock_common (rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
        return (rwlock_wrlock_common (rwlock, abstime));
}

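/*
 * Release either a read or a write lock.  A write unlock is only permitted
 * by the owning thread; a successful read unlock decrements the calling
 * thread's rdlock count.
 */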
int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int ret;
        int32_t state;

        prwlock = *rwlock;
        if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
                return (EINVAL);

        state = prwlock->lock.rw_state;
        if (state & URWLOCK_WRITE_OWNER) {
                if (__predict_false(prwlock->owner != curthread))
                        return (EPERM);
                prwlock->owner = NULL;
        }

        ret = _thr_rwlock_unlock(&prwlock->lock);
        if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
                curthread->rdlock_count--;

        return (ret);
}

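/*
 * Example usage of the interfaces implemented above (illustrative sketch,
 * not part of this file; table_lock, lookup() and insert() are hypothetical
 * names):
 *
 *	static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
 *
 *	int
 *	table_lookup(int key)
 *	{
 *		int v;
 *
 *		pthread_rwlock_rdlock(&table_lock);
 *		v = lookup(key);
 *		pthread_rwlock_unlock(&table_lock);
 *		return (v);
 *	}
 *
 *	void
 *	table_insert(int key, int val)
 *	{
 *		pthread_rwlock_wrlock(&table_lock);
 *		insert(key, val);
 *		pthread_rwlock_unlock(&table_lock);
 *	}
 */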