/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"
__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
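/*
 * The _pthread_rwlock_* functions below are the actual implementations;
 * each __weak_reference() above additionally exports the standard
 * pthread_rwlock_* name as a weak alias, so these strong definitions
 * take precedence over any stubs without multiple-definition errors at
 * link time.
 */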
static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused)
{
        pthread_rwlock_t prwlock;

        /* calloc() leaves the embedded urwlock zero-filled, i.e. unlocked. */
        prwlock = (pthread_rwlock_t)calloc(1, sizeof(struct pthread_rwlock));
        if (prwlock == NULL)
                return (ENOMEM);
        *rwlock = prwlock;
        return (0);
}
int
_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
        int ret;

        if (rwlock == NULL)
                ret = EINVAL;
        else {
                pthread_rwlock_t prwlock;

                prwlock = *rwlock;
                *rwlock = NULL;

                free(prwlock);
                ret = 0;
        }
        return (ret);
}
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
        int ret;

        THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

        if (*rwlock == NULL)
                ret = rwlock_init(rwlock, NULL);
        else
                ret = 0;

        THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

        return (ret);
}
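/*
 * Statically initialized rwlocks (PTHREAD_RWLOCK_INITIALIZER, which is
 * NULL on FreeBSD) reach init_static() on first use; re-checking
 * *rwlock while holding _rwlock_static_lock ensures that two racing
 * threads cannot both allocate the lock.
 */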
int
_pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
        *rwlock = NULL;
        return (rwlock_init(rwlock, attr));
}
static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        struct timespec ts, ts2, *tsp;
        int flags;
        int ret;

        if (__predict_false(rwlock == NULL))
                return (EINVAL);

        prwlock = *rwlock;

        /* check for static initialization */
        if (__predict_false(prwlock == NULL)) {
                if ((ret = init_static(curthread, rwlock)) != 0)
                        return (ret);

                prwlock = *rwlock;
        }

        if (curthread->rdlock_count) {
                /*
                 * To avoid having to track all the rdlocks held by
                 * a thread or all of the threads that hold a rdlock,
                 * we keep a simple count of all the rdlocks held by
                 * a thread.  If a thread holds any rdlocks it is
                 * possible that it is attempting to take a recursive
                 * rdlock.  If there are blocked writers and precedence
                 * is given to them, then that would result in the thread
                 * deadlocking.  So allowing a thread to take the rdlock
                 * when it already has one or more rdlocks avoids the
                 * deadlock.  I hope the reader can follow that logic ;-)
                 */
                flags = URWLOCK_PREFER_READER;
        } else {
                flags = 0;
        }
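        /*
         * Illustrative interleaving (not from the original source) of
         * the deadlock the comment above guards against:
         *
         *      Thread A: rdlock()      granted, rdlock_count = 1
         *      Thread B: wrlock()      blocks; writers now have precedence
         *      Thread A: rdlock()      would queue behind B: deadlock
         *
         * With URWLOCK_PREFER_READER set, A's second rdlock is granted
         * even though writer B is waiting.
         */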
        /*
         * POSIX said the validity of the abstimeout parameter need
         * not be checked if the lock can be immediately acquired.
         */
        ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
        if (ret == 0) {
                curthread->rdlock_count++;
                return (ret);
        }

        if (__predict_false(abstime &&
            (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
                return (EINVAL);

        for (;;) {
                if (abstime) {
                        /*
                         * The kernel expects a relative timeout, so
                         * re-derive it from the absolute deadline on
                         * every iteration.
                         */
                        clock_gettime(CLOCK_REALTIME, &ts);
                        TIMESPEC_SUB(&ts2, abstime, &ts);
                        if (ts2.tv_sec < 0 ||
                            (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
                                return (ETIMEDOUT);
                        tsp = &ts2;
                } else
                        tsp = NULL;

                /* goto kernel and lock it */
                ret = __thr_rwlock_rdlock(&prwlock->lock, flags, tsp);
                if (ret == 0) {
                        curthread->rdlock_count++;
                        break;
                }
                if (ret != EINTR)
                        break;

                /* if interrupted, try to lock it in userland again. */
                if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
                        ret = 0;
                        curthread->rdlock_count++;
                        break;
                }
        }
        return (ret);
}
int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
        return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
        return (rwlock_rdlock_common(rwlock, abstime));
}
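/*
 * Caller-side sketch (illustrative only): abstime is an absolute
 * CLOCK_REALTIME deadline, not a relative interval:
 *
 *      struct timespec deadline;
 *      clock_gettime(CLOCK_REALTIME, &deadline);
 *      deadline.tv_sec += 1;           // give up after about one second
 *      error = pthread_rwlock_timedrdlock(&lock, &deadline);
 *      // error == ETIMEDOUT if the lock stayed busy past the deadline
 */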
int
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int flags;
        int ret;

        if (__predict_false(rwlock == NULL))
                return (EINVAL);

        prwlock = *rwlock;

        /* check for static initialization */
        if (__predict_false(prwlock == NULL)) {
                if ((ret = init_static(curthread, rwlock)) != 0)
                        return (ret);

                prwlock = *rwlock;
        }

        if (curthread->rdlock_count) {
                /*
                 * To avoid having to track all the rdlocks held by
                 * a thread or all of the threads that hold a rdlock,
                 * we keep a simple count of all the rdlocks held by
                 * a thread.  If a thread holds any rdlocks it is
                 * possible that it is attempting to take a recursive
                 * rdlock.  If there are blocked writers and precedence
                 * is given to them, then that would result in the thread
                 * deadlocking.  So allowing a thread to take the rdlock
                 * when it already has one or more rdlocks avoids the
                 * deadlock.  I hope the reader can follow that logic ;-)
                 */
                flags = URWLOCK_PREFER_READER;
        } else {
                flags = 0;
        }

        ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
        if (ret == 0)
                curthread->rdlock_count++;
        return (ret);
}
int
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int ret;

        if (__predict_false(rwlock == NULL))
                return (EINVAL);

        prwlock = *rwlock;

        /* check for static initialization */
        if (__predict_false(prwlock == NULL)) {
                if ((ret = init_static(curthread, rwlock)) != 0)
                        return (ret);

                prwlock = *rwlock;
        }

        ret = _thr_rwlock_trywrlock(&prwlock->lock);
        if (ret == 0)
                prwlock->owner = curthread;
        return (ret);
}
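/*
 * Recording the owner on every successful write lock is what lets
 * _pthread_rwlock_unlock() below return EPERM when a thread attempts
 * to release a write lock it does not hold.
 */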
static int
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        struct timespec ts, ts2, *tsp;
        int ret;

        if (__predict_false(rwlock == NULL))
                return (EINVAL);

        prwlock = *rwlock;

        /* check for static initialization */
        if (__predict_false(prwlock == NULL)) {
                if ((ret = init_static(curthread, rwlock)) != 0)
                        return (ret);

                prwlock = *rwlock;
        }

        /*
         * POSIX said the validity of the abstimeout parameter need
         * not be checked if the lock can be immediately acquired.
         */
        ret = _thr_rwlock_trywrlock(&prwlock->lock);
        if (ret == 0) {
                prwlock->owner = curthread;
                return (ret);
        }

        if (__predict_false(abstime &&
            (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
                return (EINVAL);

        for (;;) {
                if (abstime != NULL) {
                        clock_gettime(CLOCK_REALTIME, &ts);
                        TIMESPEC_SUB(&ts2, abstime, &ts);
                        if (ts2.tv_sec < 0 ||
                            (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
                                return (ETIMEDOUT);
                        tsp = &ts2;
                } else
                        tsp = NULL;

                /* goto kernel and lock it */
                ret = __thr_rwlock_wrlock(&prwlock->lock, tsp);
                if (ret == 0) {
                        prwlock->owner = curthread;
                        break;
                }
                if (ret != EINTR)
                        break;

                /* if interrupted, try to lock it in userland again. */
                if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
                        ret = 0;
                        prwlock->owner = curthread;
                        break;
                }
        }
        return (ret);
}
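/*
 * Note the asymmetry with the read side: write ownership is tracked
 * exactly (prwlock->owner), so misuse can be detected, while readers
 * are tracked only by curthread->rdlock_count, so unlock cannot verify
 * that this particular thread actually holds a read lock.
 */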
int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
        return (rwlock_wrlock_common (rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
        return (rwlock_wrlock_common (rwlock, abstime));
}
int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int ret;
        int32_t state;

        if (__predict_false(rwlock == NULL))
                return (EINVAL);

        prwlock = *rwlock;

        if (__predict_false(prwlock == NULL))
                return (EINVAL);

        state = prwlock->lock.rw_state;
        if (state & URWLOCK_WRITE_OWNER) {
                /* Only the owning writer may release a write lock. */
                if (__predict_false(prwlock->owner != curthread))
                        return (EPERM);
                prwlock->owner = NULL;
        }

        ret = _thr_rwlock_unlock(&prwlock->lock);
        if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
                curthread->rdlock_count--;

        return (ret);
}
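/*
 * End-to-end sketch (illustrative only) of how the entry points fit
 * together; the URWLOCK_WRITE_OWNER bit sampled in unlock decides
 * which side is being undone:
 *
 *      static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
 *
 *      pthread_rwlock_rdlock(&lock);   // rdlock_count becomes 1
 *      pthread_rwlock_unlock(&lock);   // read path: rdlock_count back to 0
 *
 *      pthread_rwlock_wrlock(&lock);   // owner = curthread
 *      pthread_rwlock_unlock(&lock);   // write path: owner cleared
 */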