/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"
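/*
 * Process-shared rwlocks are kept on a separate page mapped into all
 * participating processes (the "off-page", see __thr_pshared_offpage()),
 * so the lock structure must fit within a single page.
 */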
_Static_assert(sizeof(struct pthread_rwlock) <= PAGE_SIZE,
    "pthread_rwlock is too large for off-page");
__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
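/*
 * Resolve *rwlock into a usable struct pthread_rwlock pointer in prwlock,
 * initializing the lock on first use: a THR_PSHARED_PTR value is resolved
 * through the process-shared off-page, a THR_RWLOCK_INITIALIZER value
 * triggers lazy initialization via init_static(), and an already
 * destroyed lock makes the calling function return EINVAL.
 */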
#define CHECK_AND_INIT_RWLOCK						\
	if (*rwlock == THR_PSHARED_PTR) {				\
		prwlock = __thr_pshared_offpage(rwlock, 0);		\
		if (prwlock == NULL)					\
			return (EINVAL);				\
	} else if (__predict_false((prwlock = (*rwlock)) <=		\
	    THR_RWLOCK_DESTROYED)) {					\
		if (prwlock == THR_RWLOCK_INITIALIZER) {		\
			int ret;					\
			ret = init_static(_get_curthread(), rwlock);	\
			if (ret)					\
				return (ret);				\
		} else if (prwlock == THR_RWLOCK_DESTROYED) {		\
			return (EINVAL);				\
		}							\
		prwlock = *rwlock;					\
	}
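/*
 * Allocate and initialize the lock storage.  A process-private lock
 * lives in heap memory referenced directly by *rwlock; a process-shared
 * lock lives in the shared off-page, with *rwlock set to the
 * THR_PSHARED_PTR marker.
 */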
static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	pthread_rwlock_t prwlock;

	if (attr == NULL || *attr == NULL ||
	    (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
		prwlock = calloc(1, sizeof(struct pthread_rwlock));
		if (prwlock == NULL)
			return (ENOMEM);
		*rwlock = prwlock;
	} else {
		prwlock = __thr_pshared_offpage(rwlock, 1);
		if (prwlock == NULL)
			return (EFAULT);
		prwlock->lock.rw_flags |= USYNC_PROCESS_SHARED;
		*rwlock = THR_PSHARED_PTR;
	}
	return (0);
}
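/*
 * Mark the lock destroyed and release its storage.  Destroying a
 * statically initialized but never used lock requires no cleanup;
 * destroying an already destroyed lock fails with EINVAL.
 */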
int
_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	int ret;

	prwlock = *rwlock;
	if (prwlock == THR_RWLOCK_INITIALIZER)
		ret = 0;
	else if (prwlock == THR_RWLOCK_DESTROYED)
		ret = EINVAL;
	else if (prwlock == THR_PSHARED_PTR) {
		*rwlock = THR_RWLOCK_DESTROYED;
		__thr_pshared_destroy(rwlock);
		ret = 0;
	} else {
		*rwlock = THR_RWLOCK_DESTROYED;
		free(prwlock);
		ret = 0;
	}
	return (ret);
}
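/*
 * Perform the lazy initialization of a statically initialized rwlock.
 * The global _rwlock_static_lock serializes racing first-use
 * initializations; the value is re-checked under the lock in case
 * another thread got there first.
 */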
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == THR_RWLOCK_INITIALIZER)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}
int
_pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{

	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}
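/*
 * Common path for pthread_rwlock_rdlock() and
 * pthread_rwlock_timedrdlock(): try the uncontested userland fast path
 * first, then fall back to sleeping in the kernel.  A NULL abstime
 * means block without a timeout.
 */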
static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	CHECK_AND_INIT_RWLOCK

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread, or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks, it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then the thread would deadlock.
		 * So allowing a thread to take the rdlock when it
		 * already holds one or more rdlocks avoids the deadlock.
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	/*
	 * POSIX says the validity of the abstime parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0) {
		curthread->rdlock_count++;
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* Go to the kernel and block. */
		ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime);
		if (ret != EINTR)
			break;

		/* If interrupted, try to lock it in userland again. */
		if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
			ret = 0;
			break;
		}
	}
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}
int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{

	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{

	return (rwlock_rdlock_common(rwlock, abstime));
}
int
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	CHECK_AND_INIT_RWLOCK

	if (curthread->rdlock_count) {
		/*
		 * If this thread already holds rdlocks, prefer readers
		 * so that a recursive rdlock cannot deadlock against
		 * blocked writers; see the comment in
		 * rwlock_rdlock_common() above.
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}
int
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	CHECK_AND_INIT_RWLOCK

	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0)
		prwlock->owner = TID(curthread);
	return (ret);
}
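/*
 * Common path for pthread_rwlock_wrlock() and
 * pthread_rwlock_timedwrlock().  On success, the owning thread's TID is
 * recorded so that unlock can verify the caller actually holds the
 * write lock.
 */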
static int
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	CHECK_AND_INIT_RWLOCK

	/*
	 * POSIX says the validity of the abstime parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0) {
		prwlock->owner = TID(curthread);
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* Go to the kernel and block. */
		ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
		if (ret == 0) {
			prwlock->owner = TID(curthread);
			break;
		}

		if (ret != EINTR)
			break;

		/* If interrupted, try to lock it in userland again. */
		if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
			ret = 0;
			prwlock->owner = TID(curthread);
			break;
		}
	}
	return (ret);
}
int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{

	return (rwlock_wrlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{

	return (rwlock_wrlock_common(rwlock, abstime));
}
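/*
 * Release the lock.  A write lock may only be unlocked by its owner;
 * for a read lock, the per-thread rdlock count is decremented so that
 * later rdlock attempts no longer force reader preference.
 */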
int
_pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;
	int32_t state;

	if (*rwlock == THR_PSHARED_PTR) {
		prwlock = __thr_pshared_offpage(rwlock, 0);
		if (prwlock == NULL)
			return (EINVAL);
	} else {
		prwlock = *rwlock;
	}

	if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
		return (EINVAL);

	state = prwlock->lock.rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (__predict_false(prwlock->owner != TID(curthread)))
			return (EPERM);
		prwlock->owner = 0;
	}

	ret = _thr_rwlock_unlock(&prwlock->lock);
	if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
		curthread->rdlock_count--;

	return (ret);
}