2 * Copyright (c) 1998 Alex Nash
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
34 #include "namespace.h"
36 #include "un-namespace.h"
37 #include "thr_private.h"
/*
 * Compile-time guarantee: the rwlock object must fit in one page so a
 * process-shared instance can live on a shared off-page (see the
 * __thr_pshared_offpage() calls below).
 * NOTE(review): every line in this extract carries a stray leading
 * number and some original lines appear elided -- mangled paste.
 */
39 _Static_assert(sizeof(struct pthread_rwlock) <= PAGE_SIZE,
40 "pthread_rwlock is too large for off-page");
/*
 * Export the _pthread_rwlock_* implementations under their public
 * POSIX names as weak symbols, allowing interposition.
 */
42 __weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
43 __weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
44 __weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
45 __weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
46 __weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
47 __weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
48 __weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
49 __weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
50 __weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
/* Forward declarations of the static helpers defined later in the file. */
52 static int init_static(struct pthread *thread, pthread_rwlock_t *rwlock);
53 static int init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out);
/*
 * check_and_init_rwlock():
 *	Fast-path resolution of a user rwlock handle into a pointer to
 *	the real lock object (*rwlock_out).  Handles that are still a
 *	static initializer, already destroyed, or the process-shared
 *	sentinel are deferred to the slow path init_rwlock().
 * NOTE(review): the opening brace and the trailing success return
 * appear elided from this extract.
 */
55 static int __always_inline
56 check_and_init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
/* Slow path: pshared sentinel or any value <= THR_RWLOCK_DESTROYED. */
58 if (__predict_false(*rwlock == THR_PSHARED_PTR ||
59 *rwlock <= THR_RWLOCK_DESTROYED))
60 return (init_rwlock(rwlock, rwlock_out));
/* Common case: already initialized -- pass the pointer straight out. */
61 *rwlock_out = *rwlock;
/*
 * init_rwlock():
 *	Slow path of check_and_init_rwlock().  Resolves the handle:
 *	- THR_PSHARED_PTR: look up the process-shared off-page object;
 *	- THR_RWLOCK_INITIALIZER: lazily initialize the static lock;
 *	- THR_RWLOCK_DESTROYED: use-after-destroy (error path elided).
 * NOTE(review): the "static int" header line, braces, ret declaration
 * and error returns appear elided from this extract.
 */
66 init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
68 pthread_rwlock_t prwlock;
/* Process-shared lock: the real object lives on a shared off-page. */
71 if (*rwlock == THR_PSHARED_PTR) {
72 prwlock = __thr_pshared_offpage(rwlock, 0);
75 } else if ((prwlock = *rwlock) <= THR_RWLOCK_DESTROYED) {
76 if (prwlock == THR_RWLOCK_INITIALIZER) {
/* Statically initialized: one-time init serialized in init_static(). */
77 ret = init_static(_get_curthread(), rwlock);
80 } else if (prwlock == THR_RWLOCK_DESTROYED) {
/* Report the resolved lock object back to the caller. */
85 *rwlock_out = prwlock;
/*
 * rwlock_init():
 *	Allocate and initialize the underlying lock object.  A private
 *	lock (NULL attr or PTHREAD_PROCESS_PRIVATE) is calloc()ed and
 *	its pointer stored directly in *rwlock; a process-shared lock is
 *	created on a shared off-page, flagged USYNC_PROCESS_SHARED, and
 *	*rwlock is set to the THR_PSHARED_PTR sentinel instead.
 * NOTE(review): allocation-failure checks and the return statement
 * appear elided from this extract.
 */
90 rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
92 pthread_rwlock_t prwlock;
94 if (attr == NULL || *attr == NULL ||
95 (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
/* calloc() leaves the umtx rwlock fields zeroed == unlocked state. */
96 prwlock = calloc(1, sizeof(struct pthread_rwlock));
/* Shared case: allocate (second arg 1) the off-page object. */
101 prwlock = __thr_pshared_offpage(rwlock, 1);
104 prwlock->lock.rw_flags |= USYNC_PROCESS_SHARED;
105 *rwlock = THR_PSHARED_PTR;
/*
 * _pthread_rwlock_destroy():
 *	Destroy a rwlock.  A never-used static initializer and an
 *	already-destroyed lock take the first two branches (their bodies
 *	are elided in this extract); a process-shared lock tears down its
 *	off-page; otherwise the handle is marked destroyed (the free of
 *	the private heap object is presumably on an elided line).
 */
111 _pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
113 pthread_rwlock_t prwlock;
117 if (prwlock == THR_RWLOCK_INITIALIZER)
119 else if (prwlock == THR_RWLOCK_DESTROYED)
121 else if (prwlock == THR_PSHARED_PTR) {
/* Invalidate the handle before releasing the shared off-page. */
122 *rwlock = THR_RWLOCK_DESTROYED;
123 __thr_pshared_destroy(rwlock);
/* Private lock: mark the handle destroyed. */
126 *rwlock = THR_RWLOCK_DESTROYED;
/*
 * init_static():
 *	One-time initialization of a statically initialized rwlock,
 *	serialized by the global _rwlock_static_lock.  The initializer
 *	value is re-checked under the lock so that racing threads
 *	perform the initialization exactly once.
 * NOTE(review): function header, braces and return appear elided.
 */
134 init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
138 THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);
/* Double-checked: only init if still the static initializer. */
140 if (*rwlock == THR_RWLOCK_INITIALIZER)
141 ret = rwlock_init(rwlock, NULL);
145 THR_LOCK_RELEASE(thread, &_rwlock_static_lock);
/*
 * _pthread_rwlock_init():
 *	Public initializer -- thin wrapper around rwlock_init().
 * NOTE(review): braces elided; an elided line presumably clears
 *	*rwlock before the call.
 */
151 _pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
155 return (rwlock_init(rwlock, attr));
/*
 * rwlock_rdlock_common():
 *	Acquire a read lock; abstime is an optional absolute timeout
 *	(NULL blocks indefinitely).  Tries a userland fast path first,
 *	validates tv_nsec only once it must sleep (per POSIX), then
 *	enters the kernel; the surrounding retry loop and error returns
 *	appear elided from this extract.
 */
159 rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
161 struct pthread *curthread = _get_curthread();
162 pthread_rwlock_t prwlock;
/* Resolve the handle, lazily initializing it if required. */
166 ret = check_and_init_rwlock(rwlock, &prwlock);
170 if (curthread->rdlock_count) {
172 * To avoid having to track all the rdlocks held by
173 * a thread or all of the threads that hold a rdlock,
174 * we keep a simple count of all the rdlocks held by
175 * a thread. If a thread holds any rdlocks it is
176 * possible that it is attempting to take a recursive
177 * rdlock. If there are blocked writers and precedence
178 * is given to them, then that would result in the thread
179 * deadlocking. So allowing a thread to take the rdlock
180 * when it already has one or more rdlocks avoids the
181 * deadlock. I hope the reader can follow that logic ;-)
183 flags = URWLOCK_PREFER_READER;
189 * POSIX said the validity of the abstimeout parameter need
190 * not be checked if the lock can be immediately acquired.
/* Userland fast path; on success just bump the recursion count. */
192 ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
194 curthread->rdlock_count++;
/* Must sleep: now reject malformed timeouts (elided: EINVAL return). */
198 if (__predict_false(abstime &&
199 (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
203 /* goto kernel and lock it */
204 ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime);
208 /* if interrupted, try to lock it in userland again. */
209 if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
215 curthread->rdlock_count++;
/* Blocking read lock: common path with no timeout. (Braces elided.) */
220 _pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
222 return (rwlock_rdlock_common(rwlock, NULL));
/* Read lock bounded by an absolute timeout. (Braces elided.) */
226 _pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
227 const struct timespec *abstime)
229 return (rwlock_rdlock_common(rwlock, abstime));
/*
 * _pthread_rwlock_tryrdlock():
 *	Non-blocking read lock: single userland attempt, never enters
 *	the kernel.  Uses the same reader-preference heuristic as
 *	rwlock_rdlock_common() for threads already holding rdlocks.
 * NOTE(review): braces, local declarations and the return appear
 * elided from this extract.
 */
233 _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
235 struct pthread *curthread = _get_curthread();
236 pthread_rwlock_t prwlock;
/* Resolve the handle, lazily initializing it if required. */
240 ret = check_and_init_rwlock(rwlock, &prwlock);
244 if (curthread->rdlock_count) {
246 * To avoid having to track all the rdlocks held by
247 * a thread or all of the threads that hold a rdlock,
248 * we keep a simple count of all the rdlocks held by
249 * a thread. If a thread holds any rdlocks it is
250 * possible that it is attempting to take a recursive
251 * rdlock. If there are blocked writers and precedence
252 * is given to them, then that would result in the thread
253 * deadlocking. So allowing a thread to take the rdlock
254 * when it already has one or more rdlocks avoids the
255 * deadlock. I hope the reader can follow that logic ;-)
257 flags = URWLOCK_PREFER_READER;
/* Single userland attempt; bump recursion count on success. */
262 ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
264 curthread->rdlock_count++;
/*
 * _pthread_rwlock_trywrlock():
 *	Non-blocking write lock: single userland attempt; on success
 *	record the calling thread's TID as owner (checked at unlock).
 * NOTE(review): braces and the return appear elided.
 */
269 _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
271 struct pthread *curthread = _get_curthread();
272 pthread_rwlock_t prwlock;
275 ret = check_and_init_rwlock(rwlock, &prwlock);
279 ret = _thr_rwlock_trywrlock(&prwlock->lock);
281 prwlock->owner = TID(curthread);
/*
 * rwlock_wrlock_common():
 *	Acquire a write lock; abstime is an optional absolute timeout
 *	(NULL blocks indefinitely).  Mirrors rwlock_rdlock_common():
 *	userland fast path, late timeout validation, then the kernel;
 *	every successful acquisition records the owner TID.  The retry
 *	loop scaffolding appears elided from this extract.
 */
286 rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
288 struct pthread *curthread = _get_curthread();
289 pthread_rwlock_t prwlock;
/* Resolve the handle, lazily initializing it if required. */
292 ret = check_and_init_rwlock(rwlock, &prwlock);
297 * POSIX said the validity of the abstimeout parameter need
298 * not be checked if the lock can be immediately acquired.
/* Userland fast path. */
300 ret = _thr_rwlock_trywrlock(&prwlock->lock);
302 prwlock->owner = TID(curthread);
/* Must sleep: now reject malformed timeouts (elided: EINVAL return). */
306 if (__predict_false(abstime &&
307 (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
311 /* goto kernel and lock it */
312 ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
314 prwlock->owner = TID(curthread);
321 /* if interrupted, try to lock it in userland again. */
322 if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
324 prwlock->owner = TID(curthread);
/* Blocking write lock: common path with no timeout. (Braces elided.) */
332 _pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
334 return (rwlock_wrlock_common (rwlock, NULL));
/* Write lock bounded by an absolute timeout. (Braces elided.) */
338 _pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
339 const struct timespec *abstime)
341 return (rwlock_wrlock_common (rwlock, abstime));
/*
 * _pthread_rwlock_unlock():
 *	Release a read or write lock.  For a write-owned lock the
 *	caller's TID must match the recorded owner (error path elided);
 *	a successful read unlock decrements the per-thread rdlock_count
 *	used by the rdlock recursion heuristic above.
 * NOTE(review): braces, the owner-clearing line for the write case,
 * and the final return appear elided from this extract.
 */
345 _pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
347 struct pthread *curthread = _get_curthread();
348 pthread_rwlock_t prwlock;
352 if (*rwlock == THR_PSHARED_PTR) {
353 prwlock = __thr_pshared_offpage(rwlock, 0);
/* Uninitialized or destroyed handle (elided: EINVAL return). */
360 if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
/* Snapshot the state to classify this as a read or write unlock. */
363 state = prwlock->lock.rw_state;
364 if (state & URWLOCK_WRITE_OWNER) {
/* Only the writer that took the lock may release it. */
365 if (__predict_false(prwlock->owner != TID(curthread)))
370 ret = _thr_rwlock_unlock(&prwlock->lock);
/* Read unlock: drop the recursion-heuristic counter. */
371 if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
372 curthread->rdlock_count--;