/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

_Static_assert(sizeof(struct pthread_rwlock) <= PAGE_SIZE,
    "pthread_rwlock is too large for off-page");

__weak_reference(_thr_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_thr_rwlock_destroy, _pthread_rwlock_destroy);
__weak_reference(_thr_rwlock_init, pthread_rwlock_init);
__weak_reference(_thr_rwlock_init, _pthread_rwlock_init);
__weak_reference(_Tthr_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_Tthr_rwlock_rdlock, _pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_Tthr_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_Tthr_rwlock_tryrdlock, _pthread_rwlock_tryrdlock);
__weak_reference(_Tthr_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_Tthr_rwlock_trywrlock, _pthread_rwlock_trywrlock);
__weak_reference(_Tthr_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_Tthr_rwlock_unlock, _pthread_rwlock_unlock);
__weak_reference(_Tthr_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_Tthr_rwlock_wrlock, _pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

static int init_static(struct pthread *thread, pthread_rwlock_t *rwlock);
static int init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out);

/*
 * Resolve *rwlock to its backing struct pthread_rwlock, initializing it
 * first if it is still a static initializer or a process-shared token.
 */
static int __always_inline
check_and_init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
{
	if (__predict_false(*rwlock == THR_PSHARED_PTR ||
	    *rwlock <= THR_RWLOCK_DESTROYED))
		return (init_rwlock(rwlock, rwlock_out));
	*rwlock_out = *rwlock;
	return (0);
}

static int __noinline
init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
{
	pthread_rwlock_t prwlock;
	int ret;

	if (*rwlock == THR_PSHARED_PTR) {
		prwlock = __thr_pshared_offpage(rwlock, 0);
		if (prwlock == NULL)
			return (EINVAL);
	} else if ((prwlock = *rwlock) <= THR_RWLOCK_DESTROYED) {
		if (prwlock == THR_RWLOCK_INITIALIZER) {
			ret = init_static(_get_curthread(), rwlock);
			if (ret != 0)
				return (ret);
		} else if (prwlock == THR_RWLOCK_DESTROYED) {
			return (EINVAL);
		}
		prwlock = *rwlock;
	}
	*rwlock_out = prwlock;
	return (0);
}

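/*
 * Note (sketch, not from the original source): the THR_RWLOCK_INITIALIZER
 * branch above is what makes the static initializer lazy.  On FreeBSD a
 * lock declared as
 *
 *	static pthread_rwlock_t lk = PTHREAD_RWLOCK_INITIALIZER;
 *
 * has no struct pthread_rwlock behind it until the first lock attempt
 * reaches init_rwlock(), which allocates one through init_static() while
 * holding _rwlock_static_lock.
 */
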
static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	pthread_rwlock_t prwlock;

	if (attr == NULL || *attr == NULL ||
	    (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
		prwlock = calloc(1, sizeof(struct pthread_rwlock));
		if (prwlock == NULL)
			return (ENOMEM);
		*rwlock = prwlock;
	} else {
		prwlock = __thr_pshared_offpage(rwlock, 1);
		if (prwlock == NULL)
			return (EFAULT);
		prwlock->lock.rw_flags |= USYNC_PROCESS_SHARED;
		*rwlock = THR_PSHARED_PTR;
	}
	return (0);
}

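/*
 * For illustration (sketch, not from the original source): with a
 * PTHREAD_PROCESS_SHARED attribute the caller's pthread_rwlock_t ends up
 * holding only the THR_PSHARED_PTR token while the lock itself lives in
 * the off-page shared mapping, so the else-branch above supports usage
 * along these lines, assuming "lk" points into memory shared between the
 * processes (e.g. a MAP_SHARED mapping):
 *
 *	pthread_rwlockattr_t a;
 *	pthread_rwlockattr_init(&a);
 *	pthread_rwlockattr_setpshared(&a, PTHREAD_PROCESS_SHARED);
 *	pthread_rwlock_init(lk, &a);
 *	pthread_rwlockattr_destroy(&a);
 */
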
int
_thr_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	int ret;

	prwlock = *rwlock;
	if (prwlock == THR_RWLOCK_INITIALIZER)
		ret = 0;
	else if (prwlock == THR_RWLOCK_DESTROYED)
		ret = EINVAL;
	else if (prwlock == THR_PSHARED_PTR) {
		*rwlock = THR_RWLOCK_DESTROYED;
		__thr_pshared_destroy(rwlock);
		ret = 0;
	} else {
		*rwlock = THR_RWLOCK_DESTROYED;
		free(prwlock);
		ret = 0;
	}
	return (ret);
}

static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == THR_RWLOCK_INITIALIZER)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}

int
_thr_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{

	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}

static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	/*
	 * POSIX said the validity of the abstimeout parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0) {
		curthread->rdlock_count++;
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* goto kernel and lock it */
		ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime);
		if (ret != EINTR)
			break;

		/* if interrupted, try to lock it in userland again. */
		if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
			ret = 0;
			break;
		}
	}
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

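/*
 * A sketch of the case the URWLOCK_PREFER_READER escape above guards
 * against (names are illustrative only): thread A already holds lk for
 * reading, thread B is queued for writing, and A asks for a second read
 * lock.  With strict writer preference A would block behind B, while B
 * cannot run until A drops its first read lock, so neither makes progress:
 *
 *	pthread_rwlock_rdlock(&lk);	rdlock_count becomes 1
 *	...				another thread blocks in wrlock(&lk)
 *	pthread_rwlock_rdlock(&lk);	recursive; granted because this
 *					request prefers readers
 */
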
int
_Tthr_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock(pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{
	return (rwlock_rdlock_common(rwlock, abstime));
}

int
_Tthr_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

int
_Tthr_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0)
		prwlock->owner = TID(curthread);
	return (ret);
}

static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	/*
	 * POSIX said the validity of the abstimeout parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0) {
		prwlock->owner = TID(curthread);
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* goto kernel and lock it */
		ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
		if (ret == 0) {
			prwlock->owner = TID(curthread);
			break;
		}

		if (ret != EINTR)
			break;

		/* if interrupted, try to lock it in userland again. */
		if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
			ret = 0;
			prwlock->owner = TID(curthread);
			break;
		}
	}
	return (ret);
}

int
_Tthr_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock(pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{
	return (rwlock_wrlock_common(rwlock, abstime));
}

int
_Tthr_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;
	int32_t state;

	if (*rwlock == THR_PSHARED_PTR) {
		prwlock = __thr_pshared_offpage(rwlock, 0);
		if (prwlock == NULL)
			return (EINVAL);
	} else {
		prwlock = *rwlock;
	}

	if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
		return (EINVAL);

	state = prwlock->lock.rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (__predict_false(prwlock->owner != TID(curthread)))
			return (EPERM);
		prwlock->owner = 0;
	}

	ret = _thr_rwlock_unlock(&prwlock->lock);
	if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
		curthread->rdlock_count--;

	return (ret);
}