2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 1998 Alex Nash
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
36 #include "namespace.h"
38 #include "un-namespace.h"
39 #include "thr_private.h"
/*
 * Compile-time guard: process-shared rwlocks live in a dedicated
 * "off-page" (see __thr_pshared_offpage() below), so the lock structure
 * must fit within a single page.
 */
41 _Static_assert(sizeof(struct pthread_rwlock) <= PAGE_SIZE,
42 "pthread_rwlock is too large for off-page");
/* Export the internal _pthread_* implementations under the public names. */
44 __weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
45 __weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
46 __weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
47 __weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
48 __weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
49 __weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
50 __weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
51 __weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
52 __weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
/* Forward declarations for the lazy-initialization helpers defined below. */
54 static int init_static(struct pthread *thread, pthread_rwlock_t *rwlock);
55 static int init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out);
/*
 * Resolve the user-visible rwlock handle *rwlock to a usable lock object,
 * returned through *rwlock_out.  Fast path: the handle already points at a
 * live lock, so just copy it.  Slow path (predicted unlikely): the handle
 * is a process-shared marker or a static-initializer/destroyed pseudo
 * value, which is delegated to init_rwlock().
 *
 * NOTE(review): this extract is non-contiguous (braces and the trailing
 * return of the fast path are outside the visible lines).
 */
57 static int __always_inline
58 check_and_init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
60 if (__predict_false(*rwlock == THR_PSHARED_PTR ||
61 *rwlock <= THR_RWLOCK_DESTROYED))
62 return (init_rwlock(rwlock, rwlock_out));
63 *rwlock_out = *rwlock;
/*
 * Slow path of check_and_init_rwlock(): map a pseudo-value handle to a
 * real lock.
 *  - THR_PSHARED_PTR: look up the existing process-shared off-page
 *    (second arg 0 = do not allocate).
 *  - THR_RWLOCK_INITIALIZER: perform one-time lazy initialization via
 *    init_static() under the global static-init lock.
 *  - THR_RWLOCK_DESTROYED: handling is outside this extract; presumably
 *    an error return — confirm against the full source.
 * On success the resolved pointer is written to *rwlock_out.
 */
68 init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
70 pthread_rwlock_t prwlock;
73 if (*rwlock == THR_PSHARED_PTR) {
/* Fetch the already-allocated shared page; 0 = lookup only. */
74 prwlock = __thr_pshared_offpage(rwlock, 0);
77 } else if ((prwlock = *rwlock) <= THR_RWLOCK_DESTROYED) {
78 if (prwlock == THR_RWLOCK_INITIALIZER) {
/* Statically-initialized lock: allocate it now, exactly once. */
79 ret = init_static(_get_curthread(), rwlock);
82 } else if (prwlock == THR_RWLOCK_DESTROYED) {
87 *rwlock_out = prwlock;
/*
 * Common initialization for both pthread_rwlock_init() and lazy static
 * initialization.  A process-private lock (the default, or explicit
 * PTHREAD_PROCESS_PRIVATE) is zero-allocated from the heap; a
 * process-shared lock is placed in an off-page (second arg 1 = allocate),
 * flagged USYNC_PROCESS_SHARED for the kernel, and the user handle is set
 * to the THR_PSHARED_PTR marker instead of a direct pointer.
 *
 * NOTE(review): the calloc()-failure and offpage-failure paths fall in
 * lines missing from this extract.
 */
92 rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
94 pthread_rwlock_t prwlock;
96 if (attr == NULL || *attr == NULL ||
97 (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
/* calloc leaves the umtx rwlock state zeroed, i.e. unlocked. */
98 prwlock = calloc(1, sizeof(struct pthread_rwlock));
103 prwlock = __thr_pshared_offpage(rwlock, 1);
/* Tell the kernel umtx layer this lock is shared across processes. */
106 prwlock->lock.rw_flags |= USYNC_PROCESS_SHARED;
107 *rwlock = THR_PSHARED_PTR;
/*
 * Destroy a rwlock.  Dispatches on the handle's pseudo-value:
 *  - THR_RWLOCK_INITIALIZER: never used, nothing was allocated.
 *  - THR_RWLOCK_DESTROYED: already destroyed (error path is in lines
 *    missing from this extract — presumably EINVAL; confirm).
 *  - THR_PSHARED_PTR: mark destroyed, then release the shared off-page.
 *  - otherwise: a heap-allocated private lock; mark destroyed (free()
 *    call falls outside the visible lines).
 */
113 _pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
115 pthread_rwlock_t prwlock;
119 if (prwlock == THR_RWLOCK_INITIALIZER)
121 else if (prwlock == THR_RWLOCK_DESTROYED)
123 else if (prwlock == THR_PSHARED_PTR) {
/* Poison the handle before tearing down the shared mapping. */
124 *rwlock = THR_RWLOCK_DESTROYED;
125 __thr_pshared_destroy(rwlock);
128 *rwlock = THR_RWLOCK_DESTROYED;
/*
 * One-time initialization of a statically-initialized rwlock.  The global
 * _rwlock_static_lock serializes racing threads; the handle is re-checked
 * under the lock so only the first thread actually allocates (classic
 * check-lock-recheck pattern).
 */
136 init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
140 THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);
/* Re-check: another thread may have initialized it before we got the lock. */
142 if (*rwlock == THR_RWLOCK_INITIALIZER)
143 ret = rwlock_init(rwlock, NULL);
147 THR_LOCK_RELEASE(thread, &_rwlock_static_lock);
/* Public initializer: thin wrapper around the common rwlock_init(). */
153 _pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
157 return (rwlock_init(rwlock, attr));
/*
 * Common read-lock path for rdlock and timedrdlock (abstime == NULL means
 * block indefinitely).  Strategy: resolve/lazy-init the lock, attempt a
 * cheap userland acquisition, and only fall back to the kernel
 * (__thr_rwlock_rdlock) when contended.  abstime is validated only after
 * the fast path fails, which POSIX permits.
 *
 * NOTE(review): this extract is non-contiguous; error returns, the
 * kernel-sleep loop structure, and the final return are in missing lines.
 */
161 rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
163 struct pthread *curthread = _get_curthread();
164 pthread_rwlock_t prwlock;
168 ret = check_and_init_rwlock(rwlock, &prwlock);
172 if (curthread->rdlock_count) {
174 * To avoid having to track all the rdlocks held by
175 * a thread or all of the threads that hold a rdlock,
176 * we keep a simple count of all the rdlocks held by
177 * a thread. If a thread holds any rdlocks it is
178 * possible that it is attempting to take a recursive
179 * rdlock. If there are blocked writers and precedence
180 * is given to them, then that would result in the thread
181 * deadlocking. So allowing a thread to take the rdlock
182 * when it already has one or more rdlocks avoids the
183 * deadlock. I hope the reader can follow that logic ;-)
185 flags = URWLOCK_PREFER_READER;
191 * POSIX said the validity of the abstimeout parameter need
192 * not be checked if the lock can be immediately acquired.
194 ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
/* Fast path succeeded: account for the new read hold. */
196 curthread->rdlock_count++;
/* Reject out-of-range nanoseconds only now that we must actually wait. */
200 if (__predict_false(abstime &&
201 (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
205 /* goto kernel and lock it */
206 ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime);
210 /* if interrupted, try to lock it in userland again. */
211 if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
217 curthread->rdlock_count++;
/* Blocking read lock: the common path with no timeout (abstime = NULL). */
222 _pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
224 return (rwlock_rdlock_common(rwlock, NULL));
/* Timed read lock: the common path with an absolute-time deadline. */
228 _pthread_rwlock_timedrdlock(pthread_rwlock_t * __restrict rwlock,
229 const struct timespec * __restrict abstime)
231 return (rwlock_rdlock_common(rwlock, abstime));
/*
 * Non-blocking read lock: resolve/lazy-init the lock and make a single
 * userland attempt — never enters the kernel.  Uses the same
 * prefer-reader escape hatch as rwlock_rdlock_common() when the thread
 * already holds read locks.
 *
 * NOTE(review): error returns and the final return are in lines missing
 * from this extract.
 */
235 _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
237 struct pthread *curthread = _get_curthread();
238 pthread_rwlock_t prwlock;
242 ret = check_and_init_rwlock(rwlock, &prwlock);
246 if (curthread->rdlock_count) {
248 * To avoid having to track all the rdlocks held by
249 * a thread or all of the threads that hold a rdlock,
250 * we keep a simple count of all the rdlocks held by
251 * a thread. If a thread holds any rdlocks it is
252 * possible that it is attempting to take a recursive
253 * rdlock. If there are blocked writers and precedence
254 * is given to them, then that would result in the thread
255 * deadlocking. So allowing a thread to take the rdlock
256 * when it already has one or more rdlocks avoids the
257 * deadlock. I hope the reader can follow that logic ;-)
259 flags = URWLOCK_PREFER_READER;
264 ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
/* Acquired: bump this thread's read-hold count. */
266 curthread->rdlock_count++;
/*
 * Non-blocking write lock: a single userland attempt.  On success the
 * owner TID is recorded so _pthread_rwlock_unlock() can verify that only
 * the writing thread releases a write-held lock.
 */
271 _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
273 struct pthread *curthread = _get_curthread();
274 pthread_rwlock_t prwlock;
277 ret = check_and_init_rwlock(rwlock, &prwlock);
281 ret = _thr_rwlock_trywrlock(&prwlock->lock);
/* Record ownership for the unlock-time sanity check. */
283 prwlock->owner = TID(curthread);
/*
 * Common write-lock path for wrlock and timedwrlock (abstime == NULL
 * means block indefinitely).  Mirrors rwlock_rdlock_common(): userland
 * try first, validate abstime only if that fails (POSIX-permitted), then
 * sleep in the kernel and retry in userland after interruption.  The
 * owner TID is recorded on every successful acquisition.
 *
 * NOTE(review): non-contiguous extract — error returns, loop structure,
 * and the final return are in missing lines.
 */
288 rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
290 struct pthread *curthread = _get_curthread();
291 pthread_rwlock_t prwlock;
294 ret = check_and_init_rwlock(rwlock, &prwlock);
299 * POSIX said the validity of the abstimeout parameter need
300 * not be checked if the lock can be immediately acquired.
302 ret = _thr_rwlock_trywrlock(&prwlock->lock);
304 prwlock->owner = TID(curthread);
/* Reject out-of-range nanoseconds only now that we must actually wait. */
308 if (__predict_false(abstime &&
309 (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
313 /* goto kernel and lock it */
314 ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
316 prwlock->owner = TID(curthread);
323 /* if interrupted, try to lock it in userland again. */
324 if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
326 prwlock->owner = TID(curthread);
/* Blocking write lock: the common path with no timeout (abstime = NULL). */
334 _pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
336 return (rwlock_wrlock_common (rwlock, NULL));
/* Timed write lock: the common path with an absolute-time deadline. */
340 _pthread_rwlock_timedwrlock(pthread_rwlock_t * __restrict rwlock,
341 const struct timespec * __restrict abstime)
343 return (rwlock_wrlock_common (rwlock, abstime));
347 _pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
349 struct pthread *curthread = _get_curthread();
350 pthread_rwlock_t prwlock;
354 if (*rwlock == THR_PSHARED_PTR) {
355 prwlock = __thr_pshared_offpage(rwlock, 0);
362 if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
365 state = prwlock->lock.rw_state;
366 if (state & URWLOCK_WRITE_OWNER) {
367 if (__predict_false(prwlock->owner != TID(curthread)))
372 ret = _thr_rwlock_unlock(&prwlock->lock);
373 if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
374 curthread->rdlock_count--;