2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 #include "thr_private.h"
33 #ifndef HAS__UMTX_OP_ERR
/*
 * Fallback for kernels without the _umtx_op_err() entry point: wrap the
 * plain _umtx_op(2) syscall, which signals failure via -1/errno.
 * NOTE(review): the return statements are elided from this view — the
 * wrapper presumably returns errno on failure and 0 on success; confirm
 * against the full file.
 */
34 int _umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2)
37 if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1)
/*
 * Reset *mtx to the default (unowned) umutex state by structure
 * assignment from a static template initialized with DEFAULT_UMUTEX.
 */
44 _thr_umutex_init(struct umutex *mtx)
46 static const struct umutex default_mtx = DEFAULT_UMUTEX;
/*
 * Reset *rwl to the default (unlocked) urwlock state by structure
 * assignment from a static template initialized with DEFAULT_URWLOCK.
 */
52 _thr_urwlock_init(struct urwlock *rwl)
54 static const struct urwlock default_rwl = DEFAULT_URWLOCK;
/*
 * Slow-path acquire of a umutex for thread `id`, blocking in the kernel
 * when the lock is held.  Several lines (loop header, returns, braces)
 * are elided from this view.
 */
60 __thr_umutex_lock(struct umutex *mtx, uint32_t id)
/*
 * Priority-protect / priority-inherit mutexes must always be locked by
 * the kernel so it can track priorities.
 */
64 if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
65 return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));
/* Fast path: owner field free (only the contested bit may be set). */
69 if ((owner & ~UMUTEX_CONTESTED) == 0 &&
70 atomic_cmpset_acq_32(&mtx->m_owner, owner, id | owner))
/*
 * Robust mutex whose owner died: claim it with the contested bit set
 * (caller presumably observes EOWNERDEAD — return line not visible here).
 */
72 if (owner == UMUTEX_RB_OWNERDEAD &&
73 atomic_cmpset_acq_32(&mtx->m_owner, owner,
74 id | UMUTEX_CONTESTED))
/* Robust mutex marked unrecoverable: cannot be locked again. */
76 if (owner == UMUTEX_RB_NOTRECOV)
77 return (ENOTRECOVERABLE);
/* Contended: sleep in the kernel until the owner releases, then retry. */
80 _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
/* Number of userspace spin iterations before falling back to sleeping. */
84 #define SPINLOOPS 1000
/*
 * Like __thr_umutex_lock(), but spin in userspace for a bounded number of
 * CAS attempts before blocking in the kernel.  Loop headers, the spin
 * counter, and several returns are elided from this view.
 */
87 __thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
/*
 * NOTE(review): this early return presumably handles the non-SMP case,
 * where spinning is pointless — the guarding condition is not visible.
 */
93 return (__thr_umutex_lock(mtx, id));
/* PP/PI mutexes always go through the kernel. */
94 if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
95 return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));
100 owner = mtx->m_owner;
/* Try to grab the lock while spinning. */
101 if ((owner & ~UMUTEX_CONTESTED) == 0 &&
102 atomic_cmpset_acq_32(&mtx->m_owner, owner,
/* Dead robust owner: claim with the contested bit set. */
105 if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) &&
106 atomic_cmpset_acq_32(&mtx->m_owner, owner,
107 id | UMUTEX_CONTESTED))
109 if (__predict_false(owner == UMUTEX_RB_NOTRECOV))
110 return (ENOTRECOVERABLE);
/* Spin budget exhausted: sleep in the kernel, then retry. */
115 _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
/*
 * Acquire a umutex with an optional absolute CLOCK_REALTIME deadline.
 * A NULL abstime means wait forever (tm_p is presumably set NULL / tm_size
 * 0 in elided lines).  The timeout size is smuggled through the uaddr
 * argument of _umtx_op(2), per that syscall's convention for timed ops.
 */
120 __thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
121 const struct timespec *abstime)
123 struct _umtx_time *tm_p, timeout;
128 if (abstime == NULL) {
/* Package the deadline as an absolute-time _umtx_time. */
132 timeout._clockid = CLOCK_REALTIME;
133 timeout._flags = UMTX_ABSTIME;
134 timeout._timeout = *abstime;
136 tm_size = sizeof(timeout);
/* Plain mutexes: try the userspace fast path before the kernel. */
140 if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT |
141 UMUTEX_PRIO_INHERIT)) == 0) {
143 owner = mtx->m_owner;
144 if ((owner & ~UMUTEX_CONTESTED) == 0 &&
145 atomic_cmpset_acq_32(&mtx->m_owner, owner,
/* Dead robust owner: claim with the contested bit set. */
148 if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) &&
149 atomic_cmpset_acq_32(&mtx->m_owner, owner,
150 id | UMUTEX_CONTESTED))
152 if (__predict_false(owner == UMUTEX_RB_NOTRECOV))
153 return (ENOTRECOVERABLE);
/* Contended plain mutex: timed sleep, then retry (loop elided). */
155 ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0,
156 (void *)tm_size, __DECONST(void *, tm_p));
/* PP/PI mutexes: let the kernel do the whole timed lock. */
158 ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0,
159 (void *)tm_size, __DECONST(void *, tm_p));
/* EOWNERDEAD/ENOTRECOVERABLE are terminal outcomes, not retries. */
160 if (ret == 0 || ret == EOWNERDEAD ||
161 ret == ENOTRECOVERABLE)
164 if (ret == ETIMEDOUT)
/* Slow-path unlock: hand the umutex back to the kernel to release. */
171 __thr_umutex_unlock(struct umutex *mtx)
174 return (_umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0));
/* Non-blocking acquire via the kernel; returns an error code on failure. */
178 __thr_umutex_trylock(struct umutex *mtx)
181 return (_umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0));
/*
 * Set the priority ceiling of a PRIO_PROTECT umutex; the previous
 * ceiling is stored through oldceiling if non-NULL (kernel-side contract).
 */
185 __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
186 uint32_t *oldceiling)
189 return (_umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0));
/*
 * Sleep until *mtx no longer equals `id`, with an optional relative
 * timeout.  A zero or negative timeout fails immediately (the early
 * return value is elided — presumably ETIMEDOUT; confirm).
 */
193 _thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout)
196 if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
197 timeout->tv_nsec <= 0)))
199 return (_umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
200 __DECONST(void*, timeout)));
/*
 * 32-bit variant of _thr_umtx_wait().  `shared` selects the
 * process-shared op over the cheaper process-private one.  Zero or
 * negative relative timeouts fail immediately (early return elided —
 * presumably ETIMEDOUT; confirm).
 */
204 _thr_umtx_wait_uint(volatile u_int *mtx, u_int id,
205 const struct timespec *timeout, int shared)
208 if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
209 timeout->tv_nsec <= 0)))
211 return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
212 UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
213 __DECONST(void*, timeout)));
/*
 * Like _thr_umtx_wait_uint(), but with an absolute deadline measured on
 * the caller-supplied clockid.  NULL abstime waits forever (tm_p is
 * presumably set NULL in elided lines).  The timeout size travels in the
 * uaddr slot, per the _umtx_op(2) convention for timed operations.
 */
217 _thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid,
218 const struct timespec *abstime, int shared)
220 struct _umtx_time *tm_p, timeout;
223 if (abstime == NULL) {
/* Package the deadline as an absolute-time _umtx_time. */
227 timeout._clockid = clockid;
228 timeout._flags = UMTX_ABSTIME;
229 timeout._timeout = *abstime;
231 tm_size = sizeof(timeout);
234 return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
235 UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id,
236 (void *)tm_size, __DECONST(void *, tm_p)));
/*
 * Wake up to nr_wakeup threads sleeping on *mtx; `shared` selects the
 * process-shared wake op over the process-private one.
 */
240 _thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared)
243 return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
244 UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE, nr_wakeup, 0, 0));
/* Zero the whole ucond; all-zeroes is its valid initial state. */
248 _thr_ucond_init(struct ucond *cv)
251 bzero(cv, sizeof(struct ucond));
/*
 * Kernel condvar wait: UMTX_OP_CV_WAIT atomically releases umutex *m and
 * sleeps on *cv (relative timeout, NULL = forever).  The caller must
 * hold *m on entry; on return the mutex has been released.
 */
255 _thr_ucond_wait(struct ucond *cv, struct umutex *m,
256 const struct timespec *timeout, int flags)
258 struct pthread *curthread;
/*
 * A zero/negative timeout would expire immediately: still honor the
 * condvar contract by dropping the mutex before bailing out (the early
 * return value is elided — presumably ETIMEDOUT; confirm).
 */
260 if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
261 timeout->tv_nsec <= 0))) {
262 curthread = _get_curthread();
263 _thr_umutex_unlock(m, TID(curthread));
266 return (_umtx_op_err(cv, UMTX_OP_CV_WAIT, flags, m,
267 __DECONST(void*, timeout)));
/*
 * Wake one waiter.  The unlocked c_has_waiters test skips the syscall
 * entirely in the common uncontended case.
 */
271 _thr_ucond_signal(struct ucond *cv)
274 if (!cv->c_has_waiters)
276 return (_umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL));
/*
 * Wake all waiters.  Same unlocked c_has_waiters fast path as
 * _thr_ucond_signal() to avoid a needless syscall.
 */
280 _thr_ucond_broadcast(struct ucond *cv)
283 if (!cv->c_has_waiters)
285 return (_umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL));
/*
 * Kernel read-lock with an optional absolute CLOCK_REALTIME deadline.
 * NULL tsp waits forever (tm_p is presumably NULLed in elided lines).
 * The timeout size rides in the uaddr slot of _umtx_op(2).
 */
289 __thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
290 const struct timespec *tsp)
292 struct _umtx_time timeout, *tm_p;
/* Package the deadline as an absolute-time _umtx_time. */
299 timeout._timeout = *tsp;
300 timeout._flags = UMTX_ABSTIME;
301 timeout._clockid = CLOCK_REALTIME;
303 tm_size = sizeof(timeout);
305 return (_umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags,
306 (void *)tm_size, tm_p));
/*
 * Kernel write-lock with an optional absolute CLOCK_REALTIME deadline;
 * mirrors __thr_rwlock_rdlock() (NULL tsp handling elided from view).
 */
310 __thr_rwlock_wrlock(struct urwlock *rwlock, const struct timespec *tsp)
312 struct _umtx_time timeout, *tm_p;
/* Package the deadline as an absolute-time _umtx_time. */
319 timeout._timeout = *tsp;
320 timeout._flags = UMTX_ABSTIME;
321 timeout._clockid = CLOCK_REALTIME;
323 tm_size = sizeof(timeout);
325 return (_umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, (void *)tm_size,
/* Release a read or write hold on the urwlock via the kernel. */
330 __thr_rwlock_unlock(struct urwlock *rwlock)
333 return (_umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL));
/*
 * Internal read-lock that must not fail: try the userspace fast path,
 * otherwise block in the kernel, and PANIC on any unexpected error
 * (retry loop and the specific error test are elided from this view).
 */
337 _thr_rwl_rdlock(struct urwlock *rwlock)
342 if (_thr_rwlock_tryrdlock(rwlock, URWLOCK_PREFER_READER) == 0)
344 ret = __thr_rwlock_rdlock(rwlock, URWLOCK_PREFER_READER, NULL);
348 PANIC("rdlock error");
/*
 * Internal write-lock that must not fail; same structure as
 * _thr_rwl_rdlock() (retry loop and error test elided from this view).
 */
353 _thr_rwl_wrlock(struct urwlock *rwlock)
358 if (_thr_rwlock_trywrlock(rwlock) == 0)
360 ret = __thr_rwlock_wrlock(rwlock, NULL);
364 PANIC("wrlock error");
/* Internal unlock that must not fail: PANIC on any error return. */
369 _thr_rwl_unlock(struct urwlock *rwlock)
372 if (_thr_rwlock_unlock(rwlock))
373 PANIC("unlock error");