/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
30 #include "thr_private.h"
33 #ifndef HAS__UMTX_OP_ERR
34 int _umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2)
36 if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1)
43 _thr_umutex_init(struct umutex *mtx)
45 static struct umutex default_mtx = DEFAULT_UMUTEX;
51 _thr_urwlock_init(struct urwlock *rwl)
53 static struct urwlock default_rwl = DEFAULT_URWLOCK;
58 __thr_umutex_lock(struct umutex *mtx, uint32_t id)
62 if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
65 _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
68 if ((owner & ~UMUTEX_CONTESTED) == 0 &&
69 atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
74 return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
77 #define SPINLOOPS 1000
80 __thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
85 return __thr_umutex_lock(mtx, id);
87 if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
89 int count = SPINLOOPS;
92 if ((owner & ~UMUTEX_CONTESTED) == 0) {
93 if (atomic_cmpset_acq_32(
103 _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
107 return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
111 __thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
112 const struct timespec *ets)
114 struct timespec timo, cts;
118 clock_gettime(CLOCK_REALTIME, &cts);
119 TIMESPEC_SUB(&timo, ets, &cts);
125 if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
128 ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, &timo);
130 /* now try to lock it */
131 owner = mtx->m_owner;
132 if ((owner & ~UMUTEX_CONTESTED) == 0 &&
133 atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
136 ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, &timo);
140 if (ret == ETIMEDOUT)
142 clock_gettime(CLOCK_REALTIME, &cts);
143 TIMESPEC_SUB(&timo, ets, &cts);
144 if (timo.tv_sec < 0 || (timo.tv_sec == 0 && timo.tv_nsec == 0)) {
153 __thr_umutex_unlock(struct umutex *mtx, uint32_t id)
155 static int wake2_avail = 0;
157 if (__predict_false(wake2_avail == 0)) {
158 struct umutex test = DEFAULT_UMUTEX;
160 if (_umtx_op(&test, UMTX_OP_MUTEX_WAKE2, test.m_flags, 0, 0) == -1)
166 if (wake2_avail != 1)
169 uint32_t flags = mtx->m_flags;
171 if ((flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
174 owner = mtx->m_owner;
175 if (__predict_false((owner & ~UMUTEX_CONTESTED) != id))
177 } while (__predict_false(!atomic_cmpset_rel_32(&mtx->m_owner,
178 owner, UMUTEX_UNOWNED)));
179 if ((owner & UMUTEX_CONTESTED))
180 (void)_umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE2, flags, 0, 0);
184 return _umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0);
188 __thr_umutex_trylock(struct umutex *mtx)
190 return _umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0);
194 __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
195 uint32_t *oldceiling)
197 return _umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0);
201 _thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout)
203 if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
204 timeout->tv_nsec <= 0)))
206 return _umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
207 __DECONST(void*, timeout));
211 _thr_umtx_wait_uint(volatile u_int *mtx, u_int id, const struct timespec *timeout, int shared)
213 if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
214 timeout->tv_nsec <= 0)))
216 return _umtx_op_err(__DEVOLATILE(void *, mtx),
217 shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
218 __DECONST(void*, timeout));
222 _thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid,
223 const struct timespec *abstime, int shared)
225 struct timespec ts, ts2, *tsp;
227 if (abstime != NULL) {
228 clock_gettime(clockid, &ts);
229 TIMESPEC_SUB(&ts2, abstime, &ts);
230 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
236 return _umtx_op_err(__DEVOLATILE(void *, mtx),
237 shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, NULL,
242 _thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared)
244 return _umtx_op_err(__DEVOLATILE(void *, mtx), shared ? UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE,
249 _thr_ucond_init(struct ucond *cv)
251 bzero(cv, sizeof(struct ucond));
255 _thr_ucond_wait(struct ucond *cv, struct umutex *m,
256 const struct timespec *timeout, int flags)
258 if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
259 timeout->tv_nsec <= 0))) {
260 struct pthread *curthread = _get_curthread();
261 _thr_umutex_unlock(m, TID(curthread));
264 return _umtx_op_err(cv, UMTX_OP_CV_WAIT, flags,
265 m, __DECONST(void*, timeout));
269 _thr_ucond_signal(struct ucond *cv)
271 if (!cv->c_has_waiters)
273 return _umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL);
277 _thr_ucond_broadcast(struct ucond *cv)
279 if (!cv->c_has_waiters)
281 return _umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL);
285 __thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp)
287 return _umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags, NULL, tsp);
291 __thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp)
293 return _umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, NULL, tsp);
297 __thr_rwlock_unlock(struct urwlock *rwlock)
299 return _umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL);
303 _thr_rwl_rdlock(struct urwlock *rwlock)
308 if (_thr_rwlock_tryrdlock(rwlock, URWLOCK_PREFER_READER) == 0)
310 ret = __thr_rwlock_rdlock(rwlock, URWLOCK_PREFER_READER, NULL);
314 PANIC("rdlock error");
/*
 * Internal-use write lock that must not fail: try the userland fast
 * path first, then block in the kernel, retrying on EINTR.  Any other
 * error is a library invariant violation and panics.
 */
void
_thr_rwl_wrlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		/* Userland fast path. */
		if (_thr_rwlock_trywrlock(rwlock) == 0)
			return;
		ret = __thr_rwlock_wrlock(rwlock, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("wrlock error");
	}
}
/*
 * Internal-use unlock; a failure here means the lock was not held and
 * indicates library corruption, so panic rather than return an error.
 */
void
_thr_rwl_unlock(struct urwlock *rwlock)
{
	if (_thr_rwlock_unlock(rwlock))
		PANIC("unlock error");
}