/*-
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef _THR_FBSD_UMTX_H_
#define _THR_FBSD_UMTX_H_

/* Lock structures, UMUTEX_*/URWLOCK_* constants, atomics and errno values. */
#include <sys/types.h>
#include <sys/umtx.h>
#include <machine/atomic.h>
#include <errno.h>
#include <stdbool.h>

/* struct umutex has an extra padding word on 32-bit platforms. */
#ifdef __LP64__
#define DEFAULT_UMUTEX  {0,0,{0,0},0,{0,0}}
#else
#define DEFAULT_UMUTEX  {0,0,{0,0},0,0,{0,0}}
#endif
#define DEFAULT_URWLOCK {0,0,0,0,{0,0,0,0}}
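/*
 * The DEFAULT_* macros are brace initializers matching the lock structure
 * layouts; an embedded lock can be initialized statically, e.g.
 * (illustrative only):
 *
 *      static struct umutex  m  = DEFAULT_UMUTEX;
 *      static struct urwlock rw = DEFAULT_URWLOCK;
 */

/* Out-of-line slow paths and helpers, implemented in the matching .c file. */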
int _umtx_op_err(void *, int op, u_long, void *, void *) __hidden;
int __thr_umutex_lock(struct umutex *mtx, uint32_t id) __hidden;
int __thr_umutex_lock_spin(struct umutex *mtx, uint32_t id) __hidden;
int __thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
    const struct timespec *timeout) __hidden;
int __thr_umutex_unlock(struct umutex *mtx, uint32_t id) __hidden;
int __thr_umutex_trylock(struct umutex *mtx) __hidden;
int __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
    uint32_t *oldceiling) __hidden;

void _thr_umutex_init(struct umutex *mtx) __hidden;
void _thr_urwlock_init(struct urwlock *rwl) __hidden;
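/* Low-level wait/wake and userland condition variable primitives. */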
int _thr_umtx_wait(volatile long *mtx, long exp,
    const struct timespec *timeout) __hidden;
int _thr_umtx_wait_uint(volatile u_int *mtx, u_int exp,
    const struct timespec *timeout, int shared) __hidden;
int _thr_umtx_timedwait_uint(volatile u_int *mtx, u_int exp, int clockid,
    const struct timespec *timeout, int shared) __hidden;
int _thr_umtx_wake(volatile void *mtx, int count, int shared) __hidden;
int _thr_ucond_wait(struct ucond *cv, struct umutex *m,
    const struct timespec *timeout, int flags) __hidden;
void _thr_ucond_init(struct ucond *cv) __hidden;
int _thr_ucond_signal(struct ucond *cv) __hidden;
int _thr_ucond_broadcast(struct ucond *cv) __hidden;
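/* Out-of-line rwlock slow paths. */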
int __thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
    const struct timespec *tsp) __hidden;
int __thr_rwlock_wrlock(struct urwlock *rwlock,
    const struct timespec *tsp) __hidden;
int __thr_rwlock_unlock(struct urwlock *rwlock) __hidden;
/* Internal use only. */
void _thr_rwl_rdlock(struct urwlock *rwlock) __hidden;
void _thr_rwl_wrlock(struct urwlock *rwlock) __hidden;
void _thr_rwl_unlock(struct urwlock *rwlock) __hidden;
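/*
 * Inline fast paths.  Each tries to acquire or release the lock with a
 * single atomic compare-and-set on the lock word and only falls back to
 * the out-of-line slow path (and ultimately the kernel) when that fails.
 */

/*
 * Try to lock a umutex without blocking.  Returns 0 on success,
 * EOWNERDEAD when taking over a robust mutex whose owner died, and
 * ENOTRECOVERABLE if the mutex has been marked unrecoverable.
 * Priority-protected mutexes go through the kernel trylock slow path;
 * anything else that is already owned fails with EBUSY.
 */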
static inline int
_thr_umutex_trylock(struct umutex *mtx, uint32_t id)
{

        if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
                return (0);
        if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_OWNERDEAD) &&
            atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_RB_OWNERDEAD,
            id | UMUTEX_CONTESTED))
                return (EOWNERDEAD);
        if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_NOTRECOV))
                return (ENOTRECOVERABLE);
        if ((mtx->m_flags & UMUTEX_PRIO_PROTECT) == 0)
                return (EBUSY);
        return (__thr_umutex_trylock(mtx));
}
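/*
 * Userland-only trylock used by the lock fast paths below.  Unlike
 * _thr_umutex_trylock() it never calls into the kernel, even for
 * priority-protected or priority-inheriting mutexes.
 */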
static inline int
_thr_umutex_trylock2(struct umutex *mtx, uint32_t id)
{

        if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id) != 0)
                return (0);
        if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED &&
            __predict_true((mtx->m_flags & (UMUTEX_PRIO_PROTECT |
            UMUTEX_PRIO_INHERIT)) == 0) &&
            atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED,
            id | UMUTEX_CONTESTED))
                return (0);
        if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_OWNERDEAD) &&
            atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_RB_OWNERDEAD,
            id | UMUTEX_CONTESTED))
                return (EOWNERDEAD);
        if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_NOTRECOV))
                return (ENOTRECOVERABLE);
        return (EBUSY);
}
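/*
 * Lock a umutex, blocking in the kernel when the fast path fails.  The
 * "id" argument is the lock-word value identifying the new owner; in
 * libthr this is typically the calling thread's LWP id.  Illustrative
 * pairing only (the "curthread" accessor is a placeholder):
 *
 *      uint32_t id = (uint32_t)curthread->tid;
 *      _thr_umutex_lock(&m, id);
 *      ...critical section...
 *      _thr_umutex_unlock(&m, id);
 */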
static inline int
_thr_umutex_lock(struct umutex *mtx, uint32_t id)
{

        if (_thr_umutex_trylock2(mtx, id) == 0)
                return (0);
        return (__thr_umutex_lock(mtx, id));
}
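/* As _thr_umutex_lock(), but the slow path spins before sleeping. */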
static inline int
_thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{

        if (_thr_umutex_trylock2(mtx, id) == 0)
                return (0);
        return (__thr_umutex_lock_spin(mtx, id));
}
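/* As _thr_umutex_lock(), but with a timeout handled by the slow path. */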
static inline int
_thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
    const struct timespec *timeout)
{

        if (_thr_umutex_trylock2(mtx, id) == 0)
                return (0);
        return (__thr_umutex_timedlock(mtx, id, timeout));
}
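/*
 * Unlock a umutex.  Returns EPERM if the caller does not own the lock.
 * If the lock was contested, waiters are woken via UMTX_OP_MUTEX_WAKE2
 * unless "defer" is non-NULL, in which case the wakeup is left to the
 * caller.  Priority-inherit/protect mutexes fall back to the kernel
 * slow path when the fast release fails, and mutexes marked
 * UMUTEX_NONCONSISTENT are left in the not-recoverable state.
 */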
static inline int
_thr_umutex_unlock2(struct umutex *mtx, uint32_t id, int *defer)
{
        uint32_t flags, owner;
        bool noncst;

        flags = mtx->m_flags;
        noncst = (flags & UMUTEX_NONCONSISTENT) != 0;

        if ((flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0) {
                if (atomic_cmpset_rel_32(&mtx->m_owner, id, noncst ?
                    UMUTEX_RB_NOTRECOV : UMUTEX_UNOWNED))
                        return (0);
                return (__thr_umutex_unlock(mtx, id));
        }

        do {
                owner = mtx->m_owner;
                if (__predict_false((owner & ~UMUTEX_CONTESTED) != id))
                        return (EPERM);
        } while (__predict_false(!atomic_cmpset_rel_32(&mtx->m_owner, owner,
            noncst ? UMUTEX_RB_NOTRECOV : UMUTEX_UNOWNED)));
        if ((owner & UMUTEX_CONTESTED) != 0) {
                if (defer == NULL || noncst)
                        (void)_umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE2,
                            flags, 0, 0);
                else
                        *defer = 1;
        }
        return (0);
}
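/* Unlock a umutex, always waking contested waiters immediately. */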
static inline int
_thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{

        return (_thr_umutex_unlock2(mtx, id, NULL));
}
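/*
 * Try to take a read lock without blocking.  Fails with EBUSY if a
 * writer owns the lock (or, unless reader preference is set, if writers
 * are waiting) and with EAGAIN if the reader count would overflow.
 */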
static inline int
_thr_rwlock_tryrdlock(struct urwlock *rwlock, int flags)
{
        int32_t state, wrflags;

        if ((flags & URWLOCK_PREFER_READER) != 0 ||
            (rwlock->rw_flags & URWLOCK_PREFER_READER) != 0)
                wrflags = URWLOCK_WRITE_OWNER;
        else
                wrflags = URWLOCK_WRITE_OWNER | URWLOCK_WRITE_WAITERS;
        state = rwlock->rw_state;
        while (!(state & wrflags)) {
                if (__predict_false(URWLOCK_READER_COUNT(state) ==
                    URWLOCK_MAX_READERS))
                        return (EAGAIN);
                if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state + 1))
                        return (0);
                state = rwlock->rw_state;
        }

        return (EBUSY);
}
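/*
 * Try to take the write lock without blocking; fails with EBUSY if the
 * lock is owned by a writer or held by any readers.
 */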
static inline int
_thr_rwlock_trywrlock(struct urwlock *rwlock)
{
        int32_t state;

        state = rwlock->rw_state;
        while ((state & URWLOCK_WRITE_OWNER) == 0 &&
            URWLOCK_READER_COUNT(state) == 0) {
                if (atomic_cmpset_acq_32(&rwlock->rw_state, state,
                    state | URWLOCK_WRITE_OWNER))
                        return (0);
                state = rwlock->rw_state;
        }

        return (EBUSY);
}
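/* Read-lock an rwlock, entering the kernel slow path on contention. */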
static inline int
_thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp)
{

        if (_thr_rwlock_tryrdlock(rwlock, flags) == 0)
                return (0);
        return (__thr_rwlock_rdlock(rwlock, flags, tsp));
}
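/* Write-lock an rwlock, entering the kernel slow path on contention. */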
static inline int
_thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp)
{

        if (_thr_rwlock_trywrlock(rwlock) == 0)
                return (0);
        return (__thr_rwlock_wrlock(rwlock, tsp));
}
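/*
 * Unlock an rwlock held for either reading or writing.  The fast path
 * handles the uncontested cases; when waiters must be woken, the kernel
 * slow path finishes the release.  Returns EPERM if the lock is not
 * actually held.
 */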
static inline int
_thr_rwlock_unlock(struct urwlock *rwlock)
{
        int32_t state;

        state = rwlock->rw_state;
        if ((state & URWLOCK_WRITE_OWNER) != 0) {
                if (atomic_cmpset_rel_32(&rwlock->rw_state,
                    URWLOCK_WRITE_OWNER, 0))
                        return (0);
        } else {
                for (;;) {
                        if (__predict_false(URWLOCK_READER_COUNT(state) == 0))
                                return (EPERM);
                        if (!((state & (URWLOCK_WRITE_WAITERS |
                            URWLOCK_READ_WAITERS)) != 0 &&
                            URWLOCK_READER_COUNT(state) == 1)) {
                                if (atomic_cmpset_rel_32(&rwlock->rw_state,
                                    state, state - 1))
                                        return (0);
                                state = rwlock->rw_state;
                        } else {
                                break;
                        }
                }
        }
        return (__thr_rwlock_unlock(rwlock));
}

#endif /* _THR_FBSD_UMTX_H_ */